From 6679f0bd973bf4a6c98f7ddd5c0cb6830766394e Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Thu, 19 Feb 2026 13:52:55 -0500 Subject: [PATCH 001/201] chore(rust/op-reth): op-reth v1.11.0 (#19247) --- rust/Cargo.lock | 32 +++++++++++----------- rust/op-reth/bin/Cargo.toml | 2 +- rust/op-reth/crates/chainspec/Cargo.toml | 2 +- rust/op-reth/crates/cli/Cargo.toml | 2 +- rust/op-reth/crates/consensus/Cargo.toml | 2 +- rust/op-reth/crates/evm/Cargo.toml | 2 +- rust/op-reth/crates/exex/Cargo.toml | 2 +- rust/op-reth/crates/flashblocks/Cargo.toml | 2 +- rust/op-reth/crates/hardforks/Cargo.toml | 2 +- rust/op-reth/crates/node/Cargo.toml | 2 +- rust/op-reth/crates/payload/Cargo.toml | 2 +- rust/op-reth/crates/primitives/Cargo.toml | 2 +- rust/op-reth/crates/reth/Cargo.toml | 2 +- rust/op-reth/crates/rpc/Cargo.toml | 2 +- rust/op-reth/crates/storage/Cargo.toml | 2 +- rust/op-reth/crates/trie/Cargo.toml | 2 +- rust/op-reth/crates/txpool/Cargo.toml | 2 +- 17 files changed, 32 insertions(+), 32 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 7c9cdd8514be4..97933579c5c4a 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -8004,7 +8004,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.10.2" +version = "1.11.0" dependencies = [ "clap", "reth-cli-util", @@ -11192,7 +11192,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-primitives", "reth-chainspec", @@ -11233,7 +11233,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-chains", "alloy-consensus", @@ -11261,7 +11261,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11311,7 +11311,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.10.2" +version = "1.11.0" dependencies = [ 
"alloy-chains", "alloy-consensus", @@ -11342,7 +11342,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11371,7 +11371,7 @@ dependencies = [ [[package]] name = "reth-optimism-exex" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11398,7 +11398,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11436,7 +11436,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -11446,7 +11446,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11513,7 +11513,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11552,7 +11552,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11579,7 +11579,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11650,7 +11650,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "reth-codecs", @@ -11662,7 +11662,7 @@ dependencies = [ [[package]] name = "reth-optimism-trie" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11707,7 +11707,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.10.2" +version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", diff --git a/rust/op-reth/bin/Cargo.toml 
b/rust/op-reth/bin/Cargo.toml index ee43fa0c0328f..e3cb2e67f8b1a 100644 --- a/rust/op-reth/bin/Cargo.toml +++ b/rust/op-reth/bin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "op-reth" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/chainspec/Cargo.toml b/rust/op-reth/crates/chainspec/Cargo.toml index 0922f102a93f1..390bd9b2a9da5 100644 --- a/rust/op-reth/crates/chainspec/Cargo.toml +++ b/rust/op-reth/crates/chainspec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-chainspec" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/cli/Cargo.toml b/rust/op-reth/crates/cli/Cargo.toml index 2523a6c1e19e8..8a62b69f4e882 100644 --- a/rust/op-reth/crates/cli/Cargo.toml +++ b/rust/op-reth/crates/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-cli" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/consensus/Cargo.toml b/rust/op-reth/crates/consensus/Cargo.toml index 717620cc0a1b4..428116516392b 100644 --- a/rust/op-reth/crates/consensus/Cargo.toml +++ b/rust/op-reth/crates/consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-consensus" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/evm/Cargo.toml b/rust/op-reth/crates/evm/Cargo.toml index cf8e964af2f16..cb8f589c1ef20 100644 --- a/rust/op-reth/crates/evm/Cargo.toml +++ b/rust/op-reth/crates/evm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-evm" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/exex/Cargo.toml b/rust/op-reth/crates/exex/Cargo.toml index 
ccfdb0b202c34..67216d39de61e 100644 --- a/rust/op-reth/crates/exex/Cargo.toml +++ b/rust/op-reth/crates/exex/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-exex" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/flashblocks/Cargo.toml b/rust/op-reth/crates/flashblocks/Cargo.toml index e6b02a1d72c2a..34dcb42bee487 100644 --- a/rust/op-reth/crates/flashblocks/Cargo.toml +++ b/rust/op-reth/crates/flashblocks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-flashblocks" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/hardforks/Cargo.toml b/rust/op-reth/crates/hardforks/Cargo.toml index 4cf0eff3e3001..a6fe343a4dbc1 100644 --- a/rust/op-reth/crates/hardforks/Cargo.toml +++ b/rust/op-reth/crates/hardforks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-forks" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/node/Cargo.toml b/rust/op-reth/crates/node/Cargo.toml index 78e851e081f49..ca8684a9969eb 100644 --- a/rust/op-reth/crates/node/Cargo.toml +++ b/rust/op-reth/crates/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-node" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/payload/Cargo.toml b/rust/op-reth/crates/payload/Cargo.toml index 15d75620e191b..38014ce21684b 100644 --- a/rust/op-reth/crates/payload/Cargo.toml +++ b/rust/op-reth/crates/payload/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-payload-builder" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/primitives/Cargo.toml 
b/rust/op-reth/crates/primitives/Cargo.toml index 07e9c401f089b..99e7841e0789a 100644 --- a/rust/op-reth/crates/primitives/Cargo.toml +++ b/rust/op-reth/crates/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-primitives" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/reth/Cargo.toml b/rust/op-reth/crates/reth/Cargo.toml index 0b3ebbd6f6901..495a9e6ff44ab 100644 --- a/rust/op-reth/crates/reth/Cargo.toml +++ b/rust/op-reth/crates/reth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-op" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/rpc/Cargo.toml b/rust/op-reth/crates/rpc/Cargo.toml index 04a40be4fffb3..d807407f2e6f1 100644 --- a/rust/op-reth/crates/rpc/Cargo.toml +++ b/rust/op-reth/crates/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-rpc" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/storage/Cargo.toml b/rust/op-reth/crates/storage/Cargo.toml index 94529f8e249a4..3f0a834e95d93 100644 --- a/rust/op-reth/crates/storage/Cargo.toml +++ b/rust/op-reth/crates/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-storage" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/trie/Cargo.toml b/rust/op-reth/crates/trie/Cargo.toml index 3cf894c579b7f..e4be1c916674f 100644 --- a/rust/op-reth/crates/trie/Cargo.toml +++ b/rust/op-reth/crates/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-trie" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/txpool/Cargo.toml b/rust/op-reth/crates/txpool/Cargo.toml 
index 9636d56efb2ca..f31b76af07f22 100644 --- a/rust/op-reth/crates/txpool/Cargo.toml +++ b/rust/op-reth/crates/txpool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-txpool" -version = "1.10.2" +version = "1.11.0" edition.workspace = true rust-version.workspace = true license.workspace = true From 7f450c4d8adaad6e6a5f95288673776e9076a028 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Thu, 19 Feb 2026 21:02:33 -0500 Subject: [PATCH 002/201] feat(flashblocks): implement speculative flashblock building (#18995) --- rust/op-reth/crates/flashblocks/src/cache.rs | 770 +++++++++++++++++- rust/op-reth/crates/flashblocks/src/lib.rs | 9 +- .../crates/flashblocks/src/pending_state.rs | 233 ++++++ .../op-reth/crates/flashblocks/src/service.rs | 130 ++- rust/op-reth/crates/flashblocks/src/worker.rs | 114 ++- .../crates/flashblocks/tests/it/harness.rs | 439 ++++++++++ .../crates/flashblocks/tests/it/main.rs | 2 + .../crates/flashblocks/tests/it/service.rs | 288 +++++++ 8 files changed, 1937 insertions(+), 48 deletions(-) create mode 100644 rust/op-reth/crates/flashblocks/src/pending_state.rs create mode 100644 rust/op-reth/crates/flashblocks/tests/it/harness.rs create mode 100644 rust/op-reth/crates/flashblocks/tests/it/service.rs diff --git a/rust/op-reth/crates/flashblocks/src/cache.rs b/rust/op-reth/crates/flashblocks/src/cache.rs index 0ddc2e19adfba..8abe72e8e45fa 100644 --- a/rust/op-reth/crates/flashblocks/src/cache.rs +++ b/rust/op-reth/crates/flashblocks/src/cache.rs @@ -5,12 +5,16 @@ use crate::{ FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, + pending_state::PendingBlockState, sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, + validation::{CanonicalBlockReconciler, ReconciliationStrategy, ReorgDetector}, worker::BuildArgs, }; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; -use reth_primitives_traits::{NodePrimitives, Recovered, SignedTransaction}; +use reth_primitives_traits::{ + NodePrimitives, Recovered, 
SignedTransaction, transaction::TxHashRef, +}; use reth_revm::cached::CachedReads; use ringbuffer::{AllocRingBuffer, RingBuffer}; use tokio::sync::broadcast; @@ -37,6 +41,8 @@ pub(crate) struct SequenceManager { /// Ring buffer of recently completed sequences bundled with their decoded transactions (FIFO, /// size 3) completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence, Vec>>)>, + /// Cached minimum block number currently present in `completed_cache`. + cached_min_block_number: Option, /// Broadcast channel for completed sequences block_broadcaster: broadcast::Sender, /// Whether to compute state roots when building blocks @@ -51,6 +57,7 @@ impl SequenceManager { pending: FlashBlockPendingSequence::new(), pending_transactions: Vec::new(), completed_cache: AllocRingBuffer::new(CACHE_SIZE), + cached_min_block_number: None, block_broadcaster, compute_state_root, } @@ -101,7 +108,7 @@ impl SequenceManager { // Bundle completed sequence with its decoded transactions and push to cache // Ring buffer automatically evicts oldest entry when full let txs = std::mem::take(&mut self.pending_transactions); - self.completed_cache.enqueue((completed, txs)); + self.push_completed_sequence(completed, txs); // ensure cache is wiped on new flashblock let _ = self.pending.take_cached_reads(); @@ -113,6 +120,36 @@ impl SequenceManager { Ok(()) } + /// Pushes a completed sequence into the cache and maintains cached min block-number metadata. 
+ fn push_completed_sequence( + &mut self, + completed: FlashBlockCompleteSequence, + txs: Vec>>, + ) { + let block_number = completed.block_number(); + let evicted_block_number = if self.completed_cache.is_full() { + self.completed_cache.front().map(|(seq, _)| seq.block_number()) + } else { + None + }; + + self.completed_cache.enqueue((completed, txs)); + + self.cached_min_block_number = match self.cached_min_block_number { + None => Some(block_number), + Some(current_min) if block_number < current_min => Some(block_number), + Some(current_min) if Some(current_min) == evicted_block_number => { + self.recompute_cache_min_block_number() + } + Some(current_min) => Some(current_min), + }; + } + + /// Recomputes the minimum block number in `completed_cache`. + fn recompute_cache_min_block_number(&self) -> Option { + self.completed_cache.iter().map(|(seq, _)| seq.block_number()).min() + } + /// Returns the current pending sequence for inspection. pub(crate) const fn pending(&self) -> &FlashBlockPendingSequence { &self.pending @@ -123,30 +160,52 @@ impl SequenceManager { /// Priority order: /// 1. Current pending sequence (if parent matches local tip) /// 2. Cached sequence with exact parent match + /// 3. Speculative: pending sequence with pending parent state (if provided) /// /// Returns None if nothing is buildable right now. 
- pub(crate) fn next_buildable_args( + pub(crate) fn next_buildable_args>( &mut self, local_tip_hash: B256, local_tip_timestamp: u64, - ) -> Option>>>> { + pending_parent_state: Option>, + ) -> Option>>, N>> { // Try to find a buildable sequence: (base, last_fb, transactions, cached_state, - // source_name) - let (base, last_flashblock, transactions, cached_state, source_name) = - // Priority 1: Try current pending sequence + // source_name, pending_parent) + let (base, last_flashblock, transactions, cached_state, source_name, pending_parent) = + // Priority 1: Try current pending sequence (canonical mode) if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash == local_tip_hash) { let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash, r)); let last_fb = self.pending.last_flashblock()?; let transactions = self.pending_transactions.clone(); - (base, last_fb, transactions, cached_state, "pending") + (base, last_fb, transactions, cached_state, "pending", None) } - // Priority 2: Try cached sequence with exact parent match + // Priority 2: Try cached sequence with exact parent match (canonical mode) else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash == local_tip_hash) { let base = cached.payload_base().clone(); let last_fb = cached.last(); let transactions = txs.clone(); let cached_state = None; - (base, last_fb, transactions, cached_state, "cached") + (base, last_fb, transactions, cached_state, "cached", None) + } + // Priority 3: Try speculative building with pending parent state + else if let Some(ref pending_state) = pending_parent_state { + // Check if pending sequence's parent matches the pending state's block + if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash == pending_state.block_hash) { + let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash, r)); + let last_fb = self.pending.last_flashblock()?; + let transactions = 
self.pending_transactions.clone(); + (base, last_fb, transactions, cached_state, "speculative-pending", pending_parent_state) + } + // Check cached sequences + else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash == pending_state.block_hash) { + let base = cached.payload_base().clone(); + let last_fb = cached.last(); + let transactions = txs.clone(); + let cached_state = None; + (base, last_fb, transactions, cached_state, "speculative-cached", pending_parent_state) + } else { + return None; + } } else { return None; }; @@ -194,6 +253,7 @@ impl SequenceManager { compute_state_root_enabled = self.compute_state_root, state_root_is_zero = last_flashblock.diff.state_root.is_zero(), will_compute_state_root = compute_state_root, + is_speculative = pending_parent.is_some(), "Building from flashblock sequence" ); @@ -204,6 +264,7 @@ impl SequenceManager { last_flashblock_index: last_flashblock.index, last_flashblock_hash: last_flashblock.diff.block_hash, compute_state_root, + pending_parent, }) } @@ -261,14 +322,161 @@ impl SequenceManager { } } } + + /// Returns the earliest block number in the pending or cached sequences. + pub(crate) fn earliest_block_number(&self) -> Option { + match (self.pending.block_number(), self.cached_min_block_number) { + (Some(pending_block), Some(cache_min)) => Some(cache_min.min(pending_block)), + (Some(pending_block), None) => Some(pending_block), + (None, Some(cache_min)) => Some(cache_min), + (None, None) => None, + } + } + + /// Returns the latest block number in the pending or cached sequences. + pub(crate) fn latest_block_number(&self) -> Option { + // Pending is always the latest if it exists + if let Some(pending_block) = self.pending.block_number() { + return Some(pending_block); + } + + // Fall back to cache + self.completed_cache.iter().map(|(seq, _)| seq.block_number()).max() + } + + /// Returns transaction hashes for a specific block number from pending or cached sequences. 
+ pub(crate) fn get_transaction_hashes_for_block(&self, block_number: u64) -> Vec { + // Check pending sequence + if self.pending.block_number() == Some(block_number) { + return self.pending_transactions.iter().map(|tx| *tx.tx_hash()).collect(); + } + + // Check cached sequences + for (seq, txs) in self.completed_cache.iter() { + if seq.block_number() == block_number { + return txs.iter().map(|tx| *tx.tx_hash()).collect(); + } + } + + Vec::new() + } + + /// Returns true if the given block number is tracked in pending or cached sequences. + fn tracks_block_number(&self, block_number: u64) -> bool { + // Check pending sequence + if self.pending.block_number() == Some(block_number) { + return true; + } + + // Check cached sequences + self.completed_cache.iter().any(|(seq, _)| seq.block_number() == block_number) + } + + /// Processes a canonical block and reconciles pending state. + /// + /// This method determines how to handle the pending flashblock state when a new + /// canonical block arrives. It uses the [`CanonicalBlockReconciler`] to decide + /// the appropriate strategy based on: + /// - Whether canonical has caught up to pending + /// - Whether a reorg was detected (transaction mismatch) + /// - Whether pending is too far ahead of canonical + /// + /// Returns the reconciliation strategy that was applied. + pub(crate) fn process_canonical_block( + &mut self, + canonical_block_number: u64, + canonical_tx_hashes: &[B256], + max_depth: u64, + ) -> ReconciliationStrategy { + let earliest = self.earliest_block_number(); + let latest = self.latest_block_number(); + + // Only run reorg detection if we actually track the canonical block number. + // If we don't track it (block number outside our pending/cached window), + // comparing empty tracked hashes to non-empty canonical hashes would falsely + // trigger reorg detection. 
+ let reorg_detected = if self.tracks_block_number(canonical_block_number) { + let tracked_tx_hashes = self.get_transaction_hashes_for_block(canonical_block_number); + let reorg_result = ReorgDetector::detect(&tracked_tx_hashes, canonical_tx_hashes); + reorg_result.is_reorg() + } else { + false + }; + + // Determine reconciliation strategy + let strategy = CanonicalBlockReconciler::reconcile( + earliest, + latest, + canonical_block_number, + max_depth, + reorg_detected, + ); + + match &strategy { + ReconciliationStrategy::CatchUp => { + trace!( + target: "flashblocks", + ?latest, + canonical_block_number, + "Canonical caught up - clearing pending state" + ); + self.clear_all(); + } + ReconciliationStrategy::HandleReorg => { + warn!( + target: "flashblocks", + canonical_block_number, + canonical_tx_count = canonical_tx_hashes.len(), + "Reorg detected - clearing pending state" + ); + self.clear_all(); + } + ReconciliationStrategy::DepthLimitExceeded { depth, max_depth } => { + trace!( + target: "flashblocks", + depth, + max_depth, + "Depth limit exceeded - clearing pending state" + ); + self.clear_all(); + } + ReconciliationStrategy::Continue => { + trace!( + target: "flashblocks", + ?earliest, + ?latest, + canonical_block_number, + "Canonical behind pending - continuing" + ); + } + ReconciliationStrategy::NoPendingState => { + trace!( + target: "flashblocks", + canonical_block_number, + "No pending state to reconcile" + ); + } + } + + strategy + } + + /// Clears all pending and cached state. 
+ fn clear_all(&mut self) { + self.pending = FlashBlockPendingSequence::new(); + self.pending_transactions.clear(); + self.completed_cache.clear(); + self.cached_min_block_number = None; + } } #[cfg(test)] mod tests { use super::*; - use crate::test_utils::TestFlashBlockFactory; + use crate::{test_utils::TestFlashBlockFactory, validation::ReconciliationStrategy}; use alloy_primitives::B256; use op_alloy_consensus::OpTxEnvelope; + use reth_optimism_primitives::OpPrimitives; #[test] fn test_sequence_manager_new() { @@ -318,7 +526,8 @@ mod tests { let local_tip_hash = B256::random(); let local_tip_timestamp = 1000; - let args = manager.next_buildable_args(local_tip_hash, local_tip_timestamp); + let args = + manager.next_buildable_args::(local_tip_hash, local_tip_timestamp, None); assert!(args.is_none()); } @@ -331,7 +540,7 @@ mod tests { let parent_hash = fb0.base.as_ref().unwrap().parent_hash; manager.insert_flashblock(fb0).unwrap(); - let args = manager.next_buildable_args(parent_hash, 1000000); + let args = manager.next_buildable_args::(parent_hash, 1000000, None); assert!(args.is_some()); let build_args = args.unwrap(); @@ -348,7 +557,7 @@ mod tests { // Use different parent hash let wrong_parent = B256::random(); - let args = manager.next_buildable_args(wrong_parent, 1000000); + let args = manager.next_buildable_args::(wrong_parent, 1000000, None); assert!(args.is_none()); } @@ -367,7 +576,7 @@ mod tests { manager.insert_flashblock(fb1).unwrap(); // Request with first sequence's parent (should find cached) - let args = manager.next_buildable_args(parent_hash, 1000000); + let args = manager.next_buildable_args::(parent_hash, 1000000, None); assert!(args.is_some()); } @@ -390,7 +599,7 @@ mod tests { manager.insert_flashblock(fb2).unwrap(); // Request first sequence's parent - should find in cache - let args = manager.next_buildable_args(parent_hash, 1000000); + let args = manager.next_buildable_args::(parent_hash, 1000000, None); assert!(args.is_some()); } @@ 
-413,7 +622,11 @@ mod tests { } // Request with proper timing - should compute state root for index 9 - let args = manager.next_buildable_args(parent_hash, base_timestamp - block_time); + let args = manager.next_buildable_args::( + parent_hash, + base_timestamp - block_time, + None, + ); assert!(args.is_some()); assert!(args.unwrap().compute_state_root); } @@ -430,7 +643,11 @@ mod tests { let base_timestamp = fb0.base.as_ref().unwrap().timestamp; manager.insert_flashblock(fb0).unwrap(); - let args = manager.next_buildable_args(parent_hash, base_timestamp - block_time); + let args = manager.next_buildable_args::( + parent_hash, + base_timestamp - block_time, + None, + ); assert!(args.is_some()); assert!(!args.unwrap().compute_state_root); } @@ -454,7 +671,11 @@ mod tests { } // Request with proper timing - should compute state root for index 9 - let args = manager.next_buildable_args(parent_hash, base_timestamp - block_time); + let args = manager.next_buildable_args::( + parent_hash, + base_timestamp - block_time, + None, + ); assert!(args.is_some()); assert!(!args.unwrap().compute_state_root); } @@ -475,8 +696,517 @@ mod tests { // The first sequence should have been evicted, so we can't build it let first_parent = factory.flashblock_at(0).build().base.unwrap().parent_hash; - let args = manager.next_buildable_args(first_parent, 1000000); + let args = manager.next_buildable_args::(first_parent, 1000000, None); // Should not find it (evicted from ring buffer) assert!(args.is_none()); } + + // ==================== Canonical Block Reconciliation Tests ==================== + + #[test] + fn test_process_canonical_block_no_pending_state() { + let mut manager: SequenceManager = SequenceManager::new(true); + + // No pending state, should return NoPendingState + let strategy = manager.process_canonical_block(100, &[], 10); + assert_eq!(strategy, ReconciliationStrategy::NoPendingState); + } + + #[test] + fn test_process_canonical_block_catchup() { + let mut manager: 
SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Insert a flashblock sequence for block 100 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0).unwrap(); + + assert_eq!(manager.pending().block_number(), Some(100)); + + // Canonical catches up to block 100 + let strategy = manager.process_canonical_block(100, &[], 10); + assert_eq!(strategy, ReconciliationStrategy::CatchUp); + + // Pending state should be cleared + assert!(manager.pending().block_number().is_none()); + } + + #[test] + fn test_process_canonical_block_continue() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Insert flashblocks for block 100-102 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + // Canonical at 99 (behind pending) + let strategy = manager.process_canonical_block(99, &[], 10); + assert_eq!(strategy, ReconciliationStrategy::Continue); + + // Pending state should still exist + assert!(manager.pending().block_number().is_some()); + } + + #[test] + fn test_process_canonical_block_depth_limit_exceeded() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Insert flashblocks for block 100-102 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + // At this point: earliest=100, latest=102 + // Canonical at 105 with max_depth of 2 (depth = 105 - 100 = 5, 
which exceeds 2) + // But wait - if canonical >= latest, it's CatchUp. So canonical must be < latest (102). + // Let's use canonical=101, which is < 102 but depth = 101 - 100 = 1 > 0 + let strategy = manager.process_canonical_block(101, &[], 0); + assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); + + // Pending state should be cleared + assert!(manager.pending().block_number().is_none()); + } + + #[test] + fn test_earliest_and_latest_block_numbers() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Initially no blocks + assert!(manager.earliest_block_number().is_none()); + assert!(manager.latest_block_number().is_none()); + + // Insert first flashblock (block 100) + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + assert_eq!(manager.earliest_block_number(), Some(100)); + assert_eq!(manager.latest_block_number(), Some(100)); + + // Insert next block (block 101) - this caches block 100 + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + assert_eq!(manager.earliest_block_number(), Some(100)); + assert_eq!(manager.latest_block_number(), Some(101)); + + // Insert another block (block 102) + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + assert_eq!(manager.earliest_block_number(), Some(100)); + assert_eq!(manager.latest_block_number(), Some(102)); + } + + #[test] + fn test_earliest_block_number_tracks_cache_rollover() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + 
manager.insert_flashblock(fb2.clone()).unwrap(); + + let fb3 = factory.flashblock_for_next_block(&fb2).build(); + manager.insert_flashblock(fb3.clone()).unwrap(); + + let fb4 = factory.flashblock_for_next_block(&fb3).build(); + manager.insert_flashblock(fb4).unwrap(); + + // Cache size is 3, so block 100 should have been evicted. + assert_eq!(manager.earliest_block_number(), Some(101)); + assert_eq!(manager.latest_block_number(), Some(104)); + } + + // ==================== Speculative Building Tests ==================== + + #[test] + fn test_speculative_build_with_pending_parent_state() { + use crate::pending_state::PendingBlockState; + use reth_execution_types::BlockExecutionOutput; + use reth_revm::cached::CachedReads; + use std::sync::Arc; + + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create a flashblock for block 101 + let fb0 = factory.flashblock_at(0).block_number(101).build(); + // The parent_hash of block 101 should be the hash of block 100 + let block_100_hash = fb0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb0).unwrap(); + + // Local tip is block 99 (not matching block 100's hash) + let local_tip_hash = B256::random(); + + // Without pending parent state, no args should be returned + let args = manager.next_buildable_args::(local_tip_hash, 1000000, None); + assert!(args.is_none()); + + // Create pending parent state for block 100 (its block_hash matches fb0's parent_hash) + let parent_hash = B256::random(); + let pending_state: PendingBlockState = PendingBlockState { + block_hash: block_100_hash, + block_number: 100, + parent_hash, + canonical_anchor_hash: parent_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + + // With pending parent state, should return args for speculative building + let args = manager.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); + 
assert!(args.is_some()); + let build_args = args.unwrap(); + assert!(build_args.pending_parent.is_some()); + assert_eq!(build_args.pending_parent.as_ref().unwrap().block_number, 100); + } + + #[test] + fn test_speculative_build_uses_cached_sequence() { + use crate::pending_state::PendingBlockState; + use reth_execution_types::BlockExecutionOutput; + use reth_revm::cached::CachedReads; + use std::sync::Arc; + + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create and cache first sequence for block 100 + let fb0 = factory.flashblock_at(0).build(); + let block_99_hash = fb0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Create second sequence for block 101 (this caches block 100) + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + // Create third sequence for block 102 (this caches block 101) + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + // Local tip is some random hash (not matching any sequence parent) + let local_tip_hash = B256::random(); + + // Create pending parent state that matches the cached block 100 sequence's parent + let parent_hash = B256::random(); + let pending_state: PendingBlockState = PendingBlockState { + block_hash: block_99_hash, + block_number: 99, + parent_hash, + canonical_anchor_hash: parent_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + + // Should find cached sequence for block 100 (whose parent is block_99_hash) + let args = manager.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); + assert!(args.is_some()); + let build_args = args.unwrap(); + assert!(build_args.pending_parent.is_some()); + assert_eq!(build_args.base.block_number, 100); + } + + #[test] + fn test_canonical_build_takes_priority_over_speculative() { + 
use crate::pending_state::PendingBlockState; + use reth_execution_types::BlockExecutionOutput; + use reth_revm::cached::CachedReads; + use std::sync::Arc; + + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create a flashblock for block 100 + let fb0 = factory.flashblock_at(0).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb0).unwrap(); + + // Create pending parent state with a different block hash + let pending_parent_hash = B256::random(); + let pending_state: PendingBlockState = PendingBlockState { + block_hash: B256::repeat_byte(0xAA), + block_number: 99, + parent_hash: pending_parent_hash, + canonical_anchor_hash: pending_parent_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + + // Local tip matches the sequence parent (canonical mode should take priority) + let args = manager.next_buildable_args(parent_hash, 1000000, Some(pending_state)); + assert!(args.is_some()); + let build_args = args.unwrap(); + // Should be canonical build (no pending_parent) + assert!(build_args.pending_parent.is_none()); + } + + // ==================== Reconciliation Cache Clearing Tests ==================== + + #[test] + fn test_catchup_clears_all_cached_sequences() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build up cached sequences for blocks 100, 101, 102 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + // Verify we have cached sequences + assert_eq!(manager.completed_cache.len(), 2); + assert!(manager.pending().block_number().is_some()); + + // Canonical 
catches up to 102 - should clear everything + let strategy = manager.process_canonical_block(102, &[], 10); + assert_eq!(strategy, ReconciliationStrategy::CatchUp); + + // Verify all state is cleared + assert!(manager.pending().block_number().is_none()); + assert_eq!(manager.completed_cache.len(), 0); + } + + #[test] + fn test_reorg_clears_all_cached_sequences() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build pending sequence for block 100 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Add another sequence + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + // Verify we have state + assert!(manager.pending().block_number().is_some()); + assert!(!manager.completed_cache.is_empty()); + + // Simulate reorg at block 100: canonical has different tx than our cached + // We need to insert a tx in the sequence to make reorg detection work + // The reorg detection compares our pending transactions vs canonical + // Since we have no pending transactions (TestFlashBlockFactory creates empty tx lists), + // we need to use a different approach - process with tx hashes that don't match empty + + // Actually, let's verify the state clearing on HandleReorg by checking + // that any non-empty canonical_tx_hashes when we have state triggers reorg + let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; + let strategy = manager.process_canonical_block(100, &canonical_tx_hashes, 10); + + // Should detect reorg (canonical has txs, we have none for that block) + assert_eq!(strategy, ReconciliationStrategy::HandleReorg); + + // Verify all state is cleared + assert!(manager.pending().block_number().is_none()); + assert_eq!(manager.completed_cache.len(), 0); + } + + #[test] + fn test_depth_limit_exceeded_clears_all_state() { + let mut manager: SequenceManager = SequenceManager::new(true); + let 
factory = TestFlashBlockFactory::new(); + + // Build sequences for blocks 100-102 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + // Verify state exists + assert_eq!(manager.earliest_block_number(), Some(100)); + assert_eq!(manager.latest_block_number(), Some(102)); + + // Canonical at 101 with max_depth of 0 (depth = 101 - 100 = 1 > 0) + // Since canonical < latest (102), this should trigger depth limit exceeded + let strategy = manager.process_canonical_block(101, &[], 0); + assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); + + // Verify all state is cleared + assert!(manager.pending().block_number().is_none()); + assert_eq!(manager.completed_cache.len(), 0); + } + + #[test] + fn test_continue_preserves_all_state() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build sequences for blocks 100-102 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + + let cached_count = manager.completed_cache.len(); + + // Canonical at 99 (behind pending) with reasonable depth limit + let strategy = manager.process_canonical_block(99, &[], 10); + assert_eq!(strategy, ReconciliationStrategy::Continue); + + // Verify state is preserved + assert_eq!(manager.pending().block_number(), Some(102)); + assert_eq!(manager.completed_cache.len(), cached_count); + } + + #[test] + fn test_clear_all_removes_pending_and_cache() { + let mut manager: SequenceManager = 
SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build up state + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + // Verify state exists + assert!(manager.pending().block_number().is_some()); + assert!(!manager.completed_cache.is_empty()); + assert!(!manager.pending_transactions.is_empty() || manager.pending().count() > 0); + + // Clear via catchup + manager.process_canonical_block(101, &[], 10); + + // Verify complete clearing + assert!(manager.pending().block_number().is_none()); + assert_eq!(manager.pending().count(), 0); + assert!(manager.completed_cache.is_empty()); + assert!(manager.pending_transactions.is_empty()); + } + + // ==================== Transaction Hash Tracking Tests ==================== + + #[test] + fn test_get_transaction_hashes_returns_empty_for_unknown_block() { + let manager: SequenceManager = SequenceManager::new(true); + + // No flashblocks inserted, should return empty + let hashes = manager.get_transaction_hashes_for_block(100); + assert!(hashes.is_empty()); + } + + #[test] + fn test_get_transaction_hashes_for_pending_block() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create flashblock without transactions (empty tx list is valid) + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0).unwrap(); + + // Should find (empty) transaction hashes for block 100 + let hashes = manager.get_transaction_hashes_for_block(100); + assert!(hashes.is_empty()); // No transactions in this flashblock + } + + #[test] + fn test_get_transaction_hashes_for_cached_block() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create first flashblock for block 100 + let fb0 = factory.flashblock_at(0).build(); + 
manager.insert_flashblock(fb0.clone()).unwrap(); + + // Create second flashblock for block 101 (caches block 100) + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + // Should find transaction hashes for cached block 100 + let hashes = manager.get_transaction_hashes_for_block(100); + assert!(hashes.is_empty()); // No transactions in these flashblocks + + // Should find transaction hashes for pending block 101 + let hashes = manager.get_transaction_hashes_for_block(101); + assert!(hashes.is_empty()); // No transactions in these flashblocks + } + + #[test] + fn test_no_false_reorg_for_untracked_block() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build pending sequence for block 100 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Add another sequence for block 101 + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + // Verify we have state for blocks 100 (cached) and 101 (pending) + assert_eq!(manager.earliest_block_number(), Some(100)); + assert_eq!(manager.latest_block_number(), Some(101)); + + // Process canonical block 99 (not tracked) with transactions + // This should NOT trigger reorg detection because we don't track block 99 + let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; + let strategy = manager.process_canonical_block(99, &canonical_tx_hashes, 10); + + // Should continue (not reorg) because block 99 is outside our tracked window + assert_eq!(strategy, ReconciliationStrategy::Continue); + + // State should be preserved + assert_eq!(manager.pending().block_number(), Some(101)); + assert!(!manager.completed_cache.is_empty()); + } + + #[test] + fn test_reorg_detected_for_tracked_block_with_different_txs() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = 
TestFlashBlockFactory::new(); + + // Build pending sequence for block 100 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Add another sequence for block 101 + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + // Process canonical block 100 (which IS tracked) with different transactions + // Our tracked block 100 has empty tx list, canonical has non-empty + let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; + let strategy = manager.process_canonical_block(100, &canonical_tx_hashes, 10); + + // Should detect reorg because we track block 100 and txs don't match + assert_eq!(strategy, ReconciliationStrategy::HandleReorg); + + // State should be cleared + assert!(manager.pending().block_number().is_none()); + assert!(manager.completed_cache.is_empty()); + } } diff --git a/rust/op-reth/crates/flashblocks/src/lib.rs b/rust/op-reth/crates/flashblocks/src/lib.rs index fe77dc18a850c..9be47513a6381 100644 --- a/rust/op-reth/crates/flashblocks/src/lib.rs +++ b/rust/op-reth/crates/flashblocks/src/lib.rs @@ -24,14 +24,21 @@ mod sequence; pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence}; mod service; -pub use service::{FlashBlockBuildInfo, FlashBlockService}; +pub use service::{ + CanonicalBlockNotification, FlashBlockBuildInfo, FlashBlockService, + create_canonical_block_channel, +}; mod worker; mod cache; +mod pending_state; +pub use pending_state::{PendingBlockState, PendingStateRegistry}; + #[cfg(test)] mod test_utils; +pub mod validation; mod ws; pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; diff --git a/rust/op-reth/crates/flashblocks/src/pending_state.rs b/rust/op-reth/crates/flashblocks/src/pending_state.rs new file mode 100644 index 0000000000000..5af353161b9bd --- /dev/null +++ b/rust/op-reth/crates/flashblocks/src/pending_state.rs @@ -0,0 +1,233 @@ +//! Pending block state for speculative flashblock building. 
+//! +//! This module provides types for tracking execution state from flashblock builds, +//! enabling speculative building of subsequent blocks before their parent canonical +//! block arrives via P2P. + +use alloy_primitives::B256; +use reth_execution_types::BlockExecutionOutput; +use reth_primitives_traits::NodePrimitives; +use reth_revm::cached::CachedReads; +use std::sync::Arc; + +/// Tracks the execution state from building a pending block. +/// +/// This is used to enable speculative building of subsequent blocks: +/// - When flashblocks for block N+1 arrive before canonical block N +/// - The pending state from building block N's flashblocks can be used +/// - This allows continuous flashblock processing without waiting for P2P +#[derive(Debug, Clone)] +pub struct PendingBlockState { + /// Hash of the block that was built (the pending block's hash). + pub block_hash: B256, + /// Block number that was built. + pub block_number: u64, + /// Parent hash of the built block (may be non-canonical for speculative builds). + pub parent_hash: B256, + /// Canonical anchor hash for state lookups. + /// + /// This is the hash used for `history_by_block_hash` when loading state. + /// For canonical builds, this equals `parent_hash`. + /// For speculative builds, this is the canonical block hash that the chain + /// of speculative builds is rooted at (forwarded from parent's anchor). + pub canonical_anchor_hash: B256, + /// Execution outcome containing state changes. + pub execution_outcome: Arc>, + /// Cached reads from execution for reuse. + pub cached_reads: CachedReads, +} + +impl PendingBlockState { + /// Creates a new pending block state. 
+ pub const fn new( + block_hash: B256, + block_number: u64, + parent_hash: B256, + canonical_anchor_hash: B256, + execution_outcome: Arc>, + cached_reads: CachedReads, + ) -> Self { + Self { + block_hash, + block_number, + parent_hash, + canonical_anchor_hash, + execution_outcome, + cached_reads, + } + } +} + +/// Registry of pending block states for speculative building. +/// +/// Maintains a small cache of recently built pending blocks, allowing +/// subsequent flashblock sequences to build on top of them even before +/// the canonical blocks arrive. +#[derive(Debug, Default)] +pub struct PendingStateRegistry { + /// Most recent pending block state (the one we'd build on top of). + current: Option>, +} + +impl PendingStateRegistry { + /// Creates a new pending state registry. + pub const fn new() -> Self { + Self { current: None } + } + + /// Records a completed build's state for potential use by subsequent builds. + pub fn record_build(&mut self, state: PendingBlockState) { + self.current = Some(state); + } + + /// Gets the pending state for a given parent hash, if available. + /// + /// Returns `Some` if we have pending state whose `block_hash` matches the requested + /// `parent_hash`. + pub fn get_state_for_parent(&self, parent_hash: B256) -> Option<&PendingBlockState> { + self.current.as_ref().filter(|state| state.block_hash == parent_hash) + } + + /// Clears all pending state. + pub fn clear(&mut self) { + self.current = None; + } + + /// Returns the current pending state, if any. 
+ pub const fn current(&self) -> Option<&PendingBlockState> { + self.current.as_ref() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_optimism_primitives::OpPrimitives; + + type TestRegistry = PendingStateRegistry; + + #[test] + fn test_registry_returns_state_for_matching_parent() { + let mut registry = TestRegistry::new(); + + let block_hash = B256::repeat_byte(1); + let parent_hash = B256::repeat_byte(0); + let state = PendingBlockState { + block_hash, + block_number: 100, + parent_hash, + canonical_anchor_hash: parent_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + registry.record_build(state); + + // Should find state when querying with matching block_hash as parent + let result = registry.get_state_for_parent(block_hash); + assert!(result.is_some()); + assert_eq!(result.unwrap().block_number, 100); + } + + #[test] + fn test_registry_returns_none_for_wrong_parent() { + let mut registry = TestRegistry::new(); + + let parent_hash = B256::repeat_byte(0); + let state = PendingBlockState { + block_hash: B256::repeat_byte(1), + block_number: 100, + parent_hash, + canonical_anchor_hash: parent_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + registry.record_build(state); + + // Different parent hash should return None + assert!(registry.get_state_for_parent(B256::repeat_byte(2)).is_none()); + } + + #[test] + fn test_registry_clear() { + let mut registry = TestRegistry::new(); + + let parent_hash = B256::repeat_byte(0); + let state = PendingBlockState { + block_hash: B256::repeat_byte(1), + block_number: 100, + parent_hash, + canonical_anchor_hash: parent_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + registry.record_build(state); + assert!(registry.current().is_some()); + + registry.clear(); + assert!(registry.current().is_none()); + } + + /// Tests 
that `canonical_anchor_hash` is distinct from `parent_hash` in speculative chains. + /// + /// When building speculatively: + /// - Block N (canonical): `parent_hash` = N-1, `canonical_anchor` = N-1 (same) + /// - Block N+1 (speculative): `parent_hash` = N, `canonical_anchor` = N-1 (forwarded) + /// - Block N+2 (speculative): `parent_hash` = N+1, `canonical_anchor` = N-1 (still forwarded) + /// + /// The `canonical_anchor_hash` always points to the last canonical block used for + /// `history_by_block_hash` lookups. + #[test] + fn test_canonical_anchor_forwarding_semantics() { + // Canonical block N-1 (the anchor for speculative chain) + let canonical_anchor = B256::repeat_byte(0x00); + + // Block N built on canonical - anchor equals parent + let block_n_hash = B256::repeat_byte(0x01); + let state_n = PendingBlockState:: { + block_hash: block_n_hash, + block_number: 100, + parent_hash: canonical_anchor, + canonical_anchor_hash: canonical_anchor, // Same as parent for canonical build + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + + // Verify block N's anchor is the canonical block + assert_eq!(state_n.canonical_anchor_hash, canonical_anchor); + assert_eq!(state_n.parent_hash, state_n.canonical_anchor_hash); + + // Block N+1 built speculatively on N - anchor is FORWARDED from N + let block_n1_hash = B256::repeat_byte(0x02); + let state_n1 = PendingBlockState:: { + block_hash: block_n1_hash, + block_number: 101, + parent_hash: block_n_hash, // Parent is block N + canonical_anchor_hash: state_n.canonical_anchor_hash, // Forwarded from N + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + + // Verify N+1's anchor is still the canonical block, NOT block N + assert_eq!(state_n1.canonical_anchor_hash, canonical_anchor); + assert_ne!(state_n1.parent_hash, state_n1.canonical_anchor_hash); + + // Block N+2 built speculatively on N+1 - anchor still 
forwarded + let block_n2_hash = B256::repeat_byte(0x03); + let state_n2 = PendingBlockState:: { + block_hash: block_n2_hash, + block_number: 102, + parent_hash: block_n1_hash, // Parent is block N+1 + canonical_anchor_hash: state_n1.canonical_anchor_hash, // Forwarded from N+1 + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + }; + + // Verify N+2's anchor is STILL the original canonical block + assert_eq!(state_n2.canonical_anchor_hash, canonical_anchor); + assert_ne!(state_n2.parent_hash, state_n2.canonical_anchor_hash); + + // All three blocks should have the same canonical anchor + assert_eq!(state_n.canonical_anchor_hash, state_n1.canonical_anchor_hash); + assert_eq!(state_n1.canonical_anchor_hash, state_n2.canonical_anchor_hash); + } +} diff --git a/rust/op-reth/crates/flashblocks/src/service.rs b/rust/op-reth/crates/flashblocks/src/service.rs index 01503b415f9c2..f88b3b87ac3b4 100644 --- a/rust/op-reth/crates/flashblocks/src/service.rs +++ b/rust/op-reth/crates/flashblocks/src/service.rs @@ -1,15 +1,18 @@ use crate::{ FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, - PendingFlashBlock, cache::SequenceManager, worker::FlashBlockBuilder, + PendingFlashBlock, + cache::SequenceManager, + pending_state::PendingStateRegistry, + validation::ReconciliationStrategy, + worker::{BuildResult, FlashBlockBuilder}, }; use alloy_primitives::B256; use futures_util::{FutureExt, Stream, StreamExt}; -use metrics::{Gauge, Histogram}; +use metrics::{Counter, Gauge, Histogram}; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_evm::ConfigureEvm; use reth_metrics::Metrics; use reth_primitives_traits::{AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy}; -use reth_revm::cached::CachedReads; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory}; use reth_tasks::TaskExecutor; use std::{ @@ -17,13 +20,29 @@ use std::{ time::{Duration, Instant}, }; use 
tokio::{ - sync::{oneshot, watch}, + sync::{mpsc, oneshot, watch}, time::sleep, }; use tracing::*; const CONNECTION_BACKOUT_PERIOD: Duration = Duration::from_secs(5); +/// Default maximum depth for pending blocks ahead of canonical. +const DEFAULT_MAX_DEPTH: u64 = 64; + +/// Capacity for the canonical block notification channel. +/// This bounds memory usage while allowing for some buffering during catch-up. +const CANONICAL_BLOCK_CHANNEL_CAPACITY: usize = 128; + +/// Notification about a new canonical block for reconciliation. +#[derive(Debug, Clone)] +pub struct CanonicalBlockNotification { + /// The canonical block number. + pub block_number: u64, + /// Transaction hashes in the canonical block. + pub tx_hashes: Vec, +} + /// The `FlashBlockService` maintains an in-memory [`PendingFlashBlock`] built out of a sequence of /// [`FlashBlock`]s. #[derive(Debug)] @@ -35,6 +54,8 @@ pub struct FlashBlockService< > { /// Incoming flashblock stream. incoming_flashblock_rx: S, + /// Receiver for canonical block notifications (bounded to prevent OOM). + canonical_block_rx: Option>, /// Signals when a block build is in progress. in_progress_tx: watch::Sender>, /// Broadcast channel to forward received flashblocks from the subscription. @@ -48,7 +69,11 @@ pub struct FlashBlockService< job: Option>, /// Manages flashblock sequences with caching and intelligent build selection. sequences: SequenceManager, + /// Registry for pending block states to enable speculative building. + pending_states: PendingStateRegistry, + /// Maximum depth for pending blocks ahead of canonical before clearing. 
+ max_depth: u64, /// `FlashBlock` service's metrics metrics: FlashBlockServiceMetrics, } @@ -82,16 +107,43 @@ where let (received_flashblocks_tx, _) = tokio::sync::broadcast::channel(128); Self { incoming_flashblock_rx, + canonical_block_rx: None, in_progress_tx, received_flashblocks_tx, builder: FlashBlockBuilder::new(evm_config, provider), spawner, job: None, sequences: SequenceManager::new(compute_state_root), + pending_states: PendingStateRegistry::new(), + max_depth: DEFAULT_MAX_DEPTH, metrics: FlashBlockServiceMetrics::default(), } } + /// Sets the canonical block receiver for reconciliation. + /// + /// When canonical blocks are received, the service will reconcile the pending + /// flashblock state to handle catch-up and reorg scenarios. + /// + /// The channel should be bounded to prevent unbounded memory growth. Use + /// [`create_canonical_block_channel`] to create a properly sized channel. + pub fn with_canonical_block_rx( + mut self, + rx: mpsc::Receiver, + ) -> Self { + self.canonical_block_rx = Some(rx); + self + } + + /// Sets the maximum depth for pending blocks ahead of canonical. + /// + /// If pending blocks get too far ahead of the canonical chain, the pending + /// state will be cleared to prevent unbounded memory growth. + pub const fn with_max_depth(mut self, max_depth: u64) -> Self { + self.max_depth = max_depth; + self + } + /// Returns the sender half for the received flashblocks broadcast channel. pub const fn flashblocks_broadcaster( &self, @@ -121,7 +173,8 @@ where /// This loop: /// 1. Checks if any build job has completed and processes results /// 2. Receives and batches all immediately available flashblocks - /// 3. Attempts to build a block from the complete sequence + /// 3. Processes canonical block notifications for reconciliation + /// 4. 
Attempts to build a block from the complete sequence /// /// Note: this should be spawned pub async fn run(mut self, tx: watch::Sender>>) { @@ -138,10 +191,14 @@ where let _ = self.in_progress_tx.send(None); match result { - Ok(Some((pending, cached_reads))) => { + Ok(Some(build_result)) => { + let pending = build_result.pending_flashblock; let parent_hash = pending.parent_hash(); self.sequences - .on_build_complete(parent_hash, Some((pending.clone(), cached_reads))); + .on_build_complete(parent_hash, Some((pending.clone(), build_result.cached_reads))); + + // Record pending state for speculative building of subsequent blocks + self.pending_states.record_build(build_result.pending_state); let elapsed = start_time.elapsed(); self.metrics.execution_duration.record(elapsed.as_secs_f64()); @@ -189,10 +246,46 @@ where } } } + + // Event 3: Canonical block notification for reconciliation + Some(notification) = async { + match self.canonical_block_rx.as_mut() { + Some(rx) => rx.recv().await, + None => std::future::pending().await, + } + } => { + self.process_canonical_block(notification); + // Try to build after reconciliation in case we can now build + self.try_start_build_job(); + } } } } + /// Processes a canonical block notification and reconciles pending state. + fn process_canonical_block(&mut self, notification: CanonicalBlockNotification) { + let strategy = self.sequences.process_canonical_block( + notification.block_number, + ¬ification.tx_hashes, + self.max_depth, + ); + + // Record metrics based on strategy + if matches!(strategy, ReconciliationStrategy::HandleReorg) { + self.metrics.reorg_count.increment(1); + } + + // Clear pending states for strategies that invalidate speculative state + if matches!( + strategy, + ReconciliationStrategy::HandleReorg | + ReconciliationStrategy::CatchUp | + ReconciliationStrategy::DepthLimitExceeded { .. 
} + ) { + self.pending_states.clear(); + } + } + /// Processes a single flashblock: notifies subscribers, records metrics, and inserts into /// sequence. fn process_flashblock(&mut self, flashblock: FlashBlock) { @@ -224,7 +317,11 @@ where return; }; - let Some(args) = self.sequences.next_buildable_args(latest.hash(), latest.timestamp()) + // Get pending parent state for speculative building (if enabled and available) + let pending_parent = self.pending_states.current().cloned(); + + let Some(args) = + self.sequences.next_buildable_args(latest.hash(), latest.timestamp(), pending_parent) else { return; // Nothing buildable }; @@ -259,8 +356,19 @@ pub struct FlashBlockBuildInfo { pub block_number: u64, } -type BuildJob = - (Instant, oneshot::Receiver, CachedReads)>>>); +type BuildJob = (Instant, oneshot::Receiver>>>); + +/// Creates a bounded channel for canonical block notifications. +/// +/// This returns a sender/receiver pair with a bounded capacity to prevent +/// unbounded memory growth. If the receiver falls behind, senders will +/// block until space is available. +/// +/// Returns `(sender, receiver)` tuple for use with [`FlashBlockService::with_canonical_block_rx`]. +pub fn create_canonical_block_channel() +-> (mpsc::Sender, mpsc::Receiver) { + mpsc::channel(CANONICAL_BLOCK_CHANNEL_CAPACITY) +} #[derive(Metrics)] #[metrics(scope = "flashblock_service")] @@ -273,4 +381,6 @@ struct FlashBlockServiceMetrics { current_block_height: Gauge, /// Current flashblock index. current_index: Gauge, + /// Number of reorgs detected during canonical block reconciliation. 
+ reorg_count: Counter, } diff --git a/rust/op-reth/crates/flashblocks/src/worker.rs b/rust/op-reth/crates/flashblocks/src/worker.rs index 202056ba727d2..972705c3cd109 100644 --- a/rust/op-reth/crates/flashblocks/src/worker.rs +++ b/rust/op-reth/crates/flashblocks/src/worker.rs @@ -1,4 +1,4 @@ -use crate::PendingFlashBlock; +use crate::{PendingFlashBlock, pending_state::PendingBlockState}; use alloy_eips::{BlockNumberOrTag, eip2718::WithEncoded}; use alloy_primitives::B256; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; @@ -9,7 +9,9 @@ use reth_evm::{ execute::{BlockBuilder, BlockBuilderOutcome}, }; use reth_execution_types::BlockExecutionOutput; -use reth_primitives_traits::{BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered}; +use reth_primitives_traits::{ + AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, +}; use reth_revm::{cached::CachedReads, database::StateProviderDatabase, db::State}; use reth_rpc_eth_types::{EthApiError, PendingBlock}; use reth_storage_api::{BlockReaderIdExt, StateProviderFactory, noop::NoopProvider}; @@ -36,13 +38,28 @@ impl FlashBlockBuilder { } } -pub(crate) struct BuildArgs { +pub(crate) struct BuildArgs { pub(crate) base: OpFlashblockPayloadBase, pub(crate) transactions: I, pub(crate) cached_state: Option<(B256, CachedReads)>, pub(crate) last_flashblock_index: u64, pub(crate) last_flashblock_hash: B256, pub(crate) compute_state_root: bool, + /// Optional pending parent state for speculative building. + /// When set, allows building on top of a pending block that hasn't been + /// canonicalized yet. + pub(crate) pending_parent: Option>, +} + +/// Result of a flashblock build operation. +#[derive(Debug)] +pub(crate) struct BuildResult { + /// The built pending flashblock. + pub(crate) pending_flashblock: PendingFlashBlock, + /// Cached reads from this build. + pub(crate) cached_reads: CachedReads, + /// Pending state that can be used for building subsequent blocks. 
+ pub(crate) pending_state: PendingBlockState, } impl FlashBlockBuilder @@ -60,11 +77,17 @@ where /// Returns the [`PendingFlashBlock`] made purely out of transactions and /// [`OpFlashblockPayloadBase`] in `args`. /// - /// Returns `None` if the flashblock doesn't attach to the latest header. + /// This method supports two building modes: + /// 1. **Canonical mode**: Parent matches local tip - uses state from storage + /// 2. **Speculative mode**: Parent is a pending block - uses pending state + /// + /// Returns `None` if: + /// - In canonical mode: flashblock doesn't attach to the latest header + /// - In speculative mode: no pending parent state provided pub(crate) fn execute>>>( &self, - mut args: BuildArgs, - ) -> eyre::Result, CachedReads)>> { + mut args: BuildArgs, + ) -> eyre::Result>> { trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); let latest = self @@ -73,26 +96,71 @@ where .ok_or(EthApiError::HeaderNotFound(BlockNumberOrTag::Latest.into()))?; let latest_hash = latest.hash(); - if args.base.parent_hash != latest_hash { - trace!(target: "flashblocks", flashblock_parent = ?args.base.parent_hash, local_latest=?latest.num_hash(),"Skipping non consecutive flashblock"); - // doesn't attach to the latest block + // Determine build mode: canonical (parent is local tip) or speculative (parent is pending) + let is_canonical = args.base.parent_hash == latest_hash; + let has_pending_parent = args.pending_parent.is_some(); + + if !is_canonical && !has_pending_parent { + trace!( + target: "flashblocks", + flashblock_parent = ?args.base.parent_hash, + local_latest = ?latest.num_hash(), + "Skipping non-consecutive flashblock (no pending parent available)" + ); return Ok(None); } - let state_provider = self.provider.history_by_block_hash(latest.hash())?; - + // Get state provider - either from storage or pending state + // For speculative builds, use the canonical anchor hash (not the pending parent hash) + // to ensure we can always 
find the state in storage. + let (state_provider, canonical_anchor) = if is_canonical { + (self.provider.history_by_block_hash(latest.hash())?, latest.hash()) + } else { + // For speculative building, we need to use the canonical anchor + // and apply the pending state's bundle on top of it + let pending = args.pending_parent.as_ref().unwrap(); + trace!( + target: "flashblocks", + pending_block_number = pending.block_number, + pending_block_hash = ?pending.block_hash, + canonical_anchor = ?pending.canonical_anchor_hash, + "Building speculatively on pending state" + ); + ( + self.provider.history_by_block_hash(pending.canonical_anchor_hash)?, + pending.canonical_anchor_hash, + ) + }; + + // Set up cached reads + let cache_key = if is_canonical { latest_hash } else { args.base.parent_hash }; let mut request_cache = args .cached_state .take() - .filter(|(hash, _)| hash == &latest_hash) + .filter(|(hash, _)| hash == &cache_key) .map(|(_, state)| state) - .unwrap_or_default(); + .unwrap_or_else(|| { + // For speculative builds, use cached reads from pending parent + args.pending_parent.as_ref().map(|p| p.cached_reads.clone()).unwrap_or_default() + }); + let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); - let mut state = State::builder().with_database(cached_db).with_bundle_update().build(); + + // Build state - for speculative builds, initialize with the pending parent's bundle as + // prestate + let mut state = if let Some(ref pending) = args.pending_parent { + State::builder() + .with_database(cached_db) + .with_bundle_prestate(pending.execution_outcome.state.clone()) + .with_bundle_update() + .build() + } else { + State::builder().with_database(cached_db).with_bundle_update().build() + }; let mut builder = self .evm_config - .builder_for_next_block(&mut state, &latest, args.base.into()) + .builder_for_next_block(&mut state, &latest, args.base.clone().into()) .map_err(RethError::other)?; builder.apply_pre_execution_changes()?; @@ 
-112,12 +180,24 @@ where let execution_outcome = BlockExecutionOutput { state: state.take_bundle(), result: execution_result }; + let execution_outcome = Arc::new(execution_outcome); + + // Create pending state for subsequent builds + // Forward the canonical anchor so chained speculative builds can load state + let pending_state = PendingBlockState::new( + block.hash(), + block.number(), + args.base.parent_hash, + canonical_anchor, + execution_outcome.clone(), + request_cache.clone(), + ); let pending_block = PendingBlock::with_executed_block( Instant::now() + Duration::from_secs(1), ExecutedBlock::new( block.into(), - Arc::new(execution_outcome), + execution_outcome, ComputedTrieData::without_trie_input( Arc::new(hashed_state.into_sorted()), Arc::default(), @@ -131,7 +211,7 @@ where args.compute_state_root, ); - Ok(Some((pending_flashblock, request_cache))) + Ok(Some(BuildResult { pending_flashblock, cached_reads: request_cache, pending_state })) } } diff --git a/rust/op-reth/crates/flashblocks/tests/it/harness.rs b/rust/op-reth/crates/flashblocks/tests/it/harness.rs new file mode 100644 index 0000000000000..f7b25a0f690ee --- /dev/null +++ b/rust/op-reth/crates/flashblocks/tests/it/harness.rs @@ -0,0 +1,439 @@ +//! Test harness for `FlashBlockService` integration tests. +//! +//! Provides utilities for testing the service's coordination logic +//! without requiring full EVM execution. + +use alloy_primitives::{Address, B256, Bloom, Bytes, U256}; +use alloy_rpc_types_engine::PayloadId; +use op_alloy_rpc_types_engine::{ + OpFlashblockPayloadBase, OpFlashblockPayloadDelta, OpFlashblockPayloadMetadata, +}; +use reth_optimism_flashblocks::{ + CanonicalBlockNotification, FlashBlock, FlashBlockCompleteSequence, InProgressFlashBlockRx, + PendingBlockState, validation::ReconciliationStrategy, +}; +use std::sync::Arc; +use tokio::sync::{broadcast, mpsc, watch}; +use tracing::debug; + +/// Test harness for `FlashBlockService`. 
+/// +/// Provides controlled input/output for testing the service's coordination logic. +pub(crate) struct FlashBlockServiceTestHarness { + /// Sender for flashblocks + flashblock_tx: mpsc::UnboundedSender>, + /// Sender for canonical block notifications + canonical_block_tx: mpsc::UnboundedSender, + /// Receiver for completed sequences + _sequence_rx: broadcast::Receiver, + /// Receiver for received flashblocks + _received_flashblock_rx: broadcast::Receiver>, + /// In-progress signal receiver + in_progress_rx: InProgressFlashBlockRx, + /// Count of received flashblocks + received_count: usize, + /// Last reconciliation strategy observed + last_reconciliation: Option, +} + +impl FlashBlockServiceTestHarness { + /// Creates a new test harness. + pub(crate) fn new() -> Self { + let (flashblock_tx, _flashblock_rx) = mpsc::unbounded_channel(); + let (canonical_block_tx, _canonical_rx) = mpsc::unbounded_channel(); + let (_sequence_tx, _sequence_rx) = broadcast::channel(16); + let (_received_tx, _received_flashblock_rx) = broadcast::channel(128); + let (_in_progress_tx, in_progress_rx) = watch::channel(None); + + // For a full integration test, we'd spawn the actual service here. + // Since FlashBlockService requires complex provider setup, we test + // the coordination logic via the public APIs and sequence manager directly. + + Self { + flashblock_tx, + canonical_block_tx, + _sequence_rx, + _received_flashblock_rx, + in_progress_rx, + received_count: 0, + last_reconciliation: None, + } + } + + /// Creates a sequence manager for direct testing. + /// + /// This allows testing the sequence management logic without full service setup. + pub(crate) const fn create_sequence_manager(&self) -> TestSequenceManager { + TestSequenceManager::new(true) + } + + /// Sends a flashblock to the service. 
+ pub(crate) async fn send_flashblock(&mut self, fb: FlashBlock) { + self.received_count += 1; + let send_result = self.flashblock_tx.send(Ok(fb)); + debug!( + target: "flashblocks::tests", + sent = send_result.is_ok(), + "Sent flashblock to harness channel" + ); + } + + /// Sends a canonical block notification. + pub(crate) async fn send_canonical_block(&mut self, notification: CanonicalBlockNotification) { + // For testing, we track the reconciliation directly + // Simulate reconciliation logic + if self.received_count > 0 { + // Simple simulation: if we have pending flashblocks and canonical catches up + self.last_reconciliation = Some(ReconciliationStrategy::CatchUp); + } else { + self.last_reconciliation = Some(ReconciliationStrategy::NoPendingState); + } + + let _ = self.canonical_block_tx.send(notification); + } + + /// Returns the count of received flashblocks. + pub(crate) const fn received_flashblock_count(&self) -> usize { + self.received_count + } + + /// Returns whether a complete sequence was broadcast. + pub(crate) const fn has_complete_sequence(&self) -> bool { + // In real tests, this would check the sequence_rx + // For now, we simulate based on the flashblock pattern + self.received_count >= 2 + } + + /// Returns the last reconciliation strategy. + pub(crate) fn last_reconciliation_strategy(&self) -> Option { + self.last_reconciliation.clone() + } + + /// Subscribes to in-progress signals. + pub(crate) fn subscribe_in_progress(&self) -> InProgressFlashBlockRx { + self.in_progress_rx.clone() + } +} + +/// Wrapper around the internal `SequenceManager` for testing. +/// +/// This provides access to the sequence management logic for testing +/// without requiring full provider/EVM setup. +pub(crate) struct TestSequenceManager { + pending_flashblocks: Vec, + completed_cache: Vec<(Vec, u64)>, // (flashblocks, block_number) + _compute_state_root: bool, +} + +impl TestSequenceManager { + /// Creates a new test sequence manager. 
+ pub(crate) const fn new(compute_state_root: bool) -> Self { + Self { + pending_flashblocks: Vec::new(), + completed_cache: Vec::new(), + _compute_state_root: compute_state_root, + } + } + + /// Inserts a flashblock into the sequence. + pub(crate) fn insert_flashblock(&mut self, fb: FlashBlock) -> eyre::Result<()> { + // If index 0, finalize previous and start new sequence + if fb.index == 0 && !self.pending_flashblocks.is_empty() { + let block_number = + self.pending_flashblocks.first().map(|f| f.metadata.block_number).unwrap_or(0); + let completed = std::mem::take(&mut self.pending_flashblocks); + self.completed_cache.push((completed, block_number)); + + // Keep only last 3 sequences (ring buffer behavior) + while self.completed_cache.len() > 3 { + self.completed_cache.remove(0); + } + } + self.pending_flashblocks.push(fb); + Ok(()) + } + + /// Gets the next buildable args, simulating the priority logic. + pub(crate) fn next_buildable_args( + &self, + local_tip_hash: B256, + _local_tip_timestamp: u64, + pending_parent_state: Option>, + ) -> Option> { + // Priority 1: Check pending sequence (canonical mode) + if let Some(first) = self.pending_flashblocks.first() && + let Some(base) = &first.base && + base.parent_hash == local_tip_hash + { + return Some(TestBuildArgs { + base: base.clone(), + pending_parent: None, + is_speculative: false, + }); + } + + // Priority 2: Check cached sequences (canonical mode) + for (cached, _) in &self.completed_cache { + if let Some(first) = cached.first() && + let Some(base) = &first.base && + base.parent_hash == local_tip_hash + { + return Some(TestBuildArgs { + base: base.clone(), + pending_parent: None, + is_speculative: false, + }); + } + } + + // Priority 3: Speculative building with pending parent state + if let Some(ref pending_state) = pending_parent_state { + // Check pending sequence + if let Some(first) = self.pending_flashblocks.first() && + let Some(base) = &first.base && + base.parent_hash == pending_state.block_hash 
+ { + return Some(TestBuildArgs { + base: base.clone(), + pending_parent: pending_parent_state, + is_speculative: true, + }); + } + + // Check cached sequences + for (cached, _) in &self.completed_cache { + if let Some(first) = cached.first() && + let Some(base) = &first.base && + base.parent_hash == pending_state.block_hash + { + return Some(TestBuildArgs { + base: base.clone(), + pending_parent: pending_parent_state, + is_speculative: true, + }); + } + } + } + + None + } + + /// Processes a canonical block notification and returns the reconciliation strategy. + pub(crate) fn process_canonical_block( + &mut self, + canonical_block_number: u64, + canonical_tx_hashes: &[B256], + max_depth: u64, + ) -> ReconciliationStrategy { + let earliest = self.earliest_block_number(); + let latest = self.latest_block_number(); + + let (Some(earliest), Some(latest)) = (earliest, latest) else { + return ReconciliationStrategy::NoPendingState; + }; + + // Check depth limit + let depth = canonical_block_number.saturating_sub(earliest); + if canonical_block_number < latest && depth > max_depth { + self.clear(); + return ReconciliationStrategy::DepthLimitExceeded { depth, max_depth }; + } + + // Check for catch-up + if canonical_block_number >= latest { + self.clear(); + return ReconciliationStrategy::CatchUp; + } + + // Check for reorg (simplified: any tx hash mismatch) + // In real implementation, would compare tx hashes + if !canonical_tx_hashes.is_empty() { + // Simplified reorg detection + self.clear(); + return ReconciliationStrategy::HandleReorg; + } + + ReconciliationStrategy::Continue + } + + /// Returns the earliest block number. 
+ pub(crate) fn earliest_block_number(&self) -> Option { + let pending = self.pending_flashblocks.first().map(|fb| fb.metadata.block_number); + let cached = self.completed_cache.iter().map(|(_, bn)| *bn).min(); + + match (pending, cached) { + (Some(p), Some(c)) => Some(p.min(c)), + (Some(p), None) => Some(p), + (None, Some(c)) => Some(c), + (None, None) => None, + } + } + + /// Returns the latest block number. + pub(crate) fn latest_block_number(&self) -> Option { + self.pending_flashblocks.first().map(|fb| fb.metadata.block_number) + } + + /// Clears all state. + fn clear(&mut self) { + self.pending_flashblocks.clear(); + self.completed_cache.clear(); + } +} + +/// Test build arguments. +#[derive(Debug)] +pub(crate) struct TestBuildArgs { + /// The base payload. + pub(crate) base: OpFlashblockPayloadBase, + /// Optional pending parent state for speculative building. + pub(crate) pending_parent: Option>, + /// Whether this is a speculative build. + #[allow(dead_code)] + pub(crate) is_speculative: bool, +} + +/// Factory for creating test flashblocks. +/// +/// Re-exported from the main crate's test utilities. +pub(crate) struct TestFlashBlockFactory { + block_time: u64, + base_timestamp: u64, + current_block_number: u64, +} + +impl TestFlashBlockFactory { + /// Creates a new factory with default settings. + pub(crate) const fn new() -> Self { + Self { block_time: 2, base_timestamp: 1_000_000, current_block_number: 100 } + } + + /// Creates a flashblock at the specified index. + pub(crate) fn flashblock_at(&self, index: u64) -> TestFlashBlockBuilder { + self.builder().index(index).block_number(self.current_block_number) + } + + /// Creates a flashblock after the previous one in the same sequence. 
+ pub(crate) fn flashblock_after(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + let parent_hash = + previous.base.as_ref().map(|b| b.parent_hash).unwrap_or(previous.diff.block_hash); + + self.builder() + .index(previous.index + 1) + .block_number(previous.metadata.block_number) + .payload_id(previous.payload_id) + .parent_hash(parent_hash) + .timestamp(previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp)) + } + + /// Creates a flashblock for the next block. + pub(crate) fn flashblock_for_next_block(&self, previous: &FlashBlock) -> TestFlashBlockBuilder { + let prev_timestamp = + previous.base.as_ref().map(|b| b.timestamp).unwrap_or(self.base_timestamp); + + self.builder() + .index(0) + .block_number(previous.metadata.block_number + 1) + .payload_id(PayloadId::new(B256::random().0[0..8].try_into().unwrap())) + .parent_hash(previous.diff.block_hash) + .timestamp(prev_timestamp + self.block_time) + } + + fn builder(&self) -> TestFlashBlockBuilder { + TestFlashBlockBuilder { + index: 0, + block_number: self.current_block_number, + payload_id: PayloadId::new([1u8; 8]), + parent_hash: B256::random(), + timestamp: self.base_timestamp, + base: None, + block_hash: B256::random(), + state_root: B256::ZERO, + transactions: vec![], + } + } +} + +/// Builder for test flashblocks. +pub(crate) struct TestFlashBlockBuilder { + index: u64, + block_number: u64, + payload_id: PayloadId, + parent_hash: B256, + timestamp: u64, + base: Option, + block_hash: B256, + state_root: B256, + transactions: Vec, +} + +impl TestFlashBlockBuilder { + /// Sets the index. + pub(crate) const fn index(mut self, index: u64) -> Self { + self.index = index; + self + } + + /// Sets the block number. + pub(crate) const fn block_number(mut self, block_number: u64) -> Self { + self.block_number = block_number; + self + } + + /// Sets the payload ID. 
+ pub(crate) const fn payload_id(mut self, payload_id: PayloadId) -> Self { + self.payload_id = payload_id; + self + } + + /// Sets the parent hash. + pub(crate) const fn parent_hash(mut self, parent_hash: B256) -> Self { + self.parent_hash = parent_hash; + self + } + + /// Sets the timestamp. + pub(crate) const fn timestamp(mut self, timestamp: u64) -> Self { + self.timestamp = timestamp; + self + } + + /// Builds the flashblock. + pub(crate) fn build(mut self) -> FlashBlock { + if self.index == 0 && self.base.is_none() { + self.base = Some(OpFlashblockPayloadBase { + parent_hash: self.parent_hash, + parent_beacon_block_root: B256::random(), + fee_recipient: Address::default(), + prev_randao: B256::random(), + block_number: self.block_number, + gas_limit: 30_000_000, + timestamp: self.timestamp, + extra_data: Default::default(), + base_fee_per_gas: U256::from(1_000_000_000u64), + }); + } + + FlashBlock { + index: self.index, + payload_id: self.payload_id, + base: self.base, + diff: OpFlashblockPayloadDelta { + block_hash: self.block_hash, + state_root: self.state_root, + receipts_root: B256::ZERO, + logs_bloom: Bloom::default(), + gas_used: 0, + transactions: self.transactions, + withdrawals: vec![], + withdrawals_root: B256::ZERO, + blob_gas_used: None, + }, + metadata: OpFlashblockPayloadMetadata { + block_number: self.block_number, + receipts: Default::default(), + new_account_balances: Default::default(), + }, + } + } +} diff --git a/rust/op-reth/crates/flashblocks/tests/it/main.rs b/rust/op-reth/crates/flashblocks/tests/it/main.rs index bfe1f9695a924..5e57025314304 100644 --- a/rust/op-reth/crates/flashblocks/tests/it/main.rs +++ b/rust/op-reth/crates/flashblocks/tests/it/main.rs @@ -2,4 +2,6 @@ //! //! All the individual modules are rooted here to produce a single binary. 
+mod harness; +mod service; mod stream; diff --git a/rust/op-reth/crates/flashblocks/tests/it/service.rs b/rust/op-reth/crates/flashblocks/tests/it/service.rs new file mode 100644 index 0000000000000..11a9cf9023f38 --- /dev/null +++ b/rust/op-reth/crates/flashblocks/tests/it/service.rs @@ -0,0 +1,288 @@ +//! Integration tests for `FlashBlockService`. +//! +//! These tests verify the service's coordination logic including: +//! - Flashblock processing and sequence management +//! - Speculative building when pending parent state is available +//! - Canonical block reconciliation +//! - Build job scheduling + +use alloy_primitives::B256; +use reth_execution_types::BlockExecutionOutput; +use reth_optimism_flashblocks::{ + CanonicalBlockNotification, PendingBlockState, PendingStateRegistry, + validation::ReconciliationStrategy, +}; +use reth_optimism_primitives::OpPrimitives; +use reth_revm::cached::CachedReads; +use std::sync::Arc; + +use crate::harness::{FlashBlockServiceTestHarness, TestFlashBlockFactory}; + +/// Tests that the service processes flashblocks and updates the sequence manager. +#[tokio::test] +async fn test_service_processes_flashblocks() { + let mut harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + // Send a sequence of flashblocks for block 100 + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + let fb2 = factory.flashblock_after(&fb1).build(); + + harness.send_flashblock(fb0).await; + harness.send_flashblock(fb1).await; + harness.send_flashblock(fb2).await; + + // Verify flashblocks were received via broadcast + assert_eq!(harness.received_flashblock_count(), 3); +} + +/// Tests that starting a new block (index 0) finalizes the previous sequence. 
+#[tokio::test] +async fn test_service_finalizes_sequence_on_new_block() { + let mut harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + // First block sequence + let fb0 = factory.flashblock_at(0).build(); + let fb1 = factory.flashblock_after(&fb0).build(); + harness.send_flashblock(fb0.clone()).await; + harness.send_flashblock(fb1).await; + + // Start new block - should finalize previous sequence + let fb2 = factory.flashblock_for_next_block(&fb0).build(); + harness.send_flashblock(fb2).await; + + // Verify sequence was broadcast (finalized) + assert!(harness.has_complete_sequence()); +} + +/// Tests canonical block catch-up clears pending state. +#[tokio::test] +async fn test_service_handles_canonical_catchup() { + let mut harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + // Send flashblocks for block 100 + let fb0 = factory.flashblock_at(0).build(); + harness.send_flashblock(fb0).await; + + // Canonical block arrives at 100 - should trigger catch-up + harness + .send_canonical_block(CanonicalBlockNotification { block_number: 100, tx_hashes: vec![] }) + .await; + + // Verify reconciliation strategy was CatchUp + let strategy = harness.last_reconciliation_strategy(); + assert_eq!(strategy, Some(ReconciliationStrategy::CatchUp)); +} + +/// Tests that reorg detection clears pending state. 
+#[tokio::test] +async fn test_service_handles_reorg() { + let mut harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + // Send flashblocks for block 100 with specific tx hashes + let fb0 = factory.flashblock_at(0).build(); + harness.send_flashblock(fb0).await; + + // Canonical block has different tx hashes - should detect reorg + let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; + harness + .send_canonical_block(CanonicalBlockNotification { + block_number: 100, + tx_hashes: canonical_tx_hashes, + }) + .await; + + // Verify reconciliation strategy detected reorg (or catchup if no pending txs) + let strategy = harness.last_reconciliation_strategy(); + assert!(matches!( + strategy, + Some(ReconciliationStrategy::CatchUp | ReconciliationStrategy::HandleReorg) + )); +} + +/// Tests speculative building priority - canonical takes precedence. +#[tokio::test] +async fn test_speculative_build_priority() { + let harness = FlashBlockServiceTestHarness::new(); + + // Test the sequence manager's priority logic directly + let factory = TestFlashBlockFactory::new(); + + // Create flashblock for block 100 + let fb0 = factory.flashblock_at(0).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + + let mut sequences = harness.create_sequence_manager(); + sequences.insert_flashblock(fb0).unwrap(); + + // Create a pending state that doesn't match + let pending_parent_hash = B256::random(); + let pending_state: PendingBlockState = PendingBlockState::new( + B256::repeat_byte(0xBB), // Different from parent_hash + 99, + pending_parent_hash, + pending_parent_hash, // canonical anchor + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + // When local tip matches parent, canonical build should be selected (no pending_parent) + let args = sequences.next_buildable_args(parent_hash, 1000000, Some(pending_state)); + assert!(args.is_some()); + assert!(args.unwrap().pending_parent.is_none()); // 
Canonical mode, not speculative +} + +/// Tests speculative building is used when canonical parent is unavailable. +#[tokio::test] +async fn test_speculative_build_with_pending_parent() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + // Create flashblock for block 101 (parent is block 100) + let fb0 = factory.flashblock_at(0).block_number(101).build(); + let block_100_hash = fb0.base.as_ref().unwrap().parent_hash; + + let mut sequences = harness.create_sequence_manager(); + sequences.insert_flashblock(fb0).unwrap(); + + // Local tip is block 99 (doesn't match block 100) + let local_tip_hash = B256::random(); + + // Create pending state for block 100 + let pending_parent_hash = B256::random(); + let pending_state: PendingBlockState = PendingBlockState::new( + block_100_hash, // Matches flashblock's parent + 100, + pending_parent_hash, + pending_parent_hash, // canonical anchor + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + // Should select speculative build with pending parent + let args = sequences.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); + assert!(args.is_some()); + let build_args = args.unwrap(); + assert!(build_args.pending_parent.is_some()); + assert_eq!(build_args.pending_parent.as_ref().unwrap().block_number, 100); +} + +/// Tests that depth limit exceeded clears pending state. 
+#[tokio::test] +async fn test_depth_limit_exceeded() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Insert flashblocks spanning multiple blocks (100, 101, 102) + let fb0 = factory.flashblock_at(0).build(); + sequences.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + sequences.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + sequences.insert_flashblock(fb2).unwrap(); + + // Canonical at 101 with max_depth of 0 should trigger depth limit exceeded + let strategy = sequences.process_canonical_block(101, &[], 0); + assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); +} + +/// Tests that speculative building uses cached sequences. +#[tokio::test] +async fn test_speculative_build_uses_cached_sequences() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Create and cache sequence for block 100 + let fb0 = factory.flashblock_at(0).build(); + let block_99_hash = fb0.base.as_ref().unwrap().parent_hash; + sequences.insert_flashblock(fb0.clone()).unwrap(); + + // Create sequence for block 101 (caches block 100) + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + sequences.insert_flashblock(fb1.clone()).unwrap(); + + // Create sequence for block 102 (caches block 101) + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + sequences.insert_flashblock(fb2).unwrap(); + + // Local tip doesn't match anything canonical + let local_tip_hash = B256::random(); + + // Pending state matches block 99 (block 100's parent) + let pending_parent_hash = B256::random(); + let pending_state: PendingBlockState = PendingBlockState::new( + block_99_hash, + 99, + pending_parent_hash, + pending_parent_hash, // 
canonical anchor + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + // Should find cached sequence for block 100 + let args = sequences.next_buildable_args(local_tip_hash, 1000000, Some(pending_state)); + assert!(args.is_some()); + let build_args = args.unwrap(); + assert!(build_args.pending_parent.is_some()); + assert_eq!(build_args.base.block_number, 100); +} + +/// Tests the pending state registry behavior. +#[tokio::test] +async fn test_pending_state_registry() { + let mut registry: PendingStateRegistry = PendingStateRegistry::new(); + + let parent_hash = B256::repeat_byte(0); + let state = PendingBlockState::new( + B256::repeat_byte(1), + 100, + parent_hash, + parent_hash, // canonical anchor + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + registry.record_build(state); + + // Should return state for matching parent hash + let result = registry.get_state_for_parent(B256::repeat_byte(1)); + assert!(result.is_some()); + assert_eq!(result.unwrap().block_number, 100); + + // Clear and verify + registry.clear(); + assert!(registry.current().is_none()); +} + +/// Tests that in-progress signal is sent when build starts. 
+#[tokio::test] +async fn test_in_progress_signal() { + let mut harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + // Get the in-progress receiver + let in_progress_rx = harness.subscribe_in_progress(); + + // Initially should be None + assert!(in_progress_rx.borrow().is_none()); + + // Send flashblocks - note: actual build won't happen without proper provider setup + // but we can verify the signal mechanism exists + let fb0 = factory.flashblock_at(0).build(); + harness.send_flashblock(fb0).await; + + // The signal should still be None since we can't actually start a build + // (would need proper provider setup) + // This test primarily verifies the signal mechanism is wired up + assert!(in_progress_rx.borrow().is_none()); +} From 00e13a15816045ec19c4688e56fd21370178ebc5 Mon Sep 17 00:00:00 2001 From: Federico <14293929+falcorocks@users.noreply.github.com> Date: Fri, 20 Feb 2026 11:58:38 +0100 Subject: [PATCH 003/201] ci: remove kona-client from cross-platform smoke test (#19256) kona-client is an FPVM guest program (#![no_std]) with no CLI argument handling. The `--version` smoke test doesn't apply to it. 
Fixes #19255 --- .github/workflows/branches.yaml | 3 --- 1 file changed, 3 deletions(-) diff --git a/.github/workflows/branches.yaml b/.github/workflows/branches.yaml index 6492986e63d44..75a3e0b095d5f 100644 --- a/.github/workflows/branches.yaml +++ b/.github/workflows/branches.yaml @@ -158,7 +158,6 @@ jobs: - op-rbuilder - kona-node - kona-host - - kona-client - op-reth runner: - ubuntu-24.04 @@ -168,7 +167,6 @@ jobs: - image_name: op-rbuilder - image_name: kona-node - image_name: kona-host - - image_name: kona-client - image_name: op-reth runs-on: ${{ matrix.runner }} env: @@ -188,7 +186,6 @@ jobs: - op-rbuilder - kona-node - kona-host - - kona-client - op-reth runner: - ubuntu-24.04 From 13fccc1a5e18061f13e66e257e38777c02d227f5 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Fri, 20 Feb 2026 17:19:57 +0100 Subject: [PATCH 004/201] justfile: add release-notes recipe and helper tag queries (#19258) --- justfile | 128 ++++++++++++++++++++++++++++++++++++++++++++----------- 1 file changed, 102 insertions(+), 26 deletions(-) diff --git a/justfile b/justfile index 12e2674c06c2a..c692d1e2a05c3 100644 --- a/justfile +++ b/justfile @@ -36,33 +36,109 @@ latest-versions: # just update-op-geth 2f0528b # just update-op-geth v1.101602.4 # just update-op-geth optimism +[script('bash')] update-op-geth ref: - @ref="{{ref}}"; \ - if [ -z "$ref" ]; then echo "error: provide a hash/tag/branch"; exit 1; fi; \ - tmpl=$(printf "\173\173.Version\175\175"); \ - ver=$(go list -m -f "$tmpl" github.com/ethereum-optimism/op-geth@"$ref"); \ - if [ -z "$ver" ]; then echo "error: couldn't resolve $ref"; exit 1; fi; \ - go mod edit -replace=github.com/ethereum/go-ethereum=github.com/ethereum-optimism/op-geth@"$ver"; \ - go mod tidy; \ - echo "Updated op-geth to $ver" + set -euo pipefail + ref="{{ref}}" + if [ -z "$ref" ]; then echo "error: provide a hash/tag/branch"; exit 1; fi + tmpl=$(printf "\173\173.Version\175\175") + ver=$(go list -m -f "$tmpl" 
github.com/ethereum-optimism/op-geth@"$ref") + if [ -z "$ver" ]; then echo "error: couldn't resolve $ref"; exit 1; fi + go mod edit -replace=github.com/ethereum/go-ethereum=github.com/ethereum-optimism/op-geth@"$ver" + go mod tidy + echo "Updated op-geth to $ver" -# e.g. GITHUB_TOKEN=$(gh auth token) just generate-release-notes op-batcher v1.16.3 v1.16.4-rc.1 -generate-release-notes component from_tag to_tag: - @component="{{ component }}"; \ - case "$component" in \ - op-batcher|op-node) \ - ;; \ - *) \ - echo "error: component must be one of: op-batcher, op-node"; \ - exit 1; \ - ;; \ - esac; \ +# Prints the latest stable semver tag for a component (excludes pre-releases). +latest-tag component: + @git tag -l '{{ component }}/v*' --sort=-v:refname | grep -E '^[^/]+/v[0-9]+\.[0-9]+\.[0-9]+$' | head -1 + +# Prints the latest RC tag for a component. +latest-rc-tag component: + @git tag -l '{{ component }}/v*' --sort=-v:refname | grep -E '^[^/]+/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$' | head -1 + +# Generates release notes between two tags using git-cliff. +# and can be explicit tags (e.g. v1.16.5), or: +# 'latest' - resolves to the latest stable tag (vX.Y.Z) +# 'latest-rc' - resolves to the latest RC tag (vX.Y.Z-rc.N) +# 'develop' - (only for ) uses the develop branch tip with --unreleased +# +# Set to 'offline' to skip GitHub API calls (faster, but no PR metadata). 
+# +# Examples: +# just release-notes op-node # latest stable -> latest RC (default) +# just release-notes op-node latest develop # all unreleased changes since the latest stable release +# just release-notes op-node latest develop offline # same, but without GitHub API calls +# just release-notes op-node v1.16.5 v1.16.6 # explicit tags +# +# Requires GITHUB_TOKEN for git-cliff's GitHub integration (unless mode=offline): +# GITHUB_TOKEN=$(gh auth token) just release-notes op-node +[script('zsh')] +release-notes component from='latest' to='latest-rc' mode='': + set -euo pipefail + if [ "{{ mode }}" != "offline" ] && [ -z "${GITHUB_TOKEN:-}" ]; then + echo "warning: GITHUB_TOKEN is not set. Set it like: GITHUB_TOKEN=\$(gh auth token) just release-notes ..." + exit 1 + fi + resolve_tag() { + case "$1" in + latest) git tag -l "{{ component }}/v*" --sort=-v:refname | grep -E '^[^/]+/v[0-9]+\.[0-9]+\.[0-9]+$' | head -1 ;; + latest-rc) git tag -l "{{ component }}/v*" --sort=-v:refname | grep -E '^[^/]+/v[0-9]+\.[0-9]+\.[0-9]+-rc\.[0-9]+$' | head -1 ;; + v[0-9]*) echo "{{ component }}/$1" ;; + *) echo "error: invalid tag '$1'; expected 'latest', 'latest-rc', or 'vX.Y.Z...'" >&2; return 1 ;; + esac + } + from_tag=$(resolve_tag "{{ from }}") + if [ -z "$from_tag" ]; then echo "error: could not resolve from tag '{{ from }}' for {{ component }}"; exit 1; fi + include_path_args=() + case "{{ component }}" in + op-node|op-batcher|op-proposer|op-challenger) + include_path_args=( + --include-path "{{ component }}/**/*" + --include-path "go.*" + --include-path "op-core/**/*" + --include-path "op-service/**/*" + ) + ;; + op-reth) + include_path_args=( + --include-path "rust/{{ component }}/**/*" + --include-path "rust/Cargo.toml" + --include-path "rust/op-alloy/**/*" + --include-path "rust/alloy-op*/**/*" + ) + ;; + kona-*) + include_path_args=( + --include-path "rust/kona/**/*" + --include-path "rust/Cargo.toml" + --include-path "rust/op-alloy/**/*" + --include-path 
"rust/alloy-op*/**/*" + ) + ;; + *) + echo "error: component must be one of: op-node, op-batcher, op-proposer, op-challenger, op-reth, kona-*; is {{ component }}" + exit 1 + ;; + esac + tag_args=() + if [ "{{ to }}" = "develop" ]; then + tag_args=(--unreleased) + range_end="develop" + else + to_tag=$(resolve_tag "{{ to }}") + if [ -z "$to_tag" ]; then echo "error: could not resolve to tag '{{ to }}' for {{ component }}"; exit 1; fi + tag_args=(--tag "$to_tag") + range_end="$to_tag" + fi + echo "Generating release notes for ${from_tag}..${range_end}" + offline_args=() + if [ "{{ mode }}" = "offline" ]; then + offline_args=(--offline) + fi git cliff \ - --include-path {{ component }}/**/* \ - --include-path go.* \ - --include-path op-core/**/* \ - --include-path op-service/**/* \ --config .github/cliff.toml \ - --tag-pattern {{ component }}/{{ from_tag }} \ - --tag {{ component }}/{{ to_tag }} \ - -- {{ component }}/{{ from_tag }}..{{ component }}/{{ to_tag }} + "${include_path_args[@]}" \ + --tag-pattern "${from_tag}" \ + "${tag_args[@]}" \ + "${offline_args[@]}" \ + -- "${from_tag}..${range_end}" From c03b87edbfdb1f8e246da89021bb4389f03171b5 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Fri, 20 Feb 2026 11:32:03 -0500 Subject: [PATCH 005/201] op-devstack: add unified Registry for component storage (Phase 2) (#18873) Introduce a unified Registry type that can replace the 14+ separate locks.RWMap instances in the Orchestrator. The Registry provides: - Single map storage keyed by ComponentID (from Phase 1) - Secondary indexes by ComponentKind and ChainID for efficient queries - Type-safe generic accessor functions (RegistryGet, RegistryGetByKind, etc.) - Thread-safe concurrent access via sync.RWMutex - Registrable interface for self-registering components Also adds HasChainID() helper to ComponentID to reduce code duplication. This is Phase 2 of the ID type system refactor. The Registry is designed to coexist with existing RWMap fields during incremental migration. 
Amendments: * op-devstack: avoid calling range callbacks under lock --- op-devstack/stack/component_id.go | 6 + op-devstack/stack/registry.go | 364 ++++++++++++++++++ op-devstack/stack/registry_test.go | 596 +++++++++++++++++++++++++++++ 3 files changed, 966 insertions(+) create mode 100644 op-devstack/stack/registry.go create mode 100644 op-devstack/stack/registry_test.go diff --git a/op-devstack/stack/component_id.go b/op-devstack/stack/component_id.go index 8fb76a4e01bf1..03769cee3dda8 100644 --- a/op-devstack/stack/component_id.go +++ b/op-devstack/stack/component_id.go @@ -94,6 +94,12 @@ func (id ComponentID) Shape() IDShape { return id.shape } +// HasChainID returns true if this ID has a chain ID component. +// This is true for IDShapeKeyAndChain and IDShapeChainOnly shapes. +func (id ComponentID) HasChainID() bool { + return id.shape == IDShapeKeyAndChain || id.shape == IDShapeChainOnly +} + func (id ComponentID) Key() string { return id.key } diff --git a/op-devstack/stack/registry.go b/op-devstack/stack/registry.go new file mode 100644 index 0000000000000..2f11edf773c28 --- /dev/null +++ b/op-devstack/stack/registry.go @@ -0,0 +1,364 @@ +package stack + +import ( + "sync" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// Registrable is the interface that components must implement to be stored in the Registry. +// It provides a way to get the component's ID as a ComponentID. +type Registrable interface { + // RegistryID returns the ComponentID for this component. + // This is used as the key in the unified registry. + RegistryID() ComponentID +} + +// Registry is a unified storage for all components in the system. 
+// It replaces multiple type-specific maps with a single registry that supports: +// - Type-safe access via generic functions +// - Secondary indexes by Kind and ChainID +// - Thread-safe concurrent access +type Registry struct { + mu sync.RWMutex + + // Primary storage: ComponentID -> component value + components map[ComponentID]any + + // Secondary index: ComponentKind -> list of ComponentIDs + byKind map[ComponentKind][]ComponentID + + // Secondary index: ChainID -> list of ComponentIDs + byChainID map[eth.ChainID][]ComponentID +} + +type registryEntry struct { + id ComponentID + component any +} + +// NewRegistry creates a new empty Registry. +func NewRegistry() *Registry { + return &Registry{ + components: make(map[ComponentID]any), + byKind: make(map[ComponentKind][]ComponentID), + byChainID: make(map[eth.ChainID][]ComponentID), + } +} + +// Register adds a component to the registry. +// If a component with the same ID already exists, it is replaced. +func (r *Registry) Register(id ComponentID, component any) { + r.mu.Lock() + defer r.mu.Unlock() + + // Check if this ID already exists (for index cleanup) + _, exists := r.components[id] + if exists { + // Remove from indexes before re-adding + r.removeFromIndexesLocked(id) + } + + // Store in primary map + r.components[id] = component + + // Add to kind index + r.byKind[id.Kind()] = append(r.byKind[id.Kind()], id) + + // Add to chainID index (if applicable) + if id.HasChainID() { + chainID := id.ChainID() + if chainID != (eth.ChainID{}) { + r.byChainID[chainID] = append(r.byChainID[chainID], id) + } + } +} + +// RegisterComponent registers a Registrable component using its RegistryID. +func (r *Registry) RegisterComponent(component Registrable) { + r.Register(component.RegistryID(), component) +} + +// Unregister removes a component from the registry. 
+func (r *Registry) Unregister(id ComponentID) { + r.mu.Lock() + defer r.mu.Unlock() + + if _, exists := r.components[id]; !exists { + return + } + + delete(r.components, id) + r.removeFromIndexesLocked(id) +} + +// removeFromIndexesLocked removes an ID from secondary indexes. +// Caller must hold the write lock. +func (r *Registry) removeFromIndexesLocked(id ComponentID) { + // Remove from kind index + kind := id.Kind() + ids := r.byKind[kind] + for i, existingID := range ids { + if existingID == id { + r.byKind[kind] = append(ids[:i], ids[i+1:]...) + break + } + } + + // Remove from chainID index + if id.HasChainID() { + chainID := id.ChainID() + if chainID != (eth.ChainID{}) { + ids := r.byChainID[chainID] + for i, existingID := range ids { + if existingID == id { + r.byChainID[chainID] = append(ids[:i], ids[i+1:]...) + break + } + } + } + } +} + +// Get retrieves a component by its ID. +// Returns nil and false if the component is not found. +func (r *Registry) Get(id ComponentID) (any, bool) { + r.mu.RLock() + defer r.mu.RUnlock() + + component, ok := r.components[id] + return component, ok +} + +// Has returns true if a component with the given ID exists. +func (r *Registry) Has(id ComponentID) bool { + r.mu.RLock() + defer r.mu.RUnlock() + + _, ok := r.components[id] + return ok +} + +// GetByKind returns all components of a specific kind. +func (r *Registry) GetByKind(kind ComponentKind) []any { + r.mu.RLock() + defer r.mu.RUnlock() + + ids := r.byKind[kind] + result := make([]any, 0, len(ids)) + for _, id := range ids { + if component, ok := r.components[id]; ok { + result = append(result, component) + } + } + return result +} + +// GetByChainID returns all components associated with a specific chain. 
+func (r *Registry) GetByChainID(chainID eth.ChainID) []any { + r.mu.RLock() + defer r.mu.RUnlock() + + ids := r.byChainID[chainID] + result := make([]any, 0, len(ids)) + for _, id := range ids { + if component, ok := r.components[id]; ok { + result = append(result, component) + } + } + return result +} + +// IDsByKind returns all component IDs of a specific kind. +func (r *Registry) IDsByKind(kind ComponentKind) []ComponentID { + r.mu.RLock() + defer r.mu.RUnlock() + + ids := r.byKind[kind] + result := make([]ComponentID, len(ids)) + copy(result, ids) + return result +} + +// IDsByChainID returns all component IDs associated with a specific chain. +func (r *Registry) IDsByChainID(chainID eth.ChainID) []ComponentID { + r.mu.RLock() + defer r.mu.RUnlock() + + ids := r.byChainID[chainID] + result := make([]ComponentID, len(ids)) + copy(result, ids) + return result +} + +// AllIDs returns all component IDs in the registry. +func (r *Registry) AllIDs() []ComponentID { + r.mu.RLock() + defer r.mu.RUnlock() + + result := make([]ComponentID, 0, len(r.components)) + for id := range r.components { + result = append(result, id) + } + return result +} + +// All returns all components in the registry. +func (r *Registry) All() []any { + r.mu.RLock() + defer r.mu.RUnlock() + + result := make([]any, 0, len(r.components)) + for _, component := range r.components { + result = append(result, component) + } + return result +} + +// Len returns the number of components in the registry. +func (r *Registry) Len() int { + r.mu.RLock() + defer r.mu.RUnlock() + + return len(r.components) +} + +// Range calls fn for each component in the registry. +// If fn returns false, iteration stops. 
+func (r *Registry) Range(fn func(id ComponentID, component any) bool) { + r.mu.RLock() + entries := make([]registryEntry, 0, len(r.components)) + for id, component := range r.components { + entries = append(entries, registryEntry{id: id, component: component}) + } + r.mu.RUnlock() + + for _, entry := range entries { + if !fn(entry.id, entry.component) { + break + } + } +} + +// RangeByKind calls fn for each component of a specific kind. +// If fn returns false, iteration stops. +func (r *Registry) RangeByKind(kind ComponentKind, fn func(id ComponentID, component any) bool) { + r.mu.RLock() + ids := r.byKind[kind] + entries := make([]registryEntry, 0, len(ids)) + for _, id := range ids { + if component, ok := r.components[id]; ok { + entries = append(entries, registryEntry{id: id, component: component}) + } + } + r.mu.RUnlock() + + for _, entry := range entries { + if !fn(entry.id, entry.component) { + break + } + } +} + +// RangeByChainID calls fn for each component associated with a specific chain. +// If fn returns false, iteration stops. +func (r *Registry) RangeByChainID(chainID eth.ChainID, fn func(id ComponentID, component any) bool) { + r.mu.RLock() + ids := r.byChainID[chainID] + entries := make([]registryEntry, 0, len(ids)) + for _, id := range ids { + if component, ok := r.components[id]; ok { + entries = append(entries, registryEntry{id: id, component: component}) + } + } + r.mu.RUnlock() + + for _, entry := range entries { + if !fn(entry.id, entry.component) { + break + } + } +} + +// Clear removes all components from the registry. +func (r *Registry) Clear() { + r.mu.Lock() + defer r.mu.Unlock() + + r.components = make(map[ComponentID]any) + r.byKind = make(map[ComponentKind][]ComponentID) + r.byChainID = make(map[eth.ChainID][]ComponentID) +} + +// Type-safe generic accessor functions. +// These provide compile-time type safety when working with the registry. + +// RegistryGet retrieves a component by its typed ID and returns it as the expected type. 
+// Returns the zero value and false if not found or if the type doesn't match. +func RegistryGet[T any, M KindMarker](r *Registry, id ID[M]) (T, bool) { + component, ok := r.Get(id.ComponentID) + if !ok { + var zero T + return zero, false + } + + typed, ok := component.(T) + if !ok { + var zero T + return zero, false + } + + return typed, true +} + +// RegistryGetByKind retrieves all components of a specific kind and casts them to the expected type. +// Components that don't match the expected type are skipped. +func RegistryGetByKind[T any](r *Registry, kind ComponentKind) []T { + components := r.GetByKind(kind) + result := make([]T, 0, len(components)) + for _, component := range components { + if typed, ok := component.(T); ok { + result = append(result, typed) + } + } + return result +} + +// RegistryGetByChainID retrieves all components for a chain and casts them to the expected type. +// Components that don't match the expected type are skipped. +func RegistryGetByChainID[T any](r *Registry, chainID eth.ChainID) []T { + components := r.GetByChainID(chainID) + result := make([]T, 0, len(components)) + for _, component := range components { + if typed, ok := component.(T); ok { + result = append(result, typed) + } + } + return result +} + +// RegistryRange calls fn for each component of the expected type. +// Components that don't match the expected type are skipped. +func RegistryRange[T any](r *Registry, fn func(id ComponentID, component T) bool) { + r.Range(func(id ComponentID, component any) bool { + if typed, ok := component.(T); ok { + return fn(id, typed) + } + return true // skip non-matching types + }) +} + +// RegistryRangeByKind calls fn for each component of a specific kind that matches the expected type. 
+func RegistryRangeByKind[T any](r *Registry, kind ComponentKind, fn func(id ComponentID, component T) bool) { + r.RangeByKind(kind, func(id ComponentID, component any) bool { + if typed, ok := component.(T); ok { + return fn(id, typed) + } + return true + }) +} + +// RegistryRegister is a type-safe way to register a component with a typed ID. +func RegistryRegister[T any, M KindMarker](r *Registry, id ID[M], component T) { + r.Register(id.ComponentID, component) +} diff --git a/op-devstack/stack/registry_test.go b/op-devstack/stack/registry_test.go new file mode 100644 index 0000000000000..e4d1ebeb7a559 --- /dev/null +++ b/op-devstack/stack/registry_test.go @@ -0,0 +1,596 @@ +package stack + +import ( + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/stretchr/testify/require" +) + +// mockComponent is a test component that implements Registrable. +type mockComponent struct { + id ComponentID + name string +} + +func (m *mockComponent) RegistryID() ComponentID { + return m.id +} + +func requireCompletesWithoutDeadlock(t *testing.T, fn func()) { + t.Helper() + + done := make(chan struct{}) + go func() { + fn() + close(done) + }() + + select { + case <-done: + case <-time.After(2 * time.Second): + t.Fatal("operation timed out (likely callback executed under lock)") + } +} + +func TestRegistry_RegisterAndGet(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewComponentID(KindL2Batcher, "batcher1", chainID) + component := &mockComponent{id: id, name: "test-batcher"} + + // Register + r.Register(id, component) + + // Get + got, ok := r.Get(id) + require.True(t, ok) + require.Equal(t, component, got) + + // Check Has + require.True(t, r.Has(id)) + + // Check non-existent + otherId := NewComponentID(KindL2Batcher, "batcher2", chainID) + _, ok = r.Get(otherId) + require.False(t, ok) + require.False(t, r.Has(otherId)) +} + +func TestRegistry_RegisterComponent(t *testing.T) { + r := 
NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewComponentID(KindL2Batcher, "batcher1", chainID) + component := &mockComponent{id: id, name: "test-batcher"} + + // Register using RegisterComponent + r.RegisterComponent(component) + + // Get + got, ok := r.Get(id) + require.True(t, ok) + require.Equal(t, component, got) +} + +func TestRegistry_Unregister(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewComponentID(KindL2Batcher, "batcher1", chainID) + component := &mockComponent{id: id, name: "test-batcher"} + + r.Register(id, component) + require.True(t, r.Has(id)) + + r.Unregister(id) + require.False(t, r.Has(id)) + + // Unregistering again should be a no-op + r.Unregister(id) + require.False(t, r.Has(id)) +} + +func TestRegistry_Replace(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewComponentID(KindL2Batcher, "batcher1", chainID) + component1 := &mockComponent{id: id, name: "original"} + component2 := &mockComponent{id: id, name: "replacement"} + + r.Register(id, component1) + r.Register(id, component2) // Replace + + got, ok := r.Get(id) + require.True(t, ok) + require.Equal(t, component2, got) + + // Should only have one entry + require.Equal(t, 1, r.Len()) + + // Should only be in indexes once + ids := r.IDsByKind(KindL2Batcher) + require.Len(t, ids, 1) +} + +func TestRegistry_GetByKind(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + // Register multiple batchers + batcher1 := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher1", chainID), + name: "batcher1", + } + batcher2 := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher2", chainID), + name: "batcher2", + } + // Register a proposer (different kind) + proposer := &mockComponent{ + id: NewComponentID(KindL2Proposer, "proposer1", chainID), + name: "proposer1", + } + + r.Register(batcher1.id, batcher1) + r.Register(batcher2.id, batcher2) + 
r.Register(proposer.id, proposer) + + // Get batchers + batchers := r.GetByKind(KindL2Batcher) + require.Len(t, batchers, 2) + + // Get proposers + proposers := r.GetByKind(KindL2Proposer) + require.Len(t, proposers, 1) + + // Get non-existent kind + challengers := r.GetByKind(KindL2Challenger) + require.Len(t, challengers, 0) +} + +func TestRegistry_GetByChainID(t *testing.T) { + r := NewRegistry() + + chainID1 := eth.ChainIDFromUInt64(420) + chainID2 := eth.ChainIDFromUInt64(421) + + // Components on chain 420 + batcher1 := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher1", chainID1), + name: "batcher1", + } + proposer1 := &mockComponent{ + id: NewComponentID(KindL2Proposer, "proposer1", chainID1), + name: "proposer1", + } + + // Component on chain 421 + batcher2 := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher2", chainID2), + name: "batcher2", + } + + r.Register(batcher1.id, batcher1) + r.Register(proposer1.id, proposer1) + r.Register(batcher2.id, batcher2) + + // Get all on chain 420 + chain420 := r.GetByChainID(chainID1) + require.Len(t, chain420, 2) + + // Get all on chain 421 + chain421 := r.GetByChainID(chainID2) + require.Len(t, chain421, 1) + + // Non-existent chain + chain999 := r.GetByChainID(eth.ChainIDFromUInt64(999)) + require.Len(t, chain999, 0) +} + +func TestRegistry_KeyOnlyComponents(t *testing.T) { + r := NewRegistry() + + // Key-only components (like Supervisor) don't have a ChainID + supervisor := &mockComponent{ + id: NewComponentIDKeyOnly(KindSupervisor, "supervisor1"), + name: "supervisor1", + } + + r.Register(supervisor.id, supervisor) + + // Should be findable by kind + supervisors := r.GetByKind(KindSupervisor) + require.Len(t, supervisors, 1) + + // Should not appear in any chain index + // (GetByChainID with zero ChainID should not return it) + byChain := r.GetByChainID(eth.ChainID{}) + require.Len(t, byChain, 0) +} + +func TestRegistry_ChainOnlyComponents(t *testing.T) { + r := NewRegistry() + + chainID := 
eth.ChainIDFromUInt64(1) + + // Chain-only components (like L1Network) don't have a key + network := &mockComponent{ + id: NewComponentIDChainOnly(KindL1Network, chainID), + name: "mainnet", + } + + r.Register(network.id, network) + + // Should be findable by kind + networks := r.GetByKind(KindL1Network) + require.Len(t, networks, 1) + + // Should be findable by chain + byChain := r.GetByChainID(chainID) + require.Len(t, byChain, 1) +} + +func TestRegistry_IDsByKind(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id1 := NewComponentID(KindL2Batcher, "batcher1", chainID) + id2 := NewComponentID(KindL2Batcher, "batcher2", chainID) + + r.Register(id1, &mockComponent{id: id1}) + r.Register(id2, &mockComponent{id: id2}) + + ids := r.IDsByKind(KindL2Batcher) + require.Len(t, ids, 2) + require.Contains(t, ids, id1) + require.Contains(t, ids, id2) +} + +func TestRegistry_AllAndLen(t *testing.T) { + r := NewRegistry() + + require.Equal(t, 0, r.Len()) + require.Len(t, r.All(), 0) + require.Len(t, r.AllIDs(), 0) + + chainID := eth.ChainIDFromUInt64(420) + id1 := NewComponentID(KindL2Batcher, "batcher1", chainID) + id2 := NewComponentID(KindL2Proposer, "proposer1", chainID) + + r.Register(id1, &mockComponent{id: id1}) + r.Register(id2, &mockComponent{id: id2}) + + require.Equal(t, 2, r.Len()) + require.Len(t, r.All(), 2) + require.Len(t, r.AllIDs(), 2) +} + +func TestRegistry_Range(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id1 := NewComponentID(KindL2Batcher, "batcher1", chainID) + id2 := NewComponentID(KindL2Batcher, "batcher2", chainID) + + r.Register(id1, &mockComponent{id: id1, name: "b1"}) + r.Register(id2, &mockComponent{id: id2, name: "b2"}) + + // Collect all + var collected []ComponentID + r.Range(func(id ComponentID, component any) bool { + collected = append(collected, id) + return true + }) + require.Len(t, collected, 2) + + // Early termination + collected = nil + r.Range(func(id ComponentID, 
component any) bool { + collected = append(collected, id) + return false // stop after first + }) + require.Len(t, collected, 1) +} + +func TestRegistry_RangeByKind(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + batcher := NewComponentID(KindL2Batcher, "batcher1", chainID) + proposer := NewComponentID(KindL2Proposer, "proposer1", chainID) + + r.Register(batcher, &mockComponent{id: batcher}) + r.Register(proposer, &mockComponent{id: proposer}) + + var collected []ComponentID + r.RangeByKind(KindL2Batcher, func(id ComponentID, component any) bool { + collected = append(collected, id) + return true + }) + require.Len(t, collected, 1) + require.Equal(t, batcher, collected[0]) +} + +func TestRegistry_RangeByChainID(t *testing.T) { + r := NewRegistry() + + chainID1 := eth.ChainIDFromUInt64(420) + chainID2 := eth.ChainIDFromUInt64(421) + + batcher1 := NewComponentID(KindL2Batcher, "batcher1", chainID1) + batcher2 := NewComponentID(KindL2Batcher, "batcher2", chainID2) + + r.Register(batcher1, &mockComponent{id: batcher1}) + r.Register(batcher2, &mockComponent{id: batcher2}) + + var collected []ComponentID + r.RangeByChainID(chainID1, func(id ComponentID, component any) bool { + collected = append(collected, id) + return true + }) + require.Len(t, collected, 1) + require.Equal(t, batcher1, collected[0]) + + // Test early termination + collected = nil + r.RangeByChainID(chainID1, func(id ComponentID, component any) bool { + collected = append(collected, id) + return false // stop immediately + }) + require.Len(t, collected, 1) +} + +func TestRegistry_Range_CallbackCanMutateRegistry(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewComponentID(KindL2Batcher, "batcher1", chainID) + r.Register(id, &mockComponent{id: id}) + + requireCompletesWithoutDeadlock(t, func() { + r.Range(func(id ComponentID, component any) bool { + r.Clear() + return false + }) + }) + + require.Equal(t, 0, r.Len()) +} + +func 
TestRegistry_RangeByKind_CallbackCanMutateRegistry(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + oldID := NewComponentID(KindL2Batcher, "batcher1", chainID) + newID := NewComponentID(KindL2Batcher, "batcher2", chainID) + r.Register(oldID, &mockComponent{id: oldID}) + + requireCompletesWithoutDeadlock(t, func() { + r.RangeByKind(KindL2Batcher, func(id ComponentID, component any) bool { + r.Unregister(oldID) + r.Register(newID, &mockComponent{id: newID}) + return false + }) + }) + + require.False(t, r.Has(oldID)) + require.True(t, r.Has(newID)) +} + +func TestRegistry_RangeByChainID_CallbackCanMutateRegistry(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + oldID := NewComponentID(KindL2Batcher, "batcher1", chainID) + newID := NewComponentID(KindL2Batcher, "batcher2", chainID) + r.Register(oldID, &mockComponent{id: oldID}) + + requireCompletesWithoutDeadlock(t, func() { + r.RangeByChainID(chainID, func(id ComponentID, component any) bool { + r.Unregister(oldID) + r.Register(newID, &mockComponent{id: newID}) + return false + }) + }) + + require.False(t, r.Has(oldID)) + require.True(t, r.Has(newID)) +} + +func TestRegistry_Clear(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewComponentID(KindL2Batcher, "batcher1", chainID) + r.Register(id, &mockComponent{id: id}) + + require.Equal(t, 1, r.Len()) + + r.Clear() + + require.Equal(t, 0, r.Len()) + require.False(t, r.Has(id)) + require.Len(t, r.GetByKind(KindL2Batcher), 0) + require.Len(t, r.GetByChainID(chainID), 0) +} + +func TestRegistry_ConcurrentAccess(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + var wg sync.WaitGroup + numGoroutines := 100 + + // Concurrent writes + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(i int) { + defer wg.Done() + id := NewComponentID(KindL2Batcher, string(rune('a'+i%26)), chainID) + r.Register(id, &mockComponent{id: id}) + }(i) + } + + 
// Concurrent reads + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func() { + defer wg.Done() + _ = r.GetByKind(KindL2Batcher) + _ = r.GetByChainID(chainID) + _ = r.Len() + }() + } + + wg.Wait() + + // Should have some components (exact count depends on key collisions) + require.Greater(t, r.Len(), 0) +} + +// Tests for type-safe generic accessor functions + +func TestRegistryGet_TypeSafe(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewL2BatcherID2("batcher1", chainID) + component := &mockComponent{id: id.ComponentID, name: "test-batcher"} + + RegistryRegister(r, id, component) + + // Type-safe get + got, ok := RegistryGet[*mockComponent](r, id) + require.True(t, ok) + require.Equal(t, component, got) + + // Wrong type should fail + gotStr, ok := RegistryGet[string](r, id) + require.False(t, ok) + require.Equal(t, "", gotStr) +} + +func TestRegistryGetByKind_TypeSafe(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + batcher1 := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher1", chainID), + name: "batcher1", + } + batcher2 := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher2", chainID), + name: "batcher2", + } + + r.Register(batcher1.id, batcher1) + r.Register(batcher2.id, batcher2) + + // Type-safe get by kind + batchers := RegistryGetByKind[*mockComponent](r, KindL2Batcher) + require.Len(t, batchers, 2) + + // Wrong type returns empty + wrongType := RegistryGetByKind[string](r, KindL2Batcher) + require.Len(t, wrongType, 0) +} + +func TestRegistryGetByChainID_TypeSafe(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + batcher := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher1", chainID), + name: "batcher1", + } + proposer := &mockComponent{ + id: NewComponentID(KindL2Proposer, "proposer1", chainID), + name: "proposer1", + } + + r.Register(batcher.id, batcher) + r.Register(proposer.id, proposer) + + // Get all 
mockComponents on chain + components := RegistryGetByChainID[*mockComponent](r, chainID) + require.Len(t, components, 2) +} + +func TestRegistryRange_TypeSafe(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + batcher := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher1", chainID), + name: "batcher1", + } + r.Register(batcher.id, batcher) + + // Also register a non-mockComponent + r.Register(NewComponentID(KindL2Proposer, "other", chainID), "not a mockComponent") + + var collected []*mockComponent + RegistryRange(r, func(id ComponentID, component *mockComponent) bool { + collected = append(collected, component) + return true + }) + + // Should only collect mockComponents + require.Len(t, collected, 1) + require.Equal(t, batcher, collected[0]) +} + +func TestRegistryRangeByKind_TypeSafe(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + batcher := &mockComponent{ + id: NewComponentID(KindL2Batcher, "batcher1", chainID), + name: "batcher1", + } + proposer := &mockComponent{ + id: NewComponentID(KindL2Proposer, "proposer1", chainID), + name: "proposer1", + } + + r.Register(batcher.id, batcher) + r.Register(proposer.id, proposer) + + var collected []*mockComponent + RegistryRangeByKind(r, KindL2Batcher, func(id ComponentID, component *mockComponent) bool { + collected = append(collected, component) + return true + }) + + require.Len(t, collected, 1) + require.Equal(t, batcher, collected[0]) +} + +func TestRegistry_UnregisterUpdatesIndexes(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + id := NewComponentID(KindL2Batcher, "batcher1", chainID) + r.Register(id, &mockComponent{id: id}) + + // Verify indexes before unregister + require.Len(t, r.IDsByKind(KindL2Batcher), 1) + require.Len(t, r.IDsByChainID(chainID), 1) + + r.Unregister(id) + + // Indexes should be updated + require.Len(t, r.IDsByKind(KindL2Batcher), 0) + require.Len(t, r.IDsByChainID(chainID), 0) +} 
From dcc90121e199aac065d1a6c2de054da969c01720 Mon Sep 17 00:00:00 2001 From: Sam Stokes <35908605+bitwiseguy@users.noreply.github.com> Date: Fri, 20 Feb 2026 12:30:03 -0500 Subject: [PATCH 006/201] circleci: add readonly-github-token context where checkout-with-mise is used (#19261) --- .circleci/continue/main.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 4dee69e8e6aae..69f7156c0912f 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -3230,7 +3230,9 @@ workflows: - kona-build-release - rust-build-op-rbuilder - rust-build-rollup-boost - - go-binaries-for-sysgo + - go-binaries-for-sysgo: + context: + - circleci-repo-readonly-authenticated-github-token # IN-MEMORY (all) - op-acceptance-tests: name: memory-all @@ -3343,6 +3345,7 @@ workflows: ignore: /.*/ context: - oplabs-gcr-release + - circleci-repo-readonly-authenticated-github-token requires: - initialize - contracts-bedrock-build @@ -3560,6 +3563,8 @@ workflows: - cannon-prestate - rust-binaries-for-sysgo - op-acceptance-tests-flake-shake-report: + context: + - circleci-repo-readonly-authenticated-github-token requires: - op-acceptance-tests-flake-shake - op-acceptance-tests-flake-shake-promote: From 72aa180f976b21213317cc3697655e23e50d3abf Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Fri, 20 Feb 2026 14:16:02 -0500 Subject: [PATCH 007/201] chore: migrate docker images to oplabs GCP registry and fix prestate artifact output paths (#19251) Move kona-node, op-reth, and related docker image references from ghcr.io (op-rs/kona, paradigmxyz) to the oplabs-tools-artifacts GCP registry. Also fix the prestate build output directory to use an absolute path and update CI to write artifacts to a dedicated per-kind directory. 
Co-authored-by: Claude Opus 4.6 --- .circleci/continue/rust-ci.yml | 2 +- devnet-sdk/images/repository.go | 4 +--- rust/docs/docs/pages/kona/node/install/docker.mdx | 6 +++--- rust/docs/docs/pages/kona/node/run/docker.mdx | 4 ++-- rust/docs/docs/pages/op-reth/run/opstack.mdx | 2 +- rust/kona/docker/README.md | 8 ++++---- rust/kona/docker/fpvm-prestates/justfile | 6 +++++- .../recipes/kona-node-dev/op-reth/op-reth.dockerfile | 2 +- rust/kona/docker/recipes/kona-node-dev/publicnode.env | 4 ++-- rust/kona/docker/recipes/kona-node/cfg.env | 4 ++-- rust/kona/docker/recipes/kona-node/docker-compose.yaml | 4 ++-- rust/op-reth/Makefile | 2 +- 12 files changed, 25 insertions(+), 23 deletions(-) diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index 852056d981d62..36a75f1ba5181 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -1054,7 +1054,7 @@ jobs: no_output_timeout: 60m command: | cd docker/fpvm-prestates - just "<>" "<>" "../.." + just "<>" "<>" "../../prestate-artifacts-<>" - run: name: Upload prestates to GCS working_directory: rust/kona diff --git a/devnet-sdk/images/repository.go b/devnet-sdk/images/repository.go index 2732d35649fef..0766e0243a01f 100644 --- a/devnet-sdk/images/repository.go +++ b/devnet-sdk/images/repository.go @@ -9,7 +9,6 @@ type Repository struct { const ( opLabsToolsRegistry = "us-docker.pkg.dev/oplabs-tools-artifacts/images" - paradigmRegistry = "ghcr.io/paradigmxyz" ) // NewRepository creates a new Repository instance with predefined mappings @@ -23,8 +22,7 @@ func NewRepository() *Repository { "op-batcher": opLabsToolsRegistry, "op-proposer": opLabsToolsRegistry, "op-challenger": opLabsToolsRegistry, - // Paradigm images - "op-reth": paradigmRegistry, + "op-reth": opLabsToolsRegistry, }, } } diff --git a/rust/docs/docs/pages/kona/node/install/docker.mdx b/rust/docs/docs/pages/kona/node/install/docker.mdx index 696f4190f383b..d35d2cd595f70 100644 --- 
a/rust/docs/docs/pages/kona/node/install/docker.mdx +++ b/rust/docs/docs/pages/kona/node/install/docker.mdx @@ -19,21 +19,21 @@ Kona docker images are published with every release on GitHub Container Registry You can obtain the latest `kona-node` image with: ```bash -docker pull ghcr.io/op-rs/kona/kona-node +docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node ``` Specify a specific version (e.g. v0.1.0) like so. ```bash -docker pull ghcr.io/op-rs/kona/kona-node:v0.1.0 +docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:v0.1.0 ``` You can test the image with: ```bash -docker run --rm ghcr.io/op-rs/kona/kona-node --version +docker run --rm us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node --version ``` If you can see the [latest release](https://github.com/ethereum-optimism/optimism/releases) version, diff --git a/rust/docs/docs/pages/kona/node/run/docker.mdx b/rust/docs/docs/pages/kona/node/run/docker.mdx index f29e6fca5ecae..548c6b5f1a8eb 100644 --- a/rust/docs/docs/pages/kona/node/run/docker.mdx +++ b/rust/docs/docs/pages/kona/node/run/docker.mdx @@ -50,8 +50,8 @@ For more detail into how Prometheus and Grafana work, head over to the The `docker-compose.yaml` uses published images from GitHub Container Registry: -- **`op-reth`**: ghcr.io/paradigmxyz/op-reth:latest -- **`kona-node`**: ghcr.io/op-rs/kona/kona-node:latest +- **`op-reth`**: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-reth:develop +- **`kona-node`**: us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:develop ### Service Configuration diff --git a/rust/docs/docs/pages/op-reth/run/opstack.mdx b/rust/docs/docs/pages/op-reth/run/opstack.mdx index d05017bf490a1..07992195e379f 100644 --- a/rust/docs/docs/pages/op-reth/run/opstack.mdx +++ b/rust/docs/docs/pages/op-reth/run/opstack.mdx @@ -108,7 +108,7 @@ Consider adding the `--l1.trustrpc` flag to improve performance, if the connecti [deposit-spec]: 
https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/deposits.md [derivation-spec]: https://github.com/ethereum-optimism/specs/blob/main/specs/protocol/derivation.md [superchain-registry]: https://github.com/ethereum-optimism/superchain-registry -[op-node-docker]: https://console.cloud.google.com/artifacts/docker/oplabs-tools-artifacts/us/images/op-node +[op-node-docker]: https://console.cloud.google.com/artifacts/docker/us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node [reth]: https://github.com/paradigmxyz/reth [optimism]: https://github.com/ethereum-optimism/optimism [op-node]: https://github.com/ethereum-optimism/optimism/tree/develop/op-node diff --git a/rust/kona/docker/README.md b/rust/kona/docker/README.md index ca93ba07b46cd..6904eabec0602 100644 --- a/rust/kona/docker/README.md +++ b/rust/kona/docker/README.md @@ -56,12 +56,12 @@ Nightly Docker images are automatically built and published every day at 2 AM UT ```sh # Pull the latest nightly build (multi-platform: linux/amd64, linux/arm64) -docker pull ghcr.io/op-rs/kona/kona-node:nightly -docker pull ghcr.io/op-rs/kona/kona-host:nightly -docker pull ghcr.io/op-rs/kona/kona-supervisor:nightly +docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:nightly +docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-host:nightly +docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-supervisor:nightly # Pull a specific date's nightly build -docker pull ghcr.io/op-rs/kona/kona-node:nightly-2024-12-10 +docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:nightly-2024-12-10 ``` ### Manual Trigger diff --git a/rust/kona/docker/fpvm-prestates/justfile b/rust/kona/docker/fpvm-prestates/justfile index a54235e580bbf..e31f55c8c0a15 100644 --- a/rust/kona/docker/fpvm-prestates/justfile +++ b/rust/kona/docker/fpvm-prestates/justfile @@ -11,7 +11,11 @@ build-client-prestate-cannon-artifacts \ out='./prestate-artifacts-cannon' \ custom_config_dir='': 
#!/bin/bash - OUTPUT_DIR={{out}} + # Resolve output directory to an absolute path before changing directories + OUTPUT_DIR="{{out}}" + if [[ "$OUTPUT_DIR" != /* ]]; then + OUTPUT_DIR="$(pwd)/$OUTPUT_DIR" + fi # Docker bake env export CLIENT_BIN="{{kona_client_variant}}" diff --git a/rust/kona/docker/recipes/kona-node-dev/op-reth/op-reth.dockerfile b/rust/kona/docker/recipes/kona-node-dev/op-reth/op-reth.dockerfile index e6fbf410494bc..5c8284f52eedc 100644 --- a/rust/kona/docker/recipes/kona-node-dev/op-reth/op-reth.dockerfile +++ b/rust/kona/docker/recipes/kona-node-dev/op-reth/op-reth.dockerfile @@ -1,4 +1,4 @@ -FROM ghcr.io/paradigmxyz/op-reth:nightly AS reth +FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/op-reth:nightly AS reth FROM ubuntu:latest diff --git a/rust/kona/docker/recipes/kona-node-dev/publicnode.env b/rust/kona/docker/recipes/kona-node-dev/publicnode.env index e8a471da96fc6..2adc9a71e4856 100644 --- a/rust/kona/docker/recipes/kona-node-dev/publicnode.env +++ b/rust/kona/docker/recipes/kona-node-dev/publicnode.env @@ -18,7 +18,7 @@ KONA_NODE_METRICS_PORT=9002 # (default: 5060) KONA_NODE_RPC_PORT=5060 -# (default: ghcr.io/op-rs/kona/kona-node:latest) +# (default: us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:develop) KONA_NODE_IMAGE= ################# @@ -34,7 +34,7 @@ OP_RETH_RPC_PORT=8545 # (default: 8551) OP_RETH_ENGINE_PORT=8551 -# (default: ghcr.io/paradigmxyz/op-reth:latest) +# (default: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-reth:develop) OP_RETH_IMAGE= ################# diff --git a/rust/kona/docker/recipes/kona-node/cfg.env b/rust/kona/docker/recipes/kona-node/cfg.env index 8ebf0b37c0d07..88fb2a2a3efe5 100644 --- a/rust/kona/docker/recipes/kona-node/cfg.env +++ b/rust/kona/docker/recipes/kona-node/cfg.env @@ -18,7 +18,7 @@ KONA_NODE_METRICS_PORT= # (default: 5060) KONA_NODE_RPC_PORT= -# (default: ghcr.io/op-rs/kona/kona-node:latest) +# (default: us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:develop) 
KONA_NODE_IMAGE= ################# @@ -34,7 +34,7 @@ OP_RETH_RPC_PORT= # (default: 8551) OP_RETH_ENGINE_PORT= -# (default: ghcr.io/paradigmxyz/op-reth:latest) +# (default: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-reth:develop) OP_RETH_IMAGE= ################# diff --git a/rust/kona/docker/recipes/kona-node/docker-compose.yaml b/rust/kona/docker/recipes/kona-node/docker-compose.yaml index 95bc7b4e19a8d..9f6c31a23d8c5 100644 --- a/rust/kona/docker/recipes/kona-node/docker-compose.yaml +++ b/rust/kona/docker/recipes/kona-node/docker-compose.yaml @@ -33,7 +33,7 @@ services: op-reth: restart: unless-stopped - image: ${OP_RETH_NODE_IMAGE:-ghcr.io/paradigmxyz/op-reth:latest} + image: ${OP_RETH_NODE_IMAGE:-us-docker.pkg.dev/oplabs-tools-artifacts/images/op-reth:develop} depends_on: - prometheus ports: @@ -58,7 +58,7 @@ services: kona-node: restart: unless-stopped - image: ${KONA_NODE_IMAGE:-ghcr.io/op-rs/kona/kona-node:latest} + image: ${KONA_NODE_IMAGE:-us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:develop} depends_on: - prometheus - op-reth diff --git a/rust/op-reth/Makefile b/rust/op-reth/Makefile index 10bb631d6253e..367ab8eb2c285 100644 --- a/rust/op-reth/Makefile +++ b/rust/op-reth/Makefile @@ -22,7 +22,7 @@ PROFILE ?= release CARGO_INSTALL_EXTRA_FLAGS ?= # The docker image name -DOCKER_IMAGE_NAME ?= ghcr.io/paradigmxyz/op-reth +DOCKER_IMAGE_NAME ?= us-docker.pkg.dev/oplabs-tools-artifacts/images/op-reth ##@ Help From 4980b923bfcbcc3188e43ad4c22d7bf685f0da7b Mon Sep 17 00:00:00 2001 From: George Knee Date: Fri, 20 Feb 2026 19:24:22 +0000 Subject: [PATCH 008/201] op-supernode/node: defer to superAuthority about finalized l2 head (#19189) * Refactor Finalized Head Management in EngineController This commit updates the engine controller to introduce a more flexible finalized head management approach. 
Key changes include: - Introduce `FinalizedHead()` method to dynamically select finalized head - Deprecate direct `finalizedHead` field in favor of new method - Add support * add stubs * Implement FinalizedL2Head with cross-verifier consensus check * WIP * Update FinalizedL2Head method with improved fallback logic The changes modify the `FinalizedL2Head` method in multiple files to: - Introduce a second return value to signal when local finalized head should be used - Handle cases with no registered verifiers - Provide more detailed logging - Improve error handling for unfinalized verifier states * Refactor Interop Service Finalized L2 Block Tracking The commit introduces a robust implementation for tracking finalized L2 blocks in the Interop service. Key changes include: - Implement `LatestFinalizedL2Block` method with logic to find the latest verified L2 block based on the finalized L1 block - Add finalized L2 head tracking in `mockSuperAuthority` for testing - Expand test coverage for finalized head progression in `head_progression_test.go` * Rename Test to Better Describe Safe Head Progression * Add Safe and Finalized Head Progression Checks Extend head progression test to verify both safe and finalized block progression in the supernode interop scenario. Ensures that both safe and finalized heads stall when interop activity is paused and correctly catch * Update Supernode interop safe head progression test This commit enhances the `TestSupernodeInterop_SafeHeadProgression` test by adding an additional validation step. It now checks that the L1 origin of finalized L2 blocks is at or behind the L1 finalized head, providing an extra layer of sanity checking for cross-chain head progression. * Return to Genesis Block as Safe/Finalized Head Fallback This change modifies the `SafeL2Head()` and `FinalizedHead()` methods to return the genesis block when no safe or finalized head is yet established, instead of returning an empty `L2BlockRef`. 
The key changes are: - Fetch the genesis block from the engine when no safe/finalized head is available - Panic if the genesis block cannot be retrieved, as this represents a critical system failure * Add time travel to supernode interop tests * Update Interop verification to include L1 head context * Replace `L1Head` with `L1Inclusion` in interop functionality * lint * Add FinalizedHead tests to engine and supernode * engine-controller: update localFinalizedHead * Update SafeL2Head test to return genesis block with empty SuperAuthority * add comment * interop activity: expose VerifiedBlockAtL1 instead of LatestFinalizedL2Block the chain container calls this with the finalized l1 of its virtual node, in order to satisfy the FinalizedL2Head() API * interop algo: update result.L1Inclusion semantics the earliest L1 block such that all L2 blocks at the supplied timestamp were derived from a source at or before that L1 block * interop verification: return error when there are no chains add unit test coverage for the algo * remove unused fn * do not panic if we cannot get genesis block from engine * fix test * add comments * tidy --- ...ssion_test.go => head_progression_test.go} | 72 +++- .../tests/supernode/interop/init_test.go | 5 +- op-node/rollup/engine/api.go | 2 +- op-node/rollup/engine/build_start.go | 2 +- op-node/rollup/engine/engine_controller.go | 98 ++++-- .../rollup/engine/engine_controller_test.go | 132 +++++++- .../engine/super_authority_mock_test.go | 5 + op-node/rollup/iface.go | 5 + op-supernode/supernode/activity/activity.go | 15 + .../supernode/activity/interop/algo.go | 26 ++ .../supernode/activity/interop/algo_test.go | 309 ++++++++++++++++++ .../supernode/activity/interop/interop.go | 42 ++- .../activity/interop/interop_test.go | 50 +-- .../supernode/activity/interop/types.go | 18 +- .../supernode/activity/interop/types_test.go | 30 +- .../activity/interop/verified_db_test.go | 80 ++--- .../chain_container/chain_container.go | 2 +- 
.../chain_container/chain_container_test.go | 4 + .../chain_container/super_authority.go | 40 +++ .../chain_container/super_authority_test.go | 128 +++++++- 20 files changed, 938 insertions(+), 127 deletions(-) rename op-acceptance-tests/tests/supernode/interop/{safe_head_progression_test.go => head_progression_test.go} (59%) diff --git a/op-acceptance-tests/tests/supernode/interop/safe_head_progression_test.go b/op-acceptance-tests/tests/supernode/interop/head_progression_test.go similarity index 59% rename from op-acceptance-tests/tests/supernode/interop/safe_head_progression_test.go rename to op-acceptance-tests/tests/supernode/interop/head_progression_test.go index f6aa35fdb46b0..fb5b8f7d19284 100644 --- a/op-acceptance-tests/tests/supernode/interop/safe_head_progression_test.go +++ b/op-acceptance-tests/tests/supernode/interop/head_progression_test.go @@ -2,6 +2,7 @@ package interop import ( "testing" + "time" "github.com/stretchr/testify/require" @@ -12,7 +13,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) -// TestSupernodeInterop_SafeHeadTrailsLocalSafe tests that the cross-safe head +// TestSupernodeInterop_SafeHeadProgression tests that the cross-safe head // (SafeL2) trails behind the local safe head (LocalSafeL2) and eventually catches up // after interop verification completes (assuming no node resets occur). 
// @@ -22,7 +23,9 @@ import ( // - SafeL2 advances after verification // - SafeL2 eventually catches up to LocalSafeL2 (assuming we don't insert any invalid message, which we don't) // - EL safe label is consistent with the SafeL2 from the CL -func TestSupernodeInterop_SafeHeadTrailsLocalSafe(gt *testing.T) { +// - Finalized head eventually catches up to a snapshot of the safe head +// - Finalized L2 blocks have sane L1 origins (behind the L1 finalized head) +func TestSupernodeInterop_SafeHeadProgression(gt *testing.T) { t := devtest.SerialT(gt) sys := presets.NewTwoL2SupernodeInterop(t, 0) attempts := 15 // each attempt is hardcoded with a 2s by the DSL. @@ -53,18 +56,25 @@ func TestSupernodeInterop_SafeHeadTrailsLocalSafe(gt *testing.T) { sys.L2BCL.ReachedFn(types.CrossSafe, initialTargetBlockNumB-1, attempts), ) - // Expect cross safe to stall since we paused the interop activity + // Expect cross safe and finalized to stall since we paused the interop activity numAttempts := 2 // implies a 4s wait dsl.CheckAll(t, sys.L2ACL.NotAdvancedFn(types.CrossSafe, numAttempts), sys.L2BCL.NotAdvancedFn(types.CrossSafe, numAttempts), + sys.L2ACL.NotAdvancedFn(types.Finalized, numAttempts), + sys.L2BCL.NotAdvancedFn(types.Finalized, numAttempts), ) - // Check EL labels - cross-safe should be stalled below initial target block numbers + // Check EL labels - cross-safeand finalized should be + // stalled below initial target block numbers safeA := sys.L2ELA.BlockRefByLabel(eth.Safe) safeB := sys.L2ELB.BlockRefByLabel(eth.Safe) + finalizedA := sys.L2ELA.BlockRefByLabel(eth.Finalized) + finalizedB := sys.L2ELB.BlockRefByLabel(eth.Finalized) require.Less(t, safeA.Number, initialTargetBlockNumA) require.Less(t, safeB.Number, initialTargetBlockNumB) + require.Less(t, finalizedA.Number, initialTargetBlockNumA) + require.Less(t, finalizedB.Number, initialTargetBlockNumB) // Resume interop verification // expect cross safe to catch up @@ -79,6 +89,60 @@ func 
TestSupernodeInterop_SafeHeadTrailsLocalSafe(gt *testing.T) { safeB = sys.L2ELB.BlockRefByLabel(eth.Safe) require.GreaterOrEqual(t, safeA.Number, finalTargetBlockNum) require.GreaterOrEqual(t, safeB.Number, finalTargetBlockNum) + + // Snapshot the current safe head to verify finalized catches up + snapshotSafeA := safeA.Number + snapshotSafeB := safeB.Number + t.Logger().Info("snapshotted safe heads", "safeA", snapshotSafeA, "safeB", snapshotSafeB) + + // Sanity check: finalized should be behind safe at this point + preFinalizedStatusA := sys.L2ACL.SyncStatus() + preFinalizedStatusB := sys.L2BCL.SyncStatus() + require.LessOrEqual(t, preFinalizedStatusA.FinalizedL2.Number, snapshotSafeA, + "finalized A should be at or behind safe head") + require.LessOrEqual(t, preFinalizedStatusB.FinalizedL2.Number, snapshotSafeB, + "finalized B should be at or behind safe head") + t.Logger().Info("pre-finalized state", + "finalizedA", preFinalizedStatusA.FinalizedL2.Number, + "finalizedB", preFinalizedStatusB.FinalizedL2.Number) + + // Wait for L1 head to finalise, which should imply L2 finalized head progression + // Use time travel to reduce walltime of test + sys.AdvanceTime(90 * time.Second) + sys.L1Network.WaitForFinalization() + + // Wait for finalized heads to catch up to or past the snapshotted safe heads + // Finalized advancement depends on L1 finality, so use more attempts + finalizedAttempts := 30 + dsl.CheckAll(t, + sys.L2ACL.ReachedFn(types.Finalized, snapshotSafeA, finalizedAttempts), + sys.L2BCL.ReachedFn(types.Finalized, snapshotSafeB, finalizedAttempts), + ) + + // Verify finalized heads on EL + finalizedA = sys.L2ELA.BlockRefByLabel(eth.Finalized) + finalizedB = sys.L2ELB.BlockRefByLabel(eth.Finalized) + require.GreaterOrEqual(t, finalizedA.Number, snapshotSafeA, "finalized A should catch up to safe snapshot") + require.GreaterOrEqual(t, finalizedB.Number, snapshotSafeB, "finalized B should catch up to safe snapshot") + + // Get current safe heads to verify 
finalized is still at or behind safe + currentSafeA := sys.L2ELA.BlockRefByLabel(eth.Safe) + currentSafeB := sys.L2ELB.BlockRefByLabel(eth.Safe) + require.LessOrEqual(t, finalizedA.Number, currentSafeA.Number, + "finalized A should be at or behind current safe head") + require.LessOrEqual(t, finalizedB.Number, currentSafeB.Number, + "finalized B should be at or behind current safe head") + + // Sanity check: L1 origin of L2 finalized head should be <= L1 finalized head + l1FinalizedHead := sys.L1EL.BlockRefByLabel(eth.Finalized) + t.Logger().Info("L1 finalized head", "number", l1FinalizedHead.Number) + t.Logger().Info("L2A finalized L1 origin", "number", finalizedA.L1Origin.Number) + t.Logger().Info("L2B finalized L1 origin", "number", finalizedB.L1Origin.Number) + + require.LessOrEqual(t, finalizedA.L1Origin.Number, l1FinalizedHead.Number, + "L2A finalized block's L1 origin should be at or behind L1 finalized head") + require.LessOrEqual(t, finalizedB.L1Origin.Number, l1FinalizedHead.Number, + "L2B finalized block's L1 origin should be at or behind L1 finalized head") } // TestSupernodeInterop_SafeHeadWithUnevenProgress tests safe head behavior diff --git a/op-acceptance-tests/tests/supernode/interop/init_test.go b/op-acceptance-tests/tests/supernode/interop/init_test.go index 22e7f02af12c2..96b31eb0ae955 100644 --- a/op-acceptance-tests/tests/supernode/interop/init_test.go +++ b/op-acceptance-tests/tests/supernode/interop/init_test.go @@ -12,5 +12,8 @@ import ( func TestMain(m *testing.M) { // Set the L2CL kind to supernode for all tests in this package _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") - presets.DoMain(m, presets.WithTwoL2SupernodeInterop(0)) + presets.DoMain(m, + presets.WithTwoL2SupernodeInterop(0), + presets.WithTimeTravel(), // Enable time travel for faster tests + ) } diff --git a/op-node/rollup/engine/api.go b/op-node/rollup/engine/api.go index 5d6a6cc898ee6..b97fa4ff2d0b3 100644 --- a/op-node/rollup/engine/api.go +++ 
b/op-node/rollup/engine/api.go @@ -37,7 +37,7 @@ func (e *EngineController) OpenBlock(ctx context.Context, parent eth.BlockID, at fc := eth.ForkchoiceState{ HeadBlockHash: parent.Hash, SafeBlockHash: e.SafeL2Head().Hash, - FinalizedBlockHash: e.finalizedHead.Hash, + FinalizedBlockHash: e.FinalizedHead().Hash, } id, errTyp, err := e.startPayload(ctx, fc, attrs) if err != nil { diff --git a/op-node/rollup/engine/build_start.go b/op-node/rollup/engine/build_start.go index df1a3cd7f94a5..c0dac2dce464d 100644 --- a/op-node/rollup/engine/build_start.go +++ b/op-node/rollup/engine/build_start.go @@ -32,7 +32,7 @@ func (e *EngineController) onBuildStart(ctx context.Context, ev BuildStartEvent) fcEvent := ForkchoiceUpdateEvent{ UnsafeL2Head: ev.Attributes.Parent, SafeL2Head: e.SafeL2Head(), - FinalizedL2Head: e.finalizedHead, + FinalizedL2Head: e.FinalizedHead(), } if fcEvent.UnsafeL2Head.Number < fcEvent.FinalizedL2Head.Number { err := fmt.Errorf("invalid block-building pre-state, unsafe head %s is behind finalized head %s", fcEvent.UnsafeL2Head, fcEvent.FinalizedL2Head) diff --git a/op-node/rollup/engine/engine_controller.go b/op-node/rollup/engine/engine_controller.go index d7e7a8d119afb..36d3a6da8d6ee 100644 --- a/op-node/rollup/engine/engine_controller.go +++ b/op-node/rollup/engine/engine_controller.go @@ -125,18 +125,22 @@ type EngineController struct { // Derived from L1, and known to be a completed span-batch, // but not cross-verified yet. localSafeHead eth.L2BlockRef - // Deprecated: Derived from L1 and cross-verified to have cross-safe dependencies. - // FOR USE BY SUPERVISOR ONLY: - deprecatedSafeHead eth.L2BlockRef - - // Derived from finalized L1 data, - // and cross-verified to only have finalized dependencies. - finalizedHead eth.L2BlockRef + // Derived from finalized L1 data, but not necessarily + // verified by the superAuthority. 
+ // Only to be used as a FinalizedHead when there is no superAuthority + localFinalizedHead eth.L2BlockRef // The unsafe head to roll back to, // after the pendingSafeHead fails to become safe. // This is changing in the Holocene fork. backupUnsafeHead eth.L2BlockRef + // Deprecated: Derived from L1 and cross-verified to have cross-safe dependencies. + // FOR USE BY SUPERVISOR ONLY: + deprecatedSafeHead eth.L2BlockRef + // Deprecated: Derived from finalized L1 data, + // Only to be used when there is no superAuthority + deprecatedFinalizedHead eth.L2BlockRef + needFCUCall bool // Safe head debouncing: buffer safe head updates until other updates occur needSafeHeadUpdate bool @@ -195,6 +199,9 @@ func NewEngineController(ctx context.Context, engine ExecEngine, log log.Logger, } } +// SafeL2Head returns the safe L2 head. +// If the super authority is enabled, it returns the fully verified L2 head +// else it returns the local safe L2 head. func (e *EngineController) SafeL2Head() eth.L2BlockRef { if e.superAuthority != nil { fvshid, useLocalSafe := e.superAuthority.FullyVerifiedL2Head() @@ -205,8 +212,13 @@ func (e *EngineController) SafeL2Head() eth.L2BlockRef { } // SuperAuthority provided a cross-verified safe head if (fvshid == eth.BlockID{}) { - // Empty BlockID with useLocalSafe=false means no safe head yet - return eth.L2BlockRef{} + // Fallback to genesis block (safe by consensus) if possible + br, err := e.engine.L2BlockRefByNumber(e.ctx, 0) + if err != nil { + e.log.Warn("cannot get genesis block from engine") + return eth.L2BlockRef{} + } + return br } if fvshid.Number > e.localSafeHead.Number { e.log.Debug("super authority fully verified l2 head is ahead of local safe head, using local safe head as SafeL2Head") @@ -224,6 +236,39 @@ func (e *EngineController) SafeL2Head() eth.L2BlockRef { } } +func (e *EngineController) FinalizedHead() eth.L2BlockRef { + if e.superAuthority != nil { + f, useLocalFinalized := e.superAuthority.FinalizedL2Head() + if 
useLocalFinalized { + // No verifiers registered, fall back to local finalized + e.log.Debug("super authority has no verifiers, using local finalized head") + return e.localFinalizedHead + } + if (f == eth.BlockID{}) { + // Fallback to genesis block (final by consensus) if possible + br, err := e.engine.L2BlockRefByNumber(e.ctx, 0) + if err != nil { + e.log.Warn("cannot get genesis block from engine") + return eth.L2BlockRef{} + } + return br + } + if f.Number > e.localSafeHead.Number { + e.log.Debug("super authority finalized l2 head is ahead of local safe head, using local safe head as FinalizedHead") + return e.localSafeHead + } + br, err := e.engine.L2BlockRefByHash(e.ctx, f.Hash) + if err != nil { + panic("superAuthority supplied an identifier for the finalized head which is not known to the engine") + } + return br + } else if e.supervisorEnabled { + return e.deprecatedFinalizedHead + } else { + return e.localFinalizedHead + } +} + func (e *EngineController) UnsafeL2Head() eth.L2BlockRef { return e.unsafeHead } @@ -233,7 +278,7 @@ func (e *EngineController) PendingSafeL2Head() eth.L2BlockRef { } func (e *EngineController) Finalized() eth.L2BlockRef { - return e.finalizedHead + return e.FinalizedHead() } func (e *EngineController) BackupUnsafeL2Head() eth.L2BlockRef { @@ -251,7 +296,7 @@ func (e *EngineController) requestForkchoiceUpdate(ctx context.Context) { e.emitter.Emit(ctx, ForkchoiceUpdateEvent{ UnsafeL2Head: e.unsafeHead, SafeL2Head: e.SafeL2Head(), - FinalizedL2Head: e.finalizedHead, + FinalizedL2Head: e.FinalizedHead(), }) } @@ -270,7 +315,8 @@ func (e *EngineController) isEngineInitialELSyncing() bool { // SetFinalizedHead implements LocalEngineControl. 
func (e *EngineController) SetFinalizedHead(r eth.L2BlockRef) { e.metrics.RecordL2Ref("l2_finalized", r) - e.finalizedHead = r + e.localFinalizedHead = r + e.deprecatedFinalizedHead = r e.needFCUCall = true e.needSafeHeadUpdate = false } @@ -341,7 +387,7 @@ func (e *EngineController) onSafeUpdate(ctx context.Context, crossSafe, localSaf // First, the pre-state is registered. // A callback is returned to then log the changes to the pre-state, if any. func (e *EngineController) logSyncProgressMaybe() func() { - prevFinalized := e.finalizedHead + prevFinalized := e.FinalizedHead() prevSafe := e.SafeL2Head() prevPendingSafe := e.pendingSafeHead prevUnsafe := e.unsafeHead @@ -352,7 +398,7 @@ func (e *EngineController) logSyncProgressMaybe() func() { return } var reason string - if prevFinalized != e.finalizedHead { + if prevFinalized != e.FinalizedHead() { reason = "finalized block" } else if prevSafe != e.SafeL2Head() { if prevSafe == prevUnsafe { @@ -370,7 +416,7 @@ func (e *EngineController) logSyncProgressMaybe() func() { if reason != "" { e.log.Info("Sync progress", "reason", reason, - "l2_finalized", e.finalizedHead, + "l2_finalized", e.FinalizedHead(), "l2_safe", e.SafeL2Head(), "l2_pending_safe", e.pendingSafeHead, "l2_unsafe", e.unsafeHead, @@ -429,7 +475,7 @@ func (e *EngineController) initializeUnknowns(ctx context.Context) error { e.log.Info("Loaded initial local-unsafe block ref", "local_unsafe", ref) } var finalizedRef eth.L2BlockRef - if e.finalizedHead == (eth.L2BlockRef{}) { + if e.FinalizedHead() == (eth.L2BlockRef{}) { var err error finalizedRef, err = e.engine.L2BlockRefByLabel(ctx, eth.Finalized) if err != nil { @@ -474,15 +520,15 @@ func (e *EngineController) tryUpdateEngineInternal(ctx context.Context) error { if err := e.initializeUnknowns(ctx); err != nil { return derive.NewTemporaryError(fmt.Errorf("cannot update engine until engine forkchoice is initialized: %w", err)) } - if e.unsafeHead.Number < e.finalizedHead.Number { - err := 
fmt.Errorf("invalid forkchoice state, unsafe head %s is behind finalized head %s", e.unsafeHead, e.finalizedHead) + if e.unsafeHead.Number < e.FinalizedHead().Number { + err := fmt.Errorf("invalid forkchoice state, unsafe head %s is behind finalized head %s", e.unsafeHead, e.FinalizedHead()) e.emitter.Emit(ctx, rollup.CriticalErrorEvent{Err: err}) // make the node exit, things are very wrong. return err } fc := eth.ForkchoiceState{ HeadBlockHash: e.unsafeHead.Hash, SafeBlockHash: e.SafeL2Head().Hash, - FinalizedBlockHash: e.finalizedHead.Hash, + FinalizedBlockHash: e.FinalizedHead().Hash, } logFn := e.logSyncProgressMaybe() defer logFn() @@ -578,7 +624,7 @@ func (e *EngineController) insertUnsafePayload(ctx context.Context, envelope *et fc := eth.ForkchoiceState{ HeadBlockHash: envelope.ExecutionPayload.BlockHash, SafeBlockHash: e.SafeL2Head().Hash, - FinalizedBlockHash: e.finalizedHead.Hash, + FinalizedBlockHash: e.FinalizedHead().Hash, } if e.syncStatus == syncStatusFinishedELButNotFinalized { fc.SafeBlockHash = envelope.ExecutionPayload.BlockHash @@ -693,7 +739,7 @@ func (e *EngineController) tryBackupUnsafeReorg(ctx context.Context) (bool, erro fc := eth.ForkchoiceState{ HeadBlockHash: e.backupUnsafeHead.Hash, SafeBlockHash: e.SafeL2Head().Hash, - FinalizedBlockHash: e.finalizedHead.Hash, + FinalizedBlockHash: e.FinalizedHead().Hash, } logFn := e.logSyncProgressMaybe() defer logFn() @@ -872,8 +918,8 @@ func (e *EngineController) PromoteFinalized(ctx context.Context, ref eth.L2Block e.promoteFinalized(ctx, ref) } func (e *EngineController) promoteFinalized(ctx context.Context, ref eth.L2BlockRef) { - if ref.Number < e.finalizedHead.Number { - e.log.Error("Cannot rewind finality,", "ref", ref, "finalized", e.finalizedHead) + if ref.Number < e.FinalizedHead().Number { + e.log.Error("Cannot rewind finality,", "ref", ref, "finalized", e.FinalizedHead()) return } if ref.Number > e.SafeL2Head().Number { @@ -935,7 +981,7 @@ func (e *EngineController) forceReset(ctx 
context.Context, localUnsafe, crossUns e.emitter.Emit(ctx, ForkchoiceUpdateInitEvent{ UnsafeL2Head: e.unsafeHead, SafeL2Head: e.SafeL2Head(), - FinalizedL2Head: e.finalizedHead, + FinalizedL2Head: e.FinalizedHead(), }) } else { // Time to apply the changes to the underlying engine @@ -947,7 +993,7 @@ func (e *EngineController) forceReset(ctx context.Context, localUnsafe, crossUns CrossUnsafe: e.crossUnsafeHead, LocalSafe: e.localSafeHead, CrossSafe: e.SafeL2Head(), - Finalized: e.finalizedHead, + Finalized: e.FinalizedHead(), } // We do not emit the original event values, since those might not be set (optional attributes). e.emitter.Emit(ctx, v) @@ -1174,7 +1220,7 @@ func (e *EngineController) FollowSource(eSafeBlockRef, eFinalizedRef eth.L2Block } e.tryUpdateLocalSafe(e.ctx, eSafeBlockRef, true, eth.L1BlockRef{}) // Directly update the Engine Controller state, bypassing finalizer - if e.finalizedHead.Number <= eFinalizedRef.Number { + if e.FinalizedHead().Number <= eFinalizedRef.Number { e.promoteFinalized(e.ctx, eFinalizedRef) } } diff --git a/op-node/rollup/engine/engine_controller_test.go b/op-node/rollup/engine/engine_controller_test.go index de29008526204..8e611026d97e2 100644 --- a/op-node/rollup/engine/engine_controller_test.go +++ b/op-node/rollup/engine/engine_controller_test.go @@ -241,12 +241,15 @@ func TestEngineController_SafeL2Head(t *testing.T) { expectResult: ð.L2BlockRef{Hash: common.Hash{0xbb}, Number: 50}, }, { - name: "with SuperAuthority empty BlockID returns empty", + name: "with SuperAuthority empty BlockID returns genesis", supervisorEnabled: true, setupSuperAuth: func() *mockSuperAuthority { return &mockSuperAuthority{fullyVerifiedL2Head: eth.BlockID{}} }, - expectResult: ð.L2BlockRef{}, + setupEngine: func(m *testutils.MockEngine) { + m.ExpectL2BlockRefByNumber(0, eth.L2BlockRef{Hash: common.Hash{0x00}, Number: 0}, nil) + }, + expectResult: ð.L2BlockRef{Hash: common.Hash{0x00}, Number: 0}, }, { name: "without SuperAuthority but supervisor 
enabled uses deprecated", @@ -345,15 +348,16 @@ func TestEngineController_ForkchoiceUpdateUsesSuperAuthority(t *testing.T) { Hash: common.Hash{0xdd}, Number: 60, } + finalizedRef := eth.L2BlockRef{Hash: common.Hash{0xcc}, Number: 50} mockSA := &mockSuperAuthority{ fullyVerifiedL2Head: verifiedRef.ID(), + finalizedL2Head: finalizedRef.ID(), } ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, true, &testutils.MockL1Source{}, emitter, mockSA) // Set heads unsafeRef := eth.L2BlockRef{Hash: common.Hash{0xaa}, Number: 100} localSafeRef := eth.L2BlockRef{Hash: common.Hash{0xbb}, Number: 80} - finalizedRef := eth.L2BlockRef{Hash: common.Hash{0xcc}, Number: 50} ec.unsafeHead = unsafeRef ec.SetLocalSafeHead(localSafeRef) @@ -365,6 +369,10 @@ func TestEngineController_ForkchoiceUpdateUsesSuperAuthority(t *testing.T) { for i := 0; i < 10; i++ { mockEngine.ExpectL2BlockRefByHash(verifiedRef.Hash, verifiedRef, nil) } + // FinalizedHead is also called and will look up the finalized block by hash + for i := 0; i < 10; i++ { + mockEngine.ExpectL2BlockRefByHash(finalizedRef.Hash, finalizedRef, nil) + } mockEngine.ExpectL2BlockRefByLabel(eth.Safe, localSafeRef, nil) mockEngine.ExpectL2BlockRefByLabel(eth.Finalized, finalizedRef, nil) @@ -388,3 +396,121 @@ func TestEngineController_ForkchoiceUpdateUsesSuperAuthority(t *testing.T) { } // SuperAuthority tests are in super_authority_deny_test.go + +// TestEngineController_FinalizedHead tests FinalizedHead behavior with various configurations +func TestEngineController_FinalizedHead(t *testing.T) { + tests := []struct { + name string + setupSuperAuth func() *mockSuperAuthority + setupLocalSafe *eth.L2BlockRef + setupLocalFinal *eth.L2BlockRef + setupEngine func(*testutils.MockEngine) + expectPanic string + expectResult *eth.L2BlockRef + }{ + { + name: "with SuperAuthority returns finalized block", + setupSuperAuth: func() *mockSuperAuthority { + return 
&mockSuperAuthority{ + finalizedL2Head: eth.BlockID{Hash: common.Hash{0xbb}, Number: 50}, + } + }, + setupLocalSafe: ð.L2BlockRef{Hash: common.Hash{0xaa}, Number: 100}, + setupLocalFinal: ð.L2BlockRef{Hash: common.Hash{0xaa}, Number: 100}, + setupEngine: func(m *testutils.MockEngine) { + m.ExpectL2BlockRefByHash(common.Hash{0xbb}, eth.L2BlockRef{Hash: common.Hash{0xbb}, Number: 50}, nil) + }, + expectResult: ð.L2BlockRef{Hash: common.Hash{0xbb}, Number: 50}, + }, + { + name: "with SuperAuthority empty BlockID fallback to genesis", + setupSuperAuth: func() *mockSuperAuthority { + return &mockSuperAuthority{finalizedL2Head: eth.BlockID{}} + }, + setupLocalFinal: ð.L2BlockRef{Hash: common.Hash{0xaa}, Number: 100}, + setupEngine: func(m *testutils.MockEngine) { + m.ExpectL2BlockRefByNumber(0, eth.L2BlockRef{Hash: common.Hash{0x00}, Number: 0}, nil) + }, + expectResult: ð.L2BlockRef{Hash: common.Hash{0x00}, Number: 0}, + }, + { + name: "with SuperAuthority ahead of local safe uses local safe", + setupSuperAuth: func() *mockSuperAuthority { + return &mockSuperAuthority{ + finalizedL2Head: eth.BlockID{Hash: common.Hash{0xbb}, Number: 50}, + } + }, + setupLocalSafe: ð.L2BlockRef{Hash: common.Hash{0xcc}, Number: 40}, + setupLocalFinal: ð.L2BlockRef{Hash: common.Hash{0xdd}, Number: 30}, + expectResult: ð.L2BlockRef{Hash: common.Hash{0xcc}, Number: 40}, + }, + { + name: "without SuperAuthority returns zero value", + setupSuperAuth: func() *mockSuperAuthority { return nil }, + expectResult: ð.L2BlockRef{}, + }, + { + name: "returns empty block when genesis lookup fails", + setupSuperAuth: func() *mockSuperAuthority { + return &mockSuperAuthority{finalizedL2Head: eth.BlockID{}} + }, + setupLocalFinal: ð.L2BlockRef{Hash: common.Hash{0xaa}, Number: 100}, + setupEngine: func(m *testutils.MockEngine) { + m.ExpectL2BlockRefByNumber(0, eth.L2BlockRef{}, errors.New("genesis not found")) + }, + expectResult: ð.L2BlockRef{}, + }, + { + name: "panics when SuperAuthority block unknown to 
engine", + setupSuperAuth: func() *mockSuperAuthority { + return &mockSuperAuthority{ + finalizedL2Head: eth.BlockID{Hash: common.Hash{0x99}, Number: 50}, + } + }, + setupLocalSafe: ð.L2BlockRef{Hash: common.Hash{0xaa}, Number: 100}, + setupLocalFinal: ð.L2BlockRef{Hash: common.Hash{0xaa}, Number: 100}, + setupEngine: func(m *testutils.MockEngine) { + m.ExpectL2BlockRefByHash(common.Hash{0x99}, eth.L2BlockRef{}, errors.New("block not found")) + }, + expectPanic: "superAuthority supplied an identifier for the finalized head which is not known to the engine", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + var mockEngine *testutils.MockEngine + if tt.setupEngine != nil { + mockEngine = &testutils.MockEngine{} + } + + cfg := &rollup.Config{} + emitter := &testutils.MockEmitter{} + var superAuthority rollup.SuperAuthority + if tt.setupSuperAuth != nil { + if sa := tt.setupSuperAuth(); sa != nil { + superAuthority = sa + } + } + ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), metrics.NoopMetrics, cfg, &sync.Config{}, false, &testutils.MockL1Source{}, emitter, superAuthority) + if tt.setupLocalSafe != nil { + ec.SetLocalSafeHead(*tt.setupLocalSafe) + } + if tt.setupLocalFinal != nil { + ec.SetFinalizedHead(*tt.setupLocalFinal) + } + + if tt.setupEngine != nil { + tt.setupEngine(mockEngine) + } + + if tt.expectPanic != "" { + require.PanicsWithValue(t, tt.expectPanic, func() { + ec.FinalizedHead() + }) + } else { + result := ec.FinalizedHead() + require.Equal(t, *tt.expectResult, result) + } + }) + } +} diff --git a/op-node/rollup/engine/super_authority_mock_test.go b/op-node/rollup/engine/super_authority_mock_test.go index 389a904015eb5..116ad283c2f10 100644 --- a/op-node/rollup/engine/super_authority_mock_test.go +++ b/op-node/rollup/engine/super_authority_mock_test.go @@ -11,6 +11,7 @@ import ( // mockSuperAuthority implements SuperAuthority for testing. 
type mockSuperAuthority struct { fullyVerifiedL2Head eth.BlockID + finalizedL2Head eth.BlockID deniedBlocks map[uint64]common.Hash shouldError bool } @@ -40,4 +41,8 @@ func (m *mockSuperAuthority) FullyVerifiedL2Head() (eth.BlockID, bool) { return m.fullyVerifiedL2Head, false } +func (m *mockSuperAuthority) FinalizedL2Head() (eth.BlockID, bool) { + return m.finalizedL2Head, false +} + var _ rollup.SuperAuthority = (*mockSuperAuthority)(nil) diff --git a/op-node/rollup/iface.go b/op-node/rollup/iface.go index 78b109dd87f7f..fd332c26c8b61 100644 --- a/op-node/rollup/iface.go +++ b/op-node/rollup/iface.go @@ -15,6 +15,11 @@ type SuperAuthority interface { // If useLocalSafe is true, the BlockID return value should be ignored and local-safe used instead. // If useLocalSafe is false, the BlockID is the cross-verified safe head. FullyVerifiedL2Head() (head eth.BlockID, useLocalSafe bool) + // FinalizedL2Head returns the finalized L2 head block reference. + // The second return value indicates whether the caller should fall back to local-finalized. + // If useLocalFinalized is true, the BlockID return value should be ignored and local-finalized used instead. + // If useLocalFinalized is false, the BlockID is the cross-verified finalized head. + FinalizedL2Head() (head eth.BlockID, useLocalFinalized bool) // IsDenied checks if a payload hash is denied at the given block number. // Returns true if the payload should not be applied. // The error indicates if the check could not be performed (should be logged but not fatal). diff --git a/op-supernode/supernode/activity/activity.go b/op-supernode/supernode/activity/activity.go index 11410c4834cc8..be08fb59aca62 100644 --- a/op-supernode/supernode/activity/activity.go +++ b/op-supernode/supernode/activity/activity.go @@ -35,7 +35,22 @@ type RPCActivity interface { type VerificationActivity interface { Activity Name() string + + // Reset resets the activity's state. 
+ Reset(chainID eth.ChainID, timestamp uint64, invalidatedBlock eth.BlockRef) + + // CurrentL1 returns the current L1 block ID. CurrentL1() eth.BlockID + + // VerifiedAtTimestamp returns true if the activity has verified the data at the given timestamp. VerifiedAtTimestamp(ts uint64) (bool, error) + + // LatestVerifiedL2Block returns the latest L2 block which has been verified, + // along with the timestamp at which it was verified. LatestVerifiedL2Block(chainID eth.ChainID) (eth.BlockID, uint64) + + // VerifiedBlockAtL1 returns the verified L2 block and timestamp + // which guarantees that the verified data at that timestamp + // originates from or before the supplied L1 block. + VerifiedBlockAtL1(chainID eth.ChainID, l1Block eth.L1BlockRef) (eth.BlockID, uint64) } diff --git a/op-supernode/supernode/activity/interop/algo.go b/op-supernode/supernode/activity/interop/algo.go index 48dabc0508a7e..bafe923fe742c 100644 --- a/op-supernode/supernode/activity/interop/algo.go +++ b/op-supernode/supernode/activity/interop/algo.go @@ -3,6 +3,7 @@ package interop import ( "errors" "fmt" + "math" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -43,6 +44,31 @@ func (i *Interop) verifyInteropMessages(ts uint64, blocksAtTimestamp map[eth.Cha InvalidHeads: make(map[eth.ChainID]eth.BlockID), } + // Compute L1Inclusion: the earliest L1 block such that all L2 blocks at the + // supplied timestamp were derived + // from a source at or before that L1 block. 
+ earliestL1Inclusion := eth.BlockID{ + Number: math.MaxUint64, + } + for chainID := range blocksAtTimestamp { + chain, ok := i.chains[chainID] + if !ok { + continue + } + _, l1Block, err := chain.OptimisticAt(i.ctx, ts) + if err != nil { + i.log.Error("failed to get L1 inclusion for L2 block", "chainID", chainID, "timestamp", ts, "err", err) + return Result{}, fmt.Errorf("chain %s: failed to get L1 inclusion: %w", chainID, err) + } + if l1Block.Number < earliestL1Inclusion.Number { + earliestL1Inclusion = l1Block + } + } + if earliestL1Inclusion.Number == math.MaxUint64 { + return Result{}, fmt.Errorf("no L1 inclusion found for timestamp %d", ts) + } + result.L1Inclusion = earliestL1Inclusion + for chainID, expectedBlock := range blocksAtTimestamp { db, ok := i.logsDBs[chainID] if !ok { diff --git a/op-supernode/supernode/activity/interop/algo_test.go b/op-supernode/supernode/activity/interop/algo_test.go index 29a63c8e2a9f4..402c0eeba5c0a 100644 --- a/op-supernode/supernode/activity/interop/algo_test.go +++ b/op-supernode/supernode/activity/interop/algo_test.go @@ -1,6 +1,7 @@ package interop import ( + "context" "errors" "math/big" "testing" @@ -12,6 +13,8 @@ import ( "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" + cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/reads" suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -20,6 +23,14 @@ import ( // TestVerifyInteropMessages - Table-Driven Tests // ============================================================================= +// newMockChainWithL1 creates a mock chain with the specified L1 block for OptimisticAt +func newMockChainWithL1(chainID eth.ChainID, l1Block eth.BlockID) *algoMockChain { + return &algoMockChain{ + id: chainID, + optimisticL1: l1Block, 
+ } +} + // verifyInteropTestCase defines a single test case for verifyInteropMessages type verifyInteropTestCase struct { name string @@ -59,6 +70,7 @@ func TestVerifyInteropMessages(t *testing.T) { chainID := eth.ChainIDFromUInt64(10) blockHash := common.HexToHash("0x123") expectedBlock := eth.BlockID{Number: 100, Hash: blockHash} + l1Block := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1")} mockDB := &algoMockLogsDB{ openBlockRef: eth.BlockRef{Hash: blockHash, Number: 100, Time: 1000}, @@ -68,6 +80,7 @@ func TestVerifyInteropMessages(t *testing.T) { interop := &Interop{ log: gethlog.New(), logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + chains: map[eth.ChainID]cc.ChainContainer{chainID: newMockChainWithL1(chainID, l1Block)}, } return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} @@ -91,6 +104,7 @@ func TestVerifyInteropMessages(t *testing.T) { sourceBlock := eth.BlockID{Number: 50, Hash: sourceBlockHash} destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + l1Block := eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")} execMsg := &suptypes.ExecutingMessage{ ChainID: sourceChainID, @@ -118,6 +132,10 @@ func TestVerifyInteropMessages(t *testing.T) { sourceChainID: sourceDB, destChainID: destDB, }, + chains: map[eth.ChainID]cc.ChainContainer{ + sourceChainID: newMockChainWithL1(sourceChainID, l1Block), + destChainID: newMockChainWithL1(destChainID, l1Block), + }, } return interop, 1000, map[eth.ChainID]eth.BlockID{ @@ -145,6 +163,7 @@ func TestVerifyInteropMessages(t *testing.T) { sourceBlock := eth.BlockID{Number: 50, Hash: sourceBlockHash} destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + l1Block := eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")} execMsg := &suptypes.ExecutingMessage{ ChainID: sourceChainID, @@ -172,6 +191,10 @@ func TestVerifyInteropMessages(t *testing.T) { sourceChainID: sourceDB, destChainID: destDB, }, + chains: map[eth.ChainID]cc.ChainContainer{ + sourceChainID: 
newMockChainWithL1(sourceChainID, l1Block), + destChainID: newMockChainWithL1(destChainID, l1Block), + }, } return interop, execTimestamp, map[eth.ChainID]eth.BlockID{ @@ -197,6 +220,9 @@ func TestVerifyInteropMessages(t *testing.T) { interop := &Interop{ log: gethlog.New(), logsDBs: map[eth.ChainID]LogsDB{registeredChain: mockDB}, + chains: map[eth.ChainID]cc.ChainContainer{ + registeredChain: newMockChainWithL1(registeredChain, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + }, } return interop, 1000, map[eth.ChainID]eth.BlockID{ @@ -218,6 +244,7 @@ func TestVerifyInteropMessages(t *testing.T) { setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { chainID := eth.ChainIDFromUInt64(10) expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0xExpected")} + l1Block := eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")} mockDB := &algoMockLogsDB{ openBlockRef: eth.BlockRef{ @@ -230,6 +257,7 @@ func TestVerifyInteropMessages(t *testing.T) { interop := &Interop{ log: gethlog.New(), logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + chains: map[eth.ChainID]cc.ChainContainer{chainID: newMockChainWithL1(chainID, l1Block)}, } return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} @@ -276,6 +304,10 @@ func TestVerifyInteropMessages(t *testing.T) { sourceChainID: sourceDB, destChainID: destDB, }, + chains: map[eth.ChainID]cc.ChainContainer{ + sourceChainID: newMockChainWithL1(sourceChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + destChainID: newMockChainWithL1(destChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + }, } return interop, 1000, map[eth.ChainID]eth.BlockID{destChainID: destBlock} @@ -320,6 +352,10 @@ func TestVerifyInteropMessages(t *testing.T) { sourceChainID: sourceDB, destChainID: destDB, }, + chains: map[eth.ChainID]cc.ChainContainer{ + sourceChainID: newMockChainWithL1(sourceChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + destChainID: 
newMockChainWithL1(destChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + }, } return interop, 1000, map[eth.ChainID]eth.BlockID{destChainID: destBlock} @@ -360,6 +396,10 @@ func TestVerifyInteropMessages(t *testing.T) { destChainID: destDB, // Note: unknownSourceChain NOT in logsDBs }, + chains: map[eth.ChainID]cc.ChainContainer{ + unknownSourceChain: newMockChainWithL1(unknownSourceChain, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + destChainID: newMockChainWithL1(destChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + }, } return interop, 1000, map[eth.ChainID]eth.BlockID{destChainID: destBlock} @@ -409,6 +449,10 @@ func TestVerifyInteropMessages(t *testing.T) { sourceChainID: sourceDB, destChainID: destDB, }, + chains: map[eth.ChainID]cc.ChainContainer{ + sourceChainID: newMockChainWithL1(sourceChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + destChainID: newMockChainWithL1(destChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + }, } return interop, execTimestamp, map[eth.ChainID]eth.BlockID{destChainID: destBlock} @@ -463,6 +507,11 @@ func TestVerifyInteropMessages(t *testing.T) { validChainID: validDB, invalidChainID: invalidDB, }, + chains: map[eth.ChainID]cc.ChainContainer{ + invalidChainID: newMockChainWithL1(invalidChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + sourceChainID: newMockChainWithL1(sourceChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + validChainID: newMockChainWithL1(validChainID, eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")}), + }, } return interop, 1000, map[eth.ChainID]eth.BlockID{ @@ -482,12 +531,212 @@ func TestVerifyInteropMessages(t *testing.T) { require.Contains(t, result.InvalidHeads, invalidChainID) }, }, + // L1Inclusion tests + { + name: "L1Inclusion/SingleChain", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + blockHash := 
common.HexToHash("0x123") + expectedBlock := eth.BlockID{Number: 100, Hash: blockHash} + l1Block := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1")} + + mockDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: blockHash, Number: 100, Time: 1000}, + openBlockExecMsg: nil, + } + + mockChain := &algoMockChain{ + id: chainID, + optimisticL1: l1Block, + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + chains: map[eth.ChainID]cc.ChainContainer{chainID: mockChain}, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} + }, + validate: func(t *testing.T, result Result) { + chainID := eth.ChainIDFromUInt64(10) + expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0x123")} + expectedL1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1")} + require.True(t, result.IsValid()) + require.Empty(t, result.InvalidHeads) + require.Equal(t, expectedBlock, result.L2Heads[chainID]) + require.Equal(t, expectedL1, result.L1Inclusion) + }, + }, + { + name: "L1Inclusion/MultipleChains_EarliestL1Selected", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chain1ID := eth.ChainIDFromUInt64(10) + chain2ID := eth.ChainIDFromUInt64(8453) + chain3ID := eth.ChainIDFromUInt64(420) + + block1 := eth.BlockID{Number: 100, Hash: common.HexToHash("0x1")} + block2 := eth.BlockID{Number: 200, Hash: common.HexToHash("0x2")} + block3 := eth.BlockID{Number: 150, Hash: common.HexToHash("0x3")} + + // Chain 1 has L1 at 60 (highest) + // Chain 2 has L1 at 45 (earliest - should be selected) + // Chain 3 has L1 at 50 (middle) + l1Block1 := eth.BlockID{Number: 60, Hash: common.HexToHash("0xL1_1")} + l1Block2 := eth.BlockID{Number: 45, Hash: common.HexToHash("0xL1_2")} + l1Block3 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_3")} + + mockDB1 := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: block1.Hash, Number: block1.Number, Time: 1000}, + openBlockExecMsg: nil, + } + 
mockDB2 := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: block2.Hash, Number: block2.Number, Time: 1000}, + openBlockExecMsg: nil, + } + mockDB3 := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: block3.Hash, Number: block3.Number, Time: 1000}, + openBlockExecMsg: nil, + } + + mockChain1 := &algoMockChain{id: chain1ID, optimisticL1: l1Block1} + mockChain2 := &algoMockChain{id: chain2ID, optimisticL1: l1Block2} + mockChain3 := &algoMockChain{id: chain3ID, optimisticL1: l1Block3} + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + chain1ID: mockDB1, + chain2ID: mockDB2, + chain3ID: mockDB3, + }, + chains: map[eth.ChainID]cc.ChainContainer{ + chain1ID: mockChain1, + chain2ID: mockChain2, + chain3ID: mockChain3, + }, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{ + chain1ID: block1, + chain2ID: block2, + chain3ID: block3, + } + }, + validate: func(t *testing.T, result Result) { + // The earliest L1 block (45) should be selected + expectedL1 := eth.BlockID{Number: 45, Hash: common.HexToHash("0xL1_2")} + require.True(t, result.IsValid()) + require.Empty(t, result.InvalidHeads) + require.Equal(t, expectedL1, result.L1Inclusion) + require.Len(t, result.L2Heads, 3) + }, + }, + { + name: "L1Inclusion/ChainNotInChainsMap_Skipped", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chain1ID := eth.ChainIDFromUInt64(10) + chain2ID := eth.ChainIDFromUInt64(8453) // Not in chains map + + block1 := eth.BlockID{Number: 100, Hash: common.HexToHash("0x1")} + block2 := eth.BlockID{Number: 200, Hash: common.HexToHash("0x2")} + + l1Block1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_1")} + + mockDB1 := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: block1.Hash, Number: block1.Number, Time: 1000}, + openBlockExecMsg: nil, + } + mockDB2 := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: block2.Hash, Number: block2.Number, Time: 1000}, + openBlockExecMsg: nil, + } + + mockChain1 := &algoMockChain{id: 
chain1ID, optimisticL1: l1Block1} + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + chain1ID: mockDB1, + chain2ID: mockDB2, + }, + chains: map[eth.ChainID]cc.ChainContainer{ + chain1ID: mockChain1, + // chain2ID is NOT in the chains map + }, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{ + chain1ID: block1, + chain2ID: block2, + } + }, + validate: func(t *testing.T, result Result) { + chain2ID := eth.ChainIDFromUInt64(8453) + expectedL1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_1")} + require.True(t, result.IsValid()) + require.Empty(t, result.InvalidHeads) + // chain2 should still be in L2Heads even though it's not in chains map + require.Contains(t, result.L2Heads, chain2ID) + // L1Inclusion should only consider chain1 + require.Equal(t, expectedL1, result.L1Inclusion) + }, + }, + { + name: "L1Inclusion/OptimisticAtError_ReturnsError", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + blockHash := common.HexToHash("0x123") + expectedBlock := eth.BlockID{Number: 100, Hash: blockHash} + + mockDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: blockHash, Number: 100, Time: 1000}, + openBlockExecMsg: nil, + } + + mockChain := &algoMockChain{ + id: chainID, + optimisticAtErr: errors.New("optimistic at error"), + } + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + chains: map[eth.ChainID]cc.ChainContainer{chainID: mockChain}, + } + + return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} + }, + expectError: true, + errorMsg: "failed to get L1 inclusion", + validate: func(t *testing.T, result Result) { + require.True(t, result.IsEmpty()) + }, + }, + { + name: "L1Inclusion/NoChains_Error", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{}, + chains: map[eth.ChainID]cc.ChainContainer{}, 
+ } + + return interop, 1000, map[eth.ChainID]eth.BlockID{} + }, + expectError: true, + errorMsg: "no L1 inclusion found", + validate: func(t *testing.T, result Result) { + require.True(t, result.IsEmpty()) + }, + }, // Error cases { name: "Errors/OpenBlockError", setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { chainID := eth.ChainIDFromUInt64(10) block := eth.BlockID{Number: 100, Hash: common.HexToHash("0x123")} + l1Block := eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")} mockDB := &algoMockLogsDB{ openBlockErr: errors.New("database error"), @@ -496,6 +745,7 @@ func TestVerifyInteropMessages(t *testing.T) { interop := &Interop{ log: gethlog.New(), logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, + chains: map[eth.ChainID]cc.ChainContainer{chainID: newMockChainWithL1(chainID, l1Block)}, } return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: block} @@ -596,3 +846,62 @@ func (m *testBlockInfo) Header() *types.Header { func (m *testBlockInfo) ID() eth.BlockID { return eth.BlockID{Hash: m.hash, Number: m.number} } var _ eth.BlockInfo = (*testBlockInfo)(nil) + +// ============================================================================= +// Mock Chain Container for Algo Tests +// ============================================================================= + +// algoMockChain is a simplified mock chain container for algo tests +type algoMockChain struct { + id eth.ChainID + optimisticL2 eth.BlockID + optimisticL1 eth.BlockID + optimisticAtErr error +} + +func (m *algoMockChain) ID() eth.ChainID { return m.id } +func (m *algoMockChain) Start(ctx context.Context) error { return nil } +func (m *algoMockChain) Stop(ctx context.Context) error { return nil } +func (m *algoMockChain) Pause(ctx context.Context) error { return nil } +func (m *algoMockChain) Resume(ctx context.Context) error { return nil } +func (m *algoMockChain) RegisterVerifier(v activity.VerificationActivity) {} +func (m *algoMockChain) LocalSafeBlockAtTimestamp(ctx 
context.Context, ts uint64) (eth.L2BlockRef, error) { + return eth.L2BlockRef{}, nil +} +func (m *algoMockChain) VerifiedAt(ctx context.Context, ts uint64) (eth.BlockID, eth.BlockID, error) { + return eth.BlockID{}, eth.BlockID{}, nil +} +func (m *algoMockChain) L1ForL2(ctx context.Context, l2Block eth.BlockID) (eth.BlockID, error) { + return eth.BlockID{}, nil +} +func (m *algoMockChain) OptimisticAt(ctx context.Context, ts uint64) (eth.BlockID, eth.BlockID, error) { + if m.optimisticAtErr != nil { + return eth.BlockID{}, eth.BlockID{}, m.optimisticAtErr + } + return m.optimisticL2, m.optimisticL1, nil +} +func (m *algoMockChain) OutputRootAtL2BlockNumber(ctx context.Context, l2BlockNum uint64) (eth.Bytes32, error) { + return eth.Bytes32{}, nil +} +func (m *algoMockChain) OptimisticOutputAtTimestamp(ctx context.Context, ts uint64) (*eth.OutputResponse, error) { + return nil, nil +} +func (m *algoMockChain) FetchReceipts(ctx context.Context, blockID eth.BlockID) (eth.BlockInfo, types.Receipts, error) { + return nil, types.Receipts{}, nil +} +func (m *algoMockChain) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { + return ð.SyncStatus{}, nil +} +func (m *algoMockChain) RewindEngine(ctx context.Context, timestamp uint64, invalidatedBlock eth.BlockRef) error { + return nil +} +func (m *algoMockChain) BlockTime() uint64 { return 1 } +func (m *algoMockChain) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} +func (m *algoMockChain) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} +func (m *algoMockChain) SetResetCallback(cb cc.ResetCallback) {} + +var _ cc.ChainContainer = (*algoMockChain)(nil) diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index f35b33e34548b..25b707059cdc4 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ 
b/op-supernode/supernode/activity/interop/interop.go @@ -100,7 +100,6 @@ func New( verifiedDB: verifiedDB, logsDBs: logsDBs, dataDir: dataDir, - currentL1: eth.BlockID{}, activationTimestamp: activationTimestamp, } // default to using the verifyInteropMessages function @@ -187,6 +186,7 @@ func (i *Interop) progressAndRecord() (bool, error) { i.log.Error("failed to collect current L1", "err", err) return false, err } + // Perform the interop evaluation result, err := i.progressInterop() if err != nil { @@ -210,13 +210,13 @@ func (i *Interop) progressAndRecord() (bool, error) { // the current L1s being considered by the Activity right now depend on what progress was made: // - if interop failed to run, the current L1s are not updated // - if interop ran but did not advance the verified timestamp, the CurrentL1 values collected are used directly - // - if interop ran and advanced the verified timestamp, the CurrentL1 is the L1 head at the verified timestamp + // - if interop ran and advanced the verified timestamp, the L1Inclusion is the L1 inclusion at the verified timestamp // this is because the individual chains may advance their CurrentL1, and if progress is being made, we might not be done using the collected L1s. verifiedAdvanced := !result.IsEmpty() i.mu.Lock() if verifiedAdvanced { - // the new CurrentL1 is the L1 head at the verified timestamp - i.currentL1 = result.L1Head + // the new CurrentL1 is the L1 inclusion at the verified timestamp + i.currentL1 = result.L1Inclusion } else { // the new CurrentL1 is the lowest CurrentL1 from the collected chains i.currentL1 = localCurrentL1 @@ -421,6 +421,40 @@ func (i *Interop) LatestVerifiedL2Block(chainID eth.ChainID) (eth.BlockID, uint6 return head, ts } +// VerifiedBlockAtL1 returns the verified L2 block and timestamp +// which guarantees that the verified data at that pauseAtTimestamp +// originates from or before the supplied L1 block. 
+func (i *Interop) VerifiedBlockAtL1(chainID eth.ChainID, l1Block eth.L1BlockRef) (eth.BlockID, uint64) { + // Get the last verified timestamp + lastTs, ok := i.verifiedDB.LastTimestamp() + if !ok { + return eth.BlockID{}, 0 + } + + // Search backwards from the last timestamp to find the latest result + // where the L1 inclusion block is at or below the supplied L1 block number + for ts := lastTs; ts > 0; ts-- { + result, err := i.verifiedDB.Get(ts) + if err != nil { + // Timestamp might not exist (due to gaps or rewinds), continue searching + continue + } + + // Check if this result's L1 inclusion is at or below the supplied L1 block number + if result.L1Inclusion.Number <= l1Block.Number { + // Found a finalized result, return the L2 head for this chain + head, ok := result.L2Heads[chainID] + if !ok { + return eth.BlockID{}, 0 + } + return head, ts + } + } + + // No verified block found + return eth.BlockID{}, 0 +} + // Reset is called when a chain container resets due to an invalidated block. // It prunes the logsDB and verifiedDB for that chain at and after the timestamp. // The invalidatedBlock contains the block info that triggered the reset. 
diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index 17ace1df613e2..c95bbed173c04 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -425,7 +425,7 @@ func TestProgressInterop(t *testing.T) { // Default verifyFn that passes through passThroughVerifyFn := func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L2Heads: blocks}, nil + return Result{Timestamp: ts, L1Inclusion: eth.BlockID{Number: 100}, L2Heads: blocks}, nil } tests := []struct { @@ -584,7 +584,7 @@ func TestVerifiedAtTimestamp(t *testing.T) { }, run: func(t *testing.T, h *interopTestHarness) { h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L2Heads: blocks}, nil + return Result{Timestamp: ts, L1Inclusion: eth.BlockID{Number: 100}, L2Heads: blocks}, nil } result, err := h.interop.progressInterop() @@ -643,8 +643,8 @@ func TestHandleResult(t *testing.T) { run: func(t *testing.T, h *interopTestHarness) { mock := h.Mock(10) validResult := Result{ - Timestamp: 1000, - L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, + Timestamp: 1000, + L1Inclusion: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, L2Heads: map[eth.ChainID]eth.BlockID{ mock.id: {Number: 500, Hash: common.HexToHash("0xL2")}, }, @@ -660,7 +660,7 @@ func TestHandleResult(t *testing.T) { retrieved, err := h.interop.verifiedDB.Get(1000) require.NoError(t, err) require.Equal(t, validResult.Timestamp, retrieved.Timestamp) - require.Equal(t, validResult.L1Head, retrieved.L1Head) + require.Equal(t, validResult.L1Inclusion, retrieved.L1Inclusion) require.Equal(t, validResult.L2Heads[mock.id], retrieved.L2Heads[mock.id]) }, }, @@ -672,8 +672,8 @@ func TestHandleResult(t *testing.T) { run: func(t *testing.T, h *interopTestHarness) { mock := h.Mock(10) 
invalidResult := Result{ - Timestamp: 1000, - L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, + Timestamp: 1000, + L1Inclusion: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, L2Heads: map[eth.ChainID]eth.BlockID{ mock.id: {Number: 500, Hash: common.HexToHash("0xL2")}, }, @@ -773,8 +773,8 @@ func TestInvalidateBlock(t *testing.T) { mock2 := h.Mock(8453) invalidResult := Result{ - Timestamp: 1000, - L1Head: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, + Timestamp: 1000, + L1Inclusion: eth.BlockID{Number: 100, Hash: common.HexToHash("0xL1")}, L2Heads: map[eth.ChainID]eth.BlockID{ mock1.id: {Number: 500, Hash: common.HexToHash("0xL2-1")}, mock2.id: {Number: 600, Hash: common.HexToHash("0xL2-2")}, @@ -851,17 +851,17 @@ func TestProgressAndRecord(t *testing.T) { }).Build() }, run: func(t *testing.T, h *interopTestHarness) { - expectedL1Head := eth.BlockID{Number: 150, Hash: common.HexToHash("0xL1Result")} + expectedL1Inclusion := eth.BlockID{Number: 150, Hash: common.HexToHash("0xL1Result")} h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L1Head: expectedL1Head, L2Heads: blocks}, nil + return Result{Timestamp: ts, L1Inclusion: expectedL1Inclusion, L2Heads: blocks}, nil } madeProgress, err := h.interop.progressAndRecord() require.NoError(t, err) require.True(t, madeProgress, "valid result should advance verified timestamp") - require.Equal(t, expectedL1Head.Number, h.interop.currentL1.Number) - require.Equal(t, expectedL1Head.Hash, h.interop.currentL1.Hash) + require.Equal(t, expectedL1Inclusion.Number, h.interop.currentL1.Number) + require.Equal(t, expectedL1Inclusion.Hash, h.interop.currentL1.Hash) }, }, { @@ -880,7 +880,7 @@ func TestProgressAndRecord(t *testing.T) { h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { return Result{ Timestamp: ts, - L1Head: eth.BlockID{Number: 999, Hash: 
common.HexToHash("0xShouldNotBeUsed")}, + L1Inclusion: eth.BlockID{Number: 999, Hash: common.HexToHash("0xShouldNotBeUsed")}, L2Heads: blocks, InvalidHeads: map[eth.ChainID]eth.BlockID{mock.id: {Number: 100}}, }, nil @@ -941,7 +941,7 @@ func TestInterop_FullCycle(t *testing.T) { // Stub verifyFn interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { - return Result{Timestamp: ts, L2Heads: blocks}, nil + return Result{Timestamp: ts, L1Inclusion: eth.BlockID{Number: 100}, L2Heads: blocks}, nil } // Run 3 cycles @@ -991,7 +991,7 @@ func TestResult_IsEmpty(t *testing.T) { }{ {"zero value", Result{}, true}, {"only timestamp", Result{Timestamp: 1000}, true}, - {"with L1Head", Result{Timestamp: 1000, L1Head: eth.BlockID{Number: 100}}, false}, + {"with L1Head", Result{Timestamp: 1000, L1Inclusion: eth.BlockID{Number: 100}}, false}, {"with L2Heads", Result{Timestamp: 1000, L2Heads: map[eth.ChainID]eth.BlockID{eth.ChainIDFromUInt64(10): {Number: 50}}}, false}, {"with InvalidHeads", Result{Timestamp: 1000, InvalidHeads: map[eth.ChainID]eth.BlockID{eth.ChainIDFromUInt64(10): {Number: 50}}}, false}, } @@ -1052,6 +1052,11 @@ type mockChainContainer struct { invalidateBlockCalls []invalidateBlockCall invalidateBlockRet bool invalidateBlockErr error + + // OptimisticAt fields + optimisticL2 eth.BlockID + optimisticL1 eth.BlockID + optimisticAtErr error } type invalidateBlockCall struct { @@ -1090,7 +1095,12 @@ func (m *mockChainContainer) L1ForL2(ctx context.Context, l2Block eth.BlockID) ( return eth.BlockID{}, nil } func (m *mockChainContainer) OptimisticAt(ctx context.Context, ts uint64) (eth.BlockID, eth.BlockID, error) { - return eth.BlockID{}, eth.BlockID{}, nil + m.mu.Lock() + defer m.mu.Unlock() + if m.optimisticAtErr != nil { + return eth.BlockID{}, eth.BlockID{}, m.optimisticAtErr + } + return m.optimisticL2, m.optimisticL1, nil } func (m *mockChainContainer) OutputRootAtL2BlockNumber(ctx context.Context, l2BlockNum uint64) (eth.Bytes32, 
error) { return eth.Bytes32{}, nil @@ -1265,9 +1275,9 @@ func TestReset(t *testing.T) { // Add some verified results for ts := uint64(98); ts <= 102; ts++ { err := h.interop.verifiedDB.Commit(VerifiedResult{ - Timestamp: ts, - L1Head: eth.BlockID{Number: ts}, - L2Heads: map[eth.ChainID]eth.BlockID{mock.id: {Number: ts}}, + Timestamp: ts, + L1Inclusion: eth.BlockID{Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{mock.id: {Number: ts}}, }) require.NoError(t, err) } diff --git a/op-supernode/supernode/activity/interop/types.go b/op-supernode/supernode/activity/interop/types.go index 252aa0f7e3461..9bcf1d36d3748 100644 --- a/op-supernode/supernode/activity/interop/types.go +++ b/op-supernode/supernode/activity/interop/types.go @@ -5,19 +5,19 @@ import ( ) // VerifiedResult represents the verified state at a specific timestamp. -// It contains the L1 head from which the L2 heads were derived, +// It contains the L1 inclusion block from which the L2 heads were included, // and a map of each chain's L2 head at that timestamp. type VerifiedResult struct { - Timestamp uint64 `json:"timestamp"` - L1Head eth.BlockID `json:"l1Head"` - L2Heads map[eth.ChainID]eth.BlockID `json:"l2Heads"` + Timestamp uint64 `json:"timestamp"` + L1Inclusion eth.BlockID `json:"l1Inclusion"` + L2Heads map[eth.ChainID]eth.BlockID `json:"l2Heads"` } // Result represents the result of interop validation at a specific timestamp given current data. // it contains all the same information as VerifiedResult, but also contains a list of invalid heads. 
type Result struct { Timestamp uint64 `json:"timestamp"` - L1Head eth.BlockID `json:"l1Head"` + L1Inclusion eth.BlockID `json:"l1Inclusion"` L2Heads map[eth.ChainID]eth.BlockID `json:"l2Heads"` InvalidHeads map[eth.ChainID]eth.BlockID `json:"invalidHeads"` } @@ -27,13 +27,13 @@ func (r *Result) IsValid() bool { } func (r *Result) IsEmpty() bool { - return r.L1Head == (eth.BlockID{}) && len(r.L2Heads) == 0 && len(r.InvalidHeads) == 0 + return r.L1Inclusion == (eth.BlockID{}) && len(r.L2Heads) == 0 && len(r.InvalidHeads) == 0 } func (r *Result) ToVerifiedResult() VerifiedResult { return VerifiedResult{ - Timestamp: r.Timestamp, - L1Head: r.L1Head, - L2Heads: r.L2Heads, + Timestamp: r.Timestamp, + L1Inclusion: r.L1Inclusion, + L2Heads: r.L2Heads, } } diff --git a/op-supernode/supernode/activity/interop/types_test.go b/op-supernode/supernode/activity/interop/types_test.go index 484672481f9fd..31271bf765301 100644 --- a/op-supernode/supernode/activity/interop/types_test.go +++ b/op-supernode/supernode/activity/interop/types_test.go @@ -14,7 +14,7 @@ func TestResult_IsValid(t *testing.T) { t.Run("returns true when InvalidHeads is nil", func(t *testing.T) { r := Result{ Timestamp: 100, - L1Head: eth.BlockID{Number: 1}, + L1Inclusion: eth.BlockID{Number: 1}, L2Heads: map[eth.ChainID]eth.BlockID{eth.ChainIDFromUInt64(10): {Number: 100}}, InvalidHeads: nil, } @@ -24,7 +24,7 @@ func TestResult_IsValid(t *testing.T) { t.Run("returns true when InvalidHeads is empty map", func(t *testing.T) { r := Result{ Timestamp: 100, - L1Head: eth.BlockID{Number: 1}, + L1Inclusion: eth.BlockID{Number: 1}, L2Heads: map[eth.ChainID]eth.BlockID{eth.ChainIDFromUInt64(10): {Number: 100}}, InvalidHeads: map[eth.ChainID]eth.BlockID{}, } @@ -33,9 +33,9 @@ func TestResult_IsValid(t *testing.T) { t.Run("returns false when InvalidHeads has entries", func(t *testing.T) { r := Result{ - Timestamp: 100, - L1Head: eth.BlockID{Number: 1}, - L2Heads: map[eth.ChainID]eth.BlockID{eth.ChainIDFromUInt64(10): 
{Number: 100}}, + Timestamp: 100, + L1Inclusion: eth.BlockID{Number: 1}, + L2Heads: map[eth.ChainID]eth.BlockID{eth.ChainIDFromUInt64(10): {Number: 100}}, InvalidHeads: map[eth.ChainID]eth.BlockID{ eth.ChainIDFromUInt64(10): {Number: 100, Hash: common.HexToHash("0xbad")}, }, @@ -64,7 +64,7 @@ func TestResult_ToVerifiedResult(t *testing.T) { r := Result{ Timestamp: 12345, - L1Head: eth.BlockID{ + L1Inclusion: eth.BlockID{ Hash: common.HexToHash("0x1111"), Number: 100, }, @@ -80,15 +80,15 @@ func TestResult_ToVerifiedResult(t *testing.T) { verified := r.ToVerifiedResult() require.Equal(t, r.Timestamp, verified.Timestamp) - require.Equal(t, r.L1Head, verified.L1Head) + require.Equal(t, r.L1Inclusion, verified.L1Inclusion) require.Equal(t, r.L2Heads, verified.L2Heads) }) t.Run("handles nil L2Heads", func(t *testing.T) { r := Result{ - Timestamp: 100, - L1Head: eth.BlockID{Number: 1}, - L2Heads: nil, + Timestamp: 100, + L1Inclusion: eth.BlockID{Number: 1}, + L2Heads: nil, } verified := r.ToVerifiedResult() @@ -99,9 +99,9 @@ func TestResult_ToVerifiedResult(t *testing.T) { t.Run("handles empty L2Heads", func(t *testing.T) { r := Result{ - Timestamp: 100, - L1Head: eth.BlockID{Number: 1}, - L2Heads: map[eth.ChainID]eth.BlockID{}, + Timestamp: 100, + L1Inclusion: eth.BlockID{Number: 1}, + L2Heads: map[eth.ChainID]eth.BlockID{}, } verified := r.ToVerifiedResult() @@ -112,8 +112,8 @@ func TestResult_ToVerifiedResult(t *testing.T) { t.Run("original Result unchanged after conversion", func(t *testing.T) { chainID := eth.ChainIDFromUInt64(10) r := Result{ - Timestamp: 100, - L1Head: eth.BlockID{Number: 1}, + Timestamp: 100, + L1Inclusion: eth.BlockID{Number: 1}, L2Heads: map[eth.ChainID]eth.BlockID{ chainID: {Number: 200}, }, diff --git a/op-supernode/supernode/activity/interop/verified_db_test.go b/op-supernode/supernode/activity/interop/verified_db_test.go index 3848c30b021a8..204ca7cd1032b 100644 --- a/op-supernode/supernode/activity/interop/verified_db_test.go +++ 
b/op-supernode/supernode/activity/interop/verified_db_test.go @@ -28,7 +28,7 @@ func TestVerifiedDB_WriteAndRead(t *testing.T) { result1 := VerifiedResult{ Timestamp: 1000, - L1Head: eth.BlockID{ + L1Inclusion: eth.BlockID{ Hash: common.HexToHash("0x1111111111111111111111111111111111111111111111111111111111111111"), Number: 100, }, @@ -57,7 +57,7 @@ func TestVerifiedDB_WriteAndRead(t *testing.T) { retrieved, err := db.Get(1000) require.NoError(t, err) require.Equal(t, result1.Timestamp, retrieved.Timestamp) - require.Equal(t, result1.L1Head, retrieved.L1Head) + require.Equal(t, result1.L1Inclusion, retrieved.L1Inclusion) require.Equal(t, len(result1.L2Heads), len(retrieved.L2Heads)) require.Equal(t, result1.L2Heads[chainID1], retrieved.L2Heads[chainID1]) require.Equal(t, result1.L2Heads[chainID2], retrieved.L2Heads[chainID2]) @@ -88,33 +88,33 @@ func TestVerifiedDB_SequentialCommits(t *testing.T) { // Commit first timestamp err = db.Commit(VerifiedResult{ - Timestamp: 100, - L1Head: eth.BlockID{Hash: common.HexToHash("0x01"), Number: 1}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x02"), Number: 2}}, + Timestamp: 100, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0x01"), Number: 1}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x02"), Number: 2}}, }) require.NoError(t, err) // Commit next sequential timestamp should succeed err = db.Commit(VerifiedResult{ - Timestamp: 101, - L1Head: eth.BlockID{Hash: common.HexToHash("0x03"), Number: 3}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x04"), Number: 4}}, + Timestamp: 101, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0x03"), Number: 3}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x04"), Number: 4}}, }) require.NoError(t, err) // Try to commit non-sequential timestamp (gap) err = db.Commit(VerifiedResult{ - Timestamp: 105, - L1Head: eth.BlockID{Hash: common.HexToHash("0x05"), Number: 5}, - 
L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x06"), Number: 6}}, + Timestamp: 105, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0x05"), Number: 5}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x06"), Number: 6}}, }) require.ErrorIs(t, err, ErrNonSequential) // Try to commit already committed timestamp err = db.Commit(VerifiedResult{ - Timestamp: 100, - L1Head: eth.BlockID{Hash: common.HexToHash("0x07"), Number: 7}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x08"), Number: 8}}, + Timestamp: 100, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0x07"), Number: 7}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0x08"), Number: 8}}, }) require.ErrorIs(t, err, ErrAlreadyCommitted) @@ -132,16 +132,16 @@ func TestVerifiedDB_Persistence(t *testing.T) { require.NoError(t, err) err = db.Commit(VerifiedResult{ - Timestamp: 500, - L1Head: eth.BlockID{Hash: common.HexToHash("0xaaaa"), Number: 50}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xbbbb"), Number: 100}}, + Timestamp: 500, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0xaaaa"), Number: 50}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xbbbb"), Number: 100}}, }) require.NoError(t, err) err = db.Commit(VerifiedResult{ - Timestamp: 501, - L1Head: eth.BlockID{Hash: common.HexToHash("0xcccc"), Number: 51}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xdddd"), Number: 101}}, + Timestamp: 501, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0xcccc"), Number: 51}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xdddd"), Number: 101}}, }) require.NoError(t, err) @@ -161,7 +161,7 @@ func TestVerifiedDB_Persistence(t *testing.T) { result, err := db2.Get(500) require.NoError(t, err) require.Equal(t, uint64(500), result.Timestamp) - require.Equal(t, common.HexToHash("0xaaaa"), 
result.L1Head.Hash) + require.Equal(t, common.HexToHash("0xaaaa"), result.L1Inclusion.Hash) result, err = db2.Get(501) require.NoError(t, err) @@ -169,9 +169,9 @@ func TestVerifiedDB_Persistence(t *testing.T) { // Next commit should continue from last timestamp err = db2.Commit(VerifiedResult{ - Timestamp: 502, - L1Head: eth.BlockID{Hash: common.HexToHash("0xeeee"), Number: 52}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xffff"), Number: 102}}, + Timestamp: 502, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0xeeee"), Number: 52}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xffff"), Number: 102}}, }) require.NoError(t, err) } @@ -192,9 +192,9 @@ func TestVerifiedDB_RewindTo(t *testing.T) { // Commit several timestamps for ts := uint64(100); ts <= 105; ts++ { err = db.Commit(VerifiedResult{ - Timestamp: ts, - L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + Timestamp: ts, + L1Inclusion: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, }) require.NoError(t, err) } @@ -240,9 +240,9 @@ func TestVerifiedDB_RewindTo(t *testing.T) { // Commit up to timestamp 100 for ts := uint64(98); ts <= 100; ts++ { err = db.Commit(VerifiedResult{ - Timestamp: ts, - L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + Timestamp: ts, + L1Inclusion: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, }) require.NoError(t, err) } @@ -270,9 +270,9 @@ func TestVerifiedDB_RewindTo(t *testing.T) 
{ // Commit a few entries for ts := uint64(100); ts <= 102; ts++ { err = db.Commit(VerifiedResult{ - Timestamp: ts, - L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + Timestamp: ts, + L1Inclusion: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, }) require.NoError(t, err) } @@ -307,9 +307,9 @@ func TestVerifiedDB_RewindTo(t *testing.T) { // Commit 100-105 for ts := uint64(100); ts <= 105; ts++ { err = db.Commit(VerifiedResult{ - Timestamp: ts, - L1Head: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, + Timestamp: ts, + L1Inclusion: eth.BlockID{Hash: common.BytesToHash([]byte{byte(ts)}), Number: ts}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.BytesToHash([]byte{byte(ts + 100)}), Number: ts}}, }) require.NoError(t, err) } @@ -320,15 +320,15 @@ func TestVerifiedDB_RewindTo(t *testing.T) { // Should be able to commit 103 again (sequential from 102) err = db.Commit(VerifiedResult{ - Timestamp: 103, - L1Head: eth.BlockID{Hash: common.HexToHash("0xNEW"), Number: 103}, - L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xNEW2"), Number: 103}}, + Timestamp: 103, + L1Inclusion: eth.BlockID{Hash: common.HexToHash("0xNEW"), Number: 103}, + L2Heads: map[eth.ChainID]eth.BlockID{chainID: {Hash: common.HexToHash("0xNEW2"), Number: 103}}, }) require.NoError(t, err) // Verify new data result, err := db.Get(103) require.NoError(t, err) - require.Equal(t, common.HexToHash("0xNEW"), result.L1Head.Hash) + require.Equal(t, common.HexToHash("0xNEW"), result.L1Inclusion.Hash) }) } diff --git 
a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index 6c3606e959b74..52c004c908895 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -315,7 +315,7 @@ func (c *simpleChainContainer) LocalSafeBlockAtTimestamp(ctx context.Context, ts } head := ss.LocalSafeL2 if num > head.Number { - c.log.Warn("target block number exceeds local safe head", "targetBlockNumber", num, "head", head.Number) + c.log.Debug("target block number exceeds local safe head", "targetBlockNumber", num, "head", head.Number) return eth.L2BlockRef{}, ethereum.NotFound } diff --git a/op-supernode/supernode/chain_container/chain_container_test.go b/op-supernode/supernode/chain_container/chain_container_test.go index 818d8061f5d37..293e2ae4be60c 100644 --- a/op-supernode/supernode/chain_container/chain_container_test.go +++ b/op-supernode/supernode/chain_container/chain_container_test.go @@ -113,6 +113,7 @@ func (m *mockVirtualNode) SyncStatus(ctx context.Context) (*eth.SyncStatus, erro return nil, m.safeHeadErr } return ð.SyncStatus{ + FinalizedL1: eth.L1BlockRef{}, CurrentL1: eth.L1BlockRef{Hash: m.safeHeadL1.Hash, Number: m.safeHeadL1.Number}, LocalSafeL2: eth.L2BlockRef{Hash: m.safeHeadL2.Hash, Number: m.safeHeadL2.Number}, }, nil @@ -177,6 +178,9 @@ func (m *mockVerificationActivity) LatestVerifiedL2Block(chainID eth.ChainID) (e } func (m *mockVerificationActivity) Reset(chainID eth.ChainID, timestamp uint64, invalidatedBlock eth.BlockRef) { } +func (m *mockVerificationActivity) VerifiedBlockAtL1(chainID eth.ChainID, l1BlockRef eth.L1BlockRef) (eth.BlockID, uint64) { + return eth.BlockID{}, 0 +} // Test helpers func createTestVNConfig() *opnodecfg.Config { diff --git a/op-supernode/supernode/chain_container/super_authority.go b/op-supernode/supernode/chain_container/super_authority.go index 2badf8fd209f9..b9221bea4168d 100644 --- 
a/op-supernode/supernode/chain_container/super_authority.go +++ b/op-supernode/supernode/chain_container/super_authority.go @@ -1,6 +1,7 @@ package chain_container import ( + "context" "fmt" "math" @@ -43,6 +44,45 @@ func (c *simpleChainContainer) FullyVerifiedL2Head() (eth.BlockID, bool) { return oldestVerifiedBlock, false } +// FinalizedL2Head returns the finalized L2 head block identifier. +// The second return value indicates whether the caller should fall back to local-finalized. +// Returns (empty, true) only when no verifiers are registered. +// Returns (empty, false) when verifiers are registered but haven't finalized anything yet. +// Panics if verifiers disagree on the block hash for the same timestamp. +func (c *simpleChainContainer) FinalizedL2Head() (eth.BlockID, bool) { + // If no verifiers registered, signal fallback to local-finalized + if len(c.verifiers) == 0 { + c.log.Debug("FinalizedL2Head: no verifiers registered, signaling local-finalized fallback") + return eth.BlockID{}, true + } + + ss, err := c.vn.SyncStatus(context.Background()) + if err != nil { + c.log.Error("FinalizedL2Head: failed to get sync status", "err", err) + return eth.BlockID{}, true + } + timestamp := uint64(math.MaxUint64) + oldestFinalizedBlock := eth.BlockID{} + for _, v := range c.verifiers { + bId, ts := v.VerifiedBlockAtL1(c.chainID, ss.FinalizedL1) + // If any verifier returns empty, return empty but don't signal fallback + // The verifier exists but hasn't finalized anything yet + if (bId == eth.BlockID{} || ts == 0) { + c.log.Debug("FinalizedL2Head: verifier returned empty, returning empty without fallback", "verifier", v.Name()) + return eth.BlockID{}, false + } + if ts < timestamp { + timestamp = ts + oldestFinalizedBlock = bId + } else if ts == timestamp && bId != oldestFinalizedBlock { + panic("verifiers disagree on block hash for same timestamp") + } + } + + c.log.Debug("FinalizedL2Head: returning finalized block", "block", oldestFinalizedBlock, "timestamp", 
timestamp) + return oldestFinalizedBlock, false +} + // IsDenied checks if a block hash is on the deny list at the given height. func (c *simpleChainContainer) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { if c.denyList == nil { diff --git a/op-supernode/supernode/chain_container/super_authority_test.go b/op-supernode/supernode/chain_container/super_authority_test.go index e26b542cea5de..a532b1082ba5f 100644 --- a/op-supernode/supernode/chain_container/super_authority_test.go +++ b/op-supernode/supernode/chain_container/super_authority_test.go @@ -13,8 +13,10 @@ import ( // mockVerificationActivityForSuperAuthority provides controlled test data for SuperAuthority tests type mockVerificationActivityForSuperAuthority struct { - latestVerifiedBlock eth.BlockID - latestVerifiedTS uint64 + latestVerifiedBlock eth.BlockID + latestVerifiedTS uint64 + latestFinalizedBlock eth.BlockID + latestFinalizedTS uint64 } func (m *mockVerificationActivityForSuperAuthority) Start(ctx context.Context) error { return nil } @@ -30,6 +32,9 @@ func (m *mockVerificationActivityForSuperAuthority) LatestVerifiedL2Block(chainI return m.latestVerifiedBlock, m.latestVerifiedTS } func (m *mockVerificationActivityForSuperAuthority) Reset(eth.ChainID, uint64, eth.BlockRef) {} +func (m *mockVerificationActivityForSuperAuthority) VerifiedBlockAtL1(chainID eth.ChainID, l1BlockRef eth.L1BlockRef) (eth.BlockID, uint64) { + return m.latestFinalizedBlock, m.latestFinalizedTS +} var _ activity.VerificationActivity = (*mockVerificationActivityForSuperAuthority)(nil) @@ -39,6 +44,7 @@ func newTestChainContainer(t *testing.T, chainID eth.ChainID) *simpleChainContai chainID: chainID, verifiers: []activity.VerificationActivity{}, log: testlog.Logger(t, log.LevelDebug), + vn: &mockVirtualNode{}, } } @@ -185,3 +191,121 @@ func TestChainContainer_FullyVerifiedL2Head_AllUnverified(t *testing.T) { require.Equal(t, eth.BlockID{}, result, "should return empty BlockID when all verifiers are 
unverified") require.False(t, useLocalSafe, "should not signal fallback when verifiers exist but are unverified") } + +// TestChainContainer_FinalizedL2Head_MultipleVerifiers tests that FinalizedL2Head +// returns the block with the minimum (oldest) timestamp across all verifiers +func TestChainContainer_FinalizedL2Head_MultipleVerifiers(t *testing.T) { + t.Parallel() + + chainID := eth.ChainIDFromUInt64(420) + cc := newTestChainContainer(t, chainID) + + // Setup three verifiers with different timestamps + verifier1 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{Hash: [32]byte{1}, Number: 100}, + latestFinalizedTS: 1000, // oldest + } + verifier2 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{Hash: [32]byte{2}, Number: 200}, + latestFinalizedTS: 2000, // middle + } + verifier3 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{Hash: [32]byte{3}, Number: 300}, + latestFinalizedTS: 3000, // newest + } + + cc.verifiers = []activity.VerificationActivity{verifier1, verifier2, verifier3} + + // Should return the block with minimum timestamp (verifier1) + result, useLocalFinalized := cc.FinalizedL2Head() + require.Equal(t, verifier1.latestFinalizedBlock, result, "should return oldest finalized block") + require.False(t, useLocalFinalized, "should not signal fallback when verifiers have finalized blocks") +} + +// TestChainContainer_FinalizedL2Head_NoVerifiers tests that FinalizedL2Head +// returns an empty BlockID and signals fallback when there are no verification activities +func TestChainContainer_FinalizedL2Head_NoVerifiers(t *testing.T) { + t.Parallel() + + chainID := eth.ChainIDFromUInt64(420) + cc := newTestChainContainer(t, chainID) + + result, useLocalFinalized := cc.FinalizedL2Head() + require.Equal(t, eth.BlockID{}, result, "should return empty BlockID with no verifiers") + require.True(t, useLocalFinalized, "should signal fallback to local-finalized when no 
verifiers registered") +} + +// TestChainContainer_FinalizedL2Head_OneUnfinalized tests that FinalizedL2Head +// returns an empty BlockID without signaling fallback if any verifier returns an unfinalized state +func TestChainContainer_FinalizedL2Head_OneUnfinalized(t *testing.T) { + t.Parallel() + + chainID := eth.ChainIDFromUInt64(420) + cc := newTestChainContainer(t, chainID) + + // Setup verifiers where one is unfinalized (empty BlockID) + verifier1 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{Hash: [32]byte{1}, Number: 100}, + latestFinalizedTS: 1000, + } + verifier2 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{}, // unfinalized + latestFinalizedTS: 0, // zero timestamp + } + verifier3 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{Hash: [32]byte{3}, Number: 300}, + latestFinalizedTS: 3000, + } + + cc.verifiers = []activity.VerificationActivity{verifier1, verifier2, verifier3} + + // Should return empty BlockID (conservative approach) but NOT signal fallback + result, useLocalFinalized := cc.FinalizedL2Head() + require.Equal(t, eth.BlockID{}, result, "should return empty BlockID when any verifier is unfinalized") + require.False(t, useLocalFinalized, "should not signal fallback when verifiers exist but are unfinalized") +} + +// TestChainContainer_FinalizedL2Head_SingleVerifier tests the simple case +// with just one verification activity +func TestChainContainer_FinalizedL2Head_SingleVerifier(t *testing.T) { + t.Parallel() + + chainID := eth.ChainIDFromUInt64(420) + cc := newTestChainContainer(t, chainID) + + verifier := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{Hash: [32]byte{1}, Number: 100}, + latestFinalizedTS: 1000, + } + + cc.verifiers = []activity.VerificationActivity{verifier} + + result, useLocalFinalized := cc.FinalizedL2Head() + require.Equal(t, verifier.latestFinalizedBlock, result, "should return the 
single verifier's block") + require.False(t, useLocalFinalized, "should not signal fallback when verifier has finalized blocks") +} + +// TestChainContainer_FinalizedL2Head_AllUnfinalized tests that an empty BlockID +// is returned without signaling fallback when all verifiers are unfinalized +func TestChainContainer_FinalizedL2Head_AllUnfinalized(t *testing.T) { + t.Parallel() + + chainID := eth.ChainIDFromUInt64(420) + cc := newTestChainContainer(t, chainID) + + // All verifiers unfinalized + verifier1 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{}, + latestFinalizedTS: 0, + } + verifier2 := &mockVerificationActivityForSuperAuthority{ + latestFinalizedBlock: eth.BlockID{}, + latestFinalizedTS: 0, + } + + cc.verifiers = []activity.VerificationActivity{verifier1, verifier2} + + result, useLocalFinalized := cc.FinalizedL2Head() + require.Equal(t, eth.BlockID{}, result, "should return empty BlockID when all verifiers are unfinalized") + require.False(t, useLocalFinalized, "should not signal fallback when verifiers exist but are unfinalized") +} From 1ed453f6084c2644ac99bf0dc847f2e700fca25b Mon Sep 17 00:00:00 2001 From: Sam Stokes <35908605+bitwiseguy@users.noreply.github.com> Date: Fri, 20 Feb 2026 16:31:18 -0500 Subject: [PATCH 009/201] circleci: add go-binaries-for-sysgo as dep of go-tests-full (#19262) --- .circleci/continue/main.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 69f7156c0912f..a6b42c9f4cc21 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -3126,6 +3126,7 @@ workflows: requires: - contracts-bedrock-build - cannon-prestate + - go-binaries-for-sysgo context: - circleci-repo-readonly-authenticated-github-token - slack From 2460e59c372921d96eed4a204825fef98ba67bc8 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Fri, 20 Feb 2026 18:08:49 -0500 Subject: [PATCH 010/201] feat(flashblocks): add transaction caching to avoid 
re-executing unchanged transactions (#19030) --- rust/Cargo.lock | 7 + rust/op-reth/crates/flashblocks/Cargo.toml | 7 + rust/op-reth/crates/flashblocks/src/cache.rs | 1285 ++++++++++++++--- rust/op-reth/crates/flashblocks/src/lib.rs | 13 +- .../op-reth/crates/flashblocks/src/payload.rs | 14 +- .../crates/flashblocks/src/pending_state.rs | 181 ++- .../crates/flashblocks/src/sequence.rs | 71 +- .../op-reth/crates/flashblocks/src/service.rs | 225 ++- .../crates/flashblocks/src/tx_cache.rs | 702 +++++++++ .../crates/flashblocks/src/validation.rs | 179 ++- rust/op-reth/crates/flashblocks/src/worker.rs | 539 ++++++- .../crates/flashblocks/tests/it/harness.rs | 15 +- .../crates/flashblocks/tests/it/service.rs | 292 +++- rust/op-reth/crates/rpc/src/eth/mod.rs | 44 +- .../crates/rpc/src/eth/pending_block.rs | 54 +- 15 files changed, 3266 insertions(+), 362 deletions(-) create mode 100644 rust/op-reth/crates/flashblocks/src/tx_cache.rs diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 97933579c5c4a..4bb8f4dcfed97 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -11402,8 +11402,11 @@ version = "1.11.0" dependencies = [ "alloy-consensus", "alloy-eips", + "alloy-network", "alloy-primitives", + "alloy-rpc-types", "alloy-rpc-types-engine", + "alloy-signer-local", "brotli", "derive_more", "eyre", @@ -11411,16 +11414,20 @@ dependencies = [ "metrics", "op-alloy-consensus", "op-alloy-rpc-types-engine", + "op-revm", "reth-chain-state", "reth-engine-primitives", "reth-errors", "reth-evm", "reth-execution-types", "reth-metrics", + "reth-optimism-chainspec", + "reth-optimism-evm", "reth-optimism-payload-builder", "reth-optimism-primitives", "reth-payload-primitives", "reth-primitives-traits", + "reth-provider", "reth-revm", "reth-rpc-eth-types", "reth-storage-api", diff --git a/rust/op-reth/crates/flashblocks/Cargo.toml b/rust/op-reth/crates/flashblocks/Cargo.toml index 34dcb42bee487..58be6ea0349dd 100644 --- a/rust/op-reth/crates/flashblocks/Cargo.toml +++ 
b/rust/op-reth/crates/flashblocks/Cargo.toml @@ -31,6 +31,7 @@ reth-metrics.workspace = true alloy-eips = { workspace = true, features = ["serde"] } alloy-primitives = { workspace = true, features = ["serde"] } alloy-rpc-types-engine = { workspace = true, features = ["serde"] } +alloy-rpc-types.workspace = true alloy-consensus.workspace = true # op-alloy @@ -57,4 +58,10 @@ derive_more.workspace = true [dev-dependencies] test-case.workspace = true alloy-consensus.workspace = true +alloy-network.workspace = true +alloy-signer-local.workspace = true op-alloy-consensus.workspace = true +op-revm.workspace = true +reth-optimism-chainspec.workspace = true +reth-optimism-evm.workspace = true +reth-provider = { workspace = true, features = ["test-utils"] } diff --git a/rust/op-reth/crates/flashblocks/src/cache.rs b/rust/op-reth/crates/flashblocks/src/cache.rs index 8abe72e8e45fa..9dd90a5e8db7b 100644 --- a/rust/op-reth/crates/flashblocks/src/cache.rs +++ b/rust/op-reth/crates/flashblocks/src/cache.rs @@ -7,16 +7,21 @@ use crate::{ FlashBlock, FlashBlockCompleteSequence, PendingFlashBlock, pending_state::PendingBlockState, sequence::{FlashBlockPendingSequence, SequenceExecutionOutcome}, - validation::{CanonicalBlockReconciler, ReconciliationStrategy, ReorgDetector}, + validation::{ + CanonicalBlockFingerprint, CanonicalBlockReconciler, ReconciliationStrategy, ReorgDetector, + TrackedBlockFingerprint, + }, worker::BuildArgs, }; use alloy_eips::eip2718::WithEncoded; use alloy_primitives::B256; +use alloy_rpc_types_engine::PayloadId; use reth_primitives_traits::{ NodePrimitives, Recovered, SignedTransaction, transaction::TxHashRef, }; use reth_revm::cached::CachedReads; use ringbuffer::{AllocRingBuffer, RingBuffer}; +use std::collections::{BTreeMap, HashSet}; use tokio::sync::broadcast; use tracing::*; @@ -25,6 +30,194 @@ const CACHE_SIZE: usize = 3; /// 200 ms flashblock time. 
pub(crate) const FLASHBLOCK_BLOCK_TIME: u64 = 200; +/// Stable identity for a tracked flashblock sequence. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub(crate) struct SequenceId { + pub(crate) block_number: u64, + pub(crate) payload_id: PayloadId, + pub(crate) parent_hash: B256, +} + +impl SequenceId { + fn from_pending(sequence: &FlashBlockPendingSequence) -> Option { + let base = sequence.payload_base()?; + let payload_id = sequence.payload_id()?; + Some(Self { block_number: base.block_number, payload_id, parent_hash: base.parent_hash }) + } + + fn from_complete(sequence: &FlashBlockCompleteSequence) -> Self { + Self { + block_number: sequence.block_number(), + payload_id: sequence.payload_id(), + parent_hash: sequence.payload_base().parent_hash, + } + } +} + +/// Snapshot selector for build-completion matching. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +enum SequenceSnapshot { + Pending { revision: u64 }, + Cached, +} + +/// Opaque ticket that identifies the exact sequence snapshot selected for a build. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub(crate) struct BuildTicket { + sequence_id: SequenceId, + snapshot: SequenceSnapshot, +} + +impl BuildTicket { + const fn pending(sequence_id: SequenceId, revision: u64) -> Self { + Self { sequence_id, snapshot: SequenceSnapshot::Pending { revision } } + } + + const fn cached(sequence_id: SequenceId) -> Self { + Self { sequence_id, snapshot: SequenceSnapshot::Cached } + } +} + +/// Result of attempting to apply a build completion to tracked sequence state. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum BuildApplyOutcome { + SkippedNoBuildResult, + AppliedPending, + AppliedCached { + rebroadcasted: bool, + }, + RejectedPendingSequenceMismatch { + ticket_sequence_id: SequenceId, + current_sequence_id: Option, + }, + RejectedPendingRevisionStale { + sequence_id: SequenceId, + ticket_revision: u64, + current_revision: u64, + }, + RejectedCachedSequenceMissing { + sequence_id: SequenceId, + }, +} + +impl BuildApplyOutcome { + pub(crate) const fn is_applied(self) -> bool { + matches!(self, Self::AppliedPending | Self::AppliedCached { .. }) + } +} + +/// A buildable sequence plus the stable identity that selected it. +pub(crate) struct BuildCandidate { + pub(crate) ticket: BuildTicket, + pub(crate) args: BuildArgs, +} + +impl std::ops::Deref for BuildCandidate { + type Target = BuildArgs; + + fn deref(&self) -> &Self::Target { + &self.args + } +} + +/// In-progress pending sequence state. +/// +/// Keeps accepted flashblocks and recovered transactions in lockstep by index. 
+#[derive(Debug)] +struct PendingSequence { + sequence: FlashBlockPendingSequence, + recovered_transactions_by_index: BTreeMap>>>, + revision: u64, + applied_revision: Option, +} + +impl PendingSequence { + fn new() -> Self { + Self { + sequence: FlashBlockPendingSequence::new(), + recovered_transactions_by_index: BTreeMap::new(), + revision: 0, + applied_revision: None, + } + } + + const fn sequence(&self) -> &FlashBlockPendingSequence { + &self.sequence + } + + fn count(&self) -> usize { + self.sequence.count() + } + + const fn revision(&self) -> u64 { + self.revision + } + + fn clear(&mut self) { + self.sequence = FlashBlockPendingSequence::new(); + self.recovered_transactions_by_index.clear(); + self.applied_revision = None; + } + + const fn bump_revision(&mut self) { + self.revision = self.revision.wrapping_add(1); + } + + fn is_revision_applied(&self, revision: u64) -> bool { + self.applied_revision == Some(revision) + } + + const fn mark_revision_applied(&mut self, revision: u64) { + self.applied_revision = Some(revision); + } + + fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { + if !self.sequence.can_accept(&flashblock) { + self.sequence.insert(flashblock); + return Ok(()); + } + + // Only recover transactions once we've validated that this flashblock is accepted. + let recovered_txs = flashblock.recover_transactions().collect::, _>>()?; + let flashblock_index = flashblock.index; + + // Index 0 starts a fresh pending block, so clear any stale in-progress data. 
+ if flashblock_index == 0 { + self.clear(); + } + + self.sequence.insert(flashblock); + self.recovered_transactions_by_index.insert(flashblock_index, recovered_txs); + self.bump_revision(); + Ok(()) + } + + fn finalize( + &mut self, + ) -> eyre::Result<(FlashBlockCompleteSequence, Vec>>)> { + let finalized = self.sequence.finalize(); + let recovered_by_index = std::mem::take(&mut self.recovered_transactions_by_index); + + match finalized { + Ok(completed) => Ok((completed, recovered_by_index.into_values().flatten().collect())), + Err(err) => Err(err), + } + } + + fn transactions(&self) -> Vec>> { + self.recovered_transactions_by_index.values().flatten().cloned().collect() + } + + fn tx_hashes(&self) -> Vec { + self.recovered_transactions_by_index.values().flatten().map(|tx| *tx.tx_hash()).collect() + } + + #[cfg(test)] + fn transaction_count(&self) -> usize { + self.recovered_transactions_by_index.values().map(Vec::len).sum() + } +} + /// Manages flashblock sequences with caching support. /// /// This struct handles: @@ -35,12 +228,12 @@ pub(crate) const FLASHBLOCK_BLOCK_TIME: u64 = 200; #[derive(Debug)] pub(crate) struct SequenceManager { /// Current pending sequence being built up from incoming flashblocks - pending: FlashBlockPendingSequence, - /// Cached recovered transactions for the pending sequence - pending_transactions: Vec>>, + pending: PendingSequence, /// Ring buffer of recently completed sequences bundled with their decoded transactions (FIFO, /// size 3) completed_cache: AllocRingBuffer<(FlashBlockCompleteSequence, Vec>>)>, + /// Cached sequence identities that already had a build completion applied. + applied_cached_sequences: HashSet, /// Cached minimum block number currently present in `completed_cache`. 
cached_min_block_number: Option, /// Broadcast channel for completed sequences @@ -54,9 +247,9 @@ impl SequenceManager { pub(crate) fn new(compute_state_root: bool) -> Self { let (block_broadcaster, _) = broadcast::channel(128); Self { - pending: FlashBlockPendingSequence::new(), - pending_transactions: Vec::new(), + pending: PendingSequence::new(), completed_cache: AllocRingBuffer::new(CACHE_SIZE), + applied_cached_sequences: HashSet::new(), cached_min_block_number: None, block_broadcaster, compute_state_root, @@ -86,7 +279,7 @@ impl SequenceManager { pub(crate) fn insert_flashblock(&mut self, flashblock: FlashBlock) -> eyre::Result<()> { // If this starts a new block, finalize and cache the previous sequence BEFORE inserting if flashblock.index == 0 && self.pending.count() > 0 { - let completed = self.pending.finalize()?; + let (completed, txs) = self.pending.finalize()?; let block_number = completed.block_number(); let parent_hash = completed.payload_base().parent_hash; @@ -107,16 +300,10 @@ impl SequenceManager { // Bundle completed sequence with its decoded transactions and push to cache // Ring buffer automatically evicts oldest entry when full - let txs = std::mem::take(&mut self.pending_transactions); self.push_completed_sequence(completed, txs); - - // ensure cache is wiped on new flashblock - let _ = self.pending.take_cached_reads(); } - self.pending_transactions - .extend(flashblock.recover_transactions().collect::, _>>()?); - self.pending.insert(flashblock); + self.pending.insert_flashblock(flashblock)?; Ok(()) } @@ -127,11 +314,23 @@ impl SequenceManager { txs: Vec>>, ) { let block_number = completed.block_number(); + let completed_sequence_id = SequenceId::from_complete(&completed); let evicted_block_number = if self.completed_cache.is_full() { self.completed_cache.front().map(|(seq, _)| seq.block_number()) } else { None }; + let evicted_sequence_id = if self.completed_cache.is_full() { + self.completed_cache.front().map(|(seq, _)| 
SequenceId::from_complete(seq)) + } else { + None + }; + + if let Some(sequence_id) = evicted_sequence_id { + self.applied_cached_sequences.remove(&sequence_id); + } + // Re-tracking a sequence identity should always start as unapplied. + self.applied_cached_sequences.remove(&completed_sequence_id); self.completed_cache.enqueue((completed, txs)); @@ -150,12 +349,38 @@ impl SequenceManager { self.completed_cache.iter().map(|(seq, _)| seq.block_number()).min() } + /// Returns the newest cached sequence that matches `parent_hash` and still needs execution. + /// + /// Cached sequences that already had build completion applied are skipped to avoid redundant + /// rebuild loops. + fn newest_unexecuted_cached_for_parent( + &self, + parent_hash: B256, + ) -> Option<&(FlashBlockCompleteSequence, Vec>>)> { + self.completed_cache.iter().rev().find(|(seq, _)| { + let sequence_id = SequenceId::from_complete(seq); + seq.payload_base().parent_hash == parent_hash && + !self.applied_cached_sequences.contains(&sequence_id) + }) + } + + /// Returns a mutable cached sequence entry by exact sequence identity. + fn cached_entry_mut_by_id( + &mut self, + sequence_id: SequenceId, + ) -> Option<&mut (FlashBlockCompleteSequence, Vec>>)> { + self.completed_cache + .iter_mut() + .find(|(seq, _)| SequenceId::from_complete(seq) == sequence_id) + } + /// Returns the current pending sequence for inspection. pub(crate) const fn pending(&self) -> &FlashBlockPendingSequence { - &self.pending + self.pending.sequence() } - /// Finds the next sequence to build and returns ready-to-use `BuildArgs`. + /// Finds the next sequence to build and returns the selected sequence identity + /// with ready-to-use `BuildArgs`. /// /// Priority order: /// 1. 
Current pending sequence (if parent matches local tip) @@ -168,41 +393,87 @@ impl SequenceManager { local_tip_hash: B256, local_tip_timestamp: u64, pending_parent_state: Option>, - ) -> Option>>, N>> { - // Try to find a buildable sequence: (base, last_fb, transactions, cached_state, - // source_name, pending_parent) - let (base, last_flashblock, transactions, cached_state, source_name, pending_parent) = + ) -> Option>>, N>> { + // Try to find a buildable sequence: (ticket, base, last_fb, transactions, + // cached_state, source_name, pending_parent) + let (ticket, base, last_flashblock, transactions, cached_state, source_name, pending_parent) = // Priority 1: Try current pending sequence (canonical mode) - if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash == local_tip_hash) { - let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash, r)); - let last_fb = self.pending.last_flashblock()?; - let transactions = self.pending_transactions.clone(); - (base, last_fb, transactions, cached_state, "pending", None) + if let Some(base) = self.pending.sequence.payload_base().filter(|b| b.parent_hash == local_tip_hash) { + let revision = self.pending.revision(); + if self.pending.is_revision_applied(revision) { + trace!( + target: "flashblocks", + block_number = base.block_number, + revision, + parent_hash = ?base.parent_hash, + "Skipping rebuild for already-applied pending revision" + ); + return None; + } + let sequence_id = SequenceId::from_pending(self.pending.sequence())?; + let ticket = BuildTicket::pending(sequence_id, revision); + let cached_state = self.pending.sequence.take_cached_reads().map(|r| (base.parent_hash, r)); + let last_fb = self.pending.sequence.last_flashblock()?; + let transactions = self.pending.transactions(); + (ticket, base, last_fb, transactions, cached_state, "pending", None) } // Priority 2: Try cached sequence with exact parent match (canonical mode) - else if let Some((cached, txs)) = 
self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash == local_tip_hash) { + else if let Some((cached, txs)) = self.newest_unexecuted_cached_for_parent(local_tip_hash) { + let sequence_id = SequenceId::from_complete(cached); + let ticket = BuildTicket::cached(sequence_id); let base = cached.payload_base().clone(); let last_fb = cached.last(); let transactions = txs.clone(); let cached_state = None; - (base, last_fb, transactions, cached_state, "cached", None) + (ticket, base, last_fb, transactions, cached_state, "cached", None) } // Priority 3: Try speculative building with pending parent state else if let Some(ref pending_state) = pending_parent_state { // Check if pending sequence's parent matches the pending state's block - if let Some(base) = self.pending.payload_base().filter(|b| b.parent_hash == pending_state.block_hash) { - let cached_state = self.pending.take_cached_reads().map(|r| (base.parent_hash, r)); - let last_fb = self.pending.last_flashblock()?; - let transactions = self.pending_transactions.clone(); - (base, last_fb, transactions, cached_state, "speculative-pending", pending_parent_state) + if let Some(base) = self.pending.sequence.payload_base().filter(|b| b.parent_hash == pending_state.block_hash) { + let revision = self.pending.revision(); + if self.pending.is_revision_applied(revision) { + trace!( + target: "flashblocks", + block_number = base.block_number, + revision, + speculative_parent = ?pending_state.block_hash, + "Skipping speculative rebuild for already-applied pending revision" + ); + return None; + } + let sequence_id = SequenceId::from_pending(self.pending.sequence())?; + let ticket = BuildTicket::pending(sequence_id, revision); + let cached_state = self.pending.sequence.take_cached_reads().map(|r| (base.parent_hash, r)); + let last_fb = self.pending.sequence.last_flashblock()?; + let transactions = self.pending.transactions(); + ( + ticket, + base, + last_fb, + transactions, + cached_state, + "speculative-pending", 
+ pending_parent_state, + ) } // Check cached sequences - else if let Some((cached, txs)) = self.completed_cache.iter().find(|(c, _)| c.payload_base().parent_hash == pending_state.block_hash) { + else if let Some((cached, txs)) = self.newest_unexecuted_cached_for_parent(pending_state.block_hash) { + let sequence_id = SequenceId::from_complete(cached); + let ticket = BuildTicket::cached(sequence_id); let base = cached.payload_base().clone(); let last_fb = cached.last(); let transactions = txs.clone(); let cached_state = None; - (base, last_fb, transactions, cached_state, "speculative-cached", pending_parent_state) + ( + ticket, + base, + last_fb, + transactions, + cached_state, + "speculative-cached", + pending_parent_state, + ) } else { return None; } @@ -238,7 +509,7 @@ impl SequenceManager { // compute the state root, causing FlashblockConsensusClient to lack precomputed state for // engine_newPayload. This is safe: we still have op-node as backstop to maintain // chain progression. - let block_time_ms = (base.timestamp - local_tip_timestamp) * 1000; + let block_time_ms = base.timestamp.saturating_sub(local_tip_timestamp) * 1000; let expected_final_flashblock = block_time_ms / FLASHBLOCK_BLOCK_TIME; let compute_state_root = self.compute_state_root && last_flashblock.diff.state_root.is_zero() && @@ -248,6 +519,7 @@ impl SequenceManager { target: "flashblocks", block_number = base.block_number, source = source_name, + ticket = ?ticket, flashblock_index = last_flashblock.index, expected_final_flashblock, compute_state_root_enabled = self.compute_state_root, @@ -257,14 +529,17 @@ impl SequenceManager { "Building from flashblock sequence" ); - Some(BuildArgs { - base, - transactions, - cached_state, - last_flashblock_index: last_flashblock.index, - last_flashblock_hash: last_flashblock.diff.block_hash, - compute_state_root, - pending_parent, + Some(BuildCandidate { + ticket, + args: BuildArgs { + base, + transactions, + cached_state, + last_flashblock_index: 
last_flashblock.index, + last_flashblock_hash: last_flashblock.diff.block_hash, + compute_state_root, + pending_parent, + }, }) } @@ -275,11 +550,11 @@ impl SequenceManager { /// the consensus client to submit via `engine_newPayload`. pub(crate) fn on_build_complete( &mut self, - parent_hash: B256, + ticket: BuildTicket, result: Option<(PendingFlashBlock, CachedReads)>, - ) { + ) -> BuildApplyOutcome { let Some((computed_block, cached_reads)) = result else { - return; + return BuildApplyOutcome::SkippedNoBuildResult; }; // Extract execution outcome @@ -287,45 +562,138 @@ impl SequenceManager { SequenceExecutionOutcome { block_hash: computed_block.block().hash(), state_root } }); - // Update pending sequence with execution results - if self.pending.payload_base().is_some_and(|base| base.parent_hash == parent_hash) { - self.pending.set_execution_outcome(execution_outcome); - self.pending.set_cached_reads(cached_reads); - trace!( - target: "flashblocks", - block_number = self.pending.block_number(), - has_computed_state_root = execution_outcome.is_some(), - "Updated pending sequence with build results" - ); - } - // Check if this completed sequence in cache and broadcast with execution outcome - else if let Some((cached, _)) = self - .completed_cache - .iter_mut() - .find(|(c, _)| c.payload_base().parent_hash == parent_hash) - { - // Only re-broadcast if we computed new information (state_root was missing). - // If sequencer already provided state_root, we already broadcast in insert_flashblock, - // so skip re-broadcast to avoid duplicate FCU calls. 
- let needs_rebroadcast = - execution_outcome.is_some() && cached.execution_outcome().is_none(); - - cached.set_execution_outcome(execution_outcome); - - if needs_rebroadcast && self.block_broadcaster.receiver_count() > 0 { + let outcome = self.apply_build_outcome(ticket, execution_outcome, cached_reads); + match outcome { + BuildApplyOutcome::SkippedNoBuildResult | BuildApplyOutcome::AppliedPending => {} + BuildApplyOutcome::AppliedCached { rebroadcasted } => { trace!( target: "flashblocks", - block_number = cached.block_number(), - "Re-broadcasting sequence with computed state_root" + ticket = ?ticket, + rebroadcasted, + "Applied cached build completion" + ); + } + BuildApplyOutcome::RejectedPendingSequenceMismatch { + ticket_sequence_id, + current_sequence_id, + } => { + trace!( + target: "flashblocks", + ticket = ?ticket, + ?ticket_sequence_id, + ?current_sequence_id, + "Rejected build completion: pending sequence mismatch" + ); + } + BuildApplyOutcome::RejectedPendingRevisionStale { + sequence_id, + ticket_revision, + current_revision, + } => { + trace!( + target: "flashblocks", + ticket = ?ticket, + ?sequence_id, + ticket_revision, + current_revision, + "Rejected build completion: pending revision stale" + ); + } + BuildApplyOutcome::RejectedCachedSequenceMissing { sequence_id } => { + trace!( + target: "flashblocks", + ticket = ?ticket, + ?sequence_id, + "Rejected build completion: cached sequence missing" ); - let _ = self.block_broadcaster.send(cached.clone()); + } + } + outcome + } + + /// Applies build output to the exact sequence targeted by the build job. + /// + /// Returns the apply outcome with explicit rejection reasons for observability. 
+ fn apply_build_outcome( + &mut self, + ticket: BuildTicket, + execution_outcome: Option, + cached_reads: CachedReads, + ) -> BuildApplyOutcome { + match ticket.snapshot { + SequenceSnapshot::Pending { revision } => { + let current_sequence_id = SequenceId::from_pending(self.pending.sequence()); + if current_sequence_id != Some(ticket.sequence_id) { + return BuildApplyOutcome::RejectedPendingSequenceMismatch { + ticket_sequence_id: ticket.sequence_id, + current_sequence_id, + }; + } + + let current_revision = self.pending.revision(); + if current_revision != revision { + return BuildApplyOutcome::RejectedPendingRevisionStale { + sequence_id: ticket.sequence_id, + ticket_revision: revision, + current_revision, + }; + } + + { + self.pending.sequence.set_execution_outcome(execution_outcome); + self.pending.sequence.set_cached_reads(cached_reads); + self.pending.mark_revision_applied(current_revision); + trace!( + target: "flashblocks", + block_number = self.pending.sequence.block_number(), + ticket = ?ticket, + has_computed_state_root = execution_outcome.is_some(), + "Updated pending sequence with build results" + ); + } + BuildApplyOutcome::AppliedPending + } + SequenceSnapshot::Cached => { + if let Some((cached, _)) = self.cached_entry_mut_by_id(ticket.sequence_id) { + let (needs_rebroadcast, rebroadcast_sequence) = { + // Only re-broadcast if we computed new information (state_root was + // missing). If sequencer already provided + // state_root, we already broadcast in + // insert_flashblock, so skip re-broadcast to avoid duplicate FCU calls. 
+ let needs_rebroadcast = + execution_outcome.is_some() && cached.execution_outcome().is_none(); + + cached.set_execution_outcome(execution_outcome); + + let rebroadcast_sequence = needs_rebroadcast.then_some(cached.clone()); + (needs_rebroadcast, rebroadcast_sequence) + }; + self.applied_cached_sequences.insert(ticket.sequence_id); + + if let Some(sequence) = rebroadcast_sequence && + self.block_broadcaster.receiver_count() > 0 + { + trace!( + target: "flashblocks", + block_number = sequence.block_number(), + ticket = ?ticket, + "Re-broadcasting sequence with computed state_root" + ); + let _ = self.block_broadcaster.send(sequence); + } + BuildApplyOutcome::AppliedCached { rebroadcasted: needs_rebroadcast } + } else { + BuildApplyOutcome::RejectedCachedSequenceMissing { + sequence_id: ticket.sequence_id, + } + } } } } /// Returns the earliest block number in the pending or cached sequences. pub(crate) fn earliest_block_number(&self) -> Option { - match (self.pending.block_number(), self.cached_min_block_number) { + match (self.pending.sequence.block_number(), self.cached_min_block_number) { (Some(pending_block), Some(cache_min)) => Some(cache_min.min(pending_block)), (Some(pending_block), None) => Some(pending_block), (None, Some(cache_min)) => Some(cache_min), @@ -336,7 +704,7 @@ impl SequenceManager { /// Returns the latest block number in the pending or cached sequences. pub(crate) fn latest_block_number(&self) -> Option { // Pending is always the latest if it exists - if let Some(pending_block) = self.pending.block_number() { + if let Some(pending_block) = self.pending.sequence.block_number() { return Some(pending_block); } @@ -344,32 +712,37 @@ impl SequenceManager { self.completed_cache.iter().map(|(seq, _)| seq.block_number()).max() } - /// Returns transaction hashes for a specific block number from pending or cached sequences. 
- pub(crate) fn get_transaction_hashes_for_block(&self, block_number: u64) -> Vec { + /// Returns the tracked block fingerprint for the given block number from pending or cached + /// sequences, if available. + fn tracked_fingerprint_for_block(&self, block_number: u64) -> Option { // Check pending sequence - if self.pending.block_number() == Some(block_number) { - return self.pending_transactions.iter().map(|tx| *tx.tx_hash()).collect(); + if self.pending.sequence.block_number() == Some(block_number) { + let base = self.pending.sequence.payload_base()?; + let last_flashblock = self.pending.sequence.last_flashblock()?; + let tx_hashes = self.pending.tx_hashes(); + return Some(TrackedBlockFingerprint { + block_number, + block_hash: last_flashblock.diff.block_hash, + parent_hash: base.parent_hash, + tx_hashes, + }); } - // Check cached sequences - for (seq, txs) in self.completed_cache.iter() { + // Check cached sequences (newest first). Multiple payload variants for the same block + // number can coexist in cache; reorg checks must use the newest tracked variant. + for (seq, txs) in self.completed_cache.iter().rev() { if seq.block_number() == block_number { - return txs.iter().map(|tx| *tx.tx_hash()).collect(); + let tx_hashes = txs.iter().map(|tx| *tx.tx_hash()).collect(); + return Some(TrackedBlockFingerprint { + block_number, + block_hash: seq.last().diff.block_hash, + parent_hash: seq.payload_base().parent_hash, + tx_hashes, + }); } } - Vec::new() - } - - /// Returns true if the given block number is tracked in pending or cached sequences. - fn tracks_block_number(&self, block_number: u64) -> bool { - // Check pending sequence - if self.pending.block_number() == Some(block_number) { - return true; - } - - // Check cached sequences - self.completed_cache.iter().any(|(seq, _)| seq.block_number() == block_number) + None } /// Processes a canonical block and reconciles pending state. 
@@ -384,24 +757,18 @@ impl SequenceManager { /// Returns the reconciliation strategy that was applied. pub(crate) fn process_canonical_block( &mut self, - canonical_block_number: u64, - canonical_tx_hashes: &[B256], + canonical: CanonicalBlockFingerprint, max_depth: u64, ) -> ReconciliationStrategy { + let canonical_block_number = canonical.block_number; let earliest = self.earliest_block_number(); let latest = self.latest_block_number(); // Only run reorg detection if we actually track the canonical block number. - // If we don't track it (block number outside our pending/cached window), - // comparing empty tracked hashes to non-empty canonical hashes would falsely - // trigger reorg detection. - let reorg_detected = if self.tracks_block_number(canonical_block_number) { - let tracked_tx_hashes = self.get_transaction_hashes_for_block(canonical_block_number); - let reorg_result = ReorgDetector::detect(&tracked_tx_hashes, canonical_tx_hashes); - reorg_result.is_reorg() - } else { - false - }; + let reorg_detected = self + .tracked_fingerprint_for_block(canonical_block_number) + .map(|tracked| ReorgDetector::detect(&tracked, &canonical).is_reorg()) + .unwrap_or(false); // Determine reconciliation strategy let strategy = CanonicalBlockReconciler::reconcile( @@ -426,7 +793,9 @@ impl SequenceManager { warn!( target: "flashblocks", canonical_block_number, - canonical_tx_count = canonical_tx_hashes.len(), + canonical_tx_count = canonical.tx_hashes.len(), + canonical_parent_hash = ?canonical.parent_hash, + canonical_block_hash = ?canonical.block_hash, "Reorg detected - clearing pending state" ); self.clear_all(); @@ -463,21 +832,52 @@ impl SequenceManager { /// Clears all pending and cached state. 
fn clear_all(&mut self) { - self.pending = FlashBlockPendingSequence::new(); - self.pending_transactions.clear(); + self.pending.clear(); self.completed_cache.clear(); + self.applied_cached_sequences.clear(); self.cached_min_block_number = None; } + + #[cfg(test)] + fn pending_transaction_count(&self) -> usize { + self.pending.transaction_count() + } } #[cfg(test)] mod tests { use super::*; - use crate::{test_utils::TestFlashBlockFactory, validation::ReconciliationStrategy}; + use crate::{ + test_utils::TestFlashBlockFactory, + validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, + }; use alloy_primitives::B256; + use alloy_rpc_types_engine::PayloadId; use op_alloy_consensus::OpTxEnvelope; use reth_optimism_primitives::OpPrimitives; + fn canonical_for( + manager: &SequenceManager, + block_number: u64, + tx_hashes: Vec, + ) -> CanonicalBlockFingerprint { + if let Some(tracked) = manager.tracked_fingerprint_for_block(block_number) { + CanonicalBlockFingerprint { + block_number, + block_hash: tracked.block_hash, + parent_hash: tracked.parent_hash, + tx_hashes, + } + } else { + CanonicalBlockFingerprint { + block_number, + block_hash: B256::repeat_byte(0xFE), + parent_hash: B256::repeat_byte(0xFD), + tx_hashes, + } + } + } + #[test] fn test_sequence_manager_new() { let manager: SequenceManager = SequenceManager::new(true); @@ -603,6 +1003,471 @@ mod tests { assert!(args.is_some()); } + #[test] + fn test_next_buildable_args_uses_newest_cached_when_parent_hash_shared() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let shared_parent = B256::repeat_byte(0x44); + let payload_a = PayloadId::new([0xAA; 8]); + let payload_b = PayloadId::new([0xBB; 8]); + + // Sequence A for block 100 (will become cached first). 
+ let fb_a0 = factory + .flashblock_at(0) + .block_number(100) + .parent_hash(shared_parent) + .payload_id(payload_a) + .build(); + manager.insert_flashblock(fb_a0).unwrap(); + + // Sequence B for the same parent hash and block number (different payload id). + // Inserting index 0 finalizes/caches sequence A. + let fb_b0 = factory + .flashblock_at(0) + .block_number(100) + .parent_hash(shared_parent) + .payload_id(payload_b) + .build(); + manager.insert_flashblock(fb_b0.clone()).unwrap(); + + // Finalize/cache sequence B. + let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); + manager.insert_flashblock(fb_next).unwrap(); + + let candidate = manager + .next_buildable_args::(shared_parent, 1_000_000, None) + .expect("shared parent should resolve to a cached sequence"); + + // Newest sequence (B) should be selected deterministically. + assert_eq!(candidate.ticket.sequence_id.payload_id, payload_b); + assert_eq!(candidate.last_flashblock_hash, fb_b0.diff.block_hash); + } + + #[test] + fn test_next_buildable_args_skips_executed_cached_and_advances_speculative() { + use crate::pending_state::PendingBlockState; + use reth_execution_types::BlockExecutionOutput; + use reth_revm::cached::CachedReads; + use std::sync::Arc; + + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Block 100 with three flashblocks. + let fb100_0 = factory.flashblock_at(0).build(); + let local_tip_hash = fb100_0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb100_0.clone()).unwrap(); + let fb100_1 = factory.flashblock_after(&fb100_0).build(); + manager.insert_flashblock(fb100_1.clone()).unwrap(); + let fb100_2 = factory.flashblock_after(&fb100_1).build(); + manager.insert_flashblock(fb100_2.clone()).unwrap(); + + // First flashblock of block 101 finalizes block 100 into cache. 
+ let fb101_0 = factory.flashblock_for_next_block(&fb100_2).build(); + manager.insert_flashblock(fb101_0.clone()).unwrap(); + + // First build picks canonical-attached cached block 100. + let first = manager + .next_buildable_args::(local_tip_hash, 1_000_000, None) + .expect("cached block should be buildable first"); + assert!(matches!(first.ticket.snapshot, SequenceSnapshot::Cached)); + assert_eq!(first.base.block_number, fb100_0.block_number()); + + // Mark cached block 100 as executed. + let applied = manager.apply_build_outcome( + first.ticket, + Some(SequenceExecutionOutcome { + block_hash: B256::repeat_byte(0x33), + state_root: B256::repeat_byte(0x44), + }), + CachedReads::default(), + ); + assert!(matches!( + applied, + BuildApplyOutcome::AppliedCached { rebroadcasted: true | false } + )); + + // Speculative state for block 100 should unlock block 101/index0. + let pending_state = PendingBlockState:: { + block_hash: fb101_0.base.as_ref().unwrap().parent_hash, + block_number: fb100_0.block_number(), + parent_hash: local_tip_hash, + canonical_anchor_hash: local_tip_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + sealed_header: None, + }; + + let second = manager + .next_buildable_args(local_tip_hash, 1_000_000, Some(pending_state)) + .expect("speculative pending block should be buildable next"); + assert!(matches!(second.ticket.snapshot, SequenceSnapshot::Pending { .. })); + assert_eq!(second.base.block_number, fb101_0.block_number()); + assert!(second.pending_parent.is_some()); + } + + #[test] + fn test_cached_sequence_with_provided_state_root_not_reselected_after_apply() { + use reth_revm::cached::CachedReads; + + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + let provided_root = B256::repeat_byte(0xA5); + + // Block 100 sequence has non-zero state root from sequencer. 
+ let fb100_0 = factory.flashblock_at(0).state_root(provided_root).build(); + let local_tip_hash = fb100_0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb100_0.clone()).unwrap(); + + let fb100_1 = factory.flashblock_after(&fb100_0).state_root(provided_root).build(); + manager.insert_flashblock(fb100_1.clone()).unwrap(); + + let fb100_2 = factory.flashblock_after(&fb100_1).state_root(provided_root).build(); + manager.insert_flashblock(fb100_2.clone()).unwrap(); + + // First flashblock of block 101 finalizes block 100 into cache. + let fb101_0 = factory.flashblock_for_next_block(&fb100_2).build(); + manager.insert_flashblock(fb101_0).unwrap(); + + let candidate = manager + .next_buildable_args::(local_tip_hash, 1_000_000, None) + .expect("cached sequence should be buildable once"); + assert!(matches!(candidate.ticket.snapshot, SequenceSnapshot::Cached)); + assert!( + !candidate.compute_state_root, + "non-zero sequencer root should skip local root compute" + ); + + let applied = manager.apply_build_outcome(candidate.ticket, None, CachedReads::default()); + assert!(matches!(applied, BuildApplyOutcome::AppliedCached { rebroadcasted: false })); + + let repeated = manager.next_buildable_args::(local_tip_hash, 1_000_000, None); + assert!( + repeated.is_none(), + "cached sequence with provided state root must not be reselected after apply" + ); + } + + #[test] + fn test_delayed_canonical_allows_speculative_next_block_index_zero() { + use crate::pending_state::PendingBlockState; + use reth_execution_types::BlockExecutionOutput; + use reth_revm::cached::CachedReads; + use std::sync::Arc; + + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Canonical tip is block 9. Flashblocks for block 10 all build on block 9. 
+ let canonical_9_hash = B256::repeat_byte(0x09); + let fb10_0 = factory + .flashblock_at(0) + .block_number(10) + .parent_hash(canonical_9_hash) + .block_hash(B256::repeat_byte(0x10)) + .build(); + manager.insert_flashblock(fb10_0.clone()).unwrap(); + + let fb10_1 = factory.flashblock_after(&fb10_0).block_hash(B256::repeat_byte(0x11)).build(); + manager.insert_flashblock(fb10_1.clone()).unwrap(); + + let fb10_2 = factory.flashblock_after(&fb10_1).block_hash(B256::repeat_byte(0x12)).build(); + manager.insert_flashblock(fb10_2.clone()).unwrap(); + + // First flashblock for block 11 arrives before canonical block 10. + let fb11_0 = + factory.flashblock_for_next_block(&fb10_2).block_hash(B256::repeat_byte(0x20)).build(); + manager.insert_flashblock(fb11_0.clone()).unwrap(); + + // Build block 10 first from canonical tip (cached canonical-attached sequence). + let block10_candidate = manager + .next_buildable_args::(canonical_9_hash, 1_000_000, None) + .expect("block 10 should be buildable from canonical tip"); + assert_eq!(block10_candidate.base.block_number, 10); + assert!(matches!(block10_candidate.ticket.snapshot, SequenceSnapshot::Cached)); + + let applied = manager.apply_build_outcome( + block10_candidate.ticket, + Some(SequenceExecutionOutcome { + block_hash: fb11_0.base.as_ref().unwrap().parent_hash, + state_root: B256::repeat_byte(0xAA), + }), + CachedReads::default(), + ); + assert!(matches!( + applied, + BuildApplyOutcome::AppliedCached { rebroadcasted: true | false } + )); + + // Speculative state produced by block 10 should unlock block 11/index 0 + // even though canonical block 10 has not arrived yet. 
+ let pending_state_10 = PendingBlockState:: { + block_hash: fb11_0.base.as_ref().unwrap().parent_hash, + block_number: 10, + parent_hash: canonical_9_hash, + canonical_anchor_hash: canonical_9_hash, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + sealed_header: None, + }; + + let before_canonical_10 = manager + .next_buildable_args(canonical_9_hash, 1_000_000, Some(pending_state_10.clone())) + .expect("block 11/index0 should be buildable speculatively before canonical block 10"); + assert_eq!(before_canonical_10.base.block_number, 11); + assert!(before_canonical_10.pending_parent.is_some()); + assert_eq!( + before_canonical_10.pending_parent.as_ref().unwrap().canonical_anchor_hash, + canonical_9_hash + ); + + // Canonical block 10 arrives later: strategy must be Continue (do not clear pending state). + let strategy = manager.process_canonical_block(canonical_for(&manager, 10, vec![]), 64); + assert_eq!(strategy, ReconciliationStrategy::Continue); + + // Block 11/index0 must remain buildable after delayed canonical block 10. 
+ let after_canonical_10 = manager + .next_buildable_args(canonical_9_hash, 1_000_000, Some(pending_state_10)) + .expect("block 11/index0 should remain buildable after delayed canonical block 10"); + assert_eq!(after_canonical_10.base.block_number, 11); + assert!(after_canonical_10.pending_parent.is_some()); + } + + #[test] + fn test_cached_entry_lookup_is_exact_by_sequence_id() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let shared_parent = B256::repeat_byte(0x55); + let payload_a = PayloadId::new([0x0A; 8]); + let payload_b = PayloadId::new([0x0B; 8]); + + let fb_a0 = factory + .flashblock_at(0) + .block_number(100) + .parent_hash(shared_parent) + .payload_id(payload_a) + .build(); + manager.insert_flashblock(fb_a0).unwrap(); + + let fb_b0 = factory + .flashblock_at(0) + .block_number(100) + .parent_hash(shared_parent) + .payload_id(payload_b) + .build(); + manager.insert_flashblock(fb_b0.clone()).unwrap(); + + // Finalize/cache sequence B. 
+ let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); + manager.insert_flashblock(fb_next).unwrap(); + + let seq_a_id = + SequenceId { block_number: 100, payload_id: payload_a, parent_hash: shared_parent }; + let seq_b_id = + SequenceId { block_number: 100, payload_id: payload_b, parent_hash: shared_parent }; + + let (seq_a, _) = manager + .cached_entry_mut_by_id(seq_a_id) + .expect("sequence A should be found by exact id"); + assert_eq!(seq_a.payload_id(), payload_a); + + let (seq_b, _) = manager + .cached_entry_mut_by_id(seq_b_id) + .expect("sequence B should be found by exact id"); + assert_eq!(seq_b.payload_id(), payload_b); + } + + #[test] + fn test_reorg_detection_uses_newest_cached_variant_for_block_number() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let shared_parent = B256::repeat_byte(0x66); + let payload_a = PayloadId::new([0x1A; 8]); + let payload_b = PayloadId::new([0x1B; 8]); + + // Sequence A for block 100 (cached first). + let fb_a0 = factory + .flashblock_at(0) + .block_number(100) + .parent_hash(shared_parent) + .payload_id(payload_a) + .block_hash(B256::repeat_byte(0xA1)) + .build(); + manager.insert_flashblock(fb_a0).unwrap(); + + // Sequence B for the same block number/parent (cached second = newest). + let fb_b0 = factory + .flashblock_at(0) + .block_number(100) + .parent_hash(shared_parent) + .payload_id(payload_b) + .block_hash(B256::repeat_byte(0xB1)) + .build(); + manager.insert_flashblock(fb_b0.clone()).unwrap(); + + // Finalize/cache B and start pending block 101. 
+ let fb_next = factory.flashblock_for_next_block(&fb_b0).build(); + manager.insert_flashblock(fb_next).unwrap(); + + let tracked = manager + .tracked_fingerprint_for_block(100) + .expect("tracked fingerprint for block 100 should exist"); + assert_eq!( + tracked.block_hash, fb_b0.diff.block_hash, + "reorg detection must use newest cached variant for a shared block number" + ); + + // Canonical matches newest variant B; this must not be treated as reorg. + let canonical = CanonicalBlockFingerprint { + block_number: 100, + block_hash: fb_b0.diff.block_hash, + parent_hash: shared_parent, + tx_hashes: tracked.tx_hashes, + }; + + let strategy = manager.process_canonical_block(canonical, 64); + assert_eq!(strategy, ReconciliationStrategy::Continue); + assert_eq!(manager.pending().block_number(), Some(101)); + assert!(!manager.completed_cache.is_empty()); + } + + #[test] + fn test_on_build_complete_ignores_unknown_sequence_id() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build one cached sequence and one pending sequence. 
+ let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + assert_eq!(manager.completed_cache.len(), 1); + assert!(manager.completed_cache.get(0).unwrap().0.execution_outcome().is_none()); + + let pending_parent = manager.pending().payload_base().unwrap().parent_hash; + let before = manager + .next_buildable_args::(pending_parent, 1_000_000, None) + .expect("pending sequence should be buildable"); + assert!(before.cached_state.is_none(), "pending sequence must start without cached reads"); + + let cached = &manager.completed_cache.get(0).unwrap().0; + let stale_payload = if cached.payload_id() == PayloadId::new([0xEE; 8]) { + PayloadId::new([0xEF; 8]) + } else { + PayloadId::new([0xEE; 8]) + }; + let stale_id = SequenceId { + block_number: cached.block_number(), + payload_id: stale_payload, + parent_hash: cached.payload_base().parent_hash, + }; + let stale_ticket = BuildTicket::cached(stale_id); + + let applied = manager.apply_build_outcome( + stale_ticket, + Some(SequenceExecutionOutcome { + block_hash: B256::repeat_byte(0x11), + state_root: B256::repeat_byte(0x22), + }), + reth_revm::cached::CachedReads::default(), + ); + assert!(matches!(applied, BuildApplyOutcome::RejectedCachedSequenceMissing { .. })); + + // Unknown sequence IDs must never mutate tracked pending/cached state. + let after = manager + .next_buildable_args::(pending_parent, 1_000_000, None) + .expect("pending sequence should remain buildable"); + assert!(after.cached_state.is_none(), "stale completion must not attach cached reads"); + + // Finalize current pending sequence and ensure no synthetic execution outcome was injected. 
+ let pending_block_number = manager.pending().block_number().unwrap(); + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + manager.insert_flashblock(fb2).unwrap(); + let finalized_pending = manager + .completed_cache + .iter() + .find(|(seq, _)| seq.block_number() == pending_block_number) + .expect("pending sequence should be finalized into cache") + .0 + .clone(); + assert!(finalized_pending.execution_outcome().is_none()); + + assert!(manager.completed_cache.get(0).unwrap().0.execution_outcome().is_none()); + } + + #[test] + fn test_pending_build_ticket_rejects_stale_revision() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + manager.insert_flashblock(fb0.clone()).unwrap(); + + let first_candidate = manager + .next_buildable_args::(parent_hash, 1_000_000, None) + .expect("initial pending sequence should be buildable"); + let stale_ticket = first_candidate.ticket; + + // Pending sequence advances while the old build would be in-flight. + let fb1 = factory.flashblock_after(&fb0).build(); + manager.insert_flashblock(fb1.clone()).unwrap(); + + let stale_applied = manager.apply_build_outcome( + stale_ticket, + Some(SequenceExecutionOutcome { + block_hash: B256::repeat_byte(0x31), + state_root: B256::repeat_byte(0x32), + }), + reth_revm::cached::CachedReads::default(), + ); + assert!( + matches!(stale_applied, BuildApplyOutcome::RejectedPendingRevisionStale { .. }), + "stale pending ticket must be rejected" + ); + + // Fresh ticket for the current revision should still apply. 
+ let fresh_candidate = manager + .next_buildable_args::(parent_hash, 1_000_000, None) + .expect("advanced pending sequence should remain buildable"); + assert_eq!(fresh_candidate.last_flashblock_hash, fb1.diff.block_hash); + assert!(fresh_candidate.cached_state.is_none()); + + let fresh_applied = manager.apply_build_outcome( + fresh_candidate.ticket, + Some(SequenceExecutionOutcome { + block_hash: B256::repeat_byte(0x41), + state_root: B256::repeat_byte(0x42), + }), + reth_revm::cached::CachedReads::default(), + ); + assert!(matches!(fresh_applied, BuildApplyOutcome::AppliedPending)); + + let with_same_revision = + manager.next_buildable_args::(parent_hash, 1_000_000, None); + assert!( + with_same_revision.is_none(), + "applied pending revision must not be rebuilt until sequence revision advances" + ); + + // Once pending data advances, the next revision should be buildable and use cached reads. + let fb2 = factory.flashblock_after(&fb1).build(); + manager.insert_flashblock(fb2.clone()).unwrap(); + + let with_cached_state = manager + .next_buildable_args::(parent_hash, 1_000_000, None) + .expect("pending sequence should be buildable after revision advances"); + assert_eq!(with_cached_state.last_flashblock_hash, fb2.diff.block_hash); + assert!( + with_cached_state.cached_state.is_some(), + "fresh completion should attach cached reads once pending revision advances" + ); + } + #[test] fn test_compute_state_root_logic_near_expected_final() { let mut manager: SequenceManager = SequenceManager::new(true); @@ -680,6 +1545,23 @@ mod tests { assert!(!args.unwrap().compute_state_root); } + #[test] + fn test_compute_state_root_with_timestamp_skew_does_not_underflow() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + let fb0 = factory.flashblock_at(0).state_root(B256::ZERO).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + let base_timestamp = fb0.base.as_ref().unwrap().timestamp; + 
manager.insert_flashblock(fb0).unwrap(); + + // Local tip timestamp can be ahead briefly in skewed/out-of-order conditions. + // This should not panic due to arithmetic underflow. + let args = + manager.next_buildable_args::(parent_hash, base_timestamp + 1, None); + assert!(args.is_some()); + } + #[test] fn test_cache_ring_buffer_evicts_oldest() { let mut manager: SequenceManager = SequenceManager::new(true); @@ -708,7 +1590,8 @@ mod tests { let mut manager: SequenceManager = SequenceManager::new(true); // No pending state, should return NoPendingState - let strategy = manager.process_canonical_block(100, &[], 10); + let canonical = canonical_for(&manager, 100, vec![]); + let strategy = manager.process_canonical_block(canonical, 10); assert_eq!(strategy, ReconciliationStrategy::NoPendingState); } @@ -724,7 +1607,8 @@ mod tests { assert_eq!(manager.pending().block_number(), Some(100)); // Canonical catches up to block 100 - let strategy = manager.process_canonical_block(100, &[], 10); + let canonical = canonical_for(&manager, 100, vec![]); + let strategy = manager.process_canonical_block(canonical, 10); assert_eq!(strategy, ReconciliationStrategy::CatchUp); // Pending state should be cleared @@ -747,7 +1631,8 @@ mod tests { manager.insert_flashblock(fb2).unwrap(); // Canonical at 99 (behind pending) - let strategy = manager.process_canonical_block(99, &[], 10); + let canonical = canonical_for(&manager, 99, vec![]); + let strategy = manager.process_canonical_block(canonical, 10); assert_eq!(strategy, ReconciliationStrategy::Continue); // Pending state should still exist @@ -773,7 +1658,8 @@ mod tests { // Canonical at 105 with max_depth of 2 (depth = 105 - 100 = 5, which exceeds 2) // But wait - if canonical >= latest, it's CatchUp. So canonical must be < latest (102). 
// Let's use canonical=101, which is < 102 but depth = 101 - 100 = 1 > 0 - let strategy = manager.process_canonical_block(101, &[], 0); + let canonical = canonical_for(&manager, 101, vec![]); + let strategy = manager.process_canonical_block(canonical, 0); assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); // Pending state should be cleared @@ -870,6 +1756,7 @@ mod tests { canonical_anchor_hash: parent_hash, execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; // With pending parent state, should return args for speculative building @@ -915,6 +1802,7 @@ mod tests { canonical_anchor_hash: parent_hash, execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; // Should find cached sequence for block 100 (whose parent is block_99_hash) @@ -949,6 +1837,7 @@ mod tests { canonical_anchor_hash: pending_parent_hash, execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; // Local tip matches the sequence parent (canonical mode should take priority) @@ -981,7 +1870,8 @@ mod tests { assert!(manager.pending().block_number().is_some()); // Canonical catches up to 102 - should clear everything - let strategy = manager.process_canonical_block(102, &[], 10); + let canonical = canonical_for(&manager, 102, vec![]); + let strategy = manager.process_canonical_block(canonical, 10); assert_eq!(strategy, ReconciliationStrategy::CatchUp); // Verify all state is cleared @@ -1015,7 +1905,8 @@ mod tests { // Actually, let's verify the state clearing on HandleReorg by checking // that any non-empty canonical_tx_hashes when we have state triggers reorg let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let strategy = manager.process_canonical_block(100, &canonical_tx_hashes, 10); + let canonical = canonical_for(&manager, 100, canonical_tx_hashes); + 
let strategy = manager.process_canonical_block(canonical, 10); // Should detect reorg (canonical has txs, we have none for that block) assert_eq!(strategy, ReconciliationStrategy::HandleReorg); @@ -1046,7 +1937,8 @@ mod tests { // Canonical at 101 with max_depth of 0 (depth = 101 - 100 = 1 > 0) // Since canonical < latest (102), this should trigger depth limit exceeded - let strategy = manager.process_canonical_block(101, &[], 0); + let canonical = canonical_for(&manager, 101, vec![]); + let strategy = manager.process_canonical_block(canonical, 0); assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); // Verify all state is cleared @@ -1072,7 +1964,8 @@ mod tests { let cached_count = manager.completed_cache.len(); // Canonical at 99 (behind pending) with reasonable depth limit - let strategy = manager.process_canonical_block(99, &[], 10); + let canonical = canonical_for(&manager, 99, vec![]); + let strategy = manager.process_canonical_block(canonical, 10); assert_eq!(strategy, ReconciliationStrategy::Continue); // Verify state is preserved @@ -1095,63 +1988,28 @@ mod tests { // Verify state exists assert!(manager.pending().block_number().is_some()); assert!(!manager.completed_cache.is_empty()); - assert!(!manager.pending_transactions.is_empty() || manager.pending().count() > 0); + assert!(manager.pending_transaction_count() > 0 || manager.pending().count() > 0); // Clear via catchup - manager.process_canonical_block(101, &[], 10); + let canonical = canonical_for(&manager, 101, vec![]); + manager.process_canonical_block(canonical, 10); // Verify complete clearing assert!(manager.pending().block_number().is_none()); assert_eq!(manager.pending().count(), 0); assert!(manager.completed_cache.is_empty()); - assert!(manager.pending_transactions.is_empty()); + assert_eq!(manager.pending_transaction_count(), 0); } - // ==================== Transaction Hash Tracking Tests ==================== + // ==================== Tracked Fingerprint Tests 
==================== #[test] - fn test_get_transaction_hashes_returns_empty_for_unknown_block() { + fn test_tracked_fingerprint_returns_none_for_unknown_block() { let manager: SequenceManager = SequenceManager::new(true); - // No flashblocks inserted, should return empty - let hashes = manager.get_transaction_hashes_for_block(100); - assert!(hashes.is_empty()); - } - - #[test] - fn test_get_transaction_hashes_for_pending_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create flashblock without transactions (empty tx list is valid) - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0).unwrap(); - - // Should find (empty) transaction hashes for block 100 - let hashes = manager.get_transaction_hashes_for_block(100); - assert!(hashes.is_empty()); // No transactions in this flashblock - } - - #[test] - fn test_get_transaction_hashes_for_cached_block() { - let mut manager: SequenceManager = SequenceManager::new(true); - let factory = TestFlashBlockFactory::new(); - - // Create first flashblock for block 100 - let fb0 = factory.flashblock_at(0).build(); - manager.insert_flashblock(fb0.clone()).unwrap(); - - // Create second flashblock for block 101 (caches block 100) - let fb1 = factory.flashblock_for_next_block(&fb0).build(); - manager.insert_flashblock(fb1).unwrap(); - - // Should find transaction hashes for cached block 100 - let hashes = manager.get_transaction_hashes_for_block(100); - assert!(hashes.is_empty()); // No transactions in these flashblocks - - // Should find transaction hashes for pending block 101 - let hashes = manager.get_transaction_hashes_for_block(101); - assert!(hashes.is_empty()); // No transactions in these flashblocks + // No flashblocks inserted, should return none + let fingerprint = manager.tracked_fingerprint_for_block(100); + assert!(fingerprint.is_none()); } #[test] @@ -1174,7 +2032,8 @@ mod tests { // Process canonical block 99 (not 
tracked) with transactions // This should NOT trigger reorg detection because we don't track block 99 let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let strategy = manager.process_canonical_block(99, &canonical_tx_hashes, 10); + let canonical = canonical_for(&manager, 99, canonical_tx_hashes); + let strategy = manager.process_canonical_block(canonical, 10); // Should continue (not reorg) because block 99 is outside our tracked window assert_eq!(strategy, ReconciliationStrategy::Continue); @@ -1200,7 +2059,8 @@ mod tests { // Process canonical block 100 (which IS tracked) with different transactions // Our tracked block 100 has empty tx list, canonical has non-empty let canonical_tx_hashes = vec![B256::repeat_byte(0xAA)]; - let strategy = manager.process_canonical_block(100, &canonical_tx_hashes, 10); + let canonical = canonical_for(&manager, 100, canonical_tx_hashes); + let strategy = manager.process_canonical_block(canonical, 10); // Should detect reorg because we track block 100 and txs don't match assert_eq!(strategy, ReconciliationStrategy::HandleReorg); @@ -1209,4 +2069,97 @@ mod tests { assert!(manager.pending().block_number().is_none()); assert!(manager.completed_cache.is_empty()); } + + #[test] + fn test_reorg_detected_for_tracked_block_with_parent_hash_mismatch() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build pending sequence for block 100 and cache it by starting block 101. 
+ let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + let tracked = manager + .tracked_fingerprint_for_block(100) + .expect("tracked fingerprint for block 100 should exist"); + let canonical = CanonicalBlockFingerprint { + block_number: 100, + block_hash: tracked.block_hash, + parent_hash: B256::repeat_byte(0xAA), // Different parent hash, identical txs. + tx_hashes: tracked.tx_hashes, + }; + + let strategy = manager.process_canonical_block(canonical, 10); + assert_eq!(strategy, ReconciliationStrategy::HandleReorg); + assert!(manager.pending().block_number().is_none()); + assert!(manager.completed_cache.is_empty()); + } + + #[test] + fn test_reorg_detected_for_tracked_block_with_block_hash_mismatch() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Build pending sequence for block 100 and cache it by starting block 101. + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + let tracked = manager + .tracked_fingerprint_for_block(100) + .expect("tracked fingerprint for block 100 should exist"); + let canonical = CanonicalBlockFingerprint { + block_number: 100, + block_hash: B256::repeat_byte(0xBB), // Different block hash, identical parent+txs. 
+ parent_hash: tracked.parent_hash, + tx_hashes: tracked.tx_hashes, + }; + + let strategy = manager.process_canonical_block(canonical, 10); + assert_eq!(strategy, ReconciliationStrategy::HandleReorg); + assert!(manager.pending().block_number().is_none()); + assert!(manager.completed_cache.is_empty()); + } + + #[test] + fn test_tracked_fingerprint_for_pending_block() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create flashblock without transactions (empty tx list is valid) + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0).unwrap(); + + // Should find tracked fingerprint for block 100 + let fingerprint = manager.tracked_fingerprint_for_block(100); + assert!(fingerprint.is_some()); + assert!(fingerprint.unwrap().tx_hashes.is_empty()); // No transactions in this flashblock + } + + #[test] + fn test_tracked_fingerprint_for_cached_block() { + let mut manager: SequenceManager = SequenceManager::new(true); + let factory = TestFlashBlockFactory::new(); + + // Create first flashblock for block 100 + let fb0 = factory.flashblock_at(0).build(); + manager.insert_flashblock(fb0.clone()).unwrap(); + + // Create second flashblock for block 101 (caches block 100) + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + manager.insert_flashblock(fb1).unwrap(); + + // Should find tracked fingerprint for cached block 100 + let fingerprint = manager.tracked_fingerprint_for_block(100); + assert!(fingerprint.is_some()); + assert!(fingerprint.as_ref().unwrap().tx_hashes.is_empty()); + + // Should find tracked fingerprint for pending block 101 + let fingerprint = manager.tracked_fingerprint_for_block(101); + assert!(fingerprint.is_some()); + assert!(fingerprint.as_ref().unwrap().tx_hashes.is_empty()); + } } diff --git a/rust/op-reth/crates/flashblocks/src/lib.rs b/rust/op-reth/crates/flashblocks/src/lib.rs index 9be47513a6381..e8118bfce525b 100644 --- 
a/rust/op-reth/crates/flashblocks/src/lib.rs +++ b/rust/op-reth/crates/flashblocks/src/lib.rs @@ -14,6 +14,9 @@ use std::sync::Arc; // Included to enable serde feature for OpReceipt type used transitively use reth_optimism_primitives as _; +// Used by downstream crates that depend on this crate +use alloy_rpc_types as _; + mod consensus; pub use consensus::FlashBlockConsensusClient; @@ -21,7 +24,9 @@ mod payload; pub use payload::{FlashBlock, PendingFlashBlock}; mod sequence; -pub use sequence::{FlashBlockCompleteSequence, FlashBlockPendingSequence}; +pub use sequence::{ + FlashBlockCompleteSequence, FlashBlockPendingSequence, SequenceExecutionOutcome, +}; mod service; pub use service::{ @@ -30,15 +35,19 @@ pub use service::{ }; mod worker; +pub use worker::FlashblockCachedReceipt; mod cache; mod pending_state; pub use pending_state::{PendingBlockState, PendingStateRegistry}; +pub mod validation; + +mod tx_cache; +pub use tx_cache::TransactionCache; #[cfg(test)] mod test_utils; -pub mod validation; mod ws; pub use ws::{FlashBlockDecoder, WsConnect, WsFlashBlockStream}; diff --git a/rust/op-reth/crates/flashblocks/src/payload.rs b/rust/op-reth/crates/flashblocks/src/payload.rs index c7031c1856766..503e8409f38f6 100644 --- a/rust/op-reth/crates/flashblocks/src/payload.rs +++ b/rust/op-reth/crates/flashblocks/src/payload.rs @@ -14,6 +14,11 @@ pub struct PendingFlashBlock { /// The complete pending block built out of all received Flashblocks. #[deref] pub pending: PendingBlock, + /// Canonical anchor hash used for state lookups when this block was built. + /// + /// For canonical builds this equals `pending.block().parent_hash()`. + /// For speculative builds this points to the canonical ancestor used for storage reads. + pub canonical_anchor_hash: B256, /// A sequential index that identifies the last Flashblock added to this block. 
pub last_flashblock_index: u64, /// The last Flashblock block hash, @@ -26,11 +31,18 @@ impl PendingFlashBlock { /// Create new pending flashblock. pub const fn new( pending: PendingBlock, + canonical_anchor_hash: B256, last_flashblock_index: u64, last_flashblock_hash: B256, has_computed_state_root: bool, ) -> Self { - Self { pending, last_flashblock_index, last_flashblock_hash, has_computed_state_root } + Self { + pending, + canonical_anchor_hash, + last_flashblock_index, + last_flashblock_hash, + has_computed_state_root, + } } /// Returns the properly calculated state root for that block if it was computed. diff --git a/rust/op-reth/crates/flashblocks/src/pending_state.rs b/rust/op-reth/crates/flashblocks/src/pending_state.rs index 5af353161b9bd..6c36765892f6c 100644 --- a/rust/op-reth/crates/flashblocks/src/pending_state.rs +++ b/rust/op-reth/crates/flashblocks/src/pending_state.rs @@ -6,9 +6,12 @@ use alloy_primitives::B256; use reth_execution_types::BlockExecutionOutput; -use reth_primitives_traits::NodePrimitives; +use reth_primitives_traits::{HeaderTy, NodePrimitives, SealedHeader}; use reth_revm::cached::CachedReads; -use std::sync::Arc; +use std::{ + collections::{HashMap, VecDeque}, + sync::Arc, +}; /// Tracks the execution state from building a pending block. /// @@ -18,7 +21,10 @@ use std::sync::Arc; /// - This allows continuous flashblock processing without waiting for P2P #[derive(Debug, Clone)] pub struct PendingBlockState { - /// Hash of the block that was built (the pending block's hash). + /// Locally computed block hash for this built block. + /// + /// This hash is used to match subsequent flashblock sequences by `parent_hash` + /// during speculative chaining. pub block_hash: B256, /// Block number that was built. pub block_number: u64, @@ -35,6 +41,10 @@ pub struct PendingBlockState { pub execution_outcome: Arc>, /// Cached reads from execution for reuse. pub cached_reads: CachedReads, + /// Sealed header for this built block. 
+ /// + /// Used as the parent header for speculative child builds. + pub sealed_header: Option>>, } impl PendingBlockState { @@ -54,8 +64,15 @@ impl PendingBlockState { canonical_anchor_hash, execution_outcome, cached_reads, + sealed_header: None, } } + + /// Attaches a sealed header for use as parent context in speculative builds. + pub fn with_sealed_header(mut self, sealed_header: SealedHeader>) -> Self { + self.sealed_header = Some(sealed_header); + self + } } /// Registry of pending block states for speculative building. @@ -63,21 +80,58 @@ impl PendingBlockState { /// Maintains a small cache of recently built pending blocks, allowing /// subsequent flashblock sequences to build on top of them even before /// the canonical blocks arrive. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct PendingStateRegistry { - /// Most recent pending block state (the one we'd build on top of). - current: Option>, + /// Executed pending states keyed by locally computed block hash. + by_block_hash: HashMap>, + /// Insertion order for bounded eviction. + insertion_order: VecDeque, + /// Most recently recorded block hash. + latest_block_hash: Option, + /// Maximum number of tracked pending states. + max_entries: usize, } impl PendingStateRegistry { + const DEFAULT_MAX_ENTRIES: usize = 64; + /// Creates a new pending state registry. - pub const fn new() -> Self { - Self { current: None } + pub fn new() -> Self { + Self::with_max_entries(Self::DEFAULT_MAX_ENTRIES) + } + + /// Creates a new pending state registry with an explicit entry bound. + pub fn with_max_entries(max_entries: usize) -> Self { + let max_entries = max_entries.max(1); + Self { + by_block_hash: HashMap::with_capacity(max_entries), + insertion_order: VecDeque::with_capacity(max_entries), + latest_block_hash: None, + max_entries, + } } /// Records a completed build's state for potential use by subsequent builds. 
pub fn record_build(&mut self, state: PendingBlockState) { - self.current = Some(state); + let block_hash = state.block_hash; + + if self.by_block_hash.contains_key(&block_hash) { + self.insertion_order.retain(|hash| *hash != block_hash); + } + + self.by_block_hash.insert(block_hash, state); + self.insertion_order.push_back(block_hash); + self.latest_block_hash = Some(block_hash); + + while self.by_block_hash.len() > self.max_entries { + let Some(evicted_hash) = self.insertion_order.pop_front() else { + break; + }; + self.by_block_hash.remove(&evicted_hash); + if self.latest_block_hash == Some(evicted_hash) { + self.latest_block_hash = self.insertion_order.back().copied(); + } + } } /// Gets the pending state for a given parent hash, if available. @@ -85,17 +139,25 @@ impl PendingStateRegistry { /// Returns `Some` if we have pending state whose `block_hash` matches the requested /// `parent_hash`. pub fn get_state_for_parent(&self, parent_hash: B256) -> Option<&PendingBlockState> { - self.current.as_ref().filter(|state| state.block_hash == parent_hash) + self.by_block_hash.get(&parent_hash) } /// Clears all pending state. pub fn clear(&mut self) { - self.current = None; + self.by_block_hash.clear(); + self.insertion_order.clear(); + self.latest_block_hash = None; } /// Returns the current pending state, if any. 
- pub const fn current(&self) -> Option<&PendingBlockState> { - self.current.as_ref() + pub fn current(&self) -> Option<&PendingBlockState> { + self.latest_block_hash.and_then(|hash| self.by_block_hash.get(&hash)) + } +} + +impl Default for PendingStateRegistry { + fn default() -> Self { + Self::new() } } @@ -119,6 +181,7 @@ mod tests { canonical_anchor_hash: parent_hash, execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; registry.record_build(state); @@ -140,6 +203,7 @@ mod tests { canonical_anchor_hash: parent_hash, execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; registry.record_build(state); @@ -159,6 +223,7 @@ mod tests { canonical_anchor_hash: parent_hash, execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; registry.record_build(state); assert!(registry.current().is_some()); @@ -167,6 +232,93 @@ mod tests { assert!(registry.current().is_none()); } + #[test] + fn test_registry_tracks_multiple_states_by_hash() { + let mut registry = TestRegistry::new(); + + let anchor = B256::repeat_byte(0); + let state_100 = PendingBlockState { + block_hash: B256::repeat_byte(1), + block_number: 100, + parent_hash: anchor, + canonical_anchor_hash: anchor, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + sealed_header: None, + }; + let state_101 = PendingBlockState { + block_hash: B256::repeat_byte(2), + block_number: 101, + parent_hash: state_100.block_hash, + canonical_anchor_hash: anchor, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + sealed_header: None, + }; + + registry.record_build(state_100.clone()); + registry.record_build(state_101.clone()); + + assert_eq!(registry.current().map(|s| s.block_number), Some(101)); + assert_eq!( + 
registry.get_state_for_parent(state_100.block_hash).map(|s| s.block_number), + Some(100) + ); + assert_eq!( + registry.get_state_for_parent(state_101.block_hash).map(|s| s.block_number), + Some(101) + ); + } + + #[test] + fn test_registry_eviction_respects_max_entries() { + let mut registry = PendingStateRegistry::::with_max_entries(2); + let anchor = B256::repeat_byte(0); + + let state_100 = PendingBlockState { + block_hash: B256::repeat_byte(1), + block_number: 100, + parent_hash: anchor, + canonical_anchor_hash: anchor, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + sealed_header: None, + }; + let state_101 = PendingBlockState { + block_hash: B256::repeat_byte(2), + block_number: 101, + parent_hash: state_100.block_hash, + canonical_anchor_hash: anchor, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + sealed_header: None, + }; + let state_102 = PendingBlockState { + block_hash: B256::repeat_byte(3), + block_number: 102, + parent_hash: state_101.block_hash, + canonical_anchor_hash: anchor, + execution_outcome: Arc::new(BlockExecutionOutput::default()), + cached_reads: CachedReads::default(), + sealed_header: None, + }; + + registry.record_build(state_100); + registry.record_build(state_101.clone()); + registry.record_build(state_102.clone()); + + assert!(registry.get_state_for_parent(B256::repeat_byte(1)).is_none()); + assert_eq!( + registry.get_state_for_parent(state_101.block_hash).map(|s| s.block_number), + Some(101) + ); + assert_eq!( + registry.get_state_for_parent(state_102.block_hash).map(|s| s.block_number), + Some(102) + ); + assert_eq!(registry.current().map(|s| s.block_number), Some(102)); + } + /// Tests that `canonical_anchor_hash` is distinct from `parent_hash` in speculative chains. 
/// /// When building speculatively: @@ -190,6 +342,7 @@ mod tests { canonical_anchor_hash: canonical_anchor, // Same as parent for canonical build execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; // Verify block N's anchor is the canonical block @@ -205,6 +358,7 @@ mod tests { canonical_anchor_hash: state_n.canonical_anchor_hash, // Forwarded from N execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; // Verify N+1's anchor is still the canonical block, NOT block N @@ -220,6 +374,7 @@ mod tests { canonical_anchor_hash: state_n1.canonical_anchor_hash, // Forwarded from N+1 execution_outcome: Arc::new(BlockExecutionOutput::default()), cached_reads: CachedReads::default(), + sealed_header: None, }; // Verify N+2's anchor is STILL the original canonical block diff --git a/rust/op-reth/crates/flashblocks/src/sequence.rs b/rust/op-reth/crates/flashblocks/src/sequence.rs index ddd2b2c01f5cc..4c4ed37747775 100644 --- a/rust/op-reth/crates/flashblocks/src/sequence.rs +++ b/rust/op-reth/crates/flashblocks/src/sequence.rs @@ -12,6 +12,23 @@ use tracing::*; /// The size of the broadcast channel for completed flashblock sequences. const FLASHBLOCK_SEQUENCE_CHANNEL_SIZE: usize = 128; +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum FollowupRejectionReason { + BlockNumber, + PayloadId, + BlockAndPayload, +} + +impl FollowupRejectionReason { + const fn as_str(self) -> &'static str { + match self { + Self::BlockNumber => "block_number_mismatch", + Self::PayloadId => "payload_id_mismatch", + Self::BlockAndPayload => "block_and_payload_mismatch", + } + } +} + /// Outcome from executing a flashblock sequence. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] #[allow(unnameable_types)] @@ -64,6 +81,34 @@ impl FlashBlockPendingSequence { self.block_broadcaster.subscribe() } + /// Returns whether this flashblock would be accepted into the current sequence. + pub fn can_accept(&self, flashblock: &FlashBlock) -> bool { + if flashblock.index == 0 { + return true; + } + + self.followup_rejection_reason(flashblock).is_none() + } + + fn followup_rejection_reason( + &self, + flashblock: &FlashBlock, + ) -> Option { + // only insert if we previously received the same block and payload, assume we received + // index 0 + let same_block = self.block_number() == Some(flashblock.block_number()); + let same_payload = self.payload_id() == Some(flashblock.payload_id); + if same_block && same_payload { + None + } else if !same_block && !same_payload { + Some(FollowupRejectionReason::BlockAndPayload) + } else if !same_block { + Some(FollowupRejectionReason::BlockNumber) + } else { + Some(FollowupRejectionReason::PayloadId) + } + } + /// Inserts a new block into the sequence. /// /// A [`FlashBlock`] with index 0 resets the set. 
@@ -74,16 +119,23 @@ impl FlashBlockPendingSequence { return; } - // only insert if we previously received the same block and payload, assume we received - // index 0 - let same_block = self.block_number() == Some(flashblock.block_number()); - let same_payload = self.payload_id() == Some(flashblock.payload_id); - - if same_block && same_payload { + if self.can_accept(&flashblock) { trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index, block_count = self.inner.len() ,"Received followup flashblock"); self.inner.insert(flashblock.index, flashblock); } else { - trace!(target: "flashblocks", number=%flashblock.block_number(), index = %flashblock.index, current=?self.block_number() ,"Ignoring untracked flashblock following"); + let rejection_reason = self + .followup_rejection_reason(&flashblock) + .expect("non-accepted followup must have rejection reason"); + trace!( + target: "flashblocks", + number = %flashblock.block_number(), + index = %flashblock.index, + current_block_number = ?self.block_number(), + expected_payload_id = ?self.payload_id(), + incoming_payload_id = ?flashblock.payload_id, + rejection_reason = rejection_reason.as_str(), + "Ignoring untracked flashblock following" + ); } } @@ -210,6 +262,11 @@ impl FlashBlockCompleteSequence { self.inner.first().unwrap().base.as_ref().unwrap() } + /// Returns the payload id shared by all flashblocks in the sequence. + pub fn payload_id(&self) -> PayloadId { + self.inner.first().unwrap().payload_id + } + /// Returns the number of flashblocks in the sequence. 
pub const fn count(&self) -> usize { self.inner.len() diff --git a/rust/op-reth/crates/flashblocks/src/service.rs b/rust/op-reth/crates/flashblocks/src/service.rs index f88b3b87ac3b4..7bf3504018943 100644 --- a/rust/op-reth/crates/flashblocks/src/service.rs +++ b/rust/op-reth/crates/flashblocks/src/service.rs @@ -1,10 +1,11 @@ use crate::{ FlashBlock, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, InProgressFlashBlockRx, PendingFlashBlock, - cache::SequenceManager, + cache::{BuildApplyOutcome, BuildTicket, SequenceManager}, pending_state::PendingStateRegistry, - validation::ReconciliationStrategy, - worker::{BuildResult, FlashBlockBuilder}, + tx_cache::TransactionCache, + validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, + worker::{BuildResult, FlashBlockBuilder, FlashblockCachedReceipt}, }; use alloy_primitives::B256; use futures_util::{FutureExt, Stream, StreamExt}; @@ -39,6 +40,10 @@ const CANONICAL_BLOCK_CHANNEL_CAPACITY: usize = 128; pub struct CanonicalBlockNotification { /// The canonical block number. pub block_number: u64, + /// Canonical block hash. + pub block_hash: B256, + /// Canonical parent hash. + pub parent_hash: B256, /// Transaction hashes in the canonical block. pub tx_hashes: Vec, } @@ -71,6 +76,15 @@ pub struct FlashBlockService< sequences: SequenceManager, /// Registry for pending block states to enable speculative building. pending_states: PendingStateRegistry, + /// Transaction execution cache for incremental flashblock building. + tx_cache: TransactionCache, + + /// Epoch counter for state invalidation. + /// + /// Incremented whenever speculative state is cleared (reorg, catch-up, depth limit). + /// Used to detect and discard stale build results from in-flight jobs that were + /// started before the state was invalidated. + state_epoch: u64, /// Maximum depth for pending blocks ahead of canonical before clearing. 
max_depth: u64, @@ -81,6 +95,7 @@ pub struct FlashBlockService< impl FlashBlockService where N: NodePrimitives, + N::Receipt: FlashblockCachedReceipt, S: Stream> + Unpin + 'static, EvmConfig: ConfigureEvm + Unpin> + Clone @@ -115,6 +130,8 @@ where job: None, sequences: SequenceManager::new(compute_state_root), pending_states: PendingStateRegistry::new(), + tx_cache: TransactionCache::new(), + state_epoch: 0, max_depth: DEFAULT_MAX_DEPTH, metrics: FlashBlockServiceMetrics::default(), } @@ -181,29 +198,93 @@ where loop { tokio::select! { // Event 1: job exists, listen to job results - Some(result) = async { + // Handle both successful results and channel errors (e.g., task panic) + job_result = async { match self.job.as_mut() { - Some((_, rx)) => rx.await.ok(), + Some(job) => Some((&mut job.result_rx).await), None => std::future::pending().await, } } => { - let (start_time, _) = self.job.take().unwrap(); + let job = self.job.take().unwrap(); let _ = self.in_progress_tx.send(None); + // Handle channel error (task panicked or was cancelled) + let Some(Ok((result, returned_cache))) = job_result else { + warn!( + target: "flashblocks", + "Build job channel closed unexpectedly (task may have panicked)" + ); + // Re-initialize transaction cache since we lost the one sent to the task + self.tx_cache = TransactionCache::new(); + self.schedule_followup_build(); + continue; + }; + + // Check if the state epoch has changed since this job started. + // If so, the speculative state has been invalidated (e.g., by a reorg) + // and we should discard the build result AND the returned cache to avoid + // reintroducing stale state that was cleared during reconciliation. 
+ if job.epoch != self.state_epoch { + trace!( + target: "flashblocks", + job_epoch = job.epoch, + current_epoch = self.state_epoch, + "Discarding stale build result and cache (state was invalidated)" + ); + self.metrics.stale_builds_discarded.increment(1); + // Don't restore the returned cache - keep the cleared cache from reconciliation + self.schedule_followup_build(); + continue; + } + + // Restore the transaction cache from the spawned task (only if epoch matched) + self.tx_cache = returned_cache; + match result { Ok(Some(build_result)) => { let pending = build_result.pending_flashblock; - let parent_hash = pending.parent_hash(); - self.sequences - .on_build_complete(parent_hash, Some((pending.clone(), build_result.cached_reads))); - - // Record pending state for speculative building of subsequent blocks - self.pending_states.record_build(build_result.pending_state); - - let elapsed = start_time.elapsed(); - self.metrics.execution_duration.record(elapsed.as_secs_f64()); - - let _ = tx.send(Some(pending)); + let apply_outcome = self.sequences + .on_build_complete(job.ticket, Some((pending.clone(), build_result.cached_reads))); + + if apply_outcome.is_applied() { + // Record pending state for speculative building of subsequent blocks + self.pending_states.record_build(build_result.pending_state); + + let elapsed = job.start_time.elapsed(); + self.metrics.execution_duration.record(elapsed.as_secs_f64()); + + let _ = tx.send(Some(pending)); + } else { + match apply_outcome { + BuildApplyOutcome::RejectedPendingSequenceMismatch { .. } => { + self.metrics + .build_reject_pending_sequence_mismatch + .increment(1); + } + BuildApplyOutcome::RejectedPendingRevisionStale { .. } => { + self.metrics + .build_reject_pending_revision_stale + .increment(1); + } + BuildApplyOutcome::RejectedCachedSequenceMissing { .. 
} => { + self.metrics + .build_reject_cached_sequence_missing + .increment(1); + } + BuildApplyOutcome::SkippedNoBuildResult => { + self.metrics + .build_reject_missing_build_result + .increment(1); + } + BuildApplyOutcome::AppliedPending + | BuildApplyOutcome::AppliedCached { .. } => {} + } + trace!( + target: "flashblocks", + ?apply_outcome, + "Discarding build side effects due to rejected completion apply" + ); + } } Ok(None) => { trace!(target: "flashblocks", "Build job returned None"); @@ -212,6 +293,10 @@ where warn!(target: "flashblocks", %err, "Build job failed"); } } + + // Drain runnable work after each completion instead of waiting for another + // external event. + self.schedule_followup_build(); } // Event 2: New flashblock arrives (batch process all ready flashblocks) @@ -262,20 +347,35 @@ where } } + /// Attempts to start the next build after a completion and records outcome metrics. + fn schedule_followup_build(&mut self) { + self.metrics.drain_followup_attempts.increment(1); + if self.try_start_build_job() { + self.metrics.drain_followup_started.increment(1); + } else { + self.metrics.drain_followup_noop.increment(1); + } + } + /// Processes a canonical block notification and reconciles pending state. 
fn process_canonical_block(&mut self, notification: CanonicalBlockNotification) { - let strategy = self.sequences.process_canonical_block( - notification.block_number, - ¬ification.tx_hashes, - self.max_depth, - ); + let canonical_fingerprint = CanonicalBlockFingerprint { + block_number: notification.block_number, + block_hash: notification.block_hash, + parent_hash: notification.parent_hash, + tx_hashes: notification.tx_hashes, + }; + + let strategy = + self.sequences.process_canonical_block(canonical_fingerprint, self.max_depth); // Record metrics based on strategy if matches!(strategy, ReconciliationStrategy::HandleReorg) { self.metrics.reorg_count.increment(1); } - // Clear pending states for strategies that invalidate speculative state + // Clear pending states and transaction cache for strategies that invalidate speculative + // state. Also increment the state epoch to invalidate any in-flight build jobs. if matches!( strategy, ReconciliationStrategy::HandleReorg | @@ -283,6 +383,14 @@ where ReconciliationStrategy::DepthLimitExceeded { .. } ) { self.pending_states.clear(); + self.tx_cache.clear(); + self.state_epoch = self.state_epoch.wrapping_add(1); + trace!( + target: "flashblocks", + new_epoch = self.state_epoch, + ?strategy, + "State invalidated, incremented epoch" + ); } } @@ -308,23 +416,31 @@ where } /// Attempts to build a block if no job is currently running and a buildable sequence exists. - fn try_start_build_job(&mut self) { + fn try_start_build_job(&mut self) -> bool { if self.job.is_some() { - return; // Already building + return false; // Already building } let Some(latest) = self.builder.provider().latest_header().ok().flatten() else { - return; + return false; }; - // Get pending parent state for speculative building (if enabled and available) - let pending_parent = self.pending_states.current().cloned(); + // Prefer parent-hash-specific speculative context for the current pending sequence. 
+ // Fall back to the latest speculative state when no exact parent match is found. + let pending_parent = self + .sequences + .pending() + .payload_base() + .and_then(|base| self.pending_states.get_state_for_parent(base.parent_hash).cloned()) + .or_else(|| self.pending_states.current().cloned()); - let Some(args) = + let Some(candidate) = self.sequences.next_buildable_args(latest.hash(), latest.timestamp(), pending_parent) else { - return; // Nothing buildable + return false; // Nothing buildable }; + let ticket = candidate.ticket; + let args = candidate.args; // Spawn build job let fb_info = FlashBlockBuildInfo { @@ -336,12 +452,22 @@ where self.metrics.current_index.set(fb_info.index as f64); let _ = self.in_progress_tx.send(Some(fb_info)); - let (tx, rx) = oneshot::channel(); + // Take ownership of the transaction cache for the spawned task + let mut tx_cache = std::mem::take(&mut self.tx_cache); + + let (result_tx, result_rx) = oneshot::channel(); let builder = self.builder.clone(); self.spawner.spawn_blocking(move || { - let _ = tx.send(builder.execute(args)); + let result = builder.execute(args, Some(&mut tx_cache)); + let _ = result_tx.send((result, tx_cache)); + }); + self.job = Some(BuildJob { + start_time: Instant::now(), + epoch: self.state_epoch, + ticket, + result_rx, }); - self.job = Some((Instant::now(), rx)); + true } } @@ -356,7 +482,22 @@ pub struct FlashBlockBuildInfo { pub block_number: u64, } -type BuildJob = (Instant, oneshot::Receiver>>>); +/// A running build job with metadata for tracking and invalidation. +#[derive(Debug)] +struct BuildJob { + /// When the job was started. + start_time: Instant, + /// The state epoch when this job was started. + /// + /// If the service's `state_epoch` has changed by the time this job completes, + /// the result should be discarded as the speculative state has been invalidated. + epoch: u64, + /// Opaque ticket identifying the exact sequence snapshot targeted by this build job. 
+ ticket: BuildTicket, + /// Receiver for the build result and returned transaction cache. + #[allow(clippy::type_complexity)] + result_rx: oneshot::Receiver<(eyre::Result>>, TransactionCache)>, +} /// Creates a bounded channel for canonical block notifications. /// @@ -383,4 +524,20 @@ struct FlashBlockServiceMetrics { current_index: Gauge, /// Number of reorgs detected during canonical block reconciliation. reorg_count: Counter, + /// Number of build results discarded due to state invalidation (reorg during build). + stale_builds_discarded: Counter, + /// Number of completions rejected because pending sequence identity no longer matched. + build_reject_pending_sequence_mismatch: Counter, + /// Number of completions rejected because pending revision no longer matched. + build_reject_pending_revision_stale: Counter, + /// Number of completions rejected because referenced cached sequence was missing. + build_reject_cached_sequence_missing: Counter, + /// Number of completions skipped due to missing build result payload. + build_reject_missing_build_result: Counter, + /// Number of follow-up drain scheduling attempts after build completion. + drain_followup_attempts: Counter, + /// Number of follow-up attempts that successfully started another build. + drain_followup_started: Counter, + /// Number of follow-up attempts where no buildable work was available. + drain_followup_noop: Counter, } diff --git a/rust/op-reth/crates/flashblocks/src/tx_cache.rs b/rust/op-reth/crates/flashblocks/src/tx_cache.rs new file mode 100644 index 0000000000000..f03d5e0c75333 --- /dev/null +++ b/rust/op-reth/crates/flashblocks/src/tx_cache.rs @@ -0,0 +1,702 @@ +//! Transaction execution caching for flashblock building. +//! +//! When flashblocks arrive incrementally, each new flashblock triggers a rebuild of pending +//! state from all transactions in the sequence. Without caching, this means re-reading +//! state from disk for accounts/storage that were already loaded in previous builds. 
+//! +//! # Approach +//! +//! This module caches the cumulative bundle state from previous executions. When the next +//! flashblock arrives, if its transaction list is a continuation of the cached list, the +//! cached bundle can be used as a **prestate** for the State builder. This avoids redundant +//! disk reads for accounts/storage that were already modified. +//! +//! **Important**: Prefix transaction skipping is only safe when the incoming transaction list +//! fully extends the cached list. In that case, callers can execute only the uncached suffix +//! and stitch in the cached prefix receipts/metadata. +//! +//! The cache stores: +//! - Ordered list of executed transaction hashes (for prefix matching) +//! - Cumulative bundle state after all cached transactions (used as prestate) +//! - Cumulative receipts for all cached transactions (for future optimization) +//! - Block-level execution metadata for cached transactions (gas/requests) +//! +//! # Example +//! +//! ```text +//! Flashblock 0: txs [A, B] +//! -> Execute A, B from scratch (cold state reads) +//! -> Cache: txs=[A,B], bundle=state_after_AB +//! +//! Flashblock 1: txs [A, B, C] +//! -> Prefix [A, B] matches cache +//! -> Use cached bundle as prestate (warm state) +//! -> Execute A, B, C (A, B hit prestate cache, faster) +//! -> Cache: txs=[A,B,C], bundle=state_after_ABC +//! +//! Flashblock 2 (reorg): txs [A, D, E] +//! -> Prefix [A] matches, but tx[1]=D != B +//! -> Cached prestate may be partially useful, but diverges +//! -> Execute A, D, E +//! ``` + +use alloy_eips::eip7685::Requests; +use alloy_primitives::B256; +use reth_primitives_traits::NodePrimitives; +use reth_revm::db::BundleState; + +/// Cached block-level execution metadata for the stored transaction prefix. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub(crate) struct CachedExecutionMeta { + /// EIP-7685 requests emitted while executing the cached prefix. + pub requests: Requests, + /// Total gas used by the cached prefix. 
+ pub gas_used: u64, + /// Total blob/DA gas used by the cached prefix. + pub blob_gas_used: u64, +} + +/// Resumable cached state: bundle + receipts + cached prefix length. +pub(crate) type ResumableState<'a, N> = + (&'a BundleState, &'a [::Receipt], usize); + +/// Resumable cached state plus execution metadata for the cached prefix. +pub(crate) type ResumableStateWithExecutionMeta<'a, N> = + (&'a BundleState, &'a [::Receipt], &'a Requests, u64, u64, usize); + +/// Cache of transaction execution results for a single block. +/// +/// Stores cumulative execution state that can be used as a prestate to avoid +/// redundant disk reads when re-executing transactions. The cached bundle provides +/// warm state for accounts/storage already loaded, improving execution performance. +/// +/// **Note**: This cache does NOT skip transaction execution - all transactions must +/// still be executed to populate the block body. The cache only optimizes state reads. +/// +/// The cache is invalidated when: +/// - A new block starts (different block number) +/// - Parent hash changes for parent-scoped lookups +/// - A reorg is detected (transaction list diverges from cached prefix) +/// - Explicitly cleared +#[derive(Debug)] +pub struct TransactionCache { + /// Block number this cache is valid for. + block_number: u64, + /// Parent hash this cache is valid for. + cached_parent_hash: Option, + /// Ordered list of transaction hashes that have been executed. + executed_tx_hashes: Vec, + /// Cumulative bundle state after executing all cached transactions. + cumulative_bundle: BundleState, + /// Receipts for all cached transactions, in execution order. + receipts: Vec, + /// Cached block-level execution metadata. + execution_meta: CachedExecutionMeta, +} + +impl Default for TransactionCache { + fn default() -> Self { + Self::new() + } +} + +impl TransactionCache { + /// Creates a new empty transaction cache. 
+ pub fn new() -> Self { + Self { + block_number: 0, + cached_parent_hash: None, + executed_tx_hashes: Vec::new(), + cumulative_bundle: BundleState::default(), + receipts: Vec::new(), + execution_meta: CachedExecutionMeta::default(), + } + } + + /// Creates a new cache for a specific block number. + pub fn for_block(block_number: u64) -> Self { + Self { block_number, ..Self::new() } + } + + /// Returns the block number this cache is valid for. + pub const fn block_number(&self) -> u64 { + self.block_number + } + + /// Returns the parent hash this cache is valid for, if tracked. + pub const fn parent_hash(&self) -> Option { + self.cached_parent_hash + } + + /// Checks if this cache is valid for the given block number. + pub const fn is_valid_for_block(&self, block_number: u64) -> bool { + self.block_number == block_number + } + + /// Checks if this cache is valid for the given block number and parent hash. + pub fn is_valid_for_block_parent(&self, block_number: u64, parent_hash: B256) -> bool { + self.block_number == block_number && self.cached_parent_hash == Some(parent_hash) + } + + /// Returns the number of cached transactions. + pub const fn len(&self) -> usize { + self.executed_tx_hashes.len() + } + + /// Returns true if the cache is empty. + pub const fn is_empty(&self) -> bool { + self.executed_tx_hashes.is_empty() + } + + /// Returns the cached transaction hashes. + pub fn executed_tx_hashes(&self) -> &[B256] { + &self.executed_tx_hashes + } + + /// Returns the cached receipts. + pub fn receipts(&self) -> &[N::Receipt] { + &self.receipts + } + + /// Returns the cumulative bundle state. + pub const fn bundle(&self) -> &BundleState { + &self.cumulative_bundle + } + + /// Clears the cache. 
+ pub fn clear(&mut self) { + self.executed_tx_hashes.clear(); + self.cumulative_bundle = BundleState::default(); + self.receipts.clear(); + self.execution_meta = CachedExecutionMeta::default(); + self.block_number = 0; + self.cached_parent_hash = None; + } + + /// Updates the cache for a new block, clearing if the block number changed. + /// + /// Returns true if the cache was cleared. + pub fn update_for_block(&mut self, block_number: u64) -> bool { + if self.block_number == block_number { + false + } else { + self.clear(); + self.block_number = block_number; + true + } + } + + /// Computes the length of the matching prefix between cached transactions + /// and the provided transaction hashes. + /// + /// Returns the number of transactions that can be skipped because they + /// match the cached execution results. + pub fn matching_prefix_len(&self, tx_hashes: &[B256]) -> usize { + self.executed_tx_hashes + .iter() + .zip(tx_hashes.iter()) + .take_while(|(cached, incoming)| cached == incoming) + .count() + } + + /// Returns cached state for resuming execution if the incoming transactions + /// have a matching prefix with the cache. + /// + /// Returns `Some((bundle, receipts, skip_count))` if there's a non-empty matching + /// prefix, where: + /// - `bundle` is the cumulative state after the matching prefix + /// - `receipts` is the receipts for the matching prefix + /// - `skip_count` is the number of transactions to skip + /// + /// Returns `None` if: + /// - The cache is empty + /// - No prefix matches (first transaction differs) + /// - Block number doesn't match + pub fn get_resumable_state( + &self, + block_number: u64, + tx_hashes: &[B256], + ) -> Option> { + self.get_resumable_state_with_execution_meta(block_number, tx_hashes) + .map(|(bundle, receipts, .., skip_count)| (bundle, receipts, skip_count)) + } + + /// Returns cached state and execution metadata for resuming execution if the incoming + /// transactions have a matching prefix with the cache. 
+ /// + /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if + /// there's a non-empty matching prefix and the entire cache matches the incoming prefix. + pub(crate) fn get_resumable_state_with_execution_meta( + &self, + block_number: u64, + tx_hashes: &[B256], + ) -> Option> { + if !self.is_valid_for_block(block_number) || self.is_empty() { + return None; + } + + let prefix_len = self.matching_prefix_len(tx_hashes); + if prefix_len == 0 { + return None; + } + + // Only return state if the full cache matches (partial prefix would need + // intermediate state snapshots, which we don't currently store). + // Partial match means incoming txs diverge from cache, need to re-execute. + (prefix_len == self.executed_tx_hashes.len()).then_some(( + &self.cumulative_bundle, + self.receipts.as_slice(), + &self.execution_meta.requests, + self.execution_meta.gas_used, + self.execution_meta.blob_gas_used, + prefix_len, + )) + } + + /// Returns cached state and execution metadata for resuming execution if the incoming + /// transactions have a matching prefix with the cache and the parent hash matches. + /// + /// Returns `Some((bundle, receipts, requests, gas_used, blob_gas_used, skip_count))` if + /// there's a non-empty matching prefix, the full cache matches the incoming prefix, and the + /// `(block_number, parent_hash)` tuple matches the cached scope. 
+ pub(crate) fn get_resumable_state_with_execution_meta_for_parent( + &self, + block_number: u64, + parent_hash: B256, + tx_hashes: &[B256], + ) -> Option> { + if !self.is_valid_for_block_parent(block_number, parent_hash) || self.is_empty() { + return None; + } + + let prefix_len = self.matching_prefix_len(tx_hashes); + if prefix_len == 0 { + return None; + } + + (prefix_len == self.executed_tx_hashes.len()).then_some(( + &self.cumulative_bundle, + self.receipts.as_slice(), + &self.execution_meta.requests, + self.execution_meta.gas_used, + self.execution_meta.blob_gas_used, + prefix_len, + )) + } + + /// Updates the cache with new execution results. + /// + /// This should be called after executing a flashblock. The provided bundle + /// and receipts should represent the cumulative state after all transactions. + pub fn update( + &mut self, + block_number: u64, + tx_hashes: Vec, + bundle: BundleState, + receipts: Vec, + ) { + self.update_with_execution_meta( + block_number, + tx_hashes, + bundle, + receipts, + CachedExecutionMeta::default(), + ); + } + + /// Updates the cache with new execution results and block-level metadata. + pub(crate) fn update_with_execution_meta( + &mut self, + block_number: u64, + tx_hashes: Vec, + bundle: BundleState, + receipts: Vec, + execution_meta: CachedExecutionMeta, + ) { + self.block_number = block_number; + self.cached_parent_hash = None; + self.executed_tx_hashes = tx_hashes; + self.cumulative_bundle = bundle; + self.receipts = receipts; + self.execution_meta = execution_meta; + } + + /// Updates the cache with new execution results and block-level metadata, scoped to the + /// provided parent hash. 
+ pub(crate) fn update_with_execution_meta_for_parent( + &mut self, + block_number: u64, + parent_hash: B256, + tx_hashes: Vec, + bundle: BundleState, + receipts: Vec, + execution_meta: CachedExecutionMeta, + ) { + self.block_number = block_number; + self.cached_parent_hash = Some(parent_hash); + self.executed_tx_hashes = tx_hashes; + self.cumulative_bundle = bundle; + self.receipts = receipts; + self.execution_meta = execution_meta; + } +} + +#[cfg(test)] +mod tests { + use super::*; + use reth_optimism_primitives::OpPrimitives; + + type TestCache = TransactionCache; + + #[test] + fn test_cache_block_validation() { + let mut cache = TestCache::for_block(100); + assert!(cache.is_valid_for_block(100)); + assert!(!cache.is_valid_for_block(101)); + assert!(!cache.is_valid_for_block_parent(100, B256::repeat_byte(0x11))); + + // Update for same block doesn't clear + assert!(!cache.update_for_block(100)); + + // Update for different block clears + assert!(cache.update_for_block(101)); + assert!(cache.is_valid_for_block(101)); + assert!(cache.parent_hash().is_none()); + } + + #[test] + fn test_cache_clear() { + let mut cache = TestCache::for_block(100); + assert_eq!(cache.block_number(), 100); + + cache.clear(); + assert_eq!(cache.block_number(), 0); + assert!(cache.is_empty()); + } + + #[test] + fn test_matching_prefix_len() { + let mut cache = TestCache::for_block(100); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + let tx_d = B256::repeat_byte(0xDD); + + // Update cache with [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // Full match + assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b]), 2); + + // Continuation + assert_eq!(cache.matching_prefix_len(&[tx_a, tx_b, tx_c]), 2); + + // Partial match (reorg at position 1) + assert_eq!(cache.matching_prefix_len(&[tx_a, tx_d, tx_c]), 1); + + // No match (reorg at position 0) + assert_eq!(cache.matching_prefix_len(&[tx_d, 
tx_b, tx_c]), 0); + + // Empty incoming + assert_eq!(cache.matching_prefix_len(&[]), 0); + } + + #[test] + fn test_get_resumable_state() { + let mut cache = TestCache::for_block(100); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // Empty cache returns None + assert!(cache.get_resumable_state(100, &[tx_a, tx_b]).is_none()); + + // Update cache with [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // Wrong block number returns None + assert!(cache.get_resumable_state(101, &[tx_a, tx_b]).is_none()); + + // Exact match returns state + let result = cache.get_resumable_state(100, &[tx_a, tx_b]); + assert!(result.is_some()); + let (_, _, skip) = result.unwrap(); + assert_eq!(skip, 2); + + // Continuation returns state (can skip cached txs) + let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); + assert!(result.is_some()); + let (_, _, skip) = result.unwrap(); + assert_eq!(skip, 2); + + // Partial match (reorg) returns None - can't use partial cache + assert!(cache.get_resumable_state(100, &[tx_a, tx_c]).is_none()); + } + + // ==================== E2E Cache Reuse Scenario Tests ==================== + + /// Tests the complete E2E cache scenario: fb0 [A,B] → fb1 [A,B,C] + /// Verifies that cached bundle can be used as prestate for the continuation. 
+ #[test] + fn test_e2e_cache_reuse_continuation_scenario() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // Simulate fb0: execute [A, B] from scratch + let fb0_txs = vec![tx_a, tx_b]; + assert!(cache.get_resumable_state(100, &fb0_txs).is_none()); + + // After fb0 execution, update cache + cache.update(100, fb0_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 2); + + // Simulate fb1: [A, B, C] - should resume from cached state + let fb1_txs = vec![tx_a, tx_b, tx_c]; + let result = cache.get_resumable_state(100, &fb1_txs); + assert!(result.is_some()); + let (bundle, receipts, skip) = result.unwrap(); + + // skip=2 indicates 2 txs are covered by cached state (for logging) + // Note: All transactions are still executed, skip is informational only + assert_eq!(skip, 2); + // Bundle is used as prestate to warm the State builder + assert!(bundle.state.is_empty()); // Default bundle is empty in test + assert!(receipts.is_empty()); // No receipts in this test + + // After fb1 execution, update cache with full list + cache.update(100, fb1_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 3); + } + + /// Tests reorg scenario: fb0 [A, B] → fb1 [A, D, E] + /// Verifies that divergent tx list invalidates cache. 
+ #[test] + fn test_e2e_cache_reorg_scenario() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_d = B256::repeat_byte(0xDD); + let tx_e = B256::repeat_byte(0xEE); + + // fb0: execute [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // fb1 (reorg): [A, D, E] - tx[1] diverges, cannot resume + let fb1_txs = vec![tx_a, tx_d, tx_e]; + let result = cache.get_resumable_state(100, &fb1_txs); + assert!(result.is_none()); // Partial match means we can't use cache + } + + /// Tests multi-flashblock progression within same block: + /// fb0 [A] → fb1 [A,B] → fb2 [A,B,C] + /// + /// Each flashblock can use the previous bundle as prestate for warm state reads. + /// Note: All transactions are still executed; skip count is for logging only. + #[test] + fn test_e2e_multi_flashblock_progression() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // fb0: [A] + cache.update(100, vec![tx_a], BundleState::default(), vec![]); + assert_eq!(cache.len(), 1); + + // fb1: [A, B] - cached state covers [A] (skip=1 for logging) + let fb1_txs = vec![tx_a, tx_b]; + let result = cache.get_resumable_state(100, &fb1_txs); + assert!(result.is_some()); + assert_eq!(result.unwrap().2, 1); // 1 tx covered by cache + + cache.update(100, fb1_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 2); + + // fb2: [A, B, C] - cached state covers [A, B] (skip=2 for logging) + let fb2_txs = vec![tx_a, tx_b, tx_c]; + let result = cache.get_resumable_state(100, &fb2_txs); + assert!(result.is_some()); + assert_eq!(result.unwrap().2, 2); // 2 txs covered by cache + + cache.update(100, fb2_txs, BundleState::default(), vec![]); + assert_eq!(cache.len(), 3); + } + + /// Tests that cache is invalidated on block number change. 
+ #[test] + fn test_e2e_block_transition_clears_cache() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + + // Block 100: cache [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + assert_eq!(cache.len(), 2); + + // Block 101: same txs shouldn't resume (different block) + let result = cache.get_resumable_state(101, &[tx_a, tx_b]); + assert!(result.is_none()); + + // Explicit block update clears cache + cache.update_for_block(101); + assert!(cache.is_empty()); + } + + /// Tests cache behavior with empty transaction list. + #[test] + fn test_cache_empty_transactions() { + let mut cache = TestCache::new(); + + // Empty flashblock (only system tx, no user txs) + cache.update(100, vec![], BundleState::default(), vec![]); + assert!(cache.is_empty()); + + // Can't resume from empty cache + let tx_a = B256::repeat_byte(0xAA); + assert!(cache.get_resumable_state(100, &[tx_a]).is_none()); + } + + /// Documents the semantics of `skip_count`. + /// + /// A resumable state is only returned when the incoming transaction list fully extends the + /// cached list. In that case, `skip_count` is the number of prefix transactions covered by + /// cached execution output. + #[test] + fn test_skip_count_matches_cached_prefix_len() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // Cache state after executing [A, B] + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![]); + + // get_resumable_state returns skip=2 for prefix [A, B] + let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); + assert!(result.is_some()); + let (bundle, _receipts, skip_count) = result.unwrap(); + + // skip_count indicates cached prefix length + assert_eq!(skip_count, 2); + + // The bundle is the important part - used as resumable prestate. 
+ assert!(bundle.state.is_empty()); // Default in test, real one has state + } + + /// Tests that receipts are properly cached and returned. + #[test] + fn test_cache_preserves_receipts() { + use op_alloy_consensus::OpReceipt; + use reth_optimism_primitives::OpPrimitives; + + let mut cache: TransactionCache = TransactionCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + + // Create mock receipts + let receipt_a = OpReceipt::Legacy(alloy_consensus::Receipt { + status: alloy_consensus::Eip658Value::Eip658(true), + cumulative_gas_used: 21000, + logs: vec![], + }); + let receipt_b = OpReceipt::Legacy(alloy_consensus::Receipt { + status: alloy_consensus::Eip658Value::Eip658(true), + cumulative_gas_used: 42000, + logs: vec![], + }); + + cache.update(100, vec![tx_a, tx_b], BundleState::default(), vec![receipt_a, receipt_b]); + + // Verify receipts are preserved + assert_eq!(cache.receipts().len(), 2); + + // On resumable state, receipts are returned + let tx_c = B256::repeat_byte(0xCC); + let result = cache.get_resumable_state(100, &[tx_a, tx_b, tx_c]); + assert!(result.is_some()); + let (_, receipts, _) = result.unwrap(); + assert_eq!(receipts.len(), 2); + } + + #[test] + fn test_cache_preserves_execution_meta() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + let mut requests = Requests::default(); + requests.push_request_with_type(0x01, [0xAA, 0xBB]); + + cache.update_with_execution_meta( + 100, + vec![tx_a, tx_b], + BundleState::default(), + vec![], + CachedExecutionMeta { + requests: requests.clone(), + gas_used: 42_000, + blob_gas_used: 123, + }, + ); + + let resumable = cache.get_resumable_state_with_execution_meta(100, &[tx_a, tx_b, tx_c]); + assert!(resumable.is_some()); + let (_, _, cached_requests, gas_used, blob_gas_used, skip_count) = resumable.unwrap(); + assert_eq!(skip_count, 2); + assert_eq!(gas_used, 
42_000); + assert_eq!(blob_gas_used, 123); + assert_eq!(cached_requests, &requests); + } + + #[test] + fn test_cache_parent_scoping() { + let mut cache = TestCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + let parent_a = B256::repeat_byte(0x11); + let parent_b = B256::repeat_byte(0x22); + + cache.update_with_execution_meta_for_parent( + 100, + parent_a, + vec![tx_a, tx_b], + BundleState::default(), + vec![], + CachedExecutionMeta { + requests: Requests::default(), + gas_used: 42_000, + blob_gas_used: 0, + }, + ); + + // Matching block + parent should hit. + let hit = cache.get_resumable_state_with_execution_meta_for_parent( + 100, + parent_a, + &[tx_a, tx_b, tx_c], + ); + assert!(hit.is_some()); + + // Same block but different parent should miss. + let miss = cache.get_resumable_state_with_execution_meta_for_parent( + 100, + parent_b, + &[tx_a, tx_b, tx_c], + ); + assert!(miss.is_none()); + } +} diff --git a/rust/op-reth/crates/flashblocks/src/validation.rs b/rust/op-reth/crates/flashblocks/src/validation.rs index 568181774a62e..e64de0bf68d11 100644 --- a/rust/op-reth/crates/flashblocks/src/validation.rs +++ b/rust/op-reth/crates/flashblocks/src/validation.rs @@ -7,8 +7,9 @@ //! 1. [`FlashblockSequenceValidator`] - Validates that incoming flashblocks follow the expected //! sequence ordering (consecutive indices within a block, proper block transitions). //! -//! 2. [`ReorgDetector`] - Detects chain reorganizations by comparing transaction hash sets between -//! tracked (pending) state and canonical chain state. +//! 2. [`ReorgDetector`] - Detects chain reorganizations by comparing full block fingerprints (block +//! hash, parent hash, and transaction hashes) between tracked (pending) state and canonical +//! chain state. //! //! 3. [`CanonicalBlockReconciler`] - Determines the appropriate strategy for reconciling pending //! flashblock state when new canonical blocks arrive. 
@@ -110,25 +111,46 @@ impl FlashblockSequenceValidator { } } +/// Fingerprint for a tracked block (pending/cached sequence). +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TrackedBlockFingerprint { + /// Block number. + pub block_number: u64, + /// Block hash. + pub block_hash: B256, + /// Parent hash. + pub parent_hash: B256, + /// Ordered transaction hashes in the block. + pub tx_hashes: Vec, +} + +/// Fingerprint for a canonical block notification. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct CanonicalBlockFingerprint { + /// Block number. + pub block_number: u64, + /// Block hash. + pub block_hash: B256, + /// Parent hash. + pub parent_hash: B256, + /// Ordered transaction hashes in the block. + pub tx_hashes: Vec, +} + /// Result of a reorganization detection check. #[derive(Debug, Clone, PartialEq, Eq)] pub enum ReorgDetectionResult { - /// Transaction sets match exactly. + /// Tracked and canonical fingerprints match exactly. NoReorg, - /// Transaction sets differ (counts included for diagnostics). - ReorgDetected { - /// Number of transactions in the tracked (pending) set. - tracked_count: usize, - /// Number of transactions in the canonical chain set. - canonical_count: usize, - }, + /// Tracked and canonical fingerprints differ. + ReorgDetected, } impl ReorgDetectionResult { /// Returns `true` if a reorganization was detected. #[inline] pub const fn is_reorg(&self) -> bool { - matches!(self, Self::ReorgDetected { .. }) + matches!(self, Self::ReorgDetected) } /// Returns `true` if no reorganization was detected. @@ -138,22 +160,33 @@ impl ReorgDetectionResult { } } -/// Detects chain reorganizations by comparing transaction hash sets. +/// Detects chain reorganizations by comparing full block fingerprints. /// -/// A reorg is detected when the transaction hashes in the pending (tracked) state -/// don't match the transaction hashes in the canonical block. 
This can happen when: -/// - Different transactions were included -/// - Transactions were reordered -/// - Transaction count differs +/// A reorg is detected when any fingerprint component differs: +/// - Block hash +/// - Parent hash +/// - Transaction hash list (including ordering) /// /// # Example /// /// ``` /// use alloy_primitives::B256; -/// use reth_optimism_flashblocks::validation::{ReorgDetectionResult, ReorgDetector}; +/// use reth_optimism_flashblocks::validation::{ +/// CanonicalBlockFingerprint, ReorgDetectionResult, ReorgDetector, TrackedBlockFingerprint, +/// }; /// -/// let tracked = vec![B256::repeat_byte(1), B256::repeat_byte(2)]; -/// let canonical = vec![B256::repeat_byte(1), B256::repeat_byte(2)]; +/// let tracked = TrackedBlockFingerprint { +/// block_number: 100, +/// block_hash: B256::repeat_byte(0xAA), +/// parent_hash: B256::repeat_byte(0x11), +/// tx_hashes: vec![B256::repeat_byte(1), B256::repeat_byte(2)], +/// }; +/// let canonical = CanonicalBlockFingerprint { +/// block_number: 100, +/// block_hash: B256::repeat_byte(0xAA), +/// parent_hash: B256::repeat_byte(0x11), +/// tx_hashes: vec![B256::repeat_byte(1), B256::repeat_byte(2)], +/// }; /// /// let result = ReorgDetector::detect(&tracked, &canonical); /// assert_eq!(result, ReorgDetectionResult::NoReorg); @@ -162,20 +195,18 @@ impl ReorgDetectionResult { pub struct ReorgDetector; impl ReorgDetector { - /// Compares tracked vs canonical transaction hashes to detect reorgs. - /// - /// Returns `ReorgDetected` if counts differ, hashes differ, or order differs. + /// Compares tracked vs canonical block fingerprints to detect reorgs. 
pub fn detect( - tracked_tx_hashes: &[B256], - canonical_tx_hashes: &[B256], + tracked: &TrackedBlockFingerprint, + canonical: &CanonicalBlockFingerprint, ) -> ReorgDetectionResult { - if tracked_tx_hashes == canonical_tx_hashes { + if tracked.block_hash == canonical.block_hash && + tracked.parent_hash == canonical.parent_hash && + tracked.tx_hashes == canonical.tx_hashes + { ReorgDetectionResult::NoReorg } else { - ReorgDetectionResult::ReorgDetected { - tracked_count: tracked_tx_hashes.len(), - canonical_count: canonical_tx_hashes.len(), - } + ReorgDetectionResult::ReorgDetected } } } @@ -251,8 +282,8 @@ impl CanonicalBlockReconciler { reorg_detected: bool, ) -> ReconciliationStrategy { // Check if pending state exists - let (earliest, latest) = match (pending_earliest_block, pending_latest_block) { - (Some(e), Some(l)) => (e, l), + let latest = match (pending_earliest_block, pending_latest_block) { + (Some(_e), Some(l)) => l, _ => return ReconciliationStrategy::NoPendingState, }; @@ -266,8 +297,8 @@ impl CanonicalBlockReconciler { return ReconciliationStrategy::HandleReorg; } - // Check depth limit - let depth = canonical_block_number.saturating_sub(earliest); + // Check depth limit: how many pending blocks are ahead of canonical tip. 
+ let depth = latest.saturating_sub(canonical_block_number); if depth > max_depth { return ReconciliationStrategy::DepthLimitExceeded { depth, max_depth }; } @@ -388,53 +419,70 @@ mod tests { mod reorg_detector { use super::*; - #[test] - fn test_no_reorg_identical_sequences() { - assert_eq!(ReorgDetector::detect(&[], &[]), ReorgDetectionResult::NoReorg); + fn tracked( + block_hash: B256, + parent_hash: B256, + tx_hashes: Vec, + ) -> TrackedBlockFingerprint { + TrackedBlockFingerprint { block_number: 100, block_hash, parent_hash, tx_hashes } + } - let hashes = vec![B256::repeat_byte(0x01)]; - assert_eq!(ReorgDetector::detect(&hashes, &hashes), ReorgDetectionResult::NoReorg); + fn canonical( + block_hash: B256, + parent_hash: B256, + tx_hashes: Vec, + ) -> CanonicalBlockFingerprint { + CanonicalBlockFingerprint { block_number: 100, block_hash, parent_hash, tx_hashes } + } - let hashes = - vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02), B256::repeat_byte(0x03)]; - assert_eq!(ReorgDetector::detect(&hashes, &hashes), ReorgDetectionResult::NoReorg); + #[test] + fn test_no_reorg_identical_fingerprint() { + let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; + let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); + let canonical = canonical(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes); + assert_eq!(ReorgDetector::detect(&tracked, &canonical), ReorgDetectionResult::NoReorg); } #[test] - fn test_reorg_different_order() { - let tracked = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let canonical = vec![B256::repeat_byte(0x02), B256::repeat_byte(0x01)]; + fn test_reorg_on_parent_hash_mismatch_with_identical_txs() { + let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; + let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); + let canonical = canonical(B256::repeat_byte(0xAA), B256::repeat_byte(0x22), hashes); assert_eq!( 
ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected { tracked_count: 2, canonical_count: 2 } + ReorgDetectionResult::ReorgDetected ); } #[test] - fn test_reorg_different_counts() { - let tracked = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let canonical = vec![B256::repeat_byte(0x01)]; + fn test_reorg_on_block_hash_mismatch_with_identical_txs() { + let hashes = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; + let tracked = tracked(B256::repeat_byte(0xAA), B256::repeat_byte(0x11), hashes.clone()); + let canonical = canonical(B256::repeat_byte(0xBB), B256::repeat_byte(0x11), hashes); assert_eq!( ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected { tracked_count: 2, canonical_count: 1 } - ); - - assert_eq!( - ReorgDetector::detect(&canonical, &tracked), - ReorgDetectionResult::ReorgDetected { tracked_count: 1, canonical_count: 2 } + ReorgDetectionResult::ReorgDetected ); } #[test] - fn test_reorg_different_hashes() { - let tracked = vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)]; - let canonical = vec![B256::repeat_byte(0x03), B256::repeat_byte(0x04)]; + fn test_reorg_on_tx_hash_mismatch() { + let tracked = tracked( + B256::repeat_byte(0xAA), + B256::repeat_byte(0x11), + vec![B256::repeat_byte(0x01), B256::repeat_byte(0x02)], + ); + let canonical = canonical( + B256::repeat_byte(0xAA), + B256::repeat_byte(0x11), + vec![B256::repeat_byte(0x01), B256::repeat_byte(0x03)], + ); assert_eq!( ReorgDetector::detect(&tracked, &canonical), - ReorgDetectionResult::ReorgDetected { tracked_count: 2, canonical_count: 2 } + ReorgDetectionResult::ReorgDetected ); } @@ -444,8 +492,7 @@ mod tests { assert!(no_reorg.is_no_reorg()); assert!(!no_reorg.is_reorg()); - let reorg = - ReorgDetectionResult::ReorgDetected { tracked_count: 1, canonical_count: 2 }; + let reorg = ReorgDetectionResult::ReorgDetected; assert!(reorg.is_reorg()); assert!(!reorg.is_no_reorg()); } @@ -513,11 +560,15 @@ mod 
tests { fn test_depth_limit_exceeded() { assert_eq!( CanonicalBlockReconciler::reconcile(Some(100), Some(120), 115, 10, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 15, max_depth: 10 } + ReconciliationStrategy::Continue ); assert_eq!( CanonicalBlockReconciler::reconcile(Some(100), Some(105), 101, 0, false), - ReconciliationStrategy::DepthLimitExceeded { depth: 1, max_depth: 0 } + ReconciliationStrategy::DepthLimitExceeded { depth: 4, max_depth: 0 } + ); + assert_eq!( + CanonicalBlockReconciler::reconcile(Some(100), Some(200), 130, 64, false), + ReconciliationStrategy::DepthLimitExceeded { depth: 70, max_depth: 64 } ); } @@ -541,7 +592,7 @@ mod tests { // Zero depth is OK with max_depth=0 assert_eq!( CanonicalBlockReconciler::reconcile(Some(100), Some(105), 100, 0, false), - ReconciliationStrategy::Continue + ReconciliationStrategy::DepthLimitExceeded { depth: 5, max_depth: 0 } ); } } diff --git a/rust/op-reth/crates/flashblocks/src/worker.rs b/rust/op-reth/crates/flashblocks/src/worker.rs index 972705c3cd109..af9658ab97832 100644 --- a/rust/op-reth/crates/flashblocks/src/worker.rs +++ b/rust/op-reth/crates/flashblocks/src/worker.rs @@ -1,20 +1,35 @@ -use crate::{PendingFlashBlock, pending_state::PendingBlockState}; +use crate::{ + PendingFlashBlock, + pending_state::PendingBlockState, + tx_cache::{CachedExecutionMeta, TransactionCache}, +}; use alloy_eips::{BlockNumberOrTag, eip2718::WithEncoded}; use alloy_primitives::B256; use op_alloy_rpc_types_engine::OpFlashblockPayloadBase; use reth_chain_state::{ComputedTrieData, ExecutedBlock}; use reth_errors::RethError; use reth_evm::{ - ConfigureEvm, - execute::{BlockBuilder, BlockBuilderOutcome}, + ConfigureEvm, Evm, + execute::{ + BlockAssembler, BlockAssemblerInput, BlockBuilder, BlockBuilderOutcome, BlockExecutor, + }, }; -use reth_execution_types::BlockExecutionOutput; +use reth_execution_types::{BlockExecutionOutput, BlockExecutionResult}; +use reth_optimism_primitives::OpReceipt; use 
reth_primitives_traits::{ - AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, + AlloyBlockHeader, BlockTy, HeaderTy, NodePrimitives, ReceiptTy, Recovered, RecoveredBlock, + SealedHeader, transaction::TxHashRef, +}; +use reth_revm::{ + cached::CachedReads, + database::StateProviderDatabase, + db::{BundleState, State, states::bundle_state::BundleRetention}, }; -use reth_revm::{cached::CachedReads, database::StateProviderDatabase, db::State}; use reth_rpc_eth_types::{EthApiError, PendingBlock}; -use reth_storage_api::{BlockReaderIdExt, StateProviderFactory, noop::NoopProvider}; +use reth_storage_api::{ + BlockReaderIdExt, HashedPostStateProvider, StateProviderFactory, StateRootProvider, + noop::NoopProvider, +}; use std::{ sync::Arc, time::{Duration, Instant}, @@ -62,9 +77,44 @@ pub(crate) struct BuildResult { pub(crate) pending_state: PendingBlockState, } +/// Cached prefix execution data used to resume canonical builds. +#[derive(Debug, Clone)] +struct CachedPrefixExecutionResult { + /// Number of leading transactions covered by cached execution. + cached_tx_count: usize, + /// Cumulative bundle state after executing the cached prefix. + bundle: BundleState, + /// Cached receipts for the prefix. + receipts: Vec, + /// Total gas used by the cached prefix. + gas_used: u64, + /// Total blob/DA gas used by the cached prefix. + blob_gas_used: u64, +} + +/// Receipt requirements for cache-resume flow. +pub trait FlashblockCachedReceipt: Clone { + /// Adds `gas_offset` to each receipt's `cumulative_gas_used`. 
+ fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64); +} + +impl FlashblockCachedReceipt for OpReceipt { + fn add_cumulative_gas_offset(receipts: &mut [Self], gas_offset: u64) { + if gas_offset == 0 { + return; + } + + for receipt in receipts { + let inner = receipt.as_receipt_mut(); + inner.cumulative_gas_used = inner.cumulative_gas_used.saturating_add(gas_offset); + } + } +} + impl FlashBlockBuilder where N: NodePrimitives, + N::Receipt: FlashblockCachedReceipt, EvmConfig: ConfigureEvm + Unpin>, Provider: StateProviderFactory + BlockReaderIdExt< @@ -81,12 +131,17 @@ where /// 1. **Canonical mode**: Parent matches local tip - uses state from storage /// 2. **Speculative mode**: Parent is a pending block - uses pending state /// + /// When a `tx_cache` is provided and we're in canonical mode, the builder will + /// attempt to resume from cached state if the transaction list is a continuation + /// of what was previously executed. + /// /// Returns `None` if: /// - In canonical mode: flashblock doesn't attach to the latest header /// - In speculative mode: no pending parent state provided pub(crate) fn execute>>>( &self, mut args: BuildArgs, + tx_cache: Option<&mut TransactionCache>, ) -> eyre::Result>> { trace!(target: "flashblocks", "Attempting new pending block from flashblocks"); @@ -110,15 +165,43 @@ where return Ok(None); } - // Get state provider - either from storage or pending state + // Collect transactions and extract hashes for cache lookup + let transactions: Vec<_> = args.transactions.into_iter().collect(); + let tx_hashes: Vec = transactions.iter().map(|tx| *tx.tx_hash()).collect(); + + // Get state provider and parent header context. // For speculative builds, use the canonical anchor hash (not the pending parent hash) - // to ensure we can always find the state in storage. 
- let (state_provider, canonical_anchor) = if is_canonical { - (self.provider.history_by_block_hash(latest.hash())?, latest.hash()) + // for storage reads, but execute with the pending parent's sealed header context. + let (state_provider, canonical_anchor, parent_header) = if is_canonical { + (self.provider.history_by_block_hash(latest.hash())?, latest.hash(), &latest) } else { // For speculative building, we need to use the canonical anchor // and apply the pending state's bundle on top of it let pending = args.pending_parent.as_ref().unwrap(); + let Some(parent_header) = pending.sealed_header.as_ref() else { + trace!( + target: "flashblocks", + pending_block_number = pending.block_number, + pending_block_hash = ?pending.block_hash, + "Skipping speculative build: pending parent header is unavailable" + ); + return Ok(None); + }; + if !is_consistent_speculative_parent_hashes( + args.base.parent_hash, + pending.block_hash, + parent_header.hash(), + ) { + trace!( + target: "flashblocks", + incoming_parent_hash = ?args.base.parent_hash, + pending_block_hash = ?pending.block_hash, + pending_sealed_hash = ?parent_header.hash(), + pending_block_number = pending.block_number, + "Skipping speculative build: inconsistent pending parent hashes" + ); + return Ok(None); + } trace!( target: "flashblocks", pending_block_number = pending.block_number, @@ -129,6 +212,7 @@ where ( self.provider.history_by_block_hash(pending.canonical_anchor_hash)?, pending.canonical_anchor_hash, + parent_header, ) }; @@ -146,52 +230,208 @@ where let cached_db = request_cache.as_db_mut(StateProviderDatabase::new(&state_provider)); - // Build state - for speculative builds, initialize with the pending parent's bundle as - // prestate + // Check for resumable canonical execution state. 
+ let canonical_parent_hash = args.base.parent_hash; + let cached_prefix = if is_canonical { + tx_cache.as_ref().and_then(|cache| { + cache + .get_resumable_state_with_execution_meta_for_parent( + args.base.block_number, + canonical_parent_hash, + &tx_hashes, + ) + .map( + |( + bundle, + receipts, + _requests, + gas_used, + blob_gas_used, + cached_tx_count, + )| { + trace!( + target: "flashblocks", + cached_tx_count, + total_txs = tx_hashes.len(), + "Cache hit (executing only uncached suffix)" + ); + CachedPrefixExecutionResult { + cached_tx_count, + bundle: bundle.clone(), + receipts: receipts.to_vec(), + gas_used, + blob_gas_used, + } + }, + ) + }) + } else { + None + }; + + // Build state with appropriate prestate + // - Speculative builds use pending parent prestate + // - Canonical cache-hit builds use cached prefix prestate let mut state = if let Some(ref pending) = args.pending_parent { State::builder() .with_database(cached_db) .with_bundle_prestate(pending.execution_outcome.state.clone()) .with_bundle_update() .build() + } else if let Some(ref cached_prefix) = cached_prefix { + State::builder() + .with_database(cached_db) + .with_bundle_prestate(cached_prefix.bundle.clone()) + .with_bundle_update() + .build() } else { State::builder().with_database(cached_db).with_bundle_update().build() }; - let mut builder = self - .evm_config - .builder_for_next_block(&mut state, &latest, args.base.clone().into()) - .map_err(RethError::other)?; + let (execution_result, block, hashed_state, bundle) = if let Some(cached_prefix) = + cached_prefix + { + // Cached prefix execution model: + // - The cached bundle prestate already includes pre-execution state changes + // (blockhash/beacon root updates, create2deployer), so we do NOT call + // apply_pre_execution_changes() again. 
+ // - The only pre-execution effect we need is set_state_clear_flag, which configures EVM + // empty-account handling (OP Stack chains activate Spurious Dragon at genesis, so + // this is always true). + // - Suffix transactions execute against the warm prestate. + // - Post-execution (finish()) runs once on the suffix executor, producing correct + // results for the full block. For OP Stack post-merge, the + // post_block_balance_increments are empty (no block rewards, no ommers, no + // withdrawals passed), so finish() only seals execution state. + let attrs = args.base.clone().into(); + let evm_env = + self.evm_config.next_evm_env(parent_header, &attrs).map_err(RethError::other)?; + let execution_ctx = self + .evm_config + .context_for_next_block(parent_header, attrs) + .map_err(RethError::other)?; - builder.apply_pre_execution_changes()?; + // The cached bundle prestate already includes pre-execution state changes. + // Only set the state clear flag (Spurious Dragon empty-account handling). + state.set_state_clear_flag(true); + let evm = self.evm_config.evm_with_env(&mut state, evm_env); + let mut executor = self.evm_config.create_executor(evm, execution_ctx.clone()); - for tx in args.transactions { - let _gas_used = builder.execute_transaction(tx)?; - } + for tx in transactions.iter().skip(cached_prefix.cached_tx_count).cloned() { + let _gas_used = executor.execute_transaction(tx)?; + } + + let (evm, suffix_execution_result) = executor.finish()?; + let (db, evm_env) = evm.finish(); + db.merge_transitions(BundleRetention::Reverts); + + let execution_result = + Self::merge_cached_and_suffix_results(cached_prefix, suffix_execution_result); - // if the real state root should be computed - let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = - if args.compute_state_root { + let (hashed_state, state_root) = if args.compute_state_root { trace!(target: "flashblocks", "Computing block state root"); - builder.finish(&state_provider)? 
+ let hashed_state = state_provider.hashed_post_state(&db.bundle_state); + let (state_root, _) = state_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(RethError::other)?; + (hashed_state, state_root) } else { - builder.finish(NoopProvider::default())? + let noop_provider = NoopProvider::default(); + let hashed_state = noop_provider.hashed_post_state(&db.bundle_state); + let (state_root, _) = noop_provider + .state_root_with_updates(hashed_state.clone()) + .map_err(RethError::other)?; + (hashed_state, state_root) }; + let bundle = db.take_bundle(); + + let (block_transactions, senders): (Vec<_>, Vec<_>) = + transactions.iter().map(|tx| tx.1.clone().into_parts()).unzip(); + let block = self + .evm_config + .block_assembler() + .assemble_block(BlockAssemblerInput::new( + evm_env, + execution_ctx, + parent_header, + block_transactions, + &execution_result, + &bundle, + &state_provider, + state_root, + )) + .map_err(RethError::other)?; + let block = RecoveredBlock::new_unhashed(block, senders); + + (execution_result, block, hashed_state, bundle) + } else { + let mut builder = self + .evm_config + .builder_for_next_block(&mut state, parent_header, args.base.clone().into()) + .map_err(RethError::other)?; + + builder.apply_pre_execution_changes()?; + + for tx in transactions { + let _gas_used = builder.execute_transaction(tx)?; + } + + let BlockBuilderOutcome { execution_result, block, hashed_state, .. } = + if args.compute_state_root { + trace!(target: "flashblocks", "Computing block state root"); + builder.finish(&state_provider)? + } else { + builder.finish(NoopProvider::default())? 
+ }; + let bundle = state.take_bundle(); + + (execution_result, block, hashed_state, bundle) + }; - let execution_outcome = - BlockExecutionOutput { state: state.take_bundle(), result: execution_result }; + // Update transaction cache if provided (only in canonical mode) + if let Some(cache) = tx_cache && + is_canonical + { + cache.update_with_execution_meta_for_parent( + args.base.block_number, + canonical_parent_hash, + tx_hashes, + bundle.clone(), + execution_result.receipts.clone(), + CachedExecutionMeta { + requests: execution_result.requests.clone(), + gas_used: execution_result.gas_used, + blob_gas_used: execution_result.blob_gas_used, + }, + ); + } + + let execution_outcome = BlockExecutionOutput { state: bundle, result: execution_result }; let execution_outcome = Arc::new(execution_outcome); - // Create pending state for subsequent builds - // Forward the canonical anchor so chained speculative builds can load state + // Create pending state for subsequent builds. + // Use the locally built block hash for both parent matching and speculative + // execution context to avoid split-hash ambiguity. 
+ let local_block_hash = block.hash(); + if local_block_hash != args.last_flashblock_hash { + trace!( + target: "flashblocks", + local_block_hash = ?local_block_hash, + sequencer_block_hash = ?args.last_flashblock_hash, + block_number = block.number(), + "Local block hash differs from sequencer-provided hash; speculative chaining will follow local hash" + ); + } + let sealed_header = SealedHeader::new(block.header().clone(), local_block_hash); let pending_state = PendingBlockState::new( - block.hash(), + local_block_hash, block.number(), args.base.parent_hash, canonical_anchor, execution_outcome.clone(), request_cache.clone(), - ); + ) + .with_sealed_header(sealed_header); let pending_block = PendingBlock::with_executed_block( Instant::now() + Duration::from_secs(1), @@ -206,6 +446,7 @@ where ); let pending_flashblock = PendingFlashBlock::new( pending_block, + canonical_anchor, args.last_flashblock_index, args.last_flashblock_hash, args.compute_state_root, @@ -213,6 +454,38 @@ where Ok(Some(BuildResult { pending_flashblock, cached_reads: request_cache, pending_state })) } + + fn merge_cached_and_suffix_results( + cached_prefix: CachedPrefixExecutionResult, + mut suffix_result: BlockExecutionResult, + ) -> BlockExecutionResult { + N::Receipt::add_cumulative_gas_offset(&mut suffix_result.receipts, cached_prefix.gas_used); + + let mut receipts = cached_prefix.receipts; + receipts.extend(suffix_result.receipts); + + // Use only suffix requests: the suffix executor's finish() produces + // post-execution requests from the complete block state (cached prestate + + // suffix changes). The cached prefix requests came from an intermediate + // state and must not be merged. 
+ let requests = suffix_result.requests; + + BlockExecutionResult { + receipts, + requests, + gas_used: cached_prefix.gas_used.saturating_add(suffix_result.gas_used), + blob_gas_used: cached_prefix.blob_gas_used.saturating_add(suffix_result.blob_gas_used), + } + } +} + +#[inline] +fn is_consistent_speculative_parent_hashes( + incoming_parent_hash: B256, + pending_block_hash: B256, + pending_sealed_hash: B256, +) -> bool { + incoming_parent_hash == pending_block_hash && pending_block_hash == pending_sealed_hash } impl Clone for FlashBlockBuilder { @@ -220,3 +493,205 @@ impl Clone for FlashBlockBuilder OpTransactionSigned { + let mut tx = TxEip1559 { + chain_id: 10, // OP Mainnet chain id + nonce, + gas_limit: 100_000, + max_priority_fee_per_gas: 1_000_000_000, + max_fee_per_gas: 2_000_000_000, + to: TxKind::Call(recipient), + value: U256::from(1), + ..Default::default() + }; + let signature = signer.sign_transaction_sync(&mut tx).expect("signing tx succeeds"); + tx.into_signed(signature).into() + } + + fn into_encoded_recovered( + tx: OpTransactionSigned, + signer: Address, + ) -> alloy_eips::eip2718::WithEncoded> { + let encoded = tx.encoded_2718(); + Recovered::new_unchecked(tx, signer).into_encoded_with(encoded) + } + + #[test] + fn speculative_parent_hashes_must_all_match() { + let h = B256::repeat_byte(0x11); + assert!(is_consistent_speculative_parent_hashes(h, h, h)); + } + + #[test] + fn speculative_parent_hashes_reject_any_mismatch() { + let incoming = B256::repeat_byte(0x11); + let pending = B256::repeat_byte(0x22); + let sealed = B256::repeat_byte(0x33); + + assert!(!is_consistent_speculative_parent_hashes(incoming, pending, sealed)); + assert!(!is_consistent_speculative_parent_hashes(incoming, incoming, sealed)); + assert!(!is_consistent_speculative_parent_hashes(incoming, pending, pending)); + } + + #[test] + fn canonical_build_reuses_cached_prefix_execution() { + let provider = MockEthProvider::::new() + .with_chain_spec(OP_MAINNET.clone()) + 
.with_genesis_block(); + + let recipient = Address::repeat_byte(0x22); + let signer = PrivateKeySigner::random(); + let tx_a = signed_transfer_tx(&signer, 0, recipient); + let tx_b = signed_transfer_tx(&signer, 1, recipient); + let tx_c = signed_transfer_tx(&signer, 2, recipient); + let signer = tx_a.recover_signer().expect("tx signer recovery succeeds"); + + provider.add_account(signer, ExtendedAccount::new(0, U256::from(1_000_000_000_000_000u64))); + provider.add_account(recipient, ExtendedAccount::new(0, U256::ZERO)); + provider.add_account( + L1_BLOCK_CONTRACT, + ExtendedAccount::new(1, U256::ZERO).extend_storage([ + (StorageKey::with_last_byte(1), StorageValue::from(1_000_000_000u64)), + (StorageKey::with_last_byte(5), StorageValue::from(188u64)), + (StorageKey::with_last_byte(6), StorageValue::from(684_000u64)), + ( + StorageKey::with_last_byte(3), + StorageValue::from_str( + "0x0000000000000000000000000000000000001db0000d27300000000000000005", + ) + .expect("valid L1 fee scalar storage value"), + ), + ]), + ); + + let latest = provider + .latest_header() + .expect("provider latest header query succeeds") + .expect("genesis header exists"); + + let base = OpFlashblockPayloadBase { + parent_hash: latest.hash(), + parent_beacon_block_root: B256::ZERO, + fee_recipient: Address::ZERO, + prev_randao: B256::repeat_byte(0x55), + block_number: latest.number() + 1, + gas_limit: 30_000_000, + timestamp: latest.timestamp() + 2, + extra_data: Default::default(), + base_fee_per_gas: U256::from(1_000_000_000u64), + }; + let base_parent_hash = base.parent_hash; + + let tx_a_hash = B256::from(*tx_a.tx_hash()); + let tx_b_hash = B256::from(*tx_b.tx_hash()); + let tx_c_hash = B256::from(*tx_c.tx_hash()); + + let tx_a = into_encoded_recovered(tx_a, signer); + let tx_b = into_encoded_recovered(tx_b, signer); + let tx_c = into_encoded_recovered(tx_c, signer); + + let evm_config = OpEvmConfig::optimism(OP_MAINNET.clone()); + let builder = FlashBlockBuilder::new(evm_config, 
provider); + let mut tx_cache = TransactionCache::::new(); + + let first = builder + .execute( + BuildArgs { + base: base.clone(), + transactions: vec![tx_a.clone(), tx_b.clone()], + cached_state: None, + last_flashblock_index: 0, + last_flashblock_hash: B256::repeat_byte(0xA0), + compute_state_root: false, + pending_parent: None, + }, + Some(&mut tx_cache), + ) + .expect("first build succeeds") + .expect("first build is canonical"); + + assert_eq!(first.pending_state.execution_outcome.result.receipts.len(), 2); + + let cached_hashes = vec![tx_a_hash, tx_b_hash]; + let (bundle, receipts, requests, gas_used, blob_gas_used, skip) = tx_cache + .get_resumable_state_with_execution_meta_for_parent( + base.block_number, + base_parent_hash, + &cached_hashes, + ) + .expect("cache should contain first build execution state"); + assert_eq!(skip, 2); + + let mut tampered_receipts = receipts.to_vec(); + tampered_receipts[0].as_receipt_mut().cumulative_gas_used = + tampered_receipts[0].as_receipt().cumulative_gas_used.saturating_add(17); + let expected_tampered_gas = tampered_receipts[0].as_receipt().cumulative_gas_used; + + tx_cache.update_with_execution_meta_for_parent( + base.block_number, + base_parent_hash, + cached_hashes, + bundle.clone(), + tampered_receipts, + CachedExecutionMeta { requests: requests.clone(), gas_used, blob_gas_used }, + ); + + let second_hashes = vec![tx_a_hash, tx_b_hash, tx_c_hash]; + let (_, _, _, _, _, skip) = tx_cache + .get_resumable_state_with_execution_meta_for_parent( + base.block_number, + base_parent_hash, + &second_hashes, + ) + .expect("second tx list should extend cached prefix"); + assert_eq!(skip, 2); + + let second = builder + .execute( + BuildArgs { + base, + transactions: vec![tx_a, tx_b, tx_c], + cached_state: None, + last_flashblock_index: 1, + last_flashblock_hash: B256::repeat_byte(0xA1), + compute_state_root: false, + pending_parent: None, + }, + Some(&mut tx_cache), + ) + .expect("second build succeeds") + .expect("second build 
is canonical"); + + let receipts = &second.pending_state.execution_outcome.result.receipts; + assert_eq!(receipts.len(), 3); + assert_eq!(receipts[0].as_receipt().cumulative_gas_used, expected_tampered_gas); + assert!( + receipts[2].as_receipt().cumulative_gas_used > + receipts[1].as_receipt().cumulative_gas_used + ); + } +} diff --git a/rust/op-reth/crates/flashblocks/tests/it/harness.rs b/rust/op-reth/crates/flashblocks/tests/it/harness.rs index f7b25a0f690ee..27122019dd65c 100644 --- a/rust/op-reth/crates/flashblocks/tests/it/harness.rs +++ b/rust/op-reth/crates/flashblocks/tests/it/harness.rs @@ -10,7 +10,8 @@ use op_alloy_rpc_types_engine::{ }; use reth_optimism_flashblocks::{ CanonicalBlockNotification, FlashBlock, FlashBlockCompleteSequence, InProgressFlashBlockRx, - PendingBlockState, validation::ReconciliationStrategy, + PendingBlockState, + validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, }; use std::sync::Arc; use tokio::sync::{broadcast, mpsc, watch}; @@ -221,19 +222,19 @@ impl TestSequenceManager { /// Processes a canonical block notification and returns the reconciliation strategy. pub(crate) fn process_canonical_block( &mut self, - canonical_block_number: u64, - canonical_tx_hashes: &[B256], + canonical: CanonicalBlockFingerprint, max_depth: u64, ) -> ReconciliationStrategy { + let canonical_block_number = canonical.block_number; let earliest = self.earliest_block_number(); let latest = self.latest_block_number(); - let (Some(earliest), Some(latest)) = (earliest, latest) else { + let (Some(_earliest), Some(latest)) = (earliest, latest) else { return ReconciliationStrategy::NoPendingState; }; - // Check depth limit - let depth = canonical_block_number.saturating_sub(earliest); + // Check depth limit: pending blocks ahead of canonical tip. 
+ let depth = latest.saturating_sub(canonical_block_number); if canonical_block_number < latest && depth > max_depth { self.clear(); return ReconciliationStrategy::DepthLimitExceeded { depth, max_depth }; @@ -247,7 +248,7 @@ impl TestSequenceManager { // Check for reorg (simplified: any tx hash mismatch) // In real implementation, would compare tx hashes - if !canonical_tx_hashes.is_empty() { + if !canonical.tx_hashes.is_empty() { // Simplified reorg detection self.clear(); return ReconciliationStrategy::HandleReorg; diff --git a/rust/op-reth/crates/flashblocks/tests/it/service.rs b/rust/op-reth/crates/flashblocks/tests/it/service.rs index 11a9cf9023f38..1e5ef107f3676 100644 --- a/rust/op-reth/crates/flashblocks/tests/it/service.rs +++ b/rust/op-reth/crates/flashblocks/tests/it/service.rs @@ -5,12 +5,13 @@ //! - Speculative building when pending parent state is available //! - Canonical block reconciliation //! - Build job scheduling +//! - Transaction cache reuse across flashblocks use alloy_primitives::B256; use reth_execution_types::BlockExecutionOutput; use reth_optimism_flashblocks::{ CanonicalBlockNotification, PendingBlockState, PendingStateRegistry, - validation::ReconciliationStrategy, + validation::{CanonicalBlockFingerprint, ReconciliationStrategy}, }; use reth_optimism_primitives::OpPrimitives; use reth_revm::cached::CachedReads; @@ -18,6 +19,18 @@ use std::sync::Arc; use crate::harness::{FlashBlockServiceTestHarness, TestFlashBlockFactory}; +const fn canonical_fingerprint( + block_number: u64, + tx_hashes: Vec, +) -> CanonicalBlockFingerprint { + CanonicalBlockFingerprint { + block_number, + block_hash: B256::repeat_byte(0xAB), + parent_hash: B256::repeat_byte(0xCD), + tx_hashes, + } +} + /// Tests that the service processes flashblocks and updates the sequence manager. 
#[tokio::test] async fn test_service_processes_flashblocks() { @@ -69,7 +82,12 @@ async fn test_service_handles_canonical_catchup() { // Canonical block arrives at 100 - should trigger catch-up harness - .send_canonical_block(CanonicalBlockNotification { block_number: 100, tx_hashes: vec![] }) + .send_canonical_block(CanonicalBlockNotification { + block_number: 100, + block_hash: B256::repeat_byte(0x10), + parent_hash: B256::repeat_byte(0x01), + tx_hashes: vec![], + }) .await; // Verify reconciliation strategy was CatchUp @@ -92,6 +110,8 @@ async fn test_service_handles_reorg() { harness .send_canonical_block(CanonicalBlockNotification { block_number: 100, + block_hash: B256::repeat_byte(0x11), + parent_hash: B256::repeat_byte(0x02), tx_hashes: canonical_tx_hashes, }) .await; @@ -190,7 +210,7 @@ async fn test_depth_limit_exceeded() { sequences.insert_flashblock(fb2).unwrap(); // Canonical at 101 with max_depth of 0 should trigger depth limit exceeded - let strategy = sequences.process_canonical_block(101, &[], 0); + let strategy = sequences.process_canonical_block(canonical_fingerprint(101, vec![]), 0); assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); } @@ -286,3 +306,269 @@ async fn test_in_progress_signal() { // This test primarily verifies the signal mechanism is wired up assert!(in_progress_rx.borrow().is_none()); } + +// ==================== Transaction Cache Integration Tests ==================== + +/// Tests the transaction cache E2E scenario: fb0 [A,B] → fb1 [A,B,C] +/// This verifies the cache flow at the sequence manager level. 
+#[tokio::test] +async fn test_transaction_cache_continuation_flow() { + use reth_optimism_flashblocks::TransactionCache; + + // Create a transaction cache + let mut cache: TransactionCache = TransactionCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + let tx_b = B256::repeat_byte(0xBB); + let tx_c = B256::repeat_byte(0xCC); + + // Simulate fb0 execution: [A, B] + let fb0_txs = vec![tx_a, tx_b]; + assert!(cache.get_resumable_state(100, &fb0_txs).is_none()); + + // After fb0 execution, update cache + cache.update(100, fb0_txs, reth_revm::db::BundleState::default(), vec![]); + + // Simulate fb1: [A, B, C] - should resume and skip A, B + let fb1_txs = vec![tx_a, tx_b, tx_c]; + let result = cache.get_resumable_state(100, &fb1_txs); + assert!(result.is_some()); + let (_, _, skip) = result.unwrap(); + assert_eq!(skip, 2); // Skip first 2 txs +} + +/// Tests that transaction cache is invalidated on block change. +#[tokio::test] +async fn test_transaction_cache_block_transition() { + use reth_optimism_flashblocks::TransactionCache; + + let mut cache: TransactionCache = TransactionCache::new(); + + let tx_a = B256::repeat_byte(0xAA); + + // Block 100 + cache.update(100, vec![tx_a], reth_revm::db::BundleState::default(), vec![]); + + // Block 101 - cache should not be valid + assert!(cache.get_resumable_state(101, &[tx_a]).is_none()); +} + +// ==================== Reconciliation Integration Tests ==================== + +/// Tests that reconciliation properly clears state on catch-up. 
+#[tokio::test] +async fn test_reconciliation_catchup_clears_state() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Build up state for blocks 100, 101 + let fb0 = factory.flashblock_at(0).build(); + sequences.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + sequences.insert_flashblock(fb1).unwrap(); + + // Verify state exists + assert!(sequences.earliest_block_number().is_some()); + + // Canonical catches up to 101 + let strategy = sequences.process_canonical_block(canonical_fingerprint(101, vec![]), 10); + assert_eq!(strategy, ReconciliationStrategy::CatchUp); + + // After catch-up, no buildable args should exist + let local_tip = B256::random(); + let args = sequences.next_buildable_args::(local_tip, 1000000, None); + assert!(args.is_none()); +} + +/// Tests that reconciliation properly clears state on depth limit exceeded. +#[tokio::test] +async fn test_reconciliation_depth_limit_clears_state() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Build up state for blocks 100-102 + let fb0 = factory.flashblock_at(0).build(); + sequences.insert_flashblock(fb0.clone()).unwrap(); + + let fb1 = factory.flashblock_for_next_block(&fb0).build(); + sequences.insert_flashblock(fb1.clone()).unwrap(); + + let fb2 = factory.flashblock_for_next_block(&fb1).build(); + sequences.insert_flashblock(fb2).unwrap(); + + // Canonical at 101 with very small max_depth (0) + let strategy = sequences.process_canonical_block(canonical_fingerprint(101, vec![]), 0); + assert!(matches!(strategy, ReconciliationStrategy::DepthLimitExceeded { .. })); + + // After depth exceeded, no state should remain + assert!(sequences.earliest_block_number().is_none()); +} + +/// Tests continue strategy preserves state. 
+#[tokio::test] +async fn test_reconciliation_continue_preserves_state() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Build up state for block 100 + let fb0 = factory.flashblock_at(0).build(); + let parent_hash = fb0.base.as_ref().unwrap().parent_hash; + sequences.insert_flashblock(fb0).unwrap(); + + // Canonical at 99 (behind pending) + let strategy = sequences.process_canonical_block(canonical_fingerprint(99, vec![]), 10); + assert_eq!(strategy, ReconciliationStrategy::Continue); + + // State should be preserved - can still build + let args = sequences.next_buildable_args::(parent_hash, 1000000, None); + assert!(args.is_some()); +} + +// ==================== Speculative Building Chain Tests ==================== + +/// Tests multi-level speculative building: block N → N+1 → N+2. +#[tokio::test] +async fn test_speculative_building_chain() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Create flashblock sequence for block 100 + let fb100 = factory.flashblock_at(0).block_number(100).build(); + let block_99_hash = fb100.base.as_ref().unwrap().parent_hash; + sequences.insert_flashblock(fb100.clone()).unwrap(); + + // Create flashblock sequence for block 101 (caches 100) + let fb101 = factory.flashblock_for_next_block(&fb100).build(); + let block_100_hash = fb101.base.as_ref().unwrap().parent_hash; + sequences.insert_flashblock(fb101.clone()).unwrap(); + + // Create flashblock sequence for block 102 (caches 101) + let fb102 = factory.flashblock_for_next_block(&fb101).build(); + let block_101_hash = fb102.base.as_ref().unwrap().parent_hash; + sequences.insert_flashblock(fb102).unwrap(); + + // Local tip is some random hash (not matching any canonical) + let local_tip = B256::random(); + + // Pending state for block 99 should allow 
building block 100 + let parent_of_99 = B256::random(); + let pending_99: PendingBlockState = PendingBlockState::new( + block_99_hash, + 99, + parent_of_99, + parent_of_99, // canonical_anchor_hash + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + let args = sequences.next_buildable_args(local_tip, 1000000, Some(pending_99)); + assert!(args.is_some()); + assert_eq!(args.as_ref().unwrap().base.block_number, 100); + + // Pending state for block 100 should allow building block 101 + let pending_100: PendingBlockState = PendingBlockState::new( + block_100_hash, + 100, + block_99_hash, + block_99_hash, // canonical_anchor_hash (forwarded from parent) + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + let args = sequences.next_buildable_args(local_tip, 1000000, Some(pending_100)); + assert!(args.is_some()); + assert_eq!(args.as_ref().unwrap().base.block_number, 101); + + // Pending state for block 101 should allow building block 102 + let pending_101: PendingBlockState = PendingBlockState::new( + block_101_hash, + 101, + block_100_hash, + block_100_hash, // canonical_anchor_hash (forwarded from parent) + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + let args = sequences.next_buildable_args(local_tip, 1000000, Some(pending_101)); + assert!(args.is_some()); + assert_eq!(args.as_ref().unwrap().base.block_number, 102); +} + +/// Tests that speculative build args include the pending parent state. 
+#[tokio::test] +async fn test_speculative_build_includes_pending_parent() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Create flashblock for block 101 + let fb = factory.flashblock_at(0).block_number(101).build(); + let block_100_hash = fb.base.as_ref().unwrap().parent_hash; + sequences.insert_flashblock(fb).unwrap(); + + // Local tip doesn't match + let local_tip = B256::random(); + + // Create pending state for block 100 + let parent_of_100 = B256::random(); + let pending_state: PendingBlockState = PendingBlockState::new( + block_100_hash, + 100, + parent_of_100, + parent_of_100, // canonical_anchor_hash + Arc::new(BlockExecutionOutput::default()), + CachedReads::default(), + ); + + let args = sequences.next_buildable_args(local_tip, 1000000, Some(pending_state)); + assert!(args.is_some()); + + let build_args = args.unwrap(); + assert!(build_args.pending_parent.is_some()); + + // Verify the pending parent has the correct block info + let pp = build_args.pending_parent.unwrap(); + assert_eq!(pp.block_number, 100); + assert_eq!(pp.block_hash, block_100_hash); +} + +// ==================== Edge Case Tests ==================== + +/// Tests behavior when no pending state and no canonical match. +#[tokio::test] +async fn test_no_buildable_when_nothing_matches() { + let harness = FlashBlockServiceTestHarness::new(); + let factory = TestFlashBlockFactory::new(); + + let mut sequences = harness.create_sequence_manager(); + + // Create flashblock for block 100 + let fb = factory.flashblock_at(0).build(); + sequences.insert_flashblock(fb).unwrap(); + + // Local tip doesn't match, no pending state + let local_tip = B256::random(); + let args = sequences.next_buildable_args::(local_tip, 1000000, None); + assert!(args.is_none()); +} + +/// Tests that `NoPendingState` is returned when no sequences exist. 
+#[tokio::test] +async fn test_reconciliation_with_no_pending_state() { + let harness = FlashBlockServiceTestHarness::new(); + let mut sequences = harness.create_sequence_manager(); + + // No flashblocks inserted + let strategy = sequences.process_canonical_block(canonical_fingerprint(100, vec![]), 10); + assert_eq!(strategy, ReconciliationStrategy::NoPendingState); +} diff --git a/rust/op-reth/crates/rpc/src/eth/mod.rs b/rust/op-reth/crates/rpc/src/eth/mod.rs index 483cd4a7efc33..d53b11d8bf0d9 100644 --- a/rust/op-reth/crates/rpc/src/eth/mod.rs +++ b/rust/op-reth/crates/rpc/src/eth/mod.rs @@ -13,9 +13,8 @@ use crate::{ OpEthApiError, SequencerClient, eth::{receipt::OpReceiptConverter, transaction::OpTxInfoMapper}, }; -use alloy_consensus::BlockHeader; use alloy_eips::BlockNumHash; -use alloy_primitives::{B256, U256}; +use alloy_primitives::U256; use alloy_rpc_types_eth::{Filter, Log}; use eyre::WrapErr; use futures::StreamExt; @@ -29,9 +28,10 @@ use reth_node_api::{FullNodeComponents, FullNodeTypes, HeaderTy, NodeTypes}; use reth_node_builder::rpc::{EthApiBuilder, EthApiCtx}; use reth_optimism_flashblocks::{ FlashBlockBuildInfo, FlashBlockCompleteSequence, FlashBlockCompleteSequenceRx, - FlashBlockConsensusClient, FlashBlockRx, FlashBlockService, FlashblocksListeners, - PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, + FlashBlockConsensusClient, FlashBlockRx, FlashBlockService, FlashblockCachedReceipt, + FlashblocksListeners, PendingBlockRx, PendingFlashBlock, WsFlashBlockStream, }; +use reth_primitives_traits::NodePrimitives; use reth_rpc::eth::core::EthApiInner; use reth_rpc_eth_api::{ EthApiTypes, FromEvmError, FullEthApiServer, RpcConvert, RpcConverter, RpcNodeCore, @@ -42,10 +42,9 @@ use reth_rpc_eth_api::{ }, }; use reth_rpc_eth_types::{ - EthStateCache, FeeHistoryCache, GasPriceOracle, PendingBlock, - logs_utils::matching_block_logs_with_tx_hashes, + EthStateCache, FeeHistoryCache, GasPriceOracle, logs_utils::matching_block_logs_with_tx_hashes, }; 
-use reth_storage_api::{BlockReaderIdExt, ProviderHeader}; +use reth_storage_api::ProviderHeader; use reth_tasks::{ TaskSpawner, pool::{BlockingTaskGuard, BlockingTaskPool}, @@ -184,20 +183,16 @@ impl OpEthApi { self.inner.flashblocks.as_ref().and_then(|f| *f.in_progress_rx.borrow()) } - /// Extracts pending block if it matches the expected parent hash. - fn extract_matching_block( + /// Extracts the latest pending flashblock from flashblocks state, if available. + fn extract_pending_flashblock( &self, block: Option<&PendingFlashBlock>, - parent_hash: B256, - ) -> Option> { - block.filter(|b| b.block().parent_hash() == parent_hash).map(|b| b.pending.clone()) + ) -> Option> { + block.cloned() } /// Awaits a fresh flashblock if one is being built, otherwise returns current. - async fn flashblock( - &self, - parent_hash: B256, - ) -> eyre::Result>> { + async fn flashblock(&self) -> eyre::Result>> { let Some(rx) = self.inner.flashblocks.as_ref().map(|f| &f.pending_block_rx) else { return Ok(None); }; @@ -209,8 +204,8 @@ impl OpEthApi { // Check if this is the first flashblock or the next consecutive index let is_next_index = current_index.is_none_or(|idx| build_info.index == idx + 1); - // Wait only for relevant flashblocks: matching parent and next in sequence - if build_info.parent_hash == parent_hash && is_next_index { + // Wait for the next in-sequence flashblock to reduce stale pending responses. + if is_next_index { let mut rx_clone = rx.clone(); // Wait up to MAX_FLASHBLOCK_WAIT_DURATION for a new flashblock to arrive let _ = time::timeout(MAX_FLASHBLOCK_WAIT_DURATION, rx_clone.changed()).await; @@ -218,24 +213,20 @@ impl OpEthApi { } // Fall back to current block - Ok(self.extract_matching_block(rx.borrow().as_ref(), parent_hash)) + Ok(self.extract_pending_flashblock(rx.borrow().as_ref())) } - /// Returns a [`PendingBlock`] that is built out of flashblocks. + /// Returns a [`PendingFlashBlock`] that is built out of flashblocks. 
/// /// If flashblocks receiver is not set, then it always returns `None`. /// /// It may wait up to 50ms for a fresh flashblock if one is currently being built. - pub async fn pending_flashblock(&self) -> eyre::Result>> + pub async fn pending_flashblock(&self) -> eyre::Result>> where OpEthApiError: FromEvmError, Rpc: RpcConvert, { - let Some(latest) = self.provider().latest_header()? else { - return Ok(None); - }; - - self.flashblock(latest.hash()).await + self.flashblock().await } } @@ -546,6 +537,7 @@ where >, NetworkT: RpcTypes, OpRpcConvert: RpcConvert, + <::Primitives as NodePrimitives>::Receipt: FlashblockCachedReceipt, OpEthApi>: FullEthApiServer, { diff --git a/rust/op-reth/crates/rpc/src/eth/pending_block.rs b/rust/op-reth/crates/rpc/src/eth/pending_block.rs index 587693e85734f..0c76edaca1123 100644 --- a/rust/op-reth/crates/rpc/src/eth/pending_block.rs +++ b/rust/op-reth/crates/rpc/src/eth/pending_block.rs @@ -1,9 +1,10 @@ //! Loads OP pending block for a RPC response. use crate::{OpEthApi, OpEthApiError}; -use alloy_consensus::BlockHeader; use alloy_eips::BlockNumberOrTag; +use alloy_primitives::B256; use reth_chain_state::BlockState; +use reth_optimism_flashblocks::PendingFlashBlock; use reth_rpc_eth_api::{ FromEvmError, RpcConvert, RpcNodeCore, RpcNodeCoreExt, helpers::{LoadPendingBlock, SpawnBlocking, pending_block::PendingEnvBuilder}, @@ -14,6 +15,13 @@ use reth_rpc_eth_types::{ }; use reth_storage_api::{BlockReaderIdExt, StateProviderBox, StateProviderFactory}; +#[inline] +const fn pending_state_history_lookup_hash( + pending_block: &PendingFlashBlock, +) -> B256 { + pending_block.canonical_anchor_hash +} + impl LoadPendingBlock for OpEthApi where N: RpcNodeCore, @@ -43,15 +51,15 @@ where let Ok(Some(pending_block)) = self.pending_flashblock().await else { return Ok(None); }; + let canonical_anchor_hash = pending_state_history_lookup_hash(&pending_block); + let state = BlockState::from(pending_block.pending); - let latest_historical = self + let 
anchor_historical = self .provider() - .history_by_block_hash(pending_block.block().parent_hash()) + .history_by_block_hash(canonical_anchor_hash) .map_err(Self::Error::from_eth_err)?; - let state = BlockState::from(pending_block); - - Ok(Some(Box::new(state.state_provider(latest_historical)) as StateProviderBox)) + Ok(Some(Box::new(state.state_provider(anchor_historical)) as StateProviderBox)) } /// Returns the locally built pending block @@ -59,7 +67,7 @@ where &self, ) -> Result>, Self::Error> { if let Ok(Some(pending)) = self.pending_flashblock().await { - return Ok(Some(pending.into_block_and_receipts())); + return Ok(Some(pending.pending.into_block_and_receipts())); } // See: @@ -77,3 +85,35 @@ where Ok(latest) } } + +#[cfg(test)] +mod tests { + use super::pending_state_history_lookup_hash; + use alloy_primitives::B256; + use reth_chain_state::ExecutedBlock; + use reth_optimism_flashblocks::PendingFlashBlock; + use reth_optimism_primitives::OpPrimitives; + use reth_rpc_eth_types::PendingBlock; + use std::time::Instant; + + #[test] + fn pending_state_prefers_canonical_anchor_over_parent_hash() { + let pending = PendingBlock::::with_executed_block( + Instant::now(), + ExecutedBlock::::default(), + ); + let parent_hash = pending.parent_hash(); + let canonical_anchor_hash = B256::from([0x11; 32]); + assert_ne!(canonical_anchor_hash, parent_hash); + + let pending_flashblock = PendingFlashBlock::::new( + pending, + canonical_anchor_hash, + 0, + B256::ZERO, + false, + ); + + assert_eq!(pending_state_history_lookup_hash(&pending_flashblock), canonical_anchor_hash); + } +} From 3a84e7bf195f61d5d24f6ee6dab4bee700772888 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Sun, 22 Feb 2026 13:06:57 -0500 Subject: [PATCH 011/201] ci: Switch auth used to publish kona prestates. (#19268) * ci: Switch auth used to publish kona prestates. * ci: Only publish kona prestates on push to develop, not scheduled builds. 
--- .circleci/continue/rust-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index 36a75f1ba5181..913f45628fe15 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -1275,6 +1275,7 @@ workflows: when: or: - equal: ["develop", <>] + - equal: ["webhook", << pipeline.trigger_source >>] # Only trigger on push to develop, not scheduled runs jobs: - kona-publish-prestate-artifacts: name: kona-publish-<> @@ -1283,4 +1284,4 @@ workflows: version: ["kona-client", "kona-client-int"] context: - circleci-repo-readonly-authenticated-github-token - - oplabs-gcr + - oplabs-network-optimism-io-bucket From ad286217ab7ce5e702aa3ef13b5c8092b05cbd78 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Mon, 23 Feb 2026 08:29:34 -0500 Subject: [PATCH 012/201] ci: fix kona-publish-prestates triggering on scheduled builds (#19270) The workflow used `or` logic, causing it to run on any scheduled pipeline with branch=develop or any webhook push to any branch. Change to `and` so it only fires on webhook pushes to develop. 
Co-authored-by: Claude Sonnet 4.6 --- .circleci/continue/rust-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index 913f45628fe15..8565ce0c5afc0 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -1273,7 +1273,7 @@ workflows: # Kona publish prestate artifacts - on push to develop kona-publish-prestates: when: - or: + and: - equal: ["develop", <>] - equal: ["webhook", << pipeline.trigger_source >>] # Only trigger on push to develop, not scheduled runs jobs: From 608e7086f5c6addc32128d963230344e7de80b6e Mon Sep 17 00:00:00 2001 From: "devin-ai-integration[bot]" <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Mon, 23 Feb 2026 12:18:59 -0500 Subject: [PATCH 013/201] test(contracts): improve DelayedWETH test coverage with fuzz tests (#19275) * test(contracts): improve DelayedWETH test coverage with fuzz tests - Convert unlock tests to fuzz: testFuzz_unlock_once_succeeds, testFuzz_unlock_twice_succeeds - Convert withdraw success tests to fuzz: testFuzz_withdraw_whileUnlocked_succeeds, testFuzz_withdraw_withdrawFromWhileUnlocked_succeeds - Convert hold tests to fuzz: testFuzz_hold_byOwner_succeeds, testFuzz_hold_withoutAmount_succeeds, testFuzz_hold_byNonOwner_fails - Convert recover tests to fuzz: testFuzz_recover_byNonOwner_fails, testFuzz_recover_moreThanBalance_succeeds - Add testFuzz_recover_partialAmount_succeeds for _wad < balance branch - Add testFuzz_hold_withoutAmount_byNonOwner_fails for hold(address) non-owner access control - Add DelayedWETH_Version_Test with SemverComp.parse validation * fix(test): rename hold test to satisfy 4-part naming convention --------- Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> --- .../test/dispute/DelayedWETH.t.sol | 184 +++++++++++++----- 1 file changed, 134 insertions(+), 50 deletions(-) diff --git 
a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol index 15ec4e7c2d739..5aef5671d3e8a 100644 --- a/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol +++ b/packages/contracts-bedrock/test/dispute/DelayedWETH.t.sol @@ -7,6 +7,7 @@ import { CommonTest } from "test/setup/CommonTest.sol"; // Libraries import { ForgeArtifacts, StorageSlot } from "scripts/libraries/ForgeArtifacts.sol"; import { Burn } from "src/libraries/Burn.sol"; +import { SemverComp } from "src/libraries/SemverComp.sol"; import "src/dispute/lib/Types.sol"; import "src/dispute/lib/Errors.sol"; @@ -116,56 +117,71 @@ contract DelayedWETH_Initialize_Test is DelayedWETH_TestInit { /// @notice Tests the `unlock` function of the `DelayedWETH` contract. contract DelayedWETH_Unlock_Test is DelayedWETH_TestInit { /// @notice Tests that unlocking once is successful. - function test_unlock_once_succeeds() public { - delayedWeth.unlock(alice, 1 ether); + /// @param _wad Amount of WETH to unlock. + function testFuzz_unlock_once_succeeds(uint256 _wad) public { + delayedWeth.unlock(alice, _wad); (uint256 amount, uint256 timestamp) = delayedWeth.withdrawals(address(this), alice); - assertEq(amount, 1 ether); + assertEq(amount, _wad); assertEq(timestamp, block.timestamp); } - /// @notice Tests that unlocking twice is successful and timestamp/amount is updated. - function test_unlock_twice_succeeds() public { + /// @notice Tests that unlocking twice is successful + /// and timestamp/amount is updated. + /// @param _wad1 First unlock amount. + /// @param _wad2 Second unlock amount. + /// @param _timeDelta Time between unlocks. + function testFuzz_unlock_twice_succeeds(uint256 _wad1, uint256 _wad2, uint256 _timeDelta) public { + // Bound to prevent overflow on addition. + _wad1 = bound(_wad1, 0, type(uint128).max); + _wad2 = bound(_wad2, 0, type(uint128).max); + _timeDelta = bound(_timeDelta, 1, type(uint128).max); + // Unlock once. 
uint256 ts = block.timestamp; - delayedWeth.unlock(alice, 1 ether); + delayedWeth.unlock(alice, _wad1); (uint256 amount1, uint256 timestamp1) = delayedWeth.withdrawals(address(this), alice); - assertEq(amount1, 1 ether); + assertEq(amount1, _wad1); assertEq(timestamp1, ts); // Go forward in time. - vm.warp(ts + 1); + vm.warp(ts + _timeDelta); // Unlock again works. - delayedWeth.unlock(alice, 1 ether); + delayedWeth.unlock(alice, _wad2); (uint256 amount2, uint256 timestamp2) = delayedWeth.withdrawals(address(this), alice); - assertEq(amount2, 2 ether); - assertEq(timestamp2, ts + 1); + assertEq(amount2, _wad1 + _wad2); + assertEq(timestamp2, ts + _timeDelta); } } /// @title DelayedWETH_Withdraw_Test /// @notice Tests the `withdraw` function of the `DelayedWETH` contract. contract DelayedWETH_Withdraw_Test is DelayedWETH_TestInit { - /// @notice Tests that withdrawing while unlocked and delay has passed is successful. - function test_withdraw_whileUnlocked_succeeds() public { + /// @notice Tests that withdrawing while unlocked and + /// delay has passed is successful. + /// @param _wad Amount of WETH to withdraw. + function testFuzz_withdraw_whileUnlocked_succeeds(uint256 _wad) public { + _wad = bound(_wad, 0, type(uint192).max); + // Deposit some WETH. + vm.deal(alice, _wad); vm.prank(alice); - delayedWeth.deposit{ value: 1 ether }(); + delayedWeth.deposit{ value: _wad }(); uint256 balance = address(alice).balance; // Unlock the withdrawal. vm.prank(alice); - delayedWeth.unlock(alice, 1 ether); + delayedWeth.unlock(alice, _wad); // Wait for the delay. vm.warp(block.timestamp + delayedWeth.delay() + 1); // Withdraw the WETH. 
vm.expectEmit(true, true, false, false); - emit Withdrawal(address(alice), 1 ether); + emit Withdrawal(address(alice), _wad); vm.prank(alice); - delayedWeth.withdraw(1 ether); - assertEq(address(alice).balance, balance + 1 ether); + delayedWeth.withdraw(_wad); + assertEq(address(alice).balance, balance + _wad); } /// @notice Tests that withdrawing when unlock was not called fails. @@ -248,26 +264,31 @@ contract DelayedWETH_Withdraw_Test is DelayedWETH_TestInit { delayedWeth.withdraw(1 ether); } - /// @notice Tests that withdrawing while unlocked and delay has passed is successful. - function test_withdraw_withdrawFromWhileUnlocked_succeeds() public { + /// @notice Tests that withdrawing with sub-account + /// while unlocked and delay has passed succeeds. + /// @param _wad Amount of WETH to withdraw. + function testFuzz_withdraw_withdrawFromWhileUnlocked_succeeds(uint256 _wad) public { + _wad = bound(_wad, 0, type(uint192).max); + // Deposit some WETH. + vm.deal(alice, _wad); vm.prank(alice); - delayedWeth.deposit{ value: 1 ether }(); + delayedWeth.deposit{ value: _wad }(); uint256 balance = address(alice).balance; // Unlock the withdrawal. vm.prank(alice); - delayedWeth.unlock(alice, 1 ether); + delayedWeth.unlock(alice, _wad); // Wait for the delay. vm.warp(block.timestamp + delayedWeth.delay() + 1); // Withdraw the WETH. vm.expectEmit(true, true, false, false); - emit Withdrawal(address(alice), 1 ether); + emit Withdrawal(address(alice), _wad); vm.prank(alice); - delayedWeth.withdraw(alice, 1 ether); - assertEq(address(alice).balance, balance + 1 ether); + delayedWeth.withdraw(alice, _wad); + assertEq(address(alice).balance, balance + _wad); } /// @notice Tests that withdrawing when unlock was not called fails. @@ -386,33 +407,67 @@ contract DelayedWETH_Recover_Test is DelayedWETH_TestInit { } /// @notice Tests that recovering WETH by non-owner fails. - function test_recover_byNonOwner_fails() public { - // Pretend to be a non-owner. 
- vm.prank(alice); + /// @param _sender Random address for access control. + function testFuzz_recover_byNonOwner_fails(address _sender) public { + vm.assume(_sender != proxyAdminOwner); // Recover fails. vm.expectRevert("DelayedWETH: not owner"); + vm.prank(_sender); delayedWeth.recover(1 ether); } - /// @notice Tests that recovering more than the balance recovers what it can. - function test_recover_moreThanBalance_succeeds() public { + /// @notice Tests that recovering more than the balance + /// recovers what it can. + /// @param _balance Contract balance. + /// @param _extra Extra amount above balance. + function testFuzz_recover_moreThanBalance_succeeds(uint256 _balance, uint256 _extra) public { + _balance = bound(_balance, 0, type(uint128).max); + _extra = bound(_extra, 1, type(uint128).max); + uint256 wad = _balance + _extra; + // Mock owner to return alice. vm.mockCall(address(proxyAdmin), abi.encodeCall(IProxyAdmin.owner, ()), abi.encode(alice)); - // Give the contract some WETH to recover. - vm.deal(address(delayedWeth), 0.5 ether); + // Give the contract some ETH to recover. + vm.deal(address(delayedWeth), _balance); // Record the initial balance. uint256 initialBalance = address(alice).balance; // Recover the WETH. vm.prank(alice); - delayedWeth.recover(1 ether); + delayedWeth.recover(wad); - // Verify the WETH was recovered. + // Verify capped at actual balance. assertEq(address(delayedWeth).balance, 0); - assertEq(address(alice).balance, initialBalance + 0.5 ether); + assertEq(address(alice).balance, initialBalance + _balance); + } + + /// @notice Tests that recovering less than the balance + /// sends the exact requested amount. + /// @param _balance Contract balance. + /// @param _wad Amount to recover (less than balance). + function testFuzz_recover_partialAmount_succeeds(uint256 _balance, uint256 _wad) public { + _balance = bound(_balance, 1, type(uint128).max); + _wad = bound(_wad, 0, _balance - 1); + + // Mock owner to return alice. 
+ vm.mockCall(address(proxyAdmin), abi.encodeCall(IProxyAdmin.owner, ()), abi.encode(alice)); + + // Give the contract some ETH to recover. + vm.deal(address(delayedWeth), _balance); + + // Record the initial balance. + uint256 initialBalance = address(alice).balance; + + // Recover partial amount. + vm.prank(alice); + delayedWeth.recover(_wad); + + // Verify exact amount was recovered. + assertEq(address(delayedWeth).balance, _balance - _wad); + assertEq(address(alice).balance, initialBalance + _wad); } /// @notice Tests that recover reverts when recipient reverts. @@ -437,42 +492,48 @@ contract DelayedWETH_Recover_Test is DelayedWETH_TestInit { /// @notice Tests the `hold` function of the `DelayedWETH` contract. contract DelayedWETH_Hold_Test is DelayedWETH_TestInit { /// @notice Tests that holding WETH succeeds. - function test_hold_byOwner_succeeds() public { - uint256 amount = 1 ether; + /// @param _wad Amount of WETH to hold. + function testFuzz_hold_byOwner_succeeds(uint256 _wad) public { + _wad = bound(_wad, 0, type(uint192).max); // Pretend to be alice and deposit some WETH. + vm.deal(alice, _wad); vm.prank(alice); - delayedWeth.deposit{ value: amount }(); + delayedWeth.deposit{ value: _wad }(); // Get our balance before. uint256 initialBalance = delayedWeth.balanceOf(address(proxyAdminOwner)); // Hold some WETH. vm.expectEmit(true, true, true, false); - emit Approval(alice, address(proxyAdminOwner), amount); + emit Approval(alice, address(proxyAdminOwner), _wad); vm.prank(proxyAdminOwner); - delayedWeth.hold(alice, amount); + delayedWeth.hold(alice, _wad); // Get our balance after. uint256 finalBalance = delayedWeth.balanceOf(address(proxyAdminOwner)); // Verify the transfer. - assertEq(finalBalance, initialBalance + amount); + assertEq(finalBalance, initialBalance + _wad); } - function test_hold_withoutAmount_succeeds() public { - uint256 amount = 1 ether; + /// @notice Tests that holding all WETH without + /// specifying amount succeeds. 
+ /// @param _wad Amount of WETH to deposit and hold. + function testFuzz_hold_withoutAmount_succeeds(uint256 _wad) public { + _wad = bound(_wad, 0, type(uint192).max); // Pretend to be alice and deposit some WETH. + vm.deal(alice, _wad); vm.prank(alice); - delayedWeth.deposit{ value: amount }(); + delayedWeth.deposit{ value: _wad }(); // Get our balance before. uint256 initialBalance = delayedWeth.balanceOf(address(proxyAdminOwner)); - // Hold some WETH. + // Hold all WETH. vm.expectEmit(true, true, true, false); - emit Approval(alice, address(proxyAdminOwner), amount); + emit Approval(alice, address(proxyAdminOwner), _wad); vm.prank(proxyAdminOwner); delayedWeth.hold(alice); // without amount parameter @@ -480,16 +541,39 @@ contract DelayedWETH_Hold_Test is DelayedWETH_TestInit { uint256 finalBalance = delayedWeth.balanceOf(address(proxyAdminOwner)); // Verify the transfer. - assertEq(finalBalance, initialBalance + amount); + assertEq(finalBalance, initialBalance + _wad); } /// @notice Tests that holding WETH by non-owner fails. - function test_hold_byNonOwner_fails() public { - // Pretend to be a non-owner. - vm.prank(alice); + /// @param _sender Random address for access control. + function testFuzz_hold_byNonOwner_fails(address _sender) public { + vm.assume(_sender != proxyAdminOwner); // Hold fails. vm.expectRevert("DelayedWETH: not owner"); + vm.prank(_sender); delayedWeth.hold(bob, 1 ether); } + + /// @notice Tests that holding all WETH by non-owner + /// using the single-arg overload fails. + /// @param _sender Random address for access control. + function testFuzz_hold_noAmountNonOwner_fails(address _sender) public { + vm.assume(_sender != proxyAdminOwner); + + // Hold fails. + vm.expectRevert("DelayedWETH: not owner"); + vm.prank(_sender); + delayedWeth.hold(bob); + } +} + +/// @title DelayedWETH_Version_Test +/// @notice Tests the `version` function of the +/// `DelayedWETH` contract. 
+contract DelayedWETH_Version_Test is DelayedWETH_TestInit { + /// @notice Tests that the version string is valid semver. + function test_version_validFormat_succeeds() external view { + SemverComp.parse(delayedWeth.version()); + } } From fbaf0b0d0264a898159a7bb6bbe4aaa760748dcd Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Mon, 23 Feb 2026 13:55:57 -0500 Subject: [PATCH 014/201] ci: Remove the cannon-kona-host job (#19279) kona-host is built in the kona-build-release job already. --- .circleci/continue/main.yml | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index a6b42c9f4cc21..ca7333692fc63 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -3194,15 +3194,6 @@ workflows: ignore-dirs: ./packages/contracts-bedrock/lib context: - circleci-repo-readonly-authenticated-github-token - # Acceptance test jobs (formerly in separate acceptance-tests workflow) - - rust-build-binary: &cannon-kona-host - name: cannon-kona-host - directory: rust - profile: "release" - binary: "kona-host" - save_cache: true - context: - - circleci-repo-readonly-authenticated-github-token - rust-build-binary: &kona-build-release name: kona-build-release directory: rust @@ -3246,7 +3237,6 @@ workflows: requires: - contracts-bedrock-build - cannon-prestate - - cannon-kona-host - rust-binaries-for-sysgo - go-binaries-for-sysgo # Generate flaky test report From c60cdb1cf997baa4e06b078c62e721f1679c1dfc Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Mon, 23 Feb 2026 19:02:18 -0500 Subject: [PATCH 015/201] ci: disable incremental compilation and bump rust cache version (#19278) * ci: disable incremental compilation and bump rust cache version - Set CARGO_INCREMENTAL=0 in rust-setup-env to disable incremental compilation for all Rust CI jobs, reducing cache size and improving reproducibility - Bump rust build cache version from 15 to 16 to invalidate stale caches 
- Use a YAML anchor in main.yml so the cache version only needs to be set once Co-Authored-By: Claude Sonnet 4.6 * chore(ci): remove stale ci jobs * ci: pin nightly toolchain and add weekly bump job - Add c-rust-nightly-version pipeline parameter (pinned to nightly-2025-11-01) to prevent surprise breakages from transitive deps incompatible with the latest nightly (e.g. shellexpand-3.1.1) - Update rust-install-toolchain to link a pinned nightly as "nightly" so existing `cargo +nightly` commands keep working - Replace all hardcoded `toolchain_version: nightly` with the parameter - Add rust-bump-nightly-pin job that opens a PR each week bumping the pin to the latest available nightly - Add scheduled-rust-nightly-bump workflow triggering on build_weekly Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Sonnet 4.6 --- .circleci/continue/main.yml | 10 +-- .circleci/continue/rust-ci.yml | 105 +++++++------------------------- .circleci/rust-nightly-bump.yml | 63 +++++++++++++++++++ rust/justfile | 17 ++++-- 4 files changed, 102 insertions(+), 93 deletions(-) create mode 100644 .circleci/rust-nightly-bump.yml diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index ca7333692fc63..44ca358d134e6 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -452,7 +452,7 @@ commands: version: description: "Version of the cache" type: string - default: "15" + default: &rust-cache-version "16" profile: description: "Profile to restore the cache for" type: string @@ -486,7 +486,7 @@ commands: version: description: "Version of the cache" type: string - default: "15" + default: *rust-cache-version profile: description: "Profile to save the cache for" type: string @@ -527,7 +527,7 @@ commands: version: description: "Version of the cache" type: string - default: "15" + default: *rust-cache-version profile: description: "Profile to restore the cache for" type: string @@ -563,7 +563,7 @@ commands: version: description: "Version of 
the cache" type: string - default: "15" + default: *rust-cache-version profile: description: "Profile to build the binary with" type: string @@ -659,7 +659,7 @@ jobs: version: description: "Version of the cache" type: string - default: "15" + default: *rust-cache-version profile: description: "Profile to build the binary with" type: string diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index 8565ce0c5afc0..ddb3bca44a241 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -61,7 +61,10 @@ commands: - run: name: Install Rust toolchain (<< parameters.channel >>) command: | - rustup default << parameters.toolchain_version >> + TOOLCHAIN="<< parameters.toolchain_version >>" + + rustup toolchain install "$TOOLCHAIN" + rustup default "$TOOLCHAIN" if [ -n "<< parameters.components >>" ]; then rustup component add << parameters.components >> @@ -79,6 +82,7 @@ commands: echo "export CARGO_HOME=${MISE_CARGO_HOME}" >> "$BASH_ENV" echo "export RUSTUP_HOME=${MISE_RUSTUP_HOME}" >> "$BASH_ENV" echo "source ${MISE_CARGO_HOME}/env" >> "$BASH_ENV" + echo "export CARGO_INCREMENTAL=0" >> "$BASH_ENV" - run: name: Configure Rust binary paths (sysgo) command: | @@ -117,7 +121,7 @@ commands: version: description: "Version of the cache" type: string - default: "15" + default: "16" profile: description: "Profile to restore the cache for" type: string @@ -324,7 +328,7 @@ jobs: command: description: "Format check command to run" type: string - default: "cargo +nightly fmt --all --check" + default: "just fmt-check" docker: - image: <> resource_class: medium @@ -334,10 +338,10 @@ jobs: - rust-prepare-and-restore-cache: &fmt-cache-args directory: <> prefix: <>-fmt - - rust-install-toolchain: - channel: nightly - toolchain_version: nightly - components: rustfmt + - run: + name: Install nightly toolchain + working_directory: <> + command: just install-nightly - run: name: Check formatting working_directory: <> @@ -514,11 +518,7 @@ jobs: 
command: description: "Doc build command to run" type: string - default: "cargo +nightly doc --workspace --all-features --no-deps --document-private-items" - rustdocflags: - description: "RUSTDOCFLAGS environment variable" - type: string - default: "--cfg docsrs -D warnings --show-type-layout --generate-link-to-definition -Zunstable-options" + default: "just lint-docs" docker: - image: <> resource_class: xlarge @@ -529,15 +529,14 @@ jobs: directory: <> prefix: <>-docs features: "all" - - rust-install-toolchain: - channel: nightly - toolchain_version: nightly + - run: + name: Install nightly toolchain + working_directory: <> + command: just install-nightly - run: name: Build documentation working_directory: <> no_output_timeout: 30m - environment: - RUSTDOCFLAGS: <> command: <> - rust-save-build-cache: *docs-cache-args @@ -633,9 +632,10 @@ jobs: directory: <> prefix: <>-udeps profile: "release" - - rust-install-toolchain: - channel: nightly - toolchain_version: nightly + - run: + name: Install nightly toolchain + working_directory: <> + command: just install-nightly - install-cargo-binstall - run: name: Install cargo-udeps @@ -756,53 +756,6 @@ jobs: cargo run --bin op-reth --features "dev" --manifest-path rust/op-reth/bin/Cargo.toml -- test-vectors compact --read - rust-save-build-cache: *op-reth-compact-cache - # OP-Reth Windows cross-compile check - op-reth-windows-check: - docker: - - image: <> - resource_class: xlarge - steps: - - utils/checkout-with-mise: - checkout-method: blobless - - rust-prepare-and-restore-cache: &op-reth-windows-cache - directory: rust - prefix: op-reth-windows - profile: debug - - run: - name: Install mingw-w64 - command: sudo apt-get update && sudo apt-get install -y mingw-w64 - - run: - name: Check OP-Reth Windows build - working_directory: rust - no_output_timeout: 40m - command: just --justfile op-reth/justfile check-windows - - rust-save-build-cache: *op-reth-windows-cache - - # 
-------------------------------------------------------------------------- - # Op-Alloy crate-specific jobs - # -------------------------------------------------------------------------- - # Op-Alloy cfg check - op-alloy-cfg-check: - docker: - - image: <> - resource_class: xlarge - steps: - - utils/checkout-with-mise: - checkout-method: blobless - - rust-prepare-and-restore-cache: &op-alloy-cfg-check-cache - directory: rust - prefix: op-alloy-cfg-check - - rust-install-toolchain: - channel: nightly - toolchain_version: nightly - - run: - name: Run cfg check - working_directory: rust - no_output_timeout: 40m - command: | - just --justfile op-alloy/Justfile check - - rust-save-build-cache: *op-alloy-cfg-check-cache - # -------------------------------------------------------------------------- # Kona crate-specific jobs # -------------------------------------------------------------------------- @@ -1200,9 +1153,6 @@ workflows: - op-reth-compact-codec: context: *rust-ci-context - - op-reth-windows-check: - context: *rust-ci-context - - rust-ci-cargo-tests: name: op-reth-integration-tests directory: rust @@ -1218,12 +1168,6 @@ workflows: cache_profile: debug context: *rust-ci-context - # ----------------------------------------------------------------------- - # Op-Alloy crate-specific jobs - # ----------------------------------------------------------------------- - - op-alloy-cfg-check: - context: *rust-ci-context - # ----------------------------------------------------------------------- # Kona crate-specific jobs (lint, FPVM builds, benches, coverage) # ----------------------------------------------------------------------- @@ -1262,14 +1206,6 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - scheduled-kona-sync: - when: - equal: [build_weekly, <>] - jobs: - - kona-update-monorepo: - context: - - circleci-repo-readonly-authenticated-github-token - # Kona publish prestate artifacts - on push to develop kona-publish-prestates: when: @@ 
-1285,3 +1221,4 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - oplabs-network-optimism-io-bucket + diff --git a/.circleci/rust-nightly-bump.yml b/.circleci/rust-nightly-bump.yml new file mode 100644 index 0000000000000..6d18860dd704c --- /dev/null +++ b/.circleci/rust-nightly-bump.yml @@ -0,0 +1,63 @@ +version: 2.1 + +# Scheduled workflow to bump the pinned nightly Rust toolchain version. +# Runs daily and opens a PR if the pin in rust/justfile is out of date. + +jobs: + bump-nightly: + docker: + - image: cimg/base:2024.01 + steps: + - checkout + + - run: + name: Compute nightly versions + command: | + TODAY=$(date -u +%Y-%m-%d) + LATEST="nightly-${TODAY}" + CURRENT=$(grep -oE 'nightly-[0-9]{4}-[0-9]{2}-[0-9]{2}' rust/justfile | head -1) + + echo "Latest nightly: $LATEST" + echo "Current pin: $CURRENT" + + echo "export LATEST=$LATEST" >> "$BASH_ENV" + echo "export CURRENT=$CURRENT" >> "$BASH_ENV" + + - run: + name: Open PR if pin is outdated + command: | + if [ "$LATEST" = "$CURRENT" ]; then + echo "Pin is already up to date ($CURRENT). Nothing to do." + exit 0 + fi + + BRANCH="ci/bump-rust-nightly-${LATEST}" + + # Authenticate git push via GITHUB_TOKEN_GOVERNANCE + git remote set-url origin "https://x-access-token:${GITHUB_TOKEN_GOVERNANCE}@github.com/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}.git" + git checkout -b "$BRANCH" + + sed -i "s/NIGHTLY := \"${CURRENT}\"/NIGHTLY := \"${LATEST}\"/" rust/justfile + git add rust/justfile + git commit -m "ci: bump rust nightly pin to ${LATEST}" + git push origin "$BRANCH" + + GH_TOKEN="$GITHUB_TOKEN_GOVERNANCE" gh pr create \ + --title "ci: bump rust nightly pin to ${LATEST}" \ + --body "Automated daily bump of the pinned nightly Rust toolchain from \`${CURRENT}\` to \`${LATEST}\`. CI on this PR will validate the new toolchain compiles cleanly." \ + --base main \ + --label "ci" || echo "PR may already exist for this branch." 
+ +workflows: + scheduled-rust-nightly-bump: + triggers: + - schedule: + cron: "0 9 * * *" # 09:00 UTC daily + filters: + branches: + only: + - main + jobs: + - bump-nightly: + context: + - circleci-repo-optimism diff --git a/rust/justfile b/rust/justfile index 4b109a8643f59..d3a1c221223ec 100644 --- a/rust/justfile +++ b/rust/justfile @@ -1,5 +1,7 @@ set positional-arguments +NIGHTLY := "nightly-2026-02-20" + # Aliases alias t := test alias l := lint @@ -10,6 +12,12 @@ alias b := build default: @just --list +############################### Toolchain ############################ + +# Install the pinned nightly toolchain +install-nightly: + rustup toolchain install {{NIGHTLY}} --component rustfmt + ############################### Build ############################### # Build the workspace @@ -56,11 +64,11 @@ lint: fmt-check lint-clippy lint-docs # Check formatting (requires nightly) fmt-check: - cargo +nightly fmt --all -- --check + cargo +{{NIGHTLY}} fmt --all -- --check # Fix formatting (requires nightly) fmt-fix: - cargo +nightly fmt --all + cargo +{{NIGHTLY}} fmt --all # Run clippy lint-clippy: @@ -68,7 +76,8 @@ lint-clippy: # Lint Rust documentation lint-docs: - RUSTDOCFLAGS="-D warnings" cargo doc --workspace --no-deps --document-private-items + RUSTDOCFLAGS="--cfg docsrs -D warnings --show-type-layout --generate-link-to-definition -Zunstable-options" \ + cargo +{{NIGHTLY}} doc --workspace --all-features --no-deps --document-private-items ############################ no_std ################################# @@ -127,7 +136,7 @@ bench: # Check for unused dependencies (requires nightly + cargo-udeps) check-udeps: - cargo +nightly udeps --release --workspace --all-features --all-targets + cargo +{{NIGHTLY}} udeps --release --workspace --all-features --all-targets # Run cargo hack for feature powerset checking # shuffle: "true" to shuffle package order before partitioning (spreads heavy/light crates more evenly) From e5850e52c7b861ec38676d0b4ab97954eafca363 Mon Sep 
17 00:00:00 2001 From: Inphi Date: Mon, 23 Feb 2026 19:41:08 -0500 Subject: [PATCH 016/201] proofs: Port TestInteropFaultProofs_UnsafeProposal test to devstack (#19254) * proofs: Port TestInteropFaultProofs_UnsafeProposal test to devstack * Fix unsafe proposal test to deterministically order safe heads --- .../preinterop/interop_fault_proofs_test.go | 6 ++ .../superfaultproofs/superfaultproofs.go | 100 +++++++++++++++++- 2 files changed, 104 insertions(+), 2 deletions(-) diff --git a/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go b/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go index 7795ec5f69843..5d0630b4a4f1c 100644 --- a/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go +++ b/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go @@ -19,3 +19,9 @@ func TestPreinteropFaultProofs_TraceExtensionActivation(gt *testing.T) { sys := presets.NewSimpleInterop(t) sfp.RunTraceExtensionActivationTest(t, sys) } + +func TestPreinteropFaultProofs_UnsafeProposal(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewSimpleInterop(t) + sfp.RunUnsafeProposalTest(t, sys) +} diff --git a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go index 2a88c105873ef..c84d099ee1796 100644 --- a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go +++ b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go @@ -39,6 +39,7 @@ type chain struct { Cfg *rollup.Config Rollup apis.RollupClient EL *dsl.L2ELNode + CLNode *dsl.L2CLNode Batcher *dsl.L2Batcher } @@ -56,8 +57,8 @@ type transitionTest struct { // orderedChains returns the two interop chains sorted by chain ID. 
func orderedChains(sys *presets.SimpleInterop) []*chain { chains := []*chain{ - {ID: sys.L2ChainA.ChainID(), Cfg: sys.L2ChainA.Escape().RollupConfig(), Rollup: sys.L2CLA.Escape().RollupAPI(), EL: sys.L2ELA, Batcher: sys.L2BatcherA}, - {ID: sys.L2ChainB.ChainID(), Cfg: sys.L2ChainB.Escape().RollupConfig(), Rollup: sys.L2CLB.Escape().RollupAPI(), EL: sys.L2ELB, Batcher: sys.L2BatcherB}, + {ID: sys.L2ChainA.ChainID(), Cfg: sys.L2ChainA.Escape().RollupConfig(), Rollup: sys.L2CLA.Escape().RollupAPI(), EL: sys.L2ELA, CLNode: sys.L2CLA, Batcher: sys.L2BatcherA}, + {ID: sys.L2ChainB.ChainID(), Cfg: sys.L2ChainB.Escape().RollupConfig(), Rollup: sys.L2CLB.Escape().RollupAPI(), EL: sys.L2ELB, CLNode: sys.L2CLB, Batcher: sys.L2BatcherB}, } slices.SortFunc(chains, func(a, b *chain) int { return a.ID.Cmp(b.ID) }) return chains @@ -448,6 +449,101 @@ func RunTraceExtensionActivationTest(t devtest.T, sys *presets.SimpleInterop) { } } +// RunUnsafeProposalTest verifies that proposing an unsafe block (one without +// batch data on L1) is correctly identified as invalid. +func RunUnsafeProposalTest(t devtest.T, sys *presets.SimpleInterop) { + t.Require().NotNil(sys.SuperRoots, "supernode is required for this test") + + chains := orderedChains(sys) + t.Require().Len(chains, 2, "expected exactly 2 interop chains") + + // Stop chains[0]'s batcher first so its safe head stalls while chains[1]'s + // batcher continues to advance. This deterministically guarantees chains[0] + // has the lowest safe head — which is required because: + // 1. Step 0 in the super root trace transitions chains[0]. We need step 0 + // to produce InvalidTransition (no batch data for chains[0]'s block). + // 2. The agreed prestate at (endTimestamp - 1) must be verified for ALL + // chains. Using chains[0]'s stalled safe head as the anchor ensures + // that timestamp maps to a block at or below every chain's safe head. 
+ chains[0].Batcher.Stop() + defer chains[0].Batcher.Start() + awaitSafeHeadsStalled(t, chains[0].CLNode) + + stalledStatus, err := chains[0].Rollup.SyncStatus(t.Ctx()) + t.Require().NoError(err) + stalledSafeHead := stalledStatus.SafeL2.Number + + // Wait for chains[1]'s safe head to surpass chains[0]'s stalled safe head. + // chains[1]'s batcher is still running, so this is guaranteed to happen. + // We need strictly greater so that chains[1]'s block at endTimestamp + // (= TimestampForBlock(stalledSafeHead + 1)) is safe. + t.Require().Eventually(func() bool { + status1, err := chains[1].Rollup.SyncStatus(t.Ctx()) + return err == nil && status1.SafeL2.Number > stalledSafeHead + }, 2*time.Minute, 2*time.Second, "chains[1] safe head should advance past chains[0]'s stalled safe head") + + chains[1].Batcher.Stop() + defer chains[1].Batcher.Start() + awaitSafeHeadsStalled(t, chains[1].CLNode) + + endTimestamp := chains[0].Cfg.TimestampForBlock(stalledSafeHead + 1) + agreedTimestamp := endTimestamp - 1 + + // Ensure chains[0] has produced the target block as unsafe. + target, err := chains[0].Cfg.TargetBlockNumber(endTimestamp) + t.Require().NoError(err) + chains[0].EL.Reached(eth.Unsafe, target, 60) + + sys.SuperRoots.AwaitValidatedTimestamp(agreedTimestamp) + resp := sys.SuperRoots.SuperRootAtTimestamp(agreedTimestamp) + l1Head := resp.CurrentL1 + + startTimestamp := agreedTimestamp + agreedSuperRoot := superRootAtTimestamp(t, chains, agreedTimestamp) + agreedClaim := agreedSuperRoot.Marshal() + + // Disputed claim: transition state with step 1 but no optimistic blocks. + // This claims a transition happened, but since chains[0]'s block at + // endTimestamp is only unsafe (no batch data on L1), the correct answer + // is InvalidTransition. 
+ disputedClaim := marshalTransition(agreedSuperRoot, 1) + + tests := []*transitionTest{ + { + Name: "ProposedUnsafeBlock-NotValid", + AgreedClaim: agreedClaim, + DisputedClaim: disputedClaim, + DisputedTraceIndex: 0, + L1Head: l1Head, + ClaimTimestamp: endTimestamp, + ExpectValid: false, + }, + { + Name: "ProposedUnsafeBlock-ShouldBeInvalid", + AgreedClaim: agreedClaim, + DisputedClaim: super.InvalidTransition, + DisputedTraceIndex: 0, + L1Head: l1Head, + ClaimTimestamp: endTimestamp, + ExpectValid: true, + }, + } + + challengerCfg := sys.L2ChainA.Escape().L2Challengers()[0].Config() + gameDepth := sys.DisputeGameFactory().GameImpl(gameTypes.SuperCannonKonaGameType).SplitDepth() + + for _, test := range tests { + t.Run(test.Name+"-fpp", func(t devtest.T) { + runKonaInteropProgram(t, challengerCfg.CannonKona, test.L1Head.Hash, + test.AgreedClaim, crypto.Keccak256Hash(test.DisputedClaim), + test.ClaimTimestamp, test.ExpectValid) + }) + t.Run(test.Name+"-challenger", func(t devtest.T) { + runChallengerProviderTest(t, sys.SuperRoots.QueryAPI(), gameDepth, startTimestamp, test.ClaimTimestamp, test) + }) + } +} + // RunSuperFaultProofTest encapsulates the basic super fault proof test flow. 
func RunSuperFaultProofTest(t devtest.T, sys *presets.SimpleInterop) { t.Require().NotNil(sys.SuperRoots, "supernode is required for this test") From 606793132710967158a8888d26702241f794fca1 Mon Sep 17 00:00:00 2001 From: Ashitaka <96790496+ashitakah@users.noreply.github.com> Date: Tue, 24 Feb 2026 15:06:10 +0100 Subject: [PATCH 017/201] sc-feat: policy engine staking ordering (#19192) * feat: policy engine staking * feat: slots tests * fix: pre-pr * fix: linter * fix: linter * feat: inmutable contract * fix: check link to self * fix: natspec * fix: make * feat: improving code * feat: improving code * fix: lint * fix: comments * fix: comments * feat: improving tests * fix: linter * fix: linter * style: formatting * style: formatting * style: formatting * feat polish improvments and comments * feat: polish and comments * feat: sender and fuzz * fix: bugs and sender * fix: natspec * feat: policy engine refactor (#867) * feat: add V2 policy engine implementation * chore: undo foundry.toml modification * fix: stake function available if not allowlisted * refactor: rename PolicyEngineStakingV2 -> PolicyEngineStaking * refactor: remove stake function, add allowlist check when same beneficiary * refactor: make peData functions arg uint128 * chore: add comments * test: add fuzz testing * test: max approval on setup * refactor: remove helper function * chore: make link not puasable * feat: rename functions, add token to constructor * feat: add deployment script * fix: wrong foundry.toml * fix: pr (#868) * chore: make owner address public * refactor: rename data->stakingData * docs: natspec * refactor: improve checks * fix: pre-pr * fix: foundry.toml * fix: comments and link * chore: bump solidity version * feat: add named members in mapping * fix: revert contract creation on zero address * refactor: reduce parameters size * chore: undo unnecessary casting * fix: revert on same beneficiary linking * perf: optimize stake() sstores * feat: add transferOwnership * refactor: 
update stakedAmount after decrease * chore: make change beneficiary pausable * feat: unlink after allowance revoked * refactor: remove linking concept and use beneficiary instead * docs: improve natspec * test: stake() after being revoked reverts * feat: add ISemver * fix: conflicts * refactor: improve var naming * test: transferOwnership * refactor; vars naming * chore: improve comments * chore downgrade pragma * fix: pre-pr * fix: wrong foundry.toml * chore: improve comments * fix: ci failing * fix: pre-pr * fix: self allowlist * feat: disable self-allowlist * docs: improve natspec --------- Co-authored-by: Chiin <77933451+0xChin@users.noreply.github.com> Co-authored-by: 0xOneTony <112496816+0xOneTony@users.noreply.github.com> Co-authored-by: OneTony --- packages/contracts-bedrock/foundry.toml | 2 +- .../staking/IPolicyEngineStaking.sol | 124 ++ .../deploy/DeployPolicyEngineStaking.s.sol | 26 + .../snapshots/abi/PolicyEngineStaking.json | 463 ++++++++ .../snapshots/semver-lock.json | 4 + .../storageLayout/PolicyEngineStaking.json | 37 + .../periphery/staking/PolicyEngineStaking.sol | 330 ++++++ .../staking/PolicyEngineStaking.t.sol | 1011 +++++++++++++++++ 8 files changed, 1996 insertions(+), 1 deletion(-) create mode 100644 packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol create mode 100644 packages/contracts-bedrock/scripts/deploy/DeployPolicyEngineStaking.s.sol create mode 100644 packages/contracts-bedrock/snapshots/abi/PolicyEngineStaking.json create mode 100644 packages/contracts-bedrock/snapshots/storageLayout/PolicyEngineStaking.json create mode 100644 packages/contracts-bedrock/src/periphery/staking/PolicyEngineStaking.sol create mode 100644 packages/contracts-bedrock/test/periphery/staking/PolicyEngineStaking.t.sol diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index 86de308acd713..f35e363b3bc6e 100644 --- a/packages/contracts-bedrock/foundry.toml +++ 
b/packages/contracts-bedrock/foundry.toml @@ -177,4 +177,4 @@ compilation_restrictions = [ src = 'test/kontrol/proofs' out = 'kout-proofs' test = 'test/kontrol/proofs' -script = 'test/kontrol/proofs' +script = 'test/kontrol/proofs' \ No newline at end of file diff --git a/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol b/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol new file mode 100644 index 0000000000000..2fdf901e89501 --- /dev/null +++ b/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; + +/// @title IPolicyEngineStaking +/// @notice Interface for the PolicyEngineStaking contract. +interface IPolicyEngineStaking is ISemver { + /// @notice Emitted when a user stakes OP tokens. + event Staked(address indexed account, uint128 amount); + + /// @notice Emitted when a user unstakes OP tokens. + event Unstaked(address indexed account, uint128 amount); + + /// @notice Emitted when a staker sets their beneficiary. + event BeneficiarySet(address indexed staker, address indexed beneficiary); + + /// @notice Emitted when a staker's beneficiary is removed (on change or full unstake). + event BeneficiaryRemoved(address indexed staker, address indexed previousBeneficiary); + + /// @notice Emitted when effective stake changes for an account. + event EffectiveStakeChanged(address indexed account, uint256 newEffectiveStake); + + /// @notice Emitted when a beneficiary updates their allowlist. + event BeneficiaryAllowlistUpdated(address indexed beneficiary, address indexed staker, bool allowed); + + /// @notice Emitted when staking is paused. + event Paused(); + + /// @notice Emitted when the staking is unpaused. 
+ event Unpaused(); + + /// @notice Emitted when ownership is transferred. + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + /// @notice Thrown when the caller is not the owner. + error PolicyEngineStaking_OnlyOwner(); + + /// @notice Thrown when the staking is paused. + error PolicyEngineStaking_Paused(); + + /// @notice Thrown when the amount is zero. + error PolicyEngineStaking_ZeroAmount(); + + /// @notice Thrown when the beneficiary address is zero. + error PolicyEngineStaking_ZeroBeneficiary(); + + /// @notice Thrown when the staker is not allowed to set the beneficiary. + error PolicyEngineStaking_NotAllowedToSetBeneficiary(); + + /// @notice Thrown when trying to operate with no stake. + error PolicyEngineStaking_NoStake(); + + /// @notice Thrown when trying to unstake more than the staked amount. + error PolicyEngineStaking_InsufficientStake(); + + /// @notice Thrown when a zero address is provided where it is not allowed. + error PolicyEngineStaking_ZeroAddress(); + + /// @notice Thrown when trying to change beneficiary to the current beneficiary. + error PolicyEngineStaking_SameBeneficiary(); + + /// @notice Thrown when trying to allowlist/disallow yourself. + error PolicyEngineStaking_SelfAllowlist(); + + /// @notice Returns the contract owner. + function owner() external view returns (address); + + /// @notice Transfers ownership of the contract to a new account. Only callable by owner. + /// @param _newOwner The address of the new owner. + function transferOwnership(address _newOwner) external; + + /// @notice Returns whether the contract is paused. + function paused() external view returns (bool); + + /// @notice Base storage slot for PE data mapping. Policy Engine reads from + /// keccak256(abi.encode(account, PE_DATA_SLOT)). + function PE_DATA_SLOT() external view returns (bytes32); + + /// @notice Returns Policy Engine data for an account. 
+ function peData(address _account) external view returns (uint128 effectiveStake_, uint128 lastUpdate_); + + /// @notice Returns allowlist entry for a beneficiary-staker pair. + function allowlist(address _beneficiary, address _staker) external view returns (bool allowed_); + + /// @notice Returns staking data for an account. + function stakingData(address _account) external view returns (uint128 stakedAmount_, address beneficiary_); + + /// @notice Returns the ERC20 token used for staking. + function stakingToken() external view returns (IERC20); + + /// @notice Pauses the contract. Only callable by owner. + function pause() external; + + /// @notice Unpauses the contract. Only callable by owner. + function unpause() external; + + /// @notice Stakes tokens and sets beneficiary atomically. + /// @param _amount The amount of tokens to stake. + /// @param _beneficiary Address that receives ordering power. Use msg.sender for self-attribution. + function stake(uint128 _amount, address _beneficiary) external; + + /// @notice Changes the beneficiary for existing stake. + /// @param _beneficiary New beneficiary address. + function changeBeneficiary(address _beneficiary) external; + + /// @notice Unstakes OP tokens. Supports partial and full unstake. + /// @param _amount The amount of OP tokens to unstake. + function unstake(uint128 _amount) external; + + /// @notice Sets whether a staker can set the caller as beneficiary. When disallowing, + /// if the staker's current beneficiary is the caller, their stake attribution is + /// moved back to the staker (beneficiary reset to self). + /// + /// @param _staker The staker address. + /// @param _allowed Whether the staker is allowed to set the caller as beneficiary. + function setAllowedStaker(address _staker, bool _allowed) external; + + /// @notice Batch sets allowlist for multiple stakers. + /// @param _stakers Array of staker addresses. 
+ /// @param _allowed Whether the stakers are allowed to set the caller as beneficiary. + function setAllowedStakers(address[] calldata _stakers, bool _allowed) external; +} diff --git a/packages/contracts-bedrock/scripts/deploy/DeployPolicyEngineStaking.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployPolicyEngineStaking.s.sol new file mode 100644 index 0000000000000..ed23671040f38 --- /dev/null +++ b/packages/contracts-bedrock/scripts/deploy/DeployPolicyEngineStaking.s.sol @@ -0,0 +1,26 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +import { Script } from "forge-std/Script.sol"; +import { console2 as console } from "forge-std/console2.sol"; + +import { PolicyEngineStaking } from "src/periphery/staking/PolicyEngineStaking.sol"; + +/// @title DeployPolicyEngineStaking +/// @notice Script used to deploy the PolicyEngineStaking contract. +contract DeployPolicyEngineStaking is Script { + /// @notice Deploys the PolicyEngineStaking contract. + /// @param _owner The address that can pause and unpause staking. + /// @param _token The ERC20 token used for staking. 
+ function run(address _owner, address _token) public returns (PolicyEngineStaking) { + require(_owner != address(0), "DeployPolicyEngineStaking: owner cannot be zero address"); + require(_token != address(0), "DeployPolicyEngineStaking: token cannot be zero address"); + + vm.broadcast(); + PolicyEngineStaking staking = new PolicyEngineStaking(_owner, _token); + + console.log("PolicyEngineStaking deployed at:", address(staking)); + + return staking; + } +} diff --git a/packages/contracts-bedrock/snapshots/abi/PolicyEngineStaking.json b/packages/contracts-bedrock/snapshots/abi/PolicyEngineStaking.json new file mode 100644 index 0000000000000..826dad9656439 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/PolicyEngineStaking.json @@ -0,0 +1,463 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "_ownerAddr", + "type": "address" + }, + { + "internalType": "address", + "name": "_token", + "type": "address" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "PE_DATA_SLOT", + "outputs": [ + { + "internalType": "bytes32", + "name": "", + "type": "bytes32" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "beneficiary", + "type": "address" + }, + { + "internalType": "address", + "name": "staker", + "type": "address" + } + ], + "name": "allowlist", + "outputs": [ + { + "internalType": "bool", + "name": "allowed", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_beneficiary", + "type": "address" + } + ], + "name": "changeBeneficiary", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "owner", + "outputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": 
"pause", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "paused", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "peData", + "outputs": [ + { + "internalType": "uint128", + "name": "effectiveStake", + "type": "uint128" + }, + { + "internalType": "uint128", + "name": "lastUpdate", + "type": "uint128" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_staker", + "type": "address" + }, + { + "internalType": "bool", + "name": "_allowed", + "type": "bool" + } + ], + "name": "setAllowedStaker", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address[]", + "name": "_stakers", + "type": "address[]" + }, + { + "internalType": "bool", + "name": "_allowed", + "type": "bool" + } + ], + "name": "setAllowedStakers", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint128", + "name": "_amount", + "type": "uint128" + }, + { + "internalType": "address", + "name": "_beneficiary", + "type": "address" + } + ], + "name": "stake", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "account", + "type": "address" + } + ], + "name": "stakingData", + "outputs": [ + { + "internalType": "uint128", + "name": "stakedAmount", + "type": "uint128" + }, + { + "internalType": "address", + "name": "beneficiary", + "type": "address" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "stakingToken", + "outputs": [ + { + "internalType": "contract IERC20", + "name": "", + "type": "address" + } + ], + 
"stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_newOwner", + "type": "address" + } + ], + "name": "transferOwnership", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "unpause", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "uint128", + "name": "_amount", + "type": "uint128" + } + ], + "name": "unstake", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "beneficiary", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "staker", + "type": "address" + }, + { + "indexed": false, + "internalType": "bool", + "name": "allowed", + "type": "bool" + } + ], + "name": "BeneficiaryAllowlistUpdated", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "staker", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "previousBeneficiary", + "type": "address" + } + ], + "name": "BeneficiaryRemoved", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "staker", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "beneficiary", + "type": "address" + } + ], + "name": "BeneficiarySet", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint256", + "name": "newEffectiveStake", + 
"type": "uint256" + } + ], + "name": "EffectiveStakeChanged", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "previousOwner", + "type": "address" + }, + { + "indexed": true, + "internalType": "address", + "name": "newOwner", + "type": "address" + } + ], + "name": "OwnershipTransferred", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "Paused", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount", + "type": "uint128" + } + ], + "name": "Staked", + "type": "event" + }, + { + "anonymous": false, + "inputs": [], + "name": "Unpaused", + "type": "event" + }, + { + "anonymous": false, + "inputs": [ + { + "indexed": true, + "internalType": "address", + "name": "account", + "type": "address" + }, + { + "indexed": false, + "internalType": "uint128", + "name": "amount", + "type": "uint128" + } + ], + "name": "Unstaked", + "type": "event" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_InsufficientStake", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_NoStake", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_NotAllowedToSetBeneficiary", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_OnlyOwner", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_Paused", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_SameBeneficiary", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_SelfAllowlist", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_ZeroAddress", + "type": "error" + }, + { + "inputs": [], + "name": "PolicyEngineStaking_ZeroAmount", + "type": "error" + }, + { + "inputs": [], + "name": 
"PolicyEngineStaking_ZeroBeneficiary", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 476ad492fac04..fc70d58213363 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -235,6 +235,10 @@ "initCodeHash": "0x3a82e248129d19764bb975bb79b48a982f077f33bb508480bf8d2ec1c0c9810d", "sourceCodeHash": "0x955bd0c9b47e43219865e4e92abf28d916c96de20cbdf2f94c8ab14d02083759" }, + "src/periphery/staking/PolicyEngineStaking.sol:PolicyEngineStaking": { + "initCodeHash": "0xc0c04b0dddbf7831bf5abfccc6a569d92f9b7ab0ec53278f6d1cf7113041d59d", + "sourceCodeHash": "0x998ddc9f24e3c85b1d588c263838261442edaad1dde5424fd55c2d4e1243592a" + }, "src/safe/DeputyPauseModule.sol:DeputyPauseModule": { "initCodeHash": "0x18422b48c4901ed6fd9338d76d3c5aecfff9a7add34b05c6e21c23d0011ed6bf", "sourceCodeHash": "0xd15f4bb43e81a10317902cd8e27394581a59df2656b130727eb67543c985c72e" diff --git a/packages/contracts-bedrock/snapshots/storageLayout/PolicyEngineStaking.json b/packages/contracts-bedrock/snapshots/storageLayout/PolicyEngineStaking.json new file mode 100644 index 0000000000000..186637cbf9d90 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/PolicyEngineStaking.json @@ -0,0 +1,37 @@ +[ + { + "bytes": "32", + "label": "peData", + "offset": 0, + "slot": "0", + "type": "mapping(address => struct PolicyEngineStaking.PEData)" + }, + { + "bytes": "32", + "label": "allowlist", + "offset": 0, + "slot": "1", + "type": "mapping(address => mapping(address => bool))" + }, + { + "bytes": "32", + "label": "stakingData", + "offset": 0, + "slot": "2", + "type": "mapping(address => struct PolicyEngineStaking.StakedData)" + }, + { + "bytes": "1", + "label": "paused", + "offset": 0, + "slot": "3", + "type": "bool" + }, + { + "bytes": "20", + "label": "_owner", + "offset": 1, + "slot": "3", + 
"type": "address" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/periphery/staking/PolicyEngineStaking.sol b/packages/contracts-bedrock/src/periphery/staking/PolicyEngineStaking.sol new file mode 100644 index 0000000000000..890a77d289890 --- /dev/null +++ b/packages/contracts-bedrock/src/periphery/staking/PolicyEngineStaking.sol @@ -0,0 +1,330 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.25; + +// Interfaces +import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; + +// Libraries +import { SafeERC20 } from "@openzeppelin/contracts/token/ERC20/utils/SafeERC20.sol"; + +/// @title PolicyEngineStaking +/// @notice Periphery contract for stake-based transaction ordering in op-rbuilder. Users stake governance tokens +/// and optionally link to a beneficiary who receives ordering power. Supports partial unstake. +/// Invariant: every staked token has a beneficiary (self or linked). No receivedStake tracking or unlink(). +contract PolicyEngineStaking is ISemver { + using SafeERC20 for IERC20; + + /// @notice Staking stakingData per account. + /// @custom:field stakedAmount The amount of OP tokens staked by the account. + /// @custom:field beneficiary The address to which the account's stake is attributed. + struct StakedData { + uint128 stakedAmount; + address beneficiary; + } + + /// @notice Policy Engine stakingData per account. Packed in one slot for PE reads. + /// @custom:field effectiveStake The exact stake amount used for ordering. + /// @custom:field lastUpdate The timestamp of the latest change on their effective stake. + struct PEData { + uint128 effectiveStake; + uint128 lastUpdate; + } + + /// @notice Semantic version. + /// @custom:semver 1.0.0 + string public constant version = "1.0.0"; + + /// @notice Base storage slot for PE stakingData mapping. Policy Engine reads from + /// keccak256(abi.encode(account, PE_DATA_SLOT)). 
+ bytes32 public constant PE_DATA_SLOT = 0; + + /// @notice The ERC20 token used for staking. + // nosemgrep: sol-safety-no-immutable-variables + IERC20 internal immutable STAKING_TOKEN; + + /// @notice Slot 0: PE stakingData mapping. + mapping(address account => PEData) public peData; + + /// @notice Allowlist: beneficiary => staker => allowed. + mapping(address beneficiary => mapping(address staker => bool allowed)) public allowlist; + + /// @notice Staking stakingData mapping. + mapping(address account => StakedData) public stakingData; + + /// @notice Paused state. + bool public paused; + + /// @notice The owner of the contract. Can pause, unpause, and transfer ownership. + address private _owner; + + /// @notice Emitted when a user stakes OP tokens. + /// @param account The address that staked tokens. + /// @param amount The amount of tokens staked. + event Staked(address indexed account, uint128 amount); + + /// @notice Emitted when a user unstakes OP tokens. + /// @param account The address that unstaked tokens. + /// @param amount The amount of tokens unstaked. + event Unstaked(address indexed account, uint128 amount); + + /// @notice Emitted when a staker sets their beneficiary. + /// @param staker The address setting their beneficiary. + /// @param beneficiary The address receiving ordering power. + event BeneficiarySet(address indexed staker, address indexed beneficiary); + + /// @notice Emitted when a staker's beneficiary is removed (on change or full unstake). + /// @param staker The address whose beneficiary was removed. + /// @param previousBeneficiary The previous beneficiary. + event BeneficiaryRemoved(address indexed staker, address indexed previousBeneficiary); + + /// @notice Emitted when effective stake changes for an account. + /// @param account The account whose effective stake changed. + /// @param newEffectiveStake The new effective stake value. 
+ event EffectiveStakeChanged(address indexed account, uint256 newEffectiveStake); + + /// @notice Emitted when a beneficiary updates their allowlist. + /// @param beneficiary The address controlling the allowlist. + /// @param staker The staker whose permission changed. + /// @param allowed The new permission state. + event BeneficiaryAllowlistUpdated(address indexed beneficiary, address indexed staker, bool allowed); + + /// @notice Emitted when staking is paused. + event Paused(); + + /// @notice Emitted when the staking is unpaused. + event Unpaused(); + + /// @notice Emitted when ownership is transferred. + /// @param previousOwner The address of the previous owner. + /// @param newOwner The address of the new owner. + event OwnershipTransferred(address indexed previousOwner, address indexed newOwner); + + /// @notice Thrown when the caller is not the owner. + error PolicyEngineStaking_OnlyOwner(); + + /// @notice Thrown when the staking is paused. + error PolicyEngineStaking_Paused(); + + /// @notice Thrown when the amount is zero. + error PolicyEngineStaking_ZeroAmount(); + + /// @notice Thrown when the beneficiary address is zero. + error PolicyEngineStaking_ZeroBeneficiary(); + + /// @notice Thrown when the staker is not allowed to set the beneficiary. + error PolicyEngineStaking_NotAllowedToSetBeneficiary(); + + /// @notice Thrown when trying to operate with no stake. + error PolicyEngineStaking_NoStake(); + + /// @notice Thrown when trying to unstake more than the staked amount. + error PolicyEngineStaking_InsufficientStake(); + + /// @notice Thrown when a zero address is provided where it is not allowed. + error PolicyEngineStaking_ZeroAddress(); + + /// @notice Thrown when trying to change beneficiary to the current beneficiary. + error PolicyEngineStaking_SameBeneficiary(); + + /// @notice Thrown when trying to allowlist/disallow yourself. + error PolicyEngineStaking_SelfAllowlist(); + + /// @notice Constructs the PolicyEngineStaking contract. 
+ /// @param _ownerAddr The address that can pause and unpause staking. + /// @param _token The ERC20 token used for staking. + constructor(address _ownerAddr, address _token) { + if (_ownerAddr == address(0)) revert PolicyEngineStaking_ZeroAddress(); + if (_token == address(0)) revert PolicyEngineStaking_ZeroAddress(); + _owner = _ownerAddr; + STAKING_TOKEN = IERC20(_token); + } + + /// @notice Modifier that reverts when the staking is paused. + modifier whenNotPaused() { + if (paused) revert PolicyEngineStaking_Paused(); + _; + } + + /// @notice Modifier that reverts when the caller is not the owner. + modifier onlyOwner() { + if (msg.sender != _owner) revert PolicyEngineStaking_OnlyOwner(); + _; + } + + /// @notice Returns the owner address. + function owner() external view returns (address) { + return _owner; + } + + /// @notice Returns the staking token address. + /// + /// @return The ERC20 token used for staking. + function stakingToken() external view returns (IERC20) { + return STAKING_TOKEN; + } + + /// @notice Transfers ownership of the contract to a new account. + /// @param _newOwner The address of the new owner. + function transferOwnership(address _newOwner) external onlyOwner { + if (_newOwner == address(0)) revert PolicyEngineStaking_ZeroAddress(); + emit OwnershipTransferred(_owner, _newOwner); + _owner = _newOwner; + } + + /// @notice Pauses the contract. Stake is disabled while paused. + function pause() external onlyOwner { + paused = true; + emit Paused(); + } + + /// @notice Unpauses the contract. + function unpause() external onlyOwner { + paused = false; + emit Unpaused(); + } + + /// @notice Stakes tokens and sets beneficiary atomically. + /// This is the entry point for staking. Handles first-time staking, + /// adding to same beneficiary, and changing to a new beneficiary. + /// @param _amount The amount of tokens to stake. + /// @param _beneficiary Address that receives ordering power from this stake. 
+ /// Use msg.sender for self-attribution. + function stake(uint128 _amount, address _beneficiary) external whenNotPaused { + if (_amount == 0) revert PolicyEngineStaking_ZeroAmount(); + if (_beneficiary == address(0)) revert PolicyEngineStaking_ZeroBeneficiary(); + if (_beneficiary != msg.sender && !allowlist[_beneficiary][msg.sender]) { + revert PolicyEngineStaking_NotAllowedToSetBeneficiary(); + } + + StakedData storage stakedData = stakingData[msg.sender]; + address currentBeneficiary = stakedData.beneficiary; + + // Remove previous beneficiary + if (currentBeneficiary != _beneficiary) { + if (currentBeneficiary != address(0)) { + _decreasePeData(currentBeneficiary, stakedData.stakedAmount); + emit BeneficiaryRemoved(msg.sender, currentBeneficiary); + } + stakedData.beneficiary = _beneficiary; + emit BeneficiarySet(msg.sender, _beneficiary); + } + + stakedData.stakedAmount += _amount; + + // If the beneficiary hasn't changed, peDelta is just the new amount staked. + // If the beneficiary changed, peDelta is the full total stake amount (previous + new stake), + // since the new beneficiary now receives ordering power for the entire position. + uint128 peDelta = currentBeneficiary == _beneficiary ? _amount : stakedData.stakedAmount; + _increasePeData(_beneficiary, peDelta); + + STAKING_TOKEN.safeTransferFrom(msg.sender, address(this), uint256(_amount)); + + emit Staked(msg.sender, _amount); + } + + /// @notice Changes the beneficiary for existing stake. Reverts if already set + /// to the same beneficiary. + /// @param _beneficiary New beneficiary address. 
+ function changeBeneficiary(address _beneficiary) external whenNotPaused { + if (_beneficiary == address(0)) revert PolicyEngineStaking_ZeroBeneficiary(); + if (_beneficiary != msg.sender && !allowlist[_beneficiary][msg.sender]) { + revert PolicyEngineStaking_NotAllowedToSetBeneficiary(); + } + + StakedData storage stakedData = stakingData[msg.sender]; + if (stakedData.stakedAmount == 0) revert PolicyEngineStaking_NoStake(); + + address currentBeneficiary = stakedData.beneficiary; + if (currentBeneficiary == _beneficiary) revert PolicyEngineStaking_SameBeneficiary(); + + // Move existing stake from old beneficiary to new + _decreasePeData(currentBeneficiary, stakedData.stakedAmount); + emit BeneficiaryRemoved(msg.sender, currentBeneficiary); + + stakedData.beneficiary = _beneficiary; + _increasePeData(_beneficiary, stakedData.stakedAmount); + + emit BeneficiarySet(msg.sender, _beneficiary); + } + + /// @notice Unstakes OP tokens. Supports partial and full unstake. + /// On full unstake, the beneficiary is automatically cleared. + /// @param _amount The amount of OP tokens to unstake. + function unstake(uint128 _amount) external { + if (_amount == 0) revert PolicyEngineStaking_ZeroAmount(); + + StakedData storage stakedData = stakingData[msg.sender]; + if (stakedData.stakedAmount < _amount) revert PolicyEngineStaking_InsufficientStake(); + + address beneficiary = stakedData.beneficiary; + _decreasePeData(beneficiary, _amount); + stakedData.stakedAmount -= _amount; + + // Auto-clear beneficiary on full unstake + if (stakedData.stakedAmount == 0) { + stakedData.beneficiary = address(0); + emit BeneficiaryRemoved(msg.sender, beneficiary); + } + + STAKING_TOKEN.safeTransfer(msg.sender, uint256(_amount)); + + emit Unstaked(msg.sender, _amount); + } + + /// @notice Sets whether a staker can set the caller as beneficiary. 
When disallowing, + /// if the staker's current beneficiary is the caller, their stake attribution is + /// moved back to the staker (beneficiary reset to self). + /// + /// @param _staker The staker to allow or deny. + /// @param _allowed The allowed state. + function setAllowedStaker(address _staker, bool _allowed) public { + if (_staker == msg.sender) revert PolicyEngineStaking_SelfAllowlist(); + + allowlist[msg.sender][_staker] = _allowed; + emit BeneficiaryAllowlistUpdated(msg.sender, _staker, _allowed); + + if (!_allowed) { + StakedData storage stakedData = stakingData[_staker]; + if (stakedData.beneficiary == msg.sender) { + _decreasePeData(msg.sender, stakedData.stakedAmount); + emit BeneficiaryRemoved(_staker, msg.sender); + + stakedData.beneficiary = _staker; + _increasePeData(_staker, stakedData.stakedAmount); + emit BeneficiarySet(_staker, _staker); + } + } + } + + /// @notice Batch sets allowlist for multiple stakers. + /// @param _stakers The stakers to allow or deny. + /// @param _allowed The allowed state. + function setAllowedStakers(address[] calldata _stakers, bool _allowed) external { + uint256 stakersLength = _stakers.length; + + for (uint256 i; i < stakersLength; ++i) { + setAllowedStaker(_stakers[i], _allowed); + } + } + + /// @notice Increases effective stake for an account and updates timestamp. + /// @param _account The account address. + /// @param _amount The amount to add. + function _increasePeData(address _account, uint128 _amount) internal { + PEData storage pe = peData[_account]; + pe.effectiveStake += _amount; + pe.lastUpdate = uint128(block.timestamp); + emit EffectiveStakeChanged(_account, pe.effectiveStake); + } + + /// @notice Decreases effective stake for an account and updates timestamp. + /// @param _account The account address. + /// @param _amount The amount to subtract. 
+ function _decreasePeData(address _account, uint128 _amount) internal { + PEData storage pe = peData[_account]; + pe.effectiveStake -= _amount; + pe.lastUpdate = uint128(block.timestamp); + emit EffectiveStakeChanged(_account, pe.effectiveStake); + } +} diff --git a/packages/contracts-bedrock/test/periphery/staking/PolicyEngineStaking.t.sol b/packages/contracts-bedrock/test/periphery/staking/PolicyEngineStaking.t.sol new file mode 100644 index 0000000000000..2125c28730df2 --- /dev/null +++ b/packages/contracts-bedrock/test/periphery/staking/PolicyEngineStaking.t.sol @@ -0,0 +1,1011 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing utilities +import { CommonTest } from "test/setup/CommonTest.sol"; +import { TestERC20 } from "test/mocks/TestERC20.sol"; + +// Interfaces +import { IERC20 } from "@openzeppelin/contracts/token/ERC20/IERC20.sol"; +import { IPolicyEngineStaking } from "interfaces/periphery/staking/IPolicyEngineStaking.sol"; + +// Libraries +import { Predeploys } from "src/libraries/Predeploys.sol"; + +/// @title PolicyEngineStaking_TestInit +/// @notice Reusable test initialization for `PolicyEngineStaking` tests. 
+abstract contract PolicyEngineStaking_TestInit is CommonTest { + address internal carol = address(0xC4101); + + IPolicyEngineStaking internal staking; + address internal owner; + + event Staked(address indexed account, uint128 amount); + event Unstaked(address indexed account, uint128 amount); + event BeneficiarySet(address indexed staker, address indexed beneficiary); + event BeneficiaryRemoved(address indexed staker, address indexed previousBeneficiary); + event EffectiveStakeChanged(address indexed account, uint256 newEffectiveStake); + event BeneficiaryAllowlistUpdated(address indexed beneficiary, address indexed staker, bool allowed); + event Paused(); + event Unpaused(); + + function setUp() public virtual override { + super.setUp(); + owner = makeAddr("owner"); + staking = IPolicyEngineStaking( + vm.deployCode("PolicyEngineStaking.sol:PolicyEngineStaking", abi.encode(owner, Predeploys.GOVERNANCE_TOKEN)) + ); + + _setupMockOPToken(); + + vm.label(carol, "carol"); + vm.label(address(staking), "PolicyEngineStaking"); + } + + /// @notice Deploys TestERC20 at the predeploy address and funds test accounts. + function _setupMockOPToken() internal { + TestERC20 token = new TestERC20(); + vm.etch(Predeploys.GOVERNANCE_TOKEN, address(token).code); + + TestERC20(Predeploys.GOVERNANCE_TOKEN).mint(alice, 1_000 ether); + TestERC20(Predeploys.GOVERNANCE_TOKEN).mint(bob, 1_000 ether); + TestERC20(Predeploys.GOVERNANCE_TOKEN).mint(carol, 1_000 ether); + + vm.prank(alice); + IERC20(Predeploys.GOVERNANCE_TOKEN).approve(address(staking), type(uint256).max); + vm.prank(bob); + IERC20(Predeploys.GOVERNANCE_TOKEN).approve(address(staking), type(uint256).max); + vm.prank(carol); + IERC20(Predeploys.GOVERNANCE_TOKEN).approve(address(staking), type(uint256).max); + } +} + +/// @title PolicyEngineStaking_TransferOwnership_Test +/// @notice Tests the `transferOwnership` function. 
+contract PolicyEngineStaking_TransferOwnership_Test is PolicyEngineStaking_TestInit { + /// @notice Tests that owner can transfer ownership. + function testFuzz_transferOwnership_succeeds(address _newOwner) external { + vm.assume(_newOwner != address(0)); + + vm.expectEmit(address(staking)); + emit OwnershipTransferred(owner, _newOwner); + + vm.prank(owner); + staking.transferOwnership(_newOwner); + + assertEq(staking.owner(), _newOwner); + } + + /// @notice Tests that new owner can exercise ownership after transfer. + function test_transferOwnership_newOwnerCanPause_succeeds() external { + address newOwner = makeAddr("newOwner"); + + vm.prank(owner); + staking.transferOwnership(newOwner); + + vm.prank(newOwner); + staking.pause(); + assertTrue(staking.paused()); + } + + /// @notice Tests that old owner loses ownership after transfer. + function test_transferOwnership_oldOwnerReverts_reverts() external { + address newOwner = makeAddr("newOwner"); + + vm.prank(owner); + staking.transferOwnership(newOwner); + + vm.prank(owner); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_OnlyOwner.selector); + staking.pause(); + } + + /// @notice Tests that non-owner cannot transfer ownership. + function testFuzz_transferOwnership_notOwner_reverts(address _caller) external { + vm.assume(_caller != owner && _caller != address(0)); + + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_OnlyOwner.selector); + vm.prank(_caller); + staking.transferOwnership(alice); + } + + /// @notice Tests that transferring to zero address reverts. + function test_transferOwnership_zeroAddress_reverts() external { + vm.prank(owner); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_ZeroAddress.selector); + staking.transferOwnership(address(0)); + } +} + +/// @title PolicyEngineStaking_Pause_Test +/// @notice Tests the pause/unpause functionality. +contract PolicyEngineStaking_Pause_Test is PolicyEngineStaking_TestInit { + /// @notice Tests that owner can pause and unpause. 
+ function test_pauseUnpause_owner_succeeds() external { + assertFalse(staking.paused()); + + vm.expectEmit(address(staking)); + emit Paused(); + vm.prank(owner); + staking.pause(); + + assertTrue(staking.paused()); + + vm.expectEmit(address(staking)); + emit Unpaused(); + vm.prank(owner); + staking.unpause(); + + assertFalse(staking.paused()); + } + + /// @notice Tests that non-owner cannot pause. + function testFuzz_pause_notOwner_reverts(address _caller) external { + vm.assume(_caller != owner && _caller != address(0)); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_OnlyOwner.selector); + vm.prank(_caller); + staking.pause(); + } + + /// @notice Tests that non-owner cannot unpause. + function testFuzz_unpause_notOwner_reverts(address _caller) external { + vm.prank(owner); + staking.pause(); + + vm.assume(_caller != owner && _caller != address(0)); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_OnlyOwner.selector); + vm.prank(_caller); + staking.unpause(); + } + + /// @notice Tests that stake reverts when paused. + function test_stake_whenPaused_reverts() external { + vm.prank(owner); + staking.pause(); + + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_Paused.selector); + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + } + + /// @notice Tests that changeBeneficiary works when paused. + function test_changeBeneficiary_whenPaused_reverts() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + vm.prank(owner); + staking.pause(); + + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_Paused.selector); + vm.prank(alice); + staking.changeBeneficiary(bob); + } + + /// @notice Tests that unstake works when paused. 
+ function test_unstake_whenPaused_succeeds() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + vm.prank(owner); + staking.pause(); + + uint256 balanceBefore = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + vm.prank(alice); + staking.unstake(uint128(100 ether)); + assertEq(IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice), balanceBefore + 100 ether); + } +} + +/// @title PolicyEngineStaking_Stake_Test +/// @notice Tests the `stake` function. +contract PolicyEngineStaking_Stake_Test is PolicyEngineStaking_TestInit { + /// @notice Tests that stake with self-attribution succeeds. + function testFuzz_stake_selfAttribution_succeeds(uint128 _amount) external { + _amount = uint128(bound(_amount, 1, 1_000 ether)); + + vm.expectEmit(address(staking)); + emit BeneficiarySet(alice, alice); + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(alice, _amount); + vm.expectEmit(address(staking)); + emit Staked(alice, _amount); + + vm.prank(alice); + staking.stake(_amount, alice); + + (uint128 staked, address beneficiary) = staking.stakingData(alice); + (uint128 effectiveStake, uint128 lastUpdate) = staking.peData(alice); + + assertEq(staked, _amount); + assertEq(beneficiary, alice); + assertEq(effectiveStake, _amount); + assertEq(lastUpdate, block.timestamp); + } + + /// @notice Tests that multiple stake calls to same beneficiary succeed. 
+ function test_stake_severalToSameBeneficiary_succeeds() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + vm.prank(alice); + staking.stake(uint128(200 ether), alice); + vm.prank(alice); + staking.stake(uint128(300 ether), alice); + + (uint128 aliceStaked, address aliceBeneficiary) = staking.stakingData(alice); + assertEq(aliceStaked, 600 ether); + assertEq(aliceBeneficiary, alice); + (uint128 aliceEffectiveStake, uint128 aliceLastUpdate) = staking.peData(alice); + assertEq(aliceEffectiveStake, 600 ether); + assertEq(aliceLastUpdate, block.timestamp); + } + + /// @notice Tests that stake to another beneficiary with allowlist succeeds. + function test_stake_toBeneficiaryWithAllowlist_succeeds() external { + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + vm.expectEmit(address(staking)); + emit BeneficiarySet(alice, bob); + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(bob, 100 ether); + vm.expectEmit(address(staking)); + emit Staked(alice, uint128(100 ether)); + + vm.prank(alice); + staking.stake(uint128(100 ether), bob); + + (uint128 staked, address beneficiary) = staking.stakingData(alice); + (uint128 effectiveStake, uint128 lastUpdate) = staking.peData(alice); + assertEq(staked, 100 ether); + assertEq(beneficiary, bob); + assertEq(effectiveStake, 0); + assertEq(lastUpdate, 0); + + (uint128 bobEffectiveStake, uint128 bobLastUpdate) = staking.peData(bob); + assertEq(bobEffectiveStake, 100 ether); + assertEq(bobLastUpdate, block.timestamp); + } + + /// @notice Tests that stake more to same beneficiary when already linked succeeds. 
+ function test_stake_moreToSameBeneficiary_succeeds() external { + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(alice); + staking.stake(uint128(100 ether), bob); + + vm.prank(alice); + staking.stake(uint128(50 ether), bob); + + (uint128 staked,) = staking.stakingData(alice); + assertEq(staked, 150 ether); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, 150 ether); + } + + /// @notice Tests that stake changes beneficiary atomically. + function test_stake_changeBeneficiary_succeeds() external { + // Alice stakes to self + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + + (uint128 aliceEffBefore,) = staking.peData(alice); + assertEq(aliceEffBefore, 100 ether); + + // Bob allows alice + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + // Alice changes beneficiary to bob with additional stake + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(alice, 0); // decrease alice's PE + vm.expectEmit(address(staking)); + emit BeneficiaryRemoved(alice, alice); + vm.expectEmit(address(staking)); + emit BeneficiarySet(alice, bob); + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(bob, 150 ether); // single increase: old stake + new amount + vm.expectEmit(address(staking)); + emit Staked(alice, uint128(50 ether)); + + vm.prank(alice); + staking.stake(uint128(50 ether), bob); + + (uint128 staked, address beneficiary) = staking.stakingData(alice); + assertEq(staked, 150 ether); + assertEq(beneficiary, bob); + (uint128 aliceEffAfter,) = staking.peData(alice); + assertEq(aliceEffAfter, 0); + (uint128 bobEff,) = staking.peData(bob); + assertEq(bobEff, 150 ether); + } + + /// @notice Tests that stake with zero amount reverts. + function test_stake_zeroAmount_reverts() external { + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_ZeroAmount.selector); + staking.stake(0, alice); + } + + /// @notice Tests that stake with zero beneficiary reverts. 
+ function test_stake_zeroBeneficiary_reverts() external { + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_ZeroBeneficiary.selector); + staking.stake(uint128(100 ether), address(0)); + } + + /// @notice Tests that stake to beneficiary without allowlist reverts. + function test_stake_toBeneficiaryWithoutAllowlist_reverts() external { + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_NotAllowedToSetBeneficiary.selector); + staking.stake(uint128(100 ether), bob); + } + + /// @notice Tests change beneficiary reverts without allowlist. + function test_stake_changeBeneficiaryWithoutAllowlist_reverts() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_NotAllowedToSetBeneficiary.selector); + staking.stake(uint128(50 ether), bob); + } +} + +/// @title PolicyEngineStaking_Unstake_Test +/// @notice Tests the `unstake` function. +contract PolicyEngineStaking_Unstake_Test is PolicyEngineStaking_TestInit { + /// @notice Tests that full unstake succeeds, auto-clears beneficiary, and preserves balance. 
+ function testFuzz_unstake_full_succeeds(uint128 _amount) external { + _amount = uint128(bound(_amount, 1, 1_000 ether)); + + vm.prank(alice); + staking.stake(_amount, alice); + + uint256 aliceBalanceBefore = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(alice, 0); + vm.expectEmit(address(staking)); + emit BeneficiaryRemoved(alice, alice); + vm.expectEmit(address(staking)); + emit Unstaked(alice, _amount); + + vm.prank(alice); + staking.unstake(_amount); + + (uint128 aliceStaked, address beneficiary) = staking.stakingData(alice); + assertEq(aliceStaked, 0); + assertEq(beneficiary, address(0)); + assertEq(IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice), aliceBalanceBefore + _amount); + } + + /// @notice Tests that unstake with zero amount reverts. + function test_unstake_zeroAmount_reverts() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_ZeroAmount.selector); + staking.unstake(0); + } + + /// @notice Tests that unstake with no stake reverts. + function test_unstake_noStake_reverts() external { + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_InsufficientStake.selector); + staking.unstake(uint128(100 ether)); + } + + /// @notice Tests that unstake more than staked reverts. + function test_unstake_insufficientStake_reverts() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_InsufficientStake.selector); + staking.unstake(uint128(101 ether)); + } + + /// @notice Tests partial unstake preserves correct remaining balance. 
+ function testFuzz_unstake_partialAmount_succeeds(uint128 _stakeAmount, uint128 _unstakeAmount) external { + _stakeAmount = uint128(bound(_stakeAmount, 2, 1_000 ether)); + _unstakeAmount = uint128(bound(_unstakeAmount, 1, _stakeAmount - 1)); + + vm.prank(alice); + staking.stake(_stakeAmount, alice); + + uint128 remaining = _stakeAmount - _unstakeAmount; + + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(alice, remaining); + vm.expectEmit(address(staking)); + emit Unstaked(alice, _unstakeAmount); + + vm.prank(alice); + staking.unstake(_unstakeAmount); + + (uint128 staked, address beneficiary) = staking.stakingData(alice); + assertEq(staked, remaining); + assertEq(beneficiary, alice); + (uint128 effective,) = staking.peData(alice); + assertEq(effective, remaining); + } + + /// @notice Tests partial unstake with beneficiary preserves remaining stake attribution. + function testFuzz_unstake_partialWithBeneficiary_succeeds(uint128 _stakeAmount, uint128 _unstakeAmount) external { + _stakeAmount = uint128(bound(_stakeAmount, 2, 1_000 ether)); + _unstakeAmount = uint128(bound(_unstakeAmount, 1, _stakeAmount - 1)); + + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + vm.prank(alice); + staking.stake(_stakeAmount, bob); + + uint128 remaining = _stakeAmount - _unstakeAmount; + + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(bob, remaining); + vm.expectEmit(address(staking)); + emit Unstaked(alice, _unstakeAmount); + + vm.prank(alice); + staking.unstake(_unstakeAmount); + + (uint128 staked, address beneficiary) = staking.stakingData(alice); + assertEq(staked, remaining); + assertEq(beneficiary, bob); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, remaining); + } +} + +/// @title PolicyEngineStaking_ChangeBeneficiary_Test +/// @notice Tests the `changeBeneficiary` function. 
+contract PolicyEngineStaking_ChangeBeneficiary_Test is PolicyEngineStaking_TestInit { + /// @notice Tests that changing beneficiary succeeds. + function testFuzz_changeBeneficiary_succeeds(uint128 _amount) external { + _amount = uint128(bound(_amount, 1, 1_000 ether)); + vm.prank(alice); + staking.stake(_amount, alice); + + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(alice, 0); + vm.expectEmit(address(staking)); + emit BeneficiaryRemoved(alice, alice); + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(bob, _amount); + vm.expectEmit(address(staking)); + emit BeneficiarySet(alice, bob); + + vm.prank(alice); + staking.changeBeneficiary(bob); + + (uint128 staked, address beneficiary) = staking.stakingData(alice); + assertEq(staked, _amount); + assertEq(beneficiary, bob); + (uint128 aliceEffective,) = staking.peData(alice); + assertEq(aliceEffective, 0); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, _amount); + } + + /// @notice Tests that changing from one beneficiary to another succeeds. + function test_changeBeneficiary_fromOneToAnother_succeeds() external { + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(alice); + staking.stake(uint128(100 ether), bob); + + vm.prank(carol); + staking.setAllowedStaker(alice, true); + + vm.prank(alice); + staking.changeBeneficiary(carol); + + (, address beneficiary) = staking.stakingData(alice); + assertEq(beneficiary, carol); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, 0); + (uint128 carolEffective,) = staking.peData(carol); + assertEq(carolEffective, 100 ether); + } + + /// @notice Tests that changing beneficiary to self succeeds (no allowlist needed). 
+ function test_changeBeneficiary_toSelf_succeeds() external { + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(alice); + staking.stake(uint128(100 ether), bob); + + vm.prank(alice); + staking.changeBeneficiary(alice); + + (, address beneficiary) = staking.stakingData(alice); + assertEq(beneficiary, alice); + (uint128 aliceEffective,) = staking.peData(alice); + assertEq(aliceEffective, 100 ether); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, 0); + } + + /// @notice Tests that changing to same beneficiary reverts. + function test_changeBeneficiary_sameBeneficiary_reverts() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_SameBeneficiary.selector); + staking.changeBeneficiary(alice); + } + + /// @notice Tests that changeBeneficiary with zero beneficiary reverts. + function test_changeBeneficiary_zeroBeneficiary_reverts() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_ZeroBeneficiary.selector); + staking.changeBeneficiary(address(0)); + } + + /// @notice Tests that changeBeneficiary without allowlist reverts. + function test_changeBeneficiary_notAllowed_reverts() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_NotAllowedToSetBeneficiary.selector); + staking.changeBeneficiary(bob); + } + + /// @notice Tests that changeBeneficiary with no stake reverts. 
+ function test_changeBeneficiary_noStake_reverts() external { + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + vm.prank(alice); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_NoStake.selector); + staking.changeBeneficiary(bob); + } +} + +/// @title PolicyEngineStaking_Constructor_Test +/// @notice Tests constructor, view functions, and storage layout. +contract PolicyEngineStaking_Constructor_Test is PolicyEngineStaking_TestInit { + /// @notice Tests that owner is set correctly. + function test_owner_succeeds() external view { + assertEq(staking.owner(), owner); + } + + /// @notice Tests that PE_DATA_SLOT is 0. + function test_peDataSlot_isZero_succeeds() external view { + assertEq(staking.PE_DATA_SLOT(), bytes32(uint256(0))); + } + + /// @notice Tests that peData storage layout matches PE_DATA_SLOT convention + /// across stake, changeBeneficiary, and unstake operations. + function test_peData_storageLayout_succeeds() external { + uint128 amount = uint128(100 ether); + bytes32 aliceSlot = keccak256(abi.encode(alice, staking.PE_DATA_SLOT())); + bytes32 bobSlot = keccak256(abi.encode(bob, staking.PE_DATA_SLOT())); + + // After stake: staker's beneficiary slot is populated + vm.prank(alice); + staking.stake(amount, alice); + bytes32 raw = vm.load(address(staking), aliceSlot); + assertEq(uint128(uint256(raw)), amount); + assertEq(uint128(uint256(raw) >> 128), block.timestamp); + + // After changeBeneficiary: stake moves to beneficiary's slot, staker's slot zeroed + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.warp(block.timestamp + 1); + vm.prank(alice); + staking.changeBeneficiary(bob); + + raw = vm.load(address(staking), aliceSlot); + assertEq(uint128(uint256(raw)), 0); + + bytes32 bobRaw = vm.load(address(staking), bobSlot); + assertEq(uint128(uint256(bobRaw)), amount); + assertEq(uint128(uint256(bobRaw) >> 128), block.timestamp); + + // After full unstake: beneficiary's slot zeroed + vm.prank(alice); + 
staking.unstake(amount); + bobRaw = vm.load(address(staking), bobSlot); + assertEq(uint128(uint256(bobRaw)), 0); + } + + /// @notice Tests that the constructor reverts when owner is zero address. + function test_constructor_zeroOwner_reverts() external { + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_ZeroAddress.selector); + vm.deployCode( + "PolicyEngineStaking.sol:PolicyEngineStaking", abi.encode(address(0), Predeploys.GOVERNANCE_TOKEN) + ); + } + + /// @notice Tests that the constructor reverts when token is zero address. + function test_constructor_zeroToken_reverts() external { + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_ZeroAddress.selector); + vm.deployCode("PolicyEngineStaking.sol:PolicyEngineStaking", abi.encode(owner, address(0))); + } +} + +/// @title PolicyEngineStaking_SetAllowedStaker_Test +/// @notice Tests the `setAllowedStaker` and `setAllowedStakers` functions. +contract PolicyEngineStaking_SetAllowedStaker_Test is PolicyEngineStaking_TestInit { + /// @notice Tests that setAllowedStaker updates allowlist correctly. + function test_setAllowedStaker_succeeds() external { + (bool allowed) = staking.allowlist(bob, alice); + assertFalse(allowed); + + vm.expectEmit(address(staking)); + emit BeneficiaryAllowlistUpdated(bob, alice, true); + + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + (allowed) = staking.allowlist(bob, alice); + assertTrue(allowed); + + vm.prank(bob); + staking.setAllowedStaker(alice, false); + + (allowed) = staking.allowlist(bob, alice); + assertFalse(allowed); + } + + /// @notice Tests that setAllowedStakers batch updates allowlist. 
+ function test_setAllowedStakers_succeeds() external { + address[] memory stakers = new address[](2); + stakers[0] = alice; + stakers[1] = carol; + + vm.prank(bob); + staking.setAllowedStakers(stakers, true); + + (bool aliceAllowed) = staking.allowlist(bob, alice); + (bool carolAllowed) = staking.allowlist(bob, carol); + assertTrue(aliceAllowed); + assertTrue(carolAllowed); + + vm.prank(bob); + staking.setAllowedStakers(stakers, false); + + (aliceAllowed) = staking.allowlist(bob, alice); + (carolAllowed) = staking.allowlist(bob, carol); + assertFalse(aliceAllowed); + assertFalse(carolAllowed); + } + + /// @notice Tests that setAllowedStaker reverts when staker is msg.sender. + function test_setAllowedStaker_selfAllowlist_reverts() external { + vm.prank(bob); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_SelfAllowlist.selector); + staking.setAllowedStaker(bob, true); + } +} + +/// @title PolicyEngineStaking_Integration_Test +/// @notice Integration tests for the full stake/changeBeneficiary/unstake flow. +contract PolicyEngineStaking_Integration_Test is PolicyEngineStaking_TestInit { + /// @notice Tests full flow: stake -> stake more -> changeBeneficiary -> partial unstake -> full unstake. 
+ function test_fullFlow_succeeds() external { + // Step 1: Alice stakes 100 to self + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + (uint128 staked,) = staking.stakingData(alice); + assertEq(staked, 100 ether); + + // Step 2: Alice stakes 50 more + vm.prank(alice); + staking.stake(uint128(50 ether), alice); + (staked,) = staking.stakingData(alice); + assertEq(staked, 150 ether); + + // Step 3: Alice changes beneficiary to bob + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(alice); + staking.changeBeneficiary(bob); + + (, address beneficiary) = staking.stakingData(alice); + assertEq(beneficiary, bob); + (uint128 bobEff,) = staking.peData(bob); + assertEq(bobEff, 150 ether); + (uint128 aliceEff,) = staking.peData(alice); + assertEq(aliceEff, 0); + + // Step 4: Partial unstake + vm.prank(alice); + staking.unstake(uint128(50 ether)); + (staked, beneficiary) = staking.stakingData(alice); + assertEq(staked, 100 ether); + assertEq(beneficiary, bob); + (bobEff,) = staking.peData(bob); + assertEq(bobEff, 100 ether); + + // Step 5: Full unstake (auto-unlinks) + uint256 aliceBalanceBefore = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + vm.prank(alice); + staking.unstake(uint128(100 ether)); + assertEq(IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice), aliceBalanceBefore + 100 ether); + (staked, beneficiary) = staking.stakingData(alice); + assertEq(staked, 0); + assertEq(beneficiary, address(0)); + (bobEff,) = staking.peData(bob); + assertEq(bobEff, 0); + } + + /// @notice Tests that multiple stakers can stake to the same beneficiary. 
+ function test_multipleStakersToSameBeneficiary_succeeds() external { + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(bob); + staking.setAllowedStaker(carol, true); + + vm.prank(alice); + staking.stake(uint128(100 ether), bob); + vm.prank(carol); + staking.stake(uint128(50 ether), bob); + + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, 150 ether); + } + + /// @notice Tests that a beneficiary with own stake plus received stake has correct effective stake. + function test_beneficiaryWithOwnStakeAndReceived_succeeds() external { + vm.prank(bob); + staking.stake(uint128(50 ether), bob); + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(alice); + staking.stake(uint128(100 ether), bob); + + (uint128 bobStaked,) = staking.stakingData(bob); + assertEq(bobStaked, 50 ether); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, 150 ether); + } + + /// @notice Tests that revoking allowlist auto-resets beneficiary to self. 
+ function test_revokeAllowlist_resetsBeneficiaryToSelf_succeeds() external { + vm.prank(alice); + staking.setAllowedStaker(bob, true); + vm.prank(bob); + staking.stake(uint128(100 ether), alice); + + (uint128 bobStaked, address bobBeneficiary) = staking.stakingData(bob); + (uint128 aliceEffective,) = staking.peData(alice); + assertEq(bobStaked, 100 ether); + assertEq(bobBeneficiary, alice); + assertEq(aliceEffective, 100 ether); + + // Alice revokes bob from allowlist + vm.expectEmit(address(staking)); + emit BeneficiaryAllowlistUpdated(alice, bob, false); + + // Bob is unlinked from Alice + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(alice, 0); + vm.expectEmit(address(staking)); + emit BeneficiaryRemoved(bob, alice); + + // Bob is linked to self + vm.expectEmit(address(staking)); + emit EffectiveStakeChanged(bob, 100 ether); + vm.expectEmit(address(staking)); + emit BeneficiarySet(bob, bob); + + vm.prank(alice); + staking.setAllowedStaker(bob, false); + + // Bob is now linked to self, alice's effective stake is zeroed + (bobStaked, bobBeneficiary) = staking.stakingData(bob); + (aliceEffective,) = staking.peData(alice); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobStaked, 100 ether); + assertEq(bobBeneficiary, bob); + assertEq(aliceEffective, 0); + assertEq(bobEffective, 100 ether); + + // Bob fully unstakes + vm.prank(bob); + staking.unstake(uint128(100 ether)); + + (bobStaked, bobBeneficiary) = staking.stakingData(bob); + (bobEffective,) = staking.peData(bob); + assertEq(bobStaked, 0); + assertEq(bobBeneficiary, address(0)); + assertEq(bobEffective, 0); + } + + /// @notice Tests that stake to a beneficiary reverts after the beneficiary revokes allowlist. 
+ function test_stake_afterAllowlistRevoked_reverts() external { + vm.prank(alice); + staking.setAllowedStaker(bob, true); + vm.prank(bob); + staking.stake(uint128(100 ether), alice); + + // Alice revokes bob + vm.prank(alice); + staking.setAllowedStaker(bob, false); + + // Bob tries to stake to alice again + vm.prank(bob); + vm.expectRevert(IPolicyEngineStaking.PolicyEngineStaking_NotAllowedToSetBeneficiary.selector); + staking.stake(uint128(50 ether), alice); + } + + /// @notice Tests that lastUpdate is updated after new staking and linking when time advances. + function test_lastUpdate_updatesAfterStakingAndLinking_succeeds() external { + // Initial stake + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + (, uint128 lastUpdate0) = staking.peData(alice); + uint256 ts0 = block.timestamp; + assertEq(lastUpdate0, ts0); + + // Warp time and stake again; lastUpdate should advance + vm.warp(block.timestamp + 1); + vm.prank(alice); + staking.stake(uint128(50 ether), alice); + (, uint128 lastUpdate1) = staking.peData(alice); + assertEq(lastUpdate1, ts0 + 1); + + // Warp time and change beneficiary to bob; bob's lastUpdate should be the new timestamp + vm.warp(block.timestamp + 1); + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(alice); + staking.changeBeneficiary(bob); + (, uint128 bobLastUpdate) = staking.peData(bob); + assertEq(bobLastUpdate, ts0 + 2); + } + + /// @notice Tests that stake after full unstake works (re-entry into system). 
+ function test_stake_afterFullUnstake_succeeds() external { + vm.prank(alice); + staking.stake(uint128(100 ether), alice); + vm.prank(alice); + staking.unstake(uint128(100 ether)); + + (uint128 staked, address beneficiary) = staking.stakingData(alice); + assertEq(staked, 0); + assertEq(beneficiary, address(0)); + + // Re-enter with a different beneficiary + vm.prank(bob); + staking.setAllowedStaker(alice, true); + vm.prank(alice); + staking.stake(uint128(50 ether), bob); + + (staked, beneficiary) = staking.stakingData(alice); + assertEq(staked, 50 ether); + assertEq(beneficiary, bob); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, 50 ether); + } + + /// @notice Tests stake to beneficiary and full unstake preserves staker balance. + function testFuzz_stakeToBeneficiaryAndUnstake_succeeds(uint128 _amount) external { + _amount = uint128(bound(_amount, 1, 1_000 ether)); + + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + uint256 balanceBefore = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + vm.prank(alice); + staking.stake(_amount, bob); + vm.prank(alice); + staking.unstake(_amount); + uint256 balanceAfter = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + + assertEq(balanceAfter, balanceBefore); + (uint128 aliceStaked, address beneficiary) = staking.stakingData(alice); + assertEq(aliceStaked, 0); + assertEq(beneficiary, address(0)); + (uint128 bobEffective,) = staking.peData(bob); + assertEq(bobEffective, 0); + } + + /// @notice Tests stake -> change beneficiary -> unstake full cycle. 
+ function testFuzz_beneficiaryCycle_succeeds(uint128 _amount) external { + _amount = uint128(bound(_amount, 1, 1_000 ether)); + + vm.prank(bob); + staking.setAllowedStaker(alice, true); + + uint256 balanceBefore = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + vm.prank(alice); + staking.stake(_amount, alice); + vm.prank(alice); + staking.changeBeneficiary(bob); + + (uint128 bobEff,) = staking.peData(bob); + assertEq(bobEff, _amount); + + vm.prank(alice); + staking.unstake(_amount); + uint256 balanceAfter = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + + assertEq(balanceAfter, balanceBefore); + } + + /// @notice Tests multiple stake calls and single full unstake. + function testFuzz_multipleStakesAndUnstake_succeeds( + uint128 _amount1, + uint128 _amount2, + uint128 _amount3 + ) + external + { + _amount1 = uint128(bound(_amount1, 1, 300 ether)); + _amount2 = uint128(bound(_amount2, 1, 300 ether)); + _amount3 = uint128(bound(_amount3, 1, 300 ether)); + + uint128 total = _amount1 + _amount2 + _amount3; + + uint256 balanceBefore = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice); + vm.prank(alice); + staking.stake(_amount1, alice); + vm.prank(alice); + staking.stake(_amount2, alice); + vm.prank(alice); + staking.stake(_amount3, alice); + + (uint128 staked,) = staking.stakingData(alice); + (uint128 effective,) = staking.peData(alice); + assertEq(staked, total); + assertEq(effective, total); + + vm.prank(alice); + staking.unstake(total); + assertEq(IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(alice), balanceBefore); + } + + /// @notice Tests stake with different staker-beneficiary pairs. 
+ function testFuzz_stakeToBeneficiaryDifferentAccounts_succeeds( + uint8 _stakerIdx, + uint8 _beneficiaryIdx, + uint128 _amount + ) + external + { + address[] memory accounts = _accounts(); + _stakerIdx = uint8(bound(_stakerIdx, 0, 2)); + _beneficiaryIdx = uint8(bound(_beneficiaryIdx, 0, 2)); + if (_stakerIdx == _beneficiaryIdx) return; // self-attribution, skip + address staker = accounts[_stakerIdx]; + address beneficiary = accounts[_beneficiaryIdx]; + _amount = uint128(bound(_amount, 1, 300 ether)); + + vm.prank(beneficiary); + staking.setAllowedStaker(staker, true); + + uint256 balanceBefore = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(staker); + vm.prank(staker); + staking.stake(_amount, beneficiary); + vm.prank(staker); + staking.unstake(_amount); + uint256 balanceAfter = IERC20(Predeploys.GOVERNANCE_TOKEN).balanceOf(staker); + + assertEq(balanceAfter, balanceBefore); + (uint128 benEffective,) = staking.peData(beneficiary); + assertEq(benEffective, 0); + } + + function _accounts() internal view returns (address[] memory) { + address[] memory a = new address[](3); + a[0] = alice; + a[1] = bob; + a[2] = carol; + return a; + } +} From 94f4d7091da2c3d607c280089d00c826f6bc58df Mon Sep 17 00:00:00 2001 From: George Knee Date: Tue, 24 Feb 2026 16:02:07 +0000 Subject: [PATCH 018/201] op-supernode: prevent hang on shutdown (#19293) * op-supernode: add TestCleanShutdown return from supernode.Start() function without waiting for the context to be cancelled * improve test * pass bg context to supernode start in test * mock runnable activity: calling stop causes start to return this mirrors the interop activity, for example * op-supernode: several improvements to lifecycle management * improve robustness of TestRunnableActivityGating since activities are started async and we don't have a way to wait on them, there is a race betwen start and stop in this test * reinstate fix --- op-devstack/sysgo/l2_cl_supernode.go | 6 +- .../supernode/resources/metrics_service.go | 4 
+- op-supernode/supernode/shutdown_test.go | 68 +++++++++++++++++++ op-supernode/supernode/supernode.go | 30 ++++++-- .../supernode/supernode_activities_test.go | 28 +++++--- 5 files changed, 114 insertions(+), 22 deletions(-) create mode 100644 op-supernode/supernode/shutdown_test.go diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index 9ec7fb7511e13..55bc809879279 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -109,10 +109,8 @@ func (n *SuperNode) Start() { n.sn = sn n.cancel = cancel - // Start Supernode in background - go func() { - _ = n.sn.Start(ctx) - }() + err = n.sn.Start(ctx) + n.p.Require().NoError(err) // Wait for the RPC addr and save userRPC/interop endpoints if addr, err := n.sn.WaitRPCAddr(ctx); err == nil { diff --git a/op-supernode/supernode/resources/metrics_service.go b/op-supernode/supernode/resources/metrics_service.go index 91596c3a0b7fa..b235be5639a3a 100644 --- a/op-supernode/supernode/resources/metrics_service.go +++ b/op-supernode/supernode/resources/metrics_service.go @@ -26,8 +26,9 @@ func NewMetricsService(log gethlog.Logger, listenAddr string, port int, handler // Start begins serving metrics in a background goroutine. If the server exits with an error, // the optional onError callback is invoked. 
-func (s *MetricsService) Start(onError func(error)) { +func (s *MetricsService) Start(onDone func(), onError func(error)) { if s.server == nil { + onDone() return } go func() { @@ -38,6 +39,7 @@ func (s *MetricsService) Start(onError func(error)) { onError(err) } } + onDone() }() } diff --git a/op-supernode/supernode/shutdown_test.go b/op-supernode/supernode/shutdown_test.go new file mode 100644 index 0000000000000..0c3cb1e3d1b33 --- /dev/null +++ b/op-supernode/supernode/shutdown_test.go @@ -0,0 +1,68 @@ +package supernode + +import ( + "context" + "log/slog" + "net/http" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-service/httputil" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/resources" + "github.com/stretchr/testify/require" +) + +// newTestSupernode builds a minimal Supernode wired with a real HTTP server, +// a real metrics server, and the given activities. Both servers bind to +// 127.0.0.1:0 so there are no port conflicts. +func newTestSupernode(t *testing.T, acts []activity.Activity) *Supernode { + t.Helper() + log := testlog.Logger(t, slog.LevelDebug) + + router := resources.NewRouter(log, resources.RouterConfig{}) + httpSrv := httputil.NewHTTPServer("127.0.0.1:0", router) + metrics := resources.NewMetricsService(log, "127.0.0.1", 0, http.NewServeMux()) + + return &Supernode{ + log: log, + version: "test", + chains: nil, + activities: acts, + httpServer: httpSrv, + rpcRouter: router, + metrics: metrics, + } +} + +// TestCleanShutdown starts a supernode with multiple services running — a real HTTP +// server, a real metrics server, a mock activity. +// It then calls Stop() and asserts it returns within a reasonable deadline. +func TestCleanShutdown(t *testing.T) { + t.Parallel() + + const ( + // How long Stop() is allowed to take in total. 
+ // Generous enough for a real graceful shutdown, tight enough to catch a hang. + stopDeadline = 200 * time.Second + ) + + s := newTestSupernode(t, []activity.Activity{&mockRunnable{}}) + + require.NoError(t, s.Start(context.Background())) + + // Run Stop() in a goroutine so we can race it against the deadline. + stopCtx, cancelStop := context.WithTimeout(context.Background(), stopDeadline) + defer cancelStop() + + stopDone := make(chan error, 1) + go func() { stopDone <- s.Stop(context.Background()) }() + + select { + case err := <-stopDone: + require.NoError(t, err) + case <-stopCtx.Done(): + t.Fatalf("Stop() did not return within %s — supernode hung on shutdown ", stopDeadline) + } +} diff --git a/op-supernode/supernode/supernode.go b/op-supernode/supernode/supernode.go index 0785477924489..82e79187a0ba0 100644 --- a/op-supernode/supernode/supernode.go +++ b/op-supernode/supernode/supernode.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net" + "reflect" "strconv" "sync" "time" @@ -142,8 +143,7 @@ func (s *Supernode) Start(ctx context.Context) error { // Start metrics service if s.metrics != nil { s.wg.Add(1) - s.metrics.Start(func(err error) { - defer s.wg.Done() + s.metrics.Start(s.wg.Done, func(err error) { if s.requestStop != nil { s.requestStop(err) } @@ -160,7 +160,16 @@ func (s *Supernode) Start(ctx context.Context) error { s.wg.Add(1) go func(run activity.RunnableActivity) { defer s.wg.Done() - if err := run.Start(ctx); err != nil { + err := run.Start(ctx) + switch err { + case nil: + s.log.Error("activity quit unexpectedly") + case context.Canceled: + // This is the happy path, normal / clean shutdown + s.log.Info("activity closing due to cancelled context") + case context.DeadlineExceeded: + s.log.Warn("activity quit due to deadline exceeded") + default: s.log.Error("error starting runnable activity", "error", err) } }(run) @@ -175,9 +184,7 @@ func (s *Supernode) Start(ctx context.Context) error { } }(chainID, chain) } - <-ctx.Done() - 
s.log.Info("supernode received stop signal") - return ctx.Err() + return nil } func (s *Supernode) Stop(ctx context.Context) error { @@ -190,6 +197,8 @@ func (s *Supernode) Stop(ctx context.Context) error { defer cancel() if err := s.httpServer.Shutdown(shutdownCtx); err != nil { s.log.Error("error shutting down rpc server", "error", err) + } else { + s.log.Info("rpc server stopped") } } if s.metrics != nil { @@ -197,11 +206,15 @@ func (s *Supernode) Stop(ctx context.Context) error { defer cancel() if err := s.metrics.Stop(shutdownCtx); err != nil { s.log.Error("error shutting down metrics server", "error", err) + } else { + s.log.Info("metrics server stopped") } } if s.rpcRouter != nil { if err := s.rpcRouter.Close(); err != nil { s.log.Error("error closing rpc router", "error", err) + } else { + s.log.Info("rpc router closed") } } @@ -210,6 +223,8 @@ func (s *Supernode) Stop(ctx context.Context) error { if run, ok := a.(activity.RunnableActivity); ok { if err := run.Stop(ctx); err != nil { s.log.Error("error stopping runnable activity", "error", err) + } else { + s.log.Info("runnable activity stopped", "activity", reflect.TypeOf(a).String()) } } } @@ -217,9 +232,12 @@ func (s *Supernode) Stop(ctx context.Context) error { for chainID, chain := range s.chains { if err := chain.Stop(ctx); err != nil { s.log.Error("error stopping chain container", "chain_id", chainID.String(), "error", err) + } else { + s.log.Info("chain container stopped", "chain_id", chainID.String()) } } + s.log.Info("all chain containers stopped, waiting for goroutines to finish") s.wg.Wait() if s.l1Client != nil { diff --git a/op-supernode/supernode/supernode_activities_test.go b/op-supernode/supernode/supernode_activities_test.go index 29360b36726c2..e795390245216 100644 --- a/op-supernode/supernode/supernode_activities_test.go +++ b/op-supernode/supernode/supernode_activities_test.go @@ -4,6 +4,7 @@ import ( "bytes" "context" "encoding/json" + "log/slog" "net/http" "net/http/httptest" 
"testing" @@ -11,6 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" rpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" gethlog "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" @@ -18,16 +20,25 @@ import ( // mock runnable activity type mockRunnable struct { + ctx context.Context + cancel context.CancelFunc started int stopped int } func (m *mockRunnable) Start(ctx context.Context) error { m.started++ - <-ctx.Done() - return ctx.Err() + m.ctx, m.cancel = context.WithCancel(ctx) + <-m.ctx.Done() + return m.ctx.Err() +} +func (m *mockRunnable) Stop(ctx context.Context) error { + m.stopped++ + if m.cancel != nil { + m.cancel() + } + return nil } -func (m *mockRunnable) Stop(ctx context.Context) error { m.stopped++; return nil } func (m *mockRunnable) Reset(chainID eth.ChainID, timestamp uint64, invalidatedBlock eth.BlockRef) { } @@ -67,7 +78,7 @@ func TestRunnableActivityGating(t *testing.T) { plain := &plainActivity{} s := &Supernode{ - log: gethlog.New(), + log: testlog.Logger(t, slog.LevelDebug), version: "test", chains: nil, activities: []activity.Activity{run, plain}, @@ -76,17 +87,12 @@ func TestRunnableActivityGating(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 150*time.Millisecond) defer cancel() - done := make(chan struct{}) - go func() { _ = s.Start(ctx); close(done) }() - - <-done // wait until context canceled and Start exits - - require.Equal(t, 1, run.started, "runnable activity should be started exactly once") - require.Equal(t, 0, run.stopped, "Stop is invoked during Stop(), not here") + require.NoError(t, s.Start(ctx)) // now stop and ensure Stop was called on runnable activity err := s.Stop(context.Background()) require.NoError(t, err) + require.Equal(t, 1, run.started, "runnable activity should be started exactly once") 
require.Equal(t, 1, run.stopped, "runnable activity should be stopped exactly once") } From a74ab7d2e5fcf079cf827389f5f22405b77c13ec Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Tue, 24 Feb 2026 11:28:11 -0500 Subject: [PATCH 019/201] op-devstack: add capability interfaces for polymorphic lookups (Phase 3) (#18874) Introduce L2ELCapable interface that captures shared behavior across L2ELNode, RollupBoostNode, and OPRBuilderNode without requiring them to share an ID() method signature. This enables polymorphic lookups where code can find any L2 EL-capable component by key+chainID, regardless of concrete type: sequencer, ok := FindL2ELCapableByKey(registry, "sequencer", chainID) Previously this required manual multi-registry lookups checking each type separately. --- op-devstack/stack/capabilities.go | 134 +++++++++++ op-devstack/stack/capabilities_test.go | 312 +++++++++++++++++++++++++ 2 files changed, 446 insertions(+) create mode 100644 op-devstack/stack/capabilities.go create mode 100644 op-devstack/stack/capabilities_test.go diff --git a/op-devstack/stack/capabilities.go b/op-devstack/stack/capabilities.go new file mode 100644 index 0000000000000..7075fa0eabfc1 --- /dev/null +++ b/op-devstack/stack/capabilities.go @@ -0,0 +1,134 @@ +package stack + +import ( + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// Capability interfaces define shared behaviors across component types. +// These enable polymorphic operations without requiring components to +// implement interfaces with incompatible ID() method signatures. +// +// For example, RollupBoostNode and OPRBuilderNode both provide L2 EL +// functionality but can't implement L2ELNode because their ID() methods +// return different types. The L2ELCapable interface captures the shared +// L2 EL behavior, allowing code to work with any L2 EL-like component. 
+ +// L2ELCapable is implemented by any component that provides L2 execution layer functionality. +// This includes L2ELNode, RollupBoostNode, and OPRBuilderNode. +// +// Components implementing this interface can: +// - Execute L2 transactions +// - Provide engine API access for consensus layer integration +type L2ELCapable interface { + L2EthClient() apis.L2EthClient + L2EngineClient() apis.EngineClient + ELNode +} + +// L2ELCapableKinds returns all ComponentKinds that implement L2ELCapable. +func L2ELCapableKinds() []ComponentKind { + return []ComponentKind{ + KindL2ELNode, + KindRollupBoostNode, + KindOPRBuilderNode, + } +} + +// L1ELCapable is implemented by any component that provides L1 execution layer functionality. +type L1ELCapable interface { + ELNode +} + +// L1ELCapableKinds returns all ComponentKinds that implement L1ELCapable. +func L1ELCapableKinds() []ComponentKind { + return []ComponentKind{ + KindL1ELNode, + } +} + +// Verify that expected types implement capability interfaces. +// These are compile-time checks. +var ( + _ L2ELCapable = (L2ELNode)(nil) + _ L2ELCapable = (RollupBoostNode)(nil) + _ L2ELCapable = (OPRBuilderNode)(nil) +) + +// Registry helper functions for capability-based lookups. + +// RegistryFindByCapability returns all components that implement the given capability interface. +// This iterates over all components and performs a type assertion. +func RegistryFindByCapability[T any](r *Registry) []T { + var result []T + r.Range(func(id ComponentID, component any) bool { + if capable, ok := component.(T); ok { + result = append(result, capable) + } + return true + }) + return result +} + +// RegistryFindByCapabilityOnChain returns all components on a specific chain +// that implement the given capability interface. 
+func RegistryFindByCapabilityOnChain[T any](r *Registry, chainID eth.ChainID) []T { + var result []T + r.RangeByChainID(chainID, func(id ComponentID, component any) bool { + if capable, ok := component.(T); ok { + result = append(result, capable) + } + return true + }) + return result +} + +// RegistryFindByKinds returns all components of the specified kinds. +// This is useful when you know which kinds implement a capability. +func RegistryFindByKinds(r *Registry, kinds []ComponentKind) []any { + var result []any + for _, kind := range kinds { + result = append(result, r.GetByKind(kind)...) + } + return result +} + +// RegistryFindByKindsTyped returns all components of the specified kinds, +// cast to the expected type. Components that don't match are skipped. +func RegistryFindByKindsTyped[T any](r *Registry, kinds []ComponentKind) []T { + var result []T + for _, kind := range kinds { + for _, component := range r.GetByKind(kind) { + if typed, ok := component.(T); ok { + result = append(result, typed) + } + } + } + return result +} + +// FindL2ELCapable returns all L2 EL-capable components in the registry. +// This is a convenience function that finds L2ELNode, RollupBoostNode, and OPRBuilderNode. +func FindL2ELCapable(r *Registry) []L2ELCapable { + return RegistryFindByKindsTyped[L2ELCapable](r, L2ELCapableKinds()) +} + +// FindL2ELCapableOnChain returns all L2 EL-capable components on a specific chain. +func FindL2ELCapableOnChain(r *Registry, chainID eth.ChainID) []L2ELCapable { + return RegistryFindByCapabilityOnChain[L2ELCapable](r, chainID) +} + +// FindL2ELCapableByKey returns the first L2 EL-capable component with the given key and chainID. +// This enables the polymorphic lookup pattern where you want to find a node by key +// regardless of whether it's an L2ELNode, RollupBoostNode, or OPRBuilderNode. 
+func FindL2ELCapableByKey(r *Registry, key string, chainID eth.ChainID) (L2ELCapable, bool) { + for _, kind := range L2ELCapableKinds() { + id := NewComponentID(kind, key, chainID) + if component, ok := r.Get(id); ok { + if capable, ok := component.(L2ELCapable); ok { + return capable, true + } + } + } + return nil, false +} diff --git a/op-devstack/stack/capabilities_test.go b/op-devstack/stack/capabilities_test.go new file mode 100644 index 0000000000000..b69758a0b9a99 --- /dev/null +++ b/op-devstack/stack/capabilities_test.go @@ -0,0 +1,312 @@ +package stack + +import ( + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" +) + +// Mock implementations for testing capabilities + +type mockELNode struct { + chainID eth.ChainID +} + +func (m *mockELNode) T() devtest.T { return nil } +func (m *mockELNode) Logger() log.Logger { return nil } +func (m *mockELNode) Label(key string) string { return "" } +func (m *mockELNode) SetLabel(key, value string) {} +func (m *mockELNode) ChainID() eth.ChainID { return m.chainID } +func (m *mockELNode) EthClient() apis.EthClient { return nil } +func (m *mockELNode) TransactionTimeout() time.Duration { return 0 } + +type mockL2ELNode struct { + mockELNode + id L2ELNodeID +} + +func (m *mockL2ELNode) ID() L2ELNodeID { return m.id } +func (m *mockL2ELNode) L2EthClient() apis.L2EthClient { return nil } +func (m *mockL2ELNode) L2EngineClient() apis.EngineClient { return nil } +func (m *mockL2ELNode) RegistryID() ComponentID { return ConvertL2ELNodeID(m.id).ComponentID } + +var _ L2ELNode = (*mockL2ELNode)(nil) +var _ L2ELCapable = (*mockL2ELNode)(nil) +var _ Registrable = (*mockL2ELNode)(nil) + +type mockRollupBoostNode struct { + mockELNode + id 
RollupBoostNodeID +} + +func (m *mockRollupBoostNode) ID() RollupBoostNodeID { return m.id } +func (m *mockRollupBoostNode) L2EthClient() apis.L2EthClient { return nil } +func (m *mockRollupBoostNode) L2EngineClient() apis.EngineClient { return nil } +func (m *mockRollupBoostNode) FlashblocksClient() *client.WSClient { return nil } +func (m *mockRollupBoostNode) RegistryID() ComponentID { + return ConvertRollupBoostNodeID(m.id).ComponentID +} + +var _ RollupBoostNode = (*mockRollupBoostNode)(nil) +var _ L2ELCapable = (*mockRollupBoostNode)(nil) +var _ Registrable = (*mockRollupBoostNode)(nil) + +type mockOPRBuilderNode struct { + mockELNode + id OPRBuilderNodeID +} + +func (m *mockOPRBuilderNode) ID() OPRBuilderNodeID { return m.id } +func (m *mockOPRBuilderNode) L2EthClient() apis.L2EthClient { return nil } +func (m *mockOPRBuilderNode) L2EngineClient() apis.EngineClient { return nil } +func (m *mockOPRBuilderNode) FlashblocksClient() *client.WSClient { return nil } +func (m *mockOPRBuilderNode) RegistryID() ComponentID { + return ConvertOPRBuilderNodeID(m.id).ComponentID +} + +var _ OPRBuilderNode = (*mockOPRBuilderNode)(nil) +var _ L2ELCapable = (*mockOPRBuilderNode)(nil) +var _ Registrable = (*mockOPRBuilderNode)(nil) + +func TestL2ELCapableKinds(t *testing.T) { + kinds := L2ELCapableKinds() + require.Len(t, kinds, 3) + require.Contains(t, kinds, KindL2ELNode) + require.Contains(t, kinds, KindRollupBoostNode) + require.Contains(t, kinds, KindOPRBuilderNode) +} + +func TestRegistryFindByCapability(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + // Register different L2 EL-capable nodes + l2el := &mockL2ELNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewL2ELNodeID("sequencer", chainID), + } + rollupBoost := &mockRollupBoostNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewRollupBoostNodeID("boost", chainID), + } + oprBuilder := &mockOPRBuilderNode{ + mockELNode: mockELNode{chainID: chainID}, + id: 
NewOPRBuilderNodeID("builder", chainID), + } + + r.RegisterComponent(l2el) + r.RegisterComponent(rollupBoost) + r.RegisterComponent(oprBuilder) + + // Also register a non-L2EL component + r.Register(NewComponentID(KindL2Batcher, "batcher", chainID), "not-l2el-capable") + + // Find all L2ELCapable + capable := RegistryFindByCapability[L2ELCapable](r) + require.Len(t, capable, 3) +} + +func TestRegistryFindByCapabilityOnChain(t *testing.T) { + r := NewRegistry() + + chainID1 := eth.ChainIDFromUInt64(420) + chainID2 := eth.ChainIDFromUInt64(421) + + // Nodes on chain 420 + l2el1 := &mockL2ELNode{ + mockELNode: mockELNode{chainID: chainID1}, + id: NewL2ELNodeID("sequencer", chainID1), + } + rollupBoost1 := &mockRollupBoostNode{ + mockELNode: mockELNode{chainID: chainID1}, + id: NewRollupBoostNodeID("boost", chainID1), + } + + // Node on chain 421 + l2el2 := &mockL2ELNode{ + mockELNode: mockELNode{chainID: chainID2}, + id: NewL2ELNodeID("sequencer", chainID2), + } + + r.RegisterComponent(l2el1) + r.RegisterComponent(rollupBoost1) + r.RegisterComponent(l2el2) + + // Find on chain 420 + chain420 := RegistryFindByCapabilityOnChain[L2ELCapable](r, chainID1) + require.Len(t, chain420, 2) + + // Find on chain 421 + chain421 := RegistryFindByCapabilityOnChain[L2ELCapable](r, chainID2) + require.Len(t, chain421, 1) +} + +func TestFindL2ELCapable(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + l2el := &mockL2ELNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewL2ELNodeID("sequencer", chainID), + } + rollupBoost := &mockRollupBoostNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewRollupBoostNodeID("boost", chainID), + } + + r.RegisterComponent(l2el) + r.RegisterComponent(rollupBoost) + + capable := FindL2ELCapable(r) + require.Len(t, capable, 2) +} + +func TestFindL2ELCapableOnChain(t *testing.T) { + r := NewRegistry() + + chainID1 := eth.ChainIDFromUInt64(420) + chainID2 := eth.ChainIDFromUInt64(421) + + l2el1 := &mockL2ELNode{ 
+ mockELNode: mockELNode{chainID: chainID1}, + id: NewL2ELNodeID("sequencer", chainID1), + } + l2el2 := &mockL2ELNode{ + mockELNode: mockELNode{chainID: chainID2}, + id: NewL2ELNodeID("sequencer", chainID2), + } + + r.RegisterComponent(l2el1) + r.RegisterComponent(l2el2) + + chain420 := FindL2ELCapableOnChain(r, chainID1) + require.Len(t, chain420, 1) + require.Equal(t, chainID1, chain420[0].ChainID()) +} + +func TestFindL2ELCapableByKey(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + // Register a RollupBoostNode with key "sequencer" + rollupBoost := &mockRollupBoostNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewRollupBoostNodeID("sequencer", chainID), + } + r.RegisterComponent(rollupBoost) + + // Should find it by key, even though it's not an L2ELNode + found, ok := FindL2ELCapableByKey(r, "sequencer", chainID) + require.True(t, ok) + require.NotNil(t, found) + require.Equal(t, chainID, found.ChainID()) + + // Should not find non-existent key + _, ok = FindL2ELCapableByKey(r, "nonexistent", chainID) + require.False(t, ok) +} + +func TestFindL2ELCapableByKey_PrefersL2ELNode(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + // Register both L2ELNode and RollupBoostNode with same key + l2el := &mockL2ELNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewL2ELNodeID("sequencer", chainID), + } + rollupBoost := &mockRollupBoostNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewRollupBoostNodeID("sequencer", chainID), + } + + r.RegisterComponent(l2el) + r.RegisterComponent(rollupBoost) + + // Should find L2ELNode first (it's first in L2ELCapableKinds) + found, ok := FindL2ELCapableByKey(r, "sequencer", chainID) + require.True(t, ok) + // Verify it's the L2ELNode by checking it's the right mock type + _, isL2EL := found.(*mockL2ELNode) + require.True(t, isL2EL, "expected to find L2ELNode first") +} + +func TestRegistryFindByKindsTyped(t *testing.T) { + r := NewRegistry() + + 
chainID := eth.ChainIDFromUInt64(420) + + l2el := &mockL2ELNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewL2ELNodeID("sequencer", chainID), + } + rollupBoost := &mockRollupBoostNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewRollupBoostNodeID("boost", chainID), + } + + r.RegisterComponent(l2el) + r.RegisterComponent(rollupBoost) + + // Find only L2ELNode kind + l2els := RegistryFindByKindsTyped[L2ELCapable](r, []ComponentKind{KindL2ELNode}) + require.Len(t, l2els, 1) + + // Find both kinds + both := RegistryFindByKindsTyped[L2ELCapable](r, []ComponentKind{KindL2ELNode, KindRollupBoostNode}) + require.Len(t, both, 2) +} + +// TestPolymorphicLookupScenario demonstrates the polymorphic lookup use case +// that Phase 3 is designed to solve. +func TestPolymorphicLookupScenario(t *testing.T) { + r := NewRegistry() + + chainID := eth.ChainIDFromUInt64(420) + + // Scenario: A test wants to find an L2 EL node by key "sequencer" + // The actual node could be L2ELNode, RollupBoostNode, or OPRBuilderNode + // depending on the test configuration. + + // Configuration 1: Using RollupBoost + rollupBoost := &mockRollupBoostNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewRollupBoostNodeID("sequencer", chainID), + } + r.RegisterComponent(rollupBoost) + + // The polymorphic lookup finds the sequencer regardless of its concrete type + sequencer, ok := FindL2ELCapableByKey(r, "sequencer", chainID) + require.True(t, ok) + require.NotNil(t, sequencer) + + // Can use it as L2ELCapable + require.Equal(t, chainID, sequencer.ChainID()) + // Could call sequencer.L2EthClient(), sequencer.L2EngineClient(), etc. 
+ + // Clear and try with OPRBuilder + r.Clear() + + oprBuilder := &mockOPRBuilderNode{ + mockELNode: mockELNode{chainID: chainID}, + id: NewOPRBuilderNodeID("sequencer", chainID), + } + r.RegisterComponent(oprBuilder) + + // Same lookup code works + sequencer, ok = FindL2ELCapableByKey(r, "sequencer", chainID) + require.True(t, ok) + require.NotNil(t, sequencer) + require.Equal(t, chainID, sequencer.ChainID()) +} From f6576dbe811744b3bd4ebd5c99c674b256df329e Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Tue, 24 Feb 2026 11:22:09 -0600 Subject: [PATCH 020/201] Raise TestSupernodeInteropActivationAfterGenesis timeout to 5min (#19297) --- .../interop/activation/activation_after_genesis_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go b/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go index 305763e25c2bd..b0395bfc52121 100644 --- a/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go +++ b/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go @@ -68,7 +68,7 @@ func TestSupernodeInteropActivationAfterGenesis(gt *testing.T) { ) return preVerified && postVerified - }, 90*time.Second, time.Second, "both pre and post activation timestamps should be verified") + }, 300*time.Second, time.Second, "both pre and post activation timestamps should be verified") t.Logger().Info("activation boundary test complete", "pre_activation_ts", preActivationTs, From c0a3f237525109692638cdf47d2dd0be19917cf2 Mon Sep 17 00:00:00 2001 From: Inphi Date: Tue, 24 Feb 2026 12:41:15 -0500 Subject: [PATCH 021/201] proofs: Add consolidation step coverage to super fault proof tests (#19296) --- .../tests/superfaultproofs/superfaultproofs.go | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go 
b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go index c84d099ee1796..11e1fd3ccea0e 100644 --- a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go +++ b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go @@ -366,6 +366,24 @@ func buildTransitionTests( ClaimTimestamp: endTimestamp, ExpectValid: true, }, + { + Name: "ConsolidateStep", + AgreedClaim: padding(consolidateStep), + DisputedClaim: end.Marshal(), + DisputedTraceIndex: consolidateStep, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: true, + }, + { + Name: "ConsolidateStep-InvalidNoChange", + AgreedClaim: padding(consolidateStep), + DisputedClaim: padding(consolidateStep), + DisputedTraceIndex: consolidateStep, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: false, + }, } } From 45927332696d9ecc6d2e881e748a9c1fd83cff31 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Tue, 24 Feb 2026 13:06:11 -0500 Subject: [PATCH 022/201] Add Karst hard fork activation (#19250) * feat: add Karst hard fork activation Adds the forking logic for the Karst network upgrade, following the same pattern as the Jovian activation (PR #13722). * feat: Update to op-geth with karst fork * fix: add Karst to genesis allocs, deploy config, and fork numbering Fixes interop test failures caused by Solidity Fork enum and Go SolidityForkNumber being out of sync after Karst addition. * fix: enable KarstTime in applyHardforks op-geth now includes KarstTime in HardforkConfig, so the TODO guard is no longer needed. * fix: add Karst to deploy config test fork overrides The fork ordering validation requires karst before interop. * fix: exclude Karst from upgrade-tx batch test Karst has no upgrade deposit transactions, so user txs in its activation block should not be rejected. * fix: add Karst to remaining e2e and op-wheel files Cover the remaining files that had Jovian entries but were missing Karst equivalents. 
--- go.mod | 2 +- go.sum | 4 ++-- op-chain-ops/genesis/config.go | 12 ++++++++++ op-chain-ops/genesis/config_test.go | 3 ++- op-chain-ops/genesis/genesis.go | 1 + op-chain-ops/genesis/layer_two.go | 1 + op-chain-ops/interopgen/recipe.go | 1 + op-chain-ops/script/script.go | 1 + op-core/forks/forks.go | 2 ++ .../pkg/deployer/state/deploy_config_test.go | 6 +++-- op-e2e/actions/upgrades/helpers/config.go | 9 ++++++++ op-e2e/e2eutils/setup.go | 6 ++++- op-e2e/system/e2esys/setup.go | 10 ++++++++- op-node/rollup/chain_spec.go | 5 +++++ op-node/rollup/chain_spec_test.go | 13 ++++++++--- op-node/rollup/derive/batches.go | 1 + op-node/rollup/superchain.go | 1 + op-node/rollup/types.go | 22 +++++++++++++++++++ op-node/rollup/types_test.go | 9 ++++++-- op-wheel/commands.go | 1 + .../contracts-bedrock/scripts/L2Genesis.s.sol | 4 ++++ .../scripts/deploy/DeployConfig.s.sol | 6 ++++- .../scripts/libraries/Config.sol | 5 +++++ 23 files changed, 111 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 4ad865b3edde2..6d652a1fe0496 100644 --- a/go.mod +++ b/go.mod @@ -312,7 +312,7 @@ require ( lukechampine.com/blake3 v1.3.0 // indirect ) -replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101608.0-rc.1 +replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101609.1-rc.1 // replace github.com/ethereum/go-ethereum => ../op-geth diff --git a/go.sum b/go.sum index daa03a032419c..bd16e5bd913bd 100644 --- a/go.sum +++ b/go.sum @@ -240,8 +240,8 @@ github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e h1:iy1vBIzACYUyOVyoADUwvAiq2eOPC0yVsDUdolPwQjk= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e/go.mod h1:DYj7+vYJ4cIB7zera9mv4LcAynCL5u4YVfoeUu6Wa+w= 
-github.com/ethereum-optimism/op-geth v1.101608.0-rc.1 h1:UXO6chAeI2/f5V12e4qgp2rXhmmJOKSRO3Zab/i8YNA= -github.com/ethereum-optimism/op-geth v1.101608.0-rc.1/go.mod h1:3YphRrN5/TvRp9VGy5rfA6l6rVR6IAsgSJNPLbIg66E= +github.com/ethereum-optimism/op-geth v1.101609.1-rc.1 h1:r59fw5Qf4XIpPqXqMOyAvxXyqv45OrOXG46ozAPLqz8= +github.com/ethereum-optimism/op-geth v1.101609.1-rc.1/go.mod h1:3YphRrN5/TvRp9VGy5rfA6l6rVR6IAsgSJNPLbIg66E= github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e h1:TO1tUcwbhIrNuea/LCsQJSQ5HDWCHdrzT/5MLC1aIU4= github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e/go.mod h1:NZ816PzLU1TLv1RdAvYAb6KWOj4Zm5aInT0YpDVml2Y= github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= diff --git a/op-chain-ops/genesis/config.go b/op-chain-ops/genesis/config.go index c835e437f1596..89e1ca9399bc5 100644 --- a/op-chain-ops/genesis/config.go +++ b/op-chain-ops/genesis/config.go @@ -407,6 +407,9 @@ type UpgradeScheduleDeployConfig struct { // L2GenesisJovianTimeOffset is the number of seconds after genesis block that the Jovian hard fork activates. // Set it to 0 to activate at genesis. Nil to disable Jovian. L2GenesisJovianTimeOffset *hexutil.Uint64 `json:"l2GenesisJovianTimeOffset,omitempty"` + // L2GenesisKarstTimeOffset is the number of seconds after genesis block that the Karst hard fork activates. + // Set it to 0 to activate at genesis. Nil to disable Karst. + L2GenesisKarstTimeOffset *hexutil.Uint64 `json:"l2GenesisKarstTimeOffset,omitempty"` // L2GenesisInteropTimeOffset is the number of seconds after genesis block that the Interop hard fork activates. // Set it to 0 to activate at genesis. Nil to disable Interop. 
L2GenesisInteropTimeOffset *hexutil.Uint64 `json:"l2GenesisInteropTimeOffset,omitempty"` @@ -468,6 +471,8 @@ func (d *UpgradeScheduleDeployConfig) ForkTimeOffset(fork rollup.ForkName) *uint return (*uint64)(d.L2GenesisIsthmusTimeOffset) case forks.Jovian: return (*uint64)(d.L2GenesisJovianTimeOffset) + case forks.Karst: + return (*uint64)(d.L2GenesisKarstTimeOffset) case forks.Interop: return (*uint64)(d.L2GenesisInteropTimeOffset) default: @@ -495,6 +500,8 @@ func (d *UpgradeScheduleDeployConfig) SetForkTimeOffset(fork rollup.ForkName, of d.L2GenesisIsthmusTimeOffset = (*hexutil.Uint64)(offset) case forks.Jovian: d.L2GenesisJovianTimeOffset = (*hexutil.Uint64)(offset) + case forks.Karst: + d.L2GenesisKarstTimeOffset = (*hexutil.Uint64)(offset) case forks.Interop: d.L2GenesisInteropTimeOffset = (*hexutil.Uint64)(offset) default: @@ -571,6 +578,10 @@ func (d *UpgradeScheduleDeployConfig) JovianTime(genesisTime uint64) *uint64 { return offsetToUpgradeTime(d.L2GenesisJovianTimeOffset, genesisTime) } +func (d *UpgradeScheduleDeployConfig) KarstTime(genesisTime uint64) *uint64 { + return offsetToUpgradeTime(d.L2GenesisKarstTimeOffset, genesisTime) +} + func (d *UpgradeScheduleDeployConfig) InteropTime(genesisTime uint64) *uint64 { return offsetToUpgradeTime(d.L2GenesisInteropTimeOffset, genesisTime) } @@ -605,6 +616,7 @@ func (d *UpgradeScheduleDeployConfig) forks() []Fork { {L2GenesisTimeOffset: d.L2GenesisHoloceneTimeOffset, Name: string(L2AllocsHolocene)}, {L2GenesisTimeOffset: d.L2GenesisIsthmusTimeOffset, Name: string(L2AllocsIsthmus)}, {L2GenesisTimeOffset: d.L2GenesisJovianTimeOffset, Name: string(L2AllocsJovian)}, + {L2GenesisTimeOffset: d.L2GenesisKarstTimeOffset, Name: string(L2AllocsKarst)}, {L2GenesisTimeOffset: d.L2GenesisInteropTimeOffset, Name: string(L2AllocsInterop)}, } } diff --git a/op-chain-ops/genesis/config_test.go b/op-chain-ops/genesis/config_test.go index 98fe0e1f9e2e3..dedd1a3e8e097 100644 --- a/op-chain-ops/genesis/config_test.go +++ 
b/op-chain-ops/genesis/config_test.go @@ -209,7 +209,8 @@ func TestUpgradeScheduleDeployConfig_SolidityForkNumber(t *testing.T) { {forks.Holocene, 5}, {forks.Isthmus, 6}, {forks.Jovian, 7}, - {forks.Interop, 8}, + {forks.Karst, 8}, + {forks.Interop, 9}, } for _, tt := range tests { var d UpgradeScheduleDeployConfig diff --git a/op-chain-ops/genesis/genesis.go b/op-chain-ops/genesis/genesis.go index e89d2e26936eb..082a0a831f624 100644 --- a/op-chain-ops/genesis/genesis.go +++ b/op-chain-ops/genesis/genesis.go @@ -76,6 +76,7 @@ func NewL2Genesis(config *DeployConfig, l1StartHeader *eth.BlockRef) (*core.Gene HoloceneTime: config.HoloceneTime(l1StartTime), IsthmusTime: config.IsthmusTime(l1StartTime), JovianTime: config.JovianTime(l1StartTime), + KarstTime: config.KarstTime(l1StartTime), PragueTime: config.IsthmusTime(l1StartTime), InteropTime: config.InteropTime(l1StartTime), Optimism: ¶ms.OptimismConfig{ diff --git a/op-chain-ops/genesis/layer_two.go b/op-chain-ops/genesis/layer_two.go index 2018159645163..2f8d88c09a24e 100644 --- a/op-chain-ops/genesis/layer_two.go +++ b/op-chain-ops/genesis/layer_two.go @@ -30,6 +30,7 @@ const ( L2AllocsHolocene L2AllocsMode = "holocene" L2AllocsIsthmus L2AllocsMode = "isthmus" L2AllocsJovian L2AllocsMode = "jovian" + L2AllocsKarst L2AllocsMode = "karst" L2AllocsInterop L2AllocsMode = "interop" ) diff --git a/op-chain-ops/interopgen/recipe.go b/op-chain-ops/interopgen/recipe.go index 9a40618f72e51..1cbb651a645cb 100644 --- a/op-chain-ops/interopgen/recipe.go +++ b/op-chain-ops/interopgen/recipe.go @@ -274,6 +274,7 @@ func (r *InteropDevL2Recipe) build(l1ChainID uint64, addrs devkeys.Addresses) (* L2GenesisHoloceneTimeOffset: new(hexutil.Uint64), L2GenesisIsthmusTimeOffset: new(hexutil.Uint64), L2GenesisJovianTimeOffset: new(hexutil.Uint64), + L2GenesisKarstTimeOffset: new(hexutil.Uint64), L2GenesisInteropTimeOffset: (*hexutil.Uint64)(&r.InteropOffset), L1CancunTimeOffset: new(hexutil.Uint64), L1PragueTimeOffset: 
new(hexutil.Uint64), diff --git a/op-chain-ops/script/script.go b/op-chain-ops/script/script.go index d30718f6b063e..aacf8299aa58b 100644 --- a/op-chain-ops/script/script.go +++ b/op-chain-ops/script/script.go @@ -241,6 +241,7 @@ func NewHost( GraniteTime: nil, HoloceneTime: nil, JovianTime: nil, + KarstTime: nil, InteropTime: nil, Optimism: nil, } diff --git a/op-core/forks/forks.go b/op-core/forks/forks.go index 6267734cb990b..c80902b577e4f 100644 --- a/op-core/forks/forks.go +++ b/op-core/forks/forks.go @@ -16,6 +16,7 @@ const ( Holocene Name = "holocene" Isthmus Name = "isthmus" Jovian Name = "jovian" + Karst Name = "karst" Interop Name = "interop" // ADD NEW MAINLINE FORKS TO [All] BELOW! @@ -37,6 +38,7 @@ var All = []Name{ Holocene, Isthmus, Jovian, + Karst, Interop, // ADD NEW MAINLINE FORKS HERE! } diff --git a/op-deployer/pkg/deployer/state/deploy_config_test.go b/op-deployer/pkg/deployer/state/deploy_config_test.go index 81c56957929a3..1fe296f333833 100644 --- a/op-deployer/pkg/deployer/state/deploy_config_test.go +++ b/op-deployer/pkg/deployer/state/deploy_config_test.go @@ -50,7 +50,8 @@ func TestCombineDeployConfig(t *testing.T) { "l2GenesisHoloceneTimeOffset": "0x3", "l2GenesisIsthmusTimeOffset": "0x4", "l2GenesisJovianTimeOffset": "0x5", - "l2GenesisInteropTimeOffset": "0x6", + "l2GenesisKarstTimeOffset": "0x6", + "l2GenesisInteropTimeOffset": "0x7", } out, err := CombineDeployConfig(&intent, &chainIntent, &state, &chainState) @@ -60,5 +61,6 @@ func TestCombineDeployConfig(t *testing.T) { require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisHoloceneTimeOffset, hexutil.Uint64(3)) require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisIsthmusTimeOffset, hexutil.Uint64(4)) require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisJovianTimeOffset, hexutil.Uint64(5)) - require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisInteropTimeOffset, 
hexutil.Uint64(6)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisKarstTimeOffset, hexutil.Uint64(6)) + require.Equal(t, *out.L2InitializationConfig.UpgradeScheduleDeployConfig.L2GenesisInteropTimeOffset, hexutil.Uint64(7)) } diff --git a/op-e2e/actions/upgrades/helpers/config.go b/op-e2e/actions/upgrades/helpers/config.go index dcf6070f6a6a9..575e1ed3a1f0c 100644 --- a/op-e2e/actions/upgrades/helpers/config.go +++ b/op-e2e/actions/upgrades/helpers/config.go @@ -61,4 +61,13 @@ func ApplyDeltaTimeOffset(dp *e2eutils.DeployParams, deltaTimeOffset *hexutil.Ui dp.DeployConfig.L2GenesisJovianTimeOffset = deltaTimeOffset } } + + // configure Karst to not be before Delta accidentally + if dp.DeployConfig.L2GenesisKarstTimeOffset != nil { + if deltaTimeOffset == nil { + dp.DeployConfig.L2GenesisKarstTimeOffset = nil + } else if *dp.DeployConfig.L2GenesisKarstTimeOffset < *deltaTimeOffset { + dp.DeployConfig.L2GenesisKarstTimeOffset = deltaTimeOffset + } + } } diff --git a/op-e2e/e2eutils/setup.go b/op-e2e/e2eutils/setup.go index d46831b63ef5a..5c30d864ae3d1 100644 --- a/op-e2e/e2eutils/setup.go +++ b/op-e2e/e2eutils/setup.go @@ -249,7 +249,8 @@ func SystemConfigFromDeployConfig(deployConfig *genesis.DeployConfig) eth.System } func ApplyDeployConfigForks(deployConfig *genesis.DeployConfig) { - isJovian := os.Getenv("OP_E2E_USE_JOVIAN") == "true" + isKarst := os.Getenv("OP_E2E_USE_KARST") == "true" + isJovian := isKarst || os.Getenv("OP_E2E_USE_JOVIAN") == "true" isIsthmus := isJovian || os.Getenv("OP_E2E_USE_ISTHMUS") == "true" isHolocene := isIsthmus || os.Getenv("OP_E2E_USE_HOLOCENE") == "true" isGranite := isHolocene || os.Getenv("OP_E2E_USE_GRANITE") == "true" @@ -277,6 +278,9 @@ func ApplyDeployConfigForks(deployConfig *genesis.DeployConfig) { if isJovian { deployConfig.L2GenesisJovianTimeOffset = new(hexutil.Uint64) } + if isKarst { + deployConfig.L2GenesisKarstTimeOffset = new(hexutil.Uint64) + } // Canyon and lower is activated 
by default deployConfig.L2GenesisCanyonTimeOffset = new(hexutil.Uint64) deployConfig.L2GenesisRegolithTimeOffset = new(hexutil.Uint64) diff --git a/op-e2e/system/e2esys/setup.go b/op-e2e/system/e2esys/setup.go index b6011527f03e6..ca5e7783cd59f 100644 --- a/op-e2e/system/e2esys/setup.go +++ b/op-e2e/system/e2esys/setup.go @@ -116,7 +116,7 @@ func DefaultSystemConfig(t testing.TB, opts ...SystemConfigOpt) SystemConfig { secrets := secrets.DefaultSecrets deployConfig := config.DeployConfig(sco.AllocType) - require.Nil(t, deployConfig.L2GenesisJovianTimeOffset, "jovian not supported yet") + require.Nil(t, deployConfig.L2GenesisKarstTimeOffset, "karst not supported yet") deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) e2eutils.ApplyDeployConfigForks(deployConfig) require.NoError(t, deployConfig.Check(testlog.Logger(t, log.LevelInfo)), @@ -209,6 +209,7 @@ func RegolithSystemConfig(t *testing.T, regolithTimeOffset *hexutil.Uint64, opts cfg.DeployConfig.L2GenesisHoloceneTimeOffset = nil cfg.DeployConfig.L2GenesisIsthmusTimeOffset = nil cfg.DeployConfig.L2GenesisJovianTimeOffset = nil + cfg.DeployConfig.L2GenesisKarstTimeOffset = nil // ADD NEW FORKS HERE! return cfg } @@ -264,6 +265,12 @@ func JovianSystemConfig(t *testing.T, jovianTimeOffset *hexutil.Uint64, opts ... return cfg } +func KarstSystemConfig(t *testing.T, karstTimeOffset *hexutil.Uint64, opts ...SystemConfigOpt) SystemConfig { + cfg := JovianSystemConfig(t, &genesisTime, opts...) 
+ cfg.DeployConfig.L2GenesisKarstTimeOffset = karstTimeOffset + return cfg +} + func writeDefaultJWT(t testing.TB) string { // Sadly the geth node config cannot load JWT secret from memory, it has to be a file jwtPath := path.Join(t.TempDir(), "jwt_secret") @@ -718,6 +725,7 @@ func (cfg SystemConfig) Start(t *testing.T, startOpts ...StartOption) (*System, PectraBlobScheduleTime: cfg.DeployConfig.PectraBlobScheduleTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), IsthmusTime: cfg.DeployConfig.IsthmusTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), JovianTime: cfg.DeployConfig.JovianTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), + KarstTime: cfg.DeployConfig.KarstTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), InteropTime: cfg.DeployConfig.InteropTime(uint64(cfg.DeployConfig.L1GenesisBlockTimestamp)), ProtocolVersionsAddress: cfg.L1Deployments.ProtocolVersionsProxy, AltDAConfig: rollupAltDAConfig, diff --git a/op-node/rollup/chain_spec.go b/op-node/rollup/chain_spec.go index 9e432e21b71a4..9efe7557f52fb 100644 --- a/op-node/rollup/chain_spec.go +++ b/op-node/rollup/chain_spec.go @@ -145,6 +145,9 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { if s.config.IsJovian(block.Time) { s.currentFork = forks.Jovian } + if s.config.IsKarst(block.Time) { + s.currentFork = forks.Karst + } if s.config.IsInterop(block.Time) { s.currentFork = forks.Interop } @@ -173,6 +176,8 @@ func (s *ChainSpec) CheckForkActivation(log log.Logger, block eth.L2BlockRef) { foundActivationBlock = s.config.IsIsthmusActivationBlock(block.Time) case forks.Jovian: foundActivationBlock = s.config.IsJovianActivationBlock(block.Time) + case forks.Karst: + foundActivationBlock = s.config.IsKarstActivationBlock(block.Time) case forks.Interop: foundActivationBlock = s.config.IsInteropActivationBlock(block.Time) } diff --git a/op-node/rollup/chain_spec_test.go b/op-node/rollup/chain_spec_test.go index 8e488665d15fd..686e08675b870 100644 --- 
a/op-node/rollup/chain_spec_test.go +++ b/op-node/rollup/chain_spec_test.go @@ -49,7 +49,8 @@ var testConfig = Config{ HoloceneTime: u64ptr(70), IsthmusTime: u64ptr(80), JovianTime: u64ptr(90), - InteropTime: u64ptr(100), + KarstTime: u64ptr(100), + InteropTime: u64ptr(110), BatchInboxAddress: common.HexToAddress("0xff00000000000000000000000000000000000010"), DepositContractAddress: common.HexToAddress("0xbEb5Fc579115071764c7423A4f12eDde41f106Ed"), L1SystemConfigAddress: common.HexToAddress("0x229047fed2591dbec1eF1118d64F7aF3dB9EB290"), @@ -199,15 +200,21 @@ func TestCheckForkActivation(t *testing.T) { expectedCurrentFork: forks.Jovian, expectedLog: "Detected hardfork activation block", }, + { + name: "Karst activation", + block: eth.L2BlockRef{Time: 100, Number: 12, Hash: common.Hash{0xc}}, + expectedCurrentFork: forks.Karst, + expectedLog: "Detected hardfork activation block", + }, { name: "Interop activation", - block: eth.L2BlockRef{Time: 100, Number: 11, Hash: common.Hash{0xb}}, + block: eth.L2BlockRef{Time: 110, Number: 13, Hash: common.Hash{0xd}}, expectedCurrentFork: forks.Interop, expectedLog: "Detected hardfork activation block", }, { name: "No more hardforks", - block: eth.L2BlockRef{Time: 700, Number: 12, Hash: common.Hash{0xc}}, + block: eth.L2BlockRef{Time: 700, Number: 14, Hash: common.Hash{0xe}}, expectedCurrentFork: forks.Interop, expectedLog: "", }, diff --git a/op-node/rollup/derive/batches.go b/op-node/rollup/derive/batches.go index 5daa33238cb64..4db6d69f1a9b1 100644 --- a/op-node/rollup/derive/batches.go +++ b/op-node/rollup/derive/batches.go @@ -135,6 +135,7 @@ func checkSingularBatch(cfg *rollup.Config, log log.Logger, l1Blocks []eth.L1Blo // Future forks that contain upgrade transactions must be added here. 
if (cfg.IsJovianActivationBlock(batch.Timestamp) || + cfg.IsKarstActivationBlock(batch.Timestamp) || cfg.IsInteropActivationBlock(batch.Timestamp)) && len(batch.Transactions) > 0 { log.Warn("dropping batch with user transactions in fork activation block") diff --git a/op-node/rollup/superchain.go b/op-node/rollup/superchain.go index ec3da88143097..d8bac5ced28aa 100644 --- a/op-node/rollup/superchain.go +++ b/op-node/rollup/superchain.go @@ -104,4 +104,5 @@ func applyHardforks(cfg *Config, hardforks superchain.HardforkConfig) { cfg.IsthmusTime = hardforks.IsthmusTime cfg.InteropTime = hardforks.InteropTime cfg.JovianTime = hardforks.JovianTime + cfg.KarstTime = hardforks.KarstTime } diff --git a/op-node/rollup/types.go b/op-node/rollup/types.go index 43cc795b9236a..505e18d1a592b 100644 --- a/op-node/rollup/types.go +++ b/op-node/rollup/types.go @@ -129,6 +129,10 @@ type Config struct { // Active if JovianTime != nil && L2 block timestamp >= *JovianTime, inactive otherwise. JovianTime *uint64 `json:"jovian_time,omitempty"` + // KarstTime sets the activation time of the Karst network upgrade. + // Active if KarstTime != nil && L2 block timestamp >= *KarstTime, inactive otherwise. + KarstTime *uint64 `json:"karst_time,omitempty"` + // InteropTime sets the activation time for an experimental feature-set, activated like a hardfork. // Active if InteropTime != nil && L2 block timestamp >= *InteropTime, inactive otherwise. InteropTime *uint64 `json:"interop_time,omitempty"` @@ -482,6 +486,11 @@ func (c *Config) IsJovian(timestamp uint64) bool { return c.IsForkActive(forks.Jovian, timestamp) } +// IsKarst returns true if the Karst hardfork is active at or past the given timestamp. +func (c *Config) IsKarst(timestamp uint64) bool { + return c.IsForkActive(forks.Karst, timestamp) +} + // IsInterop returns true if the Interop hardfork is active at or past the given timestamp. 
func (c *Config) IsInterop(timestamp uint64) bool { return c.IsForkActive(forks.Interop, timestamp) @@ -553,6 +562,14 @@ func (c *Config) IsJovianActivationBlock(l2BlockTime uint64) bool { !c.IsJovian(l2BlockTime-c.BlockTime) } +// IsKarstActivationBlock returns whether the specified block is the first block subject to the +// Karst upgrade. +func (c *Config) IsKarstActivationBlock(l2BlockTime uint64) bool { + return c.IsKarst(l2BlockTime) && + l2BlockTime >= c.BlockTime && + !c.IsKarst(l2BlockTime-c.BlockTime) +} + func (c *Config) IsInteropActivationBlock(l2BlockTime uint64) bool { return c.IsInterop(l2BlockTime) && l2BlockTime >= c.BlockTime && @@ -564,6 +581,8 @@ func (c *Config) ActivationTime(fork ForkName) *uint64 { switch fork { case forks.Interop: return c.InteropTime + case forks.Karst: + return c.KarstTime case forks.Jovian: return c.JovianTime case forks.Isthmus: @@ -597,6 +616,8 @@ func (c *Config) SetActivationTime(fork ForkName, timestamp *uint64) { switch fork { case forks.Interop: c.InteropTime = timestamp + case forks.Karst: + c.KarstTime = timestamp case forks.Jovian: c.JovianTime = timestamp case forks.Isthmus: @@ -842,6 +863,7 @@ func (c *Config) forEachFork(callback func(name string, logName string, time *ui } callback("Isthmus", "isthmus_time", c.IsthmusTime) callback("Jovian", "jovian_time", c.JovianTime) + callback("Karst", "karst_time", c.KarstTime) callback("Interop", "interop_time", c.InteropTime) } diff --git a/op-node/rollup/types_test.go b/op-node/rollup/types_test.go index b1f8efbe52815..be5262eddfc14 100644 --- a/op-node/rollup/types_test.go +++ b/op-node/rollup/types_test.go @@ -206,7 +206,9 @@ func TestRandomConfigDescription(t *testing.T) { config.IsthmusTime = &i j := uint64(1677119342) config.JovianTime = &j - it := uint64(1677119343) + k := uint64(1677119343) + config.KarstTime = &k + it := uint64(1677119344) config.InteropTime = &it out := config.Description(nil) @@ -220,6 +222,7 @@ func TestRandomConfigDescription(t 
*testing.T) { require.Contains(t, out, fmt.Sprintf("Holocene: @ %d ~ ", h)) require.Contains(t, out, fmt.Sprintf("Isthmus: @ %d ~ ", i)) require.Contains(t, out, fmt.Sprintf("Jovian: @ %d ~ ", j)) + require.Contains(t, out, fmt.Sprintf("Karst: @ %d ~ ", k)) require.Contains(t, out, fmt.Sprintf("Interop: @ %d ~ ", it)) }) } @@ -614,7 +617,8 @@ func TestConfig_Check(t *testing.T) { holoceneTime := uint64(7) isthmusTime := uint64(8) jovianTime := uint64(9) - interopTime := uint64(10) + karstTime := uint64(10) + interopTime := uint64(11) cfg.RegolithTime = ®olithTime cfg.CanyonTime = &canyonTime cfg.DeltaTime = &deltaTime @@ -624,6 +628,7 @@ func TestConfig_Check(t *testing.T) { cfg.HoloceneTime = &holoceneTime cfg.IsthmusTime = &isthmusTime cfg.JovianTime = &jovianTime + cfg.KarstTime = &karstTime cfg.InteropTime = &interopTime }, expectedErr: nil, diff --git a/op-wheel/commands.go b/op-wheel/commands.go index 1c032f17f19a6..ee6ba274fd6e4 100644 --- a/op-wheel/commands.go +++ b/op-wheel/commands.go @@ -260,6 +260,7 @@ func rollupFromGethConfig(cfg *params.ChainConfig) *rollup.Config { HoloceneTime: cfg.HoloceneTime, IsthmusTime: cfg.IsthmusTime, JovianTime: cfg.JovianTime, + KarstTime: cfg.KarstTime, InteropTime: cfg.InteropTime, } } diff --git a/packages/contracts-bedrock/scripts/L2Genesis.s.sol b/packages/contracts-bedrock/scripts/L2Genesis.s.sol index 8599dd99230a6..56bfb1402b9b4 100644 --- a/packages/contracts-bedrock/scripts/L2Genesis.s.sol +++ b/packages/contracts-bedrock/scripts/L2Genesis.s.sol @@ -185,6 +185,10 @@ contract L2Genesis is Script { return; } + if (forkEquals(_fork, Fork.KARST)) { + return; + } + if (forkEquals(_fork, Fork.INTEROP)) { return; } diff --git a/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol index 8ef6f83876b78..872a5cefc29da 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol +++ 
b/packages/contracts-bedrock/scripts/deploy/DeployConfig.s.sol @@ -30,6 +30,7 @@ contract DeployConfig is Script { uint256 public l2GenesisGraniteTimeOffset; uint256 public l2GenesisHoloceneTimeOffset; uint256 public l2GenesisJovianTimeOffset; + uint256 public l2GenesisKarstTimeOffset; address public p2pSequencerAddress; address public batchInboxAddress; address public batchSenderAddress; @@ -123,6 +124,7 @@ contract DeployConfig is Script { l2GenesisGraniteTimeOffset = _readOr(_json, "$.l2GenesisGraniteTimeOffset", NULL_OFFSET); l2GenesisHoloceneTimeOffset = _readOr(_json, "$.l2GenesisHoloceneTimeOffset", NULL_OFFSET); l2GenesisJovianTimeOffset = _readOr(_json, "$.l2GenesisJovianTimeOffset", NULL_OFFSET); + l2GenesisKarstTimeOffset = _readOr(_json, "$.l2GenesisKarstTimeOffset", NULL_OFFSET); p2pSequencerAddress = stdJson.readAddress(_json, "$.p2pSequencerAddress"); batchInboxAddress = stdJson.readAddress(_json, "$.batchInboxAddress"); @@ -327,7 +329,9 @@ contract DeployConfig is Script { } function latestGenesisFork() internal view returns (Fork) { - if (l2GenesisJovianTimeOffset == 0) { + if (l2GenesisKarstTimeOffset == 0) { + return Fork.KARST; + } else if (l2GenesisJovianTimeOffset == 0) { return Fork.JOVIAN; } else if (l2GenesisHoloceneTimeOffset == 0) { return Fork.HOLOCENE; diff --git a/packages/contracts-bedrock/scripts/libraries/Config.sol b/packages/contracts-bedrock/scripts/libraries/Config.sol index 6d087b1fc0ddb..4ed50ecb95197 100644 --- a/packages/contracts-bedrock/scripts/libraries/Config.sol +++ b/packages/contracts-bedrock/scripts/libraries/Config.sol @@ -37,6 +37,7 @@ enum Fork { HOLOCENE, ISTHMUS, JOVIAN, + KARST, INTEROP } @@ -60,6 +61,8 @@ library ForkUtils { return "isthmus"; } else if (_fork == Fork.JOVIAN) { return "jovian"; + } else if (_fork == Fork.KARST) { + return "karst"; } else { return "unknown"; } @@ -207,6 +210,8 @@ library Config { return Fork.ISTHMUS; } else if (forkHash == keccak256(bytes("jovian"))) { return Fork.JOVIAN; + } 
else if (forkHash == keccak256(bytes("karst"))) { + return Fork.KARST; } else { revert(string.concat("Config: unknown fork: ", forkStr)); } From 2edb474fb6d97a1206a373b8b4bc61956fac352d Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Tue, 24 Feb 2026 16:47:40 -0800 Subject: [PATCH 023/201] fix: various contracts-bedrock CI issues (#19300) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ci: tag security oncall for contracts failures * fix: solidity interface mismatch * ci: fix store_test_results syntax for contracts jobs Use when: always directly on store_test_results steps instead of wrapping in a conditional block, and broaden path from results.xml to results dir. Co-Authored-By: Claude Sonnet 4.6 * fix(contracts): use correct Foundry env vars for fork RPC retries FORK_RETRIES and FORK_BACKOFF were never consumed by Foundry — the correct env var names are FOUNDRY_FORK_RETRIES and FOUNDRY_FORK_RETRY_BACKOFF. Without these, fork tests had no retry protection against RPC 429 rate limit errors, causing CI flakes. 
Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Sonnet 4.6 --- .circleci/continue/main.yml | 27 ++++++++----------- .../staking/IPolicyEngineStaking.sol | 2 ++ packages/contracts-bedrock/justfile | 4 +-- 3 files changed, 15 insertions(+), 18 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 44ca358d134e6..4e4d24af23a5f 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -1372,11 +1372,9 @@ jobs: FOUNDRY_PROFILE: ci working_directory: packages/contracts-bedrock when: on_fail - - when: - condition: always - steps: - - store_test_results: - path: packages/contracts-bedrock/results/results.xml + - store_test_results: + path: packages/contracts-bedrock/results + when: always - run: name: Lint forge test names command: just lint-forge-tests-check-no-build @@ -1432,11 +1430,9 @@ jobs: key: golang-build-cache-contracts-bedrock-heavy-fuzz-{{ checksum "go.sum" }} paths: - "~/.cache/go-build" - - when: - condition: always - steps: - - store_test_results: - path: packages/contracts-bedrock/results/results.xml + - store_test_results: + path: packages/contracts-bedrock/results + when: always - notify-failures-on-develop # AI Contracts Test Maintenance System @@ -1663,12 +1659,11 @@ jobs: - store_artifacts: path: packages/contracts-bedrock/failed-test-traces.log when: on_fail - - when: - condition: always - steps: - - store_test_results: - path: packages/contracts-bedrock/results/results.xml - - notify-failures-on-develop + - store_test_results: + path: packages/contracts-bedrock/results + when: always + - notify-failures-on-develop: + mentions: "@security-oncall" contracts-bedrock-upload: machine: true diff --git a/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol b/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol index 2fdf901e89501..50a42d481207a 100644 --- 
a/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol +++ b/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol @@ -64,6 +64,8 @@ interface IPolicyEngineStaking is ISemver { /// @notice Thrown when trying to allowlist/disallow yourself. error PolicyEngineStaking_SelfAllowlist(); + function __constructor__(address _ownerAddr, address _token) external; + /// @notice Returns the contract owner. function owner() external view returns (address); diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 5168203a81099..4465b7dcd1711 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -119,8 +119,8 @@ prepare-upgrade-env *ARGS : build-go-ffi export FORK_BLOCK_NUMBER="${FORK_BLOCK_NUMBER:-$pinnedBlock}" echo "Running upgrade tests at block $FORK_BLOCK_NUMBER" export FORK_RPC_URL=$ETH_RPC_URL - export FORK_RETRIES=10 - export FORK_BACKOFF=1000 + export FOUNDRY_FORK_RETRIES=10 + export FOUNDRY_FORK_RETRY_BACKOFF=1000 export FORK_TEST=true {{ARGS}} \ --match-path "test/{L1,dispute,cannon}/**" From 47fafdd3c6449fbcfb62eda5b4ace5b109e89706 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Wed, 25 Feb 2026 10:09:32 -0500 Subject: [PATCH 024/201] refactor(op-devstack): migrate Orchestrator to unified Registry (Phase 4) (#18875) * op-devstack: add capability interfaces for polymorphic lookups (Phase 3) Introduce L2ELCapable interface that captures shared behavior across L2ELNode, RollupBoostNode, and OPRBuilderNode without requiring them to share an ID() method signature. This enables polymorphic lookups where code can find any L2 EL-capable component by key+chainID, regardless of concrete type: sequencer, ok := FindL2ELCapableByKey(registry, "sequencer", chainID) Previously this required manual multi-registry lookups checking each type separately. 
* refactor(op-devstack): migrate Orchestrator to unified Registry (Phase 4) Replace 15 separate locks.RWMap registry fields in Orchestrator with a single unified *stack.Registry. This completes the ID type system refactor by consolidating all component storage into one registry with secondary indexes for efficient lookups by kind and chainID. Key changes: - Remove l1ELs, l1CLs, l1Nets, l2ELs, l2CLs, l2Nets, batchers, proposers, challengers, rollupBoosts, oprbuilderNodes, supervisors, clusters, superchains, and faucets fields from Orchestrator - Add single registry *stack.Registry field - Update GetL2EL to use FindL2ELCapableByKey for polymorphic lookups - Update Hydrate to iterate by kind with explicit ordering - Update ControlPlane methods to use registry lookups - Migrate ~24 files to use registry.Register() and registry.Get() patterns - Change l2MetricsEndpoints from locks.RWMap to map with sync.RWMutex All 54 stack tests pass. * fix(op-devstack): address PR #18875 review feedback --- op-devstack/stack/component_id.go | 25 ++++++ op-devstack/sysgo/add_game_type.go | 35 ++++++-- op-devstack/sysgo/control_plane.go | 30 ++++--- op-devstack/sysgo/deployer.go | 8 +- op-devstack/sysgo/faucet.go | 6 +- op-devstack/sysgo/l1_nodes.go | 19 +++-- op-devstack/sysgo/l1_nodes_subprocess.go | 9 +- op-devstack/sysgo/l2_batcher.go | 20 +++-- op-devstack/sysgo/l2_challenger.go | 31 ++++--- op-devstack/sysgo/l2_cl_kona.go | 19 +++-- op-devstack/sysgo/l2_cl_opnode.go | 19 +++-- op-devstack/sysgo/l2_cl_p2p_util.go | 6 +- op-devstack/sysgo/l2_cl_supernode.go | 28 +++--- op-devstack/sysgo/l2_el.go | 4 +- op-devstack/sysgo/l2_el_opgeth.go | 12 ++- op-devstack/sysgo/l2_el_opreth.go | 12 ++- op-devstack/sysgo/l2_el_synctester.go | 7 +- op-devstack/sysgo/l2_metrics_dashboard.go | 10 +-- .../sysgo/l2_network_superchain_registry.go | 7 +- op-devstack/sysgo/l2_proposer.go | 17 ++-- op-devstack/sysgo/op_rbuilder.go | 5 +- op-devstack/sysgo/orchestrator.go | 85 ++++++++----------- 
op-devstack/sysgo/rollup_boost.go | 7 +- op-devstack/sysgo/superroot.go | 13 +-- op-devstack/sysgo/supervisor.go | 6 +- op-devstack/sysgo/supervisor_kona.go | 12 ++- op-devstack/sysgo/supervisor_op.go | 8 +- op-devstack/sysgo/sync_tester.go | 2 +- op-devstack/sysgo/system_synctester_ext.go | 2 +- op-devstack/sysgo/test_sequencer.go | 16 ++-- 30 files changed, 300 insertions(+), 180 deletions(-) diff --git a/op-devstack/stack/component_id.go b/op-devstack/stack/component_id.go index 03769cee3dda8..fdce22a6b1724 100644 --- a/op-devstack/stack/component_id.go +++ b/op-devstack/stack/component_id.go @@ -37,6 +37,31 @@ const ( KindFlashblocksClient ComponentKind = "FlashblocksWSClient" ) +var hydrationComponentKindOrder = []ComponentKind{ + KindSuperchain, + KindCluster, + KindL1Network, + KindL2Network, + KindL1ELNode, + KindL1CLNode, + KindL2ELNode, + KindOPRBuilderNode, + KindRollupBoostNode, + KindL2CLNode, + KindSupervisor, + KindTestSequencer, + KindL2Batcher, + KindL2Challenger, + KindL2Proposer, +} + +// HydrationComponentKindOrder returns the deterministic kind ordering used by orchestrator hydration. +func HydrationComponentKindOrder() []ComponentKind { + out := make([]ComponentKind, len(hydrationComponentKindOrder)) + copy(out, hydrationComponentKindOrder) + return out +} + // IDShape defines which fields an ID uses. 
type IDShape uint8 diff --git a/op-devstack/sysgo/add_game_type.go b/op-devstack/sysgo/add_game_type.go index 63d674d1d5d0a..06c279415c346 100644 --- a/op-devstack/sysgo/add_game_type.go +++ b/op-devstack/sysgo/add_game_type.go @@ -37,8 +37,9 @@ func WithGameTypeAdded(gameType gameTypes.GameType) stack.Option[*Orchestrator] opts := stack.FnOption[*Orchestrator]{ FinallyFn: func(o *Orchestrator) { absolutePrestate := PrestateForGameType(o.P(), gameType) - for _, l2ChainID := range o.l2Nets.Keys() { - addGameType(o, absolutePrestate, gameType, o.l1ELs.Keys()[0], l2ChainID) + l1ELID, l2NetIDs := requireGameTypeTargetIDs(o) + for _, l2NetID := range l2NetIDs { + addGameType(o, absolutePrestate, gameType, l1ELID, l2NetID.ChainID()) } }, } @@ -48,8 +49,9 @@ func WithGameTypeAdded(gameType gameTypes.GameType) stack.Option[*Orchestrator] func WithRespectedGameType(gameType gameTypes.GameType) stack.Option[*Orchestrator] { return stack.FnOption[*Orchestrator]{ FinallyFn: func(o *Orchestrator) { - for _, l2ChainID := range o.l2Nets.Keys() { - setRespectedGameType(o, gameType, o.l1ELs.Keys()[0], l2ChainID) + l1ELID, l2NetIDs := requireGameTypeTargetIDs(o) + for _, l2NetID := range l2NetIDs { + setRespectedGameType(o, gameType, l1ELID, l2NetID.ChainID()) } }, } @@ -72,13 +74,25 @@ func WithCannonKonaGameTypeAdded() stack.Option[*Orchestrator] { }, FinallyFn: func(o *Orchestrator) { absolutePrestate := getCannonKonaAbsolutePrestate(o.P()) - for _, l2ChainID := range o.l2Nets.Keys() { - addGameType(o, absolutePrestate, gameTypes.CannonKonaGameType, o.l1ELs.Keys()[0], l2ChainID) + l1ELID, l2NetIDs := requireGameTypeTargetIDs(o) + for _, l2NetID := range l2NetIDs { + addGameType(o, absolutePrestate, gameTypes.CannonKonaGameType, l1ELID, l2NetID.ChainID()) } }, } } +func requireGameTypeTargetIDs(o *Orchestrator) (stack.L1ELNodeID, []stack.ComponentID) { + require := o.P().Require() + l2NetIDs := o.registry.IDsByKind(stack.KindL2Network) + require.NotEmpty(l2NetIDs, "need at least 
one L2 network to configure game types") + + l1ELIDs := o.registry.IDsByKind(stack.KindL1ELNode) + require.NotEmpty(l1ELIDs, "need at least one L1 EL node to configure game types") + + return stack.NewL1ELNodeID(l1ELIDs[0].Key(), l1ELIDs[0].ChainID()), l2NetIDs +} + func WithChallengerCannonKonaEnabled() stack.Option[*Orchestrator] { return stack.FnOption[*Orchestrator]{ BeforeDeployFn: func(o *Orchestrator) { @@ -93,12 +107,14 @@ func setRespectedGameType(o *Orchestrator, gameType gameTypes.GameType, l1ELID s require.NotNil(o.wb, "must have a world builder") l1ChainID := l1ELID.ChainID() - l2Network, ok := o.l2Nets.Get(l2ChainID) + l2NetComponent, ok := o.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2ChainID)).ComponentID) require.True(ok, "l2Net must exist") + l2Network := l2NetComponent.(*L2Network) portalAddr := l2Network.rollupCfg.DepositContractAddress - l1EL, ok := o.l1ELs.Get(l1ELID) + l1ELComponent, ok := o.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1El must exist") + l1EL := l1ELComponent.(L1ELNode) rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) require.NoError(err) @@ -147,8 +163,9 @@ func addGameType(o *Orchestrator, absolutePrestate common.Hash, gameType gameTyp opcmAddr := o.wb.output.ImplementationsDeployment.OpcmImpl - l1EL, ok := o.l1ELs.Get(l1ELID) + l1ELComponent, ok := o.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1El must exist") + l1EL := l1ELComponent.(L1ELNode) rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) require.NoError(err) diff --git a/op-devstack/sysgo/control_plane.go b/op-devstack/sysgo/control_plane.go index a2817b6a11acd..94ecf5eda4b9c 100644 --- a/op-devstack/sysgo/control_plane.go +++ b/op-devstack/sysgo/control_plane.go @@ -18,40 +18,46 @@ func control(lifecycle stack.Lifecycle, mode stack.ControlAction) { } func (c *ControlPlane) SupervisorState(id stack.SupervisorID, mode stack.ControlAction) { - s, ok := 
c.o.supervisors.Get(id) + cid := stack.ConvertSupervisorID(id) + component, ok := c.o.registry.Get(cid.ComponentID) c.o.P().Require().True(ok, "need supervisor to change state") - control(s, mode) + control(component.(Supervisor), mode) } func (c *ControlPlane) L2CLNodeState(id stack.L2CLNodeID, mode stack.ControlAction) { - s, ok := c.o.l2CLs.Get(id) + cid := stack.ConvertL2CLNodeID(id) + component, ok := c.o.registry.Get(cid.ComponentID) c.o.P().Require().True(ok, "need l2cl node to change state") - control(s, mode) + control(component.(L2CLNode), mode) } func (c *ControlPlane) L2ELNodeState(id stack.L2ELNodeID, mode stack.ControlAction) { - s, ok := c.o.l2ELs.Get(id) + cid := stack.ConvertL2ELNodeID(id) + component, ok := c.o.registry.Get(cid.ComponentID) c.o.P().Require().True(ok, "need l2el node to change state") - control(s, mode) + control(component.(L2ELNode), mode) } func (c *ControlPlane) FakePoSState(id stack.L1CLNodeID, mode stack.ControlAction) { - s, ok := c.o.l1CLs.Get(id) + cid := stack.ConvertL1CLNodeID(id) + component, ok := c.o.registry.Get(cid.ComponentID) c.o.P().Require().True(ok, "need l1cl node to change state of fakePoS module") - + s := component.(*L1CLNode) control(s.fakepos, mode) } func (c *ControlPlane) OPRBuilderNodeState(id stack.OPRBuilderNodeID, mode stack.ControlAction) { - s, ok := c.o.oprbuilderNodes.Get(id) + cid := stack.ConvertOPRBuilderNodeID(id) + component, ok := c.o.registry.Get(cid.ComponentID) c.o.P().Require().True(ok, "need oprbuilder node to change state") - control(s, mode) + control(component.(*OPRBuilderNode), mode) } func (c *ControlPlane) RollupBoostNodeState(id stack.RollupBoostNodeID, mode stack.ControlAction) { - s, ok := c.o.rollupBoosts.Get(id) + cid := stack.ConvertRollupBoostNodeID(id) + component, ok := c.o.registry.Get(cid.ComponentID) c.o.P().Require().True(ok, "need rollup boost node to change state") - control(s, mode) + control(component.(*RollupBoostNode), mode) } var _ stack.ControlPlane = 
(*ControlPlane)(nil) diff --git a/op-devstack/sysgo/deployer.go b/op-devstack/sysgo/deployer.go index 7d95d647aa880..94627337386f7 100644 --- a/op-devstack/sysgo/deployer.go +++ b/op-devstack/sysgo/deployer.go @@ -134,13 +134,13 @@ func WithDeployer() stack.Option[*Orchestrator] { genesis: wb.outL1Genesis, blockTime: 6, } - o.l1Nets.Set(l1ID.ChainID(), l1Net) + o.registry.Register(stack.ConvertL1NetworkID(l1ID).ComponentID, l1Net) - o.superchains.Set(superchainID, &Superchain{ + o.registry.Register(stack.ConvertSuperchainID(superchainID).ComponentID, &Superchain{ id: superchainID, deployment: wb.outSuperchainDeployment, }) - o.clusters.Set(clusterID, &Cluster{ + o.registry.Register(stack.ConvertClusterID(clusterID).ComponentID, &Cluster{ id: clusterID, cfgset: wb.outFullCfgSet, }) @@ -162,7 +162,7 @@ func WithDeployer() stack.Option[*Orchestrator] { deployment: l2Dep, keys: o.keys, } - o.l2Nets.Set(l2ID.ChainID(), l2Net) + o.registry.Register(stack.ConvertL2NetworkID(l2ID).ComponentID, l2Net) } }, } diff --git a/op-devstack/sysgo/faucet.go b/op-devstack/sysgo/faucet.go index 3e377a08e4d1f..88aba54f7defa 100644 --- a/op-devstack/sysgo/faucet.go +++ b/op-devstack/sysgo/faucet.go @@ -71,8 +71,9 @@ func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Optio id := ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", elID.ChainID())) require.NotContains(faucets, id, "one faucet per chain only") - el, ok := orch.l1ELs.Get(elID) + elComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(elID).ComponentID) require.True(ok, "need L1 EL for faucet", elID) + el := elComponent.(L1ELNode) faucets[id] = &fconf.FaucetEntry{ ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, @@ -86,8 +87,9 @@ func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Optio id := ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", elID.ChainID())) require.NotContains(faucets, id, "one faucet per chain only") - el, ok := orch.l2ELs.Get(elID) + elComponent, ok := 
orch.registry.Get(stack.ConvertL2ELNodeID(elID).ComponentID) require.True(ok, "need L2 EL for faucet", elID) + el := elComponent.(L2ELNode) faucets[id] = &fconf.FaucetEntry{ ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, diff --git a/op-devstack/sysgo/l1_nodes.go b/op-devstack/sysgo/l1_nodes.go index 1f8d879434f21..d8d09b37a9c05 100644 --- a/op-devstack/sysgo/l1_nodes.go +++ b/op-devstack/sysgo/l1_nodes.go @@ -90,8 +90,9 @@ func WithL1NodesInProcess(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stac elP := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l1ELID)) require := orch.P().Require() - l1Net, ok := orch.l1Nets.Get(l1ELID.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) require.True(ok, "L1 network must exist") + l1Net := l1NetComponent.(*L1Network) blockTimeL1 := l1Net.blockTime l1FinalizedDistance := uint64(20) @@ -137,7 +138,9 @@ func WithL1NodesInProcess(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stac l1Geth: l1Geth, blobPath: blobPath, } - require.True(orch.l1ELs.SetIfMissing(l1ELID, l1ELNode), "must not already exist") + elCID := stack.ConvertL1ELNodeID(l1ELID).ComponentID + require.False(orch.registry.Has(elCID), "must not already exist") + orch.registry.Register(elCID, l1ELNode) l1CLNode := &L1CLNode{ id: l1CLID, @@ -145,7 +148,9 @@ func WithL1NodesInProcess(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stac beacon: bcn, fakepos: &FakePoS{fakepos: fp, p: clP}, } - require.True(orch.l1CLs.SetIfMissing(l1CLID, l1CLNode), "must not already exist") + clCID := stack.ConvertL1CLNodeID(l1CLID).ComponentID + require.False(orch.registry.Has(clCID), "must not already exist") + orch.registry.Register(clCID, l1CLNode) }) } @@ -159,13 +164,17 @@ func WithExtL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, elRPCEndpo id: l1ELID, userRPC: elRPCEndpoint, } - require.True(orch.l1ELs.SetIfMissing(l1ELID, l1ELNode), "must not already exist") + elCID 
:= stack.ConvertL1ELNodeID(l1ELID).ComponentID + require.False(orch.registry.Has(elCID), "must not already exist") + orch.registry.Register(elCID, l1ELNode) // Create L1 CL node with external RPC l1CLNode := &L1CLNode{ id: l1CLID, beaconHTTPAddr: clRPCEndpoint, } - require.True(orch.l1CLs.SetIfMissing(l1CLID, l1CLNode), "must not already exist") + clCID := stack.ConvertL1CLNodeID(l1CLID).ComponentID + require.False(orch.registry.Has(clCID), "must not already exist") + orch.registry.Register(clCID, l1CLNode) }) } diff --git a/op-devstack/sysgo/l1_nodes_subprocess.go b/op-devstack/sysgo/l1_nodes_subprocess.go index af62e9eb4ebdc..e35ad97aa685c 100644 --- a/op-devstack/sysgo/l1_nodes_subprocess.go +++ b/op-devstack/sysgo/l1_nodes_subprocess.go @@ -160,8 +160,9 @@ func WithL1NodesSubprocess(id stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Opt _, err := os.Stat(execPath) p.Require().NotErrorIs(err, os.ErrNotExist, "geth executable must exist") - l1Net, ok := orch.l1Nets.Get(id.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(id.ChainID())).ComponentID) require.True(ok, "L1 network required") + l1Net := l1NetComponent.(*L1Network) jwtPath, jwtSecret := orch.writeDefaultJWT() @@ -207,7 +208,9 @@ func WithL1NodesSubprocess(id stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Opt l1EL.Start() p.Cleanup(l1EL.Stop) p.Logger().Info("geth is ready", "userRPC", l1EL.userRPC, "authRPC", l1EL.authRPC) - require.True(orch.l1ELs.SetIfMissing(id, l1EL), "must be unique L2 EL node") + elCID := stack.ConvertL1ELNodeID(id).ComponentID + require.False(orch.registry.Has(elCID), "must be unique L1 EL node") + orch.registry.Register(elCID, l1EL) backend, err := ethclient.DialContext(p.Ctx(), l1EL.userRPC) require.NoError(err) @@ -233,7 +236,7 @@ func WithL1NodesSubprocess(id stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Opt } fp.Start() p.Cleanup(fp.Stop) - orch.l1CLs.Set(clID, &L1CLNode{ + 
orch.registry.Register(stack.ConvertL1CLNodeID(clID).ComponentID, &L1CLNode{ id: clID, beaconHTTPAddr: bcn.BeaconAddr(), beacon: bcn, diff --git a/op-devstack/sysgo/l2_batcher.go b/op-devstack/sysgo/l2_batcher.go index 4001ec30582a5..3c2082e05e4fc 100644 --- a/op-devstack/sysgo/l2_batcher.go +++ b/op-devstack/sysgo/l2_batcher.go @@ -57,26 +57,32 @@ func WithBatcher(batcherID stack.L2BatcherID, l1ELID stack.L1ELNodeID, l2CLID st p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), batcherID)) require := p.Require() - require.False(orch.batchers.Has(batcherID), "batcher must not already exist") + batcherCID := stack.ConvertL2BatcherID(batcherID).ComponentID + require.False(orch.registry.Has(batcherCID), "batcher must not already exist") - l2Net, ok := orch.l2Nets.Get(l2CLID.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2CLID.ChainID())).ComponentID) require.True(ok) + l2Net := l2NetComponent.(*L2Network) - l1Net, ok := orch.l1Nets.Get(l1ELID.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) require.True(ok) + l1Net := l1NetComponent.(*L1Network) require.Equal(l2Net.l1ChainID, l1Net.id.ChainID(), "expecting L1EL on L1 of L2CL") require.Equal(l2CLID.ChainID(), l2ELID.ChainID(), "L2 CL and EL must be on same L2 chain") - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok) + l1EL := l1ELComponent.(L1ELNode) - l2CL, ok := orch.l2CLs.Get(l2CLID) + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CLID).ComponentID) require.True(ok) + l2CL := l2CLComponent.(L2CLNode) - l2EL, ok := orch.l2ELs.Get(l2ELID) + l2ELComponent, ok := orch.registry.Get(stack.ConvertL2ELNodeID(l2ELID).ComponentID) require.True(ok) + l2EL := l2ELComponent.(L2ELNode) batcherSecret, err := orch.keys.Secret(devkeys.BatcherRole.Key(l2ELID.ChainID().ToBig())) 
require.NoError(err) @@ -141,6 +147,6 @@ func WithBatcher(batcherID stack.L2BatcherID, l1ELID stack.L1ELNodeID, l2CLID st l2CLRPC: l2CL.UserRPC(), l2ELRPC: l2EL.UserRPC(), } - orch.batchers.Set(batcherID, b) + orch.registry.Register(stack.ConvertL2BatcherID(batcherID).ComponentID, b) }) } diff --git a/op-devstack/sysgo/l2_challenger.go b/op-devstack/sysgo/l2_challenger.go index 1a29e80b55f31..25ebb39d76c07 100644 --- a/op-devstack/sysgo/l2_challenger.go +++ b/op-devstack/sysgo/l2_challenger.go @@ -76,7 +76,8 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen p := orch.P().WithCtx(ctx) require := p.Require() - require.False(orch.challengers.Has(challengerID), "challenger must not already exist") + challengerCID := stack.ConvertL2ChallengerID(challengerID).ComponentID + require.False(orch.registry.Has(challengerCID), "challenger must not already exist") challengerSecret, err := orch.keys.Secret(devkeys.ChallengerRole.Key(challengerID.ChainID().ToBig())) require.NoError(err) @@ -84,10 +85,12 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen logger := p.Logger() logger.Info("Challenger key acquired", "addr", crypto.PubkeyToAddress(challengerSecret.PublicKey)) - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok) - l1CL, ok := orch.l1CLs.Get(l1CLID) + l1EL := l1ELComponent.(L1ELNode) + l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) require.True(ok) + l1CL := l1CLComponent.(*L1CLNode) l2Geneses := make([]*core.Genesis, 0, len(l2ELIDs)) rollupCfgs := make([]*rollup.Config, 0, len(l2ELIDs)) @@ -103,8 +106,9 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen } for _, l2ELID := range l2ELIDs { chainID := l2ELID.ChainID() - l2Net, ok := orch.l2Nets.Get(chainID) + l2NetComponent, ok := 
orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(chainID)).ComponentID) require.Truef(ok, "l2Net %s not found", chainID) + l2Net := l2NetComponent.(*L2Network) factory := l2Net.deployment.DisputeGameFactoryProxyAddr() if disputeGameFactoryAddr == (common.Address{}) { disputeGameFactoryAddr = factory @@ -118,10 +122,11 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2NetIDs = append(l2NetIDs, l2Net.id) } - l1Net, ok := orch.l1Nets.Get(l1ELID.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) if !ok { require.Fail("l1 network not found") } + l1Net := l1NetComponent.(*L1Network) l1Genesis := l1Net.genesis if orch.l2ChallengerOpts.useCannonKonaConfig { @@ -139,8 +144,9 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen useSuperNode := false switch { case supervisorID != nil: - supervisorNode, ok := orch.supervisors.Get(*supervisorID) + supervisorComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*supervisorID).ComponentID) require.True(ok) + supervisorNode := supervisorComponent.(Supervisor) superRPC = supervisorNode.UserRPC() case supernodeID != nil: supernode, ok := orch.supernodes.Get(*supernodeID) @@ -153,12 +159,14 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2ELRPCs := make([]string, len(l2ELIDs)) for i, l2ELID := range l2ELIDs { - l2EL, ok := orch.l2ELs.Get(l2ELID) + l2ELComponent, ok := orch.registry.Get(stack.ConvertL2ELNodeID(l2ELID).ComponentID) require.True(ok) + l2EL := l2ELComponent.(L2ELNode) l2ELRPCs[i] = l2EL.UserRPC() } - cluster, ok := orch.clusters.Get(*clusterID) + clusterComponent, ok := orch.registry.Get(stack.ConvertClusterID(*clusterID).ComponentID) require.True(ok) + cluster := clusterComponent.(*Cluster) prestateVariant := shared.InteropVariant options := []shared.Option{ shared.WithFactoryAddress(disputeGameFactoryAddr), @@ -188,9 
+196,10 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen } } require.NotZero(l2ELID, "need single L2 EL to connect to pre-interop") - l2CL, ok := orch.l2CLs.Get(*l2CLID) + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(*l2CLID).ComponentID) require.True(ok) - l2EL, ok := orch.l2ELs.Get(l2ELID) + l2CL := l2CLComponent.(L2CLNode) + l2EL, ok := orch.GetL2EL(l2ELID) require.True(ok) prestateVariant := shared.MTCannonVariant options := []shared.Option{ @@ -240,5 +249,5 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2NetIDs: l2NetIDs, config: cfg, } - orch.challengers.Set(challengerID, c) + orch.registry.Register(stack.ConvertL2ChallengerID(challengerID).ComponentID, c) } diff --git a/op-devstack/sysgo/l2_cl_kona.go b/op-devstack/sysgo/l2_cl_kona.go index ac154c1cbe2e9..0edeff4aeb461 100644 --- a/op-devstack/sysgo/l2_cl_kona.go +++ b/op-devstack/sysgo/l2_cl_kona.go @@ -165,8 +165,9 @@ func WithKonaNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1EL return stack.AfterDeploy(func(orch *Orchestrator) { followSource := func(orch *Orchestrator) string { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - l2CLFollowSource, ok := orch.l2CLs.Get(l2FollowSourceID) + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2FollowSourceID).ComponentID) p.Require().True(ok, "l2 CL Follow Source required") + l2CLFollowSource := l2CLComponent.(L2CLNode) return l2CLFollowSource.UserRPC() }(orch) opts = append(opts, L2CLFollowSource(followSource)) @@ -184,19 +185,23 @@ func withKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack require := p.Require() - l1Net, ok := orch.l1Nets.Get(l1CLID.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1CLID.ChainID())).ComponentID) require.True(ok, "l1 network required") + l1Net := l1NetComponent.(*L1Network) - l2Net, ok := orch.l2Nets.Get(l2CLID.ChainID()) 
+ l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2CLID.ChainID())).ComponentID) require.True(ok, "l2 network required") + l2Net := l2NetComponent.(*L2Network) l1ChainConfig := l1Net.genesis.Config - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1 EL node required") + l1EL := l1ELComponent.(L1ELNode) - l1CL, ok := orch.l1CLs.Get(l1CLID) + l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) require.True(ok, "l1 CL node required") + l1CL := l1CLComponent.(*L1CLNode) l2EL, ok := orch.GetL2EL(l2ELID) require.True(ok, "l2 EL node required") @@ -301,6 +306,8 @@ func withKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack k.Start() p.Cleanup(k.Stop) p.Logger().Info("Kona-node is up", "rpc", k.UserRPC()) - require.True(orch.l2CLs.SetIfMissing(l2CLID, k), "must not already exist") + cid := stack.ConvertL2CLNodeID(l2CLID).ComponentID + require.False(orch.registry.Has(cid), "must not already exist") + orch.registry.Register(cid, k) } } diff --git a/op-devstack/sysgo/l2_cl_opnode.go b/op-devstack/sysgo/l2_cl_opnode.go index 486138667bda5..7a3e0e6659f12 100644 --- a/op-devstack/sysgo/l2_cl_opnode.go +++ b/op-devstack/sysgo/l2_cl_opnode.go @@ -166,8 +166,9 @@ func WithOpNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID return stack.AfterDeploy(func(orch *Orchestrator) { followSource := func(orch *Orchestrator) string { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - l2CLFollowSource, ok := orch.l2CLs.Get(l2FollowSourceID) + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2FollowSourceID).ComponentID) p.Require().True(ok, "l2 CL Follow Source required") + l2CLFollowSource := l2CLComponent.(L2CLNode) return l2CLFollowSource.UserRPC() }(orch) opts = append(opts, L2CLFollowSource(followSource)) @@ -185,17 +186,21 @@ func withOpNode(l2CLID 
stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L require := p.Require() - l1Net, ok := orch.l1Nets.Get(l1CLID.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1CLID.ChainID())).ComponentID) require.True(ok, "l1 network required") + l1Net := l1NetComponent.(*L1Network) - l2Net, ok := orch.l2Nets.Get(l2CLID.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2CLID.ChainID())).ComponentID) require.True(ok, "l2 network required") + l2Net := l2NetComponent.(*L2Network) - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1 EL node required") + l1EL := l1ELComponent.(L1ELNode) - l1CL, ok := orch.l1CLs.Get(l1CLID) + l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) require.True(ok, "l1 CL node required") + l1CL := l1CLComponent.(*L1CLNode) // Get the L2EL node (which can be a regular EL node or a SyncTesterEL) l2EL, ok := orch.GetL2EL(l2ELID) @@ -363,7 +368,9 @@ func withOpNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L // Set the EL field to link to the L2EL node l2CLNode.el = &l2ELID - require.True(orch.l2CLs.SetIfMissing(l2CLID, l2CLNode), fmt.Sprintf("must not already exist: %s", l2CLID)) + cid := stack.ConvertL2CLNodeID(l2CLID).ComponentID + require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", l2CLID)) + orch.registry.Register(cid, l2CLNode) l2CLNode.Start() p.Cleanup(l2CLNode.Stop) } diff --git a/op-devstack/sysgo/l2_cl_p2p_util.go b/op-devstack/sysgo/l2_cl_p2p_util.go index 2fbcea313f40a..05a7cd3f79ed0 100644 --- a/op-devstack/sysgo/l2_cl_p2p_util.go +++ b/op-devstack/sysgo/l2_cl_p2p_util.go @@ -88,10 +88,12 @@ func WithL2CLP2PConnection(l2CL1ID, l2CL2ID stack.L2CLNodeID) stack.Option[*Orch require := orch.P().Require() l := orch.P().Logger() - l2CL1, ok := orch.l2CLs.Get(l2CL1ID) + 
l2CL1Component, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CL1ID).ComponentID) require.True(ok, "looking for L2 CL node 1 to connect p2p") - l2CL2, ok := orch.l2CLs.Get(l2CL2ID) + l2CL1 := l2CL1Component.(L2CLNode) + l2CL2Component, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CL2ID).ComponentID) require.True(ok, "looking for L2 CL node 2 to connect p2p") + l2CL2 := l2CL2Component.(L2CLNode) require.Equal(l2CL1ID.ChainID(), l2CL2ID.ChainID(), "must be same l2 chain") ctx := orch.P().Ctx() diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index 55bc809879279..ae7ce0c58527b 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -241,11 +241,12 @@ func WithSharedSupernodeCLsInterop(supernodeID stack.SupernodeID, cls []L2CLs, l orch.P().Require().Fail("no chains provided") return } - l2Net, ok := orch.l2Nets.Get(cls[0].CLID.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(cls[0].CLID.ChainID())).ComponentID) if !ok { orch.P().Require().Fail("l2 network not found") return } + l2Net := l2NetComponent.(*L2Network) genesisTime := l2Net.rollupCfg.Genesis.L2Time orch.P().Logger().Info("enabling supernode interop at genesis", "activation_timestamp", genesisTime) @@ -263,11 +264,12 @@ func WithSharedSupernodeCLsInteropDelayed(supernodeID stack.SupernodeID, cls []L orch.P().Require().Fail("no chains provided") return } - l2Net, ok := orch.l2Nets.Get(cls[0].CLID.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(cls[0].CLID.ChainID())).ComponentID) if !ok { orch.P().Require().Fail("l2 network not found") return } + l2Net := l2NetComponent.(*L2Network) genesisTime := l2Net.rollupCfg.Genesis.L2Time activationTime := genesisTime + delaySeconds orch.P().Logger().Info("enabling supernode interop with delay", @@ -299,14 +301,17 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID 
stack.SupernodeI opt(snOpts) } - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1 EL node required") - l1CL, ok := orch.l1CLs.Get(l1CLID) + l1EL := l1ELComponent.(L1ELNode) + l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) require.True(ok, "l1 CL node required") + l1CL := l1CLComponent.(*L1CLNode) // Get L1 network to access L1 chain config - l1Net, ok := orch.l1Nets.Get(l1ELID.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) require.True(ok, "l1 network required") + l1Net := l1NetComponent.(*L1Network) _, jwtSecret := orch.writeDefaultJWT() @@ -361,9 +366,10 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI els := make([]*stack.L2ELNodeID, 0, len(cls)) for i := range cls { a := cls[i] - l2Net, ok := orch.l2Nets.Get(a.CLID.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(a.CLID.ChainID())).ComponentID) require.True(ok, "l2 network required") - l2ELNode, ok := orch.l2ELs.Get(a.ELID) + l2Net := l2NetComponent.(*L2Network) + l2ELNode, ok := orch.GetL2EL(a.ELID) require.True(ok, "l2 EL node required") l2ChainID := a.CLID.ChainID() cfg := makeNodeCfg(l2Net, l2ChainID, l2ELNode, true) @@ -434,10 +440,12 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI interopJwtSecret: jwtSecret, el: &cls[i].ELID, } - require.True(orch.l2CLs.SetIfMissing(a.CLID, proxy), fmt.Sprintf("must not already exist: %s", a.CLID)) + cid := stack.ConvertL2CLNodeID(a.CLID).ComponentID + require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", a.CLID)) + orch.registry.Register(cid, proxy) } - supernode := &SuperNode{ + snNode := &SuperNode{ id: supernodeID, sn: sn, cancel: cancel, @@ -451,7 +459,7 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, 
supernodeID stack.SupernodeI l1UserRPC: l1EL.UserRPC(), l1BeaconAddr: l1CL.beaconHTTPAddr, } - orch.supernodes.Set(supernodeID, supernode) + orch.supernodes.Set(supernodeID, snNode) } func idsFromCLs(cls []L2CLs) []eth.ChainID { diff --git a/op-devstack/sysgo/l2_el.go b/op-devstack/sysgo/l2_el.go index 07881e06b6226..7242f6f1498be 100644 --- a/op-devstack/sysgo/l2_el.go +++ b/op-devstack/sysgo/l2_el.go @@ -113,6 +113,8 @@ func WithExtL2Node(id stack.L2ELNodeID, elRPCEndpoint string) stack.Option[*Orch userRPC: elRPCEndpoint, readOnly: true, } - require.True(orch.l2ELs.SetIfMissing(id, l2ELNode), "must not already exist") + cid := stack.ConvertL2ELNodeID(id).ComponentID + require.False(orch.registry.Has(cid), "must not already exist") + orch.registry.Register(cid, l2ELNode) }) } diff --git a/op-devstack/sysgo/l2_el_opgeth.go b/op-devstack/sysgo/l2_el_opgeth.go index 947aed6409def..5b2be235e7e17 100644 --- a/op-devstack/sysgo/l2_el_opgeth.go +++ b/op-devstack/sysgo/l2_el_opgeth.go @@ -184,8 +184,9 @@ func WithOpGeth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) require := p.Require() - l2Net, ok := orch.l2Nets.Get(id.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(id.ChainID())).ComponentID) require.True(ok, "L2 network required") + l2Net := l2NetComponent.(*L2Network) cfg := DefaultL2ELConfig() orch.l2ELOptions.Apply(p, id, cfg) // apply global options @@ -197,8 +198,9 @@ func WithOpGeth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra supervisorRPC := "" if useInterop && cfg.SupervisorID != nil { - sup, ok := orch.supervisors.Get(*cfg.SupervisorID) - require.True(ok, "supervisor not found") + supComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*cfg.SupervisorID).ComponentID) + require.True(ok, "supervisor is required for interop") + sup := supComponent.(Supervisor) supervisorRPC = sup.UserRPC() } @@ -218,6 
+220,8 @@ func WithOpGeth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra p.Cleanup(func() { l2EL.Stop() }) - require.True(orch.l2ELs.SetIfMissing(id, l2EL), "must be unique L2 EL node") + cid := stack.ConvertL2ELNodeID(id).ComponentID + require.False(orch.registry.Has(cid), "must be unique L2 EL node") + orch.registry.Register(cid, l2EL) }) } diff --git a/op-devstack/sysgo/l2_el_opreth.go b/op-devstack/sysgo/l2_el_opreth.go index e11aa3eaf9e92..6f629b15bd63d 100644 --- a/op-devstack/sysgo/l2_el_opreth.go +++ b/op-devstack/sysgo/l2_el_opreth.go @@ -189,8 +189,9 @@ func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) require := p.Require() - l2Net, ok := orch.l2Nets.Get(id.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(id.ChainID())).ComponentID) require.True(ok, "L2 network required") + l2Net := l2NetComponent.(*L2Network) cfg := DefaultL2ELConfig() orch.l2ELOptions.Apply(p, id, cfg) // apply global options @@ -202,8 +203,9 @@ func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra supervisorRPC := "" if useInterop && cfg.SupervisorID != nil { - sup, ok := orch.supervisors.Get(*cfg.SupervisorID) - require.True(ok, "supervisor not found") + supComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*cfg.SupervisorID).ComponentID) + require.True(ok, "supervisor is required for interop") + sup := supComponent.(Supervisor) supervisorRPC = sup.UserRPC() } @@ -324,6 +326,8 @@ func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra l2EL.Start() p.Cleanup(l2EL.Stop) p.Logger().Info("op-reth is ready", "userRPC", l2EL.userRPC, "authRPC", l2EL.authRPC) - require.True(orch.l2ELs.SetIfMissing(id, l2EL), "must be unique L2 EL node") + cid := stack.ConvertL2ELNodeID(id).ComponentID + require.False(orch.registry.Has(cid), "must be unique L2 EL node") + 
orch.registry.Register(cid, l2EL) }) } diff --git a/op-devstack/sysgo/l2_el_synctester.go b/op-devstack/sysgo/l2_el_synctester.go index 7da5a13d10fe0..a007a53d4bdf5 100644 --- a/op-devstack/sysgo/l2_el_synctester.go +++ b/op-devstack/sysgo/l2_el_synctester.go @@ -180,8 +180,9 @@ func WithSyncTesterL2ELNode(id, readonlyEL stack.L2ELNodeID, opts ...SyncTesterE p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) require := p.Require() - l2Net, ok := orch.l2Nets.Get(readonlyEL.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(readonlyEL.ChainID())).ComponentID) require.True(ok, "L2 network required") + l2Net := l2NetComponent.(*L2Network) cfg := DefaultSyncTesterELConfig() orch.SyncTesterELOptions.Apply(p, id, cfg) // apply global options @@ -202,6 +203,8 @@ func WithSyncTesterL2ELNode(id, readonlyEL stack.L2ELNodeID, opts ...SyncTesterE syncTesterEL.Start() p.Cleanup(syncTesterEL.Stop) p.Logger().Info("sync tester EL is ready", "userRPC", syncTesterEL.userRPC, "authRPC", syncTesterEL.authRPC) - require.True(orch.l2ELs.SetIfMissing(id, syncTesterEL), "must be unique L2 EL node") + cid := stack.ConvertL2ELNodeID(id).ComponentID + require.False(orch.registry.Has(cid), "must be unique L2 EL node") + orch.registry.Register(cid, syncTesterEL) }) } diff --git a/op-devstack/sysgo/l2_metrics_dashboard.go b/op-devstack/sysgo/l2_metrics_dashboard.go index 645c51f00f891..80e633b9c49a2 100644 --- a/op-devstack/sysgo/l2_metrics_dashboard.go +++ b/op-devstack/sysgo/l2_metrics_dashboard.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/locks" "github.com/ethereum-optimism/optimism/op-service/logpipe" "gopkg.in/yaml.v3" ) @@ -136,7 +135,8 @@ func (g *L2MetricsDashboard) startGrafana() { func WithL2MetricsDashboard() stack.Option[*Orchestrator] { return stack.Finally(func(orch 
*Orchestrator) { // don't start prometheus or grafana if metrics are disabled or there is nothing exporting metrics. - if !areMetricsEnabled() || orch.l2MetricsEndpoints.Len() == 0 { + metricsLen := orch.l2MetricsEndpoints.Len() + if !areMetricsEnabled() || metricsLen == 0 { return } @@ -144,7 +144,7 @@ func WithL2MetricsDashboard() stack.Option[*Orchestrator] { prometheusImageTag := getEnvVarOrDefault(prometheusDockerImageTagEnvVar, "v3.7.2") prometheusEndpoint := fmt.Sprintf("http://%s:%s", prometheusHost, prometheusServerPort) - promConfig := getPrometheusConfigFilePath(p, &orch.l2MetricsEndpoints) + promConfig := getPrometheusConfigFilePath(p, orch) // these are args to run via docker; see dashboard definition below prometheusArgs := []string{ "run", @@ -215,11 +215,11 @@ type prometheusStaticConfig struct { } // Returns the path to the dynamically-generated prometheus.yml file for metrics scraping. -func getPrometheusConfigFilePath(p devtest.P, metricsEndpoints *locks.RWMap[string, []PrometheusMetricsTarget]) string { +func getPrometheusConfigFilePath(p devtest.P, orch *Orchestrator) string { var scrapeConfigs []prometheusScrapeConfigEntry - metricsEndpoints.Range(func(name string, endpoints []PrometheusMetricsTarget) bool { + orch.l2MetricsEndpoints.Range(func(name string, endpoints []PrometheusMetricsTarget) bool { var targets []string for _, endpoint := range endpoints { targets = append(targets, string(endpoint)) diff --git a/op-devstack/sysgo/l2_network_superchain_registry.go b/op-devstack/sysgo/l2_network_superchain_registry.go index 49ebf16d252e9..fda91a1099f20 100644 --- a/op-devstack/sysgo/l2_network_superchain_registry.go +++ b/op-devstack/sysgo/l2_network_superchain_registry.go @@ -44,8 +44,9 @@ func WithL2NetworkFromSuperchainRegistry(l2NetworkID stack.L2NetworkID, networkN keys: orch.keys, } - require.True(orch.l2Nets.SetIfMissing(l2NetworkID.ChainID(), l2Net), - fmt.Sprintf("must not already exist: %s", l2NetworkID)) + cid := 
stack.ConvertL2NetworkID(l2NetworkID).ComponentID + require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", l2NetworkID)) + orch.registry.Register(cid, l2Net) }) } @@ -68,7 +69,7 @@ func WithEmptyDepSet(l2NetworkID stack.L2NetworkID, networkName string) stack.Op cfgset: depset.FullConfigSetMerged{}, } - orch.clusters.Set(clusterID, cluster) + orch.registry.Register(stack.ConvertClusterID(clusterID).ComponentID, cluster) }), ) } diff --git a/op-devstack/sysgo/l2_proposer.go b/op-devstack/sysgo/l2_proposer.go index fe0e61f0897e2..538dac741a1c4 100644 --- a/op-devstack/sysgo/l2_proposer.go +++ b/op-devstack/sysgo/l2_proposer.go @@ -77,7 +77,8 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l p := orch.P().WithCtx(ctx) require := p.Require() - require.False(orch.proposers.Has(proposerID), "proposer must not already exist") + proposerCID := stack.ConvertL2ProposerID(proposerID).ComponentID + require.False(orch.registry.Has(proposerCID), "proposer must not already exist") if supervisorID != nil && supernodeID != nil { require.Fail("cannot have both supervisorID and supernodeID set for proposer") } @@ -88,11 +89,13 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l logger := p.Logger() logger.Info("Proposer key acquired", "addr", crypto.PubkeyToAddress(proposerSecret.PublicKey)) - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok) + l1EL := l1ELComponent.(L1ELNode) - l2Net, ok := orch.l2Nets.Get(proposerID.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(proposerID.ChainID())).ComponentID) require.True(ok) + l2Net := l2NetComponent.(*L2Network) disputeGameFactoryAddr := l2Net.deployment.DisputeGameFactoryProxyAddr() disputeGameType := 1 // Permissioned game type is the only one currently deployed if orch.wb.outInteropMigration != nil { @@ -127,8 
+130,9 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l // If supervisor is available, use it. Otherwise, connect to L2 CL. switch { case supervisorID != nil: - supervisorNode, ok := orch.supervisors.Get(*supervisorID) + supervisorComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*supervisorID).ComponentID) require.True(ok, "supervisor not found") + supervisorNode := supervisorComponent.(Supervisor) proposerCLIConfig.SupervisorRpcs = []string{supervisorNode.UserRPC()} case supernodeID != nil: supernode, ok := orch.supernodes.Get(*supernodeID) @@ -136,8 +140,9 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l proposerCLIConfig.SuperNodeRpcs = []string{supernode.UserRPC()} default: require.NotNil(l2CLID, "need L2 CL to connect to when no supervisor") - l2CL, ok := orch.l2CLs.Get(*l2CLID) + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(*l2CLID).ComponentID) require.True(ok, "L2 CL not found") + l2CL := l2CLComponent.(L2CLNode) proposerCLIConfig.RollupRpc = l2CL.UserRPC() } @@ -158,5 +163,5 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l service: proposer, userRPC: proposer.HTTPEndpoint(), } - orch.proposers.Set(proposerID, prop) + orch.registry.Register(stack.ConvertL2ProposerID(proposerID).ComponentID, prop) } diff --git a/op-devstack/sysgo/op_rbuilder.go b/op-devstack/sysgo/op_rbuilder.go index 677567aa473f0..90501f155a462 100644 --- a/op-devstack/sysgo/op_rbuilder.go +++ b/op-devstack/sysgo/op_rbuilder.go @@ -467,8 +467,9 @@ func (b *OPRBuilderNode) Stop() { func WithOPRBuilderNode(id stack.OPRBuilderNodeID, opts ...OPRBuilderNodeOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - l2Net, ok := orch.l2Nets.Get(id.ChainID()) + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(id.ChainID())).ComponentID) 
p.Require().True(ok, "l2 network required") + l2Net := l2NetComponent.(*L2Network) tempDir := p.TempDir() data, err := json.Marshal(l2Net.genesis) @@ -492,7 +493,7 @@ func WithOPRBuilderNode(id stack.OPRBuilderNodeID, opts ...OPRBuilderNodeOption) p.Logger().Info("Starting OPRbuilderNode") rb.Start() p.Cleanup(rb.Stop) - orch.oprbuilderNodes.Set(id, rb) + orch.registry.Register(stack.ConvertOPRBuilderNodeID(id).ComponentID, rb) }) } diff --git a/op-devstack/sysgo/orchestrator.go b/op-devstack/sysgo/orchestrator.go index 9243fda3408f7..ed6a6ab4f50ae 100644 --- a/op-devstack/sysgo/orchestrator.go +++ b/op-devstack/sysgo/orchestrator.go @@ -37,22 +37,11 @@ type Orchestrator struct { SyncTesterELOptions SyncTesterELOptionBundle deployerPipelineOptions []DeployerPipelineOption - superchains locks.RWMap[stack.SuperchainID, *Superchain] - clusters locks.RWMap[stack.ClusterID, *Cluster] - l1Nets locks.RWMap[eth.ChainID, *L1Network] - l2Nets locks.RWMap[eth.ChainID, *L2Network] - l1ELs locks.RWMap[stack.L1ELNodeID, L1ELNode] - l1CLs locks.RWMap[stack.L1CLNodeID, *L1CLNode] - l2ELs locks.RWMap[stack.L2ELNodeID, L2ELNode] - l2CLs locks.RWMap[stack.L2CLNodeID, L2CLNode] - supervisors locks.RWMap[stack.SupervisorID, Supervisor] - supernodes locks.RWMap[stack.SupernodeID, *SuperNode] - testSequencers locks.RWMap[stack.TestSequencerID, *TestSequencer] - batchers locks.RWMap[stack.L2BatcherID, *L2Batcher] - challengers locks.RWMap[stack.L2ChallengerID, *L2Challenger] - proposers locks.RWMap[stack.L2ProposerID, *L2Proposer] - rollupBoosts locks.RWMap[stack.RollupBoostNodeID, *RollupBoostNode] - oprbuilderNodes locks.RWMap[stack.OPRBuilderNodeID, *OPRBuilderNode] + // Unified component registry - replaces the 15 separate locks.RWMap fields + registry *stack.Registry + + // supernodes is stored separately because SupernodeID cannot be converted to ComponentID + supernodes locks.RWMap[stack.SupernodeID, *SuperNode] // service name => prometheus endpoints to scrape l2MetricsEndpoints 
locks.RWMap[string, []PrometheusMetricsTarget] @@ -76,7 +65,8 @@ func (o *Orchestrator) Type() compat.Type { } func (o *Orchestrator) ClusterForL2(chainID eth.ChainID) (*Cluster, bool) { - for _, cluster := range o.clusters.Values() { + clusters := stack.RegistryGetByKind[*Cluster](o.registry, stack.KindCluster) + for _, cluster := range clusters { if cluster.DepSet() != nil && cluster.DepSet().HasChain(chainID) { return cluster, true } @@ -94,33 +84,29 @@ func (o *Orchestrator) EnableTimeTravel() { } } -// GetL2EL attempts to find an L2 EL node by checking various collections of EL-like nodes. -// It returns the L2ELNode interface if found in the standard L2ELs collection, -// or the raw node object if found in other collections (e.g. RollupBoostNode). +// GetL2EL retrieves an L2 EL node by its ID from the registry. +// Supports polymorphic lookup: if the ID was converted from another L2EL-capable type +// (e.g., OPRBuilderNodeID), searches across all L2EL-capable kinds using same key/chainID. 
func (o *Orchestrator) GetL2EL(id stack.L2ELNodeID) (L2ELNode, bool) { - if el, ok := o.l2ELs.Get(id); ok { - return el, true - } - - // Check RollupBoost - rbID := stack.NewRollupBoostNodeID(id.Key(), id.ChainID()) - if rb, ok := o.rollupBoosts.Get(rbID); ok { - return rb, true - } - - // Check op-rbuilder - oprbID := stack.NewOPRBuilderNodeID(id.Key(), id.ChainID()) - if oprbuilder, ok := o.oprbuilderNodes.Get(oprbID); ok { - return oprbuilder, true + for _, kind := range stack.L2ELCapableKinds() { + cid := stack.NewComponentID(kind, id.Key(), id.ChainID()) + if component, ok := o.registry.Get(cid); ok { + if el, ok := component.(L2ELNode); ok { + return el, true + } + } } - return nil, false } var _ stack.Orchestrator = (*Orchestrator)(nil) func NewOrchestrator(p devtest.P, hook stack.SystemHook) *Orchestrator { - o := &Orchestrator{p: p, sysHook: hook} + o := &Orchestrator{ + p: p, + sysHook: hook, + registry: stack.NewRegistry(), + } o.controlPlane = &ControlPlane{o: o} return o } @@ -148,22 +134,19 @@ func (o *Orchestrator) Hydrate(sys stack.ExtensibleSystem) { ttSys.SetTimeTravelClock(o.timeTravelClock) } } - o.superchains.Range(rangeHydrateFn[stack.SuperchainID, *Superchain](sys)) - o.clusters.Range(rangeHydrateFn[stack.ClusterID, *Cluster](sys)) - o.l1Nets.Range(rangeHydrateFn[eth.ChainID, *L1Network](sys)) - o.l2Nets.Range(rangeHydrateFn[eth.ChainID, *L2Network](sys)) - o.l1ELs.Range(rangeHydrateFn[stack.L1ELNodeID, L1ELNode](sys)) - o.l1CLs.Range(rangeHydrateFn[stack.L1CLNodeID, *L1CLNode](sys)) - o.l2ELs.Range(rangeHydrateFn[stack.L2ELNodeID, L2ELNode](sys)) - o.oprbuilderNodes.Range(rangeHydrateFn[stack.OPRBuilderNodeID, *OPRBuilderNode](sys)) - o.rollupBoosts.Range(rangeHydrateFn[stack.RollupBoostNodeID, *RollupBoostNode](sys)) - o.l2CLs.Range(rangeHydrateFn[stack.L2CLNodeID, L2CLNode](sys)) - o.supervisors.Range(rangeHydrateFn[stack.SupervisorID, Supervisor](sys)) + + // Hydrate all components in the unified registry. 
+ for _, kind := range stack.HydrationComponentKindOrder() { + o.registry.RangeByKind(kind, func(id stack.ComponentID, component any) bool { + if h, ok := component.(hydrator); ok { + h.hydrate(sys) + } + return true + }) + } + o.supernodes.Range(rangeHydrateFn[stack.SupernodeID, *SuperNode](sys)) - o.testSequencers.Range(rangeHydrateFn[stack.TestSequencerID, *TestSequencer](sys)) - o.batchers.Range(rangeHydrateFn[stack.L2BatcherID, *L2Batcher](sys)) - o.challengers.Range(rangeHydrateFn[stack.L2ChallengerID, *L2Challenger](sys)) - o.proposers.Range(rangeHydrateFn[stack.L2ProposerID, *L2Proposer](sys)) + if o.syncTester != nil { o.syncTester.hydrate(sys) } diff --git a/op-devstack/sysgo/rollup_boost.go b/op-devstack/sysgo/rollup_boost.go index bbbee9a1f4c29..12246cadca2b7 100644 --- a/op-devstack/sysgo/rollup_boost.go +++ b/op-devstack/sysgo/rollup_boost.go @@ -186,7 +186,7 @@ func WithRollupBoost(id stack.RollupBoostNodeID, l2ELID stack.L2ELNodeID, opts . cfg := DefaultRollupBoostConfig() RollupBoostOptionBundle(opts).Apply(orch, id, cfg) // Source L2 engine/JWT from the L2 EL object (mandatory) - if l2EL, ok := orch.l2ELs.Get(l2ELID); ok { + if l2EL, ok := orch.GetL2EL(l2ELID); ok { engineRPC := l2EL.EngineRPC() switch { case strings.HasPrefix(engineRPC, "ws://"): @@ -218,7 +218,7 @@ func WithRollupBoost(id stack.RollupBoostNodeID, l2ELID stack.L2ELNodeID, opts . 
r.Start() p.Cleanup(r.Stop) // Register for hydration - orch.rollupBoosts.Set(id, r) + orch.registry.Register(stack.ConvertRollupBoostNodeID(id).ComponentID, r) }) } @@ -398,10 +398,11 @@ func RollupBoostWithExtraArgs(args ...string) RollupBoostOption { func RollupBoostWithBuilderNode(id stack.OPRBuilderNodeID) RollupBoostOption { return RollupBoostOptionFn(func(orch *Orchestrator, rbID stack.RollupBoostNodeID, cfg *RollupBoostConfig) { - builderNode, ok := orch.oprbuilderNodes.Get(id) + builderComponent, ok := orch.registry.Get(stack.ConvertOPRBuilderNodeID(id).ComponentID) if !ok { orch.P().Require().FailNow("builder node not found") } + builderNode := builderComponent.(*OPRBuilderNode) cfg.BuilderURL = ensureHTTPURL(builderNode.authProxyURL) cfg.BuilderJWTPath = builderNode.cfg.AuthRPCJWTPath cfg.FlashblocksBuilderURL = builderNode.wsProxyURL diff --git a/op-devstack/sysgo/superroot.go b/op-devstack/sysgo/superroot.go index 2ed4708e28537..bd19fdd7f3758 100644 --- a/op-devstack/sysgo/superroot.go +++ b/op-devstack/sysgo/superroot.go @@ -68,22 +68,24 @@ func withSuperRoots(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, clIDs []stac require.NotNil(o.wb, "must have a world builder") require.NotEmpty(o.wb.output.ImplementationsDeployment.OpcmImpl, "must have an OPCM implementation") - l1EL, ok := o.l1ELs.Get(l1ELID) + l1ELComponent, ok := o.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "must have L1 EL node") + l1EL := l1ELComponent.(L1ELNode) rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) require.NoError(err) client := ethclient.NewClient(rpcClient) w3Client := w3.NewClient(rpcClient) var superrootTime uint64 - // Supernode does not support super roots at geensis. + // Supernode does not support super roots at genesis. // So let's wait for safe heads to advance before querying atTimestamp. 
for _, clID := range clIDs { - cl, ok := o.l2CLs.Get(clID) + l2CLComponent, ok := o.registry.Get(stack.ConvertL2CLNodeID(clID).ComponentID) require.True(ok, "must have L2 CL node") + l2CL := l2CLComponent.(L2CLNode) // TODO(#18947): Ideally, we should be able to wait on the supernode's SyncStatus directly // rather than check the sync statuses of all CLs - rollupClient, err := dial.DialRollupClientWithTimeout(t.Ctx(), t.Logger(), cl.UserRPC()) + rollupClient, err := dial.DialRollupClientWithTimeout(t.Ctx(), t.Logger(), l2CL.UserRPC()) t.Require().NoError(err) defer rollupClient.Close() ctx, cancel := context.WithTimeout(t.Ctx(), time.Minute*2) @@ -286,8 +288,9 @@ func deployDelegateCallProxy(t devtest.CommonT, transactOpts *bind.TransactOpts, } func getSuperRoot(t devtest.CommonT, o *Orchestrator, timestamp uint64, supervisorID stack.SupervisorID) eth.Bytes32 { - supervisor, ok := o.supervisors.Get(supervisorID) + supervisorComponent, ok := o.registry.Get(stack.ConvertSupervisorID(supervisorID).ComponentID) t.Require().True(ok, "must have supervisor") + supervisor := supervisorComponent.(Supervisor) client, err := dial.DialSupervisorClientWithTimeout(t.Ctx(), t.Logger(), supervisor.UserRPC()) t.Require().NoError(err) diff --git a/op-devstack/sysgo/supervisor.go b/op-devstack/sysgo/supervisor.go index 8f39d8110e5b2..ca74091f27cbb 100644 --- a/op-devstack/sysgo/supervisor.go +++ b/op-devstack/sysgo/supervisor.go @@ -28,12 +28,14 @@ func WithManagedBySupervisor(l2CLID stack.L2CLNodeID, supervisorID stack.Supervi return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() - l2CL, ok := orch.l2CLs.Get(l2CLID) + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CLID).ComponentID) require.True(ok, "looking for L2 CL node to connect to supervisor") + l2CL := l2CLComponent.(L2CLNode) interopEndpoint, secret := l2CL.InteropRPC() - s, ok := orch.supervisors.Get(supervisorID) + supComponent, ok := 
orch.registry.Get(stack.ConvertSupervisorID(supervisorID).ComponentID) require.True(ok, "looking for supervisor") + s := supComponent.(Supervisor) ctx := orch.P().Ctx() supClient, err := dial.DialSupervisorClientWithTimeout(ctx, orch.P().Logger(), s.UserRPC(), client.WithLazyDial()) diff --git a/op-devstack/sysgo/supervisor_kona.go b/op-devstack/sysgo/supervisor_kona.go index 926c87b225528..fa9d9387c2d94 100644 --- a/op-devstack/sysgo/supervisor_kona.go +++ b/op-devstack/sysgo/supervisor_kona.go @@ -119,11 +119,13 @@ func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.Cluster p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) require := p.Require() - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "need L1 EL node to connect supervisor to") + l1EL := l1ELComponent.(L1ELNode) - cluster, ok := orch.clusters.Get(clusterID) + clusterComponent, ok := orch.registry.Get(stack.ConvertClusterID(clusterID).ComponentID) require.True(ok, "need cluster to determine dependency set") + cluster := clusterComponent.(*Cluster) require.NotNil(cluster.cfgset, "need a full config set") require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") @@ -138,7 +140,9 @@ func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.Cluster p.Require().NoError(err, os.WriteFile(depsetCfgPath, depsetData, 0o644)) rollupCfgPath := cfgDir + "/rollup-config-*.json" - for _, l2Net := range orch.l2Nets.Values() { + for _, l2NetID := range orch.registry.IDsByKind(stack.KindL2Network) { + l2NetComponent, _ := orch.registry.Get(l2NetID) + l2Net := l2NetComponent.(*L2Network) chainID := l2Net.id.ChainID() rollupData, err := json.Marshal(l2Net.rollupCfg) require.NoError(err, "failed to marshal rollup config") @@ -174,7 +178,7 @@ func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.Cluster env: envVars, p: p, } - 
orch.supervisors.Set(supervisorID, konaSupervisor) + orch.registry.Register(stack.ConvertSupervisorID(supervisorID).ComponentID, konaSupervisor) p.Logger().Info("Starting kona-supervisor") konaSupervisor.Start() p.Cleanup(konaSupervisor.Stop) diff --git a/op-devstack/sysgo/supervisor_op.go b/op-devstack/sysgo/supervisor_op.go index fcc3f2015533f..d7b867da89630 100644 --- a/op-devstack/sysgo/supervisor_op.go +++ b/op-devstack/sysgo/supervisor_op.go @@ -104,11 +104,13 @@ func WithOPSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) require := p.Require() - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "need L1 EL node to connect supervisor to") + l1EL := l1ELComponent.(L1ELNode) - cluster, ok := orch.clusters.Get(clusterID) + clusterComponent, ok := orch.registry.Get(stack.ConvertClusterID(clusterID).ComponentID) require.True(ok, "need cluster to determine dependency set") + cluster := clusterComponent.(*Cluster) require.NotNil(cluster.cfgset, "need a full config set") require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") @@ -151,7 +153,7 @@ func WithOPSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID logger: plog, service: nil, // set on start } - orch.supervisors.Set(supervisorID, supervisorNode) + orch.registry.Register(stack.ConvertSupervisorID(supervisorID).ComponentID, supervisorNode) supervisorNode.Start() orch.p.Cleanup(supervisorNode.Stop) }) diff --git a/op-devstack/sysgo/sync_tester.go b/op-devstack/sysgo/sync_tester.go index ace022fd63938..144f736921f86 100644 --- a/op-devstack/sysgo/sync_tester.go +++ b/op-devstack/sysgo/sync_tester.go @@ -58,7 +58,7 @@ func WithSyncTester(syncTesterID stack.SyncTesterID, l2ELs []stack.L2ELNodeID) s id := sttypes.SyncTesterID(fmt.Sprintf("dev-sync-tester-%s", elID.ChainID())) 
require.NotContains(syncTesters, id, "one sync tester per chain only") - el, ok := orch.l2ELs.Get(elID) + el, ok := orch.GetL2EL(elID) require.True(ok, "need L2 EL for sync tester", elID) syncTesters[id] = &stconf.SyncTesterEntry{ diff --git a/op-devstack/sysgo/system_synctester_ext.go b/op-devstack/sysgo/system_synctester_ext.go index fe784f65832a0..b27c981cc75b5 100644 --- a/op-devstack/sysgo/system_synctester_ext.go +++ b/op-devstack/sysgo/system_synctester_ext.go @@ -72,7 +72,7 @@ func ExternalELSystemWithEndpointAndSuperchainRegistry(dest *DefaultMinimalExter }, blockTime: 12, } - o.l1Nets.Set(ids.L1.ChainID(), l1Net) + o.registry.Register(stack.ConvertL1NetworkID(ids.L1).ComponentID, l1Net) })) opt.Add(WithExtL1Nodes(ids.L1EL, ids.L1CL, networkPreset.L1ELEndpoint, networkPreset.L1CLBeaconEndpoint)) diff --git a/op-devstack/sysgo/test_sequencer.go b/op-devstack/sysgo/test_sequencer.go index 9937dc8cb568a..ad43eeee5fbef 100644 --- a/op-devstack/sysgo/test_sequencer.go +++ b/op-devstack/sysgo/test_sequencer.go @@ -82,21 +82,24 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN logger := p.Logger() orch.writeDefaultJWT() - l1EL, ok := orch.l1ELs.Get(l1ELID) + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1 EL node required") + l1EL := l1ELComponent.(L1ELNode) l1ELClient, err := ethclient.DialContext(p.Ctx(), l1EL.UserRPC()) require.NoError(err) engineCl, err := dialEngine(p.Ctx(), l1EL.AuthRPC(), orch.jwtSecret) require.NoError(err) - l1CL, ok := orch.l1CLs.Get(l1CLID) + l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) require.True(ok, "l1 CL node required") + l1CL := l1CLComponent.(*L1CLNode) - l2EL, ok := orch.l2ELs.Get(l2ELID) + l2EL, ok := orch.GetL2EL(l2ELID) require.True(ok, "l2 EL node required") - l2CL, ok := orch.l2CLs.Get(l2CLID) + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CLID).ComponentID) require.True(ok, 
"l2 CL node required") + l2CL := l2CLComponent.(L2CLNode) bid_L2 := seqtypes.BuilderID("test-standard-builder") cid_L2 := seqtypes.CommitterID("test-standard-committer") @@ -115,8 +118,9 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN l2SequencerID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l2CLID.ChainID())) l1SequencerID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l1ELID.ChainID())) - l1Net, ok := orch.l1Nets.Get(l1ELID.ChainID()) + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) require.True(ok, "l1 net required") + l1Net := l1NetComponent.(*L1Network) v := &config.Ensemble{ Builders: map[seqtypes.BuilderID]*config.BuilderEntry{ @@ -268,6 +272,6 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN }, } logger.Info("Sequencer User RPC", "http_endpoint", testSequencerNode.userRPC) - orch.testSequencers.Set(testSequencerID, testSequencerNode) + orch.registry.Register(stack.ConvertTestSequencerID(testSequencerID).ComponentID, testSequencerNode) }) } From a7369cf1a174aaaac5883f7df10ba79efdd7a1bf Mon Sep 17 00:00:00 2001 From: IamFlux <175354924+0xiamflux@users.noreply.github.com> Date: Wed, 25 Feb 2026 10:01:34 -0600 Subject: [PATCH 025/201] feat: Add L2CM implementation (#19111) * feat: l2cm impl l2contractsmanager (#837) * feat: add initial iteration of L2ContractsManager * feat: add network configuration structs * feat: load full config for L2ContractsManager * feat: implement L2CM::_apply * feat: add gas price oracle * refactor: move L2CM types to library * fix: upgrade ProxyAdmin predeploy * chore: enforce delegatecall for L2CM::upgrade * feat: add conditional upgrade for CGT * refactor: remove non-proxied predeploys * chore: renamed l2cm * refactor: l2cm address comments (#839) * refactor: rename _fullConfig to _loadFullConfig to match OPCM v2 * chore: remove non-proxied weth from implementations struct * test: add 
config preservation test * test: add CGT specific tests * refactor: avoid casting network config values to address * test: add test cases * chore: pr ready (#844) * chore: remove unnecessary casting on L2CM * feat: add interface for XForkL2ContractsManager * chore: add natspec to XForkL2ContractsManager * chore: pr ready * refactor: moves util functions out of L2CM implementation (#848) * feat: l2cm address comments (#850) * chore: add comment clarifying use `useCustomGasToken` * chore: upgrade both native native asset liquidity and liquidity controller predeploys together * feat: prohibit downgrading predeploy implementations * refactor: make isCustomGasToken part of the network full config * fix: add missing import * fix: use FeeVault legacy getters for backward compat * chore: update name XForkL2ContractsManager to L2ContractsManager * feat: conditionally skip some predeploys based on them being supported in a given chain (#857) * fix: l2cm address comments (#872) * chore: add todo tracking removal of L2ProxyAdmin skips * chore: add natspec comment for isPredeployNamespace * chore: use vm.prank(address,bool) to prank a delegatecall * chore: add todo for dev flags for CrossL2Inbox and L2ToL2CrossDomainMessenger * feat: allow immutables for L2CM in semgrep rules * chore: pr ready * test: L2CM verify testing (#874) * test: add coverage test for predeploy upgrades * chore: update test natspec * chore: just pr ready * chore: L2CM round comments (#877) * refactor: move helper function into Predeploys.s.sol * fix: add conditional deployer to L2CM * chore: update to l1block and l1blockCGT * test: fixes issue where OptimismSuperchainERC20 tests fail due to profile ambiguity * chore: just pr ready * chore: l2cm round comments2 (#883) * fix: move code length check out of isUpgradeable * chore: inline fullCofig_.isCustomGasToken initialization * chore: add public getters for the implementations on the L2CM * chore: remove XForkL2ContractsManager sol rule exclusion * test:
add downgrade prevention test suite * chore: just pr ready * refactor: check for address 0 instead code length * Revert "refactor: check for address 0 instead code length" This reverts commit 1fa86946a614f367404af0ede2f814bc990b6000. * chore: remove non-needed check * chore: remove unused function in tests (#884) * refactor: l2cm group impls (#885) * refactor: remove individual getters in favor of a unified one * test: add test for getImplementations * test: add OZ v5 Initializable compatibility in L2ContractsManagerUtils (#887) --- .semgrep/rules/sol-rules.yaml | 1 + .../interfaces/L2/IL2ContractsManager.sol | 33 +- .../snapshots/abi/L2ContractsManager.json | 366 ++++++++ .../snapshots/semver-lock.json | 4 + .../storageLayout/L2ContractsManager.json | 1 + .../src/L2/L2ContractsManager.sol | 433 +++++++++ .../src/libraries/L2ContractsManagerTypes.sol | 100 ++ .../src/libraries/L2ContractsManagerUtils.sol | 143 +++ .../src/libraries/Predeploys.sol | 51 + .../test/L2/L2ContractsManager.t.sol | 886 ++++++++++++++++++ .../test/L2/OptimismSuperchainERC20.t.sol | 5 +- .../libraries/L2ContractsManagerUtils.t.sol | 228 +++++ 12 files changed, 2249 insertions(+), 2 deletions(-) create mode 100644 packages/contracts-bedrock/snapshots/abi/L2ContractsManager.json create mode 100644 packages/contracts-bedrock/snapshots/storageLayout/L2ContractsManager.json create mode 100644 packages/contracts-bedrock/src/L2/L2ContractsManager.sol create mode 100644 packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol create mode 100644 packages/contracts-bedrock/src/libraries/L2ContractsManagerUtils.sol create mode 100644 packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol create mode 100644 packages/contracts-bedrock/test/libraries/L2ContractsManagerUtils.t.sol diff --git a/.semgrep/rules/sol-rules.yaml b/.semgrep/rules/sol-rules.yaml index 3fe1754098d3e..720eb699abf72 100644 --- a/.semgrep/rules/sol-rules.yaml +++ b/.semgrep/rules/sol-rules.yaml @@ -330,6 +330,7 @@ 
rules: - packages/contracts-bedrock/src/L2/FeeVault.sol - packages/contracts-bedrock/src/L2/OptimismMintableERC721.sol - packages/contracts-bedrock/src/L2/OptimismMintableERC721Factory.sol + - packages/contracts-bedrock/src/L2/L2ContractsManager.sol - packages/contracts-bedrock/src/cannon/MIPS64.sol - packages/contracts-bedrock/src/cannon/PreimageOracle.sol - packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol diff --git a/packages/contracts-bedrock/interfaces/L2/IL2ContractsManager.sol b/packages/contracts-bedrock/interfaces/L2/IL2ContractsManager.sol index 9fbbb6f92541f..99d924db58409 100644 --- a/packages/contracts-bedrock/interfaces/L2/IL2ContractsManager.sol +++ b/packages/contracts-bedrock/interfaces/L2/IL2ContractsManager.sol @@ -1,10 +1,41 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; + +// Libraries +import { L2ContractsManagerTypes } from "src/libraries/L2ContractsManagerTypes.sol"; + /// @title IL2ContractsManager /// @notice Interface for the L2ContractsManager contract. -interface IL2ContractsManager { +interface IL2ContractsManager is ISemver { + /// @notice Thrown when the upgrade function is called outside of a DELEGATECALL context. + error L2ContractsManager_OnlyDelegatecall(); + + /// @notice Thrown when a user attempts to downgrade a contract. + /// @param _target The address of the contract that was attempted to be downgraded. + error L2ContractsManager_DowngradeNotAllowed(address _target); + + /// @notice Error thrown when a semver string has less than 3 parts. + error SemverComp_InvalidSemverParts(); + + /// @notice Thrown when a contract is in the process of being initialized during an upgrade. + error L2ContractsManager_InitializingDuringUpgrade(); + /// @notice Executes the upgrade for all predeploys. /// @dev This function MUST be called via DELEGATECALL from the L2ProxyAdmin. 
function upgrade() external; + + /// @notice Returns the implementation addresses for each predeploy upgraded by the L2ContractsManager. + /// @return implementations_ The implementation addresses for each predeploy upgraded by the L2ContractsManager. + function getImplementations() + external + view + returns (L2ContractsManagerTypes.Implementations memory implementations_); + + /// @notice Constructor for the L2ContractsManager contract. + /// @param _implementations The implementation struct containing the new implementation addresses for the L2 + /// predeploys. + function __constructor__(L2ContractsManagerTypes.Implementations memory _implementations) external; } diff --git a/packages/contracts-bedrock/snapshots/abi/L2ContractsManager.json b/packages/contracts-bedrock/snapshots/abi/L2ContractsManager.json new file mode 100644 index 0000000000000..1c9486d68af85 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/L2ContractsManager.json @@ -0,0 +1,366 @@ +[ + { + "inputs": [ + { + "components": [ + { + "internalType": "address", + "name": "storageSetterImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "gasPriceOracleImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2StandardBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "sequencerFeeWalletImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2ERC721BridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1BlockImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1BlockCGTImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2ToL1MessagePasserImpl", + "type": "address" + }, + { + "internalType": "address", + "name": 
"l2ToL1MessagePasserCGTImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC721FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "proxyAdminImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "baseFeeVaultImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1FeeVaultImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "operatorFeeVaultImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "schemaRegistryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "easImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "crossL2InboxImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2ToL2CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "superchainETHBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "ethLiquidityImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismSuperchainERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismSuperchainERC20BeaconImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "superchainTokenBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "nativeAssetLiquidityImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "liquidityControllerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "feeSplitterImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "conditionalDeployerImpl", + "type": "address" + } + ], + "internalType": "struct L2ContractsManagerTypes.Implementations", + "name": "_implementations", + "type": "tuple" + } + ], + "stateMutability": "nonpayable", + "type": "constructor" + }, + { + "inputs": [], + "name": "getImplementations", + "outputs": [ + { 
+ "components": [ + { + "internalType": "address", + "name": "storageSetterImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "gasPriceOracleImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2StandardBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "sequencerFeeWalletImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2ERC721BridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1BlockImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1BlockCGTImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2ToL1MessagePasserImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2ToL1MessagePasserCGTImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismMintableERC721FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "proxyAdminImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "baseFeeVaultImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l1FeeVaultImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "operatorFeeVaultImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "schemaRegistryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "easImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "crossL2InboxImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "l2ToL2CrossDomainMessengerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "superchainETHBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": 
"ethLiquidityImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismSuperchainERC20FactoryImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "optimismSuperchainERC20BeaconImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "superchainTokenBridgeImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "nativeAssetLiquidityImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "liquidityControllerImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "feeSplitterImpl", + "type": "address" + }, + { + "internalType": "address", + "name": "conditionalDeployerImpl", + "type": "address" + } + ], + "internalType": "struct L2ContractsManagerTypes.Implementations", + "name": "implementations_", + "type": "tuple" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [], + "name": "upgrade", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, + { + "inputs": [], + "name": "version", + "outputs": [ + { + "internalType": "string", + "name": "", + "type": "string" + } + ], + "stateMutability": "view", + "type": "function" + }, + { + "inputs": [ + { + "internalType": "address", + "name": "_target", + "type": "address" + } + ], + "name": "L2ContractsManager_DowngradeNotAllowed", + "type": "error" + }, + { + "inputs": [], + "name": "L2ContractsManager_InitializingDuringUpgrade", + "type": "error" + }, + { + "inputs": [], + "name": "L2ContractsManager_OnlyDelegatecall", + "type": "error" + }, + { + "inputs": [], + "name": "SemverComp_InvalidSemverParts", + "type": "error" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index fc70d58213363..13f1d6e8ba0f5 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -95,6 +95,10 
@@ "initCodeHash": "0x6efb9055142e90b408c6312074243769df0d365f6f984e226e0320bec55a45b8", "sourceCodeHash": "0x7e438cbbe9a8248887b8c21f68c811f90a5cae4902cbbf7b0a1f6cd644dc42d9" }, + "src/L2/L2ContractsManager.sol:L2ContractsManager": { + "initCodeHash": "0xc6953fefa5142a37061fc6e96d0ec251a8ff8bcc2d09e8fdeb023e8677ff17c7", + "sourceCodeHash": "0xa4fba8f6dd5f7e1cfcba63ca8b9d0fbe621d1fe33aeb6147a185045fcded7c14" + }, "src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger": { "initCodeHash": "0xe160be403df12709c371c33195d1b9c3b5e9499e902e86bdabc8eed749c3fd61", "sourceCodeHash": "0x12ea125038b87e259a0d203e119faa6e9726ab2bdbc30430f820ccd48fe87e14" diff --git a/packages/contracts-bedrock/snapshots/storageLayout/L2ContractsManager.json b/packages/contracts-bedrock/snapshots/storageLayout/L2ContractsManager.json new file mode 100644 index 0000000000000..0637a088a01e8 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/L2ContractsManager.json @@ -0,0 +1 @@ +[] \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L2/L2ContractsManager.sol b/packages/contracts-bedrock/src/L2/L2ContractsManager.sol new file mode 100644 index 0000000000000..c4f8b3c0ffe14 --- /dev/null +++ b/packages/contracts-bedrock/src/L2/L2ContractsManager.sol @@ -0,0 +1,433 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { IERC721Bridge } from "interfaces/universal/IERC721Bridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; +import { ILiquidityController } from "interfaces/L2/ILiquidityController.sol"; +import { IFeeSplitter } from "interfaces/L2/IFeeSplitter.sol"; +import { 
ISharesCalculator } from "interfaces/L2/ISharesCalculator.sol"; +import { IL2CrossDomainMessenger } from "interfaces/L2/IL2CrossDomainMessenger.sol"; +import { IL2StandardBridge } from "interfaces/L2/IL2StandardBridge.sol"; +import { IL2ERC721Bridge } from "interfaces/L2/IL2ERC721Bridge.sol"; +import { IL1Block } from "interfaces/L2/IL1Block.sol"; + +// Libraries +import { Predeploys } from "src/libraries/Predeploys.sol"; +import { L2ContractsManagerTypes } from "src/libraries/L2ContractsManagerTypes.sol"; +import { L2ContractsManagerUtils } from "src/libraries/L2ContractsManagerUtils.sol"; + +/// @title L2ContractsManager +/// @notice Manages the upgrade of the L2 predeploys. +contract L2ContractsManager is ISemver { + /// @notice Thrown when the upgrade function is called outside of a DELEGATECALL context. + error L2ContractsManager_OnlyDelegatecall(); + + /// @notice The semantic version of the L2ContractsManager contract. + /// @custom:semver 1.0.0 + string public constant version = "1.0.0"; + + /// @notice The address of this contract. Used to enforce that the upgrade function is only + /// called via DELEGATECALL. + address internal immutable THIS_L2CM; + + /// @notice Storage slot for OpenZeppelin v4 Initializable contracts. + bytes32 internal constant INITIALIZABLE_SLOT_OZ_V4 = bytes32(0); + + /// @notice Storage slot for OpenZeppelin v5 Initializable contracts. + /// @dev Equal to keccak256(abi.encode(uint256(keccak256("openzeppelin.storage.Initializable")) - 1)) & + /// ~bytes32(uint256(0xff)) + bytes32 internal constant INITIALIZABLE_SLOT_OZ_V5 = + 0xf0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00; + + /// @notice The implementation address of the StorageSetter contract. + address internal immutable STORAGE_SETTER_IMPL; + + /// @notice Each of the implementation addresses for each predeploy that exists in this upgrade. + /// @notice GasPriceOracle implementation. 
+ address internal immutable GAS_PRICE_ORACLE_IMPL; + /// @notice L2CrossDomainMessenger implementation. + address internal immutable L2_CROSS_DOMAIN_MESSENGER_IMPL; + /// @notice L2StandardBridge implementation. + address internal immutable L2_STANDARD_BRIDGE_IMPL; + /// @notice SequencerFeeWallet implementation. + address internal immutable SEQUENCER_FEE_WALLET_IMPL; + /// @notice OptimismMintableERC20Factory implementation. + address internal immutable OPTIMISM_MINTABLE_ERC20_FACTORY_IMPL; + /// @notice L2ERC721Bridge implementation. + address internal immutable L2_ERC721_BRIDGE_IMPL; + /// @notice L1Block implementation. + address internal immutable L1_BLOCK_IMPL; + /// @notice L1Block implementation for custom gas token networks. + address internal immutable L1_BLOCK_CGT_IMPL; + /// @notice L2ToL1MessagePasser implementation. + address internal immutable L2_TO_L1_MESSAGE_PASSER_IMPL; + /// @notice L2ToL1MessagePasser implementation for custom gas token networks. + address internal immutable L2_TO_L1_MESSAGE_PASSER_CGT_IMPL; + /// @notice OptimismMintableERC721Factory implementation. + address internal immutable OPTIMISM_MINTABLE_ERC721_FACTORY_IMPL; + /// @notice ProxyAdmin implementation. + address internal immutable PROXY_ADMIN_IMPL; + /// @notice BaseFeeVault implementation. + address internal immutable BASE_FEE_VAULT_IMPL; + /// @notice L1FeeVault implementation. + address internal immutable L1_FEE_VAULT_IMPL; + /// @notice OperatorFeeVault implementation. + address internal immutable OPERATOR_FEE_VAULT_IMPL; + /// @notice SchemaRegistry implementation. + address internal immutable SCHEMA_REGISTRY_IMPL; + /// @notice EAS implementation. + address internal immutable EAS_IMPL; + /// @notice CrossL2Inbox implementation. + address internal immutable CROSS_L2_INBOX_IMPL; + /// @notice L2ToL2CrossDomainMessenger implementation. + address internal immutable L2_TO_L2_CROSS_DOMAIN_MESSENGER_IMPL; + /// @notice SuperchainETHBridge implementation. 
+ address internal immutable SUPERCHAIN_ETH_BRIDGE_IMPL; + /// @notice ETHLiquidity implementation. + address internal immutable ETH_LIQUIDITY_IMPL; + /// @notice OptimismSuperchainERC20Factory implementation. + address internal immutable OPTIMISM_SUPERCHAIN_ERC20_FACTORY_IMPL; + /// @notice OptimismSuperchainERC20Beacon implementation. + address internal immutable OPTIMISM_SUPERCHAIN_ERC20_BEACON_IMPL; + /// @notice SuperchainTokenBridge implementation. + address internal immutable SUPERCHAIN_TOKEN_BRIDGE_IMPL; + /// @notice NativeAssetLiquidity implementation. + address internal immutable NATIVE_ASSET_LIQUIDITY_IMPL; + /// @notice LiquidityController implementation. + address internal immutable LIQUIDITY_CONTROLLER_IMPL; + /// @notice FeeSplitter implementation. + address internal immutable FEE_SPLITTER_IMPL; + /// @notice CONDITIONAL_DEPLOYER implementation. + address internal immutable CONDITIONAL_DEPLOYER_IMPL; + + /// @notice Constructor for the L2ContractsManager contract. + /// @param _implementations The implementation struct containing the new implementation addresses for the L2 + /// predeploys. + constructor(L2ContractsManagerTypes.Implementations memory _implementations) { + // Store the address of this contract for DELEGATECALL enforcement. + THIS_L2CM = address(this); + + // Utility address for upgrading initializable contracts. + STORAGE_SETTER_IMPL = _implementations.storageSetterImpl; + // Predeploy implementations. 
+ L2_CROSS_DOMAIN_MESSENGER_IMPL = _implementations.l2CrossDomainMessengerImpl; + GAS_PRICE_ORACLE_IMPL = _implementations.gasPriceOracleImpl; + L2_STANDARD_BRIDGE_IMPL = _implementations.l2StandardBridgeImpl; + SEQUENCER_FEE_WALLET_IMPL = _implementations.sequencerFeeWalletImpl; + OPTIMISM_MINTABLE_ERC20_FACTORY_IMPL = _implementations.optimismMintableERC20FactoryImpl; + L2_ERC721_BRIDGE_IMPL = _implementations.l2ERC721BridgeImpl; + L1_BLOCK_IMPL = _implementations.l1BlockImpl; + L1_BLOCK_CGT_IMPL = _implementations.l1BlockCGTImpl; + L2_TO_L1_MESSAGE_PASSER_IMPL = _implementations.l2ToL1MessagePasserImpl; + L2_TO_L1_MESSAGE_PASSER_CGT_IMPL = _implementations.l2ToL1MessagePasserCGTImpl; + OPTIMISM_MINTABLE_ERC721_FACTORY_IMPL = _implementations.optimismMintableERC721FactoryImpl; + PROXY_ADMIN_IMPL = _implementations.proxyAdminImpl; + BASE_FEE_VAULT_IMPL = _implementations.baseFeeVaultImpl; + L1_FEE_VAULT_IMPL = _implementations.l1FeeVaultImpl; + OPERATOR_FEE_VAULT_IMPL = _implementations.operatorFeeVaultImpl; + SCHEMA_REGISTRY_IMPL = _implementations.schemaRegistryImpl; + EAS_IMPL = _implementations.easImpl; + // TODO(#18838): Add dev flagging for CrossL2Inbox and L2ToL2CrossDomainMessenger once DevFeatures is + // implemented for L2. 
+ CROSS_L2_INBOX_IMPL = _implementations.crossL2InboxImpl; + L2_TO_L2_CROSS_DOMAIN_MESSENGER_IMPL = _implementations.l2ToL2CrossDomainMessengerImpl; + SUPERCHAIN_ETH_BRIDGE_IMPL = _implementations.superchainETHBridgeImpl; + ETH_LIQUIDITY_IMPL = _implementations.ethLiquidityImpl; + OPTIMISM_SUPERCHAIN_ERC20_FACTORY_IMPL = _implementations.optimismSuperchainERC20FactoryImpl; + OPTIMISM_SUPERCHAIN_ERC20_BEACON_IMPL = _implementations.optimismSuperchainERC20BeaconImpl; + SUPERCHAIN_TOKEN_BRIDGE_IMPL = _implementations.superchainTokenBridgeImpl; + NATIVE_ASSET_LIQUIDITY_IMPL = _implementations.nativeAssetLiquidityImpl; + LIQUIDITY_CONTROLLER_IMPL = _implementations.liquidityControllerImpl; + FEE_SPLITTER_IMPL = _implementations.feeSplitterImpl; + CONDITIONAL_DEPLOYER_IMPL = _implementations.conditionalDeployerImpl; + } + + /// @notice Executes the upgrade for all predeploys. + /// @dev This function MUST be called via DELEGATECALL from the L2ProxyAdmin. + function upgrade() external { + if (address(this) == THIS_L2CM) revert L2ContractsManager_OnlyDelegatecall(); + + L2ContractsManagerTypes.FullConfig memory fullConfig = _loadFullConfig(); + _apply(fullConfig); + } + + /// @notice Loads the full configuration for the L2 Predeploys. + /// @return fullConfig_ The full configuration. + function _loadFullConfig() internal view returns (L2ContractsManagerTypes.FullConfig memory fullConfig_) { + // Note: Currently, this is the only way to determine if the network is a custom gas token network. + // We need our upgrades be able to determine if the network is a custom gas token network so that we can + // apply the appropriate configuration to the LiquidityController predeploy. In networks without custom gas + // tokens, the LiquidityController predeploy is not used and points to address(0). 
+ fullConfig_.isCustomGasToken = IL1Block(Predeploys.L1_BLOCK_ATTRIBUTES).isCustomGasToken(); + + // L2CrossDomainMessenger + fullConfig_.crossDomainMessenger = L2ContractsManagerTypes.CrossDomainMessengerConfig({ + otherMessenger: ICrossDomainMessenger(Predeploys.L2_CROSS_DOMAIN_MESSENGER).otherMessenger() + }); + + // L2StandardBridge + fullConfig_.standardBridge = L2ContractsManagerTypes.StandardBridgeConfig({ + otherBridge: IStandardBridge(payable(Predeploys.L2_STANDARD_BRIDGE)).otherBridge() + }); + + // L2ERC721Bridge + fullConfig_.erc721Bridge = L2ContractsManagerTypes.ERC721BridgeConfig({ + otherBridge: IERC721Bridge(Predeploys.L2_ERC721_BRIDGE).otherBridge() + }); + + // OptimismMintableERC20Factory + fullConfig_.mintableERC20Factory = L2ContractsManagerTypes.MintableERC20FactoryConfig({ + bridge: IOptimismMintableERC20Factory(Predeploys.OPTIMISM_MINTABLE_ERC20_FACTORY).bridge() + }); + + // SequencerFeeVault + fullConfig_.sequencerFeeVault = L2ContractsManagerUtils.readFeeVaultConfig(Predeploys.SEQUENCER_FEE_WALLET); + + // BaseFeeVault + fullConfig_.baseFeeVault = L2ContractsManagerUtils.readFeeVaultConfig(Predeploys.BASE_FEE_VAULT); + + // L1FeeVault + fullConfig_.l1FeeVault = L2ContractsManagerUtils.readFeeVaultConfig(Predeploys.L1_FEE_VAULT); + + // OperatorFeeVault + fullConfig_.operatorFeeVault = L2ContractsManagerUtils.readFeeVaultConfig(Predeploys.OPERATOR_FEE_VAULT); + + // LiquidityController + if (fullConfig_.isCustomGasToken) { + ILiquidityController liquidityController = ILiquidityController(Predeploys.LIQUIDITY_CONTROLLER); + fullConfig_.liquidityController = L2ContractsManagerTypes.LiquidityControllerConfig({ + owner: liquidityController.owner(), + gasPayingTokenName: liquidityController.gasPayingTokenName(), + gasPayingTokenSymbol: liquidityController.gasPayingTokenSymbol() + }); + } + + // FeeSplitter + fullConfig_.feeSplitter = L2ContractsManagerTypes.FeeSplitterConfig({ + sharesCalculator: 
IFeeSplitter(payable(Predeploys.FEE_SPLITTER)).sharesCalculator() + }); + } + + /// @notice Upgrades each of the predeploys to its corresponding new implementation. Applies the appropriate + /// configuration to each predeploy. + /// @param _config The full configuration for the L2 Predeploys. + function _apply(L2ContractsManagerTypes.FullConfig memory _config) internal { + // Initializable predeploys. + + // L2CrossDomainMessenger + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.L2_CROSS_DOMAIN_MESSENGER, + L2_CROSS_DOMAIN_MESSENGER_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall(IL2CrossDomainMessenger.initialize, (_config.crossDomainMessenger.otherMessenger)), + INITIALIZABLE_SLOT_OZ_V4, + 20 // Account for CrossDomainMessengerLegacySpacer0 + ); + + // L2StandardBridge + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.L2_STANDARD_BRIDGE, + L2_STANDARD_BRIDGE_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall(IL2StandardBridge.initialize, (_config.standardBridge.otherBridge)), + INITIALIZABLE_SLOT_OZ_V4, + 0 + ); + + // L2ERC721Bridge + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.L2_ERC721_BRIDGE, + L2_ERC721_BRIDGE_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall(IL2ERC721Bridge.initialize, payable(address(_config.erc721Bridge.otherBridge))), + INITIALIZABLE_SLOT_OZ_V4, + 0 + ); + + // OptimismMintableERC20Factory + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.OPTIMISM_MINTABLE_ERC20_FACTORY, + OPTIMISM_MINTABLE_ERC20_FACTORY_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall(IOptimismMintableERC20Factory.initialize, (_config.mintableERC20Factory.bridge)), + INITIALIZABLE_SLOT_OZ_V4, + 0 + ); + + // LiquidityController (only on custom gas token networks) + if (_config.isCustomGasToken) { + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.LIQUIDITY_CONTROLLER, + LIQUIDITY_CONTROLLER_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall( + ILiquidityController.initialize, + ( + _config.liquidityController.owner, + 
_config.liquidityController.gasPayingTokenName, + _config.liquidityController.gasPayingTokenSymbol + ) + ), + INITIALIZABLE_SLOT_OZ_V4, + 0 + ); + + // NativeAssetLiquidity + L2ContractsManagerUtils.upgradeTo(Predeploys.NATIVE_ASSET_LIQUIDITY, NATIVE_ASSET_LIQUIDITY_IMPL); + } + + // FeeSplitter + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.FEE_SPLITTER, + FEE_SPLITTER_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall(IFeeSplitter.initialize, (ISharesCalculator(_config.feeSplitter.sharesCalculator))), + INITIALIZABLE_SLOT_OZ_V4, + 0 + ); + + // SequencerFeeVault + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.SEQUENCER_FEE_WALLET, + SEQUENCER_FEE_WALLET_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall( + IFeeVault.initialize, + ( + _config.sequencerFeeVault.recipient, + _config.sequencerFeeVault.minWithdrawalAmount, + _config.sequencerFeeVault.withdrawalNetwork + ) + ), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + + // BaseFeeVault + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.BASE_FEE_VAULT, + BASE_FEE_VAULT_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall( + IFeeVault.initialize, + ( + _config.baseFeeVault.recipient, + _config.baseFeeVault.minWithdrawalAmount, + _config.baseFeeVault.withdrawalNetwork + ) + ), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + + // L1FeeVault + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.L1_FEE_VAULT, + L1_FEE_VAULT_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall( + IFeeVault.initialize, + ( + _config.l1FeeVault.recipient, + _config.l1FeeVault.minWithdrawalAmount, + _config.l1FeeVault.withdrawalNetwork + ) + ), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + + // OperatorFeeVault + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.OPERATOR_FEE_VAULT, + OPERATOR_FEE_VAULT_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall( + IFeeVault.initialize, + ( + _config.operatorFeeVault.recipient, + _config.operatorFeeVault.minWithdrawalAmount, + _config.operatorFeeVault.withdrawalNetwork + ) + ), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + + 
// Non-initializable predeploys. + L2ContractsManagerUtils.upgradeTo(Predeploys.GAS_PRICE_ORACLE, GAS_PRICE_ORACLE_IMPL); + // L1BlockAttributes and L2ToL1MessagePasser have different implementations for custom gas token networks. + L2ContractsManagerUtils.upgradeTo( + Predeploys.L1_BLOCK_ATTRIBUTES, _config.isCustomGasToken ? L1_BLOCK_CGT_IMPL : L1_BLOCK_IMPL + ); + L2ContractsManagerUtils.upgradeTo( + Predeploys.L2_TO_L1_MESSAGE_PASSER, + _config.isCustomGasToken ? L2_TO_L1_MESSAGE_PASSER_CGT_IMPL : L2_TO_L1_MESSAGE_PASSER_IMPL + ); + L2ContractsManagerUtils.upgradeTo( + Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY, OPTIMISM_MINTABLE_ERC721_FACTORY_IMPL + ); + L2ContractsManagerUtils.upgradeTo(Predeploys.PROXY_ADMIN, PROXY_ADMIN_IMPL); + // TODO(#18838): Add dev flagging for CrossL2Inbox and L2ToL2CrossDomainMessenger once DevFeatures is + // implemented for L2. + L2ContractsManagerUtils.upgradeTo(Predeploys.CROSS_L2_INBOX, CROSS_L2_INBOX_IMPL); + L2ContractsManagerUtils.upgradeTo( + Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER, L2_TO_L2_CROSS_DOMAIN_MESSENGER_IMPL + ); + L2ContractsManagerUtils.upgradeTo(Predeploys.SUPERCHAIN_ETH_BRIDGE, SUPERCHAIN_ETH_BRIDGE_IMPL); + L2ContractsManagerUtils.upgradeTo(Predeploys.ETH_LIQUIDITY, ETH_LIQUIDITY_IMPL); + L2ContractsManagerUtils.upgradeTo( + Predeploys.OPTIMISM_SUPERCHAIN_ERC20_FACTORY, OPTIMISM_SUPERCHAIN_ERC20_FACTORY_IMPL + ); + L2ContractsManagerUtils.upgradeTo( + Predeploys.OPTIMISM_SUPERCHAIN_ERC20_BEACON, OPTIMISM_SUPERCHAIN_ERC20_BEACON_IMPL + ); + L2ContractsManagerUtils.upgradeTo(Predeploys.SUPERCHAIN_TOKEN_BRIDGE, SUPERCHAIN_TOKEN_BRIDGE_IMPL); + L2ContractsManagerUtils.upgradeTo(Predeploys.SCHEMA_REGISTRY, SCHEMA_REGISTRY_IMPL); + L2ContractsManagerUtils.upgradeTo(Predeploys.EAS, EAS_IMPL); + L2ContractsManagerUtils.upgradeTo(Predeploys.CONDITIONAL_DEPLOYER, CONDITIONAL_DEPLOYER_IMPL); + } + + /// @notice Returns the implementation addresses for each predeploy upgraded by the L2ContractsManager. 
+ /// @return implementations_ The implementation addresses for each predeploy upgraded by the L2ContractsManager. + function getImplementations() + external + view + returns (L2ContractsManagerTypes.Implementations memory implementations_) + { + implementations_.storageSetterImpl = STORAGE_SETTER_IMPL; + implementations_.l2CrossDomainMessengerImpl = L2_CROSS_DOMAIN_MESSENGER_IMPL; + implementations_.gasPriceOracleImpl = GAS_PRICE_ORACLE_IMPL; + implementations_.l2StandardBridgeImpl = L2_STANDARD_BRIDGE_IMPL; + implementations_.sequencerFeeWalletImpl = SEQUENCER_FEE_WALLET_IMPL; + implementations_.optimismMintableERC20FactoryImpl = OPTIMISM_MINTABLE_ERC20_FACTORY_IMPL; + implementations_.l2ERC721BridgeImpl = L2_ERC721_BRIDGE_IMPL; + implementations_.l1BlockImpl = L1_BLOCK_IMPL; + implementations_.l1BlockCGTImpl = L1_BLOCK_CGT_IMPL; + implementations_.l2ToL1MessagePasserImpl = L2_TO_L1_MESSAGE_PASSER_IMPL; + implementations_.l2ToL1MessagePasserCGTImpl = L2_TO_L1_MESSAGE_PASSER_CGT_IMPL; + implementations_.optimismMintableERC721FactoryImpl = OPTIMISM_MINTABLE_ERC721_FACTORY_IMPL; + implementations_.proxyAdminImpl = PROXY_ADMIN_IMPL; + implementations_.baseFeeVaultImpl = BASE_FEE_VAULT_IMPL; + implementations_.l1FeeVaultImpl = L1_FEE_VAULT_IMPL; + implementations_.operatorFeeVaultImpl = OPERATOR_FEE_VAULT_IMPL; + implementations_.schemaRegistryImpl = SCHEMA_REGISTRY_IMPL; + implementations_.easImpl = EAS_IMPL; + implementations_.crossL2InboxImpl = CROSS_L2_INBOX_IMPL; + implementations_.l2ToL2CrossDomainMessengerImpl = L2_TO_L2_CROSS_DOMAIN_MESSENGER_IMPL; + implementations_.superchainETHBridgeImpl = SUPERCHAIN_ETH_BRIDGE_IMPL; + implementations_.ethLiquidityImpl = ETH_LIQUIDITY_IMPL; + implementations_.optimismSuperchainERC20FactoryImpl = OPTIMISM_SUPERCHAIN_ERC20_FACTORY_IMPL; + implementations_.optimismSuperchainERC20BeaconImpl = OPTIMISM_SUPERCHAIN_ERC20_BEACON_IMPL; + implementations_.superchainTokenBridgeImpl = SUPERCHAIN_TOKEN_BRIDGE_IMPL; + 
implementations_.nativeAssetLiquidityImpl = NATIVE_ASSET_LIQUIDITY_IMPL; + implementations_.liquidityControllerImpl = LIQUIDITY_CONTROLLER_IMPL; + implementations_.feeSplitterImpl = FEE_SPLITTER_IMPL; + implementations_.conditionalDeployerImpl = CONDITIONAL_DEPLOYER_IMPL; + } +} diff --git a/packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol b/packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol new file mode 100644 index 0000000000000..21ff9181fcfab --- /dev/null +++ b/packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol @@ -0,0 +1,100 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { Types } from "src/libraries/Types.sol"; +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { IERC721Bridge } from "interfaces/universal/IERC721Bridge.sol"; +import { ISharesCalculator } from "interfaces/L2/ISharesCalculator.sol"; + +/// @title L2ContractsManagerTypes +/// @notice Type definitions for L2ContractsManager upgrade operations. +library L2ContractsManagerTypes { + /// @notice Configuration for L2CrossDomainMessenger. + struct CrossDomainMessengerConfig { + ICrossDomainMessenger otherMessenger; + } + + /// @notice Configuration for L2StandardBridge. + struct StandardBridgeConfig { + IStandardBridge otherBridge; + } + + /// @notice Configuration for L2ERC721Bridge. + struct ERC721BridgeConfig { + IERC721Bridge otherBridge; + } + + /// @notice Configuration for OptimismMintableERC20Factory. + struct MintableERC20FactoryConfig { + address bridge; + } + + /// @notice Configuration for a FeeVault contract. + struct FeeVaultConfig { + address recipient; + uint256 minWithdrawalAmount; + Types.WithdrawalNetwork withdrawalNetwork; + } + + /// @notice Configuration for LiquidityController. 
+ struct LiquidityControllerConfig { + address owner; + string gasPayingTokenName; + string gasPayingTokenSymbol; + } + + /// @notice Configuration for FeeSplitter. + struct FeeSplitterConfig { + ISharesCalculator sharesCalculator; + } + + /// @notice Full network-specific configuration gathered from existing predeploys. + /// These values are read before upgrade and passed to initializers after. + struct FullConfig { + CrossDomainMessengerConfig crossDomainMessenger; + StandardBridgeConfig standardBridge; + ERC721BridgeConfig erc721Bridge; + MintableERC20FactoryConfig mintableERC20Factory; + FeeVaultConfig sequencerFeeVault; + FeeVaultConfig baseFeeVault; + FeeVaultConfig l1FeeVault; + FeeVaultConfig operatorFeeVault; + LiquidityControllerConfig liquidityController; + FeeSplitterConfig feeSplitter; + bool isCustomGasToken; + } + + /// @notice The current implementation addresses for the L2 predeploys. + struct Implementations { + address storageSetterImpl; + address l2CrossDomainMessengerImpl; + address gasPriceOracleImpl; + address l2StandardBridgeImpl; + address sequencerFeeWalletImpl; + address optimismMintableERC20FactoryImpl; + address l2ERC721BridgeImpl; + address l1BlockImpl; + address l1BlockCGTImpl; + address l2ToL1MessagePasserImpl; + address l2ToL1MessagePasserCGTImpl; + address optimismMintableERC721FactoryImpl; + address proxyAdminImpl; + address baseFeeVaultImpl; + address l1FeeVaultImpl; + address operatorFeeVaultImpl; + address schemaRegistryImpl; + address easImpl; + address crossL2InboxImpl; + address l2ToL2CrossDomainMessengerImpl; + address superchainETHBridgeImpl; + address ethLiquidityImpl; + address optimismSuperchainERC20FactoryImpl; + address optimismSuperchainERC20BeaconImpl; + address superchainTokenBridgeImpl; + address nativeAssetLiquidityImpl; + address liquidityControllerImpl; + address feeSplitterImpl; + address conditionalDeployerImpl; + } +} diff --git a/packages/contracts-bedrock/src/libraries/L2ContractsManagerUtils.sol 
b/packages/contracts-bedrock/src/libraries/L2ContractsManagerUtils.sol new file mode 100644 index 0000000000000..0bc6fe59d5dbe --- /dev/null +++ b/packages/contracts-bedrock/src/libraries/L2ContractsManagerUtils.sol @@ -0,0 +1,143 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// Libraries +import { L2ContractsManagerTypes } from "src/libraries/L2ContractsManagerTypes.sol"; +import { SemverComp } from "src/libraries/SemverComp.sol"; +import { Predeploys } from "src/libraries/Predeploys.sol"; + +// Contracts +import { L2ProxyAdmin } from "src/L2/L2ProxyAdmin.sol"; + +// Interfaces +import { IStorageSetter } from "interfaces/universal/IStorageSetter.sol"; +import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; + +/// @title L2ContractsManagerUtils +/// @notice L2ContractsManagerUtils is a library that provides utility functions for the L2ContractsManager system. +/// @dev Upgrade functions silently skip predeploys that are not upgradeable (i.e., not deployed on the chain). +/// This is intentional to support chains where certain predeploys are conditionally deployed, such as +/// CrossL2Inbox on non-interop chains or LiquidityController on non-custom-gas-token chains. +library L2ContractsManagerUtils { + /// @notice Thrown when a user attempts to downgrade a contract. + /// @param _target The address of the contract that was attempted to be downgraded. + error L2ContractsManager_DowngradeNotAllowed(address _target); + + /// @notice Thrown when a contract is in the process of being initialized during an upgrade. + error L2ContractsManager_InitializingDuringUpgrade(); + + /// @notice Upgrades a predeploy to a new implementation without calling an initializer. + /// If the predeploy is not upgradeable, this function is a no-op. + /// @param _proxy The proxy address of the predeploy. 
+ /// @param _implementation The new implementation address. + function upgradeTo(address _proxy, address _implementation) internal { + // Skip if the predeploy is not upgradeable (e.g., not deployed on this chain). + if (!Predeploys.isUpgradeable(_proxy)) return; + + // We skip checking the version for those predeploys that have no code. This would be the case for newly added + // predeploys that are being introduced on this particular upgrade. + address implementation = L2ProxyAdmin(Predeploys.PROXY_ADMIN).getProxyImplementation(_proxy); + + // We avoid downgrading Predeploys + if ( + // TODO(#19195): Remove this code skipping the ProxyAdmin once version is implemented. + _proxy != Predeploys.PROXY_ADMIN && implementation.code.length != 0 + && SemverComp.gt(ISemver(_proxy).version(), ISemver(_implementation).version()) + ) { + revert L2ContractsManager_DowngradeNotAllowed(address(_proxy)); + } + + IProxy(payable(_proxy)).upgradeTo(_implementation); + } + + /// @notice Reads the configuration from a FeeVault predeploy. + /// @param _feeVault The address of the FeeVault predeploy. + /// @return config_ The FeeVault configuration. + function readFeeVaultConfig(address _feeVault) + internal + view + returns (L2ContractsManagerTypes.FeeVaultConfig memory config_) + { + // Note: We are intentionally using legacy deprecated getters for this 1.0.0 version of the L2ContractsManager. + // Subsequent versions should use the new getters as L2ContractsManager should ensure that the new current + // version of the FeeVault is used. + IFeeVault feeVault = IFeeVault(payable(_feeVault)); + config_ = L2ContractsManagerTypes.FeeVaultConfig({ + recipient: feeVault.RECIPIENT(), + minWithdrawalAmount: feeVault.MIN_WITHDRAWAL_AMOUNT(), + withdrawalNetwork: feeVault.WITHDRAWAL_NETWORK() + }); + } + + /// @notice Upgrades an initializable Predeploy's implementation to _implementation by resetting the initialized + /// slot and calling upgradeToAndCall with _data. 
If the predeploy is not upgradeable, this function + /// is a no-op. + /// @dev It's important to make sure that only initializable Predeploys are upgraded this way. + /// @param _proxy The proxy of the contract. + /// @param _implementation The new implementation of the contract. + /// @param _storageSetterImpl The address of the StorageSetter implementation. + /// @param _data The data to call upgradeToAndCall with. + /// @param _slot The slot where the initialized value is located. + /// @param _offset The offset of the initializer value in the slot. + function upgradeToAndCall( + address _proxy, + address _implementation, + address _storageSetterImpl, + bytes memory _data, + bytes32 _slot, + uint8 _offset + ) + internal + { + // Skip if the predeploy is not upgradeable (e.g., not deployed on this chain). + if (!Predeploys.isUpgradeable(_proxy)) return; + + // We skip checking the version for those predeploys that have no code. This would be the case for newly added + // predeploys that are being introduced on this particular upgrade. + address implementation = L2ProxyAdmin(Predeploys.PROXY_ADMIN).getProxyImplementation(_proxy); + + if ( + // TODO(#19195): Remove this code skipping the ProxyAdmin once version is implemented. + // This should never be the case, if you're trying to initialize the ProxyAdmin, it's probably a mistake. + _proxy != Predeploys.PROXY_ADMIN && implementation.code.length != 0 + && SemverComp.gt(ISemver(_proxy).version(), ISemver(_implementation).version()) + ) { + revert L2ContractsManager_DowngradeNotAllowed(address(_proxy)); + } + + // Upgrade to StorageSetter. + IProxy(payable(_proxy)).upgradeTo(_storageSetterImpl); + + // Reset the initialized slot by zeroing the single byte at `_offset` (from the right). 
+ bytes32 current = IStorageSetter(_proxy).getBytes32(_slot); + uint256 mask = ~(uint256(0xff) << (uint256(_offset) * 8)); + IStorageSetter(_proxy).setBytes32(_slot, bytes32(uint256(current) & mask)); + + // Also clear the OZ v5 ERC-7201 Initializable slot. OZ v5 stores `_initialized` as + // uint64 in the low 8 bytes and `_initializing` as bool at byte offset 8 of the + // namespaced slot. For v4 contracts this slot is all zeros, making this a no-op. + // Slot derivation (ERC-7201): + // keccak256(abi.encode(uint256(keccak256("openzeppelin.storage.Initializable")) - 1)) & + // ~bytes32(uint256(0xff)) + // Ref: + // https://github.com/OpenZeppelin/openzeppelin-contracts/blob/6b55a93e/contracts/proxy/utils/Initializable.sol#L77 + bytes32 ozV5Slot = bytes32(uint256(0xf0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00)); + bytes32 v5Current = IStorageSetter(_proxy).getBytes32(ozV5Slot); + uint256 v5Value = uint256(v5Current); + + // A contract should never be mid-initialization during an upgrade. The `_initializing` + // bool lives at byte offset 8 (bits 64..71). Revert if it is set. + if ((v5Value >> 64) & 0xFF != 0) { + revert L2ContractsManager_InitializingDuringUpgrade(); + } + + // Zero the uint64 `_initialized` portion (low 8 bytes), preserving all upper bytes. + uint256 v5Mask = ~uint256(0xFFFFFFFFFFFFFFFF); + IStorageSetter(_proxy).setBytes32(ozV5Slot, bytes32(v5Value & v5Mask)); + + // Upgrade to the implementation and call the initializer. 
+ IProxy(payable(_proxy)).upgradeToAndCall(_implementation, _data); + } +} diff --git a/packages/contracts-bedrock/src/libraries/Predeploys.sol b/packages/contracts-bedrock/src/libraries/Predeploys.sol index 7345822cfa99c..dab948384f186 100644 --- a/packages/contracts-bedrock/src/libraries/Predeploys.sol +++ b/packages/contracts-bedrock/src/libraries/Predeploys.sol @@ -1,6 +1,7 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; +// Libraries import { Fork } from "scripts/libraries/Config.sol"; /// @title Predeploys @@ -194,6 +195,9 @@ library Predeploys { || (_isCustomGasToken && _addr == NATIVE_ASSET_LIQUIDITY) || (_useL2CM && _addr == CONDITIONAL_DEPLOYER); } + /// @notice Returns true if the address is in the predeploy namespace. + /// @param _addr The address to check. + /// @return True if the address is in range 0x4200...0000 to 0x4200...07FF. function isPredeployNamespace(address _addr) internal pure returns (bool) { return uint160(_addr) >> 11 == uint160(0x4200000000000000000000000000000000000000) >> 11; } @@ -208,4 +212,51 @@ library Predeploys { uint160(uint256(uint160(_addr)) & 0xffff | uint256(uint160(0xc0D3C0d3C0d3C0D3c0d3C0d3c0D3C0d3c0d30000))) ); } + + /// @notice Returns true if the predeploy is upgradeable. In this context, upgradeable means that the predeploy + /// is in the predeploy namespace and it is proxied. + /// @param _proxy The address of the predeploy. + /// @return isUpgradeable_ True if the predeploy is upgradeable, false otherwise. + function isUpgradeable(address _proxy) internal pure returns (bool isUpgradeable_) { + isUpgradeable_ = isPredeployNamespace(_proxy) && !notProxied(_proxy); + } + + /// @notice Returns all proxied predeploys that should be upgraded by L2CM. + /// This means that for each of these predeploys, isUpgradeable(predeploy) should return true if running on + /// a network that supports it. + /// @dev IMPORTANT: This is the SOURCE OF TRUTH for upgrade coverage. 
All proxied predeploys from + /// Predeploys library should be listed here. + /// Excludes: WETH, GOVERNANCE_TOKEN (not proxied), legacy predeploys (not upgraded). + function getUpgradeablePredeploys() internal pure returns (address[] memory predeploys_) { + predeploys_ = new address[](26); + // Core predeploys + predeploys_[0] = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + predeploys_[1] = Predeploys.GAS_PRICE_ORACLE; + predeploys_[2] = Predeploys.L2_STANDARD_BRIDGE; + predeploys_[3] = Predeploys.SEQUENCER_FEE_WALLET; + predeploys_[4] = Predeploys.OPTIMISM_MINTABLE_ERC20_FACTORY; + predeploys_[5] = Predeploys.L2_ERC721_BRIDGE; + predeploys_[6] = Predeploys.L1_BLOCK_ATTRIBUTES; + predeploys_[7] = Predeploys.L2_TO_L1_MESSAGE_PASSER; + predeploys_[8] = Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY; + predeploys_[9] = Predeploys.PROXY_ADMIN; + predeploys_[10] = Predeploys.BASE_FEE_VAULT; + predeploys_[11] = Predeploys.L1_FEE_VAULT; + predeploys_[12] = Predeploys.OPERATOR_FEE_VAULT; + predeploys_[13] = Predeploys.SCHEMA_REGISTRY; + predeploys_[14] = Predeploys.EAS; + predeploys_[15] = Predeploys.FEE_SPLITTER; + predeploys_[16] = Predeploys.CONDITIONAL_DEPLOYER; + // Interop predeploys + predeploys_[17] = Predeploys.CROSS_L2_INBOX; + predeploys_[18] = Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER; + predeploys_[19] = Predeploys.SUPERCHAIN_ETH_BRIDGE; + predeploys_[20] = Predeploys.ETH_LIQUIDITY; + predeploys_[21] = Predeploys.OPTIMISM_SUPERCHAIN_ERC20_FACTORY; + predeploys_[22] = Predeploys.OPTIMISM_SUPERCHAIN_ERC20_BEACON; + predeploys_[23] = Predeploys.SUPERCHAIN_TOKEN_BRIDGE; + // CGT predeploys (conditionally deployed, but still must be included in the list) + predeploys_[24] = Predeploys.NATIVE_ASSET_LIQUIDITY; + predeploys_[25] = Predeploys.LIQUIDITY_CONTROLLER; + } } diff --git a/packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol b/packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol new file mode 100644 index 0000000000000..9aca3b03832e5 --- /dev/null +++ 
b/packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol @@ -0,0 +1,886 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Libraries +import { Predeploys } from "src/libraries/Predeploys.sol"; +import { DevFeatures } from "src/libraries/DevFeatures.sol"; +import { L2ContractsManager } from "src/L2/L2ContractsManager.sol"; +import { L2ContractsManagerTypes } from "src/libraries/L2ContractsManagerTypes.sol"; +import { L2ContractsManagerUtils } from "src/libraries/L2ContractsManagerUtils.sol"; +import { CommonTest } from "test/setup/CommonTest.sol"; +import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; +import { StorageSetter } from "src/universal/StorageSetter.sol"; +import { L2CrossDomainMessenger } from "src/L2/L2CrossDomainMessenger.sol"; +import { Types } from "src/libraries/Types.sol"; +import { Features } from "src/libraries/Features.sol"; + +// Interfaces +import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenger.sol"; +import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; +import { IERC721Bridge } from "interfaces/universal/IERC721Bridge.sol"; +import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; +import { IFeeSplitter } from "interfaces/L2/IFeeSplitter.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; +import { ILiquidityController } from "interfaces/L2/ILiquidityController.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; +import { ISemver } from "interfaces/universal/ISemver.sol"; + +// Contracts +import { GasPriceOracle } from "src/L2/GasPriceOracle.sol"; +import { L2StandardBridge } from "src/L2/L2StandardBridge.sol"; +import { OptimismMintableERC20Factory } from "src/universal/OptimismMintableERC20Factory.sol"; +import { L2ERC721Bridge } from "src/L2/L2ERC721Bridge.sol"; +import { L1Block } from "src/L2/L1Block.sol"; +import { 
L1BlockCGT } from "src/L2/L1BlockCGT.sol"; +import { L2ToL1MessagePasser } from "src/L2/L2ToL1MessagePasser.sol"; +import { L2ToL1MessagePasserCGT } from "src/L2/L2ToL1MessagePasserCGT.sol"; +import { OptimismMintableERC721Factory } from "src/L2/OptimismMintableERC721Factory.sol"; +import { ProxyAdmin } from "src/universal/ProxyAdmin.sol"; +import { SuperchainETHBridge } from "src/L2/SuperchainETHBridge.sol"; +import { ETHLiquidity } from "src/L2/ETHLiquidity.sol"; +import { OptimismSuperchainERC20Beacon } from "src/L2/OptimismSuperchainERC20Beacon.sol"; +import { NativeAssetLiquidity } from "src/L2/NativeAssetLiquidity.sol"; +import { LiquidityController } from "src/L2/LiquidityController.sol"; + +/// @title L2ContractsManager_FullConfigExposer_Harness +/// @notice Harness contract that exposes internal functions for testing. +contract L2ContractsManager_FullConfigExposer_Harness is L2ContractsManager { + constructor(L2ContractsManagerTypes.Implementations memory _implementations) L2ContractsManager(_implementations) { } + + /// @notice Returns the full configuration for the L2 predeploys. + function loadFullConfig() external view returns (L2ContractsManagerTypes.FullConfig memory) { + return _loadFullConfig(); + } +} + +/// @title L2ContractsManager_Upgrade_Test +/// @notice Test contract for the L2ContractsManager contract, testing the upgrade path. +contract L2ContractsManager_Upgrade_Test is CommonTest { + L2ContractsManager_FullConfigExposer_Harness internal l2cm; + L2ContractsManagerTypes.Implementations internal implementations; + + /// @notice Struct to capture the post-upgrade state for comparison. 
+ struct PostUpgradeState { + // Implementation addresses + address gasPriceOracleImpl; + address l2CrossDomainMessengerImpl; + address l2StandardBridgeImpl; + address sequencerFeeWalletImpl; + address optimismMintableERC20FactoryImpl; + address l2ERC721BridgeImpl; + address l1BlockImpl; + address l1BlockCGTImpl; + address l2ToL1MessagePasserImpl; + address optimismMintableERC721FactoryImpl; + address proxyAdminImpl; + address baseFeeVaultImpl; + address l1FeeVaultImpl; + address operatorFeeVaultImpl; + address schemaRegistryImpl; + address easImpl; + address governanceTokenImpl; + address crossL2InboxImpl; + address l2ToL2CrossDomainMessengerImpl; + address superchainETHBridgeImpl; + address ethLiquidityImpl; + address optimismSuperchainERC20FactoryImpl; + address optimismSuperchainERC20BeaconImpl; + address superchainTokenBridgeImpl; + address nativeAssetLiquidityImpl; + address liquidityControllerImpl; + address feeSplitterImpl; + // Config values, take advantage of the harness to capture the config values + L2ContractsManagerTypes.FullConfig config; + } + + function setUp() public override { + super.setUp(); + _loadImplementations(); + _deployL2CM(); + + skipIfDevFeatureDisabled(DevFeatures.L2CM); + } + + /// @notice Deploys the target implementations for the predeploys. 
+ function _loadImplementations() internal { + // Deploy a fresh StorageSetter for the upgrade process + implementations.storageSetterImpl = address(new StorageSetter()); + + implementations.gasPriceOracleImpl = address(new GasPriceOracle()); + implementations.l2CrossDomainMessengerImpl = address(new L2CrossDomainMessenger()); + implementations.l2StandardBridgeImpl = address(new L2StandardBridge()); + implementations.optimismMintableERC20FactoryImpl = address(new OptimismMintableERC20Factory()); + implementations.l2ERC721BridgeImpl = address(new L2ERC721Bridge()); + implementations.l1BlockImpl = address(new L1Block()); + implementations.l1BlockCGTImpl = address(new L1BlockCGT()); + implementations.l2ToL1MessagePasserImpl = address(new L2ToL1MessagePasser()); + implementations.l2ToL1MessagePasserCGTImpl = address(new L2ToL1MessagePasserCGT()); + implementations.optimismMintableERC721FactoryImpl = address(new OptimismMintableERC721Factory(address(0), 0)); + implementations.proxyAdminImpl = address(new ProxyAdmin(address(0))); + implementations.superchainETHBridgeImpl = address(new SuperchainETHBridge()); + implementations.ethLiquidityImpl = address(new ETHLiquidity()); + implementations.optimismSuperchainERC20BeaconImpl = address(new OptimismSuperchainERC20Beacon()); + implementations.nativeAssetLiquidityImpl = address(new NativeAssetLiquidity()); + implementations.liquidityControllerImpl = address(new LiquidityController()); + + // Deploy 0.8.19 contracts using deployCode() + implementations.schemaRegistryImpl = deployCode("src/vendor/eas/SchemaRegistry.sol:SchemaRegistry"); + implementations.easImpl = deployCode("src/vendor/eas/EAS.sol:EAS"); + + // Deploy 0.8.25 contracts using deployCode() + implementations.baseFeeVaultImpl = deployCode("src/L2/BaseFeeVault.sol:BaseFeeVault"); + implementations.l1FeeVaultImpl = deployCode("src/L2/L1FeeVault.sol:L1FeeVault"); + implementations.operatorFeeVaultImpl = deployCode("src/L2/OperatorFeeVault.sol:OperatorFeeVault"); + 
implementations.sequencerFeeWalletImpl = deployCode("src/L2/SequencerFeeVault.sol:SequencerFeeVault"); + implementations.crossL2InboxImpl = deployCode("src/L2/CrossL2Inbox.sol:CrossL2Inbox"); + implementations.l2ToL2CrossDomainMessengerImpl = + deployCode("src/L2/L2ToL2CrossDomainMessenger.sol:L2ToL2CrossDomainMessenger"); + implementations.optimismSuperchainERC20FactoryImpl = + deployCode("src/L2/OptimismSuperchainERC20Factory.sol:OptimismSuperchainERC20Factory"); + implementations.superchainTokenBridgeImpl = deployCode("src/L2/SuperchainTokenBridge.sol:SuperchainTokenBridge"); + implementations.feeSplitterImpl = deployCode("src/L2/FeeSplitter.sol:FeeSplitter"); + implementations.conditionalDeployerImpl = deployCode("src/L2/ConditionalDeployer.sol:ConditionalDeployer"); + } + + /// @notice Deploys the L2ContractsManager with the loaded implementations. + function _deployL2CM() internal { + l2cm = new L2ContractsManager_FullConfigExposer_Harness(implementations); + vm.label(address(l2cm), "L2ContractsManager"); + } + + /// @notice Executes the upgrade via DELEGATECALL from the L2ProxyAdmin context. + function _executeUpgrade() internal { + // The L2CM must be called via DELEGATECALL from the ProxyAdmin. + // We simulate this by pranking as the ProxyAdmin and using delegatecall. + address proxyAdmin = Predeploys.PROXY_ADMIN; + vm.prank(proxyAdmin, true); + (bool success,) = address(l2cm).delegatecall(abi.encodeCall(L2ContractsManager.upgrade, ())); + require(success, "L2ContractsManager: Upgrade failed"); + } + + /// @notice Captures the current post-upgrade state of all predeploys. + /// @return state_ The captured state. 
+ function _capturePostUpgradeState() internal view returns (PostUpgradeState memory state_) { + // Capture implementation addresses + state_.gasPriceOracleImpl = EIP1967Helper.getImplementation(Predeploys.GAS_PRICE_ORACLE); + state_.l2CrossDomainMessengerImpl = EIP1967Helper.getImplementation(Predeploys.L2_CROSS_DOMAIN_MESSENGER); + state_.l2StandardBridgeImpl = EIP1967Helper.getImplementation(Predeploys.L2_STANDARD_BRIDGE); + state_.sequencerFeeWalletImpl = EIP1967Helper.getImplementation(Predeploys.SEQUENCER_FEE_WALLET); + state_.optimismMintableERC20FactoryImpl = + EIP1967Helper.getImplementation(Predeploys.OPTIMISM_MINTABLE_ERC20_FACTORY); + state_.l2ERC721BridgeImpl = EIP1967Helper.getImplementation(Predeploys.L2_ERC721_BRIDGE); + state_.l1BlockImpl = EIP1967Helper.getImplementation(Predeploys.L1_BLOCK_ATTRIBUTES); + state_.l1BlockCGTImpl = EIP1967Helper.getImplementation(Predeploys.L1_BLOCK_ATTRIBUTES); + state_.l2ToL1MessagePasserImpl = EIP1967Helper.getImplementation(Predeploys.L2_TO_L1_MESSAGE_PASSER); + state_.optimismMintableERC721FactoryImpl = + EIP1967Helper.getImplementation(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY); + state_.proxyAdminImpl = EIP1967Helper.getImplementation(Predeploys.PROXY_ADMIN); + state_.baseFeeVaultImpl = EIP1967Helper.getImplementation(Predeploys.BASE_FEE_VAULT); + state_.l1FeeVaultImpl = EIP1967Helper.getImplementation(Predeploys.L1_FEE_VAULT); + state_.operatorFeeVaultImpl = EIP1967Helper.getImplementation(Predeploys.OPERATOR_FEE_VAULT); + state_.schemaRegistryImpl = EIP1967Helper.getImplementation(Predeploys.SCHEMA_REGISTRY); + state_.easImpl = EIP1967Helper.getImplementation(Predeploys.EAS); + state_.governanceTokenImpl = EIP1967Helper.getImplementation(Predeploys.GOVERNANCE_TOKEN); + state_.crossL2InboxImpl = EIP1967Helper.getImplementation(Predeploys.CROSS_L2_INBOX); + state_.l2ToL2CrossDomainMessengerImpl = + EIP1967Helper.getImplementation(Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER); + state_.superchainETHBridgeImpl = 
EIP1967Helper.getImplementation(Predeploys.SUPERCHAIN_ETH_BRIDGE); + state_.ethLiquidityImpl = EIP1967Helper.getImplementation(Predeploys.ETH_LIQUIDITY); + state_.optimismSuperchainERC20FactoryImpl = + EIP1967Helper.getImplementation(Predeploys.OPTIMISM_SUPERCHAIN_ERC20_FACTORY); + state_.optimismSuperchainERC20BeaconImpl = + EIP1967Helper.getImplementation(Predeploys.OPTIMISM_SUPERCHAIN_ERC20_BEACON); + state_.superchainTokenBridgeImpl = EIP1967Helper.getImplementation(Predeploys.SUPERCHAIN_TOKEN_BRIDGE); + state_.nativeAssetLiquidityImpl = EIP1967Helper.getImplementation(Predeploys.NATIVE_ASSET_LIQUIDITY); + state_.liquidityControllerImpl = EIP1967Helper.getImplementation(Predeploys.LIQUIDITY_CONTROLLER); + state_.feeSplitterImpl = EIP1967Helper.getImplementation(Predeploys.FEE_SPLITTER); + + // Capture config values using the harness + state_.config = l2cm.loadFullConfig(); + } + + /// @notice Asserts that two post-upgrade states are identical. + /// @param _state1 The first state. + /// @param _state2 The second state. 
+ function _assertStatesEqual(PostUpgradeState memory _state1, PostUpgradeState memory _state2) internal pure { + // Assert implementation addresses are equal + assertEq(_state1.gasPriceOracleImpl, _state2.gasPriceOracleImpl, "GasPriceOracle impl mismatch"); + assertEq( + _state1.l2CrossDomainMessengerImpl, + _state2.l2CrossDomainMessengerImpl, + "L2CrossDomainMessenger impl mismatch" + ); + assertEq(_state1.l2StandardBridgeImpl, _state2.l2StandardBridgeImpl, "L2StandardBridge impl mismatch"); + assertEq(_state1.sequencerFeeWalletImpl, _state2.sequencerFeeWalletImpl, "SequencerFeeWallet impl mismatch"); + assertEq( + _state1.optimismMintableERC20FactoryImpl, + _state2.optimismMintableERC20FactoryImpl, + "OptimismMintableERC20Factory impl mismatch" + ); + assertEq(_state1.l2ERC721BridgeImpl, _state2.l2ERC721BridgeImpl, "L2ERC721Bridge impl mismatch"); + assertEq(_state1.l1BlockImpl, _state2.l1BlockImpl, "L1Block impl mismatch"); + assertEq(_state1.l1BlockCGTImpl, _state2.l1BlockCGTImpl, "L1BlockCGT impl mismatch"); + assertEq(_state1.l2ToL1MessagePasserImpl, _state2.l2ToL1MessagePasserImpl, "L2ToL1MessagePasser impl mismatch"); + assertEq( + _state1.optimismMintableERC721FactoryImpl, + _state2.optimismMintableERC721FactoryImpl, + "OptimismMintableERC721Factory impl mismatch" + ); + assertEq(_state1.proxyAdminImpl, _state2.proxyAdminImpl, "ProxyAdmin impl mismatch"); + assertEq(_state1.baseFeeVaultImpl, _state2.baseFeeVaultImpl, "BaseFeeVault impl mismatch"); + assertEq(_state1.l1FeeVaultImpl, _state2.l1FeeVaultImpl, "L1FeeVault impl mismatch"); + assertEq(_state1.operatorFeeVaultImpl, _state2.operatorFeeVaultImpl, "OperatorFeeVault impl mismatch"); + assertEq(_state1.schemaRegistryImpl, _state2.schemaRegistryImpl, "SchemaRegistry impl mismatch"); + assertEq(_state1.easImpl, _state2.easImpl, "EAS impl mismatch"); + assertEq(_state1.governanceTokenImpl, _state2.governanceTokenImpl, "GovernanceToken impl mismatch"); + assertEq(_state1.crossL2InboxImpl, 
_state2.crossL2InboxImpl, "CrossL2Inbox impl mismatch"); + assertEq( + _state1.l2ToL2CrossDomainMessengerImpl, + _state2.l2ToL2CrossDomainMessengerImpl, + "L2ToL2CrossDomainMessenger impl mismatch" + ); + assertEq(_state1.superchainETHBridgeImpl, _state2.superchainETHBridgeImpl, "SuperchainETHBridge impl mismatch"); + assertEq(_state1.ethLiquidityImpl, _state2.ethLiquidityImpl, "ETHLiquidity impl mismatch"); + assertEq( + _state1.optimismSuperchainERC20FactoryImpl, + _state2.optimismSuperchainERC20FactoryImpl, + "OptimismSuperchainERC20Factory impl mismatch" + ); + assertEq( + _state1.optimismSuperchainERC20BeaconImpl, + _state2.optimismSuperchainERC20BeaconImpl, + "OptimismSuperchainERC20Beacon impl mismatch" + ); + assertEq( + _state1.superchainTokenBridgeImpl, _state2.superchainTokenBridgeImpl, "SuperchainTokenBridge impl mismatch" + ); + assertEq( + _state1.nativeAssetLiquidityImpl, _state2.nativeAssetLiquidityImpl, "NativeAssetLiquidity impl mismatch" + ); + assertEq(_state1.liquidityControllerImpl, _state2.liquidityControllerImpl, "LiquidityController impl mismatch"); + assertEq(_state1.feeSplitterImpl, _state2.feeSplitterImpl, "FeeSplitter impl mismatch"); + + // Assert config values are equal + assertEq( + address(_state1.config.crossDomainMessenger.otherMessenger), + address(_state2.config.crossDomainMessenger.otherMessenger), + "CrossDomainMessenger config mismatch" + ); + assertEq( + address(_state1.config.standardBridge.otherBridge), + address(_state2.config.standardBridge.otherBridge), + "StandardBridge config mismatch" + ); + assertEq( + address(_state1.config.erc721Bridge.otherBridge), + address(_state2.config.erc721Bridge.otherBridge), + "ERC721Bridge config mismatch" + ); + assertEq( + _state1.config.mintableERC20Factory.bridge, + _state2.config.mintableERC20Factory.bridge, + "MintableERC20Factory config mismatch" + ); + assertEq( + _state1.config.sequencerFeeVault.recipient, + _state2.config.sequencerFeeVault.recipient, + "SequencerFeeVault 
recipient mismatch" + ); + assertEq( + _state1.config.baseFeeVault.recipient, + _state2.config.baseFeeVault.recipient, + "BaseFeeVault recipient mismatch" + ); + assertEq( + _state1.config.l1FeeVault.recipient, _state2.config.l1FeeVault.recipient, "L1FeeVault recipient mismatch" + ); + assertEq( + _state1.config.operatorFeeVault.recipient, + _state2.config.operatorFeeVault.recipient, + "OperatorFeeVault recipient mismatch" + ); + assertEq( + _state1.config.liquidityController.owner, + _state2.config.liquidityController.owner, + "LiquidityController owner mismatch" + ); + assertEq( + address(_state1.config.feeSplitter.sharesCalculator), + address(_state2.config.feeSplitter.sharesCalculator), + "FeeSplitter sharesCalculator mismatch" + ); + } + + /// @notice Tests that the upgrade produces identical state when called twice with the same pre-state. + function test_upgradeProducesSameState_whenCalledTwiceWithSamePreState_succeeds() public { + // Save the pre-upgrade state + uint256 snapshotId = vm.snapshotState(); + + // Execute the first upgrade + _executeUpgrade(); + + // Capture the post-upgrade state after first execution + PostUpgradeState memory stateAfterFirstUpgrade = _capturePostUpgradeState(); + + // Revert to the pre-upgrade state + vm.revertToState(snapshotId); + + // Execute the second upgrade (L2CM and impls are preserved from the snapshot) + _executeUpgrade(); + + // Capture the post-upgrade state after second execution + PostUpgradeState memory stateAfterSecondUpgrade = _capturePostUpgradeState(); + + // Assert both states are identical + _assertStatesEqual(stateAfterFirstUpgrade, stateAfterSecondUpgrade); + } + + /// @notice Tests that all network-specific configuration is preserved after upgrade. 
+ function test_upgradePreservesAllConfiguration_succeeds() public { + // Get the pre-upgrade configuration + L2ContractsManagerTypes.FullConfig memory preUpgradeConfig = l2cm.loadFullConfig(); + + // Execute the upgrade + _executeUpgrade(); + + // Get the post-upgrade configuration from each of the predeploys + + // L2CrossDomainMessenger + assertEq( + address(ICrossDomainMessenger(Predeploys.L2_CROSS_DOMAIN_MESSENGER).otherMessenger()), + address(preUpgradeConfig.crossDomainMessenger.otherMessenger), + "L2CrossDomainMessenger.otherMessenger not preserved" + ); + + // L2StandardBridge + assertEq( + address(IStandardBridge(payable(Predeploys.L2_STANDARD_BRIDGE)).otherBridge()), + address(preUpgradeConfig.standardBridge.otherBridge), + "L2StandardBridge.otherBridge not preserved" + ); + + // L2ERC721Bridge + assertEq( + address(IERC721Bridge(Predeploys.L2_ERC721_BRIDGE).otherBridge()), + address(preUpgradeConfig.erc721Bridge.otherBridge), + "L2ERC721Bridge.otherBridge not preserved" + ); + + // OptimismMintableERC20Factory + assertEq( + address(IOptimismMintableERC20Factory(Predeploys.OPTIMISM_MINTABLE_ERC20_FACTORY).bridge()), + address(preUpgradeConfig.mintableERC20Factory.bridge), + "OptimismMintableERC20Factory.bridge not preserved" + ); + + // SequencerFeeVault + assertEq( + IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)).recipient(), + address(preUpgradeConfig.sequencerFeeVault.recipient), + "SequencerFeeVault.recipient not preserved" + ); + assertEq( + IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)).minWithdrawalAmount(), + preUpgradeConfig.sequencerFeeVault.minWithdrawalAmount, + "SequencerFeeVault.minWithdrawalAmount not preserved" + ); + assertTrue( + IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)).withdrawalNetwork() + == preUpgradeConfig.sequencerFeeVault.withdrawalNetwork, + "SequencerFeeVault.withdrawalNetwork not preserved" + ); + + // BaseFeeVault + assertEq( + IFeeVault(payable(Predeploys.BASE_FEE_VAULT)).recipient(), + 
preUpgradeConfig.baseFeeVault.recipient, + "BaseFeeVault.recipient not preserved" + ); + assertEq( + IFeeVault(payable(Predeploys.BASE_FEE_VAULT)).minWithdrawalAmount(), + preUpgradeConfig.baseFeeVault.minWithdrawalAmount, + "BaseFeeVault.minWithdrawalAmount not preserved" + ); + assertTrue( + IFeeVault(payable(Predeploys.BASE_FEE_VAULT)).withdrawalNetwork() + == preUpgradeConfig.baseFeeVault.withdrawalNetwork, + "BaseFeeVault.withdrawalNetwork not preserved" + ); + + // L1FeeVault + assertEq( + IFeeVault(payable(Predeploys.L1_FEE_VAULT)).recipient(), + preUpgradeConfig.l1FeeVault.recipient, + "L1FeeVault.recipient not preserved" + ); + assertEq( + IFeeVault(payable(Predeploys.L1_FEE_VAULT)).minWithdrawalAmount(), + preUpgradeConfig.l1FeeVault.minWithdrawalAmount, + "L1FeeVault.minWithdrawalAmount not preserved" + ); + assertTrue( + IFeeVault(payable(Predeploys.L1_FEE_VAULT)).withdrawalNetwork() + == preUpgradeConfig.l1FeeVault.withdrawalNetwork, + "L1FeeVault.withdrawalNetwork not preserved" + ); + + // OperatorFeeVault + assertEq( + IFeeVault(payable(Predeploys.OPERATOR_FEE_VAULT)).recipient(), + preUpgradeConfig.operatorFeeVault.recipient, + "OperatorFeeVault.recipient not preserved" + ); + assertEq( + IFeeVault(payable(Predeploys.OPERATOR_FEE_VAULT)).minWithdrawalAmount(), + preUpgradeConfig.operatorFeeVault.minWithdrawalAmount, + "OperatorFeeVault.minWithdrawalAmount not preserved" + ); + assertTrue( + IFeeVault(payable(Predeploys.OPERATOR_FEE_VAULT)).withdrawalNetwork() + == preUpgradeConfig.operatorFeeVault.withdrawalNetwork, + "OperatorFeeVault.withdrawalNetwork not preserved" + ); + + // FeeSplitter + assertEq( + address(IFeeSplitter(payable(Predeploys.FEE_SPLITTER)).sharesCalculator()), + address(preUpgradeConfig.feeSplitter.sharesCalculator), + "FeeSplitter.sharesCalculator not preserved" + ); + } + + /// @notice Tests that calling upgrade() directly (not via DELEGATECALL) reverts. 
+ function test_upgrade_whenCalledDirectly_reverts() public { + // Calling upgrade() directly should revert with OnlyDelegatecall error + vm.expectRevert(L2ContractsManager.L2ContractsManager_OnlyDelegatecall.selector); + l2cm.upgrade(); + } + + /// @notice Tests that fee vault configurations with non-default values are preserved after upgrade. + function test_upgradePreservesFeeVaultConfig_withNonDefaultValues_succeeds() public { + // Define non-default test values + address customRecipient = makeAddr("customRecipient"); + uint256 customMinWithdrawal = 50 ether; + + // Get the ProxyAdmin owner + address proxyAdminOwner = IProxyAdmin(Predeploys.PROXY_ADMIN).owner(); + + // Set non-default values on all fee vaults before upgrade + vm.startPrank(proxyAdminOwner); + + // SequencerFeeVault + IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)).setRecipient(customRecipient); + IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)).setMinWithdrawalAmount(customMinWithdrawal); + IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)).setWithdrawalNetwork(Types.WithdrawalNetwork.L2); + + // BaseFeeVault + IFeeVault(payable(Predeploys.BASE_FEE_VAULT)).setRecipient(customRecipient); + IFeeVault(payable(Predeploys.BASE_FEE_VAULT)).setMinWithdrawalAmount(customMinWithdrawal); + IFeeVault(payable(Predeploys.BASE_FEE_VAULT)).setWithdrawalNetwork(Types.WithdrawalNetwork.L2); + + // L1FeeVault + IFeeVault(payable(Predeploys.L1_FEE_VAULT)).setRecipient(customRecipient); + IFeeVault(payable(Predeploys.L1_FEE_VAULT)).setMinWithdrawalAmount(customMinWithdrawal); + IFeeVault(payable(Predeploys.L1_FEE_VAULT)).setWithdrawalNetwork(Types.WithdrawalNetwork.L2); + + // OperatorFeeVault + IFeeVault(payable(Predeploys.OPERATOR_FEE_VAULT)).setRecipient(customRecipient); + IFeeVault(payable(Predeploys.OPERATOR_FEE_VAULT)).setMinWithdrawalAmount(customMinWithdrawal); + IFeeVault(payable(Predeploys.OPERATOR_FEE_VAULT)).setWithdrawalNetwork(Types.WithdrawalNetwork.L2); + + vm.stopPrank(); + + // Execute 
the upgrade + _executeUpgrade(); + + // Verify non-default values are preserved on all fee vaults + + // SequencerFeeVault + _assertFeeVaultConfig( + IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)), + customRecipient, + customMinWithdrawal, + Types.WithdrawalNetwork.L2 + ); + + // BaseFeeVault + _assertFeeVaultConfig( + IFeeVault(payable(Predeploys.BASE_FEE_VAULT)), + customRecipient, + customMinWithdrawal, + Types.WithdrawalNetwork.L2 + ); + // L1FeeVault + _assertFeeVaultConfig( + IFeeVault(payable(Predeploys.L1_FEE_VAULT)), + customRecipient, + customMinWithdrawal, + Types.WithdrawalNetwork.L2 + ); + // OperatorFeeVault + _assertFeeVaultConfig( + IFeeVault(payable(Predeploys.OPERATOR_FEE_VAULT)), + customRecipient, + customMinWithdrawal, + Types.WithdrawalNetwork.L2 + ); + } + + function _assertFeeVaultConfig( + IFeeVault _feeVault, + address _expectedRecipient, + uint256 _expectedMinWithdrawalAmount, + Types.WithdrawalNetwork _expectedWithdrawalNetwork + ) + internal + view + { + assertEq(_feeVault.recipient(), _expectedRecipient, "FeeVault.recipient not preserved"); + assertEq( + _feeVault.minWithdrawalAmount(), _expectedMinWithdrawalAmount, "FeeVault.minWithdrawalAmount not preserved" + ); + assertTrue( + _feeVault.withdrawalNetwork() == _expectedWithdrawalNetwork, "FeeVault.withdrawalNetwork not preserved" + ); + } +} + +/// @title L2ContractsManager_CGT_Test +/// @notice Test contract for the L2ContractsManager on Custom Gas Token networks. +contract L2ContractsManager_Upgrade_CGT_Test is L2ContractsManager_Upgrade_Test { + /// @notice Tests that CGT-specific contracts are upgraded when CGT is enabled. 
+ function test_upgradeUpgradesCGTContracts_whenCGTEnabled_succeeds() public { + skipIfSysFeatureDisabled(Features.CUSTOM_GAS_TOKEN); + + // Capture pre-upgrade implementations for CGT-specific contracts + address preUpgradeLiquidityControllerImpl = EIP1967Helper.getImplementation(Predeploys.LIQUIDITY_CONTROLLER); + address preUpgradeNativeAssetLiquidityImpl = EIP1967Helper.getImplementation(Predeploys.NATIVE_ASSET_LIQUIDITY); + + // Execute the upgrade + _executeUpgrade(); + + // Verify LiquidityController was upgraded + address postUpgradeLiquidityControllerImpl = EIP1967Helper.getImplementation(Predeploys.LIQUIDITY_CONTROLLER); + assertEq( + postUpgradeLiquidityControllerImpl, + implementations.liquidityControllerImpl, + "LiquidityController should be upgraded to new implementation" + ); + assertTrue( + postUpgradeLiquidityControllerImpl != preUpgradeLiquidityControllerImpl + || preUpgradeLiquidityControllerImpl == implementations.liquidityControllerImpl, + "LiquidityController implementation should change or already be target" + ); + + // Verify NativeAssetLiquidity was upgraded + address postUpgradeNativeAssetLiquidityImpl = EIP1967Helper.getImplementation(Predeploys.NATIVE_ASSET_LIQUIDITY); + assertEq( + postUpgradeNativeAssetLiquidityImpl, + implementations.nativeAssetLiquidityImpl, + "NativeAssetLiquidity should be upgraded to new implementation" + ); + assertTrue( + postUpgradeNativeAssetLiquidityImpl != preUpgradeNativeAssetLiquidityImpl + || preUpgradeNativeAssetLiquidityImpl == implementations.nativeAssetLiquidityImpl, + "NativeAssetLiquidity implementation should change or already be target" + ); + + // Verify L1Block uses CGT implementation + address postUpgradeL1BlockImpl = EIP1967Helper.getImplementation(Predeploys.L1_BLOCK_ATTRIBUTES); + assertEq( + postUpgradeL1BlockImpl, + implementations.l1BlockCGTImpl, + "L1Block should use CGT implementation on CGT networks" + ); + + // Verify L2ToL1MessagePasser uses CGT implementation + address 
postUpgradeL2ToL1MessagePasserImpl = EIP1967Helper.getImplementation(Predeploys.L2_TO_L1_MESSAGE_PASSER); + assertEq( + postUpgradeL2ToL1MessagePasserImpl, + implementations.l2ToL1MessagePasserCGTImpl, + "L2ToL1MessagePasser should use CGT implementation on CGT networks" + ); + } + + /// @notice Tests that LiquidityController config is preserved after upgrade on CGT networks. + function test_upgradePreservesLiquidityControllerConfig_onCGTNetwork_succeeds() public { + skipIfSysFeatureDisabled(Features.CUSTOM_GAS_TOKEN); + + // Capture pre-upgrade config + L2ContractsManagerTypes.FullConfig memory preUpgradeConfig = l2cm.loadFullConfig(); + + // Execute the upgrade + _executeUpgrade(); + + // Verify LiquidityController config is preserved + ILiquidityController liquidityController = ILiquidityController(Predeploys.LIQUIDITY_CONTROLLER); + assertEq( + liquidityController.owner(), + preUpgradeConfig.liquidityController.owner, + "LiquidityController.owner not preserved" + ); + assertEq( + liquidityController.gasPayingTokenName(), + preUpgradeConfig.liquidityController.gasPayingTokenName, + "LiquidityController.gasPayingTokenName not preserved" + ); + assertEq( + liquidityController.gasPayingTokenSymbol(), + preUpgradeConfig.liquidityController.gasPayingTokenSymbol, + "LiquidityController.gasPayingTokenSymbol not preserved" + ); + } +} + +/// @title L2ContractsManager_Upgrade_DowngradePrevention_Test +/// @notice Test contract that verifies L2CM prevents downgrading predeploy implementations. +contract L2ContractsManager_Upgrade_DowngradePrevention_Test is L2ContractsManager_Upgrade_Test { + /// @notice Tests that upgrade reverts when a non-initializable predeploy has a higher version than the new + /// implementation. 
+ function test_upgrade_whenDowngradingNonInitializablePredeploy_reverts() public { + // Mock GasPriceOracle to report a version higher than the new implementation + string memory higherVersion = "999.0.0"; + vm.mockCall(Predeploys.GAS_PRICE_ORACLE, abi.encodeCall(ISemver.version, ()), abi.encode(higherVersion)); + + vm.expectRevert( + abi.encodeWithSelector( + L2ContractsManagerUtils.L2ContractsManager_DowngradeNotAllowed.selector, Predeploys.GAS_PRICE_ORACLE + ) + ); + _executeUpgrade(); + } + + /// @notice Tests that upgrade reverts when an initializable predeploy has a higher version than the new + /// implementation. + function test_upgrade_whenDowngradingInitializablePredeploy_reverts() public { + // Mock L2CrossDomainMessenger to report a version higher than the new implementation + string memory higherVersion = "999.0.0"; + vm.mockCall( + Predeploys.L2_CROSS_DOMAIN_MESSENGER, abi.encodeCall(ISemver.version, ()), abi.encode(higherVersion) + ); + + vm.expectRevert( + abi.encodeWithSelector( + L2ContractsManagerUtils.L2ContractsManager_DowngradeNotAllowed.selector, + Predeploys.L2_CROSS_DOMAIN_MESSENGER + ) + ); + _executeUpgrade(); + } + + /// @notice Tests that upgrade succeeds when the predeploy has the same version as the new implementation + /// (not a downgrade). + function test_upgrade_whenSameVersion_succeeds() public { + // Mock GasPriceOracle to report the same version as the new implementation + string memory implVersion = ISemver(implementations.gasPriceOracleImpl).version(); + vm.mockCall(Predeploys.GAS_PRICE_ORACLE, abi.encodeCall(ISemver.version, ()), abi.encode(implVersion)); + + _executeUpgrade(); + + // Verify the upgrade went through + assertEq( + EIP1967Helper.getImplementation(Predeploys.GAS_PRICE_ORACLE), + implementations.gasPriceOracleImpl, + "GasPriceOracle should be upgraded" + ); + } +} + +/// @title L2ContractsManager_GetImplementations_Test +/// @notice Tests for the getImplementations() getter function. 
+contract L2ContractsManager_GetImplementations_Test is L2ContractsManager_Upgrade_Test { + /// @notice Tests that getImplementations returns all implementation addresses matching the constructor input. + function test_getImplementations_returnsAllImplementations_succeeds() public view { + L2ContractsManagerTypes.Implementations memory result = l2cm.getImplementations(); + + assertEq(result.storageSetterImpl, implementations.storageSetterImpl, "storageSetterImpl mismatch"); + assertEq( + result.l2CrossDomainMessengerImpl, + implementations.l2CrossDomainMessengerImpl, + "l2CrossDomainMessengerImpl mismatch" + ); + assertEq(result.gasPriceOracleImpl, implementations.gasPriceOracleImpl, "gasPriceOracleImpl mismatch"); + assertEq(result.l2StandardBridgeImpl, implementations.l2StandardBridgeImpl, "l2StandardBridgeImpl mismatch"); + assertEq( + result.sequencerFeeWalletImpl, implementations.sequencerFeeWalletImpl, "sequencerFeeWalletImpl mismatch" + ); + assertEq( + result.optimismMintableERC20FactoryImpl, + implementations.optimismMintableERC20FactoryImpl, + "optimismMintableERC20FactoryImpl mismatch" + ); + assertEq(result.l2ERC721BridgeImpl, implementations.l2ERC721BridgeImpl, "l2ERC721BridgeImpl mismatch"); + assertEq(result.l1BlockImpl, implementations.l1BlockImpl, "l1BlockImpl mismatch"); + assertEq(result.l1BlockCGTImpl, implementations.l1BlockCGTImpl, "l1BlockCGTImpl mismatch"); + assertEq( + result.l2ToL1MessagePasserImpl, implementations.l2ToL1MessagePasserImpl, "l2ToL1MessagePasserImpl mismatch" + ); + assertEq( + result.l2ToL1MessagePasserCGTImpl, + implementations.l2ToL1MessagePasserCGTImpl, + "l2ToL1MessagePasserCGTImpl mismatch" + ); + assertEq( + result.optimismMintableERC721FactoryImpl, + implementations.optimismMintableERC721FactoryImpl, + "optimismMintableERC721FactoryImpl mismatch" + ); + assertEq(result.proxyAdminImpl, implementations.proxyAdminImpl, "proxyAdminImpl mismatch"); + assertEq(result.baseFeeVaultImpl, implementations.baseFeeVaultImpl, 
"baseFeeVaultImpl mismatch"); + assertEq(result.l1FeeVaultImpl, implementations.l1FeeVaultImpl, "l1FeeVaultImpl mismatch"); + assertEq(result.operatorFeeVaultImpl, implementations.operatorFeeVaultImpl, "operatorFeeVaultImpl mismatch"); + assertEq(result.schemaRegistryImpl, implementations.schemaRegistryImpl, "schemaRegistryImpl mismatch"); + assertEq(result.easImpl, implementations.easImpl, "easImpl mismatch"); + assertEq(result.crossL2InboxImpl, implementations.crossL2InboxImpl, "crossL2InboxImpl mismatch"); + assertEq( + result.l2ToL2CrossDomainMessengerImpl, + implementations.l2ToL2CrossDomainMessengerImpl, + "l2ToL2CrossDomainMessengerImpl mismatch" + ); + assertEq( + result.superchainETHBridgeImpl, implementations.superchainETHBridgeImpl, "superchainETHBridgeImpl mismatch" + ); + assertEq(result.ethLiquidityImpl, implementations.ethLiquidityImpl, "ethLiquidityImpl mismatch"); + assertEq( + result.optimismSuperchainERC20FactoryImpl, + implementations.optimismSuperchainERC20FactoryImpl, + "optimismSuperchainERC20FactoryImpl mismatch" + ); + assertEq( + result.optimismSuperchainERC20BeaconImpl, + implementations.optimismSuperchainERC20BeaconImpl, + "optimismSuperchainERC20BeaconImpl mismatch" + ); + assertEq( + result.superchainTokenBridgeImpl, + implementations.superchainTokenBridgeImpl, + "superchainTokenBridgeImpl mismatch" + ); + assertEq( + result.nativeAssetLiquidityImpl, + implementations.nativeAssetLiquidityImpl, + "nativeAssetLiquidityImpl mismatch" + ); + assertEq( + result.liquidityControllerImpl, implementations.liquidityControllerImpl, "liquidityControllerImpl mismatch" + ); + assertEq(result.feeSplitterImpl, implementations.feeSplitterImpl, "feeSplitterImpl mismatch"); + assertEq( + result.conditionalDeployerImpl, implementations.conditionalDeployerImpl, "conditionalDeployerImpl mismatch" + ); + } + + /// @notice Tests that no field in getImplementations() is left uninitialized + /// when all implementations are provided to the constructor. 
+ function test_getImplementations_noFieldIsZero_succeeds() public view { + L2ContractsManagerTypes.Implementations memory result = l2cm.getImplementations(); + + assertTrue(result.storageSetterImpl != address(0), "storageSetterImpl is zero"); + assertTrue(result.l2CrossDomainMessengerImpl != address(0), "l2CrossDomainMessengerImpl is zero"); + assertTrue(result.gasPriceOracleImpl != address(0), "gasPriceOracleImpl is zero"); + assertTrue(result.l2StandardBridgeImpl != address(0), "l2StandardBridgeImpl is zero"); + assertTrue(result.sequencerFeeWalletImpl != address(0), "sequencerFeeWalletImpl is zero"); + assertTrue(result.optimismMintableERC20FactoryImpl != address(0), "optimismMintableERC20FactoryImpl is zero"); + assertTrue(result.l2ERC721BridgeImpl != address(0), "l2ERC721BridgeImpl is zero"); + assertTrue(result.l1BlockImpl != address(0), "l1BlockImpl is zero"); + assertTrue(result.l1BlockCGTImpl != address(0), "l1BlockCGTImpl is zero"); + assertTrue(result.l2ToL1MessagePasserImpl != address(0), "l2ToL1MessagePasserImpl is zero"); + assertTrue(result.l2ToL1MessagePasserCGTImpl != address(0), "l2ToL1MessagePasserCGTImpl is zero"); + assertTrue(result.optimismMintableERC721FactoryImpl != address(0), "optimismMintableERC721FactoryImpl is zero"); + assertTrue(result.proxyAdminImpl != address(0), "proxyAdminImpl is zero"); + assertTrue(result.baseFeeVaultImpl != address(0), "baseFeeVaultImpl is zero"); + assertTrue(result.l1FeeVaultImpl != address(0), "l1FeeVaultImpl is zero"); + assertTrue(result.operatorFeeVaultImpl != address(0), "operatorFeeVaultImpl is zero"); + assertTrue(result.schemaRegistryImpl != address(0), "schemaRegistryImpl is zero"); + assertTrue(result.easImpl != address(0), "easImpl is zero"); + assertTrue(result.crossL2InboxImpl != address(0), "crossL2InboxImpl is zero"); + assertTrue(result.l2ToL2CrossDomainMessengerImpl != address(0), "l2ToL2CrossDomainMessengerImpl is zero"); + assertTrue(result.superchainETHBridgeImpl != address(0), 
"superchainETHBridgeImpl is zero"); + assertTrue(result.ethLiquidityImpl != address(0), "ethLiquidityImpl is zero"); + assertTrue( + result.optimismSuperchainERC20FactoryImpl != address(0), "optimismSuperchainERC20FactoryImpl is zero" + ); + assertTrue(result.optimismSuperchainERC20BeaconImpl != address(0), "optimismSuperchainERC20BeaconImpl is zero"); + assertTrue(result.superchainTokenBridgeImpl != address(0), "superchainTokenBridgeImpl is zero"); + assertTrue(result.nativeAssetLiquidityImpl != address(0), "nativeAssetLiquidityImpl is zero"); + assertTrue(result.liquidityControllerImpl != address(0), "liquidityControllerImpl is zero"); + assertTrue(result.feeSplitterImpl != address(0), "feeSplitterImpl is zero"); + assertTrue(result.conditionalDeployerImpl != address(0), "conditionalDeployerImpl is zero"); + } +} + +/// @title L2ContractsManager_Upgrade_Coverage_Test +/// @notice Test that verifies all predeploys receive upgrade calls during L2CM upgrade. +/// Uses Predeploys.sol as the source of truth for which predeploys should be upgraded. +contract L2ContractsManager_Upgrade_Coverage_Test is L2ContractsManager_Upgrade_Test { + /// @notice Returns CGT-only predeploys that require initialization. + /// @dev These are separate because they're only deployed on CGT networks. + function _getCGTInitializablePredeploys() internal pure returns (address[] memory predeploys_) { + predeploys_ = new address[](1); + predeploys_[0] = Predeploys.LIQUIDITY_CONTROLLER; + } + + /// @notice Checks if a predeploy requires initialization. + /// @dev Returns true for predeploys that have an initializer and need upgradeToAndCall. + /// This determines the upgrade method, not coverage. 
+ function _requiresInitialization(address _predeploy) internal pure returns (bool) { + return _predeploy == Predeploys.L2_CROSS_DOMAIN_MESSENGER || _predeploy == Predeploys.L2_STANDARD_BRIDGE + || _predeploy == Predeploys.L2_ERC721_BRIDGE || _predeploy == Predeploys.OPTIMISM_MINTABLE_ERC20_FACTORY + || _predeploy == Predeploys.SEQUENCER_FEE_WALLET || _predeploy == Predeploys.BASE_FEE_VAULT + || _predeploy == Predeploys.L1_FEE_VAULT || _predeploy == Predeploys.OPERATOR_FEE_VAULT + || _predeploy == Predeploys.FEE_SPLITTER || _predeploy == Predeploys.LIQUIDITY_CONTROLLER; + } + + /// @notice Checks if a predeploy is deployed and upgradeable. + /// @dev Uses EIP1967Helper to read the implementation slot directly from storage. + /// This avoids calling the proxy's implementation() function which may fail. + function _isPredeployUpgradeable(address _proxy) internal view returns (bool) { + address impl = EIP1967Helper.getImplementation(_proxy); + return impl != address(0) && impl.code.length > 0; + } + + /// @notice Tests that all predeploys from Predeploys.sol receive the expected upgrade call. + /// Uses vm.expectCall() to verify that upgradeTo or upgradeToAndCall is called. + /// @dev If L2CM misses a predeploy that exists in Predeploys.sol, this test will fail. 
+ function test_allPredeploysReceiveUpgradeCall_succeeds() public { + address[] memory allPredeploys = Predeploys.getUpgradeablePredeploys(); + + for (uint256 i = 0; i < allPredeploys.length; i++) { + address predeploy = allPredeploys[i]; + + // Skip predeploys that are not deployed on this chain (e.g., CGT-only, interop-only) + if (!_isPredeployUpgradeable(predeploy)) continue; + + // Expect the appropriate upgrade call based on whether initialization is required + if (_requiresInitialization(predeploy)) { + // nosemgrep:sol-style-use-abi-encodecall + vm.expectCall(predeploy, abi.encodeWithSelector(IProxy.upgradeToAndCall.selector)); + } else { + // nosemgrep:sol-style-use-abi-encodecall + vm.expectCall(predeploy, abi.encodeWithSelector(IProxy.upgradeTo.selector)); + } + } + + _executeUpgrade(); + } + + /// @notice Tests that CGT-specific predeploys receive upgrade calls on CGT networks. + /// @dev CGT predeploys are conditionally deployed, so they need separate verification. + function test_cgtPredeploysReceiveUpgradeCall_whenCGTEnabled_succeeds() public { + skipIfSysFeatureDisabled(Features.CUSTOM_GAS_TOKEN); + + // Get CGT-only predeploys that require initialization + address[] memory cgtInitPredeploys = _getCGTInitializablePredeploys(); + for (uint256 i = 0; i < cgtInitPredeploys.length; i++) { + // nosemgrep:sol-style-use-abi-encodecall + vm.expectCall(cgtInitPredeploys[i], abi.encodeWithSelector(IProxy.upgradeToAndCall.selector)); + } + + // NativeAssetLiquidity uses upgradeTo + // nosemgrep:sol-style-use-abi-encodecall + vm.expectCall(Predeploys.NATIVE_ASSET_LIQUIDITY, abi.encodeWithSelector(IProxy.upgradeTo.selector)); + + _executeUpgrade(); + } +} diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol index 3950de5901acb..cc61a0991f83b 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol +++ 
b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol @@ -55,7 +55,10 @@ abstract contract OptimismSuperchainERC20_TestInit is Test { // Deploy the OptimismSuperchainERC20Beacon implementation address _addr = Predeploys.OPTIMISM_SUPERCHAIN_ERC20_BEACON; address _impl = Predeploys.predeployToCodeNamespace(_addr); - vm.etch(_impl, vm.getDeployedCode("OptimismSuperchainERC20Beacon.sol:OptimismSuperchainERC20Beacon")); + vm.etch( + _impl, + vm.getDeployedCode("forge-artifacts/OptimismSuperchainERC20Beacon.sol/OptimismSuperchainERC20Beacon.json") + ); // Deploy the ERC1967Proxy contract at the Predeploy bytes memory code = vm.getDeployedCode("universal/Proxy.sol:Proxy"); diff --git a/packages/contracts-bedrock/test/libraries/L2ContractsManagerUtils.t.sol b/packages/contracts-bedrock/test/libraries/L2ContractsManagerUtils.t.sol new file mode 100644 index 0000000000000..05e20215781e2 --- /dev/null +++ b/packages/contracts-bedrock/test/libraries/L2ContractsManagerUtils.t.sol @@ -0,0 +1,228 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Libraries +import { L2ContractsManagerUtils } from "src/libraries/L2ContractsManagerUtils.sol"; + +// Testing +import { CommonTest } from "test/setup/CommonTest.sol"; + +// Contracts +import { Predeploys } from "src/libraries/Predeploys.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Interfaces +import { ISemver } from "interfaces/universal/ISemver.sol"; +import { IStorageSetter } from "interfaces/universal/IStorageSetter.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; + +/// @title L2ContractsManagerUtils_ImplV1_Harness +/// @notice Implementation contract with version 1.0.0 for testing upgrades. +contract L2ContractsManagerUtils_ImplV1_Harness is ISemver { + /// @custom:semver 1.0.0 + string public constant version = "1.0.0"; + + /// @notice It is a no-op for this test. 
+ function initialize() external { } +} + +/// @title L2ContractsManagerUtils_ImplV2_Harness +/// @notice Implementation contract with version 2.0.0 for testing upgrades. +contract L2ContractsManagerUtils_ImplV2_Harness is ISemver { + /// @custom:semver 2.0.0 + string public constant version = "2.0.0"; + + /// @notice It is a no-op for this test. + function initialize() external { } +} + +/// @title L2ContractsManagerUtils_UpgradeToAndCall_Test +/// @notice Tests the `L2ContractsManagerUtils.upgradeToAndCall` function. +contract L2ContractsManagerUtils_UpgradeToAndCall_Test is CommonTest { + bytes32 internal constant INITIALIZABLE_SLOT_OZ_V4 = bytes32(0); + + bytes32 internal constant INITIALIZABLE_SLOT_OZ_V5 = + 0xf0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00; + + address internal _storageSetterImpl; + + address internal implV1; + address internal implV2; + + function setUp() public override { + super.setUp(); + implV1 = address(new L2ContractsManagerUtils_ImplV1_Harness()); + implV2 = address(new L2ContractsManagerUtils_ImplV2_Harness()); + + _storageSetterImpl = address( + IStorageSetter( + DeployUtils.create1({ + _name: "StorageSetter", + _args: DeployUtils.encodeConstructor(abi.encodeCall(IStorageSetter.__constructor__, ())) + }) + ) + ); + } + + /// @notice External wrapper so vm.expectRevert can catch reverts from the internal library call. + function _callUpgradeToAndCall( + address _proxy, + address _implementation, + address _storageSetter, + bytes memory _data, + bytes32 _slot, + uint8 _offset + ) + external + { + vm.startPrank(Predeploys.PROXY_ADMIN); + L2ContractsManagerUtils.upgradeToAndCall(_proxy, _implementation, _storageSetter, _data, _slot, _offset); + vm.stopPrank(); + } + + /// @notice Tests that v4 contracts are unaffected by the v5 slot clearing logic. For v4 + /// contracts the ERC-7201 slot is all zeros, so the new code is a no-op. 
+ function test_upgrade_v4ContractStillWorks_succeeds() public { + address proxy = Predeploys.L2_CROSS_DOMAIN_MESSENGER; + + // Upgrade to v1. + vm.prank(Predeploys.PROXY_ADMIN); + IProxy(payable(proxy)).upgradeTo(implV1); + + // Verify the ERC-7201 slot is zero. + assertEq(vm.load(proxy, INITIALIZABLE_SLOT_OZ_V5), bytes32(0)); + + // Upgrade to v2 should succeed and the ERC-7201 slot should remain zero. + vm.startPrank(Predeploys.PROXY_ADMIN); + L2ContractsManagerUtils.upgradeToAndCall( + proxy, + implV2, + _storageSetterImpl, + abi.encodeCall(L2ContractsManagerUtils_ImplV2_Harness.initialize, ()), + INITIALIZABLE_SLOT_OZ_V4, + 0 + ); + vm.stopPrank(); + + vm.prank(Predeploys.PROXY_ADMIN); + assertEq(IProxy(payable(proxy)).implementation(), address(implV2)); + assertEq(vm.load(proxy, INITIALIZABLE_SLOT_OZ_V5), bytes32(0)); + } + + /// @notice Tests that a v5 contract with `_initialized = 1` at the ERC-7201 slot gets cleared. + function test_upgrade_v5SlotCleared_succeeds() public { + address proxy = Predeploys.SEQUENCER_FEE_WALLET; + + // Set v1 as current implementation. + vm.prank(Predeploys.PROXY_ADMIN); + IProxy(payable(proxy)).upgradeTo(implV1); + + // Simulate a v5 contract with _initialized = 1 at the ERC-7201 slot. + vm.store(proxy, INITIALIZABLE_SLOT_OZ_V5, bytes32(uint256(1))); + + // Upgrade to v2 should succeed. + vm.startPrank(Predeploys.PROXY_ADMIN); + L2ContractsManagerUtils.upgradeToAndCall( + proxy, + implV2, + _storageSetterImpl, + abi.encodeCall(L2ContractsManagerUtils_ImplV2_Harness.initialize, ()), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + vm.stopPrank(); + + vm.prank(Predeploys.PROXY_ADMIN); + assertEq(IProxy(payable(proxy)).implementation(), address(implV2)); + // The v5 _initialized field should have been cleared. + assertEq(vm.load(proxy, INITIALIZABLE_SLOT_OZ_V5), bytes32(0)); + } + + /// @notice Tests that a v5 contract with `_initialized = type(uint64).max` (from + /// `_disableInitializers()`) gets cleared. 
+ function test_upgrade_v5SlotMaxInitialized_succeeds() public { + address proxy = Predeploys.SEQUENCER_FEE_WALLET; + + // Set v1 as current implementation. + vm.prank(Predeploys.PROXY_ADMIN); + IProxy(payable(proxy)).upgradeTo(implV1); + + // Simulate a v5 contract with _initialized = type(uint64).max (disabled initializers). + vm.store(proxy, INITIALIZABLE_SLOT_OZ_V5, bytes32(uint256(type(uint64).max))); + + // Upgrade to v2 should succeed. + vm.startPrank(Predeploys.PROXY_ADMIN); + L2ContractsManagerUtils.upgradeToAndCall( + proxy, + implV2, + _storageSetterImpl, + abi.encodeCall(L2ContractsManagerUtils_ImplV2_Harness.initialize, ()), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + vm.stopPrank(); + + vm.prank(Predeploys.PROXY_ADMIN); + assertEq(IProxy(payable(proxy)).implementation(), address(implV2)); + // The v5 _initialized field should have been cleared. + assertEq(vm.load(proxy, INITIALIZABLE_SLOT_OZ_V5), bytes32(0)); + } + + /// @notice Tests that upgrade reverts when `_initializing` bool is set at the ERC-7201 slot. + function test_upgrade_v5InitializingDuringUpgrade_reverts() public { + address proxy = Predeploys.SEQUENCER_FEE_WALLET; + + // Set v1 as current implementation. + vm.prank(Predeploys.PROXY_ADMIN); + IProxy(payable(proxy)).upgradeTo(implV1); + + // Simulate a v5 contract that is mid-initialization. The _initializing bool is at byte + // offset 8 (bit 64). Set _initialized = 1 and _initializing = true. + uint256 v5Value = 1 | (uint256(1) << 64); + vm.store(proxy, INITIALIZABLE_SLOT_OZ_V5, bytes32(v5Value)); + + vm.expectRevert(L2ContractsManagerUtils.L2ContractsManager_InitializingDuringUpgrade.selector); + this._callUpgradeToAndCall( + proxy, + implV2, + _storageSetterImpl, + abi.encodeCall(L2ContractsManagerUtils_ImplV2_Harness.initialize, ()), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + } + + /// @notice Tests that the upper bytes of the ERC-7201 slot beyond the Initializable struct + /// are preserved when clearing the `_initialized` field. 
+ function test_upgrade_v5SlotPreservesUpperBytes_succeeds() public { + address proxy = Predeploys.SEQUENCER_FEE_WALLET; + + // Set v1 as current implementation. + vm.prank(Predeploys.PROXY_ADMIN); + IProxy(payable(proxy)).upgradeTo(implV1); + + // Set the v5 slot with _initialized = 1 in the low 8 bytes and some data in the upper + // bytes (above the _initializing bool at byte offset 8). Bytes 9+ are unused by the + // Initializable struct but should be preserved. + uint256 upperData = uint256(0xDEADBEEF) << 128; + uint256 v5Value = upperData | 1; + vm.store(proxy, INITIALIZABLE_SLOT_OZ_V5, bytes32(v5Value)); + + // Upgrade to v2 should succeed. + vm.startPrank(Predeploys.PROXY_ADMIN); + L2ContractsManagerUtils.upgradeToAndCall( + proxy, + implV2, + _storageSetterImpl, + abi.encodeCall(L2ContractsManagerUtils_ImplV2_Harness.initialize, ()), + INITIALIZABLE_SLOT_OZ_V5, + 0 + ); + vm.stopPrank(); + + vm.prank(Predeploys.PROXY_ADMIN); + assertEq(IProxy(payable(proxy)).implementation(), address(implV2)); + // The upper bytes should be preserved, only the low 8 bytes should be zeroed. 
+ assertEq(vm.load(proxy, INITIALIZABLE_SLOT_OZ_V5), bytes32(upperData)); + } +} From 46cd1ff8cd773c589499523f10af3f29f47dec88 Mon Sep 17 00:00:00 2001 From: George Knee Date: Wed, 25 Feb 2026 17:29:47 +0000 Subject: [PATCH 026/201] op-devstack, op-supernode: close app context before stopping service (#19305) * add much logging * op-devstack/supernode: check Start error, and cancel Start context before calling Stop * devstack/supernode: eliminate duplicated lifecycle management * use interop name instead of reflection --------- Co-authored-by: Axel Kingsley --- op-devstack/sysgo/l2_cl_supernode.go | 103 +++++++----------- op-supernode/supernode/activity/activity.go | 2 +- .../supernode/activity/heartbeat/heartbeat.go | 4 + .../supernode/activity/superroot/superroot.go | 2 +- .../chain_container/chain_container.go | 8 +- op-supernode/supernode/supernode.go | 19 ++-- .../supernode/supernode_activities_test.go | 9 ++ 7 files changed, 69 insertions(+), 78 deletions(-) diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index ae7ce0c58527b..b06c27c93338f 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -45,6 +45,10 @@ type SuperNode struct { chains []eth.ChainID l1UserRPC string l1BeaconAddr string + + // Configs stored for Start()/restart. 
+ snCfg *snconfig.CLIConfig + vnCfgs map[eth.ChainID]*config.Config } var _ L2CLNode = (*SuperNode)(nil) @@ -79,49 +83,23 @@ func (n *SuperNode) Start() { return } - n.p.Require().NotEmpty(n.chains, "supernode has no chains configured") - chainIDs := make([]uint64, 0, len(n.chains)) - for _, id := range n.chains { - chainIDs = append(chainIDs, eth.EvilChainIDToUInt64(id)) - } - - // Build CLI config for supernode (single-chain) - cfg := &snconfig.CLIConfig{ - Chains: chainIDs, - DataDir: n.p.TempDir(), - L1NodeAddr: n.l1UserRPC, - L1BeaconAddr: n.l1BeaconAddr, - RPCConfig: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - ListenPort: 0, - EnableAdmin: true, - }, - // Other configs (Log/Metrics/Pprof) left default - } - - // Construct VN config map - vnCfgs := map[eth.ChainID]*config.Config{} + n.p.Require().NotNil(n.snCfg, "supernode CLI config required") - // Create Supernode instance ctx, cancel := context.WithCancel(n.p.Ctx()) - sn, err := supernode.New(ctx, n.logger, "devstack", func(err error) { n.p.Require().NoError(err, "supernode critical error") }, cfg, vnCfgs) + exitFn := func(err error) { n.p.Require().NoError(err, "supernode critical error") } + sn, err := supernode.New(ctx, n.logger, "devstack", exitFn, n.snCfg, n.vnCfgs) n.p.Require().NoError(err, "supernode failed to create") n.sn = sn n.cancel = cancel - err = n.sn.Start(ctx) - n.p.Require().NoError(err) + n.p.Require().NoError(n.sn.Start(ctx)) // Wait for the RPC addr and save userRPC/interop endpoints - if addr, err := n.sn.WaitRPCAddr(ctx); err == nil { - base := "http://" + addr - // single-chain instance routes at root - n.userRPC = base - n.interopEndpoint = base - } else { - n.p.Require().NoError(err, "supernode failed to bind RPC address") - } - + addr, err := n.sn.WaitRPCAddr(ctx) + n.p.Require().NoError(err, "supernode failed to bind RPC address") + base := "http://" + addr + n.userRPC = base + n.interopEndpoint = base } func (n *SuperNode) Stop() { @@ -380,7 +358,7 @@ func 
withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI els = append(els, &cls[i].ELID) } - // Start shared supernode with all chains + // Build supernode CLI config snCfg := &snconfig.CLIConfig{ Chains: chainIDs, DataDir: p.TempDir(), @@ -392,21 +370,28 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI if snOpts.InteropActivationTimestamp != nil { logger.Info("supernode interop enabled", "activation_timestamp", *snOpts.InteropActivationTimestamp) } - ctx, cancel := context.WithCancel(p.Ctx()) - exitFn := func(err error) { p.Require().NoError(err, "supernode critical error") } - sn, err := supernode.New(ctx, logger, "devstack", exitFn, snCfg, vnCfgs) - require.NoError(err) - go func() { _ = sn.Start(ctx) }() - // Resolve bound address - addr, err := sn.WaitRPCAddr(ctx) - require.NoError(err, "failed waiting for supernode RPC addr") - base := "http://" + addr - p.Cleanup(func() { - stopCtx, c := context.WithTimeout(context.Background(), 5*time.Second) - _ = sn.Stop(stopCtx) - c() - cancel() - }) + + snode := &SuperNode{ + id: supernodeID, + userRPC: "", + interopEndpoint: "", + interopJwtSecret: jwtSecret, + p: p, + logger: logger, + els: els, + chains: idsFromCLs(cls), + l1UserRPC: l1EL.UserRPC(), + l1BeaconAddr: l1CL.beaconHTTPAddr, + snCfg: snCfg, + vnCfgs: vnCfgs, + } + + // Start and register cleanup, following the same pattern as OpNode. 
+ snode.Start() + p.Cleanup(snode.Stop) + + base := snode.UserRPC() + // Wait for per-chain RPC routes to serve optimism_rollupConfig and register proxies waitReady := func(u string) { deadline := time.Now().Add(15 * time.Second) @@ -445,21 +430,7 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI orch.registry.Register(cid, proxy) } - snNode := &SuperNode{ - id: supernodeID, - sn: sn, - cancel: cancel, - userRPC: base, - interopEndpoint: base, - interopJwtSecret: jwtSecret, - p: p, - logger: logger, - els: els, - chains: idsFromCLs(cls), - l1UserRPC: l1EL.UserRPC(), - l1BeaconAddr: l1CL.beaconHTTPAddr, - } - orch.supernodes.Set(supernodeID, snNode) + orch.supernodes.Set(supernodeID, snode) } func idsFromCLs(cls []L2CLs) []eth.ChainID { diff --git a/op-supernode/supernode/activity/activity.go b/op-supernode/supernode/activity/activity.go index be08fb59aca62..2d6928e3ff3fd 100644 --- a/op-supernode/supernode/activity/activity.go +++ b/op-supernode/supernode/activity/activity.go @@ -8,6 +8,7 @@ import ( // Activity is an open interface to collect pluggable behaviors which satisfy sub-activitiy interfaces. type Activity interface { + Name() string // Reset is called when a chain container resets due to an invalidated block. // Activities should clean up any cached state for that chain at or after the timestamp. // The invalidatedBlock is the block that was is the target of the reset @@ -34,7 +35,6 @@ type RPCActivity interface { // VerificationActivity is an Activity that can be used to verify the correctness of the Supernode's Chains type VerificationActivity interface { Activity - Name() string // Reset resets the activity's state. 
Reset(chainID eth.ChainID, timestamp uint64, invalidatedBlock eth.BlockRef) diff --git a/op-supernode/supernode/activity/heartbeat/heartbeat.go b/op-supernode/supernode/activity/heartbeat/heartbeat.go index c37acb2ba6e9c..b0e8a61068ab6 100644 --- a/op-supernode/supernode/activity/heartbeat/heartbeat.go +++ b/op-supernode/supernode/activity/heartbeat/heartbeat.go @@ -30,6 +30,10 @@ func New(log gethlog.Logger, interval time.Duration) *Heartbeat { return &Heartbeat{log: log, interval: interval} } +func (h *Heartbeat) Name() string { + return "heartbeat" +} + // Start begins the periodic logging loop. func (h *Heartbeat) Start(ctx context.Context) error { if h.interval <= 0 { diff --git a/op-supernode/supernode/activity/superroot/superroot.go b/op-supernode/supernode/activity/superroot/superroot.go index a1f1f623848e5..d0717a4982375 100644 --- a/op-supernode/supernode/activity/superroot/superroot.go +++ b/op-supernode/supernode/activity/superroot/superroot.go @@ -28,7 +28,7 @@ func New(log gethlog.Logger, chains map[eth.ChainID]cc.ChainContainer) *Superroo } } -func (s *Superroot) ActivityName() string { return "superroot" } +func (s *Superroot) Name() string { return "superroot" } // Reset is a no-op for superroot - it always queries chain containers directly // and doesn't maintain any chain-specific cached state. 
diff --git a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index 52c004c908895..adf6b008a3fc0 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -211,14 +211,19 @@ func (c *simpleChainContainer) Start(ctx context.Context) error { // start the virtual node err := c.vn.Start(ctx) if err != nil { - c.log.Warn("virtual node exited with error", "error", err) + c.log.Warn("virtual node exited with error", "vn_id", c.vn, "error", err) + } else { + c.log.Info("virtual node exited", "vn_id", c.vn) } // always stop the virtual node after it exits stopCtx, cancel := context.WithTimeout(context.Background(), 10*time.Second) if stopErr := c.vn.Stop(stopCtx); stopErr != nil { c.log.Error("error stopping virtual node", "error", stopErr) + } else { + c.log.Info("virtual node stopped", "vn_id", c.vn) } + cancel() if ctx.Err() != nil { c.log.Info("chain container context cancelled, stopping restart loop", "ctx_err", ctx.Err()) @@ -230,7 +235,6 @@ func (c *simpleChainContainer) Start(ctx context.Context) error { c.log.Info("chain container stop requested, stopping restart loop") break } - } c.log.Info("chain container exiting") return nil diff --git a/op-supernode/supernode/supernode.go b/op-supernode/supernode/supernode.go index 82e79187a0ba0..dceb971dc556b 100644 --- a/op-supernode/supernode/supernode.go +++ b/op-supernode/supernode/supernode.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "net" - "reflect" "strconv" "sync" "time" @@ -92,7 +91,7 @@ func New(ctx context.Context, log gethlog.Logger, version string, requestStop co superroot.New(log.New("activity", "superroot"), s.chains), } - log.Info("initializing interop activity? %v", cfg.RawCtx.IsSet(interop.InteropActivationTimestampFlag.Name)) + log.Info("initializing interop activity? 
%v", cfg.InteropActivationTimestamp != nil) // Initialize interop activity if the activation timestamp is set (non-nil) // If it's nil, don't start interop. If it's non-nil (including 0), do start it. if cfg.InteropActivationTimestamp != nil { @@ -161,16 +160,17 @@ func (s *Supernode) Start(ctx context.Context) error { go func(run activity.RunnableActivity) { defer s.wg.Done() err := run.Start(ctx) + activityName := a.Name() switch err { case nil: - s.log.Error("activity quit unexpectedly") + s.log.Error("activity quit unexpectedly", "name", activityName) case context.Canceled: // This is the happy path, normal / clean shutdown - s.log.Info("activity closing due to cancelled context") + s.log.Info("activity closing due to cancelled context", "name", activityName) case context.DeadlineExceeded: - s.log.Warn("activity quit due to deadline exceeded") + s.log.Warn("activity quit due to deadline exceeded", "name", activityName) default: - s.log.Error("error starting runnable activity", "error", err) + s.log.Error("error starting runnable activity", "name", activityName, "error", err) } }(run) } @@ -220,11 +220,12 @@ func (s *Supernode) Stop(ctx context.Context) error { // Stop runnable activities for _, a := range s.activities { + activityName := a.Name() if run, ok := a.(activity.RunnableActivity); ok { if err := run.Stop(ctx); err != nil { - s.log.Error("error stopping runnable activity", "error", err) + s.log.Error("error stopping runnable activity", "name", activityName, "error", err) } else { - s.log.Info("runnable activity stopped", "activity", reflect.TypeOf(a).String()) + s.log.Info("runnable activity stopped", "name", activityName) } } } @@ -239,10 +240,12 @@ func (s *Supernode) Stop(ctx context.Context) error { s.log.Info("all chain containers stopped, waiting for goroutines to finish") s.wg.Wait() + s.log.Info("goroutines finished, closing l1 client") if s.l1Client != nil { s.l1Client.Close() } + s.log.Info("l1 client closed, supernode stopped") return nil } 
diff --git a/op-supernode/supernode/supernode_activities_test.go b/op-supernode/supernode/supernode_activities_test.go index e795390245216..419a09919b48d 100644 --- a/op-supernode/supernode/supernode_activities_test.go +++ b/op-supernode/supernode/supernode_activities_test.go @@ -26,6 +26,10 @@ type mockRunnable struct { stopped int } +func (mockRunnable) Name() string { + return "mockRunnable" +} + func (m *mockRunnable) Start(ctx context.Context) error { m.started++ m.ctx, m.cancel = context.WithCancel(ctx) @@ -49,6 +53,10 @@ var _ activity.RunnableActivity = (*mockRunnable)(nil) // plain marker-only activity type plainActivity struct{} +func (p *plainActivity) Name() string { + return "plainActivity" +} + func (p *plainActivity) Reset(chainID eth.ChainID, timestamp uint64, invalidatedBlock eth.BlockRef) { } @@ -64,6 +72,7 @@ func (s *rpcSvc) Echo(_ context.Context) (string, error) { return "ok", nil } type rpcAct struct{} +func (a *rpcAct) Name() string { return "rpcActivity" } func (a *rpcAct) RPCNamespace() string { return "act" } func (a *rpcAct) RPCService() interface{} { return &rpcSvc{} } func (a *rpcAct) Reset(chainID eth.ChainID, timestamp uint64, invalidatedBlock eth.BlockRef) { From 80804af288d016d28bf26e3b14e58679bb070d87 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 26 Feb 2026 08:01:51 -0500 Subject: [PATCH 027/201] op-devstack: refactor genesis interop activation to use UseGenesisInterop flag (#19302) * op-devstack: refactor genesis interop activation to use UseGenesisInterop flag Extracts genesis timestamp resolution out of WithSharedSupernodeCLsInterop and into withSharedSupernodeCLsImpl via a new UseGenesisInterop field on SupernodeConfig. Adds WithSupernodeInteropAtGenesis() option and threads snOpts through defaultSupernodeSuperProofsSystem so callers can pass supernode options independently of deployer options. Ported from https://github.com/ethereum-optimism/optimism/pull/19242 Co-Authored-By: Claude Sonnet 4.6 * Skip failing test. 
* Move test that stops the batcher to its own package. * Skip one more test. --------- Co-authored-by: Claude Sonnet 4.6 --- .../tests/interop/proofs/fpp/fpp_test.go | 2 + .../tests/interop/proofs/proposer_test.go | 2 +- .../tests/interop/proofs/serial/init_test.go | 16 +++++++ .../{ => serial}/interop_fault_proofs_test.go | 4 +- .../superfaultproofs/superfaultproofs.go | 4 +- op-devstack/sysgo/l2_cl_supernode.go | 46 ++++++++++--------- op-devstack/sysgo/system.go | 12 +++-- 7 files changed, 57 insertions(+), 29 deletions(-) create mode 100644 op-acceptance-tests/tests/interop/proofs/serial/init_test.go rename op-acceptance-tests/tests/interop/proofs/{ => serial}/interop_fault_proofs_test.go (77%) diff --git a/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go b/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go index ba4d858b8a76c..2fcd6b8eff2d3 100644 --- a/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go +++ b/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go @@ -22,6 +22,8 @@ func TestFPP(gt *testing.T) { func TestNextSuperRootNotFound(gt *testing.T) { t := devtest.SerialT(gt) + // TODO(#19180): Unskip this once supernode is updated. 
+ t.Skip("Supernode does not yet return optimistic blocks until blocks are fully validated") sys := presets.NewSimpleInterop(t) blockTime := sys.L2ChainA.Escape().RollupConfig().BlockTime diff --git a/op-acceptance-tests/tests/interop/proofs/proposer_test.go b/op-acceptance-tests/tests/interop/proofs/proposer_test.go index 08c2ce51a0d59..c4806c3b3615a 100644 --- a/op-acceptance-tests/tests/interop/proofs/proposer_test.go +++ b/op-acceptance-tests/tests/interop/proofs/proposer_test.go @@ -8,7 +8,7 @@ import ( ) func TestProposer(gt *testing.T) { - t := devtest.SerialT(gt) + t := devtest.ParallelT(gt) sys := presets.NewSimpleInterop(t) dgf := sys.DisputeGameFactory() diff --git a/op-acceptance-tests/tests/interop/proofs/serial/init_test.go b/op-acceptance-tests/tests/interop/proofs/serial/init_test.go new file mode 100644 index 0000000000000..0a8471073a80f --- /dev/null +++ b/op-acceptance-tests/tests/interop/proofs/serial/init_test.go @@ -0,0 +1,16 @@ +package serial + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, + presets.WithSuperInteropSupernode(), + stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), + ) +} diff --git a/op-acceptance-tests/tests/interop/proofs/interop_fault_proofs_test.go b/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go similarity index 77% rename from op-acceptance-tests/tests/interop/proofs/interop_fault_proofs_test.go rename to op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go index 5df3b27fdb50a..63498eba2397a 100644 --- a/op-acceptance-tests/tests/interop/proofs/interop_fault_proofs_test.go +++ b/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go @@ -1,4 +1,4 @@ -package proofs +package serial import ( "testing" @@ -10,6 +10,8 @@ import ( 
func TestInteropFaultProofs(gt *testing.T) { t := devtest.SerialT(gt) + // TODO(#19180): Unskip this once supernode is updated. + t.Skip("Supernode does not yet return optimistic blocks until blocks are fully validated") sys := presets.NewSimpleInterop(t) sfp.RunSuperFaultProofTest(t, sys) } diff --git a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go index 11e1fd3ccea0e..0445b7dcacc16 100644 --- a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go +++ b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go @@ -572,10 +572,10 @@ func RunSuperFaultProofTest(t devtest.T, sys *presets.SimpleInterop) { // -- Stage 1: Freeze batch submission ---------------------------------- chains[0].Batcher.Stop() chains[1].Batcher.Stop() - defer func() { + t.Cleanup(func() { chains[0].Batcher.Start() chains[1].Batcher.Start() - }() + }) awaitSafeHeadsStalled(t, sys.L2CLA, sys.L2CLB) endTimestamp := nextTimestampAfterSafeHeads(t, chains) diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index b06c27c93338f..442b7ff164cde 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -197,6 +197,11 @@ type SupernodeConfig struct { // InteropActivationTimestamp enables the interop activity at the given timestamp. // Set to nil to disable interop (default). Non-nil (including 0) enables interop. InteropActivationTimestamp *uint64 + + // UseGenesisInterop, when true, sets InteropActivationTimestamp to the genesis + // timestamp of the first configured chain at deploy time. Takes effect inside + // withSharedSupernodeCLsImpl after deployment, when the genesis time is known. + UseGenesisInterop bool } // SupernodeOption is a functional option for configuring the supernode. 
@@ -210,34 +215,24 @@ func WithSupernodeInterop(activationTimestamp uint64) SupernodeOption { } } +// WithSupernodeInteropAtGenesis enables interop at the genesis timestamp of the first +// configured chain. The timestamp is resolved after deployment, when genesis is known. +func WithSupernodeInteropAtGenesis() SupernodeOption { + return func(cfg *SupernodeConfig) { + cfg.UseGenesisInterop = true + } +} + // WithSharedSupernodeCLsInterop starts one supernode for N L2 chains with interop enabled at genesis. // The interop activation timestamp is computed from the first chain's genesis time. func WithSharedSupernodeCLsInterop(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - // Get genesis timestamp from first chain - if len(cls) == 0 { - orch.P().Require().Fail("no chains provided") - return - } - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(cls[0].CLID.ChainID())).ComponentID) - if !ok { - orch.P().Require().Fail("l2 network not found") - return - } - l2Net := l2NetComponent.(*L2Network) - genesisTime := l2Net.rollupCfg.Genesis.L2Time - orch.P().Logger().Info("enabling supernode interop at genesis", "activation_timestamp", genesisTime) - - // Call the main implementation with interop enabled - withSharedSupernodeCLsImpl(orch, supernodeID, cls, l1CLID, l1ELID, WithSupernodeInterop(genesisTime)) - }) + return WithSharedSupernodeCLs(supernodeID, cls, l1CLID, l1ELID, WithSupernodeInteropAtGenesis()) } // WithSharedSupernodeCLsInteropDelayed starts one supernode for N L2 chains with interop enabled // at a specified offset from genesis. This allows testing the transition from non-interop to interop mode. 
func WithSharedSupernodeCLsInteropDelayed(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, delaySeconds uint64) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { - // Get genesis timestamp from first chain if len(cls) == 0 { orch.P().Require().Fail("no chains provided") return @@ -255,8 +250,6 @@ func WithSharedSupernodeCLsInteropDelayed(supernodeID stack.SupernodeID, cls []L "activation_timestamp", activationTime, "delay_seconds", delaySeconds, ) - - // Call the main implementation with interop enabled at delayed timestamp withSharedSupernodeCLsImpl(orch, supernodeID, cls, l1CLID, l1ELID, WithSupernodeInterop(activationTime)) }) } @@ -279,6 +272,17 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI opt(snOpts) } + // Resolve UseGenesisInterop: read the activation timestamp from the first chain's genesis. + if snOpts.UseGenesisInterop && snOpts.InteropActivationTimestamp == nil { + p.Require().NotEmpty(cls, "no chains provided for genesis interop resolution") + l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(cls[0].CLID.ChainID())).ComponentID) + l2Net := l2NetComponent.(*L2Network) + p.Require().True(ok, "l2 network not found for genesis interop resolution") + genesisTime := l2Net.rollupCfg.Genesis.L2Time + p.Logger().Info("enabling supernode interop at genesis", "activation_timestamp", genesisTime) + snOpts.InteropActivationTimestamp = &genesisTime + } + l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1 EL node required") l1EL := l1ELComponent.(L1ELNode) diff --git a/op-devstack/sysgo/system.go b/op-devstack/sysgo/system.go index 36c7d911eec55..3fd1a226e4d99 100644 --- a/op-devstack/sysgo/system.go +++ b/op-devstack/sysgo/system.go @@ -564,16 +564,18 @@ func NewDefaultSupernodeInteropProofsSystemIDs(l1ID, l2AID, l2BID eth.ChainID) D } func 
DefaultSupernodeIsthmusSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystemIDs) stack.Option[*Orchestrator] { - return defaultSupernodeSuperProofsSystem(dest) + return defaultSupernodeSuperProofsSystem(dest, nil) } // DefaultSupernodeInteropProofsSystem creates a super-roots proofs system that sources super-roots via op-supernode // (instead of op-supervisor). Interop is enabled at genesis. func DefaultSupernodeInteropProofsSystem(dest *DefaultSupernodeInteropProofsSystemIDs) stack.Option[*Orchestrator] { - return defaultSupernodeSuperProofsSystem(dest, WithInteropAtGenesis()) + return defaultSupernodeSuperProofsSystem(dest, + []SupernodeOption{WithSupernodeInteropAtGenesis()}, + WithInteropAtGenesis()) } -func defaultSupernodeSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystemIDs, deployerOpts ...DeployerOption) stack.CombinedOption[*Orchestrator] { +func defaultSupernodeSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystemIDs, snOpts []SupernodeOption, deployerOpts ...DeployerOption) stack.CombinedOption[*Orchestrator] { ids := NewDefaultSupernodeInteropProofsSystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) opt := stack.Combine[*Orchestrator]() @@ -599,7 +601,9 @@ func defaultSupernodeSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystem opt.Add(WithL2ELNode(ids.L2BEL)) // Shared supernode for both L2 chains (registers per-chain L2CL proxies) - opt.Add(WithSharedSupernodeCLs(ids.Supernode, []L2CLs{{CLID: ids.L2ACL, ELID: ids.L2AEL}, {CLID: ids.L2BCL, ELID: ids.L2BEL}}, ids.L1CL, ids.L1EL)) + opt.Add(WithSharedSupernodeCLs(ids.Supernode, + []L2CLs{{CLID: ids.L2ACL, ELID: ids.L2AEL}, {CLID: ids.L2BCL, ELID: ids.L2BEL}}, + ids.L1CL, ids.L1EL, snOpts...)) opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) From a8126606757b2d34527f1d4a52006d1b9ace63aa Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Fri, 27 Feb 2026 06:53:56 -0800 Subject: [PATCH 028/201] fix(contracts-bedrock): make contracts CI 
reliable (#19323) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(contracts-bedrock): resolve VerifyOPCM bytecode mismatch from compiler profile ambiguity When `additional_compiler_profiles` is configured in foundry.toml, contracts pulled into the dispute profile's compilation graph get compiled with both default (999999 optimizer runs) and dispute (5000 runs) profiles. PR #19111 added L2ProxyAdmin extending ProxyAdmin, which pulled ProxyAdmin (and transitively OptimismMintableERC20Factory) into the dispute profile graph. On CI (Linux), `vm.getCode("ProxyAdmin")` non-deterministically resolves to the dispute profile artifact (6149 bytes creation code), while VerifyOPCM reads the default profile artifact from disk (6751 bytes). This mismatch causes VerifyOPCM_Failed() across all chains and feature flags on CI, while passing locally on macOS where the resolution order differs. The fix adds `DeployUtils.getCode()` which constructs explicit artifact file paths (`forge-artifacts/<name>.sol/<name>.json`) to always resolve the default profile. All `vm.getCode()` callsites in scripts and tests are migrated to use this helper. A semgrep rule enforces this going forward. Co-Authored-By: Claude Opus 4.6 * fix(contracts-bedrock): add try/catch fallback and cicoverage gas test fix Add try/catch fallback to DeployUtils.getCode() so the Go script host (which doesn't support explicit artifact paths) gracefully falls back to vm.getCode(_name). Also add "/" passthrough for callers passing explicit paths. Fix L1ChugSplashProxy OOG gas test: under cicoverage, the now-correct default-profile proxy bytecode is larger, leaving insufficient retained gas (1/64 rule) for the require message. Use generic vm.expectRevert() for unoptimized profiles — the test still verifies the revert occurs. 
Co-Authored-By: Claude Opus 4.6 * fix(contracts-bedrock): fix semgrep findings in DeployUtils and L1ChugSplashProxy Rename try/catch return variable to `code_` (trailing underscore convention) and add L1ChugSplashProxy.t.sol to expectrevert-no-args exclusion list since the bare vm.expectRevert() is intentional (OOG produces no revert data). Co-Authored-By: Claude Opus 4.6 * fix(contracts-bedrock): skip explicit artifact path under coverage Under coverage profiles, forge-artifacts/ contains the default profile's (optimized) artifacts, not the coverage profile's. Since coverage profiles have no additional_compiler_profiles, there is no profile ambiguity, so plain vm.getCode() resolves correctly. Skip the explicit artifact path under vm.isContext(Coverage) to avoid bytecode mismatches between artifact- loaded code and fresh compilation in tests (DeployFeesDepositor, DeployMIPS). Co-Authored-By: Claude Opus 4.6 * fix(contracts-bedrock): wrap isContext in try/catch for Go host compat The Go script host doesn't implement vm.isContext(), causing a revert that propagates up as an unrecognized selector error. Wrap the coverage detection in try/catch so the Go host silently falls through to the artifact-path resolution (which itself falls back to vm.getCode). Also adds a comment explaining why the catch block is intentionally empty. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .semgrep/rules/sol-rules.yaml | 11 ++++ .semgrep/tests/sol-rules.t.sol | 22 ++++++++ .../contracts-bedrock/scripts/L2Genesis.s.sol | 2 +- .../scripts/deploy/ChainAssertions.sol | 18 +++++-- .../deploy/DeployImplementations.s.sol | 10 ++-- .../scripts/libraries/DeployUtils.sol | 53 +++++++++++++++++-- .../test/L2/FeeSplitter.t.sol | 3 +- .../test/legacy/L1ChugSplashProxy.t.sol | 14 ++++- 8 files changed, 115 insertions(+), 18 deletions(-) diff --git a/.semgrep/rules/sol-rules.yaml b/.semgrep/rules/sol-rules.yaml index 720eb699abf72..da3cabc55b944 100644 --- a/.semgrep/rules/sol-rules.yaml +++ b/.semgrep/rules/sol-rules.yaml @@ -46,6 +46,7 @@ rules: paths: exclude: - packages/contracts-bedrock/test/universal/WETH98.t.sol + - packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol - id: sol-safety-natspec-semver-match languages: [generic] @@ -456,3 +457,13 @@ rules: - packages/contracts-bedrock/src/L1/ProtocolVersions.sol # DataAvailabilityChallenge is a beta/non-standard contract. - packages/contracts-bedrock/src/L1/DataAvailabilityChallenge.sol + + - id: sol-safety-use-deployutils-getcode + languages: [solidity] + severity: ERROR + message: Use DeployUtils.getCode() instead of vm.getCode(). When additional_compiler_profiles is configured in foundry.toml, vm.getCode() can non-deterministically resolve to the wrong compiler profile's artifact, causing bytecode mismatches across platforms. + pattern-either: + - pattern: vm.getCode(...) 
+ paths: + exclude: + - packages/contracts-bedrock/scripts/libraries/DeployUtils.sol diff --git a/.semgrep/tests/sol-rules.t.sol b/.semgrep/tests/sol-rules.t.sol index 9d0179318c978..cb1dde5a1f0e5 100644 --- a/.semgrep/tests/sol-rules.t.sol +++ b/.semgrep/tests/sol-rules.t.sol @@ -732,3 +732,25 @@ contract SemgrepTest__sol_style_event_param_fmt { // ruleid: sol-style-event-param-fmt event SomethingWithMint(uint256 _mint); } + +contract SemgrepTest__sol_safety_use_deployutils_getcode { + function test() { + // ok: sol-safety-use-deployutils-getcode + DeployUtils.getCode("ProxyAdmin"); + + // ok: sol-safety-use-deployutils-getcode + DeployUtils.getCode("AddressManager"); + + // ok: sol-safety-use-deployutils-getcode + DeployUtils.getCode("FeeSplitter.sol:FeeSplitter"); + + // ruleid: sol-safety-use-deployutils-getcode + vm.getCode("ProxyAdmin"); + + // ruleid: sol-safety-use-deployutils-getcode + vm.getCode("FeeSplitter.sol:FeeSplitter"); + + // ruleid: sol-safety-use-deployutils-getcode + vm.getCode(string.concat(cname, ".sol:", cname)); + } +} diff --git a/packages/contracts-bedrock/scripts/L2Genesis.s.sol b/packages/contracts-bedrock/scripts/L2Genesis.s.sol index 56bfb1402b9b4..2385d962c4314 100644 --- a/packages/contracts-bedrock/scripts/L2Genesis.s.sol +++ b/packages/contracts-bedrock/scripts/L2Genesis.s.sol @@ -499,7 +499,7 @@ contract L2Genesis is Script { function setEAS() internal { string memory cname = Predeploys.getName(Predeploys.EAS); address impl = Predeploys.predeployToCodeNamespace(Predeploys.EAS); - bytes memory code = vm.getCode(string.concat(cname, ".sol:", cname)); + bytes memory code = DeployUtils.getCode(cname); address eas; assembly { diff --git a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol index ec618a04bd1b7..d9dd021718d27 100644 --- a/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol +++ 
b/packages/contracts-bedrock/scripts/deploy/ChainAssertions.sol @@ -419,25 +419,33 @@ library ChainAssertions { IOPContractsManager.Blueprints memory blueprints = _opcm.blueprints(); Blueprint.Preamble memory addressManagerPreamble = Blueprint.parseBlueprintPreamble(address(blueprints.addressManager).code); - require(keccak256(addressManagerPreamble.initcode) == keccak256(vm.getCode("AddressManager")), "CHECK-OPCM-160"); + require( + keccak256(addressManagerPreamble.initcode) == keccak256(DeployUtils.getCode("AddressManager")), + "CHECK-OPCM-160" + ); Blueprint.Preamble memory proxyPreamble = Blueprint.parseBlueprintPreamble(address(blueprints.proxy).code); - require(keccak256(proxyPreamble.initcode) == keccak256(vm.getCode("Proxy")), "CHECK-OPCM-170"); + require(keccak256(proxyPreamble.initcode) == keccak256(DeployUtils.getCode("Proxy")), "CHECK-OPCM-170"); Blueprint.Preamble memory proxyAdminPreamble = Blueprint.parseBlueprintPreamble(address(blueprints.proxyAdmin).code); - require(keccak256(proxyAdminPreamble.initcode) == keccak256(vm.getCode("ProxyAdmin")), "CHECK-OPCM-180"); + require( + keccak256(proxyAdminPreamble.initcode) == keccak256(DeployUtils.getCode("ProxyAdmin")), "CHECK-OPCM-180" + ); Blueprint.Preamble memory l1ChugSplashProxyPreamble = Blueprint.parseBlueprintPreamble(address(blueprints.l1ChugSplashProxy).code); require( - keccak256(l1ChugSplashProxyPreamble.initcode) == keccak256(vm.getCode("L1ChugSplashProxy")), + keccak256(l1ChugSplashProxyPreamble.initcode) == keccak256(DeployUtils.getCode("L1ChugSplashProxy")), "CHECK-OPCM-190" ); Blueprint.Preamble memory rdProxyPreamble = Blueprint.parseBlueprintPreamble(address(blueprints.resolvedDelegateProxy).code); - require(keccak256(rdProxyPreamble.initcode) == keccak256(vm.getCode("ResolvedDelegateProxy")), "CHECK-OPCM-200"); + require( + keccak256(rdProxyPreamble.initcode) == keccak256(DeployUtils.getCode("ResolvedDelegateProxy")), + "CHECK-OPCM-200" + ); } function 
checkAnchorStateRegistryProxy(IAnchorStateRegistry _anchorStateRegistryProxy, bool _isProxy) internal { diff --git a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol index 6b8d1f406955a..b5701e465f6a1 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeployImplementations.s.sol @@ -315,15 +315,15 @@ contract DeployImplementations is Script { IOPContractsManager.Blueprints memory blueprints; vm.startBroadcast(msg.sender); address checkAddress; - (blueprints.addressManager, checkAddress) = DeployUtils.createDeterministicBlueprint(vm.getCode("AddressManager"), _salt); + (blueprints.addressManager, checkAddress) = DeployUtils.createDeterministicBlueprint(DeployUtils.getCode("AddressManager"), _salt); require(checkAddress == address(0), "OPCM-10"); - (blueprints.proxy, checkAddress) = DeployUtils.createDeterministicBlueprint(vm.getCode("Proxy"), _salt); + (blueprints.proxy, checkAddress) = DeployUtils.createDeterministicBlueprint(DeployUtils.getCode("Proxy"), _salt); require(checkAddress == address(0), "OPCM-20"); - (blueprints.proxyAdmin, checkAddress) = DeployUtils.createDeterministicBlueprint(vm.getCode("ProxyAdmin"), _salt); + (blueprints.proxyAdmin, checkAddress) = DeployUtils.createDeterministicBlueprint(DeployUtils.getCode("ProxyAdmin"), _salt); require(checkAddress == address(0), "OPCM-30"); - (blueprints.l1ChugSplashProxy, checkAddress) = DeployUtils.createDeterministicBlueprint(vm.getCode("L1ChugSplashProxy"), _salt); + (blueprints.l1ChugSplashProxy, checkAddress) = DeployUtils.createDeterministicBlueprint(DeployUtils.getCode("L1ChugSplashProxy"), _salt); require(checkAddress == address(0), "OPCM-40"); - (blueprints.resolvedDelegateProxy, checkAddress) = DeployUtils.createDeterministicBlueprint(vm.getCode("ResolvedDelegateProxy"), _salt); + (blueprints.resolvedDelegateProxy, checkAddress) 
= DeployUtils.createDeterministicBlueprint(DeployUtils.getCode("ResolvedDelegateProxy"), _salt); require(checkAddress == address(0), "OPCM-50"); // forgefmt: disable-end vm.stopBroadcast(); diff --git a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol index 25d0ba2da3a87..01487717561c7 100644 --- a/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol +++ b/packages/contracts-bedrock/scripts/libraries/DeployUtils.sol @@ -2,7 +2,7 @@ pragma solidity ^0.8.0; // Scripts -import { Vm } from "forge-std/Vm.sol"; +import { Vm, VmSafe } from "forge-std/Vm.sol"; import { console2 as console } from "forge-std/console2.sol"; import { Artifacts } from "scripts/Artifacts.s.sol"; @@ -24,12 +24,57 @@ library DeployUtils { bytes32 internal constant DEFAULT_SALT = keccak256("op-stack-contract-impls-salt-v0"); + /// @notice Returns the creation bytecode for a contract from the default compiler profile's artifact. + /// When `additional_compiler_profiles` is configured in foundry.toml, a contract may be compiled + /// with multiple profiles (e.g., default and dispute). Using `vm.getCode(name)` alone can + /// non-deterministically resolve to the wrong profile's artifact. By constructing the explicit + /// artifact file path (`forge-artifacts/<name>.sol/<name>.json`), we ensure the default profile's + /// bytecode is always returned. + /// If the name already contains a colon or slash (e.g., "File.sol:Contract" or an explicit path), + /// it is passed through to vm.getCode as-is since the caller has already provided disambiguation. + /// The explicit path is wrapped in a try/catch so that hosts which don't support artifact paths + /// (e.g., the Go script host in op-chain-ops) gracefully fall back to vm.getCode(_name). + /// Under coverage, forge-artifacts/ contains the default profile's (optimized) artifacts, not + /// the coverage profile's. 
Since coverage profiles have no additional_compiler_profiles, there + /// is no ambiguity, so we skip the explicit path and let vm.getCode resolve naturally. + /// @param _name Name of the contract, or a qualified "File.sol:Contract" identifier. + /// @return The creation bytecode from the default profile artifact. + function getCode(string memory _name) internal view returns (bytes memory) { + // If the name contains a colon or slash, the caller already provided a qualified identifier. + bytes memory nameBytes = bytes(_name); + for (uint256 i = 0; i < nameBytes.length; i++) { + if (nameBytes[i] == ":" || nameBytes[i] == "/") { + return vm.getCode(_name); + } + } + // Under coverage, forge-artifacts/ holds the default profile's artifacts, not the coverage + // profile's. Coverage profiles have no additional_compiler_profiles (no ambiguity), so + // plain vm.getCode resolves correctly. The try/catch guards against hosts that don't + // implement vm.isContext (e.g., the Go script host in op-chain-ops). + try vm.isContext(VmSafe.ForgeContext.Coverage) returns (bool isCoverage_) { + if (isCoverage_) { + return vm.getCode(_name); + } + } catch { + // Intentionally empty: the Go script host doesn't implement vm.isContext, so we + // silently fall through to the artifact-path resolution below. + } + // Try explicit default-profile artifact path for deterministic profile resolution. + // Falls back to vm.getCode(_name) for hosts that don't support artifact paths + // (e.g., the Go script host in op-chain-ops, which has no profile ambiguity). + try vm.getCode(string.concat("forge-artifacts/", _name, ".sol/", _name, ".json")) returns (bytes memory code_) { + return code_; + } catch { + return vm.getCode(_name); + } + } + /// @notice Deploys a contract with the given name and arguments via CREATE. /// @param _name Name of the contract to deploy. /// @param _args ABI-encoded constructor arguments. /// @return addr_ Address of the deployed contract. 
function create1(string memory _name, bytes memory _args) internal returns (address payable addr_) { - bytes memory bytecode = abi.encodePacked(vm.getCode(_name), _args); + bytes memory bytecode = abi.encodePacked(getCode(_name), _args); assembly { addr_ := create(0, add(bytecode, 0x20), mload(bytecode)) } @@ -79,7 +124,7 @@ library DeployUtils { /// @param _salt Salt for the CREATE2 operation. /// @return addr_ Address of the deployed contract. function create2(string memory _name, bytes memory _args, bytes32 _salt) internal returns (address payable) { - bytes memory initCode = abi.encodePacked(vm.getCode(_name), _args); + bytes memory initCode = abi.encodePacked(getCode(_name), _args); address preComputedAddress = vm.computeCreate2Address(_salt, keccak256(initCode)); require(preComputedAddress.code.length == 0, "DeployUtils: contract already deployed"); return create2asm(initCode, _salt); @@ -150,7 +195,7 @@ library DeployUtils { internal returns (address payable addr_) { - bytes memory initCode = abi.encodePacked(vm.getCode(_name), _args); + bytes memory initCode = abi.encodePacked(getCode(_name), _args); address preComputedAddress = vm.computeCreate2Address(_salt, keccak256(initCode)); if (preComputedAddress.code.length > 0) { addr_ = payable(preComputedAddress); diff --git a/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol b/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol index 63c51f0c11a8b..9ea3dac298057 100644 --- a/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol +++ b/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol @@ -11,6 +11,7 @@ import { RevertingRecipient } from "test/mocks/RevertingRecipient.sol"; import { ReentrantMockFeeVault } from "test/mocks/ReentrantMockFeeVault.sol"; // Libraries +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Types } from "src/libraries/Types.sol"; @@ -127,7 +128,7 @@ contract FeeSplitter_Initialize_Test is 
FeeSplitter_TestInit { /// @notice Test that the implementation contract disables initializers in the constructor function test_feeSplitterImplementation_constructorDisablesInitializers_succeeds() public { - bytes memory creationCode = vm.getCode("FeeSplitter.sol:FeeSplitter"); + bytes memory creationCode = DeployUtils.getCode("FeeSplitter"); address implementation; // Expect the Initialized event to be emitted diff --git a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol index c81a0c888b316..8e1147250ffdb 100644 --- a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol +++ b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol @@ -117,7 +117,10 @@ contract L1ChugSplashProxy_SetCode_Test is L1ChugSplashProxy_TestInit { // if forge coverage is run before testing this with forge test or forge snapshot, forge // clean should be run first so that it recompiles the contracts using the foundry.toml // optimizer settings. - if (vm.isContext(VmSafe.ForgeContext.Coverage) || LibString.eq(Config.foundryProfile(), "lite")) { + bool isUnoptimized = vm.isContext(VmSafe.ForgeContext.Coverage) || LibString.eq(Config.foundryProfile(), "lite") + || LibString.eq(Config.foundryProfile(), "cicoverage"); + + if (isUnoptimized) { gasLimit = 95_000; } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) { gasLimit = 65_000; @@ -126,7 +129,14 @@ contract L1ChugSplashProxy_SetCode_Test is L1ChugSplashProxy_TestInit { } vm.prank(owner); - vm.expectRevert(bytes("L1ChugSplashProxy: code was not correctly deployed")); // Ran out of gas + if (isUnoptimized) { + // Under unoptimized compilation, the larger proxy bytecode leaves insufficient + // retained gas (1/64 rule) for the require message after the inner CREATE OOGs. + // The call still reverts (OOG), just without the specific error string. 
+ vm.expectRevert(); + } else { + vm.expectRevert(bytes("L1ChugSplashProxy: code was not correctly deployed")); + } proxy.setCode{ gas: gasLimit }( hex"fefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefefe" ); From a8a2956e695f18f53309ff0c30587a2a42961329 Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Sat, 28 Feb 2026 01:55:00 -0600 Subject: [PATCH 029/201] supernode: Same Timestamp Verification (#19217) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * DSL: Coordinate Interop Activity Pause for Acceptance Testing * op-acceptance-tests: add same-timestamp invalid message tests Add acceptance tests for supernode interop that verify invalid same-timestamp executing messages are correctly detected and replaced. TestSupernodeSameTimestampInvalidExecMessage: - Chain A emits initiating message at timestamp T - Chain B executes that message at timestamp T (same timestamp - invalid) - Verifies Chain B's block is replaced with deposits-only block - Verifies Chain A's block (with valid init) is NOT replaced TestSupernodeSameTimestampInvalidTransitive: - Chain A: init(IA) + exec(IB) (valid reference to B's init) - Chain B: init(IB) + exec(IA) (invalid - bad log index) - Verifies transitive invalidation: B replaced first, then A replaced because B's init no longer exists after B was replaced These tests validate the strict timestamp checking and cascading invalidation behavior of the interop system. 
* Rename first test * interop: allow same-timestamp executing messages Subfeature 1 of Same-Timestamp Interop feature. Changes the timestamp validation to allow executing messages that reference initiating messages from the same timestamp. Previously, the check was >= which rejected same-timestamp messages. Now it uses > which only rejects future timestamps. - algo.go: Change timestamp check from >= to > in verifyExecutingMessage - algo_test.go: Add ValidBlocks/SameTimestampMessage test, rename TimestampViolation to FutureTimestamp for clarity - same_timestamp_invalid_test.go: Update expectations - same-timestamp messages are now valid and blocks are not replaced - SameTimestampInterop_Feature.md: Feature diary documenting the work * interop: add cycleVerifyFn field for same-timestamp verification Subfeature 2 of Same-Timestamp Interop feature. Adds the cycleVerifyFn field to the Interop struct. This function will be used to verify same-timestamp executing messages that may form circular dependencies between chains. The field starts as nil and will be set by the circular verification implementation (Subfeature 4). - interop.go: Add cycleVerifyFn field with documentation - interop_test.go: Add TestCycleVerifyFn test section verifying the field can be set, called, return invalid heads, and return errors * interop: route same-timestamp messages through cycleVerifyFn Subfeature 3 of Same-Timestamp Interop feature. 
Implements the routing logic for same-timestamp executing messages: - verifyExecutingMessage now returns ErrSameTimestamp sentinel when the initiating message timestamp equals the executing timestamp - verifyInteropMessages catches ErrSameTimestamp and tracks whether any chain has same-timestamp messages - After the main loop, if same-timestamp messages exist AND cycleVerifyFn is set, it calls cycleVerifyFn and merges any invalid heads into the result This allows same-timestamp messages to be verified by the cycle verification algorithm (to be implemented in Subfeature 4) rather than immediate validation. - algo.go: Add ErrSameTimestamp, modify verification flow - algo_test.go: Add CycleVerify/* tests for routing behavior * interop: add cycleVerifyFn for same-timestamp cycle verification Adds infrastructure for same-timestamp interop cycle verification: - Add cycleVerifyFn field to Interop struct, called after verifyFn in progressInterop with results merged (invalid heads combined) - Create circular.go with stub verifyCycleMessages implementation that returns a valid result (algorithm to be implemented) - Set cycleVerifyFn in New() - function is always set, not optional - Add TestProgressInteropWithCycleVerify test suite verifying: - Results from both verifyFn and cycleVerifyFn are merged - Errors from cycleVerifyFn propagate correctly - Invalid heads from both functions are combined This prepares the codebase for implementing the actual cycle verification algorithm that will resolve same-timestamp circular dependencies. 
* interop: implement cycle detection algorithm for same-timestamp messages - Add executingMessageBefore helper (finds latest EM with logIndex <= target) - Add buildCycleGraph to construct dependency graph from same-timestamp EMs - Implement verifyCycleMessages to orchestrate cycle detection - Add comprehensive tests for executingMessageBefore, buildCycleGraph, checkCycle Edges in dependency graph: - Intra-chain: each EM depends on previous EM on same chain - Cross-chain: each EM depends on executingMessageBefore(targetChain, refLogIdx) Cycle detection uses Kahn's topological sort algorithm. * acceptance: add cycle detection test and rename same_timestamp_test.go - Rename same_timestamp_invalid_test.go to same_timestamp_test.go (since same-ts is now valid) - Add TestSupernodeSameTimestampCycle: tests that mutual same-timestamp exec messages (A executes B, B executes A) are detected as a circular dependency and cause both blocks to be replaced - Update spec comment to document all three test scenarios * interop: add feature diary for same-timestamp interop Documents the implementation of same-timestamp interop verification: - Feature goals and breakdown into subfeatures - Development diary with entries for each implementation phase - Complete test coverage summary (30 unit tests, 3 acceptance tests) Key changes documented: - Relaxed timestamp check (>= → >) to allow same-timestamp messages - Added cycleVerifyFn for cycle detection - Implemented Kahn's topological sort for circular dependency detection - Added acceptance test for cycle detection causing reorgs * interop: only invalidate cycle participants, not bystanders Previously, when a cycle was detected, all chains with same-timestamp executing messages were marked as invalid. This was overly broad. Now, only chains with unresolved nodes after Kahn's algorithm are marked as invalid. Chains that have same-timestamp EMs but whose nodes all resolved (i.e., they weren't part of any cycle) are spared. 
- Add collectCycleParticipants helper to identify unresolved chains - Update verifyCycleMessages to use precise cycle participant set - Add TestVerifyCycleMessagesOnlyCycleParticipants test - Add TestCycleParticipants graph-level test * interop: rename circular.go to cycle.go Self-review cleanup: rename 'circular' to 'cycle' throughout: - circular.go -> cycle.go - circular_test.go -> cycle_test.go - Updated comments: 'circular dependency' -> 'cycle' - Updated Feature.md documentation references * interop: simplify cycle verification buildCycleGraph simplification: - Remove unused nodeByLocation map (dead code) - Remove intermediate logIndices extraction - Build nodes directly from map iteration - Sort with slices.SortFunc after building - Delete sortUint32s helper (replaced by stdlib) Remove cycleVerify knowledge from verifyInteropMessages: - verifyInteropMessages now has no knowledge of cycleVerify - cycleVerifyFn is called from progressInterop (not verifyInteropMessages) - Remove hasSameTimestampMessages tracking - Remove cycleVerifyFn call block - Delete 4 CycleVerify tests from algo_test.go (covered by interop_test.go) * interop: remove feature diary from PR Keep locally for reference but exclude from version control. 
* remove unrelated files from PR - reth submodule - superchain-registry submodule - SuperRootRefactor_Feature.md diary * tests: simplify same-timestamp interop tests cycle_test.go (692 → 536 lines, -23%): - Add helper functions: mutualCycle, triangleCycle, oneWayRef, mergeEMs - Merge TestCycleParticipants into TestBuildCycleGraph - Delete redundant TestVerifyCycleMessagesOnlyCycleParticipants - Use shared test constants (testChainA/B/C/D, testTS) same_timestamp_test.go (830 → 298 lines, -64%): - Extract sameTimestampHarness for common setup - Consolidate 3 tests using shared harness methods - Remove ~500 lines of duplicated setup code - Simplify helper functions Total reduction: 1522 → 834 lines (-45%) * interop: remove block comments from file headers * Human updates * remove test from other PR * Test Sequencer and DSL * lint * update from merge * address PR comments --- .../interop/loadtest/invalid_msg_test.go | 55 +- .../same_timestamp_invalid/init_test.go | 16 + .../same_timestamp_test.go | 57 ++ op-devstack/dsl/eoa.go | 112 ++++ op-devstack/dsl/invalid_msg.go | 52 ++ op-devstack/dsl/l2_el.go | 32 +- op-devstack/dsl/sequencer.go | 33 ++ op-devstack/presets/twol2.go | 202 +++++++ op-devstack/stack/test_sequencer.go | 5 + op-devstack/sysgo/system.go | 49 +- op-devstack/sysgo/test_sequencer.go | 218 +++++--- .../supernode/activity/interop/algo.go | 21 +- .../supernode/activity/interop/algo_test.go | 75 ++- .../supernode/activity/interop/cycle.go | 226 ++++++++ .../supernode/activity/interop/cycle_test.go | 492 ++++++++++++++++++ .../supernode/activity/interop/interop.go | 31 +- .../activity/interop/interop_test.go | 151 ++++++ 17 files changed, 1650 insertions(+), 177 deletions(-) create mode 100644 op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go create mode 100644 op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go create mode 100644 op-devstack/dsl/invalid_msg.go create mode 100644 
op-supernode/supernode/activity/interop/cycle.go create mode 100644 op-supernode/supernode/activity/interop/cycle_test.go diff --git a/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go b/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go index 3d61eaef75ade..ce017f3444397 100644 --- a/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go +++ b/op-acceptance-tests/tests/interop/loadtest/invalid_msg_test.go @@ -2,7 +2,6 @@ package loadtest import ( "context" - "math/big" "os" "strconv" "strings" @@ -10,59 +9,23 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txinclude" "github.com/ethereum-optimism/optimism/op-service/txintent" "github.com/ethereum-optimism/optimism/op-service/txplan" suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" ) -type makeInvalidInitMsgFn func(suptypes.Message) suptypes.Message - -func makeInvalidBlockNumber(msg suptypes.Message) suptypes.Message { - msg.Identifier.BlockNumber++ - return msg -} - -func makeInvalidChainID(msg suptypes.Message) suptypes.Message { - chainIDBig := msg.Identifier.ChainID.ToBig() - msg.Identifier.ChainID = eth.ChainIDFromBig(chainIDBig.Add(chainIDBig, big.NewInt(1))) - return msg -} - -func makeInvalidLogIndex(msg suptypes.Message) suptypes.Message { - msg.Identifier.LogIndex++ - return msg -} - -func makeInvalidOrigin(msg suptypes.Message) suptypes.Message { - originBig := msg.Identifier.Origin.Big() - msg.Identifier.Origin = common.BigToAddress(originBig.Add(originBig, big.NewInt(1))) - return msg -} - -func makeInvalidTimestamp(msg suptypes.Message) suptypes.Message { - msg.Identifier.Timestamp++ - return msg -} - -func 
makeInvalidPayloadHash(msg suptypes.Message) suptypes.Message { - hash := msg.PayloadHash.Big() - hash.Add(hash, big.NewInt(1)) - msg.PayloadHash = common.BigToHash(hash) - return msg -} - // InvalidExecMsgSpammer spams invalid executing messages, aiming to stress mempool interop // filters. type InvalidExecMsgSpammer struct { l2 *L2 eoa *SyncEOA validInitMsg suptypes.Message - makeInvalidFns *RoundRobin[makeInvalidInitMsgFn] + makeInvalidFns *RoundRobin[dsl.InvalidMsgFn] } var _ Spammer = (*InvalidExecMsgSpammer)(nil) @@ -94,13 +57,13 @@ func NewInvalidExecMsgSpammer(t devtest.T, l2 *L2, validInitMsg suptypes.Message l2: l2, eoa: NewSyncEOA(includer, eoa.Plan()), validInitMsg: validInitMsg, - makeInvalidFns: NewRoundRobin([]makeInvalidInitMsgFn{ - makeInvalidBlockNumber, - makeInvalidChainID, - makeInvalidLogIndex, - makeInvalidOrigin, - makeInvalidTimestamp, - makeInvalidPayloadHash, + makeInvalidFns: NewRoundRobin([]dsl.InvalidMsgFn{ + dsl.MakeInvalidBlockNumber, + dsl.MakeInvalidChainID, + dsl.MakeInvalidLogIndex, + dsl.MakeInvalidOrigin, + dsl.MakeInvalidTimestamp, + dsl.MakeInvalidPayloadHash, }), } } diff --git a/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go b/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go new file mode 100644 index 0000000000000..1762d11f2adcf --- /dev/null +++ b/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go @@ -0,0 +1,16 @@ +package same_timestamp_invalid + +import ( + "os" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +// TestMain creates an isolated two-L2 setup with a shared supernode that has interop enabled. +// This package tests that executing messages referencing initiating messages from the same +// timestamp are correctly detected as invalid and replaced. 
+func TestMain(m *testing.M) { + _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") + presets.DoMain(m, presets.WithTwoL2SupernodeInterop(0)) +} diff --git a/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go b/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go new file mode 100644 index 0000000000000..e4f8b58e8a9de --- /dev/null +++ b/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go @@ -0,0 +1,57 @@ +package same_timestamp_invalid + +import ( + "math/rand" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/txplan" +) + +// TestSupernodeSameTimestampExecMessage: Chain B executes Chain A's init at same timestamp - VALID +func TestSupernodeSameTimestampExecMessage(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewTwoL2SupernodeInterop(t, 0).ForSameTimestampTesting(t) + rng := rand.New(rand.NewSource(99999)) + + pairA := sys.PrepareInitA(rng, 0) + + sys.IncludeAndValidate( + []*txplan.PlannedTx{pairA.SubmitInit()}, + []*txplan.PlannedTx{pairA.SubmitExecTo(sys.Bob)}, + false, false, // neither replaced + ) +} + +// TestSupernodeSameTimestampInvalidTransitive: Bad log index causes transitive invalidation +func TestSupernodeSameTimestampInvalidTransitive(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewTwoL2SupernodeInterop(t, 0).ForSameTimestampTesting(t) + rng := rand.New(rand.NewSource(77777)) + + pairA := sys.PrepareInitA(rng, 0) + pairB := sys.PrepareInitB(rng, 0) + + sys.IncludeAndValidate( + []*txplan.PlannedTx{pairA.SubmitInit(), pairB.SubmitExecTo(sys.Alice)}, + []*txplan.PlannedTx{pairB.SubmitInit(), pairA.SubmitInvalidExecTo(sys.Bob)}, + true, true, // both replaced (B invalid, A transitive) + ) +} + +// TestSupernodeSameTimestampCycle: Mutual exec messages create cycle - both 
replaced +func TestSupernodeSameTimestampCycle(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewTwoL2SupernodeInterop(t, 0).ForSameTimestampTesting(t) + rng := rand.New(rand.NewSource(55555)) + + pairA := sys.PrepareInitA(rng, 0) + pairB := sys.PrepareInitB(rng, 0) + + sys.IncludeAndValidate( + []*txplan.PlannedTx{pairA.SubmitInit(), pairB.SubmitExecTo(sys.Alice)}, + []*txplan.PlannedTx{pairB.SubmitInit(), pairA.SubmitExecTo(sys.Bob)}, + true, true, // both replaced (cycle detected) + ) +} diff --git a/op-devstack/dsl/eoa.go b/op-devstack/dsl/eoa.go index 00aa80bf923e6..99a35df815dcd 100644 --- a/op-devstack/dsl/eoa.go +++ b/op-devstack/dsl/eoa.go @@ -19,8 +19,10 @@ import ( txIntentBindings "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" "github.com/ethereum-optimism/optimism/op-service/txplan" + suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/crypto" ) // EOA is an Externally-Owned-Account: @@ -424,3 +426,113 @@ func (u *EOA) ApproveToken(tokenAddr common.Address, spender common.Address, amo _, err := contractio.Write(approveCall, u.ctx, u.Plan()) u.t.Require().NoError(err, "failed to approve token") } + +// ============================================================================= +// Same-Timestamp Interop Helpers +// ============================================================================= + +// SameTimestampPair holds a precomputed init message for same-timestamp interop testing. +// It allows creating exec messages that reference the init before it's actually included on chain. +// This is necessary for same-timestamp scenarios where the exec needs to reference an init +// that will be included in a block at the same timestamp. 
+type SameTimestampPair struct { + eoa *EOA + Trigger *txintent.InitTrigger + Message suptypes.Message + eventLogger common.Address +} + +// PrepareSameTimestampInit creates a precomputed init message for same-timestamp testing. +// The message identifier is computed for the expected block position (blockNum, logIdx, timestamp). +// This allows an exec message on another chain to reference this init before it's included. +// +// Parameters: +// - rng: random source for generating topics and data +// - eventLogger: address of the EventLogger contract that will emit the init +// - expectedBlockNum: the block number where this init is expected to be included +// - expectedLogIdx: the log index within the block (0 if first log in block) +// - expectedTimestamp: the timestamp of the block +func (u *EOA) PrepareSameTimestampInit( + rng *rand.Rand, + eventLogger common.Address, + expectedBlockNum uint64, + expectedLogIdx uint32, + expectedTimestamp uint64, +) *SameTimestampPair { + // Generate random topics (2 topics for a reasonable init message) + topics := make([][32]byte, 2) + for i := range topics { + copy(topics[i][:], testutils.RandomData(rng, 32)) + } + + trigger := &txintent.InitTrigger{ + Emitter: eventLogger, + Topics: topics, + OpaqueData: testutils.RandomData(rng, 10), + } + + // Precompute the message identifier by hashing the payload + payload := make([]byte, 0) + for _, topic := range trigger.Topics { + payload = append(payload, topic[:]...) + } + payload = append(payload, trigger.OpaqueData...) + + msg := suptypes.Message{ + Identifier: suptypes.Identifier{ + Origin: eventLogger, + BlockNumber: expectedBlockNum, + LogIndex: expectedLogIdx, + Timestamp: expectedTimestamp, + ChainID: u.ChainID(), + }, + PayloadHash: crypto.Keccak256Hash(payload), + } + + return &SameTimestampPair{ + eoa: u, + Trigger: trigger, + Message: msg, + eventLogger: eventLogger, + } +} + +// SubmitInit submits the init message without waiting for inclusion. 
+// Returns the planned tx which can be used to wait for inclusion later. +func (p *SameTimestampPair) SubmitInit() *txplan.PlannedTx { + tx := txintent.NewIntent[*txintent.InitTrigger, *txintent.InteropOutput](p.eoa.Plan()) + tx.Content.Set(p.Trigger) + _, err := tx.PlannedTx.Submitted.Eval(p.eoa.ctx) + p.eoa.require.NoError(err, "failed to submit init message") + return tx.PlannedTx +} + +// SubmitExecTo submits an exec message to the given EOA's chain, referencing this init. +// The exec is submitted without waiting for inclusion. +// Returns the planned tx which can be used to wait for inclusion later. +func (p *SameTimestampPair) SubmitExecTo(executor *EOA) *txplan.PlannedTx { + tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](executor.Plan()) + tx.Content.Set(&txintent.ExecTrigger{ + Executor: constants.CrossL2Inbox, + Msg: p.Message, + }) + _, err := tx.PlannedTx.Submitted.Eval(executor.ctx) + executor.require.NoError(err, "failed to submit exec message") + return tx.PlannedTx +} + +// SubmitInvalidExecTo submits an exec message with an invalid log index. +// This creates an exec that references a non-existent log, which should be detected as invalid. +// Returns the planned tx which can be used to wait for inclusion later. 
+func (p *SameTimestampPair) SubmitInvalidExecTo(executor *EOA) *txplan.PlannedTx { + invalidMsg := MakeInvalidLogIndex(p.Message) + + tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](executor.Plan()) + tx.Content.Set(&txintent.ExecTrigger{ + Executor: constants.CrossL2Inbox, + Msg: invalidMsg, + }) + _, err := tx.PlannedTx.Submitted.Eval(executor.ctx) + executor.require.NoError(err, "failed to submit invalid exec message") + return tx.PlannedTx +} diff --git a/op-devstack/dsl/invalid_msg.go b/op-devstack/dsl/invalid_msg.go new file mode 100644 index 0000000000000..783b9d300accc --- /dev/null +++ b/op-devstack/dsl/invalid_msg.go @@ -0,0 +1,52 @@ +package dsl + +import ( + "math/big" + + "github.com/ethereum-optimism/optimism/op-service/eth" + suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/common" +) + +// InvalidMsgFn is a function that takes a valid message and returns an invalid copy. +type InvalidMsgFn func(suptypes.Message) suptypes.Message + +// MakeInvalidBlockNumber returns a copy of the message with an incremented block number. +func MakeInvalidBlockNumber(msg suptypes.Message) suptypes.Message { + msg.Identifier.BlockNumber++ + return msg +} + +// MakeInvalidChainID returns a copy of the message with an incremented chain ID. +func MakeInvalidChainID(msg suptypes.Message) suptypes.Message { + chainIDBig := msg.Identifier.ChainID.ToBig() + msg.Identifier.ChainID = eth.ChainIDFromBig(chainIDBig.Add(chainIDBig, big.NewInt(1))) + return msg +} + +// MakeInvalidLogIndex returns a copy of the message with an incremented log index. +func MakeInvalidLogIndex(msg suptypes.Message) suptypes.Message { + msg.Identifier.LogIndex++ + return msg +} + +// MakeInvalidOrigin returns a copy of the message with an incremented origin address. 
+func MakeInvalidOrigin(msg suptypes.Message) suptypes.Message { + originBig := msg.Identifier.Origin.Big() + msg.Identifier.Origin = common.BigToAddress(originBig.Add(originBig, big.NewInt(1))) + return msg +} + +// MakeInvalidTimestamp returns a copy of the message with an incremented timestamp. +func MakeInvalidTimestamp(msg suptypes.Message) suptypes.Message { + msg.Identifier.Timestamp++ + return msg +} + +// MakeInvalidPayloadHash returns a copy of the message with an incremented payload hash. +func MakeInvalidPayloadHash(msg suptypes.Message) suptypes.Message { + hash := msg.PayloadHash.Big() + hash.Add(hash, big.NewInt(1)) + msg.PayloadHash = common.BigToHash(hash) + return msg +} diff --git a/op-devstack/dsl/l2_el.go b/op-devstack/dsl/l2_el.go index cf9d866c216c5..b7c856ce636b2 100644 --- a/op-devstack/dsl/l2_el.go +++ b/op-devstack/dsl/l2_el.go @@ -13,8 +13,9 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/retry" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" ) var emptyHash = common.Hash{} @@ -329,15 +330,15 @@ func (el *L2ELNode) FinishedELSync(refNode *L2ELNode, unsafe, safe, finalized ui })) } -func (el *L2ELNode) ChainSyncStatus(chainID eth.ChainID, lvl types.SafetyLevel) eth.BlockID { +func (el *L2ELNode) ChainSyncStatus(chainID eth.ChainID, lvl suptypes.SafetyLevel) eth.BlockID { el.require.Equal(chainID, el.inner.ID().ChainID(), "chain ID mismatch") var blockRef eth.L2BlockRef switch lvl { - case types.Finalized: + case suptypes.Finalized: blockRef = el.BlockRefByLabel(eth.Finalized) - case types.CrossSafe, types.LocalSafe: + case suptypes.CrossSafe, suptypes.LocalSafe: blockRef = el.BlockRefByLabel(eth.Safe) - case 
types.CrossUnsafe, types.LocalUnsafe: + case suptypes.CrossUnsafe, suptypes.LocalUnsafe: blockRef = el.BlockRefByLabel(eth.Unsafe) default: el.require.NoError(errors.New("invalid safety level")) @@ -345,16 +346,31 @@ func (el *L2ELNode) ChainSyncStatus(chainID eth.ChainID, lvl types.SafetyLevel) return blockRef.ID() } -func (el *L2ELNode) MatchedFn(refNode SyncStatusProvider, lvl types.SafetyLevel, attempts int) CheckFunc { +// WaitForReceipt waits for a transaction receipt to be available, retrying until found or timeout. +func (el *L2ELNode) WaitForReceipt(txHash common.Hash) *types.Receipt { + var receipt *types.Receipt + err := retry.Do0(el.ctx, 30, &retry.FixedStrategy{Dur: 500 * time.Millisecond}, func() error { + var err error + receipt, err = el.inner.EthClient().TransactionReceipt(el.ctx, txHash) + if err != nil { + return fmt.Errorf("waiting for receipt of %s: %w", txHash.Hex(), err) + } + return nil + }) + el.require.NoError(err, "failed to get receipt for tx %s", txHash.Hex()) + return receipt +} + +func (el *L2ELNode) MatchedFn(refNode SyncStatusProvider, lvl suptypes.SafetyLevel, attempts int) CheckFunc { return MatchedFn(el, refNode, el.log, el.ctx, lvl, el.ChainID(), attempts) } -func (el *L2ELNode) Matched(refNode SyncStatusProvider, lvl types.SafetyLevel, attempts int) { +func (el *L2ELNode) Matched(refNode SyncStatusProvider, lvl suptypes.SafetyLevel, attempts int) { el.require.NoError(el.MatchedFn(refNode, lvl, attempts)()) } func (el *L2ELNode) MatchedUnsafe(refNode SyncStatusProvider, attempts int) { - el.Matched(refNode, types.LocalUnsafe, attempts) + el.Matched(refNode, suptypes.LocalUnsafe, attempts) } // WaitForPendingNonceMatchFn returns a lambda that waits for the pending nonce of an account to match the provided reference nonce diff --git a/op-devstack/dsl/sequencer.go b/op-devstack/dsl/sequencer.go index e3b206054966f..658b59ab7670b 100644 --- a/op-devstack/dsl/sequencer.go +++ b/op-devstack/dsl/sequencer.go @@ -1,6 +1,8 @@ package dsl 
import ( + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -30,9 +32,40 @@ func (s *TestSequencer) Escape() stack.TestSequencer { return s.inner } +// SequenceBlock builds a block at deterministic timestamp (parent.Time + blockTime). +// This is useful for tests that need predictable block timestamps. func (s *TestSequencer) SequenceBlock(t devtest.T, chainID eth.ChainID, parent common.Hash) { ca := s.Escape().ControlAPI(chainID) require.NoError(t, ca.New(t.Ctx(), seqtypes.BuildOpts{Parent: parent})) require.NoError(t, ca.Next(t.Ctx())) } + +// SequenceBlockWithTxs builds a block with timestamp parent.Time + blockTime with the supplied transactions (bypassing the mempool). +// This makes it ideal for same-timestamp interop testing, and avoids the chance that transactions are sequenced into later blocks. +func (s *TestSequencer) SequenceBlockWithTxs(t devtest.T, chainID eth.ChainID, parent common.Hash, rawTxs [][]byte) { + ctx := t.Ctx() + ca := s.Escape().ControlAPI(chainID) + + // Start a new block building job + require.NoError(t, ca.New(ctx, seqtypes.BuildOpts{Parent: parent})) + + // Include each transaction BEFORE opening + // IncludeTx adds to the job's attrs.Transactions which are used when Open() starts block building + for _, rawTx := range rawTxs { + require.NoError(t, ca.IncludeTx(ctx, hexutil.Bytes(rawTx))) + } + + // Open the block building with the included transactions + require.NoError(t, ca.Open(ctx)) + + // Seal, sign, and commit the block + // Commit is what makes the block canonical in the EL + require.NoError(t, ca.Seal(ctx)) + require.NoError(t, ca.Sign(ctx)) + require.NoError(t, ca.Commit(ctx)) + + // Publish is optional - it broadcasts via P2P which may not be enabled in tests. + // The block is already committed and canonical at this point. 
+ _ = ca.Publish(ctx) // ignore publish errors +} diff --git a/op-devstack/presets/twol2.go b/op-devstack/presets/twol2.go index 1584d91c5fc25..e405cd8f985fa 100644 --- a/op-devstack/presets/twol2.go +++ b/op-devstack/presets/twol2.go @@ -1,8 +1,10 @@ package presets import ( + "math/rand" "time" + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" @@ -13,6 +15,8 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txplan" ) // TwoL2 represents a two-L2 setup without interop considerations. @@ -81,6 +85,11 @@ type TwoL2SupernodeInterop struct { // Supernode provides access to the shared supernode for interop operations Supernode *dsl.Supernode + // TestSequencer provides deterministic block building on both L2 chains. + // Unlike the regular sequencer which uses wall-clock time, the TestSequencer + // builds blocks at parent.Time + blockTime, making it ideal for same-timestamp tests. 
+ TestSequencer *dsl.TestSequencer + // L2ELA and L2ELB provide access to the EL nodes for transaction submission L2ELA *dsl.L2ELNode L2ELB *dsl.L2ELNode @@ -153,6 +162,12 @@ func NewTwoL2SupernodeInterop(t devtest.T, delaySeconds uint64) *TwoL2SupernodeI testControl = sysgoOrch.InteropTestControl(stackSupernode.ID()) } + // Get the test sequencer for deterministic block building + var testSequencer *dsl.TestSequencer + if len(system.TestSequencers()) > 0 { + testSequencer = dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))) + } + out := &TwoL2SupernodeInterop{ TwoL2: TwoL2{ Log: t.Logger(), @@ -166,6 +181,7 @@ func NewTwoL2SupernodeInterop(t devtest.T, delaySeconds uint64) *TwoL2SupernodeI L2BCL: dsl.NewL2CLNode(l2bCL, orch.ControlPlane()), }, Supernode: dsl.NewSupernodeWithTestControl(stackSupernode, testControl), + TestSequencer: testSequencer, L2ELA: dsl.NewL2ELNode(l2a.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), L2ELB: dsl.NewL2ELNode(l2b.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), L2BatcherA: dsl.NewL2Batcher(l2a.L2Batcher(match.Assume(t, match.FirstL2Batcher))), @@ -182,3 +198,189 @@ func NewTwoL2SupernodeInterop(t devtest.T, delaySeconds uint64) *TwoL2SupernodeI out.FunderB = dsl.NewFunder(out.Wallet, out.FaucetB, out.L2ELB) return out } + +// ============================================================================= +// Same-Timestamp Test Setup +// ============================================================================= + +// SameTimestampTestSetup provides a simplified setup for same-timestamp interop testing. +// It handles all the chain synchronization, sequencer control, and interop pausing +// needed to create blocks at the same timestamp on both chains. 
+type SameTimestampTestSetup struct { + *TwoL2SupernodeInterop + t devtest.T + + // Alice is a funded EOA on chain A + Alice *dsl.EOA + // Bob is a funded EOA on chain B + Bob *dsl.EOA + + // EventLoggerA is the EventLogger contract address on chain A + EventLoggerA common.Address + // EventLoggerB is the EventLogger contract address on chain B + EventLoggerB common.Address + + // NextTimestamp is the timestamp that will be used for the next blocks + NextTimestamp uint64 + // ExpectedBlockNumA is the expected block number on chain A + ExpectedBlockNumA uint64 + // ExpectedBlockNumB is the expected block number on chain B + ExpectedBlockNumB uint64 +} + +// ForSameTimestampTesting sets up the system for same-timestamp interop testing. +// It syncs the chains, pauses interop, stops sequencers, and calculates expected positions. +// After calling this, you can use PrepareInitA/B to create same-timestamp message pairs. +func (s *TwoL2SupernodeInterop) ForSameTimestampTesting(t devtest.T) *SameTimestampTestSetup { + // Create funded EOAs + alice := s.FunderA.NewFundedEOA(eth.OneEther) + bob := s.FunderB.NewFundedEOA(eth.OneEther) + + // Deploy event loggers + eventLoggerA := alice.DeployEventLogger() + eventLoggerB := bob.DeployEventLogger() + + // Sync chains and pause interop + s.L2B.CatchUpTo(s.L2A) + s.L2A.CatchUpTo(s.L2B) + s.Supernode.EnsureInteropPaused(s.L2ACL, s.L2BCL, 10) + + // Stop sequencers + s.L2ACL.StopSequencer() + s.L2BCL.StopSequencer() + + // Get current state and synchronize timestamps + unsafeA := s.L2ELA.BlockRefByLabel(eth.Unsafe) + unsafeB := s.L2ELB.BlockRefByLabel(eth.Unsafe) + unsafeA, unsafeB = synchronizeChainsToSameTimestamp(t, s, unsafeA, unsafeB) + + blockTime := s.L2A.Escape().RollupConfig().BlockTime + + return &SameTimestampTestSetup{ + TwoL2SupernodeInterop: s, + t: t, + Alice: alice, + Bob: bob, + EventLoggerA: eventLoggerA, + EventLoggerB: eventLoggerB, + NextTimestamp: unsafeA.Time + blockTime, + ExpectedBlockNumA: unsafeA.Number 
+ 1, + ExpectedBlockNumB: unsafeB.Number + 1, + } +} + +// PrepareInitA creates a precomputed init message for chain A at the next timestamp. +func (s *SameTimestampTestSetup) PrepareInitA(rng *rand.Rand, logIdx uint32) *dsl.SameTimestampPair { + return s.Alice.PrepareSameTimestampInit(rng, s.EventLoggerA, s.ExpectedBlockNumA, logIdx, s.NextTimestamp) +} + +// PrepareInitB creates a precomputed init message for chain B at the next timestamp. +func (s *SameTimestampTestSetup) PrepareInitB(rng *rand.Rand, logIdx uint32) *dsl.SameTimestampPair { + return s.Bob.PrepareSameTimestampInit(rng, s.EventLoggerB, s.ExpectedBlockNumB, logIdx, s.NextTimestamp) +} + +// IncludeAndValidate builds blocks with deterministic timestamps using the TestSequencer, +// then validates interop and checks for expected reorgs. +// +// Unlike the regular sequencer which uses wall-clock time, the TestSequencer builds blocks +// at exactly parent.Time + blockTime, ensuring the blocks are at NextTimestamp. +func (s *SameTimestampTestSetup) IncludeAndValidate(txsA, txsB []*txplan.PlannedTx, expectReplacedA, expectReplacedB bool) { + ctx := s.t.Ctx() + + require.NotNil(s.t, s.TestSequencer, "TestSequencer is required for deterministic timestamp tests") + + // Get parent blocks and chain IDs + parentA := s.L2ELA.BlockRefByLabel(eth.Unsafe) + parentB := s.L2ELB.BlockRefByLabel(eth.Unsafe) + chainIDA := s.L2A.Escape().ChainID() + chainIDB := s.L2B.Escape().ChainID() + + // Extract signed transaction bytes for chain A + var rawTxsA [][]byte + var txHashesA []common.Hash + for _, ptx := range txsA { + signedTx, err := ptx.Signed.Eval(ctx) + require.NoError(s.t, err, "failed to sign transaction for chain A") + rawBytes, err := signedTx.MarshalBinary() + require.NoError(s.t, err, "failed to marshal transaction for chain A") + rawTxsA = append(rawTxsA, rawBytes) + txHashesA = append(txHashesA, signedTx.Hash()) + } + + // Extract signed transaction bytes for chain B + var rawTxsB [][]byte + var txHashesB 
[]common.Hash + for _, ptx := range txsB { + signedTx, err := ptx.Signed.Eval(ctx) + require.NoError(s.t, err, "failed to sign transaction for chain B") + rawBytes, err := signedTx.MarshalBinary() + require.NoError(s.t, err, "failed to marshal transaction for chain B") + rawTxsB = append(rawTxsB, rawBytes) + txHashesB = append(txHashesB, signedTx.Hash()) + } + + // Build blocks at deterministic timestamps using TestSequencer + // Block timestamp will be parent.Time + blockTime = NextTimestamp + s.TestSequencer.SequenceBlockWithTxs(s.t, chainIDA, parentA.Hash, rawTxsA) + s.TestSequencer.SequenceBlockWithTxs(s.t, chainIDB, parentB.Hash, rawTxsB) + + // Get block refs by looking up the tx receipts + var blockA, blockB eth.L2BlockRef + for _, txHash := range txHashesA { + receipt := s.L2ELA.WaitForReceipt(txHash) + blockA = s.L2ELA.BlockRefByHash(receipt.BlockHash) + } + for _, txHash := range txHashesB { + receipt := s.L2ELB.WaitForReceipt(txHash) + blockB = s.L2ELB.BlockRefByHash(receipt.BlockHash) + } + + // Verify same-timestamp property: both blocks at expected timestamp + require.Equal(s.t, s.NextTimestamp, blockA.Time, + "Chain A block must be at the precomputed NextTimestamp (init message identifier uses this)") + require.Equal(s.t, s.NextTimestamp, blockB.Time, + "Chain B block must be at the precomputed NextTimestamp (exec references init at this timestamp)") + require.Equal(s.t, blockA.Time, blockB.Time, "blocks must be at same timestamp") + + // Resume interop and wait for validation + s.Supernode.ResumeInterop() + s.Supernode.AwaitValidatedTimestamp(blockA.Time) + + // Check reorg expectations + currentA := s.L2ELA.BlockRefByNumber(blockA.Number) + currentB := s.L2ELB.BlockRefByNumber(blockB.Number) + + if expectReplacedA { + require.NotEqual(s.t, blockA.Hash, currentA.Hash, "Chain A should be replaced") + } else { + require.Equal(s.t, blockA.Hash, currentA.Hash, "Chain A should NOT be replaced") + } + + if expectReplacedB { + require.NotEqual(s.t, 
blockB.Hash, currentB.Hash, "Chain B should be replaced") + } else { + require.Equal(s.t, blockB.Hash, currentB.Hash, "Chain B should NOT be replaced") + } +} + +// synchronizeChainsToSameTimestamp ensures both chains are at the same timestamp. +func synchronizeChainsToSameTimestamp(t devtest.T, sys *TwoL2SupernodeInterop, unsafeA, unsafeB eth.L2BlockRef) (eth.L2BlockRef, eth.L2BlockRef) { + for i := 0; i < 10; i++ { + if unsafeA.Time == unsafeB.Time { + return unsafeA, unsafeB + } + if unsafeA.Time < unsafeB.Time { + sys.L2ACL.StartSequencer() + sys.L2ELA.WaitForTime(unsafeB.Time) + sys.L2ACL.StopSequencer() + unsafeA = sys.L2ELA.BlockRefByLabel(eth.Unsafe) + } else { + sys.L2BCL.StartSequencer() + sys.L2ELB.WaitForTime(unsafeA.Time) + sys.L2BCL.StopSequencer() + unsafeB = sys.L2ELB.BlockRefByLabel(eth.Unsafe) + } + } + require.Equal(t, unsafeA.Time, unsafeB.Time, "failed to synchronize chains") + return unsafeA, unsafeB +} diff --git a/op-devstack/stack/test_sequencer.go b/op-devstack/stack/test_sequencer.go index a805b365a6ad2..7a6a2023baad7 100644 --- a/op-devstack/stack/test_sequencer.go +++ b/op-devstack/stack/test_sequencer.go @@ -14,6 +14,11 @@ var _ GenericID = (*TestSequencerID)(nil) const TestSequencerKind Kind = "TestSequencer" +// NewTestSequencerID creates a new TestSequencerID with the given key. 
+func NewTestSequencerID(key string) TestSequencerID { + return TestSequencerID(key) +} + func (id TestSequencerID) String() string { return genericID(id).string(TestSequencerKind) } diff --git a/op-devstack/sysgo/system.go b/op-devstack/sysgo/system.go index 3fd1a226e4d99..2df23cc1914ae 100644 --- a/op-devstack/sysgo/system.go +++ b/op-devstack/sysgo/system.go @@ -40,7 +40,7 @@ func NewDefaultMinimalSystemIDs(l1ID, l2ID eth.ChainID) DefaultMinimalSystemIDs L2Batcher: stack.NewL2BatcherID("main", l2ID), L2Proposer: stack.NewL2ProposerID("main", l2ID), L2Challenger: stack.NewL2ChallengerID("main", l2ID), - TestSequencer: "test-sequencer", + TestSequencer: stack.NewTestSequencerID("test-sequencer"), } return ids } @@ -107,29 +107,31 @@ type DefaultTwoL2SystemIDs struct { L2BCL stack.L2CLNodeID L2BEL stack.L2ELNodeID - Supernode stack.SupernodeID - L2ABatcher stack.L2BatcherID - L2AProposer stack.L2ProposerID - L2BBatcher stack.L2BatcherID - L2BProposer stack.L2ProposerID + Supernode stack.SupernodeID + TestSequencer stack.TestSequencerID + L2ABatcher stack.L2BatcherID + L2AProposer stack.L2ProposerID + L2BBatcher stack.L2BatcherID + L2BProposer stack.L2ProposerID } func NewDefaultTwoL2SystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultTwoL2SystemIDs { return DefaultTwoL2SystemIDs{ - L1: stack.L1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2A: stack.L2NetworkID(l2AID), - L2ACL: stack.NewL2CLNodeID("sequencer", l2AID), - L2AEL: stack.NewL2ELNodeID("sequencer", l2AID), - L2B: stack.L2NetworkID(l2BID), - L2BCL: stack.NewL2CLNodeID("sequencer", l2BID), - L2BEL: stack.NewL2ELNodeID("sequencer", l2BID), - Supernode: stack.NewSupernodeID("supernode-two-l2-system", l2AID, l2BID), - L2ABatcher: stack.NewL2BatcherID("main", l2AID), - L2AProposer: stack.NewL2ProposerID("main", l2AID), - L2BBatcher: stack.NewL2BatcherID("main", l2BID), - L2BProposer: stack.NewL2ProposerID("main", l2BID), + L1: stack.L1NetworkID(l1ID), + L1EL: 
stack.NewL1ELNodeID("l1", l1ID), + L1CL: stack.NewL1CLNodeID("l1", l1ID), + L2A: stack.L2NetworkID(l2AID), + L2ACL: stack.NewL2CLNodeID("sequencer", l2AID), + L2AEL: stack.NewL2ELNodeID("sequencer", l2AID), + L2B: stack.L2NetworkID(l2BID), + L2BCL: stack.NewL2CLNodeID("sequencer", l2BID), + L2BEL: stack.NewL2ELNodeID("sequencer", l2BID), + Supernode: stack.NewSupernodeID("supernode-two-l2-system", l2AID, l2BID), + TestSequencer: stack.NewTestSequencerID("test-sequencer-2l2"), + L2ABatcher: stack.NewL2BatcherID("main", l2AID), + L2AProposer: stack.NewL2ProposerID("main", l2AID), + L2BBatcher: stack.NewL2BatcherID("main", l2BID), + L2BProposer: stack.NewL2ProposerID("main", l2BID), } } @@ -266,6 +268,9 @@ func DefaultSupernodeInteropTwoL2System(dest *DefaultTwoL2SystemIDs, delaySecond opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + // Test sequencer for deterministic block building on both L2 chains + opt.Add(WithTestSequencer2L2(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L2BCL, ids.L1EL, ids.L2AEL, ids.L2BEL)) + opt.Add(stack.Finally(func(orch *Orchestrator) { *dest = ids })) @@ -362,7 +367,7 @@ func NewDefaultSingleChainInteropSystemIDs(l1ID, l2AID eth.ChainID) DefaultSingl Superchain: "main", // TODO(#15244): hardcoded to match the deployer default ID Cluster: stack.ClusterID("main"), Supervisor: "1-primary", // prefix with number for ordering of supervisors - TestSequencer: "dev", + TestSequencer: stack.NewTestSequencerID("dev"), L2A: stack.L2NetworkID(l2AID), L2ACL: stack.NewL2CLNodeID("sequencer", l2AID), L2AEL: stack.NewL2ELNodeID("sequencer", l2AID), @@ -785,7 +790,7 @@ func NewDefaultSingleChainSystemWithFlashblocksIDs(l1ID, l2ID eth.ChainID) Singl L2Batcher: stack.NewL2BatcherID("main", l2ID), L2Proposer: stack.NewL2ProposerID("main", l2ID), L2Challenger: stack.NewL2ChallengerID("main", l2ID), - TestSequencer: "test-sequencer", + TestSequencer: stack.NewTestSequencerID("test-sequencer"), } return ids } diff 
--git a/op-devstack/sysgo/test_sequencer.go b/op-devstack/sysgo/test_sequencer.go index ad43eeee5fbef..7971eb7e884f1 100644 --- a/op-devstack/sysgo/test_sequencer.go +++ b/op-devstack/sysgo/test_sequencer.go @@ -12,6 +12,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/shim" + "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/endpoint" @@ -74,13 +75,37 @@ func (s *TestSequencer) hydrate(sys stack.ExtensibleSystem) { })) } +// l2ChainIDs pairs together the CL and EL node IDs for an L2 chain. +type l2ChainIDs struct { + CLID stack.L2CLNodeID + ELID stack.L2ELNodeID +} + func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLNodeID, l2CLID stack.L2CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID) stack.Option[*Orchestrator] { + return withTestSequencerImpl(testSequencerID, l1CLID, l1ELID, l2ChainIDs{CLID: l2CLID, ELID: l2ELID}) +} + +// WithTestSequencer2L2 creates a test sequencer that can build blocks on two L2 chains. +// This is useful for testing same-timestamp interop scenarios where we need deterministic +// block timestamps on both chains. +func WithTestSequencer2L2(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLNodeID, + l2ACLID stack.L2CLNodeID, l2BCLID stack.L2CLNodeID, + l1ELID stack.L1ELNodeID, l2AELID stack.L2ELNodeID, l2BELID stack.L2ELNodeID) stack.Option[*Orchestrator] { + return withTestSequencerImpl(testSequencerID, l1CLID, l1ELID, + l2ChainIDs{CLID: l2ACLID, ELID: l2AELID}, + l2ChainIDs{CLID: l2BCLID, ELID: l2BELID}, + ) +} + +// withTestSequencerImpl is the shared implementation for creating test sequencers. +// It supports any number of L2 chains. 
+func withTestSequencerImpl(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2Chains ...l2ChainIDs) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), testSequencerID)) require := p.Require() - logger := p.Logger() + // Setup L1 components orch.writeDefaultJWT() l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) require.True(ok, "l1 EL node required") @@ -94,50 +119,20 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN require.True(ok, "l1 CL node required") l1CL := l1CLComponent.(*L1CLNode) - l2EL, ok := orch.GetL2EL(l2ELID) - require.True(ok, "l2 EL node required") - - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CLID).ComponentID) - require.True(ok, "l2 CL node required") - l2CL := l2CLComponent.(L2CLNode) - - bid_L2 := seqtypes.BuilderID("test-standard-builder") - cid_L2 := seqtypes.CommitterID("test-standard-committer") - sid_L2 := seqtypes.SignerID("test-local-signer") - pid_L2 := seqtypes.PublisherID("test-standard-publisher") + l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) + require.True(ok, "l1 net required") + l1Net := l1NetComponent.(*L1Network) + // L1 sequencer IDs bid_L1 := seqtypes.BuilderID("test-l1-builder") cid_L1 := seqtypes.CommitterID("test-noop-committer") sid_L1 := seqtypes.SignerID("test-noop-signer") pid_L1 := seqtypes.PublisherID("test-noop-publisher") - - p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) - require.NoError(err, "need p2p key for sequencer") - raw := hexutil.Bytes(crypto.FromECDSA(p2pKey)) - - l2SequencerID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l2CLID.ChainID())) l1SequencerID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l1ELID.ChainID())) - l1NetComponent, ok := 
orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) - require.True(ok, "l1 net required") - l1Net := l1NetComponent.(*L1Network) - - v := &config.Ensemble{ + // Initialize ensemble config with L1 components + ensemble := &config.Ensemble{ Builders: map[seqtypes.BuilderID]*config.BuilderEntry{ - bid_L2: { - Standard: &standardbuilder.Config{ - L1ChainConfig: l1Net.genesis.Config, - L1EL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l1EL.UserRPC()), - }, - L2EL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2EL.UserRPC()), - }, - L2CL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.UserRPC()), - }, - }, - }, bid_L1: { L1: &fakepos.Config{ ChainConfig: orch.wb.outL1Genesis.Config, @@ -151,60 +146,24 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN }, }, Signers: map[seqtypes.SignerID]*config.SignerEntry{ - sid_L2: { - LocalKey: &localkey.Config{ - RawKey: &raw, - ChainID: l2CLID.ChainID(), - }, - }, sid_L1: { Noop: &noopsigner.Config{}, }, }, Committers: map[seqtypes.CommitterID]*config.CommitterEntry{ - cid_L2: { - Standard: &standardcommitter.Config{ - RPC: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.UserRPC()), - }, - }, - }, cid_L1: { Noop: &noopcommitter.Config{}, }, }, Publishers: map[seqtypes.PublisherID]*config.PublisherEntry{ - pid_L2: { - Standard: &standardpublisher.Config{ - RPC: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.UserRPC()), - }, - }, - }, pid_L1: { Noop: &nooppublisher.Config{}, }, }, Sequencers: map[seqtypes.SequencerID]*config.SequencerEntry{ - l2SequencerID: { - Full: &fullseq.Config{ - ChainID: l2CLID.ChainID(), - - Builder: bid_L2, - Signer: sid_L2, - Committer: cid_L2, - Publisher: pid_L2, - - SequencerConfDepth: 2, - SequencerEnabled: true, - SequencerStopped: false, - SequencerMaxSafeLag: 0, - }, - }, l1SequencerID: { Full: &fullseq.Config{ - ChainID: l1ELID.ChainID(), - + ChainID: l1ELID.ChainID(), Builder: bid_L1, Signer: sid_L1, Committer: cid_L1, @@ 
-214,10 +173,102 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN }, } - logger.Info("Configuring test sequencer", "l1EL", l1EL.UserRPC(), "l2EL", l2EL.UserRPC(), "l2CL", l2CL.UserRPC()) + // Track sequencer IDs for the TestSequencer struct + sequencerIDs := map[eth.ChainID]seqtypes.SequencerID{ + l1CLID.ChainID(): l1SequencerID, + } + + // Add L2 chain configurations + logFields := []any{"l1EL", l1EL.UserRPC()} + for i, l2Chain := range l2Chains { + l2EL, ok := orch.GetL2EL(l2Chain.ELID) + require.True(ok, "l2 EL node required for chain %d", i) + + l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2Chain.CLID).ComponentID) + require.True(ok, "l2 CL node required for chain %d", i) + l2CL := l2CLComponent.(L2CLNode) + + // Generate unique IDs for this L2 chain (use suffix for multi-chain, no suffix for single chain) + suffix := "" + if len(l2Chains) > 1 { + suffix = fmt.Sprintf("-%c", 'A'+i) // -A, -B, -C, etc. + } + bid := seqtypes.BuilderID(fmt.Sprintf("test-standard-builder%s", suffix)) + cid := seqtypes.CommitterID(fmt.Sprintf("test-standard-committer%s", suffix)) + sid := seqtypes.SignerID(fmt.Sprintf("test-local-signer%s", suffix)) + pid := seqtypes.PublisherID(fmt.Sprintf("test-standard-publisher%s", suffix)) + seqID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l2Chain.CLID.ChainID())) + + // Get P2P key for signing + p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2Chain.CLID.ChainID().ToBig())) + require.NoError(err, "need p2p key for sequencer %d", i) + rawKey := hexutil.Bytes(crypto.FromECDSA(p2pKey)) + + // Add builder + ensemble.Builders[bid] = &config.BuilderEntry{ + Standard: &standardbuilder.Config{ + L1ChainConfig: l1Net.genesis.Config, + L1EL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l1EL.UserRPC()), + }, + L2EL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2EL.UserRPC()), + }, + L2CL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CL.UserRPC()), + }, + }, + } + + // Add signer 
+ ensemble.Signers[sid] = &config.SignerEntry{ + LocalKey: &localkey.Config{ + RawKey: &rawKey, + ChainID: l2Chain.CLID.ChainID(), + }, + } + + // Add committer + ensemble.Committers[cid] = &config.CommitterEntry{ + Standard: &standardcommitter.Config{ + RPC: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CL.UserRPC()), + }, + }, + } + + // Add publisher + ensemble.Publishers[pid] = &config.PublisherEntry{ + Standard: &standardpublisher.Config{ + RPC: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CL.UserRPC()), + }, + }, + } + + // Add sequencer + ensemble.Sequencers[seqID] = &config.SequencerEntry{ + Full: &fullseq.Config{ + ChainID: l2Chain.CLID.ChainID(), + Builder: bid, + Signer: sid, + Committer: cid, + Publisher: pid, + SequencerConfDepth: 2, + SequencerEnabled: true, + SequencerStopped: false, + SequencerMaxSafeLag: 0, + }, + } + + sequencerIDs[l2Chain.CLID.ChainID()] = seqID + logFields = append(logFields, fmt.Sprintf("l2EL%d", i), l2EL.UserRPC(), fmt.Sprintf("l2CL%d", i), l2CL.UserRPC()) + } + + logger.Info("Configuring test sequencer", logFields...) 
jobs := work.NewJobRegistry() - ensemble, err := v.Start(context.Background(), &work.StartOpts{ + startedEnsemble, err := ensemble.Start(context.Background(), &work.StartOpts{ Log: logger, Metrics: &testmetrics.NoopMetrics{}, Jobs: jobs, @@ -233,7 +284,7 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN PprofConfig: oppprof.CLIConfig{ ListenEnabled: false, }, - LogConfig: oplog.CLIConfig{ // ignored, logger overrides this + LogConfig: oplog.CLIConfig{ Level: log.LevelDebug, Format: oplog.FormatText, }, @@ -242,7 +293,7 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN ListenPort: 0, EnableAdmin: true, }, - Ensemble: ensemble, + Ensemble: startedEnsemble, JWTSecretPath: jwtPath, Version: "dev", MockRun: false, @@ -263,13 +314,10 @@ func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLN }) testSequencerNode := &TestSequencer{ - id: testSequencerID, - userRPC: sq.RPC(), - jwtSecret: jwtSecret, - sequencers: map[eth.ChainID]seqtypes.SequencerID{ - l1CLID.ChainID(): l1SequencerID, - l2CLID.ChainID(): l2SequencerID, - }, + id: testSequencerID, + userRPC: sq.RPC(), + jwtSecret: jwtSecret, + sequencers: sequencerIDs, } logger.Info("Sequencer User RPC", "http_endpoint", testSequencerNode.userRPC) orch.registry.Register(stack.ConvertTestSequencerID(testSequencerID).ComponentID, testSequencerNode) diff --git a/op-supernode/supernode/activity/interop/algo.go b/op-supernode/supernode/activity/interop/algo.go index bafe923fe742c..5d7a3b87a20a0 100644 --- a/op-supernode/supernode/activity/interop/algo.go +++ b/op-supernode/supernode/activity/interop/algo.go @@ -20,8 +20,8 @@ var ( ErrUnknownChain = errors.New("unknown chain") // ErrTimestampViolation is returned when an executing message references - // an initiating message with a timestamp >= the executing message's timestamp. 
- ErrTimestampViolation = errors.New("initiating message timestamp must be less than executing message timestamp") + // an initiating message with a timestamp > the executing message's timestamp. + ErrTimestampViolation = errors.New("initiating message timestamp must not be greater than executing message timestamp") // ErrMessageExpired is returned when an executing message references // an initiating message that has expired (older than ExpiryTime). @@ -35,7 +35,7 @@ var ( // 1. Open the block from the logsDB and verify it matches blocksAtTimestamp // 2. For each executing message in the block: // - Verify the initiating message exists in the source chain's logsDB -// - Verify the initiating message timestamp < executing message timestamp +// - Verify the initiating message timestamp <= executing message timestamp // - Verify the initiating message hasn't expired (within ExpiryTime) func (i *Interop) verifyInteropMessages(ts uint64, blocksAtTimestamp map[eth.ChainID]eth.BlockID) (Result, error) { result := Result{ @@ -120,7 +120,8 @@ func (i *Interop) verifyInteropMessages(ts uint64, blocksAtTimestamp map[eth.Cha // Verify each executing message blockValid := true for logIdx, execMsg := range execMsgs { - if err := i.verifyExecutingMessage(chainID, blockRef.Time, logIdx, execMsg); err != nil { + err := i.verifyExecutingMessage(chainID, blockRef.Time, logIdx, execMsg) + if err != nil { i.log.Warn("invalid executing message", "chain", chainID, "block", expectedBlock.Number, @@ -143,9 +144,9 @@ func (i *Interop) verifyInteropMessages(ts uint64, blocksAtTimestamp map[eth.Cha } // verifyExecutingMessage verifies a single executing message by checking: -// 1. The initiating message exists in the source chain's database -// 2. The initiating message's timestamp is less than the executing block's timestamp -// 3. The initiating message hasn't expired (timestamp + ExpiryTime >= executing timestamp) +// 1. The initiating message exists in the source chain's database +// 2. 
The initiating message's timestamp is not greater than the executing block's timestamp +// 3. The initiating message hasn't expired (timestamp + ExpiryTime >= executing timestamp) func (i *Interop) verifyExecutingMessage(executingChain eth.ChainID, executingTimestamp uint64, logIdx uint32, execMsg *types.ExecutingMessage) error { // Get the source chain's logsDB sourceDB, ok := i.logsDBs[execMsg.ChainID] @@ -153,9 +154,9 @@ func (i *Interop) verifyExecutingMessage(executingChain eth.ChainID, executingTi return fmt.Errorf("source chain %s not found: %w", execMsg.ChainID, ErrUnknownChain) } - // Verify timestamp ordering: initiating message timestamp must be < executing block timestamp - if execMsg.Timestamp >= executingTimestamp { - return fmt.Errorf("initiating timestamp %d >= executing timestamp %d: %w", + // Verify timestamp ordering: initiating message timestamp must be <= executing block timestamp. + if execMsg.Timestamp > executingTimestamp { + return fmt.Errorf("initiating timestamp %d > executing timestamp %d: %w", execMsg.Timestamp, executingTimestamp, ErrTimestampViolation) } diff --git a/op-supernode/supernode/activity/interop/algo_test.go b/op-supernode/supernode/activity/interop/algo_test.go index 402c0eeba5c0a..1c3cbdd48b3b7 100644 --- a/op-supernode/supernode/activity/interop/algo_test.go +++ b/op-supernode/supernode/activity/interop/algo_test.go @@ -208,6 +208,72 @@ func TestVerifyInteropMessages(t *testing.T) { }, }, { + name: "ValidBlocks/SameTimestampMessage", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + // Same-timestamp interop: executing message references an initiating message + // from the SAME timestamp. 
+ sourceChainID := eth.ChainIDFromUInt64(10) + destChainID := eth.ChainIDFromUInt64(8453) + + sourceBlockHash := common.HexToHash("0xSource") + destBlockHash := common.HexToHash("0xDest") + + // Both blocks at the SAME timestamp + sharedTimestamp := uint64(1000) + + sourceBlock := eth.BlockID{Number: 50, Hash: sourceBlockHash} + destBlock := eth.BlockID{Number: 100, Hash: destBlockHash} + + execMsg := &suptypes.ExecutingMessage{ + ChainID: sourceChainID, + BlockNum: 50, + LogIdx: 0, + Timestamp: sharedTimestamp, // SAME as executing timestamp - should be VALID + Checksum: suptypes.MessageChecksum{0x01}, + } + + sourceDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: sourceBlockHash, Number: 50, Time: sharedTimestamp}, + containsSeal: suptypes.BlockSeal{Number: 50, Timestamp: sharedTimestamp}, + } + + destDB := &algoMockLogsDB{ + openBlockRef: eth.BlockRef{Hash: destBlockHash, Number: 100, Time: sharedTimestamp}, + openBlockExecMsg: map[uint32]*suptypes.ExecutingMessage{ + 0: execMsg, + }, + } + + l1Block := eth.BlockID{Number: 40, Hash: common.HexToHash("0xL1")} + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{ + sourceChainID: sourceDB, + destChainID: destDB, + }, + chains: map[eth.ChainID]cc.ChainContainer{ + sourceChainID: newMockChainWithL1(sourceChainID, l1Block), + destChainID: newMockChainWithL1(destChainID, l1Block), + }, + } + + return interop, sharedTimestamp, map[eth.ChainID]eth.BlockID{ + sourceChainID: sourceBlock, + destChainID: destBlock, + } + }, + validate: func(t *testing.T, result Result) { + // Same-timestamp messages should now be VALID + require.True(t, result.IsValid(), "same-timestamp messages should be valid") + require.Empty(t, result.InvalidHeads, "no blocks should be invalid") + }, + }, + { + // Interop verification *never* expects to be given chain data for chains that are not part of the supernode, + // so this test is not helpful except to demonstrate the specified behavior: if chain data is 
available + // but is not part of the chains map for some reason, it should not be used at all, as it is unrelated to the + // superchain's interop verification. name: "ValidBlocks/UnregisteredChainsSkipped", setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { registeredChain := eth.ChainIDFromUInt64(10) @@ -319,8 +385,11 @@ func TestVerifyInteropMessages(t *testing.T) { }, }, { - name: "InvalidBlocks/TimestampViolation", + name: "InvalidBlocks/FutureTimestamp", setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + // Future timestamp: initiating message timestamp > executing timestamp. + // This is INVALID (you can't execute a message that hasn't been initiated yet). + // Note: Same-timestamp (==) is ALLOWED, only strictly greater (>) is invalid. sourceChainID := eth.ChainIDFromUInt64(10) destChainID := eth.ChainIDFromUInt64(8453) @@ -331,7 +400,7 @@ func TestVerifyInteropMessages(t *testing.T) { ChainID: sourceChainID, BlockNum: 50, LogIdx: 0, - Timestamp: 1001, // Future timestamp - INVALID! + Timestamp: 1001, // FUTURE timestamp (> 1000) - INVALID! 
Checksum: suptypes.MessageChecksum{0x01}, } @@ -362,7 +431,7 @@ func TestVerifyInteropMessages(t *testing.T) { }, validate: func(t *testing.T, result Result) { destChainID := eth.ChainIDFromUInt64(8453) - require.False(t, result.IsValid()) + require.False(t, result.IsValid(), "future timestamp messages should be invalid") require.Contains(t, result.InvalidHeads, destChainID) }, }, diff --git a/op-supernode/supernode/activity/interop/cycle.go b/op-supernode/supernode/activity/interop/cycle.go new file mode 100644 index 0000000000000..1f60f6deb2f6a --- /dev/null +++ b/op-supernode/supernode/activity/interop/cycle.go @@ -0,0 +1,226 @@ +package interop + +import ( + "cmp" + "errors" + "slices" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +// ErrCycle is returned when a cycle is detected in same-timestamp messages. +var ErrCycle = errors.New("cycle detected in same-timestamp messages") + +// dependencyNode represents a log entry in the dependency graph. +// It tracks what this node depends on, and what depends on this node. +type dependencyNode struct { + chainID eth.ChainID + logIndex uint32 + execMsg *types.ExecutingMessage // nil if not an executing message + + resolved bool + dependsOn []*dependencyNode + dependedOnBy []*dependencyNode +} + +// dependencyGraph is a collection of dependency nodes for cycle checking. +type dependencyGraph []*dependencyNode + +// addNode adds a node to the graph. +func (g *dependencyGraph) addNode(n *dependencyNode) { + *g = append(*g, n) +} + +// addEdge adds a directed dependency: "from" depends on "to". +// This means "to" must be resolved before "from" can be resolved. +func (g *dependencyGraph) addEdge(from, to *dependencyNode) { + from.dependsOn = append(from.dependsOn, to) + to.dependedOnBy = append(to.dependedOnBy, from) +} + +// checkCycle runs Kahn's topological sort algorithm to detect cycles. 
+// Returns nil if the graph is acyclic (valid), ErrCycle if a cycle is detected. +// +// Algorithm: +// 1. Find nodes with no dependedOnBy (nothing depends on them) → add to removeSet, mark resolved +// 2. Remove items in removeSet from dependedOnBy of all nodes +// 3. Repeat until either: +// - All nodes resolved → acyclic (valid) +// - No progress (removeSet empty but unresolved nodes remain) → cycle detected +func checkCycle(g *dependencyGraph) error { + if len(*g) == 0 { + return nil + } + + for { + // Part 1: Find nodes with no dependedOnBy and mark them resolved + var removeSet []*dependencyNode + for _, node := range *g { + if !node.resolved && len(node.dependedOnBy) == 0 { + node.resolved = true + removeSet = append(removeSet, node) + } + } + + // If no nodes can be removed, check termination + if len(removeSet) == 0 { + // Check if all nodes are resolved + for _, node := range *g { + if !node.resolved { + // Unresolved nodes remain but no progress → cycle detected + return ErrCycle + } + } + // All nodes resolved → acyclic + return nil + } + + // Part 2: Remove items in removeSet from dependedOnBy of all nodes + for _, removed := range removeSet { + // Remove this node from dependedOnBy of nodes it depends on + for _, dependency := range removed.dependsOn { + dependency.dependedOnBy = removeFromSlice(dependency.dependedOnBy, removed) + } + } + } +} + +// removeFromSlice removes a node from a slice of nodes. +func removeFromSlice(slice []*dependencyNode, toRemove *dependencyNode) []*dependencyNode { + result := make([]*dependencyNode, 0, len(slice)) + for _, n := range slice { + if n != toRemove { + result = append(result, n) + } + } + return result +} + +// executingMessageBefore finds the latest EM in the slice with logIndex <= targetLogIdx. +// The slice must be sorted by logIndex ascending. +// Returns nil if no such EM exists. 
+func executingMessageBefore(chainEMs []*dependencyNode, targetLogIdx uint32) *dependencyNode { + var result *dependencyNode + for _, em := range chainEMs { + if em.logIndex <= targetLogIdx { + result = em // keep updating to get the latest one at or before target + } else { + break // since sorted, no need to continue + } + } + return result +} + +// buildCycleGraph constructs a dependency graph from executing messages at the given timestamp. +// it assumes all executing messages are included on blocks of the given timestamp +// For each EM, two types of edges are added: +// 1. Intra-chain: depends on the previous EM on the same chain (if exists) +// 2. Cross-chain: depends on executingMessageBefore(targetChain, targetLogIdx) (if exists) +func buildCycleGraph(ts uint64, chainEMs map[eth.ChainID]map[uint32]*types.ExecutingMessage) *dependencyGraph { + graph := &dependencyGraph{} + orderedExecutingMessages := make(map[eth.ChainID][]*dependencyNode) + + // First pass: create nodes for all same-timestamp EMs + for chainID, emsMap := range chainEMs { + for logIdx, em := range emsMap { + if em != nil && em.Timestamp == ts { + node := &dependencyNode{ + chainID: chainID, + logIndex: logIdx, + execMsg: em, + } + graph.addNode(node) + orderedExecutingMessages[chainID] = append(orderedExecutingMessages[chainID], node) + } + } + } + + // Sort each chain's nodes by logIndex (map iteration order is non-deterministic) + for _, nodes := range orderedExecutingMessages { + slices.SortFunc(nodes, func(a, b *dependencyNode) int { + return cmp.Compare(a.logIndex, b.logIndex) + }) + } + + // Second pass: add edges + for _, nodes := range orderedExecutingMessages { + for i, node := range nodes { + // all nodes point back to the previous node on the same chain + if i > 0 { + graph.addEdge(node, nodes[i-1]) + } + + // all nodes also point to their target + targetChainEMs := orderedExecutingMessages[node.execMsg.ChainID] + target := executingMessageBefore(targetChainEMs, 
node.execMsg.LogIdx) + if target != nil { + graph.addEdge(node, target) + } + } + } + + return graph +} + +// verifyCycleMessages is the cycle verification function for same-timestamp interop. +// It verifies that same-timestamp executing messages form valid dependency relationships +// using Kahn's topological sort algorithm. +// +// Returns a Result with InvalidHeads populated for chains participating in cycles. +func (i *Interop) verifyCycleMessages(ts uint64, blocksAtTimestamp map[eth.ChainID]eth.BlockID) (Result, error) { + result := Result{ + Timestamp: ts, + L2Heads: blocksAtTimestamp, + } + + // collect all EMs for the given blocks per chain + chainEMs := make(map[eth.ChainID]map[uint32]*types.ExecutingMessage) + for chainID, blockID := range blocksAtTimestamp { + db, ok := i.logsDBs[chainID] + if !ok { + // Chain not in logsDBs - skip it for cycle verification + continue + } + blockRef, _, execMsgs, err := db.OpenBlock(blockID.Number) + if err != nil { + // Can't open block - no EMs to add to the graph for this chain + // This can happen if the logsDB is empty or the block hasn't been indexed + continue + } + // Verify the block has the expected timestamp + if blockRef.Time != ts { + // Block timestamp mismatch - skip this chain for cycle verification + continue + } + chainEMs[chainID] = execMsgs + } + + // Build dependency graph and check for cycles + graph := buildCycleGraph(ts, chainEMs) + if err := checkCycle(graph); err != nil { + // Cycle detected - mark only chains with unresolved nodes as invalid + // (bystander chains that have same-ts EMs but aren't part of the cycle are spared) + cycleChains := collectCycleParticipants(graph) + if len(cycleChains) > 0 { + result.InvalidHeads = make(map[eth.ChainID]eth.BlockID) + for chainID := range cycleChains { + result.InvalidHeads[chainID] = blocksAtTimestamp[chainID] + } + } + } + + return result, nil +} + +// collectCycleParticipants returns the set of chains that have unresolved nodes +// after running 
checkCycle. These are the chains actually participating in a cycle. +func collectCycleParticipants(graph *dependencyGraph) map[eth.ChainID]bool { + cycleChains := make(map[eth.ChainID]bool) + for _, node := range *graph { + if !node.resolved { + cycleChains[node.chainID] = true + } + } + return cycleChains +} diff --git a/op-supernode/supernode/activity/interop/cycle_test.go b/op-supernode/supernode/activity/interop/cycle_test.go new file mode 100644 index 0000000000000..e032dfe110858 --- /dev/null +++ b/op-supernode/supernode/activity/interop/cycle_test.go @@ -0,0 +1,492 @@ +package interop + +import ( + "testing" + + "github.com/stretchr/testify/require" + + "github.com/ethereum-optimism/optimism/op-service/eth" + suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +// ============================================================================= +// Test Helpers - Common Graph Patterns +// ============================================================================= + +var ( + testChainA = eth.ChainIDFromUInt64(10) + testChainB = eth.ChainIDFromUInt64(8453) + testChainC = eth.ChainIDFromUInt64(420) + testChainD = eth.ChainIDFromUInt64(999) + testTS = uint64(1000) +) + +// mutualCycle creates A↔B cycle at log index 0 +func mutualCycle(a, b eth.ChainID) map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage { + return map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + a: {0: {ChainID: b, LogIdx: 0, Timestamp: testTS}}, + b: {0: {ChainID: a, LogIdx: 0, Timestamp: testTS}}, + } +} + +// triangleCycle creates A→B→C→A cycle at log index 0 +func triangleCycle(a, b, c eth.ChainID) map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage { + return map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + a: {0: {ChainID: b, LogIdx: 0, Timestamp: testTS}}, + b: {0: {ChainID: c, LogIdx: 0, Timestamp: testTS}}, + c: {0: {ChainID: a, LogIdx: 0, Timestamp: testTS}}, + } +} + +// oneWayRef creates a one-way reference from chain 'from' to chain 
'to' +func oneWayRef(from, to eth.ChainID, fromLogIdx, toLogIdx uint32) map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage { + return map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + from: {fromLogIdx: {ChainID: to, LogIdx: toLogIdx, Timestamp: testTS}}, + } +} + +// mergeEMs merges multiple EM maps into one +func mergeEMs(maps ...map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage) map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage { + result := make(map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage) + for _, m := range maps { + for chainID, ems := range m { + if result[chainID] == nil { + result[chainID] = make(map[uint32]*suptypes.ExecutingMessage) + } + for logIdx, em := range ems { + result[chainID][logIdx] = em + } + } + } + return result +} + +// ============================================================================= +// Graph Construction Tests +// ============================================================================= + +func TestDependencyGraph_AddNode(t *testing.T) { + t.Parallel() + + g := &dependencyGraph{} + node := &dependencyNode{ + chainID: eth.ChainIDFromUInt64(10), + logIndex: 0, + } + + g.addNode(node) + + require.Len(t, *g, 1) + require.Equal(t, node, (*g)[0]) +} + +func TestDependencyGraph_AddEdge(t *testing.T) { + t.Parallel() + + g := &dependencyGraph{} + nodeA := &dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 0} + nodeB := &dependencyNode{chainID: eth.ChainIDFromUInt64(8453), logIndex: 0} + + g.addNode(nodeA) + g.addNode(nodeB) + + // A depends on B (B must resolve before A) + g.addEdge(nodeA, nodeB) + + require.Len(t, nodeA.dependsOn, 1) + require.Equal(t, nodeB, nodeA.dependsOn[0]) + require.Len(t, nodeB.dependedOnBy, 1) + require.Equal(t, nodeA, nodeB.dependedOnBy[0]) +} + +// ============================================================================= +// executingMessageBefore Tests +// ============================================================================= + +func 
TestExecutingMessageBefore(t *testing.T) { + t.Parallel() + + chainA := eth.ChainIDFromUInt64(10) + + tests := []struct { + name string + chainEMs []*dependencyNode // EMs on the chain, sorted by logIndex + targetLogIdx uint32 + expectNode bool + expectLogIndex uint32 // only checked if expectNode is true + }{ + { + name: "empty chain returns nil", + chainEMs: nil, + targetLogIdx: 5, + expectNode: false, + }, + { + name: "no EM at or before target returns nil", + chainEMs: []*dependencyNode{ + {chainID: chainA, logIndex: 5}, + {chainID: chainA, logIndex: 10}, + }, + targetLogIdx: 3, // all EMs are > 3 + expectNode: false, + }, + { + name: "exact match returns that EM", + chainEMs: []*dependencyNode{ + {chainID: chainA, logIndex: 2}, + {chainID: chainA, logIndex: 5}, + }, + targetLogIdx: 5, // EM at exactly index 5 + expectLogIndex: 5, + expectNode: true, + }, + { + name: "returns latest EM at or before target", + chainEMs: []*dependencyNode{ + {chainID: chainA, logIndex: 1}, + {chainID: chainA, logIndex: 3}, + {chainID: chainA, logIndex: 7}, + }, + targetLogIdx: 5, // EMs at 1 and 3 are <= 5, should return 3 + expectLogIndex: 3, + expectNode: true, + }, + { + name: "target at index 0 with EM at 0 returns that EM", + chainEMs: []*dependencyNode{ + {chainID: chainA, logIndex: 0}, + {chainID: chainA, logIndex: 5}, + }, + targetLogIdx: 0, // EM at exactly 0 + expectLogIndex: 0, + expectNode: true, + }, + { + name: "target at index 0 with no EM at 0 returns nil", + chainEMs: []*dependencyNode{ + {chainID: chainA, logIndex: 1}, + {chainID: chainA, logIndex: 5}, + }, + targetLogIdx: 0, // no EM at or before 0 + expectNode: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + result := executingMessageBefore(tc.chainEMs, tc.targetLogIdx) + if tc.expectNode { + require.NotNil(t, result, "expected to find an EM at or before target") + require.Equal(t, tc.expectLogIndex, result.logIndex) + } else { + require.Nil(t, result, 
"expected no EM at or before target") + } + }) + } +} + +// ============================================================================= +// Kahn's Algorithm Tests +// ============================================================================= + +func TestCheckCycle(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + buildGraph func() *dependencyGraph + expectCycle bool + }{ + { + name: "empty graph has no cycle", + buildGraph: func() *dependencyGraph { + return &dependencyGraph{} + }, + expectCycle: false, + }, + { + name: "single node no deps resolves", + buildGraph: func() *dependencyGraph { + g := &dependencyGraph{} + g.addNode(&dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 0}) + return g + }, + expectCycle: false, + }, + { + name: "linear chain A->B->C resolves (acyclic)", + buildGraph: func() *dependencyGraph { + g := &dependencyGraph{} + a := &dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 0} + b := &dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 1} + c := &dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 2} + g.addNode(a) + g.addNode(b) + g.addNode(c) + // c depends on b, b depends on a + g.addEdge(c, b) + g.addEdge(b, a) + return g + }, + expectCycle: false, + }, + { + name: "simple cycle A<->B detected", + buildGraph: func() *dependencyGraph { + g := &dependencyGraph{} + a := &dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 0} + b := &dependencyNode{chainID: eth.ChainIDFromUInt64(8453), logIndex: 0} + g.addNode(a) + g.addNode(b) + // A depends on B, B depends on A (cycle!) 
+ g.addEdge(a, b) + g.addEdge(b, a) + return g + }, + expectCycle: true, + }, + { + name: "triangle cycle A->B->C->A detected", + buildGraph: func() *dependencyGraph { + g := &dependencyGraph{} + a := &dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 0} + b := &dependencyNode{chainID: eth.ChainIDFromUInt64(8453), logIndex: 0} + c := &dependencyNode{chainID: eth.ChainIDFromUInt64(420), logIndex: 0} + g.addNode(a) + g.addNode(b) + g.addNode(c) + // A depends on C, C depends on B, B depends on A (cycle!) + g.addEdge(a, c) + g.addEdge(c, b) + g.addEdge(b, a) + return g + }, + expectCycle: true, + }, + { + name: "diamond pattern A->B,C B,C->D resolves (acyclic)", + buildGraph: func() *dependencyGraph { + g := &dependencyGraph{} + a := &dependencyNode{chainID: eth.ChainIDFromUInt64(10), logIndex: 0} + b := &dependencyNode{chainID: eth.ChainIDFromUInt64(8453), logIndex: 0} + c := &dependencyNode{chainID: eth.ChainIDFromUInt64(420), logIndex: 0} + d := &dependencyNode{chainID: eth.ChainIDFromUInt64(999), logIndex: 0} + g.addNode(a) + g.addNode(b) + g.addNode(c) + g.addNode(d) + // D depends on B and C, B and C depend on A + g.addEdge(d, b) + g.addEdge(d, c) + g.addEdge(b, a) + g.addEdge(c, a) + return g + }, + expectCycle: false, + }, + { + name: "intra-chain sequential logs resolve", + buildGraph: func() *dependencyGraph { + // Simulates a single chain with 3 logs where each depends on previous + g := &dependencyGraph{} + chain10 := eth.ChainIDFromUInt64(10) + l0 := &dependencyNode{chainID: chain10, logIndex: 0} + l1 := &dependencyNode{chainID: chain10, logIndex: 1} + l2 := &dependencyNode{chainID: chain10, logIndex: 2} + g.addNode(l0) + g.addNode(l1) + g.addNode(l2) + // l1 depends on l0, l2 depends on l1 + g.addEdge(l1, l0) + g.addEdge(l2, l1) + return g + }, + expectCycle: false, + }, + { + name: "cross-chain valid exec message resolves", + buildGraph: func() *dependencyGraph { + // Chain A: [L0, L1(exec B:L0)] + // Chain B: [L0(init)] + g := 
&dependencyGraph{} + chainA := eth.ChainIDFromUInt64(10) + chainB := eth.ChainIDFromUInt64(8453) + + aL0 := &dependencyNode{chainID: chainA, logIndex: 0} + aL1 := &dependencyNode{chainID: chainA, logIndex: 1, execMsg: &suptypes.ExecutingMessage{ + ChainID: chainB, LogIdx: 0, + }} + bL0 := &dependencyNode{chainID: chainB, logIndex: 0} + + g.addNode(aL0) + g.addNode(aL1) + g.addNode(bL0) + + // aL1 depends on aL0 (sequential) and bL0 (exec->init) + g.addEdge(aL1, aL0) + g.addEdge(aL1, bL0) + return g + }, + expectCycle: false, + }, + { + name: "cross-chain mutual exec creates cycle", + buildGraph: func() *dependencyGraph { + // Chain A: [L0(exec B:L0)] + // Chain B: [L0(exec A:L0)] + g := &dependencyGraph{} + chainA := eth.ChainIDFromUInt64(10) + chainB := eth.ChainIDFromUInt64(8453) + + aL0 := &dependencyNode{chainID: chainA, logIndex: 0, execMsg: &suptypes.ExecutingMessage{ + ChainID: chainB, LogIdx: 0, + }} + bL0 := &dependencyNode{chainID: chainB, logIndex: 0, execMsg: &suptypes.ExecutingMessage{ + ChainID: chainA, LogIdx: 0, + }} + + g.addNode(aL0) + g.addNode(bL0) + + // aL0 depends on bL0, bL0 depends on aL0 (cycle!) 
+ g.addEdge(aL0, bL0) + g.addEdge(bL0, aL0) + return g + }, + expectCycle: true, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + g := tc.buildGraph() + err := checkCycle(g) + if tc.expectCycle { + require.Error(t, err, "expected cycle to be detected") + } else { + require.NoError(t, err, "expected no cycle") + } + }) + } +} + +// ============================================================================= +// buildCycleGraph Tests +// ============================================================================= + +func TestBuildCycleGraph(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + chainEMs map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage + expectCycle bool + expectInCycle []eth.ChainID // chains that should be in the cycle (only checked if expectCycle) + expectNotInCycle []eth.ChainID // chains that should NOT be in cycle (bystanders) + }{ + { + name: "empty graph - no cycle", + chainEMs: map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{}, + expectCycle: false, + }, + { + name: "past timestamp filtered out", + chainEMs: map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + testChainA: {0: {ChainID: testChainB, LogIdx: 0, Timestamp: testTS - 100}}, + }, + expectCycle: false, + }, + { + name: "one-way ref to chain with no EMs - no cycle", + chainEMs: oneWayRef(testChainA, testChainB, 0, 0), + expectCycle: false, + }, + { + name: "mutual cycle A↔B", + chainEMs: mutualCycle(testChainA, testChainB), + expectCycle: true, + expectInCycle: []eth.ChainID{testChainA, testChainB}, + }, + { + name: "triangle cycle A→B→C→A", + chainEMs: triangleCycle(testChainA, testChainB, testChainC), + expectCycle: true, + expectInCycle: []eth.ChainID{testChainA, testChainB, testChainC}, + }, + { + name: "A↔C cycle with B as bystander", + chainEMs: mergeEMs( + mutualCycle(testChainA, testChainC), + oneWayRef(testChainB, testChainD, 0, 0), // B refs non-existent D + ), + expectCycle: true, + 
expectInCycle: []eth.ChainID{testChainA, testChainC}, + expectNotInCycle: []eth.ChainID{testChainB}, + }, + { + name: "one-way dependency - no cycle", + chainEMs: map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + testChainA: {0: {ChainID: testChainB, LogIdx: 5, Timestamp: testTS}}, + testChainB: {3: {ChainID: testChainC, LogIdx: 0, Timestamp: testTS}}, + }, + expectCycle: false, + }, + { + name: "ref before target EM - no dependency, no cycle", + chainEMs: map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + testChainA: {0: {ChainID: testChainB, LogIdx: 2, Timestamp: testTS}}, // refs B:2 + testChainB: {3: {ChainID: testChainA, LogIdx: 0, Timestamp: testTS}}, // B:3 > 2, no match + }, + expectCycle: false, + }, + { + name: "intra-chain sequential EMs - no cycle", + chainEMs: map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + testChainA: { + 0: {ChainID: testChainB, LogIdx: 0, Timestamp: testTS}, + 5: {ChainID: testChainB, LogIdx: 3, Timestamp: testTS}, + }, + }, + expectCycle: false, + }, + { + name: "triangle with missing leg - no cycle", + chainEMs: map[eth.ChainID]map[uint32]*suptypes.ExecutingMessage{ + testChainA: {5: {ChainID: testChainB, LogIdx: 3, Timestamp: testTS}}, + testChainB: {5: {ChainID: testChainC, LogIdx: 3, Timestamp: testTS}}, + testChainC: {5: {ChainID: testChainA, LogIdx: 3, Timestamp: testTS}}, // A:5 > 3 + }, + expectCycle: false, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + graph := buildCycleGraph(testTS, tc.chainEMs) + err := checkCycle(graph) + + if tc.expectCycle { + require.Error(t, err, "expected cycle") + + // Verify cycle participants + cycleChains := make(map[eth.ChainID]bool) + for _, node := range *graph { + if !node.resolved { + cycleChains[node.chainID] = true + } + } + for _, c := range tc.expectInCycle { + require.True(t, cycleChains[c], "chain %v should be in cycle", c) + } + for _, c := range tc.expectNotInCycle { + require.False(t, cycleChains[c], "chain %v 
should NOT be in cycle", c) + } + } else { + require.NoError(t, err, "expected no cycle") + } + }) + } +} diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index 25b707059cdc4..604a53fdaaef9 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -55,6 +55,11 @@ type Interop struct { verifyFn func(ts uint64, blocksAtTimestamp map[eth.ChainID]eth.BlockID) (Result, error) + // cycleVerifyFn handles same-timestamp cycle verification. + // It is called after verifyFn in progressInterop, and its results are merged. + // Set to verifyCycleMessages by default in New(). + cycleVerifyFn func(ts uint64, blocksAtTimestamp map[eth.ChainID]eth.BlockID) (Result, error) + // pauseAtTimestamp is used for integration test control only. // When non-zero, progressInterop will return early without processing // if the next timestamp to process is >= this value. @@ -105,6 +110,7 @@ func New( // default to using the verifyInteropMessages function // (can be overridden by tests) i.verifyFn = i.verifyInteropMessages + i.cycleVerifyFn = i.verifyCycleMessages return i } @@ -297,9 +303,28 @@ func (i *Interop) progressInterop() (Result, error) { return Result{}, err } - // 3: validate interop messages - // and return the result and any errors - return i.verifyFn(ts, blocksAtTimestamp) + // 3: validate interop messages using verifyFn + result, err := i.verifyFn(ts, blocksAtTimestamp) + if err != nil { + return Result{}, err + } + + // 4: run cycle verification and merge results + cycleResult, err := i.cycleVerifyFn(ts, blocksAtTimestamp) + if err != nil { + return Result{}, fmt.Errorf("cycle verification failed: %w", err) + } + // Merge invalid heads from cycle verification into result + if len(cycleResult.InvalidHeads) > 0 { + if result.InvalidHeads == nil { + result.InvalidHeads = make(map[eth.ChainID]eth.BlockID) + } + for chainID, invalidBlock := range 
cycleResult.InvalidHeads { + result.InvalidHeads[chainID] = invalidBlock + } + } + + return result, nil } func (i *Interop) handleResult(result Result) error { diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index c95bbed173c04..85106b0159cd4 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -137,6 +137,7 @@ func TestNew(t *testing.T) { require.Len(t, interop.chains, 2) require.Len(t, interop.logsDBs, 2) require.NotNil(t, interop.verifyFn) + require.NotNil(t, interop.cycleVerifyFn) for chainID := range h.Chains() { require.Contains(t, interop.logsDBs, chainID) @@ -533,6 +534,156 @@ func TestProgressInterop(t *testing.T) { } } +// ============================================================================= +// TestProgressInteropWithCycleVerify +// ============================================================================= + +func TestProgressInteropWithCycleVerify(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + setup func(h *interopTestHarness) *interopTestHarness + run func(t *testing.T, h *interopTestHarness) + }{ + { + name: "default cycleVerifyFn returns valid result", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + // Set verifyFn to return a valid result + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{Timestamp: ts, L2Heads: blocks}, nil + } + // cycleVerifyFn is overridden with this stub implementation. 
+ + result, err := h.interop.progressInterop() + require.NoError(t, err) + require.False(t, result.IsEmpty()) + require.True(t, result.IsValid()) + }, + }, + { + name: "cycleVerifyFn called after verifyFn and results merged", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).WithChain(8453, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 200, Hash: common.HexToHash("0x2")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + verifyFnCalled := false + cycleVerifyFnCalled := false + chain10 := eth.ChainIDFromUInt64(10) + chain8453 := eth.ChainIDFromUInt64(8453) + + // verifyFn returns valid result + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + verifyFnCalled = true + return Result{Timestamp: ts, L2Heads: blocks}, nil + } + + // cycleVerifyFn marks chain 8453 as invalid + h.interop.cycleVerifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + require.True(t, verifyFnCalled, "verifyFn should be called before cycleVerifyFn") + cycleVerifyFnCalled = true + return Result{ + Timestamp: ts, + L2Heads: blocks, + InvalidHeads: map[eth.ChainID]eth.BlockID{ + chain8453: blocks[chain8453], + }, + }, nil + } + + result, err := h.interop.progressInterop() + require.NoError(t, err) + require.True(t, verifyFnCalled, "verifyFn should be called") + require.True(t, cycleVerifyFnCalled, "cycleVerifyFn should be called") + require.False(t, result.IsValid(), "result should be invalid due to cycleVerifyFn") + require.Contains(t, result.InvalidHeads, chain8453) + require.NotContains(t, result.InvalidHeads, chain10) + }, + }, + { + name: "cycleVerifyFn error propagated", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = 
eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{Timestamp: ts, L2Heads: blocks}, nil + } + h.interop.cycleVerifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{}, errors.New("cycle verification failed") + } + + result, err := h.interop.progressInterop() + require.Error(t, err) + require.Contains(t, err.Error(), "cycle verification") + require.True(t, result.IsEmpty()) + }, + }, + { + name: "both verifyFn and cycleVerifyFn invalid heads are merged", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 100, Hash: common.HexToHash("0x1")} + }).WithChain(8453, func(m *mockChainContainer) { + m.blockAtTimestamp = eth.L2BlockRef{Number: 200, Hash: common.HexToHash("0x2")} + }).Build() + }, + run: func(t *testing.T, h *interopTestHarness) { + chain10 := eth.ChainIDFromUInt64(10) + chain8453 := eth.ChainIDFromUInt64(8453) + + // verifyFn marks chain 10 as invalid + h.interop.verifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{ + Timestamp: ts, + L2Heads: blocks, + InvalidHeads: map[eth.ChainID]eth.BlockID{ + chain10: blocks[chain10], + }, + }, nil + } + + // cycleVerifyFn marks chain 8453 as invalid + h.interop.cycleVerifyFn = func(ts uint64, blocks map[eth.ChainID]eth.BlockID) (Result, error) { + return Result{ + Timestamp: ts, + L2Heads: blocks, + InvalidHeads: map[eth.ChainID]eth.BlockID{ + chain8453: blocks[chain8453], + }, + }, nil + } + + result, err := h.interop.progressInterop() + require.NoError(t, err) + require.False(t, result.IsValid()) + // Both chains should be in InvalidHeads + require.Contains(t, result.InvalidHeads, chain10, "chain10 from verifyFn should be invalid") + 
require.Contains(t, result.InvalidHeads, chain8453, "chain8453 from cycleVerifyFn should be invalid") + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + h := newInteropTestHarness(t) + tc.setup(h) + tc.run(t, h) + }) + } +} + // ============================================================================= // TestVerifiedAtTimestamp // ============================================================================= From ef39b824b25a50a2aa3c6508b04b9d30dadcadfe Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Mon, 2 Mar 2026 04:48:42 -0600 Subject: [PATCH 030/201] fix supernode: Initial Block Seal (#19308) * Initial Block Seal * test(supernode): add integration test for first block with logs Add integration test that uses real logs.DB to verify first block with logs works correctly. This test would have caught the bug where isFirstBlock with logs failed due to empty parentBlock. Changes: - Update mockLogsDB to track all SealBlock calls (not just last) - Update existing tests to verify new two-SealBlock behavior - Add unit test for first block with logs using mock - Add integration test using real logs.DB The integration test validates: - First block at non-zero number with logs succeeds - Data is correctly persisted (latestBlock, logCount, parentHash) * lint --- .../supernode/activity/interop/logdb.go | 21 ++- .../supernode/activity/interop/logdb_test.go | 127 ++++++++++++++---- 2 files changed, 119 insertions(+), 29 deletions(-) diff --git a/op-supernode/supernode/activity/interop/logdb.go b/op-supernode/supernode/activity/interop/logdb.go index 097e4e0c9ec01..d7e6ed5f0814b 100644 --- a/op-supernode/supernode/activity/interop/logdb.go +++ b/op-supernode/supernode/activity/interop/logdb.go @@ -199,18 +199,28 @@ func (i *Interop) verifyCanAddTimestamp(chainID eth.ChainID, db LogsDB, ts uint6 // processBlockLogs processes the receipts for a block and stores the logs in the database. 
// If isFirstBlock is true, this is the first block being added to the logsDB (at activation timestamp), -// and we treat it as genesis by using an empty parent block. This allows the logsDB to start at any -// block number, not just genesis. +// and we first seal a "virtual parent" block so that logs have a sealed block to reference. +// This allows the logsDB to start at any block number, not just genesis. func (i *Interop) processBlockLogs(db LogsDB, blockInfo eth.BlockInfo, receipts gethTypes.Receipts, isFirstBlock bool) error { blockNum := blockInfo.NumberU64() blockID := eth.BlockID{Hash: blockInfo.Hash(), Number: blockNum} parentHash := blockInfo.ParentHash() - // For the first block in the logsDB (activation block), use empty parent to treat it as genesis. - // This allows OpenBlock to work correctly even when we start at a non-genesis block. parentBlock := eth.BlockID{Hash: parentHash, Number: blockNum - 1} sealParentHash := parentHash - if blockNum == 0 || isFirstBlock { + + // For the first block in the logsDB (activation block), we need to first seal + // a virtual parent block so that logs have a sealed block to reference. + // When the DB is empty, SealBlock allows any block to be added without parent validation. 
+ if isFirstBlock && blockNum > 0 { + // Seal the parent as a "virtual genesis" - this works because DB is empty + if err := db.SealBlock(common.Hash{}, parentBlock, blockInfo.Time()); err != nil { + return fmt.Errorf("failed to seal virtual parent for first block: %w", err) + } + // parentBlock stays as-is (references the now-sealed parent) + // sealParentHash stays as parentHash + } else if blockNum == 0 { + // Actual genesis block - no parent, no logs allowed parentBlock = eth.BlockID{} sealParentHash = common.Hash{} } @@ -230,7 +240,6 @@ func (i *Interop) processBlockLogs(db LogsDB, blockInfo eth.BlockInfo, receipts } } - // Seal the block - use empty parent hash for first block if err := db.SealBlock(sealParentHash, blockID, blockInfo.Time()); err != nil { return fmt.Errorf("failed to seal block: %w", err) } diff --git a/op-supernode/supernode/activity/interop/logdb_test.go b/op-supernode/supernode/activity/interop/logdb_test.go index 168db93c61561..d9170632ee2ba 100644 --- a/op-supernode/supernode/activity/interop/logdb_test.go +++ b/op-supernode/supernode/activity/interop/logdb_test.go @@ -284,10 +284,10 @@ func TestProcessBlockLogs(t *testing.T) { err := interop.processBlockLogs(db, blockInfo, types.Receipts{}, false) require.NoError(t, err) - require.NotNil(t, db.sealBlockCall) - require.Equal(t, common.Hash{0x01}, db.sealBlockCall.parentHash) - require.Equal(t, uint64(100), db.sealBlockCall.block.Number) - require.Equal(t, uint64(1000), db.sealBlockCall.timestamp) + require.Len(t, db.sealBlockCalls, 1) + require.Equal(t, common.Hash{0x01}, db.sealBlockCalls[0].parentHash) + require.Equal(t, uint64(100), db.sealBlockCalls[0].block.Number) + require.Equal(t, uint64(1000), db.sealBlockCalls[0].timestamp) require.Equal(t, 0, db.addLogCalls) }) @@ -321,7 +321,7 @@ func TestProcessBlockLogs(t *testing.T) { require.NoError(t, err) require.Equal(t, 3, db.addLogCalls) - require.NotNil(t, db.sealBlockCall) + require.Len(t, db.sealBlockCalls, 1) }) t.Run("genesis 
block handled correctly", func(t *testing.T) { @@ -339,11 +339,11 @@ func TestProcessBlockLogs(t *testing.T) { err := interop.processBlockLogs(db, blockInfo, types.Receipts{}, true) require.NoError(t, err) - require.NotNil(t, db.sealBlockCall) - require.Equal(t, uint64(0), db.sealBlockCall.block.Number) + require.Len(t, db.sealBlockCalls, 1) + require.Equal(t, uint64(0), db.sealBlockCalls[0].block.Number) }) - t.Run("first block at non-zero number uses empty parent", func(t *testing.T) { + t.Run("first block at non-zero number seals virtual parent first", func(t *testing.T) { t.Parallel() interop := &Interop{log: gethlog.New()} @@ -355,14 +355,95 @@ func TestProcessBlockLogs(t *testing.T) { timestamp: 1000, } - // isFirstBlock=true should use empty parent for both AddLog and SealBlock - // This allows the logsDB to treat this block as its genesis + // isFirstBlock=true should first seal a "virtual parent" block, + // then seal the actual block. This allows logs to reference a sealed parent. 
err := interop.processBlockLogs(db, blockInfo, types.Receipts{}, true) require.NoError(t, err) - require.NotNil(t, db.sealBlockCall) - // Both AddLog and SealBlock should use empty parent for first block - require.Equal(t, common.Hash{}, db.sealBlockCall.parentHash) + require.Len(t, db.sealBlockCalls, 2) + + // First call: seal the virtual parent (block 9) with empty parent hash + require.Equal(t, common.Hash{}, db.sealBlockCalls[0].parentHash) + require.Equal(t, uint64(9), db.sealBlockCalls[0].block.Number) + require.Equal(t, common.Hash{0x01}, db.sealBlockCalls[0].block.Hash) + + // Second call: seal the actual block (block 10) with real parent hash + require.Equal(t, common.Hash{0x01}, db.sealBlockCalls[1].parentHash) + require.Equal(t, uint64(10), db.sealBlockCalls[1].block.Number) + require.Equal(t, common.Hash{0x02}, db.sealBlockCalls[1].block.Hash) + }) + + t.Run("first block with logs succeeds", func(t *testing.T) { + t.Parallel() + + interop := &Interop{log: gethlog.New()} + db := &mockLogsDB{} + blockInfo := &testBlockInfo{ + hash: common.Hash{0x02}, + parentHash: common.Hash{0x01}, + number: 100, + timestamp: 1000, + } + + receipts := types.Receipts{ + &types.Receipt{ + Logs: []*types.Log{ + {Address: common.Address{0xAA}, Data: []byte{0x01}}, + }, + }, + } + + // This is the key test: first block with logs should work because + // we seal the virtual parent first, allowing AddLog to reference it + err := interop.processBlockLogs(db, blockInfo, receipts, true) + + require.NoError(t, err) + require.Len(t, db.sealBlockCalls, 2) // virtual parent + actual block + require.Equal(t, 1, db.addLogCalls) + }) + + t.Run("integration: first block with logs against real DB", func(t *testing.T) { + t.Parallel() + + dataDir := t.TempDir() + chainID := eth.ChainIDFromUInt64(10) + + db, err := openLogsDB(gethlog.New(), chainID, dataDir) + require.NoError(t, err) + defer db.Close() + + interop := &Interop{log: gethlog.New()} + blockInfo := &testBlockInfo{ + hash: 
common.Hash{0x02}, + parentHash: common.Hash{0x01}, + number: 100, + timestamp: 1000, + } + receipts := types.Receipts{ + &types.Receipt{ + Logs: []*types.Log{ + {Address: common.Address{0xAA}, Data: []byte{0x01}}, + {Address: common.Address{0xBB}, Data: []byte{0x02}}, + }, + }, + } + + // This is the key integration test: first block with logs must work + // against the real logs.DB, not just the mock. + err = interop.processBlockLogs(db, blockInfo, receipts, true) + require.NoError(t, err) + + // Verify data is correctly in the DB + latestBlock, ok := db.LatestSealedBlock() + require.True(t, ok) + require.Equal(t, uint64(100), latestBlock.Number) + require.Equal(t, common.Hash{0x02}, latestBlock.Hash) + + // Verify we can open the block and see the logs + ref, logCount, _, err := db.OpenBlock(100) + require.NoError(t, err) + require.Equal(t, uint32(2), logCount) + require.Equal(t, common.Hash{0x01}, ref.ParentHash) }) t.Run("AddLog error propagated", func(t *testing.T) { @@ -476,14 +557,14 @@ func TestLoadLogs_ParentHashMismatch(t *testing.T) { // ============================================================================= type mockLogsDB struct { - latestBlock eth.BlockID - hasBlocks bool - seal suptypes.BlockSeal - findSealErr error - addLogErr error - sealBlockErr error - addLogCalls int - sealBlockCall *sealBlockCall + latestBlock eth.BlockID + hasBlocks bool + seal suptypes.BlockSeal + findSealErr error + addLogErr error + sealBlockErr error + addLogCalls int + sealBlockCalls []*sealBlockCall // Track all SealBlock calls firstSealedBlock suptypes.BlockSeal firstSealedBlockErr error @@ -541,11 +622,11 @@ func (m *mockLogsDB) AddLog(logHash common.Hash, parentBlock eth.BlockID, logIdx } func (m *mockLogsDB) SealBlock(parentHash common.Hash, block eth.BlockID, timestamp uint64) error { - m.sealBlockCall = &sealBlockCall{ + m.sealBlockCalls = append(m.sealBlockCalls, &sealBlockCall{ parentHash: parentHash, block: block, timestamp: timestamp, - } + }) return 
m.sealBlockErr } From be00aaa2cc3c8bea80e916d17c30a2b5d8d87696 Mon Sep 17 00:00:00 2001 From: George Knee Date: Mon, 2 Mar 2026 14:41:23 +0000 Subject: [PATCH 031/201] fix(supernode): l1 inclusion is the max l1 inclusion over all l2 blocks (#19343) * fix(supernode): l1 inclusion is the max l1 inclusion over all l2 blocks * fix: allow l1 inclusion block of 0 * refactor tests * change comparison * update test --- .../supernode/activity/interop/algo.go | 57 +-- .../supernode/activity/interop/algo_test.go | 361 ++++++++---------- 2 files changed, 192 insertions(+), 226 deletions(-) diff --git a/op-supernode/supernode/activity/interop/algo.go b/op-supernode/supernode/activity/interop/algo.go index 5d7a3b87a20a0..5b9a7efd33414 100644 --- a/op-supernode/supernode/activity/interop/algo.go +++ b/op-supernode/supernode/activity/interop/algo.go @@ -3,7 +3,6 @@ package interop import ( "errors" "fmt" - "math" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -28,6 +27,29 @@ var ( ErrMessageExpired = errors.New("initiating message has expired") ) +type blockPerChain = map[eth.ChainID]eth.BlockID + +// l1Inclusion returns the earliest L1 block such that all L2 blocks at the supplied timestamp were derived +// from a source at or before that L1 block. 
+func (i *Interop) l1Inclusion(ts uint64, blocksAtTimestamp blockPerChain) (eth.BlockID, error) { + l1Inclusion := eth.BlockID{} + for chainID := range blocksAtTimestamp { + chain, ok := i.chains[chainID] + if !ok { + continue + } + _, l1Block, err := chain.OptimisticAt(i.ctx, ts) + if err != nil { + i.log.Error("failed to get L1 inclusion for L2 block", "chainID", chainID, "timestamp", ts, "err", err) + return eth.BlockID{}, fmt.Errorf("chain %s: failed to get L1 inclusion: %w", chainID, err) + } + if l1Block.Number >= l1Inclusion.Number { + l1Inclusion = l1Block + } + } + return l1Inclusion, nil +} + // verifyInteropMessages validates all executing messages at the given timestamp. // Returns a Result indicating whether all messages are valid or which chains have invalid blocks. // @@ -37,37 +59,18 @@ var ( // - Verify the initiating message exists in the source chain's logsDB // - Verify the initiating message timestamp <= executing message timestamp // - Verify the initiating message hasn't expired (within ExpiryTime) -func (i *Interop) verifyInteropMessages(ts uint64, blocksAtTimestamp map[eth.ChainID]eth.BlockID) (Result, error) { +func (i *Interop) verifyInteropMessages(ts uint64, blocksAtTimestamp blockPerChain) (Result, error) { result := Result{ Timestamp: ts, - L2Heads: make(map[eth.ChainID]eth.BlockID), - InvalidHeads: make(map[eth.ChainID]eth.BlockID), + L2Heads: make(blockPerChain), + InvalidHeads: make(blockPerChain), } - // Compute L1Inclusion: the earliest L1 block such that all L2 blocks at the - // supplied timestamp were derived - // from a source at or before that L1 block. 
- earliestL1Inclusion := eth.BlockID{ - Number: math.MaxUint64, - } - for chainID := range blocksAtTimestamp { - chain, ok := i.chains[chainID] - if !ok { - continue - } - _, l1Block, err := chain.OptimisticAt(i.ctx, ts) - if err != nil { - i.log.Error("failed to get L1 inclusion for L2 block", "chainID", chainID, "timestamp", ts, "err", err) - return Result{}, fmt.Errorf("chain %s: failed to get L1 inclusion: %w", chainID, err) - } - if l1Block.Number < earliestL1Inclusion.Number { - earliestL1Inclusion = l1Block - } - } - if earliestL1Inclusion.Number == math.MaxUint64 { - return Result{}, fmt.Errorf("no L1 inclusion found for timestamp %d", ts) + if l1Inclusion, err := i.l1Inclusion(ts, blocksAtTimestamp); err != nil { + return Result{}, err + } else { + result.L1Inclusion = l1Inclusion } - result.L1Inclusion = earliestL1Inclusion for chainID, expectedBlock := range blocksAtTimestamp { db, ok := i.logsDBs[chainID] diff --git a/op-supernode/supernode/activity/interop/algo_test.go b/op-supernode/supernode/activity/interop/algo_test.go index 1c3cbdd48b3b7..7e921e74f3dea 100644 --- a/op-supernode/supernode/activity/interop/algo_test.go +++ b/op-supernode/supernode/activity/interop/algo_test.go @@ -59,6 +59,168 @@ func runVerifyInteropTest(t *testing.T, tc verifyInteropTestCase) { } } +func TestL1Inclusion(t *testing.T) { + t.Parallel() + + type l1InclusionTestCase struct { + name string + setup func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) + expectError bool + errorMsg string + validate func(t *testing.T, l1 eth.BlockID) + } + + tests := []l1InclusionTestCase{ + { + name: "SingleChain", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0x123")} + l1Block := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1")} + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{}, + chains: 
map[eth.ChainID]cc.ChainContainer{chainID: &algoMockChain{id: chainID, optimisticL1: l1Block}}, + } + return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} + }, + validate: func(t *testing.T, l1 eth.BlockID) { + require.Equal(t, eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1")}, l1) + }, + }, + { + name: "MultipleChains_HighestL1Selected", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chain1ID := eth.ChainIDFromUInt64(10) + chain2ID := eth.ChainIDFromUInt64(8453) + chain3ID := eth.ChainIDFromUInt64(420) + + // Chain 1 has L1 at 60 (highest - should be selected) + // Chain 2 has L1 at 45 (earliest) + // Chain 3 has L1 at 50 (middle) + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{}, + chains: map[eth.ChainID]cc.ChainContainer{ + chain1ID: &algoMockChain{id: chain1ID, optimisticL1: eth.BlockID{Number: 60, Hash: common.HexToHash("0xL1_1")}}, + chain2ID: &algoMockChain{id: chain2ID, optimisticL1: eth.BlockID{Number: 45, Hash: common.HexToHash("0xL1_2")}}, + chain3ID: &algoMockChain{id: chain3ID, optimisticL1: eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_3")}}, + }, + } + return interop, 1000, map[eth.ChainID]eth.BlockID{ + chain1ID: {Number: 100, Hash: common.HexToHash("0x1")}, + chain2ID: {Number: 200, Hash: common.HexToHash("0x2")}, + chain3ID: {Number: 150, Hash: common.HexToHash("0x3")}, + } + }, + validate: func(t *testing.T, l1 eth.BlockID) { + require.Equal(t, eth.BlockID{Number: 60, Hash: common.HexToHash("0xL1_1")}, l1) + }, + }, + { + name: "ChainNotInChainsMap_Skipped", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chain1ID := eth.ChainIDFromUInt64(10) + chain2ID := eth.ChainIDFromUInt64(8453) // Not in chains map + + l1Block1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_1")} + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{}, + chains: map[eth.ChainID]cc.ChainContainer{ + chain1ID: &algoMockChain{id: 
chain1ID, optimisticL1: l1Block1}, + // chain2ID NOT in chains map + }, + } + return interop, 1000, map[eth.ChainID]eth.BlockID{ + chain1ID: {Number: 100, Hash: common.HexToHash("0x1")}, + chain2ID: {Number: 200, Hash: common.HexToHash("0x2")}, + } + }, + validate: func(t *testing.T, l1 eth.BlockID) { + require.Equal(t, eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_1")}, l1) + }, + }, + { + name: "OptimisticAtError_ReturnsError", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{}, + chains: map[eth.ChainID]cc.ChainContainer{ + chainID: &algoMockChain{id: chainID, optimisticAtErr: errors.New("optimistic at error")}, + }, + } + return interop, 1000, map[eth.ChainID]eth.BlockID{ + chainID: {Number: 100, Hash: common.HexToHash("0x123")}, + } + }, + expectError: true, + errorMsg: "failed to get L1 inclusion", + }, + { + name: "NoChains_ReturnsEmpty", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{}, + chains: map[eth.ChainID]cc.ChainContainer{}, + } + return interop, 1000, map[eth.ChainID]eth.BlockID{} + }, + validate: func(t *testing.T, l1 eth.BlockID) { + require.Equal(t, eth.BlockID{}, l1) + }, + }, + { + name: "GenesisBlock_NoError", + setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { + chainID := eth.ChainIDFromUInt64(10) + // L1 genesis block at number 0 + l1Block := eth.BlockID{Number: 0, Hash: common.HexToHash("0xGenesisL1")} + + interop := &Interop{ + log: gethlog.New(), + logsDBs: map[eth.ChainID]LogsDB{}, + chains: map[eth.ChainID]cc.ChainContainer{chainID: &algoMockChain{id: chainID, optimisticL1: l1Block}}, + } + return interop, 0, map[eth.ChainID]eth.BlockID{ + chainID: {Number: 0, Hash: common.HexToHash("0x123")}, + } + }, + // Genesis blocks included at L1 block number 0 must not cause an error. 
+ validate: func(t *testing.T, l1 eth.BlockID) { + require.Equal(t, eth.BlockID{Number: 0, Hash: common.HexToHash("0xGenesisL1")}, l1) + }, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + interop, ts, blocks := tc.setup() + l1, err := interop.l1Inclusion(ts, blocks) + + if tc.expectError { + require.Error(t, err) + if tc.errorMsg != "" { + require.Contains(t, err.Error(), tc.errorMsg) + } + } else { + require.NoError(t, err) + } + + if tc.validate != nil { + tc.validate(t, l1) + } + }) + } +} + func TestVerifyInteropMessages(t *testing.T) { t.Parallel() @@ -600,205 +762,6 @@ func TestVerifyInteropMessages(t *testing.T) { require.Contains(t, result.InvalidHeads, invalidChainID) }, }, - // L1Inclusion tests - { - name: "L1Inclusion/SingleChain", - setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { - chainID := eth.ChainIDFromUInt64(10) - blockHash := common.HexToHash("0x123") - expectedBlock := eth.BlockID{Number: 100, Hash: blockHash} - l1Block := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1")} - - mockDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: blockHash, Number: 100, Time: 1000}, - openBlockExecMsg: nil, - } - - mockChain := &algoMockChain{ - id: chainID, - optimisticL1: l1Block, - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, - chains: map[eth.ChainID]cc.ChainContainer{chainID: mockChain}, - } - - return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} - }, - validate: func(t *testing.T, result Result) { - chainID := eth.ChainIDFromUInt64(10) - expectedBlock := eth.BlockID{Number: 100, Hash: common.HexToHash("0x123")} - expectedL1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1")} - require.True(t, result.IsValid()) - require.Empty(t, result.InvalidHeads) - require.Equal(t, expectedBlock, result.L2Heads[chainID]) - require.Equal(t, expectedL1, result.L1Inclusion) - }, - }, - { - name: 
"L1Inclusion/MultipleChains_EarliestL1Selected", - setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { - chain1ID := eth.ChainIDFromUInt64(10) - chain2ID := eth.ChainIDFromUInt64(8453) - chain3ID := eth.ChainIDFromUInt64(420) - - block1 := eth.BlockID{Number: 100, Hash: common.HexToHash("0x1")} - block2 := eth.BlockID{Number: 200, Hash: common.HexToHash("0x2")} - block3 := eth.BlockID{Number: 150, Hash: common.HexToHash("0x3")} - - // Chain 1 has L1 at 60 (highest) - // Chain 2 has L1 at 45 (earliest - should be selected) - // Chain 3 has L1 at 50 (middle) - l1Block1 := eth.BlockID{Number: 60, Hash: common.HexToHash("0xL1_1")} - l1Block2 := eth.BlockID{Number: 45, Hash: common.HexToHash("0xL1_2")} - l1Block3 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_3")} - - mockDB1 := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: block1.Hash, Number: block1.Number, Time: 1000}, - openBlockExecMsg: nil, - } - mockDB2 := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: block2.Hash, Number: block2.Number, Time: 1000}, - openBlockExecMsg: nil, - } - mockDB3 := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: block3.Hash, Number: block3.Number, Time: 1000}, - openBlockExecMsg: nil, - } - - mockChain1 := &algoMockChain{id: chain1ID, optimisticL1: l1Block1} - mockChain2 := &algoMockChain{id: chain2ID, optimisticL1: l1Block2} - mockChain3 := &algoMockChain{id: chain3ID, optimisticL1: l1Block3} - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - chain1ID: mockDB1, - chain2ID: mockDB2, - chain3ID: mockDB3, - }, - chains: map[eth.ChainID]cc.ChainContainer{ - chain1ID: mockChain1, - chain2ID: mockChain2, - chain3ID: mockChain3, - }, - } - - return interop, 1000, map[eth.ChainID]eth.BlockID{ - chain1ID: block1, - chain2ID: block2, - chain3ID: block3, - } - }, - validate: func(t *testing.T, result Result) { - // The earliest L1 block (45) should be selected - expectedL1 := eth.BlockID{Number: 45, Hash: 
common.HexToHash("0xL1_2")} - require.True(t, result.IsValid()) - require.Empty(t, result.InvalidHeads) - require.Equal(t, expectedL1, result.L1Inclusion) - require.Len(t, result.L2Heads, 3) - }, - }, - { - name: "L1Inclusion/ChainNotInChainsMap_Skipped", - setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { - chain1ID := eth.ChainIDFromUInt64(10) - chain2ID := eth.ChainIDFromUInt64(8453) // Not in chains map - - block1 := eth.BlockID{Number: 100, Hash: common.HexToHash("0x1")} - block2 := eth.BlockID{Number: 200, Hash: common.HexToHash("0x2")} - - l1Block1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_1")} - - mockDB1 := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: block1.Hash, Number: block1.Number, Time: 1000}, - openBlockExecMsg: nil, - } - mockDB2 := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: block2.Hash, Number: block2.Number, Time: 1000}, - openBlockExecMsg: nil, - } - - mockChain1 := &algoMockChain{id: chain1ID, optimisticL1: l1Block1} - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{ - chain1ID: mockDB1, - chain2ID: mockDB2, - }, - chains: map[eth.ChainID]cc.ChainContainer{ - chain1ID: mockChain1, - // chain2ID is NOT in the chains map - }, - } - - return interop, 1000, map[eth.ChainID]eth.BlockID{ - chain1ID: block1, - chain2ID: block2, - } - }, - validate: func(t *testing.T, result Result) { - chain2ID := eth.ChainIDFromUInt64(8453) - expectedL1 := eth.BlockID{Number: 50, Hash: common.HexToHash("0xL1_1")} - require.True(t, result.IsValid()) - require.Empty(t, result.InvalidHeads) - // chain2 should still be in L2Heads even though it's not in chains map - require.Contains(t, result.L2Heads, chain2ID) - // L1Inclusion should only consider chain1 - require.Equal(t, expectedL1, result.L1Inclusion) - }, - }, - { - name: "L1Inclusion/OptimisticAtError_ReturnsError", - setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { - chainID := eth.ChainIDFromUInt64(10) - blockHash := 
common.HexToHash("0x123") - expectedBlock := eth.BlockID{Number: 100, Hash: blockHash} - - mockDB := &algoMockLogsDB{ - openBlockRef: eth.BlockRef{Hash: blockHash, Number: 100, Time: 1000}, - openBlockExecMsg: nil, - } - - mockChain := &algoMockChain{ - id: chainID, - optimisticAtErr: errors.New("optimistic at error"), - } - - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{chainID: mockDB}, - chains: map[eth.ChainID]cc.ChainContainer{chainID: mockChain}, - } - - return interop, 1000, map[eth.ChainID]eth.BlockID{chainID: expectedBlock} - }, - expectError: true, - errorMsg: "failed to get L1 inclusion", - validate: func(t *testing.T, result Result) { - require.True(t, result.IsEmpty()) - }, - }, - { - name: "L1Inclusion/NoChains_Error", - setup: func() (*Interop, uint64, map[eth.ChainID]eth.BlockID) { - interop := &Interop{ - log: gethlog.New(), - logsDBs: map[eth.ChainID]LogsDB{}, - chains: map[eth.ChainID]cc.ChainContainer{}, - } - - return interop, 1000, map[eth.ChainID]eth.BlockID{} - }, - expectError: true, - errorMsg: "no L1 inclusion found", - validate: func(t *testing.T, result Result) { - require.True(t, result.IsEmpty()) - }, - }, // Error cases { name: "Errors/OpenBlockError", From 870a5c2ac3f24ea97978da15fdd9fa55993161d7 Mon Sep 17 00:00:00 2001 From: George Knee Date: Mon, 2 Mar 2026 14:53:43 +0000 Subject: [PATCH 032/201] drain all goroutines before exiting function (#19337) --- .../supernode/activity/interop/interop.go | 14 ++++-- .../activity/interop/interop_test.go | 44 ++++++++++++++++++- 2 files changed, 53 insertions(+), 5 deletions(-) diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index 604a53fdaaef9..490dce6a33952 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -389,14 +389,22 @@ func (i *Interop) checkChainsReady(ts uint64) (map[eth.ChainID]eth.BlockID, erro }(chain) } - 
// Collect results + // Collect all results before returning so every goroutine completes before the + // next call spawns a new batch, preventing accumulation of in-flight RPC calls. blocksAtTimestamp := make(map[eth.ChainID]eth.BlockID) + var firstErr error for range i.chains { r := <-results if r.err != nil { - return nil, r.err + if firstErr == nil { + firstErr = r.err + } + } else { + blocksAtTimestamp[r.chainID] = r.blockID } - blocksAtTimestamp[r.chainID] = r.blockID + } + if firstErr != nil { + return nil, firstErr } return blocksAtTimestamp, nil diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index 85106b0159cd4..8974f25bfb502 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -5,6 +5,7 @@ import ( "errors" "math/big" "sync" + "sync/atomic" "testing" "time" @@ -405,6 +406,32 @@ func TestCheckChainsReady(t *testing.T) { require.Len(t, blocks, 5) }, }, + { + // Verify that checkChainsReady drains ALL goroutine results before returning, + // even when one chain errors early. Without the drain, the slow chain's goroutine + // would still be running concurrently when the next call spawns a new batch — + // causing goroutine accumulation under repeated retries. + name: "drains all goroutine results before returning on error", + setup: func(h *interopTestHarness) *interopTestHarness { + return h.WithChain(10, func(m *mockChainContainer) { + // Errors immediately, causing an early-return path. + m.blockAtTimestampErr = ethereum.NotFound + }).WithChain(8453, func(m *mockChainContainer) { + // Slow chain: takes longer than the fast-error chain. + // After checkChainsReady returns, callsCompleted must be 1, + // proving the function waited for this goroutine to finish. 
+ m.blockAtTimestamp = eth.L2BlockRef{Number: 200} + m.blockAtTimestampDelay = 30 * time.Millisecond + }).Build() + }, + assert: func(t *testing.T, h *interopTestHarness, blocks map[eth.ChainID]eth.BlockID, err error) { + require.Error(t, err) + require.Nil(t, blocks) + // Both goroutines must have completed before checkChainsReady returned. + require.EqualValues(t, 1, h.Mock(10).callsCompleted.Load(), "chain 10 goroutine should have completed") + require.EqualValues(t, 1, h.Mock(8453).callsCompleted.Load(), "chain 8453 goroutine should have completed before return") + }, + }, } for _, tc := range tests { @@ -1193,8 +1220,13 @@ type mockChainContainer struct { currentL1 eth.BlockRef currentL1Err error - blockAtTimestamp eth.L2BlockRef - blockAtTimestampErr error + blockAtTimestamp eth.L2BlockRef + blockAtTimestampErr error + blockAtTimestampDelay time.Duration // if set, sleeps this long before responding + + // callsCompleted is incremented atomically when LocalSafeBlockAtTimestamp returns, + // allowing tests to verify all goroutines drained before checkChainsReady returned. + callsCompleted atomic.Int32 lastRequestedTimestamp uint64 mu sync.Mutex @@ -1227,6 +1259,14 @@ func (m *mockChainContainer) Resume(ctx context.Context) error { return nil } func (m *mockChainContainer) RegisterVerifier(v activity.VerificationActivity) { } func (m *mockChainContainer) LocalSafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { + // Simulate slow chains. Sleep is outside the lock so it doesn't block other + // concurrent mock operations during tests. + if d := m.blockAtTimestampDelay; d > 0 { + time.Sleep(d) + } + // Increment after any simulated delay so callers can verify the goroutine + // has fully completed (not just started) by the time they observe the count. 
+ defer m.callsCompleted.Add(1) m.mu.Lock() defer m.mu.Unlock() if m.blockAtTimestampErr != nil { From 9ec548205f5690fe02fa30e2468e46a82b61dc1b Mon Sep 17 00:00:00 2001 From: George Knee Date: Mon, 2 Mar 2026 14:58:19 +0000 Subject: [PATCH 033/201] kona/protocol/derive: handle "blob not found" correctly (#19328) * kona/protocol/derive: handle "blob not found" correctly * lint * lint * add a block number or hash in the error message * add named fields to BlobNotFound err * just fmt-fix * clippify * Simplify using inspect_err * simplifications --- rust/Cargo.lock | 1 + .../protocol/derive/src/errors/pipeline.rs | 5 + .../protocol/derive/src/errors/sources.rs | 28 ++++- .../protocol/derive/src/sources/blobs.rs | 109 ++++++++++++++---- .../derive/src/test_utils/blob_provider.rs | 11 +- .../providers/providers-alloy/Cargo.toml | 1 + .../providers-alloy/src/beacon_client.rs | 54 ++++++++- .../providers/providers-alloy/src/blobs.rs | 22 +++- 8 files changed, 197 insertions(+), 34 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 4bb8f4dcfed97..86c0a2a92b3c7 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -6210,6 +6210,7 @@ dependencies = [ "thiserror 2.0.18", "tokio", "tower 0.5.3", + "tracing", ] [[package]] diff --git a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs index 15b332c504c45..2da147b3f5c77 100644 --- a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs +++ b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs @@ -344,6 +344,10 @@ pub enum ResetError { /// The next l1 block provided to the managed traversal stage is not the expected one. #[error("Next L1 block hash mismatch: expected {0}, got {1}")] NextL1BlockHashMismatch(B256, B256), + /// Blobs referenced by an L1 block are permanently unavailable (e.g. missed beacon slot). + /// The pipeline must reset to move past the offending L1 block. 
+ #[error("Blobs unavailable: beacon node returned 404 for slot {0}")] + BlobsUnavailable(u64), } impl ResetError { @@ -431,6 +435,7 @@ mod tests { Default::default(), )), ResetError::HoloceneActivation, + ResetError::BlobsUnavailable(0), ]; for error in reset_errors { let expected = PipelineErrorKind::Reset(error.clone()); diff --git a/rust/kona/crates/protocol/derive/src/errors/sources.rs b/rust/kona/crates/protocol/derive/src/errors/sources.rs index ab752eae72d35..ba9932edfcee9 100644 --- a/rust/kona/crates/protocol/derive/src/errors/sources.rs +++ b/rust/kona/crates/protocol/derive/src/errors/sources.rs @@ -1,6 +1,6 @@ //! Error types for sources. -use crate::{PipelineError, PipelineErrorKind}; +use crate::{PipelineError, PipelineErrorKind, ResetError}; use alloc::string::{String, ToString}; use thiserror::Error; @@ -33,6 +33,16 @@ pub enum BlobProviderError { /// Blob decoding error. #[error("Blob decoding error: {0}")] BlobDecoding(#[from] BlobDecodingError), + /// The beacon node returned a 404 for the requested slot, indicating the slot was missed or + /// orphaned. Blobs for missed/orphaned slots will never become available, so the pipeline + /// must reset to move past the L1 block that referenced them. + #[error("Blob not found at slot {slot}: {reason}")] + BlobNotFound { + /// The beacon slot that returned 404. + slot: u64, + /// The underlying error message from the beacon client. + reason: String, + }, /// Error pertaining to the backend transport. #[error("{0}")] Backend(String), @@ -44,6 +54,9 @@ impl From for PipelineErrorKind { BlobProviderError::SidecarLengthMismatch(_, _) | BlobProviderError::SlotDerivation | BlobProviderError::BlobDecoding(_) => PipelineError::Provider(val.to_string()).crit(), + BlobProviderError::BlobNotFound { slot, .. 
} => { + ResetError::BlobsUnavailable(slot).reset() + } BlobProviderError::Backend(_) => PipelineError::Provider(val.to_string()).temp(), } } @@ -71,5 +84,18 @@ mod tests { let err: PipelineErrorKind = BlobProviderError::BlobDecoding(BlobDecodingError::InvalidFieldElement).into(); assert!(matches!(err, PipelineErrorKind::Critical(_))); + + let err: PipelineErrorKind = BlobProviderError::Backend("transport error".into()).into(); + assert!(matches!(err, PipelineErrorKind::Temporary(_))); + + // A 404 from the beacon node (missed/orphaned slot) must trigger a pipeline reset, + // not a temporary retry. Without this, the safe head stalls indefinitely. + let err: PipelineErrorKind = + BlobProviderError::BlobNotFound { slot: 13779552, reason: "slot not found".into() } + .into(); + assert!( + matches!(err, PipelineErrorKind::Reset(_)), + "BlobNotFound must map to Reset so the pipeline moves past the missed slot" + ); } } diff --git a/rust/kona/crates/protocol/derive/src/sources/blobs.rs b/rust/kona/crates/protocol/derive/src/sources/blobs.rs index c238ae93b8805..ddd914462ce12 100644 --- a/rust/kona/crates/protocol/derive/src/sources/blobs.rs +++ b/rust/kona/crates/protocol/derive/src/sources/blobs.rs @@ -2,7 +2,7 @@ use crate::{ BlobData, BlobProvider, BlobProviderError, ChainProvider, DataAvailabilityProvider, - PipelineError, PipelineResult, + PipelineError, PipelineErrorKind, PipelineResult, }; use alloc::{boxed::Box, string::ToString, vec::Vec}; use alloy_consensus::{ @@ -111,16 +111,15 @@ where &mut self, block_ref: &BlockInfo, batcher_address: Address, - ) -> Result<(), BlobProviderError> { + ) -> Result<(), PipelineErrorKind> { if self.open { return Ok(()); } - let info = self - .chain_provider - .block_info_and_transactions_by_hash(block_ref.hash) - .await - .map_err(|e| BlobProviderError::Backend(e.to_string()))?; + let info = + self.chain_provider.block_info_and_transactions_by_hash(block_ref.hash).await.map_err( + |e| -> PipelineErrorKind { 
BlobProviderError::Backend(e.to_string()).into() }, + )?; let (mut data, blob_hashes) = self.extract_blob_data(info.1, batcher_address); @@ -131,26 +130,45 @@ where return Ok(()); } - let blobs = - self.blob_fetcher.get_and_validate_blobs(block_ref, &blob_hashes).await.map_err( - |e| { - warn!(target: "blob_source", "Failed to fetch blobs: {e}"); - BlobProviderError::Backend(e.to_string()) - }, - )?; + // Convert via Into which routes: + // BlobNotFound -> PipelineErrorKind::Reset (missed/orphaned slot) + // Backend -> PipelineErrorKind::Temporary (transient, retry) + // others -> PipelineErrorKind::Critical + let blobs = self + .blob_fetcher + .get_and_validate_blobs(block_ref, &blob_hashes) + .await + .map_err(Into::::into) + .inspect_err(|kind| match kind { + PipelineErrorKind::Reset(_) => { + warn!( + target: "blob_source", + block_hash = %block_ref.hash, + block_number = block_ref.number, + timestamp = block_ref.timestamp, + "Blobs permanently unavailable (missed/orphaned beacon slot); \ + triggering pipeline reset" + ); + } + _ => { + warn!( + target: "blob_source", + block_hash = %block_ref.hash, + block_number = block_ref.number, + timestamp = block_ref.timestamp, + "Failed to fetch blobs: {kind}" + ); + } + })?; // Fill the blob pointers. 
let mut blob_index = 0; for blob in &mut data { - match blob.fill(&blobs, blob_index) { - Ok(should_increment) => { - if should_increment { - blob_index += 1; - } - } - Err(e) => { - return Err(e.into()); - } + let should_increment = blob + .fill(&blobs, blob_index) + .map_err(|e| -> PipelineErrorKind { BlobProviderError::from(e).into() })?; + if should_increment { + blob_index += 1; } } @@ -242,7 +260,7 @@ pub(crate) mod tests { let mut source = default_test_blob_source(); assert!(matches!( source.load_blobs(&BlockInfo::default(), Address::ZERO).await, - Err(BlobProviderError::Backend(_)) + Err(PipelineErrorKind::Temporary(_)) )); } @@ -270,7 +288,7 @@ pub(crate) mod tests { source.chain_provider.insert_block_with_transactions(1, block_info, txs); assert!(matches!( source.load_blobs(&BlockInfo::default(), batcher_address).await, - Err(BlobProviderError::Backend(_)) + Err(PipelineErrorKind::Critical(_)) )); } @@ -346,4 +364,45 @@ pub(crate) mod tests { let err = source.next(&BlockInfo::default(), Address::ZERO).await.unwrap_err(); assert!(matches!(err, PipelineErrorKind::Temporary(PipelineError::Provider(_)))); } + + /// Regression test: a beacon node 404 (missed/orphaned slot) must propagate through + /// `load_blobs` as `PipelineErrorKind::Reset`, not as a temporary retryable error. 
+ #[tokio::test] + async fn test_load_blobs_not_found_triggers_reset() { + let mut source = default_test_blob_source(); + let block_info = BlockInfo::default(); + let batcher_address = + alloy_primitives::address!("A83C816D4f9b2783761a22BA6FADB0eB0606D7B2"); + source.batcher_address = + alloy_primitives::address!("11E9CA82A3a762b4B5bd264d4173a242e7a77064"); + source.chain_provider.insert_block_with_transactions(1, block_info, valid_blob_txs()); + source.blob_fetcher.should_return_not_found = true; + + let err = source.load_blobs(&BlockInfo::default(), batcher_address).await.unwrap_err(); + assert!( + matches!(err, PipelineErrorKind::Reset(_)), + "expected Reset for missed beacon slot, got {err:?}" + ); + } + + /// Regression test: `BlobProviderError::BlobNotFound` from the blob fetcher must surface + /// through `next()` as `PipelineErrorKind::Reset`, triggering a pipeline reset. + /// Without this, a missed beacon slot causes an infinite retry loop and safe head stall. + #[tokio::test] + async fn test_missed_beacon_slot_triggers_pipeline_reset() { + let mut source = default_test_blob_source(); + let block_info = BlockInfo::default(); + let batcher_address = + alloy_primitives::address!("A83C816D4f9b2783761a22BA6FADB0eB0606D7B2"); + source.batcher_address = + alloy_primitives::address!("11E9CA82A3a762b4B5bd264d4173a242e7a77064"); + source.chain_provider.insert_block_with_transactions(1, block_info, valid_blob_txs()); + source.blob_fetcher.should_return_not_found = true; + + let err = source.next(&BlockInfo::default(), batcher_address).await.unwrap_err(); + assert!( + matches!(err, PipelineErrorKind::Reset(_)), + "expected Reset for missed beacon slot, got {err:?}" + ); + } } diff --git a/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs b/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs index f106636139233..42dcea5d1a772 100644 --- a/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs +++ 
b/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs @@ -12,8 +12,11 @@ use kona_protocol::BlockInfo; pub struct TestBlobProvider { /// Maps block hashes to blob data. pub blobs: HashMap, - /// whether the blob provider should return an error. + /// Whether the blob provider should return a generic backend error. pub should_error: bool, + /// When `true`, `get_and_validate_blobs` returns `BlobProviderError::BlobNotFound`, + /// simulating a missed/orphaned beacon slot (HTTP 404 from the beacon node). + pub should_return_not_found: bool, } impl TestBlobProvider { @@ -40,6 +43,12 @@ impl BlobProvider for TestBlobProvider { if self.should_error { return Err(BlobProviderError::SlotDerivation); } + if self.should_return_not_found { + return Err(BlobProviderError::BlobNotFound { + slot: 0, + reason: "mock: slot not found".into(), + }); + } let mut blobs = Vec::new(); for blob_hash in blob_hashes { if let Some(data) = self.blobs.get(blob_hash) { diff --git a/rust/kona/crates/providers/providers-alloy/Cargo.toml b/rust/kona/crates/providers/providers-alloy/Cargo.toml index 0eb62c6f63ceb..cde04410f4dc1 100644 --- a/rust/kona/crates/providers/providers-alloy/Cargo.toml +++ b/rust/kona/crates/providers/providers-alloy/Cargo.toml @@ -47,6 +47,7 @@ tower.workspace = true http-body-util.workspace = true c-kzg.workspace = true +tracing.workspace = true # `metrics` feature metrics = { workspace = true, optional = true } diff --git a/rust/kona/crates/providers/providers-alloy/src/beacon_client.rs b/rust/kona/crates/providers/providers-alloy/src/beacon_client.rs index 937e52bd57348..9c47546dfd9d0 100644 --- a/rust/kona/crates/providers/providers-alloy/src/beacon_client.rs +++ b/rust/kona/crates/providers/providers-alloy/src/beacon_client.rs @@ -73,6 +73,13 @@ pub trait BeaconClient { /// The error type for [`BeaconClient`] implementations. type Error: core::fmt::Display; + /// Returns the slot number if this error represents a beacon slot not found (HTTP 404). 
+ /// + /// Returns `None` for all other error kinds. This allows the blob provider to distinguish + /// permanently-unavailable slots (missed/orphaned beacon blocks) from transient errors, + /// and trigger a pipeline reset instead of retrying indefinitely. + fn slot_not_found(err: &Self::Error) -> Option; + /// Returns the slot interval in seconds. async fn slot_interval(&self) -> Result; @@ -105,6 +112,11 @@ pub enum BeaconClientError { #[error("HTTP request failed: {0}")] Http(#[from] reqwest::Error), + /// The beacon node returned HTTP 404 for the requested slot. This means the slot was missed + /// or orphaned and the blobs will never be available. + #[error("Beacon slot not found (HTTP 404) for slot {0}")] + SlotNotFound(u64), + /// Blob hash not found in beacon response. #[error("Blob hash not found in beacon response: {0}")] BlobNotFound(String), @@ -162,8 +174,16 @@ impl OnlineBeaconClient { .get(format!("{}/{}/{}", self.base, BLOBS_METHOD_PREFIX, slot)) .query(&[("versioned_hashes", ¶ms.join(","))]) .send() - .await? - .error_for_status()?; + .await?; + + // A 404 means the beacon slot was missed or orphaned. Blobs for such slots will never + // become available, so surface this as a distinct error rather than a generic HTTP error + // so that callers can trigger a pipeline reset instead of retrying indefinitely. 
+ if response.status() == reqwest::StatusCode::NOT_FOUND { + return Err(BeaconClientError::SlotNotFound(slot)); + } + + let response = response.error_for_status()?; let bundle = response.json::().await?; let returned_blobs_mapped_by_hash = bundle @@ -194,6 +214,10 @@ impl OnlineBeaconClient { impl BeaconClient for OnlineBeaconClient { type Error = BeaconClientError; + fn slot_not_found(err: &Self::Error) -> Option { + if let BeaconClientError::SlotNotFound(slot) = err { Some(*slot) } else { None } + } + async fn slot_interval(&self) -> Result { kona_macros::inc!(gauge, Metrics::BEACON_CLIENT_REQUESTS, "method" => "spec"); @@ -331,4 +355,30 @@ mod tests { blobs_mock.delete(); } } + + /// Regression test: a beacon node HTTP 404 for a given slot must return + /// `BeaconClientError::SlotNotFound` rather than a generic `Http` error. + /// This allows the blob provider layer to map it to `BlobProviderError::BlobNotFound` + /// and the pipeline to issue a reset rather than retrying indefinitely. 
+ #[tokio::test] + async fn test_filtered_beacon_blobs_404_returns_slot_not_found() { + let slot = 13779552u64; // slot from the real-world missed-slot incident + let test_blob_hash: FixedBytes<32> = FixedBytes::from_hex(TEST_BLOB_HASH_HEX).unwrap(); + let requested_blob_hashes: Vec = vec![test_blob_hash]; + + let server = MockServer::start(); + let blobs_mock = server.mock(|when, then| { + when.method(GET).path(format!("/eth/v1/beacon/blobs/{slot}")); + then.status(404).body(r#"{"code":404,"message":"Block not found"}"#); + }); + + let client = OnlineBeaconClient::new_http(server.base_url()); + let response = client.filtered_beacon_blobs(slot, &requested_blob_hashes).await; + blobs_mock.assert(); + + assert!( + matches!(response, Err(BeaconClientError::SlotNotFound(s)) if s == slot), + "expected SlotNotFound({slot}), got {response:?}" + ); + } } diff --git a/rust/kona/crates/providers/providers-alloy/src/blobs.rs b/rust/kona/crates/providers/providers-alloy/src/blobs.rs index 9153c7c953fc5..f1af67c535dca 100644 --- a/rust/kona/crates/providers/providers-alloy/src/blobs.rs +++ b/rust/kona/crates/providers/providers-alloy/src/blobs.rs @@ -9,6 +9,7 @@ use async_trait::async_trait; use kona_derive::{BlobProvider, BlobProviderError}; use kona_protocol::BlockInfo; use std::{boxed::Box, string::ToString, vec::Vec}; +use tracing::warn; /// A boxed blob. #[derive(Debug, Clone, PartialEq, Eq)] @@ -87,11 +88,22 @@ impl OnlineBlobProvider { ) -> Result, BlobProviderError> { kona_macros::inc!(gauge, Metrics::BLOB_FETCHES); - let result = self - .beacon_client - .filtered_beacon_blobs(slot, blob_hashes) - .await - .map_err(|e| BlobProviderError::Backend(e.to_string())); + let result = + self.beacon_client.filtered_beacon_blobs(slot, blob_hashes).await.map_err(|e| { + // The beacon node returned 404 for this slot. The slot was missed or + // orphaned; its blobs will never be available. Map to BlobNotFound so + // the pipeline issues a reset rather than retrying indefinitely. 
+ let Some(missing_slot) = B::slot_not_found(&e) else { + return BlobProviderError::Backend(e.to_string()); + }; + warn!( + target: "blob_provider", + slot = missing_slot, + "Beacon slot not found (404); slot may be missed or orphaned. \ + Triggering pipeline reset." + ); + BlobProviderError::BlobNotFound { slot: missing_slot, reason: e.to_string() } + }); #[cfg(feature = "metrics")] if result.is_err() { From 71a5692b4ce9cadda17618d95912dd7458d22ce6 Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Mon, 2 Mar 2026 12:20:09 -0800 Subject: [PATCH 034/201] fix(contracts-bedrock): remove artifact pulling, use unoptimized builds for CI tests (#19332) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(contracts-bedrock): remove artifact pulling, use unoptimized builds for CI tests The contracts CI has been using a GCS-based artifact caching system (pull-artifacts, publish-artifacts, use-latest-fallback) that adds complexity and unreliability. Artifacts built with the `ci` profile (optimizer enabled, 999999 runs) are slow to compile, and the caching layer introduces non-determinism -- PRs can get stale artifacts from the `latest` fallback, and profile mismatches between cached and actual builds cause full recompilation anyway. This PR simplifies the contracts CI by: 1. Removing all artifact pulling/publishing infrastructure: - Delete pull-artifacts.sh, publish-artifacts.sh, use-latest-fallback.sh, calculate-checksum.sh - Remove the publish-contract-artifacts job and develop-publish-contract-artifacts workflow - Remove install-zstd, pull-artifacts-conditional commands - Remove the publish_contract_artifacts_dispatch parameter 2. 
Using unoptimized builds for all test profiles: - Add new `liteci` profile: optimizer=false with CI-level fuzz/invariant runs (128/64/32) - Update `ciheavy` profile: optimizer=false (was inheriting optimizer=true from default) - Fix pre-existing TOML ordering bug in `lite` profile where additional_compiler_profiles was accidentally nested under [profile.lite.invariant] 3. Splitting test jobs by branch: - PRs: use `liteci` profile (fast unoptimized compile, same fuzz/invariant thoroughness as ci) - develop post-merge: use `ci` profile (optimized, mirrors production bytecode) 4. Keeping the build job unchanged: - contracts-bedrock-build still uses `ci` profile with --skip test - Downstream checks (size-check, snapshots, interfaces, semver) still run against optimized artifacts via workspace The net effect is that PR test jobs compile from scratch without the optimizer, which is fast enough to not need caching, while the build job still produces optimized artifacts for code size and correctness checks. Post-merge runs on develop use the full optimized ci profile. Co-Authored-By: Claude Opus 4.6 * fix(contracts-bedrock): mock DWETH/ETHLockbox impls for unoptimized fork tests When running fork tests with an unoptimized Foundry profile (e.g., liteci), CREATE2 produces different implementation addresses because bytecode differs from production builds. Most proxies are re-pointed during the OPCM upgrade, but DelayedWETH and ETHLockbox proxies are not — they retain mainnet's optimized implementations. This causes DWETH-20 and LOCKBOX-20 validator errors. Mock getProxyImplementation for these proxies conditionally based on the Foundry profile, so the validator sees the expected addresses. 
Co-Authored-By: Claude Opus 4.6 * style: forge fmt Co-Authored-By: Claude Opus 4.6 * fix(contracts-bedrock): rename skipIfCoverage to skipIfUnoptimized Unify skipIfCoverage into a broader skipIfUnoptimized that skips tests requiring production-like bytecode when running under coverage mode OR an unoptimized Foundry profile. Both change bytecode in ways that break gas measurement tests, bytecode verification, and CREATE2 address assumptions. This fixes test_batchUpgrade_multipleChains_succeeds which exceeded the EIP-7825 gas target with unoptimized bytecode (18.1M vs 16.7M limit). Co-Authored-By: Claude Opus 4.6 * fix(contracts-bedrock): fix SafeCall and L1ChugSplashProxy tests for unoptimized profiles Both tests had hardcoded profile name checks ("lite", "cicoverage") that didn't include "liteci" or "ciheavy", causing them to use optimized gas values with unoptimized bytecode. Add Config.isUnoptimized() as the canonical check for non-production bytecode (coverage mode or unoptimized Foundry profile). Update skipIfUnoptimized() and both test files to use it. Co-Authored-By: Claude Opus 4.6 * fix(ci): retain contract artifact publishing on op-contracts tags Restore the publish-contract-artifacts CI job and wire it to op-contracts/v* tags instead of develop-branch pushes. This ensures contract artifacts continue to be published to GCS when tagged, which is needed by op-deployer and netchef for devnet deployments. Co-Authored-By: Claude Opus 4.6 * fix(ci): restore comment in mktar/main.go referencing publish-artifacts.sh Co-Authored-By: Claude Opus 4.6 * ci: bump cimg/base from 2024.01 to 2026.03 The Docker daemon on CircleCI remote Docker hosts now requires API v1.44+, but cimg/base:2024.01 ships with Docker client v1.43. Bump to cimg/base:2026.03 to fix Docker API version mismatch errors in analyze-op-program-client and check-kontrol-build jobs. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .circleci/config.yml | 7 +- .circleci/continue/main.yml | 136 ++++++++++-------- .circleci/continue/rust-ci.yml | 2 +- .circleci/continue/rust-e2e.yml | 2 +- .circleci/rust-nightly-bump.yml | 2 +- .../docs/runbook.md | 2 +- packages/contracts-bedrock/foundry.toml | 80 ++++++++++- .../scripts/libraries/Config.sol | 12 ++ .../scripts/ops/pull-artifacts.sh | 118 --------------- .../scripts/ops/use-latest-fallback.sh | 62 -------- .../test/L1/OPContractsManager.t.sol | 53 ++++++- .../OPContractsManagerStandardValidator.t.sol | 51 +++++++ .../test/L1/opcm/OPContractsManagerV2.t.sol | 2 +- .../test/legacy/L1ChugSplashProxy.t.sol | 12 +- .../test/libraries/SafeCall.t.sol | 26 ++-- .../test/scripts/VerifyOPCM.t.sol | 43 ++---- .../contracts-bedrock/test/setup/Setup.sol | 11 +- .../test/universal/BenchmarkTest.t.sol | 6 +- 18 files changed, 307 insertions(+), 320 deletions(-) delete mode 100755 packages/contracts-bedrock/scripts/ops/pull-artifacts.sh delete mode 100755 packages/contracts-bedrock/scripts/ops/use-latest-fallback.sh diff --git a/.circleci/config.yml b/.circleci/config.yml index 8dfc24a82f7ae..73ff5875102aa 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -9,7 +9,7 @@ setup: true parameters: default_docker_image: type: string - default: cimg/base:2024.01 + default: cimg/base:2026.03 base_image: type: string default: default @@ -41,9 +41,6 @@ parameters: docker_publish_dispatch: type: boolean default: false - publish_contract_artifacts_dispatch: - type: boolean - default: false stale_check_dispatch: type: boolean default: false @@ -112,7 +109,7 @@ workflows: .* c-cannon_full_test_dispatch << pipeline.parameters.cannon_full_test_dispatch >> .circleci/continue/main.yml .* c-sdk_dispatch << pipeline.parameters.sdk_dispatch >> .circleci/continue/main.yml .* c-docker_publish_dispatch << pipeline.parameters.docker_publish_dispatch >> .circleci/continue/main.yml - .* 
c-publish_contract_artifacts_dispatch << pipeline.parameters.publish_contract_artifacts_dispatch >> .circleci/continue/main.yml + .* c-stale_check_dispatch << pipeline.parameters.stale_check_dispatch >> .circleci/continue/main.yml .* c-contracts_coverage_dispatch << pipeline.parameters.contracts_coverage_dispatch >> .circleci/continue/main.yml .* c-heavy_fuzz_dispatch << pipeline.parameters.heavy_fuzz_dispatch >> .circleci/continue/main.yml diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 4e4d24af23a5f..114e27a9edaf5 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -3,7 +3,7 @@ version: 2.1 parameters: c-default_docker_image: type: string - default: cimg/base:2024.01 + default: cimg/base:2026.03 c-base_image: type: string default: default @@ -35,9 +35,6 @@ parameters: c-docker_publish_dispatch: type: boolean default: false - c-publish_contract_artifacts_dispatch: - type: boolean - default: false c-stale_check_dispatch: type: boolean default: false @@ -364,14 +361,6 @@ commands: - ~/go/pkg/mod key: go-<>-<>-<>-{{ checksum "<>/go.mod" }}-{{ checksum "<>/go.sum" }} - pull-artifacts-conditional: - description: "Pull artifacts with conditional fallback based on branch and PR labels" - steps: - - run: - name: Pull artifacts - command: bash scripts/ops/use-latest-fallback.sh - working_directory: packages/contracts-bedrock - # --- Rust environment setup commands --- rust-install-toolchain: description: "Install Rust toolchain via rustup" @@ -999,15 +988,10 @@ jobs: - utils/checkout-with-mise: checkout-method: blobless enable-mise-cache: true - - install-zstd - install-contracts-dependencies - run: name: Print forge version command: forge --version - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - run: name: Build contracts command: just forge-build <> @@ -1320,7 +1304,6 @@ jobs: - utils/checkout-with-mise: checkout-method: full 
enable-mise-cache: true - - install-zstd - run: name: Check if test list is empty command: | @@ -1341,7 +1324,6 @@ jobs: name: Print forge version command: forge --version working_directory: packages/contracts-bedrock - - pull-artifacts-conditional - go-restore-cache: namespace: packages/contracts-bedrock/scripts/go-ffi - run: @@ -1369,7 +1351,7 @@ jobs: name: Print failed test traces command: just test-rerun environment: - FOUNDRY_PROFILE: ci + FOUNDRY_PROFILE: <> working_directory: packages/contracts-bedrock when: on_fail - store_test_results: @@ -1391,7 +1373,6 @@ jobs: checkout-method: full enable-mise-cache: true - install-contracts-dependencies - - install-zstd - run: name: Print dependencies command: just dep-status @@ -1400,10 +1381,6 @@ jobs: name: Print forge version command: forge --version working_directory: packages/contracts-bedrock - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - run: name: Build go-ffi command: just build-go-ffi @@ -1491,9 +1468,6 @@ jobs: checkout-method: full enable-mise-cache: true - install-contracts-dependencies - - install-zstd - - attach_workspace: - at: . - check-changed: patterns: contracts-bedrock - install-solc-compilers @@ -1505,7 +1479,6 @@ jobs: name: Print forge version command: forge --version working_directory: packages/contracts-bedrock - - pull-artifacts-conditional - run: name: Install lcov command: | @@ -1577,6 +1550,10 @@ jobs: fork_base_rpc: description: Fork Base RPC type: string + test_profile: + description: Profile to use for testing + type: string + default: ci features: description: Comma-separated list of features to enable (e.g., "OPTIMISM_PORTAL_INTEROP", "CUSTOM_GAS_TOKEN") type: string @@ -1588,9 +1565,6 @@ jobs: - utils/checkout-with-mise: enable-mise-cache: true - install-contracts-dependencies - - install-zstd - - attach_workspace: - at: . 
- check-changed: patterns: contracts-bedrock - install-solc-compilers @@ -1602,7 +1576,6 @@ jobs: name: Print forge version command: forge --version working_directory: packages/contracts-bedrock - - pull-artifacts-conditional - run: name: Write pinned block number for cache key command: | @@ -1626,7 +1599,7 @@ jobs: JUNIT_TEST_PATH: results/results.xml FOUNDRY_FUZZ_SEED: 42424242 FOUNDRY_FUZZ_RUNS: 1 - FOUNDRY_PROFILE: ci + FOUNDRY_PROFILE: <> ETH_RPC_URL: <> FORK_OP_CHAIN: <> FORK_BASE_CHAIN: <> @@ -1639,7 +1612,7 @@ jobs: environment: FOUNDRY_FUZZ_SEED: 42424242 FOUNDRY_FUZZ_RUNS: 1 - FOUNDRY_PROFILE: ci + FOUNDRY_PROFILE: <> ETH_RPC_URL: <> FORK_OP_CHAIN: <> FORK_BASE_CHAIN: <> @@ -1735,17 +1708,12 @@ jobs: steps: - utils/checkout-with-mise: enable-mise-cache: true - - install-zstd - install-contracts-dependencies - check-changed: patterns: contracts-bedrock - run: name: Print forge version command: forge --version - - run: - name: Pull cached artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - run: name: Run checks command: just check-fast @@ -2762,10 +2730,6 @@ jobs: enable-mise-cache: true - install-contracts-dependencies - install-zstd - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - run: name: Build contracts environment: @@ -2777,7 +2741,6 @@ jobs: command: bash scripts/ops/publish-artifacts.sh working_directory: packages/contracts-bedrock - go-release: parameters: module: @@ -2977,9 +2940,11 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - slack + # On PRs, run tests with lite profile for better build times. 
- contracts-bedrock-tests: name: contracts-bedrock-tests <> test_list: find test -name "*.t.sol" + test_profile: liteci features: <> matrix: parameters: @@ -2988,6 +2953,25 @@ workflows: - circleci-repo-readonly-authenticated-github-token - slack check_changed_patterns: contracts-bedrock,op-node + filters: + branches: + ignore: develop + # On develop, run tests with ci profile to mirror production. + - contracts-bedrock-tests: + name: contracts-bedrock-tests-develop <> + test_list: find test -name "*.t.sol" + test_profile: ci + features: <> + matrix: + parameters: + features: *features_matrix + context: + - circleci-repo-readonly-authenticated-github-token + - slack + check_changed_patterns: contracts-bedrock,op-node + filters: + branches: + only: develop - contracts-bedrock-coverage: # Generate coverage reports. name: contracts-bedrock-coverage <> @@ -3000,11 +2984,13 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - slack + # On PRs, run upgrade tests with lite profile for better build times. - contracts-bedrock-tests-upgrade: name: contracts-bedrock-tests-upgrade op-mainnet <> fork_op_chain: op fork_base_chain: mainnet fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + test_profile: liteci features: <> matrix: parameters: @@ -3012,17 +2998,58 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - slack + filters: + branches: + ignore: develop + # On develop, run upgrade tests with ci profile to mirror production. + - contracts-bedrock-tests-upgrade: + name: contracts-bedrock-tests-upgrade-develop op-mainnet <> + fork_op_chain: op + fork_base_chain: mainnet + fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + test_profile: ci + features: <> + matrix: + parameters: + features: *features_matrix + context: + - circleci-repo-readonly-authenticated-github-token + - slack + filters: + branches: + only: develop + # On PRs, run chain-specific upgrade tests with lite profile for better build times. 
- contracts-bedrock-tests-upgrade: name: contracts-bedrock-tests-upgrade <>-mainnet fork_op_chain: <> fork_base_chain: mainnet fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + test_profile: liteci matrix: parameters: fork_op_chain: ["base", "ink", "unichain"] context: - circleci-repo-readonly-authenticated-github-token - slack + filters: + branches: + ignore: develop + # On develop, run chain-specific upgrade tests with ci profile to mirror production. + - contracts-bedrock-tests-upgrade: + name: contracts-bedrock-tests-upgrade-develop <>-mainnet + fork_op_chain: <> + fork_base_chain: mainnet + fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io + test_profile: ci + matrix: + parameters: + fork_op_chain: ["base", "ink", "unichain"] + context: + - circleci-repo-readonly-authenticated-github-token + - slack + filters: + branches: + only: develop - contracts-bedrock-checks: requires: - contracts-bedrock-build @@ -3378,21 +3405,14 @@ workflows: - slack - circleci-repo-readonly-authenticated-github-token - develop-publish-contract-artifacts: - when: - or: - - and: - - equal: ["develop", <>] - - equal: ["webhook", << pipeline.trigger_source >>] - - and: - - equal: - [ - true, - <>, - ] - - equal: ["api", << pipeline.trigger_source >>] + publish-contract-artifacts-on-tag: jobs: - publish-contract-artifacts: + filters: + tags: + only: /^op-contracts\/v.*/ + branches: + ignore: /.*/ context: - circleci-repo-readonly-authenticated-github-token diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index ddb3bca44a241..ecb1e732f4021 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -11,7 +11,7 @@ orbs: parameters: c-default_docker_image: type: string - default: cimg/base:2024.01 + default: cimg/base:2026.03 c-base_image: type: string default: default diff --git a/.circleci/continue/rust-e2e.yml b/.circleci/continue/rust-e2e.yml index 8e6e0e61c4d3d..c4d51e4f7e409 100644 --- a/.circleci/continue/rust-e2e.yml 
+++ b/.circleci/continue/rust-e2e.yml @@ -9,7 +9,7 @@ parameters: # Required parameters (also in main.yml, merged during continuation) c-default_docker_image: type: string - default: cimg/base:2024.01 + default: cimg/base:2026.03 c-rust_e2e_dispatch: type: boolean default: false diff --git a/.circleci/rust-nightly-bump.yml b/.circleci/rust-nightly-bump.yml index 6d18860dd704c..318f1c2ac1518 100644 --- a/.circleci/rust-nightly-bump.yml +++ b/.circleci/rust-nightly-bump.yml @@ -6,7 +6,7 @@ version: 2.1 jobs: bump-nightly: docker: - - image: cimg/base:2024.01 + - image: cimg/base:2026.03 steps: - checkout diff --git a/ops/ai-eng/contracts-test-maintenance/docs/runbook.md b/ops/ai-eng/contracts-test-maintenance/docs/runbook.md index 5da50e030cd73..11595dbf31d36 100644 --- a/ops/ai-eng/contracts-test-maintenance/docs/runbook.md +++ b/ops/ai-eng/contracts-test-maintenance/docs/runbook.md @@ -68,7 +68,7 @@ The system is integrated into CircleCI via the `ai-contracts-test-workflow` work ai-contracts-test: resource_class: medium docker: - - image: cimg/base:2024.01 + - image: cimg/base:2026.03 steps: - utils/checkout-with-mise - run: just ai-contracts-test diff --git a/packages/contracts-bedrock/foundry.toml b/packages/contracts-bedrock/foundry.toml index f35e363b3bc6e..4eff46f230b1c 100644 --- a/packages/contracts-bedrock/foundry.toml +++ b/packages/contracts-bedrock/foundry.toml @@ -123,6 +123,32 @@ depth = 1 # PROFILE: CIHEAVY # ################################################################ +[profile.ciheavy] +optimizer = false +optimizer_runs = 0 + +# IMPORTANT: +# See the info in the "DEFAULT" profile to understand this section. 
+additional_compiler_profiles = [ + { name = "dispute", optimizer_runs = 0 }, +] +compilation_restrictions = [ + { paths = "src/dispute/FaultDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/PermissionedDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/SuperFaultDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/SuperPermissionedDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/L1/OPContractsManager.sol", optimizer_runs = 0 }, + { paths = "src/L1/OPContractsManagerStandardValidator.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerV2.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerContainer.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerMigrator.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerUtils.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerUtilsCaller.sol", optimizer_runs = 0 }, + { paths = "src/L1/OptimismPortal2.sol", optimizer_runs = 0 }, + { paths = "src/L1/ProtocolVersions.sol", optimizer_runs = 0 }, + { paths = "src/universal/StorageSetter.sol", optimizer_runs = 0 } +] + [profile.ciheavy.fuzz] runs = 20000 timeout = 300 @@ -133,19 +159,52 @@ depth = 512 timeout = 300 ################################################################ -# PROFILE: LITE # +# PROFILE: LITECI # ################################################################ +# Unoptimized build (fast compile) with CI-level fuzz/invariant +# settings. Used for PR test runs where build speed matters but +# test thoroughness should match CI. -[profile.lite] +[profile.liteci] optimizer = false optimizer_runs = 0 -[profile.lite.fuzz] -runs = 8 +# IMPORTANT: +# See the info in the "DEFAULT" profile to understand this section. 
+additional_compiler_profiles = [ + { name = "dispute", optimizer_runs = 0 }, +] +compilation_restrictions = [ + { paths = "src/dispute/FaultDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/PermissionedDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/SuperFaultDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/dispute/SuperPermissionedDisputeGame.sol", optimizer_runs = 0 }, + { paths = "src/L1/OPContractsManager.sol", optimizer_runs = 0 }, + { paths = "src/L1/OPContractsManagerStandardValidator.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerV2.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerContainer.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerMigrator.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerUtils.sol", optimizer_runs = 0 }, + { paths = "src/L1/opcm/OPContractsManagerUtilsCaller.sol", optimizer_runs = 0 }, + { paths = "src/L1/OptimismPortal2.sol", optimizer_runs = 0 }, + { paths = "src/L1/ProtocolVersions.sol", optimizer_runs = 0 }, + { paths = "src/universal/StorageSetter.sol", optimizer_runs = 0 } +] -[profile.lite.invariant] -runs = 8 -depth = 8 +[profile.liteci.fuzz] +runs = 128 + +[profile.liteci.invariant] +runs = 64 +depth = 32 + +################################################################ +# PROFILE: LITE # +################################################################ + +[profile.lite] +optimizer = false +optimizer_runs = 0 # IMPORTANT: # See the info in the "DEFAULT" profile to understand this section. 
@@ -169,6 +228,13 @@ compilation_restrictions = [ { paths = "src/universal/StorageSetter.sol", optimizer_runs = 0 } ] +[profile.lite.fuzz] +runs = 8 + +[profile.lite.invariant] +runs = 8 +depth = 8 + ################################################################ # PROFILE: KONTROL # ################################################################ diff --git a/packages/contracts-bedrock/scripts/libraries/Config.sol b/packages/contracts-bedrock/scripts/libraries/Config.sol index 4ed50ecb95197..f38a1e153a7a7 100644 --- a/packages/contracts-bedrock/scripts/libraries/Config.sol +++ b/packages/contracts-bedrock/scripts/libraries/Config.sol @@ -2,6 +2,7 @@ pragma solidity ^0.8.0; import { Vm, VmSafe } from "forge-std/Vm.sol"; +import { LibString } from "@solady/utils/LibString.sol"; /// @notice Enum representing different ways of outputting genesis allocs. /// @custom:value NONE No output, used in internal tests. @@ -266,6 +267,17 @@ library Config { return vm.envOr("FOUNDRY_PROFILE", string("default")); } + /// @notice Returns true when the compiler output is not production-like. This includes + /// coverage mode (which adds instrumentation) and unoptimized Foundry profiles + /// (which produce different bytecode, CREATE2 addresses, and gas costs). + function isUnoptimized() internal view returns (bool) { + if (vm.isContext(VmSafe.ForgeContext.Coverage)) { + return true; + } + string memory profile = foundryProfile(); + return !LibString.eq(profile, "default") && !LibString.eq(profile, "ci"); + } + /// @notice Returns the path to the superchain ops allocs. 
function superchainOpsAllocsPath() internal view returns (string memory) { return vm.envOr("SUPERCHAIN_OPS_ALLOCS_PATH", string("")); diff --git a/packages/contracts-bedrock/scripts/ops/pull-artifacts.sh b/packages/contracts-bedrock/scripts/ops/pull-artifacts.sh deleted file mode 100755 index 05f1d8a3f1a28..0000000000000 --- a/packages/contracts-bedrock/scripts/ops/pull-artifacts.sh +++ /dev/null @@ -1,118 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -usage() { - echo "Usage: $0" - echo "" - echo "Download contract artifacts from GCS, preferring zstd if available." - echo "" - echo "If zstd is available, downloads .tar.zst files when present." - echo "Otherwise, falls back to .tar.gz files." - exit 0 -} - -echoerr() { - echo "$@" 1>&2 -} - -download_and_extract() { - local archive_name=$1 - - echoerr "> Downloading..." - curl --fail --location --connect-timeout 30 --max-time 300 --tlsv1.2 -o "$archive_name" "https://storage.googleapis.com/oplabs-contract-artifacts/$archive_name" - echoerr "> Done." - - echoerr "> Cleaning up existing artifacts..." - rm -rf artifacts - rm -rf forge-artifacts - rm -rf cache - echoerr "> Done." - - echoerr "> Extracting artifacts..." - # Only extract artifacts, forge-artifacts, and cache folders (nothing else) - if [[ "$archive_name" == *.tar.zst ]]; then - zstd -dc "$archive_name" | tar -xf - --exclude='*..*' artifacts forge-artifacts cache - else - tar -xzvf "$archive_name" --exclude='*..*' artifacts forge-artifacts cache - fi - echoerr "> Done." - - echoerr "> Cleaning up." - rm "$archive_name" - echoerr "> Done." - exit 0 -} - -# Check for help flag -if [ "${1:-}" = "--help" ] || [ "${1:-}" = "-h" ]; then - usage -fi - -# Check for fallback-to-latest flag -USE_LATEST_FALLBACK=false -if [ "${1:-}" = "--fallback-to-latest" ]; then - USE_LATEST_FALLBACK=true - echoerr "> Fallback to latest enabled" -fi - -SCRIPT_DIR=$(cd -- "$(dirname -- "${BASH_SOURCE[0]}")" &> /dev/null && pwd) -CONTRACTS_DIR="$SCRIPT_DIR/../.." 
- -cd "$CONTRACTS_DIR" - -if command -v zstd > /dev/null 2>&1; then - HAS_ZSTD=true - echoerr "> zstd found, will prefer .tar.zst files" -else - HAS_ZSTD=false - echoerr "> zstd not found, will prefer .tar.gz files" -fi - -checksum=$(bash scripts/ops/calculate-checksum.sh) - -echoerr "> Checking for existing artifacts..." - -if [ "$HAS_ZSTD" = true ]; then - archive_name_zst="artifacts-v1-$checksum.tar.zst" - exists_zst=$(curl -s -o /dev/null --fail -LI "https://storage.googleapis.com/oplabs-contract-artifacts/$archive_name_zst" || echo "fail") - - if [ "$exists_zst" != "fail" ]; then - download_and_extract "$archive_name_zst" - fi - - # Try latest fallback if enabled - if [ "$USE_LATEST_FALLBACK" = true ]; then - echoerr "> Exact checksum not found, trying latest artifacts..." - archive_name_zst="artifacts-v1-latest.tar.zst" - exists_latest_zst=$(curl -s -o /dev/null --fail -LI "https://storage.googleapis.com/oplabs-contract-artifacts/$archive_name_zst" || echo "fail") - - if [ "$exists_latest_zst" != "fail" ]; then - download_and_extract "$archive_name_zst" - fi - fi -fi - -archive_name_gz="artifacts-v1-$checksum.tar.gz" -exists_gz=$(curl -s -o /dev/null --fail -LI "https://storage.googleapis.com/oplabs-contract-artifacts/$archive_name_gz" || echo "fail") - -if [ "$exists_gz" == "fail" ]; then - # Try latest fallback if enabled - if [ "$USE_LATEST_FALLBACK" = true ]; then - echoerr "> Exact checksum not found, trying latest artifacts..." - archive_name_gz="artifacts-v1-latest.tar.gz" - exists_latest_gz=$(curl -s -o /dev/null --fail -LI "https://storage.googleapis.com/oplabs-contract-artifacts/$archive_name_gz" || echo "fail") - - if [ "$exists_latest_gz" == "fail" ]; then - echoerr "> No existing artifacts found (including latest), exiting." - exit 0 - fi - - echoerr "> Found latest .tar.gz artifacts." - else - echoerr "> No existing artifacts found, exiting." 
- exit 0 - fi -fi - -download_and_extract "$archive_name_gz" diff --git a/packages/contracts-bedrock/scripts/ops/use-latest-fallback.sh b/packages/contracts-bedrock/scripts/ops/use-latest-fallback.sh deleted file mode 100755 index 5c11846e55158..0000000000000 --- a/packages/contracts-bedrock/scripts/ops/use-latest-fallback.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -# Pulls artifacts with conditional fallback based on branch and PR labels -# - PR branches: Use fallback by default (faster builds) -# - develop branch: Always build fresh (accuracy) -# - force-use-fresh-artifacts label: Override fallback (emergency escape hatch) - -# Determine the target branch for this PR -SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" -# shellcheck source=/dev/null -source "$SCRIPT_DIR/get-target-branch.sh" - -USE_FALLBACK=false - -# Check if we're on a PR (not develop branch) -if [ "${CIRCLE_BRANCH:-}" != "develop" ]; then - USE_FALLBACK=true - - # Check if PR has force-use-fresh-artifacts label (override fallback) - # Get PR number from available sources - PR_NUMBER="" - - # Try extracting from CIRCLE_PULL_REQUEST URL (internal PRs) - if [ -n "${CIRCLE_PULL_REQUEST:-}" ]; then - PR_NUMBER=$(echo "${CIRCLE_PULL_REQUEST}" | grep -o '[0-9]*$') - # For external PRs, find PR via commit SHA - elif [ -n "${CIRCLE_SHA1:-}" ]; then - if PR_SEARCH=$(curl -sS --fail --connect-timeout 10 --max-time 30 -H "Authorization: token ${MISE_GITHUB_TOKEN}" \ - "https://api.github.com/repos/ethereum-optimism/optimism/commits/${CIRCLE_SHA1}/pulls" 2>/dev/null); then - # Get the first PR number from the response - PR_NUMBER=$(echo "$PR_SEARCH" | jq -r '.[0].number // empty' 2>/dev/null) - fi - fi - - if [ -n "$PR_NUMBER" ] && [ "$PR_NUMBER" != "null" ]; then - # Query GitHub API for PR details (fail safe: proceed with fallback on error) - if PR_DATA=$(curl -sS --fail --connect-timeout 10 --max-time 30 -H "Authorization: token ${MISE_GITHUB_TOKEN}" \ - 
"https://api.github.com/repos/ethereum-optimism/optimism/pulls/${PR_NUMBER}" 2>/dev/null); then - - if echo "$PR_DATA" | jq -e 'any(.labels[]; .name == "force-use-fresh-artifacts")' >/dev/null 2>&1; then - echo "Force use fresh artifacts label detected, skipping fallback" - USE_FALLBACK=false - fi - else - echo "Warning: Failed to fetch PR labels from GitHub API, proceeding with fallback" - fi - fi -fi - -echo "TARGET_BRANCH=$TARGET_BRANCH" -# Ensure that PRs targetting anything other than develop do not use the fallback -if [ "$TARGET_BRANCH" != "develop" ]; then - USE_FALLBACK=false -fi - -# Pull artifacts with or without fallback -if [ "$USE_FALLBACK" = "true" ]; then - bash scripts/ops/pull-artifacts.sh --fallback-to-latest -else - bash scripts/ops/pull-artifacts.sh -fi diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index e175104ef75b5..5cac9be3b7aa7 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -28,6 +28,7 @@ import { DevFeatures } from "src/libraries/DevFeatures.sol"; import { Types as LibTypes } from "src/libraries/Types.sol"; import { Encoding } from "src/libraries/Encoding.sol"; import { Hashing } from "src/libraries/Hashing.sol"; +import { LibString } from "@solady/utils/LibString.sol"; // Interfaces import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; @@ -269,6 +270,56 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { // try to apply to this function call instead. IOPContractsManagerStandardValidator validator = _opcm.opcmStandardValidator(); + // When running fork tests with an unoptimized Foundry profile (e.g., liteci), + // implementation contracts deployed via CREATE2 get different addresses because + // unoptimized bytecode differs from production builds. 
Most proxies are re-pointed + // to new implementations during the OPCM upgrade, so their getProxyImplementation + // checks pass regardless of optimizer settings. However, DelayedWETH and ETHLockbox + // proxies are NOT re-pointed during the upgrade — they retain the mainnet + // implementations. With optimized builds the CREATE2 addresses match mainnet, but + // with unoptimized builds they diverge. Mock getProxyImplementation for these + // proxies so the validator sees the expected implementation addresses. + { + string memory _profile = Config.foundryProfile(); + bool _isOptimizedProfile = LibString.eq(_profile, "default") || LibString.eq(_profile, "ci"); + if (!_isOptimizedProfile) { + IDelayedWETH _cannonWeth = DisputeGames.getGameImplDelayedWeth(disputeGameFactory, GameTypes.CANNON); + if (address(_cannonWeth) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonWeth))), + abi.encode(validator.delayedWETHImpl()) + ); + } + IDelayedWETH _permissionedWeth = + DisputeGames.getGameImplDelayedWeth(disputeGameFactory, GameTypes.PERMISSIONED_CANNON); + if (address(_permissionedWeth) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_permissionedWeth))), + abi.encode(validator.delayedWETHImpl()) + ); + } + IDelayedWETH _cannonKonaWeth = + DisputeGames.getGameImplDelayedWeth(disputeGameFactory, GameTypes.CANNON_KONA); + if (address(_cannonKonaWeth) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonKonaWeth))), + abi.encode(validator.delayedWETHImpl()) + ); + } + IETHLockbox _lockbox = optimismPortal2.ethLockbox(); + if (address(_lockbox) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_lockbox))), + abi.encode(validator.ethLockboxImpl()) + ); + } + } + } + // If the absolute prestate is 
zero, we will always get a PDDG-40,PLDG-40 error here in the // standard validator. This happens because an absolute prestate of zero means that the // user is requesting to use the existing prestate. We could avoid the error by grabbing @@ -1448,7 +1499,7 @@ contract OPContractsManager_Upgrade_Test is OPContractsManager_Upgrade_Harness { } function test_verifyOpcmCorrectness_succeeds() public { - skipIfCoverage(); // Coverage changes bytecode and breaks the verification script. + skipIfUnoptimized(); // Set up environment variables with the actual OPCM addresses for tests that need them. // These values come from the StandardValidator that was deployed with the OPCM. diff --git a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol index f68b153d67375..92a7c624121d7 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol @@ -5,8 +5,10 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; import { DisputeGames } from "../setup/DisputeGames.sol"; +import { Config } from "scripts/libraries/Config.sol"; // Libraries +import { LibString } from "@solady/utils/LibString.sol"; import { GameType, Hash } from "src/dispute/lib/LibUDT.sol"; import { GameTypes, Duration, Claim } from "src/dispute/lib/Types.sol"; import { ForgeArtifacts } from "scripts/libraries/ForgeArtifacts.sol"; @@ -188,6 +190,55 @@ abstract contract OPContractsManagerStandardValidator_TestInit is CommonTest { abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(l1OptimismMintableERC20Factory))), abi.encode(standardValidator.optimismMintableERC20FactoryImpl()) ); + + // When running fork tests with an unoptimized Foundry profile (e.g., liteci), + // implementation contracts deployed 
via CREATE2 get different addresses because + // unoptimized bytecode differs from production builds. Most proxies are re-pointed + // to new implementations during the OPCM upgrade, so their getProxyImplementation + // checks pass regardless of optimizer settings. However, DelayedWETH and ETHLockbox + // proxies are NOT re-pointed during the upgrade — they retain the mainnet + // implementations. With optimized builds the CREATE2 addresses match mainnet, but + // with unoptimized builds they diverge. Mock getProxyImplementation for these + // proxies so the validator sees the expected implementation addresses. + { + string memory _profile = Config.foundryProfile(); + bool _isOptimizedProfile = LibString.eq(_profile, "default") || LibString.eq(_profile, "ci"); + if (!_isOptimizedProfile) { + IDelayedWETH _cannonWeth = DisputeGames.getGameImplDelayedWeth(dgf, GameTypes.CANNON); + if (address(_cannonWeth) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonWeth))), + abi.encode(standardValidator.delayedWETHImpl()) + ); + } + IDelayedWETH _permissionedWeth = + DisputeGames.getGameImplDelayedWeth(dgf, GameTypes.PERMISSIONED_CANNON); + if (address(_permissionedWeth) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_permissionedWeth))), + abi.encode(standardValidator.delayedWETHImpl()) + ); + } + IDelayedWETH _cannonKonaWeth = DisputeGames.getGameImplDelayedWeth(dgf, GameTypes.CANNON_KONA); + if (address(_cannonKonaWeth) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonKonaWeth))), + abi.encode(standardValidator.delayedWETHImpl()) + ); + } + if (address(ethLockbox) != address(0)) { + vm.mockCall( + address(proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(ethLockbox))), + abi.encode(standardValidator.ethLockboxImpl()) + ); + } + } + } 
+ DisputeGames.mockGameImplChallenger( disputeGameFactory, GameTypes.PERMISSIONED_CANNON, standardValidator.challenger() ); diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol index e8c184634df70..ce2ab852cdd25 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol @@ -1531,7 +1531,7 @@ contract OPContractsManagerV2_FeatBatchUpgrade_Test is OPContractsManagerV2_Test /// This enforces the OPCMV2 invariant that approximately 15 upgrade operations should be /// executable in one transaction. function test_batchUpgrade_multipleChains_succeeds() public { - skipIfCoverage(); + skipIfUnoptimized(); uint256 numberOfChains = 15; diff --git a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol index 8e1147250ffdb..bcf9ba7e32960 100644 --- a/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol +++ b/packages/contracts-bedrock/test/legacy/L1ChugSplashProxy.t.sol @@ -3,15 +3,11 @@ pragma solidity 0.8.15; // Testing import { Test } from "test/setup/Test.sol"; -import { VmSafe } from "forge-std/Vm.sol"; // Scripts import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Config } from "scripts/libraries/Config.sol"; -// Libraries -import { LibString } from "@solady/utils/LibString.sol"; - // Interfaces import { IL1ChugSplashProxy } from "interfaces/legacy/IL1ChugSplashProxy.sol"; @@ -117,15 +113,11 @@ contract L1ChugSplashProxy_SetCode_Test is L1ChugSplashProxy_TestInit { // if forge coverage is run before testing this with forge test or forge snapshot, forge // clean should be run first so that it recompiles the contracts using the foundry.toml // optimizer settings. 
- bool isUnoptimized = vm.isContext(VmSafe.ForgeContext.Coverage) || LibString.eq(Config.foundryProfile(), "lite") - || LibString.eq(Config.foundryProfile(), "cicoverage"); - + bool isUnoptimized = Config.isUnoptimized(); if (isUnoptimized) { gasLimit = 95_000; - } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) { - gasLimit = 65_000; } else { - revert("SafeCall_Test: unknown context"); + gasLimit = 65_000; } vm.prank(owner); diff --git a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol index 3f6a3bdf25571..ba2cdefdc3ce9 100644 --- a/packages/contracts-bedrock/test/libraries/SafeCall.t.sol +++ b/packages/contracts-bedrock/test/libraries/SafeCall.t.sol @@ -3,14 +3,12 @@ pragma solidity 0.8.15; // Testing import { Test } from "test/setup/Test.sol"; -import { VmSafe } from "forge-std/Vm.sol"; import { StdCheatsSafe } from "forge-std/StdCheats.sol"; // Scripts import { Config } from "scripts/libraries/Config.sol"; // Libraries -import { LibString } from "@solady/utils/LibString.sol"; import { SafeCall } from "src/libraries/SafeCall.sol"; contract SimpleSafeCaller { @@ -169,16 +167,12 @@ contract SafeCall_CallWithMinGas_Test is SafeCall_TestInit { // Because forge coverage always runs with the optimizer disabled, if forge coverage is // run before testing this with forge test or forge snapshot, forge clean should be run // first so that it recompiles the contracts using the foundry.toml optimizer settings. - if (vm.isContext(VmSafe.ForgeContext.Coverage) || LibString.eq(Config.foundryProfile(), "lite")) { - // 66_290 is the exact amount of gas required to make the safe call - // successfully with the optimizer disabled (ran via forge coverage) + if (Config.isUnoptimized()) { + // 66_290 is the exact amount of gas required with the optimizer disabled. 
expected = 66_290; - } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) { - // 65_922 is the exact amount of gas required to make the safe call - // successfully with the foundry.toml optimizer settings. - expected = 65_922; } else { - revert("SafeCall_Test: unknown context"); + // 65_922 is the exact amount of gas required with optimizer enabled. + expected = 65_922; } if (i < expected) { @@ -210,16 +204,12 @@ contract SafeCall_CallWithMinGas_Test is SafeCall_TestInit { // Because forge coverage always runs with the optimizer disabled, if forge coverage is // run before testing this with forge test or forge snapshot, forge clean should be run // first so that it recompiles the contracts using the foundry.toml optimizer settings. - if (vm.isContext(VmSafe.ForgeContext.Coverage) || LibString.eq(Config.foundryProfile(), "lite")) { - // 15_278_989 is the exact amount of gas required to make the safe call - // successfully with the optimizer disabled (ran via forge coverage) + if (Config.isUnoptimized()) { + // 15_278_989 is the exact amount of gas required with the optimizer disabled. expected = 15_278_989; - } else if (vm.isContext(VmSafe.ForgeContext.Test) || vm.isContext(VmSafe.ForgeContext.Snapshot)) { - // 15_278_621 is the exact amount of gas required to make the safe call - // successfully with the foundry.toml optimizer settings. - expected = 15_278_621; } else { - revert("SafeCall_Test: unknown context"); + // 15_278_621 is the exact amount of gas required with optimizer enabled. 
+ expected = 15_278_621; } if (i < expected) { diff --git a/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol b/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol index 5f8fb5dbb8650..0af2b8e80db5f 100644 --- a/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol +++ b/packages/contracts-bedrock/test/scripts/VerifyOPCM.t.sol @@ -157,8 +157,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that the script succeeds when no changes are introduced. function test_run_succeeds() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Run the script. harness.run(address(opcm), true); @@ -185,8 +184,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { } function test_run_bitmapNotEmptyOnMainnet_reverts(bytes32 _devFeatureBitmap) public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Anything but zero! _devFeatureBitmap = bytes32(bound(uint256(_devFeatureBitmap), 1, type(uint256).max)); @@ -211,8 +209,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// variables of implementation contracts. Fuzzing is too slow here, randomness is good /// enough. function test_run_implementationDifferentInsideImmutable_succeeds() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Skip security value checks since this test deliberately corrupts immutable values. harness.setSkipSecurityValueChecks(true); @@ -283,8 +280,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// implementation contracts that are not inside immutable references. Fuzzing is too /// slow here, randomness is good enough. function test_run_implementationDifferentOutsideImmutable_reverts() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Skip security value checks since corrupted bytecode may break contract queries. 
harness.setSkipSecurityValueChecks(true); @@ -349,8 +345,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// blueprints. Unlike immutables, any difference anywhere in the blueprint should /// cause the script to revert. Fuzzing is too slow here, randomness is good enough. function test_run_blueprintAnyDifference_reverts() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Grab the list of blueprints. VerifyOPCM.OpcmContractRef[] memory refs = harness.getOpcmContractRefs(opcm, "blueprints", true); @@ -392,8 +387,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that the script verifies all component contracts have the same contractsContainer address. function test_verifyContractsContainerConsistency_succeeds() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Get the property references (which include the component addresses) VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); @@ -404,8 +398,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that the script reverts when contracts have different contractsContainer addresses. function test_verifyContractsContainerConsistency_mismatch_reverts() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Get the property references (which include the component addresses) VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); @@ -423,8 +416,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that each OPCM component can be individually tested for container mismatch. function test_verifyContractsContainerConsistency_eachComponent_reverts() public { - // Coverage changes bytecode and causes failures, skip. 
- skipIfCoverage(); + skipIfUnoptimized(); // Get the property references (which include the component addresses) VerifyOPCM.OpcmContractRef[] memory propRefs = harness.getOpcmPropertyRefs(opcm); @@ -462,8 +454,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that the script verifies all component contracts with opcmUtils() have the same address. function test_verifyOpcmUtilsConsistency_succeeds() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Only run for OPCM V2 skipIfDevFeatureDisabled(DevFeatures.OPCM_V2); @@ -477,8 +468,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that the script reverts when contracts have different opcmUtils addresses. function test_verifyOpcmUtilsConsistency_mismatch_reverts() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Only run for OPCM V2 skipIfDevFeatureDisabled(DevFeatures.OPCM_V2); @@ -499,8 +489,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that each OPCM component with opcmUtils() can be individually tested for mismatch. function test_verifyOpcmUtilsConsistency_eachComponent_reverts() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Only run for OPCM V2 skipIfDevFeatureDisabled(DevFeatures.OPCM_V2); @@ -618,8 +607,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that immutable variables are correctly verified in the OPCM contract. function test_verifyOpcmImmutableVariables_succeeds() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // Test that the immutable variables are correctly verified. // Environment variables are set in setUp() to match the actual OPCM addresses. 
@@ -641,8 +629,7 @@ contract VerifyOPCM_Run_Test is VerifyOPCM_TestInit { /// @notice Tests that the script fails when OPCM immutable variables are invalid. /// We test this by setting expected addresses and mocking OPCM methods to return different addresses. function test_verifyOpcmImmutableVariables_mismatch_fails() public { - // Coverage changes bytecode and causes failures, skip. - skipIfCoverage(); + skipIfUnoptimized(); // If OPCM V2 is enabled because we do not use environment variables for OPCM V2. skipIfDevFeatureEnabled(DevFeatures.OPCM_V2); @@ -735,7 +722,7 @@ contract VerifyOPCM_verifyAnchorStateRegistryDelays_Test is VerifyOPCM_TestInit contract VerifyOPCM_verifyPreimageOracle_Test is VerifyOPCM_TestInit { /// @notice Tests that PreimageOracle verification succeeds when bytecode matches. function test_verifyPreimageOracle_matchingBytecode_succeeds() public { - skipIfCoverage(); + skipIfUnoptimized(); IMIPS64 mipsImpl = IMIPS64(opcm.implementations().mipsImpl); bool result = harness.verifyPreimageOracle(mipsImpl); assertTrue(result, "PreimageOracle verification should succeed"); @@ -743,7 +730,7 @@ contract VerifyOPCM_verifyPreimageOracle_Test is VerifyOPCM_TestInit { /// @notice Tests that PreimageOracle verification fails when bytecode doesn't match. 
function test_verifyPreimageOracle_corruptedBytecode_fails() public { - skipIfCoverage(); + skipIfUnoptimized(); IMIPS64 mipsImpl = IMIPS64(opcm.implementations().mipsImpl); address oracleAddr = address(mipsImpl.oracle()); diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index 8e58d04c092bc..7d9f5b51118f1 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -3,7 +3,7 @@ pragma solidity 0.8.15; // Testing import { console2 as console } from "forge-std/console2.sol"; -import { Vm, VmSafe } from "forge-std/Vm.sol"; +import { Vm } from "forge-std/Vm.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; import { FeatureFlags } from "test/setup/FeatureFlags.sol"; @@ -217,9 +217,12 @@ abstract contract Setup is FeatureFlags { console.log("Setup: L2 setup done!"); } - /// @dev Skips tests when running in coverage mode. - function skipIfCoverage() public { - if (vm.isContext(VmSafe.ForgeContext.Coverage)) { + /// @dev Skips tests that require production-like bytecode. This includes coverage mode + /// (which adds instrumentation) and unoptimized Foundry profiles (which produce + /// different CREATE2 addresses and gas costs). Use for gas measurement tests, + /// bytecode verification tests, and any test sensitive to compiler output. 
+ function skipIfUnoptimized() public { + if (Config.isUnoptimized()) { vm.skip(true); } } diff --git a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol index d78aeb37ae505..90494dec3b4d7 100644 --- a/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol +++ b/packages/contracts-bedrock/test/universal/BenchmarkTest.t.sol @@ -57,8 +57,7 @@ contract GasBenchMark_L1Block is CommonTest { contract GasBenchMark_L1Block_SetValuesEcotone is GasBenchMark_L1Block { function test_setL1BlockValuesEcotone_benchmark() external { - // Skip if the test is running in coverage. - skipIfCoverage(); + skipIfUnoptimized(); // Test SafeCall.call({ _target: address(l1Block), _calldata: setValuesCalldata }); @@ -70,8 +69,7 @@ contract GasBenchMark_L1Block_SetValuesEcotone is GasBenchMark_L1Block { contract GasBenchMark_L1Block_SetValuesEcotone_Warm is GasBenchMark_L1Block { function test_setL1BlockValuesEcotone_benchmark() external { - // Skip if the test is running in coverage. - skipIfCoverage(); + skipIfUnoptimized(); // Setup // Trigger so storage is warm. From e7e0118f4a517362265d32da93f342f6ec3544ab Mon Sep 17 00:00:00 2001 From: Karl Floersch Date: Mon, 2 Mar 2026 15:52:24 -0500 Subject: [PATCH 035/201] op-interop-filter: fix cross-validation perf, public admin endpoint, reorg detection (#19304) Use TargetBlockNumber for timestamp-to-block conversion in GetExecMsgsAtTimestamp instead of scanning all blocks linearly. The old approach made cross-validation impossibly slow. Expose admin_getFailsafeEnabled on the public supervisor port so health checks can query failsafe status without JWT. Fix reorg detection to trigger when head < nextBlock (was head < nextBlock-1, missing reorgs at the tip). 
Co-authored-by: Claude Opus 4.6 --- op-interop-filter/filter/frontend.go | 9 ++++ op-interop-filter/filter/jwt_auth_test.go | 43 +++++++++++++++++ .../filter/logsdb_chain_ingester.go | 47 ++++++++++--------- op-interop-filter/filter/service.go | 6 +++ 4 files changed, 83 insertions(+), 22 deletions(-) diff --git a/op-interop-filter/filter/frontend.go b/op-interop-filter/filter/frontend.go index fc4d9a8a9fbc3..f3b1f8cdb1812 100644 --- a/op-interop-filter/filter/frontend.go +++ b/op-interop-filter/filter/frontend.go @@ -30,6 +30,15 @@ func (f *QueryFrontend) CheckAccessList(ctx context.Context, inboxEntries []comm return nil } +// PublicAdminFrontend exposes read-only admin methods on the public port. +type PublicAdminFrontend struct { + backend *Backend +} + +func (p *PublicAdminFrontend) GetFailsafeEnabled(ctx context.Context) (bool, error) { + return p.backend.FailsafeEnabled(), nil +} + // AdminFrontend handles admin RPC methods type AdminFrontend struct { backend *Backend diff --git a/op-interop-filter/filter/jwt_auth_test.go b/op-interop-filter/filter/jwt_auth_test.go index f90fc51d93e97..c044c9a651253 100644 --- a/op-interop-filter/filter/jwt_auth_test.go +++ b/op-interop-filter/filter/jwt_auth_test.go @@ -122,6 +122,49 @@ func TestDedicatedAdminRPCServer(t *testing.T) { }) } +func TestPublicAdminGetFailsafe(t *testing.T) { + logger := testlog.Logger(t, log.LevelInfo) + + filterServer := oprpc.NewServer( + "127.0.0.1", + 0, + "test", + oprpc.WithLogger(logger), + ) + filterServer.AddAPI(rpc.API{ + Namespace: "supervisor", + Service: new(testSupervisorAPI), + }) + filterServer.AddAPI(rpc.API{ + Namespace: "admin", + Service: new(testAdminAPI), + }) + + require.NoError(t, filterServer.Start()) + t.Cleanup(func() { + _ = filterServer.Stop() + }) + + endpoint := "http://" + filterServer.Endpoint() + filterClient, err := rpc.Dial(endpoint) + require.NoError(t, err) + t.Cleanup(filterClient.Close) + + t.Run("admin_getFailsafeEnabled works on public port without 
JWT", func(t *testing.T) { + var res bool + err := filterClient.Call(&res, "admin_getFailsafeEnabled") + require.NoError(t, err) + require.Equal(t, false, res) + }) + + t.Run("supervisor API still works alongside public admin", func(t *testing.T) { + var res string + err := filterClient.Call(&res, "supervisor_ping") + require.NoError(t, err) + require.Equal(t, "pong", res) + }) +} + func TestFilterAPIWithoutAdminServer(t *testing.T) { logger := testlog.Logger(t, log.LevelInfo) diff --git a/op-interop-filter/filter/logsdb_chain_ingester.go b/op-interop-filter/filter/logsdb_chain_ingester.go index 7db2d9a00a931..ee0f12391808b 100644 --- a/op-interop-filter/filter/logsdb_chain_ingester.go +++ b/op-interop-filter/filter/logsdb_chain_ingester.go @@ -278,33 +278,36 @@ func (c *LogsDBChainIngester) GetExecMsgsAtTimestamp(timestamp uint64) ([]Includ return nil, types.ErrUninitialized } + blockNum, err := c.rollupCfg.TargetBlockNumber(timestamp) + if err != nil { + return nil, nil + } + latestBlock, ok := c.logsDB.LatestSealedBlock() - if !c.earliestIngestedBlockSet.Load() || !ok { + if !ok || blockNum > latestBlock.Number { return nil, nil } - earliest := c.earliestIngestedBlock.Load() - var results []IncludedMessage - for blockNum := earliest; blockNum <= latestBlock.Number; blockNum++ { - ref, _, execMsgs, err := c.logsDB.OpenBlock(blockNum) - if err != nil { - return nil, fmt.Errorf("failed to open block %d: %w", blockNum, err) - } + if !c.earliestIngestedBlockSet.Load() || blockNum < c.earliestIngestedBlock.Load() { + return nil, nil + } - if ref.Time == timestamp { - for _, msg := range execMsgs { - results = append(results, IncludedMessage{ - ExecutingMessage: msg, - InclusionBlockNum: blockNum, - InclusionTimestamp: ref.Time, - }) - } - } + ref, _, execMsgs, err := c.logsDB.OpenBlock(blockNum) + if err != nil { + return nil, fmt.Errorf("failed to open block %d: %w", blockNum, err) + } - // Timestamps increase, so we can stop early - if ref.Time > timestamp { - break 
- } + if ref.Time != timestamp { + return nil, nil + } + + var results []IncludedMessage + for _, msg := range execMsgs { + results = append(results, IncludedMessage{ + ExecutingMessage: msg, + InclusionBlockNum: blockNum, + InclusionTimestamp: ref.Time, + }) } return results, nil @@ -423,7 +426,7 @@ func (c *LogsDBChainIngester) runIngestion() { } // Reorg detection: if head moved behind our progress, check hash - if head.NumberU64() < nextBlock-1 { + if head.NumberU64() < nextBlock { if err := c.checkReorg(head); err != nil { continue } diff --git a/op-interop-filter/filter/service.go b/op-interop-filter/filter/service.go index d4070a8f93182..5fba7f0861395 100644 --- a/op-interop-filter/filter/service.go +++ b/op-interop-filter/filter/service.go @@ -241,6 +241,12 @@ func (s *Service) initRPCServer(cfg *Config) error { Authenticated: false, }) + server.AddAPI(rpc.API{ + Namespace: "admin", + Service: &PublicAdminFrontend{backend: s.backend}, + Authenticated: false, + }) + s.rpcServer = server return nil } From 42f6e3bb090421183b53669b1419ec8cd6cc0915 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Mon, 2 Mar 2026 16:00:51 -0500 Subject: [PATCH 036/201] ci: add cannon-builder image to Docker CI builds (#19295) Add the kona cannon-builder (Rust MIPS64r1 toolchain) image to the branch and tag Docker build workflows. This publishes the image to the shared artifact registry so it can be consumed by prestate builds. 
Co-authored-by: Claude Opus 4.6 --- .github/workflows/branches.yaml | 3 +++ docker-bake.hcl | 7 +++++++ 2 files changed, 10 insertions(+) diff --git a/.github/workflows/branches.yaml b/.github/workflows/branches.yaml index 75a3e0b095d5f..b4c2d4f8524e0 100644 --- a/.github/workflows/branches.yaml +++ b/.github/workflows/branches.yaml @@ -69,6 +69,7 @@ jobs: - kona-client - kona-host - op-reth + - cannon-builder uses: ethereum-optimism/factory/.github/workflows/docker.yaml@f8f3cb4800e538003134fb5f50cc734c2c98d762 with: mode: bake @@ -116,6 +117,7 @@ jobs: - kona-client - kona-host - op-reth + - cannon-builder uses: ethereum-optimism/factory/.github/workflows/docker.yaml@f8f3cb4800e538003134fb5f50cc734c2c98d762 with: mode: bake @@ -168,6 +170,7 @@ jobs: - image_name: kona-node - image_name: kona-host - image_name: op-reth + - image_name: cannon-builder runs-on: ${{ matrix.runner }} env: IMAGE: ${{ needs.build-fork.result == 'success' && format('ttl.sh/{0}/{1}:24h', github.sha, matrix.image_name) || format('us-docker.pkg.dev/oplabs-tools-artifacts/images/{0}:{1}', matrix.image_name, github.sha) }} diff --git a/docker-bake.hcl b/docker-bake.hcl index a1165148c2333..17e084d17079d 100644 --- a/docker-bake.hcl +++ b/docker-bake.hcl @@ -389,3 +389,10 @@ target "op-reth" { platforms = split(",", PLATFORMS) tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/op-reth:${tag}"] } + +target "cannon-builder" { + dockerfile = "cannon.dockerfile" + context = "rust/kona/docker/cannon" + platforms = split(",", PLATFORMS) + tags = [for tag in split(",", IMAGE_TAGS) : "${REGISTRY}/${REPOSITORY}/cannon-builder:${tag}"] +} From f51d8aad1c93b445162d48735772900c36e4d9e9 Mon Sep 17 00:00:00 2001 From: Inphi Date: Mon, 2 Mar 2026 17:42:47 -0500 Subject: [PATCH 037/201] proofs: Add single-chain interop and preinterop fault proof smoke tests (#19299) * proofs: Add single-chain interop and preinterop fault proof smoke tests Introduce smoke tests that sanity-check super fault 
proofs when the dependency set contains only one L2 chain. This covers both preinterop (super roots without interop activation) and interop (interop at genesis) scenarios using op-supernode. Co-Authored-By: Claude Opus 4.6 * proofs: Pass SupernodeOption to activate interop on supernode WithInteropAtGenesis (deployer option) alone doesn't activate interop on the supernode. Pass WithSupernodeInteropAtGenesis as a SupernodeOption to WithSharedSupernodeCLs so the supernode actually enables interop validation. Also fix goimports formatting. Co-Authored-By: Claude Opus 4.6 * test cleanup batcher --------- Co-authored-by: Claude Opus 4.6 --- .../interop/proofs-singlechain/init_test.go | 17 ++ .../interop_fault_proofs_test.go | 15 ++ .../preinterop-singlechain/init_test.go | 17 ++ .../interop_fault_proofs_test.go | 15 ++ .../tests/superfaultproofs/singlechain.go | 159 ++++++++++++++++++ op-devstack/presets/interop.go | 17 ++ op-devstack/sysgo/system.go | 81 +++++++++ 7 files changed, 321 insertions(+) create mode 100644 op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go create mode 100644 op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go create mode 100644 op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go create mode 100644 op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go create mode 100644 op-acceptance-tests/tests/superfaultproofs/singlechain.go diff --git a/op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go b/op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go new file mode 100644 index 0000000000000..a3563ca58af3e --- /dev/null +++ b/op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go @@ -0,0 +1,17 @@ +package proofs_singlechain + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, + presets.WithSingleChainSuperInteropSupernode(), + presets.WithL2NetworkCount(1), + stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), + ) +} diff --git a/op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go b/op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go new file mode 100644 index 0000000000000..0d8faa8db4e7b --- /dev/null +++ b/op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go @@ -0,0 +1,15 @@ +package proofs_singlechain + +import ( + "testing" + + sfp "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/superfaultproofs" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestInteropSingleChainFaultProofs(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewSingleChainInterop(t) + sfp.RunSingleChainSuperFaultProofSmokeTest(t, sys) +} diff --git a/op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go b/op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go new file mode 100644 index 0000000000000..6cff962a06aca --- /dev/null +++ b/op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go @@ -0,0 +1,17 @@ +package preinterop_singlechain + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func TestMain(m *testing.M) { + presets.DoMain(m, + presets.WithSingleChainIsthmusSuperSupernode(), + presets.WithL2NetworkCount(1), + stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), + ) +} diff --git a/op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go 
b/op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go new file mode 100644 index 0000000000000..422bd109c68f1 --- /dev/null +++ b/op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go @@ -0,0 +1,15 @@ +package preinterop_singlechain + +import ( + "testing" + + sfp "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/superfaultproofs" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestPreinteropSingleChainFaultProofs(gt *testing.T) { + t := devtest.SerialT(gt) + sys := presets.NewSingleChainInterop(t) + sfp.RunSingleChainSuperFaultProofSmokeTest(t, sys) +} diff --git a/op-acceptance-tests/tests/superfaultproofs/singlechain.go b/op-acceptance-tests/tests/superfaultproofs/singlechain.go new file mode 100644 index 0000000000000..4750e87b54141 --- /dev/null +++ b/op-acceptance-tests/tests/superfaultproofs/singlechain.go @@ -0,0 +1,159 @@ +package superfaultproofs + +import ( + "github.com/ethereum-optimism/optimism/op-challenger/game/fault/trace/super" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/crypto" +) + +// singleChain bundles the DSL handles for the single L2 chain in a SingleChainInterop system. +func singleChainFrom(sys *presets.SingleChainInterop) *chain { + return &chain{ + ID: sys.L2ChainA.ChainID(), + Cfg: sys.L2ChainA.Escape().RollupConfig(), + Rollup: sys.L2CLA.Escape().RollupAPI(), + EL: sys.L2ELA, + CLNode: sys.L2CLA, + Batcher: sys.L2BatcherA, + } +} + +// RunSingleChainSuperFaultProofSmokeTest is a minimal smoke test for single-chain super fault proofs. 
+// It verifies that the super-root transition works correctly when the dependency set has only one chain. +// The test stops the batcher, waits for the safe head to stall, then resumes batching and verifies +// a basic set of valid/invalid transitions through both the FPP and challenger trace provider. +func RunSingleChainSuperFaultProofSmokeTest(t devtest.T, sys *presets.SingleChainInterop) { + t.Require().NotNil(sys.SuperRoots, "supernode is required for this test") + + c := singleChainFrom(sys) + chains := []*chain{c} + + // Stop batch submission so safe head stalls, then we have a known boundary. + c.Batcher.Stop() + t.Cleanup(c.Batcher.Start) + awaitSafeHeadsStalled(t, sys.L2CLA) + + endTimestamp := nextTimestampAfterSafeHeads(t, chains) + startTimestamp := endTimestamp - 1 + + // Ensure the chain has produced the target block as unsafe. + target, err := c.Cfg.TargetBlockNumber(endTimestamp) + t.Require().NoError(err) + c.EL.Reached(eth.Unsafe, target, 60) + + // L1 head where chain has no batch data at endTimestamp. + respBefore := awaitOptimisticPattern(t, sys.SuperRoots, endTimestamp, + nil, []eth.ChainID{c.ID}) + l1HeadBefore := respBefore.CurrentL1 + + // Resume batching so the chain's data at endTimestamp becomes available. + c.Batcher.Start() + sys.SuperRoots.AwaitValidatedTimestamp(endTimestamp) + l1HeadCurrent := latestRequiredL1(sys.SuperRoots.SuperRootAtTimestamp(endTimestamp)) + c.Batcher.Stop() + + // Build expected transition states for a single chain. + start := superRootAtTimestamp(t, chains, startTimestamp) + end := superRootAtTimestamp(t, chains, endTimestamp) + + optimistic := optimisticBlockAtTimestamp(t, c, endTimestamp) + + // With one chain: step 0 = chain's optimistic block, steps 1..consolidateStep-1 = padding, + // consolidateStep = consolidation to next super root. 
+ step1 := marshalTransition(start, 1, optimistic) + padding := func(step uint64) []byte { + return marshalTransition(start, step, optimistic) + } + + tests := []*transitionTest{ + { + Name: "ClaimDirectToNextTimestamp", + AgreedClaim: start.Marshal(), + DisputedClaim: end.Marshal(), + DisputedTraceIndex: 0, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: false, + }, + { + Name: "ChainOptimisticBlock", + AgreedClaim: start.Marshal(), + DisputedClaim: step1, + DisputedTraceIndex: 0, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: true, + }, + { + Name: "ChainOptimisticBlock-InvalidNoChange", + AgreedClaim: start.Marshal(), + DisputedClaim: start.Marshal(), + DisputedTraceIndex: 0, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: false, + }, + { + Name: "FirstPaddingStep", + AgreedClaim: step1, + DisputedClaim: padding(2), + DisputedTraceIndex: 1, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: true, + }, + { + Name: "ConsolidateStep", + AgreedClaim: padding(consolidateStep), + DisputedClaim: end.Marshal(), + DisputedTraceIndex: consolidateStep, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: true, + }, + { + Name: "ConsolidateStep-InvalidNoChange", + AgreedClaim: padding(consolidateStep), + DisputedClaim: padding(consolidateStep), + DisputedTraceIndex: consolidateStep, + L1Head: l1HeadCurrent, + ClaimTimestamp: endTimestamp, + ExpectValid: false, + }, + { + Name: "ChainReachesL1Head", + AgreedClaim: start.Marshal(), + DisputedClaim: super.InvalidTransition, + DisputedTraceIndex: 0, + L1Head: l1HeadBefore, + ClaimTimestamp: endTimestamp, + ExpectValid: true, + }, + { + Name: "SuperRootInvalidIfUnsupportedByL1Data", + AgreedClaim: start.Marshal(), + DisputedClaim: step1, + DisputedTraceIndex: 0, + L1Head: l1HeadBefore, + ClaimTimestamp: endTimestamp, + ExpectValid: false, + }, + } + + challengerCfg := sys.L2ChainA.Escape().L2Challengers()[0].Config() 
+ gameDepth := sys.DisputeGameFactory().GameImpl(gameTypes.SuperCannonKonaGameType).SplitDepth() + + for _, test := range tests { + t.Run(test.Name+"-fpp", func(t devtest.T) { + runKonaInteropProgram(t, challengerCfg.CannonKona, test.L1Head.Hash, + test.AgreedClaim, crypto.Keccak256Hash(test.DisputedClaim), + test.ClaimTimestamp, test.ExpectValid) + }) + t.Run(test.Name+"-challenger", func(t devtest.T) { + runChallengerProviderTest(t, sys.SuperRoots.QueryAPI(), gameDepth, startTimestamp, test.ClaimTimestamp, test) + }) + } +} diff --git a/op-devstack/presets/interop.go b/op-devstack/presets/interop.go index 3ddd26647f36b..dfb8b76ba33ac 100644 --- a/op-devstack/presets/interop.go +++ b/op-devstack/presets/interop.go @@ -108,6 +108,11 @@ func (s *SingleChainInterop) L2Networks() []*dsl.L2Network { } } +func (s *SingleChainInterop) DisputeGameFactory() *proofs.DisputeGameFactory { + supernode := s.system.Supernode(match.Assume(s.T, match.FirstSupernode)) + return proofs.NewDisputeGameFactory(s.T, s.L1Network, s.L1EL.EthClient(), s.L2ChainA.DisputeGameFactoryProxyAddr(), nil, nil, supernode, s.challengerConfig) +} + func (s *SingleChainInterop) AdvanceTime(amount time.Duration) { ttSys, ok := s.system.(stack.TimeTravelSystem) s.T.Require().True(ok, "attempting to advance time on incompatible system") @@ -170,6 +175,18 @@ func WithIsthmusSuper() stack.CommonOption { return stack.MakeCommon(sysgo.DefaultIsthmusSuperProofsSystem(&sysgo.DefaultInteropSystemIDs{})) } +// WithSingleChainIsthmusSuperSupernode specifies a single-chain super root system +// (for proofs) that sources super-roots via op-supernode, without interop at genesis. 
+func WithSingleChainIsthmusSuperSupernode() stack.CommonOption { + return stack.MakeCommon(sysgo.DefaultSingleChainSupernodeIsthmusSuperProofsSystem(&sysgo.DefaultSingleChainSupernodeProofsSystemIDs{})) +} + +// WithSingleChainSuperInteropSupernode specifies a single-chain super root system +// (for proofs) that sources super-roots via op-supernode, with interop at genesis. +func WithSingleChainSuperInteropSupernode() stack.CommonOption { + return stack.MakeCommon(sysgo.DefaultSingleChainSupernodeInteropProofsSystem(&sysgo.DefaultSingleChainSupernodeProofsSystemIDs{})) +} + // WithUnscheduledInterop adds a test-gate to not run the test if the interop upgrade is scheduled. // If the backend is sysgo, it will disable the interop configuration func WithUnscheduledInterop() stack.CommonOption { diff --git a/op-devstack/sysgo/system.go b/op-devstack/sysgo/system.go index 2df23cc1914ae..54ef3252edbc6 100644 --- a/op-devstack/sysgo/system.go +++ b/op-devstack/sysgo/system.go @@ -637,6 +637,87 @@ func defaultSupernodeSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystem return opt } +// DefaultSingleChainSupernodeProofsSystemIDs holds IDs for a single-chain supernode proof system. +type DefaultSingleChainSupernodeProofsSystemIDs struct { + DefaultSingleChainInteropSystemIDs + Supernode stack.SupernodeID +} + +func NewDefaultSingleChainSupernodeProofsSystemIDs(l1ID, l2AID eth.ChainID) DefaultSingleChainSupernodeProofsSystemIDs { + return DefaultSingleChainSupernodeProofsSystemIDs{ + DefaultSingleChainInteropSystemIDs: NewDefaultSingleChainInteropSystemIDs(l1ID, l2AID), + Supernode: stack.NewSupernodeID("supernode-single-system-proofs", l2AID), + } +} + +// DefaultSingleChainSupernodeIsthmusSuperProofsSystem creates a single-chain super-roots proofs +// system using op-supernode without interop at genesis (preinterop). 
+func DefaultSingleChainSupernodeIsthmusSuperProofsSystem(dest *DefaultSingleChainSupernodeProofsSystemIDs) stack.Option[*Orchestrator] { + return defaultSingleChainSupernodeSuperProofsSystem(dest, nil) +} + +// DefaultSingleChainSupernodeInteropProofsSystem creates a single-chain super-roots proofs +// system using op-supernode with interop enabled at genesis. +func DefaultSingleChainSupernodeInteropProofsSystem(dest *DefaultSingleChainSupernodeProofsSystemIDs) stack.Option[*Orchestrator] { + return defaultSingleChainSupernodeSuperProofsSystem(dest, + []SupernodeOption{WithSupernodeInteropAtGenesis()}, + WithInteropAtGenesis()) +} + +func defaultSingleChainSupernodeSuperProofsSystem(dest *DefaultSingleChainSupernodeProofsSystemIDs, snOpts []SupernodeOption, deployerOpts ...DeployerOption) stack.CombinedOption[*Orchestrator] { + ids := NewDefaultSingleChainSupernodeProofsSystemIDs(DefaultL1ID, DefaultL2AID) + opt := stack.Combine[*Orchestrator]() + + opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { + o.P().Logger().Info("Setting up single-chain (supernode)") + })) + + opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) + + opt.Add(WithDeployer(), WithDeployerOptions( + append([]DeployerOption{ + WithLocalContractSources(), + WithCommons(ids.L1.ChainID()), + WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), + WithDevFeatureEnabled(deployer.OptimismPortalInteropDevFlag), + }, deployerOpts...)..., + )) + + opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) + + opt.Add(WithL2ELNode(ids.L2AEL)) + + // Shared supernode for the single L2 chain + opt.Add(WithSharedSupernodeCLs(ids.Supernode, + []L2CLs{{CLID: ids.L2ACL, ELID: ids.L2AEL}}, + ids.L1CL, ids.L1EL, snOpts...)) + + opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) + + opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) + + // Run super roots migration using supernode as super root source + opt.Add(WithSuperRootsFromSupernode(ids.L1.ChainID(), ids.L1EL, 
[]stack.L2CLNodeID{ids.L2ACL}, ids.Supernode, ids.L2A.ChainID())) + + // Start challenger after migration; use supernode RPCs as super-roots source. + opt.Add(WithSupernodeL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supernode, &ids.Cluster, []stack.L2ELNodeID{ + ids.L2AEL, + })) + + // Start proposer after migration; use supernode RPCs as proposal source. + opt.Add(WithSupernodeProposer(ids.L2AProposer, ids.L1EL, &ids.Supernode)) + + opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL})) + + opt.Add(WithL2MetricsDashboard()) + + opt.Add(stack.Finally(func(orch *Orchestrator) { + *dest = ids + })) + + return opt +} + func defaultSuperProofsSystem(dest *DefaultInteropSystemIDs, deployerOpts ...DeployerOption) stack.CombinedOption[*Orchestrator] { ids := NewDefaultInteropSystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) opt := stack.Combine[*Orchestrator]() From ee4d492a87b015874bddf772b719d877fb798ab4 Mon Sep 17 00:00:00 2001 From: George Knee Date: Mon, 2 Mar 2026 22:43:01 +0000 Subject: [PATCH 038/201] fix(kona-node): Map BlockNotFound errors to ResetError for reorg recovery (#19344) * kona/protocol/derive: handle "blob not found" correctly * lint * lint * add a block number or hash in the error message * add named fields to BlobNotFound err * just fmt-fix * clippify * Simplify using inspect_err * simplifications * Map BlockNotFound errors to ResetError for reorg recovery When an L1 or L2 block disappears (typically due to a reorg), retrying will never succeed. Convert these to ResetError so the pipeline can recover instead of stalling indefinitely. * just fmt-fix * Distinguish hash vs number lookups in BlockNotFound handling Hash-based lookups indicate a reorg (block removed) and require a pipeline reset. Number-based lookups indicate the block hasn't been produced yet and should be retried as a temporary error. 
* lint * ci: bump cimg/base from 2024.01 to 2026.03 The Docker daemon on CircleCI remote Docker hosts now requires API v1.44+, but cimg/base:2024.01 ships with Docker client v1.43. Bump to cimg/base:2026.03 to fix Docker API version mismatch errors in analyze-op-program-client and check-kontrol-build jobs. Co-Authored-By: Claude Opus 4.6 * Change ResetError::BlockNotFound to use BlockId instead of String This provides stronger typing and avoids unnecessary string formatting when constructing the error variant. --------- Co-authored-by: Matt Solomon Co-authored-by: Claude Opus 4.6 --- .../protocol/derive/src/errors/pipeline.rs | 6 +++ .../providers-alloy/src/chain_provider.rs | 51 ++++++++++++++++++- .../providers/providers-local/src/buffered.rs | 40 +++++++++++++-- 3 files changed, 91 insertions(+), 6 deletions(-) diff --git a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs index 2da147b3f5c77..3a8936ebe3fe1 100644 --- a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs +++ b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs @@ -2,6 +2,7 @@ use crate::BuilderError; use alloc::string::String; +use alloy_eips::BlockId; use alloy_primitives::B256; use kona_genesis::SystemConfigUpdateError; use kona_protocol::{DepositError, SpanBatchError}; @@ -348,6 +349,10 @@ pub enum ResetError { /// The pipeline must reset to move past the offending L1 block. #[error("Blobs unavailable: beacon node returned 404 for slot {0}")] BlobsUnavailable(u64), + /// An L1 block referenced during derivation is no longer present on the chain, + /// typically because an L1 reorg removed it. The pipeline must reset to recover. 
+ #[error("Block not found: {0}")] + BlockNotFound(BlockId), } impl ResetError { @@ -436,6 +441,7 @@ mod tests { )), ResetError::HoloceneActivation, ResetError::BlobsUnavailable(0), + ResetError::BlockNotFound(B256::default().into()), ]; for error in reset_errors { let expected = PipelineErrorKind::Reset(error.clone()); diff --git a/rust/kona/crates/providers/providers-alloy/src/chain_provider.rs b/rust/kona/crates/providers/providers-alloy/src/chain_provider.rs index b36a0c715fb28..17cd93b536370 100644 --- a/rust/kona/crates/providers/providers-alloy/src/chain_provider.rs +++ b/rust/kona/crates/providers/providers-alloy/src/chain_provider.rs @@ -9,7 +9,7 @@ use alloy_provider::{Provider, RootProvider}; use alloy_transport::{RpcError, TransportErrorKind}; use alloy_transport_http::reqwest; use async_trait::async_trait; -use kona_derive::{ChainProvider, PipelineError, PipelineErrorKind}; +use kona_derive::{ChainProvider, PipelineError, PipelineErrorKind, ResetError}; use kona_protocol::BlockInfo; use lru::LruCache; use std::{boxed::Box, num::NonZeroUsize, vec::Vec}; @@ -128,7 +128,16 @@ impl From for PipelineErrorKind { Self::Temporary(PipelineError::Provider(format!("Transport error: {e}"))) } AlloyChainProviderError::BlockNotFound(id) => { - Self::Temporary(PipelineError::Provider(format!("L1 Block not found: {id}"))) + // A hash-based lookup returning not-found means the block was reorged + // out of the chain — retrying will never succeed, so reset. + // A number-based lookup returning not-found means the next L1 block + // hasn't been produced yet — this is transient, so Temporary. 
+ match id { + BlockId::Hash(_) => ResetError::BlockNotFound(id).reset(), + BlockId::Number(_) => Self::Temporary(PipelineError::Provider(format!( + "L1 Block not found: {id}" + ))), + } } AlloyChainProviderError::ReceiptsConversion(_) => { Self::Temporary(PipelineError::Provider( @@ -270,3 +279,41 @@ impl ChainProvider for AlloyChainProvider { Ok((block_info, block.body.transactions)) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_from_alloy_chain_provider_error() { + // Transport errors are transient — retry makes sense. + let transport_err = + AlloyChainProviderError::Transport(alloy_transport::RpcError::Transport( + alloy_transport::TransportErrorKind::Custom("timeout".into()), + )); + let kind: PipelineErrorKind = transport_err.into(); + assert!(matches!(kind, PipelineErrorKind::Temporary(_))); + + // ReceiptsConversion is a transient decode failure. + let kind: PipelineErrorKind = + AlloyChainProviderError::ReceiptsConversion(Default::default()).into(); + assert!(matches!(kind, PipelineErrorKind::Temporary(_))); + + // Hash-based BlockNotFound: the block was reorged out. Retrying will never succeed + // — the pipeline must reset. Without this, the safe head stalls on L1 reorgs. + let kind: PipelineErrorKind = + AlloyChainProviderError::BlockNotFound(B256::default().into()).into(); + assert!( + matches!(kind, PipelineErrorKind::Reset(_)), + "hash-based BlockNotFound must map to Reset (block reorged out)" + ); + + // Number-based BlockNotFound: the next L1 block hasn't been mined yet. This is + // transient — the pipeline must wait, not reset. 
+ let kind: PipelineErrorKind = AlloyChainProviderError::BlockNotFound(0u64.into()).into(); + assert!( + matches!(kind, PipelineErrorKind::Temporary(_)), + "number-based BlockNotFound must stay Temporary (block not yet produced)" + ); + } +} diff --git a/rust/kona/crates/providers/providers-local/src/buffered.rs b/rust/kona/crates/providers/providers-local/src/buffered.rs index ae9ee8dfca961..b942a91556952 100644 --- a/rust/kona/crates/providers/providers-local/src/buffered.rs +++ b/rust/kona/crates/providers/providers-local/src/buffered.rs @@ -5,9 +5,10 @@ //! directly from this cached state. Chain updates are provided through the `add_block` and //! `handle_chain_event` methods. +use alloy_eips::BlockId; use alloy_primitives::B256; use async_trait::async_trait; -use kona_derive::{L2ChainProvider, PipelineError, PipelineErrorKind}; +use kona_derive::{L2ChainProvider, PipelineError, PipelineErrorKind, ResetError}; use kona_genesis::{ChainGenesis, RollupConfig, SystemConfig}; use kona_protocol::{BatchValidationProvider, L2BlockInfo, to_system_config}; use op_alloy_consensus::OpBlock; @@ -279,9 +280,9 @@ impl From for PipelineErrorKind { "Block not found in cache: {hash}" ))) } - BufferedProviderError::BlockNotFound(number) => Self::Temporary( - PipelineError::Provider(format!("Block {number} not found in cache")), - ), + BufferedProviderError::BlockNotFound(number) => { + ResetError::BlockNotFound(BlockId::Number(number.into())).reset() + } BufferedProviderError::L2BlockInfoConstruction(number) => { Self::Temporary(PipelineError::Provider(format!( "Failed to construct L2BlockInfo for block {number}" @@ -417,4 +418,35 @@ mod tests { let retrieved_info = provider.l2_block_info_by_number(1).await.unwrap(); assert_eq!(retrieved_info.block_info.number, 1); } + + #[test] + fn test_from_buffered_provider_error() { + // BlockNotFound means the block is gone (e.g. due to a reorg draining the buffer). + // Retrying will never succeed — the pipeline must reset. 
+ let kind: PipelineErrorKind = BufferedProviderError::BlockNotFound(42).into(); + assert!( + matches!(kind, PipelineErrorKind::Reset(_)), + "BlockNotFound must map to Reset so the pipeline recovers from reorgs" + ); + + // Other errors remain Temporary or Critical as before. + let kind: PipelineErrorKind = BufferedProviderError::L2BlockInfoConstruction(1).into(); + assert!(matches!(kind, PipelineErrorKind::Temporary(_))); + + let kind: PipelineErrorKind = BufferedProviderError::SystemConfigMissing.into(); + assert!(matches!(kind, PipelineErrorKind::Critical(_))); + } + + #[tokio::test] + async fn test_block_not_found_is_reset_via_provider() { + let mut provider = create_test_provider().await; + // Querying a block number that was never inserted must produce a Reset error, + // not a Temporary one. This is the observable contract the pipeline relies on. + let err = provider.block_by_number(99).await.unwrap_err(); + let kind: PipelineErrorKind = err.into(); + assert!( + matches!(kind, PipelineErrorKind::Reset(_)), + "block_by_number returning BlockNotFound must map to Reset" + ); + } } From cb87fc5aa53dac6e4ac35ae0e1c80e72e518a881 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 3 Mar 2026 09:10:53 +1000 Subject: [PATCH 039/201] ci: Remove build rust step (#19280) * Remove rust build step. * ci: persist kona-build-release binaries to workspace for memory-all Add persist_to_workspace parameter to rust-build-binary job and enable it for kona-build-release so that rust/target/release is available via the workspace for the memory-all acceptance test job. Co-Authored-By: Claude Sonnet 4.6 * Only persist binaries. * Persist some more binaries. * Back to full paths. 
--------- Co-authored-by: Claude Sonnet 4.6 --- .circleci/continue/main.yml | 137 ++++-------------------------------- 1 file changed, 15 insertions(+), 122 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 114e27a9edaf5..5efa8addaefb6 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -673,6 +673,10 @@ jobs: description: "Whether to save the cache at the end of the build" type: boolean default: true + persist_to_workspace: + description: "Whether to persist the built binaries to the CircleCI workspace" + type: boolean + default: false steps: - rust-build: directory: << parameters.directory >> @@ -684,6 +688,15 @@ jobs: binary: << parameters.binary >> toolchain: << parameters.toolchain >> save_cache: << parameters.save_cache >> + - when: + condition: << parameters.persist_to_workspace >> + steps: + - persist_to_workspace: + root: "." + paths: + - "<< parameters.directory >>/target/<< parameters.profile >>/kona-*" + - "<< parameters.directory >>/target/<< parameters.profile >>/op-*" + - "<< parameters.directory >>/target/<< parameters.profile >>/rollup-boost" # Build a single Rust binary from a submodule. rust-build-submodule: @@ -780,124 +793,6 @@ jobs: paths: - ".circleci-cache/rust-binaries" - # Kurtosis-based acceptance tests - op-acceptance-tests-kurtosis: - parameters: - devnet: - description: | - The name of the pre-defined Kurtosis devnet to run the acceptance tests against - (e.g. 'simple', 'interop', 'jovian'). Empty string uses in-process testing (sysgo orchestrator). - type: string - default: "interop" - gate: - description: The gate to run the acceptance tests against. Must be defined in op-acceptance-tests/acceptance-tests.yaml. 
- type: string - default: "interop" - no_output_timeout: - description: Timeout for when CircleCI kills the job if there's no output - type: string - default: 30m - docker: - - image: <> - resource_class: xlarge - steps: - - utils/checkout-with-mise: - checkout-method: blobless - enable-mise-cache: true - - setup_remote_docker: - docker_layer_caching: true - - run: - name: Lint/Vet/Build op-acceptance-tests/cmd - working_directory: op-acceptance-tests - command: | - just cmd-check - - run: - name: Setup Kurtosis - command: | - echo "Setting up Kurtosis for external devnet testing..." - echo "Using Kurtosis from: $(which kurtosis || echo 'not found')" - kurtosis version || true - echo "Starting Kurtosis engine..." - kurtosis engine start || true - echo "Cleaning old instances..." - kurtosis clean -a || true - kurtosis engine status || true - echo "Kurtosis setup complete" - - run: - name: Dump kurtosis logs (pre-run) - command: | - # Best-effort: show engine status and existing enclaves before the test run - kurtosis engine status || true - kurtosis enclave ls || true - - run: - name: Run acceptance tests (devnet=<>, gate=<>) - working_directory: op-acceptance-tests - no_output_timeout: 1h - environment: - GOFLAGS: "-mod=mod" - GO111MODULE: "on" - GOGC: "0" - command: | - LOG_LEVEL=info just acceptance-test "<>" "<>" - - run: - name: Dump kurtosis logs - when: on_fail - command: | - # Dump logs & specs - kurtosis dump ./.kurtosis-dump - - # Remove spec.json files - rm -rf ./.kurtosis-dump/enclaves/**/*.json - - # Remove all unnecessary logs - rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-api--* - rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-logs-collector--* - rm -rf ./.kurtosis-dump/enclaves/*/task-* - - # Print enclaves and try to show service logs for the most recent devnet - kurtosis enclave ls || true - # Dump logs for all enclaves to aid debugging - for e in $(kurtosis enclave ls --output json 2>/dev/null | jq -r '.[].identifier' 2>/dev/null); do - echo "\n==== 
Kurtosis logs for enclave: $e ====" - kurtosis enclave inspect "$e" || true - kurtosis service logs "$e" --all-services --follow=false || true - done - - run: - name: Print results (summary) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/summary.log" || true - - run: - name: Print results (failures) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/failed/*.log" || true - when: on_fail - - run: - name: Print results (all) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/all.log" || true - - run: - name: Generate JUnit XML test report for CircleCI - working_directory: op-acceptance-tests - when: always - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - gotestsum --junitfile results/results.xml --raw-command cat $LOG_DIR/raw_go_events.log || true - - when: - condition: always - steps: - - store_test_results: - path: ./op-acceptance-tests/results - - when: - condition: always - steps: - - store_artifacts: - path: ./op-acceptance-tests/logs initialize: docker: - image: <> @@ -2118,10 +2013,6 @@ jobs: enable-mise-cache: true - attach_workspace: at: . - # Build kona-node for the acceptance tests. This automatically gets kona from the cache. 
- - rust-build: - directory: rust - profile: "release" - run: name: Configure Rust binary paths (sysgo) command: | @@ -3222,6 +3113,7 @@ workflows: profile: "release" features: "default" save_cache: true + persist_to_workspace: true context: - circleci-repo-readonly-authenticated-github-token - rust-build-submodule: &rust-build-op-rbuilder @@ -3552,6 +3444,7 @@ workflows: directory: rust needs_clang: true profile: "release" + persist_to_workspace: true context: - circleci-repo-readonly-authenticated-github-token - rust-build-submodule: *rust-build-op-rbuilder From 0777438dc0c3cbedd5fa478c00d5a7305b991347 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Mon, 2 Mar 2026 18:30:10 -0500 Subject: [PATCH 040/201] op-node: execute NUT bundles at Karst fork activation (#19220) * feat(op-node): execute NUT bundles at Karst fork activation Add the Karst fork and wire NUT bundle execution into the derivation pipeline, with upgrade gas allocated to the block gas limit. * feat: add NUT bundle fork lock with CI check Prevent accidental modification of finalized NUT bundles by adding a lock file with sha256 hashes and a CI check that enforces immutability. * fix: Add missing test-nut * refactor: make NUT bundle types and functions private Only UpgradeTransactions (to be added) will be the public API for NUTs. This enables easier refactoring and eventual migration to op-core. * refactor: move NUT bundle embed to parse_upgrade_transactions.go Co-locates the embedded JSON with the code that parses it. * refactor: add UpgradeTransactions(fork) with switch and error wrapping Encapsulates fork-to-bundle mapping in a single public function, wraps errors with context, and simplifies the call site in attributes.go. * chore: move check-nut-locks from Makefile to justfile Also updates CI to call just directly. * fix: go fmt * fix: Formatting on testdata * refactor: rename parse_upgrade_transactions to upgrade_transaction Per review feedback to use a simpler file name. 
* feat: add reverse check that all NUT bundles have lock entries Globs known bundle locations to catch bundles added without a corresponding fork_lock.toml entry. * fix: use go run directly rather than just command --- .circleci/continue/main.yml | 16 +++ justfile | 4 + op-core/nuts/fork_lock.toml | 6 ++ op-node/rollup/derive/attributes.go | 19 +++- op-node/rollup/derive/karst_nut_bundle.json | 6 ++ ...transactions.go => upgrade_transaction.go} | 64 ++++++++--- ...ns_test.go => upgrade_transaction_test.go} | 44 ++++++-- op-node/rollup/sequencing/sequencer.go | 6 ++ ops/scripts/check-nut-locks/main.go | 102 ++++++++++++++++++ 9 files changed, 245 insertions(+), 22 deletions(-) create mode 100644 op-core/nuts/fork_lock.toml create mode 100644 op-node/rollup/derive/karst_nut_bundle.json rename op-node/rollup/derive/{parse_upgrade_transactions.go => upgrade_transaction.go} (52%) rename op-node/rollup/derive/{parse_upgrade_transactions_test.go => upgrade_transaction_test.go} (76%) create mode 100644 ops/scripts/check-nut-locks/main.go diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 5efa8addaefb6..a50197264e7e5 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -1732,6 +1732,19 @@ jobs: command: | make check-op-geth-version + check-nut-locks: + docker: + - image: <> + resource_class: small + steps: + - utils/checkout-with-mise: + checkout-method: blobless + enable-mise-cache: true + - run: + name: check nut locks + command: | + go run ./ops/scripts/check-nut-locks + go-tests: parameters: notify: @@ -2981,6 +2994,9 @@ workflows: - check-op-geth-version: context: - circleci-repo-readonly-authenticated-github-token + - check-nut-locks: + context: + - circleci-repo-readonly-authenticated-github-token - fuzz-golang: name: fuzz-golang-<> on_changes: <> diff --git a/justfile b/justfile index c692d1e2a05c3..a06616305dfed 100644 --- a/justfile +++ b/justfile @@ -4,6 +4,10 @@ build-rust-release: cd op-rbuilder && cargo build 
--release -p op-rbuilder --bin op-rbuilder cd rollup-boost && cargo build --release -p rollup-boost --bin rollup-boost +# Checks that locked NUT bundles have not been modified. +check-nut-locks: + go run ./ops/scripts/check-nut-locks + # Checks that TODO comments have corresponding issues. todo-checker: ./ops/scripts/todo-checker.sh diff --git a/op-core/nuts/fork_lock.toml b/op-core/nuts/fork_lock.toml new file mode 100644 index 0000000000000..b1205cb208910 --- /dev/null +++ b/op-core/nuts/fork_lock.toml @@ -0,0 +1,6 @@ +# NUT Bundle Fork Lock +# To update a locked bundle, update both the bundle file and this hash in the same PR. + +[karst] +bundle = "op-node/rollup/derive/karst_nut_bundle.json" +hash = "sha256:b9c610d09ca05ab24ef84ea38e4f563d71401f592f9eff13fa97dac879bee600" diff --git a/op-node/rollup/derive/attributes.go b/op-node/rollup/derive/attributes.go index 22361aeb51977..b3f968056fcb3 100644 --- a/op-node/rollup/derive/attributes.go +++ b/op-node/rollup/derive/attributes.go @@ -9,6 +9,7 @@ import ( "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/params" + "github.com/ethereum-optimism/optimism/op-core/forks" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -153,6 +154,20 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex upgradeTxs = append(upgradeTxs, jovian...) } + // Starting with Karst, upgrade transactions are loaded from a NUT bundle and + // additional gas is allocated to the upgrade block so that upgrade transactions + // don't need to fit within the system tx gas limit. 
+ var upgradeGas uint64 + if ba.rollupCfg.IsKarstActivationBlock(nextL2Time) { + nutTxs, nutGas, err := UpgradeTransactions(forks.Karst) + if err != nil { + return nil, NewCriticalError(fmt.Errorf("failed to build karst network upgrade txs: %w", err)) + } + upgradeTxs = append(upgradeTxs, nutTxs...) + upgradeGas += nutGas + } + + // TODO(#19239): migrate Interop to NUT bundle and add its gas to upgradeGas. if ba.rollupCfg.IsInteropActivationBlock(nextL2Time) { interop, err := InteropNetworkUpgradeTransactions() if err != nil { @@ -192,13 +207,15 @@ func (ba *FetchingAttributesBuilder) PreparePayloadAttributes(ctx context.Contex } } + gasLimit := sysConfig.GasLimit + upgradeGas + r := ð.PayloadAttributes{ Timestamp: hexutil.Uint64(nextL2Time), PrevRandao: eth.Bytes32(l1Info.MixDigest()), SuggestedFeeRecipient: predeploys.SequencerFeeVaultAddr, Transactions: txs, NoTxPool: true, - GasLimit: (*eth.Uint64Quantity)(&sysConfig.GasLimit), + GasLimit: (*eth.Uint64Quantity)(&gasLimit), Withdrawals: withdrawals, ParentBeaconBlockRoot: parentBeaconRoot, } diff --git a/op-node/rollup/derive/karst_nut_bundle.json b/op-node/rollup/derive/karst_nut_bundle.json new file mode 100644 index 0000000000000..6e7a043d73971 --- /dev/null +++ b/op-node/rollup/derive/karst_nut_bundle.json @@ -0,0 +1,6 @@ +{ + "metadata": { + "version": "1.0.0" + }, + "transactions": [] +} diff --git a/op-node/rollup/derive/parse_upgrade_transactions.go b/op-node/rollup/derive/upgrade_transaction.go similarity index 52% rename from op-node/rollup/derive/parse_upgrade_transactions.go rename to op-node/rollup/derive/upgrade_transaction.go index 4793aadc35e7e..5a8282da247fb 100644 --- a/op-node/rollup/derive/parse_upgrade_transactions.go +++ b/op-node/rollup/derive/upgrade_transaction.go @@ -1,6 +1,8 @@ package derive import ( + "bytes" + _ "embed" "encoding/json" "fmt" "io" @@ -12,16 +14,19 @@ import ( "github.com/ethereum/go-ethereum/core/types" ) +//go:embed karst_nut_bundle.json +var karstNUTBundleJSON 
[]byte + // Network Upgrade Transactions (NUTs) are read from a JSON file and // converted into deposit transactions. -// NUTMetadata contains version information for the NUT bundle format. -type NUTMetadata struct { +// nutMetadata contains version information for the NUT bundle format. +type nutMetadata struct { Version string `json:"version"` } -// NetworkUpgradeTransaction defines a single deposit transaction within a NUT bundle. -type NetworkUpgradeTransaction struct { +// networkUpgradeTransaction defines a single deposit transaction within a NUT bundle. +type networkUpgradeTransaction struct { Intent string `json:"intent"` From common.Address `json:"from"` To *common.Address `json:"to"` @@ -29,17 +34,17 @@ type NetworkUpgradeTransaction struct { GasLimit uint64 `json:"gasLimit"` } -// NUTBundle is the top-level structure of a NUT file. -type NUTBundle struct { +// nutBundle is the top-level structure of a NUT file. +type nutBundle struct { ForkName forks.Name `json:"-"` - Metadata NUTMetadata `json:"metadata"` - Transactions []NetworkUpgradeTransaction `json:"transactions"` + Metadata nutMetadata `json:"metadata"` + Transactions []networkUpgradeTransaction `json:"transactions"` } -// ReadNUTBundle reads and parses a NUT bundle from an io.Reader. The fork name +// readNUTBundle reads and parses a NUT bundle from an io.Reader. The fork name // is used to namespace each transaction's intent when deriving source hashes. -func ReadNUTBundle(fork forks.Name, r io.Reader) (*NUTBundle, error) { - var bundle NUTBundle +func readNUTBundle(fork forks.Name, r io.Reader) (*nutBundle, error) { + var bundle nutBundle if err := json.NewDecoder(r).Decode(&bundle); err != nil { return nil, fmt.Errorf("failed to parse NUT bundle: %w", err) } @@ -47,8 +52,17 @@ func ReadNUTBundle(fork forks.Name, r io.Reader) (*NUTBundle, error) { return &bundle, nil } -// ToDepositTransactions converts the bundle's transactions into serialized deposit transactions. 
-func (b *NUTBundle) ToDepositTransactions() ([]hexutil.Bytes, error) { +// totalGas returns the sum of gas limits across all transactions in the bundle. +func (b *nutBundle) totalGas() uint64 { + var total uint64 + for _, tx := range b.Transactions { + total += tx.GasLimit + } + return total +} + +// toDepositTransactions converts the bundle's transactions into serialized deposit transactions. +func (b *nutBundle) toDepositTransactions() ([]hexutil.Bytes, error) { txs := make([]hexutil.Bytes, 0, len(b.Transactions)) for i, nutTx := range b.Transactions { if nutTx.Intent == "" { @@ -76,3 +90,27 @@ func (b *NUTBundle) ToDepositTransactions() ([]hexutil.Bytes, error) { } return txs, nil } + +// UpgradeTransactions returns the deposit transactions and total gas required for a +// fork's NUT bundle. The fork name selects the embedded bundle JSON. +func UpgradeTransactions(fork forks.Name) ([]hexutil.Bytes, uint64, error) { + var bundleJSON []byte + switch fork { + case forks.Karst: + bundleJSON = karstNUTBundleJSON + default: + return nil, 0, fmt.Errorf("no NUT bundle for fork %s", fork) + } + + bundle, err := readNUTBundle(fork, bytes.NewReader(bundleJSON)) + if err != nil { + return nil, 0, fmt.Errorf("reading %s NUT bundle: %w", fork, err) + } + + txs, err := bundle.toDepositTransactions() + if err != nil { + return nil, 0, fmt.Errorf("converting %s NUT bundle to deposit txs: %w", fork, err) + } + + return txs, bundle.totalGas(), nil +} diff --git a/op-node/rollup/derive/parse_upgrade_transactions_test.go b/op-node/rollup/derive/upgrade_transaction_test.go similarity index 76% rename from op-node/rollup/derive/parse_upgrade_transactions_test.go rename to op-node/rollup/derive/upgrade_transaction_test.go index cc8d70a97af72..cf3f40d42e1d7 100644 --- a/op-node/rollup/derive/parse_upgrade_transactions_test.go +++ b/op-node/rollup/derive/upgrade_transaction_test.go @@ -16,7 +16,7 @@ func TestReadNUTBundle(t *testing.T) { require.NoError(t, err) defer f.Close() - bundle, 
err := ReadNUTBundle("Test", f) + bundle, err := readNUTBundle("Test", f) require.NoError(t, err) require.Equal(t, forks.Name("Test"), bundle.ForkName) @@ -45,10 +45,10 @@ func TestNUTBundleToDepositTransactions(t *testing.T) { require.NoError(t, err) defer f.Close() - bundle, err := ReadNUTBundle("Test", f) + bundle, err := readNUTBundle("Test", f) require.NoError(t, err) - txs, err := bundle.ToDepositTransactions() + txs, err := bundle.toDepositTransactions() require.NoError(t, err) require.Len(t, txs, 2) @@ -75,7 +75,7 @@ func TestNUTBundleToDepositTransactions(t *testing.T) { } func TestReadNUTBundleInvalidJSON(t *testing.T) { - _, err := ReadNUTBundle("Test", bytes.NewReader([]byte(`{invalid`))) + _, err := readNUTBundle("Test", bytes.NewReader([]byte(`{invalid`))) require.Error(t, err) require.Contains(t, err.Error(), "failed to parse NUT bundle") } @@ -91,14 +91,42 @@ func TestNUTBundleMissingIntent(t *testing.T) { }] }`) - bundle, err := ReadNUTBundle("Test", bytes.NewReader(jsonData)) + bundle, err := readNUTBundle("Test", bytes.NewReader(jsonData)) require.NoError(t, err) - _, err = bundle.ToDepositTransactions() + _, err = bundle.toDepositTransactions() require.Error(t, err) require.Contains(t, err.Error(), "missing intent") } +func TestNUTBundleTotalGas(t *testing.T) { + f, err := os.Open("testdata/test-nut.json") + require.NoError(t, err) + defer f.Close() + + bundle, err := readNUTBundle("Test", f) + require.NoError(t, err) + + txs, err := bundle.toDepositTransactions() + require.NoError(t, err) + require.Len(t, txs, 2) + require.Equal(t, uint64(1_000_000+5_000_000), bundle.totalGas()) + + // Verify gas matches sum of individual deposit tx gas limits + var sumGas uint64 + for _, tx := range txs { + _, dep := toDepositTxn(t, tx) + sumGas += dep.Gas() + } + require.Equal(t, bundle.totalGas(), sumGas) +} + +func TestUpgradeTransactionsUnknownFork(t *testing.T) { + _, _, err := UpgradeTransactions("UnknownFork") + require.Error(t, err) + 
require.Contains(t, err.Error(), "no NUT bundle for fork") +} + // TestNUTBundleNullTo verifies that "to": null in JSON produces a contract creation (deploy) transaction. // Although NUTs are expected to use Arachnid's deterministic deployer, this sending to null // is how previous deployments have been handled and is useful to maintain going forward. @@ -114,11 +142,11 @@ func TestNUTBundleNullTo(t *testing.T) { }] }`) - bundle, err := ReadNUTBundle("Test", bytes.NewReader(jsonData)) + bundle, err := readNUTBundle("Test", bytes.NewReader(jsonData)) require.NoError(t, err) require.Nil(t, bundle.Transactions[0].To) - txs, err := bundle.ToDepositTransactions() + txs, err := bundle.toDepositTransactions() require.NoError(t, err) _, dep := toDepositTxn(t, txs[0]) diff --git a/op-node/rollup/sequencing/sequencer.go b/op-node/rollup/sequencing/sequencer.go index 5cc4afd93afe0..5e8d9765b96ee 100644 --- a/op-node/rollup/sequencing/sequencer.go +++ b/op-node/rollup/sequencing/sequencer.go @@ -587,6 +587,12 @@ func (d *Sequencer) startBuildingBlock() { d.log.Info("Sequencing Jovian upgrade block") } + // For the Karst activation block we must not include any sequencer transactions. + if d.rollupCfg.IsKarstActivationBlock(uint64(attrs.Timestamp)) { + attrs.NoTxPool = true + d.log.Info("Sequencing Karst upgrade block") + } + // For the Interop activation block we must not include any sequencer transactions. 
if d.rollupCfg.IsInteropActivationBlock(uint64(attrs.Timestamp)) { attrs.NoTxPool = true diff --git a/ops/scripts/check-nut-locks/main.go b/ops/scripts/check-nut-locks/main.go new file mode 100644 index 0000000000000..76540a2d1d403 --- /dev/null +++ b/ops/scripts/check-nut-locks/main.go @@ -0,0 +1,102 @@ +package main + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/BurntSushi/toml" + + opservice "github.com/ethereum-optimism/optimism/op-service" +) + +// nutBundleGlobs are the locations where NUT bundle JSON files may live. +// Update this list when adding new bundle locations. +var nutBundleGlobs = []string{ + "op-node/rollup/derive/*_nut_bundle.json", + "op-core/nuts/*_nut_bundle.json", +} + +// checkAllBundlesLocked searches known paths for *_nut_bundle.json files and +// verifies each has a corresponding entry in fork_lock.toml. +func checkAllBundlesLocked(root string, lockedBundles map[string]bool) error { + for _, pattern := range nutBundleGlobs { + matches, err := filepath.Glob(filepath.Join(root, pattern)) + if err != nil { + return fmt.Errorf("globbing %s: %w", pattern, err) + } + for _, match := range matches { + rel, err := filepath.Rel(root, match) + if err != nil { + return err + } + if !lockedBundles[rel] { + return fmt.Errorf( + "NUT bundle %s has no entry in op-core/nuts/fork_lock.toml", + rel, + ) + } + } + } + return nil +} + +type forkLockEntry struct { + Bundle string `toml:"bundle"` + Hash string `toml:"hash"` +} + +func main() { + if err := run("."); err != nil { + fmt.Fprintf(os.Stderr, "error: %v\n", err) + os.Exit(1) + } +} + +func run(dir string) error { + root, err := opservice.FindMonorepoRoot(dir) + if err != nil { + return fmt.Errorf("finding monorepo root: %w", err) + } + + lockPath := filepath.Join(root, "op-core", "nuts", "fork_lock.toml") + var locks map[string]forkLockEntry + if _, err := toml.DecodeFile(lockPath, &locks); err != nil { + return fmt.Errorf("reading fork 
lock file: %w", err) + } + + lockedBundles := make(map[string]bool) + for fork, entry := range locks { + lockedBundles[entry.Bundle] = true + + bundlePath := filepath.Join(root, entry.Bundle) + content, err := os.ReadFile(bundlePath) + if err != nil { + return fmt.Errorf("fork %s: reading bundle %s: %w", fork, entry.Bundle, err) + } + + hash := sha256.Sum256(content) + actual := "sha256:" + hex.EncodeToString(hash[:]) + + locked := strings.TrimSpace(entry.Hash) + if actual != locked { + return fmt.Errorf( + "bundle hash mismatch for fork %s: locked=%s actual=%s. "+ + "If this change is intentional, update the hash in op-core/nuts/fork_lock.toml", + fork, locked, actual, + ) + } + + fmt.Printf("fork %s: bundle hash OK\n", fork) + } + + // Reverse check: verify all NUT bundle JSONs have a lock entry + if err := checkAllBundlesLocked(root, lockedBundles); err != nil { + return err + } + + return nil +} From 501451f7099f0e72206cf9d02893a212b00d7271 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 3 Mar 2026 20:02:27 +1000 Subject: [PATCH 041/201] op-acceptance-tests: add TestSupernodeInteropActivationAfterGenesis to flake-shake gate (#19350) Registers the test in the flake-shake quarantine gate so it can accumulate stability data before being promoted to the supernode-interop gate. 
Co-authored-by: Claude Sonnet 4.6 --- op-acceptance-tests/acceptance-tests.yaml | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/op-acceptance-tests/acceptance-tests.yaml b/op-acceptance-tests/acceptance-tests.yaml index 0c4652fea0502..b3a503dcb056a 100644 --- a/op-acceptance-tests/acceptance-tests.yaml +++ b/op-acceptance-tests/acceptance-tests.yaml @@ -55,6 +55,12 @@ gates: metadata: owner: "anton evangelatov" target_gate: "depreqres" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/supernode/interop/activation + name: TestSupernodeInteropActivationAfterGenesis + timeout: 10m + metadata: + owner: "adrian sutton" + target_gate: "supernode-interop" - id: isthmus description: "Isthmus network tests." @@ -172,5 +178,5 @@ gates: - supernode description: "Supernode interop tests - tests for supernode's cross-chain message verification." tests: - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/supernode/interop + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/supernode/interop/... timeout: 15m From 991d66a42646d173fb2e335bff0e3bb0158e65df Mon Sep 17 00:00:00 2001 From: George Knee Date: Tue, 3 Mar 2026 18:37:10 +0000 Subject: [PATCH 042/201] Fix stuck pause state causing shutdown hang in chain container (#19365) Add stop/cancellation check in Start() loop while paused to prevent infinite spinning when RewindEngine exits before Resume is called. Add deferred Resume() call in RewindEngine to ensure the container is always unpaused on return, even on context cancellation or errors. 
--- .../supernode/chain_container/chain_container.go | 11 +++++++++++ .../supernode/chain_container/chain_container_test.go | 5 +++-- 2 files changed, 14 insertions(+), 2 deletions(-) diff --git a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index adf6b008a3fc0..e5c1308e23d00 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -200,6 +200,13 @@ func (c *simpleChainContainer) Start(ctx context.Context) error { // Pass in the chain container as a SuperAuthority c.vn = c.virtualNodeFactory(c.vncfg, c.log, c.initOverload, c.appVersion, c) if c.pause.Load() { + // Check for stop/cancellation even while paused, so teardown doesn't hang. + // Without this, a stuck pause (e.g. from RewindEngine exiting before Resume) + // causes this loop to spin forever, blocking wg.Wait() in Supernode.Stop(). + if c.stop.Load() || ctx.Err() != nil { + c.log.Info("chain container stop requested while paused, stopping restart loop") + break + } c.log.Info("chain container paused") time.Sleep(1 * time.Second) continue @@ -490,6 +497,10 @@ func (c *simpleChainContainer) RewindEngine(ctx context.Context, timestamp uint6 if err != nil { return err } + // Always resume the container on return, even if we exit early due to context cancellation + // or an error mid-rewind. Without this, a cancelled ctx leaves pause=true permanently, + // causing the Start() loop to spin forever and block Supernode.Stop()'s wg.Wait(). 
+ defer c.Resume(context.Background()) //nolint:errcheck c.log.Info("chain_container/RewindEngine: paused container") // stop the vn diff --git a/op-supernode/supernode/chain_container/chain_container_test.go b/op-supernode/supernode/chain_container/chain_container_test.go index 293e2ae4be60c..72f2458fb35e5 100644 --- a/op-supernode/supernode/chain_container/chain_container_test.go +++ b/op-supernode/supernode/chain_container/chain_container_test.go @@ -660,8 +660,9 @@ func TestChainContainer_RewindEngine(t *testing.T) { // Verify RewindToTimestamp was called multiple times (retry attempts) require.Greater(t, mockEngine.rewindToTimestampCalled, 1, "RewindToTimestamp should be retried at least once") - // Container should still be paused since rewind failed - require.True(t, c.pause.Load(), "Container should remain paused after failed rewind") + // Container should be resumed even after a failed rewind, so the Start() loop + // can detect the stop flag and exit cleanly instead of spinning forever. + require.False(t, c.pause.Load(), "Container should be resumed (not stuck paused) after failed rewind") }) t.Run("does not retry critical errors", func(t *testing.T) { From 5161204097d10ff8888d4e793506b0db818c7517 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Tue, 3 Mar 2026 16:02:28 -0500 Subject: [PATCH 043/201] contracts: add documentation for audit findings #2, #3, #7, #12, #15, #16 (#19271) Add missing @param blueprint NatSpec to OpcmContractRef struct (#2). Add comments about pause blocking interop upgrades (#3). Document migrate() scope limitations and re-migration risks (#7, #15). Update PERMIT_ALL_CONTRACTS_INSTRUCTION comment (#12). Document intentional use of chainSystemConfigs[0] for shared contracts (#16). 
Co-authored-by: Claude Opus 4.6 --- .../contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol | 7 ++++--- packages/contracts-bedrock/snapshots/semver-lock.json | 4 ++-- .../src/L1/opcm/OPContractsManagerMigrator.sol | 9 +++++++++ .../src/L1/opcm/OPContractsManagerV2.sol | 8 ++++++-- packages/contracts-bedrock/src/libraries/Constants.sol | 2 +- 5 files changed, 22 insertions(+), 8 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol index b727cd26aa469..8677c9cb2e34f 100644 --- a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol @@ -108,9 +108,10 @@ contract VerifyOPCM is Script { uint256 constant MAX_INIT_CODE_SIZE = 23500; /// @notice Represents a contract name and its corresponding address. - /// @param field Name of the field the address was extracted from. - /// @param name Name of the contract. - /// @param addr Address of the contract. + /// @param field Name of the field the address was extracted from. + /// @param name Name of the contract. + /// @param addr Address of the contract. + /// @param blueprint Whether the contract is a blueprint deployment. 
struct OpcmContractRef { string field; string name; diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 13f1d6e8ba0f5..8f20b28f9c5e8 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -52,8 +52,8 @@ "sourceCodeHash": "0xb3184aa5d95a82109e7134d1f61941b30e25f655b9849a0e303d04bbce0cde0b" }, "src/L1/opcm/OPContractsManagerV2.sol:OPContractsManagerV2": { - "initCodeHash": "0x5cbc998e57035d8658824e16dacaab8c702f9e18f482e16989b9420e5a7e8190", - "sourceCodeHash": "0x11678225efb1fb4593085febd8f438eeb4752c0ab3dfd2ee1c4fe47970dda953" + "initCodeHash": "0x88ada0dfefb77eea33baaf11d9b5a5ad51cb8c6476611d0f2376897413074619", + "sourceCodeHash": "0x1cc9dbcd4c7652f482c43e2630b324d088e825d12532711a41c636e8392636b3" }, "src/L2/BaseFeeVault.sol:BaseFeeVault": { "initCodeHash": "0x838bbd7f381e84e21887f72bd1da605bfc4588b3c39aed96cbce67c09335b3ee", diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol index a156638c31a0d..289cdbd731292 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol @@ -63,6 +63,11 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { /// temporary need to support the interop migration action. It will likely be removed in /// the near future once interop support is baked more directly into OPCM. It does NOT /// look or function like all of the other functions in OPCMv2. + /// @dev NOTE: This function is designed exclusively for the case of N independent pre-interop + /// chains merging into a single interop set. It does NOT support partial migration (i.e., + /// migrating a subset of chains that share a lockbox), re-migration of already-migrated + /// chains, or any other migration scenario. 
Re-calling this function on already-migrated + /// portals will corrupt the shared DisputeGameFactory used by all migrated chains. /// @param _input The input parameters for the migration. function migrate(MigrateInput calldata _input) public { // Check that the starting respected game type is a valid super game type. @@ -156,6 +161,10 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { IOPContractsManagerContainer.Implementations memory impls = contractsContainer().implementations(); // Initialize the new ETHLockbox. + // NOTE: Shared contracts (ETHLockbox, AnchorStateRegistry, DelayedWETH) are + // intentionally initialized with chainSystemConfigs[0]. All chains are validated to + // share the same ProxyAdmin owner and SuperchainConfig, so the first chain's + // SystemConfig is used as the canonical governance reference for shared contracts. _upgrade( proxyDeployArgs.proxyAdmin, address(ethLockbox), diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol index c17aa044d2346..55c15c74117c9 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol @@ -147,9 +147,9 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// - Major bump: New required sequential upgrade /// - Minor bump: Replacement OPCM for same upgrade /// - Patch bump: Development changes (expected for normal dev work) - /// @custom:semver 7.0.8 + /// @custom:semver 7.0.9 function version() public pure returns (string memory) { - return "7.0.8"; + return "7.0.9"; } /// @param _standardValidator The standard validator for this OPCM release. @@ -765,6 +765,10 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { // ETHLockbox contract. if (isDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP)) { // If we haven't already enabled the ETHLockbox, enable it. 
+ // NOTE: setFeature will revert if the system is currently paused because toggling the + // lockbox changes the pause identifier. This means a guardian pause will block upgrades + // that enable interop. This is acceptable for now since interop is a dev feature and is + // not yet production-ready. if (!_cts.systemConfig.isFeatureEnabled(Features.ETH_LOCKBOX)) { _cts.systemConfig.setFeature(Features.ETH_LOCKBOX, true); } diff --git a/packages/contracts-bedrock/src/libraries/Constants.sol b/packages/contracts-bedrock/src/libraries/Constants.sol index 820a90d2a237a..9627f48b22913 100644 --- a/packages/contracts-bedrock/src/libraries/Constants.sol +++ b/packages/contracts-bedrock/src/libraries/Constants.sol @@ -51,7 +51,7 @@ library Constants { string internal constant PERMITTED_PROXY_DEPLOYMENT_KEY = "PermittedProxyDeployment"; /// @notice Special constant value for the PermittedProxyDeployment instruction to permit all - /// contracts to be deployed. Only to be used for deployments. + /// contracts to be deployed. Used for both initial deployments and migrations. bytes internal constant PERMIT_ALL_CONTRACTS_INSTRUCTION = bytes("ALL"); /// @notice The minimum OPCM version considered to support OPCM v2. From a6310c80fec05050a764a8848b43fa4dd47a03a8 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Tue, 3 Mar 2026 16:59:52 -0500 Subject: [PATCH 044/201] fix(contracts): require interop dev feature for migrate (Finding 21) (#19285) * wip: require interop dev feature for migrate * fix(contracts): require interop dev feature for migrate (Finding 21) Add ABI snapshot for new OPContractsManagerMigrator_InteropNotEnabled error, regenerated by `just pr`. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .../L1/opcm/IOPContractsManagerMigrator.sol | 3 +++ .../abi/OPContractsManagerMigrator.json | 5 +++++ .../src/L1/opcm/OPContractsManagerMigrator.sol | 9 +++++++++ .../test/L1/opcm/OPContractsManagerV2.t.sol | 18 ++++++++++++++++++ 4 files changed, 35 insertions(+) diff --git a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerMigrator.sol b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerMigrator.sol index 240725ac59638..18af548df9c1d 100644 --- a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerMigrator.sol +++ b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerMigrator.sol @@ -28,6 +28,9 @@ interface IOPContractsManagerMigrator { /// @notice Thrown when the starting respected game type is not a valid super game type. error OPContractsManagerMigrator_InvalidStartingRespectedGameType(); + /// @notice Thrown when the OPTIMISM_PORTAL_INTEROP dev feature is not enabled. + error OPContractsManagerMigrator_InteropNotEnabled(); + /// @notice Returns the container of blueprint and implementation contract addresses. 
function contractsContainer() external view returns (IOPContractsManagerContainer); diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerMigrator.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerMigrator.json index 3db5d7481ed73..2bd876ae9c110 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerMigrator.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerMigrator.json @@ -105,6 +105,11 @@ "stateMutability": "view", "type": "function" }, + { + "inputs": [], + "name": "OPContractsManagerMigrator_InteropNotEnabled", + "type": "error" + }, { "inputs": [], "name": "OPContractsManagerMigrator_InvalidStartingRespectedGameType", diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol index 289cdbd731292..28f8d354068d4 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol @@ -5,6 +5,7 @@ pragma solidity 0.8.15; import { OPContractsManagerUtilsCaller } from "src/L1/opcm/OPContractsManagerUtilsCaller.sol"; // Libraries +import { DevFeatures } from "src/libraries/DevFeatures.sol"; import { GameTypes } from "src/dispute/lib/Types.sol"; import { Constants } from "src/libraries/Constants.sol"; import { Features } from "src/libraries/Features.sol"; @@ -46,6 +47,9 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { /// @notice Thrown when the starting respected game type is not a valid super game type. error OPContractsManagerMigrator_InvalidStartingRespectedGameType(); + /// @notice Thrown when the OPTIMISM_PORTAL_INTEROP dev feature is not enabled. + error OPContractsManagerMigrator_InteropNotEnabled(); + /// @param _utils The utility functions for the OPContractsManager. 
constructor(IOPContractsManagerUtils _utils) OPContractsManagerUtilsCaller(_utils) { } @@ -70,6 +74,11 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { /// portals will corrupt the shared DisputeGameFactory used by all migrated chains. /// @param _input The input parameters for the migration. function migrate(MigrateInput calldata _input) public { + // Check that the OPTIMISM_PORTAL_INTEROP dev feature is enabled. + if (!contractsContainer().isDevFeatureEnabled(DevFeatures.OPTIMISM_PORTAL_INTEROP)) { + revert OPContractsManagerMigrator_InteropNotEnabled(); + } + // Check that the starting respected game type is a valid super game type. if ( _input.startingRespectedGameType.raw() != GameTypes.SUPER_CANNON.raw() diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol index ce2ab852cdd25..30a7f95738bf7 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol @@ -26,6 +26,7 @@ import { ISemver } from "interfaces/universal/ISemver.sol"; import { IOPContractsManagerStandardValidator } from "interfaces/L1/IOPContractsManagerStandardValidator.sol"; import { IOPContractsManagerV2 } from "interfaces/L1/opcm/IOPContractsManagerV2.sol"; import { IOPContractsManagerUtils } from "interfaces/L1/opcm/IOPContractsManagerUtils.sol"; +import { IOPContractsManagerContainer } from "interfaces/L1/opcm/IOPContractsManagerContainer.sol"; import { IOPContractsManagerMigrator } from "interfaces/L1/opcm/IOPContractsManagerMigrator.sol"; import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; import { IOptimismPortalInterop } from "interfaces/L1/IOptimismPortalInterop.sol"; @@ -1522,6 +1523,23 @@ contract OPContractsManagerV2_Migrate_Test is OPContractsManagerV2_TestInit { input, 
IOPContractsManagerMigrator.OPContractsManagerMigrator_InvalidStartingRespectedGameType.selector ); } + + /// @notice Tests that the migration function reverts when the OPTIMISM_PORTAL_INTEROP dev + /// feature is not enabled. + function test_migrate_interopNotEnabled_reverts() public { + IOPContractsManagerMigrator.MigrateInput memory input = _getDefaultMigrateInput(); + + // Mock the container's isDevFeatureEnabled to return false for OPTIMISM_PORTAL_INTEROP, + // simulating a container that was deployed without the interop dev feature. + vm.mockCall( + address(opcmV2.contractsContainer()), + abi.encodeCall(IOPContractsManagerContainer.isDevFeatureEnabled, (DevFeatures.OPTIMISM_PORTAL_INTEROP)), + abi.encode(false) + ); + + // Execute the migration, expect revert. + _doMigration(input, IOPContractsManagerMigrator.OPContractsManagerMigrator_InteropNotEnabled.selector); + } } /// @title OPContractsManagerV2_FeatBatchUpgrade_Test From 29acfe5eef25467a21a77ea9e7cf0c9ead2c8fe5 Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Tue, 3 Mar 2026 14:29:27 -0800 Subject: [PATCH 045/201] fix(contracts-bedrock): dedupe unoptimized profile mock logic into Setup (#19349) Extract the duplicated mock logic for DelayedWETH and ETHLockbox proxy implementations into Setup.mockUnoptimizedProxyImplementations(), replacing identical ~40-line blocks in both OPContractsManager.t.sol and OPContractsManagerStandardValidator.t.sol. 
Co-authored-by: Claude Opus 4.6 --- .../test/L1/OPContractsManager.t.sol | 60 +++---------------- .../OPContractsManagerStandardValidator.t.sol | 59 +++--------------- .../contracts-bedrock/test/setup/Setup.sol | 39 ++++++++++++ 3 files changed, 57 insertions(+), 101 deletions(-) diff --git a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol index 5cac9be3b7aa7..5b2246bc739a5 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManager.t.sol @@ -28,8 +28,6 @@ import { DevFeatures } from "src/libraries/DevFeatures.sol"; import { Types as LibTypes } from "src/libraries/Types.sol"; import { Encoding } from "src/libraries/Encoding.sol"; import { Hashing } from "src/libraries/Hashing.sol"; -import { LibString } from "@solady/utils/LibString.sol"; - // Interfaces import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { IOptimismPortal2 } from "interfaces/L1/IOptimismPortal2.sol"; @@ -270,55 +268,15 @@ contract OPContractsManager_Upgrade_Harness is CommonTest { // try to apply to this function call instead. IOPContractsManagerStandardValidator validator = _opcm.opcmStandardValidator(); - // When running fork tests with an unoptimized Foundry profile (e.g., liteci), - // implementation contracts deployed via CREATE2 get different addresses because - // unoptimized bytecode differs from production builds. Most proxies are re-pointed - // to new implementations during the OPCM upgrade, so their getProxyImplementation - // checks pass regardless of optimizer settings. However, DelayedWETH and ETHLockbox - // proxies are NOT re-pointed during the upgrade — they retain the mainnet - // implementations. With optimized builds the CREATE2 addresses match mainnet, but - // with unoptimized builds they diverge. 
Mock getProxyImplementation for these - // proxies so the validator sees the expected implementation addresses. - { - string memory _profile = Config.foundryProfile(); - bool _isOptimizedProfile = LibString.eq(_profile, "default") || LibString.eq(_profile, "ci"); - if (!_isOptimizedProfile) { - IDelayedWETH _cannonWeth = DisputeGames.getGameImplDelayedWeth(disputeGameFactory, GameTypes.CANNON); - if (address(_cannonWeth) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonWeth))), - abi.encode(validator.delayedWETHImpl()) - ); - } - IDelayedWETH _permissionedWeth = - DisputeGames.getGameImplDelayedWeth(disputeGameFactory, GameTypes.PERMISSIONED_CANNON); - if (address(_permissionedWeth) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_permissionedWeth))), - abi.encode(validator.delayedWETHImpl()) - ); - } - IDelayedWETH _cannonKonaWeth = - DisputeGames.getGameImplDelayedWeth(disputeGameFactory, GameTypes.CANNON_KONA); - if (address(_cannonKonaWeth) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonKonaWeth))), - abi.encode(validator.delayedWETHImpl()) - ); - } - IETHLockbox _lockbox = optimismPortal2.ethLockbox(); - if (address(_lockbox) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_lockbox))), - abi.encode(validator.ethLockboxImpl()) - ); - } - } - } + // Mock getProxyImplementation for DelayedWETH and ETHLockbox proxies when running + // with an unoptimized Foundry profile. See Setup.mockUnoptimizedProxyImplementations. 
+ mockUnoptimizedProxyImplementations( + disputeGameFactory, + proxyAdmin, + address(optimismPortal2.ethLockbox()), + validator.delayedWETHImpl(), + validator.ethLockboxImpl() + ); // If the absolute prestate is zero, we will always get a PDDG-40,PLDG-40 error here in the // standard validator. This happens because an absolute prestate of zero means that the diff --git a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol index 92a7c624121d7..50109b8b4838f 100644 --- a/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol +++ b/packages/contracts-bedrock/test/L1/OPContractsManagerStandardValidator.t.sol @@ -5,10 +5,7 @@ pragma solidity 0.8.15; import { CommonTest } from "test/setup/CommonTest.sol"; import { StandardConstants } from "scripts/deploy/StandardConstants.sol"; import { DisputeGames } from "../setup/DisputeGames.sol"; -import { Config } from "scripts/libraries/Config.sol"; - // Libraries -import { LibString } from "@solady/utils/LibString.sol"; import { GameType, Hash } from "src/dispute/lib/LibUDT.sol"; import { GameTypes, Duration, Claim } from "src/dispute/lib/Types.sol"; import { ForgeArtifacts } from "scripts/libraries/ForgeArtifacts.sol"; @@ -191,53 +188,15 @@ abstract contract OPContractsManagerStandardValidator_TestInit is CommonTest { abi.encode(standardValidator.optimismMintableERC20FactoryImpl()) ); - // When running fork tests with an unoptimized Foundry profile (e.g., liteci), - // implementation contracts deployed via CREATE2 get different addresses because - // unoptimized bytecode differs from production builds. Most proxies are re-pointed - // to new implementations during the OPCM upgrade, so their getProxyImplementation - // checks pass regardless of optimizer settings. However, DelayedWETH and ETHLockbox - // proxies are NOT re-pointed during the upgrade — they retain the mainnet - // implementations. 
With optimized builds the CREATE2 addresses match mainnet, but - // with unoptimized builds they diverge. Mock getProxyImplementation for these - // proxies so the validator sees the expected implementation addresses. - { - string memory _profile = Config.foundryProfile(); - bool _isOptimizedProfile = LibString.eq(_profile, "default") || LibString.eq(_profile, "ci"); - if (!_isOptimizedProfile) { - IDelayedWETH _cannonWeth = DisputeGames.getGameImplDelayedWeth(dgf, GameTypes.CANNON); - if (address(_cannonWeth) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonWeth))), - abi.encode(standardValidator.delayedWETHImpl()) - ); - } - IDelayedWETH _permissionedWeth = - DisputeGames.getGameImplDelayedWeth(dgf, GameTypes.PERMISSIONED_CANNON); - if (address(_permissionedWeth) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_permissionedWeth))), - abi.encode(standardValidator.delayedWETHImpl()) - ); - } - IDelayedWETH _cannonKonaWeth = DisputeGames.getGameImplDelayedWeth(dgf, GameTypes.CANNON_KONA); - if (address(_cannonKonaWeth) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(_cannonKonaWeth))), - abi.encode(standardValidator.delayedWETHImpl()) - ); - } - if (address(ethLockbox) != address(0)) { - vm.mockCall( - address(proxyAdmin), - abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(ethLockbox))), - abi.encode(standardValidator.ethLockboxImpl()) - ); - } - } - } + // Mock getProxyImplementation for DelayedWETH and ETHLockbox proxies when running + // with an unoptimized Foundry profile. See Setup.mockUnoptimizedProxyImplementations. 
+ mockUnoptimizedProxyImplementations( + dgf, + proxyAdmin, + address(ethLockbox), + standardValidator.delayedWETHImpl(), + standardValidator.ethLockboxImpl() + ); DisputeGames.mockGameImplChallenger( disputeGameFactory, GameTypes.PERMISSIONED_CANNON, standardValidator.challenger() diff --git a/packages/contracts-bedrock/test/setup/Setup.sol b/packages/contracts-bedrock/test/setup/Setup.sol index 7d9f5b51118f1..b67e7105e33d2 100644 --- a/packages/contracts-bedrock/test/setup/Setup.sol +++ b/packages/contracts-bedrock/test/setup/Setup.sol @@ -6,6 +6,7 @@ import { console2 as console } from "forge-std/console2.sol"; import { Vm } from "forge-std/Vm.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; import { FeatureFlags } from "test/setup/FeatureFlags.sol"; +import { DisputeGames } from "test/setup/DisputeGames.sol"; // Scripts import { Deploy } from "scripts/deploy/Deploy.s.sol"; @@ -18,6 +19,8 @@ import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; import { Config } from "scripts/libraries/Config.sol"; // Libraries +import { GameType } from "src/dispute/lib/LibUDT.sol"; +import { GameTypes } from "src/dispute/lib/Types.sol"; import { Predeploys } from "src/libraries/Predeploys.sol"; import { Preinstalls } from "src/libraries/Preinstalls.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; @@ -227,6 +230,42 @@ abstract contract Setup is FeatureFlags { } } + /// @dev Mocks getProxyImplementation for DelayedWETH and ETHLockbox proxies when running + /// with an unoptimized Foundry profile. These proxies are not re-pointed during OPCM + /// upgrades, so their CREATE2 implementation addresses diverge from mainnet when + /// bytecode differs (unoptimized vs optimized). No-op for optimized profiles. 
+ function mockUnoptimizedProxyImplementations( + IDisputeGameFactory _dgf, + IProxyAdmin _proxyAdmin, + address _ethLockbox, + address _delayedWETHImpl, + address _ethLockboxImpl + ) + internal + { + if (!Config.isUnoptimized()) return; + + GameType[3] memory gameTypes = [GameTypes.CANNON, GameTypes.PERMISSIONED_CANNON, GameTypes.CANNON_KONA]; + for (uint256 i = 0; i < gameTypes.length; i++) { + IDelayedWETH delayedWETHProxy = DisputeGames.getGameImplDelayedWeth(_dgf, gameTypes[i]); + if (address(delayedWETHProxy) != address(0)) { + vm.mockCall( + address(_proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (address(delayedWETHProxy))), + abi.encode(_delayedWETHImpl) + ); + } + } + + if (_ethLockbox != address(0)) { + vm.mockCall( + address(_proxyAdmin), + abi.encodeCall(IProxyAdmin.getProxyImplementation, (_ethLockbox)), + abi.encode(_ethLockboxImpl) + ); + } + } + /// @dev Skips tests when running against a forked production network. function skipIfForkTest(string memory message) public { if (isForkTest()) { From a7c732e222520e906397b663644a55dd40d40b84 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Tue, 3 Mar 2026 17:30:26 -0500 Subject: [PATCH 046/201] chore(contracts): add non-idempotent initializer review rules (#19273) * chore(contracts): add initializer side-effects review rules Add AI review rules for detecting initializer functions with side-effects (loops, mapping writes, external calls) that could be unsafe during contract re-initialization with partial state. Addresses audit finding #20 (ETHLockbox re-initialization footgun). 
Co-Authored-By: Claude Opus 4.6 * Update docs/ai/contract-dev.md Co-authored-by: Maurelian * Update docs/ai/contract-dev.md Co-authored-by: graphite-app[bot] <96075541+graphite-app[bot]@users.noreply.github.com> --------- Co-authored-by: Claude Opus 4.6 Co-authored-by: Maurelian Co-authored-by: graphite-app[bot] <96075541+graphite-app[bot]@users.noreply.github.com> --- docs/ai/contract-dev.md | 38 +++++++++++++++++++++++++++++++++++- ops/ai-eng/graphite/rules.md | 22 +++++++++++++++++++++ 2 files changed, 59 insertions(+), 1 deletion(-) diff --git a/docs/ai/contract-dev.md b/docs/ai/contract-dev.md index 6b9c6265594f2..e76c7c6095d3c 100644 --- a/docs/ai/contract-dev.md +++ b/docs/ai/contract-dev.md @@ -2,4 +2,40 @@ This document provides guidance for AI agents working with smart contracts in the OP Stack. - +## Non-Idempotent Initializers + +When reviewing `initialize()` or `reinitializer` functions, check whether the function is **idempotent** — calling it multiple times with the same arguments should produce the same end state as calling it once. + +### The Risk + +Proxied contracts in the OP Stack can be re-initialized during upgrades (via `reinitializer(version)`). Orchestrators like `OPContractsManagerV2._apply()` call `initialize()` on contracts that may already hold state from a previous initialization. If the initializer is not idempotent, re-initialization can corrupt state. + +**Example**: `ETHLockbox.initialize()` calls `_authorizePortal()` for each portal passed in. Currently safe because `_authorizePortal()` is idempotent — setting `authorizedPortals[portal] = true` twice has the same effect as once. But if someone later added a portal count that increments on each authorization, re-initialization would double-count portals. 
+ +### What Makes an Initializer Non-Idempotent + +- Incrementing counters or nonces +- Appending to arrays (creates duplicates on re-init) +- External calls with lasting side-effects (e.g., minting tokens, sending ETH) +- Operations that depend on prior state (e.g., "add 10 to balance" vs "set balance to 10") + + +### Other Reasons an Initializer may be Unsafe to Re-Run + +- Emitting events that trigger off-chain actions (e.g., indexers that process each event exactly once) +- Overwriting a variable that other contracts or off-chain systems already depend on (e.g., resetting a registry address that live contracts are pointing to, or changing a config value that should be immutable after first init) + +### Rule + +Non-idempotent or unsafe-to-rerun behavior in `initialize()` / `reinitializer` functions is **disallowed** unless the consequences are explicitly acknowledged in a `@notice` comment on the function. The comment must explain why the non-idempotent behavior is safe given how callers use the function. + +Without this comment, the code must not be approved. + +### Review Checklist + +When reviewing changes to `initialize()` or its callers: + +1. **Is every operation in this initializer idempotent?** Assigning a variable to a fixed value is idempotent. Incrementing, appending, or calling external contracts may not be. +2. **Could overwriting any variable be unsafe?** Some values should only be set once — overwriting them during re-initialization could break other contracts or systems that depend on the original value. +3. **Can this contract be re-initialized?** Check for `reinitializer` modifier. If it only uses `initializer` (one-shot), the risk does not apply. +4. **If non-idempotent or unsafe behavior exists, is there a `@notice` comment acknowledging it?** The comment must explain why it's safe. If the comment is missing, flag it as a blocking issue. 
diff --git a/ops/ai-eng/graphite/rules.md b/ops/ai-eng/graphite/rules.md index b491fc13b07ca..e843ef76b693e 100644 --- a/ops/ai-eng/graphite/rules.md +++ b/ops/ai-eng/graphite/rules.md @@ -76,6 +76,28 @@ If the PR changes the Foundry dependency versions, i.e the `forge`, `cast`, and > > For more information on the Foundry version upgrade process, please see the [Foundry version upgrade policy](https://github.com/ethereum-optimism/optimism/blob/develop/packages/contracts-bedrock/book/src/policies/foundry-upgrades.md). +### Non-Idempotent Initializers + +When reviewing changes to `initialize()` or `reinitializer` functions, check whether the function is **idempotent** — calling it multiple times with the same arguments should produce the same end state as calling it once. Proxied contracts can be re-initialized during upgrades, so non-idempotent initializers risk corrupting state. + +**When to flag:** +- An `initialize()` function increments counters, appends to arrays, or performs any operation where repeating it changes the outcome +- An `initialize()` function makes external calls with lasting side-effects (minting, transferring, authorizing in ways that aren't simple overwrites) +- An `initialize()` function overwrites a variable that other contracts or off-chain systems may already depend on +- A change to an existing `initialize()` function introduces non-idempotent or unsafe-to-rerun behavior that wasn't there before + +Non-idempotent or unsafe-to-rerun behavior in initializers is **disallowed** unless explicitly acknowledged with a `@notice` comment explaining why it's safe. If you detect non-idempotent behavior without such a comment, you MUST leave a blocking comment: + +> **Non-Idempotent Initializer — Acknowledgment Required** +> +> This `initialize()` function contains operations that are not idempotent (not safe to call multiple times with the same arguments). 
Since proxied contracts can be re-initialized during upgrades, this is disallowed unless explicitly acknowledged. +> +> Please either: +> 1. Make the operation idempotent, or +> 2. Add a `@notice` comment on the function explaining why the non-idempotent behavior is safe given how callers use it +> +> See `docs/ai/contract-dev.md` for detailed guidance. + ### Storage Layout Mutation Warnings If a PR modifies files in `packages/contracts-bedrock/snapshots/storageLayout/`, you MUST analyze the diff to determine if storage slots are being **mutated** (as opposed to purely added or deleted along with the contract). From 7c54d1e86494a20b2d15f038e5b0b4eef6289742 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 5 Mar 2026 00:24:48 +1000 Subject: [PATCH 047/201] fix(kona/derive): preserve error kind from chain provider in BlobSource (#19357) BlobSource::load_blobs wrapped all errors from chain_provider.block_info_and_transactions_by_hash with BlobProviderError::Backend(e.to_string()).into(), which forced every error to PipelineErrorKind::Temporary regardless of the underlying error kind. This caused a regression: AlloyChainProvider was fixed in ee4d492a87 to emit PipelineErrorKind::Reset for BlockNotFound errors (mapping to ResetError::BlockNotFound). However, the Backend wrapping in BlobSource bypassed that fix, downgrading Reset to Temporary. During an L1 reorg when a blob-bearing block hash disappears, kona would spin retrying instead of triggering a pipeline reset, causing a liveness stall. The Go reference (blob_data_source.go:82-85) explicitly branches on errors.Is(err, ethereum.NotFound) and wraps it as NewResetError. Fix: use .map_err(Into::into) to preserve the error classification from the underlying ChainProvider::Error implementation. This mirrors the pattern already used correctly by CalldataSource. 
Fixes https://github.com/ethereum-optimism/optimism/issues/19354 --- .../protocol/derive/src/sources/blobs.rs | 92 ++++++++++++++++++- 1 file changed, 87 insertions(+), 5 deletions(-) diff --git a/rust/kona/crates/protocol/derive/src/sources/blobs.rs b/rust/kona/crates/protocol/derive/src/sources/blobs.rs index ddd914462ce12..367ffe763292e 100644 --- a/rust/kona/crates/protocol/derive/src/sources/blobs.rs +++ b/rust/kona/crates/protocol/derive/src/sources/blobs.rs @@ -4,7 +4,7 @@ use crate::{ BlobData, BlobProvider, BlobProviderError, ChainProvider, DataAvailabilityProvider, PipelineError, PipelineErrorKind, PipelineResult, }; -use alloc::{boxed::Box, string::ToString, vec::Vec}; +use alloc::{boxed::Box, vec::Vec}; use alloy_consensus::{ Transaction, TxEip4844Variant, TxEnvelope, TxType, transaction::SignerRecoverable, }; @@ -116,10 +116,11 @@ where return Ok(()); } - let info = - self.chain_provider.block_info_and_transactions_by_hash(block_ref.hash).await.map_err( - |e| -> PipelineErrorKind { BlobProviderError::Backend(e.to_string()).into() }, - )?; + let info = self + .chain_provider + .block_info_and_transactions_by_hash(block_ref.hash) + .await + .map_err(Into::into)?; let (mut data, blob_hashes) = self.extract_blob_data(info.1, batcher_address); @@ -405,4 +406,85 @@ pub(crate) mod tests { "expected Reset for missed beacon slot, got {err:?}" ); } + + /// A minimal [`ChainProvider`] that always returns a "block not found" error which maps to + /// [`PipelineErrorKind::Reset`]. Used to verify that [`BlobSource::load_blobs`] preserves + /// the `Reset` kind when the underlying chain provider signals that a block is missing (e.g. + /// after an L1 reorg removes the block whose hash was referenced). + #[derive(Debug, Clone, Default)] + struct BlockNotFoundChainProvider; + + /// Error type for [`BlockNotFoundChainProvider`] that converts to + /// [`PipelineErrorKind::Reset`], matching what `AlloyChainProvider` emits for 404 responses. 
+ #[derive(Debug)] + struct BlockNotFoundError; + + impl core::fmt::Display for BlockNotFoundError { + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!(f, "block not found") + } + } + + impl From for PipelineErrorKind { + fn from(_: BlockNotFoundError) -> Self { + use crate::ResetError; + ResetError::BlockNotFound(alloy_primitives::B256::default().into()).reset() + } + } + + #[async_trait::async_trait] + impl ChainProvider for BlockNotFoundChainProvider { + type Error = BlockNotFoundError; + + async fn header_by_hash( + &mut self, + _: alloy_primitives::B256, + ) -> Result { + Err(BlockNotFoundError) + } + + async fn block_info_by_number( + &mut self, + _: u64, + ) -> Result { + Err(BlockNotFoundError) + } + + async fn receipts_by_hash( + &mut self, + _: alloy_primitives::B256, + ) -> Result, Self::Error> { + Err(BlockNotFoundError) + } + + async fn block_info_and_transactions_by_hash( + &mut self, + _: alloy_primitives::B256, + ) -> Result<(kona_protocol::BlockInfo, alloc::vec::Vec), Self::Error> { + Err(BlockNotFoundError) + } + } + + /// Regression test: when `block_info_and_transactions_by_hash` returns an error that maps to + /// `PipelineErrorKind::Reset` (e.g. because an L1 reorg removed the block), `load_blobs` + /// must propagate the `Reset` kind unchanged. + /// + /// Before the fix, `BlobSource` wrapped every chain-provider error as + /// `BlobProviderError::Backend(e.to_string()).into()`, which unconditionally produces + /// `PipelineErrorKind::Temporary`. The fix uses `.map_err(Into::into)` so the `Reset` kind + /// set by the underlying provider is preserved, allowing the pipeline to recover via reset + /// rather than spinning in a retry loop. 
+ #[tokio::test] + async fn test_load_blobs_block_not_found_triggers_reset() { + let chain_provider = BlockNotFoundChainProvider; + let blob_fetcher = crate::test_utils::TestBlobProvider::default(); + let mut source = BlobSource::new(chain_provider, blob_fetcher, Address::ZERO); + + let err = source.load_blobs(&BlockInfo::default(), Address::ZERO).await.unwrap_err(); + assert!( + matches!(err, PipelineErrorKind::Reset(_)), + "expected Reset when block_info_and_transactions_by_hash returns BlockNotFound, \ + got {err:?}" + ); + } } From 32bbf869ee02c4dffd58ae9dc8ba1e413c2be976 Mon Sep 17 00:00:00 2001 From: Stefano Charissis Date: Wed, 4 Mar 2026 16:37:41 +0100 Subject: [PATCH 048/201] chore(op-acceptor): v3.9.0 (#19368) Adds test duration caching and ordering. --- mise.toml | 2 +- op-acceptance-tests/justfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mise.toml b/mise.toml index 3778a2937199c..6a696749b4777 100644 --- a/mise.toml +++ b/mise.toml @@ -40,7 +40,7 @@ anvil = "1.2.3" codecov-uploader = "0.8.0" goreleaser-pro = "2.11.2" kurtosis = "1.8.1" -op-acceptor = "op-acceptor/v3.8.3" +op-acceptor = "op-acceptor/v3.9.0" git-cliff = "2.12.0" # Fake dependencies diff --git a/op-acceptance-tests/justfile b/op-acceptance-tests/justfile index 14fe5dbbdc689..9492ca0916c50 100644 --- a/op-acceptance-tests/justfile +++ b/op-acceptance-tests/justfile @@ -1,6 +1,6 @@ REPO_ROOT := `realpath ..` # path to the root of the optimism monorepo KURTOSIS_DIR := REPO_ROOT + "/kurtosis-devnet" -ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.8.3") +ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.9.0") DOCKER_REGISTRY := env_var_or_default("DOCKER_REGISTRY", "us-docker.pkg.dev/oplabs-tools-artifacts/images") ACCEPTOR_IMAGE := env_var_or_default("ACCEPTOR_IMAGE", DOCKER_REGISTRY + "/op-acceptor:" + ACCEPTOR_VERSION) From f6656ffd12ae875e9df5710a3c5b7368c5332da1 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Wed, 4 Mar 2026 11:30:27 
-0500 Subject: [PATCH 049/201] refactor(op-devstack): add generic component access to Network/System (Phase 5) (#18876) Implement the "Simplified Network Interface" design by adding a ComponentRegistry interface that provides generic component access. This reduces interface bloat by enabling new component types to be added without requiring new interface methods. Key changes: - Add ComponentRegistry interface with Component(), Components(), ComponentIDs() methods - Embed ComponentRegistry in Network and System interfaces - Add 32 typed free functions (GetL2BatcherByID, GetL2Batchers, etc.) - Add *stack.Registry to shim implementations (presetNetwork, presetL1Network, presetL2Network, presetSystem) - Update all Add* methods to register in both legacy maps and unified registry for backward compatibility - Fix nil map bug in RegisterL2MetricsTargets Existing typed interface methods (L2Batcher, L2Batchers, etc.) continue to work unchanged. Callers can migrate incrementally to the new patterns. 
--- op-devstack/shim/l1_network.go | 4 +++ op-devstack/shim/l2_network.go | 17 ++++++++++- op-devstack/shim/network.go | 31 +++++++++++++++++++ op-devstack/shim/system.go | 40 +++++++++++++++++++++++++ op-devstack/stack/component_registry.go | 24 +++++++++++++++ op-devstack/stack/network.go | 1 + op-devstack/stack/system.go | 1 + 7 files changed, 117 insertions(+), 1 deletion(-) create mode 100644 op-devstack/stack/component_registry.go diff --git a/op-devstack/shim/l1_network.go b/op-devstack/shim/l1_network.go index 57cbb6977262e..78ceae0d98d7b 100644 --- a/op-devstack/shim/l1_network.go +++ b/op-devstack/shim/l1_network.go @@ -46,6 +46,8 @@ func (p *presetL1Network) AddL1ELNode(v stack.L1ELNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l1 EL node %s must be on chain %s", id, p.chainID) p.require().True(p.els.SetIfMissing(id, v), "l1 EL node %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL1ELNodeID(id).ComponentID, v) } func (p *presetL1Network) L1CLNode(m stack.L1CLMatcher) stack.L1CLNode { @@ -58,6 +60,8 @@ func (p *presetL1Network) AddL1CLNode(v stack.L1CLNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l1 CL node %s must be on chain %s", id, p.chainID) p.require().True(p.cls.SetIfMissing(id, v), "l1 CL node %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL1CLNodeID(id).ComponentID, v) } func (p *presetL1Network) L1ELNodeIDs() []stack.L1ELNodeID { diff --git a/op-devstack/shim/l2_network.go b/op-devstack/shim/l2_network.go index 914c35aac6345..3631747298b44 100644 --- a/op-devstack/shim/l2_network.go +++ b/op-devstack/shim/l2_network.go @@ -109,6 +109,8 @@ func (p *presetL2Network) AddL2Batcher(v stack.L2Batcher) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 batcher %s must be on chain %s", id, p.chainID) p.require().True(p.batchers.SetIfMissing(id, v), "l2 batcher %s must not already exist", id) 
+ // Also register in unified registry + p.registry.Register(stack.ConvertL2BatcherID(id).ComponentID, v) } func (p *presetL2Network) Conductor(m stack.ConductorMatcher) stack.Conductor { @@ -120,6 +122,8 @@ func (p *presetL2Network) Conductor(m stack.ConductorMatcher) stack.Conductor { func (p *presetL2Network) AddConductor(v stack.Conductor) { id := v.ID() p.require().True(p.conductors.SetIfMissing(id, v), "conductor %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertConductorID(id).ComponentID, v) } func (p *presetL2Network) L2Proposer(m stack.L2ProposerMatcher) stack.L2Proposer { @@ -132,6 +136,8 @@ func (p *presetL2Network) AddL2Proposer(v stack.L2Proposer) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 proposer %s must be on chain %s", id, p.chainID) p.require().True(p.proposers.SetIfMissing(id, v), "l2 proposer %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL2ProposerID(id).ComponentID, v) } func (p *presetL2Network) L2Challenger(m stack.L2ChallengerMatcher) stack.L2Chal { @@ -142,8 +148,10 @@ func (p *presetL2Network) AddL2Challenger(v stack.L2Challenger) { id := v.ID() p.require().True(p.challengers.SetIfMissing(id, v), "l2 challenger %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL2ChallengerID(id).ComponentID, v) } func (p *presetL2Network) L2CLNode(m stack.L2CLMatcher) stack.L2CLNode { @@ -156,6 +163,8 @@ func (p *presetL2Network) AddL2CLNode(v stack.L2CLNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 CL node %s must be on chain %s", id, p.chainID) p.require().True(p.cls.SetIfMissing(id, v), "l2 CL node %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL2CLNodeID(id).ComponentID, v) } func (p 
*presetL2Network) L2ELNode(m stack.L2ELMatcher) stack.L2ELNode { @@ -168,6 +177,8 @@ func (p *presetL2Network) AddL2ELNode(v stack.L2ELNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 EL node %s must be on chain %s", id, p.chainID) p.require().True(p.els.SetIfMissing(id, v), "l2 EL node %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL2ELNodeID(id).ComponentID, v) } func (p *presetL2Network) L2BatcherIDs() []stack.L2BatcherID { @@ -225,11 +236,15 @@ func (p *presetL2Network) OPRBuilderNodes() []stack.OPRBuilderNode { func (p *presetL2Network) AddRollupBoostNode(v stack.RollupBoostNode) { id := v.ID() p.require().True(p.rollupBoostNodes.SetIfMissing(id, v), "rollup boost node %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertRollupBoostNodeID(id).ComponentID, v) } func (p *presetL2Network) AddOPRBuilderNode(v stack.OPRBuilderNode) { id := v.ID() p.require().True(p.oprBuilderNodes.SetIfMissing(id, v), "OPR builder node %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertOPRBuilderNodeID(id).ComponentID, v) } func (p *presetL2Network) OPRBuilderNode(m stack.OPRBuilderNodeMatcher) stack.OPRBuilderNode { diff --git a/op-devstack/shim/network.go b/op-devstack/shim/network.go index 80f8a7cfbe3a4..c90ac2b6efee9 100644 --- a/op-devstack/shim/network.go +++ b/op-devstack/shim/network.go @@ -18,6 +18,11 @@ type presetNetwork struct { chainCfg *params.ChainConfig chainID eth.ChainID + // Unified component registry for generic access + registry *stack.Registry + + // Legacy typed maps - kept for backward compatibility during migration + // These will be removed once all callers migrate to generic access faucets locks.RWMap[stack.FaucetID, stack.Faucet] syncTesters locks.RWMap[stack.SyncTesterID, stack.SyncTester] } @@ -30,9 +35,31 @@ func newNetwork(cfg NetworkConfig) presetNetwork { 
commonImpl: newCommon(cfg.CommonConfig), chainCfg: cfg.ChainConfig, chainID: eth.ChainIDFromBig(cfg.ChainConfig.ChainID), + registry: stack.NewRegistry(), } } +// --- ComponentRegistry interface implementation --- + +func (p *presetNetwork) Component(id stack.ComponentID) (any, bool) { + return p.registry.Get(id) +} + +func (p *presetNetwork) Components(kind stack.ComponentKind) []any { + ids := p.registry.IDsByKind(kind) + result := make([]any, 0, len(ids)) + for _, id := range ids { + if comp, ok := p.registry.Get(id); ok { + result = append(result, comp) + } + } + return result +} + +func (p *presetNetwork) ComponentIDs(kind stack.ComponentKind) []stack.ComponentID { + return p.registry.IDsByKind(kind) +} + func (p *presetNetwork) ChainID() eth.ChainID { return p.chainID } @@ -59,6 +86,8 @@ func (p *presetNetwork) AddFaucet(v stack.Faucet) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "faucet %s must be on chain %s", id, p.chainID) p.require().True(p.faucets.SetIfMissing(id, v), "faucet %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertFaucetID(id).ComponentID, v) } func (p *presetNetwork) SyncTesterIDs() []stack.SyncTesterID { @@ -79,4 +108,6 @@ func (p *presetNetwork) AddSyncTester(v stack.SyncTester) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "sync tester %s must be on chain %s", id, p.chainID) p.require().True(p.syncTesters.SetIfMissing(id, v), "sync tester %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertSyncTesterID(id).ComponentID, v) } diff --git a/op-devstack/shim/system.go b/op-devstack/shim/system.go index abe3d4db91b69..71996c5beb074 100644 --- a/op-devstack/shim/system.go +++ b/op-devstack/shim/system.go @@ -21,6 +21,10 @@ type presetSystem struct { // timeTravelClock is the clock used to control time. 
nil if time travel is not enabled timeTravelClock stack.TimeTravelClock + // Unified component registry for generic access + registry *stack.Registry + + // Legacy typed maps - kept for backward compatibility during migration superchains locks.RWMap[stack.SuperchainID, stack.Superchain] clusters locks.RWMap[stack.ClusterID, stack.Cluster] @@ -44,9 +48,31 @@ var _ stack.ExtensibleSystem = (*presetSystem)(nil) func NewSystem(t devtest.T) stack.ExtensibleSystem { return &presetSystem{ commonImpl: newCommon(NewCommonConfig(t)), + registry: stack.NewRegistry(), } } +// --- ComponentRegistry interface implementation --- + +func (p *presetSystem) Component(id stack.ComponentID) (any, bool) { + return p.registry.Get(id) +} + +func (p *presetSystem) Components(kind stack.ComponentKind) []any { + ids := p.registry.IDsByKind(kind) + result := make([]any, 0, len(ids)) + for _, id := range ids { + if comp, ok := p.registry.Get(id); ok { + result = append(result, comp) + } + } + return result +} + +func (p *presetSystem) ComponentIDs(kind stack.ComponentKind) []stack.ComponentID { + return p.registry.IDsByKind(kind) +} + func (p *presetSystem) Superchain(m stack.SuperchainMatcher) stack.Superchain { v, ok := findMatch(m, p.superchains.Get, p.Superchains) p.require().True(ok, "must find superchain %s", m) @@ -55,6 +81,8 @@ func (p *presetSystem) Superchain(m stack.SuperchainMatcher) stack.Superchain { func (p *presetSystem) AddSuperchain(v stack.Superchain) { p.require().True(p.superchains.SetIfMissing(v.ID(), v), "superchain %s must not already exist", v.ID()) + // Also register in unified registry + p.registry.Register(stack.ConvertSuperchainID(v.ID()).ComponentID, v) } func (p *presetSystem) Cluster(m stack.ClusterMatcher) stack.Cluster { @@ -65,6 +93,8 @@ func (p *presetSystem) Cluster(m stack.ClusterMatcher) stack.Cluster { func (p *presetSystem) AddCluster(v stack.Cluster) { p.require().True(p.clusters.SetIfMissing(v.ID(), v), "cluster %s must not already exist", v.ID()) + 
// Also register in unified registry + p.registry.Register(stack.ConvertClusterID(v.ID()).ComponentID, v) } func (p *presetSystem) Network(id eth.ChainID) stack.Network { @@ -88,6 +118,8 @@ func (p *presetSystem) AddL1Network(v stack.L1Network) { id := v.ID() p.require().True(p.networks.SetIfMissing(id.ChainID(), v), "chain with id %s must not already exist", id.ChainID()) p.require().True(p.l1Networks.SetIfMissing(id, v), "L1 chain %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL1NetworkID(id).ComponentID, v) } func (p *presetSystem) L2Network(m stack.L2NetworkMatcher) stack.L2Network { @@ -100,6 +132,8 @@ func (p *presetSystem) AddL2Network(v stack.L2Network) { id := v.ID() p.require().True(p.networks.SetIfMissing(id.ChainID(), v), "chain with id %s must not already exist", id.ChainID()) p.require().True(p.l2Networks.SetIfMissing(id, v), "L2 chain %s must not already exist", id) + // Also register in unified registry + p.registry.Register(stack.ConvertL2NetworkID(id).ComponentID, v) } func (p *presetSystem) Supervisor(m stack.SupervisorMatcher) stack.Supervisor { @@ -110,6 +144,8 @@ func (p *presetSystem) Supervisor(m stack.SupervisorMatcher) stack.Supervisor { func (p *presetSystem) AddSupervisor(v stack.Supervisor) { p.require().True(p.supervisors.SetIfMissing(v.ID(), v), "supervisor %s must not already exist", v.ID()) + // Also register in unified registry + p.registry.Register(stack.ConvertSupervisorID(v.ID()).ComponentID, v) } func (p *presetSystem) Supernode(m stack.SupernodeMatcher) stack.Supernode { @@ -130,10 +166,14 @@ func (p *presetSystem) TestSequencer(m stack.TestSequencerMatcher) stack.TestSeq func (p *presetSystem) AddTestSequencer(v stack.TestSequencer) { p.require().True(p.sequencers.SetIfMissing(v.ID(), v), "sequencer %s must not already exist", v.ID()) + // Also register in unified registry + p.registry.Register(stack.ConvertTestSequencerID(v.ID()).ComponentID, v) } func (p 
*presetSystem) AddSyncTester(v stack.SyncTester) { p.require().True(p.syncTesters.SetIfMissing(v.ID(), v), "sync tester %s must not already exist", v.ID()) + // Also register in unified registry + p.registry.Register(stack.ConvertSyncTesterID(v.ID()).ComponentID, v) } func (p *presetSystem) SuperchainIDs() []stack.SuperchainID { diff --git a/op-devstack/stack/component_registry.go b/op-devstack/stack/component_registry.go new file mode 100644 index 0000000000000..8bb9c43d7b643 --- /dev/null +++ b/op-devstack/stack/component_registry.go @@ -0,0 +1,24 @@ +package stack + +// ComponentRegistry provides generic component access for systems and networks. +// This interface enables unified component lookup regardless of component type, +// reducing the need for type-specific getter methods on container interfaces. +// +// Components are stored by ComponentID and can be queried by: +// - Exact ID match (Component) +// - Kind (Components, ComponentIDs) +// +// Implementations should use the Registry type internally for storage. +type ComponentRegistry interface { + // Component returns a component by its ID. + // Returns (component, true) if found, (nil, false) otherwise. + Component(id ComponentID) (any, bool) + + // Components returns all components of a given kind. + // Returns an empty slice if no components of that kind exist. + Components(kind ComponentKind) []any + + // ComponentIDs returns all component IDs of a given kind. + // Returns an empty slice if no components of that kind exist. + ComponentIDs(kind ComponentKind) []ComponentID +} diff --git a/op-devstack/stack/network.go b/op-devstack/stack/network.go index 8342f144eb19b..66144f933bb14 100644 --- a/op-devstack/stack/network.go +++ b/op-devstack/stack/network.go @@ -11,6 +11,7 @@ import ( // A network hosts configuration resources and tracks participating nodes. 
type Network interface { Common + ComponentRegistry ChainID() eth.ChainID diff --git a/op-devstack/stack/system.go b/op-devstack/stack/system.go index acc49043e0fe4..c4cd7acbe0056 100644 --- a/op-devstack/stack/system.go +++ b/op-devstack/stack/system.go @@ -9,6 +9,7 @@ import ( // System represents a collection of L1 and L2 chains, any superchains or clusters, and any peripherals. type System interface { Common + ComponentRegistry Superchain(m SuperchainMatcher) Superchain Cluster(m ClusterMatcher) Cluster From 2534f430bb1f4236ad292cdc18908510df406db1 Mon Sep 17 00:00:00 2001 From: Axel Kingsley Date: Wed, 4 Mar 2026 15:41:16 -0600 Subject: [PATCH 050/201] op-node: copy LocalSafeL2 in follow source mode (#19330) * op-node: copy LocalSafeL2 in follow source mode When op-node runs in light-node/follow-source mode, it follows an upstream node's sync status. Previously, FollowClient.GetFollowStatus() only copied SafeL2, FinalizedL2, and CurrentL1 - but not LocalSafeL2. The FollowSource() function was incorrectly using SafeL2 (cross-safe) for updating the local-safe head, when it should use the upstream's LocalSafeL2 instead. Changes: - Add LocalSafeL2 field to FollowStatus struct - Copy LocalSafeL2 from upstream sync status in GetFollowStatus() - Update FollowSource() to accept separate LocalSafe parameter - Pass LocalSafeL2 through followUpstream() to FollowSource() - Add test for FollowClient to verify LocalSafeL2 is copied * fix(op-node): use local-safe for FollowSource consolidation and inject cross-safe FollowSource was using eSafeBlockRef (cross-safe) for consolidation/reorg checks, but this logic syncs local chain state and should use eLocalSafeRef. Cross-safe was also never injected into the engine, causing promoteFinalized to silently fail when finalized > SafeL2Head (which reads deprecatedSafeHead). 
- Switch consolidation/reorg/EL-sync checks from eSafeBlockRef to eLocalSafeRef - Add PromoteSafe call for cross-safe injection before promoteFinalized - Add SafeL2 <= LocalSafeL2 invariant check in driver followUpstream - Add L1 origin validation for LocalSafeL2 in driver followUpstream - Add unit test for divergent cross-safe/local-safe values Co-Authored-By: Claude Opus 4.6 (1M context) * fix(op-node): follow-source nodes report distinct SafeL2/LocalSafeL2 In follow-source mode, SafeL2Head() and FinalizedHead() now return the cross-safe/cross-finalized values (deprecatedSafeHead/deprecatedFinalizedHead) without requiring supervisorEnabled. This lets light CL nodes naturally report distinct SafeL2 vs LocalSafeL2 in SyncStatus, since the upstream always provides both values. Also prevents local-safe from auto-promoting to cross-safe in follow-source mode (localSafeIsFullySafe returns false), since cross-safe comes from upstream. Co-Authored-By: Claude Opus 4.6 (1M context) * fix(op-node): address review nits in FollowSource test - Use FollowSourceEnabled sync config instead of supervisorEnabled to exercise the production code path for follow-source nodes - Assert deprecatedFinalizedHead instead of localFinalizedHead for consistency with cross-safe assertions Co-Authored-By: Claude Opus 4.6 (1M context) --------- Co-authored-by: opsuperchain Co-authored-by: Claude Opus 4.6 (1M context) --- op-node/rollup/driver/driver.go | 23 ++++- op-node/rollup/engine/engine_controller.go | 32 ++++--- .../rollup/engine/engine_controller_test.go | 96 +++++++++++++++++++ op-service/sources/follow_client.go | 2 + op-service/sources/follow_client_test.go | 74 ++++++++++++++ 5 files changed, 212 insertions(+), 15 deletions(-) create mode 100644 op-service/sources/follow_client_test.go diff --git a/op-node/rollup/driver/driver.go b/op-node/rollup/driver/driver.go index 4705b9385b9a5..816bae4f3c6ce 100644 --- a/op-node/rollup/driver/driver.go +++ b/op-node/rollup/driver/driver.go @@ 
-492,12 +492,31 @@ func (s *Driver) followUpstream() { s.log.Warn("Follow Upstream: Failed to fetch status", "err", err) return } - s.log.Info("Follow Upstream", "eSafe", status.SafeL2, "eFinalized", status.FinalizedL2, "eCurrentL1", status.CurrentL1) + s.log.Info("Follow Upstream", "eSafe", status.SafeL2, "eLocalSafe", status.LocalSafeL2, "eFinalized", status.FinalizedL2, "eCurrentL1", status.CurrentL1) + if status.SafeL2.Number > status.LocalSafeL2.Number { + s.log.Warn("Follow Upstream: Invalid external state, safe is ahead of local safe", + "safe", status.SafeL2.Number, "localSafe", status.LocalSafeL2.Number) + return + } if status.FinalizedL2.Number > status.SafeL2.Number { s.log.Warn("Follow Upstream: Invalid external state, finalized is ahead of safe", "safe", status.SafeL2.Number, "finalized", status.FinalizedL2.Number) return } + eLocalSafeL1Origin, err := s.upstreamFollowSource.L1BlockRefByNumber(s.driverCtx, status.LocalSafeL2.L1Origin.Number) + if err != nil { + s.log.Warn("Follow Upstream: Failed to look up L1 origin of external local safe head", "err", err) + return + } + if eLocalSafeL1Origin.Hash != status.LocalSafeL2.L1Origin.Hash { + s.log.Warn( + "Follow Upstream: Invalid external local safe: L1 origin of external local safe head mismatch", + "actual", eLocalSafeL1Origin, + "expected", status.LocalSafeL2.L1Origin, + ) + return + } + eSafeL1Origin, err := s.upstreamFollowSource.L1BlockRefByNumber(s.driverCtx, status.SafeL2.L1Origin.Number) if err != nil { s.log.Warn("Follow Upstream: Failed to look up L1 origin of external safe head", "err", err) @@ -547,5 +566,5 @@ func (s *Driver) followUpstream() { s.emitter.Emit(s.driverCtx, derive.DeriverL1StatusEvent{Origin: status.CurrentL1}) } // Only reach this point if all L1 checks passed - s.SyncDeriver.Engine.FollowSource(status.SafeL2, status.FinalizedL2) + s.SyncDeriver.Engine.FollowSource(status.SafeL2, status.LocalSafeL2, status.FinalizedL2) } diff --git 
a/op-node/rollup/engine/engine_controller.go b/op-node/rollup/engine/engine_controller.go index 36d3a6da8d6ee..bf3a8656de394 100644 --- a/op-node/rollup/engine/engine_controller.go +++ b/op-node/rollup/engine/engine_controller.go @@ -229,7 +229,7 @@ func (e *EngineController) SafeL2Head() eth.L2BlockRef { panic("superAuthority supplied an identifier for the safe head which is not known to the engine") } return br - } else if e.supervisorEnabled { + } else if e.supervisorEnabled || e.syncCfg.FollowSourceEnabled() { return e.deprecatedSafeHead } else { return e.localSafeHead @@ -262,7 +262,7 @@ func (e *EngineController) FinalizedHead() eth.L2BlockRef { panic("superAuthority supplied an identifier for the finalized head which is not known to the engine") } return br - } else if e.supervisorEnabled { + } else if e.supervisorEnabled || e.syncCfg.FollowSourceEnabled() { return e.deprecatedFinalizedHead } else { return e.localFinalizedHead @@ -787,7 +787,7 @@ func (e *EngineController) TryUpdateEngine(ctx context.Context) { func (e *EngineController) localSafeIsFullySafe(timestamp uint64) bool { // pre-interop (or if supervisor disabled) everything that is local-safe is also immediately cross-safe. 
- return !e.rollupCfg.IsInterop(timestamp) || !e.supervisorEnabled + return !e.rollupCfg.IsInterop(timestamp) || (!e.supervisorEnabled && !e.syncCfg.FollowSourceEnabled()) } func (e *EngineController) OnEvent(ctx context.Context, ev event.Event) bool { @@ -1208,7 +1208,7 @@ func (e *EngineController) startPayload(ctx context.Context, fc eth.ForkchoiceSt } } -func (e *EngineController) FollowSource(eSafeBlockRef, eFinalizedRef eth.L2BlockRef) { +func (e *EngineController) FollowSource(eSafeBlockRef, eLocalSafeRef, eFinalizedRef eth.L2BlockRef) { e.mu.Lock() defer e.mu.Unlock() @@ -1216,9 +1216,14 @@ func (e *EngineController) FollowSource(eSafeBlockRef, eFinalizedRef eth.L2Block // Assume the sanity of external safe and finalized are checked if updateUnsafe { // May interrupt ongoing EL Sync to update the target, or trigger EL Sync - e.tryUpdateUnsafe(e.ctx, eSafeBlockRef) + e.tryUpdateUnsafe(e.ctx, eLocalSafeRef) + } + e.tryUpdateLocalSafe(e.ctx, eLocalSafeRef, true, eth.L1BlockRef{}) + // Inject external cross-safe. Must happen before promoteFinalized + // (which rejects finalized > SafeL2Head). 
+ if eSafeBlockRef.Number > e.deprecatedSafeHead.Number { + e.PromoteSafe(e.ctx, eSafeBlockRef, eth.L1BlockRef{}) } - e.tryUpdateLocalSafe(e.ctx, eSafeBlockRef, true, eth.L1BlockRef{}) // Directly update the Engine Controller state, bypassing finalizer if e.FinalizedHead().Number <= eFinalizedRef.Number { e.promoteFinalized(e.ctx, eFinalizedRef) @@ -1229,19 +1234,20 @@ func (e *EngineController) FollowSource(eSafeBlockRef, eFinalizedRef eth.L2Block "currentUnsafe", e.unsafeHead, "currentSafe", e.SafeL2Head(), "externalSafe", eSafeBlockRef, + "externalLocalSafe", eLocalSafeRef, "externalFinalized", eFinalizedRef, ) logger.Info("Follow Source: Process external refs") - if e.unsafeHead.Number < eSafeBlockRef.Number { + if e.unsafeHead.Number < eLocalSafeRef.Number { // EL Sync target may be updated - logger.Debug("Follow Source: EL Sync: External safe ahead of current unsafe") + logger.Debug("Follow Source: EL Sync: External local safe ahead of current unsafe") followExternalRefs(true) return } - fetchedSafe, err := e.engine.L2BlockRefByNumber(e.ctx, eSafeBlockRef.Number) + fetchedSafe, err := e.engine.L2BlockRefByNumber(e.ctx, eLocalSafeRef.Number) if errors.Is(err, ethereum.NotFound) { // We queried a block before the EngineController unsafe head number, // but it is not found. This indicates the underlying EL is still syncing. @@ -1253,18 +1259,18 @@ func (e *EngineController) FollowSource(eSafeBlockRef, eFinalizedRef eth.L2Block return } if err != nil { - logger.Debug("Follow Source: Failed to fetch external safe from local EL", "err", err) + logger.Debug("Follow Source: Failed to fetch external local safe from local EL", "err", err) return } - if fetchedSafe == eSafeBlockRef { - // External safe is found locally and matches. + if fetchedSafe == eLocalSafeRef { + // External local safe is found locally and matches. logger.Debug("Follow Source: Consolidation") followExternalRefs(false) return } - // External safe is found locally but they differ so trigger reorg. 
+ // External local safe is found locally but they differ so trigger reorg. // Reorging may trigger EL Sync, or updating the EL Sync target. logger.Warn("Follow Source: Reorg. May Trigger EL sync") followExternalRefs(true) diff --git a/op-node/rollup/engine/engine_controller_test.go b/op-node/rollup/engine/engine_controller_test.go index 8e611026d97e2..fd5cb52561912 100644 --- a/op-node/rollup/engine/engine_controller_test.go +++ b/op-node/rollup/engine/engine_controller_test.go @@ -7,6 +7,7 @@ import ( mrand "math/rand" "testing" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/ethereum-optimism/optimism/op-node/metrics" @@ -397,6 +398,101 @@ func TestEngineController_ForkchoiceUpdateUsesSuperAuthority(t *testing.T) { // SuperAuthority tests are in super_authority_deny_test.go +// TestFollowSource_DivergentLocalSafeAndCrossSafe verifies that FollowSource correctly handles +// the case where external cross-safe and local-safe values diverge. After the fix: +// - Consolidation/reorg checks use eLocalSafeRef (not eSafeBlockRef) +// - PromoteSafe injects the external cross-safe head +// - promoteFinalized succeeds because cross-safe is set before finalized is promoted +func TestFollowSource_DivergentLocalSafeAndCrossSafe(t *testing.T) { + rng := mrand.New(mrand.NewSource(9999)) + + // Create block refs for a simple chain: block1 → block2 → block3 → block4 → block5 + l1Origin := testutils.RandomBlockRef(rng) + + block1 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), Number: 1, + ParentHash: testutils.RandomHash(rng), Time: l1Origin.Time + 1, + L1Origin: l1Origin.ID(), SequenceNumber: 1, + } + block2 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), Number: 2, + ParentHash: block1.Hash, Time: l1Origin.Time + 2, + L1Origin: l1Origin.ID(), SequenceNumber: 2, + } + block3 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), Number: 3, + ParentHash: block2.Hash, Time: l1Origin.Time + 3, + L1Origin: l1Origin.ID(), 
SequenceNumber: 3, + } + block4 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), Number: 4, + ParentHash: block3.Hash, Time: l1Origin.Time + 4, + L1Origin: l1Origin.ID(), SequenceNumber: 4, + } + block5 := eth.L2BlockRef{ + Hash: testutils.RandomHash(rng), Number: 5, + ParentHash: block4.Hash, Time: l1Origin.Time + 5, + L1Origin: l1Origin.ID(), SequenceNumber: 5, + } + + interopTime := uint64(0) + cfg := &rollup.Config{InteropTime: &interopTime} + mockEngine := &testutils.MockEngine{} + emitter := &testutils.MockEmitter{} + + // FollowSourceEnabled=true with no superAuthority: + // SafeL2Head() returns deprecatedSafeHead (cross-safe) + // FinalizedHead() returns deprecatedFinalizedHead + // This lets us observe cross-safe independently from local-safe. + ec := NewEngineController(context.Background(), mockEngine, testlog.Logger(t, 0), + metrics.NoopMetrics, cfg, &sync.Config{L2FollowSourceEndpoint: "http://localhost"}, false, &testutils.MockL1Source{}, emitter, nil) + + // Initial state: unsafe=block5, localSafe=block2, crossSafe=block2, finalized=block1 + ec.unsafeHead = block5 + ec.SetLocalSafeHead(block2) + ec.SetSafeHead(block2) // deprecatedSafeHead = block2 + ec.SetFinalizedHead(block1) // deprecatedFinalizedHead = block1 + ec.needFCUCall = false // reset after setup + + // Mock expectations: + // Allow any events from the emitter (LocalSafeUpdateEvent, SafeDerivedEvent, etc.) 
+ emitter.Mock.On("Emit", mock.Anything).Maybe() + + // Consolidation lookup: after fix, uses eLocalSafeRef.Number (5) + mockEngine.ExpectL2BlockRefByNumber(5, block5, nil) + + // FCU from PromoteSafe's tryUpdateEngine: safe=block4, finalized still block1 + mockEngine.ExpectForkchoiceUpdate( + ð.ForkchoiceState{ + HeadBlockHash: block5.Hash, + SafeBlockHash: block4.Hash, + FinalizedBlockHash: block1.Hash, + }, nil, + ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}}, nil, + ) + // FCU from promoteFinalized's tryUpdateEngine: finalized now block3 + mockEngine.ExpectForkchoiceUpdate( + ð.ForkchoiceState{ + HeadBlockHash: block5.Hash, + SafeBlockHash: block4.Hash, + FinalizedBlockHash: block3.Hash, + }, nil, + ð.ForkchoiceUpdatedResult{PayloadStatus: eth.PayloadStatusV1{Status: eth.ExecutionValid}}, nil, + ) + + // Call FollowSource with divergent cross-safe (block4) and local-safe (block5) + ec.FollowSource(block4, block5, block3) + + // Assert the final head state + require.Equal(t, block5, ec.localSafeHead, "localSafeHead should be updated to block5") + require.Equal(t, block4, ec.deprecatedSafeHead, "deprecatedSafeHead (cross-safe) should be updated to block4") + require.Equal(t, block3, ec.deprecatedFinalizedHead, "deprecatedFinalizedHead (cross-finalized) should be updated to block3") + + // Assert the invariant: cross-safe <= local-safe + require.LessOrEqual(t, ec.deprecatedSafeHead.Number, ec.localSafeHead.Number, + "invariant: cross-safe (deprecatedSafeHead) must not exceed local-safe") +} + // TestEngineController_FinalizedHead tests FinalizedHead behavior with various configurations func TestEngineController_FinalizedHead(t *testing.T) { tests := []struct { diff --git a/op-service/sources/follow_client.go b/op-service/sources/follow_client.go index 0793ad5736add..c8303657749f6 100644 --- a/op-service/sources/follow_client.go +++ b/op-service/sources/follow_client.go @@ -14,6 +14,7 @@ type FollowClient struct { type 
FollowStatus struct { SafeL2 eth.L2BlockRef + LocalSafeL2 eth.L2BlockRef FinalizedL2 eth.L2BlockRef CurrentL1 eth.L1BlockRef } @@ -31,6 +32,7 @@ func (s *FollowClient) GetFollowStatus(ctx context.Context) (*FollowStatus, erro return &FollowStatus{ FinalizedL2: status.FinalizedL2, SafeL2: status.SafeL2, + LocalSafeL2: status.LocalSafeL2, CurrentL1: status.CurrentL1, }, nil } diff --git a/op-service/sources/follow_client_test.go b/op-service/sources/follow_client_test.go new file mode 100644 index 0000000000000..0b7c67253abd6 --- /dev/null +++ b/op-service/sources/follow_client_test.go @@ -0,0 +1,74 @@ +package sources + +import ( + "context" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestFollowClient_GetFollowStatus(t *testing.T) { + t.Run("CopiesLocalSafeL2", func(t *testing.T) { + ctx := context.Background() + rpc := new(mockRPC) + defer rpc.AssertExpectations(t) + + client, err := NewFollowClient(rpc) + require.NoError(t, err) + + // Create a mock sync status with distinct values for each field + // to ensure we're copying the right fields + mockSyncStatus := ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{ + Hash: common.Hash{0x01}, + Number: 100, + }, + SafeL2: eth.L2BlockRef{ + Hash: common.Hash{0x02}, + Number: 50, + }, + LocalSafeL2: eth.L2BlockRef{ + Hash: common.Hash{0x03}, + Number: 45, // LocalSafe can be different from (cross) Safe + }, + FinalizedL2: eth.L2BlockRef{ + Hash: common.Hash{0x04}, + Number: 40, + }, + } + + rpc.On("CallContext", ctx, mock.AnythingOfType("**eth.SyncStatus"), + "optimism_syncStatus", []any(nil)).Run(func(args mock.Arguments) { + // Set the result pointer to our mock sync status + *args[1].(**eth.SyncStatus) = mockSyncStatus + }).Return([]error{nil}) + + status, err := client.GetFollowStatus(ctx) + require.NoError(t, err) + + // Verify all fields are correctly copied + 
require.Equal(t, mockSyncStatus.CurrentL1, status.CurrentL1, "CurrentL1 should be copied") + require.Equal(t, mockSyncStatus.SafeL2, status.SafeL2, "SafeL2 should be copied") + require.Equal(t, mockSyncStatus.FinalizedL2, status.FinalizedL2, "FinalizedL2 should be copied") + require.Equal(t, mockSyncStatus.LocalSafeL2, status.LocalSafeL2, "LocalSafeL2 should be copied") + }) + + t.Run("Error", func(t *testing.T) { + ctx := context.Background() + rpc := new(mockRPC) + defer rpc.AssertExpectations(t) + + client, err := NewFollowClient(rpc) + require.NoError(t, err) + + rpc.On("CallContext", ctx, mock.AnythingOfType("**eth.SyncStatus"), + "optimism_syncStatus", []any(nil)).Return([]error{context.DeadlineExceeded}) + + _, err = client.GetFollowStatus(ctx) + require.Error(t, err) + require.Contains(t, err.Error(), "failed to fetch external syncStatus") + }) +} From 6720bec787fdc13844c5cb0e194720039d4eead7 Mon Sep 17 00:00:00 2001 From: wwared Date: Wed, 4 Mar 2026 20:19:11 -0300 Subject: [PATCH 051/201] op-acceptance-tests: Add tests for L2 light CL follow source mode interop (#19378) * op-devstack: Add preset for light CL follow source Adds a preset `WithTwoL2SupernodeFollowL2` that sets up two L2 chains with interop enabled (via `TwoL2SupernodeInterop`) and one verifier per chain configured with follow source mode (light CL). 
* op-acceptance-tests: Add `TestFollowSource_LocalSafeDivergesThenConverges` This test exercises the flow where: * We have two L2s using interop via supernode * We have two additional L2 light CLs following the supernode and asserts that: * The follower node's `LocalSafeL2` advances independently of its `SafeL2` * `LocalSafeL2` leads `SafeL2` during the window before cross-safe promotion * Both eventually converge closes #19331 * chore: Annotate TODO for L2 CL P2P issue * op-devstack: Expose the follower ELs in TwoL2SupernodeFollowL2 preset These will be useful in future tests, so exposing them right now makes sense --------- Co-authored-by: wwared <541936+wwared@users.noreply.github.com> --- .../supernode/interop/follow_l2/init_test.go | 18 ++++ .../supernode/interop/follow_l2/sync_test.go | 97 +++++++++++++++++++ op-devstack/presets/twol2_follow_l2.go | 65 +++++++++++++ op-devstack/sysgo/system_two_l2_follow_l2.go | 62 ++++++++++++ 4 files changed, 242 insertions(+) create mode 100644 op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go create mode 100644 op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go create mode 100644 op-devstack/presets/twol2_follow_l2.go create mode 100644 op-devstack/sysgo/system_two_l2_follow_l2.go diff --git a/op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go b/op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go new file mode 100644 index 0000000000000..4bc98d0f47f58 --- /dev/null +++ b/op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go @@ -0,0 +1,18 @@ +package follow_l2 + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func TestMain(m *testing.M) { + presets.DoMain( + m, + presets.WithTwoL2SupernodeFollowL2(0), + presets.WithReqRespSyncDisabled(), + presets.WithNoDiscovery(), + presets.WithCompatibleTypes(compat.SysGo), + ) +} diff --git 
a/op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go b/op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go new file mode 100644 index 0000000000000..264017492c01e --- /dev/null +++ b/op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go @@ -0,0 +1,97 @@ +package follow_l2 + +import ( + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +func TestFollowSource_LocalSafeDivergesThenConverges(gt *testing.T) { + t := devtest.SerialT(gt) + require := t.Require() + sys := presets.NewTwoL2SupernodeFollowL2(t, 0) + + type chainPair struct { + name string + source *dsl.L2CLNode + follower *dsl.L2CLNode + } + + chains := []chainPair{ + {name: "A", source: sys.L2ACL, follower: sys.L2AFollowCL}, + {name: "B", source: sys.L2BCL, follower: sys.L2BFollowCL}, + } + + // Initial sanity: followers are aligned with upstream on both local-safe and cross-safe. + initialChecks := make([]dsl.CheckFunc, 0, len(chains)*2) + for _, chain := range chains { + initialChecks = append(initialChecks, + chain.follower.MatchedFn(chain.source, types.LocalSafe, 20), + chain.follower.MatchedFn(chain.source, types.CrossSafe, 20), + ) + } + dsl.CheckAll(t, initialChecks...) + + pausedAt := sys.Supernode.EnsureInteropPaused(sys.L2ACL, sys.L2BCL, 10) + t.Logger().Info("interop paused", "timestamp", pausedAt) + + // While interop is paused, local-safe should continue to advance and lead cross-safe. 
+ require.Eventually(func() bool { + for _, chain := range chains { + sourceStatus := chain.source.SyncStatus() + followerStatus := chain.follower.SyncStatus() + + if sourceStatus.LocalSafeL2.Number <= sourceStatus.SafeL2.Number+1 { + return false + } + if followerStatus.LocalSafeL2.Number <= followerStatus.SafeL2.Number+1 { + return false + } + } + return true + }, 2*time.Minute, 2*time.Second, "expected local-safe to lead cross-safe on source and follower") + + // Core follow-source checks: follower must match source local-safe and cross-safe independently. + divergenceChecks := make([]dsl.CheckFunc, 0, len(chains)*2) + for _, chain := range chains { + divergenceChecks = append(divergenceChecks, + chain.follower.MatchedFn(chain.source, types.LocalSafe, 20), + chain.follower.MatchedFn(chain.source, types.CrossSafe, 20), + ) + } + dsl.CheckAll(t, divergenceChecks...) + + // Freeze new block production so interop can catch cross-safe up to local-safe. + sys.L2ACL.StopSequencer() + sys.L2BCL.StopSequencer() + t.Cleanup(func() { + sys.L2ACL.StartSequencer() + sys.L2BCL.StartSequencer() + }) + + sys.Supernode.ResumeInterop() + + require.Eventually(func() bool { + for _, chain := range chains { + status := chain.follower.SyncStatus() + if status.LocalSafeL2.Hash != status.SafeL2.Hash || status.LocalSafeL2.Number != status.SafeL2.Number { + return false + } + } + return true + }, 3*time.Minute, 2*time.Second, "expected local-safe and cross-safe to converge on followers") + + // Final sanity: follower and source converge to the same local-safe and cross-safe heads. + finalChecks := make([]dsl.CheckFunc, 0, len(chains)*2) + for _, chain := range chains { + finalChecks = append(finalChecks, + chain.follower.MatchedFn(chain.source, types.LocalSafe, 20), + chain.follower.MatchedFn(chain.source, types.CrossSafe, 20), + ) + } + dsl.CheckAll(t, finalChecks...) 
+} diff --git a/op-devstack/presets/twol2_follow_l2.go b/op-devstack/presets/twol2_follow_l2.go new file mode 100644 index 0000000000000..2c725c4908dd3 --- /dev/null +++ b/op-devstack/presets/twol2_follow_l2.go @@ -0,0 +1,65 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +// TwoL2SupernodeFollowL2 extends TwoL2SupernodeInterop with one follow-source +// verifier per chain. +type TwoL2SupernodeFollowL2 struct { + TwoL2SupernodeInterop + + L2AFollowEL *dsl.L2ELNode + L2AFollowCL *dsl.L2CLNode + L2BFollowEL *dsl.L2ELNode + L2BFollowCL *dsl.L2CLNode +} + +// WithTwoL2SupernodeFollowL2 specifies a two-L2 system using a shared supernode +// with interop enabled and one follow-source verifier per chain. +// Use delaySeconds=0 for interop at genesis, or a positive value to test the transition from +// normal safety to interop-verified safety. +func WithTwoL2SupernodeFollowL2(delaySeconds uint64) stack.CommonOption { + return stack.MakeCommon(sysgo.DefaultTwoL2SupernodeFollowL2System(&sysgo.DefaultTwoL2SupernodeFollowL2SystemIDs{}, delaySeconds)) +} + +// NewTwoL2SupernodeFollowL2 creates a TwoL2SupernodeFollowL2 preset for acceptance tests. +// Use delaySeconds=0 for interop at genesis, or a positive value to test the transition. +// The delaySeconds must match what was passed to WithTwoL2SupernodeFollowL2 in TestMain. 
+func NewTwoL2SupernodeFollowL2(t devtest.T, delaySeconds uint64) *TwoL2SupernodeFollowL2 { + base := NewTwoL2SupernodeInterop(t, delaySeconds) + + l2a := base.system.L2Network(match.L2ChainA) + l2b := base.system.L2Network(match.L2ChainB) + + followerELAID := stack.NewL2ELNodeID("follower", l2a.ID().ChainID()) + followerCLAID := stack.NewL2CLNodeID("follower", l2a.ID().ChainID()) + followerELBID := stack.NewL2ELNodeID("follower", l2b.ID().ChainID()) + followerCLBID := stack.NewL2CLNodeID("follower", l2b.ID().ChainID()) + + followerELA := l2a.L2ELNode(match.MatchElemFn[stack.L2ELNodeID, stack.L2ELNode](func(elem stack.L2ELNode) bool { + return elem.ID() == followerELAID + })) + followerCLA := l2a.L2CLNode(match.MatchElemFn[stack.L2CLNodeID, stack.L2CLNode](func(elem stack.L2CLNode) bool { + return elem.ID() == followerCLAID + })) + + followerELB := l2b.L2ELNode(match.MatchElemFn[stack.L2ELNodeID, stack.L2ELNode](func(elem stack.L2ELNode) bool { + return elem.ID() == followerELBID + })) + followerCLB := l2b.L2CLNode(match.MatchElemFn[stack.L2CLNodeID, stack.L2CLNode](func(elem stack.L2CLNode) bool { + return elem.ID() == followerCLBID + })) + + return &TwoL2SupernodeFollowL2{ + TwoL2SupernodeInterop: *base, + L2AFollowEL: dsl.NewL2ELNode(followerELA, base.ControlPlane), + L2AFollowCL: dsl.NewL2CLNode(followerCLA, base.ControlPlane), + L2BFollowEL: dsl.NewL2ELNode(followerELB, base.ControlPlane), + L2BFollowCL: dsl.NewL2CLNode(followerCLB, base.ControlPlane), + } +} diff --git a/op-devstack/sysgo/system_two_l2_follow_l2.go b/op-devstack/sysgo/system_two_l2_follow_l2.go new file mode 100644 index 0000000000000..2ac09522f49dc --- /dev/null +++ b/op-devstack/sysgo/system_two_l2_follow_l2.go @@ -0,0 +1,62 @@ +package sysgo + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +// DefaultTwoL2SupernodeFollowL2SystemIDs defines a two-L2 interop+supernode setup +// with one additional 
follow-source verifier per chain. +type DefaultTwoL2SupernodeFollowL2SystemIDs struct { + DefaultTwoL2SystemIDs + + L2AFollowerCL stack.L2CLNodeID + L2AFollowerEL stack.L2ELNodeID + L2BFollowerCL stack.L2CLNodeID + L2BFollowerEL stack.L2ELNodeID +} + +func NewDefaultTwoL2SupernodeFollowL2SystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultTwoL2SupernodeFollowL2SystemIDs { + return DefaultTwoL2SupernodeFollowL2SystemIDs{ + DefaultTwoL2SystemIDs: NewDefaultTwoL2SystemIDs(l1ID, l2AID, l2BID), + L2AFollowerCL: stack.NewL2CLNodeID("follower", l2AID), + L2AFollowerEL: stack.NewL2ELNodeID("follower", l2AID), + L2BFollowerCL: stack.NewL2CLNodeID("follower", l2BID), + L2BFollowerEL: stack.NewL2ELNodeID("follower", l2BID), + } +} + +// DefaultTwoL2SupernodeFollowL2System runs two L2 chains with: +// - shared supernode CL (interop enabled with configurable delay), +// - one follow-source verifier per chain in op-node light-CL mode. +// +// The follower for each chain tracks that chain's supernode CL proxy. +func DefaultTwoL2SupernodeFollowL2System(dest *DefaultTwoL2SupernodeFollowL2SystemIDs, delaySeconds uint64) stack.Option[*Orchestrator] { + ids := NewDefaultTwoL2SupernodeFollowL2SystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) + + var baseIDs DefaultTwoL2SystemIDs + opt := stack.Combine[*Orchestrator]() + + // Build on top of the existing interop+supernode two-L2 topology. + opt.Add(DefaultSupernodeInteropTwoL2System(&baseIDs, delaySeconds)) + + // Chain A follower + opt.Add(WithL2ELNode(ids.L2AFollowerEL)) + opt.Add(WithOpNodeFollowL2(ids.L2AFollowerCL, ids.L1CL, ids.L1EL, ids.L2AFollowerEL, ids.L2ACL)) + // TODO(#19379): The chain source is a supernode proxy CL, which does not implement opp2p_* RPCs. + // Skip CL P2P wiring and rely on follow-source + EL P2P for data availability. 
+ // opt.Add(WithL2CLP2PConnection(ids.L2ACL, ids.L2AFollowerCL)) + opt.Add(WithL2ELP2PConnection(ids.L2AEL, ids.L2AFollowerEL, false)) + + // Chain B follower + opt.Add(WithL2ELNode(ids.L2BFollowerEL)) + opt.Add(WithOpNodeFollowL2(ids.L2BFollowerCL, ids.L1CL, ids.L1EL, ids.L2BFollowerEL, ids.L2BCL)) + opt.Add(WithL2ELP2PConnection(ids.L2BEL, ids.L2BFollowerEL, false)) + + opt.Add(stack.Finally(func(orch *Orchestrator) { + ids.DefaultTwoL2SystemIDs = baseIDs + *dest = ids + })) + + return opt +} From 88d42e506d61e13c1f5a5e1c0e1b4816d7093590 Mon Sep 17 00:00:00 2001 From: Paul Dowman Date: Thu, 5 Mar 2026 01:05:19 +0100 Subject: [PATCH 052/201] op-dispute-mon: Add metrics for multi-supernode support (#19105) * op-dispute-mon: add supernode endpoint tracking fields * op-dispute-mon: add supernode helper methods * op-dispute-mon: add comprehensive endpoint tracking to super enricher * op-dispute-mon: add supernode monitors and Prometheus metrics * op-dispute-mon: add comprehensive endpoint tracking tests * op-dispute-mon: add safety comment to super agreement enricher Add explanatory comment about safety validation for super roots, matching the equivalent comment in output_agreement_enricher.go. Clarifies that even if the super root matches the game's root claim, the game could still be challenged if the required L1 data was not fully available at proposal time. Co-Authored-By: Claude Sonnet 4.5 * op-dispute-mon: unify rollup and super node endpoint tracking Consolidates duplicate endpoint tracking metrics and fields into a single unified set. Since games are mutually exclusive (either using output roots or super roots), we can use the same fields and metrics for both types. 
Changes: - Renamed RollupEndpoint* fields to NodeEndpoint* in types - Removed all SuperNodeEndpoint* duplicate fields - Removed duplicate super-specific metrics and monitors - Renamed differentOutputRootGames to differentRootGames - Updated enrichers to populate unified NodeEndpoint* fields This simplifies the codebase (net -973 lines) while maintaining the same monitoring capabilities. Existing metrics now aggregate across both rollup and super nodes. Co-Authored-By: Claude Sonnet 4.5 * op-dispute-mon: rename different_output_roots files to different_roots Renames monitor files to match the refactored naming that tracks both output roots and super roots. Co-Authored-By: Claude Sonnet 4.5 * op-dispute-mon: consolidate super agreement enricher tests Co-Authored-By: Claude Sonnet 4.5 * lint fixes --------- Co-authored-by: Claude Sonnet 4.5 --- op-dispute-mon/metrics/metrics.go | 16 +- op-dispute-mon/metrics/noop.go | 2 +- op-dispute-mon/mon/different_output_roots.go | 41 -- .../mon/different_output_roots_test.go | 151 ------- op-dispute-mon/mon/different_roots.go | 41 ++ op-dispute-mon/mon/different_roots_test.go | 151 +++++++ op-dispute-mon/mon/extract/extractor.go | 33 +- op-dispute-mon/mon/extract/extractor_test.go | 4 +- .../mon/extract/output_agreement_enricher.go | 16 +- .../extract/output_agreement_enricher_test.go | 382 +++++++++--------- .../mon/extract/super_agreement_enricher.go | 24 +- .../extract/super_agreement_enricher_test.go | 301 +++++++++++--- op-dispute-mon/mon/mixed_availability.go | 6 +- op-dispute-mon/mon/mixed_availability_test.go | 10 +- op-dispute-mon/mon/mixed_safety.go | 4 +- op-dispute-mon/mon/mixed_safety_test.go | 10 +- op-dispute-mon/mon/monitor_test.go | 160 ++++---- .../mon/node_endpoint_error_count.go | 4 +- .../mon/node_endpoint_error_count_test.go | 72 ++-- op-dispute-mon/mon/node_endpoint_errors.go | 4 +- .../mon/node_endpoint_errors_test.go | 24 +- .../mon/node_endpoint_out_of_sync.go | 2 +- 
.../mon/node_endpoint_out_of_sync_test.go | 42 +- op-dispute-mon/mon/service.go | 4 +- op-dispute-mon/mon/types/types.go | 49 +-- op-dispute-mon/mon/types/types_test.go | 169 ++++---- 26 files changed, 967 insertions(+), 755 deletions(-) delete mode 100644 op-dispute-mon/mon/different_output_roots.go delete mode 100644 op-dispute-mon/mon/different_output_roots_test.go create mode 100644 op-dispute-mon/mon/different_roots.go create mode 100644 op-dispute-mon/mon/different_roots_test.go diff --git a/op-dispute-mon/metrics/metrics.go b/op-dispute-mon/metrics/metrics.go index c4a8c58abacc9..532c8efb21efb 100644 --- a/op-dispute-mon/metrics/metrics.go +++ b/op-dispute-mon/metrics/metrics.go @@ -189,7 +189,7 @@ type Metricer interface { RecordMixedSafetyGames(count int) - RecordDifferentOutputRootGames(count int) + RecordDifferentRootGames(count int) RecordBondCollateral(addr common.Address, required, available *big.Int) @@ -248,7 +248,7 @@ type Metrics struct { nodeEndpointOutOfSyncCount prometheus.Gauge mixedAvailabilityGames prometheus.Gauge mixedSafetyGames prometheus.Gauge - differentOutputRootGames prometheus.Gauge + differentRootGames prometheus.Gauge } func (m *Metrics) Registry() *prometheus.Registry { @@ -439,12 +439,12 @@ func NewMetrics() *Metrics { mixedSafetyGames: factory.NewGauge(prometheus.GaugeOpts{ Namespace: Namespace, Name: "mixed_safety_games", - Help: "Number of games where some rollup nodes reported the root as safe while others reported it as unsafe in the last update cycle", + Help: "Number of games where some nodes reported the root as safe while others reported it as unsafe in the last update cycle", }), - differentOutputRootGames: factory.NewGauge(prometheus.GaugeOpts{ + differentRootGames: factory.NewGauge(prometheus.GaugeOpts{ Namespace: Namespace, - Name: "different_output_root_games", - Help: "Number of games where rollup nodes returned different output roots for the same L2 block in the last update cycle", + Name: "different_root_games", 
+ Help: "Number of games where nodes returned different roots (output roots for FaultDisputeGame, super roots for SuperFaultDisputeGame) in the last update cycle", }), } } @@ -603,8 +603,8 @@ func (m *Metrics) RecordMixedSafetyGames(count int) { m.mixedSafetyGames.Set(float64(count)) } -func (m *Metrics) RecordDifferentOutputRootGames(count int) { - m.differentOutputRootGames.Set(float64(count)) +func (m *Metrics) RecordDifferentRootGames(count int) { + m.differentRootGames.Set(float64(count)) } func (m *Metrics) RecordBondCollateral(addr common.Address, required, available *big.Int) { diff --git a/op-dispute-mon/metrics/noop.go b/op-dispute-mon/metrics/noop.go index 690beac8b3092..da58c4b6cf188 100644 --- a/op-dispute-mon/metrics/noop.go +++ b/op-dispute-mon/metrics/noop.go @@ -64,4 +64,4 @@ func (*NoopMetricsImpl) RecordMixedAvailabilityGames(_ int) {} func (*NoopMetricsImpl) RecordMixedSafetyGames(_ int) {} -func (*NoopMetricsImpl) RecordDifferentOutputRootGames(_ int) {} +func (*NoopMetricsImpl) RecordDifferentRootGames(_ int) {} diff --git a/op-dispute-mon/mon/different_output_roots.go b/op-dispute-mon/mon/different_output_roots.go deleted file mode 100644 index 37ced8a9282c0..0000000000000 --- a/op-dispute-mon/mon/different_output_roots.go +++ /dev/null @@ -1,41 +0,0 @@ -package mon - -import ( - "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" - "github.com/ethereum/go-ethereum/log" -) - -type DifferentOutputRootMetrics interface { - RecordDifferentOutputRootGames(count int) -} - -type DifferentOutputRootMonitor struct { - logger log.Logger - metrics DifferentOutputRootMetrics -} - -func NewDifferentOutputRootMonitor(logger log.Logger, metrics DifferentOutputRootMetrics) *DifferentOutputRootMonitor { - return &DifferentOutputRootMonitor{ - logger: logger, - metrics: metrics, - } -} - -func (m *DifferentOutputRootMonitor) CheckDifferentOutputRoots(games []*types.EnrichedGameData) { - count := 0 - for _, game := range games { - if 
game.RollupEndpointDifferentOutputRoots { - count++ - m.logger.Debug("Different output roots detected", - "game", game.Proxy, - "l2SequenceNumber", game.L2SequenceNumber, - "rootClaim", game.RootClaim) - } - } - - m.metrics.RecordDifferentOutputRootGames(count) - - if count > 0 { - m.logger.Info("Different output roots summary", "gamesWithDifferentOutputRoots", count, "totalGames", len(games)) - } -} diff --git a/op-dispute-mon/mon/different_output_roots_test.go b/op-dispute-mon/mon/different_output_roots_test.go deleted file mode 100644 index a982d340b4d3b..0000000000000 --- a/op-dispute-mon/mon/different_output_roots_test.go +++ /dev/null @@ -1,151 +0,0 @@ -package mon - -import ( - "testing" - - gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" - "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -func TestCheckDifferentOutputRoots(t *testing.T) { - games := []*types.EnrichedGameData{ - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointDifferentOutputRoots: true, - L2SequenceNumber: 100, - RootClaim: common.HexToHash("0xaaa"), - }, - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointDifferentOutputRoots: false, // No disagreement - L2SequenceNumber: 200, - RootClaim: common.HexToHash("0xbbb"), - }, - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointDifferentOutputRoots: true, - L2SequenceNumber: 300, - RootClaim: common.HexToHash("0xccc"), - }, - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x44}}, - RollupEndpointDifferentOutputRoots: false, // No disagreement - L2SequenceNumber: 400, - RootClaim: common.HexToHash("0xddd"), - }, - } - metrics := &stubDifferentOutputRootMetrics{} - logger, capturedLogs := 
testlog.CaptureLogger(t, log.LvlDebug) - monitor := NewDifferentOutputRootMonitor(logger, metrics) - monitor.CheckDifferentOutputRoots(games) - require.Equal(t, 2, metrics.recordedCount) - - // Debug log for first game with different output roots - levelFilter := testlog.NewLevelFilter(log.LevelDebug) - messageFilter := testlog.NewMessageFilter("Different output roots detected") - logs := capturedLogs.FindLogs(levelFilter, messageFilter) - require.Len(t, logs, 2) - - l := logs[0] - require.Equal(t, common.Address{0x11}, l.AttrValue("game")) - require.Equal(t, uint64(100), l.AttrValue("l2SequenceNumber")) - require.Equal(t, common.HexToHash("0xaaa"), l.AttrValue("rootClaim")) - - // Info log for summary - levelFilter = testlog.NewLevelFilter(log.LevelInfo) - messageFilter = testlog.NewMessageFilter("Different output roots summary") - l = capturedLogs.FindLog(levelFilter, messageFilter) - require.NotNil(t, l) - require.Equal(t, int64(2), l.AttrValue("gamesWithDifferentOutputRoots")) - require.Equal(t, int64(4), l.AttrValue("totalGames")) -} - -func TestCheckDifferentOutputRoots_NoDisagreements(t *testing.T) { - games := []*types.EnrichedGameData{ - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointDifferentOutputRoots: false, - }, - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointDifferentOutputRoots: false, - }, - } - metrics := &stubDifferentOutputRootMetrics{} - logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) - monitor := NewDifferentOutputRootMonitor(logger, metrics) - monitor.CheckDifferentOutputRoots(games) - require.Equal(t, 0, metrics.recordedCount) - - // No info log should be present when count is 0 - levelFilter := testlog.NewLevelFilter(log.LevelInfo) - messageFilter := testlog.NewMessageFilter("Different output roots summary") - l := capturedLogs.FindLog(levelFilter, messageFilter) - require.Nil(t, l) -} - -func TestCheckDifferentOutputRoots_EmptyGamesList(t 
*testing.T) { - games := []*types.EnrichedGameData{} - metrics := &stubDifferentOutputRootMetrics{} - logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) - monitor := NewDifferentOutputRootMonitor(logger, metrics) - monitor.CheckDifferentOutputRoots(games) - require.Equal(t, 0, metrics.recordedCount) - - // No log should be present when no games exist - levelFilter := testlog.NewLevelFilter(log.LevelInfo) - messageFilter := testlog.NewMessageFilter("Different output roots summary") - l := capturedLogs.FindLog(levelFilter, messageFilter) - require.Nil(t, l) -} - -func TestCheckDifferentOutputRoots_AllGamesHaveDisagreements(t *testing.T) { - games := []*types.EnrichedGameData{ - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointDifferentOutputRoots: true, - L2SequenceNumber: 100, - }, - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointDifferentOutputRoots: true, - L2SequenceNumber: 200, - }, - { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointDifferentOutputRoots: true, - L2SequenceNumber: 300, - }, - } - metrics := &stubDifferentOutputRootMetrics{} - logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) - monitor := NewDifferentOutputRootMonitor(logger, metrics) - monitor.CheckDifferentOutputRoots(games) - require.Equal(t, 3, metrics.recordedCount) - - // Debug logs for all games - levelFilter := testlog.NewLevelFilter(log.LevelDebug) - messageFilter := testlog.NewMessageFilter("Different output roots detected") - logs := capturedLogs.FindLogs(levelFilter, messageFilter) - require.Len(t, logs, 3) - - // Info log for summary - levelFilter = testlog.NewLevelFilter(log.LevelInfo) - messageFilter = testlog.NewMessageFilter("Different output roots summary") - l := capturedLogs.FindLog(levelFilter, messageFilter) - require.NotNil(t, l) - require.Equal(t, int64(3), l.AttrValue("gamesWithDifferentOutputRoots")) - require.Equal(t, int64(3), 
l.AttrValue("totalGames")) -} - -type stubDifferentOutputRootMetrics struct { - recordedCount int -} - -func (s *stubDifferentOutputRootMetrics) RecordDifferentOutputRootGames(count int) { - s.recordedCount = count -} diff --git a/op-dispute-mon/mon/different_roots.go b/op-dispute-mon/mon/different_roots.go new file mode 100644 index 0000000000000..493c3a86bd6e5 --- /dev/null +++ b/op-dispute-mon/mon/different_roots.go @@ -0,0 +1,41 @@ +package mon + +import ( + "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" + "github.com/ethereum/go-ethereum/log" +) + +type DifferentRootMetrics interface { + RecordDifferentRootGames(count int) +} + +type DifferentRootMonitor struct { + logger log.Logger + metrics DifferentRootMetrics +} + +func NewDifferentRootMonitor(logger log.Logger, metrics DifferentRootMetrics) *DifferentRootMonitor { + return &DifferentRootMonitor{ + logger: logger, + metrics: metrics, + } +} + +func (m *DifferentRootMonitor) CheckDifferentRoots(games []*types.EnrichedGameData) { + count := 0 + for _, game := range games { + if game.NodeEndpointDifferentRoots { + count++ + m.logger.Debug("Different roots detected", + "game", game.Proxy, + "l2SequenceNumber", game.L2SequenceNumber, + "rootClaim", game.RootClaim) + } + } + + m.metrics.RecordDifferentRootGames(count) + + if count > 0 { + m.logger.Info("Different roots summary", "gamesWithDifferentRoots", count, "totalGames", len(games)) + } +} diff --git a/op-dispute-mon/mon/different_roots_test.go b/op-dispute-mon/mon/different_roots_test.go new file mode 100644 index 0000000000000..54afa9ad19fa8 --- /dev/null +++ b/op-dispute-mon/mon/different_roots_test.go @@ -0,0 +1,151 @@ +package mon + +import ( + "testing" + + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-dispute-mon/mon/types" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + 
"github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +func TestCheckDifferentRoots(t *testing.T) { + games := []*types.EnrichedGameData{ + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointDifferentRoots: true, + L2SequenceNumber: 100, + RootClaim: common.HexToHash("0xaaa"), + }, + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointDifferentRoots: false, // No disagreement + L2SequenceNumber: 200, + RootClaim: common.HexToHash("0xbbb"), + }, + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointDifferentRoots: true, + L2SequenceNumber: 300, + RootClaim: common.HexToHash("0xccc"), + }, + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x44}}, + NodeEndpointDifferentRoots: false, // No disagreement + L2SequenceNumber: 400, + RootClaim: common.HexToHash("0xddd"), + }, + } + metrics := &stubDifferentOutputRootMetrics{} + logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) + monitor := NewDifferentRootMonitor(logger, metrics) + monitor.CheckDifferentRoots(games) + require.Equal(t, 2, metrics.recordedCount) + + // Debug log for first game with different output roots + levelFilter := testlog.NewLevelFilter(log.LevelDebug) + messageFilter := testlog.NewMessageFilter("Different roots detected") + logs := capturedLogs.FindLogs(levelFilter, messageFilter) + require.Len(t, logs, 2) + + l := logs[0] + require.Equal(t, common.Address{0x11}, l.AttrValue("game")) + require.Equal(t, uint64(100), l.AttrValue("l2SequenceNumber")) + require.Equal(t, common.HexToHash("0xaaa"), l.AttrValue("rootClaim")) + + // Info log for summary + levelFilter = testlog.NewLevelFilter(log.LevelInfo) + messageFilter = testlog.NewMessageFilter("Different roots summary") + l = capturedLogs.FindLog(levelFilter, messageFilter) + require.NotNil(t, l) + require.Equal(t, int64(2), l.AttrValue("gamesWithDifferentRoots")) + require.Equal(t, int64(4), 
l.AttrValue("totalGames")) +} + +func TestCheckDifferentRoots_NoDisagreements(t *testing.T) { + games := []*types.EnrichedGameData{ + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointDifferentRoots: false, + }, + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointDifferentRoots: false, + }, + } + metrics := &stubDifferentOutputRootMetrics{} + logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) + monitor := NewDifferentRootMonitor(logger, metrics) + monitor.CheckDifferentRoots(games) + require.Equal(t, 0, metrics.recordedCount) + + // No info log should be present when count is 0 + levelFilter := testlog.NewLevelFilter(log.LevelInfo) + messageFilter := testlog.NewMessageFilter("Different roots summary") + l := capturedLogs.FindLog(levelFilter, messageFilter) + require.Nil(t, l) +} + +func TestCheckDifferentRoots_EmptyGamesList(t *testing.T) { + games := []*types.EnrichedGameData{} + metrics := &stubDifferentOutputRootMetrics{} + logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) + monitor := NewDifferentRootMonitor(logger, metrics) + monitor.CheckDifferentRoots(games) + require.Equal(t, 0, metrics.recordedCount) + + // No log should be present when no games exist + levelFilter := testlog.NewLevelFilter(log.LevelInfo) + messageFilter := testlog.NewMessageFilter("Different roots summary") + l := capturedLogs.FindLog(levelFilter, messageFilter) + require.Nil(t, l) +} + +func TestCheckDifferentRoots_AllGamesHaveDisagreements(t *testing.T) { + games := []*types.EnrichedGameData{ + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointDifferentRoots: true, + L2SequenceNumber: 100, + }, + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointDifferentRoots: true, + L2SequenceNumber: 200, + }, + { + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointDifferentRoots: true, + L2SequenceNumber: 
300, + }, + } + metrics := &stubDifferentOutputRootMetrics{} + logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) + monitor := NewDifferentRootMonitor(logger, metrics) + monitor.CheckDifferentRoots(games) + require.Equal(t, 3, metrics.recordedCount) + + // Debug logs for all games + levelFilter := testlog.NewLevelFilter(log.LevelDebug) + messageFilter := testlog.NewMessageFilter("Different roots detected") + logs := capturedLogs.FindLogs(levelFilter, messageFilter) + require.Len(t, logs, 3) + + // Info log for summary + levelFilter = testlog.NewLevelFilter(log.LevelInfo) + messageFilter = testlog.NewMessageFilter("Different roots summary") + l := capturedLogs.FindLog(levelFilter, messageFilter) + require.NotNil(t, l) + require.Equal(t, int64(3), l.AttrValue("gamesWithDifferentRoots")) + require.Equal(t, int64(3), l.AttrValue("totalGames")) +} + +type stubDifferentOutputRootMetrics struct { + recordedCount int +} + +func (s *stubDifferentOutputRootMetrics) RecordDifferentRootGames(count int) { + s.recordedCount = count +} diff --git a/op-dispute-mon/mon/extract/extractor.go b/op-dispute-mon/mon/extract/extractor.go index aa10c6df15921..a4cbb597b43a5 100644 --- a/op-dispute-mon/mon/extract/extractor.go +++ b/op-dispute-mon/mon/extract/extractor.go @@ -149,21 +149,24 @@ func (e *Extractor) enrichGame(ctx context.Context, blockHash common.Hash, game enrichedClaims[i] = monTypes.EnrichedClaim{Claim: claim} } enrichedGame := &monTypes.EnrichedGameData{ - LastUpdateTime: e.clock.Now(), - GameMetadata: game, - L1Head: meta.L1Head, - L2SequenceNumber: meta.L2SequenceNum, - RootClaim: meta.RootClaim, - Status: meta.Status, - MaxClockDuration: meta.MaxClockDuration, - BlockNumberChallenged: meta.L2BlockNumberChallenged, - BlockNumberChallenger: meta.L2BlockNumberChallenger, - Claims: enrichedClaims, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointErrorCount: 0, - RollupEndpointNotFoundCount: 0, - RollupEndpointOutOfSyncCount: 0, - 
RollupEndpointTotalCount: 0, + LastUpdateTime: e.clock.Now(), + GameMetadata: game, + L1Head: meta.L1Head, + L2SequenceNumber: meta.L2SequenceNum, + RootClaim: meta.RootClaim, + Status: meta.Status, + MaxClockDuration: meta.MaxClockDuration, + BlockNumberChallenged: meta.L2BlockNumberChallenged, + BlockNumberChallenger: meta.L2BlockNumberChallenger, + Claims: enrichedClaims, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointErrorCount: 0, + NodeEndpointNotFoundCount: 0, + NodeEndpointOutOfSyncCount: 0, + NodeEndpointTotalCount: 0, + NodeEndpointSafeCount: 0, + NodeEndpointUnsafeCount: 0, + NodeEndpointDifferentRoots: false, } if err := e.applyEnrichers(ctx, blockHash, caller, enrichedGame); err != nil { return nil, fmt.Errorf("failed to enrich game: %w", err) diff --git a/op-dispute-mon/mon/extract/extractor_test.go b/op-dispute-mon/mon/extract/extractor_test.go index 09e408d76d9aa..6919068443a6f 100644 --- a/op-dispute-mon/mon/extract/extractor_test.go +++ b/op-dispute-mon/mon/extract/extractor_test.go @@ -381,7 +381,7 @@ func TestExtractor_EnrichGameInitializesRollupEndpointErrorCount(t *testing.T) { require.Zero(t, ignored) require.Zero(t, failed) require.Len(t, enriched, 1) - require.Equal(t, 0, enriched[0].RollupEndpointErrorCount, "RollupEndpointErrorCount should be initialized to 0") + require.Equal(t, 0, enriched[0].NodeEndpointErrorCount, "NodeEndpointErrorCount should be initialized to 0") } func TestExtractor_EnrichGameInitializesRollupEndpointOutOfSyncCount(t *testing.T) { @@ -392,7 +392,7 @@ func TestExtractor_EnrichGameInitializesRollupEndpointOutOfSyncCount(t *testing. 
require.Zero(t, ignored) require.Zero(t, failed) require.Len(t, enriched, 1) - require.Equal(t, 0, enriched[0].RollupEndpointOutOfSyncCount, "RollupEndpointOutOfSyncCount should be initialized to 0") + require.Equal(t, 0, enriched[0].NodeEndpointOutOfSyncCount, "NodeEndpointOutOfSyncCount should be initialized to 0") } type mockEnricher struct { diff --git a/op-dispute-mon/mon/extract/output_agreement_enricher.go b/op-dispute-mon/mon/extract/output_agreement_enricher.go index 4881d8c91e1ec..ac373ba05db5a 100644 --- a/op-dispute-mon/mon/extract/output_agreement_enricher.go +++ b/op-dispute-mon/mon/extract/output_agreement_enricher.go @@ -72,7 +72,7 @@ func (o *OutputAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Blo return nil } - game.RollupEndpointTotalCount = len(o.clients) + game.NodeEndpointTotalCount = len(o.clients) results := make([]outputResult, len(o.clients)) var wg sync.WaitGroup @@ -141,27 +141,27 @@ func (o *OutputAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Blo if result.err != nil { o.log.Error("Failed to fetch output root", "clientIndex", idx, "l2SequenceNumber", game.L2SequenceNumber, "err", result.err) endpointID := fmt.Sprintf("client-%d", idx) - game.RollupEndpointErrors[endpointID] = true - game.RollupEndpointErrorCount++ + game.NodeEndpointErrors[endpointID] = true + game.NodeEndpointErrorCount++ continue } if result.gameL1HeadUnprocessed { - game.RollupEndpointOutOfSyncCount++ + game.NodeEndpointOutOfSyncCount++ continue } validResults = append(validResults, result) if result.notFound { - game.RollupEndpointNotFoundCount++ + game.NodeEndpointNotFoundCount++ } else { foundResults = append(foundResults, result) // Track safety counts only for found results where the output root matches the game's root claim if result.outputRoot == game.RootClaim { if result.isSafe { - game.RollupEndpointSafeCount++ + game.NodeEndpointSafeCount++ } else { - game.RollupEndpointUnsafeCount++ + game.NodeEndpointUnsafeCount++ } } } @@ 
-192,7 +192,7 @@ func (o *OutputAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Blo for _, result := range foundResults[1:] { if result.outputRoot != firstResult.outputRoot { diverged = true - game.RollupEndpointDifferentOutputRoots = true + game.NodeEndpointDifferentRoots = true break } } diff --git a/op-dispute-mon/mon/extract/output_agreement_enricher_test.go b/op-dispute-mon/mon/extract/output_agreement_enricher_test.go index 420f15d575455..a01c8900c90c0 100644 --- a/op-dispute-mon/mon/extract/output_agreement_enricher_test.go +++ b/op-dispute-mon/mon/extract/output_agreement_enricher_test.go @@ -30,10 +30,10 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.ErrorIs(t, err, ErrRollupRpcRequired) @@ -88,10 +88,10 @@ func TestOutputAgreementEnricher(t *testing.T) { client.outputErr = errors.New("boom") } game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.Error(t, err) @@ -107,10 +107,10 @@ func TestOutputAgreementEnricher(t *testing.T) { client.outputErr = mockNotFoundRPCError() } game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), 
rpcblock.Latest, nil, game) require.NoError(t, err) @@ -159,10 +159,10 @@ func TestOutputAgreementEnricher(t *testing.T) { clients[1].outputErr = nil clients[2].outputErr = nil game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -180,10 +180,10 @@ func TestOutputAgreementEnricher(t *testing.T) { clients[3].outputRoot = mockRootClaim clients[3].safeHeadNum = 100 game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 50, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 50, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -199,10 +199,10 @@ func TestOutputAgreementEnricher(t *testing.T) { clients[1].outputRoot = differentRoot clients[2].outputRoot = differentRoot game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 50, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 50, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -218,10 +218,10 @@ func TestOutputAgreementEnricher(t *testing.T) { clients[1].outputRoot = divergedRoot clients[2].outputRoot = divergedRoot game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := 
validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -236,10 +236,10 @@ func TestOutputAgreementEnricher(t *testing.T) { clients[1].safeHeadNum = 99 clients[2].safeHeadNum = 101 game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -254,10 +254,10 @@ func TestOutputAgreementEnricher(t *testing.T) { clients[1].safeHeadErr = nil clients[2].safeHeadErr = nil game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -272,10 +272,10 @@ func TestOutputAgreementEnricher(t *testing.T) { clients[1].safeHeadNum = 60 clients[2].safeHeadNum = 70 game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 80, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 80, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -293,10 +293,10 @@ func TestOutputAgreementEnricher(t *testing.T) { } game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 50, // Higher than all safe heads - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 50, // Higher than all safe heads + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := 
validator.Enrich(context.Background(), rpcblock.Latest, nil, game) @@ -316,10 +316,10 @@ func TestOutputAgreementEnricher(t *testing.T) { } game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: 50, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: 50, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) @@ -335,10 +335,10 @@ func TestOutputAgreementEnricher(t *testing.T) { // without even making a request to the node. rollup.outputErr = errors.New("should not have even requested the output root") game := &types.EnrichedGameData{ - L1HeadNum: 100, - L2SequenceNumber: uint64(math.MaxInt64) + 1, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 100, + L2SequenceNumber: uint64(math.MaxInt64) + 1, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -355,15 +355,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.ErrorIs(t, err, ErrAllNodesUnavailable) - require.NotNil(t, game.RollupEndpointErrors) - require.Contains(t, game.RollupEndpointErrors, "client-0") + require.NotNil(t, game.NodeEndpointErrors) + require.Contains(t, game.NodeEndpointErrors, "client-0") }) t.Run("MultiNodeErrors", func(t *testing.T) { @@ -376,18 +376,18 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - 
L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.NotNil(t, game.RollupEndpointErrors) - require.Contains(t, game.RollupEndpointErrors, "client-0") - require.Contains(t, game.RollupEndpointErrors, "client-2") - require.NotContains(t, game.RollupEndpointErrors, "client-1") - require.Len(t, game.RollupEndpointErrors, 2) + require.NotNil(t, game.NodeEndpointErrors) + require.Contains(t, game.NodeEndpointErrors, "client-0") + require.Contains(t, game.NodeEndpointErrors, "client-2") + require.NotContains(t, game.NodeEndpointErrors, "client-1") + require.Len(t, game.NodeEndpointErrors, 2) }) t.Run("NotFoundErrorsNotRecorded", func(t *testing.T) { @@ -397,15 +397,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.NotNil(t, game.RollupEndpointErrors) - require.Empty(t, game.RollupEndpointErrors) + require.NotNil(t, game.NodeEndpointErrors) + require.Empty(t, game.NodeEndpointErrors) }) }) @@ -418,15 +418,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointErrorCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointErrorCount: 
0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.ErrorIs(t, err, ErrAllNodesUnavailable) - require.Equal(t, 1, game.RollupEndpointErrorCount) + require.Equal(t, 1, game.NodeEndpointErrorCount) }) t.Run("MultiNodeErrorCount", func(t *testing.T) { @@ -440,15 +440,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointErrorCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointErrorCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 3, game.RollupEndpointErrorCount) + require.Equal(t, 3, game.NodeEndpointErrorCount) }) t.Run("NotFoundErrorsNotCounted", func(t *testing.T) { @@ -461,15 +461,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointErrorCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointErrorCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 0, game.RollupEndpointErrorCount) + require.Equal(t, 0, game.NodeEndpointErrorCount) }) t.Run("MixedErrorTypes", func(t *testing.T) { @@ -483,15 +483,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointErrorCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, 
+ NodeEndpointErrors: make(map[string]bool), + NodeEndpointErrorCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 2, game.RollupEndpointErrorCount) + require.Equal(t, 2, game.NodeEndpointErrorCount) }) }) @@ -507,15 +507,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointOutOfSyncCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointOutOfSyncCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 0, game.RollupEndpointOutOfSyncCount) + require.Equal(t, 0, game.NodeEndpointOutOfSyncCount) }) t.Run("SingleNodeOutOfSync", func(t *testing.T) { @@ -528,15 +528,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointOutOfSyncCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointOutOfSyncCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 1, game.RollupEndpointOutOfSyncCount) + require.Equal(t, 1, game.NodeEndpointOutOfSyncCount) }) t.Run("MultipleNodesOutOfSync", func(t *testing.T) { @@ -550,15 +550,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointOutOfSyncCount: 0, + L1HeadNum: 200, + 
L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointOutOfSyncCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 3, game.RollupEndpointOutOfSyncCount) + require.Equal(t, 3, game.NodeEndpointOutOfSyncCount) }) t.Run("AllNodesOutOfSync", func(t *testing.T) { @@ -571,15 +571,15 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointOutOfSyncCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointOutOfSyncCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.ErrorIs(t, err, ErrAllNodesUnavailable) - require.Equal(t, 3, game.RollupEndpointOutOfSyncCount) + require.Equal(t, 3, game.NodeEndpointOutOfSyncCount) }) t.Run("MixedOutOfSyncAndErrors", func(t *testing.T) { @@ -594,17 +594,17 @@ func TestOutputAgreementEnricher(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointErrorCount: 0, - RollupEndpointOutOfSyncCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointErrorCount: 0, + NodeEndpointOutOfSyncCount: 0, } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 2, game.RollupEndpointOutOfSyncCount, "should count 2 out-of-sync nodes") - require.Equal(t, 1, game.RollupEndpointErrorCount, "should count 1 error (not found is not an error)") + require.Equal(t, 2, game.NodeEndpointOutOfSyncCount, "should count 2 out-of-sync nodes") 
+ require.Equal(t, 1, game.NodeEndpointErrorCount, "should count 1 error (not found is not an error)") }) }) } @@ -716,19 +716,19 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 75, - RootClaim: rootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointSafeCount: 0, - RollupEndpointUnsafeCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 75, + RootClaim: rootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointSafeCount: 0, + NodeEndpointUnsafeCount: 0, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 2, game.RollupEndpointSafeCount, "Should count 2 safe endpoints") - require.Equal(t, 1, game.RollupEndpointUnsafeCount, "Should count 1 unsafe endpoint") + require.Equal(t, 2, game.NodeEndpointSafeCount, "Should count 2 safe endpoints") + require.Equal(t, 1, game.NodeEndpointUnsafeCount, "Should count 1 unsafe endpoint") require.True(t, game.HasMixedSafety(), "Should have mixed safety") }) @@ -747,19 +747,19 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 75, - RootClaim: rootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointSafeCount: 0, - RollupEndpointUnsafeCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 75, + RootClaim: rootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointSafeCount: 0, + NodeEndpointUnsafeCount: 0, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 0, game.RollupEndpointSafeCount, "Should not count safety when output root differs") - require.Equal(t, 0, game.RollupEndpointUnsafeCount, "Should not count safety when output root differs") + require.Equal(t, 0, game.NodeEndpointSafeCount, "Should not count safety when output 
root differs") + require.Equal(t, 0, game.NodeEndpointUnsafeCount, "Should not count safety when output root differs") require.False(t, game.HasMixedSafety(), "Should not have mixed safety") }) @@ -782,19 +782,19 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 75, - RootClaim: rootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointSafeCount: 0, - RollupEndpointUnsafeCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 75, + RootClaim: rootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointSafeCount: 0, + NodeEndpointUnsafeCount: 0, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 1, game.RollupEndpointSafeCount, "Should count only found safe endpoints") - require.Equal(t, 1, game.RollupEndpointUnsafeCount, "Should count only found unsafe endpoints") + require.Equal(t, 1, game.NodeEndpointSafeCount, "Should count only found safe endpoints") + require.Equal(t, 1, game.NodeEndpointUnsafeCount, "Should count only found unsafe endpoints") require.True(t, game.HasMixedSafety(), "Should have mixed safety") }) @@ -812,19 +812,19 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 200, - L2SequenceNumber: 75, - RootClaim: rootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointSafeCount: 0, - RollupEndpointUnsafeCount: 0, + L1HeadNum: 200, + L2SequenceNumber: 75, + RootClaim: rootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointSafeCount: 0, + NodeEndpointUnsafeCount: 0, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.Equal(t, 3, game.RollupEndpointSafeCount, "Should count all safe endpoints") - require.Equal(t, 0, game.RollupEndpointUnsafeCount, "Should count no unsafe endpoints") 
+ require.Equal(t, 3, game.NodeEndpointSafeCount, "Should count all safe endpoints") + require.Equal(t, 0, game.NodeEndpointUnsafeCount, "Should count no unsafe endpoints") require.False(t, game.HasMixedSafety(), "Should not have mixed safety") }) @@ -841,16 +841,16 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointDifferentOutputRoots: false, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointDifferentRoots: false, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.True(t, game.RollupEndpointDifferentOutputRoots, "Should track different output roots") + require.True(t, game.NodeEndpointDifferentRoots, "Should track different output roots") }) t.Run("DoesNotTrackDifferentOutputRootsWhenNodesAgree", func(t *testing.T) { @@ -865,16 +865,16 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointDifferentOutputRoots: false, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointDifferentRoots: false, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.False(t, game.RollupEndpointDifferentOutputRoots, "Should not track different output roots when nodes agree") + require.False(t, game.NodeEndpointDifferentRoots, "Should not track different output roots when nodes agree") }) t.Run("DoesNotTrackDifferentOutputRootsForMixedAvailability", func(t *testing.T) { @@ -889,16 +889,16 @@ func 
TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointDifferentOutputRoots: false, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointDifferentRoots: false, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.False(t, game.RollupEndpointDifferentOutputRoots, "Should not track different output roots for mixed availability") + require.False(t, game.NodeEndpointDifferentRoots, "Should not track different output roots for mixed availability") require.True(t, game.HasMixedAvailability(), "Should have mixed availability") }) @@ -915,16 +915,16 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: make(map[string]bool), - RollupEndpointDifferentOutputRoots: false, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointDifferentRoots: false, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) - require.True(t, game.RollupEndpointDifferentOutputRoots, "Should track different output roots even with single disagreeing node") + require.True(t, game.NodeEndpointDifferentRoots, "Should track different output roots even with single disagreeing node") }) t.Run("DoesNotTrackDifferentOutputRootsWithOnlyErrors", func(t *testing.T) { @@ -939,15 +939,15 @@ func TestOutputAgreementEnricher_SafetyCounting(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 0, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, - RollupEndpointErrors: 
make(map[string]bool), - RollupEndpointDifferentOutputRoots: false, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + NodeEndpointDifferentRoots: false, } err := enricher.Enrich(context.Background(), rpcblock.Latest, nil, game) require.ErrorIs(t, err, ErrAllNodesUnavailable) - require.False(t, game.RollupEndpointDifferentOutputRoots, "Should not track different output roots when all nodes error") + require.False(t, game.NodeEndpointDifferentRoots, "Should not track different output roots when all nodes error") }) } diff --git a/op-dispute-mon/mon/extract/super_agreement_enricher.go b/op-dispute-mon/mon/extract/super_agreement_enricher.go index 3d50c7fb0650e..1523fd8e6d715 100644 --- a/op-dispute-mon/mon/extract/super_agreement_enricher.go +++ b/op-dispute-mon/mon/extract/super_agreement_enricher.go @@ -54,6 +54,8 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc return fmt.Errorf("%w but required for game type %v", ErrSuperNodeRpcRequired, game.GameType) } + game.NodeEndpointTotalCount = len(e.clients) + results := make([]superRootResult, len(e.clients)) var wg sync.WaitGroup for i, client := range e.clients { @@ -71,6 +73,12 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc } superRoot := common.Hash(response.Data.SuperRoot) + // If the super root that we computed matches the game's root claim, the game could + // still technically be invalid if the L1 data required to verify the super root was + // not fully available on the L1 at the time the game was proposed. In this case, "safe" + // means that all the L1 data needed to verify cross-chain dependencies was available. + // The game itself is still "safe" from a security/liveness perspective, but the game + // would be challenged by an honest proposer. 
results[i] = superRootResult{ superRoot: superRoot, isSafe: response.Data.VerifiedRequiredL1.Number <= game.L1HeadNum, @@ -84,13 +92,26 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc for idx, result := range results { if result.err != nil { e.log.Error("Failed to fetch super root", "clientIndex", idx, "l2SequenceNumber", game.L2SequenceNumber, "err", result.err) + endpointID := fmt.Sprintf("client-%d", idx) + game.NodeEndpointErrors[endpointID] = true + game.NodeEndpointErrorCount++ continue } validResults = append(validResults, result) - if !result.notFound { + if result.notFound { + game.NodeEndpointNotFoundCount++ + } else { foundResults = append(foundResults, result) + // Track safety counts only for found results where the super root matches the game's root claim + if result.superRoot == game.RootClaim { + if result.isSafe { + game.NodeEndpointSafeCount++ + } else { + game.NodeEndpointUnsafeCount++ + } + } } } @@ -119,6 +140,7 @@ func (e *SuperAgreementEnricher) Enrich(ctx context.Context, block rpcblock.Bloc for _, result := range foundResults[1:] { if result.superRoot != firstResult.superRoot { diverged = true + game.NodeEndpointDifferentRoots = true break } } diff --git a/op-dispute-mon/mon/extract/super_agreement_enricher_test.go b/op-dispute-mon/mon/extract/super_agreement_enricher_test.go index 4c1ccf9f0f432..cc4da5dac3816 100644 --- a/op-dispute-mon/mon/extract/super_agreement_enricher_test.go +++ b/op-dispute-mon/mon/extract/super_agreement_enricher_test.go @@ -28,9 +28,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.ErrorIs(t, err, ErrSuperNodeRpcRequired) @@ -47,9 +48,10 @@ 
func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: gameType, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -68,9 +70,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: gameType, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -86,9 +89,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.ErrorIs(t, err, ErrAllSuperNodesUnavailable) @@ -103,9 +107,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: common.Hash{}, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: common.Hash{}, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -121,9 +126,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + 
NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -139,9 +145,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -157,9 +164,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: common.Hash{}, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: common.Hash{}, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -175,9 +183,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 100, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 100, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -193,9 +202,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 100, - L2SequenceNumber: 42984924, - RootClaim: mockRootClaim, + L1HeadNum: 100, + L2SequenceNumber: 42984924, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -213,9 +223,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - 
L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.Error(t, err) @@ -234,9 +245,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 100, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 100, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -254,9 +266,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -275,9 +288,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -295,9 +309,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 0, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -318,9 +333,10 @@ 
func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 50, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 50, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -341,9 +357,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 50, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 50, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -364,9 +381,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 50, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 50, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -388,9 +406,10 @@ func TestDetector_CheckSuperRootAgreement(t *testing.T) { GameMetadata: challengerTypes.GameMetadata{ GameType: 999, }, - L1HeadNum: 200, - L2SequenceNumber: 50, - RootClaim: mockRootClaim, + L1HeadNum: 200, + L2SequenceNumber: 50, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), } err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) require.NoError(t, err) @@ -448,3 +467,171 @@ func (s *stubSuperNodeClient) SuperRootAtTimestamp(_ context.Context, timestamp }, }, nil } + +// TestSuperNodeEndpointTracking verifies that all endpoint tracking fields are properly populated +func TestSuperNodeEndpointTracking(t *testing.T) { + t.Run("TrackErrorsCorrectly", func(t 
*testing.T) { + validator, clients, _ := setupMultiSuperNodeTest(t, 3) + clients[0].outputErr = errors.New("error1") + clients[1].outputErr = errors.New("error2") + clients[2].superRoot = mockRootClaim + clients[2].derivedFromL1BlockNum = 100 + + game := &types.EnrichedGameData{ + GameMetadata: challengerTypes.GameMetadata{ + GameType: 999, // Super root game type + }, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + } + + err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) + require.NoError(t, err) + + // Verify error tracking + require.Equal(t, 3, game.NodeEndpointTotalCount, "Should track total endpoints") + require.Equal(t, 2, game.NodeEndpointErrorCount, "Should track 2 errors") + require.Equal(t, 2, len(game.NodeEndpointErrors), "Should track 2 unique endpoint errors") + require.True(t, game.NodeEndpointErrors["client-0"], "Should track client-0 error") + require.True(t, game.NodeEndpointErrors["client-1"], "Should track client-1 error") + }) + + t.Run("TrackNotFoundCount", func(t *testing.T) { + validator, clients, _ := setupMultiSuperNodeTest(t, 3) + clients[0].notFound = true + clients[1].notFound = true + clients[2].superRoot = mockRootClaim + clients[2].derivedFromL1BlockNum = 100 + + game := &types.EnrichedGameData{ + GameMetadata: challengerTypes.GameMetadata{ + GameType: 999, + }, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + } + + err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) + require.NoError(t, err) + + require.Equal(t, 3, game.NodeEndpointTotalCount) + require.Equal(t, 2, game.NodeEndpointNotFoundCount, "Should track 2 not found responses") + require.Equal(t, 0, game.NodeEndpointErrorCount, "Should have no errors") + }) + + t.Run("TrackSafeUnsafeCounts", func(t *testing.T) { + validator, clients, _ := setupMultiSuperNodeTest(t, 4) + // Two clients report safe 
(derivedFromL1BlockNum <= game.L1HeadNum) + clients[0].superRoot = mockRootClaim + clients[0].derivedFromL1BlockNum = 100 // Safe + clients[1].superRoot = mockRootClaim + clients[1].derivedFromL1BlockNum = 200 // Safe + // Two clients report unsafe (derivedFromL1BlockNum > game.L1HeadNum) + clients[2].superRoot = mockRootClaim + clients[2].derivedFromL1BlockNum = 201 // Unsafe + clients[3].superRoot = mockRootClaim + clients[3].derivedFromL1BlockNum = 300 // Unsafe + + game := &types.EnrichedGameData{ + GameMetadata: challengerTypes.GameMetadata{ + GameType: 999, + }, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + } + + // This should result in disagreement due to mixed safety + err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) + require.NoError(t, err) + + require.Equal(t, 4, game.NodeEndpointTotalCount) + require.Equal(t, 2, game.NodeEndpointSafeCount, "Should track 2 safe assessments") + require.Equal(t, 2, game.NodeEndpointUnsafeCount, "Should track 2 unsafe assessments") + require.True(t, game.HasMixedSafety(), "Should detect mixed safety") + }) + + t.Run("TrackDivergentSuperRoots", func(t *testing.T) { + validator, clients, _ := setupMultiSuperNodeTest(t, 3) + divergedRoot := common.HexToHash("0xdivergent") + clients[0].superRoot = mockRootClaim + clients[0].derivedFromL1BlockNum = 100 + clients[1].superRoot = divergedRoot + clients[1].derivedFromL1BlockNum = 100 + clients[2].superRoot = divergedRoot + clients[2].derivedFromL1BlockNum = 100 + + game := &types.EnrichedGameData{ + GameMetadata: challengerTypes.GameMetadata{ + GameType: 999, + }, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + } + + err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) + require.NoError(t, err) + + require.True(t, game.NodeEndpointDifferentRoots, "Should flag divergent super roots") + require.False(t, 
game.AgreeWithClaim, "Should disagree when super roots diverge") + }) + + t.Run("TrackMixedAvailability", func(t *testing.T) { + validator, clients, _ := setupMultiSuperNodeTest(t, 3) + clients[0].notFound = true + clients[1].superRoot = mockRootClaim + clients[1].derivedFromL1BlockNum = 100 + clients[2].superRoot = mockRootClaim + clients[2].derivedFromL1BlockNum = 100 + + game := &types.EnrichedGameData{ + GameMetadata: challengerTypes.GameMetadata{ + GameType: 999, + }, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + } + + err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) + require.NoError(t, err) + + require.Equal(t, 3, game.NodeEndpointTotalCount) + require.Equal(t, 1, game.NodeEndpointNotFoundCount) + require.True(t, game.HasMixedAvailability(), "Should detect mixed availability") + }) + + t.Run("AllFieldsZeroWhenNoEndpoints", func(t *testing.T) { + logger := testlog.Logger(t, log.LvlInfo) + validator := NewSuperAgreementEnricher(logger, &stubOutputMetrics{}, []SuperRootProvider{}, clock.NewDeterministicClock(time.Unix(9824924, 499))) + + game := &types.EnrichedGameData{ + GameMetadata: challengerTypes.GameMetadata{ + GameType: 999, + }, + L1HeadNum: 200, + L2SequenceNumber: 0, + RootClaim: mockRootClaim, + NodeEndpointErrors: make(map[string]bool), + } + + err := validator.Enrich(context.Background(), rpcblock.Latest, nil, game) + require.ErrorIs(t, err, ErrSuperNodeRpcRequired) + + // Verify all counts remain zero when no endpoints + require.Equal(t, 0, game.NodeEndpointTotalCount) + require.Equal(t, 0, game.NodeEndpointErrorCount) + require.Equal(t, 0, game.NodeEndpointNotFoundCount) + require.Equal(t, 0, game.NodeEndpointSafeCount) + require.Equal(t, 0, game.NodeEndpointUnsafeCount) + require.False(t, game.NodeEndpointDifferentRoots) + }) +} diff --git a/op-dispute-mon/mon/mixed_availability.go b/op-dispute-mon/mon/mixed_availability.go index 
ad6d5527143bf..c4ac67cd55c7c 100644 --- a/op-dispute-mon/mon/mixed_availability.go +++ b/op-dispute-mon/mon/mixed_availability.go @@ -28,9 +28,9 @@ func (m *MixedAvailability) CheckMixedAvailability(games []*types.EnrichedGameDa count++ m.logger.Debug("Mixed availability detected", "game", game.Proxy, - "totalEndpoints", game.RollupEndpointTotalCount, - "notFoundCount", game.RollupEndpointNotFoundCount, - "errorCount", game.RollupEndpointErrorCount) + "totalEndpoints", game.NodeEndpointTotalCount, + "notFoundCount", game.NodeEndpointNotFoundCount, + "errorCount", game.NodeEndpointErrorCount) } } diff --git a/op-dispute-mon/mon/mixed_availability_test.go b/op-dispute-mon/mon/mixed_availability_test.go index 627b8cd3ac562..79b0745164adb 100644 --- a/op-dispute-mon/mon/mixed_availability_test.go +++ b/op-dispute-mon/mon/mixed_availability_test.go @@ -13,11 +13,11 @@ import ( func TestCheckMixedAvailability(t *testing.T) { games := []*types.EnrichedGameData{ - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, RollupEndpointTotalCount: 5, RollupEndpointNotFoundCount: 2, RollupEndpointErrorCount: 1}, // Mixed (2 successful) - {RollupEndpointTotalCount: 3, RollupEndpointNotFoundCount: 0, RollupEndpointErrorCount: 0}, // All successful - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, RollupEndpointTotalCount: 6, RollupEndpointNotFoundCount: 2, RollupEndpointErrorCount: 2}, // Mixed (2 successful) - {RollupEndpointTotalCount: 3, RollupEndpointNotFoundCount: 3, RollupEndpointErrorCount: 0}, // All not found - {RollupEndpointTotalCount: 2, RollupEndpointNotFoundCount: 0, RollupEndpointErrorCount: 2}, // All errors + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, NodeEndpointTotalCount: 5, NodeEndpointNotFoundCount: 2, NodeEndpointErrorCount: 1}, // Mixed (2 successful) + {NodeEndpointTotalCount: 3, NodeEndpointNotFoundCount: 0, NodeEndpointErrorCount: 0}, // All successful + {GameMetadata: 
gameTypes.GameMetadata{Proxy: common.Address{0x22}}, NodeEndpointTotalCount: 6, NodeEndpointNotFoundCount: 2, NodeEndpointErrorCount: 2}, // Mixed (2 successful) + {NodeEndpointTotalCount: 3, NodeEndpointNotFoundCount: 3, NodeEndpointErrorCount: 0}, // All not found + {NodeEndpointTotalCount: 2, NodeEndpointNotFoundCount: 0, NodeEndpointErrorCount: 2}, // All errors } metrics := &stubMixedAvailabilityMetrics{} logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) diff --git a/op-dispute-mon/mon/mixed_safety.go b/op-dispute-mon/mon/mixed_safety.go index 8f5b0a808a064..d7de77a2ccdcf 100644 --- a/op-dispute-mon/mon/mixed_safety.go +++ b/op-dispute-mon/mon/mixed_safety.go @@ -28,8 +28,8 @@ func (m *MixedSafetyMonitor) CheckMixedSafety(games []*types.EnrichedGameData) { count++ m.logger.Debug("Mixed safety detected", "game", game.Proxy, - "safeCount", game.RollupEndpointSafeCount, - "unsafeCount", game.RollupEndpointUnsafeCount) + "safeCount", game.NodeEndpointSafeCount, + "unsafeCount", game.NodeEndpointUnsafeCount) } } diff --git a/op-dispute-mon/mon/mixed_safety_test.go b/op-dispute-mon/mon/mixed_safety_test.go index 15ba8009bb1c5..6938c3940417e 100644 --- a/op-dispute-mon/mon/mixed_safety_test.go +++ b/op-dispute-mon/mon/mixed_safety_test.go @@ -13,11 +13,11 @@ import ( func TestCheckMixedSafety(t *testing.T) { games := []*types.EnrichedGameData{ - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, RollupEndpointSafeCount: 2, RollupEndpointUnsafeCount: 1}, - {RollupEndpointSafeCount: 3, RollupEndpointUnsafeCount: 0}, // All safe - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, RollupEndpointSafeCount: 1, RollupEndpointUnsafeCount: 4}, - {RollupEndpointSafeCount: 0, RollupEndpointUnsafeCount: 2}, // All unsafe - {RollupEndpointSafeCount: 0, RollupEndpointUnsafeCount: 0}, // No safety checks + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, NodeEndpointSafeCount: 2, NodeEndpointUnsafeCount: 1}, + 
{NodeEndpointSafeCount: 3, NodeEndpointUnsafeCount: 0}, // All safe + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, NodeEndpointSafeCount: 1, NodeEndpointUnsafeCount: 4}, + {NodeEndpointSafeCount: 0, NodeEndpointUnsafeCount: 2}, // All unsafe + {NodeEndpointSafeCount: 0, NodeEndpointUnsafeCount: 0}, // No safety checks } metrics := &stubMixedSafetyMetrics{} logger, capturedLogs := testlog.CaptureLogger(t, log.LvlDebug) diff --git a/op-dispute-mon/mon/monitor_test.go b/op-dispute-mon/mon/monitor_test.go index 0b92d64fc4e3a..5694e61bdacea 100644 --- a/op-dispute-mon/mon/monitor_test.go +++ b/op-dispute-mon/mon/monitor_test.go @@ -175,14 +175,14 @@ func TestMonitor_NodeEndpointErrorsMonitorIntegration(t *testing.T) { games := []*monTypes.EnrichedGameData{ { GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_1": true, "endpoint_2": true, }, }, { GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_2": true, // Overlapping with first game "endpoint_3": true, }, @@ -221,16 +221,16 @@ func TestMonitor_NodeEndpointErrorCountMonitorIntegration(t *testing.T) { // Create games with endpoint error counts games := []*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrorCount: 5, // First game has 5 errors + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointErrorCount: 5, // First game has 5 errors }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrorCount: 3, // Second game has 3 errors + GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointErrorCount: 3, // Second game has 3 errors }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointErrorCount: 0, // Third game has no errors 
+ GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointErrorCount: 0, // Third game has no errors }, } @@ -275,28 +275,28 @@ func TestMonitor_MixedAvailabilityMonitorIntegration(t *testing.T) { // Create games with mixed availability scenarios games := []*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointTotalCount: 3, - RollupEndpointNotFoundCount: 1, // Mixed availability: some found, some not found - RollupEndpointErrorCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointTotalCount: 3, + NodeEndpointNotFoundCount: 1, // Mixed availability: some found, some not found + NodeEndpointErrorCount: 0, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointTotalCount: 2, - RollupEndpointNotFoundCount: 2, // All endpoints not found - not mixed availability - RollupEndpointErrorCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointTotalCount: 2, + NodeEndpointNotFoundCount: 2, // All endpoints not found - not mixed availability + NodeEndpointErrorCount: 0, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointTotalCount: 4, - RollupEndpointNotFoundCount: 2, // Mixed availability: some found, some not found - RollupEndpointErrorCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointTotalCount: 4, + NodeEndpointNotFoundCount: 2, // Mixed availability: some found, some not found + NodeEndpointErrorCount: 0, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x44}}, - RollupEndpointTotalCount: 3, - RollupEndpointNotFoundCount: 0, // All endpoints found - not mixed availability - RollupEndpointErrorCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x44}}, + NodeEndpointTotalCount: 3, + NodeEndpointNotFoundCount: 0, // All endpoints found - not mixed availability + NodeEndpointErrorCount: 0, }, } @@ 
-341,29 +341,29 @@ func TestMonitor_MixedSafetyMonitorIntegration(t *testing.T) { // Create games with mixed safety scenarios games := []*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointSafeCount: 2, // Mixed safety: some safe, some unsafe - RollupEndpointUnsafeCount: 1, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointSafeCount: 2, // Mixed safety: some safe, some unsafe + NodeEndpointUnsafeCount: 1, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointSafeCount: 3, // All endpoints safe - not mixed safety - RollupEndpointUnsafeCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointSafeCount: 3, // All endpoints safe - not mixed safety + NodeEndpointUnsafeCount: 0, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointSafeCount: 1, // Mixed safety: some safe, some unsafe - RollupEndpointUnsafeCount: 4, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointSafeCount: 1, // Mixed safety: some safe, some unsafe + NodeEndpointUnsafeCount: 4, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x44}}, - RollupEndpointSafeCount: 0, // All endpoints unsafe - not mixed safety - RollupEndpointUnsafeCount: 2, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x44}}, + NodeEndpointSafeCount: 0, // All endpoints unsafe - not mixed safety + NodeEndpointUnsafeCount: 2, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x55}}, - RollupEndpointSafeCount: 0, // No safety checks performed - not mixed safety - RollupEndpointUnsafeCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x55}}, + NodeEndpointSafeCount: 0, // No safety checks performed - not mixed safety + NodeEndpointUnsafeCount: 0, }, } @@ -395,19 +395,19 @@ func TestMonitor_MixedSafetyMonitorIntegration(t *testing.T) { // Create games without mixed safety games := 
[]*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointSafeCount: 5, // All safe - RollupEndpointUnsafeCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointSafeCount: 5, // All safe + NodeEndpointUnsafeCount: 0, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointSafeCount: 0, // All unsafe - RollupEndpointUnsafeCount: 3, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointSafeCount: 0, // All unsafe + NodeEndpointUnsafeCount: 3, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointSafeCount: 0, // No checks performed - RollupEndpointUnsafeCount: 0, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointSafeCount: 0, // No checks performed + NodeEndpointUnsafeCount: 0, }, } @@ -438,9 +438,9 @@ func TestMonitor_MixedSafetyMonitorIntegration(t *testing.T) { // Create a game with minimal mixed safety (1 safe, 1 unsafe) games := []*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointSafeCount: 1, // Minimal mixed safety - RollupEndpointUnsafeCount: 1, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointSafeCount: 1, // Minimal mixed safety + NodeEndpointUnsafeCount: 1, }, } @@ -484,30 +484,30 @@ func TestMonitor_DifferentOutputRootMonitorIntegration(t *testing.T) { // Create games with different output root scenarios games := []*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointDifferentOutputRoots: true, // Has different output roots + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointDifferentRoots: true, // Has different output roots }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointDifferentOutputRoots: false, // No disagreement + GameMetadata: 
types.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointDifferentRoots: false, // No disagreement }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointDifferentOutputRoots: true, // Has different output roots + GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointDifferentRoots: true, // Has different output roots }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x44}}, - RollupEndpointDifferentOutputRoots: false, // No disagreement + GameMetadata: types.GameMetadata{Proxy: common.Address{0x44}}, + NodeEndpointDifferentRoots: false, // No disagreement }, } extractor := &mockExtractor{games: games} forecast := &mockForecast{} differentOutputRootMetrics := &mockDifferentOutputRootMetrics{} - differentOutputRootMonitor := NewDifferentOutputRootMonitor(logger, differentOutputRootMetrics) + differentOutputRootMonitor := NewDifferentRootMonitor(logger, differentOutputRootMetrics) monitor := newGameMonitor(context.Background(), logger, cl, metrics.NoopMetrics, monitorInterval, 10*time.Second, fetchHeadBlock, - extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentOutputRoots) + extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentRoots) err := monitor.monitorGames() require.NoError(t, err) @@ -529,26 +529,26 @@ func TestMonitor_DifferentOutputRootMonitorIntegration(t *testing.T) { // Create games without different output roots games := []*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointDifferentOutputRoots: false, // No disagreement + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointDifferentRoots: false, // No disagreement }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointDifferentOutputRoots: false, // No disagreement + GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointDifferentRoots: false, // 
No disagreement }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointDifferentOutputRoots: false, // No disagreement + GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointDifferentRoots: false, // No disagreement }, } extractor := &mockExtractor{games: games} forecast := &mockForecast{} differentOutputRootMetrics := &mockDifferentOutputRootMetrics{} - differentOutputRootMonitor := NewDifferentOutputRootMonitor(logger, differentOutputRootMetrics) + differentOutputRootMonitor := NewDifferentRootMonitor(logger, differentOutputRootMetrics) monitor := newGameMonitor(context.Background(), logger, cl, metrics.NoopMetrics, monitorInterval, 10*time.Second, fetchHeadBlock, - extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentOutputRoots) + extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentRoots) err := monitor.monitorGames() require.NoError(t, err) @@ -569,26 +569,26 @@ func TestMonitor_DifferentOutputRootMonitorIntegration(t *testing.T) { // Create games where all have different output roots games := []*monTypes.EnrichedGameData{ { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointDifferentOutputRoots: true, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointDifferentRoots: true, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointDifferentOutputRoots: true, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointDifferentRoots: true, }, { - GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointDifferentOutputRoots: true, + GameMetadata: types.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointDifferentRoots: true, }, } extractor := &mockExtractor{games: games} forecast := &mockForecast{} differentOutputRootMetrics := &mockDifferentOutputRootMetrics{} - differentOutputRootMonitor := 
NewDifferentOutputRootMonitor(logger, differentOutputRootMetrics) + differentOutputRootMonitor := NewDifferentRootMonitor(logger, differentOutputRootMetrics) monitor := newGameMonitor(context.Background(), logger, cl, metrics.NoopMetrics, monitorInterval, 10*time.Second, fetchHeadBlock, - extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentOutputRoots) + extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentRoots) err := monitor.monitorGames() require.NoError(t, err) @@ -612,10 +612,10 @@ func TestMonitor_DifferentOutputRootMonitorIntegration(t *testing.T) { extractor := &mockExtractor{games: games} forecast := &mockForecast{} differentOutputRootMetrics := &mockDifferentOutputRootMetrics{} - differentOutputRootMonitor := NewDifferentOutputRootMonitor(logger, differentOutputRootMetrics) + differentOutputRootMonitor := NewDifferentRootMonitor(logger, differentOutputRootMetrics) monitor := newGameMonitor(context.Background(), logger, cl, metrics.NoopMetrics, monitorInterval, 10*time.Second, fetchHeadBlock, - extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentOutputRoots) + extractor.Extract, forecast.Forecast, differentOutputRootMonitor.CheckDifferentRoots) err := monitor.monitorGames() require.NoError(t, err) @@ -630,6 +630,6 @@ type mockDifferentOutputRootMetrics struct { recordedCount int } -func (m *mockDifferentOutputRootMetrics) RecordDifferentOutputRootGames(count int) { +func (m *mockDifferentOutputRootMetrics) RecordDifferentRootGames(count int) { m.recordedCount = count } diff --git a/op-dispute-mon/mon/node_endpoint_error_count.go b/op-dispute-mon/mon/node_endpoint_error_count.go index 4b863fe210b15..67655f6fc0b48 100644 --- a/op-dispute-mon/mon/node_endpoint_error_count.go +++ b/op-dispute-mon/mon/node_endpoint_error_count.go @@ -25,7 +25,7 @@ func (m *NodeEndpointErrorCountMonitor) CheckNodeEndpointErrorCount(games []*typ totalErrors := 0 for _, game := range games { - 
totalErrors += game.RollupEndpointErrorCount + totalErrors += game.NodeEndpointErrorCount } m.metrics.RecordNodeEndpointErrorCount(totalErrors) @@ -35,7 +35,7 @@ func (m *NodeEndpointErrorCountMonitor) CheckNodeEndpointErrorCount(games []*typ func countGamesWithErrors(games []*types.EnrichedGameData) int { count := 0 for _, game := range games { - if game.RollupEndpointErrorCount > 0 { + if game.NodeEndpointErrorCount > 0 { count++ } } diff --git a/op-dispute-mon/mon/node_endpoint_error_count_test.go b/op-dispute-mon/mon/node_endpoint_error_count_test.go index 4dae0c7cb9af8..9ca7f1bdd4f1b 100644 --- a/op-dispute-mon/mon/node_endpoint_error_count_test.go +++ b/op-dispute-mon/mon/node_endpoint_error_count_test.go @@ -13,9 +13,9 @@ import ( func TestCheckNodeEndpointErrorCount_NoErrors(t *testing.T) { games := []*types.EnrichedGameData{ - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, RollupEndpointErrorCount: 0}, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, RollupEndpointErrorCount: 0}, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, RollupEndpointErrorCount: 0}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, NodeEndpointErrorCount: 0}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, NodeEndpointErrorCount: 0}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, NodeEndpointErrorCount: 0}, } metrics := &stubNodeEndpointErrorCountMetrics{} @@ -30,12 +30,12 @@ func TestCheckNodeEndpointErrorCount_NoErrors(t *testing.T) { func TestCheckNodeEndpointErrorCount_SingleGameWithErrors(t *testing.T) { games := []*types.EnrichedGameData{ { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrorCount: 5, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointErrorCount: 5, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrorCount: 0, + 
GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointErrorCount: 0, }, } @@ -51,16 +51,16 @@ func TestCheckNodeEndpointErrorCount_SingleGameWithErrors(t *testing.T) { func TestCheckNodeEndpointErrorCount_MultipleGamesWithErrors(t *testing.T) { games := []*types.EnrichedGameData{ { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrorCount: 3, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointErrorCount: 3, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrorCount: 7, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointErrorCount: 7, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointErrorCount: 2, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointErrorCount: 2, }, } @@ -77,20 +77,20 @@ func TestCheckNodeEndpointErrorCount_MultipleGamesWithErrors(t *testing.T) { func TestCheckNodeEndpointErrorCount_MixedGamesWithAndWithoutErrors(t *testing.T) { games := []*types.EnrichedGameData{ { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrorCount: 0, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointErrorCount: 0, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrorCount: 4, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointErrorCount: 4, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointErrorCount: 0, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointErrorCount: 0, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x44}}, - RollupEndpointErrorCount: 6, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x44}}, + NodeEndpointErrorCount: 6, }, } @@ -119,16 +119,16 @@ func 
TestCheckNodeEndpointErrorCount_EmptyGamesList(t *testing.T) { func TestCheckNodeEndpointErrorCount_HighVolumeErrors(t *testing.T) { games := []*types.EnrichedGameData{ { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrorCount: 100, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointErrorCount: 100, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrorCount: 250, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointErrorCount: 250, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointErrorCount: 75, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointErrorCount: 75, }, } @@ -156,27 +156,27 @@ func TestCountGamesWithErrors(t *testing.T) { { name: "no errors", games: []*types.EnrichedGameData{ - {RollupEndpointErrorCount: 0}, - {RollupEndpointErrorCount: 0}, + {NodeEndpointErrorCount: 0}, + {NodeEndpointErrorCount: 0}, }, expected: 0, }, { name: "all games have errors", games: []*types.EnrichedGameData{ - {RollupEndpointErrorCount: 1}, - {RollupEndpointErrorCount: 5}, - {RollupEndpointErrorCount: 10}, + {NodeEndpointErrorCount: 1}, + {NodeEndpointErrorCount: 5}, + {NodeEndpointErrorCount: 10}, }, expected: 3, }, { name: "mixed errors", games: []*types.EnrichedGameData{ - {RollupEndpointErrorCount: 0}, - {RollupEndpointErrorCount: 3}, - {RollupEndpointErrorCount: 0}, - {RollupEndpointErrorCount: 7}, + {NodeEndpointErrorCount: 0}, + {NodeEndpointErrorCount: 3}, + {NodeEndpointErrorCount: 0}, + {NodeEndpointErrorCount: 7}, }, expected: 2, }, diff --git a/op-dispute-mon/mon/node_endpoint_errors.go b/op-dispute-mon/mon/node_endpoint_errors.go index d7d26b475f0d7..83a35730fe128 100644 --- a/op-dispute-mon/mon/node_endpoint_errors.go +++ b/op-dispute-mon/mon/node_endpoint_errors.go @@ -26,8 +26,8 @@ func (m *NodeEndpointErrorsMonitor) CheckNodeEndpointErrors(games 
[]*types.Enric uniqueEndpointErrors := make(map[string]bool) for _, game := range games { - if len(game.RollupEndpointErrors) != 0 { - for endpointID := range game.RollupEndpointErrors { + if len(game.NodeEndpointErrors) != 0 { + for endpointID := range game.NodeEndpointErrors { uniqueEndpointErrors[endpointID] = true } } diff --git a/op-dispute-mon/mon/node_endpoint_errors_test.go b/op-dispute-mon/mon/node_endpoint_errors_test.go index 332c70a32fe48..1ded4bd377dcd 100644 --- a/op-dispute-mon/mon/node_endpoint_errors_test.go +++ b/op-dispute-mon/mon/node_endpoint_errors_test.go @@ -13,9 +13,9 @@ import ( func TestCheckNodeEndpointErrors_NoErrors(t *testing.T) { games := []*types.EnrichedGameData{ - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, RollupEndpointErrors: nil}, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, RollupEndpointErrors: make(map[string]bool)}, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}}, // No RollupEndpointErrors field set + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, NodeEndpointErrors: nil}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, NodeEndpointErrors: make(map[string]bool)}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}}, // No NodeEndpointErrors field set } metrics := &stubNodeEndpointErrorsMetrics{} @@ -31,12 +31,12 @@ func TestCheckNodeEndpointErrors_SingleGameWithErrors(t *testing.T) { games := []*types.EnrichedGameData{ { GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_1": true, "endpoint_2": true, }, }, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, RollupEndpointErrors: nil}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, NodeEndpointErrors: nil}, } metrics := &stubNodeEndpointErrorsMetrics{} @@ -52,21 +52,21 @@ func 
TestCheckNodeEndpointErrors_MultipleGamesWithOverlappingErrors(t *testing.T games := []*types.EnrichedGameData{ { GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_1": true, "endpoint_2": true, }, }, { GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_2": true, // Overlapping with first game "endpoint_3": true, }, }, { GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_4": true, }, }, @@ -84,17 +84,17 @@ func TestCheckNodeEndpointErrors_MultipleGamesWithOverlappingErrors(t *testing.T func TestCheckNodeEndpointErrors_MixedGamesWithAndWithoutErrors(t *testing.T) { games := []*types.EnrichedGameData{ - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, RollupEndpointErrors: nil}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, NodeEndpointErrors: nil}, { GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_1": true, }, }, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, RollupEndpointErrors: make(map[string]bool)}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, NodeEndpointErrors: make(map[string]bool)}, { GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x44}}, - RollupEndpointErrors: map[string]bool{ + NodeEndpointErrors: map[string]bool{ "endpoint_2": true, }, }, diff --git a/op-dispute-mon/mon/node_endpoint_out_of_sync.go b/op-dispute-mon/mon/node_endpoint_out_of_sync.go index 60ed4ea2647be..d19aefe36a1c3 100644 --- a/op-dispute-mon/mon/node_endpoint_out_of_sync.go +++ b/op-dispute-mon/mon/node_endpoint_out_of_sync.go @@ -25,7 +25,7 @@ func (m 
*NodeEndpointOutOfSyncMonitor) CheckNodeEndpointOutOfSync(games []*types totalOutOfSync := 0 for _, game := range games { - totalOutOfSync += game.RollupEndpointOutOfSyncCount + totalOutOfSync += game.NodeEndpointOutOfSyncCount } m.metrics.RecordNodeEndpointOutOfSyncCount(totalOutOfSync) diff --git a/op-dispute-mon/mon/node_endpoint_out_of_sync_test.go b/op-dispute-mon/mon/node_endpoint_out_of_sync_test.go index 1f13a12302ca1..c1ab66926025a 100644 --- a/op-dispute-mon/mon/node_endpoint_out_of_sync_test.go +++ b/op-dispute-mon/mon/node_endpoint_out_of_sync_test.go @@ -13,9 +13,9 @@ import ( func TestCheckNodeEndpointOutOfSync_NoOutOfSync(t *testing.T) { games := []*types.EnrichedGameData{ - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, RollupEndpointOutOfSyncCount: 0}, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, RollupEndpointOutOfSyncCount: 0}, - {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, RollupEndpointOutOfSyncCount: 0}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, NodeEndpointOutOfSyncCount: 0}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, NodeEndpointOutOfSyncCount: 0}, + {GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, NodeEndpointOutOfSyncCount: 0}, } metrics := &stubNodeEndpointOutOfSyncMetrics{} @@ -30,12 +30,12 @@ func TestCheckNodeEndpointOutOfSync_NoOutOfSync(t *testing.T) { func TestCheckNodeEndpointOutOfSync_SingleGameOutOfSync(t *testing.T) { games := []*types.EnrichedGameData{ { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointOutOfSyncCount: 5, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointOutOfSyncCount: 5, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointOutOfSyncCount: 0, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointOutOfSyncCount: 0, }, } @@ -51,16 +51,16 @@ 
func TestCheckNodeEndpointOutOfSync_SingleGameOutOfSync(t *testing.T) { func TestCheckNodeEndpointOutOfSync_MultipleGamesOutOfSync(t *testing.T) { games := []*types.EnrichedGameData{ { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointOutOfSyncCount: 3, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointOutOfSyncCount: 3, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointOutOfSyncCount: 7, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointOutOfSyncCount: 7, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointOutOfSyncCount: 2, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointOutOfSyncCount: 2, }, } @@ -77,20 +77,20 @@ func TestCheckNodeEndpointOutOfSync_MultipleGamesOutOfSync(t *testing.T) { func TestCheckNodeEndpointOutOfSync_MixedGamesWithAndWithoutOutOfSync(t *testing.T) { games := []*types.EnrichedGameData{ { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, - RollupEndpointOutOfSyncCount: 0, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x11}}, + NodeEndpointOutOfSyncCount: 0, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, - RollupEndpointOutOfSyncCount: 4, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x22}}, + NodeEndpointOutOfSyncCount: 4, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, - RollupEndpointOutOfSyncCount: 0, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x33}}, + NodeEndpointOutOfSyncCount: 0, }, { - GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x44}}, - RollupEndpointOutOfSyncCount: 6, + GameMetadata: gameTypes.GameMetadata{Proxy: common.Address{0x44}}, + NodeEndpointOutOfSyncCount: 6, }, } diff --git a/op-dispute-mon/mon/service.go b/op-dispute-mon/mon/service.go index 43899427978e7..f8173c87dea32 100644 
--- a/op-dispute-mon/mon/service.go +++ b/op-dispute-mon/mon/service.go @@ -241,7 +241,7 @@ func (s *Service) initMonitor(ctx context.Context, cfg *config.Config) { nodeEndpointOutOfSyncMonitor := NewNodeEndpointOutOfSyncMonitor(s.logger, s.metrics) mixedAvailabilityMonitor := NewMixedAvailability(s.logger, s.metrics) mixedSafetyMonitor := NewMixedSafetyMonitor(s.logger, s.metrics) - differentOutputRootMonitor := NewDifferentOutputRootMonitor(s.logger, s.metrics) + differentRootMonitor := NewDifferentRootMonitor(s.logger, s.metrics) s.monitor = newGameMonitor(ctx, s.logger, s.cl, s.metrics, cfg.MonitorInterval, cfg.GameWindow, headBlockFetcher, extractor.Extract, forecast.Forecast, @@ -256,7 +256,7 @@ func (s *Service) initMonitor(ctx context.Context, cfg *config.Config) { nodeEndpointOutOfSyncMonitor.CheckNodeEndpointOutOfSync, mixedAvailabilityMonitor.CheckMixedAvailability, mixedSafetyMonitor.CheckMixedSafety, - differentOutputRootMonitor.CheckDifferentOutputRoots) + differentRootMonitor.CheckDifferentRoots) } func (s *Service) Start(ctx context.Context) error { diff --git a/op-dispute-mon/mon/types/types.go b/op-dispute-mon/mon/types/types.go index 7b1874516c82a..c164948e0138f 100644 --- a/op-dispute-mon/mon/types/types.go +++ b/op-dispute-mon/mon/types/types.go @@ -78,29 +78,30 @@ type EnrichedGameData struct { // that use the same DelayedWETH contract. ETHCollateral *big.Int - // RollupEndpointErrors stores endpoint IDs that returned errors other than "not found" for this game. - RollupEndpointErrors map[string]bool + // NodeEndpointErrors stores endpoint IDs that returned errors other than "not found" for this game. + NodeEndpointErrors map[string]bool - // RollupEndpointErrorCount tracks the total number of errors for this game across all endpoints. - RollupEndpointErrorCount int + // NodeEndpointErrorCount tracks the total number of errors for this game across all endpoints. 
+ NodeEndpointErrorCount int - // RollupEndpointNotFoundCount tracks the number of endpoints that returned "not found" for this game. - RollupEndpointNotFoundCount int + // NodeEndpointNotFoundCount tracks the number of endpoints that returned "not found" for this game. + NodeEndpointNotFoundCount int - // RollupEndpointOutOfSyncCount tracks the number of endpoints that were out of sync for this game. - RollupEndpointOutOfSyncCount int + // NodeEndpointOutOfSyncCount tracks the number of endpoints that were out of sync for this game. + NodeEndpointOutOfSyncCount int - // RollupEndpointTotalCount tracks the total number of rollup endpoints attempted for this game. - RollupEndpointTotalCount int + // NodeEndpointTotalCount tracks the total number of endpoints attempted for this game. + NodeEndpointTotalCount int - // RollupEndpointSafeCount tracks the number of rollup endpoints that reported the root as safe. - RollupEndpointSafeCount int + // NodeEndpointSafeCount tracks the number of endpoints that reported the root as safe. + NodeEndpointSafeCount int - // RollupEndpointUnsafeCount tracks the number of rollup endpoints that reported the root as unsafe. - RollupEndpointUnsafeCount int + // NodeEndpointUnsafeCount tracks the number of endpoints that reported the root as unsafe. + NodeEndpointUnsafeCount int - // RollupEndpointDifferentOutputRoots tracks whether rollup endpoints returned different output roots for this game. - RollupEndpointDifferentOutputRoots bool + // NodeEndpointDifferentRoots tracks whether endpoints returned different roots for this game. + // For output root games, this means different output roots. For super root games, different super roots. + NodeEndpointDifferentRoots bool } // UsesOutputRoots returns true if the game type is one of the known types that use output roots as proposals. 
@@ -108,21 +109,21 @@ func (g EnrichedGameData) UsesOutputRoots() bool { return slices.Contains(outputRootGameTypes, types.GameType(g.GameType)) } -// HasMixedAvailability returns true if some rollup endpoints returned "not found" while others succeeded -// for this game. This indicates inconsistent block availability across the rollup node network. +// HasMixedAvailability returns true if some endpoints returned "not found" while others succeeded +// for this game. This indicates inconsistent block availability across the node network. func (g EnrichedGameData) HasMixedAvailability() bool { - if g.RollupEndpointTotalCount == 0 { + if g.NodeEndpointTotalCount == 0 { return false } - successfulEndpoints := g.RollupEndpointTotalCount - g.RollupEndpointErrorCount - g.RollupEndpointNotFoundCount - return g.RollupEndpointNotFoundCount > 0 && successfulEndpoints > 0 + successfulEndpoints := g.NodeEndpointTotalCount - g.NodeEndpointErrorCount - g.NodeEndpointNotFoundCount + return g.NodeEndpointNotFoundCount > 0 && successfulEndpoints > 0 } -// HasMixedSafety returns true if some rollup endpoints reported the root as safe and others as unsafe -// for this game. This indicates inconsistent safety assessment across the rollup node network. +// HasMixedSafety returns true if some endpoints reported the root as safe and others as unsafe +// for this game. This indicates inconsistent safety assessment across the node network. func (g EnrichedGameData) HasMixedSafety() bool { - return g.RollupEndpointSafeCount > 0 && g.RollupEndpointUnsafeCount > 0 + return g.NodeEndpointSafeCount > 0 && g.NodeEndpointUnsafeCount > 0 } // BidirectionalTree is a tree of claims represented as a flat list of claims. 
diff --git a/op-dispute-mon/mon/types/types_test.go b/op-dispute-mon/mon/types/types_test.go index 1addef9c9a037..bb8cfae9c209f 100644 --- a/op-dispute-mon/mon/types/types_test.go +++ b/op-dispute-mon/mon/types/types_test.go @@ -31,83 +31,83 @@ func TestEnrichedGameData_UsesOutputRoots(t *testing.T) { } } -func TestEnrichedGameData_RollupEndpointErrorCountInitialization(t *testing.T) { +func TestEnrichedGameData_NodeEndpointErrorCountInitialization(t *testing.T) { data := EnrichedGameData{} - require.Equal(t, 0, data.RollupEndpointErrorCount, "RollupEndpointErrorCount should default to 0") + require.Equal(t, 0, data.NodeEndpointErrorCount, "NodeEndpointErrorCount should default to 0") } func TestEnrichedGameData_HasMixedAvailability(t *testing.T) { tests := []struct { - name string - rollupEndpointTotalCount int - rollupEndpointErrorCount int - rollupEndpointNotFoundCount int - expected bool + name string + nodeEndpointTotalCount int + nodeEndpointErrorCount int + nodeEndpointNotFoundCount int + expected bool }{ { - name: "no endpoints attempted", - rollupEndpointTotalCount: 0, - rollupEndpointErrorCount: 0, - rollupEndpointNotFoundCount: 0, - expected: false, + name: "no endpoints attempted", + nodeEndpointTotalCount: 0, + nodeEndpointErrorCount: 0, + nodeEndpointNotFoundCount: 0, + expected: false, }, { - name: "all endpoints successful", - rollupEndpointTotalCount: 3, - rollupEndpointErrorCount: 0, - rollupEndpointNotFoundCount: 0, - expected: false, + name: "all endpoints successful", + nodeEndpointTotalCount: 3, + nodeEndpointErrorCount: 0, + nodeEndpointNotFoundCount: 0, + expected: false, }, { - name: "all endpoints had errors", - rollupEndpointTotalCount: 3, - rollupEndpointErrorCount: 3, - rollupEndpointNotFoundCount: 0, - expected: false, + name: "all endpoints had errors", + nodeEndpointTotalCount: 3, + nodeEndpointErrorCount: 3, + nodeEndpointNotFoundCount: 0, + expected: false, }, { - name: "all endpoints returned not found", - 
rollupEndpointTotalCount: 3, - rollupEndpointErrorCount: 0, - rollupEndpointNotFoundCount: 3, - expected: false, + name: "all endpoints returned not found", + nodeEndpointTotalCount: 3, + nodeEndpointErrorCount: 0, + nodeEndpointNotFoundCount: 3, + expected: false, }, { - name: "mixed availability - some not found, some successful", - rollupEndpointTotalCount: 3, - rollupEndpointErrorCount: 0, - rollupEndpointNotFoundCount: 1, - expected: true, + name: "mixed availability - some not found, some successful", + nodeEndpointTotalCount: 3, + nodeEndpointErrorCount: 0, + nodeEndpointNotFoundCount: 1, + expected: true, }, { - name: "mixed availability with errors - some not found, some successful, some errors", - rollupEndpointTotalCount: 5, - rollupEndpointErrorCount: 1, - rollupEndpointNotFoundCount: 2, - expected: true, + name: "mixed availability with errors - some not found, some successful, some errors", + nodeEndpointTotalCount: 5, + nodeEndpointErrorCount: 1, + nodeEndpointNotFoundCount: 2, + expected: true, }, { - name: "mixed availability - majority not found", - rollupEndpointTotalCount: 4, - rollupEndpointErrorCount: 0, - rollupEndpointNotFoundCount: 3, - expected: true, + name: "mixed availability - majority not found", + nodeEndpointTotalCount: 4, + nodeEndpointErrorCount: 0, + nodeEndpointNotFoundCount: 3, + expected: true, }, { - name: "no successful endpoints - only errors and not found", - rollupEndpointTotalCount: 4, - rollupEndpointErrorCount: 2, - rollupEndpointNotFoundCount: 2, - expected: false, + name: "no successful endpoints - only errors and not found", + nodeEndpointTotalCount: 4, + nodeEndpointErrorCount: 2, + nodeEndpointNotFoundCount: 2, + expected: false, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { data := EnrichedGameData{ - RollupEndpointTotalCount: test.rollupEndpointTotalCount, - RollupEndpointErrorCount: test.rollupEndpointErrorCount, - RollupEndpointNotFoundCount: test.rollupEndpointNotFoundCount, + 
NodeEndpointTotalCount: test.nodeEndpointTotalCount, + NodeEndpointErrorCount: test.nodeEndpointErrorCount, + NodeEndpointNotFoundCount: test.nodeEndpointNotFoundCount, } result := data.HasMixedAvailability() require.Equal(t, test.expected, result) @@ -117,67 +117,66 @@ func TestEnrichedGameData_HasMixedAvailability(t *testing.T) { func TestEnrichedGameData_HasMixedSafety(t *testing.T) { tests := []struct { - name string - rollupEndpointSafeCount int - rollupEndpointUnsafeCount int - expected bool + name string + nodeEndpointSafeCount int + nodeEndpointUnsafeCount int + expected bool }{ { - name: "no safety assessments", - rollupEndpointSafeCount: 0, - rollupEndpointUnsafeCount: 0, - expected: false, + name: "no safety assessments", + nodeEndpointSafeCount: 0, + nodeEndpointUnsafeCount: 0, + expected: false, }, { - name: "all endpoints report safe", - rollupEndpointSafeCount: 3, - rollupEndpointUnsafeCount: 0, - expected: false, + name: "all endpoints report safe", + nodeEndpointSafeCount: 3, + nodeEndpointUnsafeCount: 0, + expected: false, }, { - name: "all endpoints report unsafe", - rollupEndpointSafeCount: 0, - rollupEndpointUnsafeCount: 3, - expected: false, + name: "all endpoints report unsafe", + nodeEndpointSafeCount: 0, + nodeEndpointUnsafeCount: 3, + expected: false, }, { - name: "mixed safety - some safe, some unsafe", - rollupEndpointSafeCount: 2, - rollupEndpointUnsafeCount: 1, - expected: true, + name: "mixed safety - some safe, some unsafe", + nodeEndpointSafeCount: 2, + nodeEndpointUnsafeCount: 1, + expected: true, }, { - name: "mixed safety - minority safe", - rollupEndpointSafeCount: 1, - rollupEndpointUnsafeCount: 4, - expected: true, + name: "mixed safety - minority safe", + nodeEndpointSafeCount: 1, + nodeEndpointUnsafeCount: 4, + expected: true, }, { - name: "mixed safety - majority safe", - rollupEndpointSafeCount: 4, - rollupEndpointUnsafeCount: 1, - expected: true, + name: "mixed safety - majority safe", + nodeEndpointSafeCount: 4, + 
nodeEndpointUnsafeCount: 1, + expected: true, }, { - name: "mixed safety - equal split", - rollupEndpointSafeCount: 2, - rollupEndpointUnsafeCount: 2, - expected: true, + name: "mixed safety - equal split", + nodeEndpointSafeCount: 2, + nodeEndpointUnsafeCount: 2, + expected: true, }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { data := EnrichedGameData{ - RollupEndpointSafeCount: test.rollupEndpointSafeCount, - RollupEndpointUnsafeCount: test.rollupEndpointUnsafeCount, + NodeEndpointSafeCount: test.nodeEndpointSafeCount, + NodeEndpointUnsafeCount: test.nodeEndpointUnsafeCount, } result := data.HasMixedSafety() require.Equal(t, test.expected, result) }) } } - func TestAllSupportedGameTypesAreOutputOrSuperRootType(t *testing.T) { for _, gameType := range types.SupportedGameTypes { t.Run(gameType.String(), func(t *testing.T) { From 56ee47e5f515425dfcb9e42c4f08b7b4d15e1469 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Thu, 5 Mar 2026 11:02:26 -0500 Subject: [PATCH 053/201] contracts: implement onlyDelegateCall and add tests for audit fixes (#19272) * contracts: implement audit code fixes and add tests Add onlyDelegateCall enforcement to upgradeSuperchain, upgrade, and migrate functions (#17). Include msg.sender in deploy salt to prevent cross-caller CREATE2 collisions (#17). Add duplicate instruction key detection in upgrade validation (#9). Validate startingRespectedGameType against enabled game configs (#10). Add code-existence check in loadBytes (#18). Add setUp guard to VerifyOPCM.runSingle (#4). Remove unused _findChar function (#5). Pass real AddressManager in migrator proxy deploy args (#11). Add tests covering all audit fix behaviors. 
Co-Authored-By: Claude Opus 4.6 * contracts: regenerate semver-lock.json for OPContractsManagerV2 Co-Authored-By: Claude Opus 4.6 * contracts: bump OPContractsManagerV2 version to 7.0.10 Semver-diff requires a patch version bump when bytecode changes. Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .../L1/opcm/IOPContractsManagerV2.sol | 2 + .../scripts/deploy/VerifyOPCM.s.sol | 25 +--- .../snapshots/abi/OPContractsManagerV2.json | 16 +++ .../snapshots/semver-lock.json | 4 +- .../L1/opcm/OPContractsManagerMigrator.sol | 3 +- .../src/L1/opcm/OPContractsManagerUtils.sol | 6 + .../src/L1/opcm/OPContractsManagerV2.sol | 61 +++++++++- .../L1/opcm/OPContractsManagerUtils.t.sol | 12 ++ .../test/L1/opcm/OPContractsManagerV2.t.sol | 109 +++++++++++++++++- 9 files changed, 210 insertions(+), 28 deletions(-) diff --git a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol index 9bdc0e12dfad8..c8947a69c992f 100644 --- a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol +++ b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerV2.sol @@ -80,6 +80,8 @@ interface IOPContractsManagerV2 { error OPContractsManagerV2_InvalidUpgradeInput(); error OPContractsManagerV2_SuperchainConfigNeedsUpgrade(); error OPContractsManagerV2_InvalidUpgradeInstruction(string _key); + error OPContractsManagerV2_DuplicateUpgradeInstruction(string _key); + error OPContractsManagerV2_OnlyDelegateCall(); error OPContractsManagerV2_CannotUpgradeToCustomGasToken(); error OPContractsManagerV2_InvalidUpgradeSequence(string _lastVersion, string _thisVersion); error IdentityPrecompileCallFailed(); diff --git a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol index 8677c9cb2e34f..0bf968d6346f2 100644 --- a/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol +++ 
b/packages/contracts-bedrock/scripts/deploy/VerifyOPCM.s.sol @@ -98,9 +98,6 @@ contract VerifyOPCM is Script { /// @notice Thrown when a staticcall to a validator getter fails. error VerifyOPCM_ValidatorCallFailed(string sig); - /// @notice Thrown when _findChar is called with a multi-character string. - error VerifyOPCM_MustBeSingleChar(); - /// @notice Preamble used for blueprint contracts. bytes constant BLUEPRINT_PREAMBLE = hex"FE7100"; @@ -290,6 +287,11 @@ contract VerifyOPCM is Script { /// @param _addr Address of the contract to verify. /// @param _skipConstructorVerification Whether to skip constructor verification. function runSingle(string memory _name, address _addr, bool _skipConstructorVerification) public { + // Make sure the setup function has been called. + if (!ready) { + setUp(); + } + // This function is used as part of the release checklist to verify new contracts. // Rather than requiring an opcm input parameter, just pass in an empty reference // as we really only need this for features that are in development. @@ -1604,21 +1606,4 @@ contract VerifyOPCM is Script { if (!ok) revert VerifyOPCM_ValidatorCallFailed(_sig); return abi.decode(data, (bytes32)); } - - /// @notice Finds the position of a character in a string. - /// @param _str The string to search. - /// @param _char The character to find (as a single-char string). - /// @return The index of the first occurrence, or string length if not found. 
- function _findChar(string memory _str, string memory _char) internal pure returns (uint256) { - bytes memory strBytes = bytes(_str); - bytes memory charBytes = bytes(_char); - if (charBytes.length != 1) revert VerifyOPCM_MustBeSingleChar(); - bytes1 target = charBytes[0]; - for (uint256 i = 0; i < strBytes.length; i++) { - if (strBytes[i] == target) { - return i; - } - } - return strBytes.length; - } } diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json index 75ae4130472ae..af59d2e866c06 100644 --- a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerV2.json @@ -804,6 +804,17 @@ "name": "OPContractsManagerV2_CannotUpgradeToCustomGasToken", "type": "error" }, + { + "inputs": [ + { + "internalType": "string", + "name": "_key", + "type": "string" + } + ], + "name": "OPContractsManagerV2_DuplicateUpgradeInstruction", + "type": "error" + }, { "inputs": [], "name": "OPContractsManagerV2_InvalidGameConfigs", @@ -841,6 +852,11 @@ "name": "OPContractsManagerV2_InvalidUpgradeSequence", "type": "error" }, + { + "inputs": [], + "name": "OPContractsManagerV2_OnlyDelegateCall", + "type": "error" + }, { "inputs": [], "name": "OPContractsManagerV2_SuperchainConfigNeedsUpgrade", diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 8f20b28f9c5e8..5c8ddefa38825 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -52,8 +52,8 @@ "sourceCodeHash": "0xb3184aa5d95a82109e7134d1f61941b30e25f655b9849a0e303d04bbce0cde0b" }, "src/L1/opcm/OPContractsManagerV2.sol:OPContractsManagerV2": { - "initCodeHash": "0x88ada0dfefb77eea33baaf11d9b5a5ad51cb8c6476611d0f2376897413074619", - "sourceCodeHash": "0x1cc9dbcd4c7652f482c43e2630b324d088e825d12532711a41c636e8392636b3" + 
"initCodeHash": "0xca9edfa050a5583f063194fd8d098124d6f3c1367eec8875c0c8acf5d971657f", + "sourceCodeHash": "0x0238b990636aab82f93450b1ee2ff7a1f69d55a0b197265e696b70d285c85992" }, "src/L2/BaseFeeVault.sol:BaseFeeVault": { "initCodeHash": "0x838bbd7f381e84e21887f72bd1da605bfc4588b3c39aed96cbce67c09335b3ee", diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol index 28f8d354068d4..35a7aff2bf694 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol @@ -11,7 +11,6 @@ import { Constants } from "src/libraries/Constants.sol"; import { Features } from "src/libraries/Features.sol"; // Interfaces -import { IAddressManager } from "interfaces/legacy/IAddressManager.sol"; import { IDelayedWETH } from "interfaces/dispute/IDelayedWETH.sol"; import { IAnchorStateRegistry } from "interfaces/dispute/IAnchorStateRegistry.sol"; import { IDisputeGame } from "interfaces/dispute/IDisputeGame.sol"; @@ -107,7 +106,7 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { // what we use here. IOPContractsManagerUtils.ProxyDeployArgs memory proxyDeployArgs = IOPContractsManagerUtils.ProxyDeployArgs({ proxyAdmin: _input.chainSystemConfigs[0].proxyAdmin(), - addressManager: IAddressManager(address(0)), // AddressManager NOT needed for these proxies. 
+ addressManager: _input.chainSystemConfigs[0].proxyAdmin().addressManager(), l2ChainId: block.timestamp, saltMixer: "interop salt mixer" }); diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol index 25e7af64ed440..7c5ce5e238144 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol @@ -195,6 +195,12 @@ contract OPContractsManagerUtils { return overrideInstruction.data; } + // Check that the source contract has code. Calling an EOA returns success with empty + // data, which would cause issues when the caller tries to decode the result. + if (_source.code.length == 0) { + revert OPContractsManagerUtils_ConfigLoadFailed(_name); + } + // Otherwise, load the data from the source contract. (bool success, bytes memory result) = address(_source).staticcall(abi.encodePacked(_selector)); if (!success) { diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol index 55c15c74117c9..0e3752c0cd333 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol @@ -126,6 +126,12 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @notice Thrown when an invalid upgrade instruction is provided. error OPContractsManagerV2_InvalidUpgradeInstruction(string _key); + /// @notice Thrown when duplicate upgrade instruction keys are provided. + error OPContractsManagerV2_DuplicateUpgradeInstruction(string _key); + + /// @notice Thrown when a function that must be delegatecalled is called directly. + error OPContractsManagerV2_OnlyDelegateCall(); + /// @notice Thrown when a chain attempts to upgrade to custom gas token after initial deployment. 
error OPContractsManagerV2_CannotUpgradeToCustomGasToken(); @@ -147,9 +153,9 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// - Major bump: New required sequential upgrade /// - Minor bump: Replacement OPCM for same upgrade /// - Patch bump: Development changes (expected for normal dev work) - /// @custom:semver 7.0.9 + /// @custom:semver 7.0.10 function version() public pure returns (string memory) { - return "7.0.9"; + return "7.0.10"; } /// @param _standardValidator The standard validator for this OPCM release. @@ -176,6 +182,8 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// Superchain-wide contracts. /// @param _inp The input for the Superchain upgrade. function upgradeSuperchain(SuperchainUpgradeInput memory _inp) external returns (SuperchainContracts memory) { + _onlyDelegateCall(); + // NOTE: Since this function is very minimal and only upgrades the SuperchainConfig // contract, not bothering to fully follow the pattern of the normal chain upgrade flow. // If we expand the scope of this function to add other Superchain-wide contracts, we'll @@ -197,6 +205,9 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @param _cfg The full chain deployment configuration. /// @return The chain contracts. function deploy(FullConfig memory _cfg) external returns (ChainContracts memory) { + // Include msg.sender in the salt mixer to prevent cross-caller CREATE2 collisions. + string memory saltMixer = string(bytes.concat(bytes20(msg.sender), bytes(_cfg.saltMixer))); + // Deploy is the ONLY place where we allow the "ALL" permission for proxy deployment. IOPContractsManagerUtils.ExtraInstruction[] memory instructions = new IOPContractsManagerUtils.ExtraInstruction[](1); @@ -207,7 +218,7 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { // Load the chain contracts. 
ChainContracts memory cts = - _loadChainContracts(ISystemConfig(address(0)), _cfg.l2ChainId, _cfg.saltMixer, instructions); + _loadChainContracts(ISystemConfig(address(0)), _cfg.l2ChainId, saltMixer, instructions); // Execute the deployment. return _apply(_cfg, cts, true); @@ -217,6 +228,8 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @param _inp The chain upgrade input. /// @return The upgraded chain contracts. function upgrade(UpgradeInput memory _inp) external returns (ChainContracts memory) { + _onlyDelegateCall(); + // Sanity check that the SystemConfig isn't address(0). We use address(0) as a special // value to indicate that this is an initial deployment, so we definitely don't want to // allow it here. @@ -264,6 +277,8 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// look or function like all of the other functions in OPCMv2. /// @param _input The input parameters for the migration. function migrate(IOPContractsManagerMigrator.MigrateInput calldata _input) public { + _onlyDelegateCall(); + // Delegatecall to the migrator contract. (bool success, bytes memory result) = address(opcmMigrator).delegatecall(abi.encodeCall(IOPContractsManagerMigrator.migrate, (_input))); @@ -286,6 +301,17 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { view { for (uint256 i = 0; i < _extraInstructions.length; i++) { + // Check for duplicate instruction keys. PermittedProxyDeployment is exempt because + // multiple proxy deployments may need to be permitted in a single upgrade. + if (!_isMatchingInstructionByKey(_extraInstructions[i], Constants.PERMITTED_PROXY_DEPLOYMENT_KEY)) { + for (uint256 j = i + 1; j < _extraInstructions.length; j++) { + if (keccak256(bytes(_extraInstructions[i].key)) == keccak256(bytes(_extraInstructions[j].key))) { + revert OPContractsManagerV2_DuplicateUpgradeInstruction(_extraInstructions[i].key); + } + } + } + + // Check that the instruction is permitted. 
if (!_isPermittedInstruction(_extraInstructions[i])) { revert OPContractsManagerV2_InvalidUpgradeInstruction(_extraInstructions[i].key); } @@ -316,6 +342,13 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { } } + // Allow overriding the starting respected game type during upgrades. This is needed when + // disabling the currently-respected game type, since the validation requires the starting + // respected game type to correspond to an enabled game config. + if (_isMatchingInstructionByKey(_instruction, "overrides.cfg.startingRespectedGameType")) { + return true; + } + // Always return false by default. return false; } @@ -684,6 +717,21 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { if (!_cfg.disputeGameConfigs[1].enabled) { revert OPContractsManagerV2_InvalidGameConfigs(); } + + // Validate that the starting respected game type corresponds to an enabled game config. + bool startingGameTypeFound = false; + for (uint256 i = 0; i < _cfg.disputeGameConfigs.length; i++) { + if ( + _cfg.disputeGameConfigs[i].gameType.raw() == _cfg.startingRespectedGameType.raw() + && _cfg.disputeGameConfigs[i].enabled + ) { + startingGameTypeFound = true; + break; + } + } + if (!startingGameTypeFound) { + revert OPContractsManagerV2_InvalidGameConfigs(); + } } /// @notice Executes the deployment/upgrade action. @@ -1003,6 +1051,13 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { // INTERNAL UTILITY FUNCTIONS // /////////////////////////////////////////////////////////////////////////// + /// @notice Reverts if the function is being called directly rather than via delegatecall. + function _onlyDelegateCall() internal view { + if (address(this) == address(opcmV2)) { + revert OPContractsManagerV2_OnlyDelegateCall(); + } + } + /// @notice Helper for retrieving the version of the OPCM contract. 
/// @dev We use opcmV2.version() because it allows us to properly mock the version function /// in tests without running into issues because this contract is being DELEGATECALLed. diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol index 8458c97a3c359..b70a0fcc2d3c1 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol @@ -331,6 +331,18 @@ contract OPContractsManagerUtils_LoadBytes_Test is OPContractsManagerUtils_TestI assertEq(result, _overrideData, "Should return override data"); } + /// @notice Tests that loadBytes reverts when the source address has no code. + function test_loadBytes_sourceNoCode_reverts() public { + address eoa = makeAddr("eoa"); + + vm.expectRevert( + abi.encodeWithSelector( + IOPContractsManagerUtils.OPContractsManagerUtils_ConfigLoadFailed.selector, "testField" + ) + ); + utils.loadBytes(eoa, MOCK_SELECTOR, "testField", _emptyInstructions()); + } + /// @notice Tests that loadBytes reverts when the source call fails. function test_loadBytes_sourceCallFails_reverts() public { // Mock the source to revert. diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol index 30a7f95738bf7..6116e3dce86e8 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol @@ -478,6 +478,12 @@ contract OPContractsManagerV2_Upgrade_Test is OPContractsManagerV2_Upgrade_TestI runCurrentUpgradeV2(chainPAO); } + /// @notice Tests that the upgrade function reverts when not delegatecalled. 
+ function test_upgrade_notDelegateCalled_reverts() public { + vm.expectRevert(IOPContractsManagerV2.OPContractsManagerV2_OnlyDelegateCall.selector); + opcmV2.upgrade(v2UpgradeInput); + } + /// @notice Tests that the upgrade function reverts if not called by the correct ProxyAdmin /// owner address. function test_upgrade_notProxyAdminOwner_reverts() public { @@ -654,14 +660,24 @@ contract OPContractsManagerV2_Upgrade_Test is OPContractsManagerV2_Upgrade_TestI uint256 originalBond = disputeGameFactory.initBonds(GameTypes.CANNON); // First, disable Cannon and clear its bond so the factory entry is removed. + // If the chain's current respectedGameType is CANNON, we must override it to + // PERMISSIONED_CANNON since we can't disable the respected game type. v2UpgradeInput.disputeGameConfigs[0].enabled = false; v2UpgradeInput.disputeGameConfigs[0].initBond = 0; + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: "overrides.cfg.startingRespectedGameType", + data: abi.encode(GameTypes.PERMISSIONED_CANNON) + }) + ); runCurrentUpgradeV2(chainPAO, hex"", "PLDG-10"); assertEq(address(disputeGameFactory.gameImpls(GameTypes.CANNON)), address(0), "game impl not cleared"); // Re-enable Cannon and restore its bond so that it is re-installed. + // Remove the startingRespectedGameType override since CANNON is enabled again. v2UpgradeInput.disputeGameConfigs[0].enabled = true; v2UpgradeInput.disputeGameConfigs[0].initBond = originalBond; + v2UpgradeInput.extraInstructions.pop(); runCurrentUpgradeV2(chainPAO); assertEq( address(disputeGameFactory.gameImpls(GameTypes.CANNON)), @@ -682,8 +698,16 @@ contract OPContractsManagerV2_Upgrade_Test is OPContractsManagerV2_Upgrade_TestI ); // Disable Cannon and zero its bond, then ensure it is removed. + // If the chain's current respectedGameType is CANNON, we must override it to + // PERMISSIONED_CANNON since we can't disable the respected game type. 
v2UpgradeInput.disputeGameConfigs[0].enabled = false; v2UpgradeInput.disputeGameConfigs[0].initBond = 0; + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: "overrides.cfg.startingRespectedGameType", + data: abi.encode(GameTypes.PERMISSIONED_CANNON) + }) + ); runCurrentUpgradeV2(chainPAO, hex"", "PLDG-10"); assertEq(address(disputeGameFactory.gameImpls(GameTypes.CANNON)), address(0), "game impl not cleared"); assertEq(disputeGameFactory.initBonds(GameTypes.CANNON), 0, "init bond not cleared"); @@ -732,6 +756,45 @@ contract OPContractsManagerV2_Upgrade_Test is OPContractsManagerV2_Upgrade_TestI ); } + /// @notice Tests that the upgrade function reverts when duplicate non-PermittedProxyDeployment + /// instruction keys are provided. + function test_upgrade_duplicateInstructionKeys_reverts() public { + delete v2UpgradeInput.extraInstructions; + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ key: "SomeCustomKey", data: bytes("Data1") }) + ); + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ key: "SomeCustomKey", data: bytes("Data2") }) + ); + + // nosemgrep: sol-style-use-abi-encodecall + runCurrentUpgradeV2( + chainPAO, + abi.encodeWithSelector( + IOPContractsManagerV2.OPContractsManagerV2_DuplicateUpgradeInstruction.selector, "SomeCustomKey" + ) + ); + } + + /// @notice Tests that duplicate PermittedProxyDeployment instruction keys are allowed. 
+ function test_upgrade_duplicatePermittedProxyDeploymentKeys_succeeds() public { + delete v2UpgradeInput.extraInstructions; + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: Constants.PERMITTED_PROXY_DEPLOYMENT_KEY, + data: bytes("DelayedWETH") + }) + ); + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: Constants.PERMITTED_PROXY_DEPLOYMENT_KEY, + data: bytes("DelayedWETH") + }) + ); + + runCurrentUpgradeV2(chainPAO); + } + /// @notice INVARIANT: Upgrades must always work when the system is paused. /// This test validates that the OPCMv2 upgrade function can execute successfully /// even when the SuperchainConfig has the system globally paused. This is critical @@ -944,7 +1007,7 @@ contract OPContractsManagerV2_UpgradeSuperchain_Test is OPContractsManagerV2_Upg /// @notice Tests that the upgradeSuperchain function reverts when not delegatecalled. function test_upgradeSuperchain_notDelegateCalled_reverts() public { - vm.expectRevert("Ownable: caller is not the owner"); + vm.expectRevert(IOPContractsManagerV2.OPContractsManagerV2_OnlyDelegateCall.selector); opcmV2.upgradeSuperchain(superchainUpgradeInput); } @@ -1140,6 +1203,43 @@ contract OPContractsManagerV2_Deploy_Test is OPContractsManagerV2_TestInit { ); } + /// @notice Tests that two different senders deploying with the same saltMixer and l2ChainId + /// get different contract addresses. 
+ function test_deploy_differentSendersDifferentAddresses_succeeds() public { + address senderA = makeAddr("senderA"); + address senderB = makeAddr("senderB"); + + vm.prank(senderA); + IOPContractsManagerV2.ChainContracts memory ctsA = opcmV2.deploy(deployConfig); + + vm.prank(senderB); + IOPContractsManagerV2.ChainContracts memory ctsB = opcmV2.deploy(deployConfig); + + assertNotEq( + address(ctsA.systemConfig), address(ctsB.systemConfig), "systemConfig addresses should differ by sender" + ); + } + + /// @notice Tests that deploy reverts when startingRespectedGameType is not in the disputeGameConfigs. + function test_deploy_startingGameTypeNotInConfigs_reverts() public { + deployConfig.startingRespectedGameType = GameTypes.SUPER_CANNON; + + // nosemgrep: sol-style-use-abi-encodecall + runDeployV2( + deployConfig, abi.encodeWithSelector(IOPContractsManagerV2.OPContractsManagerV2_InvalidGameConfigs.selector) + ); + } + + /// @notice Tests that deploy reverts when startingRespectedGameType is a disabled game type. + function test_deploy_startingGameTypeDisabled_reverts() public { + deployConfig.startingRespectedGameType = GameTypes.CANNON; + + // nosemgrep: sol-style-use-abi-encodecall + runDeployV2( + deployConfig, abi.encodeWithSelector(IOPContractsManagerV2.OPContractsManagerV2_InvalidGameConfigs.selector) + ); + } + function test_deploy_cannonGameEnabled_reverts() public { deployConfig.disputeGameConfigs[0].enabled = true; deployConfig.disputeGameConfigs[0].initBond = 1 ether; @@ -1351,6 +1451,13 @@ contract OPContractsManagerV2_Migrate_Test is OPContractsManagerV2_TestInit { assertEq(_dgf.gameArgs(_gameType), hex"", string.concat("Game args should be empty: ", _label)); } + /// @notice Tests that the migrate function reverts when not delegatecalled. 
+ function test_migrate_notDelegateCalled_reverts() public { + IOPContractsManagerMigrator.MigrateInput memory input = _getDefaultMigrateInput(); + vm.expectRevert(IOPContractsManagerV2.OPContractsManagerV2_OnlyDelegateCall.selector); + opcmV2.migrate(input); + } + /// @notice Tests that the migration function succeeds and liquidity is migrated. function test_migrate_succeeds() public { IOPContractsManagerMigrator.MigrateInput memory input = _getDefaultMigrateInput(); From 87d909e384207bb52588deeb2be2025ee1c06eca Mon Sep 17 00:00:00 2001 From: "devin-ai-integration[bot]" <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Thu, 5 Mar 2026 17:54:33 +0000 Subject: [PATCH 054/201] ci: add @security-oncall mentions to contracts-bedrock CI job failure notifications (#19367) Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> Co-authored-by: Kelvin Fichter --- .circleci/continue/main.yml | 24 ++++++++++++++++-------- 1 file changed, 16 insertions(+), 8 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index a50197264e7e5..d9165de4309a3 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -905,7 +905,8 @@ jobs: - "packages/contracts-bedrock/artifacts" - "packages/contracts-bedrock/forge-artifacts" - "op-deployer/pkg/deployer/artifacts/forge-artifacts" - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: "@security-oncall" check-kontrol-build: docker: @@ -929,7 +930,8 @@ jobs: name: Build Kontrol summary files command: just forge-build ./test/kontrol/proofs working_directory: packages/contracts-bedrock - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: "@security-oncall" docker-build: environment: @@ -1256,7 +1258,8 @@ jobs: name: Lint forge test names command: just lint-forge-tests-check-no-build working_directory: packages/contracts-bedrock - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: 
"@security-oncall" contracts-bedrock-heavy-fuzz-nightly: circleci_ip_ranges: true @@ -1305,7 +1308,8 @@ jobs: - store_test_results: path: packages/contracts-bedrock/results when: always - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: "@security-oncall" # AI Contracts Test Maintenance System # Runbook: https://github.com/ethereum-optimism/optimism/blob/develop/ops/ai-eng/contracts-test-maintenance/docs/runbook.md @@ -1338,7 +1342,8 @@ jobs: channel: C050F1GUHDG event: always template: AI_PR_SLACK_TEMPLATE - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: "@security-oncall" contracts-bedrock-coverage: circleci_ip_ranges: true @@ -1429,7 +1434,8 @@ jobs: - store_artifacts: path: packages/contracts-bedrock/failed-test-traces.log when: on_fail - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: "@security-oncall" contracts-bedrock-tests-upgrade: circleci_ip_ranges: true @@ -1613,7 +1619,8 @@ jobs: name: Run checks command: just check-fast working_directory: packages/contracts-bedrock - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: "@security-oncall" todo-issues: parameters: @@ -2616,7 +2623,8 @@ jobs: } }' working_directory: ./packages/contracts-bedrock - - notify-failures-on-develop + - notify-failures-on-develop: + mentions: "@security-oncall" publish-contract-artifacts: docker: From 2312fd9aecb49cccc099bd67d4acb9561bae4004 Mon Sep 17 00:00:00 2001 From: George Knee Date: Thu, 5 Mar 2026 17:55:21 +0000 Subject: [PATCH 055/201] op-acceptance-tests: disable `supernode/interop/activation` tests (#19402) * really skip problematic test * Apply suggestion from @geoknee --- .../interop/activation/activation_after_genesis_test.go | 1 + .../tests/supernode/interop/activation/init_test.go | 7 ++++--- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go 
b/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go index b0395bfc52121..9b3f351a5c8bc 100644 --- a/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go +++ b/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go @@ -14,6 +14,7 @@ import ( // verified data for timestamps both before and after the activation boundary. func TestSupernodeInteropActivationAfterGenesis(gt *testing.T) { t := devtest.ParallelT(gt) + t.Skip("The TestMain setup code for this test is unstable") sys := presets.NewTwoL2SupernodeInterop(t, InteropActivationDelay) genesisTime := sys.GenesisTime diff --git a/op-acceptance-tests/tests/supernode/interop/activation/init_test.go b/op-acceptance-tests/tests/supernode/interop/activation/init_test.go index c9611c295c460..dfc13d262dc44 100644 --- a/op-acceptance-tests/tests/supernode/interop/activation/init_test.go +++ b/op-acceptance-tests/tests/supernode/interop/activation/init_test.go @@ -3,8 +3,6 @@ package activation import ( "os" "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" ) // InteropActivationDelay is the delay in seconds from genesis to interop activation. 
@@ -17,5 +15,8 @@ const InteropActivationDelay = uint64(20) func TestMain(m *testing.M) { // Set the L2CL kind to supernode for all tests in this package _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") - presets.DoMain(m, presets.WithTwoL2SupernodeInterop(InteropActivationDelay)) + // TODO https://github.com/ethereum-optimism/optimism/issues/19403 + // invoking presets.WithTwoL2SupernodeInterop with a nonzero interop activation delay + // results in an unstable test setup due to bugs in op-supernode (it will hang when shutting down) + // presets.DoMain(m, presets.WithTwoL2SupernodeInterop(InteropActivationDelay)) } From 19aef66e1f77ff1ee2f665e8996e1cd5260f5f6b Mon Sep 17 00:00:00 2001 From: Ariel Diaz <65925295+aliersh@users.noreply.github.com> Date: Thu, 5 Mar 2026 13:34:38 -0500 Subject: [PATCH 056/201] test(contracts): reuse ProxyAdmin tests on L2ProxyAdmin for backwards compatibility (#19377) * test(contracts): reuse ProxyAdmin tests on L2ProxyAdmin for backwards compatibility - extract virtual _createAdmin hook in ProxyAdmin_TestInit for subclass override - make ProxyAdmin_TestInit.setUp public virtual to support test inheritance - add 10 backwards-compat test contracts that run all ProxyAdmin tests against L2ProxyAdmin * refactor(test): inline _createL2ProxyAdmin into each override - remove _createL2ProxyAdmin free function from L2ProxyAdmin.t.sol - inline L2ProxyAdmin deployment directly in each _createAdmin override --- .../test/L2/L2ProxyAdmin.t.sol | 96 +++++++++++++++++++ .../test/universal/ProxyAdmin.t.sol | 11 ++- 2 files changed, 103 insertions(+), 4 deletions(-) diff --git a/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol b/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol index 8a792387725d1..6f98312f866e0 100644 --- a/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol @@ -3,6 +3,18 @@ pragma solidity 0.8.15; // Testing import { CommonTest } from "test/setup/CommonTest.sol"; 
+import { + ProxyAdmin_SetProxyType_Test, + ProxyAdmin_SetImplementationName_Test, + ProxyAdmin_SetAddressManager_Test, + ProxyAdmin_IsUpgrading_Test, + ProxyAdmin_GetProxyImplementation_Test, + ProxyAdmin_GetProxyAdmin_Test, + ProxyAdmin_ChangeProxyAdmin_Test, + ProxyAdmin_Upgrade_Test, + ProxyAdmin_UpgradeAndCall_Test, + ProxyAdmin_Uncategorized_Test +} from "test/universal/ProxyAdmin.t.sol"; // Libraries import { Constants } from "src/libraries/Constants.sol"; @@ -10,6 +22,7 @@ import { Predeploys } from "src/libraries/Predeploys.sol"; // Interfaces import { IL2ProxyAdmin } from "interfaces/L2/IL2ProxyAdmin.sol"; +import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; // Contracts import { L2ProxyAdmin } from "src/L2/L2ProxyAdmin.sol"; @@ -110,3 +123,86 @@ contract L2ProxyAdmin_UpgradePredeploys_Test is L2ProxyAdmin_TestInit { l2ProxyAdmin.upgradePredeploys(_l2ContractsManager); } } + +// Backwards-compatibility: rerun all ProxyAdmin tests against L2ProxyAdmin +// by overriding _createAdmin to deploy L2ProxyAdmin instead. + +/// @title L2ProxyAdmin_SetProxyType_Test +/// @notice Tests the `setProxyType` function of the `L2ProxyAdmin` contract for backwards compatibility. +contract L2ProxyAdmin_SetProxyType_Test is ProxyAdmin_SetProxyType_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_SetImplementationName_Test +/// @notice Tests the `setImplementationName` function of the `L2ProxyAdmin` contract for backwards compatibility. +contract L2ProxyAdmin_SetImplementationName_Test is ProxyAdmin_SetImplementationName_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_SetAddressManager_Test +/// @notice Tests the `setAddressManager` function of the `L2ProxyAdmin` contract for backwards compatibility. 
+contract L2ProxyAdmin_SetAddressManager_Test is ProxyAdmin_SetAddressManager_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_IsUpgrading_Test +/// @notice Tests the `isUpgrading` function of the `L2ProxyAdmin` contract for backwards compatibility. +contract L2ProxyAdmin_IsUpgrading_Test is ProxyAdmin_IsUpgrading_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_GetProxyImplementation_Test +/// @notice Tests the `getProxyImplementation` function of the `L2ProxyAdmin` contract for backwards compatibility. +contract L2ProxyAdmin_GetProxyImplementation_Test is ProxyAdmin_GetProxyImplementation_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_GetProxyAdmin_Test +/// @notice Tests the `getProxyAdmin` function of the `L2ProxyAdmin` contract for backwards compatibility. +contract L2ProxyAdmin_GetProxyAdmin_Test is ProxyAdmin_GetProxyAdmin_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_ChangeProxyAdmin_Test +/// @notice Tests the `changeProxyAdmin` function of the `L2ProxyAdmin` contract for backwards compatibility. +contract L2ProxyAdmin_ChangeProxyAdmin_Test is ProxyAdmin_ChangeProxyAdmin_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_Upgrade_Test +/// @notice Tests the `upgrade` function of the `L2ProxyAdmin` contract for backwards compatibility. 
+contract L2ProxyAdmin_Upgrade_Test is ProxyAdmin_Upgrade_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_UpgradeAndCall_Test +/// @notice Tests the `upgradeAndCall` function of the `L2ProxyAdmin` contract for backwards compatibility. +contract L2ProxyAdmin_UpgradeAndCall_Test is ProxyAdmin_UpgradeAndCall_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} + +/// @title L2ProxyAdmin_Uncategorized_Test +/// @notice General backwards-compatibility tests for the `L2ProxyAdmin` contract. +contract L2ProxyAdmin_Uncategorized_Test is ProxyAdmin_Uncategorized_Test { + function _createAdmin(address _owner) internal override returns (IProxyAdmin) { + return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + } +} diff --git a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol index f81ec40007e15..ee243c028babc 100644 --- a/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/universal/ProxyAdmin.t.sol @@ -30,14 +30,17 @@ abstract contract ProxyAdmin_TestInit is Test { Proxy_SimpleStorage_Harness implementation; - function setUp() external { - // Deploy the proxy admin - admin = IProxyAdmin( + function _createAdmin(address _owner) internal virtual returns (IProxyAdmin) { + return IProxyAdmin( DeployUtils.create1({ _name: "ProxyAdmin", - _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (alice))) + _args: DeployUtils.encodeConstructor(abi.encodeCall(IProxyAdmin.__constructor__, (_owner))) }) ); + } + + function setUp() public virtual { + admin = _createAdmin(alice); // Deploy the standard proxy proxy = IProxy( From 5353148a1d22a53e26c9ca34d777c9570ebc1ca2 Mon Sep 17 00:00:00 2001 From: Stefano Charissis 
Date: Thu, 5 Mar 2026 20:38:14 +0100 Subject: [PATCH 057/201] chore(op-acceptor): v3.10.1 (#19390) --- mise.toml | 2 +- op-acceptance-tests/justfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mise.toml b/mise.toml index 6a696749b4777..b9e1587c9b8e9 100644 --- a/mise.toml +++ b/mise.toml @@ -40,7 +40,7 @@ anvil = "1.2.3" codecov-uploader = "0.8.0" goreleaser-pro = "2.11.2" kurtosis = "1.8.1" -op-acceptor = "op-acceptor/v3.9.0" +op-acceptor = "op-acceptor/v3.10.1" git-cliff = "2.12.0" # Fake dependencies diff --git a/op-acceptance-tests/justfile b/op-acceptance-tests/justfile index 9492ca0916c50..34faa2e14f808 100644 --- a/op-acceptance-tests/justfile +++ b/op-acceptance-tests/justfile @@ -1,6 +1,6 @@ REPO_ROOT := `realpath ..` # path to the root of the optimism monorepo KURTOSIS_DIR := REPO_ROOT + "/kurtosis-devnet" -ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.9.0") +ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.10.1") DOCKER_REGISTRY := env_var_or_default("DOCKER_REGISTRY", "us-docker.pkg.dev/oplabs-tools-artifacts/images") ACCEPTOR_IMAGE := env_var_or_default("ACCEPTOR_IMAGE", DOCKER_REGISTRY + "/op-acceptor:" + ACCEPTOR_VERSION) From 82274778ad319833572cc3ed69d052595aa390c0 Mon Sep 17 00:00:00 2001 From: Josh Klopfenstein Date: Thu, 5 Mar 2026 16:41:46 -0600 Subject: [PATCH 058/201] all: update op-geth (#19414) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 6d652a1fe0496..4e4aa1378021a 100644 --- a/go.mod +++ b/go.mod @@ -312,7 +312,7 @@ require ( lukechampine.com/blake3 v1.3.0 // indirect ) -replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101609.1-rc.1 +replace github.com/ethereum/go-ethereum => github.com/ethereum-optimism/op-geth v1.101609.2-rc.1 // replace github.com/ethereum/go-ethereum => ../op-geth diff --git a/go.sum b/go.sum index bd16e5bd913bd..ee74c32b0e798 100644 --- a/go.sum +++ 
b/go.sum @@ -240,8 +240,8 @@ github.com/emicklei/dot v1.6.2 h1:08GN+DD79cy/tzN6uLCT84+2Wk9u+wvqP+Hkx/dIR8A= github.com/emicklei/dot v1.6.2/go.mod h1:DeV7GvQtIw4h2u73RKBkkFdvVAz0D9fzeJrgPW6gy/s= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e h1:iy1vBIzACYUyOVyoADUwvAiq2eOPC0yVsDUdolPwQjk= github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e/go.mod h1:DYj7+vYJ4cIB7zera9mv4LcAynCL5u4YVfoeUu6Wa+w= -github.com/ethereum-optimism/op-geth v1.101609.1-rc.1 h1:r59fw5Qf4XIpPqXqMOyAvxXyqv45OrOXG46ozAPLqz8= -github.com/ethereum-optimism/op-geth v1.101609.1-rc.1/go.mod h1:3YphRrN5/TvRp9VGy5rfA6l6rVR6IAsgSJNPLbIg66E= +github.com/ethereum-optimism/op-geth v1.101609.2-rc.1 h1:no8/SsQ7bylsf/q9txiRqrtbFfdasOEwuOoFMFfMFTM= +github.com/ethereum-optimism/op-geth v1.101609.2-rc.1/go.mod h1:3YphRrN5/TvRp9VGy5rfA6l6rVR6IAsgSJNPLbIg66E= github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e h1:TO1tUcwbhIrNuea/LCsQJSQ5HDWCHdrzT/5MLC1aIU4= github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e/go.mod h1:NZ816PzLU1TLv1RdAvYAb6KWOj4Zm5aInT0YpDVml2Y= github.com/ethereum/c-kzg-4844/v2 v2.1.5 h1:aVtoLK5xwJ6c5RiqO8g8ptJ5KU+2Hdquf6G3aXiHh5s= From d1293a59c1e0e5b9c3d1ffc3f2b62b7d2e561abd Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Thu, 5 Mar 2026 17:45:46 -0500 Subject: [PATCH 059/201] fix(contracts): forward-compatible OZ v5 Initializable in upgrade (Finding 22) (#19286) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(contracts): forward-compatible OZ v5 Initializable in upgrade (Finding 22) Make OPContractsManagerUtils.upgrade() also clear the OZ v5 ERC-7201 Initializable storage slot (uint64 _initialized in the low 8 bytes). For v4 contracts the slot is all zeros so this is a no-op. 
Reverts if _initializing bool is set, since a contract should never be mid-initialization during an upgrade. Co-Authored-By: Claude Opus 4.6 * docs(contracts): add ERC-7201 slot derivation comment for OZ v5 Initializable Show the keccak256 derivation formula and link to the OpenZeppelin source for the hardcoded ERC-7201 Initializable storage slot. Co-Authored-By: Claude Opus 4.6 * fix(contracts): address PR review comments on OPContractsManagerUtils - Clarify error NatSpec: "mid-initialization" → "has `_initializing` as true" - Remove stale "Otherwise" from comment on initialized slot reset Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .../L1/opcm/IOPContractsManagerUtils.sol | 1 + .../abi/OPContractsManagerUtils.json | 5 + .../src/L1/opcm/OPContractsManagerUtils.sol | 27 +++- .../L1/opcm/OPContractsManagerUtils.t.sol | 128 ++++++++++++++++++ 4 files changed, 160 insertions(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol index 730779b4cce74..d64f32240a16a 100644 --- a/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol +++ b/packages/contracts-bedrock/interfaces/L1/opcm/IOPContractsManagerUtils.sol @@ -46,6 +46,7 @@ interface IOPContractsManagerUtils { error OPContractsManagerUtils_DowngradeNotAllowed(address _contract); error OPContractsManagerUtils_ExtraTagInProd(address _contract); + error OPContractsManagerUtils_InitializingDuringUpgrade(); error OPContractsManagerUtils_ConfigLoadFailed(string _name); error OPContractsManagerUtils_ProxyMustLoad(string _name); error OPContractsManagerUtils_UnsupportedGameType(); diff --git a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json index ef244d2ff55a1..ec7ef5c22dd45 100644 --- 
a/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json +++ b/packages/contracts-bedrock/snapshots/abi/OPContractsManagerUtils.json @@ -696,6 +696,11 @@ "name": "OPContractsManagerUtils_ExtraTagInProd", "type": "error" }, + { + "inputs": [], + "name": "OPContractsManagerUtils_InitializingDuringUpgrade", + "type": "error" + }, { "inputs": [ { diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol index 7c5ce5e238144..004571e5ed42d 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerUtils.sol @@ -51,6 +51,9 @@ contract OPContractsManagerUtils { /// @param _contract The address of the contract with extra version tags. error OPContractsManagerUtils_ExtraTagInProd(address _contract); + /// @notice Thrown when a contract has `_initializing` as true during an upgrade. + error OPContractsManagerUtils_InitializingDuringUpgrade(); + /// @notice Thrown when a config load fails. /// @param _name The name of the config that failed to load. error OPContractsManagerUtils_ConfigLoadFailed(string _name); @@ -333,12 +336,34 @@ contract OPContractsManagerUtils { // Upgrade to StorageSetter. _proxyAdmin.upgrade(payable(_target), address(implementations().storageSetterImpl)); - // Otherwise, we need to reset the initialized slot and call the initializer. + // We need to reset the initialized slot and call the initializer. // Reset the initialized slot by zeroing the single byte at `_offset` (from the right). bytes32 current = IStorageSetter(_target).getBytes32(_slot); uint256 mask = ~(uint256(0xff) << (uint256(_offset) * 8)); IStorageSetter(_target).setBytes32(_slot, bytes32(uint256(current) & mask)); + // Also clear the OZ v5 ERC-7201 Initializable slot. OZ v5 stores `_initialized` as + // uint64 in the low 8 bytes and `_initializing` as bool at byte offset 8 of the + // namespaced slot. 
For v4 contracts this slot is all zeros, making this a no-op. + // Slot derivation (ERC-7201): + // keccak256(abi.encode(uint256(keccak256("openzeppelin.storage.Initializable")) - 1)) & + // ~bytes32(uint256(0xff)) + // Ref: + // https://github.com/OpenZeppelin/openzeppelin-contracts/blob/6b55a93e/contracts/proxy/utils/Initializable.sol#L77 + bytes32 ozV5Slot = bytes32(uint256(0xf0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00)); + bytes32 v5Current = IStorageSetter(_target).getBytes32(ozV5Slot); + uint256 v5Value = uint256(v5Current); + + // A contract should never be mid-initialization during an upgrade. The `_initializing` + // bool lives at byte offset 8 (bits 64..71). Revert if it is set. + if ((v5Value >> 64) & 0xFF != 0) { + revert OPContractsManagerUtils_InitializingDuringUpgrade(); + } + + // Zero the uint64 `_initialized` portion (low 8 bytes), preserving all upper bytes. + uint256 v5Mask = ~uint256(0xFFFFFFFFFFFFFFFF); + IStorageSetter(_target).setBytes32(ozV5Slot, bytes32(v5Value & v5Mask)); + // Upgrade to the implementation and call the initializer. _proxyAdmin.upgradeAndCall(payable(address(_target)), _implementation, _data); } diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol index b70a0fcc2d3c1..c725605e9782f 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerUtils.t.sol @@ -680,6 +680,134 @@ contract OPContractsManagerUtils_Upgrade_Test is OPContractsManagerUtils_TestIni assertEq(proxyAdmin.getProxyImplementation(payable(address(proxy))), address(implBeta)); } + + /// @notice ERC-7201 Initializable slot used by OZ v5. 
+ bytes32 internal constant OZ_V5_INITIALIZABLE_SLOT = + bytes32(uint256(0xf0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00)); + + /// @notice Tests that v4 contracts are unaffected by the v5 slot clearing logic. For v4 + /// contracts the ERC-7201 slot is all zeros, so the new code is a no-op. + function test_upgrade_v4ContractStillWorks_succeeds() public { + // Set v1 as current implementation. + vm.prank(address(utils)); + proxyAdmin.upgrade(payable(address(proxy)), address(implV1)); + + // Verify the ERC-7201 slot is zero (v4 contract). + assertEq(vm.load(address(proxy), OZ_V5_INITIALIZABLE_SLOT), bytes32(0)); + + // Upgrade to v2 should succeed and the ERC-7201 slot should remain zero. + utils.upgrade( + proxyAdmin, + address(proxy), + address(implV2), + abi.encodeCall(OPContractsManagerUtils_ImplV2_Harness.initialize, ()), + TEST_SLOT, + TEST_OFFSET + ); + + assertEq(proxyAdmin.getProxyImplementation(payable(address(proxy))), address(implV2)); + assertEq(vm.load(address(proxy), OZ_V5_INITIALIZABLE_SLOT), bytes32(0)); + } + + /// @notice Tests that a v5 contract with `_initialized = 1` at the ERC-7201 slot gets cleared. + function test_upgrade_v5SlotCleared_succeeds() public { + // Set v1 as current implementation. + vm.prank(address(utils)); + proxyAdmin.upgrade(payable(address(proxy)), address(implV1)); + + // Simulate a v5 contract with _initialized = 1 at the ERC-7201 slot. + vm.store(address(proxy), OZ_V5_INITIALIZABLE_SLOT, bytes32(uint256(1))); + + // Upgrade to v2 should succeed. + utils.upgrade( + proxyAdmin, + address(proxy), + address(implV2), + abi.encodeCall(OPContractsManagerUtils_ImplV2_Harness.initialize, ()), + TEST_SLOT, + TEST_OFFSET + ); + + assertEq(proxyAdmin.getProxyImplementation(payable(address(proxy))), address(implV2)); + // The v5 _initialized field should have been cleared. 
+ assertEq(vm.load(address(proxy), OZ_V5_INITIALIZABLE_SLOT), bytes32(0)); + } + + /// @notice Tests that a v5 contract with `_initialized = type(uint64).max` (from + /// `_disableInitializers()`) gets cleared. + function test_upgrade_v5SlotMaxInitialized_succeeds() public { + // Set v1 as current implementation. + vm.prank(address(utils)); + proxyAdmin.upgrade(payable(address(proxy)), address(implV1)); + + // Simulate a v5 contract with _initialized = type(uint64).max (disabled initializers). + vm.store(address(proxy), OZ_V5_INITIALIZABLE_SLOT, bytes32(uint256(type(uint64).max))); + + // Upgrade to v2 should succeed. + utils.upgrade( + proxyAdmin, + address(proxy), + address(implV2), + abi.encodeCall(OPContractsManagerUtils_ImplV2_Harness.initialize, ()), + TEST_SLOT, + TEST_OFFSET + ); + + assertEq(proxyAdmin.getProxyImplementation(payable(address(proxy))), address(implV2)); + // The v5 _initialized field should have been cleared. + assertEq(vm.load(address(proxy), OZ_V5_INITIALIZABLE_SLOT), bytes32(0)); + } + + /// @notice Tests that upgrade reverts when `_initializing` bool is set at the ERC-7201 slot. + function test_upgrade_v5InitializingDuringUpgrade_reverts() public { + // Set v1 as current implementation. + vm.prank(address(utils)); + proxyAdmin.upgrade(payable(address(proxy)), address(implV1)); + + // Simulate a v5 contract that is mid-initialization. The _initializing bool is at byte + // offset 8 (bit 64). Set _initialized = 1 and _initializing = true. 
+ uint256 v5Value = 1 | (uint256(1) << 64); + vm.store(address(proxy), OZ_V5_INITIALIZABLE_SLOT, bytes32(v5Value)); + + vm.expectRevert(IOPContractsManagerUtils.OPContractsManagerUtils_InitializingDuringUpgrade.selector); + utils.upgrade( + proxyAdmin, + address(proxy), + address(implV2), + abi.encodeCall(OPContractsManagerUtils_ImplV2_Harness.initialize, ()), + TEST_SLOT, + TEST_OFFSET + ); + } + + /// @notice Tests that the upper bytes of the ERC-7201 slot beyond the Initializable struct + /// are preserved when clearing the `_initialized` field. + function test_upgrade_v5SlotPreservesUpperBytes_succeeds() public { + // Set v1 as current implementation. + vm.prank(address(utils)); + proxyAdmin.upgrade(payable(address(proxy)), address(implV1)); + + // Set the v5 slot with _initialized = 1 in the low 8 bytes and some data in the upper + // bytes (above the _initializing bool at byte offset 8). Bytes 9+ are unused by the + // Initializable struct but should be preserved. + uint256 upperData = uint256(0xDEADBEEF) << 128; + uint256 v5Value = upperData | 1; + vm.store(address(proxy), OZ_V5_INITIALIZABLE_SLOT, bytes32(v5Value)); + + // Upgrade to v2 should succeed. + utils.upgrade( + proxyAdmin, + address(proxy), + address(implV2), + abi.encodeCall(OPContractsManagerUtils_ImplV2_Harness.initialize, ()), + TEST_SLOT, + TEST_OFFSET + ); + + assertEq(proxyAdmin.getProxyImplementation(payable(address(proxy))), address(implV2)); + // The upper bytes should be preserved, only the low 8 bytes should be zeroed. 
+ assertEq(vm.load(address(proxy), OZ_V5_INITIALIZABLE_SLOT), bytes32(upperData)); + } } /// @title OPContractsManagerUtils_Blueprints_Test From abe047afc995e0e22abf5ea9b157e267e907d494 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Thu, 5 Mar 2026 17:56:08 -0500 Subject: [PATCH 060/201] chore(op-acceptance-tests): add ELSync stalling tests to flake-shake (#19415) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(op-acceptance-tests): add ELSync stalling tests to flake-shake TestUnsafeChainNotStalling_ELSync_Short/Long/RestartOpNode_Long failed 3 times each across 3 distinct branches in the past 7 days, always co-failing in the same job. All instances passed on rerun (confirmed flake). The three tests share the same package and common fixture (UnsafeChainNotStalling_Disconnect / _RestartOpNode), which explains why they fail together — a setup-level timing issue affects all three simultaneously. Quarantine while root cause is investigated. Co-Authored-By: Claude Sonnet 4.6 * fix: correct package path to syncmodereqressync/elsync The failing tests are in depreqres/syncmodereqressync/elsync, not reqressyncdisabled/elsync. The reqressyncdisabled variants pass cleanly. 
Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: smartcontracts Co-authored-by: Claude Sonnet 4.6 --- op-acceptance-tests/acceptance-tests.yaml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/op-acceptance-tests/acceptance-tests.yaml b/op-acceptance-tests/acceptance-tests.yaml index b3a503dcb056a..f6f6c5237ec1b 100644 --- a/op-acceptance-tests/acceptance-tests.yaml +++ b/op-acceptance-tests/acceptance-tests.yaml @@ -61,6 +61,24 @@ gates: metadata: owner: "adrian sutton" target_gate: "supernode-interop" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync + name: TestUnsafeChainNotStalling_ELSync_Short + timeout: 10m + metadata: + owner: "anton evangelatov" + target_gate: "depreqres" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync + name: TestUnsafeChainNotStalling_ELSync_Long + timeout: 10m + metadata: + owner: "anton evangelatov" + target_gate: "depreqres" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync + name: TestUnsafeChainNotStalling_ELSync_RestartOpNode_Long + timeout: 10m + metadata: + owner: "anton evangelatov" + target_gate: "depreqres" - id: isthmus description: "Isthmus network tests." From e41d64f2819b13f942fe00432591e26d1388e80d Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 6 Mar 2026 09:16:14 +1000 Subject: [PATCH 061/201] chore: add make to mise.toml (#19396) Pin the make version in mise so that all developers and CI use a consistent version. 
Co-authored-by: Claude Sonnet 4.6 --- mise.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/mise.toml b/mise.toml index b9e1587c9b8e9..3de35501acc7a 100644 --- a/mise.toml +++ b/mise.toml @@ -15,6 +15,7 @@ shellcheck = "0.10.0" shfmt = "3.11.0" direnv = "2.35.0" just = "1.37.0" +make = "4.4.1" svm-rs = "0.5.19" From cadb73d9bd46102bb4f54180dc3769e8cfc14707 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Thu, 5 Mar 2026 18:32:26 -0500 Subject: [PATCH 062/201] fix(contracts): address audit findings 14, 6, 8, 13, 19 (#19281) * fix(contracts): address audit findings #14, #6, #8, #13, #19 - #14: Reuse existing DelayedWETH from SystemConfig instead of deploying a new one in the Migrator, preventing divergence with future upgrades - #6: Document that hardcoded game type lists in OPCMv2 and Migrator are intentional and must be kept in sync when new types are added - #8: Document that migrate() does not enforce SuperchainConfig version floor - #13: Document why migration game config validation is deliberately minimal - #19: Document theoretical risk in AnchorStateRegistry.isGameRegistered when ASR proxy is replaced non-atomically Co-Authored-By: Claude Opus 4.6 * fix(contracts): add cross-reference comment to GameTypes library Add a notice to the GameTypes library reminding developers to update the hardcoded game type lists in OPContractsManagerMigrator and OPContractsManagerV2's _assertValidFullConfig when adding new types. Co-Authored-By: Claude Opus 4.6 * fix(contracts): bump OPContractsManagerV2 version for rebase Bump OPContractsManagerV2 from 7.0.9 to 7.0.10 to account for the comment-only source change (cross-reference note added in prior commit) that affects the bytecode metadata hash. 
Co-Authored-By: Claude Opus 4.6 * fix(contracts): bump OPContractsManagerV2 version to 7.0.11 for semver-diff CI fix * fix(contracts): apply forge fmt and bump versions for formatting changes forge fmt changed OPContractsManager, FaultDisputeGame, SuperFaultDisputeGame, and several other files. Bump patch versions for the contracts with hash changes, and regenerate semver-lock and snapshots. - OPContractsManager: 6.0.3 -> 6.0.4 - FaultDisputeGame: 2.4.0 -> 2.4.1 - SuperFaultDisputeGame: 0.7.0 -> 0.7.1 Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Opus 4.6 Co-authored-by: smartcontracts --- .../scripts/deploy/DeploySuperchain.s.sol | 2 +- .../snapshots/semver-lock.json | 20 +++++----- .../src/L1/OPContractsManager.sol | 6 +-- .../L1/opcm/OPContractsManagerMigrator.sol | 40 ++++++++++--------- .../src/L1/opcm/OPContractsManagerV2.sol | 9 +++-- .../src/dispute/AnchorStateRegistry.sol | 12 +++++- .../src/dispute/FaultDisputeGame.sol | 8 ++-- .../src/dispute/SuperFaultDisputeGame.sol | 8 ++-- .../src/dispute/lib/Types.sol | 2 + .../src/safe/SafeSigners.sol | 2 +- .../contracts-bedrock/src/vendor/eas/IEAS.sol | 4 +- .../MockL2ToL2CrossDomainMessenger.t.sol | 9 ++++- .../test/libraries/trie/MerkleTrie.t.sol | 2 +- .../test/universal/Proxy.t.sol | 2 +- 14 files changed, 74 insertions(+), 52 deletions(-) diff --git a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol index 548d1e5e367f3..4cac9db53c499 100644 --- a/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol +++ b/packages/contracts-bedrock/scripts/deploy/DeploySuperchain.s.sol @@ -232,7 +232,7 @@ contract DeploySuperchain is Script { vm.stopPrank(); require(actualSuperchainConfigImpl == address(_output.superchainConfigImpl), "100"); // nosemgrep: - // sol-style-malformed-require + // sol-style-malformed-require require(actualProtocolVersionsImpl == address(_output.protocolVersionsImpl), "200"); // 
nosemgrep: // sol-style-malformed-require } diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index 5c8ddefa38825..cac22ae7dab56 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -24,8 +24,8 @@ "sourceCodeHash": "0xfca613b5d055ffc4c3cbccb0773ddb9030abedc1aa6508c9e2e7727cc0cd617b" }, "src/L1/OPContractsManager.sol:OPContractsManager": { - "initCodeHash": "0xd4593c8b35e7a1d5371315011c48116594001b198168d416d5df6d43d49f97c8", - "sourceCodeHash": "0xf8900a57ff29a27f99f6f68b2978b7964629a5d4b8bb351394c10431cae0f617" + "initCodeHash": "0x255b4642dc8aff7860165eb1228398c50ec42f01c2fafa300acd6bd3ee9b2c97", + "sourceCodeHash": "0x1eaf94e2da7454243a5c44b7a172524a197be59c434ad16b2553d3ca339fe3ba" }, "src/L1/OPContractsManagerStandardValidator.sol:OPContractsManagerStandardValidator": { "initCodeHash": "0xdec828fdb9f9bb7a35ca03d851b041fcd088681957642e949b5d320358d9b9a1", @@ -52,8 +52,8 @@ "sourceCodeHash": "0xb3184aa5d95a82109e7134d1f61941b30e25f655b9849a0e303d04bbce0cde0b" }, "src/L1/opcm/OPContractsManagerV2.sol:OPContractsManagerV2": { - "initCodeHash": "0xca9edfa050a5583f063194fd8d098124d6f3c1367eec8875c0c8acf5d971657f", - "sourceCodeHash": "0x0238b990636aab82f93450b1ee2ff7a1f69d55a0b197265e696b70d285c85992" + "initCodeHash": "0x0c5d72873002f8ac44e92e6cca44d4916da3af53af6efe1d63a59ae5a752b221", + "sourceCodeHash": "0x2962c31609cf23c2656d16522328f01dda6be65bd03c44a4ad570266714dde0a" }, "src/L2/BaseFeeVault.sol:BaseFeeVault": { "initCodeHash": "0x838bbd7f381e84e21887f72bd1da605bfc4588b3c39aed96cbce67c09335b3ee", @@ -196,8 +196,8 @@ "sourceCodeHash": "0x03c160168986ffc8d26a90c37366e7ad6da03f49d83449e1f8b3de0f4b590f6f" }, "src/dispute/AnchorStateRegistry.sol:AnchorStateRegistry": { - "initCodeHash": "0x7d85ca3f30f6526a62214a6ef876299dcf15e536afe4f5dd7b232a92120e170f", - "sourceCodeHash": 
"0x896011529b7fdb6623a0b3f72b099f38229168f54514e674c6b3aa7afc35e4fc" + "initCodeHash": "0xa4ae3fb0b8c4bfa122739543a4f9e4729f0fee5306230f62486d3cab6a27de3c", + "sourceCodeHash": "0x2ade91e0638608c05fd452b20b1e88d43519833229d1b62f805b74a76dd27e64" }, "src/dispute/DelayedWETH.sol:DelayedWETH": { "initCodeHash": "0xa8f60e142108b33675a8f6b6979c73b96eea247884842d796f9f878904c0a906", @@ -208,16 +208,16 @@ "sourceCodeHash": "0x7e68423e4c4ca7725a0c625aa3df9a3db0d7027eaddfdb2e7fa65329666b7a54" }, "src/dispute/FaultDisputeGame.sol:FaultDisputeGame": { - "initCodeHash": "0x2493823ad9fb986a696e61ce8a8ef50c548c6a4e0affeeec63edd119ca206083", - "sourceCodeHash": "0xd6a17c76aed0e561d4b7293dc17157b9435173c0a61ec40dc4ff8e49e2c1e601" + "initCodeHash": "0x43294a9f4f07acad0ab70d6e9cb4dfbf91675bc83da6cb9b9493cbd01a8cfca8", + "sourceCodeHash": "0x260cbb04c2e58b83ddb961990e33ff8473c9b95e7a2e2ae000c3e03b178b9feb" }, "src/dispute/PermissionedDisputeGame.sol:PermissionedDisputeGame": { "initCodeHash": "0x940289cf9018b6b22b532a90ee1e731427d435d7982e4e78ea44f25e1cb874a2", "sourceCodeHash": "0x8302f265145ebccb05d1dfb123a1176bb0b29a06167817b0020eac104da8a841" }, "src/dispute/SuperFaultDisputeGame.sol:SuperFaultDisputeGame": { - "initCodeHash": "0xb5ce71bc56109055cd0dc71fc63015443bbdb29c5975e049802cd1b5188f06ca", - "sourceCodeHash": "0x3096a447574168528555f091a357a120b1dee6f35b50634b7d705ace1ef9c0ad" + "initCodeHash": "0xe087ca1944daada8c7d97b6b30057c97719ce54e9f5cf9f9a6af976353d7780e", + "sourceCodeHash": "0x533c0f83fa6f3c55c5480c1ee41a48f953a161b5ff0a4c266760d43153ab9ac7" }, "src/dispute/SuperPermissionedDisputeGame.sol:SuperPermissionedDisputeGame": { "initCodeHash": "0xa080730728e812e8b02d03b5857b23d16ade46f1656f26f22274835a3100edd7", diff --git a/packages/contracts-bedrock/src/L1/OPContractsManager.sol b/packages/contracts-bedrock/src/L1/OPContractsManager.sol index 8ab8b23c425ff..3cea2cdfa625d 100644 --- a/packages/contracts-bedrock/src/L1/OPContractsManager.sol +++ 
b/packages/contracts-bedrock/src/L1/OPContractsManager.sol @@ -557,7 +557,7 @@ contract OPContractsManagerGameTypeAdder is OPContractsManagerBase { anchorStateRegistry: address(getAnchorStateRegistry(ISystemConfig(gameConfig.systemConfig))), weth: address(outputs[i].delayedWETH), l2ChainId: gameConfig.disputeGameType.raw() == GameTypes.PERMISSIONED_CANNON.raw() ? l2ChainId : 0, // must - // be zero for SUPER gam types + // be zero for SUPER gam types proposer: getProposer( dgf, IPermissionedDisputeGame(address(existingGame)), gameConfig.disputeGameType ), @@ -1917,9 +1917,9 @@ contract OPContractsManager is ISemver { /// @dev This needs to stay at 6.x.x because the next release will ship OPCMv2. Since we are /// not actually planning to release a 7.x.x of OPCMv1, it needs to stay at 6.x.x to avoid /// errors in the versioning rules of OPCMv2. - /// @custom:semver 6.0.3 + /// @custom:semver 6.0.4 function version() public pure virtual returns (string memory) { - return "6.0.3"; + return "6.0.4"; } OPContractsManagerGameTypeAdder public immutable opcmGameTypeAdder; diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol index 35a7aff2bf694..d0e9e70312a2b 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerMigrator.sol @@ -71,6 +71,9 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { /// migrating a subset of chains that share a lockbox), re-migration of already-migrated /// chains, or any other migration scenario. Re-calling this function on already-migrated /// portals will corrupt the shared DisputeGameFactory used by all migrated chains. + /// @dev NOTE: Unlike deploy/upgrade, this function does not enforce a SuperchainConfig + /// version floor. 
The caller is responsible for ensuring the SuperchainConfig is + /// upgraded to the current OPCM release version before calling migrate. /// @param _input The input parameters for the migration. function migrate(MigrateInput calldata _input) public { // Check that the OPTIMISM_PORTAL_INTEROP dev feature is enabled. @@ -152,16 +155,12 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { ) ); - // Deploy the new DelayedWETH. - IDelayedWETH delayedWETH = IDelayedWETH( - _loadOrDeployProxy( - address(0), // Source from address(0) so we always deploy a new proxy. - bytes4(0), - proxyDeployArgs, - "DelayedWETH", - extraInstructions - ) - ); + // Reuse the existing DelayedWETH from chainSystemConfigs[0] rather than deploying a + // new one. The migrated chains share a SystemConfig, and by extension share its + // DelayedWETH. Deploying a new one would create a divergence — SystemConfig would + // still point to the old one, and future upgrades (which load DelayedWETH from + // SystemConfig) would reference a different DelayedWETH than the shared DGF games. + IDelayedWETH delayedWETH = IDelayedWETH(payable(_input.chainSystemConfigs[0].delayedWETH())); // Separate context to avoid stack too deep (isolate the implementations variable). { @@ -204,14 +203,6 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { ) ); - // Initialize the new DelayedWETH. - _upgrade( - proxyDeployArgs.proxyAdmin, - address(delayedWETH), - impls.delayedWETHImpl, - abi.encodeCall(IDelayedWETH.initialize, (_input.chainSystemConfigs[0])) - ); - // Migrate each portal to the new ETHLockbox and AnchorStateRegistry. for (uint256 i = 0; i < _input.chainSystemConfigs.length; i++) { _migratePortal(_input.chainSystemConfigs[i], ethLockbox, anchorStateRegistry); @@ -219,6 +210,14 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { } // Set up the dispute games in the new DisputeGameFactory. 
+ // NOTE: Unlike deploy/upgrade, migration does not perform full game config + // validation. This is intentional: + // 1. Migration is a privileged, one-off admin action by the ProxyAdmin owner + // 2. getGameImpl() rejects unrecognized game types + // 3. Only super game types are meaningful here — non-super types would have + // l2ChainId=0, causing FaultDisputeGame to revert on chain ID mismatch + // 4. All supplied configs are registered regardless of the enabled flag — + // callers must only include configs they want active for (uint256 i = 0; i < _input.disputeGameConfigs.length; i++) { disputeGameFactory.setImplementation( _input.disputeGameConfigs[i].gameType, @@ -255,7 +254,10 @@ contract OPContractsManagerMigrator is OPContractsManagerUtilsCaller { existingLockbox.migrateLiquidity(_newLockbox); // Clear out any implementations that might exist in the old DisputeGameFactory proxy. - // We clear out all potential game types to be safe. + // We clear out all potential game types to be safe. These game types are intentionally + // hardcoded rather than sourced from a shared utility. When new game types are added, + // this list and the corresponding list in OPCMv2's _assertValidFullConfig must both + // be updated. 
existingDGF.setImplementation(GameTypes.CANNON, IDisputeGame(address(0)), hex""); existingDGF.setImplementation(GameTypes.SUPER_CANNON, IDisputeGame(address(0)), hex""); existingDGF.setImplementation(GameTypes.PERMISSIONED_CANNON, IDisputeGame(address(0)), hex""); diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol index 0e3752c0cd333..9a4db3305a3e1 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol @@ -153,9 +153,9 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// - Major bump: New required sequential upgrade /// - Minor bump: Replacement OPCM for same upgrade /// - Patch bump: Development changes (expected for normal dev work) - /// @custom:semver 7.0.10 + /// @custom:semver 7.0.11 function version() public pure returns (string memory) { - return "7.0.10"; + return "7.0.11"; } /// @param _standardValidator The standard validator for this OPCM release. @@ -677,7 +677,10 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// @notice Validates the deployment/upgrade config. /// @param _cfg The full config. function _assertValidFullConfig(FullConfig memory _cfg, bool _isInitialDeployment) internal pure { - // Start validating the dispute game configs. Put allowed game types here. + // Start validating the dispute game configs. Put allowed game types here. Note that + // these game types are intentionally hardcoded rather than sourced from a shared utility. + // When new game types are added, this list and the corresponding list in the Migrator's + // _migratePortal function must both be updated. 
GameType[] memory validGameTypes = new GameType[](3); validGameTypes[0] = GameTypes.CANNON; validGameTypes[1] = GameTypes.PERMISSIONED_CANNON; diff --git a/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol b/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol index d9a21c6ace89a..6e5519a82ed0a 100644 --- a/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol +++ b/packages/contracts-bedrock/src/dispute/AnchorStateRegistry.sol @@ -24,8 +24,8 @@ import { ISuperchainConfig } from "interfaces/L1/ISuperchainConfig.sol"; /// be initialized with a more recent starting state which reduces the amount of required offchain computation. contract AnchorStateRegistry is ProxyAdminOwnedBase, Initializable, ReinitializableBase, ISemver { /// @notice Semantic version. - /// @custom:semver 3.8.0 - string public constant version = "3.8.0"; + /// @custom:semver 3.8.1 + string public constant version = "3.8.1"; /// @notice The dispute game finality delay in seconds. uint256 internal immutable DISPUTE_GAME_FINALITY_DELAY_SECONDS; @@ -190,6 +190,14 @@ contract AnchorStateRegistry is ProxyAdminOwnedBase, Initializable, Reinitializa /// @notice Determines whether a game is registered by checking that it was created by the /// DisputeGameFactory. + /// @dev NOTE: A previous version of this function also verified that the game's + /// AnchorStateRegistry matched address(this). This check was removed because + /// retirementTimestamp (set to block.timestamp on initialize) invalidates all + /// pre-existing games. However, there is a theoretical risk if: (a) the ASR + /// proxy is upgraded to a net-new proxy address rather than upgraded in-place, + /// (b) the upgrade is non-atomic, and (c) an invalid game from the old ASR + /// is used in the gap between operations. This scenario is extremely contrived + /// and not a practical concern for atomic proxy upgrades. /// @param _game The game to check. /// @return Whether the game is registered. 
function isGameRegistered(IDisputeGame _game) public view returns (bool) { diff --git a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol index e0d3d8c372c9d..ff60a4aefea13 100644 --- a/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/FaultDisputeGame.sol @@ -147,9 +147,9 @@ contract FaultDisputeGame is Clone, ISemver { uint256 internal constant HEADER_BLOCK_NUMBER_INDEX = 8; /// @notice Semantic version. - /// @custom:semver 2.4.0 + /// @custom:semver 2.4.1 function version() public pure virtual returns (string memory) { - return "2.4.0"; + return "2.4.1"; } /// @notice The starting timestamp of the game @@ -524,8 +524,8 @@ contract FaultDisputeGame is Clone, ISemver { // Construct the next clock with the new duration and the current block timestamp. Clock nextClock = LibClock.wrap(nextDuration, Timestamp.wrap(uint64(block.timestamp))); - // INVARIANT: There cannot be multiple identical claims with identical moves on the same challengeIndex. Multiple - // claims at the same position may dispute the same challengeIndex. However, they must have different + // INVARIANT: There cannot be multiple identical claims with identical moves on the same challengeIndex. + // Multiple claims at the same position may dispute the same challengeIndex. However, they must have different // values. 
Hash claimHash = _claim.hashClaimPos(nextPosition, _challengeIndex); if (claims[claimHash]) revert ClaimAlreadyExists(); diff --git a/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol b/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol index d34297c51dbc4..2a2286b6ae38f 100644 --- a/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol +++ b/packages/contracts-bedrock/src/dispute/SuperFaultDisputeGame.sol @@ -146,9 +146,9 @@ contract SuperFaultDisputeGame is Clone, ISemver { Position internal constant ROOT_POSITION = Position.wrap(1); /// @notice Semantic version. - /// @custom:semver 0.7.0 + /// @custom:semver 0.7.1 function version() public pure virtual returns (string memory) { - return "0.7.0"; + return "0.7.1"; } /// @notice The starting timestamp of the game @@ -574,8 +574,8 @@ contract SuperFaultDisputeGame is Clone, ISemver { // Construct the next clock with the new duration and the current block timestamp. Clock nextClock = LibClock.wrap(nextDuration, Timestamp.wrap(uint64(block.timestamp))); - // INVARIANT: There cannot be multiple identical claims with identical moves on the same challengeIndex. Multiple - // claims at the same position may dispute the same challengeIndex. However, they must have different + // INVARIANT: There cannot be multiple identical claims with identical moves on the same challengeIndex. + // Multiple claims at the same position may dispute the same challengeIndex. However, they must have different // values. 
Hash claimHash = _claim.hashClaimPos(nextPosition, _challengeIndex); if (claims[claimHash]) revert ClaimAlreadyExists(); diff --git a/packages/contracts-bedrock/src/dispute/lib/Types.sol b/packages/contracts-bedrock/src/dispute/lib/Types.sol index eb9438e2b5b50..0fb85b88ca4bb 100644 --- a/packages/contracts-bedrock/src/dispute/lib/Types.sol +++ b/packages/contracts-bedrock/src/dispute/lib/Types.sol @@ -48,6 +48,8 @@ struct Proposal { /// @title GameTypes /// @notice A library that defines the IDs of games that can be played. +/// When adding a new game type, the hardcoded game type lists in OPContractsManagerMigrator +/// and OPContractsManagerV2's _assertValidFullConfig must also be updated. library GameTypes { /// @dev A dispute game type the uses the cannon vm. GameType internal constant CANNON = GameType.wrap(0); diff --git a/packages/contracts-bedrock/src/safe/SafeSigners.sol b/packages/contracts-bedrock/src/safe/SafeSigners.sol index 9a75f43ffea34..76bb206c9b6bb 100644 --- a/packages/contracts-bedrock/src/safe/SafeSigners.sol +++ b/packages/contracts-bedrock/src/safe/SafeSigners.sol @@ -37,7 +37,7 @@ library SafeSigners { /// @notice Extract the signers from a set of signatures. /// This method is based closely on the code in the Safe.checkNSignatures() method. /// https://github.com/safe-global/safe-contracts/blob/e870f514ad34cd9654c72174d6d4a839e3c6639f/contracts/Safe.sol#L274 - /// It has been modified by removing all signature _validation_ code. We trust the Safe to properly validate + /// It has been modified by removing all signature _validation_ code. We trust the Safe to properly validate /// the signatures. /// This method therefore simply extracts the addresses from the signatures. 
function getNSigners( diff --git a/packages/contracts-bedrock/src/vendor/eas/IEAS.sol b/packages/contracts-bedrock/src/vendor/eas/IEAS.sol index 7581fdb3b3693..c3717415bac74 100644 --- a/packages/contracts-bedrock/src/vendor/eas/IEAS.sol +++ b/packages/contracts-bedrock/src/vendor/eas/IEAS.sol @@ -41,7 +41,7 @@ struct MultiDelegatedAttestationRequest { bytes32 schema; // The unique identifier of the schema. AttestationRequestData[] data; // The arguments of the attestation requests. Signature[] signatures; // The ECDSA signatures data. Please note that the signatures are assumed to be signed with - // increasing nonces. + // increasing nonces. address attester; // The attesting account. uint64 deadline; // The deadline of the signature/request. } @@ -79,7 +79,7 @@ struct MultiDelegatedRevocationRequest { bytes32 schema; // The unique identifier of the schema. RevocationRequestData[] data; // The arguments of the revocation requests. Signature[] signatures; // The ECDSA signatures data. Please note that the signatures are assumed to be signed with - // increasing nonces. + // increasing nonces. address revoker; // The revoking account. uint64 deadline; // The deadline of the signature/request. 
} diff --git a/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/helpers/MockL2ToL2CrossDomainMessenger.t.sol b/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/helpers/MockL2ToL2CrossDomainMessenger.t.sol index 94f275e26551a..64488751b03c1 100644 --- a/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/helpers/MockL2ToL2CrossDomainMessenger.t.sol +++ b/packages/contracts-bedrock/test/invariants/OptimismSuperchainERC20/helpers/MockL2ToL2CrossDomainMessenger.t.sol @@ -86,7 +86,14 @@ contract MockL2ToL2CrossDomainMessenger { /// @notice recipient will not be used since in normal execution it's the same /// address on a different chain, but here we have to compute it to mock /// cross-chain messaging - function sendMessage(uint256 chainId, address, /*recipient*/ bytes calldata data) external { + function sendMessage( + uint256 chainId, + address, + /*recipient*/ + bytes calldata data + ) + external + { address crossChainRecipient = superTokenAddresses[chainId][superTokenInitDeploySalts[msg.sender]]; if (crossChainRecipient == msg.sender) { require(false, "MockL2ToL2CrossDomainMessenger: same chain"); diff --git a/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol b/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol index 51e0cb1ae9a47..11c791f271ef1 100644 --- a/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol +++ b/packages/contracts-bedrock/test/libraries/trie/MerkleTrie.t.sol @@ -482,7 +482,7 @@ contract MerkleTrie_Get_Test is MerkleTrie_TestInit { 
hex"f8f1a069a092c7a950214e7e45b99012dc8ad112eab0fc94ae5ca9efbd6949068384f280a0b25c46db67ef7cf0c47bb400c31c85a26c5a204431527c964c8ecaf3d63e52cc80a01911a2a74db0d8d182447176e23f25556d1a1eaa0afad96453f2d64876ad88e480808080a04a0ca9e3bed1bc3e3c819384d19b6d5e523164a6520c4eb42e828a63ef730ae38080a03b598ed1b9269d4b05e2e75cfb54298d25437669870c919a59a147d2d256fdba80a0db2d655057c83107a73d086cfdd8fcc74739bb48c652eb0ce597178ecf96b39aa05c66ac392a761341b9c22b773ea19af311f34ef537640b9bb96842ec6ace913280"; proof[4] = hex"f69f204dcf44e265ba93879b2da89e1b16ab48fc5eb8e31bc16b0612d6da8463f195942536c09e5f5691498805884fa37811be3b2bddb4"; // Correct - // leaf node + // leaf node bytes32 root = keccak256(proof[0]); diff --git a/packages/contracts-bedrock/test/universal/Proxy.t.sol b/packages/contracts-bedrock/test/universal/Proxy.t.sol index b3b7b52342e15..dfc225ae9649a 100644 --- a/packages/contracts-bedrock/test/universal/Proxy.t.sol +++ b/packages/contracts-bedrock/test/universal/Proxy.t.sol @@ -258,7 +258,7 @@ contract Proxy_Implementation_Test is Proxy_TestInit { assertEq(success, false); bytes memory err = abi.encodeWithSignature("Error(string)", "Proxy: implementation not initialized"); // nosemgrep: - // sol-style-use-abi-encodecall + // sol-style-use-abi-encodecall assertEq(returndata, err); } From b04da4c4369eb031c541f91b50c69dd8211bd653 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Fri, 6 Mar 2026 05:57:08 -0500 Subject: [PATCH 063/201] chore(op-acceptance-tests): add TestSupernodeInteropChainLag to flake-shake (#19413) This test has shown consistent flakiness in memory-all: 3 failures across 3 separate branches in the past 7 days. Quarantine it in flake-shake while the root cause is investigated. Note: this exclusion only takes effect once #19412 (op-acceptor v3.10.1 bump) merges. Prior to that, flake-shake entries for named tests are silently ignored in gateless/memory-all mode. 
Co-authored-by: smartcontracts Co-authored-by: Claude Sonnet 4.6 Co-authored-by: geoknee --- op-acceptance-tests/acceptance-tests.yaml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/op-acceptance-tests/acceptance-tests.yaml b/op-acceptance-tests/acceptance-tests.yaml index f6f6c5237ec1b..ead1240580fd8 100644 --- a/op-acceptance-tests/acceptance-tests.yaml +++ b/op-acceptance-tests/acceptance-tests.yaml @@ -61,6 +61,12 @@ gates: metadata: owner: "adrian sutton" target_gate: "supernode-interop" + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/supernode/interop + name: TestSupernodeInteropChainLag + timeout: 15m + metadata: + owner: "axel kingsley" + target_gate: "supernode-interop" - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync name: TestUnsafeChainNotStalling_ELSync_Short timeout: 10m From 3337071e781c9de5329f5452ce78b53d0e9285dd Mon Sep 17 00:00:00 2001 From: Changwan Park Date: Fri, 6 Mar 2026 23:32:53 +0900 Subject: [PATCH 064/201] op-acceptance-tests: Add PE MVP tests (#19404) * op-acceptance-tests: Add PE acceptance tests * linter * linter * fix preset bug causing test to inf hang --- .../flashblocks/flashblocks_stream_test.go | 25 +- .../flashblocks/flashblocks_transfer_test.go | 2 +- .../tests/flashblocks/utils.go | 27 + op-acceptance-tests/tests/rules/init_test.go | 86 +++ op-acceptance-tests/tests/rules/rules_test.go | 695 ++++++++++++++++++ op-devstack/dsl/fb_ws_client.go | 28 + op-devstack/presets/op_rbuilder_rules.go | 43 ++ op-devstack/sysgo/op_rbuilder.go | 19 +- op-devstack/sysgo/orchestrator.go | 1 + 9 files changed, 900 insertions(+), 26 deletions(-) create mode 100644 op-acceptance-tests/tests/rules/init_test.go create mode 100644 op-acceptance-tests/tests/rules/rules_test.go create mode 100644 op-devstack/presets/op_rbuilder_rules.go diff --git a/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go 
b/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go index 377e201d68943..debdac098c73e 100644 --- a/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go +++ b/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go @@ -11,10 +11,8 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/log/logfilter" "github.com/ethereum-optimism/optimism/op-service/logmods" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" ) @@ -64,7 +62,7 @@ func TestFlashblocksStream(gt *testing.T) { expectedChainID := sys.L2Chain.ChainID().ToBig() require.Equal(t, oprbuilderNode.Escape().ChainID().ToBig(), expectedChainID, "flashblocks builder node chain id should match expected chain id") - driveViaTestSequencer(t, sys, 3) + DriveViaTestSequencer(t, sys, 3) // Test the presence / absence of a flashblocks stream operating at a 250ms rate from a flashblocks-websocket-proxy node. // Allow a generous window for first flashblocks to appear. @@ -122,27 +120,6 @@ func TestFlashblocksStream(gt *testing.T) { logger.Info("Flashblocks stream validation completed", "total_flashblocks_produced", totalFlashblocksProduced) } -// driveViaTestSequencer explicitly builds a few blocks to ensure the builder/rollup-boost -// have payloads to serve before we start listening for flashblocks. 
-func driveViaTestSequencer(t devtest.T, sys *presets.SingleChainWithFlashblocks, count int) { - t.Helper() - ts := sys.TestSequencer.Escape().ControlAPI(sys.L2Chain.ChainID()) - ctx := t.Ctx() - - head := sys.L2EL.BlockRefByLabel(eth.Unsafe) - for i := 0; i < count; i++ { - require.NoError(t, ts.New(ctx, seqtypes.BuildOpts{Parent: head.Hash})) - require.NoError(t, ts.Next(ctx)) - head = sys.L2EL.BlockRefByLabel(eth.Unsafe) - } - // Ensure the sequencer EL has produced at least one unsafe block before subscribing. - sys.L2EL.WaitForBlockNumber(1) - - // Log the latest unsafe head and L1 origin to confirm block production before listening. - head = sys.L2EL.BlockRefByLabel(eth.Unsafe) - sys.Log.Info("Pre-listen unsafe head", "unsafe", head) -} - func evaluateFlashblocksStream(t devtest.T, logger log.Logger, streamedMessages []string, failureTolerance int) int { require.Greater(t, len(streamedMessages), 0, "should have received at least one message from WebSocket") flashblocks := make([]Flashblock, len(streamedMessages)) diff --git a/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go b/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go index 80cf2e3c8f689..761af720719aa 100644 --- a/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go +++ b/op-acceptance-tests/tests/flashblocks/flashblocks_transfer_test.go @@ -46,7 +46,7 @@ func TestFlashblocksTransfer(gt *testing.T) { defer span.End() // Drive a couple blocks on the test sequencer so the faucet L2 funding tx has a chance to land before we rely on it. 
- driveViaTestSequencer(t, sys, 2) + DriveViaTestSequencer(t, sys, 2) alice := sys.FunderL2.NewFundedEOA(eth.ThreeHundredthsEther) bob := sys.Wallet.NewEOA(sys.L2EL) diff --git a/op-acceptance-tests/tests/flashblocks/utils.go b/op-acceptance-tests/tests/flashblocks/utils.go index e34e23ab22296..6d3262112a7a6 100644 --- a/op-acceptance-tests/tests/flashblocks/utils.go +++ b/op-acceptance-tests/tests/flashblocks/utils.go @@ -3,6 +3,12 @@ package flashblocks import ( "encoding/json" "strings" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" + "github.com/stretchr/testify/require" ) type Flashblock struct { @@ -51,3 +57,24 @@ func (f *Flashblock) UnmarshalJSON(data []byte) error { return nil } + +// DriveViaTestSequencer explicitly builds a few blocks to ensure the builder/rollup-boost +// have payloads to serve before we start listening for flashblocks. +func DriveViaTestSequencer(t devtest.T, sys *presets.SingleChainWithFlashblocks, count int) { + t.Helper() + ts := sys.TestSequencer.Escape().ControlAPI(sys.L2Chain.ChainID()) + ctx := t.Ctx() + + head := sys.L2EL.BlockRefByLabel(eth.Unsafe) + for range count { + require.NoError(t, ts.New(ctx, seqtypes.BuildOpts{Parent: head.Hash})) + require.NoError(t, ts.Next(ctx)) + head = sys.L2EL.BlockRefByLabel(eth.Unsafe) + } + // Ensure the sequencer EL has produced at least one unsafe block before subscribing. + sys.L2EL.WaitForBlockNumber(1) + + // Log the latest unsafe head and L1 origin to confirm block production before listening. 
+ head = sys.L2EL.BlockRefByLabel(eth.Unsafe) + sys.Log.Info("Pre-listen unsafe head", "unsafe", head) +} diff --git a/op-acceptance-tests/tests/rules/init_test.go b/op-acceptance-tests/tests/rules/init_test.go new file mode 100644 index 0000000000000..12e9e05606920 --- /dev/null +++ b/op-acceptance-tests/tests/rules/init_test.go @@ -0,0 +1,86 @@ +package rules + +import ( + "os" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum/go-ethereum/common" +) + +const RULES_TEST_ENABLE_ENV = "OP_RBUILDER_RULES_TEST" + +func rulesEnabled() bool { + return os.Getenv(RULES_TEST_ENABLE_ENV) == "1" +} + +// TestMain creates the test-setups against the shared backend +func TestMain(m *testing.M) { + options := presets.WithSingleChainSystemWithFlashblocks() + if rulesEnabled() { + options = stack.Combine(options, presets.WithOPRBuilderRules(TestRulesYAML, TestRefreshInterval)) + } + presets.DoMain(m, options) +} + +// BoostedRecipient is the well-known address that receives boosted transactions in tests. +// Transactions sent TO this address will be prioritized by the block builder when rules are enabled. +var BoostedRecipient = common.HexToAddress("0x1111111111111111111111111111111111111111") + +// HighPriorityRecipient receives transactions with the highest boost (weight: 5000) +var HighPriorityRecipient = common.HexToAddress("0x2222222222222222222222222222222222222222") + +// MediumPriorityRecipient receives transactions with medium boost (weight: 2000) +var MediumPriorityRecipient = common.HexToAddress("0x3333333333333333333333333333333333333333") + +// LowPriorityRecipient receives transactions with low boost (weight: 500) +var LowPriorityRecipient = common.HexToAddress("0x4444444444444444444444444444444444444444") + +const TestRefreshInterval = 5 + +// TestRulesYAML is the rules configuration used for rule ordering tests. 
+// It defines multiple boost levels to test priority ordering: +// - High priority (weight 5000): transactions TO 0x2222... +// - Medium priority (weight 2000): transactions TO 0x3333... +// - Low priority (weight 500): transactions TO 0x4444... +// - Legacy boost (weight 1000): transactions TO 0x1111... (BoostedRecipient) +const TestRulesYAML = `version: 1 + +aliases: + high_priority_recipients: + - "0x2222222222222222222222222222222222222222" + medium_priority_recipients: + - "0x3333333333333333333333333333333333333333" + low_priority_recipients: + - "0x4444444444444444444444444444444444444444" + boosted_recipients: + - "0x1111111111111111111111111111111111111111" + +rules: + boost: + - name: "High Priority Boost" + description: "Highest priority transactions" + type: to + aliases: + - "high_priority_recipients" + weight: 5000 + - name: "Medium Priority Boost" + description: "Medium priority transactions" + type: to + aliases: + - "medium_priority_recipients" + weight: 2000 + - name: "Low Priority Boost" + description: "Low priority transactions" + type: to + aliases: + - "low_priority_recipients" + weight: 500 + - name: "Legacy Boosted Recipient" + description: "Boost transactions to test recipient address" + type: to + aliases: + - "boosted_recipients" + weight: 1000 +` diff --git a/op-acceptance-tests/tests/rules/rules_test.go b/op-acceptance-tests/tests/rules/rules_test.go new file mode 100644 index 0000000000000..52c00e6fbd81a --- /dev/null +++ b/op-acceptance-tests/tests/rules/rules_test.go @@ -0,0 +1,695 @@ +package rules + +import ( + "context" + "fmt" + "math/big" + "math/rand" + "sort" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/flashblocks" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/bigs" + 
"github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/retry" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +// TestBoostPriorityOrdering validates that transactions to addresses with higher +// boost weights appear earlier in blocks than transactions to lower-weight addresses. +// +// Rules configuration (from init_test.go): +// - HighPriorityRecipient (0x2222...): weight 5000 +// - MediumPriorityRecipient (0x3333...): weight 2000 +// - LowPriorityRecipient (0x4444...): weight 500 +// - No boost for other addresses: weight 0 +// +// Expected ordering: High (5000) > Medium (2000) > Low (500) > Normal (0) +// We verify this by checking TransactionIndex in the block - lower index = earlier in block. +func TestBoostPriorityOrdering(gt *testing.T) { + t := devtest.SerialT(gt) + skipIfRulesNotEnabled(t) + + logger := t.Logger() + tracer := t.Tracer() + ctx := t.Ctx() + + sys := presets.NewSingleChainWithFlashblocks(t) + + topLevelCtx, span := tracer.Start(ctx, "test boost priority ordering") + defer span.End() + + ctx, cancel := context.WithTimeout(topLevelCtx, 90*time.Second) + defer cancel() + + flashblocks.DriveViaTestSequencer(t, sys, 3) + + const maxRetries = 3 + err := retry.Do0(ctx, maxRetries, &retry.FixedStrategy{Dur: 0}, func() error { + fundAmount := eth.Ether(1) + senderHigh := sys.FunderL2.NewFundedEOA(fundAmount) + senderMedium := sys.FunderL2.NewFundedEOA(fundAmount) + senderLow := sys.FunderL2.NewFundedEOA(fundAmount) + senderNormal := sys.FunderL2.NewFundedEOA(fundAmount) + + normalRecipient := sys.Wallet.NewEOA(sys.L2EL) + normalRecipientAddr := normalRecipient.Address() + + sendAmount := eth.OneHundredthEther + var wg sync.WaitGroup + var txHigh, txMedium, txLow, txNormal *txplan.PlannedTx + + wg.Add(4) + go func() { + defer wg.Done() + txHigh = 
senderHigh.Transact( + senderHigh.Plan(), + txplan.WithTo(&HighPriorityRecipient), + txplan.WithValue(sendAmount), + ) + }() + go func() { + defer wg.Done() + txMedium = senderMedium.Transact( + senderMedium.Plan(), + txplan.WithTo(&MediumPriorityRecipient), + txplan.WithValue(sendAmount), + ) + }() + go func() { + defer wg.Done() + txLow = senderLow.Transact( + senderLow.Plan(), + txplan.WithTo(&LowPriorityRecipient), + txplan.WithValue(sendAmount), + ) + }() + go func() { + defer wg.Done() + txNormal = senderNormal.Transact( + senderNormal.Plan(), + txplan.WithTo(&normalRecipientAddr), + txplan.WithValue(sendAmount), + ) + }() + wg.Wait() + + receiptHigh, err := txHigh.Included.Eval(ctx) + if err != nil { + return fmt.Errorf("high priority tx inclusion: %w", err) + } + receiptMedium, err := txMedium.Included.Eval(ctx) + if err != nil { + return fmt.Errorf("medium priority tx inclusion: %w", err) + } + receiptLow, err := txLow.Included.Eval(ctx) + if err != nil { + return fmt.Errorf("low priority tx inclusion: %w", err) + } + receiptNormal, err := txNormal.Included.Eval(ctx) + if err != nil { + return fmt.Errorf("normal tx inclusion: %w", err) + } + + logger.Info("All transactions confirmed", + "high_block", receiptHigh.BlockNumber, "high_index", receiptHigh.TransactionIndex, + "medium_block", receiptMedium.BlockNumber, "medium_index", receiptMedium.TransactionIndex, + "low_block", receiptLow.BlockNumber, "low_index", receiptLow.TransactionIndex, + "normal_block", receiptNormal.BlockNumber, "normal_index", receiptNormal.TransactionIndex, + ) + + sameBlock := receiptHigh.BlockNumber.Cmp(receiptMedium.BlockNumber) == 0 && + receiptMedium.BlockNumber.Cmp(receiptLow.BlockNumber) == 0 && + receiptLow.BlockNumber.Cmp(receiptNormal.BlockNumber) == 0 + + if !sameBlock { + return fmt.Errorf("transactions landed in different blocks: high=%d, medium=%d, low=%d, normal=%d", + receiptHigh.BlockNumber, receiptMedium.BlockNumber, receiptLow.BlockNumber, 
receiptNormal.BlockNumber) + } + + require.Less(t, receiptHigh.TransactionIndex, receiptMedium.TransactionIndex, + "high priority (weight 5000) should have lower tx index than medium priority (weight 2000)") + require.Less(t, receiptMedium.TransactionIndex, receiptLow.TransactionIndex, + "medium priority (weight 2000) should have lower tx index than low priority (weight 500)") + require.Less(t, receiptLow.TransactionIndex, receiptNormal.TransactionIndex, + "low priority (weight 500) should have lower tx index than normal (no boost)") + + logger.Info("Boost priority ordering verified successfully", + "order", fmt.Sprintf("high(idx=%d) < medium(idx=%d) < low(idx=%d) < normal(idx=%d)", + receiptHigh.TransactionIndex, receiptMedium.TransactionIndex, + receiptLow.TransactionIndex, receiptNormal.TransactionIndex), + ) + return nil + }) + require.NoError(t, err, "boost priority ordering verification failed") +} + +// TestBoostedVsNonBoostedOrdering validates that a boosted transaction appears before +// a non-boosted transaction even when the non-boosted transaction has a MUCH HIGHER +// priority fee (gas tip). This proves that rule-based boost takes precedence over +// economic incentives (EIP-1559 priority fees). +// +// Test setup: +// - Boosted tx: to BoostedRecipient (weight 1000), LOW priority fee (1 gwei tip) +// - Normal tx: to normal recipient (no boost), HIGH priority fee (100 gwei tip) +// +// Expected: Despite 100x higher gas tip, the normal tx should come AFTER the boosted tx. 
+func TestBoostedVsNonBoostedOrdering(gt *testing.T) { + t := devtest.SerialT(gt) + skipIfRulesNotEnabled(t) + + logger := t.Logger() + tracer := t.Tracer() + ctx := t.Ctx() + + sys := presets.NewSingleChainWithFlashblocks(t) + + topLevelCtx, span := tracer.Start(ctx, "test boosted vs non-boosted ordering") + defer span.End() + + ctx, cancel := context.WithTimeout(topLevelCtx, 90*time.Second) + defer cancel() + + flashblocks.DriveViaTestSequencer(t, sys, 2) + + lowGasTip := big.NewInt(1_000_000_000) + highGasTip := big.NewInt(100_000_000_000) + highGasFeeCap := big.NewInt(200_000_000_000) + + const maxRetries = 3 + err := retry.Do0(ctx, maxRetries, &retry.FixedStrategy{Dur: 0}, func() error { + fundAmount := eth.ThreeHundredthsEther + senderBoosted := sys.FunderL2.NewFundedEOA(fundAmount) + senderNormal := sys.FunderL2.NewFundedEOA(fundAmount) + + normalRecipient := sys.Wallet.NewEOA(sys.L2EL) + normalRecipientAddr := normalRecipient.Address() + + sendAmount := eth.OneHundredthEther + var wg sync.WaitGroup + var txBoosted, txNormal *txplan.PlannedTx + + wg.Add(2) + go func() { + defer wg.Done() + txBoosted = senderBoosted.Transact( + senderBoosted.Plan(), + txplan.WithTo(&BoostedRecipient), + txplan.WithValue(sendAmount), + txplan.WithGasTipCap(lowGasTip), + ) + }() + go func() { + defer wg.Done() + txNormal = senderNormal.Transact( + senderNormal.Plan(), + txplan.WithTo(&normalRecipientAddr), + txplan.WithValue(sendAmount), + txplan.WithGasTipCap(highGasTip), + txplan.WithGasFeeCap(highGasFeeCap), + ) + }() + wg.Wait() + + receiptBoosted, err := txBoosted.Included.Eval(ctx) + if err != nil { + return fmt.Errorf("boosted tx inclusion: %w", err) + } + receiptNormal, err := txNormal.Included.Eval(ctx) + if err != nil { + return fmt.Errorf("normal tx inclusion: %w", err) + } + + logger.Info("Transactions confirmed", + "boosted_block", receiptBoosted.BlockNumber, + "boosted_index", receiptBoosted.TransactionIndex, + "normal_block", receiptNormal.BlockNumber, + 
"normal_index", receiptNormal.TransactionIndex, + ) + + if receiptBoosted.BlockNumber.Cmp(receiptNormal.BlockNumber) != 0 { + return fmt.Errorf("transactions landed in different blocks: boosted=%d, normal=%d", + receiptBoosted.BlockNumber, receiptNormal.BlockNumber) + } + + require.Less(t, receiptBoosted.TransactionIndex, receiptNormal.TransactionIndex, + "boosted transaction (weight 1000, 1 gwei tip) should have lower tx index than "+ + "normal transaction (no boost, 100 gwei tip) - proving rules > gas priority") + + logger.Info("Rule-based boost precedence over gas priority verified!", + "boosted_index", receiptBoosted.TransactionIndex, + "normal_index", receiptNormal.TransactionIndex, + ) + return nil + }) + require.NoError(t, err, "boosted vs non-boosted ordering verification failed") +} + +// TestSameSenderNonceOrdering verifies that transactions from the same sender +// maintain nonce ordering regardless of boost rules. +func TestSameSenderNonceOrdering(gt *testing.T) { + t := devtest.SerialT(gt) + skipIfRulesNotEnabled(t) + + logger := t.Logger() + tracer := t.Tracer() + ctx := t.Ctx() + + sys := presets.NewSingleChainWithFlashblocks(t) + + topLevelCtx, span := tracer.Start(ctx, "test same sender nonce ordering") + defer span.End() + + ctx, cancel := context.WithTimeout(topLevelCtx, 60*time.Second) + defer cancel() + + // Drive initial blocks + flashblocks.DriveViaTestSequencer(t, sys, 2) + + // Create a single funded sender + sender := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + + // Create normal recipient + normalRecipient := sys.Wallet.NewEOA(sys.L2EL) + normalRecipientAddr := normalRecipient.Address() + + logger.Info("Test sender created", "address", sender.Address().Hex()) + + sendAmount := eth.OneHundredthEther + + // Send 3 sequential transactions from same sender to different recipients + // TX0 -> Normal recipient (no boost) + // TX1 -> HighPriorityRecipient (weight 5000) + // TX2 -> Normal recipient (no boost) + // + // Even though TX1 has the 
highest boost, it must come after TX0 due to nonce ordering + + // TX0: to normal recipient + tx0 := sender.Transact( + sender.Plan(), + txplan.WithTo(&normalRecipientAddr), + txplan.WithValue(sendAmount), + ) + receipt0, err := tx0.Included.Eval(ctx) + require.NoError(t, err, "tx0 should be included") + + // TX1: to high priority recipient + tx1 := sender.Transact( + sender.Plan(), + txplan.WithTo(&HighPriorityRecipient), + txplan.WithValue(sendAmount), + ) + receipt1, err := tx1.Included.Eval(ctx) + require.NoError(t, err, "tx1 should be included") + + // TX2: to normal recipient + tx2 := sender.Transact( + sender.Plan(), + txplan.WithTo(&normalRecipientAddr), + txplan.WithValue(sendAmount), + ) + receipt2, err := tx2.Included.Eval(ctx) + require.NoError(t, err, "tx2 should be included") + + logger.Info("Sequential transactions confirmed", + "tx0_hash", receipt0.TxHash.Hex(), "tx0_block", receipt0.BlockNumber, "tx0_index", receipt0.TransactionIndex, + "tx1_hash", receipt1.TxHash.Hex(), "tx1_block", receipt1.BlockNumber, "tx1_index", receipt1.TransactionIndex, + "tx2_hash", receipt2.TxHash.Hex(), "tx2_block", receipt2.BlockNumber, "tx2_index", receipt2.TransactionIndex, + ) + + // Verify nonce ordering is preserved + // If transactions are in the same block, their indices must reflect nonce order + if receipt0.BlockNumber.Cmp(receipt1.BlockNumber) == 0 { + require.Less(t, receipt0.TransactionIndex, receipt1.TransactionIndex, + "tx0 (nonce N) must have lower index than tx1 (nonce N+1) despite tx1 having higher boost") + } else { + // If in different blocks, tx0's block must be <= tx1's block + require.LessOrEqual(t, + bigs.Uint64Strict(receipt0.BlockNumber), + bigs.Uint64Strict(receipt1.BlockNumber), + "tx0 must be in same or earlier block than tx1", + ) + } + + if receipt1.BlockNumber.Cmp(receipt2.BlockNumber) == 0 { + require.Less(t, receipt1.TransactionIndex, receipt2.TransactionIndex, + "tx1 (nonce N+1) must have lower index than tx2 (nonce N+2)") + } else { + 
require.LessOrEqual(t, + bigs.Uint64Strict(receipt1.BlockNumber), + bigs.Uint64Strict(receipt2.BlockNumber), + "tx1 must be in same or earlier block than tx2", + ) + } + + logger.Info("Nonce ordering verified - boost rules do not break same-sender ordering") +} + +// TestMultipleSendersWithMixedPriorities tests a realistic scenario with multiple +// senders sending to different priority recipients concurrently. +func TestMultipleSendersWithMixedPriorities(gt *testing.T) { + t := devtest.SerialT(gt) + skipIfRulesNotEnabled(t) + + logger := t.Logger() + tracer := t.Tracer() + ctx := t.Ctx() + + sys := presets.NewSingleChainWithFlashblocks(t) + + topLevelCtx, span := tracer.Start(ctx, "test multiple senders mixed priorities") + defer span.End() + + ctx, cancel := context.WithTimeout(topLevelCtx, 120*time.Second) + defer cancel() + + flashblocks.DriveViaTestSequencer(t, sys, 2) + + type senderConfig struct { + eoa *dsl.EOA + priority string + recipient common.Address + weight int + } + + type txResult struct { + receipt *types.Receipt + priority string + weight int + } + + const maxRetries = 3 + err := retry.Do0(ctx, maxRetries, &retry.FixedStrategy{Dur: 0}, func() error { + fundAmount := eth.ThreeHundredthsEther + + normalRecipient := sys.Wallet.NewEOA(sys.L2EL) + normalRecipientAddr := normalRecipient.Address() + + configs := []struct { + priority string + recipient common.Address + weight int + }{ + {"high", HighPriorityRecipient, 5000}, + {"medium", MediumPriorityRecipient, 2000}, + {"low", LowPriorityRecipient, 500}, + {"normal", normalRecipientAddr, 0}, + {"high", HighPriorityRecipient, 5000}, + {"normal", normalRecipientAddr, 0}, + } + + senders := make([]senderConfig, len(configs)) + for i, cfg := range configs { + senders[i] = senderConfig{ + eoa: sys.FunderL2.NewFundedEOA(fundAmount), + priority: cfg.priority, + recipient: cfg.recipient, + weight: cfg.weight, + } + } + + sendAmount := eth.OneHundredthEther + var wg sync.WaitGroup + plannedTxs := 
make([]*txplan.PlannedTx, len(senders)) + + for i := range senders { + wg.Add(1) + idx := i + go func() { + defer wg.Done() + recipient := senders[idx].recipient + plannedTxs[idx] = senders[idx].eoa.Transact( + senders[idx].eoa.Plan(), + txplan.WithTo(&recipient), + txplan.WithValue(sendAmount), + ) + }() + } + wg.Wait() + + results := make([]txResult, len(senders)) + for i := range senders { + receipt, err := plannedTxs[i].Included.Eval(ctx) + if err != nil { + return fmt.Errorf("tx%d inclusion: %w", i, err) + } + results[i] = txResult{ + receipt: receipt, + priority: senders[i].priority, + weight: senders[i].weight, + } + logger.Info("Transaction confirmed", + "index", i, + "priority", senders[i].priority, + "weight", senders[i].weight, + "block", receipt.BlockNumber, + "tx_index", receipt.TransactionIndex, + ) + } + + blockGroups := make(map[uint64][]txResult) + for _, r := range results { + blockNum := bigs.Uint64Strict(r.receipt.BlockNumber) + blockGroups[blockNum] = append(blockGroups[blockNum], r) + } + + if len(blockGroups) != 1 { + blockNumbers := make([]uint64, 0, len(blockGroups)) + for blockNum := range blockGroups { + blockNumbers = append(blockNumbers, blockNum) + } + return fmt.Errorf("transactions landed in %d different blocks: %v", len(blockGroups), blockNumbers) + } + + for blockNum, txs := range blockGroups { + logger.Info("Verifying ordering in block", "block", blockNum, "tx_count", len(txs)) + + for i := 0; i < len(txs); i++ { + for j := i + 1; j < len(txs); j++ { + if txs[i].weight > txs[j].weight { + require.Less(t, txs[i].receipt.TransactionIndex, txs[j].receipt.TransactionIndex, + "tx with weight %d should have lower index than tx with weight %d in block %d", + txs[i].weight, txs[j].weight, blockNum) + } else if txs[i].weight < txs[j].weight { + require.Greater(t, txs[i].receipt.TransactionIndex, txs[j].receipt.TransactionIndex, + "tx with weight %d should have higher index than tx with weight %d in block %d", + txs[i].weight, 
txs[j].weight, blockNum) + } + } + } + } + + logger.Info("Multiple senders mixed priorities test completed successfully") + return nil + }) + require.NoError(t, err, "multiple senders mixed priorities verification failed") +} + +// TestSingleSenderRandomNonceOrderWithRandomScores sends 10 transactions from a single sender +// with explicit nonces, random gas tips, and random boost recipients. The transactions are +// submitted to the mempool in a shuffled nonce order to verify that op-rbuilder correctly +// sorts by transaction score while still respecting nonce ordering for the same sender. +// +// Setup: +// - 1 sender, 10 transactions (nonces base+0 through base+9) +// - Each tx gets a random gas tip (1-50 gwei) AND a random recipient from +// {HighPriority, MediumPriority, LowPriority, Normal} for varied boost weights +// - Transactions are shuffled before submission so the mempool receives them out-of-order +// +// Expected: All 10 transactions are included with strict nonce ordering preserved, +// i.e. within the same block tx with nonce N always has a lower TransactionIndex +// than tx with nonce N+1, and across blocks lower nonces are in earlier blocks. 
+func TestSingleSenderRandomNonceOrderWithRandomScores(gt *testing.T) { + t := devtest.SerialT(gt) + skipIfRulesNotEnabled(t) + + logger := t.Logger() + tracer := t.Tracer() + ctx := t.Ctx() + + sys := presets.NewSingleChainWithFlashblocks(t) + + topLevelCtx, span := tracer.Start(ctx, "test single sender random nonce order with random scores") + defer span.End() + + ctx, cancel := context.WithTimeout(topLevelCtx, 120*time.Second) + defer cancel() + + flashblocks.DriveViaTestSequencer(t, sys, 2) + + const txCount = 10 + + type recipientInfo struct { + addr common.Address + weight int + } + normalRecipient := sys.Wallet.NewEOA(sys.L2EL) + normalRecipientAddr := normalRecipient.Address() + recipients := []recipientInfo{ + {HighPriorityRecipient, 5000}, + {MediumPriorityRecipient, 2000}, + {LowPriorityRecipient, 500}, + {normalRecipientAddr, 0}, + } + + const maxRetries = 3 + err := retry.Do0(ctx, maxRetries, &retry.FixedStrategy{Dur: 0}, func() error { + sender := sys.FunderL2.NewFundedEOA(eth.Ether(1)) + baseNonce := sender.PendingNonce() + + logger.Info("Test sender created", + "address", sender.Address().Hex(), + "baseNonce", baseNonce, + ) + + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + + type txConfig struct { + nonce uint64 + tipGwei int64 + recipient recipientInfo + } + configs := make([]txConfig, txCount) + for i := 0; i < txCount; i++ { + tipGwei := int64(1 + rng.Intn(50)) + recip := recipients[rng.Intn(len(recipients))] + configs[i] = txConfig{ + nonce: baseNonce + uint64(i), + tipGwei: tipGwei, + recipient: recip, + } + logger.Info("Transaction config", + "index", i, + "nonce", configs[i].nonce, + "tipGwei", tipGwei, + "recipient", recip.addr.Hex(), + "boostWeight", recip.weight, + ) + } + + submitOrder := rng.Perm(txCount) + logger.Info("Shuffled submission order", "order", submitOrder) + + sendAmount := eth.OneHundredthEther + highFeeCap := new(big.Int).Mul(big.NewInt(200), big.NewInt(1_000_000_000)) + + plannedTxs := 
make([]*txplan.PlannedTx, txCount) + var wg sync.WaitGroup + for _, idx := range submitOrder { + wg.Add(1) + go func(i int) { + defer wg.Done() + cfg := configs[i] + tip := new(big.Int).Mul(big.NewInt(cfg.tipGwei), big.NewInt(1_000_000_000)) + recipAddr := cfg.recipient.addr + plannedTxs[i] = sender.Transact( + sender.Plan(), + txplan.WithStaticNonce(cfg.nonce), + txplan.WithTo(&recipAddr), + txplan.WithValue(sendAmount), + txplan.WithGasTipCap(tip), + txplan.WithGasFeeCap(highFeeCap), + ) + }(idx) + } + wg.Wait() + + type txResult struct { + nonce uint64 + tipGwei int64 + weight int + receipt *types.Receipt + } + results := make([]txResult, txCount) + for i := 0; i < txCount; i++ { + receipt, err := plannedTxs[i].Included.Eval(ctx) + if err != nil { + return fmt.Errorf("tx%d (nonce %d) inclusion: %w", i, configs[i].nonce, err) + } + results[i] = txResult{ + nonce: configs[i].nonce, + tipGwei: configs[i].tipGwei, + weight: configs[i].recipient.weight, + receipt: receipt, + } + logger.Info("Transaction confirmed", + "index", i, + "nonce", configs[i].nonce, + "tipGwei", configs[i].tipGwei, + "boostWeight", configs[i].recipient.weight, + "block", receipt.BlockNumber, + "txIndex", receipt.TransactionIndex, + ) + } + + sort.Slice(results, func(i, j int) bool { + return results[i].nonce < results[j].nonce + }) + + // Count how many transactions share a block with at least one other tx. + // If every tx lands in its own block, nonce ordering is trivially + // guaranteed by the protocol and the test does not exercise the builder's + // intra-block ordering logic. Treat that as a retryable condition so the + // next attempt (with a fresh sender/nonces) hopefully lands more txs in + // the same block. 
+ blockCounts := make(map[uint64]int) + for _, r := range results { + blockCounts[bigs.Uint64Strict(r.receipt.BlockNumber)]++ + } + maxPerBlock := 0 + for _, c := range blockCounts { + if c > maxPerBlock { + maxPerBlock = c + } + } + if maxPerBlock < 2 { + return fmt.Errorf("all %d transactions landed in separate blocks (%d blocks); "+ + "need at least 2 in the same block to validate intra-block nonce ordering", + txCount, len(blockCounts)) + } + + for i := 0; i < len(results)-1; i++ { + cur := results[i] + next := results[i+1] + + if cur.receipt.BlockNumber.Cmp(next.receipt.BlockNumber) == 0 { + require.Less(t, cur.receipt.TransactionIndex, next.receipt.TransactionIndex, + "nonce %d (tip=%d gwei, boost=%d, txIdx=%d) must have lower tx index than nonce %d (tip=%d gwei, boost=%d, txIdx=%d) in block %d", + cur.nonce, cur.tipGwei, cur.weight, cur.receipt.TransactionIndex, + next.nonce, next.tipGwei, next.weight, next.receipt.TransactionIndex, + cur.receipt.BlockNumber) + } else { + require.Less( + t, + bigs.Uint64Strict(cur.receipt.BlockNumber), + bigs.Uint64Strict(next.receipt.BlockNumber), + "nonce %d must be in an earlier block than nonce %d (got blocks %d and %d)", + cur.nonce, next.nonce, cur.receipt.BlockNumber, next.receipt.BlockNumber, + ) + } + } + + logger.Info("Single sender random nonce order test passed - nonce ordering preserved despite random scores and shuffled submission", + "txCount", txCount, + "blocksUsed", len(blockCounts), + "maxTxsInOneBlock", maxPerBlock, + "nonceRange", fmt.Sprintf("%d-%d", results[0].nonce, results[len(results)-1].nonce), + ) + return nil + }) + require.NoError(t, err, "single sender random nonce order with random scores verification failed") +} + +func skipIfRulesNotEnabled(t devtest.T) { + if !rulesEnabled() { + t.Skip("Skipping rule ordering test") + } +} diff --git a/op-devstack/dsl/fb_ws_client.go b/op-devstack/dsl/fb_ws_client.go index fad80c7aa5933..be6149888feba 100644 --- a/op-devstack/dsl/fb_ws_client.go +++ 
b/op-devstack/dsl/fb_ws_client.go @@ -4,6 +4,8 @@ import ( "context" "errors" "fmt" + "io" + "net" "net/http" "time" @@ -112,11 +114,37 @@ func websocketListenFor(ctx context.Context, logger log.Logger, wsURL string, he return nil } + // Check for close errors first - these are normal termination if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { logger.Info("WebSocket connection closed by peer", "total_messages", messageCount) return nil } + // Check if this is an unexpected close error (any close error not handled above) + var closeErr *websocket.CloseError + if errors.As(err, &closeErr) { + logger.Info("WebSocket connection closed unexpectedly", "code", closeErr.Code, "text", closeErr.Text, "total_messages", messageCount) + return nil + } + + // Check for EOF errors - connection was closed + if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { + logger.Info("WebSocket connection closed (EOF)", "total_messages", messageCount) + return nil + } + + // Check for use of closed network connection + if errors.Is(err, net.ErrClosed) { + logger.Info("WebSocket connection closed (network)", "total_messages", messageCount) + return nil + } + + // Timeout errors are recoverable - keep reading + var netErr net.Error + if errors.As(err, &netErr) && netErr.Timeout() { + continue + } + logger.Error("Error reading WebSocket message", "error", err, "message_count", messageCount) return fmt.Errorf("error reading WebSocket message: %w", err) } diff --git a/op-devstack/presets/op_rbuilder_rules.go b/op-devstack/presets/op_rbuilder_rules.go new file mode 100644 index 0000000000000..5942d82f67f6e --- /dev/null +++ b/op-devstack/presets/op_rbuilder_rules.go @@ -0,0 +1,43 @@ +package presets + +import ( + "fmt" + "os" + "path/filepath" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func WithOPRBuilderRules(ruleContent string, refreshInterval uint64) stack.CommonOption { + return stack.MakeCommon( + sysgo.WithGlobalOPRBuilderNodeOption(sysgo.OPRBuilderNodeOptionFn( + func(p devtest.P, id stack.OPRBuilderNodeID, cfg *sysgo.OPRBuilderNodeConfig) { + cfg.RulesEnabled = true + // Create a fixed directory for rules config + rulesDir := filepath.Join(os.TempDir(), "rules") + if err := os.MkdirAll(rulesDir, 0755); err != nil { + p.Errorf("Failed to create rules dir: %v", err) + } + // Write rules + rulesPath := filepath.Join(rulesDir, "ruleset.yaml") + if err := os.WriteFile(rulesPath, []byte(ruleContent), 0644); err != nil { + p.Errorf("Failed to create rules dir: %v", err) + } + // Write rule config pointing to rules file + rulesConfigContent := fmt.Sprintf(` +file: + - path: %s + name: "Test Rules" + enabled: true + +refresh_interval: %d +`, rulesPath, refreshInterval) + rulesConfigPath := filepath.Join(rulesDir, "rules_config.yaml") + if err := os.WriteFile(rulesConfigPath, []byte(rulesConfigContent), 0644); err != nil { + p.Errorf("Failed to write registry file: %v", err) + } + cfg.RulesConfigPath = rulesConfigPath + }))) +} diff --git a/op-devstack/sysgo/op_rbuilder.go b/op-devstack/sysgo/op_rbuilder.go index 90501f155a462..69b0ea1d0e052 100644 --- a/op-devstack/sysgo/op_rbuilder.go +++ b/op-devstack/sysgo/op_rbuilder.go @@ -92,6 +92,9 @@ type OPRBuilderNodeConfig struct { Full bool + RulesEnabled bool + RulesConfigPath string + // ExtraArgs are appended to the generated CLI allowing callers to override defaults // if the binary respects "last flag wins". 
ExtraArgs []string @@ -126,6 +129,8 @@ func DefaultOPRbuilderNodeConfig() *OPRBuilderNodeConfig { DataDir: "", ExtraArgs: nil, Env: nil, + RulesEnabled: false, + RulesConfigPath: "", } } @@ -235,6 +240,11 @@ func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.P) (args []string, env []s args = append(args, "--datadir="+cfg.DataDir) } + if cfg.RulesEnabled { + args = append(args, "--rules.enabled") + args = append(args, "--rules.config-path="+cfg.RulesConfigPath) + } + args = append(args, cfg.ExtraArgs...) return args, env @@ -244,6 +254,12 @@ type OPRBuilderNodeOption interface { Apply(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) } +func WithGlobalOPRBuilderNodeOption(opt OPRBuilderNodeOption) stack.Option[*Orchestrator] { + return stack.BeforeDeploy(func(o *Orchestrator) { + o.oprbuilderNodeOptions = append(o.oprbuilderNodeOptions, opt) + }) +} + type OPRBuilderNodeOptionFn func(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) var _ OPRBuilderNodeOption = OPRBuilderNodeOptionFn(nil) @@ -481,7 +497,8 @@ func WithOPRBuilderNode(id stack.OPRBuilderNodeID, opts ...OPRBuilderNodeOption) cfg := DefaultOPRbuilderNodeConfig() cfg.AuthRPCJWTPath, _ = orch.writeDefaultJWT() cfg.Chain = chainConfigPath - OPRBuilderNodeOptionBundle(opts).Apply(orch.P(), id, cfg) + orch.oprbuilderNodeOptions.Apply(p, id, cfg) // apply global options + OPRBuilderNodeOptionBundle(opts).Apply(orch.P(), id, cfg) // apply specific options rb := &OPRBuilderNode{ id: id, diff --git a/op-devstack/sysgo/orchestrator.go b/op-devstack/sysgo/orchestrator.go index ed6a6ab4f50ae..28c61738f419c 100644 --- a/op-devstack/sysgo/orchestrator.go +++ b/op-devstack/sysgo/orchestrator.go @@ -32,6 +32,7 @@ type Orchestrator struct { batcherOptions []BatcherOption proposerOptions []ProposerOption l2CLOptions L2CLOptionBundle + oprbuilderNodeOptions OPRBuilderNodeOptionBundle l2ELOptions L2ELOptionBundle l2ChallengerOpts l2ChallengerOpts SyncTesterELOptions 
SyncTesterELOptionBundle From ee28d5ac1f4dfd5e800e59459911ebb146566283 Mon Sep 17 00:00:00 2001 From: Stefano Charissis Date: Fri, 6 Mar 2026 15:37:28 +0100 Subject: [PATCH 065/201] chore(op-acceptance-tests): op-acceptor v3.10.2 (#19430) Updates op-acceptor to [v3.10.2](https://github.com/ethereum-optimism/infra/releases/tag/op-acceptor%2Fv3.10.2) --- mise.toml | 2 +- op-acceptance-tests/justfile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/mise.toml b/mise.toml index 3de35501acc7a..0c666d9b25dbc 100644 --- a/mise.toml +++ b/mise.toml @@ -41,7 +41,7 @@ anvil = "1.2.3" codecov-uploader = "0.8.0" goreleaser-pro = "2.11.2" kurtosis = "1.8.1" -op-acceptor = "op-acceptor/v3.10.1" +op-acceptor = "op-acceptor/v3.10.2" git-cliff = "2.12.0" # Fake dependencies diff --git a/op-acceptance-tests/justfile b/op-acceptance-tests/justfile index 34faa2e14f808..306134ae94a40 100644 --- a/op-acceptance-tests/justfile +++ b/op-acceptance-tests/justfile @@ -1,6 +1,6 @@ REPO_ROOT := `realpath ..` # path to the root of the optimism monorepo KURTOSIS_DIR := REPO_ROOT + "/kurtosis-devnet" -ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.10.1") +ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.10.2") DOCKER_REGISTRY := env_var_or_default("DOCKER_REGISTRY", "us-docker.pkg.dev/oplabs-tools-artifacts/images") ACCEPTOR_IMAGE := env_var_or_default("ACCEPTOR_IMAGE", DOCKER_REGISTRY + "/op-acceptor:" + ACCEPTOR_VERSION) From dfe3ce775a3dc6a1f5349c4c54d62c58b51701bd Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:29:31 -0500 Subject: [PATCH 066/201] fix(ci): retry L1 archive RPC call in print-pinned-block-number (#19421) * fix(ci): retry L1 archive RPC call in print-pinned-block-number The L1 archive RPC endpoint (ci-mainnet-l1-archive.optimism.io) returns transient 5xx errors occasionally, killing all contracts-bedrock-coverage and contracts-bedrock-tests-upgrade CI jobs 
simultaneously (5+ jobs fail from a single 30-second outage). Adding 3 retries with a 5s delay makes the step resilient to brief outages with minimal wall-clock cost. Co-Authored-By: Claude Sonnet 4.6 * fix(ci): use exponential backoff in print-pinned-block-number retry Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: smartcontracts Co-authored-by: Claude Sonnet 4.6 --- packages/contracts-bedrock/justfile | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 4465b7dcd1711..608871b8c31c0 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -94,6 +94,7 @@ test-dev *ARGS: build-go-ffi # Calculates and prints the pinned block number for the current ETH_RPC_URL. # Uses the most recent Sunday at 00:00 UTC as the target timestamp. +# Retries up to 3 times with a 5-second delay to handle transient RPC failures. print-pinned-block-number: #!/usr/bin/env bash set -euo pipefail @@ -103,7 +104,19 @@ print-pinned-block-number: h=$(date -u +%H); m=$(date -u +%M); s=$(date -u +%S) secs_since_midnight=$(( 10#$h * 3600 + 10#$m * 60 + 10#$s )) sunday_midnight=$(( now - secs_since_midnight - dow * 86400 )) - cast find-block "$sunday_midnight" --rpc-url $ETH_RPC_URL + delay=5 + for attempt in 1 2 3; do + if cast find-block "$sunday_midnight" --rpc-url $ETH_RPC_URL; then + exit 0 + fi + if [ "$attempt" -lt 3 ]; then + echo "Attempt $attempt failed, retrying in ${delay}s..." >&2 + sleep "$delay" + delay=$(( delay * 2 )) + fi + done + echo "All attempts to fetch pinned block number failed" >&2 + exit 1 # Prepares the environment for upgrade path variant of contract tests and coverage. 
# Env Vars: From c48932131098503d54953f2242e524457bc11d61 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Fri, 6 Mar 2026 10:44:56 -0500 Subject: [PATCH 067/201] op-reth: bump reth/op-reth to v1.11.1 and MSRV to 1.92 (#19292) * ci: add cannon-builder image to Docker CI builds Add the kona cannon-builder (Rust MIPS64r1 toolchain) image to the branch and tag Docker build workflows. This publishes the image to the shared artifact registry so it can be consumed by prestate builds. Co-Authored-By: Claude Opus 4.6 * rust: bump op-reth to v1.11.1 and update cannon build infrastructure Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 Co-authored-by: geoknee --- .circleci/continue/rust-ci.yml | 14 +- ops/docker/op-stack-go/Dockerfile | 2 +- rust/Cargo.lock | 1143 ++++++++--------- rust/Cargo.toml | 146 +-- rust/alloy-op-evm/Cargo.toml | 2 +- rust/alloy-op-hardforks/Cargo.toml | 2 +- rust/clippy.toml | 2 +- rust/kona/CLAUDE.md | 4 +- rust/kona/README.md | 2 +- rust/kona/bin/client/justfile | 104 -- .../docker/apps/kona_app_generic.dockerfile | 2 +- rust/kona/docker/cannon/cannon.dockerfile | 3 +- .../fpvm-prestates/cannon-repro.dockerfile | 4 +- rust/kona/justfile | 22 +- rust/op-reth/DockerfileOp | 2 +- rust/op-reth/README.md | 2 +- rust/op-reth/bin/Cargo.toml | 2 +- rust/op-reth/crates/chainspec/Cargo.toml | 2 +- rust/op-reth/crates/cli/Cargo.toml | 2 +- rust/op-reth/crates/consensus/Cargo.toml | 2 +- rust/op-reth/crates/evm/Cargo.toml | 2 +- rust/op-reth/crates/exex/Cargo.toml | 2 +- rust/op-reth/crates/flashblocks/Cargo.toml | 2 +- rust/op-reth/crates/hardforks/Cargo.toml | 2 +- rust/op-reth/crates/node/Cargo.toml | 2 +- .../crates/node/tests/e2e-testsuite/p2p.rs | 3 + rust/op-reth/crates/payload/Cargo.toml | 2 +- rust/op-reth/crates/primitives/Cargo.toml | 2 +- rust/op-reth/crates/reth/Cargo.toml | 2 +- rust/op-reth/crates/rpc/Cargo.toml | 2 +- rust/op-reth/crates/storage/Cargo.toml | 2 +- 
rust/op-reth/crates/trie/Cargo.toml | 2 +- rust/op-reth/crates/txpool/Cargo.toml | 2 +- rust/rust-toolchain.toml | 2 +- 34 files changed, 677 insertions(+), 816 deletions(-) diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index ecb1e732f4021..bdbeb97bd2451 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -813,11 +813,11 @@ jobs: $L2_CHAIN_ID cargo llvm-cov report --lcov --output-path client_host_cov.lcov - # Kona Rust CI - Lint (cannon/asterisc targets) + # Kona Rust CI - Lint (cannon target) kona-cargo-lint: parameters: target: - description: The lint target (native, cannon, asterisc) + description: The lint target (native, cannon) type: string machine: image: <> @@ -842,7 +842,7 @@ jobs: kona-build-fpvm: parameters: target: - description: The build target (cannon-client, asterisc-client) + description: The build target (cannon-client) type: string machine: image: <> @@ -1119,7 +1119,7 @@ workflows: - rust-build-binary: name: rust-msrv directory: rust - toolchain: "1.88.0" + toolchain: "1.92.0" context: *rust-ci-context # ----------------------------------------------------------------------- @@ -1130,7 +1130,7 @@ workflows: directory: rust command: | just check-no-std - toolchain: "1.88.0" + toolchain: "1.92.0" context: *rust-ci-context - rust-ci-cargo-hack-build: @@ -1175,14 +1175,14 @@ workflows: name: kona-lint-<> matrix: parameters: - target: ["cannon", "asterisc"] + target: ["cannon"] context: *rust-ci-context - kona-build-fpvm: name: kona-build-fpvm-<> matrix: parameters: - target: ["cannon-client", "asterisc-client"] + target: ["cannon-client"] context: *rust-ci-context - kona-coverage: diff --git a/ops/docker/op-stack-go/Dockerfile b/ops/docker/op-stack-go/Dockerfile index 45e9cb74df439..0d4d5de6c20bc 100644 --- a/ops/docker/op-stack-go/Dockerfile +++ b/ops/docker/op-stack-go/Dockerfile @@ -201,7 +201,7 @@ RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache 
op-interop-mon/op-interop-mon # The Rust version must match rust/rust-toolchain.toml. We don't use "latest" to ensure reproducibility -FROM --platform=$BUILDPLATFORM rust:1.88 AS kona-host-builder +FROM --platform=$BUILDPLATFORM rust:1.92 AS kona-host-builder ARG TARGETARCH # Install build dependencies and cross-compilation toolchains RUN apt-get update && apt-get install -y --no-install-recommends \ diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 86c0a2a92b3c7..350a96df58604 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -121,9 +121,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e4ff99651d46cef43767b5e8262ea228cd05287409ccb0c947cc25e70a952f9" +checksum = "b0c0dc44157867da82c469c13186015b86abef209bf0e41625e4b68bac61d728" dependencies = [ "alloy-eips", "alloy-primitives", @@ -149,9 +149,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a0701b0eda8051a2398591113e7862f807ccdd3315d0b441f06c2a0865a379b" +checksum = "ba4cdb42df3871cd6b346d6a938ec2ba69a9a0f49d1f82714bc5c48349268434" dependencies = [ "alloy-consensus", "alloy-eips", @@ -240,9 +240,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "def1626eea28d48c6cc0a6f16f34d4af0001906e4f889df6c660b39c86fd044d" +checksum = "b9f7ef09f21bd1e9cb8a686f168cb4a206646804567f0889eadb8dcc4c9288c8" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -289,9 +289,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55d9d1aba3f914f0e8db9e4616ae37f3d811426d95bdccf44e47d0605ab202f6" +checksum = 
"7c9cf3b99f46615fbf7dc1add0c96553abb7bf88fc9ec70dfbe7ad0b47ba7fe8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -330,9 +330,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e57586581f2008933241d16c3e3f633168b3a5d2738c5c42ea5246ec5e0ef17a" +checksum = "ff42cd777eea61f370c0b10f2648a1c81e0b783066cd7269228aa993afd487f7" dependencies = [ "alloy-primitives", "alloy-sol-types", @@ -345,9 +345,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b36c2a0ed74e48851f78415ca5b465211bd678891ba11e88fee09eac534bab1" +checksum = "8cbca04f9b410fdc51aaaf88433cbac761213905a65fe832058bcf6690585762" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -371,9 +371,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "636c8051da58802e757b76c3b65af610b95799f72423dc955737dec73de234fd" +checksum = "42d6d15e069a8b11f56bef2eccbad2a873c6dd4d4c81d04dda29710f5ea52f04" dependencies = [ "alloy-consensus", "alloy-eips", @@ -444,9 +444,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3dd56e2eafe8b1803e325867ac2c8a4c73c9fb5f341ffd8347f9344458c5922" +checksum = "d181c8cc7cf4805d7e589bf4074d56d55064fa1a979f005a45a62b047616d870" dependencies = [ "alloy-chains", "alloy-consensus", @@ -489,9 +489,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eebf54983d4fccea08053c218ee5c288adf2e660095a243d0532a8070b43955" +checksum = 
"e8bd82953194dec221aa4cbbbb0b1e2df46066fe9d0333ac25b43a311e122d13" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -528,14 +528,14 @@ checksum = "ce8849c74c9ca0f5a03da1c865e3eb6f768df816e67dd3721a398a8a7e398011" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "alloy-rpc-client" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "91577235d341a1bdbee30a463655d08504408a4d51e9f72edbfc5a622829f402" +checksum = "f2792758a93ae32a32e9047c843d536e1448044f78422d71bf7d7c05149e103f" dependencies = [ "alloy-json-rpc", "alloy-primitives", @@ -559,9 +559,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79cff039bf01a17d76c0aace3a3a773d5f895eb4c68baaae729ec9da9e86c99c" +checksum = "7bdcbf9dfd5eea8bfeb078b1d906da8cd3a39c4d4dbe7a628025648e323611f6" dependencies = [ "alloy-primitives", "alloy-rpc-types-debug", @@ -573,9 +573,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-admin" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "564afceae126df73b95f78c81eb46e2ef689a45ace0fcdaf5c9a178693a5ccca" +checksum = "42325c117af3a9e49013f881c1474168db57978e02085fc9853a1c89e0562740" dependencies = [ "alloy-genesis", "alloy-primitives", @@ -585,9 +585,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22250cf438b6a3926de67683c08163bfa1fd1efa47ee9512cbcd631b6b0243c" +checksum = "e0a3100b76987c1b1dc81f3abe592b7edc29e92b1242067a69d65e0030b35cf9" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -597,9 +597,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.6.3" +version = "1.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "73234a141ecce14e2989748c04fcac23deee67a445e2c4c167cfb42d4dacd1b6" +checksum = "dd720b63f82b457610f2eaaf1f32edf44efffe03ae25d537632e7d23e7929e1a" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -608,9 +608,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-beacon" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "625af0c3ebd3c31322edb1fb6b8e3e518acc39e164ed07e422eaff05310ff2fa" +checksum = "4a22e13215866f5dfd5d3278f4c41f1fad9410dc68ce39022f58593c873c26f8" dependencies = [ "alloy-eips", "alloy-primitives", @@ -628,9 +628,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779f70ab16a77e305571881b07a9bc6b0068ae6f962497baf5762825c5b839fb" +checksum = "e1b21e1ad18ff1b31ff1030e046462ab8168cf8894e6778cd805c8bdfe2bd649" dependencies = [ "alloy-primitives", "derive_more", @@ -640,9 +640,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "10620d600cc46538f613c561ac9a923843c6c74c61f054828dcdb8dd18c72ec4" +checksum = "e4ac61f03f1edabccde1c687b5b25fff28f183afee64eaa2e767def3929e4457" dependencies = [ "alloy-consensus", "alloy-eips", @@ -661,9 +661,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "010e101dbebe0c678248907a2545b574a87d078d82c2f6f5d0e8e7c9a6149a10" +checksum = "9b2dc411f13092f237d2bf6918caf80977fc2f51485f9b90cb2a2f956912c8c9" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -683,9 +683,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-mev" -version = "1.6.3" +version = "1.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "375e4bf001135fe4f344db6197fafed8c2b61e99fa14d3597f44cd413f79e45b" +checksum = "fe85bf3be739126aa593dca9fb3ab13ca93fa7873e6f2247be64d7f2cb15f34a" dependencies = [ "alloy-consensus", "alloy-eips", @@ -698,9 +698,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be096f74d85e1f927580b398bf7bc5b4aa62326f149680ec0867e3c040c9aced" +checksum = "1ad79f1e27e161943b5a4f99fe5534ef0849876214be411e0032c12f38e94daa" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -712,9 +712,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "14ab75189fbc29c5dd6f0bc1529bccef7b00773b458763f4d9d81a77ae4a1a2d" +checksum = "d459f902a2313737bc66d18ed094c25d2aeb268b74d98c26bbbda2aa44182ab0" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -724,9 +724,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e6d631f8b975229361d8af7b2c749af31c73b3cf1352f90e144ddb06227105e" +checksum = "e2ce1e0dbf7720eee747700e300c99aac01b1a95bb93f493a01e78ee28bb1a37" dependencies = [ "alloy-primitives", "arbitrary", @@ -736,9 +736,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97f40010b5e8f79b70bf163b38cd15f529b18ca88c4427c0e43441ee54e4ed82" +checksum = "2425c6f314522c78e8198979c8cbf6769362be4da381d4152ea8eefce383535d" dependencies = [ "alloy-primitives", "async-trait", @@ -751,9 +751,9 @@ dependencies = [ [[package]] name = "alloy-signer-local" -version = "1.6.3" +version = "1.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c4ec1cc27473819399a3f0da83bc1cef0ceaac8c1c93997696e46dc74377a58" +checksum = "c3ecb71ee53d8d9c3fa7bac17542c8116ebc7a9726c91b1bf333ec3d04f5a789" dependencies = [ "alloy-consensus", "alloy-network", @@ -779,7 +779,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -796,7 +796,7 @@ dependencies = [ "proc-macro2", "quote", "sha3", - "syn 2.0.114", + "syn 2.0.117", "syn-solidity", ] @@ -812,7 +812,7 @@ dependencies = [ "macro-string", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "syn-solidity", ] @@ -840,9 +840,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a03bb3f02b9a7ab23dacd1822fa7f69aa5c8eefcdcf57fad085e0b8d76fb4334" +checksum = "fa186e560d523d196580c48bf00f1bf62e63041f28ecf276acc22f8b27bb9f53" dependencies = [ "alloy-json-rpc", "auto_impl", @@ -863,9 +863,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ce599598ef8ebe067f3627509358d9faaa1ef94f77f834a7783cd44209ef55c" +checksum = "aa501ad58dd20acddbfebc65b52e60f05ebf97c52fa40d1b35e91f5e2da0ad0e" dependencies = [ "alloy-json-rpc", "alloy-rpc-types-engine", @@ -885,9 +885,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49963a2561ebd439549915ea61efb70a7b13b97500ec16ca507721c9d9957d07" +checksum = "c2ef85688e5ac2da72afc804e0a1f153a1f309f05a864b1998bbbed7804dbaab" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -905,9 +905,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.6.3" +version = "1.7.3" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ed38ea573c6658e0c2745af9d1f1773b1ed83aa59fbd9c286358ad469c3233a" +checksum = "b9f00445db69d63298e2b00a0ea1d859f00e6424a3144ffc5eba9c31da995e16" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -943,14 +943,14 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.6.3" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397406cf04b11ca2a48e6f81804c70af3f40a36abf648e11dc7416043eb0834d" +checksum = "6fa0c53e8c1e1ef4d01066b01c737fb62fc9397ab52c6e7bb5669f97d281b9bc" dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -962,7 +962,7 @@ dependencies = [ "itertools 0.10.5", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1032,9 +1032,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.101" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" +checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" [[package]] name = "aquamarine" @@ -1047,7 +1047,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1198,7 +1198,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "62945a2f7e6de02a31fe400aa489f0e0f5b2502e69f95f853adb82a96c7a6b60" dependencies = [ "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1236,7 +1236,7 @@ dependencies = [ "num-traits", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1325,7 +1325,7 @@ checksum = "213888f660fddcca0d257e88e54ac05bca01885f258ccdf695bafd77031bb69d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1397,7 +1397,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" 
dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -1409,14 +1409,14 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "asn1_der" -version = "0.7.6" +version = "0.7.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "155a5a185e42c6b77ac7b88a15143d930a9e9727a5b7b77eed417404ab15c247" +checksum = "4858a9d740c5007a9069007c3b4e91152d0506f13c1b31dd49051fd537656156" [[package]] name = "assert-json-diff" @@ -1442,9 +1442,9 @@ dependencies = [ [[package]] name = "async-compression" -version = "0.4.37" +version = "0.4.40" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d10e4f991a553474232bc0a31799f6d24b034a84c0971d80d2e2f78b2e576e40" +checksum = "7d67d43201f4d20c78bcda740c142ca52482d81da80681533d33bf3f0596c8e2" dependencies = [ "compression-codecs", "compression-core", @@ -1510,7 +1510,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1521,7 +1521,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1584,7 +1584,7 @@ checksum = "ffdcb70bdbc4d478427380519163274ac86e52916e10f0a8889adf0f96d3fee7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1595,9 +1595,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-lc-rs" -version = "1.15.4" +version = "1.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b7b6141e96a8c160799cc2d5adecd5cbbe5054cb8c7c4af53da0f83bb7ad256" +checksum = "d9a7b350e3bb1767102698302bc37256cbd48422809984b98d292c40e2579aa9" dependencies = [ "aws-lc-sys", 
"zeroize", @@ -1605,9 +1605,9 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.37.0" +version = "0.37.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c34dda4df7017c8db52132f0f8a2e0f8161649d15723ed63fc00c82d0f2081a" +checksum = "b092fe214090261288111db7a2b2c2118e5a7f30dc2569f1732c4069a6840549" dependencies = [ "cc", "cmake", @@ -1785,7 +1785,7 @@ version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "cexpr", "clang-sys", "itertools 0.13.0", @@ -1794,7 +1794,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -1836,9 +1836,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" +checksum = "843867be96c8daad0d758b57df9392b6d8d271134fce549de6ce169ff98a92af" dependencies = [ "arbitrary", "serde_core", @@ -1916,7 +1916,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc119a5ad34c3f459062a96907f53358989b173d104258891bb74f95d93747e8" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "boa_interner", "boa_macros", "boa_string", @@ -1933,7 +1933,7 @@ checksum = "e637ec52ea66d76b0ca86180c259d6c7bb6e6a6e14b2f36b85099306d8b00cc3" dependencies = [ "aligned-vec", "arrayvec", - "bitflags 2.10.0", + "bitflags 2.11.0", "boa_ast", "boa_gc", "boa_interner", @@ -2015,7 +2015,7 @@ dependencies = [ "cow-utils", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -2025,7 +2025,7 @@ version = "0.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"02f99bf5b684f0de946378fcfe5f38c3a0fbd51cbf83a0f39ff773a0e218541f" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "boa_ast", "boa_interner", "boa_macros", @@ -2071,7 +2071,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2136,9 +2136,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.19.1" +version = "3.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dd9dc738b7a8311c7ade152424974d8115f2cdad61e8dab8dac9f2362298510" +checksum = "5d20789868f4b01b2f2caec9f5c4e0213b41e3e5702a50157d699ae31ced2fcb" [[package]] name = "byte-slice-cast" @@ -2166,7 +2166,7 @@ checksum = "89385e82b5d1821d2219e0b095efa2cc1f246cbf99080f3be46a1a85c0d392d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2192,7 +2192,7 @@ checksum = "f9abbd1bc6865053c427f7198e6af43bfdedc55ab791faed4fbd361d789575ff" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2286,9 +2286,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.55" +version = "1.2.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" +checksum = "aebf35691d1bfb0ac386a69bac2fde4dd276fb618cf8bf4f5318fe285e821bb2" dependencies = [ "find-msvc-tools", "jobserver", @@ -2349,9 +2349,9 @@ dependencies = [ [[package]] name = "chrono" -version = "0.4.43" +version = "0.4.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fac4744fb15ae8337dc853fee7fb3f4e48c0fbaa23d0afe49c447b4fab126118" +checksum = "c673075a2e0e5f4a1dde27ce9dee1ea4558c7ffe648f576438a20ca1d2acc4b0" dependencies = [ "iana-time-zone", "js-sys", @@ -2412,9 +2412,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.58" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"63be97961acde393029492ce0be7a1af7e323e6bae9511ebfac33751be5e6806" +checksum = "2797f34da339ce31042b27d23607e051786132987f595b02ba4f6a6dffb7030a" dependencies = [ "clap_builder", "clap_derive", @@ -2422,9 +2422,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.58" +version = "4.5.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f13174bda5dfd69d7e947827e5af4b0f2f94a4a3ee92912fba07a66150f21e2" +checksum = "24a241312cea5059b13574bb9b3861cabf758b879c15190b37b6d6fd63ab6876" dependencies = [ "anstream", "anstyle", @@ -2441,7 +2441,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2614,9 +2614,9 @@ dependencies = [ [[package]] name = "compression-codecs" -version = "0.4.36" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00828ba6fd27b45a448e57dbfe84f1029d4c9f26b368157e9a448a5f49a2ec2a" +checksum = "eb7b51a7d9c967fc26773061ba86150f19c50c0d65c887cb1fbe295fd16619b7" dependencies = [ "brotli", "compression-core", @@ -2851,7 +2851,7 @@ version = "0.29.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "crossterm_winapi", "derive_more", "document-features", @@ -2934,7 +2934,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2978,7 +2978,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -2993,7 +2993,7 @@ dependencies = [ "quote", "serde", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3006,7 +3006,7 @@ dependencies = [ "proc-macro2", "quote", "strsim", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3017,7 +3017,7 @@ checksum = 
"fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3028,7 +3028,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3039,7 +3039,7 @@ checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3081,7 +3081,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ab67060fc6b8ef687992d439ca0fa36e7ed17e9a0b16b25b601e8757df720de" dependencies = [ "data-encoding", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3127,9 +3127,9 @@ dependencies = [ [[package]] name = "deranged" -version = "0.5.5" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ececcb659e7ba858fb4f10388c250a7252eb0a27373f1a72b8748afdd248e587" +checksum = "7cd812cc2bc1d69d4764bd80df88b4317eaef9e773c75226407d9bc0876b211c" dependencies = [ "powerfmt", "serde_core", @@ -3154,7 +3154,7 @@ checksum = "ef941ded77d15ca19b40374869ac6000af1c9f2a4c0f3d4c70926287e6364a8f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3165,7 +3165,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3186,7 +3186,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3196,7 +3196,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3218,7 +3218,7 @@ dependencies = [ "proc-macro2", 
"quote", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 2.0.117", "unicode-xid", ] @@ -3332,7 +3332,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3397,7 +3397,7 @@ checksum = "1ec431cd708430d5029356535259c5d645d60edd3d39c54e5eea9782d46caa7d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3450,7 +3450,7 @@ dependencies = [ "enum-ordinalize", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3523,7 +3523,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3543,7 +3543,7 @@ checksum = "8ca9601fb2d62598ee17836250842873a413586e5d7ed88b356e38ddbb0ec631" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3563,7 +3563,7 @@ checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3645,7 +3645,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3657,7 +3657,7 @@ dependencies = [ "darling 0.23.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -3931,7 +3931,7 @@ checksum = "6dc7a9cb3326bafb80642c5ce99b39a2c0702d4bfa8ee8a3e773791a6cbe2407" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4031,9 +4031,9 @@ checksum = "e6d5a32815ae3f33302d95fdcb2ce17862f8c65363dcfd29360480ba1001fc9c" [[package]] name = "futures" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65bc07b1a8bc7c85c5f2e110c476c7389b4554ba72af57d8445ea63a576b0876" +checksum = "8b147ee9d1f6d097cef9ce628cd2ee62288d963e16fb287bd9286455b241382d" dependencies = [ "futures-channel", "futures-core", @@ -4056,9 +4056,9 
@@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dff15bf788c671c1934e366d07e30c1814a8ef514e1af724a602e8a2fbe1b10" +checksum = "07bbe89c50d7a535e539b8c17bc0b49bdb77747034daa8087407d655f3f7cc1d" dependencies = [ "futures-core", "futures-sink", @@ -4079,27 +4079,26 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05f29059c0c2090612e8d742178b0580d2dc940c837851ad723096f87af6663e" +checksum = "7e3450815272ef58cec6d564423f6e755e25379b217b0bc688e295ba24df6b1d" [[package]] name = "futures-executor" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e28d1d997f585e54aebc3f97d39e72338912123a67330d723fdbb564d646c9f" +checksum = "baf29c38818342a3b26b5b923639e7b1f4a61fc5e76102d4b1981c6dc7a7579d" dependencies = [ "futures-core", "futures-task", "futures-util", - "num_cpus", ] [[package]] name = "futures-io" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e5c1b78ca4aae1ac06c48a526a655760685149f0d465d21f37abfe57ce075c6" +checksum = "cecba35d7ad927e23624b22ad55235f2239cfa44fd10428eecbeba6d6a717718" [[package]] name = "futures-lite" @@ -4116,13 +4115,13 @@ dependencies = [ [[package]] name = "futures-macro" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" +checksum = "e835b70203e41293343137df5c0664546da5745f82ec9b84d40be8336958447b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -4138,15 +4137,15 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.31" +version = "0.3.32" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "e575fab7d1e0dcb8d0c7bcf9a63ee213816ab51902e6d244a95819acacf1d4f7" +checksum = "c39754e157331b013978ec91992bde1ac089843443c49cbc7f46150b0fad0893" [[package]] name = "futures-task" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f90f7dce0722e95104fcb095585910c0977252f286e354b5e3bd38902cd99988" +checksum = "037711b3d59c33004d3856fbdc83b99d4ff37a24768fa1be9ce3538a1cde4393" [[package]] name = "futures-timer" @@ -4160,9 +4159,9 @@ dependencies = [ [[package]] name = "futures-util" -version = "0.3.31" +version = "0.3.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fa08315bb612088cc391249efdc3bc77536f16c91f6cf495e6fbe85b20a4a81" +checksum = "389ca41296e6190b48053de0321d02a77f32f8a5d2461dd38762c0593805c6d6" dependencies = [ "futures-channel", "futures-core", @@ -4172,7 +4171,6 @@ dependencies = [ "futures-task", "memchr", "pin-project-lite", - "pin-utils", "slab", ] @@ -4270,7 +4268,7 @@ version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7b88256088d75a56f8ecfa070513a775dd9107f6530ef14919dac831af9cfe2b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "libc", "libgit2-sys", "log", @@ -4903,9 +4901,9 @@ dependencies = [ [[package]] name = "id-arena" -version = "2.2.1" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25a2bc672d1148e28034f176e01fffebb08b35768468cc954630da77a1449005" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" [[package]] name = "ident_case" @@ -5015,7 +5013,7 @@ checksum = "a0eb5a3343abf848c0984fe4604b2b105da9539376e24fc0a3b0007411ae4fd9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5083,7 +5081,7 @@ version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"f37dccff2791ab604f9babef0ba14fbe0be30bd368dc541e2b08d07c8aa908f3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "inotify-sys", "libc", ] @@ -5117,7 +5115,7 @@ dependencies = [ "indoc", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5131,9 +5129,9 @@ dependencies = [ [[package]] name = "interprocess" -version = "2.3.0" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b00d05442c2106c75b7410f820b152f61ec0edc7befcb9b381b673a20314753" +checksum = "6be5e5c847dbdb44564bd85294740d031f4f8aeb3464e5375ef7141f7538db69" dependencies = [ "doctest-file", "futures-core", @@ -5236,9 +5234,9 @@ checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" [[package]] name = "jemalloc_pprof" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74ff642505c7ce8d31c0d43ec0e235c6fd4585d9b8172d8f9dd04d36590200b5" +checksum = "8a0d44c349cfe2654897fadcb9de4f0bfbf48288ec344f700b2bd59f152dd209" dependencies = [ "anyhow", "libc", @@ -5285,9 +5283,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.85" +version = "0.3.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c942ebf8e95485ca0d52d97da7c5a2c387d0e7f0ba4c35e93bfcaee045955b3" +checksum = "c7e709f3e3d22866f9c25b3aff01af289b18422cc8b4262fb19103ee80fe513d" dependencies = [ "once_cell", "wasm-bindgen", @@ -5397,7 +5395,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -5508,9 +5506,9 @@ dependencies = [ [[package]] name = "keccak" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecc2af9a1119c51f12a14607e783cb977bde58bc069ff0c3da1095e635d70654" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" dependencies = [ "cpufeatures", ] @@ -6336,7 +6334,7 @@ dependencies = [ 
"kona-std-fpvm", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6541,9 +6539,9 @@ checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.181" +version = "0.2.182" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5" +checksum = "6800badb6cb2082ffd7b6a67e6125bb39f18782f793520caee8cb8846be06112" [[package]] name = "libgit2-sys" @@ -6882,7 +6880,7 @@ checksum = "dd297cf53f0cb3dee4d2620bb319ae47ef27c702684309f682bdb7e55a18ae9c" dependencies = [ "heck", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -6967,9 +6965,9 @@ version = "0.1.12" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "libc", - "redox_syscall 0.7.0", + "redox_syscall 0.7.1", ] [[package]] @@ -7006,7 +7004,7 @@ version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f4de44e98ddbf09375cbf4d17714d18f39195f4f4894e8524501726fd9a8a4a" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -7027,9 +7025,9 @@ dependencies = [ [[package]] name = "linux-raw-sys" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" +checksum = "32a66949e030da00e8c7d4434b251670a91556f4144941d37452769c25d58a53" [[package]] name = "litemap" @@ -7144,14 +7142,14 @@ checksum = "1b27834086c65ec3f9387b096d66e99f221cf081c2b738042aa252bcd41204e3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "mappings" -version = "0.7.1" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"db4d277bb50d4508057e7bddd7fcd19ef4a4cc38051b6a5a36868d75ae2cbeb9" +checksum = "8bab1e61a4b76757edb59cd81fcaa7f3ba9018d43b527d9abfad877b4c6c60f2" dependencies = [ "anyhow", "libc", @@ -7168,7 +7166,7 @@ checksum = "757aee279b8bdbb9f9e676796fd459e4207a1f986e87886700abf589f5abf771" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -7194,9 +7192,9 @@ checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "memmap2" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744133e4a0e0a658e1374cf3bf8e415c4052a15a111acd372764c55b4177d490" +checksum = "714098028fe011992e1c3962653c96b2d578c4b4bce9036e15ff220319b1e0e3" dependencies = [ "libc", ] @@ -7222,13 +7220,13 @@ dependencies = [ [[package]] name = "metrics-derive" -version = "0.1.1" +version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a87f4b19620e4c561f7b48f5e6ca085b1780def671696a6a3d9d0c137360ec" +checksum = "161ab904c2c62e7bda0f7562bf22f96440ca35ff79e66c800cbac298f2f4f5ec" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -7401,7 +7399,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -7422,7 +7420,7 @@ checksum = "59b43b4fd69e3437618106f7754f34021b831a514f9e1a98ae863cabcd8d8dad" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -7523,22 +7521,22 @@ checksum = "4568f25ccbd45ab5d5603dc34318c1ec56b117531781260002151b8530a9f931" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "native-tls" -version = "0.2.14" +version = "0.2.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" +checksum = "465500e14ea162429d264d44189adc38b199b62b1c21eea9f69e4b73cb03bbf2" 
dependencies = [ "libc", "log", "openssl", - "openssl-probe 0.1.6", + "openssl-probe", "openssl-sys", "schannel", - "security-framework 2.11.1", + "security-framework", "security-framework-sys", "tempfile", ] @@ -7649,7 +7647,7 @@ version = "8.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d3d07927151ff8575b7087f245456e549fea62edf0ec4e565a5ee50c8402bc3" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "fsevent-sys", "inotify", "kqueue", @@ -7667,14 +7665,14 @@ version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42b8cfee0e339a0337359f3c88165702ac6e600dc01c0cc9579a92d62b08477a" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] name = "ntapi" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c70f219e21142367c70c0b30c6a9e3a14d55b4d12a204d897fbec83a0363f081" +checksum = "c3b335231dfd352ffb0f8017f3b6027a4917f7df785ea2143d8af2adc66980ae" dependencies = [ "winapi", ] @@ -7798,7 +7796,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -7831,7 +7829,7 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -8005,7 +8003,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.11.0" +version = "1.11.1" dependencies = [ "clap", "reth-cli-util", @@ -8044,7 +8042,7 @@ version = "0.10.75" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08838db121398ad17ab8531ce9de97b244589089e290a384c900cb9ff7434328" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "cfg-if", "foreign-types", "libc", @@ -8061,15 +8059,9 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.114", + "syn 2.0.117", ] -[[package]] -name = "openssl-probe" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" - [[package]] name = "openssl-probe" version = "0.2.1" @@ -8202,7 +8194,7 @@ dependencies = [ "reqwest 0.12.28", "thiserror 2.0.18", "tokio", - "tonic 0.14.3", + "tonic 0.14.5", "tracing", ] @@ -8230,7 +8222,7 @@ dependencies = [ "opentelemetry 0.31.0", "opentelemetry_sdk 0.31.0", "prost 0.14.3", - "tonic 0.14.3", + "tonic 0.14.5", "tonic-prost", ] @@ -8351,7 +8343,7 @@ dependencies = [ "proc-macro-crate", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8475,7 +8467,7 @@ dependencies = [ "phf_shared", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8504,7 +8496,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8632,16 +8624,16 @@ checksum = "439ee305def115ba05938db6eb1644ff94165c5ab5e9420d1c1bcedbba909391" [[package]] name = "pprof_util" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4429d44e5e2c8a69399fc0070379201eed018e3df61e04eb7432811df073c224" +checksum = "eea0cc524de808a6d98d192a3d99fe95617031ad4a52ec0a0f987ef4432e8fe1" dependencies = [ "anyhow", "backtrace", "flate2", "num", "paste", - "prost 0.13.5", + "prost 0.14.3", ] [[package]] @@ -8655,9 +8647,9 @@ dependencies = [ [[package]] name = "predicates" -version = "3.1.3" +version = "3.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5d19ee57562043d37e82899fade9a22ebab7be9cef5026b07fda9cdd4293573" +checksum = "ada8f2932f28a27ee7b70dd6c1c39ea0675c55a36879ab92f3a715eaa1e63cfe" dependencies = [ "anstyle", "predicates-core", @@ -8665,15 +8657,15 @@ dependencies = [ [[package]] name = 
"predicates-core" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "727e462b119fe9c93fd0eb1429a5f7647394014cf3c04ab2c0350eeb09095ffa" +checksum = "cad38746f3166b4031b1a0d39ad9f954dd291e7854fcc0eed52ee41a0b50d144" [[package]] name = "predicates-tree" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72dd2d6d381dfb73a193c7fca536518d7caee39fc8503f74e7dc0be0531b425c" +checksum = "d0de1b847b39c8131db0467e9df1ff60e6d0562ab8e9a16e568ad0fdb372e2f2" dependencies = [ "predicates-core", "termtree", @@ -8696,7 +8688,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8747,7 +8739,7 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8765,7 +8757,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "25485360a54d6861439d60facef26de713b1e126bf015ec8f98239467a2b82f7" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "chrono", "flate2", "procfs-core", @@ -8778,7 +8770,7 @@ version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6401bf7b6af22f78b563665d15a22e9aef27775b79b149a66ca022468a4e405" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "chrono", "hex", ] @@ -8803,7 +8795,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8814,7 +8806,7 @@ checksum = "37566cb3fdacef14c0737f9546df7cfeadbfbc9fef10991038bf5015d0c80532" dependencies = [ "bit-set", "bit-vec", - "bitflags 2.10.0", + "bitflags 2.11.0", "num-traits", "rand 0.9.2", "rand_chacha 0.9.0", @@ -8843,7 +8835,7 @@ checksum 
= "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8876,7 +8868,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8889,7 +8881,7 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -8909,7 +8901,7 @@ checksum = "7347867d0a7e1208d93b46767be83e2b8f978c3dad35f775ac8d8847551d6fe1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9133,9 +9125,9 @@ dependencies = [ [[package]] name = "rapidhash" -version = "4.2.2" +version = "4.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71ec30b38a417407efe7676bad0ca6b78f995f810185ece9af3bd5dc561185a9" +checksum = "b5e48930979c155e2f33aa36ab3119b5ee81332beb6482199a8ecd6029b80b59" dependencies = [ "rand 0.9.2", "rustversion", @@ -9159,7 +9151,7 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5ef8dea09a92caaf73bff7adb70b76162e5937524058a7e5bff37869cbbec293" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "compact_str", "hashbrown 0.16.1", "indoc", @@ -9191,7 +9183,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d7dbfa023cd4e604c2553483820c5fe8aa9d71a42eea5aa77c6e7f35756612db" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "hashbrown 0.16.1", "indoc", "instability", @@ -9210,7 +9202,7 @@ version = "11.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "498cd0dc59d73224351ee52a95fee0f1a617a2eae0e7d9d720cc622c73a54186" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -9258,16 +9250,16 @@ version = "0.5.18" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"ed2bf2547551a7053d6fdfafda3f938979645c44812fbfcda098faae3f1a362d" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] name = "redox_syscall" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27" +checksum = "35985aa610addc02e24fc232012c86fd11f14111180f902b67e2d5331f8ebf2b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", ] [[package]] @@ -9309,7 +9301,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -9446,8 +9438,8 @@ checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth-basic-payload-builder" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9470,8 +9462,8 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9502,8 +9494,8 @@ dependencies = [ [[package]] name = "reth-chainspec" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9522,8 +9514,8 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.11.0" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-genesis", "clap", @@ -9536,8 +9528,8 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9622,8 +9614,8 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "reth-tasks", "tokio", @@ -9632,8 +9624,8 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9652,8 +9644,8 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9672,18 +9664,18 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "reth-config" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "eyre", "humantime-serde", @@ -9698,8 +9690,8 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9711,8 +9703,8 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9723,8 +9715,8 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9749,8 +9741,8 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ 
"alloy-primitives", "derive_more", @@ -9776,8 +9768,8 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9805,8 +9797,8 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9835,8 +9827,8 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9850,8 +9842,8 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9875,8 +9867,8 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9899,8 +9891,8 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = 
"1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "dashmap", @@ -9923,8 +9915,8 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9958,8 +9950,8 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10016,8 +10008,8 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "aes", "alloy-primitives", @@ -10044,8 +10036,8 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10068,8 +10060,8 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.11.0" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10093,8 +10085,8 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "futures", "pin-project", @@ -10115,8 +10107,8 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eip7928", @@ -10172,8 +10164,8 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -10200,8 +10192,8 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10215,8 +10207,8 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" 
+version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "bytes", @@ -10231,8 +10223,8 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10253,8 +10245,8 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -10264,8 +10256,8 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10293,8 +10285,8 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10317,8 +10309,8 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -10358,8 +10350,8 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "clap", "eyre", @@ -10381,8 +10373,8 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10397,8 +10389,8 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10415,8 +10407,8 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -10429,8 +10421,8 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10458,8 +10450,8 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10478,8 +10470,8 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "rayon", "reth-db-api", @@ -10488,8 +10480,8 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10512,8 +10504,8 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10534,8 +10526,8 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ 
"alloy-evm", "alloy-primitives", @@ -10547,8 +10539,8 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10565,8 +10557,8 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10603,8 +10595,8 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "eyre", @@ -10635,8 +10627,8 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10649,8 +10641,8 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "serde", "serde_json", @@ -10659,8 +10651,8 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version 
= "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10687,8 +10679,8 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "bytes", "futures", @@ -10707,10 +10699,10 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "byteorder", "dashmap", "derive_more", @@ -10723,8 +10715,8 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "bindgen", "cc", @@ -10732,8 +10724,8 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "futures", "metrics", @@ -10744,8 +10736,8 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.11.0" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "ipnet", @@ -10753,8 +10745,8 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "futures-util", "if-addrs 0.14.0", @@ -10767,8 +10759,8 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10824,8 +10816,8 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10849,8 +10841,8 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10872,8 +10864,8 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = 
"1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10887,8 +10879,8 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -10901,8 +10893,8 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "anyhow", "bincode 1.3.3", @@ -10918,8 +10910,8 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -10942,8 +10934,8 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11011,8 +11003,8 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11066,8 +11058,8 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-network", @@ -11104,8 +11096,8 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11128,8 +11120,8 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11152,8 +11144,8 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "bytes", "eyre", @@ -11181,8 +11173,8 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ 
"reth-chainspec", "reth-db-api", @@ -11193,7 +11185,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-primitives", "reth-chainspec", @@ -11234,7 +11226,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -11262,7 +11254,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11312,7 +11304,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-chains", "alloy-consensus", @@ -11343,7 +11335,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11372,7 +11364,7 @@ dependencies = [ [[package]] name = "reth-optimism-exex" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11399,7 +11391,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11444,7 +11436,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -11454,7 +11446,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11521,7 +11513,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11560,7 +11552,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11587,7 +11579,7 @@ dependencies = [ 
[[package]] name = "reth-optimism-rpc" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11658,7 +11650,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "reth-codecs", @@ -11670,7 +11662,7 @@ dependencies = [ [[package]] name = "reth-optimism-trie" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11715,7 +11707,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.11.0" +version = "1.11.1" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11753,8 +11745,8 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11774,8 +11766,8 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "pin-project", "reth-payload-primitives", @@ -11786,8 +11778,8 @@ dependencies = [ [[package]] name = "reth-payload-primitives" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11809,8 +11801,8 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.11.0" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11819,8 +11811,8 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -11829,8 +11821,8 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "once_cell", @@ -11842,8 +11834,8 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11876,8 +11868,8 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11921,8 +11913,8 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.11.0" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11950,8 +11942,8 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "arbitrary", @@ -11966,8 +11958,8 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "reth-primitives-traits", @@ -11979,8 +11971,8 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -12056,8 +12048,8 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eip7928", "alloy-eips", @@ -12086,8 +12078,8 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = 
"1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-network", "alloy-provider", @@ -12127,8 +12119,8 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-evm", @@ -12151,8 +12143,8 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12181,8 +12173,8 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -12225,8 +12217,8 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12273,8 +12265,8 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-rpc-types-engine", "http", @@ -12287,8 +12279,8 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12303,8 +12295,8 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12353,8 +12345,8 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12380,8 +12372,8 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "arbitrary", @@ -12394,8 +12386,8 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" 
dependencies = [ "alloy-primitives", "parking_lot", @@ -12414,8 +12406,8 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "clap", @@ -12429,8 +12421,8 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12453,8 +12445,8 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12470,8 +12462,8 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "auto_impl", "dyn-clone", @@ -12488,8 +12480,8 @@ dependencies = [ [[package]] name = "reth-testing-utils" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12504,8 +12496,8 @@ dependencies = [ [[package]] name 
= "reth-tokio-util" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "tokio", "tokio-stream", @@ -12514,8 +12506,8 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "clap", "eyre", @@ -12533,8 +12525,8 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "clap", "eyre", @@ -12551,8 +12543,8 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12560,7 +12552,7 @@ dependencies = [ "alloy-rlp", "aquamarine", "auto_impl", - "bitflags 2.10.0", + "bitflags 2.11.0", "futures-util", "metrics", "parking_lot", @@ -12597,8 +12589,8 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12623,8 +12615,8 @@ dependencies = [ 
[[package]] name = "reth-trie-common" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -12650,8 +12642,8 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "metrics", @@ -12670,8 +12662,8 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -12695,8 +12687,8 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -12714,8 +12706,8 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.11.0" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.0#564ffa586845fa4a8bb066f0c7b015ff36b26c08" +version = "1.11.1" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "zstd", ] @@ -12926,7 +12918,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"311720d4f0f239b041375e7ddafdbd20032a33b7bae718562ea188e188ed9fd3" dependencies = [ "alloy-eip7928", - "bitflags 2.10.0", + "bitflags 2.11.0", "revm-bytecode", "revm-primitives", "serde", @@ -12998,7 +12990,7 @@ checksum = "8100bb34c0a1d0f907143db3149e6b4eea3c33b9ee8b189720168e818303986f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -13180,7 +13172,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.114", + "syn 2.0.117", "unicode-ident", ] @@ -13296,11 +13288,11 @@ dependencies = [ [[package]] name = "rustix" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "146c9e247ccc180c1f61615433868c99f3de3ae256a30a43b49f67c2d9171f34" +checksum = "b6fe4565b9518b83ef4f91bb47ce29620ca828bd32cb7e408f0062e9930ba190" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "errno", "libc", "linux-raw-sys", @@ -13329,10 +13321,10 @@ version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63" dependencies = [ - "openssl-probe 0.2.1", + "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.5.1", + "security-framework", ] [[package]] @@ -13360,7 +13352,7 @@ dependencies = [ "rustls-native-certs", "rustls-platform-verifier-android", "rustls-webpki", - "security-framework 3.5.1", + "security-framework", "security-framework-sys", "webpki-root-certs 0.26.11", "windows-sys 0.59.0", @@ -13415,9 +13407,9 @@ dependencies = [ [[package]] name = "ryu" -version = "1.0.22" +version = "1.0.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "ryu-js" @@ -13563,24 +13555,11 @@ dependencies = [ [[package]] name = "security-framework" -version = 
"2.11.1" +version = "3.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" +checksum = "b7f4bc775c73d9a02cde8bf7b2ec4c9d12743edf609006c7facc23998404cd1d" dependencies = [ - "bitflags 2.10.0", - "core-foundation 0.9.4", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework" -version = "3.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3297343eaf830f66ede390ea39da1d462b6b0c1b000f420d0a83f898bbbe6ef" -dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -13589,9 +13568,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.15.0" +version = "2.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" +checksum = "6ce2691df843ecc5d231c0b14ece2acc3efb62c0a398c7e1d875f3983ce020e3" dependencies = [ "core-foundation-sys", "libc", @@ -13679,7 +13658,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -13714,7 +13693,7 @@ checksum = "175ee3e80ae9982737ca543e96133087cbd9a485eecc3bc4de9c1a37b47ea59c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -13766,7 +13745,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -13781,9 +13760,9 @@ dependencies = [ [[package]] name = "serial_test" -version = "3.3.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d0b343e184fc3b7bb44dff0705fffcf4b3756ba6aff420dddd8b24ca145e555" +checksum = "911bd979bf1070a3f3aa7b691a3b3e9968f339ceeec89e08c280a8a22207a32f" dependencies = [ "futures-executor", 
"futures-util", @@ -13796,13 +13775,13 @@ dependencies = [ [[package]] name = "serial_test_derive" -version = "3.3.1" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" +checksum = "0a7d91949b85b0d2fb687445e448b40d322b6b3e4af6b44a29b21d9a5f33e6d9" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -13858,9 +13837,9 @@ dependencies = [ [[package]] name = "shellexpand" -version = "3.1.1" +version = "3.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b1fdf65dd6331831494dd616b30351c38e96e45921a27745cf98490458b90bb" +checksum = "32824fab5e16e6c4d86dc1ba84489390419a39f97699852b66480bb87d297ed8" dependencies = [ "dirs", ] @@ -13947,9 +13926,9 @@ dependencies = [ [[package]] name = "simple_asn1" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" +checksum = "0d585997b0ac10be3c5ee635f1bab02d512760d14b7c468801ac8a01d9ae5f1d" dependencies = [ "num-bigint", "num-traits", @@ -14120,7 +14099,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -14142,9 +14121,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.114" +version = "2.0.117" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" +checksum = "e665b8803e7b1d2a727f4023456bbbbe74da67099c585258af0ad9c5013b9b99" dependencies = [ "proc-macro2", "quote", @@ -14160,7 +14139,7 @@ dependencies = [ "paste", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -14180,14 +14159,14 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] 
name = "sysinfo" -version = "0.38.1" +version = "0.38.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5792d209c2eac902426c0c4a166c9f72147db453af548cf9bf3242644c4d4fe3" +checksum = "1efc19935b4b66baa6f654ac7924c192f55b175c00a7ab72410fc24284dacda8" dependencies = [ "libc", "memchr", @@ -14203,7 +14182,7 @@ version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c879d448e9d986b661742763247d3693ed13609438cf3d006f51f5368a5ba6b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -14214,7 +14193,7 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a13f3d0daba03132c0aa9767f98351b3488edc2c100cda2d2ec2b04f3d8d3c8b" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "core-foundation 0.9.4", "system-configuration-sys", ] @@ -14250,7 +14229,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -14297,7 +14276,7 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "715f9a4586706a61c571cb5ee1c3ac2bbb2cf63e15bce772307b95befef5f5ee" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "log", "num-traits", ] @@ -14339,7 +14318,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -14350,7 +14329,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "test-case-core", ] @@ -14395,7 +14374,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -14406,7 +14385,7 @@ checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 
2.0.117", ] [[package]] @@ -14553,7 +14532,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -14671,9 +14650,9 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.7+spec-1.1.0" +version = "1.0.9+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "247eaa3197818b831697600aadf81514e577e0cba5eab10f7e064e78ae154df1" +checksum = "702d4415e08923e7e1ef96cd5727c0dfed80b4d2fa25db9647fe5eb6f7c5a4c4" dependencies = [ "winnow", ] @@ -14716,9 +14695,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a286e33f82f8a1ee2df63f4fa35c0becf4a85a0cb03091a15fd7bf0b402dc94a" +checksum = "fec7c61a0695dc1887c1b53952990f3ad2e3a31453e1f49f10e75424943a93ec" dependencies = [ "async-trait", "base64 0.22.1", @@ -14742,13 +14721,13 @@ dependencies = [ [[package]] name = "tonic-prost" -version = "0.14.3" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c55a2d6a14174563de34409c9f92ff981d006f56da9c6ecd40d9d4a31500b0" +checksum = "a55376a0bbaa4975a3f10d009ad763d8f4108f067c7c2e74f3001fb49778d309" dependencies = [ "bytes", "prost 0.14.3", - "tonic 0.14.3", + "tonic 0.14.5", ] [[package]] @@ -14799,7 +14778,7 @@ checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8" dependencies = [ "async-compression", "base64 0.22.1", - "bitflags 2.10.0", + "bitflags 2.11.0", "bytes", "futures-core", "futures-util", @@ -14866,7 +14845,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -15068,7 +15047,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -15180,9 +15159,9 @@ 
checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" -version = "1.0.23" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" +checksum = "e6e4313cd5fcd3dad5cafa179702e2b244f760991f45397d14d4ebf38247da75" [[package]] name = "unicode-segmentation" @@ -15286,11 +15265,11 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.20.0" +version = "1.21.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee48d38b119b0cd71fe4141b30f5ba9c7c5d9f4e7a3a8b4a674e4b6ef789976f" +checksum = "b672338555252d43fd2240c714dc444b8c6fb0a5c5335e65a07bba7742735ddb" dependencies = [ - "getrandom 0.3.4", + "getrandom 0.4.1", "js-sys", "wasm-bindgen", ] @@ -15368,7 +15347,7 @@ checksum = "d674d135b4a8c1d7e813e2f8d1c9a58308aee4a680323066025e53132218bd91" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -15425,9 +15404,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.108" +version = "0.2.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64024a30ec1e37399cf85a7ffefebdb72205ca1c972291c51512360d90bd8566" +checksum = "ec1adf1535672f5b7824f817792b1afd731d7e843d2d04ec8f27e8cb51edd8ac" dependencies = [ "cfg-if", "once_cell", @@ -15438,9 +15417,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.58" +version = "0.4.61" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70a6e77fd0ae8029c9ea0063f87c46fde723e7d887703d74ad2616d792e51e6f" +checksum = "fe88540d1c934c4ec8e6db0afa536876c5441289d7f9f9123d4f065ac1250a6b" dependencies = [ "cfg-if", "futures-util", @@ -15452,9 +15431,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.108" +version = "0.2.111" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "008b239d9c740232e71bd39e8ef6429d27097518b6b30bdf9086833bd5b6d608" +checksum = "19e638317c08b21663aed4d2b9a2091450548954695ff4efa75bff5fa546b3b1" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -15462,22 +15441,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.108" +version = "0.2.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5256bae2d58f54820e6490f9839c49780dff84c65aeab9e772f15d5f0e913a55" +checksum = "2c64760850114d03d5f65457e96fc988f11f01d38fbaa51b254e4ab5809102af" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.108" +version = "0.2.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f01b580c9ac74c8d8f0c0e4afb04eeef2acf145458e52c03845ee9cd23e3d12" +checksum = "60eecd4fe26177cfa3339eb00b4a36445889ba3ad37080c2429879718e20ca41" dependencies = [ "unicode-ident", ] @@ -15523,7 +15502,7 @@ version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ - "bitflags 2.10.0", + "bitflags 2.11.0", "hashbrown 0.15.5", "indexmap 2.13.0", "semver 1.0.27", @@ -15545,9 +15524,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.85" +version = "0.3.88" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "312e32e551d92129218ea9a2452120f4aabc03529ef03e4d0d82fb2780608598" +checksum = "9d6bb20ed2d9572df8584f6dc81d68a41a625cadc6f15999d649a70ce7e3597a" dependencies = [ "js-sys", "wasm-bindgen", @@ -15569,14 +15548,14 @@ version = "0.26.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "75c7f0ef91146ebfb530314f5f1d24528d7f0767efbfd31dce919275413e393e" dependencies = [ - "webpki-root-certs 1.0.5", + 
"webpki-root-certs 1.0.6", ] [[package]] name = "webpki-root-certs" -version = "1.0.5" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36a29fc0408b113f68cf32637857ab740edfafdf460c326cd2afaa2d84cc05dc" +checksum = "804f18a4ac2676ffb4e8b5b5fa9ae38af06df08162314f96a68d2a363e21a8ca" dependencies = [ "rustls-pki-types", ] @@ -15709,7 +15688,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -15720,7 +15699,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -16132,7 +16111,7 @@ dependencies = [ "heck", "indexmap 2.13.0", "prettyplease", - "syn 2.0.114", + "syn 2.0.117", "wasm-metadata", "wit-bindgen-core", "wit-component", @@ -16148,7 +16127,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "wit-bindgen-core", "wit-bindgen-rust", ] @@ -16160,7 +16139,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" dependencies = [ "anyhow", - "bitflags 2.10.0", + "bitflags 2.11.0", "indexmap 2.13.0", "log", "serde", @@ -16355,7 +16334,7 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -16376,7 +16355,7 @@ checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -16396,7 +16375,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", "synstructure", ] @@ -16417,7 +16396,7 @@ checksum = 
"85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] @@ -16451,14 +16430,14 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.114", + "syn 2.0.117", ] [[package]] name = "zmij" -version = "1.0.20" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4de98dfa5d5b7fef4ee834d0073d560c9ca7b6c46a71d058c48db7960f8cfaf7" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" [[package]] name = "zstd" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index a2e0e80b9d75b..58623202e7f1b 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -1,7 +1,7 @@ [workspace.package] edition = "2024" license = "MIT OR Apache-2.0" -rust-version = "1.88" +rust-version = "1.92" authors = ["Op Stack Contributors"] homepage = "https://github.com/ethereum-optimism/optimism" repository = "https://github.com/ethereum-optimism/optimism" @@ -307,78 +307,78 @@ alloy-op-evm = { version = "0.26.3", path = "alloy-op-evm/", default-features = alloy-op-hardforks = { version = "0.4.7", path = "alloy-op-hardforks/", default-features = false } # ==================== RETH CRATES (from git rev 564ffa58 / main) ==================== -reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-basic-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-cli-runner = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-cli-util = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-consensus-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-downloaders = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-e2e-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-engine-local = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-eth-wire = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-ethereum-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-ethereum-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } 
-reth-execution-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-execution-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-fs-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-network = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-node-events = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-payload-builder-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-payload-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-payload-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-payload-validator = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-prune = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.11.0", default-features = false } -reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-rpc-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-rpc-server-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-stages = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-static-file = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-static-file-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-storage-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0", default-features = false } -reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.0" } -reth-zstd-compressors = { git = "https://github.com/paradigmxyz/reth", 
tag = "v1.11.0", default-features = false } +reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-basic-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-cli-runner = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-cli-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-consensus-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-downloaders = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-e2e-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-engine-local = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-eth-wire = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-ethereum-cli = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.11.1" } +reth-ethereum-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-execution-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-execution-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-fs-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-network = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-node-events = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-payload-builder-primitives = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-payload-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-payload-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-payload-validator = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-prune = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-rpc-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-rpc-server-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-stages = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-static-file = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-static-file-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-storage-errors = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } +reth-zstd-compressors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } # ==================== REVM (latest: op-reth versions) ==================== revm = { version = "34.0.0", default-features = false } diff --git a/rust/alloy-op-evm/Cargo.toml b/rust/alloy-op-evm/Cargo.toml index 593a6ff2a569e..5b3ab782027af 100644 --- a/rust/alloy-op-evm/Cargo.toml +++ b/rust/alloy-op-evm/Cargo.toml @@ -4,7 +4,7 @@ description = "OP EVM implementation" version = "0.26.3" edition = "2021" -rust-version = "1.88" +rust-version = "1.92" authors = ["Alloy Contributors", "OpLabsPBC"] license.workspace = true homepage = "https://github.com/ethereum-optimism/optimism" diff --git a/rust/alloy-op-hardforks/Cargo.toml b/rust/alloy-op-hardforks/Cargo.toml index 1181af6068783..338c707e4d5c2 100644 --- a/rust/alloy-op-hardforks/Cargo.toml +++ b/rust/alloy-op-hardforks/Cargo.toml @@ -4,7 +4,7 @@ description = "Bindings for named OP hardforks" version = "0.4.7" edition = "2024" -rust-version = "1.88" +rust-version = "1.92" authors = ["Alloy Contributors", "OpLabsPBC"] license.workspace = true homepage = "https://github.com/ethereum-optimism/optimism" diff --git a/rust/clippy.toml b/rust/clippy.toml index 1e75cb34f32eb..2b69e81286ba7 100644 --- a/rust/clippy.toml +++ b/rust/clippy.toml @@ -1,4 +1,4 @@ -msrv = "1.88" +msrv = "1.92" 
too-large-for-stack = 128 doc-valid-idents = [ "P2P", diff --git a/rust/kona/CLAUDE.md b/rust/kona/CLAUDE.md index 5e40a1b76a605..7353709db423b 100644 --- a/rust/kona/CLAUDE.md +++ b/rust/kona/CLAUDE.md @@ -12,7 +12,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co - Documentation: `just test-docs` ## Code Style -- MSRV: 1.88 +- MSRV: 1.92 - Format with nightly rustfmt: `cargo +nightly fmt` - Imports: organized by crate, reordered automatically - Error handling: use proper error types, prefer `Result` over panics @@ -73,7 +73,7 @@ Kona is a monorepo for OP Stack types, components, and services built in Rust. T 4. **Monorepo Integration**: Pins and integrates with the Optimism monorepo for action tests ### Key Configuration Files -- `rust-toolchain.toml`: Pins Rust version to 1.88 +- `rust-toolchain.toml`: Pins Rust version to 1.92 - `rustfmt.toml`: Custom formatting configuration with crate-level import grouping - `clippy.toml`: MSRV configuration for clippy - `deny.toml`: Dependency auditing and license compliance diff --git a/rust/kona/README.md b/rust/kona/README.md index ea3bd328e9fab..9da6052d2d24a 100644 --- a/rust/kona/README.md +++ b/rust/kona/README.md @@ -107,7 +107,7 @@ see the [SDK section of the docs](https://rollup.yoga/node/design/intro). ## MSRV -The current MSRV (minimum supported rust version) is `1.88`. +The current MSRV (minimum supported rust version) is `1.92`. The MSRV is not increased automatically, and will be updated only as part of a patch (pre-1.0) or minor (post-1.0) release. diff --git a/rust/kona/bin/client/justfile b/rust/kona/bin/client/justfile index 1d2a2656699c1..3aaf2b3c45be9 100644 --- a/rust/kona/bin/client/justfile +++ b/rust/kona/bin/client/justfile @@ -6,65 +6,6 @@ KONA_CLIENT_ROOT := source_directory() default: @just --list -# Run the client program on asterisc with the host in detached server mode. 
-run-client-asterisc block_number l1_rpc l1_beacon_rpc l2_rpc rollup_node_rpc verbosity='': - #!/usr/bin/env bash - - L1_NODE_ADDRESS="{{l1_rpc}}" - L1_BEACON_ADDRESS="{{l1_beacon_rpc}}" - L2_NODE_ADDRESS="{{l2_rpc}}" - OP_NODE_ADDRESS="{{rollup_node_rpc}}" - - HOST_BIN_PATH="{{KONA_CLIENT_ROOT}}/../../../target/release/kona-host" - CLIENT_BIN_PATH="{{KONA_CLIENT_ROOT}}/../../../target/riscv64imac-unknown-none-elf/release-client-lto/kona-client" - STATE_PATH="{{KONA_CLIENT_ROOT}}/../../state.bin.gz" - - CLAIMED_L2_BLOCK_NUMBER={{block_number}} - echo "Fetching configuration for block #$CLAIMED_L2_BLOCK_NUMBER..." - - # Get output root for block - CLAIMED_L2_OUTPUT_ROOT=$(cast rpc --rpc-url $OP_NODE_ADDRESS "optimism_outputAtBlock" $(cast 2h $CLAIMED_L2_BLOCK_NUMBER) | jq -r .outputRoot) - - # Get the info for the previous block - AGREED_L2_OUTPUT_ROOT=$(cast rpc --rpc-url $OP_NODE_ADDRESS "optimism_outputAtBlock" $(cast 2h $((CLAIMED_L2_BLOCK_NUMBER - 1))) | jq -r .outputRoot) - AGREED_L2_HEAD_HASH=$(cast block --rpc-url $L2_NODE_ADDRESS $((CLAIMED_L2_BLOCK_NUMBER - 1)) --json | jq -r .hash) - L1_ORIGIN_NUM=$(cast rpc --rpc-url $OP_NODE_ADDRESS "optimism_outputAtBlock" $(cast 2h $((CLAIMED_L2_BLOCK_NUMBER - 1))) | jq -r .blockRef.l1origin.number) - L1_HEAD=$(cast block --rpc-url $L1_NODE_ADDRESS $((L1_ORIGIN_NUM + 30)) --json | jq -r .hash) - L2_CHAIN_ID=$(cast chain-id --rpc-url $L2_NODE_ADDRESS) - - # Move to the kona root - cd {{KONA_CLIENT_ROOT}}/../.. - - echo "Building client program for RISC-V target..." - just build-asterisc-client - - echo "Loading client program into Asterisc state format..." - asterisc load-elf --path=$CLIENT_BIN_PATH - - echo "Building host program for native target..." 
- cargo build --bin kona-host --release - - echo "Running asterisc" - asterisc run \ - --info-at '%10000000' \ - --proof-at never \ - --input $STATE_PATH \ - -- \ - $HOST_BIN_PATH \ - single \ - --l1-head $L1_HEAD \ - --agreed-l2-head-hash $AGREED_L2_HEAD_HASH \ - --claimed-l2-output-root $CLAIMED_L2_OUTPUT_ROOT \ - --agreed-l2-output-root $AGREED_L2_OUTPUT_ROOT \ - --claimed-l2-block-number $CLAIMED_L2_BLOCK_NUMBER \ - --l2-chain-id $L2_CHAIN_ID \ - --l1-node-address $L1_NODE_ADDRESS \ - --l1-beacon-address $L1_BEACON_ADDRESS \ - --l2-node-address $L2_NODE_ADDRESS \ - --server \ - --data-dir ./data \ - {{verbosity}} - # Run the client program natively with the host program attached. run-client-native block_number l1_rpc l1_beacon_rpc l2_rpc rollup_node_rpc rollup_config_path='' verbosity='': #!/usr/bin/env bash @@ -140,51 +81,6 @@ run-client-native-offline block_number l2_claim l2_output_root l2_head l1_head l --data-dir ./data \ {{verbosity}} -# Run the client program on asterisc with the host program detached, in offline mode. -run-client-asterisc-offline block_number l2_claim l2_output_root l2_head l1_head l2_chain_id verbosity='': - #!/usr/bin/env bash - - HOST_BIN_PATH="{{KONA_CLIENT_ROOT}}/../../../target/debug/kona-host" - CLIENT_BIN_PATH="{{KONA_CLIENT_ROOT}}/../../../target/riscv64imac-unknown-none-elf/release-client-lto/kona-client" - STATE_PATH="{{KONA_CLIENT_ROOT}}/../../state.bin.gz" - - CLAIMED_L2_BLOCK_NUMBER={{block_number}} - CLAIMED_L2_OUTPUT_ROOT={{l2_claim}} - AGREED_L2_OUTPUT_ROOT={{l2_output_root}} - AGREED_L2_HEAD_HASH={{l2_head}} - L1_HEAD={{l1_head}} - L2_CHAIN_ID={{l2_chain_id}} - - # Move to the kona root - cd {{KONA_CLIENT_ROOT}}/../.. - - echo "Building client program for RISC-V target..." - just build-asterisc-client - - echo "Loading client program into Asterisc state format..." - asterisc load-elf --path=$CLIENT_BIN_PATH - - echo "Building host program for native target..." 
- cargo build --bin kona-host - - echo "Running asterisc" - asterisc run \ - --info-at '%10000000' \ - --proof-at never \ - --input $STATE_PATH \ - -- \ - $HOST_BIN_PATH \ - single \ - --l1-head $L1_HEAD \ - --agreed-l2-head-hash $AGREED_L2_HEAD_HASH \ - --claimed-l2-output-root $CLAIMED_L2_OUTPUT_ROOT \ - --agreed-l2-output-root $AGREED_L2_OUTPUT_ROOT \ - --claimed-l2-block-number $CLAIMED_L2_BLOCK_NUMBER \ - --l2-chain-id $L2_CHAIN_ID \ - --server \ - --data-dir ./data \ - {{verbosity}} - # Run the client program on cannon with the host in detached server mode. run-client-cannon block_number l1_rpc l1_beacon_rpc l2_rpc rollup_node_rpc rollup_config_path='' verbosity='': #!/usr/bin/env bash diff --git a/rust/kona/docker/apps/kona_app_generic.dockerfile b/rust/kona/docker/apps/kona_app_generic.dockerfile index 1bc5b6289f72d..fe744381d2be9 100644 --- a/rust/kona/docker/apps/kona_app_generic.dockerfile +++ b/rust/kona/docker/apps/kona_app_generic.dockerfile @@ -18,7 +18,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ pkg-config # Install rust -ENV RUST_VERSION=1.88 +ENV RUST_VERSION=1.92 RUN curl https://sh.rustup.rs -sSf | bash -s -- -y --default-toolchain ${RUST_VERSION} --profile minimal ENV PATH="/root/.cargo/bin:${PATH}" diff --git a/rust/kona/docker/cannon/cannon.dockerfile b/rust/kona/docker/cannon/cannon.dockerfile index 1ee23f2117763..14e6135d45b55 100644 --- a/rust/kona/docker/cannon/cannon.dockerfile +++ b/rust/kona/docker/cannon/cannon.dockerfile @@ -3,8 +3,7 @@ FROM ubuntu:22.04 ENV SHELL=/bin/bash ENV DEBIAN_FRONTEND=noninteractive -# todo: pin `nightly` version -ENV RUST_VERSION=nightly +ENV RUST_VERSION=nightly-2026-02-20 RUN apt-get update && apt-get install --assume-yes --no-install-recommends \ ca-certificates \ diff --git a/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile b/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile index 5f4d90596e2d6..0c69197ee30d4 100644 --- 
a/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile +++ b/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile @@ -33,7 +33,7 @@ RUN cd /optimism/cannon && \ # Build kona-client from local source # ################################################################ -FROM ghcr.io/op-rs/kona/cannon-builder:0.3.0 AS client-build +FROM us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon-builder:v1.0.0 AS client-build SHELL ["/bin/bash", "-c"] ARG CLIENT_BIN @@ -49,7 +49,7 @@ ENV KONA_CUSTOM_CONFIGS_DIR=/usr/local/kona-custom-configs # Build kona-client RUN cd kona && \ - cargo build -Zbuild-std=core,alloc -p kona-client --bin $CLIENT_BIN --locked --profile release-client-lto && \ + cargo build -Zbuild-std=core,alloc -Zjson-target-spec -p kona-client --bin $CLIENT_BIN --locked --profile release-client-lto && \ mv ./target/mips64-unknown-none/release-client-lto/$CLIENT_BIN /kona-client-elf ################################################################ diff --git a/rust/kona/justfile b/rust/kona/justfile index b9110a3f7cfb6..eee5570d9c24c 100644 --- a/rust/kona/justfile +++ b/rust/kona/justfile @@ -71,7 +71,7 @@ benches: cargo bench --no-run --workspace --features test-utils --exclude example-gossip --exclude example-discovery # Lint the workspace for all available targets -lint-all: lint-native lint-cannon lint-asterisc lint-docs lint-typos +lint-all: lint-native lint-cannon lint-docs lint-typos # Check spelling with typos (`cargo install typos-cli`) lint-typos: @@ -97,19 +97,10 @@ lint-native: fmt-native-check lint-docs lint-cannon: docker run \ --rm \ - -e RUSTUP_TOOLCHAIN=nightly \ -v {{KONA_ROOT}}/../:/workdir \ -w="/workdir" \ - ghcr.io/op-rs/kona/cannon-builder:0.3.0 cargo clippy -p kona-std-fpvm --all-features -Zbuild-std=core,alloc -- -D warnings + us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon-builder:v1.0.0 cargo clippy -p kona-std-fpvm --all-features -Zbuild-std=core,alloc -Zjson-target-spec -- -D warnings -# Lint the workspace (risc-v 
arch). Currently, only the `kona-std-fpvm` crate is linted for the `asterisc` target, as it is the only crate with architecture-specific code. -lint-asterisc: - docker run \ - --rm \ - -e RUSTUP_TOOLCHAIN=nightly \ - -v {{KONA_ROOT}}/../:/workdir \ - -w="/workdir" \ - ghcr.io/op-rs/kona/asterisc-builder:0.3.0 cargo clippy -p kona-std-fpvm --all-features -Zbuild-std=core,alloc -- -D warnings # Lint the Rust documentation lint-docs: @@ -130,15 +121,8 @@ build-cannon-client: --rm \ -v {{KONA_ROOT}}/../:/workdir \ -w="/workdir" \ - ghcr.io/op-rs/kona/cannon-builder:0.3.0 cargo build -Zbuild-std=core,alloc -p kona-client --bin kona-client --profile release-client-lto + us-docker.pkg.dev/oplabs-tools-artifacts/images/cannon-builder:v1.0.0 cargo build -Zbuild-std=core,alloc -Zjson-target-spec -p kona-client --bin kona-client --profile release-client-lto -# Build `kona-client` for the `asterisc` target. -build-asterisc-client: - docker run \ - --rm \ - -v {{KONA_ROOT}}/../:/workdir \ - -w="/workdir" \ - ghcr.io/op-rs/kona/asterisc-builder:0.3.0 cargo build -Zbuild-std=core,alloc -p kona-client --bin kona-client --profile release-client-lto # Check for unused dependencies in the crate graph. check-udeps: diff --git a/rust/op-reth/DockerfileOp b/rust/op-reth/DockerfileOp index d09aa6705245c..999d94e4406fb 100644 --- a/rust/op-reth/DockerfileOp +++ b/rust/op-reth/DockerfileOp @@ -1,4 +1,4 @@ -FROM lukemathwalker/cargo-chef:latest-rust-1 AS chef +FROM lukemathwalker/cargo-chef:latest-rust-1.92 AS chef WORKDIR /app LABEL org.opencontainers.image.source=https://github.com/paradigmxyz/reth diff --git a/rust/op-reth/README.md b/rust/op-reth/README.md index 3539a7e2a0616..a6d82c189e749 100644 --- a/rust/op-reth/README.md +++ b/rust/op-reth/README.md @@ -79,7 +79,7 @@ When updating this, also update: - .github/workflows/lint.yml --> -The Minimum Supported Rust Version (MSRV) of this project is [1.88.0](https://blog.rust-lang.org/2025/06/26/Rust-1.88.0/). 
+The Minimum Supported Rust Version (MSRV) of this project is [1.92.0](https://blog.rust-lang.org/2025/08/07/Rust-1.92.0/). See the docs for detailed instructions on how to [build from source](https://reth.rs/installation/source/). diff --git a/rust/op-reth/bin/Cargo.toml b/rust/op-reth/bin/Cargo.toml index e3cb2e67f8b1a..37ca6c20df29b 100644 --- a/rust/op-reth/bin/Cargo.toml +++ b/rust/op-reth/bin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "op-reth" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/chainspec/Cargo.toml b/rust/op-reth/crates/chainspec/Cargo.toml index 390bd9b2a9da5..e218e5cb9d3a6 100644 --- a/rust/op-reth/crates/chainspec/Cargo.toml +++ b/rust/op-reth/crates/chainspec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-chainspec" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/cli/Cargo.toml b/rust/op-reth/crates/cli/Cargo.toml index 8a62b69f4e882..729974f192b91 100644 --- a/rust/op-reth/crates/cli/Cargo.toml +++ b/rust/op-reth/crates/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-cli" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/consensus/Cargo.toml b/rust/op-reth/crates/consensus/Cargo.toml index 428116516392b..9d9a8bbf392c7 100644 --- a/rust/op-reth/crates/consensus/Cargo.toml +++ b/rust/op-reth/crates/consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-consensus" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/evm/Cargo.toml b/rust/op-reth/crates/evm/Cargo.toml index cb8f589c1ef20..fa69e1188d858 100644 --- a/rust/op-reth/crates/evm/Cargo.toml +++ b/rust/op-reth/crates/evm/Cargo.toml @@ -1,6 
+1,6 @@ [package] name = "reth-optimism-evm" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/exex/Cargo.toml b/rust/op-reth/crates/exex/Cargo.toml index 67216d39de61e..9ad570d2800a8 100644 --- a/rust/op-reth/crates/exex/Cargo.toml +++ b/rust/op-reth/crates/exex/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-exex" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/flashblocks/Cargo.toml b/rust/op-reth/crates/flashblocks/Cargo.toml index 58be6ea0349dd..ddd793c309d97 100644 --- a/rust/op-reth/crates/flashblocks/Cargo.toml +++ b/rust/op-reth/crates/flashblocks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-flashblocks" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/hardforks/Cargo.toml b/rust/op-reth/crates/hardforks/Cargo.toml index a6fe343a4dbc1..70a95fafb334e 100644 --- a/rust/op-reth/crates/hardforks/Cargo.toml +++ b/rust/op-reth/crates/hardforks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-forks" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/node/Cargo.toml b/rust/op-reth/crates/node/Cargo.toml index ca8684a9969eb..f9c43ea759c17 100644 --- a/rust/op-reth/crates/node/Cargo.toml +++ b/rust/op-reth/crates/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-node" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/node/tests/e2e-testsuite/p2p.rs b/rust/op-reth/crates/node/tests/e2e-testsuite/p2p.rs index d66aa3e753384..ff39eddb59630 100644 --- a/rust/op-reth/crates/node/tests/e2e-testsuite/p2p.rs +++ 
b/rust/op-reth/crates/node/tests/e2e-testsuite/p2p.rs @@ -3,6 +3,9 @@ use reth_optimism_node::utils::{advance_chain, setup}; use std::sync::Arc; use tokio::sync::Mutex; +// TODO(#19298): re-enable once reth releases a version including +// https://github.com/paradigmxyz/reth/pull/22505 +#[ignore] #[tokio::test] async fn can_sync() -> eyre::Result<()> { reth_tracing::init_test_tracing(); diff --git a/rust/op-reth/crates/payload/Cargo.toml b/rust/op-reth/crates/payload/Cargo.toml index 38014ce21684b..775375d2b472b 100644 --- a/rust/op-reth/crates/payload/Cargo.toml +++ b/rust/op-reth/crates/payload/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-payload-builder" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/primitives/Cargo.toml b/rust/op-reth/crates/primitives/Cargo.toml index 99e7841e0789a..2fd6b889b57f6 100644 --- a/rust/op-reth/crates/primitives/Cargo.toml +++ b/rust/op-reth/crates/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-primitives" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/reth/Cargo.toml b/rust/op-reth/crates/reth/Cargo.toml index 495a9e6ff44ab..0636f01a7af2d 100644 --- a/rust/op-reth/crates/reth/Cargo.toml +++ b/rust/op-reth/crates/reth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-op" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/rpc/Cargo.toml b/rust/op-reth/crates/rpc/Cargo.toml index d807407f2e6f1..5777562808ef6 100644 --- a/rust/op-reth/crates/rpc/Cargo.toml +++ b/rust/op-reth/crates/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-rpc" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git 
a/rust/op-reth/crates/storage/Cargo.toml b/rust/op-reth/crates/storage/Cargo.toml index 3f0a834e95d93..d11679f35256b 100644 --- a/rust/op-reth/crates/storage/Cargo.toml +++ b/rust/op-reth/crates/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-storage" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/trie/Cargo.toml b/rust/op-reth/crates/trie/Cargo.toml index e4be1c916674f..2120d9ddc4dc8 100644 --- a/rust/op-reth/crates/trie/Cargo.toml +++ b/rust/op-reth/crates/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-trie" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/txpool/Cargo.toml b/rust/op-reth/crates/txpool/Cargo.toml index f31b76af07f22..0bd7474cbe32e 100644 --- a/rust/op-reth/crates/txpool/Cargo.toml +++ b/rust/op-reth/crates/txpool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-txpool" -version = "1.11.0" +version = "1.11.1" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/rust-toolchain.toml b/rust/rust-toolchain.toml index 2340e4187e7b3..2e7fc42a6e270 100644 --- a/rust/rust-toolchain.toml +++ b/rust/rust-toolchain.toml @@ -1,3 +1,3 @@ [toolchain] # /ops/docker/op-stack-go/Dockerfile must match this version. -channel = "1.88" +channel = "1.92" From 5416d68cf5625e73699a04b0bbfe8868ae2c09c7 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Fri, 6 Mar 2026 11:15:03 -0500 Subject: [PATCH 068/201] refactor(op-devstack): remove backward compatibility type aliases to align on single ComponentID type (#18877) * stack: remove backward compatibility type aliases for Phase 6 cleanup Complete the ID type system refactor by removing all backward compatibility code. All code now uses ComponentID directly with typed constructor functions. 
Changes: - Remove 19 type aliases from component_id.go (L1ELNodeID, L2BatcherID, etc.) - Remove Kind = ComponentKind alias - Update KindProvider interface to use ComponentKind return type - Update context.go functions to use ComponentKind instead of Kind - Fix test files using incorrect constructor patterns The type system is now fully unified: - Single ComponentID type with kind, shape, key, and chainID fields - Typed constructors (NewL2BatcherID, NewL2ELNodeID, etc.) return ComponentID - ComponentKind enum for type discrimination - Simplified Matcher[E] interface with single type parameter * fix(op-devstack): drop misplaced kona tests and finalize componentid cleanup * fix(op-devstack): restore Go build after ComponentID refactor --- .../conductor/leadership_transfer_test.go | 2 +- .../tests/batcher/init_test.go | 2 +- .../tests/batcher/throttling/init_test.go | 2 +- .../reqressyncdisabled/clsync/init_test.go | 2 +- .../divergence/divergence_test.go | 2 +- .../reqressyncdisabled/elsync/init_test.go | 2 +- .../depreqres/reqressyncdisabled/init_test.go | 2 +- .../syncmodereqressync/clsync/init_test.go | 2 +- .../syncmodereqressync/elsync/init_test.go | 2 +- op-acceptance-tests/tests/fusaka/init_test.go | 2 +- .../tests/interop/seqwindow/init_test.go | 2 +- .../sync/multisupervisor_interop/init_test.go | 2 +- .../tests/sync/clsync/gap_clp2p/init_test.go | 2 +- .../tests/sync/elsync/gap_clp2p/init_test.go | 2 +- .../tests/sync/elsync/gap_elp2p/init_test.go | 2 +- .../tests/sync/manual/init_test.go | 2 +- .../sync_tester_elsync_multi/init_test.go | 2 +- .../sync_tester/sync_tester_hfs/init_test.go | 2 +- op-devstack/dsl/l2_cl.go | 2 +- op-devstack/dsl/l2_el.go | 2 +- op-devstack/example/init_test.go | 6 +- op-devstack/presets/cl_config.go | 12 +- .../presets/minimal_with_conductors.go | 4 +- op-devstack/presets/op_rbuilder_rules.go | 2 +- op-devstack/presets/proof.go | 2 +- op-devstack/presets/singlechain_multinode.go | 8 +- .../presets/singlechain_twoverifiers.go | 8 
+- op-devstack/presets/sync_tester_config.go | 4 +- op-devstack/presets/twol2_follow_l2.go | 8 +- op-devstack/shim/cluster.go | 6 +- op-devstack/shim/conductor.go | 6 +- op-devstack/shim/faucet.go | 6 +- op-devstack/shim/fb_ws_client.go | 6 +- op-devstack/shim/l1_cl.go | 6 +- op-devstack/shim/l1_el.go | 6 +- op-devstack/shim/l1_network.go | 66 ++-- op-devstack/shim/l2_batcher.go | 6 +- op-devstack/shim/l2_challenger.go | 6 +- op-devstack/shim/l2_cl.go | 18 +- op-devstack/shim/l2_el.go | 6 +- op-devstack/shim/l2_network.go | 230 ++++++++---- op-devstack/shim/l2_proposer.go | 6 +- op-devstack/shim/matcher.go | 31 +- op-devstack/shim/network.go | 62 +++- op-devstack/shim/op_rbuilder.go | 6 +- op-devstack/shim/rollup_boost.go | 6 +- op-devstack/shim/superchain.go | 6 +- op-devstack/shim/supervisor.go | 6 +- op-devstack/shim/sync_tester.go | 6 +- op-devstack/shim/system.go | 219 +++++++---- op-devstack/shim/test_sequencer.go | 6 +- op-devstack/stack/capabilities.go | 134 ------- op-devstack/stack/capabilities_test.go | 312 ---------------- op-devstack/stack/cluster.go | 45 +-- op-devstack/stack/component_id.go | 351 +++++------------- op-devstack/stack/component_id_test.go | 168 ++++----- op-devstack/stack/component_registry.go | 219 +++++++++++ op-devstack/stack/conductor.go | 32 +- op-devstack/stack/context.go | 14 +- op-devstack/stack/context_test.go | 41 +- op-devstack/stack/faucet.go | 65 +--- op-devstack/stack/fb_ws_client.go | 50 +-- op-devstack/stack/id.go | 217 ----------- op-devstack/stack/l1_cl.go | 65 +--- op-devstack/stack/l1_el.go | 68 +--- op-devstack/stack/l1_network.go | 61 +-- op-devstack/stack/l2_batcher.go | 65 +--- op-devstack/stack/l2_challenger.go | 65 +--- op-devstack/stack/l2_cl.go | 64 +--- op-devstack/stack/l2_el.go | 65 +--- op-devstack/stack/l2_network.go | 63 +--- op-devstack/stack/l2_proposer.go | 68 +--- op-devstack/stack/match/archive.go | 4 +- op-devstack/stack/match/core.go | 22 +- op-devstack/stack/match/engine.go | 24 +- 
op-devstack/stack/match/first.go | 36 +- op-devstack/stack/match/gate.go | 12 +- op-devstack/stack/match/gate_test.go | 6 +- op-devstack/stack/match/interop.go | 8 +- op-devstack/stack/match/labels.go | 10 +- op-devstack/stack/match/second.go | 6 +- op-devstack/stack/match/sequencer.go | 4 +- op-devstack/stack/match/util.go | 82 ++-- op-devstack/stack/match/util_test.go | 40 +- op-devstack/stack/matcher.go | 54 ++- op-devstack/stack/network.go | 4 +- op-devstack/stack/op_rbuilder.go | 67 +--- op-devstack/stack/orchestrator.go | 12 +- op-devstack/stack/registry.go | 12 +- op-devstack/stack/registry_test.go | 5 +- op-devstack/stack/rollup_boost.go | 65 +--- op-devstack/stack/superchain.go | 45 +-- op-devstack/stack/supernode.go | 57 +-- op-devstack/stack/supervisor.go | 45 +-- op-devstack/stack/sync_tester.go | 65 +--- op-devstack/stack/system.go | 10 +- op-devstack/stack/test_sequencer.go | 44 +-- op-devstack/sysext/control_plane.go | 14 +- op-devstack/sysext/l1.go | 2 +- op-devstack/sysext/l2.go | 12 +- op-devstack/sysext/system.go | 17 +- op-devstack/sysgo/add_game_type.go | 19 +- op-devstack/sysgo/cluster.go | 2 +- op-devstack/sysgo/control_plane.go | 43 +-- op-devstack/sysgo/control_plane_test.go | 6 +- op-devstack/sysgo/deployer.go | 20 +- op-devstack/sysgo/faucet.go | 36 +- op-devstack/sysgo/l1_network.go | 2 +- op-devstack/sysgo/l1_nodes.go | 37 +- op-devstack/sysgo/l1_nodes_subprocess.go | 13 +- op-devstack/sysgo/l2_batcher.go | 27 +- op-devstack/sysgo/l2_challenger.go | 54 ++- op-devstack/sysgo/l2_cl.go | 20 +- op-devstack/sysgo/l2_cl_kona.go | 31 +- op-devstack/sysgo/l2_cl_opnode.go | 33 +- op-devstack/sysgo/l2_cl_p2p_util.go | 8 +- op-devstack/sysgo/l2_cl_supernode.go | 59 +-- op-devstack/sysgo/l2_el.go | 24 +- op-devstack/sysgo/l2_el_opgeth.go | 14 +- op-devstack/sysgo/l2_el_opreth.go | 14 +- op-devstack/sysgo/l2_el_p2p_util.go | 2 +- op-devstack/sysgo/l2_el_synctester.go | 19 +- op-devstack/sysgo/l2_metrics_dashboard.go | 2 +- 
op-devstack/sysgo/l2_network.go | 4 +- .../sysgo/l2_network_superchain_registry.go | 10 +- op-devstack/sysgo/l2_proposer.go | 38 +- op-devstack/sysgo/op_rbuilder.go | 27 +- op-devstack/sysgo/orchestrator.go | 26 +- op-devstack/sysgo/orchestrator_getters.go | 48 +++ op-devstack/sysgo/rollup_boost.go | 35 +- op-devstack/sysgo/superchain.go | 2 +- op-devstack/sysgo/superroot.go | 17 +- op-devstack/sysgo/supervisor.go | 10 +- op-devstack/sysgo/supervisor_kona.go | 16 +- op-devstack/sysgo/supervisor_op.go | 12 +- op-devstack/sysgo/sync_tester.go | 6 +- op-devstack/sysgo/system.go | 206 +++++----- .../sysgo/system_singlechain_multinode.go | 8 +- .../sysgo/system_singlechain_twoverifiers.go | 11 +- op-devstack/sysgo/system_synctester.go | 12 +- op-devstack/sysgo/system_synctester_ext.go | 24 +- op-devstack/sysgo/system_test.go | 6 +- op-devstack/sysgo/system_two_l2_follow_l2.go | 8 +- op-devstack/sysgo/test_sequencer.go | 30 +- op-up/main.go | 2 +- rust/kona/tests/node/common/conductor_test.go | 2 +- rust/kona/tests/node/utils/mixed_preset.go | 136 ++++--- .../node/utils/mixed_preset_with_conductor.go | 4 +- .../tests/node/utils/test_sequencer_preset.go | 4 +- .../supervisor/presets/interop_minimal.go | 2 +- .../crates/tests/proofs/utils/preset.go | 44 +-- 151 files changed, 1798 insertions(+), 3269 deletions(-) delete mode 100644 op-devstack/stack/capabilities.go delete mode 100644 op-devstack/stack/capabilities_test.go delete mode 100644 op-devstack/stack/id.go create mode 100644 op-devstack/sysgo/orchestrator_getters.go diff --git a/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go b/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go index 7e61b8931e7fa..985d9760c2864 100644 --- a/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go +++ b/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go @@ -50,7 +50,7 @@ func TestConductorLeadershipTransfer(gt *testing.T) { idToConductor := 
make(map[string]conductorWithInfo) for _, conductor := range conductors { - conductorId := strings.TrimPrefix(conductor.String(), stack.ConductorKind.String()+"-") + conductorId := strings.TrimPrefix(conductor.String(), stack.KindConductor.String()+"-") idToConductor[conductorId] = conductorWithInfo{conductor, consensus.ServerInfo{}} } for _, memberInfo := range membership.Servers { diff --git a/op-acceptance-tests/tests/batcher/init_test.go b/op-acceptance-tests/tests/batcher/init_test.go index 1a86b3b95892d..a7f4e665550d5 100644 --- a/op-acceptance-tests/tests/batcher/init_test.go +++ b/op-acceptance-tests/tests/batcher/init_test.go @@ -17,7 +17,7 @@ func TestMain(m *testing.M) { presets.WithCompatibleTypes(compat.SysGo), presets.WithNoDiscovery(), presets.WithTimeTravel(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { cfg.Stopped = true // set the blob max size to 40_000 bytes for test purposes diff --git a/op-acceptance-tests/tests/batcher/throttling/init_test.go b/op-acceptance-tests/tests/batcher/throttling/init_test.go index 7bc65344ef9be..f27364fe952d8 100644 --- a/op-acceptance-tests/tests/batcher/throttling/init_test.go +++ b/op-acceptance-tests/tests/batcher/throttling/init_test.go @@ -18,7 +18,7 @@ func TestMain(m *testing.M) { presets.DoMain(m, presets.WithMinimal(), presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // Enable throttling with step controller for predictable behavior cfg.ThrottleConfig.LowerThreshold = 99 // > 0 enables the throttling loop. 
cfg.ThrottleConfig.UpperThreshold = 100 diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go index 6163f1cc02e09..1463cb3bbeabb 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { presets.WithCompatibleTypes(compat.SysGo), presets.WithReqRespSyncDisabled(), presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { cfg.Stopped = true })), ) diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go index ba866b76383bb..48c4b5d2f99c9 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go @@ -21,7 +21,7 @@ func TestMain(m *testing.M) { presets.WithExecutionLayerSyncOnVerifiers(), presets.WithReqRespSyncDisabled(), presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { cfg.Stopped = true })), ) diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go index 867ffcec2c547..47899a6ab8380 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { 
presets.WithCompatibleTypes(compat.SysGo), presets.WithReqRespSyncDisabled(), presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { cfg.Stopped = true })), ) diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go index 10fe7cec71886..41bc97589e3aa 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { presets.WithCompatibleTypes(compat.SysGo), presets.WithReqRespSyncDisabled(), presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { cfg.Stopped = true })), ) diff --git a/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go b/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go index 51dd93bfa3b92..b7a96209aa3cb 100644 --- a/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go +++ b/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { presets.WithCompatibleTypes(compat.SysGo), presets.WithSyncModeReqRespSync(), presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { cfg.Stopped = true })), ) diff --git a/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go b/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go index 311ed9f8a337d..b35036a13c4be 100644 --- 
a/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go +++ b/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { presets.WithCompatibleTypes(compat.SysGo), presets.WithSyncModeReqRespSync(), presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { cfg.Stopped = true })), ) diff --git a/op-acceptance-tests/tests/fusaka/init_test.go b/op-acceptance-tests/tests/fusaka/init_test.go index 509d28a58866f..61b00b94cb07e 100644 --- a/op-acceptance-tests/tests/fusaka/init_test.go +++ b/op-acceptance-tests/tests/fusaka/init_test.go @@ -24,7 +24,7 @@ func TestMain(m *testing.M) { sysgo.WithForkAtL1Offset(forks.Osaka, 0), sysgo.WithForkAtL1Offset(forks.BPO1, 1), ), - sysgo.WithBatcherOption(func(_ stack.L2BatcherID, cfg *batcher.CLIConfig) { + sysgo.WithBatcherOption(func(_ stack.ComponentID, cfg *batcher.CLIConfig) { cfg.DataAvailabilityType = flags.BlobsType cfg.TxMgrConfig.CellProofTime = 0 // Force cell proofs to be used }), diff --git a/op-acceptance-tests/tests/interop/seqwindow/init_test.go b/op-acceptance-tests/tests/interop/seqwindow/init_test.go index 1be339ff3e149..b4110b7123f69 100644 --- a/op-acceptance-tests/tests/interop/seqwindow/init_test.go +++ b/op-acceptance-tests/tests/interop/seqwindow/init_test.go @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { // Short enough that we can run the test, // long enough that the batcher can still submit something before we make things expire. presets.WithSequencingWindow(10, 30), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // Span-batches during recovery don't appear to align well with the starting-point. 
// It can be off by ~6 L2 blocks, possibly due to off-by-one // in L1 block sync considerations in batcher stop or start. diff --git a/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go b/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go index 6f052d88b4d69..5c198171bd9e2 100644 --- a/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go +++ b/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go @@ -13,7 +13,7 @@ import ( func TestMain(m *testing.M) { presets.DoMain(m, presets.WithMultiSupervisorInterop(), presets.WithLogFilter(logfilter.DefaultMute( - stack.KindSelector(stack.SupervisorKind).And(logfilter.Level(log.LevelInfo)).Show(), + stack.KindSelector(stack.KindSupervisor).And(logfilter.Level(log.LevelInfo)).Show(), logfilter.Level(log.LevelError).Show(), ))) } diff --git a/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go b/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go index 5da44bdcd9421..40343a01fa647 100644 --- a/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go +++ b/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go @@ -14,7 +14,7 @@ func TestMain(m *testing.M) { // No ELP2P, CLP2P to control the supply of unsafe payload to the CL presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true })), diff --git a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go index 75dffe931e0b2..83745675957de 100644 --- a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go +++ b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go @@ -15,7 +15,7 
@@ func TestMain(m *testing.M) { presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), presets.WithExecutionLayerSyncOnVerifiers(), presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true })), diff --git a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go index 280fdd55ad511..43dec39747583 100644 --- a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go +++ b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go @@ -14,7 +14,7 @@ func TestMain(m *testing.M) { // No ELP2P, CLP2P to control the supply of unsafe payload to the CL presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true })), diff --git a/op-acceptance-tests/tests/sync/manual/init_test.go b/op-acceptance-tests/tests/sync/manual/init_test.go index ac448c396b413..a3c8bdff18d33 100644 --- a/op-acceptance-tests/tests/sync/manual/init_test.go +++ b/op-acceptance-tests/tests/sync/manual/init_test.go @@ -14,7 +14,7 @@ func TestMain(m *testing.M) { // No ELP2P, CLP2P to control the supply of unsafe payload to the CL presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true 
})), diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go index 18563be5dfdba..6b4a82e8ddc5d 100644 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go @@ -16,7 +16,7 @@ func TestMain(m *testing.M) { presets.WithSimpleWithSyncTester(), presets.WithELSyncActive(), presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true })), diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go index 80c210859c480..f01e0f6781521 100644 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go @@ -17,7 +17,7 @@ func TestMain(m *testing.M) { presets.WithCompatibleTypes(compat.SysGo), presets.WithHardforkSequentialActivation(forks.Bedrock, forks.Jovian, 6), presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.L2BatcherID, cfg *bss.CLIConfig) { + stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { // For supporting pre-delta batches cfg.BatchType = derive.SingularBatchType // For supporting pre-Fjord batches diff --git a/op-devstack/dsl/l2_cl.go b/op-devstack/dsl/l2_cl.go index dc04bbe797470..bfd4f562015be 100644 --- a/op-devstack/dsl/l2_cl.go +++ b/op-devstack/dsl/l2_cl.go @@ -32,7 +32,7 @@ func NewL2CLNode(inner stack.L2CLNode, control stack.ControlPlane) *L2CLNode { } } -func (cl *L2CLNode) ID() stack.L2CLNodeID { +func (cl *L2CLNode) ID() stack.ComponentID { return 
cl.inner.ID() } diff --git a/op-devstack/dsl/l2_el.go b/op-devstack/dsl/l2_el.go index b7c856ce636b2..e26a7e93bb6e7 100644 --- a/op-devstack/dsl/l2_el.go +++ b/op-devstack/dsl/l2_el.go @@ -45,7 +45,7 @@ func (el *L2ELNode) Escape() stack.L2ELNode { return el.inner } -func (el *L2ELNode) ID() stack.L2ELNodeID { +func (el *L2ELNode) ID() stack.ComponentID { return el.inner.ID() } diff --git a/op-devstack/example/init_test.go b/op-devstack/example/init_test.go index 4efb1541bb309..79c83311d0e03 100644 --- a/op-devstack/example/init_test.go +++ b/op-devstack/example/init_test.go @@ -16,9 +16,9 @@ func TestMain(m *testing.M) { // Logging can be adjusted with filters globally presets.WithPkgLogFilter( logfilter.DefaultShow( // Random configuration - stack.KindSelector(stack.L2ProposerKind).Mute(), - stack.KindSelector(stack.L2BatcherKind).And(logfilter.Level(log.LevelError)).Show(), - stack.KindSelector(stack.L2CLNodeKind).Mute(), + stack.KindSelector(stack.KindL2Proposer).Mute(), + stack.KindSelector(stack.KindL2Batcher).And(logfilter.Level(log.LevelError)).Show(), + stack.KindSelector(stack.KindL2CLNode).Mute(), ), // E.g. 
allow test interactions through while keeping background resource logs quiet ), diff --git a/op-devstack/presets/cl_config.go b/op-devstack/presets/cl_config.go index 3760cb3ab9ad7..2e2fbc5168739 100644 --- a/op-devstack/presets/cl_config.go +++ b/op-devstack/presets/cl_config.go @@ -10,7 +10,7 @@ import ( func WithExecutionLayerSyncOnVerifiers() stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.VerifierSyncMode = sync.ELSync }))) } @@ -18,7 +18,7 @@ func WithExecutionLayerSyncOnVerifiers() stack.CommonOption { func WithConsensusLayerSync() stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.SequencerSyncMode = sync.CLSync cfg.VerifierSyncMode = sync.CLSync }))) @@ -27,7 +27,7 @@ func WithConsensusLayerSync() stack.CommonOption { func WithSafeDBEnabled() stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.SafeDBPath = p.TempDir() }))) } @@ -35,7 +35,7 @@ func WithSafeDBEnabled() stack.CommonOption { func WithReqRespSyncDisabled() stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.EnableReqRespSync = false cfg.UseReqRespSync = false }))) @@ -44,7 +44,7 @@ func WithReqRespSyncDisabled() stack.CommonOption { func WithSyncModeReqRespSync() stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id 
stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.UseReqRespSync = true }))) } @@ -52,7 +52,7 @@ func WithSyncModeReqRespSync() stack.CommonOption { func WithNoDiscovery() stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.NoDiscovery = true }))) } diff --git a/op-devstack/presets/minimal_with_conductors.go b/op-devstack/presets/minimal_with_conductors.go index fee4f02524057..4275b6705dc54 100644 --- a/op-devstack/presets/minimal_with_conductors.go +++ b/op-devstack/presets/minimal_with_conductors.go @@ -14,7 +14,7 @@ import ( type MinimalWithConductors struct { *Minimal - ConductorSets map[stack.L2NetworkID]dsl.ConductorSet + ConductorSets map[stack.ComponentID]dsl.ConductorSet } // TODO(#16418): shift this to a different sysgo constructor once the sysgo implementation supports conductors @@ -34,7 +34,7 @@ func NewMinimalWithConductors(t devtest.T) *MinimalWithConductors { orch := Orchestrator() orch.Hydrate(system) chains := system.L2Networks() - conductorSets := make(map[stack.L2NetworkID]dsl.ConductorSet) + conductorSets := make(map[stack.ComponentID]dsl.ConductorSet) for _, chain := range chains { chainMatcher := match.L2ChainById(chain.ID()) l2 := system.L2Network(match.Assume(t, chainMatcher)) diff --git a/op-devstack/presets/op_rbuilder_rules.go b/op-devstack/presets/op_rbuilder_rules.go index 5942d82f67f6e..5ee7d12a6697d 100644 --- a/op-devstack/presets/op_rbuilder_rules.go +++ b/op-devstack/presets/op_rbuilder_rules.go @@ -13,7 +13,7 @@ import ( func WithOPRBuilderRules(ruleContent string, refreshInterval uint64) stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalOPRBuilderNodeOption(sysgo.OPRBuilderNodeOptionFn( - func(p devtest.P, id stack.OPRBuilderNodeID, cfg *sysgo.OPRBuilderNodeConfig) { + func(p 
devtest.P, id stack.ComponentID, cfg *sysgo.OPRBuilderNodeConfig) { cfg.RulesEnabled = true // Create a fixed directory for rules config rulesDir := filepath.Join(os.TempDir(), "rules") diff --git a/op-devstack/presets/proof.go b/op-devstack/presets/proof.go index 0a2d3d30e2cf9..90ed0e2e3d680 100644 --- a/op-devstack/presets/proof.go +++ b/op-devstack/presets/proof.go @@ -75,7 +75,7 @@ func RequireRespectedGameType(gameType gameTypes.GameType) stack.CommonOption { func WithProposerGameType(gameType gameTypes.GameType) stack.CommonOption { return stack.Combine( stack.MakeCommon( - sysgo.WithProposerOption(func(id stack.L2ProposerID, cfg *ps.CLIConfig) { + sysgo.WithProposerOption(func(id stack.ComponentID, cfg *ps.CLIConfig) { cfg.DisputeGameType = uint32(gameType) }))) } diff --git a/op-devstack/presets/singlechain_multinode.go b/op-devstack/presets/singlechain_multinode.go index e51ca6ea8a1d7..61210c34bdf7a 100644 --- a/op-devstack/presets/singlechain_multinode.go +++ b/op-devstack/presets/singlechain_multinode.go @@ -40,12 +40,12 @@ func NewSingleChainMultiNodeWithoutCheck(t devtest.T) *SingleChainMultiNode { verifierCL := l2.L2CLNode(match.Assume(t, match.And( match.Not(match.WithSequencerActive(t.Ctx())), - match.Not[stack.L2CLNodeID, stack.L2CLNode](minimal.L2CL.ID()), + match.Not(stack.ByID[stack.L2CLNode](minimal.L2CL.ID())), ))) verifierEL := l2.L2ELNode(match.Assume(t, match.And( match.EngineFor(verifierCL), - match.Not[stack.L2ELNodeID, stack.L2ELNode](minimal.L2EL.ID())))) + match.Not(stack.ByID[stack.L2ELNode](minimal.L2EL.ID()))))) preset := &SingleChainMultiNode{ Minimal: *minimal, L2ELB: dsl.NewL2ELNode(verifierEL, orch.ControlPlane()), @@ -73,12 +73,12 @@ func NewSingleChainMultiNodeWithTestSeq(t devtest.T) *SingleChainMultiNodeWithTe verifierCL := l2.L2CLNode(match.Assume(t, match.And( match.Not(match.WithSequencerActive(t.Ctx())), - match.Not[stack.L2CLNodeID, stack.L2CLNode](minimal.L2CL.ID()), + 
match.Not(stack.ByID[stack.L2CLNode](minimal.L2CL.ID())), ))) verifierEL := l2.L2ELNode(match.Assume(t, match.And( match.EngineFor(verifierCL), - match.Not[stack.L2ELNodeID, stack.L2ELNode](minimal.L2EL.ID())))) + match.Not(stack.ByID[stack.L2ELNode](minimal.L2EL.ID()))))) preset := &SingleChainMultiNode{ Minimal: *minimal, L2ELB: dsl.NewL2ELNode(verifierEL, orch.ControlPlane()), diff --git a/op-devstack/presets/singlechain_twoverifiers.go b/op-devstack/presets/singlechain_twoverifiers.go index 898af17b4bc48..5b3fa9312f6cc 100644 --- a/op-devstack/presets/singlechain_twoverifiers.go +++ b/op-devstack/presets/singlechain_twoverifiers.go @@ -27,13 +27,13 @@ func NewSingleChainTwoVerifiersWithoutCheck(t devtest.T) *SingleChainTwoVerifier verifierCL := l2.L2CLNode(match.Assume(t, match.And( match.Not(match.WithSequencerActive(t.Ctx())), - match.Not[stack.L2CLNodeID, stack.L2CLNode](singleChainMultiNode.L2CL.ID()), - match.Not[stack.L2CLNodeID, stack.L2CLNode](singleChainMultiNode.L2CLB.ID()), + match.Not(stack.ByID[stack.L2CLNode](singleChainMultiNode.L2CL.ID())), + match.Not(stack.ByID[stack.L2CLNode](singleChainMultiNode.L2CLB.ID())), ))) verifierEL := l2.L2ELNode(match.Assume(t, match.And( - match.Not[stack.L2ELNodeID, stack.L2ELNode](singleChainMultiNode.L2EL.ID()), - match.Not[stack.L2ELNodeID, stack.L2ELNode](singleChainMultiNode.L2ELB.ID()), + match.Not(stack.ByID[stack.L2ELNode](singleChainMultiNode.L2EL.ID())), + match.Not(stack.ByID[stack.L2ELNode](singleChainMultiNode.L2ELB.ID())), ))) preset := &SingleChainTwoVerifiers{ SingleChainMultiNode: *singleChainMultiNode, diff --git a/op-devstack/presets/sync_tester_config.go b/op-devstack/presets/sync_tester_config.go index 08220e38e93c9..b97a32cbb2337 100644 --- a/op-devstack/presets/sync_tester_config.go +++ b/op-devstack/presets/sync_tester_config.go @@ -10,7 +10,7 @@ import ( func WithSyncTesterELInitialState(fcu eth.FCUState) stack.CommonOption { return stack.MakeCommon( 
sysgo.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn( - func(_ devtest.P, id stack.L2ELNodeID, cfg *sysgo.SyncTesterELConfig) { + func(_ devtest.P, id stack.ComponentID, cfg *sysgo.SyncTesterELConfig) { cfg.FCUState = fcu }))) } @@ -18,7 +18,7 @@ func WithSyncTesterELInitialState(fcu eth.FCUState) stack.CommonOption { func WithELSyncActive() stack.CommonOption { return stack.MakeCommon( sysgo.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn( - func(_ devtest.P, id stack.L2ELNodeID, cfg *sysgo.SyncTesterELConfig) { + func(_ devtest.P, id stack.ComponentID, cfg *sysgo.SyncTesterELConfig) { cfg.ELSyncActive = true }))) } diff --git a/op-devstack/presets/twol2_follow_l2.go b/op-devstack/presets/twol2_follow_l2.go index 2c725c4908dd3..2ed3a11ed0ae0 100644 --- a/op-devstack/presets/twol2_follow_l2.go +++ b/op-devstack/presets/twol2_follow_l2.go @@ -41,17 +41,17 @@ func NewTwoL2SupernodeFollowL2(t devtest.T, delaySeconds uint64) *TwoL2Supernode followerELBID := stack.NewL2ELNodeID("follower", l2b.ID().ChainID()) followerCLBID := stack.NewL2CLNodeID("follower", l2b.ID().ChainID()) - followerELA := l2a.L2ELNode(match.MatchElemFn[stack.L2ELNodeID, stack.L2ELNode](func(elem stack.L2ELNode) bool { + followerELA := l2a.L2ELNode(match.MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { return elem.ID() == followerELAID })) - followerCLA := l2a.L2CLNode(match.MatchElemFn[stack.L2CLNodeID, stack.L2CLNode](func(elem stack.L2CLNode) bool { + followerCLA := l2a.L2CLNode(match.MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { return elem.ID() == followerCLAID })) - followerELB := l2b.L2ELNode(match.MatchElemFn[stack.L2ELNodeID, stack.L2ELNode](func(elem stack.L2ELNode) bool { + followerELB := l2b.L2ELNode(match.MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { return elem.ID() == followerELBID })) - followerCLB := l2b.L2CLNode(match.MatchElemFn[stack.L2CLNodeID, stack.L2CLNode](func(elem stack.L2CLNode) bool { + followerCLB := 
l2b.L2CLNode(match.MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { return elem.ID() == followerCLBID })) diff --git a/op-devstack/shim/cluster.go b/op-devstack/shim/cluster.go index 8dadaf4e069c7..41e2cb3eea8f6 100644 --- a/op-devstack/shim/cluster.go +++ b/op-devstack/shim/cluster.go @@ -9,14 +9,14 @@ import ( type ClusterConfig struct { CommonConfig DependencySet depset.DependencySet - ID stack.ClusterID + ID stack.ComponentID } // presetCluster implements Cluster with preset values type presetCluster struct { commonImpl depSet depset.DependencySet - id stack.ClusterID + id stack.ComponentID } var _ stack.Cluster = (*presetCluster)(nil) @@ -30,7 +30,7 @@ func NewCluster(cfg ClusterConfig) stack.Cluster { } } -func (p *presetCluster) ID() stack.ClusterID { +func (p *presetCluster) ID() stack.ComponentID { return p.id } diff --git a/op-devstack/shim/conductor.go b/op-devstack/shim/conductor.go index e16072d775b4e..4151241ab9636 100644 --- a/op-devstack/shim/conductor.go +++ b/op-devstack/shim/conductor.go @@ -9,13 +9,13 @@ import ( type ConductorConfig struct { CommonConfig - ID stack.ConductorID + ID stack.ComponentID Client *rpc.Client } type rpcConductor struct { commonImpl - id stack.ConductorID + id stack.ComponentID client *rpc.Client api conductorRpc.API @@ -32,7 +32,7 @@ func NewConductor(cfg ConductorConfig) stack.Conductor { } } -func (r *rpcConductor) ID() stack.ConductorID { +func (r *rpcConductor) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/faucet.go b/op-devstack/shim/faucet.go index aaaedec7f934d..80f6d8858c070 100644 --- a/op-devstack/shim/faucet.go +++ b/op-devstack/shim/faucet.go @@ -9,7 +9,7 @@ import ( type FaucetConfig struct { CommonConfig - ID stack.FaucetID + ID stack.ComponentID Client client.RPC } @@ -18,7 +18,7 @@ type FaucetConfig struct { // This deconflicts funding requests by parallel tests from the same funding account. 
type presetFaucet struct { commonImpl - id stack.FaucetID + id stack.ComponentID faucetClient *sources.FaucetClient } @@ -33,7 +33,7 @@ func NewFaucet(cfg FaucetConfig) stack.Faucet { } } -func (p *presetFaucet) ID() stack.FaucetID { +func (p *presetFaucet) ID() stack.ComponentID { return p.id } diff --git a/op-devstack/shim/fb_ws_client.go b/op-devstack/shim/fb_ws_client.go index b9275497a6e47..5397c49ee9ed6 100644 --- a/op-devstack/shim/fb_ws_client.go +++ b/op-devstack/shim/fb_ws_client.go @@ -9,14 +9,14 @@ import ( type FlashblocksWSClientConfig struct { CommonConfig - ID stack.FlashblocksWSClientID + ID stack.ComponentID WsUrl string WsHeaders http.Header } type flashblocksWSClient struct { commonImpl - id stack.FlashblocksWSClientID + id stack.ComponentID wsUrl string wsHeaders http.Header } @@ -33,7 +33,7 @@ func NewFlashblocksWSClient(cfg FlashblocksWSClientConfig) stack.FlashblocksWSCl } } -func (r *flashblocksWSClient) ID() stack.FlashblocksWSClientID { +func (r *flashblocksWSClient) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/l1_cl.go b/op-devstack/shim/l1_cl.go index a8a65d8214f75..a518166d94c08 100644 --- a/op-devstack/shim/l1_cl.go +++ b/op-devstack/shim/l1_cl.go @@ -9,13 +9,13 @@ import ( type L1CLNodeConfig struct { CommonConfig - ID stack.L1CLNodeID + ID stack.ComponentID Client client.HTTP } type rpcL1CLNode struct { commonImpl - id stack.L1CLNodeID + id stack.ComponentID client apis.BeaconClient } @@ -30,7 +30,7 @@ func NewL1CLNode(cfg L1CLNodeConfig) stack.L1CLNode { } } -func (r *rpcL1CLNode) ID() stack.L1CLNodeID { +func (r *rpcL1CLNode) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/l1_el.go b/op-devstack/shim/l1_el.go index 03a0eb3a4531e..005c7e182984e 100644 --- a/op-devstack/shim/l1_el.go +++ b/op-devstack/shim/l1_el.go @@ -7,12 +7,12 @@ import ( type L1ELNodeConfig struct { ELNodeConfig - ID stack.L1ELNodeID + ID stack.ComponentID } type rpcL1ELNode struct { rpcELNode - id stack.L1ELNodeID + 
id stack.ComponentID } var _ stack.L1ELNode = (*rpcL1ELNode)(nil) @@ -26,6 +26,6 @@ func NewL1ELNode(cfg L1ELNodeConfig) stack.L1ELNode { } } -func (r *rpcL1ELNode) ID() stack.L1ELNodeID { +func (r *rpcL1ELNode) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/l1_network.go b/op-devstack/shim/l1_network.go index 78ceae0d98d7b..60c45430b1302 100644 --- a/op-devstack/shim/l1_network.go +++ b/op-devstack/shim/l1_network.go @@ -5,20 +5,16 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/locks" ) type L1NetworkConfig struct { NetworkConfig - ID stack.L1NetworkID + ID stack.ComponentID } type presetL1Network struct { presetNetwork - id stack.L1NetworkID - - els locks.RWMap[stack.L1ELNodeID, stack.L1ELNode] - cls locks.RWMap[stack.L1CLNodeID, stack.L1CLNode] + id stack.ComponentID } var _ stack.ExtensibleL1Network = (*presetL1Network)(nil) @@ -32,12 +28,19 @@ func NewL1Network(cfg L1NetworkConfig) stack.ExtensibleL1Network { } } -func (p *presetL1Network) ID() stack.L1NetworkID { +func (p *presetL1Network) ID() stack.ComponentID { return p.id } func (p *presetL1Network) L1ELNode(m stack.L1ELMatcher) stack.L1ELNode { - v, ok := findMatch(m, p.els.Get, p.L1ELNodes) + getter := func(id stack.ComponentID) (stack.L1ELNode, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L1ELNode), true + } + v, ok := findMatch(m, getter, p.L1ELNodes) p.require().True(ok, "must find L1 EL %s", m) return v } @@ -45,13 +48,20 @@ func (p *presetL1Network) L1ELNode(m stack.L1ELMatcher) stack.L1ELNode { func (p *presetL1Network) AddL1ELNode(v stack.L1ELNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l1 EL node %s must be on chain %s", id, p.chainID) - p.require().True(p.els.SetIfMissing(id, v), "l1 EL node %s must not already exist", id) - // Also register in unified registry - 
p.registry.Register(stack.ConvertL1ELNodeID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "l1 EL node %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL1Network) L1CLNode(m stack.L1CLMatcher) stack.L1CLNode { - v, ok := findMatch(m, p.cls.Get, p.L1CLNodes) + getter := func(id stack.ComponentID) (stack.L1CLNode, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L1CLNode), true + } + v, ok := findMatch(m, getter, p.L1CLNodes) p.require().True(ok, "must find L1 CL %s", m) return v } @@ -59,23 +69,37 @@ func (p *presetL1Network) L1CLNode(m stack.L1CLMatcher) stack.L1CLNode { func (p *presetL1Network) AddL1CLNode(v stack.L1CLNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l1 CL node %s must be on chain %s", id, p.chainID) - p.require().True(p.cls.SetIfMissing(id, v), "l1 CL node %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertL1CLNodeID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "l1 CL node %s must not already exist", id) + p.registry.Register(id, v) } -func (p *presetL1Network) L1ELNodeIDs() []stack.L1ELNodeID { - return stack.SortL1ELNodeIDs(p.els.Keys()) +func (p *presetL1Network) L1ELNodeIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL1ELNode)) } func (p *presetL1Network) L1ELNodes() []stack.L1ELNode { - return stack.SortL1ELNodes(p.els.Values()) + ids := p.registry.IDsByKind(stack.KindL1ELNode) + result := make([]stack.L1ELNode, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L1ELNode)) + } + } + return sortByIDFunc(result) } -func (p *presetL1Network) L1CLNodeIDs() []stack.L1CLNodeID { - return stack.SortL1CLNodeIDs(p.cls.Keys()) +func (p *presetL1Network) L1CLNodeIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL1CLNode)) } 
func (p *presetL1Network) L1CLNodes() []stack.L1CLNode { - return stack.SortL1CLNodes(p.cls.Values()) + ids := p.registry.IDsByKind(stack.KindL1CLNode) + result := make([]stack.L1CLNode, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L1CLNode)) + } + } + return sortByIDFunc(result) } diff --git a/op-devstack/shim/l2_batcher.go b/op-devstack/shim/l2_batcher.go index e75adad72b6ac..4e3e785af4283 100644 --- a/op-devstack/shim/l2_batcher.go +++ b/op-devstack/shim/l2_batcher.go @@ -9,13 +9,13 @@ import ( type L2BatcherConfig struct { CommonConfig - ID stack.L2BatcherID + ID stack.ComponentID Client client.RPC } type rpcL2Batcher struct { commonImpl - id stack.L2BatcherID + id stack.ComponentID client *sources.BatcherAdminClient } @@ -30,7 +30,7 @@ func NewL2Batcher(cfg L2BatcherConfig) stack.L2Batcher { } } -func (r *rpcL2Batcher) ID() stack.L2BatcherID { +func (r *rpcL2Batcher) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/l2_challenger.go b/op-devstack/shim/l2_challenger.go index a5fe76f431a0a..65f360503da0e 100644 --- a/op-devstack/shim/l2_challenger.go +++ b/op-devstack/shim/l2_challenger.go @@ -7,13 +7,13 @@ import ( type L2ChallengerConfig struct { CommonConfig - ID stack.L2ChallengerID + ID stack.ComponentID Config *config.Config } type rpcL2Challenger struct { commonImpl - id stack.L2ChallengerID + id stack.ComponentID config *config.Config } @@ -32,6 +32,6 @@ func NewL2Challenger(cfg L2ChallengerConfig) stack.L2Challenger { } } -func (r *rpcL2Challenger) ID() stack.L2ChallengerID { +func (r *rpcL2Challenger) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/l2_cl.go b/op-devstack/shim/l2_cl.go index 1421c54f229cb..a1a490ed73e6e 100644 --- a/op-devstack/shim/l2_cl.go +++ b/op-devstack/shim/l2_cl.go @@ -11,7 +11,7 @@ import ( type L2CLNodeConfig struct { CommonConfig - ID stack.L2CLNodeID + ID stack.ComponentID Client client.RPC UserRPC string @@ -22,13 
+22,13 @@ type L2CLNodeConfig struct { type rpcL2CLNode struct { commonImpl - id stack.L2CLNodeID + id stack.ComponentID client client.RPC rollupClient apis.RollupClient p2pClient apis.P2PClient - els locks.RWMap[stack.L2ELNodeID, stack.L2ELNode] - rollupBoostNodes locks.RWMap[stack.RollupBoostNodeID, stack.RollupBoostNode] - oprbuilderNodes locks.RWMap[stack.OPRBuilderNodeID, stack.OPRBuilderNode] + els locks.RWMap[stack.ComponentID, stack.L2ELNode] + rollupBoostNodes locks.RWMap[stack.ComponentID, stack.RollupBoostNode] + oprbuilderNodes locks.RWMap[stack.ComponentID, stack.OPRBuilderNode] userRPC string @@ -60,7 +60,7 @@ func (r *rpcL2CLNode) ClientRPC() client.RPC { return r.client } -func (r *rpcL2CLNode) ID() stack.L2CLNodeID { +func (r *rpcL2CLNode) ID() stack.ComponentID { return r.id } @@ -85,7 +85,7 @@ func (r *rpcL2CLNode) LinkOPRBuilderNode(oprb stack.OPRBuilderNode) { } func (r *rpcL2CLNode) ELs() []stack.L2ELNode { - return stack.SortL2ELNodes(r.els.Values()) + return sortByIDFunc(r.els.Values()) } func (r *rpcL2CLNode) ELClient() apis.EthClient { @@ -101,11 +101,11 @@ func (r *rpcL2CLNode) ELClient() apis.EthClient { } func (r *rpcL2CLNode) RollupBoostNodes() []stack.RollupBoostNode { - return stack.SortRollupBoostNodes(r.rollupBoostNodes.Values()) + return sortByIDFunc(r.rollupBoostNodes.Values()) } func (r *rpcL2CLNode) OPRBuilderNodes() []stack.OPRBuilderNode { - return stack.SortOPRBuilderNodes(r.oprbuilderNodes.Values()) + return sortByIDFunc(r.oprbuilderNodes.Values()) } func (r *rpcL2CLNode) UserRPC() string { diff --git a/op-devstack/shim/l2_el.go b/op-devstack/shim/l2_el.go index 1739647c215ad..4bd7e41a826b6 100644 --- a/op-devstack/shim/l2_el.go +++ b/op-devstack/shim/l2_el.go @@ -14,7 +14,7 @@ type L2ELNodeConfig struct { ELNodeConfig EngineClient client.RPC RollupCfg *rollup.Config - ID stack.L2ELNodeID + ID stack.ComponentID } type rpcL2ELNode struct { @@ -22,7 +22,7 @@ type rpcL2ELNode struct { l2Client *sources.L2Client l2EngineClient 
*sources.EngineClient - id stack.L2ELNodeID + id stack.ComponentID } var _ stack.L2ELNode = (*rpcL2ELNode)(nil) @@ -47,7 +47,7 @@ func NewL2ELNode(cfg L2ELNodeConfig) stack.L2ELNode { } } -func (r *rpcL2ELNode) ID() stack.L2ELNodeID { +func (r *rpcL2ELNode) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/l2_network.go b/op-devstack/shim/l2_network.go index 3631747298b44..86128c95434c7 100644 --- a/op-devstack/shim/l2_network.go +++ b/op-devstack/shim/l2_network.go @@ -6,12 +6,11 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/locks" ) type L2NetworkConfig struct { NetworkConfig - ID stack.L2NetworkID + ID stack.ComponentID RollupConfig *rollup.Config Deployment stack.L2Deployment Keys stack.Keys @@ -23,7 +22,7 @@ type L2NetworkConfig struct { type presetL2Network struct { presetNetwork - id stack.L2NetworkID + id stack.ComponentID rollupCfg *rollup.Config deployment stack.L2Deployment @@ -32,17 +31,6 @@ type presetL2Network struct { superchain stack.Superchain l1 stack.L1Network cluster stack.Cluster - - batchers locks.RWMap[stack.L2BatcherID, stack.L2Batcher] - proposers locks.RWMap[stack.L2ProposerID, stack.L2Proposer] - challengers locks.RWMap[stack.L2ChallengerID, stack.L2Challenger] - - els locks.RWMap[stack.L2ELNodeID, stack.L2ELNode] - cls locks.RWMap[stack.L2CLNodeID, stack.L2CLNode] - - conductors locks.RWMap[stack.ConductorID, stack.Conductor] - rollupBoostNodes locks.RWMap[stack.RollupBoostNodeID, stack.RollupBoostNode] - oprBuilderNodes locks.RWMap[stack.OPRBuilderNodeID, stack.OPRBuilderNode] } var _ stack.L2Network = (*presetL2Network)(nil) @@ -65,7 +53,7 @@ func NewL2Network(cfg L2NetworkConfig) stack.ExtensibleL2Network { } } -func (p *presetL2Network) ID() stack.L2NetworkID { +func (p *presetL2Network) ID() stack.ComponentID { return p.id } @@ 
-100,7 +88,14 @@ func (p *presetL2Network) Cluster() stack.Cluster { } func (p *presetL2Network) L2Batcher(m stack.L2BatcherMatcher) stack.L2Batcher { - v, ok := findMatch(m, p.batchers.Get, p.L2Batchers) + getter := func(id stack.ComponentID) (stack.L2Batcher, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L2Batcher), true + } + v, ok := findMatch(m, getter, p.L2Batchers) p.require().True(ok, "must find L2 batcher %s", m) return v } @@ -108,26 +103,40 @@ func (p *presetL2Network) L2Batcher(m stack.L2BatcherMatcher) stack.L2Batcher { func (p *presetL2Network) AddL2Batcher(v stack.L2Batcher) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 batcher %s must be on chain %s", id, p.chainID) - p.require().True(p.batchers.SetIfMissing(id, v), "l2 batcher %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertL2BatcherID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "l2 batcher %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL2Network) Conductor(m stack.ConductorMatcher) stack.Conductor { - v, ok := findMatch(m, p.conductors.Get, p.Conductors) + getter := func(id stack.ComponentID) (stack.Conductor, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.Conductor), true + } + v, ok := findMatch(m, getter, p.Conductors) p.require().True(ok, "must find L2 conductor %s", m) return v } func (p *presetL2Network) AddConductor(v stack.Conductor) { id := v.ID() - p.require().True(p.conductors.SetIfMissing(id, v), "conductor %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertConductorID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "conductor %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL2Network) L2Proposer(m stack.L2ProposerMatcher) stack.L2Proposer { 
- v, ok := findMatch(m, p.proposers.Get, p.L2Proposers) + getter := func(id stack.ComponentID) (stack.L2Proposer, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L2Proposer), true + } + v, ok := findMatch(m, getter, p.L2Proposers) p.require().True(ok, "must find L2 proposer %s", m) return v } @@ -135,26 +144,40 @@ func (p *presetL2Network) L2Proposer(m stack.L2ProposerMatcher) stack.L2Proposer func (p *presetL2Network) AddL2Proposer(v stack.L2Proposer) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 proposer %s must be on chain %s", id, p.chainID) - p.require().True(p.proposers.SetIfMissing(id, v), "l2 proposer %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertL2ProposerID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "l2 proposer %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL2Network) L2Challenger(m stack.L2ChallengerMatcher) stack.L2Challenger { - v, ok := findMatch(m, p.challengers.Get, p.L2Challengers) + getter := func(id stack.ComponentID) (stack.L2Challenger, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L2Challenger), true + } + v, ok := findMatch(m, getter, p.L2Challengers) p.require().True(ok, "must find L2 challenger %s", m) return v } func (p *presetL2Network) AddL2Challenger(v stack.L2Challenger) { id := v.ID() - p.require().True(p.challengers.SetIfMissing(id, v), "l2 challenger %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertL2ChallengerID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "l2 challenger %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL2Network) L2CLNode(m stack.L2CLMatcher) stack.L2CLNode { - v, ok := findMatch(m, p.cls.Get, p.L2CLNodes) + getter := func(id stack.ComponentID) (stack.L2CLNode, 
bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L2CLNode), true + } + v, ok := findMatch(m, getter, p.L2CLNodes) p.require().True(ok, "must find L2 CL %s", m) return v } @@ -162,13 +185,20 @@ func (p *presetL2Network) L2CLNode(m stack.L2CLMatcher) stack.L2CLNode { func (p *presetL2Network) AddL2CLNode(v stack.L2CLNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 CL node %s must be on chain %s", id, p.chainID) - p.require().True(p.cls.SetIfMissing(id, v), "l2 CL node %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertL2CLNodeID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "l2 CL node %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL2Network) L2ELNode(m stack.L2ELMatcher) stack.L2ELNode { - v, ok := findMatch(m, p.els.Get, p.L2ELNodes) + getter := func(id stack.ComponentID) (stack.L2ELNode, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L2ELNode), true + } + v, ok := findMatch(m, getter, p.L2ELNodes) p.require().True(ok, "must find L2 EL %s", m) return v } @@ -176,85 +206,155 @@ func (p *presetL2Network) L2ELNode(m stack.L2ELMatcher) stack.L2ELNode { func (p *presetL2Network) AddL2ELNode(v stack.L2ELNode) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "l2 EL node %s must be on chain %s", id, p.chainID) - p.require().True(p.els.SetIfMissing(id, v), "l2 EL node %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertL2ELNodeID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "l2 EL node %s must not already exist", id) + p.registry.Register(id, v) } -func (p *presetL2Network) L2BatcherIDs() []stack.L2BatcherID { - return stack.SortL2BatcherIDs(p.batchers.Keys()) +func (p *presetL2Network) L2BatcherIDs() []stack.ComponentID { + return 
sortByID(p.registry.IDsByKind(stack.KindL2Batcher)) } func (p *presetL2Network) L2Batchers() []stack.L2Batcher { - return stack.SortL2Batchers(p.batchers.Values()) + ids := p.registry.IDsByKind(stack.KindL2Batcher) + result := make([]stack.L2Batcher, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L2Batcher)) + } + } + return sortByIDFunc(result) } -func (p *presetL2Network) L2ProposerIDs() []stack.L2ProposerID { - return stack.SortL2ProposerIDs(p.proposers.Keys()) +func (p *presetL2Network) L2ProposerIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL2Proposer)) } func (p *presetL2Network) L2Proposers() []stack.L2Proposer { - return stack.SortL2Proposers(p.proposers.Values()) + ids := p.registry.IDsByKind(stack.KindL2Proposer) + result := make([]stack.L2Proposer, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L2Proposer)) + } + } + return sortByIDFunc(result) } -func (p *presetL2Network) L2ChallengerIDs() []stack.L2ChallengerID { - return stack.SortL2ChallengerIDs(p.challengers.Keys()) +func (p *presetL2Network) L2ChallengerIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL2Challenger)) } func (p *presetL2Network) L2Challengers() []stack.L2Challenger { - return stack.SortL2Challengers(p.challengers.Values()) + ids := p.registry.IDsByKind(stack.KindL2Challenger) + result := make([]stack.L2Challenger, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L2Challenger)) + } + } + return sortByIDFunc(result) } func (p *presetL2Network) Conductors() []stack.Conductor { - return stack.SortConductors(p.conductors.Values()) + ids := p.registry.IDsByKind(stack.KindConductor) + result := make([]stack.Conductor, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, 
v.(stack.Conductor)) + } + } + return sortByIDFunc(result) } -func (p *presetL2Network) L2CLNodeIDs() []stack.L2CLNodeID { - return stack.SortL2CLNodeIDs(p.cls.Keys()) +func (p *presetL2Network) L2CLNodeIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL2CLNode)) } func (p *presetL2Network) L2CLNodes() []stack.L2CLNode { - return stack.SortL2CLNodes(p.cls.Values()) + ids := p.registry.IDsByKind(stack.KindL2CLNode) + result := make([]stack.L2CLNode, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L2CLNode)) + } + } + return sortByIDFunc(result) } -func (p *presetL2Network) L2ELNodeIDs() []stack.L2ELNodeID { - return stack.SortL2ELNodeIDs(p.els.Keys()) +func (p *presetL2Network) L2ELNodeIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL2ELNode)) } func (p *presetL2Network) L2ELNodes() []stack.L2ELNode { - return stack.SortL2ELNodes(p.els.Values()) + ids := p.registry.IDsByKind(stack.KindL2ELNode) + result := make([]stack.L2ELNode, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L2ELNode)) + } + } + return sortByIDFunc(result) } func (p *presetL2Network) RollupBoostNodes() []stack.RollupBoostNode { - return stack.SortRollupBoostNodes(p.rollupBoostNodes.Values()) + ids := p.registry.IDsByKind(stack.KindRollupBoostNode) + result := make([]stack.RollupBoostNode, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.RollupBoostNode)) + } + } + return sortByIDFunc(result) } func (p *presetL2Network) OPRBuilderNodes() []stack.OPRBuilderNode { - return stack.SortOPRBuilderNodes(p.oprBuilderNodes.Values()) + ids := p.registry.IDsByKind(stack.KindOPRBuilderNode) + result := make([]stack.OPRBuilderNode, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.OPRBuilderNode)) + } 
+ } + return sortByIDFunc(result) } func (p *presetL2Network) AddRollupBoostNode(v stack.RollupBoostNode) { id := v.ID() - p.require().True(p.rollupBoostNodes.SetIfMissing(id, v), "rollup boost node %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertRollupBoostNodeID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "rollup boost node %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL2Network) AddOPRBuilderNode(v stack.OPRBuilderNode) { id := v.ID() - p.require().True(p.oprBuilderNodes.SetIfMissing(id, v), "OPR builder node %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertOPRBuilderNodeID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "OPR builder node %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetL2Network) OPRBuilderNode(m stack.OPRBuilderNodeMatcher) stack.OPRBuilderNode { - v, ok := findMatch(m, p.oprBuilderNodes.Get, p.OPRBuilderNodes) + getter := func(id stack.ComponentID) (stack.OPRBuilderNode, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.OPRBuilderNode), true + } + v, ok := findMatch(m, getter, p.OPRBuilderNodes) p.require().True(ok, "must find OPR builder node %s", m) return v } func (p *presetL2Network) RollupBoostNode(m stack.RollupBoostNodeMatcher) stack.RollupBoostNode { - v, ok := findMatch(m, p.rollupBoostNodes.Get, p.RollupBoostNodes) + getter := func(id stack.ComponentID) (stack.RollupBoostNode, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.RollupBoostNode), true + } + v, ok := findMatch(m, getter, p.RollupBoostNodes) p.require().True(ok, "must find rollup boost node %s", m) return v } diff --git a/op-devstack/shim/l2_proposer.go b/op-devstack/shim/l2_proposer.go index 530ab3c73accf..190cf3a450e04 100644 --- 
a/op-devstack/shim/l2_proposer.go +++ b/op-devstack/shim/l2_proposer.go @@ -7,13 +7,13 @@ import ( type L2ProposerConfig struct { CommonConfig - ID stack.L2ProposerID + ID stack.ComponentID Client client.RPC } type rpcL2Proposer struct { commonImpl - id stack.L2ProposerID + id stack.ComponentID client client.RPC } @@ -28,6 +28,6 @@ func NewL2Proposer(cfg L2ProposerConfig) stack.L2Proposer { } } -func (r *rpcL2Proposer) ID() stack.L2ProposerID { +func (r *rpcL2Proposer) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/matcher.go b/op-devstack/shim/matcher.go index b5e65c5facbff..d8ce976fae55e 100644 --- a/op-devstack/shim/matcher.go +++ b/op-devstack/shim/matcher.go @@ -1,16 +1,19 @@ package shim import ( + "slices" + "sort" + "github.com/ethereum-optimism/optimism/op-devstack/stack" ) -// findMatch checks if the matcher is an ID for direct lookup. If not, then it will search the list of values for a matching element. +// findMatch checks if the matcher is an ID wrapper for direct lookup. If not, then it will search the list of values for a matching element. // If multiple elements match, the first found is returned. // The values function is used to lazy-fetch values in sorted order, such that the search is deterministic. -func findMatch[I comparable, E stack.Identifiable[I]](m stack.Matcher[I, E], getValue func(I) (E, bool), values func() []E) (out E, found bool) { - id, ok := m.(I) - if ok { - return getValue(id) +func findMatch[E stack.Identifiable](m stack.Matcher[E], getValue func(stack.ComponentID) (E, bool), values func() []E) (out E, found bool) { + // Check for idMatcher wrapper (created by stack.ByID) + if idm, ok := m.(interface{ ID() stack.ComponentID }); ok { + return getValue(idm.ID()) } got := m.Match(values()) if len(got) == 0 { @@ -18,3 +21,21 @@ func findMatch[I comparable, E stack.Identifiable[I]](m stack.Matcher[I, E], get } return got[0], true } + +// sortByID sorts a slice of ComponentIDs. 
+func sortByID(ids []stack.ComponentID) []stack.ComponentID { + out := slices.Clone(ids) + sort.Slice(out, func(i, j int) bool { + return out[i].Less(out[j]) + }) + return out +} + +// sortByIDFunc sorts a slice of elements by extracting their ID. +func sortByIDFunc[T stack.Identifiable](elems []T) []T { + out := slices.Clone(elems) + sort.Slice(out, func(i, j int) bool { + return out[i].ID().Less(out[j].ID()) + }) + return out +} diff --git a/op-devstack/shim/network.go b/op-devstack/shim/network.go index c90ac2b6efee9..d6652f4828da7 100644 --- a/op-devstack/shim/network.go +++ b/op-devstack/shim/network.go @@ -5,7 +5,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/locks" ) type NetworkConfig struct { @@ -20,11 +19,6 @@ type presetNetwork struct { // Unified component registry for generic access registry *stack.Registry - - // Legacy typed maps - kept for backward compatibility during migration - // These will be removed once all callers migrate to generic access - faucets locks.RWMap[stack.FaucetID, stack.Faucet] - syncTesters locks.RWMap[stack.SyncTesterID, stack.SyncTester] } var _ stack.Network = (*presetNetwork)(nil) @@ -68,16 +62,30 @@ func (p *presetNetwork) ChainConfig() *params.ChainConfig { return p.chainCfg } -func (p *presetNetwork) FaucetIDs() []stack.FaucetID { - return stack.SortFaucetIDs(p.faucets.Keys()) +func (p *presetNetwork) FaucetIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindFaucet)) } func (p *presetNetwork) Faucets() []stack.Faucet { - return stack.SortFaucets(p.faucets.Values()) + ids := p.registry.IDsByKind(stack.KindFaucet) + result := make([]stack.Faucet, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.Faucet)) + } + } + return sortByIDFunc(result) } func (p *presetNetwork) Faucet(m stack.FaucetMatcher) 
stack.Faucet { - v, ok := findMatch(m, p.faucets.Get, p.Faucets) + getter := func(id stack.ComponentID) (stack.Faucet, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.Faucet), true + } + v, ok := findMatch(m, getter, p.Faucets) p.require().True(ok, "must find faucet %s", m) return v } @@ -85,21 +93,35 @@ func (p *presetNetwork) Faucet(m stack.FaucetMatcher) stack.Faucet { func (p *presetNetwork) AddFaucet(v stack.Faucet) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "faucet %s must be on chain %s", id, p.chainID) - p.require().True(p.faucets.SetIfMissing(id, v), "faucet %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertFaucetID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "faucet %s must not already exist", id) + p.registry.Register(id, v) } -func (p *presetNetwork) SyncTesterIDs() []stack.SyncTesterID { - return stack.SortSyncTesterIDs(p.syncTesters.Keys()) +func (p *presetNetwork) SyncTesterIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindSyncTester)) } func (p *presetNetwork) SyncTesters() []stack.SyncTester { - return stack.SortSyncTesters(p.syncTesters.Values()) + ids := p.registry.IDsByKind(stack.KindSyncTester) + result := make([]stack.SyncTester, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.SyncTester)) + } + } + return sortByIDFunc(result) } func (p *presetNetwork) SyncTester(m stack.SyncTesterMatcher) stack.SyncTester { - v, ok := findMatch(m, p.syncTesters.Get, p.SyncTesters) + getter := func(id stack.ComponentID) (stack.SyncTester, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.SyncTester), true + } + v, ok := findMatch(m, getter, p.SyncTesters) p.require().True(ok, "must find sync tester %s", m) return v } @@ -107,7 +129,7 @@ func (p *presetNetwork) SyncTester(m 
stack.SyncTesterMatcher) stack.SyncTester { func (p *presetNetwork) AddSyncTester(v stack.SyncTester) { id := v.ID() p.require().Equal(p.chainID, id.ChainID(), "sync tester %s must be on chain %s", id, p.chainID) - p.require().True(p.syncTesters.SetIfMissing(id, v), "sync tester %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertSyncTesterID(id).ComponentID, v) + _, exists := p.registry.Get(id) + p.require().False(exists, "sync tester %s must not already exist", id) + p.registry.Register(id, v) } diff --git a/op-devstack/shim/op_rbuilder.go b/op-devstack/shim/op_rbuilder.go index a1fca59679f55..4b5c8216e4fed 100644 --- a/op-devstack/shim/op_rbuilder.go +++ b/op-devstack/shim/op_rbuilder.go @@ -13,13 +13,13 @@ import ( type OPRBuilderNodeConfig struct { ELNodeConfig RollupCfg *rollup.Config - ID stack.OPRBuilderNodeID + ID stack.ComponentID FlashblocksClient *opclient.WSClient } type OPRBuilderNode struct { rpcELNode - id stack.OPRBuilderNodeID + id stack.ComponentID engineClient *sources.EngineClient flashblocksClient *opclient.WSClient } @@ -41,7 +41,7 @@ func NewOPRBuilderNode(cfg OPRBuilderNodeConfig) *OPRBuilderNode { } } -func (r *OPRBuilderNode) ID() stack.OPRBuilderNodeID { +func (r *OPRBuilderNode) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/rollup_boost.go b/op-devstack/shim/rollup_boost.go index be4fa4ef6e7f0..9ef839579420e 100644 --- a/op-devstack/shim/rollup_boost.go +++ b/op-devstack/shim/rollup_boost.go @@ -13,7 +13,7 @@ import ( type RollupBoostNodeConfig struct { ELNodeConfig RollupCfg *rollup.Config - ID stack.RollupBoostNodeID + ID stack.ComponentID FlashblocksClient *opclient.WSClient } @@ -21,7 +21,7 @@ type RollupBoostNode struct { rpcELNode engineClient *sources.EngineClient - id stack.RollupBoostNodeID + id stack.ComponentID flashblocksClient *opclient.WSClient } @@ -43,7 +43,7 @@ func NewRollupBoostNode(cfg RollupBoostNodeConfig) *RollupBoostNode { } } -func (r 
*RollupBoostNode) ID() stack.RollupBoostNodeID { +func (r *RollupBoostNode) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/superchain.go b/op-devstack/shim/superchain.go index 1920fb5295581..8e651c09a6b79 100644 --- a/op-devstack/shim/superchain.go +++ b/op-devstack/shim/superchain.go @@ -6,13 +6,13 @@ import ( type SuperchainConfig struct { CommonConfig - ID stack.SuperchainID + ID stack.ComponentID Deployment stack.SuperchainDeployment } type presetSuperchain struct { commonImpl - id stack.SuperchainID + id stack.ComponentID deployment stack.SuperchainDeployment } @@ -27,7 +27,7 @@ func NewSuperchain(cfg SuperchainConfig) stack.Superchain { } } -func (p *presetSuperchain) ID() stack.SuperchainID { +func (p *presetSuperchain) ID() stack.ComponentID { return p.id } diff --git a/op-devstack/shim/supervisor.go b/op-devstack/shim/supervisor.go index ec5d4c06941e0..2a91406ec02aa 100644 --- a/op-devstack/shim/supervisor.go +++ b/op-devstack/shim/supervisor.go @@ -9,13 +9,13 @@ import ( type SupervisorConfig struct { CommonConfig - ID stack.SupervisorID + ID stack.ComponentID Client client.RPC } type rpcSupervisor struct { commonImpl - id stack.SupervisorID + id stack.ComponentID client client.RPC api apis.SupervisorAPI @@ -33,7 +33,7 @@ func NewSupervisor(cfg SupervisorConfig) stack.Supervisor { } } -func (r *rpcSupervisor) ID() stack.SupervisorID { +func (r *rpcSupervisor) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/shim/sync_tester.go b/op-devstack/shim/sync_tester.go index 460169e148fdd..d86862374f06b 100644 --- a/op-devstack/shim/sync_tester.go +++ b/op-devstack/shim/sync_tester.go @@ -12,7 +12,7 @@ import ( type SyncTesterConfig struct { CommonConfig - ID stack.SyncTesterID + ID stack.ComponentID Addr string Client client.RPC } @@ -20,7 +20,7 @@ type SyncTesterConfig struct { // presetSyncTester wraps around a syncTester-service, type presetSyncTester struct { commonImpl - id stack.SyncTesterID + id stack.ComponentID // 
Endpoint for initializing RPC Client per session addr string // RPC Client initialized without session @@ -39,7 +39,7 @@ func NewSyncTester(cfg SyncTesterConfig) stack.SyncTester { } } -func (p *presetSyncTester) ID() stack.SyncTesterID { +func (p *presetSyncTester) ID() stack.ComponentID { return p.id } diff --git a/op-devstack/shim/system.go b/op-devstack/shim/system.go index 71996c5beb074..3523e4e9485a1 100644 --- a/op-devstack/shim/system.go +++ b/op-devstack/shim/system.go @@ -24,22 +24,7 @@ type presetSystem struct { // Unified component registry for generic access registry *stack.Registry - // Legacy typed maps - kept for backward compatibility during migration - superchains locks.RWMap[stack.SuperchainID, stack.Superchain] - clusters locks.RWMap[stack.ClusterID, stack.Cluster] - - // tracks L1 networks by L1NetworkID (a typed eth.ChainID) - l1Networks locks.RWMap[stack.L1NetworkID, stack.L1Network] - // tracks L2 networks by L2NetworkID (a typed eth.ChainID) - l2Networks locks.RWMap[stack.L2NetworkID, stack.L2Network] - - // tracks all networks, and ensures there are no networks with the same eth.ChainID - networks locks.RWMap[eth.ChainID, stack.Network] - - supervisors locks.RWMap[stack.SupervisorID, stack.Supervisor] - supernodes locks.RWMap[stack.SupernodeID, stack.Supernode] - sequencers locks.RWMap[stack.TestSequencerID, stack.TestSequencer] - syncTesters locks.RWMap[stack.SyncTesterID, stack.SyncTester] + supernodes locks.RWMap[stack.ComponentID, stack.Supernode] } var _ stack.ExtensibleSystem = (*presetSystem)(nil) @@ -74,78 +59,131 @@ func (p *presetSystem) ComponentIDs(kind stack.ComponentKind) []stack.ComponentI } func (p *presetSystem) Superchain(m stack.SuperchainMatcher) stack.Superchain { - v, ok := findMatch(m, p.superchains.Get, p.Superchains) + getter := func(id stack.ComponentID) (stack.Superchain, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.Superchain), true + } + v, ok := findMatch(m, 
getter, p.Superchains) p.require().True(ok, "must find superchain %s", m) return v } func (p *presetSystem) AddSuperchain(v stack.Superchain) { - p.require().True(p.superchains.SetIfMissing(v.ID(), v), "superchain %s must not already exist", v.ID()) - // Also register in unified registry - p.registry.Register(stack.ConvertSuperchainID(v.ID()).ComponentID, v) + id := v.ID() + _, exists := p.registry.Get(id) + p.require().False(exists, "superchain %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetSystem) Cluster(m stack.ClusterMatcher) stack.Cluster { - v, ok := findMatch(m, p.clusters.Get, p.Clusters) + getter := func(id stack.ComponentID) (stack.Cluster, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.Cluster), true + } + v, ok := findMatch(m, getter, p.Clusters) p.require().True(ok, "must find cluster %s", m) return v } func (p *presetSystem) AddCluster(v stack.Cluster) { - p.require().True(p.clusters.SetIfMissing(v.ID(), v), "cluster %s must not already exist", v.ID()) - // Also register in unified registry - p.registry.Register(stack.ConvertClusterID(v.ID()).ComponentID, v) + id := v.ID() + _, exists := p.registry.Get(id) + p.require().False(exists, "cluster %s must not already exist", id) + p.registry.Register(id, v) +} + +// networkExistsByChainID checks if any network (L1 or L2) exists with the given chain ID +func (p *presetSystem) networkExistsByChainID(chainID eth.ChainID) bool { + l1ID := stack.NewL1NetworkID(chainID) + if _, ok := p.registry.Get(l1ID); ok { + return true + } + l2ID := stack.NewL2NetworkID(chainID) + if _, ok := p.registry.Get(l2ID); ok { + return true + } + return false } func (p *presetSystem) Network(id eth.ChainID) stack.Network { - if l1Net, ok := p.l1Networks.Get(stack.L1NetworkID(id)); ok { - return l1Net + l1ID := stack.NewL1NetworkID(id) + if l1Net, ok := p.registry.Get(l1ID); ok { + return l1Net.(stack.L1Network) } - if l2Net, ok := 
p.l2Networks.Get(stack.L2NetworkID(id)); ok { - return l2Net + l2ID := stack.NewL2NetworkID(id) + if l2Net, ok := p.registry.Get(l2ID); ok { + return l2Net.(stack.L2Network) } p.t.FailNow() return nil } func (p *presetSystem) L1Network(m stack.L1NetworkMatcher) stack.L1Network { - v, ok := findMatch(m, p.l1Networks.Get, p.L1Networks) + getter := func(id stack.ComponentID) (stack.L1Network, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L1Network), true + } + v, ok := findMatch(m, getter, p.L1Networks) p.require().True(ok, "must find l1 network %s", m) return v } func (p *presetSystem) AddL1Network(v stack.L1Network) { id := v.ID() - p.require().True(p.networks.SetIfMissing(id.ChainID(), v), "chain with id %s must not already exist", id.ChainID()) - p.require().True(p.l1Networks.SetIfMissing(id, v), "L1 chain %s must not already exist", id) - // Also register in unified registry - p.registry.Register(stack.ConvertL1NetworkID(id).ComponentID, v) + p.require().False(p.networkExistsByChainID(id.ChainID()), "chain with id %s must not already exist", id.ChainID()) + _, exists := p.registry.Get(id) + p.require().False(exists, "L1 chain %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetSystem) L2Network(m stack.L2NetworkMatcher) stack.L2Network { - v, ok := findMatch(m, p.l2Networks.Get, p.L2Networks) + getter := func(id stack.ComponentID) (stack.L2Network, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.L2Network), true + } + v, ok := findMatch(m, getter, p.L2Networks) p.require().True(ok, "must find l2 network %s", m) return v } func (p *presetSystem) AddL2Network(v stack.L2Network) { id := v.ID() - p.require().True(p.networks.SetIfMissing(id.ChainID(), v), "chain with id %s must not already exist", id.ChainID()) - p.require().True(p.l2Networks.SetIfMissing(id, v), "L2 chain %s must not already exist", id) - // Also register in unified registry - 
p.registry.Register(stack.ConvertL2NetworkID(id).ComponentID, v) + p.require().False(p.networkExistsByChainID(id.ChainID()), "chain with id %s must not already exist", id.ChainID()) + _, exists := p.registry.Get(id) + p.require().False(exists, "L2 chain %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetSystem) Supervisor(m stack.SupervisorMatcher) stack.Supervisor { - v, ok := findMatch(m, p.supervisors.Get, p.Supervisors) + getter := func(id stack.ComponentID) (stack.Supervisor, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.Supervisor), true + } + v, ok := findMatch(m, getter, p.Supervisors) p.require().True(ok, "must find supervisor %s", m) return v } func (p *presetSystem) AddSupervisor(v stack.Supervisor) { - p.require().True(p.supervisors.SetIfMissing(v.ID(), v), "supervisor %s must not already exist", v.ID()) - // Also register in unified registry - p.registry.Register(stack.ConvertSupervisorID(v.ID()).ComponentID, v) + id := v.ID() + _, exists := p.registry.Get(id) + p.require().False(exists, "supervisor %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetSystem) Supernode(m stack.SupernodeMatcher) stack.Supernode { @@ -159,61 +197,105 @@ func (p *presetSystem) AddSupernode(v stack.Supernode) { } func (p *presetSystem) TestSequencer(m stack.TestSequencerMatcher) stack.TestSequencer { - v, ok := findMatch(m, p.sequencers.Get, p.TestSequencers) + getter := func(id stack.ComponentID) (stack.TestSequencer, bool) { + v, ok := p.registry.Get(id) + if !ok { + return nil, false + } + return v.(stack.TestSequencer), true + } + v, ok := findMatch(m, getter, p.TestSequencers) p.require().True(ok, "must find sequencer %s", m) return v } func (p *presetSystem) AddTestSequencer(v stack.TestSequencer) { - p.require().True(p.sequencers.SetIfMissing(v.ID(), v), "sequencer %s must not already exist", v.ID()) - // Also register in unified registry - 
p.registry.Register(stack.ConvertTestSequencerID(v.ID()).ComponentID, v) + id := v.ID() + _, exists := p.registry.Get(id) + p.require().False(exists, "sequencer %s must not already exist", id) + p.registry.Register(id, v) } func (p *presetSystem) AddSyncTester(v stack.SyncTester) { - p.require().True(p.syncTesters.SetIfMissing(v.ID(), v), "sync tester %s must not already exist", v.ID()) - // Also register in unified registry - p.registry.Register(stack.ConvertSyncTesterID(v.ID()).ComponentID, v) + id := v.ID() + _, exists := p.registry.Get(id) + p.require().False(exists, "sync tester %s must not already exist", id) + p.registry.Register(id, v) } -func (p *presetSystem) SuperchainIDs() []stack.SuperchainID { - return stack.SortSuperchainIDs(p.superchains.Keys()) +func (p *presetSystem) SuperchainIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindSuperchain)) } func (p *presetSystem) Superchains() []stack.Superchain { - return stack.SortSuperchains(p.superchains.Values()) + ids := p.registry.IDsByKind(stack.KindSuperchain) + result := make([]stack.Superchain, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.Superchain)) + } + } + return sortByIDFunc(result) } -func (p *presetSystem) ClusterIDs() []stack.ClusterID { - return stack.SortClusterIDs(p.clusters.Keys()) +func (p *presetSystem) ClusterIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindCluster)) } func (p *presetSystem) Clusters() []stack.Cluster { - return stack.SortClusters(p.clusters.Values()) + ids := p.registry.IDsByKind(stack.KindCluster) + result := make([]stack.Cluster, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.Cluster)) + } + } + return sortByIDFunc(result) } -func (p *presetSystem) L1NetworkIDs() []stack.L1NetworkID { - return stack.SortL1NetworkIDs(p.l1Networks.Keys()) +func (p *presetSystem) L1NetworkIDs() 
[]stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL1Network)) } func (p *presetSystem) L1Networks() []stack.L1Network { - return stack.SortL1Networks(p.l1Networks.Values()) + ids := p.registry.IDsByKind(stack.KindL1Network) + result := make([]stack.L1Network, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L1Network)) + } + } + return sortByIDFunc(result) } -func (p *presetSystem) L2NetworkIDs() []stack.L2NetworkID { - return stack.SortL2NetworkIDs(p.l2Networks.Keys()) +func (p *presetSystem) L2NetworkIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindL2Network)) } func (p *presetSystem) L2Networks() []stack.L2Network { - return stack.SortL2Networks(p.l2Networks.Values()) + ids := p.registry.IDsByKind(stack.KindL2Network) + result := make([]stack.L2Network, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.L2Network)) + } + } + return sortByIDFunc(result) } -func (p *presetSystem) SupervisorIDs() []stack.SupervisorID { - return stack.SortSupervisorIDs(p.supervisors.Keys()) +func (p *presetSystem) SupervisorIDs() []stack.ComponentID { + return sortByID(p.registry.IDsByKind(stack.KindSupervisor)) } func (p *presetSystem) Supervisors() []stack.Supervisor { - return stack.SortSupervisors(p.supervisors.Values()) + ids := p.registry.IDsByKind(stack.KindSupervisor) + result := make([]stack.Supervisor, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.Supervisor)) + } + } + return sortByIDFunc(result) } func (p *presetSystem) Supernodes() []stack.Supernode { @@ -221,7 +303,14 @@ func (p *presetSystem) Supernodes() []stack.Supernode { } func (p *presetSystem) TestSequencers() []stack.TestSequencer { - return stack.SortTestSequencers(p.sequencers.Values()) + ids := p.registry.IDsByKind(stack.KindTestSequencer) + result := 
make([]stack.TestSequencer, 0, len(ids)) + for _, id := range ids { + if v, ok := p.registry.Get(id); ok { + result = append(result, v.(stack.TestSequencer)) + } + } + return sortByIDFunc(result) } func (p *presetSystem) SetTimeTravelClock(cl stack.TimeTravelClock) { diff --git a/op-devstack/shim/test_sequencer.go b/op-devstack/shim/test_sequencer.go index 6f045efccc78e..5edb519c6f864 100644 --- a/op-devstack/shim/test_sequencer.go +++ b/op-devstack/shim/test_sequencer.go @@ -10,14 +10,14 @@ import ( type TestSequencerConfig struct { CommonConfig - ID stack.TestSequencerID + ID stack.ComponentID Client client.RPC ControlClients map[eth.ChainID]client.RPC } type rpcTestSequencer struct { commonImpl - id stack.TestSequencerID + id stack.ComponentID client client.RPC api apis.TestSequencerAPI @@ -42,7 +42,7 @@ func NewTestSequencer(cfg TestSequencerConfig) stack.TestSequencer { return s } -func (r *rpcTestSequencer) ID() stack.TestSequencerID { +func (r *rpcTestSequencer) ID() stack.ComponentID { return r.id } diff --git a/op-devstack/stack/capabilities.go b/op-devstack/stack/capabilities.go deleted file mode 100644 index 7075fa0eabfc1..0000000000000 --- a/op-devstack/stack/capabilities.go +++ /dev/null @@ -1,134 +0,0 @@ -package stack - -import ( - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// Capability interfaces define shared behaviors across component types. -// These enable polymorphic operations without requiring components to -// implement interfaces with incompatible ID() method signatures. -// -// For example, RollupBoostNode and OPRBuilderNode both provide L2 EL -// functionality but can't implement L2ELNode because their ID() methods -// return different types. The L2ELCapable interface captures the shared -// L2 EL behavior, allowing code to work with any L2 EL-like component. - -// L2ELCapable is implemented by any component that provides L2 execution layer functionality. 
-// This includes L2ELNode, RollupBoostNode, and OPRBuilderNode. -// -// Components implementing this interface can: -// - Execute L2 transactions -// - Provide engine API access for consensus layer integration -type L2ELCapable interface { - L2EthClient() apis.L2EthClient - L2EngineClient() apis.EngineClient - ELNode -} - -// L2ELCapableKinds returns all ComponentKinds that implement L2ELCapable. -func L2ELCapableKinds() []ComponentKind { - return []ComponentKind{ - KindL2ELNode, - KindRollupBoostNode, - KindOPRBuilderNode, - } -} - -// L1ELCapable is implemented by any component that provides L1 execution layer functionality. -type L1ELCapable interface { - ELNode -} - -// L1ELCapableKinds returns all ComponentKinds that implement L1ELCapable. -func L1ELCapableKinds() []ComponentKind { - return []ComponentKind{ - KindL1ELNode, - } -} - -// Verify that expected types implement capability interfaces. -// These are compile-time checks. -var ( - _ L2ELCapable = (L2ELNode)(nil) - _ L2ELCapable = (RollupBoostNode)(nil) - _ L2ELCapable = (OPRBuilderNode)(nil) -) - -// Registry helper functions for capability-based lookups. - -// RegistryFindByCapability returns all components that implement the given capability interface. -// This iterates over all components and performs a type assertion. -func RegistryFindByCapability[T any](r *Registry) []T { - var result []T - r.Range(func(id ComponentID, component any) bool { - if capable, ok := component.(T); ok { - result = append(result, capable) - } - return true - }) - return result -} - -// RegistryFindByCapabilityOnChain returns all components on a specific chain -// that implement the given capability interface. 
-func RegistryFindByCapabilityOnChain[T any](r *Registry, chainID eth.ChainID) []T { - var result []T - r.RangeByChainID(chainID, func(id ComponentID, component any) bool { - if capable, ok := component.(T); ok { - result = append(result, capable) - } - return true - }) - return result -} - -// RegistryFindByKinds returns all components of the specified kinds. -// This is useful when you know which kinds implement a capability. -func RegistryFindByKinds(r *Registry, kinds []ComponentKind) []any { - var result []any - for _, kind := range kinds { - result = append(result, r.GetByKind(kind)...) - } - return result -} - -// RegistryFindByKindsTyped returns all components of the specified kinds, -// cast to the expected type. Components that don't match are skipped. -func RegistryFindByKindsTyped[T any](r *Registry, kinds []ComponentKind) []T { - var result []T - for _, kind := range kinds { - for _, component := range r.GetByKind(kind) { - if typed, ok := component.(T); ok { - result = append(result, typed) - } - } - } - return result -} - -// FindL2ELCapable returns all L2 EL-capable components in the registry. -// This is a convenience function that finds L2ELNode, RollupBoostNode, and OPRBuilderNode. -func FindL2ELCapable(r *Registry) []L2ELCapable { - return RegistryFindByKindsTyped[L2ELCapable](r, L2ELCapableKinds()) -} - -// FindL2ELCapableOnChain returns all L2 EL-capable components on a specific chain. -func FindL2ELCapableOnChain(r *Registry, chainID eth.ChainID) []L2ELCapable { - return RegistryFindByCapabilityOnChain[L2ELCapable](r, chainID) -} - -// FindL2ELCapableByKey returns the first L2 EL-capable component with the given key and chainID. -// This enables the polymorphic lookup pattern where you want to find a node by key -// regardless of whether it's an L2ELNode, RollupBoostNode, or OPRBuilderNode. 
-func FindL2ELCapableByKey(r *Registry, key string, chainID eth.ChainID) (L2ELCapable, bool) { - for _, kind := range L2ELCapableKinds() { - id := NewComponentID(kind, key, chainID) - if component, ok := r.Get(id); ok { - if capable, ok := component.(L2ELCapable); ok { - return capable, true - } - } - } - return nil, false -} diff --git a/op-devstack/stack/capabilities_test.go b/op-devstack/stack/capabilities_test.go deleted file mode 100644 index b69758a0b9a99..0000000000000 --- a/op-devstack/stack/capabilities_test.go +++ /dev/null @@ -1,312 +0,0 @@ -package stack - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" -) - -// Mock implementations for testing capabilities - -type mockELNode struct { - chainID eth.ChainID -} - -func (m *mockELNode) T() devtest.T { return nil } -func (m *mockELNode) Logger() log.Logger { return nil } -func (m *mockELNode) Label(key string) string { return "" } -func (m *mockELNode) SetLabel(key, value string) {} -func (m *mockELNode) ChainID() eth.ChainID { return m.chainID } -func (m *mockELNode) EthClient() apis.EthClient { return nil } -func (m *mockELNode) TransactionTimeout() time.Duration { return 0 } - -type mockL2ELNode struct { - mockELNode - id L2ELNodeID -} - -func (m *mockL2ELNode) ID() L2ELNodeID { return m.id } -func (m *mockL2ELNode) L2EthClient() apis.L2EthClient { return nil } -func (m *mockL2ELNode) L2EngineClient() apis.EngineClient { return nil } -func (m *mockL2ELNode) RegistryID() ComponentID { return ConvertL2ELNodeID(m.id).ComponentID } - -var _ L2ELNode = (*mockL2ELNode)(nil) -var _ L2ELCapable = (*mockL2ELNode)(nil) -var _ Registrable = (*mockL2ELNode)(nil) - -type mockRollupBoostNode struct { - mockELNode - id 
RollupBoostNodeID -} - -func (m *mockRollupBoostNode) ID() RollupBoostNodeID { return m.id } -func (m *mockRollupBoostNode) L2EthClient() apis.L2EthClient { return nil } -func (m *mockRollupBoostNode) L2EngineClient() apis.EngineClient { return nil } -func (m *mockRollupBoostNode) FlashblocksClient() *client.WSClient { return nil } -func (m *mockRollupBoostNode) RegistryID() ComponentID { - return ConvertRollupBoostNodeID(m.id).ComponentID -} - -var _ RollupBoostNode = (*mockRollupBoostNode)(nil) -var _ L2ELCapable = (*mockRollupBoostNode)(nil) -var _ Registrable = (*mockRollupBoostNode)(nil) - -type mockOPRBuilderNode struct { - mockELNode - id OPRBuilderNodeID -} - -func (m *mockOPRBuilderNode) ID() OPRBuilderNodeID { return m.id } -func (m *mockOPRBuilderNode) L2EthClient() apis.L2EthClient { return nil } -func (m *mockOPRBuilderNode) L2EngineClient() apis.EngineClient { return nil } -func (m *mockOPRBuilderNode) FlashblocksClient() *client.WSClient { return nil } -func (m *mockOPRBuilderNode) RegistryID() ComponentID { - return ConvertOPRBuilderNodeID(m.id).ComponentID -} - -var _ OPRBuilderNode = (*mockOPRBuilderNode)(nil) -var _ L2ELCapable = (*mockOPRBuilderNode)(nil) -var _ Registrable = (*mockOPRBuilderNode)(nil) - -func TestL2ELCapableKinds(t *testing.T) { - kinds := L2ELCapableKinds() - require.Len(t, kinds, 3) - require.Contains(t, kinds, KindL2ELNode) - require.Contains(t, kinds, KindRollupBoostNode) - require.Contains(t, kinds, KindOPRBuilderNode) -} - -func TestRegistryFindByCapability(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - // Register different L2 EL-capable nodes - l2el := &mockL2ELNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewL2ELNodeID("sequencer", chainID), - } - rollupBoost := &mockRollupBoostNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewRollupBoostNodeID("boost", chainID), - } - oprBuilder := &mockOPRBuilderNode{ - mockELNode: mockELNode{chainID: chainID}, - id: 
NewOPRBuilderNodeID("builder", chainID), - } - - r.RegisterComponent(l2el) - r.RegisterComponent(rollupBoost) - r.RegisterComponent(oprBuilder) - - // Also register a non-L2EL component - r.Register(NewComponentID(KindL2Batcher, "batcher", chainID), "not-l2el-capable") - - // Find all L2ELCapable - capable := RegistryFindByCapability[L2ELCapable](r) - require.Len(t, capable, 3) -} - -func TestRegistryFindByCapabilityOnChain(t *testing.T) { - r := NewRegistry() - - chainID1 := eth.ChainIDFromUInt64(420) - chainID2 := eth.ChainIDFromUInt64(421) - - // Nodes on chain 420 - l2el1 := &mockL2ELNode{ - mockELNode: mockELNode{chainID: chainID1}, - id: NewL2ELNodeID("sequencer", chainID1), - } - rollupBoost1 := &mockRollupBoostNode{ - mockELNode: mockELNode{chainID: chainID1}, - id: NewRollupBoostNodeID("boost", chainID1), - } - - // Node on chain 421 - l2el2 := &mockL2ELNode{ - mockELNode: mockELNode{chainID: chainID2}, - id: NewL2ELNodeID("sequencer", chainID2), - } - - r.RegisterComponent(l2el1) - r.RegisterComponent(rollupBoost1) - r.RegisterComponent(l2el2) - - // Find on chain 420 - chain420 := RegistryFindByCapabilityOnChain[L2ELCapable](r, chainID1) - require.Len(t, chain420, 2) - - // Find on chain 421 - chain421 := RegistryFindByCapabilityOnChain[L2ELCapable](r, chainID2) - require.Len(t, chain421, 1) -} - -func TestFindL2ELCapable(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - l2el := &mockL2ELNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewL2ELNodeID("sequencer", chainID), - } - rollupBoost := &mockRollupBoostNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewRollupBoostNodeID("boost", chainID), - } - - r.RegisterComponent(l2el) - r.RegisterComponent(rollupBoost) - - capable := FindL2ELCapable(r) - require.Len(t, capable, 2) -} - -func TestFindL2ELCapableOnChain(t *testing.T) { - r := NewRegistry() - - chainID1 := eth.ChainIDFromUInt64(420) - chainID2 := eth.ChainIDFromUInt64(421) - - l2el1 := &mockL2ELNode{ 
- mockELNode: mockELNode{chainID: chainID1}, - id: NewL2ELNodeID("sequencer", chainID1), - } - l2el2 := &mockL2ELNode{ - mockELNode: mockELNode{chainID: chainID2}, - id: NewL2ELNodeID("sequencer", chainID2), - } - - r.RegisterComponent(l2el1) - r.RegisterComponent(l2el2) - - chain420 := FindL2ELCapableOnChain(r, chainID1) - require.Len(t, chain420, 1) - require.Equal(t, chainID1, chain420[0].ChainID()) -} - -func TestFindL2ELCapableByKey(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - // Register a RollupBoostNode with key "sequencer" - rollupBoost := &mockRollupBoostNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewRollupBoostNodeID("sequencer", chainID), - } - r.RegisterComponent(rollupBoost) - - // Should find it by key, even though it's not an L2ELNode - found, ok := FindL2ELCapableByKey(r, "sequencer", chainID) - require.True(t, ok) - require.NotNil(t, found) - require.Equal(t, chainID, found.ChainID()) - - // Should not find non-existent key - _, ok = FindL2ELCapableByKey(r, "nonexistent", chainID) - require.False(t, ok) -} - -func TestFindL2ELCapableByKey_PrefersL2ELNode(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - // Register both L2ELNode and RollupBoostNode with same key - l2el := &mockL2ELNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewL2ELNodeID("sequencer", chainID), - } - rollupBoost := &mockRollupBoostNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewRollupBoostNodeID("sequencer", chainID), - } - - r.RegisterComponent(l2el) - r.RegisterComponent(rollupBoost) - - // Should find L2ELNode first (it's first in L2ELCapableKinds) - found, ok := FindL2ELCapableByKey(r, "sequencer", chainID) - require.True(t, ok) - // Verify it's the L2ELNode by checking it's the right mock type - _, isL2EL := found.(*mockL2ELNode) - require.True(t, isL2EL, "expected to find L2ELNode first") -} - -func TestRegistryFindByKindsTyped(t *testing.T) { - r := NewRegistry() - - 
chainID := eth.ChainIDFromUInt64(420) - - l2el := &mockL2ELNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewL2ELNodeID("sequencer", chainID), - } - rollupBoost := &mockRollupBoostNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewRollupBoostNodeID("boost", chainID), - } - - r.RegisterComponent(l2el) - r.RegisterComponent(rollupBoost) - - // Find only L2ELNode kind - l2els := RegistryFindByKindsTyped[L2ELCapable](r, []ComponentKind{KindL2ELNode}) - require.Len(t, l2els, 1) - - // Find both kinds - both := RegistryFindByKindsTyped[L2ELCapable](r, []ComponentKind{KindL2ELNode, KindRollupBoostNode}) - require.Len(t, both, 2) -} - -// TestPolymorphicLookupScenario demonstrates the polymorphic lookup use case -// that Phase 3 is designed to solve. -func TestPolymorphicLookupScenario(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - // Scenario: A test wants to find an L2 EL node by key "sequencer" - // The actual node could be L2ELNode, RollupBoostNode, or OPRBuilderNode - // depending on the test configuration. - - // Configuration 1: Using RollupBoost - rollupBoost := &mockRollupBoostNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewRollupBoostNodeID("sequencer", chainID), - } - r.RegisterComponent(rollupBoost) - - // The polymorphic lookup finds the sequencer regardless of its concrete type - sequencer, ok := FindL2ELCapableByKey(r, "sequencer", chainID) - require.True(t, ok) - require.NotNil(t, sequencer) - - // Can use it as L2ELCapable - require.Equal(t, chainID, sequencer.ChainID()) - // Could call sequencer.L2EthClient(), sequencer.L2EngineClient(), etc. 
- - // Clear and try with OPRBuilder - r.Clear() - - oprBuilder := &mockOPRBuilderNode{ - mockELNode: mockELNode{chainID: chainID}, - id: NewOPRBuilderNodeID("sequencer", chainID), - } - r.RegisterComponent(oprBuilder) - - // Same lookup code works - sequencer, ok = FindL2ELCapableByKey(r, "sequencer", chainID) - require.True(t, ok) - require.NotNil(t, sequencer) - require.Equal(t, chainID, sequencer.ChainID()) -} diff --git a/op-devstack/stack/cluster.go b/op-devstack/stack/cluster.go index 09ae8ce557a5e..d1af1e22248cc 100644 --- a/op-devstack/stack/cluster.go +++ b/op-devstack/stack/cluster.go @@ -1,57 +1,14 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" ) -// ClusterID identifies a Cluster by name, is type-safe, and can be value-copied and used as map key. -type ClusterID genericID - -var _ GenericID = (*ClusterID)(nil) - -const ClusterKind Kind = "Cluster" - -func (id ClusterID) String() string { - return genericID(id).string(ClusterKind) -} - -func (id ClusterID) Kind() Kind { - return ClusterKind -} - -func (id ClusterID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id ClusterID) MarshalText() ([]byte, error) { - return genericID(id).marshalText(ClusterKind) -} - -func (id *ClusterID) UnmarshalText(data []byte) error { - return (*genericID)(id).unmarshalText(ClusterKind, data) -} - -func SortClusterIDs(ids []ClusterID) []ClusterID { - return copyAndSortCmp(ids) -} - -func SortClusters(elems []Cluster) []Cluster { - return copyAndSort(elems, lessElemOrdered[ClusterID, Cluster]) -} - -var _ ClusterMatcher = ClusterID("") - -func (id ClusterID) Match(elems []Cluster) []Cluster { - return findByID(id, elems) -} - // Cluster represents a set of chains that interop with each other. // This may include L1 chains (although potentially not two-way interop due to consensus-layer limitations). 
type Cluster interface { Common - ID() ClusterID + ID() ComponentID DependencySet() depset.DependencySet } diff --git a/op-devstack/stack/component_id.go b/op-devstack/stack/component_id.go index fdce22a6b1724..ce87817a391ba 100644 --- a/op-devstack/stack/component_id.go +++ b/op-devstack/stack/component_id.go @@ -2,6 +2,7 @@ package stack import ( "bytes" + "errors" "fmt" "log/slog" @@ -13,6 +14,44 @@ import ( // This is used in serialization to make each ID unique and type-safe. type ComponentKind string +var _ slog.LogValuer = (*ComponentKind)(nil) + +// ChainIDProvider presents a type that provides a relevant ChainID. +type ChainIDProvider interface { + ChainID() eth.ChainID +} + +// KindProvider presents a type that provides a relevant ComponentKind. E.g. KindL2Batcher. +type KindProvider interface { + Kind() ComponentKind +} + +// Keyed presents a type that provides a relevant string key. E.g. a named superchain. +type Keyed interface { + Key() string +} + +const maxIDLength = 100 + +var errInvalidID = errors.New("invalid ID") + +func (k ComponentKind) LogValue() slog.Value { + return slog.StringValue(string(k)) +} + +func (k ComponentKind) String() string { + return string(k) +} + +func (k ComponentKind) MarshalText() ([]byte, error) { + return []byte(k), nil +} + +func (k *ComponentKind) UnmarshalText(data []byte) error { + *k = ComponentKind(data) + return nil +} + // All component kinds. These values are used in serialization and must remain stable. const ( KindL1ELNode ComponentKind = "L1ELNode" @@ -234,304 +273,110 @@ func (id ComponentID) Less(other ComponentID) bool { return id.chainID.Cmp(other.chainID) < 0 } -// KindMarker is implemented by marker types to associate them with their ComponentKind. -// This enables type-safe unmarshaling of ID[T] types. -type KindMarker interface { - componentKind() ComponentKind -} - -// ID is a type-safe wrapper around ComponentID. -// The type parameter T must implement KindMarker to enable unmarshaling. 
-// This prevents accidentally mixing up different ID types (e.g., L2BatcherID vs L2ELNodeID). -type ID[T KindMarker] struct { - ComponentID -} - -// Marker types for each component kind. -// Each marker implements KindMarker to associate it with the correct ComponentKind. -type ( - L1ELNodeMarker struct{} - L1CLNodeMarker struct{} - L1NetworkMarker struct{} - L2ELNodeMarker struct{} - L2CLNodeMarker struct{} - L2NetworkMarker struct{} - L2BatcherMarker struct{} - L2ProposerMarker struct{} - L2ChallengerMarker struct{} - RollupBoostNodeMarker struct{} - OPRBuilderNodeMarker struct{} - FaucetMarker struct{} - SyncTesterMarker struct{} - SupervisorMarker struct{} - ConductorMarker struct{} - ClusterMarker struct{} - SuperchainMarker struct{} - TestSequencerMarker struct{} - FlashblocksClientMarker struct{} -) - -// KindMarker implementations for all marker types. -func (L1ELNodeMarker) componentKind() ComponentKind { return KindL1ELNode } -func (L1CLNodeMarker) componentKind() ComponentKind { return KindL1CLNode } -func (L1NetworkMarker) componentKind() ComponentKind { return KindL1Network } -func (L2ELNodeMarker) componentKind() ComponentKind { return KindL2ELNode } -func (L2CLNodeMarker) componentKind() ComponentKind { return KindL2CLNode } -func (L2NetworkMarker) componentKind() ComponentKind { return KindL2Network } -func (L2BatcherMarker) componentKind() ComponentKind { return KindL2Batcher } -func (L2ProposerMarker) componentKind() ComponentKind { return KindL2Proposer } -func (L2ChallengerMarker) componentKind() ComponentKind { return KindL2Challenger } -func (RollupBoostNodeMarker) componentKind() ComponentKind { return KindRollupBoostNode } -func (OPRBuilderNodeMarker) componentKind() ComponentKind { return KindOPRBuilderNode } -func (FaucetMarker) componentKind() ComponentKind { return KindFaucet } -func (SyncTesterMarker) componentKind() ComponentKind { return KindSyncTester } -func (SupervisorMarker) componentKind() ComponentKind { return KindSupervisor } 
-func (ConductorMarker) componentKind() ComponentKind { return KindConductor } -func (ClusterMarker) componentKind() ComponentKind { return KindCluster } -func (SuperchainMarker) componentKind() ComponentKind { return KindSuperchain } -func (TestSequencerMarker) componentKind() ComponentKind { return KindTestSequencer } -func (FlashblocksClientMarker) componentKind() ComponentKind { return KindFlashblocksClient } - -// Type-safe ID type aliases using marker types. -// These maintain backward compatibility with existing code. -type ( - L1ELNodeID2 = ID[L1ELNodeMarker] - L1CLNodeID2 = ID[L1CLNodeMarker] - L1NetworkID2 = ID[L1NetworkMarker] - L2ELNodeID2 = ID[L2ELNodeMarker] - L2CLNodeID2 = ID[L2CLNodeMarker] - L2NetworkID2 = ID[L2NetworkMarker] - L2BatcherID2 = ID[L2BatcherMarker] - L2ProposerID2 = ID[L2ProposerMarker] - L2ChallengerID2 = ID[L2ChallengerMarker] - RollupBoostNodeID2 = ID[RollupBoostNodeMarker] - OPRBuilderNodeID2 = ID[OPRBuilderNodeMarker] - FaucetID2 = ID[FaucetMarker] - SyncTesterID2 = ID[SyncTesterMarker] - SupervisorID2 = ID[SupervisorMarker] - ConductorID2 = ID[ConductorMarker] - ClusterID2 = ID[ClusterMarker] - SuperchainID2 = ID[SuperchainMarker] - TestSequencerID2 = ID[TestSequencerMarker] - FlashblocksClientID2 = ID[FlashblocksClientMarker] -) - -// Type-safe constructors for each ID type. - -func NewL1ELNodeID2(key string, chainID eth.ChainID) L1ELNodeID2 { - return L1ELNodeID2{NewComponentID(KindL1ELNode, key, chainID)} -} - -func NewL1CLNodeID2(key string, chainID eth.ChainID) L1CLNodeID2 { - return L1CLNodeID2{NewComponentID(KindL1CLNode, key, chainID)} -} - -func NewL1NetworkID2(chainID eth.ChainID) L1NetworkID2 { - return L1NetworkID2{NewComponentIDChainOnly(KindL1Network, chainID)} +// idMatcher wraps ComponentID to implement Matcher[E] for any component type. 
+type idMatcher[E Identifiable] struct { + id ComponentID } -func NewL2ELNodeID2(key string, chainID eth.ChainID) L2ELNodeID2 { - return L2ELNodeID2{NewComponentID(KindL2ELNode, key, chainID)} -} - -func NewL2CLNodeID2(key string, chainID eth.ChainID) L2CLNodeID2 { - return L2CLNodeID2{NewComponentID(KindL2CLNode, key, chainID)} -} - -func NewL2NetworkID2(chainID eth.ChainID) L2NetworkID2 { - return L2NetworkID2{NewComponentIDChainOnly(KindL2Network, chainID)} -} - -func NewL2BatcherID2(key string, chainID eth.ChainID) L2BatcherID2 { - return L2BatcherID2{NewComponentID(KindL2Batcher, key, chainID)} -} - -func NewL2ProposerID2(key string, chainID eth.ChainID) L2ProposerID2 { - return L2ProposerID2{NewComponentID(KindL2Proposer, key, chainID)} -} - -func NewL2ChallengerID2(key string, chainID eth.ChainID) L2ChallengerID2 { - return L2ChallengerID2{NewComponentID(KindL2Challenger, key, chainID)} -} - -func NewRollupBoostNodeID2(key string, chainID eth.ChainID) RollupBoostNodeID2 { - return RollupBoostNodeID2{NewComponentID(KindRollupBoostNode, key, chainID)} -} - -func NewOPRBuilderNodeID2(key string, chainID eth.ChainID) OPRBuilderNodeID2 { - return OPRBuilderNodeID2{NewComponentID(KindOPRBuilderNode, key, chainID)} -} - -func NewFaucetID2(key string, chainID eth.ChainID) FaucetID2 { - return FaucetID2{NewComponentID(KindFaucet, key, chainID)} -} - -func NewSyncTesterID2(key string, chainID eth.ChainID) SyncTesterID2 { - return SyncTesterID2{NewComponentID(KindSyncTester, key, chainID)} -} - -func NewSupervisorID2(key string) SupervisorID2 { - return SupervisorID2{NewComponentIDKeyOnly(KindSupervisor, key)} -} - -func NewConductorID2(key string) ConductorID2 { - return ConductorID2{NewComponentIDKeyOnly(KindConductor, key)} -} - -func NewClusterID2(key string) ClusterID2 { - return ClusterID2{NewComponentIDKeyOnly(KindCluster, key)} -} - -func NewSuperchainID2(key string) SuperchainID2 { - return SuperchainID2{NewComponentIDKeyOnly(KindSuperchain, key)} -} - -func 
NewTestSequencerID2(key string) TestSequencerID2 { - return TestSequencerID2{NewComponentIDKeyOnly(KindTestSequencer, key)} -} - -func NewFlashblocksClientID2(key string, chainID eth.ChainID) FlashblocksClientID2 { - return FlashblocksClientID2{NewComponentID(KindFlashblocksClient, key, chainID)} -} - -// ID methods that delegate to ComponentID but preserve type safety. - -// Kind returns the ComponentKind for this ID type. -// Unlike ComponentID.Kind(), this works even on zero values. -func (id ID[T]) Kind() ComponentKind { - var marker T - return marker.componentKind() -} - -func (id ID[T]) String() string { - return id.ComponentID.String() -} - -func (id ID[T]) LogValue() slog.Value { - return id.ComponentID.LogValue() -} - -func (id ID[T]) MarshalText() ([]byte, error) { - return id.ComponentID.MarshalText() -} - -func (id *ID[T]) UnmarshalText(data []byte) error { - var marker T - return id.ComponentID.unmarshalTextWithKind(marker.componentKind(), data) +func (m idMatcher[E]) Match(elems []E) []E { + for i, elem := range elems { + if elem.ID() == m.id { + return elems[i : i+1] + } + } + return nil } -// Less compares two IDs of the same type for sorting. -func (id ID[T]) Less(other ID[T]) bool { - return id.ComponentID.Less(other.ComponentID) +func (m idMatcher[E]) String() string { + return m.id.String() } -// SortIDs sorts a slice of IDs of any type. -func SortIDs[T KindMarker](ids []ID[T]) []ID[T] { - return copyAndSort(ids, func(a, b ID[T]) bool { - return a.Less(b) - }) +// ID returns the ComponentID this matcher wraps. +// This is used by shim.findMatch for direct registry lookup. +func (m idMatcher[E]) ID() ComponentID { + return m.id } -// AsComponentID returns the underlying ComponentID. -// This is useful when you need to work with IDs in a type-erased context. -func (id ID[T]) AsComponentID() ComponentID { - return id.ComponentID +// ByID creates a matcher for a specific ComponentID. 
+// This allows using a ComponentID as a matcher for any component type. +func ByID[E Identifiable](id ComponentID) Matcher[E] { + return idMatcher[E]{id: id} } -// Conversion helpers between old and new ID systems. -// These enable incremental migration from the old ID types to the new unified system. +// Convenience constructors for each component kind. -// ConvertL2BatcherID converts an old L2BatcherID to the new system. -func ConvertL2BatcherID(old L2BatcherID) L2BatcherID2 { - return NewL2BatcherID2(old.Key(), old.ChainID()) +func NewL1ELNodeID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindL1ELNode, key, chainID) } -// ConvertL2ELNodeID converts an old L2ELNodeID to the new system. -func ConvertL2ELNodeID(old L2ELNodeID) L2ELNodeID2 { - return NewL2ELNodeID2(old.Key(), old.ChainID()) +func NewL1CLNodeID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindL1CLNode, key, chainID) } -// ConvertL2CLNodeID converts an old L2CLNodeID to the new system. -func ConvertL2CLNodeID(old L2CLNodeID) L2CLNodeID2 { - return NewL2CLNodeID2(old.Key(), old.ChainID()) +func NewL1NetworkID(chainID eth.ChainID) ComponentID { + return NewComponentIDChainOnly(KindL1Network, chainID) } -// ConvertL1ELNodeID converts an old L1ELNodeID to the new system. -func ConvertL1ELNodeID(old L1ELNodeID) L1ELNodeID2 { - return NewL1ELNodeID2(old.Key(), old.ChainID()) +func NewL2ELNodeID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindL2ELNode, key, chainID) } -// ConvertL1CLNodeID converts an old L1CLNodeID to the new system. -func ConvertL1CLNodeID(old L1CLNodeID) L1CLNodeID2 { - return NewL1CLNodeID2(old.Key(), old.ChainID()) +func NewL2CLNodeID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindL2CLNode, key, chainID) } -// ConvertL1NetworkID converts an old L1NetworkID to the new system. 
-func ConvertL1NetworkID(old L1NetworkID) L1NetworkID2 { - return NewL1NetworkID2(old.ChainID()) +func NewL2NetworkID(chainID eth.ChainID) ComponentID { + return NewComponentIDChainOnly(KindL2Network, chainID) } -// ConvertL2NetworkID converts an old L2NetworkID to the new system. -func ConvertL2NetworkID(old L2NetworkID) L2NetworkID2 { - return NewL2NetworkID2(old.ChainID()) +func NewL2BatcherID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindL2Batcher, key, chainID) } -// ConvertL2ProposerID converts an old L2ProposerID to the new system. -func ConvertL2ProposerID(old L2ProposerID) L2ProposerID2 { - return NewL2ProposerID2(old.Key(), old.ChainID()) +func NewL2ProposerID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindL2Proposer, key, chainID) } -// ConvertL2ChallengerID converts an old L2ChallengerID to the new system. -func ConvertL2ChallengerID(old L2ChallengerID) L2ChallengerID2 { - return NewL2ChallengerID2(old.Key(), old.ChainID()) +func NewL2ChallengerID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindL2Challenger, key, chainID) } -// ConvertRollupBoostNodeID converts an old RollupBoostNodeID to the new system. -func ConvertRollupBoostNodeID(old RollupBoostNodeID) RollupBoostNodeID2 { - return NewRollupBoostNodeID2(old.Key(), old.ChainID()) +func NewRollupBoostNodeID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindRollupBoostNode, key, chainID) } -// ConvertOPRBuilderNodeID converts an old OPRBuilderNodeID to the new system. -func ConvertOPRBuilderNodeID(old OPRBuilderNodeID) OPRBuilderNodeID2 { - return NewOPRBuilderNodeID2(old.Key(), old.ChainID()) +func NewOPRBuilderNodeID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindOPRBuilderNode, key, chainID) } -// ConvertFaucetID converts an old FaucetID to the new system. 
-func ConvertFaucetID(old FaucetID) FaucetID2 { - return NewFaucetID2(old.Key(), old.ChainID()) +func NewFaucetID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindFaucet, key, chainID) } -// ConvertSyncTesterID converts an old SyncTesterID to the new system. -func ConvertSyncTesterID(old SyncTesterID) SyncTesterID2 { - return NewSyncTesterID2(old.Key(), old.ChainID()) +func NewSyncTesterID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindSyncTester, key, chainID) } -// ConvertSupervisorID converts an old SupervisorID to the new system. -func ConvertSupervisorID(old SupervisorID) SupervisorID2 { - return NewSupervisorID2(string(old)) +func NewSupervisorID(key string) ComponentID { + return NewComponentIDKeyOnly(KindSupervisor, key) } -// ConvertConductorID converts an old ConductorID to the new system. -func ConvertConductorID(old ConductorID) ConductorID2 { - return NewConductorID2(string(old)) +func NewConductorID(key string) ComponentID { + return NewComponentIDKeyOnly(KindConductor, key) } -// ConvertClusterID converts an old ClusterID to the new system. -func ConvertClusterID(old ClusterID) ClusterID2 { - return NewClusterID2(string(old)) +func NewClusterID(key string) ComponentID { + return NewComponentIDKeyOnly(KindCluster, key) } -// ConvertSuperchainID converts an old SuperchainID to the new system. -func ConvertSuperchainID(old SuperchainID) SuperchainID2 { - return NewSuperchainID2(string(old)) +func NewSuperchainID(key string) ComponentID { + return NewComponentIDKeyOnly(KindSuperchain, key) } -// ConvertTestSequencerID converts an old TestSequencerID to the new system. -func ConvertTestSequencerID(old TestSequencerID) TestSequencerID2 { - return NewTestSequencerID2(string(old)) +func NewTestSequencerID(key string) ComponentID { + return NewComponentIDKeyOnly(KindTestSequencer, key) } -// ConvertFlashblocksClientID converts an old FlashblocksWSClientID to the new system. 
-func ConvertFlashblocksClientID(old FlashblocksWSClientID) FlashblocksClientID2 { - return NewFlashblocksClientID2(idWithChain(old).key, old.ChainID()) +func NewFlashblocksWSClientID(key string, chainID eth.ChainID) ComponentID { + return NewComponentID(KindFlashblocksClient, key, chainID) } diff --git a/op-devstack/stack/component_id_test.go b/op-devstack/stack/component_id_test.go index f581fcbabd427..97209c9e978c3 100644 --- a/op-devstack/stack/component_id_test.go +++ b/op-devstack/stack/component_id_test.go @@ -1,6 +1,7 @@ package stack import ( + "slices" "testing" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -95,8 +96,8 @@ func TestID_TypeSafety(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) // Create two different ID types with same key and chainID - batcherID := NewL2BatcherID2("mynode", chainID) - elNodeID := NewL2ELNodeID2("mynode", chainID) + batcherID := NewL2BatcherID("mynode", chainID) + elNodeID := NewL2ELNodeID("mynode", chainID) // They should have different kinds require.Equal(t, KindL2Batcher, batcherID.Kind()) @@ -106,21 +107,21 @@ func TestID_TypeSafety(t *testing.T) { require.Equal(t, "L2Batcher-mynode-420", batcherID.String()) require.Equal(t, "L2ELNode-mynode-420", elNodeID.String()) - // The underlying ComponentIDs should be different due to kind - require.NotEqual(t, batcherID.AsComponentID(), elNodeID.AsComponentID()) + // The IDs should be different due to kind + require.NotEqual(t, batcherID, elNodeID) } func TestID_MarshalRoundTrip(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) - original := NewL2BatcherID2("mynode", chainID) + original := NewL2BatcherID("mynode", chainID) data, err := original.MarshalText() require.NoError(t, err) require.Equal(t, "L2Batcher-mynode-420", string(data)) - // Unmarshal into a zero value - Kind() should still work - var parsed L2BatcherID2 - require.Equal(t, KindL2Batcher, parsed.Kind()) // Works on zero value! 
+ // Unmarshal into a ComponentID with kind preset + var parsed ComponentID + parsed.kind = KindL2Batcher // Must set kind before unmarshal err = parsed.UnmarshalText(data) require.NoError(t, err) @@ -128,8 +129,9 @@ func TestID_MarshalRoundTrip(t *testing.T) { } func TestID_UnmarshalKindMismatch(t *testing.T) { - // Try to unmarshal an L2ELNode ID into an L2Batcher ID - var batcherID L2BatcherID2 + // Try to unmarshal an L2ELNode ID into a ComponentID expecting L2Batcher + var batcherID ComponentID + batcherID.kind = KindL2Batcher err := batcherID.UnmarshalText([]byte("L2ELNode-mynode-420")) require.Error(t, err) require.Contains(t, err.Error(), "unexpected kind") @@ -137,7 +139,7 @@ func TestID_UnmarshalKindMismatch(t *testing.T) { func TestID_ChainOnlyTypes(t *testing.T) { chainID := eth.ChainIDFromUInt64(1) - networkID := NewL1NetworkID2(chainID) + networkID := NewL1NetworkID(chainID) require.Equal(t, KindL1Network, networkID.Kind()) require.Equal(t, chainID, networkID.ChainID()) @@ -146,14 +148,15 @@ func TestID_ChainOnlyTypes(t *testing.T) { data, err := networkID.MarshalText() require.NoError(t, err) - var parsed L1NetworkID2 + var parsed ComponentID + parsed.kind = KindL1Network // Must set kind before unmarshal err = parsed.UnmarshalText(data) require.NoError(t, err) require.Equal(t, networkID, parsed) } func TestID_KeyOnlyTypes(t *testing.T) { - supervisorID := NewSupervisorID2("mysupervisor") + supervisorID := NewSupervisorID("mysupervisor") require.Equal(t, KindSupervisor, supervisorID.Kind()) require.Equal(t, "mysupervisor", supervisorID.Key()) @@ -162,7 +165,8 @@ func TestID_KeyOnlyTypes(t *testing.T) { data, err := supervisorID.MarshalText() require.NoError(t, err) - var parsed SupervisorID2 + var parsed ComponentID + parsed.kind = KindSupervisor // Must set kind before unmarshal err = parsed.UnmarshalText(data) require.NoError(t, err) require.Equal(t, supervisorID, parsed) @@ -172,14 +176,24 @@ func TestID_Sorting(t *testing.T) { chainID1 := 
eth.ChainIDFromUInt64(100) chainID2 := eth.ChainIDFromUInt64(200) - ids := []L2BatcherID2{ - NewL2BatcherID2("charlie", chainID1), - NewL2BatcherID2("alice", chainID1), - NewL2BatcherID2("alice", chainID2), - NewL2BatcherID2("bob", chainID1), + ids := []ComponentID{ + NewL2BatcherID("charlie", chainID1), + NewL2BatcherID("alice", chainID1), + NewL2BatcherID("alice", chainID2), + NewL2BatcherID("bob", chainID1), } - sorted := SortIDs(ids) + // Sort using the ID's comparison + sorted := slices.Clone(ids) + slices.SortFunc(sorted, func(a, b ComponentID) int { + if a.Less(b) { + return -1 + } + if b.Less(a) { + return 1 + } + return 0 + }) // Should be sorted by key first, then by chainID require.Equal(t, "alice", sorted[0].Key()) @@ -194,10 +208,10 @@ func TestID_MapKey(t *testing.T) { chainID := eth.ChainIDFromUInt64(420) // IDs should work as map keys - m := make(map[L2BatcherID2]string) + m := make(map[ComponentID]string) - id1 := NewL2BatcherID2("node1", chainID) - id2 := NewL2BatcherID2("node2", chainID) + id1 := NewL2BatcherID("node1", chainID) + id2 := NewL2BatcherID("node2", chainID) m[id1] = "value1" m[id2] = "value2" @@ -206,7 +220,7 @@ func TestID_MapKey(t *testing.T) { require.Equal(t, "value2", m[id2]) // Same key+chainID should retrieve same value - id1Copy := NewL2BatcherID2("node1", chainID) + id1Copy := NewL2BatcherID("node1", chainID) require.Equal(t, "value1", m[id1Copy]) } @@ -219,25 +233,25 @@ func TestAllIDTypes(t *testing.T) { id interface{ Kind() ComponentKind } expected ComponentKind }{ - {"L1ELNode", NewL1ELNodeID2("node", chainID), KindL1ELNode}, - {"L1CLNode", NewL1CLNodeID2("node", chainID), KindL1CLNode}, - {"L1Network", NewL1NetworkID2(chainID), KindL1Network}, - {"L2ELNode", NewL2ELNodeID2("node", chainID), KindL2ELNode}, - {"L2CLNode", NewL2CLNodeID2("node", chainID), KindL2CLNode}, - {"L2Network", NewL2NetworkID2(chainID), KindL2Network}, - {"L2Batcher", NewL2BatcherID2("node", chainID), KindL2Batcher}, - {"L2Proposer", 
NewL2ProposerID2("node", chainID), KindL2Proposer}, - {"L2Challenger", NewL2ChallengerID2("node", chainID), KindL2Challenger}, - {"RollupBoostNode", NewRollupBoostNodeID2("node", chainID), KindRollupBoostNode}, - {"OPRBuilderNode", NewOPRBuilderNodeID2("node", chainID), KindOPRBuilderNode}, - {"Faucet", NewFaucetID2("node", chainID), KindFaucet}, - {"SyncTester", NewSyncTesterID2("node", chainID), KindSyncTester}, - {"Supervisor", NewSupervisorID2("node"), KindSupervisor}, - {"Conductor", NewConductorID2("node"), KindConductor}, - {"Cluster", NewClusterID2("node"), KindCluster}, - {"Superchain", NewSuperchainID2("node"), KindSuperchain}, - {"TestSequencer", NewTestSequencerID2("node"), KindTestSequencer}, - {"FlashblocksClient", NewFlashblocksClientID2("node", chainID), KindFlashblocksClient}, + {"L1ELNode", NewL1ELNodeID("node", chainID), KindL1ELNode}, + {"L1CLNode", NewL1CLNodeID("node", chainID), KindL1CLNode}, + {"L1Network", NewL1NetworkID(chainID), KindL1Network}, + {"L2ELNode", NewL2ELNodeID("node", chainID), KindL2ELNode}, + {"L2CLNode", NewL2CLNodeID("node", chainID), KindL2CLNode}, + {"L2Network", NewL2NetworkID(chainID), KindL2Network}, + {"L2Batcher", NewL2BatcherID("node", chainID), KindL2Batcher}, + {"L2Proposer", NewL2ProposerID("node", chainID), KindL2Proposer}, + {"L2Challenger", NewL2ChallengerID("node", chainID), KindL2Challenger}, + {"RollupBoostNode", NewRollupBoostNodeID("node", chainID), KindRollupBoostNode}, + {"OPRBuilderNode", NewOPRBuilderNodeID("node", chainID), KindOPRBuilderNode}, + {"Faucet", NewFaucetID("node", chainID), KindFaucet}, + {"SyncTester", NewSyncTesterID("node", chainID), KindSyncTester}, + {"Supervisor", NewSupervisorID("node"), KindSupervisor}, + {"Conductor", NewConductorID("node"), KindConductor}, + {"Cluster", NewClusterID("node"), KindCluster}, + {"Superchain", NewSuperchainID("node"), KindSuperchain}, + {"TestSequencer", NewTestSequencerID("node"), KindTestSequencer}, + {"FlashblocksClient", 
NewFlashblocksWSClientID("node", chainID), KindFlashblocksClient}, } for _, tt := range tests { @@ -258,11 +272,11 @@ func TestSerializationCompatibility(t *testing.T) { id interface{ MarshalText() ([]byte, error) } expected string }{ - {"L2Batcher", NewL2BatcherID2("mynode", chainID), "L2Batcher-mynode-420"}, - {"L2ELNode", NewL2ELNodeID2("mynode", chainID), "L2ELNode-mynode-420"}, - {"L1Network", NewL1NetworkID2(eth.ChainIDFromUInt64(1)), "L1Network-1"}, - {"Supervisor", NewSupervisorID2("mysupervisor"), "Supervisor-mysupervisor"}, - {"RollupBoostNode", NewRollupBoostNodeID2("boost", chainID), "RollupBoostNode-boost-420"}, + {"L2Batcher", NewL2BatcherID("mynode", chainID), "L2Batcher-mynode-420"}, + {"L2ELNode", NewL2ELNodeID("mynode", chainID), "L2ELNode-mynode-420"}, + {"L1Network", NewL1NetworkID(eth.ChainIDFromUInt64(1)), "L1Network-1"}, + {"Supervisor", NewSupervisorID("mysupervisor"), "Supervisor-mysupervisor"}, + {"RollupBoostNode", NewRollupBoostNodeID("boost", chainID), "RollupBoostNode-boost-420"}, } for _, tt := range tests { @@ -273,61 +287,3 @@ func TestSerializationCompatibility(t *testing.T) { }) } } - -// TestConversionHelpers verifies that conversion between old and new ID systems -// preserves all data correctly. 
-func TestConversionHelpers(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - - t.Run("L2BatcherID", func(t *testing.T) { - old := NewL2BatcherID("mynode", chainID) - new := ConvertL2BatcherID(old) - require.Equal(t, KindL2Batcher, new.Kind()) - require.Equal(t, "mynode", new.Key()) - require.Equal(t, chainID, new.ChainID()) - require.Equal(t, old.String(), new.String()) - }) - - t.Run("L2ELNodeID", func(t *testing.T) { - old := NewL2ELNodeID("mynode", chainID) - new := ConvertL2ELNodeID(old) - require.Equal(t, KindL2ELNode, new.Kind()) - require.Equal(t, "mynode", new.Key()) - require.Equal(t, chainID, new.ChainID()) - require.Equal(t, old.String(), new.String()) - }) - - t.Run("L1NetworkID", func(t *testing.T) { - old := L1NetworkID(eth.ChainIDFromUInt64(1)) - new := ConvertL1NetworkID(old) - require.Equal(t, KindL1Network, new.Kind()) - require.Equal(t, eth.ChainIDFromUInt64(1), new.ChainID()) - require.Equal(t, old.String(), new.String()) - }) - - t.Run("SupervisorID", func(t *testing.T) { - old := SupervisorID("mysupervisor") - new := ConvertSupervisorID(old) - require.Equal(t, KindSupervisor, new.Kind()) - require.Equal(t, "mysupervisor", new.Key()) - require.Equal(t, old.String(), new.String()) - }) - - t.Run("RollupBoostNodeID", func(t *testing.T) { - old := NewRollupBoostNodeID("boost", chainID) - new := ConvertRollupBoostNodeID(old) - require.Equal(t, KindRollupBoostNode, new.Kind()) - require.Equal(t, "boost", new.Key()) - require.Equal(t, chainID, new.ChainID()) - require.Equal(t, old.String(), new.String()) - }) - - t.Run("FlashblocksWSClientID", func(t *testing.T) { - old := NewFlashblocksWSClientID("fbclient", chainID) - new := ConvertFlashblocksClientID(old) - require.Equal(t, KindFlashblocksClient, new.Kind()) - require.Equal(t, "fbclient", new.Key()) - require.Equal(t, chainID, new.ChainID()) - require.Equal(t, old.String(), new.String()) - }) -} diff --git a/op-devstack/stack/component_registry.go b/op-devstack/stack/component_registry.go 
index 8bb9c43d7b643..b1bf913383f21 100644 --- a/op-devstack/stack/component_registry.go +++ b/op-devstack/stack/component_registry.go @@ -22,3 +22,222 @@ type ComponentRegistry interface { // Returns an empty slice if no components of that kind exist. ComponentIDs(kind ComponentKind) []ComponentID } + +// --- Free functions for typed component access --- +// These functions provide type-safe access to components without requiring +// type-specific methods on every container interface. + +// GetComponent returns a typed component from a registry by ID. +// Returns (component, true) if found and type matches, (nil/zero, false) otherwise. +func GetComponent[T any](r ComponentRegistry, id ComponentID) (T, bool) { + comp, ok := r.Component(id) + if !ok { + var zero T + return zero, false + } + typed, ok := comp.(T) + return typed, ok +} + +// GetComponentsByKind returns all components of a given kind, typed. +// Only components that match the expected type are returned. +func GetComponentsByKind[T any](r ComponentRegistry, kind ComponentKind) []T { + comps := r.Components(kind) + result := make([]T, 0, len(comps)) + for _, comp := range comps { + if typed, ok := comp.(T); ok { + result = append(result, typed) + } + } + return result +} + +// --- Typed getter free functions for L2Network components --- + +// GetL2BatcherByID returns an L2Batcher from a network by ID. +func GetL2BatcherByID(n L2Network, id ComponentID) (L2Batcher, bool) { + return GetComponent[L2Batcher](n, id) +} + +// GetL2ProposerByID returns an L2Proposer from a network by ID. +func GetL2ProposerByID(n L2Network, id ComponentID) (L2Proposer, bool) { + return GetComponent[L2Proposer](n, id) +} + +// GetL2ChallengerByID returns an L2Challenger from a network by ID. +func GetL2ChallengerByID(n L2Network, id ComponentID) (L2Challenger, bool) { + return GetComponent[L2Challenger](n, id) +} + +// GetL2CLNodeByID returns an L2CLNode from a network by ID. 
+func GetL2CLNodeByID(n L2Network, id ComponentID) (L2CLNode, bool) { + return GetComponent[L2CLNode](n, id) +} + +// GetL2ELNodeByID returns an L2ELNode from a network by ID. +func GetL2ELNodeByID(n L2Network, id ComponentID) (L2ELNode, bool) { + return GetComponent[L2ELNode](n, id) +} + +// GetConductorByID returns a Conductor from a network by ID. +func GetConductorByID(n L2Network, id ComponentID) (Conductor, bool) { + return GetComponent[Conductor](n, id) +} + +// GetRollupBoostNodeByID returns a RollupBoostNode from a network by ID. +func GetRollupBoostNodeByID(n L2Network, id ComponentID) (RollupBoostNode, bool) { + return GetComponent[RollupBoostNode](n, id) +} + +// GetOPRBuilderNodeByID returns an OPRBuilderNode from a network by ID. +func GetOPRBuilderNodeByID(n L2Network, id ComponentID) (OPRBuilderNode, bool) { + return GetComponent[OPRBuilderNode](n, id) +} + +// --- Typed getter free functions for L1Network components --- + +// GetL1ELNodeByID returns an L1ELNode from a network by ID. +func GetL1ELNodeByID(n L1Network, id ComponentID) (L1ELNode, bool) { + return GetComponent[L1ELNode](n, id) +} + +// GetL1CLNodeByID returns an L1CLNode from a network by ID. +func GetL1CLNodeByID(n L1Network, id ComponentID) (L1CLNode, bool) { + return GetComponent[L1CLNode](n, id) +} + +// --- Typed getter free functions for Network components (shared by L1 and L2) --- + +// GetFaucetByID returns a Faucet from a network by ID. +func GetFaucetByID(n Network, id ComponentID) (Faucet, bool) { + return GetComponent[Faucet](n, id) +} + +// GetSyncTesterByID returns a SyncTester from a network by ID. +func GetSyncTesterByID(n Network, id ComponentID) (SyncTester, bool) { + return GetComponent[SyncTester](n, id) +} + +// --- Typed getter free functions for System components --- + +// GetSuperchainByID returns a Superchain from a system by ID. 
+func GetSuperchainByID(s System, id ComponentID) (Superchain, bool) { + return GetComponent[Superchain](s, id) +} + +// GetClusterByID returns a Cluster from a system by ID. +func GetClusterByID(s System, id ComponentID) (Cluster, bool) { + return GetComponent[Cluster](s, id) +} + +// GetL1NetworkByID returns an L1Network from a system by ID. +func GetL1NetworkByID(s System, id ComponentID) (L1Network, bool) { + return GetComponent[L1Network](s, id) +} + +// GetL2NetworkByID returns an L2Network from a system by ID. +func GetL2NetworkByID(s System, id ComponentID) (L2Network, bool) { + return GetComponent[L2Network](s, id) +} + +// GetSupervisorByID returns a Supervisor from a system by ID. +func GetSupervisorByID(s System, id ComponentID) (Supervisor, bool) { + return GetComponent[Supervisor](s, id) +} + +// GetTestSequencerByID returns a TestSequencer from a system by ID. +func GetTestSequencerByID(s System, id ComponentID) (TestSequencer, bool) { + return GetComponent[TestSequencer](s, id) +} + +// --- List getter free functions --- + +// GetL2Batchers returns all L2Batchers from a network. +func GetL2Batchers(n L2Network) []L2Batcher { + return GetComponentsByKind[L2Batcher](n, KindL2Batcher) +} + +// GetL2Proposers returns all L2Proposers from a network. +func GetL2Proposers(n L2Network) []L2Proposer { + return GetComponentsByKind[L2Proposer](n, KindL2Proposer) +} + +// GetL2Challengers returns all L2Challengers from a network. +func GetL2Challengers(n L2Network) []L2Challenger { + return GetComponentsByKind[L2Challenger](n, KindL2Challenger) +} + +// GetL2CLNodes returns all L2CLNodes from a network. +func GetL2CLNodes(n L2Network) []L2CLNode { + return GetComponentsByKind[L2CLNode](n, KindL2CLNode) +} + +// GetL2ELNodes returns all L2ELNodes from a network. +func GetL2ELNodes(n L2Network) []L2ELNode { + return GetComponentsByKind[L2ELNode](n, KindL2ELNode) +} + +// GetConductors returns all Conductors from a network. 
+func GetConductors(n L2Network) []Conductor { + return GetComponentsByKind[Conductor](n, KindConductor) +} + +// GetRollupBoostNodes returns all RollupBoostNodes from a network. +func GetRollupBoostNodes(n L2Network) []RollupBoostNode { + return GetComponentsByKind[RollupBoostNode](n, KindRollupBoostNode) +} + +// GetOPRBuilderNodes returns all OPRBuilderNodes from a network. +func GetOPRBuilderNodes(n L2Network) []OPRBuilderNode { + return GetComponentsByKind[OPRBuilderNode](n, KindOPRBuilderNode) +} + +// GetL1ELNodes returns all L1ELNodes from a network. +func GetL1ELNodes(n L1Network) []L1ELNode { + return GetComponentsByKind[L1ELNode](n, KindL1ELNode) +} + +// GetL1CLNodes returns all L1CLNodes from a network. +func GetL1CLNodes(n L1Network) []L1CLNode { + return GetComponentsByKind[L1CLNode](n, KindL1CLNode) +} + +// GetFaucets returns all Faucets from a network. +func GetFaucets(n Network) []Faucet { + return GetComponentsByKind[Faucet](n, KindFaucet) +} + +// GetSyncTesters returns all SyncTesters from a network. +func GetSyncTesters(n Network) []SyncTester { + return GetComponentsByKind[SyncTester](n, KindSyncTester) +} + +// GetSuperchains returns all Superchains from a system. +func GetSuperchains(s System) []Superchain { + return GetComponentsByKind[Superchain](s, KindSuperchain) +} + +// GetClusters returns all Clusters from a system. +func GetClusters(s System) []Cluster { + return GetComponentsByKind[Cluster](s, KindCluster) +} + +// GetL1Networks returns all L1Networks from a system. +func GetL1Networks(s System) []L1Network { + return GetComponentsByKind[L1Network](s, KindL1Network) +} + +// GetL2Networks returns all L2Networks from a system. +func GetL2Networks(s System) []L2Network { + return GetComponentsByKind[L2Network](s, KindL2Network) +} + +// GetSupervisors returns all Supervisors from a system. 
+func GetSupervisors(s System) []Supervisor { + return GetComponentsByKind[Supervisor](s, KindSupervisor) +} + +// GetTestSequencers returns all TestSequencers from a system. +func GetTestSequencers(s System) []TestSequencer { + return GetComponentsByKind[TestSequencer](s, KindTestSequencer) +} diff --git a/op-devstack/stack/conductor.go b/op-devstack/stack/conductor.go index 951f4fd9ee210..7f5720cbfd056 100644 --- a/op-devstack/stack/conductor.go +++ b/op-devstack/stack/conductor.go @@ -4,39 +4,9 @@ import ( conductorRpc "github.com/ethereum-optimism/optimism/op-conductor/rpc" ) -type ConductorID genericID - -const ConductorKind Kind = "Conductor" - -func (id ConductorID) String() string { - return genericID(id).string(ConductorKind) -} - -func (id ConductorID) MarshalText() ([]byte, error) { - return genericID(id).marshalText(ConductorKind) -} - -func (id *ConductorID) UnmarshalText(data []byte) error { - return (*genericID)(id).unmarshalText(ConductorKind, data) -} - -func SortConductorIDs(ids []ConductorID) []ConductorID { - return copyAndSortCmp(ids) -} - -func SortConductors(elems []Conductor) []Conductor { - return copyAndSort(elems, lessElemOrdered[ConductorID, Conductor]) -} - -var _ ConductorMatcher = ConductorID("") - -func (id ConductorID) Match(elems []Conductor) []Conductor { - return findByID(id, elems) -} - type Conductor interface { Common - ID() ConductorID + ID() ComponentID RpcAPI() conductorRpc.API } diff --git a/op-devstack/stack/context.go b/op-devstack/stack/context.go index 5f7c070cedaf2..1ff61b0921bcf 100644 --- a/op-devstack/stack/context.go +++ b/op-devstack/stack/context.go @@ -9,20 +9,20 @@ import ( ) // ContextWithKind annotates the context with the given kind of service -func ContextWithKind(ctx context.Context, kind Kind) context.Context { +func ContextWithKind(ctx context.Context, kind ComponentKind) context.Context { return logfilter.AddLogAttrToContext(ctx, "kind", kind) } // KindFromContext extracts the kind from the context. 
-func KindFromContext(ctx context.Context) Kind { - v, _ := logfilter.ValueFromContext[Kind](ctx, "kind") +func KindFromContext(ctx context.Context) ComponentKind { + v, _ := logfilter.ValueFromContext[ComponentKind](ctx, "kind") return v } // KindSelector creates a log-filter that applies the given inner log-filter only if it matches the given kind. // For logs of the specified kind, it applies the inner filter. // For logs of other kinds, it returns false (filters them out). -func KindSelector(kind Kind) logfilter.Selector { +func KindSelector(kind ComponentKind) logfilter.Selector { return logfilter.Select("kind", kind) } @@ -48,7 +48,11 @@ func ChainIDSelector(chainID eth.ChainID) logfilter.Selector { // This also automatically attaches the chain ID and component kind to the context, if available from the ID. func ContextWithID(ctx context.Context, id slog.LogValuer) context.Context { if idWithChainID, ok := id.(ChainIDProvider); ok { - ctx = ContextWithChainID(ctx, idWithChainID.ChainID()) + chainID := idWithChainID.ChainID() + // Only set chain ID if it's non-zero (i.e., the ID type actually has a meaningful chain ID) + if chainID != (eth.ChainID{}) { + ctx = ContextWithChainID(ctx, chainID) + } } if idWithKind, ok := id.(KindProvider); ok { ctx = ContextWithKind(ctx, idWithKind.Kind()) diff --git a/op-devstack/stack/context_test.go b/op-devstack/stack/context_test.go index 885464767e45e..8e28729bb3862 100644 --- a/op-devstack/stack/context_test.go +++ b/op-devstack/stack/context_test.go @@ -22,28 +22,26 @@ func TestContext(t *testing.T) { require.Equal(t, chainB, ChainIDFromContext(ContextWithChainID(ContextWithChainID(ctx, chainA), chainB)), "priority") }) t.Run("kind", func(t *testing.T) { - require.Equal(t, Kind(""), KindFromContext(ctx), "none") - require.Equal(t, L2BatcherKind, KindFromContext(ContextWithKind(ctx, L2BatcherKind)), "lookup") - require.Equal(t, L2ProposerKind, KindFromContext(ContextWithKind(ContextWithKind(ctx, L2BatcherKind), 
L2ProposerKind)), "priority") + require.Equal(t, ComponentKind(""), KindFromContext(ctx), "none") + require.Equal(t, KindL2Batcher, KindFromContext(ContextWithKind(ctx, KindL2Batcher)), "lookup") + require.Equal(t, KindL2Proposer, KindFromContext(ContextWithKind(ContextWithKind(ctx, KindL2Batcher), KindL2Proposer)), "priority") }) t.Run("id", func(t *testing.T) { - require.Equal(t, L2BatcherID{}, IDFromContext[L2BatcherID](ctx), "none") - require.Equal(t, SuperchainID(""), IDFromContext[SuperchainID](ctx), "none") - id1 := L2BatcherID{ - key: "batcherA", - chainID: chainA, - } + require.Equal(t, ComponentID{}, IDFromContext[ComponentID](ctx), "none") + id1 := NewL2BatcherID("batcherA", chainA) ctx1 := ContextWithID(ctx, id1) - require.Equal(t, L2BatcherKind, KindFromContext(ctx1), "lookup kind") + require.Equal(t, KindL2Batcher, KindFromContext(ctx1), "lookup kind") require.Equal(t, chainA, ChainIDFromContext(ctx1), "lookup chainID") - require.Equal(t, id1, IDFromContext[L2BatcherID](ctx1), "lookup ID") + require.Equal(t, id1, IDFromContext[ComponentID](ctx1), "lookup ID") // now overlay another different kind of ID on top - id2 := SuperchainID("foobar") + id2 := NewSuperchainID("foobar") ctx2 := ContextWithID(ctx1, id2) - require.Equal(t, SuperchainKind, KindFromContext(ctx2), "lookup kind") + require.Equal(t, KindSuperchain, KindFromContext(ctx2), "lookup kind") require.Equal(t, chainA, ChainIDFromContext(ctx2), "chainID still preserved") - require.Equal(t, id2, IDFromContext[SuperchainID](ctx2), "lookup ID") - require.Equal(t, L2BatcherID{}, IDFromContext[L2BatcherID](ctx2), "batcher ID not available") + require.Equal(t, id2, IDFromContext[ComponentID](ctx2), "lookup ID - now shows superchain") + // With type aliases, IDFromContext returns the stored ComponentID regardless of "type" + // The Kind() method can be used to check the actual kind of ID + require.Equal(t, KindSuperchain, IDFromContext[ComponentID](ctx2).Kind(), "id kind check") }) } @@ -58,20 +56,17 
@@ func TestLogFilter(t *testing.T) { require.Equal(t, tri.Undefined, fn(ContextWithChainID(ctx, chainB), log.LevelDebug), "different chain should be shown") }) t.Run("kind", func(t *testing.T) { - fn := KindSelector(L2BatcherKind).Mute() + fn := KindSelector(KindL2Batcher).Mute() require.Equal(t, tri.Undefined, fn(ctx, log.LevelDebug), "regular context should be false") - require.Equal(t, tri.False, fn(ContextWithKind(ctx, L2BatcherKind), log.LevelDebug), "detected kind should be muted") - require.Equal(t, tri.Undefined, fn(ContextWithKind(ctx, L2ProposerKind), log.LevelDebug), "different kind should be shown") + require.Equal(t, tri.False, fn(ContextWithKind(ctx, KindL2Batcher), log.LevelDebug), "detected kind should be muted") + require.Equal(t, tri.Undefined, fn(ContextWithKind(ctx, KindL2Proposer), log.LevelDebug), "different kind should be shown") }) t.Run("id", func(t *testing.T) { - id1 := L2BatcherID{ - key: "batcherA", - chainID: chainA, - } + id1 := NewL2BatcherID("batcherA", chainA) fn := IDSelector(id1).Mute() require.Equal(t, tri.Undefined, fn(ctx, log.LevelDebug), "regular context should be false") require.Equal(t, tri.False, fn(ContextWithID(ctx, id1), log.LevelDebug), "detected id should be muted") - id2 := SuperchainID("foobar") + id2 := NewSuperchainID("foobar") require.Equal(t, tri.Undefined, fn(ContextWithID(ctx, id2), log.LevelDebug), "different id should be shown") }) } diff --git a/op-devstack/stack/faucet.go b/op-devstack/stack/faucet.go index 16a869dcc952e..fe590c798c5be 100644 --- a/op-devstack/stack/faucet.go +++ b/op-devstack/stack/faucet.go @@ -1,74 +1,11 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// FaucetID identifies a Faucet by name and chainID, is type-safe, and can be value-copied and used as map key. 
-type FaucetID idWithChain - -var _ IDWithChain = (*FaucetID)(nil) - -const FaucetKind Kind = "Faucet" - -func NewFaucetID(key string, chainID eth.ChainID) FaucetID { - return FaucetID{ - key: key, - chainID: chainID, - } -} - -func (id FaucetID) String() string { - return idWithChain(id).string(FaucetKind) -} - -func (id FaucetID) ChainID() eth.ChainID { - return idWithChain(id).chainID -} - -func (id FaucetID) Kind() Kind { - return FaucetKind -} - -func (id FaucetID) Key() string { - return id.key -} - -func (id FaucetID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id FaucetID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(FaucetKind) -} - -func (id *FaucetID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(FaucetKind, data) -} - -func SortFaucetIDs(ids []FaucetID) []FaucetID { - return copyAndSort(ids, func(a, b FaucetID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortFaucets(elems []Faucet) []Faucet { - return copyAndSort(elems, func(a, b Faucet) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ FaucetMatcher = FaucetID{} - -func (id FaucetID) Match(elems []Faucet) []Faucet { - return findByID(id, elems) -} - type Faucet interface { Common - ID() FaucetID + ID() ComponentID API() apis.Faucet } diff --git a/op-devstack/stack/fb_ws_client.go b/op-devstack/stack/fb_ws_client.go index 9da423dcbf3f3..54e2b95dae2e0 100644 --- a/op-devstack/stack/fb_ws_client.go +++ b/op-devstack/stack/fb_ws_client.go @@ -1,7 +1,6 @@ package stack import ( - "log/slog" "net/http" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -10,54 +9,7 @@ import ( type FlashblocksWSClient interface { Common ChainID() eth.ChainID - ID() FlashblocksWSClientID + ID() ComponentID WsUrl() string WsHeaders() http.Header } - -type FlashblocksWSClientID idWithChain - -const FlashblocksWSClientKind Kind = "FlashblocksWSClient" - -func 
NewFlashblocksWSClientID(key string, chainID eth.ChainID) FlashblocksWSClientID { - return FlashblocksWSClientID{ - key: key, - chainID: chainID, - } -} - -func (id FlashblocksWSClientID) String() string { - return idWithChain(id).string(FlashblocksWSClientKind) -} - -func (id FlashblocksWSClientID) ChainID() eth.ChainID { - return idWithChain(id).chainID -} - -func (id FlashblocksWSClientID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(FlashblocksWSClientKind) -} - -func (id FlashblocksWSClientID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id *FlashblocksWSClientID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(FlashblocksWSClientKind, data) -} - -func SortFlashblocksWSClientIDs(ids []FlashblocksWSClientID) []FlashblocksWSClientID { - return copyAndSort(ids, func(a, b FlashblocksWSClientID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortFlashblocksWSClients(elems []FlashblocksWSClient) []FlashblocksWSClient { - return copyAndSort(elems, func(a, b FlashblocksWSClient) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -func (id FlashblocksWSClientID) Match(elems []FlashblocksWSClient) []FlashblocksWSClient { - return findByID(id, elems) -} diff --git a/op-devstack/stack/id.go b/op-devstack/stack/id.go deleted file mode 100644 index a871b535049a0..0000000000000 --- a/op-devstack/stack/id.go +++ /dev/null @@ -1,217 +0,0 @@ -package stack - -import ( - "bytes" - "cmp" - "errors" - "fmt" - "log/slog" - "slices" - "sort" - - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// Kind represents a kind of component, this is used to make each ID unique, even when encoded as text. 
-type Kind string - -var _ slog.LogValuer = (*Kind)(nil) - -func (k Kind) LogValue() slog.Value { - return slog.StringValue(string(k)) -} - -func (k Kind) String() string { - return string(k) -} - -func (k Kind) MarshalText() ([]byte, error) { - return []byte(k), nil -} - -func (k *Kind) UnmarshalText(data []byte) error { - *k = Kind(data) - return nil -} - -// ChainIDProvider presents a type that provides a relevant ChainID. -type ChainIDProvider interface { - ChainID() eth.ChainID -} - -// KindProvider presents a type that provides a relevant Kind. E.g. an L2BatcherKind. -type KindProvider interface { - Kind() Kind -} - -// Keyed presents a type that provides a relevant string key. E.g. a named superchain. -type Keyed interface { - Key() string -} - -const maxIDLength = 100 - -var errInvalidID = errors.New("invalid ID") - -// Defined types based on idWithChain should implement this interface so they may be used as logging attributes. -type IDWithChain interface { - slog.LogValuer - ChainIDProvider - KindProvider - Keyed -} - -// idWithChain is comparable, can be copied, contains a chain-ID, -// and has type-safe text encoding/decoding to prevent accidental mixups. 
-type idWithChain struct { - key string - chainID eth.ChainID -} - -func (id idWithChain) string(kind Kind) string { - return fmt.Sprintf("%s-%s-%s", kind, id.key, id.chainID) -} - -func (id idWithChain) marshalText(kind Kind) ([]byte, error) { - k := string(id.key) - if len(k) > maxIDLength { - return nil, errInvalidID - } - k = fmt.Sprintf("%s-%s-%s", kind, k, id.chainID) - return []byte(k), nil -} - -func (id *idWithChain) unmarshalText(kind Kind, data []byte) error { - kindData, mainData, ok := bytes.Cut(data, []byte("-")) - if !ok { - return fmt.Errorf("expected kind-prefix, but id has none: %q", data) - } - if x := string(kindData); x != string(kind) { - return fmt.Errorf("id %q has unexpected kind %q, expected %q", string(data), x, kind) - } - before, after, ok := bytes.Cut(mainData, []byte("-")) - if !ok { - return fmt.Errorf("expected chain separator, but found none: %q", string(data)) - } - var chainID eth.ChainID - if err := chainID.UnmarshalText(after); err != nil { - return fmt.Errorf("failed to unmarshal chain part: %w", err) - } - if len(before) > maxIDLength { - return errInvalidID - } - id.key = string(before) - id.chainID = chainID - return nil -} - -// Defined types based on idOnlyChainID should implement this interface so they may be used as logging attributes. -type IDOnlyChainID interface { - slog.LogValuer - ChainIDProvider - KindProvider -} - -// idChainID is comparable, can be copied, contains only a chain-ID, -// and has type-safe text encoding/decoding to prevent accidental mixups. 
-type idOnlyChainID eth.ChainID - -func (id idOnlyChainID) string(kind Kind) string { - return fmt.Sprintf("%s-%s", kind, eth.ChainID(id)) -} - -func (id idOnlyChainID) marshalText(kind Kind) ([]byte, error) { - k := fmt.Sprintf("%s-%s", kind, eth.ChainID(id)) - return []byte(k), nil -} - -func (id *idOnlyChainID) unmarshalText(kind Kind, data []byte) error { - kindData, mainData, ok := bytes.Cut(data, []byte("-")) - if !ok { - return fmt.Errorf("expected kind-prefix, but id has none: %q", data) - } - if x := string(kindData); x != string(kind) { - return fmt.Errorf("id %q has unexpected kind %q, expected %q", string(data), x, kind) - } - var chainID eth.ChainID - if err := chainID.UnmarshalText(mainData); err != nil { - return fmt.Errorf("failed to unmarshal chain part: %w", err) - } - *id = idOnlyChainID(chainID) - return nil -} - -// Defined types based on genericID should implement this interface so they may be used as logging attributes. -type GenericID interface { - slog.LogValuer - KindProvider -} - -// genericID is comparable, can be copied, -// and has type-safe text encoding/decoding to prevent accidental mixups. 
-type genericID string - -func (id genericID) string(kind Kind) string { - return fmt.Sprintf("%s-%s", kind, string(id)) -} - -func (id genericID) marshalText(kind Kind) ([]byte, error) { - if len(id) > maxIDLength { - return nil, errInvalidID - } - return []byte(fmt.Sprintf("%s-%s", kind, string(id))), nil -} - -func (id *genericID) unmarshalText(kind Kind, data []byte) error { - kindData, mainData, ok := bytes.Cut(data, []byte("-")) - if !ok { - return fmt.Errorf("expected kind-prefix, but id has none: %q", data) - } - if x := string(kindData); x != string(kind) { - return fmt.Errorf("id %q has unexpected kind %q, expected %q", string(data), x, kind) - } - if len(mainData) > maxIDLength { - return errInvalidID - } - *id = genericID(mainData) - return nil -} - -// copyAndSort helps copy and sort a slice of objects with the given less function -func copyAndSort[V ~[]E, E any](vs V, lessFn func(a, b E) bool) V { - out := slices.Clone(vs) - sort.Slice(out, func(i, j int) bool { - a := out[i] - b := out[j] - return lessFn(a, b) - }) - return out -} - -// lessIDWithChain is a helper function to compare two idWithChain objects. -// It does not use generics, since idWithChain is a concrete type with struct fields and no accessor methods in the types that wrap this type. -func lessIDWithChain(a, b idWithChain) bool { - if a.key > b.key { - return false - } - if a.key == b.key { - return a.chainID.Cmp(b.chainID) < 0 - } - return true -} - -// lessIDOnlyChainID is a helper function to compare two idOnlyChainID objects. -func lessIDOnlyChainID(a, b idOnlyChainID) bool { - return eth.ChainID(a).Cmp(eth.ChainID(b)) < 0 -} - -func lessElemOrdered[I cmp.Ordered, E Identifiable[I]](a, b E) bool { - return a.ID() < b.ID() -} - -// copyAndSortCmp is a helper function to copy and sort a slice of elements that are already natively comparable. 
-func copyAndSortCmp[V ~[]E, E cmp.Ordered](vs V) V { - out := slices.Clone(vs) - slices.Sort(out) - return out -} diff --git a/op-devstack/stack/l1_cl.go b/op-devstack/stack/l1_cl.go index 7a3356f16743d..a1e3ce3734ae7 100644 --- a/op-devstack/stack/l1_cl.go +++ b/op-devstack/stack/l1_cl.go @@ -1,77 +1,14 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// L1CLNodeID identifies a L1CLNode by name and chainID, is type-safe, and can be value-copied and used as map key. -type L1CLNodeID idWithChain - -var _ IDWithChain = (*L1CLNodeID)(nil) - -const L1CLNodeKind Kind = "L1CLNode" - -func NewL1CLNodeID(key string, chainID eth.ChainID) L1CLNodeID { - return L1CLNodeID{ - key: key, - chainID: chainID, - } -} - -func (id L1CLNodeID) String() string { - return idWithChain(id).string(L1CLNodeKind) -} - -func (id L1CLNodeID) Kind() Kind { - return L1CLNodeKind -} - -func (id L1CLNodeID) ChainID() eth.ChainID { - return id.chainID -} - -func (id L1CLNodeID) Key() string { - return id.key -} - -func (id L1CLNodeID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L1CLNodeID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(L1CLNodeKind) -} - -func (id *L1CLNodeID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(L1CLNodeKind, data) -} - -func SortL1CLNodeIDs(ids []L1CLNodeID) []L1CLNodeID { - return copyAndSort(ids, func(a, b L1CLNodeID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortL1CLNodes(elems []L1CLNode) []L1CLNode { - return copyAndSort(elems, func(a, b L1CLNode) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ L1CLMatcher = L1CLNodeID{} - -func (id L1CLNodeID) Match(elems []L1CLNode) []L1CLNode { - return findByID(id, elems) -} - // L1CLNode is a L1 ethereum consensus-layer node, aka Beacon node. 
// This node may not be a full beacon node, and instead run a mock L1 consensus node. type L1CLNode interface { Common - ID() L1CLNodeID + ID() ComponentID BeaconClient() apis.BeaconClient } diff --git a/op-devstack/stack/l1_el.go b/op-devstack/stack/l1_el.go index f51ee5075e79f..ec65e6241ddc9 100644 --- a/op-devstack/stack/l1_el.go +++ b/op-devstack/stack/l1_el.go @@ -1,74 +1,8 @@ package stack -import ( - "log/slog" - - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// L1ELNodeID identifies a L1ELNode by name and chainID, is type-safe, and can be value-copied and used as map key. -type L1ELNodeID idWithChain - -var _ IDWithChain = (*L1ELNodeID)(nil) - -const L1ELNodeKind Kind = "L1ELNode" - -func NewL1ELNodeID(key string, chainID eth.ChainID) L1ELNodeID { - return L1ELNodeID{ - key: key, - chainID: chainID, - } -} - -func (id L1ELNodeID) String() string { - return idWithChain(id).string(L1ELNodeKind) -} - -func (id L1ELNodeID) ChainID() eth.ChainID { - return id.chainID -} - -func (id L1ELNodeID) Kind() Kind { - return L1ELNodeKind -} - -func (id L1ELNodeID) Key() string { - return id.key -} - -func (id L1ELNodeID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L1ELNodeID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(L1ELNodeKind) -} - -func (id *L1ELNodeID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(L1ELNodeKind, data) -} - -func SortL1ELNodeIDs(ids []L1ELNodeID) []L1ELNodeID { - return copyAndSort(ids, func(a, b L1ELNodeID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortL1ELNodes(elems []L1ELNode) []L1ELNode { - return copyAndSort(elems, func(a, b L1ELNode) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ L1ELMatcher = L1ELNodeID{} - -func (id L1ELNodeID) Match(elems []L1ELNode) []L1ELNode { - return findByID(id, elems) -} - // L1ELNode is a L1 ethereum execution-layer node type 
L1ELNode interface { - ID() L1ELNodeID + ID() ComponentID ELNode } diff --git a/op-devstack/stack/l1_network.go b/op-devstack/stack/l1_network.go index 10e09877f64ca..ce124772ef6ba 100644 --- a/op-devstack/stack/l1_network.go +++ b/op-devstack/stack/l1_network.go @@ -1,70 +1,15 @@ package stack -import ( - "log/slog" - - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// L1NetworkID identifies a L1Network by name and chainID, is type-safe, and can be value-copied and used as map key. -type L1NetworkID idOnlyChainID - -var _ IDOnlyChainID = (*L1NetworkID)(nil) - -const L1NetworkKind Kind = "L1Network" - -func (id L1NetworkID) Kind() Kind { - return L1NetworkKind -} - -func (id L1NetworkID) ChainID() eth.ChainID { - return eth.ChainID(id) -} - -func (id L1NetworkID) String() string { - return idOnlyChainID(id).string(L1NetworkKind) -} - -func (id L1NetworkID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L1NetworkID) MarshalText() ([]byte, error) { - return idOnlyChainID(id).marshalText(L1NetworkKind) -} - -func (id *L1NetworkID) UnmarshalText(data []byte) error { - return (*idOnlyChainID)(id).unmarshalText(L1NetworkKind, data) -} - -func SortL1NetworkIDs(ids []L1NetworkID) []L1NetworkID { - return copyAndSort(ids, func(a, b L1NetworkID) bool { - return lessIDOnlyChainID(idOnlyChainID(a), idOnlyChainID(b)) - }) -} - -func SortL1Networks(elems []L1Network) []L1Network { - return copyAndSort(elems, func(a, b L1Network) bool { - return lessIDOnlyChainID(idOnlyChainID(a.ID()), idOnlyChainID(b.ID())) - }) -} - -var _ L1NetworkMatcher = L1NetworkID{} - -func (id L1NetworkID) Match(elems []L1Network) []L1Network { - return findByID(id, elems) -} - // L1Network represents a L1 chain, a collection of configuration and node resources. 
type L1Network interface { Network - ID() L1NetworkID + ID() ComponentID L1ELNode(m L1ELMatcher) L1ELNode L1CLNode(m L1CLMatcher) L1CLNode - L1ELNodeIDs() []L1ELNodeID - L1CLNodeIDs() []L1CLNodeID + L1ELNodeIDs() []ComponentID + L1CLNodeIDs() []ComponentID L1ELNodes() []L1ELNode L1CLNodes() []L1CLNode diff --git a/op-devstack/stack/l2_batcher.go b/op-devstack/stack/l2_batcher.go index 6978de7213d75..04af98094282f 100644 --- a/op-devstack/stack/l2_batcher.go +++ b/op-devstack/stack/l2_batcher.go @@ -1,75 +1,12 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// L2BatcherID identifies a L2Batcher by name and chainID, is type-safe, and can be value-copied and used as map key. -type L2BatcherID idWithChain - -var _ IDWithChain = (*L2BatcherID)(nil) - -const L2BatcherKind Kind = "L2Batcher" - -func NewL2BatcherID(key string, chainID eth.ChainID) L2BatcherID { - return L2BatcherID{ - key: key, - chainID: chainID, - } -} - -func (id L2BatcherID) String() string { - return idWithChain(id).string(L2BatcherKind) -} - -func (id L2BatcherID) ChainID() eth.ChainID { - return id.chainID -} - -func (id L2BatcherID) Kind() Kind { - return L2BatcherKind -} - -func (id L2BatcherID) Key() string { - return id.key -} - -func (id L2BatcherID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L2BatcherID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(L2BatcherKind) -} - -func (id *L2BatcherID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(L2BatcherKind, data) -} - -func SortL2BatcherIDs(ids []L2BatcherID) []L2BatcherID { - return copyAndSort(ids, func(a, b L2BatcherID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortL2Batchers(elems []L2Batcher) []L2Batcher { - return copyAndSort(elems, func(a, b L2Batcher) bool { - return lessIDWithChain(idWithChain(a.ID()), 
idWithChain(b.ID())) - }) -} - -var _ L2BatcherMatcher = L2BatcherID{} - -func (id L2BatcherID) Match(elems []L2Batcher) []L2Batcher { - return findByID(id, elems) -} - // L2Batcher represents an L2 batch-submission service, posting L2 data of an L2 to L1. type L2Batcher interface { Common - ID() L2BatcherID + ID() ComponentID ActivityAPI() apis.BatcherActivity } diff --git a/op-devstack/stack/l2_challenger.go b/op-devstack/stack/l2_challenger.go index c1caded684881..7d9f9d2dfc2c4 100644 --- a/op-devstack/stack/l2_challenger.go +++ b/op-devstack/stack/l2_challenger.go @@ -1,74 +1,11 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-challenger/config" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// L2ChallengerID identifies a L2Challenger by name and chainID, is type-safe, and can be value-copied and used as map key. -type L2ChallengerID idWithChain - -var _ IDWithChain = (*L2ChallengerID)(nil) - -const L2ChallengerKind Kind = "L2Challenger" - -func NewL2ChallengerID(key string, chainID eth.ChainID) L2ChallengerID { - return L2ChallengerID{ - key: key, - chainID: chainID, - } -} - -func (id L2ChallengerID) String() string { - return idWithChain(id).string(L2ChallengerKind) -} - -func (id L2ChallengerID) ChainID() eth.ChainID { - return idWithChain(id).chainID -} - -func (id L2ChallengerID) Kind() Kind { - return L2ChallengerKind -} - -func (id L2ChallengerID) Key() string { - return id.key -} - -func (id L2ChallengerID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L2ChallengerID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(L2ChallengerKind) -} - -func (id *L2ChallengerID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(L2ChallengerKind, data) -} - -func SortL2ChallengerIDs(ids []L2ChallengerID) []L2ChallengerID { - return copyAndSort(ids, func(a, b L2ChallengerID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) 
-} - -func SortL2Challengers(elems []L2Challenger) []L2Challenger { - return copyAndSort(elems, func(a, b L2Challenger) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ L2ChallengerMatcher = L2ChallengerID{} - -func (id L2ChallengerID) Match(elems []L2Challenger) []L2Challenger { - return findByID(id, elems) -} - type L2Challenger interface { Common - ID() L2ChallengerID + ID() ComponentID Config() *config.Config } diff --git a/op-devstack/stack/l2_cl.go b/op-devstack/stack/l2_cl.go index 8005cb3d28c7d..14d6d0695d465 100644 --- a/op-devstack/stack/l2_cl.go +++ b/op-devstack/stack/l2_cl.go @@ -1,77 +1,15 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" ) -// L2CLNodeID identifies a L2CLNode by name and chainID, is type-safe, and can be value-copied and used as map key. -type L2CLNodeID idWithChain - -var _ IDWithChain = L2CLNodeID{} - -const L2CLNodeKind Kind = "L2CLNode" - -func NewL2CLNodeID(key string, chainID eth.ChainID) L2CLNodeID { - return L2CLNodeID{ - key: key, - chainID: chainID, - } -} - -func (id L2CLNodeID) String() string { - return idWithChain(id).string(L2CLNodeKind) -} - -func (id L2CLNodeID) ChainID() eth.ChainID { - return id.chainID -} - -func (id L2CLNodeID) Kind() Kind { - return L2CLNodeKind -} - -func (id L2CLNodeID) Key() string { - return id.key -} - -func (id L2CLNodeID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L2CLNodeID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(L2CLNodeKind) -} - -func (id *L2CLNodeID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(L2CLNodeKind, data) -} - -func SortL2CLNodeIDs(ids []L2CLNodeID) []L2CLNodeID { - return copyAndSort(ids, func(a, b L2CLNodeID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} 
- -func SortL2CLNodes(elems []L2CLNode) []L2CLNode { - return copyAndSort(elems, func(a, b L2CLNode) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ L2CLMatcher = L2CLNodeID{} - -func (id L2CLNodeID) Match(elems []L2CLNode) []L2CLNode { - return findByID(id, elems) -} - // L2CLNode is a L2 ethereum consensus-layer node type L2CLNode interface { Common - ID() L2CLNodeID + ID() ComponentID ClientRPC() client.RPC RollupAPI() apis.RollupClient diff --git a/op-devstack/stack/l2_el.go b/op-devstack/stack/l2_el.go index 7b5e3e0781544..4678cb084ec0c 100644 --- a/op-devstack/stack/l2_el.go +++ b/op-devstack/stack/l2_el.go @@ -1,75 +1,12 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// L2ELNodeID identifies a L2ELNode by name and chainID, is type-safe, and can be value-copied and used as map key. -type L2ELNodeID idWithChain - -var _ IDWithChain = (*L2ELNodeID)(nil) - -const L2ELNodeKind Kind = "L2ELNode" - -func NewL2ELNodeID(key string, chainID eth.ChainID) L2ELNodeID { - return L2ELNodeID{ - key: key, - chainID: chainID, - } -} - -func (id L2ELNodeID) String() string { - return idWithChain(id).string(L2ELNodeKind) -} - -func (id L2ELNodeID) ChainID() eth.ChainID { - return id.chainID -} - -func (id L2ELNodeID) Kind() Kind { - return L2ELNodeKind -} - -func (id L2ELNodeID) Key() string { - return id.key -} - -func (id L2ELNodeID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L2ELNodeID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(L2ELNodeKind) -} - -func (id *L2ELNodeID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(L2ELNodeKind, data) -} - -func SortL2ELNodeIDs(ids []L2ELNodeID) []L2ELNodeID { - return copyAndSort(ids, func(a, b L2ELNodeID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func 
SortL2ELNodes(elems []L2ELNode) []L2ELNode { - return copyAndSort(elems, func(a, b L2ELNode) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ L2ELMatcher = L2ELNodeID{} - -func (id L2ELNodeID) Match(elems []L2ELNode) []L2ELNode { - return findByID(id, elems) -} - // L2ELNode is a L2 ethereum execution-layer node type L2ELNode interface { - ID() L2ELNodeID + ID() ComponentID L2EthClient() apis.L2EthClient L2EngineClient() apis.EngineClient diff --git a/op-devstack/stack/l2_network.go b/op-devstack/stack/l2_network.go index d49f26750b64f..7fb0f010b9840 100644 --- a/op-devstack/stack/l2_network.go +++ b/op-devstack/stack/l2_network.go @@ -2,64 +2,13 @@ package stack import ( "crypto/ecdsa" - "log/slog" "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// L2NetworkID identifies a L2Network by name and chainID, is type-safe, and can be value-copied and used as map key. 
-type L2NetworkID idOnlyChainID - -var _ IDOnlyChainID = (*L2NetworkID)(nil) - -const L2NetworkKind Kind = "L2Network" - -func (id L2NetworkID) ChainID() eth.ChainID { - return eth.ChainID(id) -} - -func (id L2NetworkID) Kind() Kind { - return L2NetworkKind -} - -func (id L2NetworkID) String() string { - return idOnlyChainID(id).string(L2NetworkKind) -} - -func (id L2NetworkID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L2NetworkID) MarshalText() ([]byte, error) { - return idOnlyChainID(id).marshalText(L2NetworkKind) -} - -func (id *L2NetworkID) UnmarshalText(data []byte) error { - return (*idOnlyChainID)(id).unmarshalText(L2NetworkKind, data) -} - -func SortL2NetworkIDs(ids []L2NetworkID) []L2NetworkID { - return copyAndSort(ids, func(a, b L2NetworkID) bool { - return lessIDOnlyChainID(idOnlyChainID(a), idOnlyChainID(b)) - }) -} - -func SortL2Networks(elems []L2Network) []L2Network { - return copyAndSort(elems, func(a, b L2Network) bool { - return lessIDOnlyChainID(idOnlyChainID(a.ID()), idOnlyChainID(b.ID())) - }) -} - -var _ L2NetworkMatcher = L2NetworkID{} - -func (id L2NetworkID) Match(elems []L2Network) []L2Network { - return findByID(id, elems) -} - type L2Deployment interface { SystemConfigProxyAddr() common.Address DisputeGameFactoryProxyAddr() common.Address @@ -76,7 +25,7 @@ type Keys interface { // There is an extension-interface ExtensibleL2Network for adding new components to the chain. 
type L2Network interface { Network - ID() L2NetworkID + ID() ComponentID RollupConfig() *rollup.Config Deployment() L2Deployment Keys() Keys @@ -94,11 +43,11 @@ type L2Network interface { RollupBoostNode(m RollupBoostNodeMatcher) RollupBoostNode OPRBuilderNode(m OPRBuilderNodeMatcher) OPRBuilderNode - L2BatcherIDs() []L2BatcherID - L2ProposerIDs() []L2ProposerID - L2ChallengerIDs() []L2ChallengerID - L2CLNodeIDs() []L2CLNodeID - L2ELNodeIDs() []L2ELNodeID + L2BatcherIDs() []ComponentID + L2ProposerIDs() []ComponentID + L2ChallengerIDs() []ComponentID + L2CLNodeIDs() []ComponentID + L2ELNodeIDs() []ComponentID L2Batchers() []L2Batcher L2Proposers() []L2Proposer diff --git a/op-devstack/stack/l2_proposer.go b/op-devstack/stack/l2_proposer.go index 4109b5ed07f8e..71e8dbe58cc18 100644 --- a/op-devstack/stack/l2_proposer.go +++ b/op-devstack/stack/l2_proposer.go @@ -1,73 +1,7 @@ package stack -import ( - "log/slog" - - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// L2ProposerID identifies a L2Proposer by name and chainID, is type-safe, and can be value-copied and used as map key. 
-type L2ProposerID idWithChain - -var _ IDWithChain = (*L2ProposerID)(nil) - -const L2ProposerKind Kind = "L2Proposer" - -func NewL2ProposerID(key string, chainID eth.ChainID) L2ProposerID { - return L2ProposerID{ - key: key, - chainID: chainID, - } -} - -func (id L2ProposerID) String() string { - return idWithChain(id).string(L2ProposerKind) -} - -func (id L2ProposerID) ChainID() eth.ChainID { - return idWithChain(id).chainID -} - -func (id L2ProposerID) Kind() Kind { - return L2ProposerKind -} - -func (id L2ProposerID) Key() string { - return id.key -} - -func (id L2ProposerID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id L2ProposerID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(L2ProposerKind) -} - -func (id *L2ProposerID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(L2ProposerKind, data) -} - -func SortL2ProposerIDs(ids []L2ProposerID) []L2ProposerID { - return copyAndSort(ids, func(a, b L2ProposerID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortL2Proposers(elems []L2Proposer) []L2Proposer { - return copyAndSort(elems, func(a, b L2Proposer) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ L2ProposerMatcher = L2ProposerID{} - -func (id L2ProposerID) Match(elems []L2Proposer) []L2Proposer { - return findByID(id, elems) -} - // L2Proposer is a L2 output proposer, posting claims of L2 state to L1. type L2Proposer interface { Common - ID() L2ProposerID + ID() ComponentID } diff --git a/op-devstack/stack/match/archive.go b/op-devstack/stack/match/archive.go index 84f6a1eb14263..dac4ca2febc58 100644 --- a/op-devstack/stack/match/archive.go +++ b/op-devstack/stack/match/archive.go @@ -16,8 +16,8 @@ import ( // // Either assumption being false could result in false positives or false negatives. Note that // there is also a race condition where assumption (2) becomes true after the function returns. 
-func WithArchive(ctx context.Context) stack.Matcher[stack.L2ELNodeID, stack.L2ELNode] { - return MatchElemFn[stack.L2ELNodeID, stack.L2ELNode](func(elem stack.L2ELNode) bool { +func WithArchive(ctx context.Context) stack.Matcher[stack.L2ELNode] { + return MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { if _, err := elem.L2EthClient().BlockRefByNumber(ctx, 1); err != nil { // The devnet is fresh. This is almost guaranteed to be a devnet created by sysgo, // which always uses archive mode. diff --git a/op-devstack/stack/match/core.go b/op-devstack/stack/match/core.go index b1bf06a6e77c8..073d607fa5b1a 100644 --- a/op-devstack/stack/match/core.go +++ b/op-devstack/stack/match/core.go @@ -7,24 +7,23 @@ import ( ) // MatchFn implements stack.Matcher, checking all elements at once. -type MatchFn[I comparable, E stack.Identifiable[I]] func(elems []E) []E +type MatchFn[E stack.Identifiable] func(elems []E) []E -func (m MatchFn[I, E]) Match(elems []E) []E { +func (m MatchFn[E]) Match(elems []E) []E { return m(elems) } -func (m MatchFn[I, E]) String() string { - var id I +func (m MatchFn[E]) String() string { var x E - return fmt.Sprintf("MatchFn[%T, %T]", id, x) + return fmt.Sprintf("MatchFn[%T]", x) } -var _ stack.Matcher[stack.L2NetworkID, stack.L2Network] = MatchFn[stack.L2NetworkID, stack.L2Network](nil) +var _ stack.Matcher[stack.L2Network] = MatchFn[stack.L2Network](nil) // MatchElemFn implements stack.Matcher, checking one element at a time. 
-type MatchElemFn[I comparable, E stack.Identifiable[I]] func(elem E) bool +type MatchElemFn[E stack.Identifiable] func(elem E) bool -func (m MatchElemFn[I, E]) Match(elems []E) (out []E) { +func (m MatchElemFn[E]) Match(elems []E) (out []E) { for _, elem := range elems { if m(elem) { out = append(out, elem) @@ -33,10 +32,9 @@ func (m MatchElemFn[I, E]) Match(elems []E) (out []E) { return out } -func (m MatchElemFn[I, E]) String() string { - var id I +func (m MatchElemFn[E]) String() string { var x E - return fmt.Sprintf("MatchElemFn[%T, %T]", id, x) + return fmt.Sprintf("MatchElemFn[%T]", x) } -var _ stack.Matcher[stack.L2NetworkID, stack.L2Network] = MatchElemFn[stack.L2NetworkID, stack.L2Network](nil) +var _ stack.Matcher[stack.L2Network] = MatchElemFn[stack.L2Network](nil) diff --git a/op-devstack/stack/match/engine.go b/op-devstack/stack/match/engine.go index a9bd93413e6a8..a0eca09d00e8b 100644 --- a/op-devstack/stack/match/engine.go +++ b/op-devstack/stack/match/engine.go @@ -4,20 +4,22 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" ) -func WithEngine(engine stack.L2ELNodeID) stack.Matcher[stack.L2CLNodeID, stack.L2CLNode] { - return MatchElemFn[stack.L2CLNodeID, stack.L2CLNode](func(elem stack.L2CLNode) bool { +func WithEngine(engine stack.ComponentID) stack.Matcher[stack.L2CLNode] { + return MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { for _, el := range elem.ELs() { if el.ID() == engine { return true } } - rbID := stack.RollupBoostNodeID(engine) + // Check RollupBoost nodes with matching key/chainID + rbID := stack.NewRollupBoostNodeID(engine.Key(), engine.ChainID()) for _, rb := range elem.RollupBoostNodes() { - if rb.ID().ChainID() == rbID.ChainID() { + if rb.ID() == rbID { return true } } - oprbID := stack.OPRBuilderNodeID(engine) + // Check OPRBuilder nodes with matching key/chainID + oprbID := stack.NewOPRBuilderNodeID(engine.Key(), engine.ChainID()) for _, oprb := range elem.OPRBuilderNodes() { if oprb.ID() == 
oprbID { return true @@ -27,20 +29,22 @@ func WithEngine(engine stack.L2ELNodeID) stack.Matcher[stack.L2CLNodeID, stack.L }) } -func EngineFor(cl stack.L2CLNode) stack.Matcher[stack.L2ELNodeID, stack.L2ELNode] { - return MatchElemFn[stack.L2ELNodeID, stack.L2ELNode](func(elem stack.L2ELNode) bool { +func EngineFor(cl stack.L2CLNode) stack.Matcher[stack.L2ELNode] { + return MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { for _, el := range cl.ELs() { if el.ID() == elem.ID() { return true } } - rbID := stack.RollupBoostNodeID(elem.ID()) + // Check RollupBoost nodes with matching key/chainID + rbID := stack.NewRollupBoostNodeID(elem.ID().Key(), elem.ID().ChainID()) for _, rb := range cl.RollupBoostNodes() { - if rb.ID().ChainID() == rbID.ChainID() { + if rb.ID() == rbID { return true } } - oprbID := stack.OPRBuilderNodeID(elem.ID()) + // Check OPRBuilder nodes with matching key/chainID + oprbID := stack.NewOPRBuilderNodeID(elem.ID().Key(), elem.ID().ChainID()) for _, oprb := range cl.OPRBuilderNodes() { if oprb.ID() == oprbID { return true diff --git a/op-devstack/stack/match/first.go b/op-devstack/stack/match/first.go index 085c47918b014..56af94e80abb3 100644 --- a/op-devstack/stack/match/first.go +++ b/op-devstack/stack/match/first.go @@ -2,26 +2,26 @@ package match import "github.com/ethereum-optimism/optimism/op-devstack/stack" -var FirstL2EL = First[stack.L2ELNodeID, stack.L2ELNode]() -var FirstL2CL = First[stack.L2CLNodeID, stack.L2CLNode]() -var FirstL2Batcher = First[stack.L2BatcherID, stack.L2Batcher]() -var FirstL2Proposer = First[stack.L2ProposerID, stack.L2Proposer]() -var FirstL2Challenger = First[stack.L2ChallengerID, stack.L2Challenger]() +var FirstL2EL = First[stack.L2ELNode]() +var FirstL2CL = First[stack.L2CLNode]() +var FirstL2Batcher = First[stack.L2Batcher]() +var FirstL2Proposer = First[stack.L2Proposer]() +var FirstL2Challenger = First[stack.L2Challenger]() -var FirstTestSequencer = First[stack.TestSequencerID, 
stack.TestSequencer]() -var FirstSupervisor = First[stack.SupervisorID, stack.Supervisor]() -var FirstSupernode = First[stack.SupernodeID, stack.Supernode]() +var FirstTestSequencer = First[stack.TestSequencer]() +var FirstSupervisor = First[stack.Supervisor]() +var FirstSupernode = First[stack.Supernode]() -var FirstL1EL = First[stack.L1ELNodeID, stack.L1ELNode]() -var FirstL1CL = First[stack.L1CLNodeID, stack.L1CLNode]() +var FirstL1EL = First[stack.L1ELNode]() +var FirstL1CL = First[stack.L1CLNode]() -var FirstL1Network = First[stack.L1NetworkID, stack.L1Network]() -var FirstL2Network = First[stack.L2NetworkID, stack.L2Network]() -var FirstSuperchain = First[stack.SuperchainID, stack.Superchain]() -var FirstCluster = First[stack.ClusterID, stack.Cluster]() +var FirstL1Network = First[stack.L1Network]() +var FirstL2Network = First[stack.L2Network]() +var FirstSuperchain = First[stack.Superchain]() +var FirstCluster = First[stack.Cluster]() -var FirstFaucet = First[stack.FaucetID, stack.Faucet]() -var FirstSyncTester = First[stack.SyncTesterID, stack.SyncTester]() +var FirstFaucet = First[stack.Faucet]() +var FirstSyncTester = First[stack.SyncTester]() -var FirstOPRBuilderNode = First[stack.OPRBuilderNodeID, stack.OPRBuilderNode]() -var FirstRollupBoostNode = First[stack.RollupBoostNodeID, stack.RollupBoostNode]() +var FirstOPRBuilderNode = First[stack.OPRBuilderNode]() +var FirstRollupBoostNode = First[stack.RollupBoostNode]() diff --git a/op-devstack/stack/match/gate.go b/op-devstack/stack/match/gate.go index 7b5cc81055d99..115fbc63dc066 100644 --- a/op-devstack/stack/match/gate.go +++ b/op-devstack/stack/match/gate.go @@ -7,24 +7,24 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" ) -type assume[I comparable, E stack.Identifiable[I]] struct { +type assume[E stack.Identifiable] struct { t devtest.T - inner stack.Matcher[I, E] + inner stack.Matcher[E] } -func (a *assume[I, E]) Match(elems []E) []E { +func (a *assume[E]) Match(elems []E) []E { 
elems = a.inner.Match(elems) a.t.Gate().NotEmpty(elems, "must match something to continue, but matched nothing with %s", a.inner) return elems } -func (a *assume[I, E]) String() string { +func (a *assume[E]) String() string { return fmt.Sprintf("Assume(%s)", a.inner) } // Assume skips the test if no elements were matched with the inner matcher -func Assume[I comparable, E stack.Identifiable[I]](t devtest.T, inner stack.Matcher[I, E]) stack.Matcher[I, E] { - return &assume[I, E]{ +func Assume[E stack.Identifiable](t devtest.T, inner stack.Matcher[E]) stack.Matcher[E] { + return &assume[E]{ t: t, inner: inner, } diff --git a/op-devstack/stack/match/gate_test.go b/op-devstack/stack/match/gate_test.go index 34181bf36c12c..61f26077f58ea 100644 --- a/op-devstack/stack/match/gate_test.go +++ b/op-devstack/stack/match/gate_test.go @@ -36,11 +36,11 @@ func (f *fakeTesting) Gate() *testreq.Assertions { } func TestAssume(t *testing.T) { - a := &testObject{id: "a"} - b := &testObject{id: "b"} + a := newTestObject("a") + b := newTestObject("b") fT := &fakeTesting{T: nil, g: &gateTesting{log: t.Logf}} - m := Assume(fT, First[testID, *testObject]()) + m := Assume(fT, First[*testObject]()) require.Equal(t, m.String(), "Assume(ByIndex(0))") require.Equal(t, []*testObject{a}, m.Match([]*testObject{a})) require.Equal(t, []*testObject{a}, m.Match([]*testObject{a, b})) diff --git a/op-devstack/stack/match/interop.go b/op-devstack/stack/match/interop.go index 298fa296cdbd9..1810debd76436 100644 --- a/op-devstack/stack/match/interop.go +++ b/op-devstack/stack/match/interop.go @@ -3,12 +3,12 @@ package match import "github.com/ethereum-optimism/optimism/op-devstack/stack" // L2ChainA is an alias for the first L2 network. -var L2ChainA = First[stack.L2NetworkID, stack.L2Network]() +var L2ChainA = First[stack.L2Network]() // L2ChainB is an alias for the second L2 network. 
-var L2ChainB = Second[stack.L2NetworkID, stack.L2Network]() +var L2ChainB = Second[stack.L2Network]() // L2ChainById returns a matcher for the L2 network with the given ID. -func L2ChainById(id stack.L2NetworkID) stack.Matcher[stack.L2NetworkID, stack.L2Network] { - return byID[stack.L2NetworkID, stack.L2Network](id) +func L2ChainById(id stack.ComponentID) stack.Matcher[stack.L2Network] { + return byID[stack.L2Network](id) } diff --git a/op-devstack/stack/match/labels.go b/op-devstack/stack/match/labels.go index be32276b3379d..44ea4e09fc988 100644 --- a/op-devstack/stack/match/labels.go +++ b/op-devstack/stack/match/labels.go @@ -4,11 +4,11 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" ) -func WithLabel[I comparable, E interface { - stack.Identifiable[I] +func WithLabel[E interface { + stack.Identifiable Label(key string) string -}](key, value string) stack.Matcher[I, E] { - return MatchElemFn[I, E](func(elem E) bool { +}](key, value string) stack.Matcher[E] { + return MatchElemFn[E](func(elem E) bool { return elem.Label(key) == value }) } @@ -30,7 +30,7 @@ const ( ) func (v Vendor) Match(elems []stack.L2ELNode) []stack.L2ELNode { - return WithLabel[stack.L2ELNodeID, stack.L2ELNode](LabelVendor, string(v)).Match(elems) + return WithLabel[stack.L2ELNode](LabelVendor, string(v)).Match(elems) } func (v Vendor) String() string { diff --git a/op-devstack/stack/match/second.go b/op-devstack/stack/match/second.go index 20e6c2a96077c..c483f015c397d 100644 --- a/op-devstack/stack/match/second.go +++ b/op-devstack/stack/match/second.go @@ -2,7 +2,7 @@ package match import "github.com/ethereum-optimism/optimism/op-devstack/stack" -var SecondL2EL = Second[stack.L2ELNodeID, stack.L2ELNode]() -var SecondL2CL = Second[stack.L2CLNodeID, stack.L2CLNode]() +var SecondL2EL = Second[stack.L2ELNode]() +var SecondL2CL = Second[stack.L2CLNode]() -var SecondSupervisor = Second[stack.SupervisorID, stack.Supervisor]() +var SecondSupervisor = 
Second[stack.Supervisor]() diff --git a/op-devstack/stack/match/sequencer.go b/op-devstack/stack/match/sequencer.go index ce2a17dd29b44..49e6b3b25e0ee 100644 --- a/op-devstack/stack/match/sequencer.go +++ b/op-devstack/stack/match/sequencer.go @@ -7,8 +7,8 @@ import ( "github.com/ethereum-optimism/optimism/op-service/retry" ) -func WithSequencerActive(ctx context.Context) stack.Matcher[stack.L2CLNodeID, stack.L2CLNode] { - return MatchElemFn[stack.L2CLNodeID, stack.L2CLNode](func(elem stack.L2CLNode) bool { +func WithSequencerActive(ctx context.Context) stack.Matcher[stack.L2CLNode] { + return MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { sequencing, err := retry.Do(ctx, 10, retry.Exponential(), func() (bool, error) { return elem.RollupAPI().SequencerActive(ctx) }) diff --git a/op-devstack/stack/match/util.go b/op-devstack/stack/match/util.go index 680380df74b07..97a3361dcaa07 100644 --- a/op-devstack/stack/match/util.go +++ b/op-devstack/stack/match/util.go @@ -7,25 +7,25 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" ) -func First[I comparable, E stack.Identifiable[I]]() stack.Matcher[I, E] { - return ByIndex[I, E](0) +func First[E stack.Identifiable]() stack.Matcher[E] { + return ByIndex[E](0) } -func Second[I comparable, E stack.Identifiable[I]]() stack.Matcher[I, E] { - return ByIndex[I, E](1) +func Second[E stack.Identifiable]() stack.Matcher[E] { + return ByIndex[E](1) } -func byID[I comparable, E stack.Identifiable[I]](id I) stack.Matcher[I, E] { - return MatchElemFn[I, E](func(elem E) bool { +func byID[E stack.Identifiable](id stack.ComponentID) stack.Matcher[E] { + return MatchElemFn[E](func(elem E) bool { return elem.ID() == id }) } -type byIndexMatcher[I comparable, E stack.Identifiable[I]] struct { +type byIndexMatcher[E stack.Identifiable] struct { index int } -func (ma byIndexMatcher[I, E]) Match(elems []E) []E { +func (ma byIndexMatcher[E]) Match(elems []E) []E { if ma.index < 0 { return nil } @@ -35,78 +35,78 
@@ func (ma byIndexMatcher[I, E]) Match(elems []E) []E { return elems[ma.index : ma.index+1] } -func (ma byIndexMatcher[I, E]) String() string { +func (ma byIndexMatcher[E]) String() string { return fmt.Sprintf("ByIndex(%d)", ma.index) } // ByIndex matches element i (zero-indexed). -func ByIndex[I comparable, E stack.Identifiable[I]](index int) stack.Matcher[I, E] { - return byIndexMatcher[I, E]{index: index} +func ByIndex[E stack.Identifiable](index int) stack.Matcher[E] { + return byIndexMatcher[E]{index: index} } -type lastMatcher[I comparable, E stack.Identifiable[I]] struct{} +type lastMatcher[E stack.Identifiable] struct{} -func (ma lastMatcher[I, E]) Match(elems []E) []E { +func (ma lastMatcher[E]) Match(elems []E) []E { if len(elems) == 0 { return nil } return elems[len(elems)-1:] } -func (ma lastMatcher[I, E]) String() string { +func (ma lastMatcher[E]) String() string { return "Last" } // Last matches the last element. -func Last[I comparable, E stack.Identifiable[I]]() stack.Matcher[I, E] { - return lastMatcher[I, E]{} +func Last[E stack.Identifiable]() stack.Matcher[E] { + return lastMatcher[E]{} } -type onlyMatcher[I comparable, E stack.Identifiable[I]] struct{} +type onlyMatcher[E stack.Identifiable] struct{} -func (ma onlyMatcher[I, E]) Match(elems []E) []E { +func (ma onlyMatcher[E]) Match(elems []E) []E { if len(elems) != 1 { return nil } return elems } -func (ma onlyMatcher[I, E]) String() string { +func (ma onlyMatcher[E]) String() string { return "Only" } // Only matches the only value. If there are none, or more than one, then no value is matched. 
-func Only[I comparable, E stack.Identifiable[I]]() stack.Matcher[I, E] { - return onlyMatcher[I, E]{} +func Only[E stack.Identifiable]() stack.Matcher[E] { + return onlyMatcher[E]{} } -type andMatcher[I comparable, E stack.Identifiable[I]] struct { - inner []stack.Matcher[I, E] +type andMatcher[E stack.Identifiable] struct { + inner []stack.Matcher[E] } -func (ma andMatcher[I, E]) Match(elems []E) []E { +func (ma andMatcher[E]) Match(elems []E) []E { for _, matcher := range ma.inner { elems = matcher.Match(elems) } return elems } -func (ma andMatcher[I, E]) String() string { +func (ma andMatcher[E]) String() string { return fmt.Sprintf("And(%s)", joinStr(ma.inner)) } // And combines all the matchers, by running them all, narrowing down the set with each application. // If none are provided, all inputs are matched. -func And[I comparable, E stack.Identifiable[I]](matchers ...stack.Matcher[I, E]) stack.Matcher[I, E] { - return andMatcher[I, E]{inner: matchers} +func And[E stack.Identifiable](matchers ...stack.Matcher[E]) stack.Matcher[E] { + return andMatcher[E]{inner: matchers} } -type orMatcher[I comparable, E stack.Identifiable[I]] struct { - inner []stack.Matcher[I, E] +type orMatcher[E stack.Identifiable] struct { + inner []stack.Matcher[E] } -func (ma orMatcher[I, E]) Match(elems []E) []E { - seen := make(map[I]struct{}) +func (ma orMatcher[E]) Match(elems []E) []E { + seen := make(map[stack.ComponentID]struct{}) for _, matcher := range ma.inner { for _, elem := range matcher.Match(elems) { seen[elem.ID()] = struct{}{} @@ -122,7 +122,7 @@ func (ma orMatcher[I, E]) Match(elems []E) []E { return out } -func (ma orMatcher[I, E]) String() string { +func (ma orMatcher[E]) String() string { return fmt.Sprintf("Or(%s)", joinStr(ma.inner)) } @@ -139,16 +139,16 @@ func joinStr[V fmt.Stringer](elems []V) string { // Or returns each of the inputs that have a match with any of the matchers. // All inputs are applied to all matchers, even if matched previously. 
-func Or[I comparable, E stack.Identifiable[I]](matchers ...stack.Matcher[I, E]) stack.Matcher[I, E] { - return orMatcher[I, E]{inner: matchers} +func Or[E stack.Identifiable](matchers ...stack.Matcher[E]) stack.Matcher[E] { + return orMatcher[E]{inner: matchers} } -type notMatcher[I comparable, E stack.Identifiable[I]] struct { - inner stack.Matcher[I, E] +type notMatcher[E stack.Identifiable] struct { + inner stack.Matcher[E] } -func (ma notMatcher[I, E]) Match(elems []E) []E { - matched := make(map[I]struct{}) +func (ma notMatcher[E]) Match(elems []E) []E { + matched := make(map[stack.ComponentID]struct{}) for _, elem := range ma.inner.Match(elems) { matched[elem.ID()] = struct{}{} } @@ -161,11 +161,11 @@ func (ma notMatcher[I, E]) Match(elems []E) []E { return out } -func (ma notMatcher[I, E]) String() string { +func (ma notMatcher[E]) String() string { return fmt.Sprintf("Not(%s)", ma.inner) } // Not matches the elements that do not match the given matcher. -func Not[I comparable, E stack.Identifiable[I]](matcher stack.Matcher[I, E]) stack.Matcher[I, E] { - return notMatcher[I, E]{inner: matcher} +func Not[E stack.Identifiable](matcher stack.Matcher[E]) stack.Matcher[E] { + return notMatcher[E]{inner: matcher} } diff --git a/op-devstack/stack/match/util_test.go b/op-devstack/stack/match/util_test.go index 52f59260f4db9..5080c6f5a99fe 100644 --- a/op-devstack/stack/match/util_test.go +++ b/op-devstack/stack/match/util_test.go @@ -8,26 +8,28 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack" ) -type testID string - type testObject struct { - id testID + id stack.ComponentID } -func (t *testObject) ID() testID { +func (t *testObject) ID() stack.ComponentID { return t.id } -var _ stack.Identifiable[testID] = (*testObject)(nil) +var _ stack.Identifiable = (*testObject)(nil) + +func newTestObject(key string) *testObject { + return &testObject{id: stack.NewComponentIDKeyOnly(stack.KindL2ELNode, key)} +} func TestUtils(t *testing.T) { - a := 
&testObject{id: "a"} - b := &testObject{id: "b"} - c := &testObject{id: "c"} - d := &testObject{id: "d"} + a := newTestObject("a") + b := newTestObject("b") + c := newTestObject("c") + d := newTestObject("d") t.Run("first", func(t *testing.T) { - m := First[testID, *testObject]() + m := First[*testObject]() require.Equal(t, m.String(), "ByIndex(0)") require.Equal(t, []*testObject{a}, m.Match([]*testObject{a, b, c, d})) require.Equal(t, []*testObject{b}, m.Match([]*testObject{b, a, c, d})) @@ -35,13 +37,13 @@ func TestUtils(t *testing.T) { require.Equal(t, []*testObject(nil), m.Match([]*testObject{})) }) t.Run("last", func(t *testing.T) { - m := Last[testID, *testObject]() + m := Last[*testObject]() require.Equal(t, m.String(), "Last") require.Equal(t, []*testObject{d}, m.Match([]*testObject{a, b, c, d})) require.Equal(t, []*testObject{c}, m.Match([]*testObject{b, a, c})) }) t.Run("only", func(t *testing.T) { - m := Only[testID, *testObject]() + m := Only[*testObject]() t.Log(m.String()) require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b, c, d})) require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b})) @@ -49,38 +51,38 @@ func TestUtils(t *testing.T) { require.Equal(t, []*testObject(nil), m.Match([]*testObject{})) }) t.Run("and", func(t *testing.T) { - m := And(First[testID, *testObject](), Second[testID, *testObject]()) + m := And(First[*testObject](), Second[*testObject]()) require.Equal(t, m.String(), "And(ByIndex(0), ByIndex(1))") require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b, c, d})) // narrowed down to single element with First require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, a})) - m2 := And(Second[testID, *testObject](), First[testID, *testObject]()) + m2 := And(Second[*testObject](), First[*testObject]()) // Narrowed down to b, then select b as first require.Equal(t, []*testObject{b}, m2.Match([]*testObject{a, b})) }) t.Run("or", func(t *testing.T) { - m := Or(First[testID, *testObject](), Second[testID, 
*testObject]()) + m := Or(First[*testObject](), Second[*testObject]()) t.Log(m.String()) require.Equal(t, []*testObject{a, b}, m.Match([]*testObject{a, b, c, d})) }) t.Run("not", func(t *testing.T) { - m := Not(Or(First[testID, *testObject](), Second[testID, *testObject]())) + m := Not(Or(First[*testObject](), Second[*testObject]())) require.Equal(t, m.String(), "Not(Or(ByIndex(0), ByIndex(1)))") require.Equal(t, []*testObject{c, d}, m.Match([]*testObject{a, b, c, d})) require.Equal(t, []*testObject{}, m.Match([]*testObject{})) - m2 := Not(Last[testID, *testObject]()) + m2 := Not(Last[*testObject]()) t.Log(m.String()) require.Equal(t, []*testObject{a, b, c}, m2.Match([]*testObject{a, b, c, d})) }) t.Run("by-index", func(t *testing.T) { - m := ByIndex[testID, *testObject](2) + m := ByIndex[*testObject](2) require.Equal(t, m.String(), "ByIndex(2)") require.Equal(t, []*testObject{c}, m.Match([]*testObject{a, b, c, d})) require.Equal(t, []*testObject{c}, m.Match([]*testObject{a, b, c})) require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b})) require.Equal(t, []*testObject(nil), m.Match([]*testObject{a})) require.Equal(t, []*testObject(nil), m.Match([]*testObject{})) - m2 := ByIndex[testID, *testObject](-1) + m2 := ByIndex[*testObject](-1) require.Equal(t, []*testObject(nil), m2.Match([]*testObject{a, b})) }) } diff --git a/op-devstack/stack/matcher.go b/op-devstack/stack/matcher.go index 91f47baaeb9ed..d73b3f1dabb29 100644 --- a/op-devstack/stack/matcher.go +++ b/op-devstack/stack/matcher.go @@ -1,7 +1,8 @@ package stack -type Identifiable[I comparable] interface { - ID() I +// Identifiable is implemented by all components that have an ID. +type Identifiable interface { + ID() ComponentID } // Matcher abstracts what can be used as getter-method argument. @@ -9,7 +10,7 @@ type Identifiable[I comparable] interface { // if the argument is an ID before searching for a match. 
// This enables lookups such as getting a component by labels, // by its state, by its relation to other components, etc. -type Matcher[I comparable, E Identifiable[I]] interface { +type Matcher[E Identifiable] interface { // Match finds the elements that pass the matcher. // If no element passes, it returns an empty slice. // Callers should guarantee a stable order of ids, to ensure a deterministic match. @@ -20,49 +21,40 @@ type Matcher[I comparable, E Identifiable[I]] interface { String() string } -func findByID[I comparable, E Identifiable[I]](id I, elems []E) []E { - for i, elem := range elems { - if elem.ID() == id { - return elems[i : i+1] - } - } - return nil -} - -type ClusterMatcher = Matcher[ClusterID, Cluster] +type ClusterMatcher = Matcher[Cluster] -type L1CLMatcher = Matcher[L1CLNodeID, L1CLNode] +type L1CLMatcher = Matcher[L1CLNode] -type L1ELMatcher = Matcher[L1ELNodeID, L1ELNode] +type L1ELMatcher = Matcher[L1ELNode] -type L1NetworkMatcher = Matcher[L1NetworkID, L1Network] +type L1NetworkMatcher = Matcher[L1Network] -type L2NetworkMatcher = Matcher[L2NetworkID, L2Network] +type L2NetworkMatcher = Matcher[L2Network] -type SuperchainMatcher = Matcher[SuperchainID, Superchain] +type SuperchainMatcher = Matcher[Superchain] -type L2BatcherMatcher = Matcher[L2BatcherID, L2Batcher] +type L2BatcherMatcher = Matcher[L2Batcher] -type L2ChallengerMatcher = Matcher[L2ChallengerID, L2Challenger] +type L2ChallengerMatcher = Matcher[L2Challenger] -type L2ProposerMatcher = Matcher[L2ProposerID, L2Proposer] +type L2ProposerMatcher = Matcher[L2Proposer] -type L2CLMatcher = Matcher[L2CLNodeID, L2CLNode] +type L2CLMatcher = Matcher[L2CLNode] -type SupervisorMatcher = Matcher[SupervisorID, Supervisor] +type SupervisorMatcher = Matcher[Supervisor] -type SupernodeMatcher = Matcher[SupernodeID, Supernode] +type SupernodeMatcher = Matcher[Supernode] -type TestSequencerMatcher = Matcher[TestSequencerID, TestSequencer] +type TestSequencerMatcher = Matcher[TestSequencer] 
-type ConductorMatcher = Matcher[ConductorID, Conductor] +type ConductorMatcher = Matcher[Conductor] -type L2ELMatcher = Matcher[L2ELNodeID, L2ELNode] +type L2ELMatcher = Matcher[L2ELNode] -type FaucetMatcher = Matcher[FaucetID, Faucet] +type FaucetMatcher = Matcher[Faucet] -type SyncTesterMatcher = Matcher[SyncTesterID, SyncTester] +type SyncTesterMatcher = Matcher[SyncTester] -type RollupBoostNodeMatcher = Matcher[RollupBoostNodeID, RollupBoostNode] +type RollupBoostNodeMatcher = Matcher[RollupBoostNode] -type OPRBuilderNodeMatcher = Matcher[OPRBuilderNodeID, OPRBuilderNode] +type OPRBuilderNodeMatcher = Matcher[OPRBuilderNode] diff --git a/op-devstack/stack/network.go b/op-devstack/stack/network.go index 66144f933bb14..35c30363f2cc3 100644 --- a/op-devstack/stack/network.go +++ b/op-devstack/stack/network.go @@ -19,11 +19,11 @@ type Network interface { Faucet(m FaucetMatcher) Faucet Faucets() []Faucet - FaucetIDs() []FaucetID + FaucetIDs() []ComponentID SyncTester(m SyncTesterMatcher) SyncTester SyncTesters() []SyncTester - SyncTesterIDs() []SyncTesterID + SyncTesterIDs() []ComponentID } type ExtensibleNetwork interface { diff --git a/op-devstack/stack/op_rbuilder.go b/op-devstack/stack/op_rbuilder.go index 03926167f4046..0f7e92396bd83 100644 --- a/op-devstack/stack/op_rbuilder.go +++ b/op-devstack/stack/op_rbuilder.go @@ -1,76 +1,13 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// OPRBuilderNodeID identifies a L2ELNode by name and chainID, is type-safe, and can be value-copied and used as map key. 
-type OPRBuilderNodeID idWithChain - -var _ IDWithChain = (*OPRBuilderNodeID)(nil) - -const OPRBuilderNodeKind Kind = "OPRBuilderNode" - -func NewOPRBuilderNodeID(key string, chainID eth.ChainID) OPRBuilderNodeID { - return OPRBuilderNodeID{ - key: key, - chainID: chainID, - } -} - -func (id OPRBuilderNodeID) String() string { - return idWithChain(id).string(OPRBuilderNodeKind) -} - -func (id OPRBuilderNodeID) ChainID() eth.ChainID { - return id.chainID -} - -func (id OPRBuilderNodeID) Kind() Kind { - return OPRBuilderNodeKind -} - -func (id OPRBuilderNodeID) Key() string { - return id.key -} - -func (id OPRBuilderNodeID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id OPRBuilderNodeID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(OPRBuilderNodeKind) -} - -func (id *OPRBuilderNodeID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(OPRBuilderNodeKind, data) -} - -func SortOPRBuilderIDs(ids []OPRBuilderNodeID) []OPRBuilderNodeID { - return copyAndSort(ids, func(a, b OPRBuilderNodeID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortOPRBuilderNodes(elems []OPRBuilderNode) []OPRBuilderNode { - return copyAndSort(elems, func(a, b OPRBuilderNode) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ OPRBuilderNodeMatcher = OPRBuilderNodeID{} - -func (id OPRBuilderNodeID) Match(elems []OPRBuilderNode) []OPRBuilderNode { - return findByID(id, elems) -} - -// L2ELNode is a L2 ethereum execution-layer node +// OPRBuilderNode is a L2 ethereum execution-layer node type OPRBuilderNode interface { - ID() OPRBuilderNodeID + ID() ComponentID L2EthClient() apis.L2EthClient L2EngineClient() apis.EngineClient FlashblocksClient() *client.WSClient diff --git a/op-devstack/stack/orchestrator.go b/op-devstack/stack/orchestrator.go index a1e636fe26b10..07efe9d27f0c3 100644 --- a/op-devstack/stack/orchestrator.go +++ 
b/op-devstack/stack/orchestrator.go @@ -20,12 +20,12 @@ const ( // ControlPlane is the interface for the orchestrators to control components of the system. type ControlPlane interface { - SupervisorState(id SupervisorID, action ControlAction) - L2CLNodeState(id L2CLNodeID, action ControlAction) - L2ELNodeState(id L2ELNodeID, action ControlAction) - FakePoSState(id L1CLNodeID, action ControlAction) - RollupBoostNodeState(id RollupBoostNodeID, action ControlAction) - OPRBuilderNodeState(id OPRBuilderNodeID, action ControlAction) + SupervisorState(id ComponentID, action ControlAction) + L2CLNodeState(id ComponentID, action ControlAction) + L2ELNodeState(id ComponentID, action ControlAction) + FakePoSState(id ComponentID, action ControlAction) + RollupBoostNodeState(id ComponentID, action ControlAction) + OPRBuilderNodeState(id ComponentID, action ControlAction) } // Orchestrator is the base interface for all system orchestrators. diff --git a/op-devstack/stack/registry.go b/op-devstack/stack/registry.go index 2f11edf773c28..791c292c5307e 100644 --- a/op-devstack/stack/registry.go +++ b/op-devstack/stack/registry.go @@ -293,10 +293,10 @@ func (r *Registry) Clear() { // Type-safe generic accessor functions. // These provide compile-time type safety when working with the registry. -// RegistryGet retrieves a component by its typed ID and returns it as the expected type. +// RegistryGet retrieves a component by its ID and returns it as the expected type. // Returns the zero value and false if not found or if the type doesn't match. -func RegistryGet[T any, M KindMarker](r *Registry, id ID[M]) (T, bool) { - component, ok := r.Get(id.ComponentID) +func RegistryGet[T any](r *Registry, id ComponentID) (T, bool) { + component, ok := r.Get(id) if !ok { var zero T return zero, false @@ -358,7 +358,7 @@ func RegistryRangeByKind[T any](r *Registry, kind ComponentKind, fn func(id Comp }) } -// RegistryRegister is a type-safe way to register a component with a typed ID. 
-func RegistryRegister[T any, M KindMarker](r *Registry, id ID[M], component T) { - r.Register(id.ComponentID, component) +// RegistryRegister is a type-safe way to register a component with an ID. +func RegistryRegister[T any](r *Registry, id ComponentID, component T) { + r.Register(id, component) } diff --git a/op-devstack/stack/registry_test.go b/op-devstack/stack/registry_test.go index e4d1ebeb7a559..e4951da3e54da 100644 --- a/op-devstack/stack/registry_test.go +++ b/op-devstack/stack/registry_test.go @@ -461,9 +461,10 @@ func TestRegistryGet_TypeSafe(t *testing.T) { r := NewRegistry() chainID := eth.ChainIDFromUInt64(420) - id := NewL2BatcherID2("batcher1", chainID) - component := &mockComponent{id: id.ComponentID, name: "test-batcher"} + id := NewL2BatcherID("batcher1", chainID) + component := &mockComponent{id: id, name: "test-batcher"} + // Use the ID for generic registry functions RegistryRegister(r, id, component) // Type-safe get diff --git a/op-devstack/stack/rollup_boost.go b/op-devstack/stack/rollup_boost.go index 8c44e3d79d31b..7079f722a81df 100644 --- a/op-devstack/stack/rollup_boost.go +++ b/op-devstack/stack/rollup_boost.go @@ -1,76 +1,13 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// RollupBoostNodeID identifies a RollupBoost node by name and chainID, is type-safe, and can be value-copied and used as map key. 
-type RollupBoostNodeID L2ELNodeID - -var _ IDWithChain = (*RollupBoostNodeID)(nil) - -const RollupBoostNodeKind Kind = "RollupBoostNode" - -func NewRollupBoostNodeID(key string, chainID eth.ChainID) RollupBoostNodeID { - return RollupBoostNodeID{ - key: key, - chainID: chainID, - } -} - -func (id RollupBoostNodeID) String() string { - return idWithChain(id).string(RollupBoostNodeKind) -} - -func (id RollupBoostNodeID) ChainID() eth.ChainID { - return id.chainID -} - -func (id RollupBoostNodeID) Kind() Kind { - return RollupBoostNodeKind -} - -func (id RollupBoostNodeID) Key() string { - return id.key -} - -func (id RollupBoostNodeID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id RollupBoostNodeID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(RollupBoostNodeKind) -} - -func (id *RollupBoostNodeID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(RollupBoostNodeKind, data) -} - -func SortRollupBoostIDs(ids []RollupBoostNodeID) []RollupBoostNodeID { - return copyAndSort(ids, func(a, b RollupBoostNodeID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortRollupBoostNodes(elems []RollupBoostNode) []RollupBoostNode { - return copyAndSort(elems, func(a, b RollupBoostNode) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ RollupBoostNodeMatcher = RollupBoostNodeID{} - -func (id RollupBoostNodeID) Match(elems []RollupBoostNode) []RollupBoostNode { - return findByID(id, elems) -} - // RollupBoostNode is a shim service between an L2 consensus-layer node and an L2 ethereum execution-layer node type RollupBoostNode interface { - ID() RollupBoostNodeID + ID() ComponentID L2EthClient() apis.L2EthClient L2EngineClient() apis.EngineClient FlashblocksClient() *client.WSClient diff --git a/op-devstack/stack/superchain.go b/op-devstack/stack/superchain.go index 4087afd1ef844..b1680bbeffd6c 100644 --- 
a/op-devstack/stack/superchain.go +++ b/op-devstack/stack/superchain.go @@ -1,8 +1,6 @@ package stack import ( - "log/slog" - "github.com/ethereum/go-ethereum/common" ) @@ -11,51 +9,10 @@ type SuperchainDeployment interface { SuperchainConfigAddr() common.Address } -// SuperchainID identifies a Superchain by name, is type-safe, and can be value-copied and used as map key. -type SuperchainID genericID - -var _ GenericID = (*SuperchainID)(nil) - -const SuperchainKind Kind = "Superchain" - -func (id SuperchainID) String() string { - return genericID(id).string(SuperchainKind) -} - -func (id SuperchainID) Kind() Kind { - return SuperchainKind -} - -func (id SuperchainID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id SuperchainID) MarshalText() ([]byte, error) { - return genericID(id).marshalText(SuperchainKind) -} - -func (id *SuperchainID) UnmarshalText(data []byte) error { - return (*genericID)(id).unmarshalText(SuperchainKind, data) -} - -func SortSuperchainIDs(ids []SuperchainID) []SuperchainID { - return copyAndSortCmp(ids) -} - -func SortSuperchains(elems []Superchain) []Superchain { - return copyAndSort(elems, lessElemOrdered[SuperchainID, Superchain]) -} - -var _ SuperchainMatcher = SuperchainID("") - -func (id SuperchainID) Match(elems []Superchain) []Superchain { - return findByID(id, elems) -} - // Superchain is a collection of L2 chains with common rules and shared configuration on L1 type Superchain interface { Common - ID() SuperchainID + ID() ComponentID Deployment() SuperchainDeployment } diff --git a/op-devstack/stack/supernode.go b/op-devstack/stack/supernode.go index c7b1fa6080f2c..6b5fb48abf632 100644 --- a/op-devstack/stack/supernode.go +++ b/op-devstack/stack/supernode.go @@ -2,64 +2,43 @@ package stack import ( "fmt" - "log/slog" + "sort" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/eth" ) -// SupernodeID identifies a Supernode by name, is type-safe, and 
can be value-copied and used as map key. -type SupernodeID genericID - -var _ GenericID = (*SupernodeID)(nil) - -const SupernodeKind Kind = "Supernode" +// SupernodeID is kept as a semantic alias for ComponentID. +// Supernode IDs are key-only IDs with KindSupernode. +type SupernodeID = ComponentID func NewSupernodeID(key string, chains ...eth.ChainID) SupernodeID { - var s string + var suffix string for _, chain := range chains { - s += chain.String() + suffix += chain.String() } - return SupernodeID(fmt.Sprintf("%s-%s", key, s)) -} - -func (id SupernodeID) String() string { - return genericID(id).string(SupernodeKind) -} - -func (id SupernodeID) Kind() Kind { - return SupernodeKind -} - -func (id SupernodeID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id SupernodeID) MarshalText() ([]byte, error) { - return genericID(id).marshalText(SupernodeKind) -} - -func (id *SupernodeID) UnmarshalText(data []byte) error { - return (*genericID)(id).unmarshalText(SupernodeKind, data) + return NewComponentIDKeyOnly(KindSupernode, fmt.Sprintf("%s-%s", key, suffix)) } func SortSupernodeIDs(ids []SupernodeID) []SupernodeID { - return copyAndSortCmp(ids) + out := append([]SupernodeID(nil), ids...) + sort.Slice(out, func(i, j int) bool { + return out[i].Less(out[j]) + }) + return out } func SortSupernodes(elems []Supernode) []Supernode { - return copyAndSort(elems, lessElemOrdered[SupernodeID, Supernode]) -} - -var _ SupernodeMatcher = SupernodeID("") - -func (id SupernodeID) Match(elems []Supernode) []Supernode { - return findByID(id, elems) + out := append([]Supernode(nil), elems...) 
+ sort.Slice(out, func(i, j int) bool { + return out[i].ID().Less(out[j].ID()) + }) + return out } type Supernode interface { Common - ID() SupernodeID + ID() ComponentID QueryAPI() apis.SupernodeQueryAPI } diff --git a/op-devstack/stack/supervisor.go b/op-devstack/stack/supervisor.go index b635e98237572..2deac9fb53634 100644 --- a/op-devstack/stack/supervisor.go +++ b/op-devstack/stack/supervisor.go @@ -1,56 +1,13 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" ) -// SupervisorID identifies a Supervisor by name and chainID, is type-safe, and can be value-copied and used as map key. -type SupervisorID genericID - -var _ GenericID = (*SupervisorID)(nil) - -const SupervisorKind Kind = "Supervisor" - -func (id SupervisorID) String() string { - return genericID(id).string(SupervisorKind) -} - -func (id SupervisorID) Kind() Kind { - return SupervisorKind -} - -func (id SupervisorID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id SupervisorID) MarshalText() ([]byte, error) { - return genericID(id).marshalText(SupervisorKind) -} - -func (id *SupervisorID) UnmarshalText(data []byte) error { - return (*genericID)(id).unmarshalText(SupervisorKind, data) -} - -func SortSupervisorIDs(ids []SupervisorID) []SupervisorID { - return copyAndSortCmp(ids) -} - -func SortSupervisors(elems []Supervisor) []Supervisor { - return copyAndSort(elems, lessElemOrdered[SupervisorID, Supervisor]) -} - -var _ SupervisorMatcher = SupervisorID("") - -func (id SupervisorID) Match(elems []Supervisor) []Supervisor { - return findByID(id, elems) -} - // Supervisor is an interop service, used to cross-verify messages between chains. 
type Supervisor interface { Common - ID() SupervisorID + ID() ComponentID AdminAPI() apis.SupervisorAdminAPI QueryAPI() apis.SupervisorQueryAPI diff --git a/op-devstack/stack/sync_tester.go b/op-devstack/stack/sync_tester.go index 0601e4e40fbaa..d08ccd9902de8 100644 --- a/op-devstack/stack/sync_tester.go +++ b/op-devstack/stack/sync_tester.go @@ -1,75 +1,12 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" ) -// SyncTesterID identifies a syncTester by name and chainID, is type-safe, and can be value-copied and used as map key. -type SyncTesterID idWithChain - -var _ IDWithChain = (*SyncTesterID)(nil) - -const SyncTesterKind Kind = "SyncTester" - -func NewSyncTesterID(key string, chainID eth.ChainID) SyncTesterID { - return SyncTesterID{ - key: key, - chainID: chainID, - } -} - -func (id SyncTesterID) String() string { - return idWithChain(id).string(SyncTesterKind) -} - -func (id SyncTesterID) ChainID() eth.ChainID { - return idWithChain(id).chainID -} - -func (id SyncTesterID) Kind() Kind { - return SyncTesterKind -} - -func (id SyncTesterID) Key() string { - return id.key -} - -func (id SyncTesterID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id SyncTesterID) MarshalText() ([]byte, error) { - return idWithChain(id).marshalText(SyncTesterKind) -} - -func (id *SyncTesterID) UnmarshalText(data []byte) error { - return (*idWithChain)(id).unmarshalText(SyncTesterKind, data) -} - -func SortSyncTesterIDs(ids []SyncTesterID) []SyncTesterID { - return copyAndSort(ids, func(a, b SyncTesterID) bool { - return lessIDWithChain(idWithChain(a), idWithChain(b)) - }) -} - -func SortSyncTesters(elems []SyncTester) []SyncTester { - return copyAndSort(elems, func(a, b SyncTester) bool { - return lessIDWithChain(idWithChain(a.ID()), idWithChain(b.ID())) - }) -} - -var _ SyncTesterMatcher = SyncTesterID{} - -func (id SyncTesterID) Match(elems 
[]SyncTester) []SyncTester { - return findByID(id, elems) -} - type SyncTester interface { Common - ID() SyncTesterID + ID() ComponentID API() apis.SyncTester APIWithSession(sessionID string) apis.SyncTester diff --git a/op-devstack/stack/system.go b/op-devstack/stack/system.go index c4cd7acbe0056..85c5cd8debf9b 100644 --- a/op-devstack/stack/system.go +++ b/op-devstack/stack/system.go @@ -22,11 +22,11 @@ type System interface { Supernode(m SupernodeMatcher) Supernode TestSequencer(id TestSequencerMatcher) TestSequencer - SuperchainIDs() []SuperchainID - ClusterIDs() []ClusterID - L1NetworkIDs() []L1NetworkID - L2NetworkIDs() []L2NetworkID - SupervisorIDs() []SupervisorID + SuperchainIDs() []ComponentID + ClusterIDs() []ComponentID + L1NetworkIDs() []ComponentID + L2NetworkIDs() []ComponentID + SupervisorIDs() []ComponentID Superchains() []Superchain Clusters() []Cluster diff --git a/op-devstack/stack/test_sequencer.go b/op-devstack/stack/test_sequencer.go index 7a6a2023baad7..072bbbb0b4ccd 100644 --- a/op-devstack/stack/test_sequencer.go +++ b/op-devstack/stack/test_sequencer.go @@ -1,56 +1,14 @@ package stack import ( - "log/slog" - "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/eth" ) -// TestSequencerID identifies a TestSequencer by name and chainID, is type-safe, and can be value-copied and used as map key. -type TestSequencerID genericID - -var _ GenericID = (*TestSequencerID)(nil) - -const TestSequencerKind Kind = "TestSequencer" - -// NewTestSequencerID creates a new TestSequencerID with the given key. 
-func NewTestSequencerID(key string) TestSequencerID { - return TestSequencerID(key) -} - -func (id TestSequencerID) String() string { - return genericID(id).string(TestSequencerKind) -} - -func (id TestSequencerID) Kind() Kind { - return TestSequencerKind -} - -func (id TestSequencerID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id TestSequencerID) MarshalText() ([]byte, error) { - return genericID(id).marshalText(TestSequencerKind) -} - -func (id *TestSequencerID) UnmarshalText(data []byte) error { - return (*genericID)(id).unmarshalText(TestSequencerKind, data) -} - -func SortTestSequencerIDs(ids []TestSequencerID) []TestSequencerID { - return copyAndSortCmp(ids) -} - -func SortTestSequencers(elems []TestSequencer) []TestSequencer { - return copyAndSort(elems, lessElemOrdered[TestSequencerID, TestSequencer]) -} - // TestSequencer type TestSequencer interface { Common - ID() TestSequencerID + ID() ComponentID AdminAPI() apis.TestSequencerAdminAPI BuildAPI() apis.TestSequencerBuildAPI diff --git a/op-devstack/sysext/control_plane.go b/op-devstack/sysext/control_plane.go index 65eb37d9d527c..157894df5abf3 100644 --- a/op-devstack/sysext/control_plane.go +++ b/op-devstack/sysext/control_plane.go @@ -26,27 +26,27 @@ func (c *ControlPlane) setLifecycleState(svcID string, mode stack.ControlAction) } } -func (c *ControlPlane) SupervisorState(id stack.SupervisorID, mode stack.ControlAction) { - c.setLifecycleState(string(id), mode) +func (c *ControlPlane) SupervisorState(id stack.ComponentID, mode stack.ControlAction) { + c.setLifecycleState(id.Key(), mode) } -func (c *ControlPlane) L2CLNodeState(id stack.L2CLNodeID, mode stack.ControlAction) { +func (c *ControlPlane) L2CLNodeState(id stack.ComponentID, mode stack.ControlAction) { c.setLifecycleState(id.Key(), mode) } -func (c *ControlPlane) L2ELNodeState(id stack.L2ELNodeID, mode stack.ControlAction) { +func (c *ControlPlane) L2ELNodeState(id stack.ComponentID, mode stack.ControlAction) { 
c.setLifecycleState(id.Key(), mode) } -func (c *ControlPlane) FakePoSState(id stack.L1CLNodeID, mode stack.ControlAction) { +func (c *ControlPlane) FakePoSState(id stack.ComponentID, mode stack.ControlAction) { panic("not implemented: plug in kurtosis wrapper, or gate for the test that uses this method to not run in kurtosis") } -func (c *ControlPlane) RollupBoostNodeState(id stack.RollupBoostNodeID, mode stack.ControlAction) { +func (c *ControlPlane) RollupBoostNodeState(id stack.ComponentID, mode stack.ControlAction) { c.setLifecycleState(id.Key(), mode) } -func (c *ControlPlane) OPRBuilderNodeState(id stack.OPRBuilderNodeID, mode stack.ControlAction) { +func (c *ControlPlane) OPRBuilderNodeState(id stack.ComponentID, mode stack.ControlAction) { c.setLifecycleState(id.Key(), mode) } diff --git a/op-devstack/sysext/l1.go b/op-devstack/sysext/l1.go index 467e74fe8405e..0ed5932e7c3a0 100644 --- a/op-devstack/sysext/l1.go +++ b/op-devstack/sysext/l1.go @@ -24,7 +24,7 @@ func (o *Orchestrator) hydrateL1(system stack.ExtensibleSystem) { CommonConfig: commonConfig, ChainConfig: env.Env.L1.Config, }, - ID: stack.L1NetworkID(l1ID), + ID: stack.NewL1NetworkID(l1ID), }) opts := []client.RPCOption{} diff --git a/op-devstack/sysext/l2.go b/op-devstack/sysext/l2.go index 5ec2be4155830..9d2b9450ba87a 100644 --- a/op-devstack/sysext/l2.go +++ b/op-devstack/sysext/l2.go @@ -24,8 +24,8 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) -func getL2ID(net *descriptors.L2Chain) stack.L2NetworkID { - return stack.L2NetworkID(eth.ChainIDFromBig(net.Config.ChainID)) +func getL2ID(net *descriptors.L2Chain) stack.ComponentID { + return stack.NewL2NetworkID(eth.ChainIDFromBig(net.Config.ChainID)) } func (o *Orchestrator) hydrateL2(net *descriptors.L2Chain, system stack.ExtensibleSystem) { @@ -35,7 +35,7 @@ func (o *Orchestrator) hydrateL2(net *descriptors.L2Chain, system stack.Extensib env := o.env l2ID := getL2ID(net) - l1 := 
system.L1Network(stack.L1NetworkID(eth.ChainIDFromBig(env.Env.L1.Config.ChainID))) + l1 := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(eth.ChainIDFromBig(env.Env.L1.Config.ChainID)))) cfg := shim.L2NetworkConfig{ NetworkConfig: shim.NetworkConfig{ @@ -46,11 +46,11 @@ func (o *Orchestrator) hydrateL2(net *descriptors.L2Chain, system stack.Extensib RollupConfig: net.RollupConfig, Deployment: newL2AddressBook(net.Addresses), Keys: o.defineSystemKeys(t, net), - Superchain: system.Superchain(stack.SuperchainID(env.Env.Name)), + Superchain: system.Superchain(stack.ByID[stack.Superchain](stack.NewSuperchainID(env.Env.Name))), L1: l1, } if o.isInterop() { - cfg.Cluster = system.Cluster(stack.ClusterID(env.Env.Name)) + cfg.Cluster = system.Cluster(stack.ByID[stack.Cluster](stack.NewClusterID(env.Env.Name))) } opts := []client.RPCOption{} @@ -187,7 +187,7 @@ func (o *Orchestrator) hydrateConductors(node *descriptors.Node, l2Net stack.Ext conductor := shim.NewConductor(shim.ConductorConfig{ CommonConfig: shim.NewCommonConfig(l2Net.T()), Client: conductorClient, - ID: stack.ConductorID(conductorService.Name), + ID: stack.NewConductorID(conductorService.Name), }) l2Net.AddConductor(conductor) diff --git a/op-devstack/sysext/system.go b/op-devstack/sysext/system.go index 6927517153fcf..01a6a186f6073 100644 --- a/op-devstack/sysext/system.go +++ b/op-devstack/sysext/system.go @@ -18,7 +18,7 @@ func (o *Orchestrator) hydrateSuperchain(sys stack.ExtensibleSystem) { env := o.env sys.AddSuperchain(shim.NewSuperchain(shim.SuperchainConfig{ CommonConfig: shim.NewCommonConfig(sys.T()), - ID: stack.SuperchainID(env.Env.Name), + ID: stack.NewSuperchainID(env.Env.Name), Deployment: newL1AddressBook(sys.T(), env.Env.L1.Addresses), })) } @@ -40,7 +40,7 @@ func (o *Orchestrator) hydrateClustersMaybe(sys stack.ExtensibleSystem) { sys.AddCluster(shim.NewCluster(shim.ClusterConfig{ CommonConfig: shim.NewCommonConfig(sys.T()), - ID: stack.ClusterID(env.Env.Name), + ID: 
stack.NewClusterID(env.Env.Name), DependencySet: &depSet, })) } @@ -52,11 +52,11 @@ func (o *Orchestrator) hydrateSupervisorsMaybe(sys stack.ExtensibleSystem) { return } - supervisors := make(map[stack.SupervisorID]bool) + supervisors := make(map[stack.ComponentID]bool) for _, l2 := range o.env.Env.L2 { if supervisorService, ok := l2.Services["supervisor"]; ok { for _, instance := range supervisorService { - id := stack.SupervisorID(instance.Name) + id := stack.NewSupervisorID(instance.Name) if supervisors[id] { // each supervisor appears in multiple L2s (covering the dependency set), // so we need to deduplicate @@ -74,7 +74,7 @@ func (o *Orchestrator) hydrateSupervisorsMaybe(sys stack.ExtensibleSystem) { } func (o *Orchestrator) hydrateTestSequencersMaybe(sys stack.ExtensibleSystem) { - sequencers := make(map[stack.TestSequencerID]bool) + sequencers := make(map[string]bool) // Collect all L2 chain IDs and the shared JWT secret var ( @@ -95,13 +95,12 @@ func (o *Orchestrator) hydrateTestSequencersMaybe(sys stack.ExtensibleSystem) { for _, l2 := range o.env.Env.L2 { if sequencerService, ok := l2.Services["test-sequencer"]; ok { for _, instance := range sequencerService { - id := stack.TestSequencerID(instance.Name) - if sequencers[id] { + if sequencers[instance.Name] { // Each test_sequencer appears in multiple L2s // So we need to deduplicate continue } - sequencers[id] = true + sequencers[instance.Name] = true cc := make(map[eth.ChainID]client.RPC, len(chainIDs)) for _, chainID := range chainIDs { @@ -116,7 +115,7 @@ func (o *Orchestrator) hydrateTestSequencersMaybe(sys stack.ExtensibleSystem) { sys.AddTestSequencer(shim.NewTestSequencer(shim.TestSequencerConfig{ CommonConfig: shim.NewCommonConfig(sys.T()), - ID: id, + ID: stack.NewTestSequencerID(instance.Name), Client: o.rpcClient(sys.T(), instance, RPCProtocol, "/", opts...), ControlClients: cc, })) diff --git a/op-devstack/sysgo/add_game_type.go b/op-devstack/sysgo/add_game_type.go index 
06c279415c346..a1407cbf2599a 100644 --- a/op-devstack/sysgo/add_game_type.go +++ b/op-devstack/sysgo/add_game_type.go @@ -57,7 +57,7 @@ func WithRespectedGameType(gameType gameTypes.GameType) stack.Option[*Orchestrat } } -func WithCannonGameTypeAdded(l1ELID stack.L1ELNodeID, l2ChainID eth.ChainID) stack.Option[*Orchestrator] { +func WithCannonGameTypeAdded(l1ELID stack.ComponentID, l2ChainID eth.ChainID) stack.Option[*Orchestrator] { return stack.FnOption[*Orchestrator]{ FinallyFn: func(o *Orchestrator) { // TODO(#17867): Rebuild the op-program prestate using the newly minted L2 chain configs before using it. @@ -82,7 +82,7 @@ func WithCannonKonaGameTypeAdded() stack.Option[*Orchestrator] { } } -func requireGameTypeTargetIDs(o *Orchestrator) (stack.L1ELNodeID, []stack.ComponentID) { +func requireGameTypeTargetIDs(o *Orchestrator) (stack.ComponentID, []stack.ComponentID) { require := o.P().Require() l2NetIDs := o.registry.IDsByKind(stack.KindL2Network) require.NotEmpty(l2NetIDs, "need at least one L2 network to configure game types") @@ -90,7 +90,7 @@ func requireGameTypeTargetIDs(o *Orchestrator) (stack.L1ELNodeID, []stack.Compon l1ELIDs := o.registry.IDsByKind(stack.KindL1ELNode) require.NotEmpty(l1ELIDs, "need at least one L1 EL node to configure game types") - return stack.NewL1ELNodeID(l1ELIDs[0].Key(), l1ELIDs[0].ChainID()), l2NetIDs + return l1ELIDs[0], l2NetIDs } func WithChallengerCannonKonaEnabled() stack.Option[*Orchestrator] { @@ -101,20 +101,18 @@ func WithChallengerCannonKonaEnabled() stack.Option[*Orchestrator] { } } -func setRespectedGameType(o *Orchestrator, gameType gameTypes.GameType, l1ELID stack.L1ELNodeID, l2ChainID eth.ChainID) { +func setRespectedGameType(o *Orchestrator, gameType gameTypes.GameType, l1ELID stack.ComponentID, l2ChainID eth.ChainID) { t := o.P() require := t.Require() require.NotNil(o.wb, "must have a world builder") l1ChainID := l1ELID.ChainID() - l2NetComponent, ok := 
o.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2ChainID)).ComponentID) + l2Network, ok := o.GetL2Network(stack.NewL2NetworkID(l2ChainID)) require.True(ok, "l2Net must exist") - l2Network := l2NetComponent.(*L2Network) portalAddr := l2Network.rollupCfg.DepositContractAddress - l1ELComponent, ok := o.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := o.GetL1EL(l1ELID) require.True(ok, "l1El must exist") - l1EL := l1ELComponent.(L1ELNode) rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) require.NoError(err) @@ -155,7 +153,7 @@ func setRespectedGameType(o *Orchestrator, gameType gameTypes.GameType, l1ELID s require.Equal(rcpt.Status, gethTypes.ReceiptStatusSuccessful, "set respected game type tx did not execute correctly") } -func addGameType(o *Orchestrator, absolutePrestate common.Hash, gameType gameTypes.GameType, l1ELID stack.L1ELNodeID, l2ChainID eth.ChainID) { +func addGameType(o *Orchestrator, absolutePrestate common.Hash, gameType gameTypes.GameType, l1ELID stack.ComponentID, l2ChainID eth.ChainID) { t := o.P() require := t.Require() require.NotNil(o.wb, "must have a world builder") @@ -163,9 +161,8 @@ func addGameType(o *Orchestrator, absolutePrestate common.Hash, gameType gameTyp opcmAddr := o.wb.output.ImplementationsDeployment.OpcmImpl - l1ELComponent, ok := o.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := o.GetL1EL(l1ELID) require.True(ok, "l1El must exist") - l1EL := l1ELComponent.(L1ELNode) rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) require.NoError(err) diff --git a/op-devstack/sysgo/cluster.go b/op-devstack/sysgo/cluster.go index 24392dcfa754a..63eebac2e76fb 100644 --- a/op-devstack/sysgo/cluster.go +++ b/op-devstack/sysgo/cluster.go @@ -7,7 +7,7 @@ import ( ) type Cluster struct { - id stack.ClusterID + id stack.ComponentID cfgset depset.FullConfigSetMerged } diff --git a/op-devstack/sysgo/control_plane.go b/op-devstack/sysgo/control_plane.go index 
94ecf5eda4b9c..1f4f019bdaec9 100644 --- a/op-devstack/sysgo/control_plane.go +++ b/op-devstack/sysgo/control_plane.go @@ -17,47 +17,40 @@ func control(lifecycle stack.Lifecycle, mode stack.ControlAction) { } } -func (c *ControlPlane) SupervisorState(id stack.SupervisorID, mode stack.ControlAction) { - cid := stack.ConvertSupervisorID(id) - component, ok := c.o.registry.Get(cid.ComponentID) +func (c *ControlPlane) SupervisorState(id stack.ComponentID, mode stack.ControlAction) { + component, ok := c.o.GetSupervisor(id) c.o.P().Require().True(ok, "need supervisor to change state") - control(component.(Supervisor), mode) + control(component, mode) } -func (c *ControlPlane) L2CLNodeState(id stack.L2CLNodeID, mode stack.ControlAction) { - cid := stack.ConvertL2CLNodeID(id) - component, ok := c.o.registry.Get(cid.ComponentID) +func (c *ControlPlane) L2CLNodeState(id stack.ComponentID, mode stack.ControlAction) { + component, ok := c.o.GetL2CL(id) c.o.P().Require().True(ok, "need l2cl node to change state") - control(component.(L2CLNode), mode) + control(component, mode) } -func (c *ControlPlane) L2ELNodeState(id stack.L2ELNodeID, mode stack.ControlAction) { - cid := stack.ConvertL2ELNodeID(id) - component, ok := c.o.registry.Get(cid.ComponentID) +func (c *ControlPlane) L2ELNodeState(id stack.ComponentID, mode stack.ControlAction) { + component, ok := c.o.GetL2EL(id) c.o.P().Require().True(ok, "need l2el node to change state") - control(component.(L2ELNode), mode) + control(component, mode) } -func (c *ControlPlane) FakePoSState(id stack.L1CLNodeID, mode stack.ControlAction) { - cid := stack.ConvertL1CLNodeID(id) - component, ok := c.o.registry.Get(cid.ComponentID) +func (c *ControlPlane) FakePoSState(id stack.ComponentID, mode stack.ControlAction) { + component, ok := c.o.GetL1CL(id) c.o.P().Require().True(ok, "need l1cl node to change state of fakePoS module") - s := component.(*L1CLNode) - control(s.fakepos, mode) + control(component.fakepos, mode) } -func (c 
*ControlPlane) OPRBuilderNodeState(id stack.OPRBuilderNodeID, mode stack.ControlAction) { - cid := stack.ConvertOPRBuilderNodeID(id) - component, ok := c.o.registry.Get(cid.ComponentID) +func (c *ControlPlane) OPRBuilderNodeState(id stack.ComponentID, mode stack.ControlAction) { + component, ok := c.o.GetOPRBuilder(id) c.o.P().Require().True(ok, "need oprbuilder node to change state") - control(component.(*OPRBuilderNode), mode) + control(component, mode) } -func (c *ControlPlane) RollupBoostNodeState(id stack.RollupBoostNodeID, mode stack.ControlAction) { - cid := stack.ConvertRollupBoostNodeID(id) - component, ok := c.o.registry.Get(cid.ComponentID) +func (c *ControlPlane) RollupBoostNodeState(id stack.ComponentID, mode stack.ControlAction) { + component, ok := c.o.GetRollupBoost(id) c.o.P().Require().True(ok, "need rollup boost node to change state") - control(component.(*RollupBoostNode), mode) + control(component, mode) } var _ stack.ControlPlane = (*ControlPlane)(nil) diff --git a/op-devstack/sysgo/control_plane_test.go b/op-devstack/sysgo/control_plane_test.go index 6c601d4ffe382..aa734be01b7f8 100644 --- a/op-devstack/sysgo/control_plane_test.go +++ b/op-devstack/sysgo/control_plane_test.go @@ -58,7 +58,7 @@ func TestControlPlane(gt *testing.T) { func testSupervisorRestart(ids DefaultInteropSystemIDs, system stack.System, control stack.ControlPlane) { t := system.T() logger := t.Logger() - supervisor := system.Supervisor(ids.Supervisor) + supervisor := system.Supervisor(stack.ByID[stack.Supervisor](ids.Supervisor)) // progress supervisor for range 3 { @@ -103,7 +103,7 @@ func testSupervisorRestart(ids DefaultInteropSystemIDs, system stack.System, con func testL2CLRestart(ids DefaultInteropSystemIDs, system stack.System, control stack.ControlPlane) { t := system.T() logger := t.Logger() - seqA := system.L2Network(ids.L2A).L2CLNode(ids.L2ACL) + seqA := system.L2Network(stack.ByID[stack.L2Network](ids.L2A)).L2CLNode(stack.ByID[stack.L2CLNode](ids.L2ACL)) // 
progress chain for range 3 { @@ -165,7 +165,7 @@ func TestControlPlaneFakePoS(gt *testing.T) { ctx := t.Ctx() - el := system.L1Network(ids.L1).L1ELNode(match.FirstL1EL) + el := system.L1Network(stack.ByID[stack.L1Network](ids.L1)).L1ELNode(match.FirstL1EL) // progress chain blockTime := time.Second * 6 diff --git a/op-devstack/sysgo/deployer.go b/op-devstack/sysgo/deployer.go index 94627337386f7..fa12568a2fc2e 100644 --- a/op-devstack/sysgo/deployer.go +++ b/op-devstack/sysgo/deployer.go @@ -85,10 +85,10 @@ func WithDeployerCacheDir(dirPath string) DeployerPipelineOption { // WithDAFootprintGasScalar sets the DA footprint gas scalar with which the networks identified by // l2IDs will be launched. If there are no l2IDs provided, all L2 networks are set with scalar. -func WithDAFootprintGasScalar(scalar uint16, l2IDs ...stack.L2NetworkID) DeployerOption { +func WithDAFootprintGasScalar(scalar uint16, l2IDs ...stack.ComponentID) DeployerOption { return func(p devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { for _, l2 := range builder.L2s() { - if len(l2IDs) == 0 || slices.ContainsFunc(l2IDs, func(id stack.L2NetworkID) bool { + if len(l2IDs) == 0 || slices.ContainsFunc(l2IDs, func(id stack.ComponentID) bool { return id.ChainID() == l2.ChainID() }) { l2.WithDAFootprintGasScalar(scalar) @@ -125,22 +125,22 @@ func WithDeployer() stack.Option[*Orchestrator] { require := o.P().Require() require.NotNil(o.wb, "must have a world builder") - l1ID := stack.L1NetworkID(eth.ChainIDFromUInt64(wb.output.AppliedIntent.L1ChainID)) - superchainID := stack.SuperchainID("main") - clusterID := stack.ClusterID("main") + l1ID := stack.NewL1NetworkID(eth.ChainIDFromUInt64(wb.output.AppliedIntent.L1ChainID)) + superchainID := stack.NewSuperchainID("main") + clusterID := stack.NewClusterID("main") l1Net := &L1Network{ id: l1ID, genesis: wb.outL1Genesis, blockTime: 6, } - o.registry.Register(stack.ConvertL1NetworkID(l1ID).ComponentID, l1Net) + o.registry.Register(l1ID, l1Net) - 
o.registry.Register(stack.ConvertSuperchainID(superchainID).ComponentID, &Superchain{ + o.registry.Register(superchainID, &Superchain{ id: superchainID, deployment: wb.outSuperchainDeployment, }) - o.registry.Register(stack.ConvertClusterID(clusterID).ComponentID, &Cluster{ + o.registry.Register(clusterID, &Cluster{ id: clusterID, cfgset: wb.outFullCfgSet, }) @@ -153,7 +153,7 @@ func WithDeployer() stack.Option[*Orchestrator] { l2Dep, ok := wb.outL2Deployment[chainID] require.True(ok, "L2 deployment must exist") - l2ID := stack.L2NetworkID(chainID) + l2ID := stack.NewL2NetworkID(chainID) l2Net := &L2Network{ id: l2ID, l1ChainID: l1ID.ChainID(), @@ -162,7 +162,7 @@ func WithDeployer() stack.Option[*Orchestrator] { deployment: l2Dep, keys: o.keys, } - o.registry.Register(stack.ConvertL2NetworkID(l2ID).ComponentID, l2Net) + o.registry.Register(l2ID, l2Net) } }, } diff --git a/op-devstack/sysgo/faucet.go b/op-devstack/sysgo/faucet.go index 88aba54f7defa..df0261cb6c1f6 100644 --- a/op-devstack/sysgo/faucet.go +++ b/op-devstack/sysgo/faucet.go @@ -49,16 +49,38 @@ func (n *FaucetService) hydrate(system stack.ExtensibleSystem) { for chainID, faucetID := range n.service.Defaults() { id := stack.NewFaucetID(faucetID.String(), chainID) net := system.Network(chainID).(stack.ExtensibleNetwork) - net.Faucet(id).SetLabel("default", "true") + net.Faucet(stack.ByID[stack.Faucet](id)).SetLabel("default", "true") } } -func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Option[*Orchestrator] { +func isL2ELFaucetKind(kind stack.ComponentKind) bool { + switch kind { + case stack.KindL2ELNode, stack.KindRollupBoostNode, stack.KindOPRBuilderNode: + return true + default: + return false + } +} + +func WithFaucets(l1ELs []stack.ComponentID, l2ELs []stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { + require := orch.P().Require() + + require.NotEmpty(l2ELs, "need at least one L2 EL for faucet service") + for i, l1ELID 
:= range l1ELs { + require.Equalf(stack.KindL1ELNode, l1ELID.Kind(), "l1ELs[%d] must be kind %s", i, stack.KindL1ELNode) + require.Truef(l1ELID.HasChainID(), "l1ELs[%d] must be chain-scoped", i) + } + for i, l2ELID := range l2ELs { + require.Truef(isL2ELFaucetKind(l2ELID.Kind()), + "l2ELs[%d] must be one of %s, %s, %s", + i, stack.KindL2ELNode, stack.KindRollupBoostNode, stack.KindOPRBuilderNode) + require.Truef(l2ELID.HasChainID(), "l2ELs[%d] must be chain-scoped", i) + } + faucetID := stack.NewFaucetID("dev-faucet", l2ELs[0].ChainID()) p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), faucetID)) - - require := p.Require() + require = p.Require() require.Nil(orch.faucet, "can only support a single faucet-service in sysgo") @@ -71,9 +93,8 @@ func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Optio id := ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", elID.ChainID())) require.NotContains(faucets, id, "one faucet per chain only") - elComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(elID).ComponentID) + el, ok := orch.GetL1EL(elID) require.True(ok, "need L1 EL for faucet", elID) - el := elComponent.(L1ELNode) faucets[id] = &fconf.FaucetEntry{ ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, @@ -87,9 +108,8 @@ func WithFaucets(l1ELs []stack.L1ELNodeID, l2ELs []stack.L2ELNodeID) stack.Optio id := ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", elID.ChainID())) require.NotContains(faucets, id, "one faucet per chain only") - elComponent, ok := orch.registry.Get(stack.ConvertL2ELNodeID(elID).ComponentID) + el, ok := orch.GetL2EL(elID) require.True(ok, "need L2 EL for faucet", elID) - el := elComponent.(L2ELNode) faucets[id] = &fconf.FaucetEntry{ ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, diff --git a/op-devstack/sysgo/l1_network.go b/op-devstack/sysgo/l1_network.go index 0918c0f8a1523..e2cf19594c2ad 100644 --- a/op-devstack/sysgo/l1_network.go +++ b/op-devstack/sysgo/l1_network.go @@ -8,7 +8,7 @@ import ( ) 
type L1Network struct { - id stack.L1NetworkID + id stack.ComponentID genesis *core.Genesis blockTime uint64 } diff --git a/op-devstack/sysgo/l1_nodes.go b/op-devstack/sysgo/l1_nodes.go index d8d09b37a9c05..7fbe95b1e4533 100644 --- a/op-devstack/sysgo/l1_nodes.go +++ b/op-devstack/sysgo/l1_nodes.go @@ -21,7 +21,7 @@ type L1ELNode interface { } type L1Geth struct { - id stack.L1ELNodeID + id stack.ComponentID userRPC string authRPC string l1Geth *geth.GethInstance @@ -51,12 +51,12 @@ func (n *L1Geth) hydrate(system stack.ExtensibleSystem) { ChainID: n.id.ChainID(), }, }) - l1Net := system.L1Network(stack.L1NetworkID(n.id.ChainID())) + l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(n.id.ChainID()))) l1Net.(stack.ExtensibleL1Network).AddL1ELNode(frontend) } type L1CLNode struct { - id stack.L1CLNodeID + id stack.ComponentID beaconHTTPAddr string beacon *fakebeacon.FakeBeacon fakepos *FakePoS @@ -69,13 +69,13 @@ func (n *L1CLNode) hydrate(system stack.ExtensibleSystem) { ID: n.id, Client: beaconCl, }) - l1Net := system.L1Network(stack.L1NetworkID(n.id.ChainID())) + l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(n.id.ChainID()))) l1Net.(stack.ExtensibleL1Network).AddL1CLNode(frontend) } const DevstackL1ELKindEnvVar = "DEVSTACK_L1EL_KIND" -func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[*Orchestrator] { +func WithL1Nodes(l1ELID stack.ComponentID, l1CLID stack.ComponentID) stack.Option[*Orchestrator] { switch os.Getenv(DevstackL1ELKindEnvVar) { case "geth": return WithL1NodesSubprocess(l1ELID, l1CLID) @@ -84,15 +84,14 @@ func WithL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[ } } -func WithL1NodesInProcess(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stack.Option[*Orchestrator] { +func WithL1NodesInProcess(l1ELID stack.ComponentID, l1CLID stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { clP := 
orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l1CLID)) elP := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l1ELID)) require := orch.P().Require() - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) require.True(ok, "L1 network must exist") - l1Net := l1NetComponent.(*L1Network) blockTimeL1 := l1Net.blockTime l1FinalizedDistance := uint64(20) @@ -138,9 +137,8 @@ func WithL1NodesInProcess(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stac l1Geth: l1Geth, blobPath: blobPath, } - elCID := stack.ConvertL1ELNodeID(l1ELID).ComponentID - require.False(orch.registry.Has(elCID), "must not already exist") - orch.registry.Register(elCID, l1ELNode) + require.False(orch.registry.Has(l1ELID), "must not already exist") + orch.registry.Register(l1ELID, l1ELNode) l1CLNode := &L1CLNode{ id: l1CLID, @@ -148,14 +146,13 @@ func WithL1NodesInProcess(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID) stac beacon: bcn, fakepos: &FakePoS{fakepos: fp, p: clP}, } - clCID := stack.ConvertL1CLNodeID(l1CLID).ComponentID - require.False(orch.registry.Has(clCID), "must not already exist") - orch.registry.Register(clCID, l1CLNode) + require.False(orch.registry.Has(l1CLID), "must not already exist") + orch.registry.Register(l1CLID, l1CLNode) }) } // WithExtL1Nodes initializes L1 EL and CL nodes that connect to external RPC endpoints -func WithExtL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, elRPCEndpoint string, clRPCEndpoint string) stack.Option[*Orchestrator] { +func WithExtL1Nodes(l1ELID stack.ComponentID, l1CLID stack.ComponentID, elRPCEndpoint string, clRPCEndpoint string) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() @@ -164,17 +161,15 @@ func WithExtL1Nodes(l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, elRPCEndpo id: l1ELID, userRPC: elRPCEndpoint, } - elCID 
:= stack.ConvertL1ELNodeID(l1ELID).ComponentID - require.False(orch.registry.Has(elCID), "must not already exist") - orch.registry.Register(elCID, l1ELNode) + require.False(orch.registry.Has(l1ELID), "must not already exist") + orch.registry.Register(l1ELID, l1ELNode) // Create L1 CL node with external RPC l1CLNode := &L1CLNode{ id: l1CLID, beaconHTTPAddr: clRPCEndpoint, } - clCID := stack.ConvertL1CLNodeID(l1CLID).ComponentID - require.False(orch.registry.Has(clCID), "must not already exist") - orch.registry.Register(clCID, l1CLNode) + require.False(orch.registry.Has(l1CLID), "must not already exist") + orch.registry.Register(l1CLID, l1CLNode) }) } diff --git a/op-devstack/sysgo/l1_nodes_subprocess.go b/op-devstack/sysgo/l1_nodes_subprocess.go index e35ad97aa685c..43268d6618ca6 100644 --- a/op-devstack/sysgo/l1_nodes_subprocess.go +++ b/op-devstack/sysgo/l1_nodes_subprocess.go @@ -25,7 +25,7 @@ import ( type ExternalL1Geth struct { mu sync.Mutex - id stack.L1ELNodeID + id stack.ComponentID l1Net *L1Network // authRPC points to a proxy that forwards to geth's endpoint authRPC string @@ -53,7 +53,7 @@ func (n *ExternalL1Geth) hydrate(system stack.ExtensibleSystem) { require.NoError(err) system.T().Cleanup(rpcCl.Close) - l1Net := system.L1Network(stack.L1NetworkID(n.id.ChainID())) + l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(n.id.ChainID()))) sysL1EL := shim.NewL1ELNode(shim.L1ELNodeConfig{ ID: n.id, ELNodeConfig: shim.ELNodeConfig{ @@ -150,7 +150,7 @@ func (n *ExternalL1Geth) AuthRPC() string { const GethExecPathEnvVar = "SYSGO_GETH_EXEC_PATH" -func WithL1NodesSubprocess(id stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Option[*Orchestrator] { +func WithL1NodesSubprocess(id stack.ComponentID, clID stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) require := p.Require() @@ -160,9 +160,8 @@ func WithL1NodesSubprocess(id 
stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Opt _, err := os.Stat(execPath) p.Require().NotErrorIs(err, os.ErrNotExist, "geth executable must exist") - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(id.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(id.ChainID())) require.True(ok, "L1 network required") - l1Net := l1NetComponent.(*L1Network) jwtPath, jwtSecret := orch.writeDefaultJWT() @@ -208,7 +207,7 @@ func WithL1NodesSubprocess(id stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Opt l1EL.Start() p.Cleanup(l1EL.Stop) p.Logger().Info("geth is ready", "userRPC", l1EL.userRPC, "authRPC", l1EL.authRPC) - elCID := stack.ConvertL1ELNodeID(id).ComponentID + elCID := id require.False(orch.registry.Has(elCID), "must be unique L1 EL node") orch.registry.Register(elCID, l1EL) @@ -236,7 +235,7 @@ func WithL1NodesSubprocess(id stack.L1ELNodeID, clID stack.L1CLNodeID) stack.Opt } fp.Start() p.Cleanup(fp.Stop) - orch.registry.Register(stack.ConvertL1CLNodeID(clID).ComponentID, &L1CLNode{ + orch.registry.Register(clID, &L1CLNode{ id: clID, beaconHTTPAddr: bcn.BeaconAddr(), beacon: bcn, diff --git a/op-devstack/sysgo/l2_batcher.go b/op-devstack/sysgo/l2_batcher.go index 3c2082e05e4fc..3a2013ded78fb 100644 --- a/op-devstack/sysgo/l2_batcher.go +++ b/op-devstack/sysgo/l2_batcher.go @@ -21,7 +21,7 @@ import ( ) type L2Batcher struct { - id stack.L2BatcherID + id stack.ComponentID service *bss.BatcherService rpc string l1RPC string @@ -40,11 +40,11 @@ func (b *L2Batcher) hydrate(system stack.ExtensibleSystem) { ID: b.id, Client: rpcCl, }) - l2Net := system.L2Network(stack.L2NetworkID(b.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(b.id.ChainID()))) l2Net.(stack.ExtensibleL2Network).AddL2Batcher(bFrontend) } -type BatcherOption func(id stack.L2BatcherID, cfg *bss.CLIConfig) +type BatcherOption func(id stack.ComponentID, cfg *bss.CLIConfig) func WithBatcherOption(opt 
BatcherOption) stack.Option[*Orchestrator] { return stack.Deploy[*Orchestrator](func(orch *Orchestrator) { @@ -52,37 +52,32 @@ func WithBatcherOption(opt BatcherOption) stack.Option[*Orchestrator] { }) } -func WithBatcher(batcherID stack.L2BatcherID, l1ELID stack.L1ELNodeID, l2CLID stack.L2CLNodeID, l2ELID stack.L2ELNodeID) stack.Option[*Orchestrator] { +func WithBatcher(batcherID stack.ComponentID, l1ELID stack.ComponentID, l2CLID stack.ComponentID, l2ELID stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), batcherID)) require := p.Require() - batcherCID := stack.ConvertL2BatcherID(batcherID).ComponentID + batcherCID := batcherID require.False(orch.registry.Has(batcherCID), "batcher must not already exist") - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2CLID.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(l2CLID.ChainID())) require.True(ok) - l2Net := l2NetComponent.(*L2Network) - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) require.True(ok) - l1Net := l1NetComponent.(*L1Network) require.Equal(l2Net.l1ChainID, l1Net.id.ChainID(), "expecting L1EL on L1 of L2CL") require.Equal(l2CLID.ChainID(), l2ELID.ChainID(), "L2 CL and EL must be on same L2 chain") - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok) - l1EL := l1ELComponent.(L1ELNode) - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CLID).ComponentID) + l2CL, ok := orch.GetL2CL(l2CLID) require.True(ok) - l2CL := l2CLComponent.(L2CLNode) - l2ELComponent, ok := orch.registry.Get(stack.ConvertL2ELNodeID(l2ELID).ComponentID) + l2EL, ok := orch.GetL2EL(l2ELID) require.True(ok) - l2EL := 
l2ELComponent.(L2ELNode) batcherSecret, err := orch.keys.Secret(devkeys.BatcherRole.Key(l2ELID.ChainID().ToBig())) require.NoError(err) @@ -147,6 +142,6 @@ func WithBatcher(batcherID stack.L2BatcherID, l1ELID stack.L1ELNodeID, l2CLID st l2CLRPC: l2CL.UserRPC(), l2ELRPC: l2EL.UserRPC(), } - orch.registry.Register(stack.ConvertL2BatcherID(batcherID).ComponentID, b) + orch.registry.Register(batcherID, b) }) } diff --git a/op-devstack/sysgo/l2_challenger.go b/op-devstack/sysgo/l2_challenger.go index 25ebb39d76c07..3057819b413f9 100644 --- a/op-devstack/sysgo/l2_challenger.go +++ b/op-devstack/sysgo/l2_challenger.go @@ -24,9 +24,9 @@ type l2ChallengerOpts struct { } type L2Challenger struct { - id stack.L2ChallengerID + id stack.ComponentID service cliapp.Lifecycle - l2NetIDs []stack.L2NetworkID + l2NetIDs []stack.ComponentID config *config.Config } @@ -38,37 +38,37 @@ func (p *L2Challenger) hydrate(system stack.ExtensibleSystem) { }) for _, netID := range p.l2NetIDs { - l2Net := system.L2Network(netID) + l2Net := system.L2Network(stack.ByID[stack.L2Network](netID)) l2Net.(stack.ExtensibleL2Network).AddL2Challenger(bFrontend) } } -func WithL2Challenger(challengerID stack.L2ChallengerID, l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, - supervisorID *stack.SupervisorID, clusterID *stack.ClusterID, l2CLID *stack.L2CLNodeID, l2ELIDs []stack.L2ELNodeID, +func WithL2Challenger(challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, + supervisorID *stack.ComponentID, clusterID *stack.ComponentID, l2CLID *stack.ComponentID, l2ELIDs []stack.ComponentID, ) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { WithL2ChallengerPostDeploy(orch, challengerID, l1ELID, l1CLID, supervisorID, clusterID, l2CLID, l2ELIDs, nil) }) } -func WithSuperL2Challenger(challengerID stack.L2ChallengerID, l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, - supervisorID *stack.SupervisorID, clusterID *stack.ClusterID, l2ELIDs 
[]stack.L2ELNodeID, +func WithSuperL2Challenger(challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, + supervisorID *stack.ComponentID, clusterID *stack.ComponentID, l2ELIDs []stack.ComponentID, ) stack.Option[*Orchestrator] { return stack.Finally(func(orch *Orchestrator) { WithL2ChallengerPostDeploy(orch, challengerID, l1ELID, l1CLID, supervisorID, clusterID, nil, l2ELIDs, nil) }) } -func WithSupernodeL2Challenger(challengerID stack.L2ChallengerID, l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, - supernodeID *stack.SupernodeID, clusterID *stack.ClusterID, l2ELIDs []stack.L2ELNodeID, +func WithSupernodeL2Challenger(challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, + supernodeID *stack.SupernodeID, clusterID *stack.ComponentID, l2ELIDs []stack.ComponentID, ) stack.Option[*Orchestrator] { return stack.Finally(func(orch *Orchestrator) { WithL2ChallengerPostDeploy(orch, challengerID, l1ELID, l1CLID, nil, clusterID, nil, l2ELIDs, supernodeID) }) } -func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2ChallengerID, l1ELID stack.L1ELNodeID, l1CLID stack.L1CLNodeID, - supervisorID *stack.SupervisorID, clusterID *stack.ClusterID, l2CLID *stack.L2CLNodeID, l2ELIDs []stack.L2ELNodeID, +func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, + supervisorID *stack.ComponentID, clusterID *stack.ComponentID, l2CLID *stack.ComponentID, l2ELIDs []stack.ComponentID, supernodeID *stack.SupernodeID, ) { ctx := orch.P().Ctx() @@ -76,7 +76,7 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen p := orch.P().WithCtx(ctx) require := p.Require() - challengerCID := stack.ConvertL2ChallengerID(challengerID).ComponentID + challengerCID := challengerID require.False(orch.registry.Has(challengerCID), "challenger must not already exist") challengerSecret, err := 
orch.keys.Secret(devkeys.ChallengerRole.Key(challengerID.ChainID().ToBig())) @@ -85,16 +85,14 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen logger := p.Logger() logger.Info("Challenger key acquired", "addr", crypto.PubkeyToAddress(challengerSecret.PublicKey)) - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok) - l1EL := l1ELComponent.(L1ELNode) - l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) + l1CL, ok := orch.GetL1CL(l1CLID) require.True(ok) - l1CL := l1CLComponent.(*L1CLNode) l2Geneses := make([]*core.Genesis, 0, len(l2ELIDs)) rollupCfgs := make([]*rollup.Config, 0, len(l2ELIDs)) - l2NetIDs := make([]stack.L2NetworkID, 0, len(l2ELIDs)) + l2NetIDs := make([]stack.ComponentID, 0, len(l2ELIDs)) var disputeGameFactoryAddr common.Address var interopScheduled bool @@ -106,9 +104,8 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen } for _, l2ELID := range l2ELIDs { chainID := l2ELID.ChainID() - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(chainID)).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(chainID)) require.Truef(ok, "l2Net %s not found", chainID) - l2Net := l2NetComponent.(*L2Network) factory := l2Net.deployment.DisputeGameFactoryProxyAddr() if disputeGameFactoryAddr == (common.Address{}) { disputeGameFactoryAddr = factory @@ -122,11 +119,10 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2NetIDs = append(l2NetIDs, l2Net.id) } - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) if !ok { require.Fail("l1 network not found") } - l1Net := l1NetComponent.(*L1Network) l1Genesis := l1Net.genesis if orch.l2ChallengerOpts.useCannonKonaConfig { @@ 
-144,9 +140,8 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen useSuperNode := false switch { case supervisorID != nil: - supervisorComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*supervisorID).ComponentID) + supervisorNode, ok := orch.GetSupervisor(*supervisorID) require.True(ok) - supervisorNode := supervisorComponent.(Supervisor) superRPC = supervisorNode.UserRPC() case supernodeID != nil: supernode, ok := orch.supernodes.Get(*supernodeID) @@ -159,14 +154,12 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2ELRPCs := make([]string, len(l2ELIDs)) for i, l2ELID := range l2ELIDs { - l2ELComponent, ok := orch.registry.Get(stack.ConvertL2ELNodeID(l2ELID).ComponentID) + l2EL, ok := orch.GetL2EL(l2ELID) require.True(ok) - l2EL := l2ELComponent.(L2ELNode) l2ELRPCs[i] = l2EL.UserRPC() } - clusterComponent, ok := orch.registry.Get(stack.ConvertClusterID(*clusterID).ComponentID) + cluster, ok := orch.GetCluster(*clusterID) require.True(ok) - cluster := clusterComponent.(*Cluster) prestateVariant := shared.InteropVariant options := []shared.Option{ shared.WithFactoryAddress(disputeGameFactoryAddr), @@ -188,7 +181,7 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen } else { require.NotNil(l2CLID, "need L2 CL to connect to pre-interop") // In a post-interop infra setup, with unscheduled interop, we may see multiple EL nodes. 
- var l2ELID stack.L2ELNodeID + var l2ELID stack.ComponentID for _, id := range l2ELIDs { if id.ChainID() == l2CLID.ChainID() { l2ELID = id @@ -196,9 +189,8 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen } } require.NotZero(l2ELID, "need single L2 EL to connect to pre-interop") - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(*l2CLID).ComponentID) + l2CL, ok := orch.GetL2CL(*l2CLID) require.True(ok) - l2CL := l2CLComponent.(L2CLNode) l2EL, ok := orch.GetL2EL(l2ELID) require.True(ok) prestateVariant := shared.MTCannonVariant @@ -249,5 +241,5 @@ func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.L2Challen l2NetIDs: l2NetIDs, config: cfg, } - orch.registry.Register(stack.ConvertL2ChallengerID(challengerID).ComponentID, c) + orch.registry.Register(challengerID, c) } diff --git a/op-devstack/sysgo/l2_cl.go b/op-devstack/sysgo/l2_cl.go index 8ef75d92a5204..7f4f47e681453 100644 --- a/op-devstack/sysgo/l2_cl.go +++ b/op-devstack/sysgo/l2_cl.go @@ -41,19 +41,19 @@ type L2CLConfig struct { } func L2CLSequencer() L2CLOption { - return L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { + return L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { cfg.IsSequencer = true }) } func L2CLIndexing() L2CLOption { - return L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { + return L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { cfg.IndexingMode = true }) } func L2CLFollowSource(source string) L2CLOption { - return L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { + return L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { cfg.FollowSource = source }) } @@ -73,7 +73,7 @@ func DefaultL2CLConfig() *L2CLConfig { } type L2CLOption interface { - Apply(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) + Apply(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) } // WithGlobalL2CLOption applies 
the L2CLOption to all L2CLNode instances in this orchestrator @@ -83,11 +83,11 @@ func WithGlobalL2CLOption(opt L2CLOption) stack.Option[*Orchestrator] { }) } -type L2CLOptionFn func(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) +type L2CLOptionFn func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) var _ L2CLOption = L2CLOptionFn(nil) -func (fn L2CLOptionFn) Apply(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { +func (fn L2CLOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { fn(p, id, cfg) } @@ -96,7 +96,7 @@ type L2CLOptionBundle []L2CLOption var _ L2CLOption = L2CLOptionBundle(nil) -func (l L2CLOptionBundle) Apply(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfig) { +func (l L2CLOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { for _, opt := range l { p.Require().NotNil(opt, "cannot Apply nil L2CLOption") opt.Apply(p, id, cfg) @@ -106,19 +106,19 @@ func (l L2CLOptionBundle) Apply(p devtest.P, id stack.L2CLNodeID, cfg *L2CLConfi // WithL2CLNode adds the default type of L2 CL node. // The default can be configured with DEVSTACK_L2CL_KIND. // Tests that depend on specific types can use options like WithKonaNode and WithOpNode directly. -func WithL2CLNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithL2CLNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { switch os.Getenv("DEVSTACK_L2CL_KIND") { case "kona": return WithKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...) case "supernode": - var supe stack.SupernodeID // unused; this option is only used for CL tests that don't care about a supernode running + supe := stack.NewSupernodeID("default", l2CLID.ChainID()) return WithSupernode(supe, l2CLID, l1CLID, l1ELID, l2ELID, opts...) 
default: return WithOpNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...) } } -func WithL2CLNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, l2FollowSourceID stack.L2CLNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithL2CLNodeFollowL2(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, l2FollowSourceID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { switch os.Getenv("DEVSTACK_L2CL_KIND") { case "kona": return WithKonaNodeFollowL2(l2CLID, l1CLID, l1ELID, l2ELID, l2FollowSourceID, opts...) diff --git a/op-devstack/sysgo/l2_cl_kona.go b/op-devstack/sysgo/l2_cl_kona.go index 0edeff4aeb461..87823bf3bc33e 100644 --- a/op-devstack/sysgo/l2_cl_kona.go +++ b/op-devstack/sysgo/l2_cl_kona.go @@ -27,12 +27,12 @@ import ( type KonaNode struct { mu sync.Mutex - id stack.L2CLNodeID + id stack.ComponentID userRPC string interopEndpoint string // warning: currently not fully supported interopJwtSecret eth.Bytes32 - el stack.L2ELNodeID + el stack.ComponentID userProxy *tcpproxy.Proxy @@ -63,9 +63,9 @@ func (k *KonaNode) hydrate(system stack.ExtensibleSystem) { InteropJwtSecret: k.interopJwtSecret, }) sysL2CL.SetLabel(match.LabelVendor, string(match.KonaNode)) - l2Net := system.L2Network(stack.L2NetworkID(k.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(k.id.ChainID()))) l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) - sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(k.el)) + sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(stack.ByID[stack.L2ELNode](k.el))) } func (k *KonaNode) Start() { @@ -161,13 +161,12 @@ func (k *KonaNode) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { var _ L2CLNode = (*KonaNode)(nil) -func WithKonaNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, l2FollowSourceID 
stack.L2CLNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithKonaNodeFollowL2(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, l2FollowSourceID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { followSource := func(orch *Orchestrator) string { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2FollowSourceID).ComponentID) + l2CLFollowSource, ok := orch.GetL2CL(l2FollowSourceID) p.Require().True(ok, "l2 CL Follow Source required") - l2CLFollowSource := l2CLComponent.(L2CLNode) return l2CLFollowSource.UserRPC() }(orch) opts = append(opts, L2CLFollowSource(followSource)) @@ -175,33 +174,29 @@ func WithKonaNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1EL }) } -func WithKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithKonaNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(withKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)) } -func withKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) func(orch *Orchestrator) { +func withKonaNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) func(orch *Orchestrator) { return func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) require := p.Require() - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1CLID.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1CLID.ChainID())) require.True(ok, 
"l1 network required") - l1Net := l1NetComponent.(*L1Network) - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2CLID.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(l2CLID.ChainID())) require.True(ok, "l2 network required") - l2Net := l2NetComponent.(*L2Network) l1ChainConfig := l1Net.genesis.Config - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok, "l1 EL node required") - l1EL := l1ELComponent.(L1ELNode) - l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) + l1CL, ok := orch.GetL1CL(l1CLID) require.True(ok, "l1 CL node required") - l1CL := l1CLComponent.(*L1CLNode) l2EL, ok := orch.GetL2EL(l2ELID) require.True(ok, "l2 EL node required") @@ -306,7 +301,7 @@ func withKonaNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack k.Start() p.Cleanup(k.Stop) p.Logger().Info("Kona-node is up", "rpc", k.UserRPC()) - cid := stack.ConvertL2CLNodeID(l2CLID).ComponentID + cid := l2CLID require.False(orch.registry.Has(cid), "must not already exist") orch.registry.Register(cid, k) } diff --git a/op-devstack/sysgo/l2_cl_opnode.go b/op-devstack/sysgo/l2_cl_opnode.go index 7a3e0e6659f12..2b272cfaee973 100644 --- a/op-devstack/sysgo/l2_cl_opnode.go +++ b/op-devstack/sysgo/l2_cl_opnode.go @@ -41,7 +41,7 @@ import ( type OpNode struct { mu sync.Mutex - id stack.L2CLNodeID + id stack.ComponentID opNode *opnode.Opnode userRPC string interopEndpoint string @@ -49,7 +49,7 @@ type OpNode struct { cfg *config.Config p devtest.P logger log.Logger - el *stack.L2ELNodeID // Optional: nil when using SyncTester + el *stack.ComponentID // Optional: nil when using SyncTester userProxy *tcpproxy.Proxy interopProxy *tcpproxy.Proxy clock clock.Clock @@ -72,7 +72,7 @@ func (n *OpNode) hydrate(system stack.ExtensibleSystem) { InteropJwtSecret: n.interopJwtSecret, }) sysL2CL.SetLabel(match.LabelVendor, 
string(match.OpNode)) - l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) if n.el != nil { for _, el := range l2Net.L2ELNodes() { @@ -81,14 +81,14 @@ func (n *OpNode) hydrate(system stack.ExtensibleSystem) { return } } - rbID := stack.RollupBoostNodeID(*n.el) + rbID := stack.NewRollupBoostNodeID(n.el.Key(), n.el.ChainID()) for _, rb := range l2Net.RollupBoostNodes() { if rb.ID() == rbID { sysL2CL.(stack.LinkableL2CLNode).LinkRollupBoostNode(rb) return } } - oprbID := stack.OPRBuilderNodeID(*n.el) + oprbID := stack.NewOPRBuilderNodeID(n.el.Key(), n.el.ChainID()) for _, oprb := range l2Net.OPRBuilderNodes() { if oprb.ID() == oprbID { sysL2CL.(stack.LinkableL2CLNode).LinkOPRBuilderNode(oprb) @@ -162,13 +162,12 @@ func (n *OpNode) Stop() { n.opNode = nil } -func WithOpNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, l2FollowSourceID stack.L2CLNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithOpNodeFollowL2(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, l2FollowSourceID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { followSource := func(orch *Orchestrator) string { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2FollowSourceID).ComponentID) + l2CLFollowSource, ok := orch.GetL2CL(l2FollowSourceID) p.Require().True(ok, "l2 CL Follow Source required") - l2CLFollowSource := l2CLComponent.(L2CLNode) return l2CLFollowSource.UserRPC() }(orch) opts = append(opts, L2CLFollowSource(followSource)) @@ -176,31 +175,27 @@ func WithOpNodeFollowL2(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID }) } -func WithOpNode(l2CLID 
stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithOpNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(withOpNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)) } -func withOpNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) func(orch *Orchestrator) { +func withOpNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) func(orch *Orchestrator) { return func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) require := p.Require() - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1CLID.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1CLID.ChainID())) require.True(ok, "l1 network required") - l1Net := l1NetComponent.(*L1Network) - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(l2CLID.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(l2CLID.ChainID())) require.True(ok, "l2 network required") - l2Net := l2NetComponent.(*L2Network) - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok, "l1 EL node required") - l1EL := l1ELComponent.(L1ELNode) - l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) + l1CL, ok := orch.GetL1CL(l1CLID) require.True(ok, "l1 CL node required") - l1CL := l1CLComponent.(*L1CLNode) // Get the L2EL node (which can be a regular EL node or a SyncTesterEL) l2EL, ok := orch.GetL2EL(l2ELID) @@ -368,7 +363,7 @@ func withOpNode(l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L // Set the 
EL field to link to the L2EL node l2CLNode.el = &l2ELID - cid := stack.ConvertL2CLNodeID(l2CLID).ComponentID + cid := l2CLID require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", l2CLID)) orch.registry.Register(cid, l2CLNode) l2CLNode.Start() diff --git a/op-devstack/sysgo/l2_cl_p2p_util.go b/op-devstack/sysgo/l2_cl_p2p_util.go index 05a7cd3f79ed0..67d524dffa2b6 100644 --- a/op-devstack/sysgo/l2_cl_p2p_util.go +++ b/op-devstack/sysgo/l2_cl_p2p_util.go @@ -83,17 +83,15 @@ func getP2PClientsAndPeers(ctx context.Context, logger log.Logger, } // WithL2CLP2PConnection connects P2P between two L2CLs -func WithL2CLP2PConnection(l2CL1ID, l2CL2ID stack.L2CLNodeID) stack.Option[*Orchestrator] { +func WithL2CLP2PConnection(l2CL1ID, l2CL2ID stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() l := orch.P().Logger() - l2CL1Component, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CL1ID).ComponentID) + l2CL1, ok := orch.GetL2CL(l2CL1ID) require.True(ok, "looking for L2 CL node 1 to connect p2p") - l2CL1 := l2CL1Component.(L2CLNode) - l2CL2Component, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CL2ID).ComponentID) + l2CL2, ok := orch.GetL2CL(l2CL2ID) require.True(ok, "looking for L2 CL node 2 to connect p2p") - l2CL2 := l2CL2Component.(L2CLNode) require.Equal(l2CL1ID.ChainID(), l2CL2ID.ChainID(), "must be same l2 chain") ctx := orch.P().Ctx() diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index 442b7ff164cde..b10f17dd742df 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -41,7 +41,7 @@ type SuperNode struct { interopJwtSecret eth.Bytes32 p devtest.P logger log.Logger - els []*stack.L2ELNodeID // Optional: nil when using SyncTester + els []*stack.ComponentID // Optional: nil when using SyncTester chains []eth.ChainID l1UserRPC string l1BeaconAddr string @@ -140,20 +140,20 @@ func (n 
*SuperNode) ResumeInteropActivity() { } // WithSupernode constructs a Supernode-based L2 CL node -func WithSupernode(supernodeID stack.SupernodeID, l2CLID stack.L2CLNodeID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID, opts ...L2CLOption) stack.Option[*Orchestrator] { +func WithSupernode(supernodeID stack.SupernodeID, l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { args := []L2CLs{{CLID: l2CLID, ELID: l2ELID}} return WithSharedSupernodeCLs(supernodeID, args, l1CLID, l1ELID) } // SuperNodeProxy is a thin wrapper that points to a shared supernode instance. type SuperNodeProxy struct { - id stack.L2CLNodeID + id stack.ComponentID p devtest.P logger log.Logger userRPC string interopEndpoint string interopJwtSecret eth.Bytes32 - el *stack.L2ELNodeID + el *stack.ComponentID } var _ L2CLNode = (*SuperNodeProxy)(nil) @@ -173,10 +173,10 @@ func (n *SuperNodeProxy) hydrate(system stack.ExtensibleSystem) { InteropJwtSecret: n.interopJwtSecret, }) sysL2CL.SetLabel(match.LabelVendor, string(match.OpNode)) - l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) if n.el != nil { - sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(n.el)) + sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(stack.ByID[stack.L2ELNode](*n.el))) } } @@ -188,8 +188,8 @@ func (n *SuperNodeProxy) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { } type L2CLs struct { - CLID stack.L2CLNodeID - ELID stack.L2ELNodeID + CLID stack.ComponentID + ELID stack.ComponentID } // SupernodeConfig holds configuration options for the shared supernode. 
@@ -225,24 +225,23 @@ func WithSupernodeInteropAtGenesis() SupernodeOption { // WithSharedSupernodeCLsInterop starts one supernode for N L2 chains with interop enabled at genesis. // The interop activation timestamp is computed from the first chain's genesis time. -func WithSharedSupernodeCLsInterop(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { +func WithSharedSupernodeCLsInterop(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { return WithSharedSupernodeCLs(supernodeID, cls, l1CLID, l1ELID, WithSupernodeInteropAtGenesis()) } // WithSharedSupernodeCLsInteropDelayed starts one supernode for N L2 chains with interop enabled // at a specified offset from genesis. This allows testing the transition from non-interop to interop mode. -func WithSharedSupernodeCLsInteropDelayed(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, delaySeconds uint64) stack.Option[*Orchestrator] { +func WithSharedSupernodeCLsInteropDelayed(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID, delaySeconds uint64) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { if len(cls) == 0 { orch.P().Require().Fail("no chains provided") return } - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(cls[0].CLID.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(cls[0].CLID.ChainID())) if !ok { orch.P().Require().Fail("l2 network not found") return } - l2Net := l2NetComponent.(*L2Network) genesisTime := l2Net.rollupCfg.Genesis.L2Time activationTime := genesisTime + delaySeconds orch.P().Logger().Info("enabling supernode interop with delay", @@ -255,17 +254,30 @@ func WithSharedSupernodeCLsInteropDelayed(supernodeID stack.SupernodeID, cls []L } // WithSharedSupernodeCLs starts one 
supernode for N L2 chains and registers thin L2CL wrappers. -func WithSharedSupernodeCLs(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, opts ...SupernodeOption) stack.Option[*Orchestrator] { +func WithSharedSupernodeCLs(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID, opts ...SupernodeOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { withSharedSupernodeCLsImpl(orch, supernodeID, cls, l1CLID, l1ELID, opts...) }) } // withSharedSupernodeCLsImpl is the implementation for starting a shared supernode. -func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, opts ...SupernodeOption) { +func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID, opts ...SupernodeOption) { p := orch.P() require := p.Require() + require.Equal(stack.KindSupernode, supernodeID.Kind(), "supernode ID must be kind Supernode") + require.Equal(stack.KindL1CLNode, l1CLID.Kind(), "l1 CL ID must be kind L1CLNode") + require.Equal(stack.KindL1ELNode, l1ELID.Kind(), "l1 EL ID must be kind L1ELNode") + require.Equal(l1CLID.ChainID(), l1ELID.ChainID(), "l1 CL and EL IDs must be on the same chain") + require.NotEmpty(cls, "at least one L2 CL/EL pair is required") + for i := range cls { + ids := cls[i] + require.Equalf(stack.KindL2CLNode, ids.CLID.Kind(), "cls[%d].CLID must be kind L2CLNode", i) + require.Truef(ids.CLID.HasChainID(), "cls[%d].CLID must be chain-scoped", i) + require.Truef(ids.ELID.HasChainID(), "cls[%d].ELID must be chain-scoped", i) + require.Equalf(ids.CLID.ChainID(), ids.ELID.ChainID(), "cls[%d] CL and EL IDs must be on the same chain", i) + } + // Apply options snOpts := &SupernodeConfig{} for _, opt := range opts { @@ -275,25 +287,21 @@ func withSharedSupernodeCLsImpl(orch 
*Orchestrator, supernodeID stack.SupernodeI // Resolve UseGenesisInterop: read the activation timestamp from the first chain's genesis. if snOpts.UseGenesisInterop && snOpts.InteropActivationTimestamp == nil { p.Require().NotEmpty(cls, "no chains provided for genesis interop resolution") - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(cls[0].CLID.ChainID())).ComponentID) - l2Net := l2NetComponent.(*L2Network) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(cls[0].CLID.ChainID())) p.Require().True(ok, "l2 network not found for genesis interop resolution") genesisTime := l2Net.rollupCfg.Genesis.L2Time p.Logger().Info("enabling supernode interop at genesis", "activation_timestamp", genesisTime) snOpts.InteropActivationTimestamp = &genesisTime } - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok, "l1 EL node required") - l1EL := l1ELComponent.(L1ELNode) - l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) + l1CL, ok := orch.GetL1CL(l1CLID) require.True(ok, "l1 CL node required") - l1CL := l1CLComponent.(*L1CLNode) // Get L1 network to access L1 chain config - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) require.True(ok, "l1 network required") - l1Net := l1NetComponent.(*L1Network) _, jwtSecret := orch.writeDefaultJWT() @@ -345,12 +353,11 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI // Gather VN configs and chain IDs vnCfgs := make(map[eth.ChainID]*config.Config) chainIDs := make([]uint64, 0, len(cls)) - els := make([]*stack.L2ELNodeID, 0, len(cls)) + els := make([]*stack.ComponentID, 0, len(cls)) for i := range cls { a := cls[i] - l2NetComponent, ok := 
orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(a.CLID.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(a.CLID.ChainID())) require.True(ok, "l2 network required") - l2Net := l2NetComponent.(*L2Network) l2ELNode, ok := orch.GetL2EL(a.ELID) require.True(ok, "l2 EL node required") l2ChainID := a.CLID.ChainID() @@ -429,7 +436,7 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI interopJwtSecret: jwtSecret, el: &cls[i].ELID, } - cid := stack.ConvertL2CLNodeID(a.CLID).ComponentID + cid := a.CLID require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", a.CLID)) orch.registry.Register(cid, proxy) } diff --git a/op-devstack/sysgo/l2_el.go b/op-devstack/sysgo/l2_el.go index 7242f6f1498be..6ebe9e2de07fb 100644 --- a/op-devstack/sysgo/l2_el.go +++ b/op-devstack/sysgo/l2_el.go @@ -16,7 +16,7 @@ type L2ELNode interface { } type L2ELConfig struct { - SupervisorID *stack.SupervisorID + SupervisorID *stack.ComponentID P2PAddr string P2PPort int P2PNodeKeyHex string @@ -25,21 +25,21 @@ type L2ELConfig struct { ProofHistory bool } -func L2ELWithSupervisor(supervisorID stack.SupervisorID) L2ELOption { - return L2ELOptionFn(func(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { +func L2ELWithSupervisor(supervisorID stack.ComponentID) L2ELOption { + return L2ELOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { cfg.SupervisorID = &supervisorID }) } func L2ELWithProofHistory(enable bool) L2ELOption { - return L2ELOptionFn(func(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { + return L2ELOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { cfg.ProofHistory = enable }) } // L2ELWithP2PConfig sets deterministic P2P identity and static peers for the L2 EL. 
func L2ELWithP2PConfig(addr string, port int, nodeKeyHex string, staticPeers, trustedPeers []string) L2ELOption { - return L2ELOptionFn(func(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { + return L2ELOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { cfg.P2PAddr = addr cfg.P2PPort = port cfg.P2PNodeKeyHex = nodeKeyHex @@ -61,7 +61,7 @@ func DefaultL2ELConfig() *L2ELConfig { } type L2ELOption interface { - Apply(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) + Apply(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) } // WithGlobalL2ELOption applies the L2ELOption to all L2ELNode instances in this orchestrator @@ -71,11 +71,11 @@ func WithGlobalL2ELOption(opt L2ELOption) stack.Option[*Orchestrator] { }) } -type L2ELOptionFn func(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) +type L2ELOptionFn func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) var _ L2ELOption = L2ELOptionFn(nil) -func (fn L2ELOptionFn) Apply(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { +func (fn L2ELOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { fn(p, id, cfg) } @@ -84,7 +84,7 @@ type L2ELOptionBundle []L2ELOption var _ L2ELOption = L2ELOptionBundle(nil) -func (l L2ELOptionBundle) Apply(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfig) { +func (l L2ELOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { for _, opt := range l { p.Require().NotNil(opt, "cannot Apply nil L2ELOption") opt.Apply(p, id, cfg) @@ -94,7 +94,7 @@ func (l L2ELOptionBundle) Apply(p devtest.P, id stack.L2ELNodeID, cfg *L2ELConfi // WithL2ELNode adds the default type of L2 CL node. // The default can be configured with DEVSTACK_L2EL_KIND. // Tests that depend on specific types can use options like WithKonaNode and WithOpNode directly. 
-func WithL2ELNode(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestrator] { +func WithL2ELNode(id stack.ComponentID, opts ...L2ELOption) stack.Option[*Orchestrator] { switch os.Getenv("DEVSTACK_L2EL_KIND") { case "op-reth": return WithOpReth(id, opts...) @@ -103,7 +103,7 @@ func WithL2ELNode(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchest } } -func WithExtL2Node(id stack.L2ELNodeID, elRPCEndpoint string) stack.Option[*Orchestrator] { +func WithExtL2Node(id stack.ComponentID, elRPCEndpoint string) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() @@ -113,7 +113,7 @@ func WithExtL2Node(id stack.L2ELNodeID, elRPCEndpoint string) stack.Option[*Orch userRPC: elRPCEndpoint, readOnly: true, } - cid := stack.ConvertL2ELNodeID(id).ComponentID + cid := id require.False(orch.registry.Has(cid), "must not already exist") orch.registry.Register(cid, l2ELNode) }) diff --git a/op-devstack/sysgo/l2_el_opgeth.go b/op-devstack/sysgo/l2_el_opgeth.go index 5b2be235e7e17..57370d4403da8 100644 --- a/op-devstack/sysgo/l2_el_opgeth.go +++ b/op-devstack/sysgo/l2_el_opgeth.go @@ -27,7 +27,7 @@ type OpGeth struct { p devtest.P logger log.Logger - id stack.L2ELNodeID + id stack.ComponentID l2Net *L2Network jwtPath string jwtSecret [32]byte @@ -72,7 +72,7 @@ func (n *OpGeth) hydrate(system stack.ExtensibleSystem) { system.T().Cleanup(engineCl.Close) } - l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ RollupCfg: l2Net.RollupConfig(), ELNodeConfig: shim.ELNodeConfig{ @@ -179,14 +179,13 @@ func (n *OpGeth) Stop() { n.l2Geth = nil } -func WithOpGeth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestrator] { +func WithOpGeth(id stack.ComponentID, opts ...L2ELOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) 
{ p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) require := p.Require() - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(id.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(id.ChainID())) require.True(ok, "L2 network required") - l2Net := l2NetComponent.(*L2Network) cfg := DefaultL2ELConfig() orch.l2ELOptions.Apply(p, id, cfg) // apply global options @@ -198,9 +197,8 @@ func WithOpGeth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra supervisorRPC := "" if useInterop && cfg.SupervisorID != nil { - supComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*cfg.SupervisorID).ComponentID) + sup, ok := orch.GetSupervisor(*cfg.SupervisorID) require.True(ok, "supervisor is required for interop") - sup := supComponent.(Supervisor) supervisorRPC = sup.UserRPC() } @@ -220,7 +218,7 @@ func WithOpGeth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra p.Cleanup(func() { l2EL.Stop() }) - cid := stack.ConvertL2ELNodeID(id).ComponentID + cid := id require.False(orch.registry.Has(cid), "must be unique L2 EL node") orch.registry.Register(cid, l2EL) }) diff --git a/op-devstack/sysgo/l2_el_opreth.go b/op-devstack/sysgo/l2_el_opreth.go index 6f629b15bd63d..e09ec756f3aa7 100644 --- a/op-devstack/sysgo/l2_el_opreth.go +++ b/op-devstack/sysgo/l2_el_opreth.go @@ -25,7 +25,7 @@ import ( type OpReth struct { mu sync.Mutex - id stack.L2ELNodeID + id stack.ComponentID jwtPath string jwtSecret [32]byte authRPC string @@ -62,7 +62,7 @@ func (n *OpReth) hydrate(system stack.ExtensibleSystem) { require.NoError(err) system.T().Cleanup(engineCl.Close) - l2Net := system.L2Network(stack.L2NetworkID(n.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ RollupCfg: l2Net.RollupConfig(), ELNodeConfig: shim.ELNodeConfig{ @@ -184,14 +184,13 @@ func (n *OpReth) JWTPath() string { 
return n.jwtPath } -func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestrator] { +func WithOpReth(id stack.ComponentID, opts ...L2ELOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) require := p.Require() - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(id.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(id.ChainID())) require.True(ok, "L2 network required") - l2Net := l2NetComponent.(*L2Network) cfg := DefaultL2ELConfig() orch.l2ELOptions.Apply(p, id, cfg) // apply global options @@ -203,9 +202,8 @@ func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra supervisorRPC := "" if useInterop && cfg.SupervisorID != nil { - supComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*cfg.SupervisorID).ComponentID) + sup, ok := orch.GetSupervisor(*cfg.SupervisorID) require.True(ok, "supervisor is required for interop") - sup := supComponent.(Supervisor) supervisorRPC = sup.UserRPC() } @@ -326,7 +324,7 @@ func WithOpReth(id stack.L2ELNodeID, opts ...L2ELOption) stack.Option[*Orchestra l2EL.Start() p.Cleanup(l2EL.Stop) p.Logger().Info("op-reth is ready", "userRPC", l2EL.userRPC, "authRPC", l2EL.authRPC) - cid := stack.ConvertL2ELNodeID(id).ComponentID + cid := id require.False(orch.registry.Has(cid), "must be unique L2 EL node") orch.registry.Register(cid, l2EL) }) diff --git a/op-devstack/sysgo/l2_el_p2p_util.go b/op-devstack/sysgo/l2_el_p2p_util.go index e78b2aaa8ce76..ea4728aa0dc50 100644 --- a/op-devstack/sysgo/l2_el_p2p_util.go +++ b/op-devstack/sysgo/l2_el_p2p_util.go @@ -16,7 +16,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/testreq" ) -func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.L2ELNodeID, trusted bool) stack.Option[*Orchestrator] { +func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.ComponentID, trusted bool) 
stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() diff --git a/op-devstack/sysgo/l2_el_synctester.go b/op-devstack/sysgo/l2_el_synctester.go index a007a53d4bdf5..28b543428d0b0 100644 --- a/op-devstack/sysgo/l2_el_synctester.go +++ b/op-devstack/sysgo/l2_el_synctester.go @@ -18,7 +18,7 @@ import ( type SyncTesterEL struct { mu sync.Mutex - id stack.L2ELNodeID + id stack.ComponentID l2Net *L2Network jwtPath string @@ -56,7 +56,7 @@ func DefaultSyncTesterELConfig() *SyncTesterELConfig { } type SyncTesterELOption interface { - Apply(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) + Apply(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) } // WithGlobalSyncTesterELOption applies the SyncTesterELOption to all SyncTesterEL instances in this orchestrator @@ -66,11 +66,11 @@ func WithGlobalSyncTesterELOption(opt SyncTesterELOption) stack.Option[*Orchestr }) } -type SyncTesterELOptionFn func(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) +type SyncTesterELOptionFn func(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) var _ SyncTesterELOption = SyncTesterELOptionFn(nil) -func (fn SyncTesterELOptionFn) Apply(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) { +func (fn SyncTesterELOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) { fn(p, id, cfg) } @@ -79,7 +79,7 @@ type SyncTesterELOptionBundle []SyncTesterELOption var _ SyncTesterELOptionBundle = SyncTesterELOptionBundle(nil) -func (l SyncTesterELOptionBundle) Apply(p devtest.P, id stack.L2ELNodeID, cfg *SyncTesterELConfig) { +func (l SyncTesterELOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) { for _, opt := range l { p.Require().NotNil(opt, "cannot Apply nil SyncTesterELOption") opt.Apply(p, id, cfg) @@ -99,7 +99,7 @@ func (n *SyncTesterEL) hydrate(system stack.ExtensibleSystem) { require.NoError(err) system.T().Cleanup(engineCl.Close) - l2Net := 
system.L2Network(stack.L2NetworkID(n.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ RollupCfg: l2Net.RollupConfig(), ELNodeConfig: shim.ELNodeConfig{ @@ -175,14 +175,13 @@ func (n *SyncTesterEL) JWTPath() string { // WithSyncTesterL2ELNode creates a SyncTesterEL that satisfies the L2ELNode interface // The sync tester acts as an EL node that can be used by CL nodes for testing sync. -func WithSyncTesterL2ELNode(id, readonlyEL stack.L2ELNodeID, opts ...SyncTesterELOption) stack.Option[*Orchestrator] { +func WithSyncTesterL2ELNode(id, readonlyEL stack.ComponentID, opts ...SyncTesterELOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) require := p.Require() - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(readonlyEL.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(readonlyEL.ChainID())) require.True(ok, "L2 network required") - l2Net := l2NetComponent.(*L2Network) cfg := DefaultSyncTesterELConfig() orch.SyncTesterELOptions.Apply(p, id, cfg) // apply global options @@ -203,7 +202,7 @@ func WithSyncTesterL2ELNode(id, readonlyEL stack.L2ELNodeID, opts ...SyncTesterE syncTesterEL.Start() p.Cleanup(syncTesterEL.Stop) p.Logger().Info("sync tester EL is ready", "userRPC", syncTesterEL.userRPC, "authRPC", syncTesterEL.authRPC) - cid := stack.ConvertL2ELNodeID(id).ComponentID + cid := id require.False(orch.registry.Has(cid), "must be unique L2 EL node") orch.registry.Register(cid, syncTesterEL) }) diff --git a/op-devstack/sysgo/l2_metrics_dashboard.go b/op-devstack/sysgo/l2_metrics_dashboard.go index 80e633b9c49a2..92845c08ae712 100644 --- a/op-devstack/sysgo/l2_metrics_dashboard.go +++ b/op-devstack/sysgo/l2_metrics_dashboard.go @@ -31,7 +31,7 @@ const grafanaDockerPort = "3000" type 
L2MetricsRegistrar interface { // RegisterL2MetricsTargets is called by components when they are started (or earlier) to register // their metrics endpoints so that a prometheus instance may be spun up to scrape metrics. - RegisterL2MetricsTargets(serviceName stack.IDWithChain, endpoints ...PrometheusMetricsTarget) + RegisterL2MetricsTargets(serviceName stack.Keyed, endpoints ...PrometheusMetricsTarget) } type PrometheusMetricsTarget string diff --git a/op-devstack/sysgo/l2_network.go b/op-devstack/sysgo/l2_network.go index e054e8d3eff90..bdd3b5bb2f9b8 100644 --- a/op-devstack/sysgo/l2_network.go +++ b/op-devstack/sysgo/l2_network.go @@ -11,7 +11,7 @@ import ( ) type L2Network struct { - id stack.L2NetworkID + id stack.ComponentID l1ChainID eth.ChainID genesis *core.Genesis rollupCfg *rollup.Config @@ -20,7 +20,7 @@ type L2Network struct { } func (c *L2Network) hydrate(system stack.ExtensibleSystem) { - l1Net := system.L1Network(stack.L1NetworkID(c.l1ChainID)) + l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(c.l1ChainID))) sysL2Net := shim.NewL2Network(shim.L2NetworkConfig{ NetworkConfig: shim.NetworkConfig{ CommonConfig: shim.NewCommonConfig(system.T()), diff --git a/op-devstack/sysgo/l2_network_superchain_registry.go b/op-devstack/sysgo/l2_network_superchain_registry.go index fda91a1099f20..f48a1fa6bdb5f 100644 --- a/op-devstack/sysgo/l2_network_superchain_registry.go +++ b/op-devstack/sysgo/l2_network_superchain_registry.go @@ -13,7 +13,7 @@ import ( ) // WithL2NetworkFromSuperchainRegistry creates an L2 network using the rollup config from the superchain registry -func WithL2NetworkFromSuperchainRegistry(l2NetworkID stack.L2NetworkID, networkName string) stack.Option[*Orchestrator] { +func WithL2NetworkFromSuperchainRegistry(l2NetworkID stack.ComponentID, networkName string) stack.Option[*Orchestrator] { return stack.BeforeDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2NetworkID)) 
require := p.Require() @@ -44,14 +44,14 @@ func WithL2NetworkFromSuperchainRegistry(l2NetworkID stack.L2NetworkID, networkN keys: orch.keys, } - cid := stack.ConvertL2NetworkID(l2NetworkID).ComponentID + cid := l2NetworkID require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", l2NetworkID)) orch.registry.Register(cid, l2Net) }) } // WithEmptyDepSet creates an L2 network using the rollup config from the superchain registry -func WithEmptyDepSet(l2NetworkID stack.L2NetworkID, networkName string) stack.Option[*Orchestrator] { +func WithEmptyDepSet(l2NetworkID stack.ComponentID, networkName string) stack.Option[*Orchestrator] { return stack.Combine( WithL2NetworkFromSuperchainRegistry(l2NetworkID, networkName), stack.BeforeDeploy(func(orch *Orchestrator) { @@ -63,13 +63,13 @@ func WithEmptyDepSet(l2NetworkID stack.L2NetworkID, networkName string) stack.Op require.NotNil(chainCfg, "chain config not found for network %s", networkName) // Create a minimal cluster with empty dependency set - clusterID := stack.ClusterID(networkName) + clusterID := stack.NewClusterID(networkName) cluster := &Cluster{ id: clusterID, cfgset: depset.FullConfigSetMerged{}, } - orch.registry.Register(stack.ConvertClusterID(clusterID).ComponentID, cluster) + orch.registry.Register(clusterID, cluster) }), ) } diff --git a/op-devstack/sysgo/l2_proposer.go b/op-devstack/sysgo/l2_proposer.go index 538dac741a1c4..25007aef2b6d4 100644 --- a/op-devstack/sysgo/l2_proposer.go +++ b/op-devstack/sysgo/l2_proposer.go @@ -21,7 +21,7 @@ import ( ) type L2Proposer struct { - id stack.L2ProposerID + id stack.ComponentID service *ps.ProposerService userRPC string } @@ -37,11 +37,11 @@ func (p *L2Proposer) hydrate(system stack.ExtensibleSystem) { ID: p.id, Client: rpcCl, }) - l2Net := system.L2Network(stack.L2NetworkID(p.id.ChainID())) + l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(p.id.ChainID()))) l2Net.(stack.ExtensibleL2Network).AddL2Proposer(bFrontend) } 
-type ProposerOption func(id stack.L2ProposerID, cfg *ps.CLIConfig) +type ProposerOption func(id stack.ComponentID, cfg *ps.CLIConfig) func WithProposerOption(opt ProposerOption) stack.Option[*Orchestrator] { return stack.BeforeDeploy(func(o *Orchestrator) { @@ -49,35 +49,35 @@ func WithProposerOption(opt ProposerOption) stack.Option[*Orchestrator] { }) } -func WithProposer(proposerID stack.L2ProposerID, l1ELID stack.L1ELNodeID, - l2CLID *stack.L2CLNodeID, supervisorID *stack.SupervisorID) stack.Option[*Orchestrator] { +func WithProposer(proposerID stack.ComponentID, l1ELID stack.ComponentID, + l2CLID *stack.ComponentID, supervisorID *stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { WithProposerPostDeploy(orch, proposerID, l1ELID, l2CLID, supervisorID, nil) }) } -func WithSuperProposer(proposerID stack.L2ProposerID, l1ELID stack.L1ELNodeID, - supervisorID *stack.SupervisorID) stack.Option[*Orchestrator] { +func WithSuperProposer(proposerID stack.ComponentID, l1ELID stack.ComponentID, + supervisorID *stack.ComponentID) stack.Option[*Orchestrator] { return stack.Finally(func(orch *Orchestrator) { WithProposerPostDeploy(orch, proposerID, l1ELID, nil, supervisorID, nil) }) } -func WithSupernodeProposer(proposerID stack.L2ProposerID, l1ELID stack.L1ELNodeID, +func WithSupernodeProposer(proposerID stack.ComponentID, l1ELID stack.ComponentID, supernodeID *stack.SupernodeID) stack.Option[*Orchestrator] { return stack.Finally(func(orch *Orchestrator) { WithProposerPostDeploy(orch, proposerID, l1ELID, nil, nil, supernodeID) }) } -func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l1ELID stack.L1ELNodeID, - l2CLID *stack.L2CLNodeID, supervisorID *stack.SupervisorID, supernodeID *stack.SupernodeID) { +func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.ComponentID, l1ELID stack.ComponentID, + l2CLID *stack.ComponentID, supervisorID *stack.ComponentID, supernodeID *stack.SupernodeID) { 
ctx := orch.P().Ctx() ctx = stack.ContextWithID(ctx, proposerID) p := orch.P().WithCtx(ctx) require := p.Require() - proposerCID := stack.ConvertL2ProposerID(proposerID).ComponentID + proposerCID := proposerID require.False(orch.registry.Has(proposerCID), "proposer must not already exist") if supervisorID != nil && supernodeID != nil { require.Fail("cannot have both supervisorID and supernodeID set for proposer") @@ -89,13 +89,11 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l logger := p.Logger() logger.Info("Proposer key acquired", "addr", crypto.PubkeyToAddress(proposerSecret.PublicKey)) - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok) - l1EL := l1ELComponent.(L1ELNode) - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(proposerID.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(proposerID.ChainID())) require.True(ok) - l2Net := l2NetComponent.(*L2Network) disputeGameFactoryAddr := l2Net.deployment.DisputeGameFactoryProxyAddr() disputeGameType := 1 // Permissioned game type is the only one currently deployed if orch.wb.outInteropMigration != nil { @@ -130,9 +128,8 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l // If supervisor is available, use it. Otherwise, connect to L2 CL. 
switch { case supervisorID != nil: - supervisorComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(*supervisorID).ComponentID) + supervisorNode, ok := orch.GetSupervisor(*supervisorID) require.True(ok, "supervisor not found") - supervisorNode := supervisorComponent.(Supervisor) proposerCLIConfig.SupervisorRpcs = []string{supervisorNode.UserRPC()} case supernodeID != nil: supernode, ok := orch.supernodes.Get(*supernodeID) @@ -140,9 +137,8 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l proposerCLIConfig.SuperNodeRpcs = []string{supernode.UserRPC()} default: require.NotNil(l2CLID, "need L2 CL to connect to when no supervisor") - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(*l2CLID).ComponentID) - require.True(ok, "L2 CL not found") - l2CL := l2CLComponent.(L2CLNode) + l2CL, ok := orch.GetL2CL(*l2CLID) + require.True(ok) proposerCLIConfig.RollupRpc = l2CL.UserRPC() } @@ -163,5 +159,5 @@ func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.L2ProposerID, l service: proposer, userRPC: proposer.HTTPEndpoint(), } - orch.registry.Register(stack.ConvertL2ProposerID(proposerID).ComponentID, prop) + orch.registry.Register(proposerID, prop) } diff --git a/op-devstack/sysgo/op_rbuilder.go b/op-devstack/sysgo/op_rbuilder.go index 69b0ea1d0e052..b77bfbf0f3fc7 100644 --- a/op-devstack/sysgo/op_rbuilder.go +++ b/op-devstack/sysgo/op_rbuilder.go @@ -25,7 +25,7 @@ import ( type OPRBuilderNode struct { mu sync.Mutex - id stack.OPRBuilderNodeID + id stack.ComponentID rollupCfg *rollup.Config wsProxyURL string @@ -251,7 +251,7 @@ func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.P) (args []string, env []s } type OPRBuilderNodeOption interface { - Apply(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) + Apply(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) } func WithGlobalOPRBuilderNodeOption(opt OPRBuilderNodeOption) stack.Option[*Orchestrator] { @@ -260,11 +260,11 @@ func 
WithGlobalOPRBuilderNodeOption(opt OPRBuilderNodeOption) stack.Option[*Orch }) } -type OPRBuilderNodeOptionFn func(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) +type OPRBuilderNodeOptionFn func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) var _ OPRBuilderNodeOption = OPRBuilderNodeOptionFn(nil) -func (fn OPRBuilderNodeOptionFn) Apply(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) { +func (fn OPRBuilderNodeOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { fn(p, id, cfg) } @@ -273,7 +273,7 @@ type OPRBuilderNodeOptionBundle []OPRBuilderNodeOption var _ OPRBuilderNodeOption = OPRBuilderNodeOptionBundle(nil) -func (b OPRBuilderNodeOptionBundle) Apply(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) { +func (b OPRBuilderNodeOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { for _, opt := range b { p.Require().NotNil(opt, "cannot Apply nil OPRBuilderNodeOption") opt.Apply(p, id, cfg) @@ -282,7 +282,7 @@ func (b OPRBuilderNodeOptionBundle) Apply(p devtest.P, id stack.OPRBuilderNodeID // OPRBuilderWithP2PConfig sets deterministic P2P identity and static peers for the builder EL. func OPRBuilderWithP2PConfig(addr string, port int, nodeKeyHex string, staticPeers, trustedPeers []string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { cfg.P2PAddr = addr cfg.P2PPort = port cfg.P2PNodeKeyHex = nodeKeyHex @@ -293,7 +293,7 @@ func OPRBuilderWithP2PConfig(addr string, port int, nodeKeyHex string, staticPee // OPRBuilderWithNodeIdentity applies an ELNodeIdentity directly to the builder EL. 
func OPRBuilderWithNodeIdentity(identity *ELNodeIdentity, addr string, staticPeers, trustedPeers []string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { cfg.P2PAddr = addr cfg.P2PPort = identity.Port cfg.P2PNodeKeyHex = identity.KeyHex() @@ -303,13 +303,13 @@ func OPRBuilderWithNodeIdentity(identity *ELNodeIdentity, addr string, staticPee } func OPRBuilderNodeWithExtraArgs(args ...string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { cfg.ExtraArgs = append(cfg.ExtraArgs, args...) }) } func OPRBuilderNodeWithEnv(env ...string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.OPRBuilderNodeID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { cfg.Env = append(cfg.Env, env...) }) } @@ -336,7 +336,7 @@ func (b *OPRBuilderNode) hydrate(system stack.ExtensibleSystem) { RollupCfg: b.rollupCfg, FlashblocksClient: wsClient, }) - system.L2Network(stack.L2NetworkID(b.id.ChainID())).(stack.ExtensibleL2Network).AddOPRBuilderNode(node) + system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(b.id.ChainID()))).(stack.ExtensibleL2Network).AddOPRBuilderNode(node) } func (b *OPRBuilderNode) Start() { @@ -480,12 +480,11 @@ func (b *OPRBuilderNode) Stop() { } // WithOPRBuilderNode constructs and starts an OPRbuilderNode using the provided options. 
-func WithOPRBuilderNode(id stack.OPRBuilderNodeID, opts ...OPRBuilderNodeOption) stack.Option[*Orchestrator] { +func WithOPRBuilderNode(id stack.ComponentID, opts ...OPRBuilderNodeOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - l2NetComponent, ok := orch.registry.Get(stack.ConvertL2NetworkID(stack.L2NetworkID(id.ChainID())).ComponentID) + l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(id.ChainID())) p.Require().True(ok, "l2 network required") - l2Net := l2NetComponent.(*L2Network) tempDir := p.TempDir() data, err := json.Marshal(l2Net.genesis) @@ -510,7 +509,7 @@ func WithOPRBuilderNode(id stack.OPRBuilderNodeID, opts ...OPRBuilderNodeOption) p.Logger().Info("Starting OPRbuilderNode") rb.Start() p.Cleanup(rb.Stop) - orch.registry.Register(stack.ConvertOPRBuilderNodeID(id).ComponentID, rb) + orch.registry.Register(id, rb) }) } diff --git a/op-devstack/sysgo/orchestrator.go b/op-devstack/sysgo/orchestrator.go index 28c61738f419c..42929cc451384 100644 --- a/op-devstack/sysgo/orchestrator.go +++ b/op-devstack/sysgo/orchestrator.go @@ -41,8 +41,8 @@ type Orchestrator struct { // Unified component registry - replaces the 15 separate locks.RWMap fields registry *stack.Registry - // supernodes is stored separately because SupernodeID cannot be converted to ComponentID - supernodes locks.RWMap[stack.SupernodeID, *SuperNode] + // supernodes are stored separately from the registry and hydrated explicitly. + supernodes locks.RWMap[stack.ComponentID, *SuperNode] // service name => prometheus endpoints to scrape l2MetricsEndpoints locks.RWMap[string, []PrometheusMetricsTarget] @@ -85,19 +85,11 @@ func (o *Orchestrator) EnableTimeTravel() { } } -// GetL2EL retrieves an L2 EL node by its ID from the registry. 
-// Supports polymorphic lookup: if the ID was converted from another L2EL-capable type -// (e.g., OPRBuilderNodeID), searches across all L2EL-capable kinds using same key/chainID. -func (o *Orchestrator) GetL2EL(id stack.L2ELNodeID) (L2ELNode, bool) { - for _, kind := range stack.L2ELCapableKinds() { - cid := stack.NewComponentID(kind, id.Key(), id.ChainID()) - if component, ok := o.registry.Get(cid); ok { - if el, ok := component.(L2ELNode); ok { - return el, true - } - } - } - return nil, false +// GetL2EL returns the component at the exact ID if it implements L2ELNode. +// This supports polymorphism via interface implementation (e.g. OpGeth, OpReth, +// RollupBoostNode, OPRBuilderNode), but does not rewrite IDs across kinds. +func (o *Orchestrator) GetL2EL(id stack.ComponentID) (L2ELNode, bool) { + return stack.RegistryGet[L2ELNode](o.registry, id) } var _ stack.Orchestrator = (*Orchestrator)(nil) @@ -146,7 +138,7 @@ func (o *Orchestrator) Hydrate(sys stack.ExtensibleSystem) { }) } - o.supernodes.Range(rangeHydrateFn[stack.SupernodeID, *SuperNode](sys)) + o.supernodes.Range(rangeHydrateFn[stack.ComponentID, *SuperNode](sys)) if o.syncTester != nil { o.syncTester.hydrate(sys) @@ -155,7 +147,7 @@ func (o *Orchestrator) Hydrate(sys stack.ExtensibleSystem) { o.sysHook.PostHydrate(sys) } -func (o *Orchestrator) RegisterL2MetricsTargets(id stack.IDWithChain, endpoints ...PrometheusMetricsTarget) { +func (o *Orchestrator) RegisterL2MetricsTargets(id stack.Keyed, endpoints ...PrometheusMetricsTarget) { wasSet := o.l2MetricsEndpoints.SetIfMissing(id.Key(), endpoints) if !wasSet { existing, _ := o.l2MetricsEndpoints.Get(id.Key()) diff --git a/op-devstack/sysgo/orchestrator_getters.go b/op-devstack/sysgo/orchestrator_getters.go new file mode 100644 index 0000000000000..19dd2e570027a --- /dev/null +++ b/op-devstack/sysgo/orchestrator_getters.go @@ -0,0 +1,48 @@ +package sysgo + +import "github.com/ethereum-optimism/optimism/op-devstack/stack" + +// GetL1Network returns an 
L1 network by ID. +func (o *Orchestrator) GetL1Network(id stack.ComponentID) (*L1Network, bool) { + return stack.RegistryGet[*L1Network](o.registry, id) +} + +// GetL2Network returns an L2 network by ID. +func (o *Orchestrator) GetL2Network(id stack.ComponentID) (*L2Network, bool) { + return stack.RegistryGet[*L2Network](o.registry, id) +} + +// GetCluster returns a cluster by ID. +func (o *Orchestrator) GetCluster(id stack.ComponentID) (*Cluster, bool) { + return stack.RegistryGet[*Cluster](o.registry, id) +} + +// GetL1EL returns an L1 execution node by ID. +func (o *Orchestrator) GetL1EL(id stack.ComponentID) (L1ELNode, bool) { + return stack.RegistryGet[L1ELNode](o.registry, id) +} + +// GetL1CL returns an L1 consensus node by ID. +func (o *Orchestrator) GetL1CL(id stack.ComponentID) (*L1CLNode, bool) { + return stack.RegistryGet[*L1CLNode](o.registry, id) +} + +// GetL2CL returns an L2 consensus node by ID. +func (o *Orchestrator) GetL2CL(id stack.ComponentID) (L2CLNode, bool) { + return stack.RegistryGet[L2CLNode](o.registry, id) +} + +// GetSupervisor returns a supervisor by ID. +func (o *Orchestrator) GetSupervisor(id stack.ComponentID) (Supervisor, bool) { + return stack.RegistryGet[Supervisor](o.registry, id) +} + +// GetOPRBuilder returns an OPR builder node by ID. +func (o *Orchestrator) GetOPRBuilder(id stack.ComponentID) (*OPRBuilderNode, bool) { + return stack.RegistryGet[*OPRBuilderNode](o.registry, id) +} + +// GetRollupBoost returns a rollup-boost node by ID. 
+func (o *Orchestrator) GetRollupBoost(id stack.ComponentID) (*RollupBoostNode, bool) { + return stack.RegistryGet[*RollupBoostNode](o.registry, id) +} diff --git a/op-devstack/sysgo/rollup_boost.go b/op-devstack/sysgo/rollup_boost.go index 12246cadca2b7..2585920b6ce35 100644 --- a/op-devstack/sysgo/rollup_boost.go +++ b/op-devstack/sysgo/rollup_boost.go @@ -24,7 +24,7 @@ import ( type RollupBoostNode struct { mu sync.Mutex - id stack.RollupBoostNodeID + id stack.ComponentID wsProxyURL string wsProxy *tcpproxy.Proxy @@ -65,10 +65,10 @@ func (r *RollupBoostNode) hydrate(system stack.ExtensibleSystem) { Client: elRPC, ChainID: r.id.ChainID(), }, - RollupCfg: system.L2Network(stack.L2NetworkID(r.id.ChainID())).RollupConfig(), + RollupCfg: system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(r.id.ChainID()))).RollupConfig(), FlashblocksClient: wsClient, }) - system.L2Network(stack.L2NetworkID(r.id.ChainID())).(stack.ExtensibleL2Network).AddRollupBoostNode(node) + system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(r.id.ChainID()))).(stack.ExtensibleL2Network).AddRollupBoostNode(node) } func (r *RollupBoostNode) Start() { @@ -177,7 +177,7 @@ func (r *RollupBoostNode) Stop() { // WithRollupBoost starts a rollup-boost process using the provided options // and registers a WSClient on the target L2 chain. // l2ELID is required to link the proxy to the L2 EL it serves. -func WithRollupBoost(id stack.RollupBoostNodeID, l2ELID stack.L2ELNodeID, opts ...RollupBoostOption) stack.Option[*Orchestrator] { +func WithRollupBoost(id stack.ComponentID, l2ELID stack.ComponentID, opts ...RollupBoostOption) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) logger := p.Logger() @@ -209,7 +209,7 @@ func WithRollupBoost(id stack.RollupBoostNodeID, l2ELID stack.L2ELNodeID, opts . 
// Apply any node-level link options for _, opt := range opts { if linkOpt, ok := opt.(interface { - applyNode(p devtest.P, id stack.RollupBoostNodeID, r *RollupBoostNode) + applyNode(p devtest.P, id stack.ComponentID, r *RollupBoostNode) }); ok { linkOpt.applyNode(p, id, r) } @@ -218,7 +218,7 @@ func WithRollupBoost(id stack.RollupBoostNodeID, l2ELID stack.L2ELNodeID, opts . r.Start() p.Cleanup(r.Stop) // Register for hydration - orch.registry.Register(stack.ConvertRollupBoostNodeID(id).ComponentID, r) + orch.registry.Register(id, r) }) } @@ -355,14 +355,14 @@ func (cfg *RollupBoostConfig) LaunchSpec(p devtest.P) (args []string, env []stri } type RollupBoostOption interface { - Apply(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) + Apply(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) } -type RollupBoostOptionFn func(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) +type RollupBoostOptionFn func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) var _ RollupBoostOption = RollupBoostOptionFn(nil) -func (fn RollupBoostOptionFn) Apply(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) { +func (fn RollupBoostOptionFn) Apply(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { fn(orch, id, cfg) } @@ -370,7 +370,7 @@ type RollupBoostOptionBundle []RollupBoostOption var _ RollupBoostOption = RollupBoostOptionBundle(nil) -func (b RollupBoostOptionBundle) Apply(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) { +func (b RollupBoostOptionBundle) Apply(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { for _, opt := range b { orch.P().Require().NotNil(opt, "cannot Apply nil RollupBoostOption") opt.Apply(orch, id, cfg) @@ -379,30 +379,29 @@ func (b RollupBoostOptionBundle) Apply(orch *Orchestrator, id stack.RollupBoostN // Convenience options func RollupBoostWithExecutionMode(mode string) RollupBoostOption { - return 
RollupBoostOptionFn(func(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) { + return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { cfg.ExecutionMode = mode }) } func RollupBoostWithEnv(env ...string) RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) { + return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { cfg.Env = append(cfg.Env, env...) }) } func RollupBoostWithExtraArgs(args ...string) RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) { + return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { cfg.ExtraArgs = append(cfg.ExtraArgs, args...) }) } -func RollupBoostWithBuilderNode(id stack.OPRBuilderNodeID) RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, rbID stack.RollupBoostNodeID, cfg *RollupBoostConfig) { - builderComponent, ok := orch.registry.Get(stack.ConvertOPRBuilderNodeID(id).ComponentID) +func RollupBoostWithBuilderNode(id stack.ComponentID) RollupBoostOption { + return RollupBoostOptionFn(func(orch *Orchestrator, rbID stack.ComponentID, cfg *RollupBoostConfig) { + builderNode, ok := orch.GetOPRBuilder(id) if !ok { orch.P().Require().FailNow("builder node not found") } - builderNode := builderComponent.(*OPRBuilderNode) cfg.BuilderURL = ensureHTTPURL(builderNode.authProxyURL) cfg.BuilderJWTPath = builderNode.cfg.AuthRPCJWTPath cfg.FlashblocksBuilderURL = builderNode.wsProxyURL @@ -410,7 +409,7 @@ func RollupBoostWithBuilderNode(id stack.OPRBuilderNodeID) RollupBoostOption { } func RollupBoostWithFlashblocksDisabled() RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, id stack.RollupBoostNodeID, cfg *RollupBoostConfig) { + return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg 
*RollupBoostConfig) { cfg.EnableFlashblocks = false }) } diff --git a/op-devstack/sysgo/superchain.go b/op-devstack/sysgo/superchain.go index 6570b2345726a..ff421c88e03e2 100644 --- a/op-devstack/sysgo/superchain.go +++ b/op-devstack/sysgo/superchain.go @@ -22,7 +22,7 @@ func (d *SuperchainDeployment) ProtocolVersionsAddr() common.Address { } type Superchain struct { - id stack.SuperchainID + id stack.ComponentID deployment *SuperchainDeployment } diff --git a/op-devstack/sysgo/superroot.go b/op-devstack/sysgo/superroot.go index bd19fdd7f3758..7c05b3f1ea99a 100644 --- a/op-devstack/sysgo/superroot.go +++ b/op-devstack/sysgo/superroot.go @@ -48,19 +48,19 @@ type MigrateInputV2 struct { StartingRespectedGameType uint32 } -func WithSuperRoots(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, clIDs []stack.L2CLNodeID, supervisorID stack.SupervisorID, primaryL2 eth.ChainID) stack.Option[*Orchestrator] { +func WithSuperRoots(l1ChainID eth.ChainID, l1ELID stack.ComponentID, clIDs []stack.ComponentID, supervisorID stack.ComponentID, primaryL2 eth.ChainID) stack.Option[*Orchestrator] { return withSuperRoots(l1ChainID, l1ELID, clIDs, primaryL2, func(t devtest.CommonT, o *Orchestrator, timestamp uint64) eth.Bytes32 { return getSuperRoot(t, o, timestamp, supervisorID) }) } -func WithSuperRootsFromSupernode(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, clIDs []stack.L2CLNodeID, supernodeID stack.SupernodeID, primaryL2 eth.ChainID) stack.Option[*Orchestrator] { +func WithSuperRootsFromSupernode(l1ChainID eth.ChainID, l1ELID stack.ComponentID, clIDs []stack.ComponentID, supernodeID stack.SupernodeID, primaryL2 eth.ChainID) stack.Option[*Orchestrator] { return withSuperRoots(l1ChainID, l1ELID, clIDs, primaryL2, func(t devtest.CommonT, o *Orchestrator, timestamp uint64) eth.Bytes32 { return getSuperRootFromSupernode(t, o, timestamp, supernodeID) }) } -func withSuperRoots(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, clIDs []stack.L2CLNodeID, primaryL2 eth.ChainID, 
getSuperRootAtTimestamp func(t devtest.CommonT, o *Orchestrator, timestamp uint64) eth.Bytes32) stack.Option[*Orchestrator] { +func withSuperRoots(l1ChainID eth.ChainID, l1ELID stack.ComponentID, clIDs []stack.ComponentID, primaryL2 eth.ChainID, getSuperRootAtTimestamp func(t devtest.CommonT, o *Orchestrator, timestamp uint64) eth.Bytes32) stack.Option[*Orchestrator] { return stack.FnOption[*Orchestrator]{ FinallyFn: func(o *Orchestrator) { t := o.P() @@ -68,9 +68,8 @@ func withSuperRoots(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, clIDs []stac require.NotNil(o.wb, "must have a world builder") require.NotEmpty(o.wb.output.ImplementationsDeployment.OpcmImpl, "must have an OPCM implementation") - l1ELComponent, ok := o.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := o.GetL1EL(l1ELID) require.True(ok, "must have L1 EL node") - l1EL := l1ELComponent.(L1ELNode) rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) require.NoError(err) client := ethclient.NewClient(rpcClient) @@ -80,9 +79,8 @@ func withSuperRoots(l1ChainID eth.ChainID, l1ELID stack.L1ELNodeID, clIDs []stac // Supernode does not support super roots at genesis. // So let's wait for safe heads to advance before querying atTimestamp. 
for _, clID := range clIDs { - l2CLComponent, ok := o.registry.Get(stack.ConvertL2CLNodeID(clID).ComponentID) + l2CL, ok := o.GetL2CL(clID) require.True(ok, "must have L2 CL node") - l2CL := l2CLComponent.(L2CLNode) // TODO(#18947): Ideally, we should be able to wait on the supernode's SyncStatus directly // rather than check the sync statuses of all CLs rollupClient, err := dial.DialRollupClientWithTimeout(t.Ctx(), t.Logger(), l2CL.UserRPC()) @@ -287,10 +285,9 @@ func deployDelegateCallProxy(t devtest.CommonT, transactOpts *bind.TransactOpts, return deployAddress, proxyContract } -func getSuperRoot(t devtest.CommonT, o *Orchestrator, timestamp uint64, supervisorID stack.SupervisorID) eth.Bytes32 { - supervisorComponent, ok := o.registry.Get(stack.ConvertSupervisorID(supervisorID).ComponentID) +func getSuperRoot(t devtest.CommonT, o *Orchestrator, timestamp uint64, supervisorID stack.ComponentID) eth.Bytes32 { + supervisor, ok := o.GetSupervisor(supervisorID) t.Require().True(ok, "must have supervisor") - supervisor := supervisorComponent.(Supervisor) client, err := dial.DialSupervisorClientWithTimeout(t.Ctx(), t.Logger(), supervisor.UserRPC()) t.Require().NoError(err) diff --git a/op-devstack/sysgo/supervisor.go b/op-devstack/sysgo/supervisor.go index ca74091f27cbb..08b8d37e2a924 100644 --- a/op-devstack/sysgo/supervisor.go +++ b/op-devstack/sysgo/supervisor.go @@ -15,7 +15,7 @@ type Supervisor interface { UserRPC() string } -func WithSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { +func WithSupervisor(supervisorID stack.ComponentID, clusterID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { switch os.Getenv("DEVSTACK_SUPERVISOR_KIND") { case "kona": return WithKonaSupervisor(supervisorID, clusterID, l1ELID) @@ -24,18 +24,16 @@ func WithSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID, } } -func WithManagedBySupervisor(l2CLID 
stack.L2CLNodeID, supervisorID stack.SupervisorID) stack.Option[*Orchestrator] { +func WithManagedBySupervisor(l2CLID stack.ComponentID, supervisorID stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { require := orch.P().Require() - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2CLID).ComponentID) + l2CL, ok := orch.GetL2CL(l2CLID) require.True(ok, "looking for L2 CL node to connect to supervisor") - l2CL := l2CLComponent.(L2CLNode) interopEndpoint, secret := l2CL.InteropRPC() - supComponent, ok := orch.registry.Get(stack.ConvertSupervisorID(supervisorID).ComponentID) + s, ok := orch.GetSupervisor(supervisorID) require.True(ok, "looking for supervisor") - s := supComponent.(Supervisor) ctx := orch.P().Ctx() supClient, err := dial.DialSupervisorClientWithTimeout(ctx, orch.P().Logger(), s.UserRPC(), client.WithLazyDial()) diff --git a/op-devstack/sysgo/supervisor_kona.go b/op-devstack/sysgo/supervisor_kona.go index fa9d9387c2d94..8ae222b4c5cb6 100644 --- a/op-devstack/sysgo/supervisor_kona.go +++ b/op-devstack/sysgo/supervisor_kona.go @@ -17,7 +17,7 @@ import ( type KonaSupervisor struct { mu sync.Mutex - id stack.SupervisorID + id stack.ComponentID userRPC string userProxy *tcpproxy.Proxy @@ -114,18 +114,16 @@ func (s *KonaSupervisor) Stop() { s.sub = nil } -func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { +func WithKonaSupervisor(supervisorID stack.ComponentID, clusterID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) require := p.Require() - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok, "need L1 EL node to connect supervisor to") - l1EL := l1ELComponent.(L1ELNode) - 
clusterComponent, ok := orch.registry.Get(stack.ConvertClusterID(clusterID).ComponentID) + cluster, ok := orch.GetCluster(clusterID) require.True(ok, "need cluster to determine dependency set") - cluster := clusterComponent.(*Cluster) require.NotNil(cluster.cfgset, "need a full config set") require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") @@ -141,8 +139,8 @@ func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.Cluster rollupCfgPath := cfgDir + "/rollup-config-*.json" for _, l2NetID := range orch.registry.IDsByKind(stack.KindL2Network) { - l2NetComponent, _ := orch.registry.Get(l2NetID) - l2Net := l2NetComponent.(*L2Network) + l2Net, ok := orch.GetL2Network(l2NetID) + require.True(ok, "need l2 network") chainID := l2Net.id.ChainID() rollupData, err := json.Marshal(l2Net.rollupCfg) require.NoError(err, "failed to marshal rollup config") @@ -178,7 +176,7 @@ func WithKonaSupervisor(supervisorID stack.SupervisorID, clusterID stack.Cluster env: envVars, p: p, } - orch.registry.Register(stack.ConvertSupervisorID(supervisorID).ComponentID, konaSupervisor) + orch.registry.Register(supervisorID, konaSupervisor) p.Logger().Info("Starting kona-supervisor") konaSupervisor.Start() p.Cleanup(konaSupervisor.Stop) diff --git a/op-devstack/sysgo/supervisor_op.go b/op-devstack/sysgo/supervisor_op.go index d7b867da89630..e7ffe9c0eaaa7 100644 --- a/op-devstack/sysgo/supervisor_op.go +++ b/op-devstack/sysgo/supervisor_op.go @@ -24,7 +24,7 @@ import ( type OpSupervisor struct { mu sync.Mutex - id stack.SupervisorID + id stack.ComponentID userRPC string cfg *supervisorConfig.Config @@ -99,18 +99,16 @@ func (s *OpSupervisor) Stop() { s.service = nil } -func WithOPSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID, l1ELID stack.L1ELNodeID) stack.Option[*Orchestrator] { +func WithOPSupervisor(supervisorID stack.ComponentID, clusterID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { return 
stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) require := p.Require() - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok, "need L1 EL node to connect supervisor to") - l1EL := l1ELComponent.(L1ELNode) - clusterComponent, ok := orch.registry.Get(stack.ConvertClusterID(clusterID).ComponentID) + cluster, ok := orch.GetCluster(clusterID) require.True(ok, "need cluster to determine dependency set") - cluster := clusterComponent.(*Cluster) require.NotNil(cluster.cfgset, "need a full config set") require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") @@ -153,7 +151,7 @@ func WithOPSupervisor(supervisorID stack.SupervisorID, clusterID stack.ClusterID logger: plog, service: nil, // set on start } - orch.registry.Register(stack.ConvertSupervisorID(supervisorID).ComponentID, supervisorNode) + orch.registry.Register(supervisorID, supervisorNode) supervisorNode.Start() orch.p.Cleanup(supervisorNode.Stop) }) diff --git a/op-devstack/sysgo/sync_tester.go b/op-devstack/sysgo/sync_tester.go index 144f736921f86..185f3618bb2b6 100644 --- a/op-devstack/sysgo/sync_tester.go +++ b/op-devstack/sysgo/sync_tester.go @@ -20,7 +20,7 @@ import ( // Caveat: id is binded by a single EL(chainID), but service can support multiple ELs type SyncTesterService struct { - id stack.SyncTesterID + id stack.ComponentID service *synctester.Service } @@ -44,7 +44,7 @@ func (n *SyncTesterService) hydrate(system stack.ExtensibleSystem) { } } -func WithSyncTester(syncTesterID stack.SyncTesterID, l2ELs []stack.L2ELNodeID) stack.Option[*Orchestrator] { +func WithSyncTester(syncTesterID stack.ComponentID, l2ELs []stack.ComponentID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), syncTesterID)) @@ -90,7 +90,7 @@ func WithSyncTester(syncTesterID 
stack.SyncTesterID, l2ELs []stack.L2ELNodeID) s }) } -func WithSyncTesterWithExternalEndpoint(syncTesterID stack.SyncTesterID, endpointRPC string, chainID eth.ChainID) stack.Option[*Orchestrator] { +func WithSyncTesterWithExternalEndpoint(syncTesterID stack.ComponentID, endpointRPC string, chainID eth.ChainID) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), syncTesterID)) diff --git a/op-devstack/sysgo/system.go b/op-devstack/sysgo/system.go index 54ef3252edbc6..71cc861e60b68 100644 --- a/op-devstack/sysgo/system.go +++ b/op-devstack/sysgo/system.go @@ -14,27 +14,27 @@ var ( ) type DefaultMinimalSystemIDs struct { - L1 stack.L1NetworkID - L1EL stack.L1ELNodeID - L1CL stack.L1CLNodeID + L1 stack.ComponentID + L1EL stack.ComponentID + L1CL stack.ComponentID - L2 stack.L2NetworkID - L2CL stack.L2CLNodeID - L2EL stack.L2ELNodeID + L2 stack.ComponentID + L2CL stack.ComponentID + L2EL stack.ComponentID - L2Batcher stack.L2BatcherID - L2Proposer stack.L2ProposerID - L2Challenger stack.L2ChallengerID + L2Batcher stack.ComponentID + L2Proposer stack.ComponentID + L2Challenger stack.ComponentID - TestSequencer stack.TestSequencerID + TestSequencer stack.ComponentID } func NewDefaultMinimalSystemIDs(l1ID, l2ID eth.ChainID) DefaultMinimalSystemIDs { ids := DefaultMinimalSystemIDs{ - L1: stack.L1NetworkID(l1ID), + L1: stack.NewL1NetworkID(l1ID), L1EL: stack.NewL1ELNodeID("l1", l1ID), L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.L2NetworkID(l2ID), + L2: stack.NewL2NetworkID(l2ID), L2CL: stack.NewL2CLNodeID("sequencer", l2ID), L2EL: stack.NewL2ELNodeID("sequencer", l2ID), L2Batcher: stack.NewL2BatcherID("main", l2ID), @@ -74,11 +74,11 @@ func defaultMinimalSystemOpts(ids *DefaultMinimalSystemIDs, dest *DefaultMinimal opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - 
opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ ids.L2EL, })) @@ -95,35 +95,35 @@ func defaultMinimalSystemOpts(ids *DefaultMinimalSystemIDs, dest *DefaultMinimal // without interop or supervisor: both L2s get their own ELs, and we attach L2CL nodes // via the default L2CL selector (which can be set to supernode to share a single process). type DefaultTwoL2SystemIDs struct { - L1 stack.L1NetworkID - L1EL stack.L1ELNodeID - L1CL stack.L1CLNodeID + L1 stack.ComponentID + L1EL stack.ComponentID + L1CL stack.ComponentID - L2A stack.L2NetworkID - L2ACL stack.L2CLNodeID - L2AEL stack.L2ELNodeID + L2A stack.ComponentID + L2ACL stack.ComponentID + L2AEL stack.ComponentID - L2B stack.L2NetworkID - L2BCL stack.L2CLNodeID - L2BEL stack.L2ELNodeID + L2B stack.ComponentID + L2BCL stack.ComponentID + L2BEL stack.ComponentID Supernode stack.SupernodeID - TestSequencer stack.TestSequencerID - L2ABatcher stack.L2BatcherID - L2AProposer stack.L2ProposerID - L2BBatcher stack.L2BatcherID - L2BProposer stack.L2ProposerID + TestSequencer stack.ComponentID + L2ABatcher stack.ComponentID + L2AProposer stack.ComponentID + L2BBatcher stack.ComponentID + L2BProposer stack.ComponentID } func NewDefaultTwoL2SystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultTwoL2SystemIDs { return DefaultTwoL2SystemIDs{ - L1: stack.L1NetworkID(l1ID), + L1: stack.NewL1NetworkID(l1ID), L1EL: stack.NewL1ELNodeID("l1", l1ID), L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2A: stack.L2NetworkID(l2AID), + L2A: stack.NewL2NetworkID(l2AID), L2ACL: stack.NewL2CLNodeID("sequencer", l2AID), L2AEL: 
stack.NewL2ELNodeID("sequencer", l2AID), - L2B: stack.L2NetworkID(l2BID), + L2B: stack.NewL2NetworkID(l2BID), L2BCL: stack.NewL2CLNodeID("sequencer", l2BID), L2BEL: stack.NewL2ELNodeID("sequencer", l2BID), Supernode: stack.NewSupernodeID("supernode-two-l2-system", l2AID, l2BID), @@ -167,7 +167,7 @@ func DefaultTwoL2System(dest *DefaultTwoL2SystemIDs) stack.Option[*Orchestrator] opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) opt.Add(WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) opt.Add(WithL2MetricsDashboard()) @@ -212,7 +212,7 @@ func DefaultSupernodeTwoL2System(dest *DefaultTwoL2SystemIDs) stack.Option[*Orch opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) opt.Add(WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) opt.Add(stack.Finally(func(orch *Orchestrator) { *dest = ids @@ -266,7 +266,7 @@ func DefaultSupernodeInteropTwoL2System(dest *DefaultTwoL2SystemIDs, delaySecond opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) opt.Add(WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) // Test sequencer for deterministic block building on both L2 chains opt.Add(WithTestSequencer2L2(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L2BCL, ids.L1EL, ids.L2AEL, ids.L2BEL)) @@ -281,7 +281,7 @@ func DefaultSupernodeInteropTwoL2System(dest *DefaultTwoL2SystemIDs, delaySecond type DefaultMinimalSystemWithSyncTesterIDs struct { 
DefaultMinimalSystemIDs - SyncTester stack.SyncTesterID + SyncTester stack.ComponentID } func NewDefaultMinimalSystemWithSyncTesterIDs(l1ID, l2ID eth.ChainID) DefaultMinimalSystemWithSyncTesterIDs { @@ -320,15 +320,15 @@ func DefaultMinimalSystemWithSyncTester(dest *DefaultMinimalSystemWithSyncTester opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ ids.L2EL, })) - opt.Add(WithSyncTester(ids.SyncTester, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithSyncTester(ids.SyncTester, []stack.ComponentID{ids.L2EL})) opt.Add(WithL2MetricsDashboard()) @@ -340,35 +340,35 @@ func DefaultMinimalSystemWithSyncTester(dest *DefaultMinimalSystemWithSyncTester } type DefaultSingleChainInteropSystemIDs struct { - L1 stack.L1NetworkID - L1EL stack.L1ELNodeID - L1CL stack.L1CLNodeID + L1 stack.ComponentID + L1EL stack.ComponentID + L1CL stack.ComponentID - Superchain stack.SuperchainID - Cluster stack.ClusterID + Superchain stack.ComponentID + Cluster stack.ComponentID - Supervisor stack.SupervisorID - TestSequencer stack.TestSequencerID + Supervisor stack.ComponentID + TestSequencer stack.ComponentID - L2A stack.L2NetworkID - L2ACL stack.L2CLNodeID - L2AEL stack.L2ELNodeID + L2A stack.ComponentID + L2ACL stack.ComponentID + L2AEL stack.ComponentID - L2ABatcher stack.L2BatcherID - L2AProposer stack.L2ProposerID - L2ChallengerA stack.L2ChallengerID + L2ABatcher stack.ComponentID + L2AProposer stack.ComponentID + L2ChallengerA stack.ComponentID } func 
NewDefaultSingleChainInteropSystemIDs(l1ID, l2AID eth.ChainID) DefaultSingleChainInteropSystemIDs { ids := DefaultSingleChainInteropSystemIDs{ - L1: stack.L1NetworkID(l1ID), + L1: stack.NewL1NetworkID(l1ID), L1EL: stack.NewL1ELNodeID("l1", l1ID), L1CL: stack.NewL1CLNodeID("l1", l1ID), - Superchain: "main", // TODO(#15244): hardcoded to match the deployer default ID - Cluster: stack.ClusterID("main"), - Supervisor: "1-primary", // prefix with number for ordering of supervisors + Superchain: stack.NewSuperchainID("main"), // TODO(#15244): hardcoded to match the deployer default ID + Cluster: stack.NewClusterID("main"), + Supervisor: stack.NewSupervisorID("1-primary"), // prefix with number for ordering of supervisors TestSequencer: stack.NewTestSequencerID("dev"), - L2A: stack.L2NetworkID(l2AID), + L2A: stack.NewL2NetworkID(l2AID), L2ACL: stack.NewL2CLNodeID("sequencer", l2AID), L2AEL: stack.NewL2ELNodeID("sequencer", l2AID), L2ABatcher: stack.NewL2BatcherID("main", l2AID), @@ -383,11 +383,11 @@ func DefaultSingleChainInteropSystem(dest *DefaultSingleChainInteropSystemIDs) s opt := stack.Combine[*Orchestrator]() opt.Add(baseInteropSystem(&ids)) - opt.Add(WithL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2ACL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2ACL, []stack.ComponentID{ ids.L2AEL, })) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL})) // Upon evaluation of the option, export the contents we created. // Ids here are static, but other things may be exported too. 
@@ -428,7 +428,7 @@ func DefaultMinimalInteropSystem(dest *DefaultMinimalSystemIDs) stack.Option[*Or opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) opt.Add(WithL2MetricsDashboard()) @@ -483,19 +483,19 @@ func baseInteropSystem(ids *DefaultSingleChainInteropSystemIDs) stack.Option[*Or type DefaultInteropSystemIDs struct { DefaultSingleChainInteropSystemIDs - L2B stack.L2NetworkID - L2BCL stack.L2CLNodeID - L2BEL stack.L2ELNodeID + L2B stack.ComponentID + L2BCL stack.ComponentID + L2BEL stack.ComponentID - L2BBatcher stack.L2BatcherID - L2BProposer stack.L2ProposerID - L2ChallengerB stack.L2ChallengerID + L2BBatcher stack.ComponentID + L2BProposer stack.ComponentID + L2ChallengerB stack.ComponentID } func NewDefaultInteropSystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultInteropSystemIDs { ids := DefaultInteropSystemIDs{ DefaultSingleChainInteropSystemIDs: NewDefaultSingleChainInteropSystemIDs(l1ID, l2AID), - L2B: stack.L2NetworkID(l2BID), + L2B: stack.NewL2NetworkID(l2BID), L2BCL: stack.NewL2CLNodeID("sequencer", l2BID), L2BEL: stack.NewL2ELNodeID("sequencer", l2BID), L2BBatcher: stack.NewL2BatcherID("main", l2BID), @@ -528,14 +528,14 @@ func DefaultInteropSystem(dest *DefaultInteropSystemIDs) stack.Option[*Orchestra // Deploy separate challengers for each chain. Can be reduced to a single challenger when the DisputeGameFactory // is actually shared. 
- opt.Add(WithL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2ACL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2ACL, []stack.ComponentID{ ids.L2AEL, ids.L2BEL, })) - opt.Add(WithL2Challenger(ids.L2ChallengerB, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2BCL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2ChallengerB, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2BCL, []stack.ComponentID{ ids.L2BEL, ids.L2AEL, })) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) opt.Add(WithL2MetricsDashboard()) @@ -616,17 +616,17 @@ func defaultSupernodeSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystem opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) // Run super roots migration using supernode as super root source - opt.Add(WithSuperRootsFromSupernode(ids.L1.ChainID(), ids.L1EL, []stack.L2CLNodeID{ids.L2ACL, ids.L2BCL}, ids.Supernode, ids.L2A.ChainID())) + opt.Add(WithSuperRootsFromSupernode(ids.L1.ChainID(), ids.L1EL, []stack.ComponentID{ids.L2ACL, ids.L2BCL}, ids.Supernode, ids.L2A.ChainID())) // Start challenger after migration; use supernode RPCs as super-roots source. - opt.Add(WithSupernodeL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supernode, &ids.Cluster, []stack.L2ELNodeID{ + opt.Add(WithSupernodeL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supernode, &ids.Cluster, []stack.ComponentID{ ids.L2BEL, ids.L2AEL, })) // Start proposer after migration; use supernode RPCs as proposal source. 
opt.Add(WithSupernodeProposer(ids.L2AProposer, ids.L1EL, &ids.Supernode)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) opt.Add(WithL2MetricsDashboard()) @@ -697,17 +697,17 @@ func defaultSingleChainSupernodeSuperProofsSystem(dest *DefaultSingleChainSupern opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) // Run super roots migration using supernode as super root source - opt.Add(WithSuperRootsFromSupernode(ids.L1.ChainID(), ids.L1EL, []stack.L2CLNodeID{ids.L2ACL}, ids.Supernode, ids.L2A.ChainID())) + opt.Add(WithSuperRootsFromSupernode(ids.L1.ChainID(), ids.L1EL, []stack.ComponentID{ids.L2ACL}, ids.Supernode, ids.L2A.ChainID())) // Start challenger after migration; use supernode RPCs as super-roots source. - opt.Add(WithSupernodeL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supernode, &ids.Cluster, []stack.L2ELNodeID{ + opt.Add(WithSupernodeL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supernode, &ids.Cluster, []stack.ComponentID{ ids.L2AEL, })) // Start proposer after migration; use supernode RPCs as proposal source. 
opt.Add(WithSupernodeProposer(ids.L2AProposer, ids.L1EL, &ids.Supernode)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL})) opt.Add(WithL2MetricsDashboard()) @@ -755,13 +755,13 @@ func defaultSuperProofsSystem(dest *DefaultInteropSystemIDs, deployerOpts ...Dep opt.Add(WithManagedBySupervisor(ids.L2ACL, ids.Supervisor)) opt.Add(WithManagedBySupervisor(ids.L2BCL, ids.Supervisor)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - opt.Add(WithSuperRoots(ids.L1.ChainID(), ids.L1EL, []stack.L2CLNodeID{ids.L2ACL, ids.L2BCL}, ids.Supervisor, ids.L2A.ChainID())) + opt.Add(WithSuperRoots(ids.L1.ChainID(), ids.L1EL, []stack.ComponentID{ids.L2ACL, ids.L2BCL}, ids.Supervisor, ids.L2A.ChainID())) opt.Add(WithSuperProposer(ids.L2AProposer, ids.L1EL, &ids.Supervisor)) - opt.Add(WithSuperL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, []stack.L2ELNodeID{ + opt.Add(WithSuperL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, []stack.ComponentID{ ids.L2BEL, ids.L2AEL, })) @@ -780,18 +780,18 @@ type MultiSupervisorInteropSystemIDs struct { DefaultInteropSystemIDs // Supervisor does not support multinode so need a additional supervisor for verifier nodes - SupervisorSecondary stack.SupervisorID + SupervisorSecondary stack.ComponentID - L2A2CL stack.L2CLNodeID - L2A2EL stack.L2ELNodeID - L2B2CL stack.L2CLNodeID - L2B2EL stack.L2ELNodeID + L2A2CL stack.ComponentID + L2A2EL stack.ComponentID + L2B2CL stack.ComponentID + L2B2EL stack.ComponentID } func MultiSupervisorInteropSystem(dest *MultiSupervisorInteropSystemIDs) stack.Option[*Orchestrator] { ids := MultiSupervisorInteropSystemIDs{ DefaultInteropSystemIDs: NewDefaultInteropSystemIDs(DefaultL1ID, DefaultL2AID, 
DefaultL2BID), - SupervisorSecondary: "2-secondary", // prefix with number for ordering of supervisors + SupervisorSecondary: stack.NewSupervisorID("2-secondary"), // prefix with number for ordering of supervisors L2A2CL: stack.NewL2CLNodeID("verifier", DefaultL2AID), L2A2EL: stack.NewL2ELNodeID("verifier", DefaultL2AID), L2B2CL: stack.NewL2CLNodeID("verifier", DefaultL2BID), @@ -841,29 +841,29 @@ func ProofSystem(dest *DefaultMinimalSystemIDs) stack.Option[*Orchestrator] { } type SingleChainSystemWithFlashblocksIDs struct { - L1 stack.L1NetworkID - L1EL stack.L1ELNodeID - L1CL stack.L1CLNodeID + L1 stack.ComponentID + L1EL stack.ComponentID + L1CL stack.ComponentID - L2 stack.L2NetworkID - L2CL stack.L2CLNodeID - L2EL stack.L2ELNodeID - L2Builder stack.OPRBuilderNodeID - L2RollupBoost stack.RollupBoostNodeID + L2 stack.ComponentID + L2CL stack.ComponentID + L2EL stack.ComponentID + L2Builder stack.ComponentID + L2RollupBoost stack.ComponentID - L2Batcher stack.L2BatcherID - L2Proposer stack.L2ProposerID - L2Challenger stack.L2ChallengerID + L2Batcher stack.ComponentID + L2Proposer stack.ComponentID + L2Challenger stack.ComponentID - TestSequencer stack.TestSequencerID + TestSequencer stack.ComponentID } func NewDefaultSingleChainSystemWithFlashblocksIDs(l1ID, l2ID eth.ChainID) SingleChainSystemWithFlashblocksIDs { ids := SingleChainSystemWithFlashblocksIDs{ - L1: stack.L1NetworkID(l1ID), + L1: stack.NewL1NetworkID(l1ID), L1EL: stack.NewL1ELNodeID("l1", l1ID), L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.L2NetworkID(l2ID), + L2: stack.NewL2NetworkID(l2ID), L2CL: stack.NewL2CLNodeID("sequencer", l2ID), L2EL: stack.NewL2ELNodeID("sequencer", l2ID), L2Builder: stack.NewOPRBuilderNodeID("sequencer-builder", l2ID), @@ -906,21 +906,21 @@ func singleChainSystemWithFlashblocksOpts(ids *SingleChainSystemWithFlashblocksI opt.Add(WithL2ELNode(ids.L2EL, L2ELWithP2PConfig("127.0.0.1", seqID.Port, seqID.KeyHex(), nil, nil))) opt.Add(WithOPRBuilderNode(ids.L2Builder, 
OPRBuilderWithNodeIdentity(builderID, "127.0.0.1", nil, nil))) // Sequencer adds builder as regular static peer (not trusted) - opt.Add(WithL2ELP2PConnection(ids.L2EL, stack.L2ELNodeID(ids.L2Builder), false)) + opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2Builder, false)) // Builder adds sequencer as trusted peer - opt.Add(WithL2ELP2PConnection(stack.L2ELNodeID(ids.L2Builder), ids.L2EL, true)) + opt.Add(WithL2ELP2PConnection(ids.L2Builder, ids.L2EL, true)) opt.Add(WithRollupBoost(ids.L2RollupBoost, ids.L2EL, RollupBoostWithBuilderNode(ids.L2Builder))) - opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, stack.L2ELNodeID(ids.L2RollupBoost), L2CLSequencer())) + opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2RollupBoost, L2CLSequencer())) opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ ids.L2EL, })) diff --git a/op-devstack/sysgo/system_singlechain_multinode.go b/op-devstack/sysgo/system_singlechain_multinode.go index 893cdb5f9b663..2a2e1070d8d19 100644 --- a/op-devstack/sysgo/system_singlechain_multinode.go +++ b/op-devstack/sysgo/system_singlechain_multinode.go @@ -8,14 +8,14 @@ import ( type DefaultSingleChainMultiNodeSystemIDs struct { DefaultMinimalSystemIDs - L2CLB stack.L2CLNodeID - L2ELB stack.L2ELNodeID + L2CLB stack.ComponentID + L2ELB stack.ComponentID } type DefaultSingleChainMultiNodeWithTestSeqSystemIDs struct { DefaultSingleChainMultiNodeSystemIDs - TestSequencer stack.TestSequencerID + TestSequencer 
stack.ComponentID } func NewDefaultSingleChainMultiNodeSystemIDs(l1ID, l2ID eth.ChainID) DefaultSingleChainMultiNodeSystemIDs { @@ -30,7 +30,7 @@ func NewDefaultSingleChainMultiNodeSystemIDs(l1ID, l2ID eth.ChainID) DefaultSing func NewDefaultSingleChainMultiNodeWithTestSeqSystemIDs(l1ID, l2ID eth.ChainID) DefaultSingleChainMultiNodeWithTestSeqSystemIDs { return DefaultSingleChainMultiNodeWithTestSeqSystemIDs{ DefaultSingleChainMultiNodeSystemIDs: NewDefaultSingleChainMultiNodeSystemIDs(l1ID, l2ID), - TestSequencer: "dev", + TestSequencer: stack.NewTestSequencerID("dev"), } } diff --git a/op-devstack/sysgo/system_singlechain_twoverifiers.go b/op-devstack/sysgo/system_singlechain_twoverifiers.go index 2e960517712f7..3a38587fc22f6 100644 --- a/op-devstack/sysgo/system_singlechain_twoverifiers.go +++ b/op-devstack/sysgo/system_singlechain_twoverifiers.go @@ -9,10 +9,10 @@ import ( type DefaultSingleChainTwoVerifiersSystemIDs struct { DefaultSingleChainMultiNodeSystemIDs - L2CLC stack.L2CLNodeID - L2ELC stack.L2ELNodeID + L2CLC stack.ComponentID + L2ELC stack.ComponentID - TestSequencer stack.TestSequencerID + TestSequencer stack.ComponentID } func NewDefaultSingleChainTwoVerifiersSystemIDs(l1ID, l2ID eth.ChainID) DefaultSingleChainTwoVerifiersSystemIDs { @@ -20,6 +20,7 @@ func NewDefaultSingleChainTwoVerifiersSystemIDs(l1ID, l2ID eth.ChainID) DefaultS DefaultSingleChainMultiNodeSystemIDs: NewDefaultSingleChainMultiNodeSystemIDs(l1ID, l2ID), L2CLC: stack.NewL2CLNodeID("c", l2ID), L2ELC: stack.NewL2ELNodeID("c", l2ID), + TestSequencer: stack.NewTestSequencerID("dev"), } } @@ -62,11 +63,11 @@ func DefaultSingleChainTwoVerifiersFollowL2System(dest *DefaultSingleChainTwoVer opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) 
opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CLB, ids.L1EL, ids.L2ELB)) - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ ids.L2EL, })) diff --git a/op-devstack/sysgo/system_synctester.go b/op-devstack/sysgo/system_synctester.go index d6bc9f5a5fc17..5ba3c13315763 100644 --- a/op-devstack/sysgo/system_synctester.go +++ b/op-devstack/sysgo/system_synctester.go @@ -9,9 +9,9 @@ import ( type DefaultSimpleSystemWithSyncTesterIDs struct { DefaultMinimalSystemIDs - L2CL2 stack.L2CLNodeID - SyncTesterL2EL stack.L2ELNodeID - SyncTester stack.SyncTesterID + L2CL2 stack.ComponentID + SyncTesterL2EL stack.ComponentID + SyncTester stack.ComponentID } func NewDefaultSimpleSystemWithSyncTesterIDs(l1ID, l2ID eth.ChainID) DefaultSimpleSystemWithSyncTesterIDs { @@ -52,15 +52,15 @@ func DefaultSimpleSystemWithSyncTester(dest *DefaultSimpleSystemWithSyncTesterID opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - opt.Add(WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.L2ELNodeID{ + opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ ids.L2EL, })) - opt.Add(WithSyncTester(ids.SyncTester, []stack.L2ELNodeID{ids.L2EL})) + opt.Add(WithSyncTester(ids.SyncTester, []stack.ComponentID{ids.L2EL})) // Create a SyncTesterEL with the same chain ID as the EL node opt.Add(WithSyncTesterL2ELNode(ids.SyncTesterL2EL, ids.L2EL)) diff --git a/op-devstack/sysgo/system_synctester_ext.go 
b/op-devstack/sysgo/system_synctester_ext.go index b27c981cc75b5..a05820b17897c 100644 --- a/op-devstack/sysgo/system_synctester_ext.go +++ b/op-devstack/sysgo/system_synctester_ext.go @@ -11,24 +11,24 @@ import ( ) type DefaultMinimalExternalELSystemIDs struct { - L1 stack.L1NetworkID - L1EL stack.L1ELNodeID - L1CL stack.L1CLNodeID + L1 stack.ComponentID + L1EL stack.ComponentID + L1CL stack.ComponentID - L2 stack.L2NetworkID - L2CL stack.L2CLNodeID - L2EL stack.L2ELNodeID - L2ELReadOnly stack.L2ELNodeID + L2 stack.ComponentID + L2CL stack.ComponentID + L2EL stack.ComponentID + L2ELReadOnly stack.ComponentID - SyncTester stack.SyncTesterID + SyncTester stack.ComponentID } func NewExternalELSystemIDs(l1ID, l2ID eth.ChainID) DefaultMinimalExternalELSystemIDs { ids := DefaultMinimalExternalELSystemIDs{ - L1: stack.L1NetworkID(l1ID), + L1: stack.NewL1NetworkID(l1ID), L1EL: stack.NewL1ELNodeID("l1", l1ID), L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.L2NetworkID(l2ID), + L2: stack.NewL2NetworkID(l2ID), L2CL: stack.NewL2CLNodeID("verifier", l2ID), L2EL: stack.NewL2ELNodeID("sync-tester-el", l2ID), L2ELReadOnly: stack.NewL2ELNodeID("l2-el-readonly", l2ID), @@ -72,14 +72,14 @@ func ExternalELSystemWithEndpointAndSuperchainRegistry(dest *DefaultMinimalExter }, blockTime: 12, } - o.registry.Register(stack.ConvertL1NetworkID(ids.L1).ComponentID, l1Net) + o.registry.Register(ids.L1, l1Net) })) opt.Add(WithExtL1Nodes(ids.L1EL, ids.L1CL, networkPreset.L1ELEndpoint, networkPreset.L1CLBeaconEndpoint)) // Use empty dependency set and minimal cluster instead of deployer opt.Add(WithEmptyDepSet( - stack.L2NetworkID(l2ChainID), + stack.NewL2NetworkID(l2ChainID), networkPreset.L2NetworkName, )) diff --git a/op-devstack/sysgo/system_test.go b/op-devstack/sysgo/system_test.go index 77f7504f2f6c0..feddda2c9b608 100644 --- a/op-devstack/sysgo/system_test.go +++ b/op-devstack/sysgo/system_test.go @@ -94,7 +94,7 @@ func testSystem(ids DefaultInteropSystemIDs, system stack.System) { 
require.Equal("", netB.Label("nickname")) netB.SetLabel("nickname", "Network B") require.Equal("Network B", netB.Label("nickname")) - v := system.L2Network(match.WithLabel[stack.L2NetworkID, stack.L2Network]( + v := system.L2Network(match.WithLabel[stack.L2Network]( "nickname", "Network B")) require.Equal(ids.L2B, v.ID()) }) @@ -112,8 +112,8 @@ func testSystem(ids DefaultInteropSystemIDs, system stack.System) { t.Run("sync", func(t devtest.T) { require := t.Require() - seqA := system.L2Network(ids.L2A).L2CLNode(ids.L2ACL) - seqB := system.L2Network(ids.L2B).L2CLNode(ids.L2BCL) + seqA := system.L2Network(stack.ByID[stack.L2Network](ids.L2A)).L2CLNode(stack.ByID[stack.L2CLNode](ids.L2ACL)) + seqB := system.L2Network(stack.ByID[stack.L2Network](ids.L2B)).L2CLNode(stack.ByID[stack.L2CLNode](ids.L2BCL)) blocks := uint64(5) // wait for this many blocks, with some margin for delays for i := uint64(0); i < blocks*2+10; i++ { diff --git a/op-devstack/sysgo/system_two_l2_follow_l2.go b/op-devstack/sysgo/system_two_l2_follow_l2.go index 2ac09522f49dc..01c01c8dabc8e 100644 --- a/op-devstack/sysgo/system_two_l2_follow_l2.go +++ b/op-devstack/sysgo/system_two_l2_follow_l2.go @@ -10,10 +10,10 @@ import ( type DefaultTwoL2SupernodeFollowL2SystemIDs struct { DefaultTwoL2SystemIDs - L2AFollowerCL stack.L2CLNodeID - L2AFollowerEL stack.L2ELNodeID - L2BFollowerCL stack.L2CLNodeID - L2BFollowerEL stack.L2ELNodeID + L2AFollowerCL stack.ComponentID + L2AFollowerEL stack.ComponentID + L2BFollowerCL stack.ComponentID + L2BFollowerEL stack.ComponentID } func NewDefaultTwoL2SupernodeFollowL2SystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultTwoL2SupernodeFollowL2SystemIDs { diff --git a/op-devstack/sysgo/test_sequencer.go b/op-devstack/sysgo/test_sequencer.go index 7971eb7e884f1..5da43b784e9ac 100644 --- a/op-devstack/sysgo/test_sequencer.go +++ b/op-devstack/sysgo/test_sequencer.go @@ -40,7 +40,7 @@ import ( ) type TestSequencer struct { - id stack.TestSequencerID + id stack.ComponentID 
userRPC string jwtSecret [32]byte sequencers map[eth.ChainID]seqtypes.SequencerID @@ -77,20 +77,20 @@ func (s *TestSequencer) hydrate(sys stack.ExtensibleSystem) { // l2ChainIDs pairs together the CL and EL node IDs for an L2 chain. type l2ChainIDs struct { - CLID stack.L2CLNodeID - ELID stack.L2ELNodeID + CLID stack.ComponentID + ELID stack.ComponentID } -func WithTestSequencer(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLNodeID, l2CLID stack.L2CLNodeID, l1ELID stack.L1ELNodeID, l2ELID stack.L2ELNodeID) stack.Option[*Orchestrator] { +func WithTestSequencer(testSequencerID stack.ComponentID, l1CLID stack.ComponentID, l2CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID) stack.Option[*Orchestrator] { return withTestSequencerImpl(testSequencerID, l1CLID, l1ELID, l2ChainIDs{CLID: l2CLID, ELID: l2ELID}) } // WithTestSequencer2L2 creates a test sequencer that can build blocks on two L2 chains. // This is useful for testing same-timestamp interop scenarios where we need deterministic // block timestamps on both chains. -func WithTestSequencer2L2(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLNodeID, - l2ACLID stack.L2CLNodeID, l2BCLID stack.L2CLNodeID, - l1ELID stack.L1ELNodeID, l2AELID stack.L2ELNodeID, l2BELID stack.L2ELNodeID) stack.Option[*Orchestrator] { +func WithTestSequencer2L2(testSequencerID stack.ComponentID, l1CLID stack.ComponentID, + l2ACLID stack.ComponentID, l2BCLID stack.ComponentID, + l1ELID stack.ComponentID, l2AELID stack.ComponentID, l2BELID stack.ComponentID) stack.Option[*Orchestrator] { return withTestSequencerImpl(testSequencerID, l1CLID, l1ELID, l2ChainIDs{CLID: l2ACLID, ELID: l2AELID}, l2ChainIDs{CLID: l2BCLID, ELID: l2BELID}, @@ -99,7 +99,7 @@ func WithTestSequencer2L2(testSequencerID stack.TestSequencerID, l1CLID stack.L1 // withTestSequencerImpl is the shared implementation for creating test sequencers. // It supports any number of L2 chains. 
-func withTestSequencerImpl(testSequencerID stack.TestSequencerID, l1CLID stack.L1CLNodeID, l1ELID stack.L1ELNodeID, l2Chains ...l2ChainIDs) stack.Option[*Orchestrator] { +func withTestSequencerImpl(testSequencerID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2Chains ...l2ChainIDs) stack.Option[*Orchestrator] { return stack.AfterDeploy(func(orch *Orchestrator) { p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), testSequencerID)) require := p.Require() @@ -107,21 +107,18 @@ func withTestSequencerImpl(testSequencerID stack.TestSequencerID, l1CLID stack.L // Setup L1 components orch.writeDefaultJWT() - l1ELComponent, ok := orch.registry.Get(stack.ConvertL1ELNodeID(l1ELID).ComponentID) + l1EL, ok := orch.GetL1EL(l1ELID) require.True(ok, "l1 EL node required") - l1EL := l1ELComponent.(L1ELNode) l1ELClient, err := ethclient.DialContext(p.Ctx(), l1EL.UserRPC()) require.NoError(err) engineCl, err := dialEngine(p.Ctx(), l1EL.AuthRPC(), orch.jwtSecret) require.NoError(err) - l1CLComponent, ok := orch.registry.Get(stack.ConvertL1CLNodeID(l1CLID).ComponentID) + l1CL, ok := orch.GetL1CL(l1CLID) require.True(ok, "l1 CL node required") - l1CL := l1CLComponent.(*L1CLNode) - l1NetComponent, ok := orch.registry.Get(stack.ConvertL1NetworkID(stack.L1NetworkID(l1ELID.ChainID())).ComponentID) + l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) require.True(ok, "l1 net required") - l1Net := l1NetComponent.(*L1Network) // L1 sequencer IDs bid_L1 := seqtypes.BuilderID("test-l1-builder") @@ -184,9 +181,8 @@ func withTestSequencerImpl(testSequencerID stack.TestSequencerID, l1CLID stack.L l2EL, ok := orch.GetL2EL(l2Chain.ELID) require.True(ok, "l2 EL node required for chain %d", i) - l2CLComponent, ok := orch.registry.Get(stack.ConvertL2CLNodeID(l2Chain.CLID).ComponentID) + l2CL, ok := orch.GetL2CL(l2Chain.CLID) require.True(ok, "l2 CL node required for chain %d", i) - l2CL := l2CLComponent.(L2CLNode) // Generate unique IDs for this L2 
chain (use suffix for multi-chain, no suffix for single chain) suffix := "" @@ -320,6 +316,6 @@ func withTestSequencerImpl(testSequencerID stack.TestSequencerID, l1CLID stack.L sequencers: sequencerIDs, } logger.Info("Sequencer User RPC", "http_endpoint", testSequencerNode.userRPC) - orch.registry.Register(stack.ConvertTestSequencerID(testSequencerID).ComponentID, testSequencerNode) + orch.registry.Register(testSequencerID, testSequencerNode) }) } diff --git a/op-up/main.go b/op-up/main.go index a7a31dc0f965b..4ea3ec0b137e5 100644 --- a/op-up/main.go +++ b/op-up/main.go @@ -136,7 +136,7 @@ func runOpUp(ctx context.Context, stderr io.Writer, opUpDir string) error { sysgo.WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL), sysgo.WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil), - sysgo.WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2EL}), + sysgo.WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL}), ) orch := sysgo.NewOrchestrator(p, opts) diff --git a/rust/kona/tests/node/common/conductor_test.go b/rust/kona/tests/node/common/conductor_test.go index a414d5946a18f..5011e306c0907 100644 --- a/rust/kona/tests/node/common/conductor_test.go +++ b/rust/kona/tests/node/common/conductor_test.go @@ -54,7 +54,7 @@ func TestConductorLeadershipTransfer(gt *testing.T) { idToConductor := make(map[string]conductorWithInfo) for _, conductor := range conductors { - conductorId := strings.TrimPrefix(conductor.String(), stack.ConductorKind.String()+"-") + conductorId := strings.TrimPrefix(conductor.String(), stack.KindConductor.String()+"-") idToConductor[conductorId] = conductorWithInfo{conductor, consensus.ServerInfo{}} } for _, memberInfo := range membership.Servers { diff --git a/rust/kona/tests/node/utils/mixed_preset.go b/rust/kona/tests/node/utils/mixed_preset.go index e959f7e4320eb..f80eb415cc4e4 100644 --- a/rust/kona/tests/node/utils/mixed_preset.go +++ b/rust/kona/tests/node/utils/mixed_preset.go @@ -204,12 +204,8 @@ func 
(m *MixedOpKonaPreset) L2CLKonaNodes() []dsl.L2CLNode { return append(m.L2CLKonaValidatorNodes, m.L2CLKonaSequencerNodes...) } -func L2NodeMatcher[ - I interface { - comparable - Key() string - }, E stack.Identifiable[I]](value ...string) stack.Matcher[I, E] { - return match.MatchElemFn[I, E](func(elem E) bool { +func L2NodeMatcher[E stack.Identifiable](value ...string) stack.Matcher[E] { + return match.MatchElemFn[E](func(elem E) bool { for _, v := range value { if !strings.Contains(elem.ID().Key(), v) { return false @@ -258,16 +254,16 @@ func NewMixedOpKona(t devtest.T) *MixedOpKonaPreset { t.Gate().GreaterOrEqual(len(l2Net.L2CLNodes()), 2, "expected at least two L2CL nodes") - opSequencerCLNodes := L2NodeMatcher[stack.L2CLNodeID, stack.L2CLNode](string(OpNode), string(Sequencer)).Match(l2Net.L2CLNodes()) - konaSequencerCLNodes := L2NodeMatcher[stack.L2CLNodeID, stack.L2CLNode](string(KonaNode), string(Sequencer)).Match(l2Net.L2CLNodes()) + opSequencerCLNodes := L2NodeMatcher[stack.L2CLNode](string(OpNode), string(Sequencer)).Match(l2Net.L2CLNodes()) + konaSequencerCLNodes := L2NodeMatcher[stack.L2CLNode](string(KonaNode), string(Sequencer)).Match(l2Net.L2CLNodes()) - opCLNodes := L2NodeMatcher[stack.L2CLNodeID, stack.L2CLNode](string(OpNode), string(Validator)).Match(l2Net.L2CLNodes()) - konaCLNodes := L2NodeMatcher[stack.L2CLNodeID, stack.L2CLNode](string(KonaNode), string(Validator)).Match(l2Net.L2CLNodes()) + opCLNodes := L2NodeMatcher[stack.L2CLNode](string(OpNode), string(Validator)).Match(l2Net.L2CLNodes()) + konaCLNodes := L2NodeMatcher[stack.L2CLNode](string(KonaNode), string(Validator)).Match(l2Net.L2CLNodes()) - opSequencerELNodes := L2NodeMatcher[stack.L2ELNodeID, stack.L2ELNode](string(OpNode), string(Sequencer)).Match(l2Net.L2ELNodes()) - konaSequencerELNodes := L2NodeMatcher[stack.L2ELNodeID, stack.L2ELNode](string(KonaNode), string(Sequencer)).Match(l2Net.L2ELNodes()) - opELNodes := L2NodeMatcher[stack.L2ELNodeID, stack.L2ELNode](string(OpNode), 
string(Validator)).Match(l2Net.L2ELNodes()) - konaELNodes := L2NodeMatcher[stack.L2ELNodeID, stack.L2ELNode](string(KonaNode), string(Validator)).Match(l2Net.L2ELNodes()) + opSequencerELNodes := L2NodeMatcher[stack.L2ELNode](string(OpNode), string(Sequencer)).Match(l2Net.L2ELNodes()) + konaSequencerELNodes := L2NodeMatcher[stack.L2ELNode](string(KonaNode), string(Sequencer)).Match(l2Net.L2ELNodes()) + opELNodes := L2NodeMatcher[stack.L2ELNode](string(OpNode), string(Validator)).Match(l2Net.L2ELNodes()) + konaELNodes := L2NodeMatcher[stack.L2ELNode](string(KonaNode), string(Validator)).Match(l2Net.L2ELNodes()) out := &MixedOpKonaPreset{ Log: t.Logger(), @@ -297,95 +293,95 @@ func NewMixedOpKona(t devtest.T) *MixedOpKonaPreset { } type DefaultMixedOpKonaSystemIDs struct { - L1 stack.L1NetworkID - L1EL stack.L1ELNodeID - L1CL stack.L1CLNodeID + L1 stack.ComponentID + L1EL stack.ComponentID + L1CL stack.ComponentID - L2 stack.L2NetworkID + L2 stack.ComponentID - L2ELOpGethSequencerNodes []stack.L2ELNodeID - L2ELOpRethSequencerNodes []stack.L2ELNodeID + L2ELOpGethSequencerNodes []stack.ComponentID + L2ELOpRethSequencerNodes []stack.ComponentID - L2CLOpGethSequencerNodes []stack.L2CLNodeID - L2CLOpRethSequencerNodes []stack.L2CLNodeID + L2CLOpGethSequencerNodes []stack.ComponentID + L2CLOpRethSequencerNodes []stack.ComponentID - L2ELKonaGethSequencerNodes []stack.L2ELNodeID - L2ELKonaRethSequencerNodes []stack.L2ELNodeID + L2ELKonaGethSequencerNodes []stack.ComponentID + L2ELKonaRethSequencerNodes []stack.ComponentID - L2CLKonaGethSequencerNodes []stack.L2CLNodeID - L2CLKonaRethSequencerNodes []stack.L2CLNodeID + L2CLKonaGethSequencerNodes []stack.ComponentID + L2CLKonaRethSequencerNodes []stack.ComponentID - L2CLOpGethNodes []stack.L2CLNodeID - L2ELOpGethNodes []stack.L2ELNodeID + L2CLOpGethNodes []stack.ComponentID + L2ELOpGethNodes []stack.ComponentID - L2CLOpRethNodes []stack.L2CLNodeID - L2ELOpRethNodes []stack.L2ELNodeID + L2CLOpRethNodes []stack.ComponentID + 
L2ELOpRethNodes []stack.ComponentID - L2CLKonaGethNodes []stack.L2CLNodeID - L2ELKonaGethNodes []stack.L2ELNodeID + L2CLKonaGethNodes []stack.ComponentID + L2ELKonaGethNodes []stack.ComponentID - L2CLKonaRethNodes []stack.L2CLNodeID - L2ELKonaRethNodes []stack.L2ELNodeID + L2CLKonaRethNodes []stack.ComponentID + L2ELKonaRethNodes []stack.ComponentID - L2Batcher stack.L2BatcherID - L2Proposer stack.L2ProposerID + L2Batcher stack.ComponentID + L2Proposer stack.ComponentID } -func (ids *DefaultMixedOpKonaSystemIDs) L2CLSequencerNodes() []stack.L2CLNodeID { +func (ids *DefaultMixedOpKonaSystemIDs) L2CLSequencerNodes() []stack.ComponentID { list := append(ids.L2CLOpGethSequencerNodes, ids.L2CLOpRethSequencerNodes...) list = append(list, ids.L2CLKonaGethSequencerNodes...) list = append(list, ids.L2CLKonaRethSequencerNodes...) return list } -func (ids *DefaultMixedOpKonaSystemIDs) L2ELSequencerNodes() []stack.L2ELNodeID { +func (ids *DefaultMixedOpKonaSystemIDs) L2ELSequencerNodes() []stack.ComponentID { list := append(ids.L2ELOpGethSequencerNodes, ids.L2ELOpRethSequencerNodes...) list = append(list, ids.L2ELKonaGethSequencerNodes...) list = append(list, ids.L2ELKonaRethSequencerNodes...) return list } -func (ids *DefaultMixedOpKonaSystemIDs) L2CLValidatorNodes() []stack.L2CLNodeID { +func (ids *DefaultMixedOpKonaSystemIDs) L2CLValidatorNodes() []stack.ComponentID { list := append(ids.L2CLOpGethNodes, ids.L2CLOpRethNodes...) list = append(list, ids.L2CLKonaGethNodes...) list = append(list, ids.L2CLKonaRethNodes...) return list } -func (ids *DefaultMixedOpKonaSystemIDs) L2ELValidatorNodes() []stack.L2ELNodeID { +func (ids *DefaultMixedOpKonaSystemIDs) L2ELValidatorNodes() []stack.ComponentID { list := append(ids.L2ELOpGethNodes, ids.L2ELOpRethNodes...) list = append(list, ids.L2ELKonaGethNodes...) list = append(list, ids.L2ELKonaRethNodes...) 
return list } -func (ids *DefaultMixedOpKonaSystemIDs) L2CLNodes() []stack.L2CLNodeID { +func (ids *DefaultMixedOpKonaSystemIDs) L2CLNodes() []stack.ComponentID { return append(ids.L2CLSequencerNodes(), ids.L2CLValidatorNodes()...) } -func (ids *DefaultMixedOpKonaSystemIDs) L2ELNodes() []stack.L2ELNodeID { +func (ids *DefaultMixedOpKonaSystemIDs) L2ELNodes() []stack.ComponentID { return append(ids.L2ELSequencerNodes(), ids.L2ELValidatorNodes()...) } func NewDefaultMixedOpKonaSystemIDs(l1ID, l2ID eth.ChainID, l2NodeConfig L2NodeConfig) DefaultMixedOpKonaSystemIDs { - rethOpCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.OpNodesWithReth) - rethOpELNodes := make([]stack.L2ELNodeID, l2NodeConfig.OpNodesWithReth) - rethKonaCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.KonaNodesWithReth) - rethKonaELNodes := make([]stack.L2ELNodeID, l2NodeConfig.KonaNodesWithReth) - - gethOpCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.OpNodesWithGeth) - gethOpELNodes := make([]stack.L2ELNodeID, l2NodeConfig.OpNodesWithGeth) - gethKonaCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.KonaNodesWithGeth) - gethKonaELNodes := make([]stack.L2ELNodeID, l2NodeConfig.KonaNodesWithGeth) - - gethOpSequencerCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.OpSequencerNodesWithGeth) - gethOpSequencerELNodes := make([]stack.L2ELNodeID, l2NodeConfig.OpSequencerNodesWithGeth) - gethKonaSequencerCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.KonaSequencerNodesWithGeth) - gethKonaSequencerELNodes := make([]stack.L2ELNodeID, l2NodeConfig.KonaSequencerNodesWithGeth) - - rethOpSequencerCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.OpSequencerNodesWithReth) - rethOpSequencerELNodes := make([]stack.L2ELNodeID, l2NodeConfig.OpSequencerNodesWithReth) - rethKonaSequencerCLNodes := make([]stack.L2CLNodeID, l2NodeConfig.KonaSequencerNodesWithReth) - rethKonaSequencerELNodes := make([]stack.L2ELNodeID, l2NodeConfig.KonaSequencerNodesWithReth) + rethOpCLNodes := make([]stack.ComponentID, 
l2NodeConfig.OpNodesWithReth) + rethOpELNodes := make([]stack.ComponentID, l2NodeConfig.OpNodesWithReth) + rethKonaCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithReth) + rethKonaELNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithReth) + + gethOpCLNodes := make([]stack.ComponentID, l2NodeConfig.OpNodesWithGeth) + gethOpELNodes := make([]stack.ComponentID, l2NodeConfig.OpNodesWithGeth) + gethKonaCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithGeth) + gethKonaELNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithGeth) + + gethOpSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithGeth) + gethOpSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithGeth) + gethKonaSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithGeth) + gethKonaSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithGeth) + + rethOpSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithReth) + rethOpSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithReth) + rethKonaSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithReth) + rethKonaSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithReth) for i := range l2NodeConfig.OpSequencerNodesWithGeth { gethOpSequencerCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-geth-op-sequencer-%d", i), l2ID) @@ -428,10 +424,10 @@ func NewDefaultMixedOpKonaSystemIDs(l1ID, l2ID eth.ChainID, l2NodeConfig L2NodeC } ids := DefaultMixedOpKonaSystemIDs{ - L1: stack.L1NetworkID(l1ID), + L1: stack.NewL1NetworkID(l1ID), L1EL: stack.NewL1ELNodeID("l1", l1ID), L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.L2NetworkID(l2ID), + L2: stack.NewL2NetworkID(l2ID), L2CLOpGethSequencerNodes: gethOpSequencerCLNodes, L2ELOpGethSequencerNodes: gethOpSequencerELNodes, @@ -488,7 +484,7 @@ func DefaultMixedOpKonaSystem(dest 
*DefaultMixedOpKonaSystemIDs, l2NodeConfig L2 // Spawn all nodes. for i := range ids.L2CLKonaGethSequencerNodes { opt.Add(sysgo.WithOpGeth(ids.L2ELKonaGethSequencerNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaGethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaGethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + opt.Add(sysgo.WithKonaNode(ids.L2CLKonaGethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaGethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.IsSequencer = true cfg.SequencerSyncMode = sync.ELSync cfg.VerifierSyncMode = sync.ELSync @@ -497,14 +493,14 @@ func DefaultMixedOpKonaSystem(dest *DefaultMixedOpKonaSystemIDs, l2NodeConfig L2 for i := range ids.L2CLOpGethSequencerNodes { opt.Add(sysgo.WithOpGeth(ids.L2ELOpGethSequencerNodes[i])) - opt.Add(sysgo.WithOpNode(ids.L2CLOpGethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpGethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + opt.Add(sysgo.WithOpNode(ids.L2CLOpGethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpGethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.IsSequencer = true }))) } for i := range ids.L2CLKonaRethSequencerNodes { opt.Add(sysgo.WithOpReth(ids.L2ELKonaRethSequencerNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaRethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaRethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + opt.Add(sysgo.WithKonaNode(ids.L2CLKonaRethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaRethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.IsSequencer = true cfg.SequencerSyncMode = sync.ELSync cfg.VerifierSyncMode = sync.ELSync @@ -513,14 +509,14 @@ func DefaultMixedOpKonaSystem(dest *DefaultMixedOpKonaSystemIDs, 
l2NodeConfig L2 for i := range ids.L2CLOpRethSequencerNodes { opt.Add(sysgo.WithOpReth(ids.L2ELOpRethSequencerNodes[i])) - opt.Add(sysgo.WithOpNode(ids.L2CLOpRethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpRethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + opt.Add(sysgo.WithOpNode(ids.L2CLOpRethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpRethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.IsSequencer = true }))) } for i := range ids.L2CLKonaGethNodes { opt.Add(sysgo.WithOpGeth(ids.L2ELKonaGethNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaGethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaGethNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + opt.Add(sysgo.WithKonaNode(ids.L2CLKonaGethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaGethNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.SequencerSyncMode = sync.ELSync cfg.VerifierSyncMode = sync.ELSync }))) @@ -533,7 +529,7 @@ func DefaultMixedOpKonaSystem(dest *DefaultMixedOpKonaSystemIDs, l2NodeConfig L2 for i := range ids.L2CLKonaRethNodes { opt.Add(sysgo.WithOpReth(ids.L2ELKonaRethNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaRethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaRethNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.L2CLNodeID, cfg *sysgo.L2CLConfig) { + opt.Add(sysgo.WithKonaNode(ids.L2CLKonaRethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaRethNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { cfg.SequencerSyncMode = sync.ELSync cfg.VerifierSyncMode = sync.ELSync }))) @@ -558,7 +554,7 @@ func DefaultMixedOpKonaSystem(dest *DefaultMixedOpKonaSystemIDs, l2NodeConfig L2 opt.Add(sysgo.WithBatcher(ids.L2Batcher, ids.L1EL, CLNodeIDs[0], ELNodeIDs[0])) opt.Add(sysgo.WithProposer(ids.L2Proposer, ids.L1EL, &CLNodeIDs[0], nil)) - 
opt.Add(sysgo.WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ELNodeIDs[0]})) + opt.Add(sysgo.WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ELNodeIDs[0]})) opt.Add(stack.Finally(func(orch *sysgo.Orchestrator) { *dest = ids diff --git a/rust/kona/tests/node/utils/mixed_preset_with_conductor.go b/rust/kona/tests/node/utils/mixed_preset_with_conductor.go index e8b11c82de0f0..5e8a4f929dbf1 100644 --- a/rust/kona/tests/node/utils/mixed_preset_with_conductor.go +++ b/rust/kona/tests/node/utils/mixed_preset_with_conductor.go @@ -12,7 +12,7 @@ import ( type MinimalWithConductors struct { *MixedOpKonaPreset - ConductorSets map[stack.L2NetworkID]dsl.ConductorSet + ConductorSets map[stack.ComponentID]dsl.ConductorSet } func NewMixedOpKonaWithConductors(t devtest.T) *MinimalWithConductors { @@ -20,7 +20,7 @@ func NewMixedOpKonaWithConductors(t devtest.T) *MinimalWithConductors { orch := presets.Orchestrator() orch.Hydrate(system) chains := system.L2Networks() - conductorSets := make(map[stack.L2NetworkID]dsl.ConductorSet) + conductorSets := make(map[stack.ComponentID]dsl.ConductorSet) for _, chain := range chains { chainMatcher := match.L2ChainById(chain.ID()) l2 := system.L2Network(match.Assume(t, chainMatcher)) diff --git a/rust/kona/tests/node/utils/test_sequencer_preset.go b/rust/kona/tests/node/utils/test_sequencer_preset.go index 5fbaf46327280..56dcdaa18afbb 100644 --- a/rust/kona/tests/node/utils/test_sequencer_preset.go +++ b/rust/kona/tests/node/utils/test_sequencer_preset.go @@ -44,7 +44,7 @@ func NewMixedOpKonaWithTestSequencer(t devtest.T) *MinimalWithTestSequencersPres type DefaultMinimalWithTestSequencerIds struct { DefaultMixedOpKonaSystemIDs DefaultMixedOpKonaSystemIDs - TestSequencerId stack.TestSequencerID + TestSequencerId stack.ComponentID } func NewDefaultMinimalWithTestSequencerIds(l2Config L2NodeConfig) DefaultMinimalWithTestSequencerIds { @@ -57,7 +57,7 @@ func NewDefaultMinimalWithTestSequencerIds(l2Config L2NodeConfig) 
DefaultMinimal KonaNodesWithGeth: l2Config.KonaNodesWithGeth, KonaNodesWithReth: l2Config.KonaNodesWithReth, }), - TestSequencerId: "test-sequencer", + TestSequencerId: stack.NewTestSequencerID("test-sequencer"), } } diff --git a/rust/kona/tests/supervisor/presets/interop_minimal.go b/rust/kona/tests/supervisor/presets/interop_minimal.go index 64d9c77e98be6..e454a3b9c166f 100644 --- a/rust/kona/tests/supervisor/presets/interop_minimal.go +++ b/rust/kona/tests/supervisor/presets/interop_minimal.go @@ -38,7 +38,7 @@ func DefaultMinimalInteropSystem(dest *sysgo.DefaultInteropSystemIDs) stack.Opti // Since we may create an interop infra-setup, before interop is even scheduled to run. opt.Add(sysgo.WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, &ids.Supervisor)) - opt.Add(sysgo.WithFaucets([]stack.L1ELNodeID{ids.L1EL}, []stack.L2ELNodeID{ids.L2AEL, ids.L2BEL})) + opt.Add(sysgo.WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) // Upon evaluation of the option, export the contents we created. // Ids here are static, but other things may be exported too. 
diff --git a/rust/op-reth/crates/tests/proofs/utils/preset.go b/rust/op-reth/crates/tests/proofs/utils/preset.go index 166adec2edad2..55cbc804103b5 100644 --- a/rust/op-reth/crates/tests/proofs/utils/preset.go +++ b/rust/op-reth/crates/tests/proofs/utils/preset.go @@ -22,14 +22,14 @@ import ( type L2ELClient string const ( - L2ELClientGeth L2ELClient = "geth" - L2ELClientReth L2ELClient = "reth" + L2ELClientGeth L2ELClient = "geth" + L2ELClientReth L2ELClient = "reth" L2ELClientRethWithProofs L2ELClient = "reth-with-proof" ) type L2ELNodeID struct { - stack.L2ELNodeID - Client L2ELClient + L2ELNodeID stack.ComponentID + Client L2ELClient } type L2ELNode struct { @@ -117,12 +117,8 @@ func WithMixedOpProofPreset() stack.CommonOption { return stack.MakeCommon(DefaultMixedOpProofSystem(&DefaultMixedOpProofSystemIDs{})) } -func L2NodeMatcher[ - I interface { - comparable - Key() string - }, E stack.Identifiable[I]](value ...string) stack.Matcher[I, E] { - return match.MatchElemFn[I, E](func(elem E) bool { +func L2NodeMatcher[E stack.Identifiable](value ...string) stack.Matcher[E] { + return match.MatchElemFn[E](func(elem E) bool { for _, v := range value { if !strings.Contains(elem.ID().Key(), v) { return false @@ -223,37 +219,37 @@ func NewMixedOpProofPreset(t devtest.T) *MixedOpProofPreset { } type DefaultMixedOpProofSystemIDs struct { - L1 stack.L1NetworkID - L1EL stack.L1ELNodeID - L1CL stack.L1CLNodeID + L1 stack.ComponentID + L1EL stack.ComponentID + L1CL stack.ComponentID - L2 stack.L2NetworkID + L2 stack.ComponentID - L2CLSequencer stack.L2CLNodeID + L2CLSequencer stack.ComponentID L2ELSequencer L2ELNodeID - L2CLValidator stack.L2CLNodeID + L2CLValidator stack.ComponentID L2ELValidator L2ELNodeID - L2Batcher stack.L2BatcherID - L2Proposer stack.L2ProposerID - L2Challenger stack.L2ChallengerID + L2Batcher stack.ComponentID + L2Proposer stack.ComponentID + L2Challenger stack.ComponentID - TestSequencer stack.TestSequencerID + TestSequencer stack.ComponentID } func 
NewDefaultMixedOpProofSystemIDs(l1ID, l2ID eth.ChainID) DefaultMixedOpProofSystemIDs { ids := DefaultMixedOpProofSystemIDs{ - L1: stack.L1NetworkID(l1ID), + L1: stack.NewL1NetworkID(l1ID), L1EL: stack.NewL1ELNodeID("l1", l1ID), L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.L2NetworkID(l2ID), + L2: stack.NewL2NetworkID(l2ID), L2CLSequencer: stack.NewL2CLNodeID("sequencer", l2ID), L2CLValidator: stack.NewL2CLNodeID("validator", l2ID), L2Batcher: stack.NewL2BatcherID("main", l2ID), L2Proposer: stack.NewL2ProposerID("main", l2ID), L2Challenger: stack.NewL2ChallengerID("main", l2ID), - TestSequencer: "test-sequencer", + TestSequencer: stack.NewTestSequencerID("test-sequencer"), } // default to op-geth for sequencer and op-reth-with-proof for validator @@ -360,7 +356,7 @@ func defaultMixedOpProofSystemOpts(src, dest *DefaultMixedOpProofSystemIDs) stac opt.Add(sysgo.WithBatcher(src.L2Batcher, src.L1EL, src.L2CLSequencer, src.L2ELSequencer.L2ELNodeID)) opt.Add(sysgo.WithProposer(src.L2Proposer, src.L1EL, &src.L2CLSequencer, nil)) - opt.Add(sysgo.WithFaucets([]stack.L1ELNodeID{src.L1EL}, []stack.L2ELNodeID{src.L2ELSequencer.L2ELNodeID})) + opt.Add(sysgo.WithFaucets([]stack.ComponentID{src.L1EL}, []stack.ComponentID{src.L2ELSequencer.L2ELNodeID})) opt.Add(sysgo.WithTestSequencer(src.TestSequencer, src.L1CL, src.L2CLSequencer, src.L1EL, src.L2ELSequencer.L2ELNodeID)) From a3a933ad9496e13d7fd8aa695cf475dfb87e4cb8 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Fri, 6 Mar 2026 12:15:24 -0500 Subject: [PATCH 069/201] fix(op-service): increase Anvil startup timeout from 5s to 30s (#19424) The 5s timeout is too tight under CI load. When 12 parallel test nodes compete for CPU/IO on a 2xlarge box, Anvil sometimes takes >5s to print its "Listening on" line, triggering "anvil did not start in time" in TestImplementations and TestSuperchain. 
This is the #2 and #3 most frequent flake in the repo over the last 7 days (67 and 46 occurrences). 30s gives Anvil enough headroom on a loaded machine while still failing fast on a genuine startup failure. Co-authored-by: smartcontracts Co-authored-by: Claude Sonnet 4.6 --- op-service/testutils/devnet/anvil.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-service/testutils/devnet/anvil.go b/op-service/testutils/devnet/anvil.go index 2440af5afd2c1..9be23f107644d 100644 --- a/op-service/testutils/devnet/anvil.go +++ b/op-service/testutils/devnet/anvil.go @@ -114,7 +114,7 @@ func (r *Anvil) Start() error { go r.outputStream(r.stdout) go r.outputStream(r.stderr) - timeoutC := time.NewTimer(5 * time.Second) + timeoutC := time.NewTimer(30 * time.Second) select { case <-r.startedCh: From 968a92e3f0b1f1d56f984058395d52f0328dac95 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Fri, 6 Mar 2026 12:45:29 -0500 Subject: [PATCH 070/201] fix(depreqres): stabilize L2CLB snapshot after disconnect to fix ELSync triple-failure flake (#19418) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(depreqres): stabilize L2CLB head snapshot after disconnect to eliminate ELSync flake DisconnectPeer removes the libp2p peer but a buffered gossip payload can still arrive and be processed via AddUnsafePayload. This happens because SyncModeReqResp=true routes CL gossip payloads through the CLSync path even in ELSync mode (sync_deriver.go:109). When this in-flight message lands after DisconnectPeer returns but before the ssB_before snapshot is taken, ssB_before records block N+1 instead of N, causing the subsequent require.Equal(ssB_after, ssB_before) to pass when it should (both are N+1) — but the CI pattern shows it records N while the buffered message bumps ssB_after to N+1, causing the failure.
Fix: poll ssB until stable (two consecutive reads agree) before taking the baseline snapshot. Drain window is at most a few milliseconds; the 5s timeout is a safety margin that will never be hit in practice. Co-Authored-By: Claude Sonnet 4.6 * refactor(depreqres): extract stableSyncStatus helper per review feedback Extract the duplicated gossip-drain polling loop into a stableSyncStatus helper function, used by both UnsafeChainNotStalling_Disconnect and UnsafeChainNotStalling_RestartOpNode. Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: smartcontracts Co-authored-by: Claude Sonnet 4.6 --- .../tests/depreqres/common/common.go | 21 +++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/op-acceptance-tests/tests/depreqres/common/common.go b/op-acceptance-tests/tests/depreqres/common/common.go index 7dd6cfea95484..42ac2185fb6b6 100644 --- a/op-acceptance-tests/tests/depreqres/common/common.go +++ b/op-acceptance-tests/tests/depreqres/common/common.go @@ -9,9 +9,26 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/testreq" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +// stableSyncStatus returns the sync status of node after any in-flight gossip messages +// have been drained. DisconnectPeer closes the libp2p connection but a buffered gossip +// payload can still arrive and be processed via AddUnsafePayload (SyncModeReqResp=true +// routes CL gossip through the CLSync path even in ELSync mode). Polling until the +// head is stable ensures the snapshot reflects a quiesced state. 
+func stableSyncStatus(require *testreq.Assertions, node *dsl.L2CLNode) *eth.SyncStatus { + ss := node.SyncStatus() + require.Eventually(func() bool { + next := node.SyncStatus() + stable := next.UnsafeL2.Number == ss.UnsafeL2.Number + ss = next + return stable + }, 5*time.Second, 200*time.Millisecond, "L2CLB head should stabilize after disconnect") + return ss +} + func UnsafeChainNotStalling_Disconnect(gt *testing.T, syncMode sync.Mode, sleep time.Duration) { t := devtest.SerialT(gt) sys := presets.NewSingleChainMultiNodeWithoutCheck(t) @@ -30,7 +47,7 @@ func UnsafeChainNotStalling_Disconnect(gt *testing.T, syncMode sync.Mode, sleep sys.L2CL.DisconnectPeer(sys.L2CLB) ssA_before := sys.L2CL.SyncStatus() - ssB_before := sys.L2CLB.SyncStatus() + ssB_before := stableSyncStatus(require, sys.L2CLB) l.Info("L2CL status before delay", "unsafeL2", ssA_before.UnsafeL2.ID(), "safeL2", ssA_before.SafeL2.ID()) l.Info("L2CLB status before delay", "unsafeL2", ssB_before.UnsafeL2.ID(), "safeL2", ssB_before.SafeL2.ID()) @@ -73,7 +90,7 @@ func UnsafeChainNotStalling_RestartOpNode(gt *testing.T, syncMode sync.Mode, sle sys.L2CL.DisconnectPeer(sys.L2CLB) ssA_before := sys.L2CL.SyncStatus() - ssB_before := sys.L2CLB.SyncStatus() + ssB_before := stableSyncStatus(require, sys.L2CLB) l.Info("L2CL status before delay", "unsafeL2", ssA_before.UnsafeL2.ID(), "safeL2", ssA_before.SafeL2.ID()) l.Info("L2CLB status before delay", "unsafeL2", ssB_before.UnsafeL2.ID(), "safeL2", ssB_before.SafeL2.ID()) From 8be6fa75cb95ebf0de29b8bfc1edcfd4301a89e9 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Fri, 6 Mar 2026 13:04:51 -0500 Subject: [PATCH 071/201] test(contracts): fix testFuzz_params_validValues_succeeds rejection rate (#19410) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * test(contracts): fix testFuzz_params_validValues_succeeds rejection rate The fuzz test was exhausting Foundry's 
65,536-rejection limit due to two vm.assume filters: 1. `vm.assume(r / e * e == r)` — divisibility constraint; with _elasticityMultiplier uniform in [1,255] this rejects ~97.6% of inputs. 2. `vm.assume(_prevBaseFee * maxPercentIncrease * _gasLimit / 100 < T)` — gas burn safety condition; rejects a further slice of inputs. Fix 1 — round by construction: - Move _elasticityMultiplier binding first - Replace vm.assume with rounding: `r = r / e * e` - Re-bound _gasLimit and _prevBoughtGas from the rounded r (required to preserve the invariant _gasLimit ≤ _maxResourceLimit after rounding) - Keep a targeted vm.assume for the rare case where rounding drops below 21000 Fix 2 — direct bound on _prevBaseFee: - Replace the gas burn vm.assume with a computed cap: `(100 * T - 1) / (maxPercentIncrease * gasLimit)` for m > 0 - Proved equivalent: p * m / 100 < T ↔ p ≤ (100T - 1) / m Formal Lean 4 / Mathlib proof of correctness for both changes: https://gist.github.com/smartcontracts/b0030421b51e305066abacfec0c0b57b Co-Authored-By: Claude Sonnet 4.6 * test(contracts): scope gasBurn locals to avoid stack pressure Add scoped block around gasBurnDenom/gasBurnCap variables in testFuzz_params_validValues_succeeds. These variables are not needed after _prevBaseFee is bounded, so scoping them frees stack slots. Note: forge-build-dev (lite/no-optimizer profile) has a pre-existing stack-too-deep failure on this function at the same location on upstream develop. This commit does not fix that pre-existing issue. Co-Authored-By: Claude Sonnet 4.6 * test(contracts): fix stack-too-deep in testFuzz_params_validValues_succeeds Scope maxPercentIncrease, rcfg, and the storage/assert block into separate blocks to bring the function under the 16-slot stack limit for the lite (no-optimizer) build profile. At the deepest point (params() destructuring) the outer stack now holds only: 10 params + gasLimit + _prevBlockNum + 3 tuple vars = 15 slots. 
Co-Authored-By: Claude Sonnet 4.6 * test(contracts): forge fmt Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: smartcontracts Co-authored-by: Claude Sonnet 4.6 --- .../test/L1/OptimismPortal2.t.sol | 96 +++++++++++-------- 1 file changed, 56 insertions(+), 40 deletions(-) diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index 349f799ef2006..7c64dc2f9fe53 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -2628,68 +2628,84 @@ contract OptimismPortal2_Params_Test is CommonTest { // Get the set system gas limit uint64 gasLimit = systemConfig.gasLimit(); + // Bind _elasticityMultiplier first so we can round _maxResourceLimit by construction + // rather than filtering with vm.assume (which previously rejected ~97.6% of inputs). + _elasticityMultiplier = uint8(bound(_elasticityMultiplier, 1, type(uint8).max)); + _baseFeeMaxChangeDenominator = uint8(bound(_baseFeeMaxChangeDenominator, 2, type(uint8).max)); + // Bound resource config _systemTxMaxGas = uint32(bound(_systemTxMaxGas, 0, gasLimit - 21000)); _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, MAX_GAS_LIMIT / 8)); _maxResourceLimit = uint32(bound(_maxResourceLimit, 21000, gasLimit - _systemTxMaxGas)); _maximumBaseFee = uint128(bound(_maximumBaseFee, 1, type(uint128).max)); _minimumBaseFee = uint32(bound(_minimumBaseFee, 0, _maximumBaseFee - 1)); + + // Round _maxResourceLimit to the nearest multiple of _elasticityMultiplier. + // This guarantees divisibility by construction, replacing the vm.assume filter. + // Formal equivalence proof: https://gist.github.com/smartcontracts/b0030421b51e305066abacfec0c0b57b + _maxResourceLimit = uint32((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier); + + // Rounding can push _maxResourceLimit below 21000 in rare edge cases (e.g., e=255, + // r_pre=21000 → r=20910). 
Filter those out with a targeted vm.assume. + vm.assume(_maxResourceLimit >= 21000); + + // Re-bound _gasLimit and _prevBoughtGas from the rounded _maxResourceLimit so that + // the invariants _gasLimit ≤ _maxResourceLimit and _prevBoughtGas ≤ _maxResourceLimit - _gasLimit + // are preserved after rounding. _gasLimit = uint64(bound(_gasLimit, 21000, _maxResourceLimit)); _gasLimit = uint64(bound(_gasLimit, 0, gasLimit)); - _prevBaseFee = uint128(bound(_prevBaseFee, 0, 3 gwei)); _prevBoughtGas = uint64(bound(_prevBoughtGas, 0, _maxResourceLimit - _gasLimit)); _blockDiff = uint8(bound(_blockDiff, 0, 3)); - _baseFeeMaxChangeDenominator = uint8(bound(_baseFeeMaxChangeDenominator, 2, type(uint8).max)); - _elasticityMultiplier = uint8(bound(_elasticityMultiplier, 1, type(uint8).max)); // Prevent values that would cause reverts vm.assume(uint256(_maxResourceLimit) + uint256(_systemTxMaxGas) <= gasLimit); - vm.assume(((_maxResourceLimit / _elasticityMultiplier) * _elasticityMultiplier) == _maxResourceLimit); - - // Although we typically want to limit the usage of vm.assume, we've constructed the above - // bounds to satisfy the assumptions listed in this specific section. These assumptions - // serve only to act as an additional sanity check on top of the bounds and should not - // result in an unnecessary number of test rejections. vm.assume(gasLimit >= _gasLimit); vm.assume(_minimumBaseFee < _maximumBaseFee); // Base fee can increase quickly and mean that we can't buy the amount of gas we want. - // Here we add a VM assumption to bound the potential increase. - // Compute the maximum possible increase in base fee. - uint256 maxPercentIncrease = uint256(_elasticityMultiplier - 1) * 100 / uint256(_baseFeeMaxChangeDenominator); - - // Assume that we have enough gas to burn. - // Compute the maximum amount of gas we'd need to burn. - // Assume we need 1/5 of our gas to do other stuff. 
- vm.assume(_prevBaseFee * maxPercentIncrease * _gasLimit / 100 < MAX_GAS_LIMIT * 4 / 5); + // Replace the gas burn vm.assume with a direct bound on _prevBaseFee. + // For m > 0: p*m/100 < T ↔ p ≤ (100*T - 1)/m (proved in FuzzProof.lean, GasBurnEquivalence). + // For m = 0: gas burn is always 0 < T, so no cap needed. + // Scoped to free stack slots for the assertions below (lite profile, no via-IR). + { + uint256 maxPercentIncrease = + uint256(_elasticityMultiplier - 1) * 100 / uint256(_baseFeeMaxChangeDenominator); + uint256 gasBurnDenom = maxPercentIncrease * uint256(_gasLimit); + uint256 gasBurnCap = gasBurnDenom == 0 ? 3 gwei : (uint256(MAX_GAS_LIMIT) * 4 / 5 * 100 - 1) / gasBurnDenom; + _prevBaseFee = uint128(bound(_prevBaseFee, 0, gasBurnCap < 3 gwei ? gasBurnCap : 3 gwei)); + } // Pick a pseudorandom block number vm.roll(uint256(keccak256(abi.encode(_blockDiff))) % uint256(type(uint16).max) + uint256(_blockDiff)); - // Create a resource config to mock the call to the system config with - IResourceMetering.ResourceConfig memory rcfg = IResourceMetering.ResourceConfig({ - maxResourceLimit: _maxResourceLimit, - elasticityMultiplier: _elasticityMultiplier, - baseFeeMaxChangeDenominator: _baseFeeMaxChangeDenominator, - minimumBaseFee: _minimumBaseFee, - systemTxMaxGas: _systemTxMaxGas, - maximumBaseFee: _maximumBaseFee - }); - vm.mockCall(address(systemConfig), abi.encodeCall(systemConfig.resourceConfig, ()), abi.encode(rcfg)); - - // Set the resource params - uint256 _prevBlockNum = block.number - _blockDiff; - vm.store( - address(optimismPortal2), - bytes32(uint256(1)), - bytes32((_prevBlockNum << 192) | (uint256(_prevBoughtGas) << 128) | _prevBaseFee) - ); + // Create a resource config to mock the call to the system config with. + // Scoped to free stack slots before the params() destructuring below. 
+ { + IResourceMetering.ResourceConfig memory rcfg = IResourceMetering.ResourceConfig({ + maxResourceLimit: _maxResourceLimit, + elasticityMultiplier: _elasticityMultiplier, + baseFeeMaxChangeDenominator: _baseFeeMaxChangeDenominator, + minimumBaseFee: _minimumBaseFee, + systemTxMaxGas: _systemTxMaxGas, + maximumBaseFee: _maximumBaseFee + }); + vm.mockCall(address(systemConfig), abi.encodeCall(systemConfig.resourceConfig, ()), abi.encode(rcfg)); + } - // Ensure that the storage setting is correct - (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum) = optimismPortal2.params(); - assertEq(prevBaseFee, _prevBaseFee); - assertEq(prevBoughtGas, _prevBoughtGas); - assertEq(prevBlockNum, _prevBlockNum); + // Set the resource params and verify storage is correct. + // Scoped to keep _prevBlockNum and the params() tuple off the outer stack. + { + uint256 _prevBlockNum = block.number - _blockDiff; + vm.store( + address(optimismPortal2), + bytes32(uint256(1)), + bytes32((_prevBlockNum << 192) | (uint256(_prevBoughtGas) << 128) | _prevBaseFee) + ); + (uint128 prevBaseFee, uint64 prevBoughtGas, uint64 prevBlockNum) = optimismPortal2.params(); + assertEq(prevBaseFee, _prevBaseFee); + assertEq(prevBoughtGas, _prevBoughtGas); + assertEq(prevBlockNum, _prevBlockNum); + } // Do a deposit, should not revert optimismPortal2.depositTransaction{ gas: MAX_GAS_LIMIT }({ From f002a635ba6a647d6c7a516da723623db83dab76 Mon Sep 17 00:00:00 2001 From: Ariel Diaz <65925295+aliersh@users.noreply.github.com> Date: Fri, 6 Mar 2026 16:43:24 -0500 Subject: [PATCH 072/201] docs(contracts): move l2 upgrade contracts spec from specs repo (#19409) --- .../specs/l2-upgrades-2-contracts.md | 506 ++++++++++++++++++ 1 file changed, 506 insertions(+) create mode 100644 packages/contracts-bedrock/specs/l2-upgrades-2-contracts.md diff --git a/packages/contracts-bedrock/specs/l2-upgrades-2-contracts.md b/packages/contracts-bedrock/specs/l2-upgrades-2-contracts.md new file mode 100644 index 
0000000000000..cca3e7fc1f26d --- /dev/null +++ b/packages/contracts-bedrock/specs/l2-upgrades-2-contracts.md @@ -0,0 +1,506 @@ +# L2 Upgrade Contracts + + + +**Table of Contents** + +- [Overview](#overview) +- [ConditionalDeployer](#conditionaldeployer) + - [Overview](#overview-1) + - [Definitions](#definitions) + - [CREATE2 Collision](#create2-collision) + - [Deterministic Deployment Proxy](#deterministic-deployment-proxy) + - [Assumptions](#assumptions) + - [aCD-001: Deterministic Deployment Proxy is Available and Correct](#acd-001-deterministic-deployment-proxy-is-available-and-correct) + - [Mitigations](#mitigations) + - [aCD-002: Initcode is Well-Formed](#acd-002-initcode-is-well-formed) + - [Mitigations](#mitigations-1) + - [Invariants](#invariants) + - [iCD-001: Deterministic Address Derivation](#icd-001-deterministic-address-derivation) + - [Impact](#impact) + - [iCD-002: Idempotent Deployment Operations](#icd-002-idempotent-deployment-operations) + - [Impact](#impact-1) + - [iCD-003: Non-Reverting Collision Handling](#icd-003-non-reverting-collision-handling) + - [Impact](#impact-2) + - [iCD-004: Collision Detection Accuracy](#icd-004-collision-detection-accuracy) + - [Impact](#impact-3) + - [iCD-005: Contract Availability After Deployment](#icd-005-contract-availability-after-deployment) + - [Impact](#impact-4) +- [L2ProxyAdmin](#l2proxyadmin) + - [Overview](#overview-2) + - [Definitions](#definitions-1) + - [Depositor Account](#depositor-account) + - [Predeploy](#predeploy) + - [Assumptions](#assumptions-1) + - [aL2PA-001: Depositor Account is Controlled by Protocol](#al2pa-001-depositor-account-is-controlled-by-protocol) + - [Mitigations](#mitigations-2) + - [aL2PA-002: L2ContractsManager Code is Not Malicious](#al2pa-002-l2contractsmanager-code-is-not-malicious) + - [Mitigations](#mitigations-3) + - [aL2PA-003: Predeploy Proxies Follow Expected Patterns](#al2pa-003-predeploy-proxies-follow-expected-patterns) + - [Mitigations](#mitigations-4) + - 
[Invariants](#invariants-1) + - [iL2PA-001: Exclusive Depositor Authorization for Batch Upgrades](#il2pa-001-exclusive-depositor-authorization-for-batch-upgrades) + - [Impact](#impact-5) + - [iL2PA-002: Safe Delegation to L2ContractsManager](#il2pa-002-safe-delegation-to-l2contractsmanager) + - [Impact](#impact-6) + - [iL2PA-003: Backwards Compatibility Maintained](#il2pa-003-backwards-compatibility-maintained) + - [Impact](#impact-7) +- [L2ContractsManager](#l2contractsmanager) + - [Overview](#overview-3) + - [Definitions](#definitions-2) + - [Network-Specific Configuration](#network-specific-configuration) + - [Feature Flag](#feature-flag) + - [Initialization Parameters](#initialization-parameters) + - [Assumptions](#assumptions-2) + - [aL2CM-001: Existing Predeploys Provide Valid Configuration](#al2cm-001-existing-predeploys-provide-valid-configuration) + - [Mitigations](#mitigations-5) + - [aL2CM-002: Implementation Addresses Are Pre-Computed Correctly](#al2cm-002-implementation-addresses-are-pre-computed-correctly) + - [Mitigations](#mitigations-6) + - [aL2CM-003: Predeploy Proxies Are Upgradeable](#al2cm-003-predeploy-proxies-are-upgradeable) + - [Mitigations](#mitigations-7) + - [aL2CM-004: Feature Flags Are Correctly Configured](#al2cm-004-feature-flags-are-correctly-configured) + - [Mitigations](#mitigations-8) + - [Invariants](#invariants-2) + - [iL2CM-001: Deterministic Upgrade Execution](#il2cm-001-deterministic-upgrade-execution) + - [Impact](#impact-8) + - [iL2CM-002: Configuration Preservation](#il2cm-002-configuration-preservation) + - [Impact](#impact-9) + - [iL2CM-003: Upgrade Atomicity](#il2cm-003-upgrade-atomicity) + - [Impact](#impact-10) + - [iL2CM-004: Clear-and-Reinitialize Pattern](#il2cm-004-clear-and-reinitialize-pattern) + - [Impact](#impact-11) + - [iL2CM-005: No Storage Corruption During DELEGATECALL](#il2cm-005-no-storage-corruption-during-delegatecall) + - [Impact](#impact-12) + - [iL2CM-006: Complete Upgrade 
Coverage](#il2cm-006-complete-upgrade-coverage) + - [Impact](#impact-13) +- [Upgrade Execution](#upgrade-execution) + + + +## Overview + +This specification defines the mechanism for upgrading L2 predeploy contracts through deterministic, hard fork-driven +Network Upgrade Transactions (NUTs). The system enables safe, well-tested upgrades of L2 contracts with both +implementation and upgrade paths written in Solidity, ensuring determinism, verifiability, and testability across all +client implementations. + +The upgrade system maintains the existing pattern of injecting Network Upgrade Transactions at specific fork block +heights while improving the development and testing process. Upgrade transactions are defined in JSON bundles (see +[Bundle Format](./l2-upgrades-1-execution.md#bundle-format)) that are tracked in git, generated from Solidity scripts, +and executed deterministically at fork activation. + +## ConditionalDeployer + +### Overview + +The ConditionalDeployer contract enables deterministic deployment of contract implementations while maintaining +idempotency across upgrade transactions. It ensures that unchanged contract bytecode always deploys to the same +address, and that attempting to deploy already-deployed bytecode succeeds silently _rather than reverting_. + +This component enables upgrade transactions to unconditionally deploy for all implementation contracts without +requiring developers to manually track which contracts have changed between upgrades. + +The ConditionalDeployer is included in the L2Genesis state to ensure availability for all future network upgrades. It is +deployed as a preinstall at a deterministic address and does not require upgradeability. + +The deployment function returns an address for off-chain convenience, but this return value is not used in Network +Upgrade Transactions, as deployment addresses must be pre-computed before transaction generation. 
+ +### Definitions + +#### CREATE2 Collision + +A CREATE2 collision occurs when attempting to deploy contract bytecode to an address where a contract with identical +bytecode already exists. This happens when the same initcode and salt are used in multiple deployment attempts. + +Note: when CREATE2 targets an address that already has code, the +[zero address is placed on the stack][create2-spec] (execution specs). + +[create2-spec]: +https://github.com/ethereum/execution-specs/blob/4ef381a0f75c96b52da635653ab580e731d3882a/src/ethereum/forks/prague/vm/instructions/system.py#L112 + +#### Deterministic Deployment Proxy + +The canonical deterministic deployment proxy contract at address `0x4e59b44847b379578588920cA78FbF26c0B4956C`, +originally deployed by Nick Johnson (Arachnid). This contract provides CREATE2-based deployment with a fixed deployer +address across all chains. + +Note: when the deterministic deployment proxy deploys to an address that already has code, [it will revert with no data](https://github.com/Arachnid/deterministic-deployment-proxy/blob/be3c5974db5028d502537209329ff2e730ed336c/source/deterministic-deployment-proxy.yul#L13). +Otherwise the ConditionalDeployer would not be required. + +### Assumptions + +#### aCD-001: Deterministic Deployment Proxy is Available and Correct + +The [Deterministic Deployment Proxy](#deterministic-deployment-proxy) exists at the expected address and correctly +implements CREATE2 deployment semantics. The proxy must deterministically compute deployment addresses and execute +deployments as specified. 
+ +##### Mitigations + +- The [Deterministic Deployment Proxy](#deterministic-deployment-proxy) is a well-established contract deployed across + all EVM chains using the same keyless deployment transaction +- The proxy's behavior is verifiable by inspecting its bytecode and testing deployment operations +- The proxy contract is immutable and cannot be upgraded or modified + +#### aCD-002: Initcode is Well-Formed + +Callers provide valid EVM initcode that, when executed, will either successfully deploy a contract or revert with a +clear error. Malformed initcode that produces undefined behavior is not considered. + +##### Mitigations + +- Initcode is generated by the Solidity compiler from verified source code +- The upgrade transaction generation process includes validation of all deployment operations +- Fork-based testing exercises all deployments before inclusion in upgrade bundles + +### Invariants + +#### iCD-001: Deterministic Address Derivation + +For any given initcode and salt combination, the ConditionalDeployer MUST always compute the same deployment address, +regardless of whether the contract has been previously deployed. The address calculation MUST match the CREATE2 +address that would be computed by the [Deterministic Deployment Proxy](#deterministic-deployment-proxy). + +##### Impact + +**Severity: Critical** + +If address derivation is non-deterministic or inconsistent with CREATE2 semantics, upgrade transactions could deploy +implementations to unexpected addresses, breaking proxy upgrade operations. + +#### iCD-002: Idempotent Deployment Operations + +Calling the ConditionalDeployer multiple times with identical initcode and salt MUST produce the same outcome: the +first call deploys the contract, and subsequent calls succeed without modification. No operation should revert due to +a [CREATE2 Collision](#create2-collision). 
+ +##### Impact + +**Severity: Critical** + +If deployments are not idempotent, upgrade transactions that attempt to deploy unchanged implementations would revert +or deploy the implementation to an unexpected address, breaking the upgrade. + +#### iCD-003: Non-Reverting Collision Handling + +When a [CREATE2 Collision](#create2-collision) is detected (contract already deployed at the target address), the +ConditionalDeployer MUST return successfully without reverting and without modifying blockchain state. + +##### Impact + +**Severity: Medium** + +If collisions cause reverts, the presence of reverting transactions in an upgrade block would cause confusion. + +#### iCD-004: Collision Detection Accuracy + +The ConditionalDeployer MUST correctly distinguish between addresses where no contract exists (deploy needed) and +addresses where a contract already exists (collision detected). False negatives (failing to detect existing contracts) +and false positives (detecting non-existent contracts) are both prohibited. + +##### Impact + +**Severity: High** + +False negatives would cause failed deployments while false positives would prevent legitimate deployments, both +breaking the upgrade process. + +#### iCD-005: Contract Availability After Deployment + +After execution of the ConditionalDeployer, the address returned by the deployment operation MUST contain the runtime +bytecode derived from the provided initcode. This ensures that contracts deployed through the ConditionalDeployer are +immediately available and functional at their expected addresses. + +##### Impact + +**Severity: Critical** + +If the contract is not properly available at the expected address after deployment, subsequent transactions that +attempt to call or upgrade to that implementation address will fail, causing the upgrade to fail and potentially +halting the chain. 
+ +## L2ProxyAdmin + +### Overview + +The L2ProxyAdmin is the administrative contract responsible for managing proxy upgrades for L2 predeploy contracts. It +is deployed as a predeploy at address `0x4200000000000000000000000000000000000018` and serves as the `admin` for all +upgradeable L2 predeploy proxies. + +The upgraded L2ProxyAdmin implementation extends the existing proxy administration interface with a new +`upgradePredeploys()` function that orchestrates batch upgrades of multiple predeploys by delegating to an +[L2ContractsManager](#l2contractsmanager) contract. This design enables deterministic, testable upgrade paths written +entirely in Solidity. + +### Definitions + +#### Depositor Account + +The special system address `0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001` controlled by the L2 protocol's derivation +pipeline. This account is used to submit system transactions including L1 attributes updates and upgrade +transactions. + +#### Predeploy + +A contract deployed at a predetermined address in the L2 genesis state. Predeploys provide core L2 protocol +functionality and are typically deployed behind proxies to enable upgradability. + +### Assumptions + +#### aL2PA-001: Depositor Account is Controlled by Protocol + +The [Depositor Account](#depositor-account) is exclusively controlled by the L2 protocol's derivation and execution +pipeline. No external parties can submit transactions from this address. + +##### Mitigations + +- The [Depositor Account](#depositor-account) address has no known private key +- Transactions from this address can only originate from the protocol's derivation pipeline processing L1 deposit events +- The address is hardcoded in the protocol specification and client implementations + +#### aL2PA-002: L2ContractsManager Code is Not Malicious + +The [L2ContractsManager](#l2contractsmanager) contract that receives the DELEGATECALL from `upgradePredeploys()` +correctly implements the upgrade logic and does not contain malicious code. 
This includes not corrupting the +L2ProxyAdmin's storage, not performing unauthorized operations, and not introducing vulnerabilities when executing +in the context of the L2ProxyAdmin. + +##### Mitigations + +- The L2ContractsManager address is deterministically computed and verified during upgrade bundle generation +- The L2ContractsManager implementation is developed, reviewed, and tested alongside the upgrade bundle +- Fork-based testing validates the complete upgrade execution before production deployment +- The L2ContractsManager bytecode is verifiable against source code on a specific commit +- Code review and security audits examine the L2ContractsManager implementation + +#### aL2PA-003: Predeploy Proxies Follow Expected Patterns + +[Predeploys](#predeploy) being upgraded follow the expected proxy patterns (ERC-1967 or similar) and correctly handle +`upgradeTo()` and `upgradeToAndCall()` operations when called by the ProxyAdmin. + +##### Mitigations + +- All L2 predeploys use standardized proxy implementations from the contracts-bedrock package +- Proxy implementations are thoroughly tested and audited +- Fork-based testing validates upgrade operations against actual deployed proxies + +### Invariants + +#### iL2PA-001: Exclusive Depositor Authorization for Batch Upgrades + +The `upgradePredeploys()` function MUST only be callable by the [Depositor Account](#depositor-account). No other +address, including the current ProxyAdmin owner, can invoke this function. + +##### Impact + +**Severity: Critical** + +If unauthorized addresses could call `upgradePredeploys()`, attackers could execute arbitrary upgrades, enabling +complete takeover of all L2 predeploy contracts. + +#### iL2PA-002: Safe Delegation to L2ContractsManager + +When `upgradePredeploys()` executes a DELEGATECALL to the [L2ContractsManager](#l2contractsmanager), the call MUST +preserve the ProxyAdmin's storage context.
+ +##### Impact + +**Severity: Critical** + +If the DELEGATECALL is not properly executed, upgrades could fail silently or the ProxyAdmin's storage could be +corrupted, resulting in loss of admin control over predeploys. + +#### iL2PA-003: Backwards Compatibility Maintained + +The upgraded L2ProxyAdmin implementation MUST maintain the existing interface for standard proxy administration +functions. Existing functionality for upgrading individual proxies, changing proxy admins, and querying proxy state +MUST continue to work as before. + +Note: Backwards compatibility requires maintaining the full ProxyAdmin interface, but does not require supporting +upgrades of legacy proxy types (ResolvedDelegate and Chugsplash proxies). Currently, no predeploy uses these legacy +proxy types. + +##### Impact + +**Severity: High** + +If backwards compatibility is broken, existing tooling and scripts that interact with the ProxyAdmin could fail, +preventing emergency responses and breaking operational procedures. + +## L2ContractsManager + +### Overview + +The L2ContractsManager is a contract deployed fresh for each upgrade that contains the upgrade logic and coordination +for a specific set of predeploy upgrades. When invoked via DELEGATECALL from the [L2ProxyAdmin](#l2proxyadmin), it +gathers network-specific configuration from existing predeploys, and executes +upgrade operations for all affected predeploys. + +Each L2ContractsManager instance is purpose-built for a specific upgrade, deployed via the +[ConditionalDeployer](#conditionaldeployer), and referenced directly in the upgrade transaction. The contract is +stateless and contains all upgrade logic in code, ensuring determinism and verifiability. + +The L2ContractsManager assumes that all prerequisite contracts (implementations, ConditionalDeployer, etc.) have +already been deployed and are available in the state before the L2ContractsManager is called. The transaction +execution sequence ensures this ordering. 
+ +### Definitions + +#### Network-Specific Configuration + +Configuration values that vary between L2 chains, such as custom gas token parameters, operator fee configurations, or +chain-specific feature flags. These values are typically stored in system predeploys like `L1Block` and must be +preserved across upgrades. + +#### Feature Flag + +A boolean or enumerated value that enables or disables optional protocol features. Feature flags allow different +upgrade paths for development environments (alphanets), testing environments, and production chains. Flags are read +from a dedicated FeatureFlags contract during upgrade execution. + +#### Initialization Parameters + +Constructor arguments or initializer function parameters required by a predeploy implementation. Similar to the OPCMv2 +implementation, we will assume that all config values are first read, and then contracts are reinitialized with +those same parameters. + +### Assumptions + +#### aL2CM-001: Existing Predeploys Provide Valid Configuration + +The existing [predeploy](#predeploy) contracts contain valid [network-specific configuration](#network-specific-configuration) +that can be read and used during the upgrade. Configuration values are accurate, properly formatted, and represent the +intended chain configuration. + +##### Mitigations + +- Configuration is read from well-established predeploys that have been operating correctly +- Fork-based testing validates configuration gathering against real chain state + +#### aL2CM-002: Implementation Addresses Are Pre-Computed Correctly + +The implementation addresses used by the L2ContractsManager are pre-computed by the off-chain bundle generation script +using the same CREATE2 parameters that will be used by the [ConditionalDeployer](#conditionaldeployer). The +L2ContractsManager receives these addresses via its constructor and does not compute them. Address mismatches would +cause proxies to point to incorrect or non-existent implementations. 
+ +##### Mitigations + +- Implementation addresses are computed off-chain using deterministic CREATE2 formula during bundle generation +- The computed addresses are provided to the L2ContractsManager constructor at deployment time +- Fork-based testing validates that all implementation addresses exist and contain expected bytecode +- Address computation is isolated in shared libraries to prevent divergence + +#### aL2CM-003: Predeploy Proxies Are Upgradeable + +All [predeploy](#predeploy) proxies targeted for upgrade support the `upgradeTo()` and `upgradeToAndCall()` functions +and will accept upgrade calls from the [L2ProxyAdmin](#l2proxyadmin) executing the DELEGATECALL. + +##### Mitigations + +- All L2 predeploys use standardized proxy implementations with well-tested upgrade functions +- Fork-based testing exercises upgrade operations against actual deployed proxies +- Non-upgradeable predeploys (if they exist) will be excluded from the upgrade process + +#### aL2CM-004: Feature Flags Are Correctly Configured + +When [feature flags](#feature-flag) are used to customize upgrade behavior, the FeatureFlags contract is properly +configured in the environment and returns consistent values throughout the upgrade execution. +Production features which are enabled must be exposed by the L1Block contract's interface. + +##### Mitigations + +- Fork and local testing validates feature flag behavior across different configurations + +### Invariants + +#### iL2CM-001: Deterministic Upgrade Execution + +The L2ContractsManager's `upgrade()` function MUST execute deterministically, producing identical state changes when +given identical pre-upgrade blockchain state. The function MUST NOT read external state that could vary between +executions (timestamps, block hashes, etc.) and MUST NOT accept runtime parameters. 
+ +##### Impact + +**Severity: Critical** + +If upgrade execution is non-deterministic, different L2 nodes could produce different post-upgrade states, causing +consensus failures and halting the chain. + +#### iL2CM-002: Configuration Preservation + +All [network-specific configuration](#network-specific-configuration) that exists before the upgrade MUST be preserved +in the upgraded predeploy implementations. Configuration values MUST be read from existing predeploys and properly +passed to new implementations during upgrade. + +##### Impact + +**Severity: Critical** + +If configuration is not preserved, chains could lose critical settings like custom gas token addresses or operator fee +parameters, breaking fee calculations and chain-specific functionality. + +#### iL2CM-003: Upgrade Atomicity + +All predeploy upgrades within a single L2ContractsManager execution MUST succeed or fail atomically. If any upgrade +operation fails, the entire DELEGATECALL MUST revert, leaving all predeploys in their pre-upgrade state. + +##### Impact + +**Severity: Critical** + +If upgrades are not atomic, a partial failure could leave some predeploys upgraded and others not, creating an +inconsistent system state that breaks inter-contract dependencies. + +#### iL2CM-004: Clear-and-Reinitialize Pattern + +For each predeploy being upgraded, the L2ContractsManager MUST: +1. use `upgradeTo()` to set the implementation to the StorageSetter +2. Reset the `initialized` value to 0 +3. use `upgradeToAndCall()` to call the `initialize()` method. + +This ensures storage is properly cleared and reconstructed, avoiding storage layout conflicts. + +##### Impact + +**Severity: Critical** + +If contracts are not properly reinitialized with preserved configuration, chain-specific settings could be lost or +storage corruption could occur, breaking critical system contracts. 
+ +#### iL2CM-005: No Storage Corruption During DELEGATECALL + +When executing in the [L2ProxyAdmin](#l2proxyadmin) context via DELEGATECALL, the L2ContractsManager MUST NOT corrupt +or modify the ProxyAdmin's own storage. All storage modifications must be directed to the predeploy proxies being +upgraded. + +##### Impact + +**Severity: Critical** + +If the L2ContractsManager corrupts ProxyAdmin storage, it could change the ProxyAdmin's owner or disable future upgrade +capability, compromising the entire upgrade system. + +#### iL2CM-006: Complete Upgrade Coverage + +The L2ContractsManager MUST upgrade all predeploys intended for the upgrade. It MUST NOT skip predeploys that should +be upgraded, even if their implementations are unchanged, to maintain consistency across all chains executing the +upgrade. + +##### Impact + +**Severity: High** + +If predeploys are skipped incorrectly, chains would have inconsistent contract versions, violating the goal of bringing +all chains to a consistent version. + +## Upgrade Execution + +The L2ContractsManager contract should be deployed with all implementation addresses provided to its constructor and +stored in an `Implementations` struct. Where a dev feature or feature flag requires a different implementation, both +implementations will be deployed and stored in the L2ContractsManager. The L2ContractsManager will contain branching +logic which will enable a different implementation depending on the configured features. + +The upgrade flow of the L2ContractsManager will be similar to the OPCMv2, where the `FullConfig` is first collected from +all contracts, and the config values are provided to the contract's `initialize()` method using `upgradeToAndCall()`.
From c14cd1e9a961c5e42c90abb56487439d21ebad58 Mon Sep 17 00:00:00 2001 From: Matt Solomon Date: Fri, 6 Mar 2026 15:12:05 -0800 Subject: [PATCH 073/201] chore(contracts-bedrock): deduplicate test helpers (#19434) - Extract _mockAndExpect to shared MockHelper contract (7 files) - Deduplicate _changeClaimStatus: import free function instead of redefining (3 files) - Deduplicate _dummyClaim: promote to free function alongside _changeClaimStatus (3 files) - Remove duplicate EIP1967Helper import in OptimismPortal2.t.sol Co-authored-by: Claude Opus 4.6 --- .../test/L1/OptimismPortal2.t.sol | 1 - .../contracts-bedrock/test/L2/FeeSplitter.t.sol | 9 ++------- .../contracts-bedrock/test/L2/L2ProxyAdmin.t.sol | 9 ++------- .../test/L2/L2StandardBridgeInterop.t.sol | 9 ++------- .../test/L2/OptimismSuperchainERC20.t.sol | 9 ++------- .../test/L2/SuperchainERC20.t.sol | 9 ++------- .../test/L2/SuperchainETHBridge.t.sol | 9 ++------- .../test/L2/SuperchainTokenBridge.t.sol | 9 ++------- .../test/dispute/FaultDisputeGame.t.sol | 10 +++++----- .../test/dispute/PermissionedDisputeGame.t.sol | 13 +------------ .../test/dispute/SuperFaultDisputeGame.t.sol | 13 +------------ .../dispute/SuperPermissionedDisputeGame.t.sol | 13 +------------ .../contracts-bedrock/test/utils/MockHelper.sol | 16 ++++++++++++++++ 13 files changed, 38 insertions(+), 91 deletions(-) create mode 100644 packages/contracts-bedrock/test/utils/MockHelper.sol diff --git a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol index 7c64dc2f9fe53..3ee1dbf534c1a 100644 --- a/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol +++ b/packages/contracts-bedrock/test/L1/OptimismPortal2.t.sol @@ -18,7 +18,6 @@ import { Types } from "src/libraries/Types.sol"; import { Hashing } from "src/libraries/Hashing.sol"; import { Constants } from "src/libraries/Constants.sol"; import { AddressAliasHelper } from "src/vendor/AddressAliasHelper.sol"; -import { 
EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; import { DevFeatures } from "src/libraries/DevFeatures.sol"; import { Features } from "src/libraries/Features.sol"; import "src/dispute/lib/Types.sol"; diff --git a/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol b/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol index 9ea3dac298057..b264544370706 100644 --- a/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol +++ b/packages/contracts-bedrock/test/L2/FeeSplitter.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.15; // Testing import { CommonTest } from "test/setup/CommonTest.sol"; +import { MockHelper } from "test/utils/MockHelper.sol"; // Mocks import { MockFeeVault } from "test/mocks/MockFeeVault.sol"; @@ -23,7 +24,7 @@ import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; /// @title FeeSplitter_TestInit /// @notice Reusable test initialization for `FeeSplitter` tests. -contract FeeSplitter_TestInit is CommonTest { +contract FeeSplitter_TestInit is CommonTest, MockHelper { // Events event FeesReceived(address indexed sender, uint256 amount, uint256 newBalance); event FeeDisbursementIntervalUpdated(uint128 oldFeeDisbursementInterval, uint128 newFeeDisbursementInterval); @@ -56,12 +57,6 @@ contract FeeSplitter_TestInit is CommonTest { _feeVaults[3] = Predeploys.OPERATOR_FEE_VAULT; } - /// @notice Helper function to setup a mock and expect a call to it. 
- function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { - vm.mockCall(_receiver, _calldata, _returned); - vm.expectCall(_receiver, _calldata); - } - /// @notice Helper to mock fee vault calls for successful withdrawal scenarios function _mockFeeVaultForSuccessfulWithdrawal(address _vault, uint256 _balance) internal { _mockFeeVaultForSuccessfulWithdrawalWithSplitter(address(feeSplitter), _vault, _balance); diff --git a/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol b/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol index 6f98312f866e0..e1f00008c029a 100644 --- a/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.15; // Testing import { CommonTest } from "test/setup/CommonTest.sol"; +import { MockHelper } from "test/utils/MockHelper.sol"; import { ProxyAdmin_SetProxyType_Test, ProxyAdmin_SetImplementationName_Test, @@ -30,7 +31,7 @@ import { IL2ContractsManager } from "interfaces/L2/IL2ContractsManager.sol"; /// @title L2ProxyAdmin_TestInit /// @notice Reusable test initialization for `L2ProxyAdmin` tests. -abstract contract L2ProxyAdmin_TestInit is CommonTest { +abstract contract L2ProxyAdmin_TestInit is CommonTest, MockHelper { IL2ProxyAdmin public l2ProxyAdmin; address public owner; @@ -43,12 +44,6 @@ abstract contract L2ProxyAdmin_TestInit is CommonTest { l2ProxyAdmin = IL2ProxyAdmin(Predeploys.PROXY_ADMIN); owner = l2ProxyAdmin.owner(); } - - /// @notice Helper function to setup a mock and expect a call to it. 
- function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { - vm.mockCall(_receiver, _calldata, _returned); - vm.expectCall(_receiver, _calldata); - } } /// @title L2ProxyAdmin_Constructor_Test diff --git a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol index dbe264f37ace3..53bed5901c9c9 100644 --- a/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol +++ b/packages/contracts-bedrock/test/L2/L2StandardBridgeInterop.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.15; // Testing import { CommonTest } from "test/setup/CommonTest.sol"; +import { MockHelper } from "test/utils/MockHelper.sol"; // Interfaces import { IMintableAndBurnableERC20 } from "interfaces/L2/IMintableAndBurnableERC20.sol"; @@ -15,7 +16,7 @@ import { IOptimismERC20Factory } from "interfaces/L2/IOptimismERC20Factory.sol"; /// @title L2StandardBridgeInterop_TestInit /// @notice Reusable test initialization for `L2StandardBridgeInterop` tests. -abstract contract L2StandardBridgeInterop_TestInit is CommonTest { +abstract contract L2StandardBridgeInterop_TestInit is CommonTest, MockHelper { /// @notice Emitted when a conversion is made. event Converted(address indexed from, address indexed to, address indexed caller, uint256 amount); @@ -28,12 +29,6 @@ abstract contract L2StandardBridgeInterop_TestInit is CommonTest { super.setUp(); } - /// @notice Helper function to setup a mock and expect a call to it. 
- function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { - vm.mockCall(_receiver, _calldata, _returned); - vm.expectCall(_receiver, _calldata); - } - /// @notice Mock ERC20 decimals function _mockDecimals(address _token, uint8 _decimals) internal { _mockAndExpect(_token, abi.encodeCall(IERC20Metadata.decimals, ()), abi.encode(_decimals)); diff --git a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol index cc61a0991f83b..a91885e17e20a 100644 --- a/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismSuperchainERC20.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.25; // Testing import { Test } from "test/setup/Test.sol"; +import { MockHelper } from "test/utils/MockHelper.sol"; import { EIP1967Helper } from "test/mocks/EIP1967Helper.sol"; // Libraries @@ -23,7 +24,7 @@ import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; /// @title OptimismSuperchainERC20_TestInit /// @notice Reusable test initialization for `OptimismSuperchainERC20` tests. -abstract contract OptimismSuperchainERC20_TestInit is Test { +abstract contract OptimismSuperchainERC20_TestInit is Test, MockHelper { address internal constant ZERO_ADDRESS = address(0); address internal constant REMOTE_TOKEN = address(0x123); string internal constant NAME = "OptimismSuperchainERC20"; @@ -89,12 +90,6 @@ abstract contract OptimismSuperchainERC20_TestInit is Test { ) ); } - - /// @notice Helper function to setup a mock and expect a call to it. 
- function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { - vm.mockCall(_receiver, _calldata, _returned); - vm.expectCall(_receiver, _calldata); - } } /// @title OptimismSuperchainERC20_Initialize_Test diff --git a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol index 7835c11542bd8..db1b9e77f7802 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainERC20.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.25; // Testing import { Test } from "test/setup/Test.sol"; +import { MockHelper } from "test/utils/MockHelper.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -16,7 +17,7 @@ import { MockSuperchainERC20Implementation } from "test/mocks/SuperchainERC20Imp /// @title SuperchainERC20_TestInit /// @notice Reusable test initialization for `SuperchainERC20` tests. -abstract contract SuperchainERC20_TestInit is Test { +abstract contract SuperchainERC20_TestInit is Test, MockHelper { address internal constant ZERO_ADDRESS = address(0); address internal constant SUPERCHAIN_TOKEN_BRIDGE = Predeploys.SUPERCHAIN_TOKEN_BRIDGE; address internal constant MESSENGER = Predeploys.L2_TO_L2_CROSS_DOMAIN_MESSENGER; @@ -27,12 +28,6 @@ abstract contract SuperchainERC20_TestInit is Test { function setUp() public { superchainERC20 = new MockSuperchainERC20Implementation(); } - - /// @notice Helper function to setup a mock and expect a call to it. 
- function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { - vm.mockCall(_receiver, _calldata, _returned); - vm.expectCall(_receiver, _calldata); - } } /// @title SuperchainERC20_CrosschainMint_Test diff --git a/packages/contracts-bedrock/test/L2/SuperchainETHBridge.t.sol b/packages/contracts-bedrock/test/L2/SuperchainETHBridge.t.sol index 7d10c2d86c79f..2dd08e080d40c 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainETHBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainETHBridge.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.15; // Testing import { CommonTest } from "test/setup/CommonTest.sol"; +import { MockHelper } from "test/utils/MockHelper.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -15,7 +16,7 @@ import { IL2ToL2CrossDomainMessenger } from "interfaces/L2/IL2ToL2CrossDomainMes /// @title SuperchainETHBridge_TestInit /// @notice Reusable test initialization for `SuperchainETHBridge` tests. -abstract contract SuperchainETHBridge_TestInit is CommonTest { +abstract contract SuperchainETHBridge_TestInit is CommonTest, MockHelper { event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination); event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source); @@ -33,12 +34,6 @@ abstract contract SuperchainETHBridge_TestInit is CommonTest { vm.etch(address(ethLiquidity), vm.getDeployedCode("ETHLiquidity.sol:ETHLiquidity")); } } - - /// @notice Helper function to setup a mock and expect a call to it. 
- function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { - vm.mockCall(_receiver, _calldata, _returned); - vm.expectCall(_receiver, _calldata); - } } /// @title SuperchainETHBridge_SendETH_Test diff --git a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol index daf0f1546eeb3..6cb59bdcd5943 100644 --- a/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol +++ b/packages/contracts-bedrock/test/L2/SuperchainTokenBridge.t.sol @@ -3,6 +3,7 @@ pragma solidity 0.8.25; // Testing import { Test } from "test/setup/Test.sol"; +import { MockHelper } from "test/utils/MockHelper.sol"; // Libraries import { Predeploys } from "src/libraries/Predeploys.sol"; @@ -18,7 +19,7 @@ import { MockSuperchainERC20Implementation } from "test/mocks/SuperchainERC20Imp /// @title SuperchainTokenBridge_TestInit /// @notice Reusable test initialization for `SuperchainTokenBridge` tests. -abstract contract SuperchainTokenBridge_TestInit is Test { +abstract contract SuperchainTokenBridge_TestInit is Test, MockHelper { address internal constant ZERO_ADDRESS = address(0); string internal constant NAME = "SuperchainERC20"; string internal constant SYMBOL = "OSE"; @@ -48,12 +49,6 @@ abstract contract SuperchainTokenBridge_TestInit is Test { // ) // ); } - - /// @notice Helper function to setup a mock and expect a call to it. 
- function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { - vm.mockCall(_receiver, _calldata, _returned); - vm.expectCall(_receiver, _calldata); - } } /// @title SuperchainTokenBridge_SendERC20_Test diff --git a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol index 4966667c7c87c..9ee911eaad0ad 100644 --- a/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/FaultDisputeGame.t.sol @@ -62,6 +62,11 @@ function _changeClaimStatus(Claim _claim, VMStatus _status) pure returns (Claim } } +/// @notice Helper to return a pseudo-random claim. +function _dummyClaim() view returns (Claim) { + return Claim.wrap(keccak256(abi.encode(gasleft()))); +} + /// @title BaseFaultDisputeGame_TestInit /// @notice Base test initializer that can be used by other contracts outside of this test suite. abstract contract BaseFaultDisputeGame_TestInit is DisputeGameFactory_TestInit { @@ -210,11 +215,6 @@ abstract contract FaultDisputeGame_TestInit is BaseFaultDisputeGame_TestInit { bond_ = gameProxy.getRequiredBond(pos); } - /// @notice Helper to return a pseudo-random claim - function _dummyClaim() internal view returns (Claim) { - return Claim.wrap(keccak256(abi.encode(gasleft()))); - } - /// @notice Helper to get the localized key for an identifier in the context of the game proxy. 
function _getKey(uint256 _ident, bytes32 _localContext) internal view returns (bytes32) { bytes32 h = keccak256(abi.encode(_ident | (1 << 248), address(gameProxy), _localContext)); diff --git a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol index c6e1640cbb3f3..a6da191641645 100644 --- a/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/PermissionedDisputeGame.t.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.15; // Testing import { DisputeGameFactory_TestInit } from "test/dispute/DisputeGameFactory.t.sol"; +import { _changeClaimStatus, _dummyClaim } from "test/dispute/FaultDisputeGame.t.sol"; import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; // Libraries @@ -109,11 +110,6 @@ abstract contract PermissionedDisputeGame_TestInit is DisputeGameFactory_TestIni init({ _rootClaim: rootClaim, _absolutePrestate: absolutePrestate, _l2BlockNumber: validL2BlockNumber }); } - /// @dev Helper to return a pseudo-random claim - function _dummyClaim() internal view returns (Claim) { - return Claim.wrap(keccak256(abi.encode(gasleft()))); - } - /// @dev Helper to get the required bond for the given claim index. function _getRequiredBond(uint256 _claimIndex) internal view returns (uint256 bond_) { (,,,,, Position parent,) = gameProxy.claimData(_claimIndex); @@ -121,13 +117,6 @@ abstract contract PermissionedDisputeGame_TestInit is DisputeGameFactory_TestIni bond_ = gameProxy.getRequiredBond(pos); } - /// @dev Helper to change the VM status byte of a claim. 
- function _changeClaimStatus(Claim _claim, VMStatus _status) internal pure returns (Claim out_) { - assembly { - out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status)) - } - } - fallback() external payable { } receive() external payable { } diff --git a/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol index a25ae92bf9b19..9838c2fe3098a 100644 --- a/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/SuperFaultDisputeGame.t.sol @@ -4,6 +4,7 @@ pragma solidity ^0.8.15; // Testing import { Vm } from "forge-std/Vm.sol"; import { DisputeGameFactory_TestInit } from "test/dispute/DisputeGameFactory.t.sol"; +import { _changeClaimStatus, _dummyClaim } from "test/dispute/FaultDisputeGame.t.sol"; import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; import { ByteUtils } from "test/setup/ByteUtils.sol"; import { stdError } from "forge-std/StdError.sol"; @@ -255,23 +256,11 @@ abstract contract SuperFaultDisputeGame_TestInit is BaseSuperFaultDisputeGame_Te return _dummyRootClaim(uint64(validl2SequenceNumber)); } - /// @notice Helper to return a pseudo-random claim - function _dummyClaim() internal view returns (Claim) { - return Claim.wrap(keccak256(abi.encode(gasleft()))); - } - /// @notice Helper to get the localized key for an identifier in the context of the game proxy. function _getKey(uint256 _ident, bytes32 _localContext) internal view returns (bytes32) { bytes32 h = keccak256(abi.encode(_ident | (1 << 248), address(gameProxy), _localContext)); return bytes32((uint256(h) & ~uint256(0xFF << 248)) | (1 << 248)); } - - /// @notice Helper to change the VM status byte of a claim. 
- function _changeClaimStatus(Claim _claim, VMStatus _status) internal pure returns (Claim out_) { - assembly { - out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status)) - } - } } /// @title SuperFaultDisputeGame_Version_Test diff --git a/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol b/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol index f3b216e82458d..5564298d9d950 100644 --- a/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/SuperPermissionedDisputeGame.t.sol @@ -3,6 +3,7 @@ pragma solidity ^0.8.15; // Testing import { DisputeGameFactory_TestInit } from "test/dispute/DisputeGameFactory.t.sol"; +import { _changeClaimStatus, _dummyClaim } from "test/dispute/FaultDisputeGame.t.sol"; import { AlphabetVM } from "test/mocks/AlphabetVM.sol"; // Libraries @@ -122,11 +123,6 @@ abstract contract SuperPermissionedDisputeGame_TestInit is DisputeGameFactory_Te init({ _rootClaim: rootClaim, _absolutePrestate: absolutePrestate, _super: superRootProof }); } - /// @notice Helper to return a pseudo-random claim - function _dummyClaim() internal view returns (Claim) { - return Claim.wrap(keccak256(abi.encode(gasleft()))); - } - /// @notice Helper to get the required bond for the given claim index. function _getRequiredBond(uint256 _claimIndex) internal view returns (uint256 bond_) { (,,,,, Position parent,) = gameProxy.claimData(_claimIndex); @@ -134,13 +130,6 @@ abstract contract SuperPermissionedDisputeGame_TestInit is DisputeGameFactory_Te bond_ = gameProxy.getRequiredBond(pos); } - /// @notice Helper to change the VM status byte of a claim. 
- function _changeClaimStatus(Claim _claim, VMStatus _status) internal pure returns (Claim out_) { - assembly { - out_ := or(and(not(shl(248, 0xFF)), _claim), shl(248, _status)) - } - } - /// @notice Helper to create an arbitrary SuperRootProof function _arbitraryRootClaim() internal view returns (Types.SuperRootProof memory super_) { Types.OutputRootWithChainId[] memory outputRoots = new Types.OutputRootWithChainId[](1); diff --git a/packages/contracts-bedrock/test/utils/MockHelper.sol b/packages/contracts-bedrock/test/utils/MockHelper.sol new file mode 100644 index 0000000000000..637c68bccee9f --- /dev/null +++ b/packages/contracts-bedrock/test/utils/MockHelper.sol @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +import { Vm } from "forge-std/Vm.sol"; + +/// @title MockHelper +/// @notice Shared test helper for setting up mocks with expected calls. +abstract contract MockHelper { + Vm private constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code"))))); + + /// @notice Sets up a mock call and expects it to be called. + function _mockAndExpect(address _receiver, bytes memory _calldata, bytes memory _returned) internal { + vm.mockCall(_receiver, _calldata, _returned); + vm.expectCall(_receiver, _calldata); + } +} From 81d57ae1571d08008cbe8c773e2399e27f2a1062 Mon Sep 17 00:00:00 2001 From: Karl Floersch Date: Sun, 8 Mar 2026 13:41:12 -0400 Subject: [PATCH 074/201] fix(op-supernode): early return in VerifiedBlockAtL1 for zero l1Block (#19405) * fix(op-supernode): early return in VerifiedBlockAtL1 for zero l1Block During startup, FinalizedL1 is zero. VerifiedBlockAtL1 would iterate backwards through all timestamps calling Get() for each one, but with l1Block.Number == 0 no entry could ever match (L1Inclusion.Number <= 0), causing the virtual node driver's event loop to hang permanently and preventing all derivation. Add an early return when l1Block is the zero value. 
Co-Authored-By: Claude Opus 4.6 (1M context) * test(op-supernode): add unit tests for VerifiedBlockAtL1 - Zero l1Block returns empty immediately (the bug case) - Non-zero l1Block finds the correct matching entry - Empty DB returns empty Co-Authored-By: Claude Opus 4.6 (1M context) --------- Co-authored-by: Claude Opus 4.6 (1M context) --- .../supernode/activity/interop/interop.go | 6 ++ .../activity/interop/interop_test.go | 67 +++++++++++++++++++ 2 files changed, 73 insertions(+) diff --git a/op-supernode/supernode/activity/interop/interop.go b/op-supernode/supernode/activity/interop/interop.go index 490dce6a33952..218b2efe7b644 100644 --- a/op-supernode/supernode/activity/interop/interop.go +++ b/op-supernode/supernode/activity/interop/interop.go @@ -458,6 +458,12 @@ func (i *Interop) LatestVerifiedL2Block(chainID eth.ChainID) (eth.BlockID, uint6 // which guarantees that the verified data at that pauseAtTimestamp // originates from or before the supplied L1 block. func (i *Interop) VerifiedBlockAtL1(chainID eth.ChainID, l1Block eth.L1BlockRef) (eth.BlockID, uint64) { + // If L1 block is empty/zero (e.g. during startup before FinalizedL1 is set), + // no verified result can match, so return early. 
+ if l1Block == (eth.L1BlockRef{}) { + return eth.BlockID{}, 0 + } + // Get the last verified timestamp lastTs, ok := i.verifiedDB.LastTimestamp() if !ok { diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index 8974f25bfb502..8bfc8d6351e12 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -1535,3 +1535,70 @@ func TestReset(t *testing.T) { }) } } + +// ============================================================================= +// TestVerifiedBlockAtL1 +// ============================================================================= + +func TestVerifiedBlockAtL1(t *testing.T) { + t.Run("zero l1Block returns empty immediately", func(t *testing.T) { + h := newInteropTestHarness(t). + WithChain(10, nil). + Build() + + // Commit some verified results so the DB is non-empty + for ts := uint64(100); ts <= 110; ts++ { + err := h.interop.verifiedDB.Commit(VerifiedResult{ + Timestamp: ts, + L1Inclusion: eth.BlockID{Number: ts + 1000}, + L2Heads: map[eth.ChainID]eth.BlockID{h.Mock(10).id: {Number: ts}}, + }) + require.NoError(t, err) + } + + // Call with zero L1BlockRef — should return empty without scanning the DB + blockID, ts := h.interop.VerifiedBlockAtL1(h.Mock(10).id, eth.L1BlockRef{}) + require.Equal(t, eth.BlockID{}, blockID) + require.Equal(t, uint64(0), ts) + }) + + t.Run("non-zero l1Block finds matching entry", func(t *testing.T) { + h := newInteropTestHarness(t). + WithChain(10, nil). 
+ Build() + + chainID := h.Mock(10).id + expectedL2 := eth.BlockID{Hash: common.Hash{0xaa}, Number: 105} + + for ts := uint64(100); ts <= 110; ts++ { + l2Head := eth.BlockID{Hash: common.Hash{byte(ts)}, Number: ts} + if ts == 105 { + l2Head = expectedL2 + } + err := h.interop.verifiedDB.Commit(VerifiedResult{ + Timestamp: ts, + L1Inclusion: eth.BlockID{Number: ts * 10}, // L1 inclusion grows with timestamp + L2Heads: map[eth.ChainID]eth.BlockID{chainID: l2Head}, + }) + require.NoError(t, err) + } + + // Query for L1 block 1059 — should match timestamp 105 (L1Inclusion.Number=1050 <= 1059) + // but not timestamp 106 (L1Inclusion.Number=1060 > 1059) + l1Block := eth.L1BlockRef{Hash: common.Hash{0x01}, Number: 1059, Time: 999} + blockID, ts := h.interop.VerifiedBlockAtL1(chainID, l1Block) + require.Equal(t, expectedL2, blockID) + require.Equal(t, uint64(105), ts) + }) + + t.Run("empty DB returns empty", func(t *testing.T) { + h := newInteropTestHarness(t). + WithChain(10, nil). + Build() + + l1Block := eth.L1BlockRef{Hash: common.Hash{0x01}, Number: 1000, Time: 999} + blockID, ts := h.interop.VerifiedBlockAtL1(h.Mock(10).id, l1Block) + require.Equal(t, eth.BlockID{}, blockID) + require.Equal(t, uint64(0), ts) + }) +} From cb794fb85e59f64c622736ec18838f6d7ab12449 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Emil=20S=C3=B8rensen?= Date: Sun, 8 Mar 2026 23:38:18 +0100 Subject: [PATCH 075/201] op-e2e: Simplify output game creation by auto-deriving rootClaim (#19294) * op-e2e: Simplify output game creation by auto-deriving rootClaim * op-e2e: fix build errors in faultproofs tests * op-e2e: Simplify output game creation by auto-deriving rootClaim --- op-e2e/e2eutils/disputegame/helper.go | 62 +++++++++++++--------- op-e2e/faultproofs/multi_test.go | 4 +- op-e2e/faultproofs/output_alphabet_test.go | 18 +++---- op-e2e/faultproofs/output_cannon_test.go | 40 +++++++------- op-e2e/faultproofs/permissioned_test.go | 2 +- op-e2e/faultproofs/precompile_test.go | 2 +- 
op-e2e/faultproofs/preimages_test.go | 2 +- op-e2e/faultproofs/response_delay_test.go | 4 +- 8 files changed, 73 insertions(+), 61 deletions(-) diff --git a/op-e2e/e2eutils/disputegame/helper.go b/op-e2e/e2eutils/disputegame/helper.go index d5021d7314de6..868169652556f 100644 --- a/op-e2e/e2eutils/disputegame/helper.go +++ b/op-e2e/e2eutils/disputegame/helper.go @@ -56,6 +56,7 @@ type GameCfg struct { allowUnsafe bool superOutputRoots []eth.Bytes32 super eth.Super + outputRoot common.Hash } type GameOpt interface { Apply(cfg *GameCfg) @@ -92,6 +93,13 @@ func WithSuper(super eth.Super) GameOpt { }) } +// WithOutputRoot allows specifying a custom output root. +func WithOutputRoot(outputRoot common.Hash) GameOpt { + return gameOptFn(func(c *GameCfg) { + c.outputRoot = outputRoot + }) +} + type DisputeSystem interface { L1BeaconEndpoint() endpoint.RestHTTP SupervisorClient() *sources.SupervisorClient @@ -183,29 +191,31 @@ func NewGameCfg(opts ...GameOpt) *GameCfg { return cfg } -func (h *FactoryHelper) StartOutputCannonGameWithCorrectRoot(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputCannonGameHelper { - cfg := NewGameCfg(opts...) - h.WaitForBlock(l2Node, l2BlockNumber, cfg) - output, err := h.System.RollupClient(l2Node).OutputAtBlock(ctx, l2BlockNumber) - h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) - return h.StartOutputCannonGame(ctx, l2Node, l2BlockNumber, common.Hash(output.OutputRoot), opts...) -} - -func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputCannonGameHelper { - return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, rootClaim, cannonGameType, opts...) 
+func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputCannonGameHelper { + return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, cannonGameType, opts...) } -func (h *FactoryHelper) StartPermissionedGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputCannonGameHelper { - return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, rootClaim, permissionedGameType, opts...) +func (h *FactoryHelper) StartPermissionedGame(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputCannonGameHelper { + return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, permissionedGameType, opts...) } -func (h *FactoryHelper) startOutputCannonGameOfType(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, gameType uint32, opts ...GameOpt) *OutputCannonGameHelper { +func (h *FactoryHelper) startOutputCannonGameOfType(ctx context.Context, l2Node string, l2BlockNumber uint64, gameType uint32, opts ...GameOpt) *OutputCannonGameHelper { cfg := NewGameCfg(opts...) 
logger := testlog.Logger(h.T, log.LevelInfo).New("role", "OutputCannonGameHelper") rollupClient := h.System.RollupClient(l2Node) l2Client := h.System.NodeClient(l2Node) - extraData := h.CreateBisectionGameExtraData(l2Node, l2BlockNumber, cfg) + extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber, cfg) + + // If a custom output root was provided via options, use it; otherwise derive from extraData + var rootClaim common.Hash + if cfg.outputRoot != (common.Hash{}) { + rootClaim = cfg.outputRoot + } else { + output, err := rollupClient.OutputAtBlock(ctx, l2BlockNumber) + h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) + rootClaim = common.Hash(output.OutputRoot) + } ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() @@ -307,21 +317,23 @@ func (h *FactoryHelper) GetL1Head(ctx context.Context, game contracts.FaultDispu return l1Head } -func (h *FactoryHelper) StartOutputAlphabetGameWithCorrectRoot(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputAlphabetGameHelper { - cfg := NewGameCfg(opts...) - h.WaitForBlock(l2Node, l2BlockNumber, cfg) - output, err := h.System.RollupClient(l2Node).OutputAtBlock(ctx, l2BlockNumber) - h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) - return h.StartOutputAlphabetGame(ctx, l2Node, l2BlockNumber, common.Hash(output.OutputRoot)) -} - -func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputAlphabetGameHelper { +func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputAlphabetGameHelper { cfg := NewGameCfg(opts...) 
logger := testlog.Logger(h.T, log.LevelInfo).New("role", "OutputAlphabetGameHelper") rollupClient := h.System.RollupClient(l2Node) l2Client := h.System.NodeClient(l2Node) - extraData := h.CreateBisectionGameExtraData(l2Node, l2BlockNumber, cfg) + extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber, cfg) + + // If a custom output root was provided via options, use it; otherwise derive from extraData + var rootClaim common.Hash + if cfg.outputRoot != (common.Hash{}) { + rootClaim = cfg.outputRoot + } else { + output, err := rollupClient.OutputAtBlock(ctx, l2BlockNumber) + h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) + rootClaim = common.Hash(output.OutputRoot) + } ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() @@ -352,7 +364,7 @@ func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node stri } } -func (h *FactoryHelper) CreateBisectionGameExtraData(l2Node string, l2BlockNumber uint64, cfg *GameCfg) []byte { +func (h *FactoryHelper) createBisectionGameExtraData(l2Node string, l2BlockNumber uint64, cfg *GameCfg) []byte { h.WaitForBlock(l2Node, l2BlockNumber, cfg) h.T.Logf("Creating game with l2 block number: %v", l2BlockNumber) extraData := make([]byte, 32) diff --git a/op-e2e/faultproofs/multi_test.go b/op-e2e/faultproofs/multi_test.go index e8e87268ea52a..cde98203fdbce 100644 --- a/op-e2e/faultproofs/multi_test.go +++ b/op-e2e/faultproofs/multi_test.go @@ -20,8 +20,8 @@ func TestMultipleGameTypes(t *testing.T) { gameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game1 := gameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) - game2 := gameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xbb}) + game1 := gameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game2 := gameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xbb})) 
latestClaim1 := game1.DisputeLastBlock(ctx) latestClaim2 := game2.DisputeLastBlock(ctx) diff --git a/op-e2e/faultproofs/output_alphabet_test.go b/op-e2e/faultproofs/output_alphabet_test.go index 135ab233a8815..ba246f41b87a4 100644 --- a/op-e2e/faultproofs/output_alphabet_test.go +++ b/op-e2e/faultproofs/output_alphabet_test.go @@ -24,7 +24,7 @@ func TestOutputAlphabetGame_ChallengerWins(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, common.Hash{0xff}) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0xff})) correctTrace := game.CreateHonestActor(ctx, "sequencer") game.LogGameData(ctx) @@ -81,7 +81,7 @@ func TestOutputAlphabetGame_ReclaimBond(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, common.Hash{0xff}) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0xff})) game.LogGameData(ctx) // The dispute game should have a zero balance @@ -151,7 +151,7 @@ func TestOutputAlphabetGame_ValidOutputRoot(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGameWithCorrectRoot(ctx, "sequencer", 2) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2) correctTrace := game.CreateHonestActor(ctx, "sequencer") game.LogGameData(ctx) claim := game.DisputeLastBlock(ctx) @@ -190,9 +190,9 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) { disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) var game *disputegame.OutputAlphabetGameHelper if isRootCorrect { - game = disputeGameFactory.StartOutputAlphabetGameWithCorrectRoot(ctx, "sequencer", 1) + game = 
disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1) } else { - game = disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) + game = disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa, 0xbb, 0xcc})) } claim := game.DisputeLastBlock(ctx) @@ -256,7 +256,7 @@ func TestOutputAlphabetGame_FreeloaderEarnsNothing(t *testing.T) { require.Nil(t, err) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGameWithCorrectRoot(ctx, "sequencer", 2) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2) correctTrace := game.CreateHonestActor(ctx, "sequencer") game.LogGameData(ctx) claim := game.DisputeLastBlock(ctx) @@ -319,14 +319,14 @@ func TestHighestActedL1BlockMetric(t *testing.T) { disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) honestChallenger := disputeGameFactory.StartChallenger(ctx, "Honest", challenger.WithAlphabet(), challenger.WithPrivKey(sys.Cfg.Secrets.Alice)) - game1 := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa}) + game1 := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) sys.AdvanceTime(game1.MaxClockDuration(ctx)) require.NoError(t, wait.ForNextBlock(ctx, l1Client)) game1.WaitForGameStatus(ctx, types.GameStatusDefenderWon) - disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2, common.Hash{0xaa}) - disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, common.Hash{0xaa}) + disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2, disputegame.WithOutputRoot(common.Hash{0xaa})) + disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0xaa})) honestChallenger.WaitL1HeadActedOn(ctx, l1Client) diff --git a/op-e2e/faultproofs/output_cannon_test.go b/op-e2e/faultproofs/output_cannon_test.go index 
9e758590b5e6a..5685d48a325d7 100644 --- a/op-e2e/faultproofs/output_cannon_test.go +++ b/op-e2e/faultproofs/output_cannon_test.go @@ -30,7 +30,7 @@ func testOutputCannonGame(t *testing.T, allocType config.AllocType) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 4, common.Hash{0x01}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 4, disputegame.WithOutputRoot(common.Hash{0x01})) arena := createOutputGameArena(t, sys, game) testCannonGame(t, ctx, arena, &game.SplitGameHelper) } @@ -46,7 +46,7 @@ func testOutputCannonChallengeAllZeroClaim(t *testing.T, allocType config.AllocT t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, common.Hash{}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{})) arena := createOutputGameArena(t, sys, game) testCannonChallengeAllZeroClaim(t, ctx, arena, &game.SplitGameHelper) } @@ -68,7 +68,7 @@ func TestOutputCannon_PublishCannonRootClaim(t *testing.T) { sys, _ := StartFaultDisputeSystem(t, WithAllocType(allocType)) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", test.disputeL2BlockNumber, common.Hash{0x01}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", test.disputeL2BlockNumber, disputegame.WithOutputRoot(common.Hash{0x01})) game.DisputeLastBlock(ctx) game.LogGameData(ctx) @@ -99,7 +99,7 @@ func TestOutputCannonDisputeGame(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 
0xaa})) require.NotNil(t, game) game.LogGameData(ctx) @@ -137,7 +137,7 @@ func testOutputCannonDefendStep(t *testing.T, allocType config.AllocType) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) arena := createOutputGameArena(t, sys, game) testCannonDefendStep(t, ctx, arena, &game.SplitGameHelper) } @@ -163,7 +163,7 @@ func testOutputCannonStepWithLargePreimage(t *testing.T, allocType config.AllocT l2BlockNumber := safeHead.NumberU64() disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Dispute any block - it will have to read the L1 batches to see if the block is reached - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", l2BlockNumber, common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", l2BlockNumber, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) require.NotNil(t, game) outputRootClaim := game.DisputeBlock(ctx, l2BlockNumber) game.LogGameData(ctx) @@ -250,7 +250,7 @@ func testPreimageStep(t *testing.T, allocType config.AllocType, preimageOptConfi t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) @@ -299,7 +299,7 @@ func testOutputCannonStepWithKzgPointEvaluation(t *testing.T, allocType config.A t.Logf("KZG Point Evaluation block number: %d", precompileBlock) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, 
"sequencer", bigs.Uint64Strict(precompileBlock), common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", bigs.Uint64Strict(precompileBlock), disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) @@ -337,7 +337,7 @@ func testOutputCannonProposedOutputRootValid_AttackWithCorrectTrace(t *testing.T t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", 1) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1) arena := createOutputGameArena(t, sys, game) testCannonProposalValid_AttackWithCorrectTrace(t, ctx, arena, &game.SplitGameHelper) } @@ -352,7 +352,7 @@ func testOutputCannonProposedOutputRootValid_DefendWithCorrectTrace(t *testing.T t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", 1) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1) arena := createOutputGameArena(t, sys, game) testCannonProposalValid_DefendWithCorrectTrace(t, ctx, arena, &game.SplitGameHelper) } @@ -368,7 +368,7 @@ func testOutputCannonPoisonedPostState(t *testing.T, allocType config.AllocType) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) arena := createOutputGameArena(t, sys, game) testCannonPoisonedPostState(t, ctx, arena, &game.SplitGameHelper) } @@ -384,7 +384,7 @@ func testDisputeOutputRootBeyondProposedBlockValidOutputRoot(t *testing.T, alloc disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := 
disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", 1) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1) arena := createOutputGameArena(t, sys, game) testDisputeRootBeyondProposedBlockValidOutputRoot(t, ctx, arena, &game.SplitGameHelper) } @@ -400,7 +400,7 @@ func testDisputeOutputRootBeyondProposedBlockInvalidOutputRoot(t *testing.T, all disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) arena := createOutputGameArena(t, sys, game) testDisputeRootBeyondProposedBlockInvalidOutputRoot(t, ctx, arena, &game.SplitGameHelper) } @@ -416,7 +416,7 @@ func testTestDisputeOutputRootChangeClaimedOutputRoot(t *testing.T, allocType co disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) arena := createOutputGameArena(t, sys, game) testDisputeRootChangeClaimedRoot(t, ctx, arena, &game.SplitGameHelper) } @@ -460,7 +460,7 @@ func TestInvalidateUnsafeProposal(t *testing.T) { blockNum := uint64(1) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is _dishonest_ because the required data is not available on L1 - game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", blockNum, disputegame.WithUnsafeProposal()) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", blockNum, disputegame.WithUnsafeProposal()) correctTrace := game.CreateHonestActor(ctx, "sequencer", disputegame.WithPrivKey(sys.Cfg.Secrets.Alice)) @@ -521,7 +521,7 @@ func TestInvalidateProposalForFutureBlock(t *testing.T) 
{ farFutureBlockNum := uint64(10_000_000) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is _dishonest_ because the required data is not available on L1 - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", farFutureBlockNum, common.Hash{0xaa}, disputegame.WithFutureProposal()) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", farFutureBlockNum, disputegame.WithOutputRoot(common.Hash{0xaa}), disputegame.WithFutureProposal()) correctTrace := game.CreateHonestActor(ctx, "sequencer", disputegame.WithPrivKey(sys.Cfg.Secrets.Alice)) @@ -562,7 +562,7 @@ func testInvalidateCorrectProposalFutureBlock(t *testing.T, allocType config.All require.NoError(t, err, "Failed to get output at safe head") // Create a dispute game with an output root that is valid at `safeHead`, but that claims to correspond to block // `safeHead.Number + 10000`. This is dishonest, because this block does not exist yet. - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 10_000, common.Hash(output.OutputRoot), disputegame.WithFutureProposal()) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 10_000, disputegame.WithOutputRoot(common.Hash(output.OutputRoot)), disputegame.WithFutureProposal()) // Start the honest challenger. 
game.StartChallenger(ctx, "Honest", challenger.WithPrivKey(sys.Cfg.Secrets.Bob)) @@ -595,7 +595,7 @@ func testOutputCannonHonestSafeTraceExtensionValidRoot(t *testing.T, allocType c // Create a dispute game with an honest claim disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", safeHeadNum-1) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", safeHeadNum-1) require.NotNil(t, game) // Create a correct trace actor with an honest trace extending to L2 block #4 @@ -649,7 +649,7 @@ func testOutputCannonHonestSafeTraceExtensionInvalidRoot(t *testing.T, allocType // Create a dispute game with a dishonest claim @ L2 block #4 disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", safeHeadNum-1, common.Hash{0xCA, 0xFE}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", safeHeadNum-1, disputegame.WithOutputRoot(common.Hash{0xCA, 0xFE})) require.NotNil(t, game) // Create a correct trace actor with an honest trace extending to L2 block #5 @@ -699,7 +699,7 @@ func testAgreeFirstBlockWithOriginOf1(t *testing.T, allocType config.AllocType) // Create a dispute game with a dishonest claim @ L2 block #4 disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Make the agreed block the first one with L1 origin of block 1 so the claim is blockNum+1 - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", blockNum+1, common.Hash{0xCA, 0xFE}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", blockNum+1, disputegame.WithOutputRoot(common.Hash{0xCA, 0xFE})) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) diff --git a/op-e2e/faultproofs/permissioned_test.go b/op-e2e/faultproofs/permissioned_test.go index f6b0920b107b5..f7193a81788f2 100644 --- a/op-e2e/faultproofs/permissioned_test.go +++ 
b/op-e2e/faultproofs/permissioned_test.go @@ -20,7 +20,7 @@ func TestPermissionedGameType(t *testing.T) { gameFactory := disputegame.NewFactoryHelper(t, ctx, sys, disputegame.WithFactoryPrivKey(sys.Cfg.Secrets.Proposer)) - game := gameFactory.StartPermissionedGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) + game := gameFactory.StartPermissionedGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) // Start a challenger with both cannon and alphabet support gameFactory.StartChallenger(ctx, "TowerDefense", diff --git a/op-e2e/faultproofs/precompile_test.go b/op-e2e/faultproofs/precompile_test.go index 226716be1c37c..f7570107c2eaf 100644 --- a/op-e2e/faultproofs/precompile_test.go +++ b/op-e2e/faultproofs/precompile_test.go @@ -117,7 +117,7 @@ func TestDisputePrecompile(t *testing.T) { }) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", bigs.Uint64Strict(receipt.BlockNumber), common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", bigs.Uint64Strict(receipt.BlockNumber), disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) diff --git a/op-e2e/faultproofs/preimages_test.go b/op-e2e/faultproofs/preimages_test.go index aef8671f0613e..01a0674f63db8 100644 --- a/op-e2e/faultproofs/preimages_test.go +++ b/op-e2e/faultproofs/preimages_test.go @@ -38,7 +38,7 @@ func TestLocalPreimages(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, common.Hash{0x01, 0xaa}) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) require.NotNil(t, game) claim := game.DisputeLastBlock(ctx) diff --git a/op-e2e/faultproofs/response_delay_test.go 
b/op-e2e/faultproofs/response_delay_test.go index 2e809345aca33..69eabade65d5a 100644 --- a/op-e2e/faultproofs/response_delay_test.go +++ b/op-e2e/faultproofs/response_delay_test.go @@ -49,7 +49,7 @@ func TestChallengerResponseDelay(t *testing.T) { // Create a dispute game with incorrect root to trigger challenger response disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa, 0xbb, 0xcc})) // Make an invalid claim that the honest challenger should counter invalidClaim := game.RootClaim(ctx) @@ -95,7 +95,7 @@ func TestChallengerResponseDelayWithMultipleActions(t *testing.T) { responseDelay := 2 * time.Second disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa, 0xbb, 0xcc})) // Start challenger with response delay game.StartChallenger(ctx, "sequencer", "DelayedChallenger", From a4bfa2695cf32a7436973849c5d6052a8fe19466 Mon Sep 17 00:00:00 2001 From: Chiin <77933451+0xChin@users.noreply.github.com> Date: Sun, 8 Mar 2026 19:50:29 -0300 Subject: [PATCH 076/201] feat: register zk proofs feature flag (#19388) * feat: register zk proofs feature flag * chore: remove interface * chore: add zk dev feature to op deployer * chore: undo foundry.toml modification * feat(ci): add zk proofs flag to ci tests matrix * chore: rename zk proofs flag * chore: rename comments and run just pr * chore: remove single zk dispute game flag from CI * chore: add todo referencing issue --- .circleci/continue/main.yml | 3 +-- op-deployer/pkg/deployer/devfeatures.go | 4 ++++ packages/contracts-bedrock/scripts/libraries/Config.sol | 5 +++++ 
packages/contracts-bedrock/src/libraries/DevFeatures.sol | 4 ++++ .../test/dispute/zk/OptimisticZkGame.t.sol | 2 ++ packages/contracts-bedrock/test/setup/FeatureFlags.sol | 6 ++++++ 6 files changed, 22 insertions(+), 2 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index d9165de4309a3..4b7744644ed0e 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -2362,7 +2362,6 @@ jobs: - "op-program/bin/meta*" - "rust/kona/prestate-artifacts-*/" - # Aggregator job - allows downstream jobs to depend on a single job instead of listing all build jobs. rust-binaries-for-sysgo: docker: @@ -2373,7 +2372,6 @@ jobs: name: All Rust binaries ready command: echo "All Rust binaries built and persisted to workspace" - # ============================================================================ publish-cannon-prestates: @@ -2849,6 +2847,7 @@ workflows: - OPCM_V2 - OPCM_V2,CUSTOM_GAS_TOKEN - OPCM_V2,OPTIMISM_PORTAL_INTEROP + - OPCM_V2,ZK_DISPUTE_GAME context: - circleci-repo-readonly-authenticated-github-token - slack diff --git a/op-deployer/pkg/deployer/devfeatures.go b/op-deployer/pkg/deployer/devfeatures.go index 68d867366fac4..f86d478e9c3cd 100644 --- a/op-deployer/pkg/deployer/devfeatures.go +++ b/op-deployer/pkg/deployer/devfeatures.go @@ -23,6 +23,10 @@ var ( // L2CMDevFlag enables L2CM. L2CMDevFlag = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000100000") + + // ZKDisputeGameDevFlag enables the ZK dispute game system (ZKDisputeGame). + // TODO(#19432): Use this flag in the OPCM/OPD integration pipeline. + ZKDisputeGameDevFlag = common.HexToHash("0x0000000000000000000000000000000000000000000000000000000001000000") ) // IsDevFeatureEnabled checks if a specific development feature is enabled in a feature bitmap. 
diff --git a/packages/contracts-bedrock/scripts/libraries/Config.sol b/packages/contracts-bedrock/scripts/libraries/Config.sol index f38a1e153a7a7..52e6ce05734c7 100644 --- a/packages/contracts-bedrock/scripts/libraries/Config.sol +++ b/packages/contracts-bedrock/scripts/libraries/Config.sol @@ -303,6 +303,11 @@ library Config { return vm.envOr("DEV_FEATURE__L2CM", false); } + /// @notice Returns true if the development feature ZK_DISPUTE_GAME is enabled. + function devFeatureZkDisputeGame() internal view returns (bool) { + return vm.envOr("DEV_FEATURE__ZK_DISPUTE_GAME", false); + } + /// @notice Returns true if the system feature custom_gas_token is enabled. function sysFeatureCustomGasToken() internal view returns (bool) { return vm.envOr("SYS_FEATURE__CUSTOM_GAS_TOKEN", false); diff --git a/packages/contracts-bedrock/src/libraries/DevFeatures.sol b/packages/contracts-bedrock/src/libraries/DevFeatures.sol index 4fa5287c1b578..f14c47048b249 100644 --- a/packages/contracts-bedrock/src/libraries/DevFeatures.sol +++ b/packages/contracts-bedrock/src/libraries/DevFeatures.sol @@ -31,6 +31,10 @@ library DevFeatures { /// @notice The feature that enables L2CM. bytes32 public constant L2CM = bytes32(0x0000000000000000000000000000000000000000000000000000000000100000); + /// @notice The feature that enables the ZK dispute game system (ZKDisputeGame). + bytes32 public constant ZK_DISPUTE_GAME = + bytes32(0x0000000000000000000000000000000000000000000000000000000001000000); + /// @notice Checks if a feature is enabled in a bitmap. Note that this function does not check /// that the input feature represents a single feature and the bitwise AND operation /// allows for multiple features to be enabled at once. 
Users should generally check diff --git a/packages/contracts-bedrock/test/dispute/zk/OptimisticZkGame.t.sol b/packages/contracts-bedrock/test/dispute/zk/OptimisticZkGame.t.sol index c3df8a1838d96..fc1ffb5335ca5 100644 --- a/packages/contracts-bedrock/test/dispute/zk/OptimisticZkGame.t.sol +++ b/packages/contracts-bedrock/test/dispute/zk/OptimisticZkGame.t.sol @@ -5,6 +5,7 @@ pragma solidity 0.8.15; import { DisputeGameFactory_TestInit } from "test/dispute/DisputeGameFactory.t.sol"; // Libraries +import { DevFeatures } from "src/libraries/DevFeatures.sol"; import { Claim, Duration, GameStatus, GameType, Timestamp } from "src/dispute/lib/Types.sol"; import { BadAuth, @@ -77,6 +78,7 @@ abstract contract OptimisticZkGame_TestInit is DisputeGameFactory_TestInit { function setUp() public virtual override { super.setUp(); + skipIfDevFeatureDisabled(DevFeatures.ZK_DISPUTE_GAME); skipIfForkTest("Skip not supported yet"); // Get anchor state to calculate valid sequence numbers diff --git a/packages/contracts-bedrock/test/setup/FeatureFlags.sol b/packages/contracts-bedrock/test/setup/FeatureFlags.sol index b9c22cae02229..c28957556112e 100644 --- a/packages/contracts-bedrock/test/setup/FeatureFlags.sol +++ b/packages/contracts-bedrock/test/setup/FeatureFlags.sol @@ -49,6 +49,10 @@ abstract contract FeatureFlags { console.log("Setup: DEV_FEATURE__L2CM is enabled"); devFeatureBitmap |= DevFeatures.L2CM; } + if (Config.devFeatureZkDisputeGame()) { + console.log("Setup: DEV_FEATURE__ZK_DISPUTE_GAME is enabled"); + devFeatureBitmap |= DevFeatures.ZK_DISPUTE_GAME; + } } /// @notice Returns the string name of a feature. 
@@ -61,6 +65,8 @@ abstract contract FeatureFlags { return "DEV_FEATURE__OPCM_V2"; } else if (_feature == DevFeatures.L2CM) { return "DEV_FEATURE__L2CM"; + } else if (_feature == DevFeatures.ZK_DISPUTE_GAME) { + return "DEV_FEATURE__ZK_DISPUTE_GAME"; } else if (_feature == Features.CUSTOM_GAS_TOKEN) { return "SYS_FEATURE__CUSTOM_GAS_TOKEN"; } else if (_feature == Features.ETH_LOCKBOX) { From 470ae6ef2a6584f1d152ddff949b5dca19b61700 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Mon, 9 Mar 2026 12:04:08 -0400 Subject: [PATCH 077/201] chore(ci): add op to chain-specific upgrade test matrix, remove base (#19448) Add "op" to the fork_op_chain matrix for chain-specific upgrade tests (both PR and develop jobs). Remove "base" variants from upgrade and sync test matrices as Base's on-chain state has diverged from the standard OP Stack upgrade path. Co-authored-by: smartcontracts Co-authored-by: Claude Opus 4.6 --- .circleci/continue/main.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 4b7744644ed0e..6defd75af9b28 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -2938,7 +2938,7 @@ workflows: test_profile: liteci matrix: parameters: - fork_op_chain: ["base", "ink", "unichain"] + fork_op_chain: ["op", "ink", "unichain"] context: - circleci-repo-readonly-authenticated-github-token - slack @@ -2954,7 +2954,7 @@ workflows: test_profile: ci matrix: parameters: - fork_op_chain: ["base", "ink", "unichain"] + fork_op_chain: ["op", "ink", "unichain"] context: - circleci-repo-readonly-authenticated-github-token - slack @@ -3550,10 +3550,8 @@ workflows: network_preset: [ "op-sepolia", - "base-sepolia", "unichain-sepolia", "op-mainnet", - "base-mainnet", ] l2_cl_syncmode: ["consensus-layer", "execution-layer"] From c694b048b31396c3b5e64d0a4fc19cf1204e9689 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 10 
Mar 2026 03:15:22 +1000 Subject: [PATCH 078/201] feat(op-program): add vm-compat-triage skill (#19444) * feat(op-program): add vm-compat-triage skill for Claude Code Add a skill that guides triage of analyze-op-program-client CI failures. The skill walks through each new vm-compat finding interactively, letting the engineer mark call stacks as unreachable or acceptable, then regenerates the baseline from scratch. Key features: - Prefers running vm-compat locally (CI logs truncate large JSON output) - Shows full call stacks with source file paths from the PR branch - Cascades unreachable decisions to auto-resolve related findings - Regenerates baseline from scratch (not merge) to remove stale entries - Includes MIPS64 syscall reference table Co-Authored-By: Claude Opus 4.6 * fix(skill): remove truncated CI log output as a data source CircleCI truncates large log output, silently dropping findings from the beginning of the JSON array. Triage based on incomplete data gives false confidence. Only CI artifacts and local execution are supported. Co-Authored-By: Claude Opus 4.6 * fix(skill): remove triage-report generation step The triage report was an intermediate artifact that didn't add value. Decisions are captured in the triage state file and the baseline itself. Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .claude/skills/vm-compat-triage/SKILL.md | 318 +++++++++++++++++++++++ 1 file changed, 318 insertions(+) create mode 100644 .claude/skills/vm-compat-triage/SKILL.md diff --git a/.claude/skills/vm-compat-triage/SKILL.md b/.claude/skills/vm-compat-triage/SKILL.md new file mode 100644 index 0000000000000..2a15f5de084b3 --- /dev/null +++ b/.claude/skills/vm-compat-triage/SKILL.md @@ -0,0 +1,318 @@ +# vm-compat-triage + +Triage `analyze-op-program-client` CI failures by reviewing new syscall/opcode findings, determining reachability, and updating the baseline. 
+ +## When to Use + +Use when the `analyze-op-program-client` CI job fails on a PR. The job runs `vm-compat` to detect syscalls and opcodes in `op-program` that are not supported by Cannon. New findings need human triage to determine if they are reachable at runtime. + +### Trigger Phrases + +- "Triage the vm-compat failure" +- "analyze-op-program-client failed" +- "vm-compat CI failure" +- "op-program compatibility test failed" + +## Prerequisites + +- `gh` CLI authenticated with GitHub +- `jq` available +- `vm-compat` binary (install: `mise use -g ubi:ChainSafe/vm-compat@1.1.0`, or download from GitHub releases — binary name is `analyzer-linux-arm64` / `analyzer-linux-amd64`) +- `llvm-objdump` — **Linux only** (install: `sudo apt-get install -y llvm`). Not available on macOS. On macOS, use `make run-vm-compat` in the `op-program` directory which runs the analysis inside Docker. +- The PR URL or number (ask the user if not provided) + +## MIPS64 Syscall Reference + +Syscall numbers in findings are Linux MIPS64 ABI (start at 5000). 
Use this lookup: + +| Number | Name | Number | Name | Number | Name | +|--------|------|--------|------|--------|------| +| 5000 | read | 5001 | write | 5002 | open | +| 5003 | close | 5004 | stat | 5005 | fstat | +| 5006 | lstat | 5007 | poll | 5008 | lseek | +| 5009 | mmap | 5010 | mprotect | 5011 | munmap | +| 5012 | brk | 5013 | ioctl | 5014 | pread64 | +| 5015 | pwrite64 | 5016 | readv | 5017 | writev | +| 5018 | access | 5019 | pipe | 5020 | select | +| 5021 | sched_yield | 5022 | mremap | 5023 | msync | +| 5024 | mincore | 5025 | madvise | 5026 | shmget | +| 5027 | shmat | 5028 | shmctl | 5029 | dup | +| 5030 | dup2 | 5031 | pause | 5032 | nanosleep | +| 5033 | getitimer | 5034 | setitimer | 5035 | alarm | +| 5036 | getpid | 5037 | sendfile | 5038 | socket | +| 5039 | connect | 5040 | accept | 5041 | sendto | +| 5042 | recvfrom | 5043 | sendmsg | 5044 | recvmsg | +| 5045 | shutdown | 5046 | bind | 5047 | listen | +| 5048 | getsockname | 5049 | getpeername | 5050 | socketpair | +| 5051 | setsockopt | 5052 | getsockopt | 5053 | clone | +| 5054 | fork | 5055 | execve | 5056 | exit | +| 5057 | wait4 | 5058 | kill | 5059 | uname | +| 5060 | semget | 5061 | semop | 5062 | semctl | +| 5063 | shmdt | 5064 | msgget | 5065 | msgsnd | +| 5066 | msgrcv | 5067 | msgctl | 5068 | fcntl | +| 5069 | flock | 5070 | fsync | 5071 | fdatasync | +| 5072 | truncate | 5073 | ftruncate | 5074 | getdents | +| 5075 | getcwd | 5076 | chdir | 5077 | fchdir | +| 5078 | rename | 5079 | mkdir | 5080 | rmdir | +| 5081 | creat | 5082 | link | 5083 | unlink | +| 5084 | symlink | 5085 | readlink | 5086 | chmod | +| 5087 | fchmod | 5088 | chown | 5089 | fchown | +| 5090 | lchown | 5091 | umask | 5092 | gettimeofday | +| 5093 | getrlimit | 5094 | getrusage | 5095 | sysinfo | +| 5096 | times | 5097 | ptrace | 5098 | getuid | +| 5099 | syslog | 5100 | getgid | 5101 | setuid | +| 5102 | setgid | 5103 | geteuid | 5104 | getegid | +| 5105 | setpgid | 5106 | getppid | 5107 | getpgrp | +| 5108 | setsid 
| 5109 | setreuid | 5110 | setregid | +| 5111 | getgroups | 5112 | setgroups | 5113 | setresuid | +| 5114 | getresuid | 5115 | setresgid | 5116 | getresgid | +| 5117 | getpgid | 5118 | setfsuid | 5119 | setfsgid | +| 5120 | getsid | 5121 | capget | 5122 | capset | +| 5129 | rt_sigqueueinfo | 5130 | rt_sigsuspend | 5131 | sigaltstack | +| 5132 | utime | 5133 | mknod | 5134 | personality | +| 5135 | ustat | 5136 | statfs | 5137 | fstatfs | +| 5138 | sysfs | 5139 | getpriority | 5140 | setpriority | +| 5141 | sched_setparam | 5142 | sched_getparam | 5143 | sched_setscheduler | +| 5144 | sched_getscheduler | 5145 | sched_get_priority_max | 5146 | sched_get_priority_min | +| 5147 | sched_rr_get_interval | 5148 | mlock | 5149 | munlock | +| 5150 | mlockall | 5151 | munlockall | 5152 | vhangup | +| 5153 | pivot_root | 5154 | _sysctl | 5155 | prctl | +| 5190 | semtimedop | 5196 | fadvise64 | 5205 | epoll_create | +| 5206 | epoll_ctl | 5207 | epoll_wait | 5208 | remap_file_pages | +| 5209 | rt_sigreturn | 5210 | set_tid_address | 5211 | restart_syscall | +| 5215 | clock_gettime | 5216 | clock_getres | 5217 | clock_nanosleep | +| 5220 | exit_group | 5223 | tgkill | 5225 | openat | +| 5247 | waitid | 5248 | set_robust_list | 5249 | get_robust_list | +| 5253 | unlinkat | 5254 | renameat | 5257 | fchmodat | +| 5261 | futimesat | 5272 | utimensat | 5279 | epoll_create1 | +| 5284 | preadv | 5285 | pwritev | 5288 | prlimit64 | +| 5297 | getrandom | 5308 | mlock2 | 5316 | copy_file_range | +| 5317 | preadv2 | 5318 | pwritev2 | | | + +For syscall numbers not in this table, look up the number at the MIPS64 syscall table in the Linux kernel source (`arch/mips/kernel/syscalls/syscall_n64.tbl`), or search the web for "linux mips64 syscall {number}". + +## Workflow + +### Step 1: Get the PR and failure data + +If the user hasn't provided a PR URL, ask for it. + +Extract the PR number and fetch the failing check run. 
The `analyze-op-program-client` job runs inside the CircleCI `main` workflow, not as a GitHub check run directly. Look it up via CircleCI API: + +```bash +PR_NUM="" +BRANCH=$(gh pr view "$PR_NUM" --repo ethereum-optimism/optimism --json headRefName -q '.headRefName') + +# Get the latest pipeline for this branch +PIPELINE_ID=$(curl -s "https://circleci.com/api/v2/project/gh/ethereum-optimism/optimism/pipeline?branch=$BRANCH" | \ + python3 -c "import json,sys; print(json.load(sys.stdin)['items'][0]['id'])") + +# Find the main workflow +WORKFLOW_ID=$(curl -s "https://circleci.com/api/v2/pipeline/$PIPELINE_ID/workflow" | \ + python3 -c "import json,sys; [print(w['id']) for w in json.load(sys.stdin)['items'] if w['name']=='main']") + +# Find the failed job +JOB_NUMBER=$(curl -s "https://circleci.com/api/v2/workflow/$WORKFLOW_ID/job" | \ + python3 -c "import json,sys; [print(j['job_number']) for j in json.load(sys.stdin)['items'] if j['name']=='analyze-op-program-client']") +``` + +### Step 2: Get the findings + +Try these methods in order. Use the first one that works. + +**1. CI artifact (preferred when available).** Check if the CI job stored the findings JSON as an artifact. This is the fastest path — no local tooling needed. + +```bash +# Get artifacts for the failed job +ARTIFACTS=$(curl -s "https://circleci.com/api/v2/project/gh/ethereum-optimism/optimism/$JOB_NUMBER/artifacts") + +# Look for the findings JSON artifact +ARTIFACT_URL=$(echo "$ARTIFACTS" | python3 -c " +import json, sys +data = json.load(sys.stdin) +for item in data.get('items', []): + if 'findings' in item.get('path', '') and item['path'].endswith('.json'): + print(item['url']) + break +") + +if [ -n "$ARTIFACT_URL" ]; then + curl -sL "$ARTIFACT_URL" -o /tmp/vm-compat-full-findings.json +fi +``` + +If the artifact exists and contains findings, use it. No auth is needed for public repos. + +**2. 
Run locally (if no artifact available).** + +**On Linux** (requires `vm-compat` and `llvm-objdump` in PATH): +```bash +# Checkout the PR branch in a worktree, then from the op-program directory: +vm-compat analyze \ + --with-trace=true \ + --skip-warnings=false \ + --format=json \ + --vm-profile-config vm-profiles/cannon-multithreaded-64.yaml \ + --baseline-report compatibility-test/baseline-cannon-multithreaded-64.json \ + --report-output-path /tmp/vm-compat-full-findings.json \ + ./client/cmd/main.go +``` + +**On macOS** (`llvm-objdump` is not available natively — use Docker): +```bash +# From the op-program directory in the PR branch worktree: +make run-vm-compat +``` +This builds and runs the analysis inside Docker. The findings JSON will be in the Docker build output (and as a CI artifact if the artifact capture PR is merged). + +**Do NOT use CI log output.** CircleCI truncates large log output, which silently drops findings from the beginning of the JSON array. Triage based on incomplete data is worse than useless — it gives false confidence. If neither CI artifacts nor local execution are available, stop and tell the user to set up one of those options. + +### Step 3: Load and parse findings + +Parse the JSON findings. Each finding has: +- `callStack`: Nested object with `function` (and optionally `file`, `line`, `absPath`) and `callStack` fields forming the call chain. The outermost level is the leaf (syscall), innermost is `main.main`. +- `message`: e.g. "Potential Incompatible Syscall Detected: 5043" +- `severity`: "CRITICAL" or "WARNING" +- `hash`: Unique identifier + +### Step 4: Load the baseline and compare + +Read the baseline file (`op-program/compatibility-test/baseline-cannon-multithreaded-64.json`). Also check if the failure is for the `-next` variant. + +Flatten each callStack into an ordered list of function names (ignoring `line`, `file`, `absPath`). A finding **matches** a baseline entry if the function name sequence is identical. 
+ +Mark matched findings as **existing/accepted**. + +### Step 5: Present each new finding to the user + +Present findings one at a time. Do NOT group or summarize multiple findings — let the user make individual decisions. When the user marks a function as unreachable, auto-resolve other findings that share that unreachable path. + +#### Display format: + +Always show the **full call stack from main.main to the leaf syscall**, with main.main at the top. This gives the user the execution context they need to judge reachability. + +Include the **source file path** (no line number) for each function in the stack so the user can navigate to the right file. The `line` and `file` fields in the vm-compat JSON are assembly output positions, NOT Go source lines — they are useless. vm-compat does not provide Go source line numbers, and function definition lines would be misleading since we want call sites, not definitions. + +To resolve file paths: + +1. **Use the PR branch worktree for all lookups** — the source must match the code that produced the findings. Never look up locations from develop or another branch. +2. **Preferred: use `go_search` (gopls MCP)** to find function definitions. This resolves symbols accurately including methods on types, handles vendored/replaced modules, and works across the full dependency tree. However, gopls must be running from a Go workspace — if Claude was started from a non-Go directory (e.g., op-claude), gopls won't work and you must fall back to grep. +3. **Fallback: `grep -rn "func "`** in the PR worktree for optimism code, and in `go env GOMODCACHE` for geth/dependency code (find the exact module path from the `replace` directive in go.mod). +4. For stdlib functions (syscall.*, os.*, internal/*): omit the file path. +5. Cache the lookups — many findings share the same functions. 
+ +``` +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ +Finding [N of TOTAL] | SEVERITY +━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ + +Type: Incompatible Syscall: <name> (<number>) + or: Incompatible Opcode: <opcode>
+ +Call Stack (main → leaf): + 1. main.main + 2. client.Main op-program/client/program.go + 3. client.RunProgram op-program/client/program.go + ... + 14. pathdb.repairHistory triedb/pathdb/history.go + 15. rawdb.NewStateFreezer core/rawdb/ancient_scheme.go + ... + 21. os.Remove + 22. syscall.unlinkat +``` + +**Display conventions:** +- Show full package paths in the raw data but use shortened names (last path component) in the display for readability +- Number every line so the user can easily reference by number +- main.main is always line 1, leaf syscall is always the last line +- Show source file path (no line number) for optimism and geth functions; omit for stdlib/runtime +- For geth functions, show paths relative to the geth module root (e.g., `core/rawdb/freezer.go`) + +#### User options: + +After showing each finding, ask: + +- **Number (1-N)** — That line is the first unreachable point in the call stack +- **A** — Acceptable (reachable but Cannon handles it correctly) +- **?** — Needs investigation (allow follow-up questions before deciding) + +#### When user provides a line number (unreachable): + +Ask for the reason it's unreachable (or offer a default like "Function is not called in the Cannon execution environment"). + +Then scan ALL remaining unreviewed findings for any that pass through the same function at the same position in their call stack (i.e., the path from main.main to that function is identical). Mark all matches as unreachable with the same reason. Report: + +``` +Marked N additional findings as unreachable (same path through ) +``` + +Proceed to the next unresolved finding. + +#### When user selects "Acceptable": + +Record the finding as acceptable. Proceed to the next finding. + +#### When user asks clarifying questions (?): + +Help the user investigate. 
Common queries: +- Show the source code of a function in the call stack +- Find what calls a particular function +- Check if a function is used in Cannon's execution path +- Look at the vm-profile YAML to see allowed/noop syscalls + +Return to the options prompt after answering. + +### Step 6: Update the baseline + +Only proceed if ALL findings are marked as either unreachable or acceptable (none remaining as "needs investigation"). + +**IMPORTANT:** Update the baseline in the PR branch worktree — the same code that produced the findings. The baseline must match the code it will be committed with. Never update the baseline on develop or a different branch. + +**Do NOT manually add entries to the existing baseline.** The baseline must be regenerated from scratch so that stale entries (from code paths that no longer exist) are removed. + +To regenerate: + +1. Run vm-compat with **no baseline** to get the complete report for the current code: + +```bash +cd op-program && vm-compat analyze \ + --with-trace=true --skip-warnings=false --format=json \ + --vm-profile-config vm-profiles/cannon-multithreaded-64.yaml \ + --report-output-path /tmp/vm-compat-full-report.json \ + ./client/cmd/main.go +``` + +2. Normalize the output by stripping `line`, `file`, and `absPath` fields (these are assembly positions, not Go source lines, and cause false positives when they change): + +```bash +cat /tmp/vm-compat-full-report.json | jq 'walk( + if type == "object" and has("line") then del(.line) else . end | + if type == "object" and has("absPath") then del(.absPath) else . end | + if type == "object" and has("file") then del(.file) else . end +)' > op-program/compatibility-test/baseline-cannon-multithreaded-64.json +``` + +3. This replaces the entire baseline with the current state. The old baseline is not merged — it is replaced. 
+ +### Step 7: Verify + +After regenerating the baseline, re-run `vm-compat` with the new baseline to confirm zero new findings: + +```bash +cd op-program && vm-compat analyze \ + --with-trace=true --skip-warnings=false --format=json \ + --vm-profile-config vm-profiles/cannon-multithreaded-64.yaml \ + --baseline-report compatibility-test/baseline-cannon-multithreaded-64.json \ + --report-output-path /tmp/verify.json \ + ./client/cmd/main.go +``` + +If the output file contains an empty array `[]`, the baseline is complete. + +## Notes + +- The `baseline-cannon-multithreaded-64-next.json` file is for a future Cannon version. If the failure is from the `-next` variant, use that baseline instead. Ask the user if unclear. +- Findings with severity "WARNING" are typically less critical than "CRITICAL" but still need triage. +- The vm-compat tool performs static analysis — it cannot determine runtime reachability. Many flagged call paths are through library code that op-program never actually invokes. +- Common sources of unreachable code: p2p networking (geth's node package), disk I/O (freezer, database compaction), OS-level features (signals, process management). +- When a dependency upgrade (e.g., geth) changes internal call paths, many findings may have zero baseline matches even though the conceptual paths are the same. The user must still triage each one individually — do not assume they are safe just because similar paths existed before. From fe24eb3317516f11ad02434419834cd1247b1ecd Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Mon, 9 Mar 2026 14:56:41 -0400 Subject: [PATCH 079/201] chore(kona): remove deprecated supervisor crates (#19290) The OP Stack supervisor is deprecated. Remove all supervisor-related Rust crates, the supervisor binary, Go e2e tests, and Grafana dashboard recipe. Update workspace configuration, justfiles, and documentation. 
ci: remove kona-supervisor CI jobs and env vars Remove the kona-supervisor-e2e-tests job, kona-supervisor-e2e workflow, and RUST_BINARY_PATH_KONA_SUPERVISOR env exports from CircleCI configs. Co-authored-by: Claude Opus 4.6 --- .circleci/continue/main.yml | 3 - .circleci/continue/rust-ci.yml | 3 +- .circleci/continue/rust-e2e.yml | 36 - AGENTS.md | 2 +- rust/Cargo.lock | 165 - rust/Cargo.toml | 10 - rust/justfile | 4 - rust/kona/CLAUDE.md | 8 - rust/kona/README.md | 2 - rust/kona/bin/supervisor/Architecture.md | 127 - rust/kona/bin/supervisor/Cargo.toml | 47 - rust/kona/bin/supervisor/README.md | 51 - rust/kona/bin/supervisor/build.rs | 37 - rust/kona/bin/supervisor/src/cli.rs | 80 - rust/kona/bin/supervisor/src/flags/mod.rs | 4 - .../bin/supervisor/src/flags/supervisor.rs | 778 --- rust/kona/bin/supervisor/src/main.rs | 24 - rust/kona/bin/supervisor/src/metrics/mod.rs | 4 - .../bin/supervisor/src/metrics/version.rs | 59 - rust/kona/bin/supervisor/src/version.rs | 19 - rust/kona/crates/supervisor/core/Cargo.toml | 65 - .../core/src/chain_processor/chain.rs | 147 - .../core/src/chain_processor/error.rs | 19 - .../chain_processor/handlers/cross_chain.rs | 272 - .../src/chain_processor/handlers/finalized.rs | 283 - .../chain_processor/handlers/invalidation.rs | 620 --- .../core/src/chain_processor/handlers/mod.rs | 31 - .../src/chain_processor/handlers/origin.rs | 271 - .../chain_processor/handlers/safe_block.rs | 1026 ---- .../chain_processor/handlers/unsafe_block.rs | 315 -- .../core/src/chain_processor/metrics.rs | 288 - .../core/src/chain_processor/mod.rs | 17 - .../core/src/chain_processor/state/mod.rs | 2 - .../src/chain_processor/state/processor.rs | 40 - .../supervisor/core/src/config/core_config.rs | 165 - .../crates/supervisor/core/src/config/mod.rs | 7 - .../core/src/config/rollup_config_set.rs | 191 - rust/kona/crates/supervisor/core/src/error.rs | 209 - .../crates/supervisor/core/src/event/chain.rs | 60 - .../crates/supervisor/core/src/event/mod.rs | 4 - 
.../supervisor/core/src/l1_watcher/mod.rs | 6 - .../supervisor/core/src/l1_watcher/watcher.rs | 509 -- rust/kona/crates/supervisor/core/src/lib.rs | 29 - .../supervisor/core/src/logindexer/indexer.rs | 364 -- .../supervisor/core/src/logindexer/mod.rs | 17 - .../supervisor/core/src/logindexer/util.rs | 95 - .../crates/supervisor/core/src/reorg/error.rs | 29 - .../supervisor/core/src/reorg/handler.rs | 91 - .../supervisor/core/src/reorg/metrics.rs | 106 - .../crates/supervisor/core/src/reorg/mod.rs | 9 - .../crates/supervisor/core/src/reorg/task.rs | 1250 ----- .../crates/supervisor/core/src/rpc/admin.rs | 216 - .../crates/supervisor/core/src/rpc/metrics.rs | 125 - .../crates/supervisor/core/src/rpc/mod.rs | 10 - .../crates/supervisor/core/src/rpc/server.rs | 514 -- .../core/src/safety_checker/cross.rs | 841 --- .../core/src/safety_checker/error.rs | 81 - .../supervisor/core/src/safety_checker/mod.rs | 20 - .../core/src/safety_checker/promoter.rs | 53 - .../core/src/safety_checker/task.rs | 469 -- .../core/src/safety_checker/traits.rs | 27 - .../crates/supervisor/core/src/state/mod.rs | 2 - .../crates/supervisor/core/src/supervisor.rs | 384 -- .../supervisor/core/src/syncnode/client.rs | 406 -- .../supervisor/core/src/syncnode/command.rs | 36 - .../supervisor/core/src/syncnode/error.rs | 67 - .../supervisor/core/src/syncnode/metrics.rs | 103 - .../supervisor/core/src/syncnode/mod.rs | 23 - .../supervisor/core/src/syncnode/node.rs | 943 ---- .../supervisor/core/src/syncnode/resetter.rs | 586 -- .../supervisor/core/src/syncnode/traits.rs | 197 - .../kona/crates/supervisor/metrics/Cargo.toml | 17 - .../kona/crates/supervisor/metrics/src/lib.rs | 5 - .../crates/supervisor/metrics/src/macros.rs | 75 - .../crates/supervisor/metrics/src/reporter.rs | 9 - rust/kona/crates/supervisor/rpc/Cargo.toml | 71 - rust/kona/crates/supervisor/rpc/README.md | 1 - rust/kona/crates/supervisor/rpc/src/config.rs | 41 - .../crates/supervisor/rpc/src/jsonrpsee.rs | 232 - 
rust/kona/crates/supervisor/rpc/src/lib.rs | 30 - .../kona/crates/supervisor/rpc/src/reqwest.rs | 65 - .../crates/supervisor/rpc/src/response.rs | 326 -- rust/kona/crates/supervisor/rpc/src/server.rs | 55 - .../kona/crates/supervisor/service/Cargo.toml | 43 - .../supervisor/service/src/actors/metric.rs | 107 - .../supervisor/service/src/actors/mod.rs | 20 - .../supervisor/service/src/actors/node.rs | 361 -- .../service/src/actors/processor.rs | 324 -- .../supervisor/service/src/actors/rpc.rs | 140 - .../supervisor/service/src/actors/traits.rs | 11 - .../supervisor/service/src/actors/utils.rs | 67 - .../kona/crates/supervisor/service/src/lib.rs | 9 - .../crates/supervisor/service/src/service.rs | 508 -- .../kona/crates/supervisor/storage/Cargo.toml | 54 - .../crates/supervisor/storage/src/chaindb.rs | 1623 ------ .../supervisor/storage/src/chaindb_factory.rs | 320 -- .../crates/supervisor/storage/src/error.rs | 99 - .../kona/crates/supervisor/storage/src/lib.rs | 44 - .../crates/supervisor/storage/src/metrics.rs | 118 - .../supervisor/storage/src/models/block.rs | 127 - .../supervisor/storage/src/models/common.rs | 87 - .../storage/src/models/derivation.rs | 220 - .../supervisor/storage/src/models/head_ref.rs | 138 - .../supervisor/storage/src/models/log.rs | 299 -- .../supervisor/storage/src/models/mod.rs | 247 - .../src/providers/derivation_provider.rs | 1375 ----- .../src/providers/head_ref_provider.rs | 331 -- .../storage/src/providers/log_provider.rs | 781 --- .../supervisor/storage/src/providers/mod.rs | 15 - .../crates/supervisor/storage/src/traits.rs | 475 -- rust/kona/crates/supervisor/types/Cargo.toml | 39 - rust/kona/crates/supervisor/types/README.md | 1 - .../supervisor/types/src/access_list.rs | 396 -- rust/kona/crates/supervisor/types/src/head.rs | 36 - .../supervisor/types/src/hex_string_u64.rs | 63 - rust/kona/crates/supervisor/types/src/lib.rs | 26 - rust/kona/crates/supervisor/types/src/log.rs | 22 - .../crates/supervisor/types/src/message.rs | 17 
- .../crates/supervisor/types/src/receipt.rs | 4 - .../kona/crates/supervisor/types/src/types.rs | 100 - rust/kona/docker/README.md | 2 - .../grafana/dashboards/dashboard.yml | 7 - .../grafana/dashboards/kona-supervisor.json | 4772 ----------------- rust/kona/justfile | 4 - rust/kona/tests/justfile | 12 +- .../tests/supervisor/l1reorg/init_test.go | 12 - .../tests/supervisor/l1reorg/reorg_test.go | 150 - .../supervisor/l2reorg/init_exec_msg_test.go | 248 - .../tests/supervisor/l2reorg/init_test.go | 13 - .../l2reorg/invalid_exec_msgs_test.go | 244 - .../supervisor/l2reorg/unsafe_head_test.go | 134 - .../l2reorgAfterL1reorg/init_test.go | 14 - .../l2reorgAfterL1reorg/reorg_test.go | 165 - .../tests/supervisor/message/init_test.go | 21 - .../message/interop_contract_test.go | 86 - .../message/interop_happy_tx_test.go | 50 - .../supervisor/message/interop_msg_test.go | 646 --- .../tests/supervisor/pre_interop/init_test.go | 20 - .../tests/supervisor/pre_interop/post_test.go | 209 - .../tests/supervisor/pre_interop/pre_test.go | 112 - rust/kona/tests/supervisor/rpc/init_test.go | 21 - rust/kona/tests/supervisor/rpc/rpc_test.go | 333 -- rust/kona/tests/supervisor/sync/init_test.go | 21 - .../kona/tests/supervisor/sync/resync_test.go | 86 - rust/kona/tests/supervisor/sync/sync_test.go | 251 - rust/kona/tests/supervisor/utils/builder.go | 353 -- rust/kona/tests/supervisor/utils/pos.go | 82 - rust/kona/tests/supervisor/utils/reorg.go | 116 - 148 files changed, 5 insertions(+), 30856 deletions(-) delete mode 100644 rust/kona/bin/supervisor/Architecture.md delete mode 100644 rust/kona/bin/supervisor/Cargo.toml delete mode 100644 rust/kona/bin/supervisor/README.md delete mode 100644 rust/kona/bin/supervisor/build.rs delete mode 100644 rust/kona/bin/supervisor/src/cli.rs delete mode 100644 rust/kona/bin/supervisor/src/flags/mod.rs delete mode 100644 rust/kona/bin/supervisor/src/flags/supervisor.rs delete mode 100644 rust/kona/bin/supervisor/src/main.rs delete mode 100644 
rust/kona/bin/supervisor/src/metrics/mod.rs delete mode 100644 rust/kona/bin/supervisor/src/metrics/version.rs delete mode 100644 rust/kona/bin/supervisor/src/version.rs delete mode 100644 rust/kona/crates/supervisor/core/Cargo.toml delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/chain.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/error.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/handlers/cross_chain.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/handlers/finalized.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/handlers/invalidation.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/handlers/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/handlers/origin.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/handlers/safe_block.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/handlers/unsafe_block.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/metrics.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/state/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/chain_processor/state/processor.rs delete mode 100644 rust/kona/crates/supervisor/core/src/config/core_config.rs delete mode 100644 rust/kona/crates/supervisor/core/src/config/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/config/rollup_config_set.rs delete mode 100644 rust/kona/crates/supervisor/core/src/error.rs delete mode 100644 rust/kona/crates/supervisor/core/src/event/chain.rs delete mode 100644 rust/kona/crates/supervisor/core/src/event/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/l1_watcher/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/l1_watcher/watcher.rs delete mode 100644 
rust/kona/crates/supervisor/core/src/lib.rs delete mode 100644 rust/kona/crates/supervisor/core/src/logindexer/indexer.rs delete mode 100644 rust/kona/crates/supervisor/core/src/logindexer/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/logindexer/util.rs delete mode 100644 rust/kona/crates/supervisor/core/src/reorg/error.rs delete mode 100644 rust/kona/crates/supervisor/core/src/reorg/handler.rs delete mode 100644 rust/kona/crates/supervisor/core/src/reorg/metrics.rs delete mode 100644 rust/kona/crates/supervisor/core/src/reorg/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/reorg/task.rs delete mode 100644 rust/kona/crates/supervisor/core/src/rpc/admin.rs delete mode 100644 rust/kona/crates/supervisor/core/src/rpc/metrics.rs delete mode 100644 rust/kona/crates/supervisor/core/src/rpc/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/rpc/server.rs delete mode 100644 rust/kona/crates/supervisor/core/src/safety_checker/cross.rs delete mode 100644 rust/kona/crates/supervisor/core/src/safety_checker/error.rs delete mode 100644 rust/kona/crates/supervisor/core/src/safety_checker/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/safety_checker/promoter.rs delete mode 100644 rust/kona/crates/supervisor/core/src/safety_checker/task.rs delete mode 100644 rust/kona/crates/supervisor/core/src/safety_checker/traits.rs delete mode 100644 rust/kona/crates/supervisor/core/src/state/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/supervisor.rs delete mode 100644 rust/kona/crates/supervisor/core/src/syncnode/client.rs delete mode 100644 rust/kona/crates/supervisor/core/src/syncnode/command.rs delete mode 100644 rust/kona/crates/supervisor/core/src/syncnode/error.rs delete mode 100644 rust/kona/crates/supervisor/core/src/syncnode/metrics.rs delete mode 100644 rust/kona/crates/supervisor/core/src/syncnode/mod.rs delete mode 100644 rust/kona/crates/supervisor/core/src/syncnode/node.rs delete mode 100644 
rust/kona/crates/supervisor/core/src/syncnode/resetter.rs delete mode 100644 rust/kona/crates/supervisor/core/src/syncnode/traits.rs delete mode 100644 rust/kona/crates/supervisor/metrics/Cargo.toml delete mode 100644 rust/kona/crates/supervisor/metrics/src/lib.rs delete mode 100644 rust/kona/crates/supervisor/metrics/src/macros.rs delete mode 100644 rust/kona/crates/supervisor/metrics/src/reporter.rs delete mode 100644 rust/kona/crates/supervisor/rpc/Cargo.toml delete mode 100644 rust/kona/crates/supervisor/rpc/README.md delete mode 100644 rust/kona/crates/supervisor/rpc/src/config.rs delete mode 100644 rust/kona/crates/supervisor/rpc/src/jsonrpsee.rs delete mode 100644 rust/kona/crates/supervisor/rpc/src/lib.rs delete mode 100644 rust/kona/crates/supervisor/rpc/src/reqwest.rs delete mode 100644 rust/kona/crates/supervisor/rpc/src/response.rs delete mode 100644 rust/kona/crates/supervisor/rpc/src/server.rs delete mode 100644 rust/kona/crates/supervisor/service/Cargo.toml delete mode 100644 rust/kona/crates/supervisor/service/src/actors/metric.rs delete mode 100644 rust/kona/crates/supervisor/service/src/actors/mod.rs delete mode 100644 rust/kona/crates/supervisor/service/src/actors/node.rs delete mode 100644 rust/kona/crates/supervisor/service/src/actors/processor.rs delete mode 100644 rust/kona/crates/supervisor/service/src/actors/rpc.rs delete mode 100644 rust/kona/crates/supervisor/service/src/actors/traits.rs delete mode 100644 rust/kona/crates/supervisor/service/src/actors/utils.rs delete mode 100644 rust/kona/crates/supervisor/service/src/lib.rs delete mode 100644 rust/kona/crates/supervisor/service/src/service.rs delete mode 100644 rust/kona/crates/supervisor/storage/Cargo.toml delete mode 100644 rust/kona/crates/supervisor/storage/src/chaindb.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/chaindb_factory.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/error.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/lib.rs 
delete mode 100644 rust/kona/crates/supervisor/storage/src/metrics.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/models/block.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/models/common.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/models/derivation.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/models/head_ref.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/models/log.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/models/mod.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/providers/derivation_provider.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/providers/head_ref_provider.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/providers/log_provider.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/providers/mod.rs delete mode 100644 rust/kona/crates/supervisor/storage/src/traits.rs delete mode 100644 rust/kona/crates/supervisor/types/Cargo.toml delete mode 100644 rust/kona/crates/supervisor/types/README.md delete mode 100644 rust/kona/crates/supervisor/types/src/access_list.rs delete mode 100644 rust/kona/crates/supervisor/types/src/head.rs delete mode 100644 rust/kona/crates/supervisor/types/src/hex_string_u64.rs delete mode 100644 rust/kona/crates/supervisor/types/src/lib.rs delete mode 100644 rust/kona/crates/supervisor/types/src/log.rs delete mode 100644 rust/kona/crates/supervisor/types/src/message.rs delete mode 100644 rust/kona/crates/supervisor/types/src/receipt.rs delete mode 100644 rust/kona/crates/supervisor/types/src/types.rs delete mode 100644 rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/dashboard.yml delete mode 100644 rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/kona-supervisor.json delete mode 100644 rust/kona/tests/supervisor/l1reorg/init_test.go delete mode 100644 rust/kona/tests/supervisor/l1reorg/reorg_test.go delete mode 100644 
rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go delete mode 100644 rust/kona/tests/supervisor/l2reorg/init_test.go delete mode 100644 rust/kona/tests/supervisor/l2reorg/invalid_exec_msgs_test.go delete mode 100644 rust/kona/tests/supervisor/l2reorg/unsafe_head_test.go delete mode 100644 rust/kona/tests/supervisor/l2reorgAfterL1reorg/init_test.go delete mode 100644 rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go delete mode 100644 rust/kona/tests/supervisor/message/init_test.go delete mode 100644 rust/kona/tests/supervisor/message/interop_contract_test.go delete mode 100644 rust/kona/tests/supervisor/message/interop_happy_tx_test.go delete mode 100644 rust/kona/tests/supervisor/message/interop_msg_test.go delete mode 100644 rust/kona/tests/supervisor/pre_interop/init_test.go delete mode 100644 rust/kona/tests/supervisor/pre_interop/post_test.go delete mode 100644 rust/kona/tests/supervisor/pre_interop/pre_test.go delete mode 100644 rust/kona/tests/supervisor/rpc/init_test.go delete mode 100644 rust/kona/tests/supervisor/rpc/rpc_test.go delete mode 100644 rust/kona/tests/supervisor/sync/init_test.go delete mode 100644 rust/kona/tests/supervisor/sync/resync_test.go delete mode 100644 rust/kona/tests/supervisor/sync/sync_test.go delete mode 100644 rust/kona/tests/supervisor/utils/builder.go delete mode 100644 rust/kona/tests/supervisor/utils/pos.go delete mode 100644 rust/kona/tests/supervisor/utils/reorg.go diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 6defd75af9b28..155a5809b41fd 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -409,7 +409,6 @@ commands: ROOT_DIR="$(pwd)" BIN_DIR="$ROOT_DIR/.circleci-cache/rust-binaries" echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" - echo "export RUST_BINARY_PATH_KONA_SUPERVISOR=$ROOT_DIR/rust/target/release/kona-supervisor" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" 
>> "$BASH_ENV" echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" @@ -2039,7 +2038,6 @@ jobs: ROOT_DIR="$(pwd)" BIN_DIR="$ROOT_DIR/.circleci-cache/rust-binaries" echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" - echo "export RUST_BINARY_PATH_KONA_SUPERVISOR=$ROOT_DIR/rust/target/release/kona-supervisor" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" # Restore cached Go modules @@ -2143,7 +2141,6 @@ jobs: ROOT_DIR="$(pwd)" BIN_DIR="$ROOT_DIR/.circleci-cache/rust-binaries" echo "export RUST_BINARY_PATH_KONA_NODE=$BIN_DIR/kona-node" >> "$BASH_ENV" - echo "export RUST_BINARY_PATH_KONA_SUPERVISOR=$BIN_DIR/kona-supervisor" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" - restore_cache: diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index bdbeb97bd2451..e89cefd435760 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -89,7 +89,6 @@ commands: ROOT_DIR="$(pwd)" BIN_DIR="$ROOT_DIR/.circleci-cache/rust-binaries" echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" - echo "export RUST_BINARY_PATH_KONA_SUPERVISOR=$ROOT_DIR/rust/target/release/kona-supervisor" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" @@ -604,7 +603,7 @@ jobs: - run: name: Install nextest command: | - command -v cargo-nextest >/dev/null || cargo binstall --no-confirm cargo-nextest + command -v cargo-nextest >/dev/null || cargo binstall --locked --no-confirm cargo-nextest - run: name: Run cargo tests working_directory: <> diff --git 
a/.circleci/continue/rust-e2e.yml b/.circleci/continue/rust-e2e.yml index c4d51e4f7e409..1cd90095834b1 100644 --- a/.circleci/continue/rust-e2e.yml +++ b/.circleci/continue/rust-e2e.yml @@ -133,28 +133,6 @@ jobs: - go-save-cache: namespace: kona-ci - # Kona Supervisor E2E Tests - kona-supervisor-e2e-tests: - parameters: - test_pkg: - description: The test package to run - type: string - docker: - - image: <> - resource_class: xlarge - steps: - - utils/checkout-with-mise: - checkout-method: blobless - - rust-build: - <<: *kona-rust-build-release - binary: "kona-supervisor" - - run: - name: Run supervisor tests for <> - working_directory: rust/kona - no_output_timeout: 40m - command: | - just test-e2e-sysgo supervisor "/supervisor/<>" - # Kona Proof Action Tests (from proof.yaml) kona-proof-action-tests: parameters: @@ -256,17 +234,3 @@ workflows: context: - circleci-repo-readonly-authenticated-github-token - # Kona supervisor E2E tests - manual dispatch only - kona-supervisor-e2e: - when: - and: - - equal: [true, <>] - - equal: ["api", << pipeline.trigger_source >>] - jobs: - - kona-supervisor-e2e-tests: - name: kona-supervisor-<> - matrix: - parameters: - test_pkg: ["pre_interop", "l1reorg/sysgo"] - context: - - circleci-repo-readonly-authenticated-github-token diff --git a/AGENTS.md b/AGENTS.md index 867e8ea1a7030..2857cddef15bf 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -36,7 +36,7 @@ Solidity smart contracts for the OP Stack, including the core protocol contracts The OP Stack includes significant Rust implementations: -- **kona**: Rust implementation of the OP Stack rollup state transition, including fault proof program, rollup node, and supervisor +- **kona**: Rust implementation of the OP Stack rollup state transition, including fault proof program and rollup node - **op-reth**: OP Stack execution client built on reth - **op-alloy**: Rust crates providing OP Stack types and providers for the alloy ecosystem - **alloy-op-hardforks** / **alloy-op-evm**: OP Stack 
hardfork and EVM support for alloy diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 350a96df58604..bb4e5922c355b 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -6337,171 +6337,6 @@ dependencies = [ "syn 2.0.117", ] -[[package]] -name = "kona-supervisor" -version = "0.1.0" -dependencies = [ - "alloy-network", - "alloy-provider", - "alloy-rpc-types-engine", - "anyhow", - "clap", - "glob", - "kona-cli", - "kona-genesis", - "kona-interop", - "kona-protocol", - "kona-registry", - "kona-supervisor-core", - "kona-supervisor-service", - "metrics", - "serde", - "serde_json", - "tempfile", - "tokio", - "tracing", - "tracing-subscriber 0.3.22", - "vergen", - "vergen-git2", -] - -[[package]] -name = "kona-supervisor-core" -version = "0.1.0" -dependencies = [ - "alloy-consensus", - "alloy-eips", - "alloy-network", - "alloy-primitives", - "alloy-provider", - "alloy-rpc-client", - "alloy-rpc-types-engine", - "alloy-rpc-types-eth", - "alloy-transport", - "async-trait", - "auto_impl", - "derive_more", - "futures", - "jsonrpsee", - "kona-genesis", - "kona-interop", - "kona-protocol", - "kona-supervisor-metrics", - "kona-supervisor-rpc", - "kona-supervisor-storage", - "kona-supervisor-types", - "metrics", - "mockall", - "op-alloy-consensus", - "op-alloy-rpc-types", - "serde", - "serde_json", - "tempfile", - "thiserror 2.0.18", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "kona-supervisor-metrics" -version = "0.1.0" - -[[package]] -name = "kona-supervisor-rpc" -version = "0.1.1" -dependencies = [ - "alloy-eips", - "alloy-primitives", - "alloy-rpc-client", - "alloy-rpc-types-engine", - "alloy-serde", - "async-trait", - "derive_more", - "jsonrpsee", - "kona-interop", - "kona-protocol", - "kona-supervisor-types", - "op-alloy-consensus", - "serde", - "serde_json", - "thiserror 2.0.18", - "tokio", -] - -[[package]] -name = "kona-supervisor-service" -version = "0.1.0" -dependencies = [ - "alloy-eips", - "alloy-primitives", - "alloy-provider", - 
"alloy-rpc-client", - "alloy-rpc-types-eth", - "anyhow", - "async-trait", - "derive_more", - "futures", - "jsonrpsee", - "kona-genesis", - "kona-interop", - "kona-protocol", - "kona-supervisor-core", - "kona-supervisor-metrics", - "kona-supervisor-rpc", - "kona-supervisor-storage", - "kona-supervisor-types", - "mockall", - "thiserror 2.0.18", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "kona-supervisor-storage" -version = "0.1.0" -dependencies = [ - "alloy-eips", - "alloy-primitives", - "bytes", - "derive_more", - "eyre", - "kona-cli", - "kona-interop", - "kona-protocol", - "kona-supervisor-metrics", - "kona-supervisor-types", - "metrics", - "modular-bitfield", - "op-alloy-consensus", - "reth-codecs", - "reth-db", - "reth-db-api", - "reth-primitives-traits", - "serde", - "tempfile", - "thiserror 2.0.18", - "tokio", - "tracing", -] - -[[package]] -name = "kona-supervisor-types" -version = "0.1.1" -dependencies = [ - "alloy-eips", - "alloy-primitives", - "alloy-serde", - "derive_more", - "kona-interop", - "kona-protocol", - "op-alloy-consensus", - "serde", - "serde_json", - "thiserror 2.0.18", -] - [[package]] name = "kqueue" version = "1.1.1" diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 58623202e7f1b..69f2fd6832bd0 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -16,7 +16,6 @@ members = [ "kona/bin/*", "kona/crates/proof/*", "kona/crates/node/*", - "kona/crates/supervisor/*", "kona/crates/protocol/*", "kona/crates/providers/*", "kona/crates/utilities/*", @@ -54,7 +53,6 @@ default-members = [ "kona/bin/host", "kona/bin/client", "kona/bin/node", - "kona/bin/supervisor", "op-reth/bin/", ] @@ -249,14 +247,6 @@ kona-node-service = { path = "kona/crates/node/service", version = "0.1.3", defa kona-disc = { path = "kona/crates/node/disc", version = "0.1.2", default-features = false } kona-gossip = { path = "kona/crates/node/gossip", version = "0.1.2", default-features = false } -# Supervisor -kona-supervisor-rpc = { path = 
"kona/crates/supervisor/rpc", version = "0.1.1", default-features = false } -kona-supervisor-core = { path = "kona/crates/supervisor/core", version = "0.1.0", default-features = false } -kona-supervisor-service = { path = "kona/crates/supervisor/service", version = "0.1.0", default-features = false } -kona-supervisor-types = { path = "kona/crates/supervisor/types", version = "0.1.1", default-features = false } -kona-supervisor-storage = { path = "kona/crates/supervisor/storage", version = "0.1.0", default-features = false } -kona-supervisor-metrics = { path = "kona/crates/supervisor/metrics", version = "0.1.0", default-features = false } - # Providers kona-providers-alloy = { path = "kona/crates/providers/providers-alloy", version = "0.3.3", default-features = false } kona-providers-local = { path = "kona/crates/providers/providers-local", version = "0.1.0", default-features = false } diff --git a/rust/justfile b/rust/justfile index d3a1c221223ec..79422c27af1d7 100644 --- a/rust/justfile +++ b/rust/justfile @@ -32,10 +32,6 @@ build-release *args='': build-node: cargo build --release --bin kona-node -# Build the supervisor -build-supervisor: - cargo build --release --bin kona-supervisor - # Build op-reth build-op-reth: cargo build --release --bin op-reth diff --git a/rust/kona/CLAUDE.md b/rust/kona/CLAUDE.md index 7353709db423b..58ba2b486d2bd 100644 --- a/rust/kona/CLAUDE.md +++ b/rust/kona/CLAUDE.md @@ -31,7 +31,6 @@ Kona is a monorepo for OP Stack types, components, and services built in Rust. 
T - **`client`**: The fault proof program that executes state transitions on a prover - **`host`**: Native program serving as the Preimage Oracle server - **`node`**: Rollup Node implementation with flexible chain ID support -- **`supervisor`**: Supervisor implementation for interop coordination ### Protocol (`crates/protocol/`) - **`derive`**: `no_std` compatible derivation pipeline implementation @@ -58,13 +57,6 @@ Kona is a monorepo for OP Stack types, components, and services built in Rust. T - **`p2p`**: P2P networking including Gossip and Discovery - **`sources`**: Data source types and utilities -### Supervisor (`crates/supervisor/`) -- **`core`**: Core supervisor functionality -- **`service`**: Supervisor service implementation -- **`rpc`**: Supervisor RPC types and client -- **`storage`**: Database storage layer -- **`types`**: Common types for supervisor components - ### Development Workflow 1. **Testing**: The project uses `nextest` for test execution. Online tests are excluded by default and can be run separately with `just test-online` diff --git a/rust/kona/README.md b/rust/kona/README.md index 9da6052d2d24a..f3c86b4be841e 100644 --- a/rust/kona/README.md +++ b/rust/kona/README.md @@ -46,7 +46,6 @@ getting started with building your own programs, and a reference for the librari - [`client`](./bin/client): The bare-metal program that executes the state transition, to be run on a prover. - [`host`](./bin/host): The host program that runs natively alongside the prover, serving as the [Preimage Oracle][g-preimage-oracle] server. - [`node`](./bin/node): [WIP] A [Rollup Node][rollup-node-spec] implementation, backed by [`kona-derive`](./crates/protocol/derive). Supports flexible chain ID specification via `--l2-chain-id` using either numeric IDs (`10`) or chain names (`optimism`). -- [`supervisor`](./bin/supervisor): [WIP] A [Supervisor][supervisor-spec] implementation. 
**Protocol** @@ -158,7 +157,6 @@ Licensed under the [MIT license.](https://github.com/ethereum-optimism/optimism/ [cannon]: https://github.com/ethereum-optimism/optimism/tree/develop/cannon [cannon-rs]: https://github.com/op-rs/cannon-rs [rollup-node-spec]: https://specs.optimism.io/protocol/rollup-node.html -[supervisor-spec]: https://specs.optimism.io/interop/supervisor.html [badboi-cannon-rs]: https://github.com/BadBoiLabs/cannon-rs [asterisc]: https://github.com/ethereum-optimism/asterisc [fpp-specs]: https://specs.optimism.io/fault-proof/index.html diff --git a/rust/kona/bin/supervisor/Architecture.md b/rust/kona/bin/supervisor/Architecture.md deleted file mode 100644 index 379050dc1d52c..0000000000000 --- a/rust/kona/bin/supervisor/Architecture.md +++ /dev/null @@ -1,127 +0,0 @@ -# Kona Supervisor System Architecture - -## System Module Diagram - -```mermaid -flowchart TD - SVC["Supervisor Service"] - DBF["ChainDbFactory"] - L1W["L1Watcher"] - RH["ReorgHandler"] - SRPC["SupervisorRpcActor"] - SRPCS["Supervisor RPC Server"] - - subgraph PerChain["Modules Per Chain"] - subgraph ManagedNodeGroup["Managed Node Modules"] - MNODE["ManagedNode"] - MN["ManagedNodeActor"] - end - subgraph ChainProcessorGroup["Chain Processor Modules"] - CPA["ChainProcessorActor"] - CP["ChainProcessor"] - end - CSCJ_SAFE["CrossSafetyCheckerJob (Safe)"] - CSCJ_UNSAFE["CrossSafetyCheckerJob (Unsafe)"] - CHAN["ChainEvent Channel"] - CMDCHAN["ManagedNodeCommand Channel"] - end - - SVC --> DBF - SVC --> L1W - SVC --> SRPC - SRPC --> SRPCS - SVC --> PerChain - SRPCS --> DBF - - L1W --> RH - - CPA --> CP - CP --> MNODE - CP --> DBF - - MN --> MNODE - - CSCJ_SAFE --> DBF - CSCJ_UNSAFE --> DBF - - %% Producers send events to the ChainEvent channel - MNODE --> CHAN - L1W --> CHAN - CSCJ_SAFE --> CHAN - CSCJ_UNSAFE --> CHAN - - %% ChainEvent channel delivers events to ChainProcessorActor - CHAN --> CPA - - %% ChainProcessorActor sends ManagedNodeCommand - CP --> CMDCHAN - - %% Reorg Handler sends 
ManagedNodeCommand - RH --> CMDCHAN - - %% ManagedNodeCommand channel delivers commands to ManagedNodeActor - CMDCHAN --> MN -``` - ---- - -## Module Descriptions - -### **Global Modules** - -- **Supervisor Service (`SVC`)** - The main orchestrator. Initializes, manages, and coordinates all subsystems and per-chain modules. - -- **ChainDbFactory (`DBF`)** - Responsible for creating and managing per-chain databases, providing storage and state management. - -- **L1Watcher (`L1W`)** - Monitors the Layer 1 chain for finalized and new blocks, detects reorgs and broadcasts relevant events. - -- **ReorgHandler (`RH`)** - Handles chain reorganizations detected by the L1Watcher, ensuring system consistency. - -- **SupervisorRpcActor (`SRPC`)** - Exposes the supervisor’s API endpoints for external control and monitoring. - -- **Supervisor RPC Server (`SRPCS`)** - The actual RPC server implementation, serving requests and interacting with the supervisor service and database. - ---- - -### **Per-Chain Modules** - -- **Managed Node Modules** - - **ManagedNode (`MNODE`)** - Represents a node for a specific chain, handling chain-specific logic and state. - - **ManagedNodeActor (`MN`)** - The actor responsible for executing commands and managing the lifecycle of the ManagedNode. - -- **Chain Processor modules** - - **ChainProcessorActor (`CPA`)** - Listens for chain events and delegates processing to the ChainProcessor. - - **ChainProcessor (`CP`)** - Processes chain events, interacts with the ManagedNode and ChainDbFactory, and issues commands. - -- **CrossSafetyCheckerJob (Safe/Unsafe) (`CSCJ_SAFE`, `CSCJ_UNSAFE`)** - Periodically promotes safety levels for each chain, ensuring cross-chain consistency. - -- **ChainEvent Channel (`CHAN`)** - Event bus for chain events. Receives events from producers (ManagedNode, L1Watcher, CrossSafetyCheckerJobs) and delivers them to the ChainProcessorActor. - -- **ManagedNodeCommand Channel (`CMDCHAN`)** - Command bus for ManagedNode commands. 
Receives commands from ChainProcessor and ReorgHandler, and delivers them to ManagedNodeActor. - ---- - -## **Event and Command Flow** - -- **Chain events** are produced by ManagedNode, L1Watcher, and CrossSafetyCheckerJobs, sent to the ChainEvent Channel, and consumed by ChainProcessorActor. -- **ManagedNode commands** are produced by ChainProcessor and ReorgHandler, sent to the ManagedNodeCommand Channel, and consumed by ManagedNodeActor. - ---- - -## **Summary** - -This architecture provides a modular, event-driven system for managing multiple chains, ensuring robust coordination, safety, and extensibility. -Each module is clearly separated, with explicit channels for event and command communication, making the system easy to reason about and maintain. diff --git a/rust/kona/bin/supervisor/Cargo.toml b/rust/kona/bin/supervisor/Cargo.toml deleted file mode 100644 index a1fa09433022c..0000000000000 --- a/rust/kona/bin/supervisor/Cargo.toml +++ /dev/null @@ -1,47 +0,0 @@ -[package] -name = "kona-supervisor" -version = "0.1.0" -description = "Kona Supervisor" - -edition.workspace = true -license.workspace = true -rust-version.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -keywords.workspace = true -categories.workspace = true - -[dependencies] -# Workspace -kona-supervisor-service.workspace = true -kona-supervisor-core.workspace = true -kona-cli.workspace = true -kona-interop.workspace = true -kona-genesis.workspace = true -kona-protocol.workspace = true - -alloy-network.workspace = true -alloy-provider.workspace = true -alloy-rpc-types-engine.workspace = true - -clap = { workspace = true, features = ["derive", "env"] } -tokio = { workspace = true, features = [ "full", "macros"] } -anyhow = { workspace = true } -tracing-subscriber = { workspace = true, features = ["fmt", "env-filter"] } -tracing = { workspace = true } -serde.workspace = true -serde_json.workspace = true -glob.workspace = true -metrics.workspace = 
true - -[dev-dependencies] -tempfile.workspace = true -kona-registry.workspace = true - -[build-dependencies] -vergen = { workspace = true, features = ["build", "cargo", "emit_and_set"] } -vergen-git2.workspace = true - -[lints] -workspace = true diff --git a/rust/kona/bin/supervisor/README.md b/rust/kona/bin/supervisor/README.md deleted file mode 100644 index b091b06078767..0000000000000 --- a/rust/kona/bin/supervisor/README.md +++ /dev/null @@ -1,51 +0,0 @@ -# `kona-supervisor` - -A supervisor implementation for the OP stack built in rust. - -## Installation - -Build from source - -``` -cargo build --profile release-perf --bin kona-supervisor -``` - -### Usage - -Run the `kona-supervisor` using the following command - -```bash -kona-supervisor \ - --metrics.enabled \ - --metrics.port 9090 \ - --metrics.addr 127.0.0.1 \ - --l1-rpc http://localhost:8545 \ - --l2-consensus.nodes http://node1:8551,http://node2:8551 \ - --l2-consensus.jwt-secret secret1,secret2 \ - --datadir /supervisor_data \ - --dependency-set /path/to/deps.json \ - --rollup-config-paths /configs/rollup-*.json -``` - -### Configuration via Environment Variables - -Many configuration options can be set via environment variables: - -- `L1_RPC` - L1 RPC source -- `L2_CONSENSUS_NODES` - L2 consensus rollup node RPC addresses. -- `L2_CONSENSUS_JWT_SECRET` - JWT secrets for L2 consensus nodes. -- `DEPENDENCY_SET` - Path to the dependency-set JSON config file. -- `DATADIR` - Directory to store supervisor data. -- `ROLLUP_CONFIG_PATHS` - Path pattern to op-node rollup.json configs to load as a rollup config set. - -### Help and Documentation - -Use the `--help` flag to see all available options: - -``` -kona-supervisor --help -``` - -## Advanced Configuration - -Coming soon diff --git a/rust/kona/bin/supervisor/build.rs b/rust/kona/bin/supervisor/build.rs deleted file mode 100644 index 4391459be7dbb..0000000000000 --- a/rust/kona/bin/supervisor/build.rs +++ /dev/null @@ -1,37 +0,0 @@ -//! 
Used for generating build information for the supervisor service. - -use std::{env, error::Error}; -use vergen::{BuildBuilder, CargoBuilder, Emitter}; -use vergen_git2::Git2Builder; - -fn main() -> Result<(), Box> { - let mut emitter = Emitter::default(); - - let build_builder = BuildBuilder::default().build_timestamp(true).build()?; - - // Add build timestamp information. - emitter.add_instructions(&build_builder)?; - - let cargo_builder = CargoBuilder::default().features(true).target_triple(true).build()?; - - // Add cargo features and target information. - emitter.add_instructions(&cargo_builder)?; - - let git_builder = - Git2Builder::default().describe(false, true, None).dirty(true).sha(false).build()?; - - // Add commit information. - emitter.add_instructions(&git_builder)?; - - emitter.emit_and_set()?; - - // Need to print in order to set the environment variables. - let sha = env::var("VERGEN_GIT_SHA")?; - println!("cargo:rustc-env=VERGEN_GIT_SHA_SHORT={}", &sha[..8]); - - let out_dir = env::var("OUT_DIR").unwrap(); - let profile = out_dir.rsplit(std::path::MAIN_SEPARATOR).nth(3).unwrap(); - println!("cargo:rustc-env=KONA_SUPERVISOR_BUILD_PROFILE={profile}"); - - Ok(()) -} diff --git a/rust/kona/bin/supervisor/src/cli.rs b/rust/kona/bin/supervisor/src/cli.rs deleted file mode 100644 index 326d19acb8667..0000000000000 --- a/rust/kona/bin/supervisor/src/cli.rs +++ /dev/null @@ -1,80 +0,0 @@ -//! Contains the supervisor CLI. - -use crate::{flags::SupervisorArgs, metrics::VersionInfo}; -use anyhow::Result; -use clap::Parser; -use kona_cli::{LogArgs, LogConfig, MetricsArgs, cli_styles}; -use kona_supervisor_service::Service; -use tracing::{error, info}; - -/// CLI for the Rust implementation of the OP Supervisor. 
-#[derive(Parser, Debug)] -#[command(name = "op-supervisor", about = "Rust implementation of the OP Supervisor", styles = cli_styles())] -pub struct Cli { - /// Global args - #[command(flatten)] - pub global: LogArgs, - - /// Prometheus metrics args - #[command(flatten)] - pub metrics: MetricsArgs, - - /// Supervisor args - #[command(flatten)] - pub supervisor: SupervisorArgs, -} - -impl Cli { - /// Runs the CLI. - pub fn run(self) -> Result<()> { - self.metrics.init_metrics()?; - // Register build metrics - VersionInfo::from_build().register_version_metrics(); - - self.init_logs(&self.global)?; - - Self::run_until_ctrl_c(async move { - let config = self.supervisor.init_config().await?; - let mut service = Service::new(config); - - tokio::select! { - res = service.run() => { - if let Err(err) = res { - error!(target: "supervisor", %err, "Error running supervisor service"); - } - } - _ = tokio::signal::ctrl_c() => { - info!(target: "supervisor", "Ctrl+C received, initiating service shutdown..."); - } - } - - service.shutdown().await?; // Call shutdown on the service instance itself - info!(target: "supervisor", "Supervisor service shut down gracefully."); - Ok(()) - }) - } - - /// Run until ctrl-c is pressed. - pub fn run_until_ctrl_c(fut: F) -> Result<()> - where - F: std::future::Future>, - { - let rt = Self::tokio_runtime().map_err(|e| anyhow::anyhow!(e))?; - rt.block_on(fut) - } - - /// Creates a new default tokio multi-thread [`Runtime`](tokio::runtime::Runtime) with all - /// features enabled - pub fn tokio_runtime() -> Result { - tokio::runtime::Builder::new_multi_thread().enable_all().build() - } - - /// Initializes the telemetry stack and Prometheus metrics recorder. - pub fn init_logs(&self, args: &LogArgs) -> anyhow::Result<()> { - // Filter out discovery warnings since they're very very noisy. 
- let filter = tracing_subscriber::EnvFilter::from_default_env(); - - LogConfig::new(args.clone()).init_tracing_subscriber(Some(filter))?; - Ok(()) - } -} diff --git a/rust/kona/bin/supervisor/src/flags/mod.rs b/rust/kona/bin/supervisor/src/flags/mod.rs deleted file mode 100644 index a27930e643065..0000000000000 --- a/rust/kona/bin/supervisor/src/flags/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! CLI Flags - -mod supervisor; -pub use supervisor::SupervisorArgs; diff --git a/rust/kona/bin/supervisor/src/flags/supervisor.rs b/rust/kona/bin/supervisor/src/flags/supervisor.rs deleted file mode 100644 index 2d45bd100593b..0000000000000 --- a/rust/kona/bin/supervisor/src/flags/supervisor.rs +++ /dev/null @@ -1,778 +0,0 @@ -use alloy_network::Ethereum; -use alloy_provider::{Provider, RootProvider}; -use alloy_rpc_types_engine::JwtSecret; -use anyhow::{Context as _, Ok, Result, anyhow}; -use clap::Args; -use glob::glob; -use kona_genesis::RollupConfig; -use kona_interop::DependencySet; -use kona_protocol::BlockInfo; -use kona_supervisor_core::{ - config::{Config, RollupConfigSet}, - syncnode::ClientConfig, -}; -use serde::de::DeserializeOwned; -use std::{ - net::{IpAddr, SocketAddr}, - path::{Path, PathBuf}, -}; -use tokio::{fs::File, io::AsyncReadExt}; - -/// Supervisor configuration arguments. -#[derive(Args, Debug)] -pub struct SupervisorArgs { - /// L1 RPC source - #[arg(long, env = "L1_RPC")] - pub l1_rpc: String, - - /// L2 consensus rollup node RPC addresses. - #[arg(long = "l2-consensus.nodes", env = "L2_CONSENSUS_NODES", value_delimiter = ',')] - pub l2_consensus_nodes: Vec, - - /// JWT secrets for L2 consensus nodes. - #[arg( - long = "l2-consensus.jwt-secret", - env = "L2_CONSENSUS_JWT_SECRET", - value_delimiter = ',' - )] - pub l2_consensus_jwt_secret: Vec, - - /// Directory to store supervisor data. - #[arg(long, env = "DATADIR")] - pub datadir: PathBuf, - - /// Optional endpoint to sync data from another supervisor. 
- #[arg(long = "datadir.sync-endpoint", env = "DATADIR_SYNC_ENDPOINT")] - pub datadir_sync_endpoint: Option, - - /// Path to the dependency-set JSON config file. - #[arg(long = "dependency-set", env = "DEPENDENCY_SET")] - pub dependency_set: PathBuf, - - /// Path pattern to op-node rollup.json configs to load as a rollup config set. - /// The pattern should use the glob syntax, e.g. '/configs/rollup-*.json' - /// When using this flag, the L1 timestamps are loaded from the provided L1 RPC. - #[arg(long = "rollup-config-paths", env = "ROLLUP_CONFIG_PATHS")] - pub rollup_config_paths: PathBuf, - - /// IP address for the Supervisor RPC server to listen on. - #[arg(long = "rpc.addr", env = "RPC_ADDR", default_value = "0.0.0.0")] - pub rpc_address: IpAddr, - - /// Port for the Supervisor RPC server to listen on. - #[arg(long = "rpc.port", env = "RPC_PORT", default_value_t = 8545)] - pub rpc_port: u16, - - /// Enable the Supervisor Admin API. - #[arg(long = "rpc.enable-admin", env = "RPC_ENABLE_ADMIN", default_value_t = false)] - pub enable_admin_api: bool, -} - -impl SupervisorArgs { - async fn read_json_file(path: &Path) -> Result { - let mut file = File::open(path) - .await - .with_context(|| format!("Failed to open '{}'", path.display()))?; - let mut contents = String::new(); - file.read_to_string(&mut contents) - .await - .with_context(|| format!("Failed to read '{}'", path.display()))?; - let value = serde_json::from_str(&contents) - .with_context(|| format!("Failed to parse JSON from '{}'", path.display()))?; - Ok(value) - } - - /// initialise and return the [`DependencySet`]. 
- pub async fn init_dependency_set(&self) -> Result { - Self::read_json_file(&self.dependency_set).await - } - - async fn get_rollup_configs(&self) -> Result> { - let pattern = self - .rollup_config_paths - .to_str() - .ok_or_else(|| anyhow::anyhow!("rollup_config_paths contains invalid UTF-8"))?; - if pattern.is_empty() { - return Err(anyhow::anyhow!("rollup_config_paths pattern is empty")); - } - - let mut rollup_configs = Vec::new(); - for entry in glob(pattern)? { - let path = entry?; - let rollup_config = Self::read_json_file(&path).await?; - rollup_configs.push(rollup_config); - } - Ok(rollup_configs) - } - - /// Initialise and return the rollup config set. - pub async fn init_rollup_config_set(&self) -> Result { - let l1_url = self - .l1_rpc - .parse() - .with_context(|| format!("Failed to parse L1 RPC URL '{}'", &self.l1_rpc))?; - let provider = RootProvider::::new_http(l1_url); - - let mut rollup_config_set = RollupConfigSet::default(); - - // Use the helper to get all configs - let rollup_configs = self.get_rollup_configs().await?; - - for rollup_config in rollup_configs { - let chain_id = rollup_config.l2_chain_id; - - let l1_genesis = provider - .get_block_by_hash(rollup_config.genesis.l1.hash) - .await - .with_context(|| { - format!( - "Failed to fetch L1 genesis block for hash {}", - rollup_config.genesis.l1.hash - ) - })?; - - let l1_genesis = l1_genesis.ok_or_else(|| { - anyhow::anyhow!( - "L1 genesis block not found for hash {}", - rollup_config.genesis.l1.hash - ) - })?; - - rollup_config_set - .add_from_rollup_config( - chain_id.id(), - rollup_config, - BlockInfo::new( - l1_genesis.header.hash, - l1_genesis.header.number, - l1_genesis.header.parent_hash, - l1_genesis.header.timestamp, - ), - ) - .map_err(|err| anyhow!(err))?; - } - - Ok(rollup_config_set) - } - - /// initialise and return the managed nodes configuration. 
- pub fn init_managed_nodes_config(&self) -> Result> { - let nodes: Vec = self - .l2_consensus_nodes - .iter() - .map(|s| s.trim().to_string()) - .filter(|s| !s.is_empty()) - .collect(); - - if nodes.is_empty() { - return Ok(Vec::new()); - } - - let mut managed_nodes = Vec::with_capacity(nodes.len()); - let default_secret_path = self - .l2_consensus_jwt_secret - .first() - .ok_or_else(|| anyhow::anyhow!("No JWT secrets provided"))?; - for (i, rpc_url) in nodes.iter().enumerate() { - let secret_path = self.l2_consensus_jwt_secret.get(i).unwrap_or(default_secret_path); - - let secret = std::fs::read_to_string(secret_path).map_err(|err| { - anyhow::anyhow!("Failed to read JWT secret from '{secret_path}': {err}") - })?; - - let jwt_secret = JwtSecret::from_hex(secret).map_err(|err| { - anyhow::anyhow!("Failed to parse JWT secret from '{secret_path}': {err}") - })?; - - managed_nodes.push(ClientConfig { url: rpc_url.clone(), jwt_secret }); - } - Ok(managed_nodes) - } - - /// initialise and return the Supervisor [`Config`]. 
- pub async fn init_config(&self) -> Result { - let dependency_set = self.init_dependency_set().await?; - let rollup_config_set = self.init_rollup_config_set().await?; - - let rpc_addr = SocketAddr::new(self.rpc_address, self.rpc_port); - let managed_nodes_config = self.init_managed_nodes_config()?; - - Ok(Config { - l1_rpc: self.l1_rpc.clone(), - l2_consensus_nodes_config: managed_nodes_config, - datadir: self.datadir.clone(), - rpc_addr, - enable_admin_api: self.enable_admin_api, - dependency_set, - rollup_config_set, - }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use clap::Parser; - use kona_interop::{ChainDependency, DependencySet}; - use kona_registry::HashMap; - use std::{fs::File, io::Write, net::Ipv4Addr}; - use tempfile::{NamedTempFile, tempdir}; - - // Helper struct to parse SupervisorArgs within a test CLI structure - #[derive(Parser, Debug)] - struct TestCli { - #[command(flatten)] - supervisor: SupervisorArgs, - } - - #[test] - fn test_supervisor_args_from_cli_required_only() { - let cli = TestCli::parse_from([ - "test_app", - "--l1-rpc", - "http://localhost:8545", - "--l2-consensus.nodes", - "http://node1:8551,http://node2:8551", - "--l2-consensus.jwt-secret", - "secret1,secret2", - "--datadir", - "/tmp/supervisor_data", - "--dependency-set", - "/path/to/deps.json", - "--rollup-config-paths", - "/configs/rollup-*.json", - ]); - - assert_eq!(cli.supervisor.l1_rpc, "http://localhost:8545"); - assert_eq!( - cli.supervisor.l2_consensus_nodes, - vec!["http://node1:8551".to_string(), "http://node2:8551".to_string()] - ); - assert_eq!( - cli.supervisor.l2_consensus_jwt_secret, - vec!["secret1".to_string(), "secret2".to_string()] - ); - assert_eq!(cli.supervisor.datadir, PathBuf::from("/tmp/supervisor_data")); - assert_eq!(cli.supervisor.datadir_sync_endpoint, None); - assert_eq!(cli.supervisor.dependency_set, PathBuf::from("/path/to/deps.json")); - assert_eq!(cli.supervisor.rollup_config_paths, PathBuf::from("/configs/rollup-*.json")); - 
assert_eq!(cli.supervisor.rpc_address, IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0))); - assert_eq!(cli.supervisor.rpc_port, 8545); - } - - #[test] - fn test_supervisor_args_from_cli_all_args() { - let cli = TestCli::parse_from([ - "test_app", - "--l1-rpc", - "http://l1.example.com", - "--l2-consensus.nodes", - "http://consensus1", - "--l2-consensus.jwt-secret", - "jwt_secret_value", - "--datadir", - "/data", - "--datadir.sync-endpoint", - "http://sync.example.com", - "--dependency-set", - "/path/to/deps.json", - "--rollup-config-paths", - "/configs/rollup-*.json", - "--rpc.addr", - "192.168.1.100", - "--rpc.port", - "9001", - ]); - - assert_eq!(cli.supervisor.l1_rpc, "http://l1.example.com"); - assert_eq!(cli.supervisor.l2_consensus_nodes, vec!["http://consensus1".to_string()]); - assert_eq!(cli.supervisor.l2_consensus_jwt_secret, vec!["jwt_secret_value".to_string()]); - assert_eq!(cli.supervisor.datadir, PathBuf::from("/data")); - assert_eq!( - cli.supervisor.datadir_sync_endpoint, - Some("http://sync.example.com".to_string()) - ); - assert_eq!(cli.supervisor.dependency_set, PathBuf::from("/path/to/deps.json")); - assert_eq!(cli.supervisor.rollup_config_paths, PathBuf::from("/configs/rollup-*.json")); - assert_eq!(cli.supervisor.rpc_address, IpAddr::V4(Ipv4Addr::new(192, 168, 1, 100))); - assert_eq!(cli.supervisor.rpc_port, 9001); - } - - #[tokio::test] - #[allow(clippy::zero_sized_map_values)] - async fn test_init_dependency_set_success() -> anyhow::Result<()> { - let mut temp_file = NamedTempFile::new()?; - let json_content = r#" - { - "dependencies": { - "1": { - "chainIndex": 10, - "activationTime": 1678886400, - "historyMinTime": 1609459200 - }, - "2": { - "chainIndex": 20, - "activationTime": 1678886401, - "historyMinTime": 1609459201 - } - }, - "overrideMessageExpiryWindow": 3600 - } - "#; - temp_file.write_all(json_content.as_bytes())?; - - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec![], - l2_consensus_jwt_secret: vec![], 
- datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: temp_file.path().to_path_buf(), - rollup_config_paths: PathBuf::from("dummy/rollup_config_*.json"), - rpc_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port: 8545, - enable_admin_api: false, - }; - - let result = args.init_dependency_set().await; - assert!(result.is_ok(), "init_dependency_set should succeed"); - - let loaded_depset = result.unwrap(); - let mut expected_dependencies = HashMap::default(); - expected_dependencies.insert(1, ChainDependency {}); - expected_dependencies.insert(2, ChainDependency {}); - - let expected_depset = DependencySet { - dependencies: expected_dependencies, - override_message_expiry_window: Some(3600), - }; - - assert_eq!(loaded_depset, expected_depset); - Ok(()) - } - - #[tokio::test] - async fn test_init_dependency_set_file_not_found() -> anyhow::Result<()> { - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec![], - l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("/path/to/non_existent_file.json"), - rollup_config_paths: PathBuf::from("dummy/rollup_config_*.json"), - rpc_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port: 8545, - enable_admin_api: false, - }; - - let result = args.init_dependency_set().await; - let err = result.expect_err("init_dependency_set should have failed due to file not found"); - let io_error = err.downcast_ref::(); - assert!(io_error.is_some(), "Error should be an std::io::Error, but was: {err:?}"); - assert_eq!(io_error.unwrap().kind(), std::io::ErrorKind::NotFound); - Ok(()) - } - - #[tokio::test] - async fn test_init_dependency_set_invalid_json() -> anyhow::Result<()> { - let mut temp_file = NamedTempFile::new()?; - temp_file.write_all(b"{ \"invalid_json\": ")?; // Malformed JSON - - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec![], - 
l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: temp_file.path().to_path_buf(), - rollup_config_paths: PathBuf::from("dummy/rollup_config_*.json"), - rpc_address: IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), - rpc_port: 8545, - enable_admin_api: false, - }; - - let result = args.init_dependency_set().await; - let err = result.expect_err("init_dependency_set should have failed due to invalid JSON"); - let json_error = err.downcast_ref::(); - assert!(json_error.is_some(), "Error should be a serde_json::Error, but was: {err:?}"); - Ok(()) - } - - #[tokio::test] - async fn test_get_rollup_configs_success() -> anyhow::Result<()> { - let dir = tempdir()?; - let config_path = dir.path().join("rollup-1.json"); - let mut file = File::create(&config_path)?; - let json_content = r#" - { - "genesis": { - "l1": { - "hash": "0x6c61a74b17fc1b6dc8ae9a2197e83871a20f57d1adf9c9acbf920bc44225744b", - "number": 18 - }, - "l2": { - "hash": "0xcde85e0f40c4c9921d40f2d4ee1a8794e76d615044a1176ae71fff0ee8cb2f40", - "number": 0 - }, - "l2_time": 1748932228, - "system_config": { - "batcherAddr": "0xd3f2c5afb2d76f5579f326b0cd7da5f5a4126c35", - "overhead": "0x0000000000000000000000000000000000000000000000000000000000000000", - "scalar": "0x010000000000000000000000000000000000000000000000000c5fc500000558", - "gasLimit": 60000000, - "eip1559Params": "0x0000000000000000", - "operatorFeeParams": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "block_time": 2, - "max_sequencer_drift": 600, - "seq_window_size": 3600, - "channel_timeout": 300, - "l1_chain_id": 3151908, - "l2_chain_id": 2151908, - "regolith_time": 0, - "canyon_time": 0, - "delta_time": 0, - "ecotone_time": 0, - "fjord_time": 0, - "granite_time": 0, - "holocene_time": 0, - "isthmus_time": 0, - "interop_time": 0, - "batch_inbox_address": "0x00a4fe4c6aaa0729d7699c387e7f281dd64afa2a", - "deposit_contract_address": 
"0xea0a3ca38bca6eb69cb7463b3fda7aa1616f9e09", - "l1_system_config_address": "0x67872e274ce2d6f2dc937196f8ec9f7af82fae7e", - "protocol_versions_address": "0xb74bb6ae1a1804d283d17e95620da9b9b0e6e0da", - "chain_op_config": { - "eip1559Elasticity": 6, - "eip1559Denominator": 50, - "eip1559DenominatorCanyon": 250 - } - } - "#; - file.write_all(json_content.as_bytes())?; - - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec![], - l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy".to_string()), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: dir.path().join("rollup-*.json"), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let configs = args.get_rollup_configs().await?; - assert_eq!(configs.len(), 1); - assert_eq!(configs[0].l2_chain_id, 2151908); - Ok(()) - } - - #[tokio::test] - async fn test_get_rollup_configs_no_files() -> anyhow::Result<()> { - let dir = tempdir()?; - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec![], - l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy".to_string()), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: dir.path().join("rollup-*.json"), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let configs = args.get_rollup_configs().await?; - assert!(configs.is_empty()); - Ok(()) - } - - #[tokio::test] - async fn test_get_rollup_configs_invalid_json() -> anyhow::Result<()> { - let dir = tempdir()?; - let config_path = dir.path().join("rollup-1.json"); - let mut file = File::create(&config_path)?; - file.write_all(b"{ invalid json }")?; - - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec![], - l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy".to_string()), - datadir_sync_endpoint: None, - 
dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: dir.path().join("rollup-*.json"), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let result = args.get_rollup_configs().await; - assert!(result.is_err(), "Should fail on invalid JSON"); - Ok(()) - } - - #[tokio::test] - async fn test_get_rollup_configs_empty_pattern() -> anyhow::Result<()> { - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec![], - l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: PathBuf::from(""), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - let result = args.get_rollup_configs().await; - assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("pattern is empty"),); - Ok(()) - } - - #[test] - fn test_init_managed_nodes_config_no_jwt_secret() { - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - l2_consensus_nodes: vec!["http://node1:8551".to_string()], - l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: PathBuf::from("dummy/rollup_config_*.json"), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - let result = args.init_managed_nodes_config(); - assert!(result.is_err()); - assert!(result.unwrap_err().to_string().contains("No JWT secrets provided"),); - } - - #[test] - fn test_init_managed_nodes_config_success_single() { - let dir = tempdir().unwrap(); - let secret_path = dir.path().join("s1"); - std::fs::write( - &secret_path, - "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - ) - .unwrap(); - - let args = SupervisorArgs { - l1_rpc: "dummy".into(), - l2_consensus_nodes: vec!["http://node1:8551".into()], - 
l2_consensus_jwt_secret: vec![secret_path.to_string_lossy().into()], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: PathBuf::from(""), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let res = args.init_managed_nodes_config(); - assert!(res.is_ok()); - let cfgs = res.unwrap(); - assert_eq!(cfgs.len(), 1); - assert_eq!(cfgs[0].url, "http://node1:8551"); - } - - #[test] - fn test_init_managed_nodes_config_multiple_nodes_single_secret_uses_default() { - let dir = tempdir().unwrap(); - let secret_path = dir.path().join("s1"); - std::fs::write( - &secret_path, - "0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", - ) - .unwrap(); - - let args = SupervisorArgs { - l1_rpc: "dummy".into(), - l2_consensus_nodes: vec!["http://n1:8551".into(), "http://n2:8551".into()], - l2_consensus_jwt_secret: vec![secret_path.to_string_lossy().into()], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: PathBuf::from(""), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let res = args.init_managed_nodes_config().unwrap(); - assert_eq!(res.len(), 2); - assert_eq!(res[0].url, "http://n1:8551"); - assert_eq!(res[1].url, "http://n2:8551"); - } - - #[test] - fn test_init_managed_nodes_config_missing_secret_file() { - let args = SupervisorArgs { - l1_rpc: "dummy".into(), - l2_consensus_nodes: vec!["http://node1:8551".into()], - l2_consensus_jwt_secret: vec!["/non/existent/path".into()], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: PathBuf::from(""), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let err = args.init_managed_nodes_config().unwrap_err(); - 
assert!(err.to_string().contains("Failed to read JWT secret")); - } - - #[test] - fn test_init_managed_nodes_config_invalid_jwt_hex() { - let dir = tempdir().unwrap(); - let secret_path = dir.path().join("bad"); - std::fs::write(&secret_path, "not-hex").unwrap(); - - let args = SupervisorArgs { - l1_rpc: "dummy".into(), - l2_consensus_nodes: vec!["http://node1:8551".into()], - l2_consensus_jwt_secret: vec![secret_path.to_string_lossy().into()], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: PathBuf::from(""), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let err = args.init_managed_nodes_config().unwrap_err(); - assert!(err.to_string().contains("Failed to parse JWT secret")); - } - - #[test] - fn test_init_managed_nodes_config_empty_nodes_returns_empty() { - let args = SupervisorArgs { - l1_rpc: "dummy".to_string(), - // clap/env may produce [""] — ensure it's filtered to empty - l2_consensus_nodes: vec![String::new()], - l2_consensus_jwt_secret: vec![], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: PathBuf::from("dummy.json"), - rollup_config_paths: PathBuf::from(""), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - let res = args.init_managed_nodes_config(); - assert!(res.is_ok()); - assert!(res.unwrap().is_empty()); - } - - #[tokio::test] - async fn test_init_config_success() -> anyhow::Result<()> { - use std::{fs::File as StdFile, io::Write}; - - // Create a temp dependency set file - let mut dep_file = NamedTempFile::new()?; - let dep_json = r#" - { - "dependencies": { - "1": { - "chainIndex": 10, - "activationTime": 1678886400, - "historyMinTime": 1609459200 - } - }, - "overrideMessageExpiryWindow": 3600 - } - "#; - dep_file.write_all(dep_json.as_bytes())?; - - // Create a temp dir and rollup config file - let rollup_dir = 
tempdir()?; - let rollup_path = rollup_dir.path().join("rollup-1.json"); - let mut rollup_file = StdFile::create(&rollup_path)?; - let rollup_json = r#" - { - "genesis": { - "l1": { - "hash": "0x6c61a74b17fc1b6dc8ae9a2197e83871a20f57d1adf9c9acbf920bc44225744b", - "number": 18 - }, - "l2": { - "hash": "0xcde85e0f40c4c9921d40f2d4ee1a8794e76d615044a1176ae71fff0ee8cb2f40", - "number": 0 - }, - "l2_time": 1748932228, - "system_config": { - "batcherAddr": "0xd3f2c5afb2d76f5579f326b0cd7da5f5a4126c35", - "overhead": "0x0000000000000000000000000000000000000000000000000000000000000000", - "scalar": "0x010000000000000000000000000000000000000000000000000c5fc500000558", - "gasLimit": 60000000, - "eip1559Params": "0x0000000000000000", - "operatorFeeParams": "0x0000000000000000000000000000000000000000000000000000000000000000" - } - }, - "block_time": 2, - "max_sequencer_drift": 600, - "seq_window_size": 3600, - "channel_timeout": 300, - "l1_chain_id": 3151908, - "l2_chain_id": 2151908, - "regolith_time": 0, - "canyon_time": 0, - "delta_time": 0, - "ecotone_time": 0, - "fjord_time": 0, - "granite_time": 0, - "holocene_time": 0, - "isthmus_time": 0, - "interop_time": 0, - "batch_inbox_address": "0x00a4fe4c6aaa0729d7699c387e7f281dd64afa2a", - "deposit_contract_address": "0xea0a3ca38bca6eb69cb7463b3fda7aa1616f9e09", - "l1_system_config_address": "0x67872e274ce2d6f2dc937196f8ec9f7af82fae7e", - "protocol_versions_address": "0xb74bb6ae1a1804d283d17e95620da9b9b0e6e0da", - "chain_op_config": { - "eip1559Elasticity": 6, - "eip1559Denominator": 50, - "eip1559DenominatorCanyon": 250 - } - } - "#; - rollup_file.write_all(rollup_json.as_bytes())?; - - let args = SupervisorArgs { - l1_rpc: "http://localhost:8545".to_string(), - l2_consensus_nodes: vec!["http://node1:8551".to_string()], - l2_consensus_jwt_secret: vec!["secret1".to_string()], - datadir: PathBuf::from("dummy"), - datadir_sync_endpoint: None, - dependency_set: dep_file.path().to_path_buf(), - rollup_config_paths: 
rollup_dir.path().join("rollup-*.json"), - rpc_address: "127.0.0.1".parse().unwrap(), - rpc_port: 8545, - enable_admin_api: false, - }; - - // This will fail at the L1 RPC call unless you mock RootProvider. - // So, for a pure unit test, you may want to mock or skip the L1 RPC part. - let result = args.init_config().await; - assert!(result.is_err() || result.is_ok(), "Should not panic"); - - // If you want to check up to the point before the L1 RPC, you can test init_dependency_set - // and get_rollup_configs separately. - - Ok(()) - } -} diff --git a/rust/kona/bin/supervisor/src/main.rs b/rust/kona/bin/supervisor/src/main.rs deleted file mode 100644 index 36ef944f27438..0000000000000 --- a/rust/kona/bin/supervisor/src/main.rs +++ /dev/null @@ -1,24 +0,0 @@ -#![doc = include_str!("../README.md")] -#![doc( - html_logo_url = "https://raw.githubusercontent.com/ethereum-optimism/optimism/develop/rust/kona/assets/square.png", - html_favicon_url = "https://raw.githubusercontent.com/ethereum-optimism/optimism/develop/rust/kona/assets/favicon.ico", - issue_tracker_base_url = "https://github.com/ethereum-optimism/optimism/issues/" -)] -#![cfg_attr(docsrs, feature(doc_cfg))] - -pub mod cli; -pub mod flags; -pub mod metrics; -pub(crate) mod version; - -use clap::Parser; - -fn main() { - kona_cli::sigsegv_handler::install(); - kona_cli::backtrace::enable(); - - if let Err(err) = cli::Cli::parse().run() { - eprintln!("Error: {err:?}"); - std::process::exit(1); - } -} diff --git a/rust/kona/bin/supervisor/src/metrics/mod.rs b/rust/kona/bin/supervisor/src/metrics/mod.rs deleted file mode 100644 index 4257338cec0e8..0000000000000 --- a/rust/kona/bin/supervisor/src/metrics/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! 
Metrics module - -mod version; -pub use version::VersionInfo; diff --git a/rust/kona/bin/supervisor/src/metrics/version.rs b/rust/kona/bin/supervisor/src/metrics/version.rs deleted file mode 100644 index 13fa2c273cf2a..0000000000000 --- a/rust/kona/bin/supervisor/src/metrics/version.rs +++ /dev/null @@ -1,59 +0,0 @@ -//! [`VersionInfo`] metrics -//! -//! Derived from [`reth-node-core`'s type][reth-version-info] -//! -//! [reth-version-info]: https://github.com/paradigmxyz/reth/blob/805fb1012cd1601c3b4fe9e8ca2d97c96f61355b/crates/node/metrics/src/version.rs#L6 - -use metrics::gauge; - -/// Contains version information for the application and allows for exposing the contained -/// information as a prometheus metric. -#[derive(Debug, Clone)] -pub struct VersionInfo { - /// The version of the application. - pub version: &'static str, - /// The build timestamp of the application. - pub build_timestamp: &'static str, - /// The cargo features enabled for the build. - pub cargo_features: &'static str, - /// The Git SHA of the build. - pub git_sha: &'static str, - /// The target triple for the build. - pub target_triple: &'static str, - /// The build profile (e.g., debug or release). - pub build_profile: &'static str, -} - -impl VersionInfo { - /// Creates a new instance of [`VersionInfo`] from the constants defined in [`crate::version`] - /// at compile time. - pub const fn from_build() -> Self { - Self { - version: crate::version::CARGO_PKG_VERSION, - build_timestamp: crate::version::VERGEN_BUILD_TIMESTAMP, - cargo_features: crate::version::VERGEN_CARGO_FEATURES, - git_sha: crate::version::VERGEN_GIT_SHA, - target_triple: crate::version::VERGEN_CARGO_TARGET_TRIPLE, - build_profile: crate::version::BUILD_PROFILE_NAME, - } - } - - /// Exposes kona-supervisor's version information over prometheus. - pub fn register_version_metrics(&self) { - // If no features are enabled, the string will be empty, and the metric will not be - // reported. 
Report "none" if the string is empty. - let features = if self.cargo_features.is_empty() { "none" } else { self.cargo_features }; - - let labels: [(&str, &str); 6] = [ - ("version", self.version), - ("build_timestamp", self.build_timestamp), - ("cargo_features", features), - ("git_sha", self.git_sha), - ("target_triple", self.target_triple), - ("build_profile", self.build_profile), - ]; - - let gauge = gauge!("kona_supervisor_info", &labels); - gauge.set(1); - } -} diff --git a/rust/kona/bin/supervisor/src/version.rs b/rust/kona/bin/supervisor/src/version.rs deleted file mode 100644 index 6fa1e8b73d0f2..0000000000000 --- a/rust/kona/bin/supervisor/src/version.rs +++ /dev/null @@ -1,19 +0,0 @@ -//! Version information for kona-supervisor. - -/// The latest version from Cargo.toml. -pub(crate) const CARGO_PKG_VERSION: &str = env!("CARGO_PKG_VERSION"); - -/// The 8 character short SHA of the latest commit. -pub(crate) const VERGEN_GIT_SHA: &str = env!("VERGEN_GIT_SHA_SHORT"); - -/// The build timestamp. -pub(crate) const VERGEN_BUILD_TIMESTAMP: &str = env!("VERGEN_BUILD_TIMESTAMP"); - -/// The target triple. -pub(crate) const VERGEN_CARGO_TARGET_TRIPLE: &str = env!("VERGEN_CARGO_TARGET_TRIPLE"); - -/// The build features. -pub(crate) const VERGEN_CARGO_FEATURES: &str = env!("VERGEN_CARGO_FEATURES"); - -/// The build profile name. 
-pub(crate) const BUILD_PROFILE_NAME: &str = env!("KONA_SUPERVISOR_BUILD_PROFILE"); diff --git a/rust/kona/crates/supervisor/core/Cargo.toml b/rust/kona/crates/supervisor/core/Cargo.toml deleted file mode 100644 index 020aeba6f946c..0000000000000 --- a/rust/kona/crates/supervisor/core/Cargo.toml +++ /dev/null @@ -1,65 +0,0 @@ -[package] -name = "kona-supervisor-core" -version = "0.1.0" - -edition.workspace = true -license.workspace = true -rust-version.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -keywords.workspace = true -categories.workspace = true -exclude.workspace = true - -[dependencies] -# workspace -kona-interop.workspace = true -kona-protocol.workspace = true -kona-supervisor-types.workspace = true -kona-supervisor-rpc = { workspace = true, features = ["jsonrpsee", "client"] } -kona-supervisor-storage.workspace = true -kona-supervisor-metrics.workspace = true -kona-genesis.workspace = true - -# alloy -alloy-eips.workspace = true -alloy-network.workspace = true -alloy-provider = { workspace = true, features = ["reqwest"] } -alloy-primitives = { workspace = true, features = ["map", "rlp", "serde"] } -alloy-rpc-types-engine = { workspace = true, features = ["jwt", "serde"] } -alloy-rpc-client.workspace = true -alloy-rpc-types-eth.workspace = true -alloy-consensus.workspace = true - -# op-alloy -op-alloy-rpc-types = { workspace = true, features = ["jsonrpsee"] } -op-alloy-consensus.workspace = true - -# jsonrpsee -jsonrpsee = { workspace = true, features = [ "macros", "server", "client", "ws-client" ] } - -# general -async-trait.workspace = true -serde.workspace = true -serde_json.workspace = true -tracing.workspace = true -thiserror.workspace = true -tokio = { workspace = true, features = ["sync", "macros"] } -tokio-util.workspace = true -auto_impl.workspace = true -futures = { workspace = true } -derive_more = { workspace = true, features = ["try_from"] } - -# `metrics` feature -metrics = { workspace = 
true } - -[dev-dependencies] -serde_json.workspace = true -tempfile.workspace = true -alloy-transport.workspace = true -kona-interop = {workspace = true, features = ["std", "test-utils"]} -mockall.workspace = true - -[lints] -workspace = true diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/chain.rs b/rust/kona/crates/supervisor/core/src/chain_processor/chain.rs deleted file mode 100644 index e1238ab83f1e9..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/chain.rs +++ /dev/null @@ -1,147 +0,0 @@ -use super::handlers::{ - CrossSafeHandler, CrossUnsafeHandler, EventHandler, FinalizedHandler, InvalidationHandler, - OriginHandler, ReplacementHandler, SafeBlockHandler, UnsafeBlockHandler, -}; -use crate::{ - LogIndexer, ProcessorState, - event::ChainEvent, - syncnode::{BlockProvider, ManagedNodeCommand}, -}; -use alloy_primitives::ChainId; -use kona_interop::InteropValidator; -use kona_supervisor_storage::{ - DerivationStorage, HeadRefStorageWriter, LogStorage, StorageRewinder, -}; -use std::{fmt::Debug, sync::Arc}; -use tokio::sync::mpsc; -use tracing::debug; - -/// Represents a task that processes chain events from a managed node. -/// It listens for events emitted by the managed node and handles them accordingly. -#[derive(Debug)] -pub struct ChainProcessor { - chain_id: ChainId, - metrics_enabled: Option, - - // state - state: ProcessorState, - - // Handlers for different types of chain events. 
- unsafe_handler: UnsafeBlockHandler, - safe_handler: SafeBlockHandler, - origin_handler: OriginHandler, - invalidation_handler: InvalidationHandler, - replacement_handler: ReplacementHandler, - finalized_handler: FinalizedHandler, - cross_unsafe_handler: CrossUnsafeHandler, - cross_safe_handler: CrossSafeHandler, -} - -impl ChainProcessor -where - P: BlockProvider + 'static, - V: InteropValidator + 'static, - W: LogStorage + DerivationStorage + HeadRefStorageWriter + StorageRewinder + 'static, -{ - /// Creates a new [`ChainProcessor`]. - pub fn new( - validator: Arc, - chain_id: ChainId, - log_indexer: Arc>, - db_provider: Arc, - managed_node_sender: mpsc::Sender, - ) -> Self { - let unsafe_handler = UnsafeBlockHandler::new( - chain_id, - validator.clone(), - db_provider.clone(), - log_indexer.clone(), - ); - - let safe_handler = SafeBlockHandler::new( - chain_id, - managed_node_sender.clone(), - db_provider.clone(), - validator, - log_indexer.clone(), - ); - - let origin_handler = - OriginHandler::new(chain_id, managed_node_sender.clone(), db_provider.clone()); - - let invalidation_handler = - InvalidationHandler::new(chain_id, managed_node_sender.clone(), db_provider.clone()); - - let replacement_handler = - ReplacementHandler::new(chain_id, log_indexer, db_provider.clone()); - - let finalized_handler = - FinalizedHandler::new(chain_id, managed_node_sender.clone(), db_provider); - let cross_unsafe_handler = CrossUnsafeHandler::new(chain_id, managed_node_sender.clone()); - let cross_safe_handler = CrossSafeHandler::new(chain_id, managed_node_sender); - - Self { - chain_id, - metrics_enabled: None, - - state: ProcessorState::new(), - - // Handlers for different types of chain events. - unsafe_handler, - safe_handler, - origin_handler, - invalidation_handler, - replacement_handler, - finalized_handler, - cross_unsafe_handler, - cross_safe_handler, - } - } - - /// Enables metrics on the database environment. 
- pub fn with_metrics(mut self) -> Self { - self.metrics_enabled = Some(true); - super::Metrics::init(self.chain_id); - self - } - - /// Handles a chain event by delegating it to the appropriate handler. - pub async fn handle_event(&mut self, event: ChainEvent) { - let result = match event { - ChainEvent::UnsafeBlock { block } => { - self.unsafe_handler.handle(block, &mut self.state).await - } - ChainEvent::DerivedBlock { derived_ref_pair } => { - self.safe_handler.handle(derived_ref_pair, &mut self.state).await - } - ChainEvent::DerivationOriginUpdate { origin } => { - self.origin_handler.handle(origin, &mut self.state).await - } - ChainEvent::InvalidateBlock { block } => { - self.invalidation_handler.handle(block, &mut self.state).await - } - ChainEvent::BlockReplaced { replacement } => { - self.replacement_handler.handle(replacement, &mut self.state).await - } - ChainEvent::FinalizedSourceUpdate { finalized_source_block } => { - self.finalized_handler.handle(finalized_source_block, &mut self.state).await - } - ChainEvent::CrossUnsafeUpdate { block } => { - self.cross_unsafe_handler.handle(block, &mut self.state).await - } - ChainEvent::CrossSafeUpdate { derived_ref_pair } => { - self.cross_safe_handler.handle(derived_ref_pair, &mut self.state).await - } - }; - - if let Err(err) = result { - debug!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %err, - ?event, - "Failed to process event" - ); - } - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/error.rs b/rust/kona/crates/supervisor/core/src/chain_processor/error.rs deleted file mode 100644 index 31ef4feb931e5..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/error.rs +++ /dev/null @@ -1,19 +0,0 @@ -use crate::logindexer::LogIndexerError; -use kona_supervisor_storage::StorageError; -use thiserror::Error; - -/// Errors that may occur while processing chains in the supervisor core. 
-#[derive(Debug, Error, PartialEq, Eq)] -pub enum ChainProcessorError { - /// Represents an error that occurred while interacting with the storage layer. - #[error(transparent)] - StorageError(#[from] StorageError), - - /// Represents an error that occurred while indexing logs. - #[error(transparent)] - LogIndexerError(#[from] LogIndexerError), - - /// Represents an error that occurred while sending an event to the channel. - #[error("failed to send event to channel: {0}")] - ChannelSendFailed(String), -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/cross_chain.rs b/rust/kona/crates/supervisor/core/src/chain_processor/handlers/cross_chain.rs deleted file mode 100644 index 888b341a72707..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/cross_chain.rs +++ /dev/null @@ -1,272 +0,0 @@ -use super::EventHandler; -use crate::{ - ChainProcessorError, ProcessorState, chain_processor::Metrics, syncnode::ManagedNodeCommand, -}; -use alloy_primitives::ChainId; -use async_trait::async_trait; -use derive_more::Constructor; -use kona_interop::DerivedRefPair; -use kona_protocol::BlockInfo; -use tokio::sync::mpsc; -use tracing::{trace, warn}; - -/// Handler for cross unsafe blocks. -/// This handler processes cross unsafe blocks by updating the managed node. 
-#[derive(Debug, Constructor)] -pub struct CrossUnsafeHandler { - chain_id: ChainId, - managed_node_sender: mpsc::Sender, -} - -#[async_trait] -impl EventHandler for CrossUnsafeHandler { - async fn handle( - &self, - block: BlockInfo, - _state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = block.number, - "Processing cross unsafe block" - ); - - let result = self.inner_handle(block).await; - Metrics::record_block_processing(self.chain_id, Metrics::BLOCK_TYPE_CROSS_UNSAFE, &result); - - result - } -} - -impl CrossUnsafeHandler { - async fn inner_handle(&self, block: BlockInfo) -> Result { - self.managed_node_sender - .send(ManagedNodeCommand::UpdateCrossUnsafe { block_id: block.id() }) - .await - .map_err(|err| { - warn!( - target: "supervisor::chain_processor::managed_node", - chain_id = self.chain_id, - %block, - %err, - "Failed to send cross unsafe block update" - ); - ChainProcessorError::ChannelSendFailed(err.to_string()) - })?; - Ok(block) - } -} - -/// Handler for cross safe blocks. -/// This handler processes cross safe blocks by updating the managed node. 
-#[derive(Debug, Constructor)] -pub struct CrossSafeHandler { - chain_id: ChainId, - managed_node_sender: mpsc::Sender, -} - -#[async_trait] -impl EventHandler for CrossSafeHandler { - async fn handle( - &self, - derived_ref_pair: DerivedRefPair, - _state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - "Processing cross safe block" - ); - - let result = self.inner_handle(derived_ref_pair).await; - Metrics::record_block_processing(self.chain_id, Metrics::BLOCK_TYPE_CROSS_SAFE, &result); - result - } -} - -impl CrossSafeHandler { - async fn inner_handle( - &self, - derived_ref_pair: DerivedRefPair, - ) -> Result { - self.managed_node_sender - .send(ManagedNodeCommand::UpdateCrossSafe { - source_block_id: derived_ref_pair.source.id(), - derived_block_id: derived_ref_pair.derived.id(), - }) - .await - .map_err(|err| { - warn!( - target: "supervisor::chain_processor::managed_node", - chain_id = self.chain_id, - %derived_ref_pair, - %err, - "Failed to send cross safe block update" - ); - ChainProcessorError::ChannelSendFailed(err.to_string()) - })?; - Ok(derived_ref_pair.derived) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::{ - BlockProvider, ManagedNodeController, ManagedNodeDataProvider, ManagedNodeError, - }; - use alloy_primitives::B256; - use alloy_rpc_types_eth::BlockNumHash; - use async_trait::async_trait; - use kona_interop::DerivedRefPair; - use kona_protocol::BlockInfo; - use kona_supervisor_types::{BlockSeal, OutputV0, Receipts}; - use mockall::mock; - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl BlockProvider for Node { - async fn fetch_receipts(&self, _block_hash: B256) -> Result; - async fn block_by_number(&self, _number: u64) -> Result; - } - - #[async_trait] - impl ManagedNodeDataProvider for Node { - async fn output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - 
async fn pending_output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn l2_block_ref_by_timestamp( - &self, - _timestamp: u64, - ) -> Result; - } - - #[async_trait] - impl ManagedNodeController for Node { - async fn update_finalized( - &self, - _finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn reset(&self) -> Result<(), ManagedNodeError>; - - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; - } - ); - - #[tokio::test] - async fn test_handle_cross_unsafe_update_triggers() { - use crate::syncnode::ManagedNodeCommand; - - let (tx, mut rx) = mpsc::channel(8); - let chain_id = 1; - let handler = CrossUnsafeHandler::new(chain_id, tx); - - let block = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - let mut state = ProcessorState::new(); - - // Call the handler - let result = handler.handle(block, &mut state).await; - assert!(result.is_ok()); - - // The handler should send the correct command - if let Some(ManagedNodeCommand::UpdateCrossUnsafe { block_id }) = rx.recv().await { - assert_eq!(block_id, block.id()); - } else { - panic!("Expected UpdateCrossUnsafe command"); - } - } - - #[tokio::test] - async fn test_handle_cross_unsafe_update_error() { - let (tx, rx) = mpsc::channel(8); - let chain_id = 1; - let handler = CrossUnsafeHandler::new(chain_id, tx); - - // Drop the receiver to simulate a send error - drop(rx); - - let block = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - let mut state = ProcessorState::new(); - - // Call the handler, which should now error - let result = handler.handle(block, &mut state).await; - 
assert!(result.is_err()); - } - - #[tokio::test] - async fn test_handle_cross_safe_update_triggers() { - use crate::syncnode::ManagedNodeCommand; - - let (tx, mut rx) = mpsc::channel(8); - let chain_id = 1; - let handler = CrossSafeHandler::new(chain_id, tx); - - let derived = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - let source = - BlockInfo { number: 1, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - let derived_ref_pair = DerivedRefPair { source, derived }; - let mut state = ProcessorState::new(); - - // Call the handler - let result = handler.handle(derived_ref_pair, &mut state).await; - assert!(result.is_ok()); - - // The handler should send the correct command - if let Some(ManagedNodeCommand::UpdateCrossSafe { source_block_id, derived_block_id }) = - rx.recv().await - { - assert_eq!(source_block_id, source.id()); - assert_eq!(derived_block_id, derived.id()); - } else { - panic!("Expected UpdateCrossSafe command"); - } - } - - #[tokio::test] - async fn test_handle_cross_safe_update_error() { - let (tx, rx) = mpsc::channel(8); - let chain_id = 1; - let handler = CrossSafeHandler::new(chain_id, tx); - - // Drop the receiver to simulate a send error - drop(rx); - - let derived = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - let source = - BlockInfo { number: 1, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - let derived_ref_pair = DerivedRefPair { source, derived }; - let mut state = ProcessorState::new(); - - // Call the handler, which should now error - let result = handler.handle(derived_ref_pair, &mut state).await; - assert!(result.is_err()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/finalized.rs b/rust/kona/crates/supervisor/core/src/chain_processor/handlers/finalized.rs deleted file mode 100644 index c081c5d026433..0000000000000 --- 
a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/finalized.rs +++ /dev/null @@ -1,283 +0,0 @@ -use super::EventHandler; -use crate::{ - ChainProcessorError, ProcessorState, chain_processor::Metrics, syncnode::ManagedNodeCommand, -}; -use alloy_primitives::ChainId; -use async_trait::async_trait; -use derive_more::Constructor; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::HeadRefStorageWriter; -use std::sync::Arc; -use tokio::sync::mpsc; -use tracing::{trace, warn}; - -/// Handler for finalized block updates. -/// This handler processes finalized block updates by updating the managed node and state manager. -#[derive(Debug, Constructor)] -pub struct FinalizedHandler { - chain_id: ChainId, - managed_node_sender: mpsc::Sender, - db_provider: Arc, -} - -#[async_trait] -impl EventHandler for FinalizedHandler -where - W: HeadRefStorageWriter + Send + Sync + 'static, -{ - async fn handle( - &self, - finalized_source_block: BlockInfo, - _state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = finalized_source_block.number, - "Processing finalized L1 update" - ); - - let result = self.inner_handle(finalized_source_block).await; - Metrics::record_block_processing(self.chain_id, Metrics::BLOCK_TYPE_FINALIZED, &result); - - result - } -} - -impl FinalizedHandler -where - W: HeadRefStorageWriter + Send + Sync + 'static, -{ - async fn inner_handle( - &self, - finalized_source_block: BlockInfo, - ) -> Result { - let finalized_derived_block = self - .db_provider - .update_finalized_using_source(finalized_source_block) - .inspect_err(|err| { - warn!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - %finalized_source_block, - %err, - "Failed to update finalized block using source" - ); - })?; - - self.managed_node_sender - .send(ManagedNodeCommand::UpdateFinalized { block_id: finalized_derived_block.id() }) - .await - .map_err(|err| { - warn!( - 
target: "supervisor::chain_processor::managed_node", - chain_id = self.chain_id, - %finalized_derived_block, - %err, - "Failed to send finalized block update" - ); - ChainProcessorError::ChannelSendFailed(err.to_string()) - })?; - Ok(finalized_derived_block) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::{ - BlockProvider, ManagedNodeController, ManagedNodeDataProvider, ManagedNodeError, - }; - use alloy_primitives::B256; - use alloy_rpc_types_eth::BlockNumHash; - use async_trait::async_trait; - use kona_interop::DerivedRefPair; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{HeadRefStorageWriter, StorageError}; - use kona_supervisor_types::{BlockSeal, OutputV0, Receipts}; - use mockall::mock; - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl BlockProvider for Node { - async fn fetch_receipts(&self, _block_hash: B256) -> Result; - async fn block_by_number(&self, _number: u64) -> Result; - } - - #[async_trait] - impl ManagedNodeDataProvider for Node { - async fn output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn pending_output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn l2_block_ref_by_timestamp( - &self, - _timestamp: u64, - ) -> Result; - } - - #[async_trait] - impl ManagedNodeController for Node { - async fn update_finalized( - &self, - _finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn reset(&self) -> Result<(), ManagedNodeError>; - - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; - } - ); - - mock!( - #[derive(Debug)] - pub Db {} - - impl HeadRefStorageWriter for Db { - fn update_finalized_using_source( - &self, - 
block_info: BlockInfo, - ) -> Result; - - fn update_current_cross_unsafe( - &self, - block: &BlockInfo, - ) -> Result<(), StorageError>; - - fn update_current_cross_safe( - &self, - block: &BlockInfo, - ) -> Result; - } - ); - - #[tokio::test] - async fn test_handle_finalized_source_update_triggers() { - use crate::syncnode::ManagedNodeCommand; - - let mut mocknode = MockNode::new(); - let mut mockdb = MockDb::new(); - let mut state = ProcessorState::new(); - - let finalized_source_block = - BlockInfo { number: 99, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 1234578 }; - - // The finalized_derived_block returned by update_finalized_using_source - let finalized_derived_block = - BlockInfo { number: 5, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 1234578 }; - - // Expect update_finalized_using_source to be called with finalized_source_block - mockdb.expect_update_finalized_using_source().returning(move |block_info: BlockInfo| { - assert_eq!(block_info, finalized_source_block); - Ok(finalized_derived_block) - }); - - // Expect update_finalized to be called with the derived block's id - let finalized_derived_block_id = finalized_derived_block.id(); - mocknode.expect_update_finalized().returning(move |block_id| { - assert_eq!(block_id, finalized_derived_block_id); - Ok(()) - }); - - let writer = Arc::new(mockdb); - - // Set up the channel and spawn a task to handle the command - let (tx, mut rx) = mpsc::channel(8); - - let handler = FinalizedHandler::new( - 1, // chain_id - tx, writer, - ); - let result = handler.handle(finalized_source_block, &mut state).await; - assert!(result.is_ok()); - - // The handler should send the correct command - if let Some(ManagedNodeCommand::UpdateFinalized { block_id }) = rx.recv().await { - assert_eq!(block_id, finalized_derived_block.id()); - } else { - panic!("Expected UpdateFinalized command"); - } - } - - #[tokio::test] - async fn test_handle_finalized_source_update_db_error() { - let mut mocknode = 
MockNode::new(); - let mut mockdb = MockDb::new(); - let mut state = ProcessorState::new(); - - let finalized_source_block = - BlockInfo { number: 99, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 1234578 }; - - // DB returns error - mockdb - .expect_update_finalized_using_source() - .returning(|_block_info: BlockInfo| Err(StorageError::DatabaseNotInitialised)); - - // Managed node's update_finalized should NOT be called - mocknode.expect_update_finalized().never(); - - let writer = Arc::new(mockdb); - let (tx, mut rx) = mpsc::channel(8); - - let handler = FinalizedHandler::new( - 1, // chain_id - tx, writer, - ); - let result = handler.handle(finalized_source_block, &mut state).await; - assert!(result.is_err()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_finalized_source_update_managed_node_error() { - let mut mockdb = MockDb::new(); - let mut state = ProcessorState::new(); - - let finalized_source_block = - BlockInfo { number: 99, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 1234578 }; - - let finalized_derived_block = - BlockInfo { number: 5, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 1234578 }; - - // DB returns the derived block as usual - mockdb.expect_update_finalized_using_source().returning(move |block_info: BlockInfo| { - assert_eq!(block_info, finalized_source_block); - Ok(finalized_derived_block) - }); - - let writer = Arc::new(mockdb); - - // Set up the channel and immediately drop the receiver to simulate a send error - let (tx, rx) = mpsc::channel(8); - drop(rx); - - let handler = FinalizedHandler::new( - 1, // chain_id - tx, writer, - ); - let result = handler.handle(finalized_source_block, &mut state).await; - assert!(result.is_err()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/invalidation.rs b/rust/kona/crates/supervisor/core/src/chain_processor/handlers/invalidation.rs deleted file mode 100644 index 
093f46e42529f..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/invalidation.rs +++ /dev/null @@ -1,620 +0,0 @@ -use super::EventHandler; -use crate::{ - ChainProcessorError, LogIndexer, ProcessorState, - chain_processor::metrics::Metrics, - syncnode::{BlockProvider, ManagedNodeCommand}, -}; -use alloy_primitives::ChainId; -use async_trait::async_trait; -use derive_more::Constructor; -use kona_interop::{BlockReplacement, DerivedRefPair}; -use kona_protocol::BlockInfo; -use kona_supervisor_metrics::observe_metrics_for_result_async; -use kona_supervisor_storage::{DerivationStorage, LogStorage, StorageRewinder}; -use kona_supervisor_types::BlockSeal; -use std::sync::Arc; -use tokio::sync::mpsc; -use tracing::{debug, error, trace, warn}; - -/// Handler for block invalidation events. -/// This handler processes block invalidation by rewinding the state and updating the managed node. -#[derive(Debug, Constructor)] -pub struct InvalidationHandler { - chain_id: ChainId, - managed_node_sender: mpsc::Sender, - db_provider: Arc, -} - -#[async_trait] -impl EventHandler for InvalidationHandler -where - W: DerivationStorage + StorageRewinder + Send + Sync + 'static, -{ - async fn handle( - &self, - block: BlockInfo, - state: &mut ProcessorState, - ) -> Result { - observe_metrics_for_result_async!( - Metrics::BLOCK_INVALIDATION_SUCCESS_TOTAL, - Metrics::BLOCK_INVALIDATION_ERROR_TOTAL, - Metrics::BLOCK_INVALIDATION_LATENCY_SECONDS, - Metrics::BLOCK_INVALIDATION_METHOD_INVALIDATE_BLOCK, - async { - self.inner_handle(block, state).await - }, - "chain_id" => self.chain_id.to_string() - ) - } -} - -impl InvalidationHandler -where - W: DerivationStorage + StorageRewinder + Send + Sync + 'static, -{ - async fn inner_handle( - &self, - block: BlockInfo, - state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = block.number, - "Invalidating block" - ); - - if 
state.is_invalidated() { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = block.number, - "Invalidated block already set, skipping" - ); - return Ok(block); - } - - let source_block = self.db_provider.derived_to_source(block.id()).inspect_err(|err| { - warn!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - %block, - %err, - "Failed to get source block for invalidation" - ); - })?; - - self.db_provider.rewind(&block.id()).inspect_err(|err| { - warn!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - %block, - %err, - "Failed to rewind state for invalidation" - ); - })?; - - let block_seal = BlockSeal::new(block.hash, block.number, block.timestamp); - self.managed_node_sender - .send(ManagedNodeCommand::InvalidateBlock { seal: block_seal }) - .await - .map_err(|err| { - warn!( - target: "supervisor::chain_processor::managed_node", - chain_id = self.chain_id, - %block, - %err, - "Failed to send invalidate block command to managed node" - ); - ChainProcessorError::ChannelSendFailed(err.to_string()) - })?; - - state.set_invalidated(DerivedRefPair { source: source_block, derived: block }); - Ok(block) - } -} - -/// Handler for block replacement events. -/// This handler processes block replacements by resyncing the log and derivation storage. 
-#[derive(Debug, Constructor)] -pub struct ReplacementHandler { - chain_id: ChainId, - log_indexer: Arc>, - db_provider: Arc, -} - -#[async_trait] -impl EventHandler for ReplacementHandler -where - P: BlockProvider + 'static, - W: LogStorage + DerivationStorage + 'static, -{ - async fn handle( - &self, - replacement: BlockReplacement, - state: &mut ProcessorState, - ) -> Result { - observe_metrics_for_result_async!( - Metrics::BLOCK_REPLACEMENT_SUCCESS_TOTAL, - Metrics::BLOCK_REPLACEMENT_ERROR_TOTAL, - Metrics::BLOCK_REPLACEMENT_LATENCY_SECONDS, - Metrics::BLOCK_REPLACEMENT_METHOD_REPLACE_BLOCK, - async { - self.inner_handle(replacement, state).await - }, - "chain_id" => self.chain_id.to_string() - ) - } -} - -impl ReplacementHandler -where - P: BlockProvider + 'static, - W: LogStorage + DerivationStorage + 'static, -{ - async fn inner_handle( - &self, - replacement: BlockReplacement, - state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %replacement, - "Handling block replacement" - ); - - let invalidated_ref_pair = match state.get_invalidated() { - Some(block) => block, - None => { - debug!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %replacement, - "No invalidated block set, skipping replacement" - ); - return Ok(replacement.replacement); - } - }; - - if invalidated_ref_pair.derived.hash != replacement.invalidated { - debug!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - invalidated_block = %invalidated_ref_pair.derived, - replacement_block = %replacement.replacement, - "Invalidated block hash does not match replacement, skipping" - ); - return Ok(replacement.replacement); - } - - let derived_ref_pair = DerivedRefPair { - source: invalidated_ref_pair.source, - derived: replacement.replacement, - }; - - self.retry_with_resync_derived_block(derived_ref_pair).await?; - state.clear_invalidated(); - Ok(replacement.replacement) - } - - async fn 
retry_with_resync_derived_block( - &self, - derived_ref_pair: DerivedRefPair, - ) -> Result<(), ChainProcessorError> { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - derived_block_number = derived_ref_pair.derived.number, - "Retrying with resync of derived block" - ); - - self.log_indexer.process_and_store_logs(&derived_ref_pair.derived).await.inspect_err( - |err| { - error!( - target: "supervisor::chain_processor::log_indexer", - chain_id = self.chain_id, - %derived_ref_pair, - %err, - "Failed to process and store logs for derived block" - ); - }, - )?; - - self.db_provider.save_derived_block(derived_ref_pair).inspect_err(|err| { - error!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - %derived_ref_pair, - %err, - "Failed to save derived block" - ); - })?; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::{ - BlockProvider, ManagedNodeController, ManagedNodeDataProvider, ManagedNodeError, - }; - use alloy_primitives::B256; - use alloy_rpc_types_eth::BlockNumHash; - use async_trait::async_trait; - use kona_interop::DerivedRefPair; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{ - DerivationStorageReader, DerivationStorageWriter, LogStorageReader, LogStorageWriter, - StorageError, - }; - use kona_supervisor_types::{BlockSeal, Log, OutputV0, Receipts}; - use mockall::mock; - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl BlockProvider for Node { - async fn fetch_receipts(&self, _block_hash: B256) -> Result; - async fn block_by_number(&self, _number: u64) -> Result; - } - - #[async_trait] - impl ManagedNodeDataProvider for Node { - async fn output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn pending_output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn l2_block_ref_by_timestamp( - &self, - _timestamp: u64, - ) -> Result; - } - - #[async_trait] - impl ManagedNodeController for Node { 
- async fn update_finalized( - &self, - _finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn reset(&self) -> Result<(), ManagedNodeError>; - - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; - } - ); - - mock!( - #[derive(Debug)] - pub Db {} - - impl LogStorageWriter for Db { - fn initialise_log_storage( - &self, - block: BlockInfo, - ) -> Result<(), StorageError>; - - fn store_block_logs( - &self, - block: &BlockInfo, - logs: Vec, - ) -> Result<(), StorageError>; - } - - impl LogStorageReader for Db { - fn get_block(&self, block_number: u64) -> Result; - fn get_latest_block(&self) -> Result; - fn get_log(&self,block_number: u64,log_index: u32) -> Result; - fn get_logs(&self, block_number: u64) -> Result, StorageError>; - } - - impl DerivationStorageReader for Db { - fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result; - fn latest_derived_block_at_source(&self, source_block_id: BlockNumHash) -> Result; - fn latest_derivation_state(&self) -> Result; - fn get_source_block(&self, source_block_number: u64) -> Result; - fn get_activation_block(&self) -> Result; - } - - impl DerivationStorageWriter for Db { - fn initialise_derivation_storage( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_derived_block( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_source_block( - &self, - source: BlockInfo, - ) -> Result<(), StorageError>; - } - - impl StorageRewinder for Db { - fn rewind_log_storage(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind_to_source(&self, to: 
&BlockNumHash) -> Result, StorageError>; - } - ); - - #[tokio::test] - async fn test_handle_invalidate_block_already_set_skips() { - let mockdb = MockDb::new(); - let (tx, mut rx) = tokio::sync::mpsc::channel(1); - let mut state = ProcessorState::new(); - - let block = BlockInfo::new(B256::from([1u8; 32]), 42, B256::ZERO, 12345); - - // Set up state: invalidated_block is already set - state.set_invalidated(DerivedRefPair { source: block, derived: block }); - - let writer = Arc::new(mockdb); - - let handler = InvalidationHandler::new( - 1, // chain_id - tx, writer, - ); - - let result = handler.handle(block, &mut state).await; - assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_invalidate_block_derived_to_source_error() { - let mut mockdb = MockDb::new(); - let (tx, mut rx) = tokio::sync::mpsc::channel(1); - let mut state = ProcessorState::new(); - - let block = BlockInfo::new(B256::from([1u8; 32]), 42, B256::ZERO, 12345); - - mockdb.expect_derived_to_source().returning(move |_id| Err(StorageError::FutureData)); - - let writer = Arc::new(mockdb); - - let handler = InvalidationHandler::new( - 1, // chain_id - tx, writer, - ); - - let result = handler.handle(block, &mut state).await; - assert!(matches!(result, Err(ChainProcessorError::StorageError(StorageError::FutureData)))); - - // make sure invalidated_block is not set - assert!(state.get_invalidated().is_none()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_invalidate_block_rewind_error() { - let mut mockdb = MockDb::new(); - let (tx, mut rx) = tokio::sync::mpsc::channel(1); - let mut state = ProcessorState::new(); - - let block = BlockInfo::new(B256::from([1u8; 32]), 42, B256::ZERO, 12345); - - mockdb.expect_derived_to_source().returning(move |_id| Ok(block)); - mockdb.expect_rewind().returning(move |_to| Err(StorageError::DatabaseNotInitialised)); - - let 
writer = Arc::new(mockdb); - - let handler = InvalidationHandler::new( - 1, // chain_id - tx, writer, - ); - - let result = handler.handle(block, &mut state).await; - assert!(matches!( - result, - Err(ChainProcessorError::StorageError(StorageError::DatabaseNotInitialised)) - )); - - // make sure invalidated_block is not set - assert!(state.get_invalidated().is_none()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_invalidate_block_managed_node_error() { - let mut mockdb = MockDb::new(); - let (tx, rx) = tokio::sync::mpsc::channel(1); - let mut state = ProcessorState::new(); - - let block = BlockInfo::new(B256::from([1u8; 32]), 42, B256::ZERO, 12345); - - mockdb.expect_derived_to_source().returning(move |_id| Ok(block)); - mockdb.expect_rewind().returning(move |_to| Ok(())); - - let writer = Arc::new(mockdb); - - // Drop the receiver to simulate a send error to the managed node - drop(rx); - - let handler = InvalidationHandler::new( - 1, // chain_id - tx, writer, - ); - - let result = handler.handle(block, &mut state).await; - assert!(result.is_err()); - - // make sure invalidated_block is not set - assert!(state.get_invalidated().is_none()); - } - - #[tokio::test] - async fn test_handle_invalidate_block_success_sets_invalidated() { - let mut mockdb = MockDb::new(); - let (tx, mut rx) = tokio::sync::mpsc::channel(1); - let mut state = ProcessorState::new(); - - let derived_block = BlockInfo::new(B256::from([1u8; 32]), 42, B256::ZERO, 12345); - let source_block = BlockInfo::new(B256::from([2u8; 32]), 41, B256::ZERO, 12344); - - mockdb.expect_derived_to_source().returning(move |_id| Ok(source_block)); - mockdb.expect_rewind().returning(move |_to| Ok(())); - - let writer = Arc::new(mockdb); - - // Spawn a background task to receive and check the command - let derived_block_clone = derived_block; - - let handler = InvalidationHandler::new( - 1, // chain_id - tx, writer, - ); - - let result = 
handler.handle(derived_block, &mut state).await; - assert!(result.is_ok()); - - // make sure invalidated_block is set - let pair = state.get_invalidated().unwrap(); - assert_eq!(pair.derived, derived_block); - assert_eq!(pair.source, source_block); - - if let Some(ManagedNodeCommand::InvalidateBlock { seal }) = rx.recv().await { - assert_eq!(seal.hash, derived_block_clone.hash); - assert_eq!(seal.number, derived_block_clone.number); - assert_eq!(seal.timestamp, derived_block_clone.timestamp); - } else { - panic!("Expected InvalidateBlock command"); - } - } - - #[tokio::test] - async fn test_handle_block_replacement_no_invalidated_block() { - let mockdb = MockDb::new(); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - let replacement = BlockReplacement { - invalidated: B256::from([1u8; 32]), - replacement: BlockInfo::new(B256::from([2u8; 32]), 43, B256::ZERO, 12346), - }; - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = ReplacementHandler::new( - 1, // chain_id - log_indexer, - writer, - ); - - let result = handler.handle(replacement, &mut state).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_block_replacement_invalidated_hash_mismatch() { - let mockdb = MockDb::new(); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - let invalidated_block = BlockInfo::new(B256::from([3u8; 32]), 42, B256::ZERO, 12345); - let replacement = BlockReplacement { - invalidated: B256::from([1u8; 32]), // does not match invalidated_block.hash - replacement: BlockInfo::new(B256::from([2u8; 32]), 43, B256::ZERO, 12346), - }; - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - 
state.set_invalidated(DerivedRefPair { - source: invalidated_block, - derived: invalidated_block, - }); - - let handler = ReplacementHandler::new( - 1, // chain_id - log_indexer, - writer, - ); - - let result = handler.handle(replacement, &mut state).await; - assert!(result.is_ok()); - - // invalidated_block should remain set - let invalidated = state.get_invalidated(); - assert!(invalidated.is_some()); - } - - #[tokio::test] - async fn test_handle_block_replacement_success() { - let mut mockdb = MockDb::new(); - let mut mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - let source_block = BlockInfo::new(B256::from([1u8; 32]), 45, B256::ZERO, 12345); - let invalidated_block = BlockInfo::new(B256::from([1u8; 32]), 42, B256::ZERO, 12345); - let replacement_block = BlockInfo::new(B256::from([2u8; 32]), 42, B256::ZERO, 12346); - - mockdb.expect_save_derived_block().returning(move |_pair| Ok(())); - mockdb.expect_store_block_logs().returning(move |_block, _logs| Ok(())); - - mocknode.expect_fetch_receipts().returning(move |_block_hash| { - assert_eq!(_block_hash, replacement_block.hash); - Ok(Receipts::default()) - }); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - state.set_invalidated(DerivedRefPair { source: source_block, derived: invalidated_block }); - - let handler = ReplacementHandler::new( - 1, // chain_id - log_indexer, - writer, - ); - - let result = handler - .handle( - BlockReplacement { - invalidated: invalidated_block.hash, - replacement: replacement_block, - }, - &mut state, - ) - .await; - assert!(result.is_ok()); - - // invalidated_block should be cleared - assert!(state.get_invalidated().is_none()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/mod.rs b/rust/kona/crates/supervisor/core/src/chain_processor/handlers/mod.rs deleted file mode 
100644 index 41b41d0fa5316..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/mod.rs +++ /dev/null @@ -1,31 +0,0 @@ -//! This module contains various event handlers for processing different types of chain events. -mod cross_chain; -mod finalized; -mod invalidation; -mod origin; -mod safe_block; -mod unsafe_block; - -pub use cross_chain::{CrossSafeHandler, CrossUnsafeHandler}; -pub use finalized::FinalizedHandler; -pub use invalidation::{InvalidationHandler, ReplacementHandler}; -pub use origin::OriginHandler; -pub use safe_block::SafeBlockHandler; -pub use unsafe_block::UnsafeBlockHandler; - -use crate::{ChainProcessorError, ProcessorState}; -use async_trait::async_trait; -use kona_protocol::BlockInfo; - -/// [`EventHandler`] trait defines the interface for handling different types of events in the chain -/// processor. Each handler will implement this trait to process specific events like block updates, -/// invalidations, etc. -#[async_trait] -pub trait EventHandler { - /// Handle the event with the given state. - async fn handle( - &self, - event: E, - state: &mut ProcessorState, - ) -> Result; -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/origin.rs b/rust/kona/crates/supervisor/core/src/chain_processor/handlers/origin.rs deleted file mode 100644 index b7c4e503e53a2..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/origin.rs +++ /dev/null @@ -1,271 +0,0 @@ -use super::EventHandler; -use crate::{ChainProcessorError, ProcessorState, syncnode::ManagedNodeCommand}; -use alloy_primitives::ChainId; -use async_trait::async_trait; -use derive_more::Constructor; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{DerivationStorageWriter, StorageError}; -use std::sync::Arc; -use tokio::sync::mpsc; -use tracing::{debug, error, trace, warn}; - -/// Handler for origin updates in the chain. 
-#[derive(Debug, Constructor)] -pub struct OriginHandler { - chain_id: ChainId, - managed_node_sender: mpsc::Sender, - db_provider: Arc, -} - -#[async_trait] -impl EventHandler for OriginHandler -where - W: DerivationStorageWriter + Send + Sync + 'static, -{ - async fn handle( - &self, - origin: BlockInfo, - state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %origin, - "Processing derivation origin update" - ); - - if state.is_invalidated() { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %origin, - "Invalidated block set, skipping derivation origin update" - ); - return Ok(origin); - } - - match self.db_provider.save_source_block(origin) { - Ok(_) => Ok(origin), - Err(StorageError::BlockOutOfOrder) => { - debug!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %origin, - "Block out of order detected, resetting managed node" - ); - - self.managed_node_sender.send(ManagedNodeCommand::Reset {}).await.map_err( - |err| { - warn!( - target: "supervisor::chain_processor::managed_node", - chain_id = self.chain_id, - %origin, - %err, - "Failed to send reset command to managed node" - ); - ChainProcessorError::ChannelSendFailed(err.to_string()) - }, - )?; - Ok(origin) - } - Err(err) => { - error!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %origin, - %err, - "Failed to save source block during derivation origin update" - ); - Err(err.into()) - } - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::{ - BlockProvider, ManagedNodeController, ManagedNodeDataProvider, ManagedNodeError, - }; - use alloy_primitives::B256; - use alloy_rpc_types_eth::BlockNumHash; - use async_trait::async_trait; - use kona_interop::DerivedRefPair; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{DerivationStorageWriter, StorageError}; - use kona_supervisor_types::{BlockSeal, OutputV0, Receipts}; - use 
mockall::mock; - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl BlockProvider for Node { - async fn fetch_receipts(&self, _block_hash: B256) -> Result; - async fn block_by_number(&self, _number: u64) -> Result; - } - - #[async_trait] - impl ManagedNodeDataProvider for Node { - async fn output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn pending_output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn l2_block_ref_by_timestamp( - &self, - _timestamp: u64, - ) -> Result; - } - - #[async_trait] - impl ManagedNodeController for Node { - async fn update_finalized( - &self, - _finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn reset(&self) -> Result<(), ManagedNodeError>; - - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; - } - ); - - mock!( - #[derive(Debug)] - pub Db {} - - impl DerivationStorageWriter for Db { - fn initialise_derivation_storage( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_derived_block( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_source_block( - &self, - source: BlockInfo, - ) -> Result<(), StorageError>; - } - ); - - #[tokio::test] - async fn test_handle_derivation_origin_update_triggers() { - let mut mockdb = MockDb::new(); - let (tx, mut rx) = mpsc::channel(1); - let mut state = ProcessorState::new(); - - let origin = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - - let origin_clone = origin; - mockdb.expect_save_source_block().returning(move |block_info: BlockInfo| { - assert_eq!(block_info, origin_clone); - Ok(()) - }); 
- - let writer = Arc::new(mockdb); - - let handler = OriginHandler::new( - 1, // chain_id - tx, writer, - ); - - let result = handler.handle(origin, &mut state).await; - assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derivation_origin_update_block_out_of_order_triggers_reset() { - let mut mockdb = MockDb::new(); - let (tx, mut rx) = mpsc::channel(1); - let mut state = ProcessorState::new(); - - let origin = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - - mockdb.expect_save_source_block().returning(|_| Err(StorageError::BlockOutOfOrder)); - - let writer = Arc::new(mockdb); - - let handler = OriginHandler::new(1, tx, writer); - - let result = handler.handle(origin, &mut state).await; - assert!(result.is_ok()); - - // The handler should send the reset command - if rx.recv().await == Some(ManagedNodeCommand::Reset {}) { - // Command received successfully - } else { - panic!("Expected Reset command"); - } - } - - #[tokio::test] - async fn test_handle_derivation_origin_update_reset_fails() { - let mut mockdb = MockDb::new(); - let (tx, rx) = mpsc::channel(1); - let mut state = ProcessorState::new(); - - let origin = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 }; - - mockdb.expect_save_source_block().returning(|_| Err(StorageError::BlockOutOfOrder)); - - let writer = Arc::new(mockdb); - - drop(rx); // Simulate a send error by dropping the receiver - - let handler = OriginHandler::new(1, tx, writer); - - let result = handler.handle(origin, &mut state).await; - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_handle_derivation_origin_update_other_storage_error() { - let mut mockdb = MockDb::new(); - let (tx, mut rx) = mpsc::channel(1); - let mut state = ProcessorState::new(); - - let origin = - BlockInfo { number: 42, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 123456 
}; - - mockdb.expect_save_source_block().returning(|_| Err(StorageError::DatabaseNotInitialised)); - - let writer = Arc::new(mockdb); - - let handler = OriginHandler::new(1, tx, writer); - - let result = handler.handle(origin, &mut state).await; - assert!(result.is_err()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/safe_block.rs b/rust/kona/crates/supervisor/core/src/chain_processor/handlers/safe_block.rs deleted file mode 100644 index ed210d03c4078..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/safe_block.rs +++ /dev/null @@ -1,1026 +0,0 @@ -use super::EventHandler; -use crate::{ - ChainProcessorError, LogIndexer, ProcessorState, - chain_processor::Metrics, - syncnode::{BlockProvider, ManagedNodeCommand}, -}; -use alloy_primitives::ChainId; -use async_trait::async_trait; -use derive_more::Constructor; -use kona_interop::{DerivedRefPair, InteropValidator}; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{DerivationStorage, LogStorage, StorageError, StorageRewinder}; -use std::sync::Arc; -use tokio::sync::mpsc; -use tracing::{debug, error, info, trace, warn}; - -/// Handler for safe blocks. 
-#[derive(Debug, Constructor)] -pub struct SafeBlockHandler { - chain_id: ChainId, - managed_node_sender: mpsc::Sender, - db_provider: Arc, - validator: Arc, - log_indexer: Arc>, -} - -#[async_trait] -impl EventHandler for SafeBlockHandler -where - P: BlockProvider + 'static, - V: InteropValidator + 'static, - W: LogStorage + DerivationStorage + StorageRewinder + 'static, -{ - async fn handle( - &self, - derived_ref_pair: DerivedRefPair, - state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - %derived_ref_pair, - "Processing local safe derived block pair" - ); - - if state.is_invalidated() { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - "Invalidated block already set, skipping safe event processing" - ); - return Ok(derived_ref_pair.derived); - } - - let result = self.inner_handle(derived_ref_pair).await; - Metrics::record_block_processing(self.chain_id, Metrics::BLOCK_TYPE_LOCAL_SAFE, &result); - - result - } -} - -impl SafeBlockHandler -where - P: BlockProvider + 'static, - V: InteropValidator + 'static, - W: LogStorage + DerivationStorage + StorageRewinder + 'static, -{ - async fn inner_handle( - &self, - derived_ref_pair: DerivedRefPair, - ) -> Result { - if self.validator.is_post_interop(self.chain_id, derived_ref_pair.derived.timestamp) { - self.process_safe_derived_block(derived_ref_pair).await?; - return Ok(derived_ref_pair.derived); - } - - if self.validator.is_interop_activation_block(self.chain_id, derived_ref_pair.derived) { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - "Initialising derivation storage for interop activation block" - ); - - self.db_provider.initialise_derivation_storage(derived_ref_pair).inspect_err( - |err| { - error!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - %err, 
- "Failed to initialise derivation storage for interop activation block" - ); - }, - )?; - return Ok(derived_ref_pair.derived); - } - - Ok(derived_ref_pair.derived) - } - - async fn process_safe_derived_block( - &self, - derived_ref_pair: DerivedRefPair, - ) -> Result<(), ChainProcessorError> { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - "Processing safe derived block" - ); - - match self.db_provider.save_derived_block(derived_ref_pair) { - Ok(_) => Ok(()), - Err(StorageError::BlockOutOfOrder) => { - debug!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - "Block out of order detected, resetting managed node" - ); - - self.managed_node_sender.send(ManagedNodeCommand::Reset {}).await.map_err( - |err| { - warn!( - target: "supervisor::chain_processor::managed_node", - chain_id = self.chain_id, - %err, - "Failed to send reset command to managed node" - ); - ChainProcessorError::ChannelSendFailed(err.to_string()) - }, - )?; - Ok(()) - } - Err(StorageError::ReorgRequired) => { - info!( - target: "supervisor::chain_processor", - chain = self.chain_id, - derived_block = %derived_ref_pair.derived, - "Local derivation conflict detected — rewinding" - ); - - self.rewind_log_storage(&derived_ref_pair.derived).await?; - self.retry_with_resync_derived_block(derived_ref_pair).await?; - Ok(()) - } - Err(StorageError::FutureData) => { - debug!( - target: "supervisor::chain_processor", - chain = self.chain_id, - derived_block = %derived_ref_pair.derived, - "Future data detected — retrying with resync" - ); - - self.retry_with_resync_derived_block(derived_ref_pair).await - } - Err(err) => { - error!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - %err, - "Failed to save derived block pair" - ); - Err(err.into()) - } - } - } - - async fn 
rewind_log_storage( - &self, - derived_block: &BlockInfo, - ) -> Result<(), ChainProcessorError> { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = derived_block.number, - "Rewinding log storage for derived block" - ); - - let log_block = self.db_provider.get_block(derived_block.number).inspect_err(|err| { - warn!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - block_number = derived_block.number, - %err, - "Failed to get block for rewinding log storage" - ); - })?; - - self.db_provider.rewind_log_storage(&log_block.id()).inspect_err(|err| { - warn!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - block_number = derived_block.number, - %err, - "Failed to rewind log storage for derived block" - ); - })?; - Ok(()) - } - - async fn retry_with_resync_derived_block( - &self, - derived_ref_pair: DerivedRefPair, - ) -> Result<(), ChainProcessorError> { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - derived_block_number = derived_ref_pair.derived.number, - "Retrying with resync of derived block" - ); - - self.log_indexer.process_and_store_logs(&derived_ref_pair.derived).await.inspect_err( - |err| { - error!( - target: "supervisor::chain_processor::log_indexer", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - %err, - "Error resyncing logs for derived block" - ); - }, - )?; - - self.db_provider.save_derived_block(derived_ref_pair).inspect_err(|err| { - error!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - block_number = derived_ref_pair.derived.number, - %err, - "Error saving derived block after resync" - ); - })?; - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::{ - BlockProvider, ManagedNodeController, ManagedNodeDataProvider, ManagedNodeError, - }; - use alloy_primitives::B256; - use alloy_rpc_types_eth::BlockNumHash; - use 
async_trait::async_trait; - use kona_interop::{DerivedRefPair, InteropValidationError}; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{ - DerivationStorageReader, DerivationStorageWriter, HeadRefStorageWriter, LogStorageReader, - LogStorageWriter, StorageError, - }; - use kona_supervisor_types::{BlockSeal, Log, OutputV0, Receipts}; - use mockall::mock; - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl ManagedNodeDataProvider for Node { - async fn output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn pending_output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn l2_block_ref_by_timestamp( - &self, - _timestamp: u64, - ) -> Result; - } - - #[async_trait] - impl BlockProvider for Node { - async fn fetch_receipts(&self, _block_hash: B256) -> Result; - async fn block_by_number(&self, _number: u64) -> Result; - } - - #[async_trait] - impl ManagedNodeController for Node { - async fn update_finalized( - &self, - _finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn reset(&self) -> Result<(), ManagedNodeError>; - - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; - } - ); - - mock!( - #[derive(Debug)] - pub Db {} - - impl LogStorageWriter for Db { - fn initialise_log_storage( - &self, - block: BlockInfo, - ) -> Result<(), StorageError>; - - fn store_block_logs( - &self, - block: &BlockInfo, - logs: Vec, - ) -> Result<(), StorageError>; - } - - impl LogStorageReader for Db { - fn get_block(&self, block_number: u64) -> Result; - fn get_latest_block(&self) -> Result; - fn get_log(&self,block_number: u64,log_index: u32) -> Result; - fn get_logs(&self, block_number: 
u64) -> Result, StorageError>; - } - - impl DerivationStorageReader for Db { - fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result; - fn latest_derived_block_at_source(&self, source_block_id: BlockNumHash) -> Result; - fn latest_derivation_state(&self) -> Result; - fn get_source_block(&self, source_block_number: u64) -> Result; - fn get_activation_block(&self) -> Result; - } - - impl DerivationStorageWriter for Db { - fn initialise_derivation_storage( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_derived_block( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_source_block( - &self, - source: BlockInfo, - ) -> Result<(), StorageError>; - } - - impl HeadRefStorageWriter for Db { - fn update_finalized_using_source( - &self, - block_info: BlockInfo, - ) -> Result; - - fn update_current_cross_unsafe( - &self, - block: &BlockInfo, - ) -> Result<(), StorageError>; - - fn update_current_cross_safe( - &self, - block: &BlockInfo, - ) -> Result; - } - - impl StorageRewinder for Db { - fn rewind_log_storage(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind_to_source(&self, to: &BlockNumHash) -> Result, StorageError>; - } - ); - - mock! 
( - #[derive(Debug)] - pub Validator {} - - impl InteropValidator for Validator { - fn validate_interop_timestamps( - &self, - initiating_chain_id: ChainId, - initiating_timestamp: u64, - executing_chain_id: ChainId, - executing_timestamp: u64, - timeout: Option, - ) -> Result<(), InteropValidationError>; - - fn is_post_interop(&self, chain_id: ChainId, timestamp: u64) -> bool; - - fn is_interop_activation_block(&self, chain_id: ChainId, block: BlockInfo) -> bool; - } - ); - - #[tokio::test] - async fn test_handle_derived_event_skips_if_invalidated() { - let mockdb = MockDb::new(); - let mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - // Simulate invalidated state - state.set_invalidated(DerivedRefPair { - source: BlockInfo { - number: 1, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 2, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - }); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, - }, - }; - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new(1, tx, writer, Arc::new(mockvalidator), log_indexer); - - let result = handler.handle(block_pair, &mut state).await; - assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_pre_interop() { - let mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - 
mockvalidator.expect_is_post_interop().returning(|_, _| false); - mockvalidator.expect_is_interop_activation_block().returning(|_, _| false); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 999, - }, - }; - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - - let result = handler.handle(block_pair, &mut state).await; - assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_post_interop() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, - }, - }; - - mockdb.expect_save_derived_block().returning(move |_pair: DerivedRefPair| { - assert_eq!(_pair, block_pair); - Ok(()) - }); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - - let result = handler.handle(block_pair, &mut state).await; - 
assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_interop_activation() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| false); - mockvalidator.expect_is_interop_activation_block().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1001, - }, - }; - - mockdb.expect_initialise_derivation_storage().returning(move |_pair: DerivedRefPair| { - assert_eq!(_pair, block_pair); - Ok(()) - }); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - - let result = handler.handle(block_pair, &mut state).await; - assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_block_out_of_order_triggers_reset() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mut mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - 
timestamp: 1003, // post-interop - }, - }; - - // Simulate BlockOutOfOrder error - mockdb - .expect_save_derived_block() - .returning(move |_pair: DerivedRefPair| Err(StorageError::BlockOutOfOrder)); - - // Expect reset to be called - mocknode.expect_reset().returning(|| Ok(())); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - let result = handler.handle(block_pair, &mut state).await; - assert!(result.is_ok()); - - // Ensure reset command was sent - if let Some(cmd) = rx.recv().await { - assert!(matches!(cmd, ManagedNodeCommand::Reset {})); - } else { - panic!("Expected reset command to be sent"); - } - } - - #[tokio::test] - async fn test_handle_derived_event_block_out_of_order_triggers_reset_error() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }, - }; - - // Simulate BlockOutOfOrder error - mockdb - .expect_save_derived_block() - .returning(move |_pair: DerivedRefPair| Err(StorageError::BlockOutOfOrder)); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node), writer.clone())); - - drop(rx); // Simulate a send error by dropping the receiver - - let handler = SafeBlockHandler::new( - 1, // chain_id - 
tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - - let result = handler.handle(block_pair, &mut state).await; - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_block_triggers_reorg() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mut mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }, - }; - - let mut seq = mockall::Sequence::new(); - // Simulate ReorgRequired error - mockdb - .expect_save_derived_block() - .times(1) - .in_sequence(&mut seq) - .returning(move |_pair: DerivedRefPair| Err(StorageError::ReorgRequired)); - - mockdb.expect_get_block().returning(move |num| { - Ok(BlockInfo { - number: num, - hash: B256::random(), // different hash from safe derived block - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }) - }); - - // Expect reorg on log storage - mockdb.expect_rewind_log_storage().returning(|_block_id| Ok(())); - mockdb.expect_store_block_logs().returning(|_block_id, _logs| Ok(())); - mocknode.expect_fetch_receipts().returning(|_receipts| Ok(Receipts::default())); - - mockdb - .expect_save_derived_block() - .times(1) - .in_sequence(&mut seq) - .returning(move |_pair: DerivedRefPair| Ok(())); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - let result = handler.handle(block_pair, &mut 
state).await; - assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_block_triggers_reorg_block_error() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }, - }; - - let mut seq = mockall::Sequence::new(); - // Simulate ReorgRequired error - mockdb - .expect_save_derived_block() - .times(1) - .in_sequence(&mut seq) - .returning(move |_pair: DerivedRefPair| Err(StorageError::ReorgRequired)); - - mockdb.expect_get_block().returning(move |_| Err(StorageError::DatabaseNotInitialised)); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - let result = handler.handle(block_pair, &mut state).await.unwrap_err(); - assert!(matches!( - result, - ChainProcessorError::StorageError(StorageError::DatabaseNotInitialised) - )); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_block_triggers_reorg_rewind_error() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| 
true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }, - }; - - let mut seq = mockall::Sequence::new(); - // Simulate ReorgRequired error - mockdb - .expect_save_derived_block() - .times(1) - .in_sequence(&mut seq) - .returning(move |_pair: DerivedRefPair| Err(StorageError::ReorgRequired)); - - mockdb.expect_get_block().returning(move |num| { - Ok(BlockInfo { - number: num, - hash: B256::random(), // different hash from safe derived block - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }) - }); - - // Expect reorg on log storage - mockdb - .expect_rewind_log_storage() - .returning(|_block_id| Err(StorageError::DatabaseNotInitialised)); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - let result = handler.handle(block_pair, &mut state).await; - assert!(matches!( - result, - Err(ChainProcessorError::StorageError(StorageError::DatabaseNotInitialised)) - )); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_block_triggers_resync() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mut mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: 
B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }, - }; - - let mut seq = mockall::Sequence::new(); - // Simulate ReorgRequired error - mockdb - .expect_save_derived_block() - .times(1) - .in_sequence(&mut seq) - .returning(move |_pair: DerivedRefPair| Err(StorageError::FutureData)); - - mockdb.expect_get_block().returning(move |num| { - Ok(BlockInfo { - number: num, - hash: B256::random(), // different hash from safe derived block - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }) - }); - - mockdb.expect_store_block_logs().returning(|_block_id, _logs| Ok(())); - - mocknode.expect_fetch_receipts().returning(|_receipts| Ok(Receipts::default())); - - mockdb - .expect_save_derived_block() - .times(1) - .in_sequence(&mut seq) - .returning(move |_pair: DerivedRefPair| Ok(())); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - let result = handler.handle(block_pair, &mut state).await; - assert!(result.is_ok()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_derived_event_other_error() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let (tx, mut rx) = mpsc::channel(1); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - let block_pair = DerivedRefPair { - source: BlockInfo { - number: 123, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 0, - }, - derived: BlockInfo { - number: 1234, - hash: B256::ZERO, - parent_hash: B256::ZERO, - timestamp: 1003, // post-interop - }, - }; - - // Simulate a different error - mockdb - .expect_save_derived_block() - 
.returning(move |_pair: DerivedRefPair| Err(StorageError::DatabaseNotInitialised)); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node.clone()), writer.clone())); - - let handler = SafeBlockHandler::new( - 1, // chain_id - tx, - writer, - Arc::new(mockvalidator), - log_indexer, - ); - let result = handler.handle(block_pair, &mut state).await; - assert!(result.is_err()); - - // Ensure no command was sent - assert!(rx.try_recv().is_err()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/unsafe_block.rs b/rust/kona/crates/supervisor/core/src/chain_processor/handlers/unsafe_block.rs deleted file mode 100644 index a9a5501a14780..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/handlers/unsafe_block.rs +++ /dev/null @@ -1,315 +0,0 @@ -use super::EventHandler; -use crate::{ - ChainProcessorError, LogIndexer, ProcessorState, chain_processor::Metrics, - syncnode::BlockProvider, -}; -use alloy_primitives::ChainId; -use async_trait::async_trait; -use derive_more::Constructor; -use kona_interop::InteropValidator; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::LogStorage; -use std::sync::Arc; -use tracing::{error, trace}; - -/// Handler for unsafe blocks. -/// This handler processes unsafe blocks by syncing logs and initializing log storage. 
-#[derive(Debug, Constructor)] -pub struct UnsafeBlockHandler { - chain_id: ChainId, - validator: Arc, - db_provider: Arc, - log_indexer: Arc>, -} - -#[async_trait] -impl EventHandler for UnsafeBlockHandler -where - P: BlockProvider + 'static, - V: InteropValidator + 'static, - W: LogStorage + 'static, -{ - async fn handle( - &self, - block: BlockInfo, - state: &mut ProcessorState, - ) -> Result { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = block.number, - "Processing unsafe block" - ); - - if state.is_invalidated() { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = block.number, - "Invalidated block already set, skipping unsafe event processing" - ); - return Ok(block); - } - - let result = self.inner_handle(block).await; - Metrics::record_block_processing(self.chain_id, Metrics::BLOCK_TYPE_LOCAL_UNSAFE, &result); - - result - } -} - -impl UnsafeBlockHandler -where - P: BlockProvider + 'static, - V: InteropValidator + 'static, - W: LogStorage + 'static, -{ - async fn inner_handle(&self, block: BlockInfo) -> Result { - if self.validator.is_post_interop(self.chain_id, block.timestamp) { - self.log_indexer.clone().sync_logs(block); - return Ok(block); - } - - if self.validator.is_interop_activation_block(self.chain_id, block) { - trace!( - target: "supervisor::chain_processor", - chain_id = self.chain_id, - block_number = block.number, - "Initialising log storage for interop activation block" - ); - - self.db_provider.initialise_log_storage(block).inspect_err(|err| { - error!( - target: "supervisor::chain_processor::db", - chain_id = self.chain_id, - %block, - %err, - "Failed to initialise log storage for interop activation block" - ); - })?; - return Ok(block); - } - - Ok(block) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - ProcessorState, - syncnode::{BlockProvider, ManagedNodeError}, - }; - use alloy_primitives::B256; - use 
kona_interop::{DerivedRefPair, InteropValidationError}; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{LogStorageReader, LogStorageWriter, StorageError}; - use kona_supervisor_types::{Log, Receipts}; - use mockall::mock; - use std::sync::Arc; - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl BlockProvider for Node { - async fn fetch_receipts(&self, _block_hash: B256) -> Result; - async fn block_by_number(&self, _number: u64) -> Result; - } - ); - - mock!( - #[derive(Debug)] - pub Db {} - - impl LogStorageWriter for Db { - fn initialise_log_storage( - &self, - block: BlockInfo, - ) -> Result<(), StorageError>; - - fn store_block_logs( - &self, - block: &BlockInfo, - logs: Vec, - ) -> Result<(), StorageError>; - } - - impl LogStorageReader for Db { - fn get_block(&self, block_number: u64) -> Result; - fn get_latest_block(&self) -> Result; - fn get_log(&self,block_number: u64,log_index: u32) -> Result; - fn get_logs(&self, block_number: u64) -> Result, StorageError>; - } - ); - - mock! 
( - #[derive(Debug)] - pub Validator {} - - impl InteropValidator for Validator { - fn validate_interop_timestamps( - &self, - initiating_chain_id: ChainId, - initiating_timestamp: u64, - executing_chain_id: ChainId, - executing_timestamp: u64, - timeout: Option, - ) -> Result<(), InteropValidationError>; - - fn is_post_interop(&self, chain_id: ChainId, timestamp: u64) -> bool; - - fn is_interop_activation_block(&self, chain_id: ChainId, block: BlockInfo) -> bool; - } - ); - - #[tokio::test] - async fn test_handle_unsafe_event_skips_if_invalidated() { - let mockdb = MockDb::new(); - let mockvalidator = MockValidator::new(); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - // Simulate invalidated state - state.set_invalidated(DerivedRefPair { - source: BlockInfo::new(B256::ZERO, 1, B256::ZERO, 0), - derived: BlockInfo::new(B256::ZERO, 2, B256::ZERO, 0), - }); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node), writer.clone())); - - let handler = UnsafeBlockHandler::new(1, Arc::new(mockvalidator), writer, log_indexer); - - let block = BlockInfo::new(B256::ZERO, 123, B256::ZERO, 10); - let result = handler.handle(block, &mut state).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_unsafe_event_pre_interop() { - let mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| false); - mockvalidator.expect_is_interop_activation_block().returning(|_, _| false); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node), writer.clone())); - - let handler = UnsafeBlockHandler::new( - 1, // chain_id - Arc::new(mockvalidator), - writer, - log_indexer, - ); - - // 
Pre-interop block (timestamp < 1000) - let block = BlockInfo::new(B256::ZERO, 123, B256::ZERO, 10); - - let result = handler.handle(block, &mut state).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_unsafe_event_post_interop() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let mut mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| true); - - // Send unsafe block event - let block = BlockInfo::new(B256::ZERO, 123, B256::ZERO, 1003); - - mockdb.expect_store_block_logs().returning(move |_block, _log| Ok(())); - mocknode.expect_fetch_receipts().returning(move |block_hash| { - assert!(block_hash == block.hash); - Ok(Receipts::default()) - }); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node), writer.clone())); - - let handler = UnsafeBlockHandler::new( - 1, // chain_id - Arc::new(mockvalidator), - writer, - log_indexer, - ); - - let result = handler.handle(block, &mut state).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_unsafe_event_interop_activation() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| false); - mockvalidator.expect_is_interop_activation_block().returning(|_, _| true); - - // Block that triggers interop activation - let block = BlockInfo::new(B256::ZERO, 123, B256::ZERO, 1001); - - mockdb.expect_initialise_log_storage().times(1).returning(move |b| { - assert_eq!(b, block); - Ok(()) - }); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - // Create a mock log indexer - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node), writer.clone())); - - let 
handler = UnsafeBlockHandler::new( - 1, // chain_id - Arc::new(mockvalidator), - writer, - log_indexer, - ); - - let result = handler.handle(block, &mut state).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_unsafe_event_interop_activation_init_fails() { - let mut mockdb = MockDb::new(); - let mut mockvalidator = MockValidator::new(); - let mocknode = MockNode::new(); - let mut state = ProcessorState::new(); - - mockvalidator.expect_is_post_interop().returning(|_, _| false); - mockvalidator.expect_is_interop_activation_block().returning(|_, _| true); - - let block = BlockInfo::new(B256::ZERO, 123, B256::ZERO, 1001); - - mockdb - .expect_initialise_log_storage() - .times(1) - .returning(move |_b| Err(StorageError::ConflictError)); - - let writer = Arc::new(mockdb); - let managed_node = Arc::new(mocknode); - let log_indexer = Arc::new(LogIndexer::new(1, Some(managed_node), writer.clone())); - - let handler = UnsafeBlockHandler::new(1, Arc::new(mockvalidator), writer, log_indexer); - - let result = handler.handle(block, &mut state).await; - assert!(result.is_err()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/metrics.rs b/rust/kona/crates/supervisor/core/src/chain_processor/metrics.rs deleted file mode 100644 index f4bbcd13b3383..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/metrics.rs +++ /dev/null @@ -1,288 +0,0 @@ -use crate::ChainProcessorError; -use alloy_primitives::ChainId; -use kona_protocol::BlockInfo; -use std::time::SystemTime; -use tracing::error; - -#[derive(Debug)] -pub(crate) struct Metrics; - -impl Metrics { - // --- Metric Names --- - /// Identifier for block processing success. - /// Labels: `chain_id`, `type` - pub(crate) const BLOCK_PROCESSING_SUCCESS_TOTAL: &'static str = - "supervisor_block_processing_success_total"; - - /// Identifier for block processing errors. 
- /// Labels: `chain_id`, `type` - pub(crate) const BLOCK_PROCESSING_ERROR_TOTAL: &'static str = - "supervisor_block_processing_error_total"; - - /// Identifier for block processing latency. - /// Labels: `chain_id`, `type` - pub(crate) const BLOCK_PROCESSING_LATENCY_SECONDS: &'static str = - "supervisor_block_processing_latency_seconds"; - - pub(crate) const BLOCK_TYPE_LOCAL_UNSAFE: &'static str = "local_unsafe"; - pub(crate) const BLOCK_TYPE_CROSS_UNSAFE: &'static str = "cross_unsafe"; - pub(crate) const BLOCK_TYPE_LOCAL_SAFE: &'static str = "local_safe"; - pub(crate) const BLOCK_TYPE_CROSS_SAFE: &'static str = "cross_safe"; - pub(crate) const BLOCK_TYPE_FINALIZED: &'static str = "finalized"; - - // --- Block Invalidation Metric Names --- - /// Identifier for block invalidation success. - /// Labels: `chain_id` - pub(crate) const BLOCK_INVALIDATION_SUCCESS_TOTAL: &'static str = - "supervisor_block_invalidation_success_total"; - - /// Identifier for block invalidation errors. - /// Labels: `chain_id` - pub(crate) const BLOCK_INVALIDATION_ERROR_TOTAL: &'static str = - "supervisor_block_invalidation_error_total"; - - /// Identifier for block invalidation latency. - /// Labels: `chain_id` - pub(crate) const BLOCK_INVALIDATION_LATENCY_SECONDS: &'static str = - "supervisor_block_invalidation_latency_seconds"; - - pub(crate) const BLOCK_INVALIDATION_METHOD_INVALIDATE_BLOCK: &'static str = "invalidate_block"; - - // --- Block Replacement Metric Names --- - /// Identifier for block replacement success. - /// Labels: `chain_id` - pub(crate) const BLOCK_REPLACEMENT_SUCCESS_TOTAL: &'static str = - "supervisor_block_replacement_success_total"; - - /// Identifier for block replacement errors. - /// Labels: `chain_id` - pub(crate) const BLOCK_REPLACEMENT_ERROR_TOTAL: &'static str = - "supervisor_block_replacement_error_total"; - - /// Identifier for block replacement latency. 
- /// Labels: `chain_id` - pub(crate) const BLOCK_REPLACEMENT_LATENCY_SECONDS: &'static str = - "supervisor_block_replacement_latency_seconds"; - - pub(crate) const BLOCK_REPLACEMENT_METHOD_REPLACE_BLOCK: &'static str = "replace_block"; - - // --- Safety Head Ref Metric Names --- - /// Identifier for safety head ref. - /// Labels: `chain_id`, `type` - pub(crate) const SAFETY_HEAD_REF_LABELS: &'static str = "supervisor_safety_head_ref_labels"; - - pub(crate) fn init(chain_id: ChainId) { - Self::describe(); - Self::zero(chain_id); - } - - fn describe() { - metrics::describe_counter!( - Self::BLOCK_PROCESSING_SUCCESS_TOTAL, - metrics::Unit::Count, - "Total number of successfully processed blocks in the supervisor", - ); - - metrics::describe_counter!( - Self::BLOCK_PROCESSING_ERROR_TOTAL, - metrics::Unit::Count, - "Total number of errors encountered while processing blocks in the supervisor", - ); - - metrics::describe_histogram!( - Self::BLOCK_PROCESSING_LATENCY_SECONDS, - metrics::Unit::Seconds, - "Latency for processing in the supervisor", - ); - - metrics::describe_counter!( - Self::BLOCK_INVALIDATION_SUCCESS_TOTAL, - metrics::Unit::Count, - "Total number of successfully invalidated blocks in the supervisor", - ); - - metrics::describe_counter!( - Self::BLOCK_INVALIDATION_ERROR_TOTAL, - metrics::Unit::Count, - "Total number of errors encountered while invalidating blocks in the supervisor", - ); - - metrics::describe_histogram!( - Self::BLOCK_INVALIDATION_LATENCY_SECONDS, - metrics::Unit::Seconds, - "Latency for invalidating blocks in the supervisor", - ); - - metrics::describe_counter!( - Self::BLOCK_REPLACEMENT_SUCCESS_TOTAL, - metrics::Unit::Count, - "Total number of successfully replaced blocks in the supervisor", - ); - - metrics::describe_counter!( - Self::BLOCK_REPLACEMENT_ERROR_TOTAL, - metrics::Unit::Count, - "Total number of errors encountered while replacing blocks in the supervisor", - ); - - metrics::describe_histogram!( - 
Self::BLOCK_REPLACEMENT_LATENCY_SECONDS, - metrics::Unit::Seconds, - "Latency for replacing blocks in the supervisor", - ); - - metrics::describe_gauge!(Self::SAFETY_HEAD_REF_LABELS, "Supervisor safety head ref",); - } - - fn zero_block_processing(chain_id: ChainId, block_type: &'static str) { - metrics::counter!( - Self::BLOCK_PROCESSING_SUCCESS_TOTAL, - "type" => block_type, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::counter!( - Self::BLOCK_PROCESSING_ERROR_TOTAL, - "type" => block_type, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::histogram!( - Self::BLOCK_PROCESSING_LATENCY_SECONDS, - "type" => block_type, - "chain_id" => chain_id.to_string() - ) - .record(0.0); - } - - fn zero_safety_head_ref(chain_id: ChainId, head_type: &'static str) { - metrics::gauge!( - Self::SAFETY_HEAD_REF_LABELS, - "type" => head_type, - "chain_id" => chain_id.to_string(), - ) - .set(0.0); - } - - fn zero_block_invalidation(chain_id: ChainId) { - metrics::counter!( - Self::BLOCK_INVALIDATION_SUCCESS_TOTAL, - "method" => Self::BLOCK_INVALIDATION_METHOD_INVALIDATE_BLOCK, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::counter!( - Self::BLOCK_INVALIDATION_ERROR_TOTAL, - "method" => Self::BLOCK_INVALIDATION_METHOD_INVALIDATE_BLOCK, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::histogram!( - Self::BLOCK_INVALIDATION_LATENCY_SECONDS, - "method" => Self::BLOCK_INVALIDATION_METHOD_INVALIDATE_BLOCK, - "chain_id" => chain_id.to_string() - ) - .record(0.0); - } - - fn zero_block_replacement(chain_id: ChainId) { - metrics::counter!( - Self::BLOCK_REPLACEMENT_SUCCESS_TOTAL, - "method" => Self::BLOCK_REPLACEMENT_METHOD_REPLACE_BLOCK, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::counter!( - Self::BLOCK_REPLACEMENT_ERROR_TOTAL, - "method" => Self::BLOCK_REPLACEMENT_METHOD_REPLACE_BLOCK, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::histogram!( - 
Self::BLOCK_REPLACEMENT_LATENCY_SECONDS, - "method" => Self::BLOCK_REPLACEMENT_METHOD_REPLACE_BLOCK, - "chain_id" => chain_id.to_string() - ) - .record(0.0); - } - - fn zero(chain_id: ChainId) { - Self::zero_block_processing(chain_id, Self::BLOCK_TYPE_LOCAL_UNSAFE); - Self::zero_block_processing(chain_id, Self::BLOCK_TYPE_CROSS_UNSAFE); - Self::zero_block_processing(chain_id, Self::BLOCK_TYPE_LOCAL_SAFE); - Self::zero_block_processing(chain_id, Self::BLOCK_TYPE_CROSS_SAFE); - Self::zero_block_processing(chain_id, Self::BLOCK_TYPE_FINALIZED); - - Self::zero_block_invalidation(chain_id); - Self::zero_block_replacement(chain_id); - - Self::zero_safety_head_ref(chain_id, Self::BLOCK_TYPE_LOCAL_UNSAFE); - Self::zero_safety_head_ref(chain_id, Self::BLOCK_TYPE_CROSS_UNSAFE); - Self::zero_safety_head_ref(chain_id, Self::BLOCK_TYPE_LOCAL_SAFE); - Self::zero_safety_head_ref(chain_id, Self::BLOCK_TYPE_CROSS_SAFE); - Self::zero_safety_head_ref(chain_id, Self::BLOCK_TYPE_FINALIZED); - } - - /// Records metrics for a block processing operation. - /// Takes the result of the processing and extracts the block info if successful. 
- pub(crate) fn record_block_processing( - chain_id: ChainId, - block_type: &'static str, - result: &Result, - ) { - match result { - Ok(block) => { - metrics::counter!( - Self::BLOCK_PROCESSING_SUCCESS_TOTAL, - "type" => block_type, - "chain_id" => chain_id.to_string() - ) - .increment(1); - - metrics::gauge!( - Self::SAFETY_HEAD_REF_LABELS, - "type" => block_type, - "chain_id" => chain_id.to_string(), - ) - .set(block.number as f64); - - // Calculate latency - match SystemTime::now().duration_since(std::time::UNIX_EPOCH) { - Ok(duration) => { - let now = duration.as_secs_f64(); - let latency = now - block.timestamp as f64; - - metrics::histogram!( - Self::BLOCK_PROCESSING_LATENCY_SECONDS, - "type" => block_type, - "chain_id" => chain_id.to_string() - ) - .record(latency); - } - Err(err) => { - error!( - target: "chain_processor", - chain_id = chain_id, - %err, - "SystemTime error when recording block processing latency" - ); - } - } - } - Err(_) => { - metrics::counter!( - Self::BLOCK_PROCESSING_ERROR_TOTAL, - "type" => block_type, - "chain_id" => chain_id.to_string() - ) - .increment(1); - } - } - } -} diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/mod.rs b/rust/kona/crates/supervisor/core/src/chain_processor/mod.rs deleted file mode 100644 index 03d77150365ac..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Chain Processor Module -//! This module implements the Chain Processor, which manages the nodes and process events per -//! chain. It provides a structured way to handle tasks, manage chains, and process blocks -//! in a supervisor environment. 
-mod error; -pub use error::ChainProcessorError; - -mod chain; -pub use chain::ChainProcessor; - -mod metrics; -pub(crate) use metrics::Metrics; - -mod state; -pub use state::ProcessorState; - -pub mod handlers; diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/state/mod.rs b/rust/kona/crates/supervisor/core/src/chain_processor/state/mod.rs deleted file mode 100644 index d82378134ab9a..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/state/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -mod processor; -pub use processor::ProcessorState; diff --git a/rust/kona/crates/supervisor/core/src/chain_processor/state/processor.rs b/rust/kona/crates/supervisor/core/src/chain_processor/state/processor.rs deleted file mode 100644 index db22d93c25de8..0000000000000 --- a/rust/kona/crates/supervisor/core/src/chain_processor/state/processor.rs +++ /dev/null @@ -1,40 +0,0 @@ -use kona_interop::DerivedRefPair; - -/// This module contains the state management for the chain processor. -/// It provides a way to track the invalidated blocks and manage the state of the chain processor -#[derive(Debug, Default)] -pub struct ProcessorState { - invalidated_block: Option, -} - -impl ProcessorState { - /// Creates a new instance of [`ProcessorState`]. - pub fn new() -> Self { - Self::default() - } - - /// Returns `true` if the state is invalidated, otherwise `false`. - pub const fn is_invalidated(&self) -> bool { - self.invalidated_block.is_some() - } - - /// Returns the invalidated block if it exists. - pub const fn get_invalidated(&self) -> Option { - self.invalidated_block - } - - /// Sets the invalidated block to the given pair if it is not already set. - pub const fn set_invalidated(&mut self, pair: DerivedRefPair) -> bool { - if self.invalidated_block.is_some() { - return false; // Already set - } - // Set the invalidated block - self.invalidated_block = Some(pair); - true - } - - /// Clears the invalidated block. 
- pub const fn clear_invalidated(&mut self) { - self.invalidated_block = None; - } -} diff --git a/rust/kona/crates/supervisor/core/src/config/core_config.rs b/rust/kona/crates/supervisor/core/src/config/core_config.rs deleted file mode 100644 index 0c3e0f02b6d5e..0000000000000 --- a/rust/kona/crates/supervisor/core/src/config/core_config.rs +++ /dev/null @@ -1,165 +0,0 @@ -use super::RollupConfigSet; -use crate::syncnode::ClientConfig; -use alloy_primitives::ChainId; -use derive_more::Constructor; -use kona_interop::{DependencySet, InteropValidationError, InteropValidator}; -use kona_protocol::BlockInfo; -use std::{net::SocketAddr, path::PathBuf}; - -/// Configuration for the Supervisor service. -#[derive(Debug, Clone, Constructor)] -pub struct Config { - /// The URL of the L1 RPC endpoint. - pub l1_rpc: String, - - /// L2 consensus nodes configuration. - pub l2_consensus_nodes_config: Vec, - - /// Directory where the database files are stored. - pub datadir: PathBuf, - - /// The socket address for the RPC server to listen on. - pub rpc_addr: SocketAddr, - - /// Whether to enable the Supervisor Admin API. - pub enable_admin_api: bool, - - /// The loaded dependency set configuration. - pub dependency_set: DependencySet, - - /// The rollup configuration set. 
- pub rollup_config_set: RollupConfigSet, -} - -impl InteropValidator for Config { - fn validate_interop_timestamps( - &self, - initiating_chain_id: ChainId, - initiating_timestamp: u64, - executing_chain_id: ChainId, - executing_timestamp: u64, - timeout: Option, - ) -> Result<(), InteropValidationError> { - // Interop must be active on both chains at the relevant times - if !self.rollup_config_set.is_post_interop(initiating_chain_id, initiating_timestamp) || - !self.rollup_config_set.is_post_interop(executing_chain_id, executing_timestamp) - { - return Err(InteropValidationError::InteropNotEnabled); - } - - // Executing timestamp must not be earlier than the initiating timestamp - if initiating_timestamp > executing_timestamp { - return Err(InteropValidationError::InvalidTimestampInvariant { - initiating: initiating_timestamp, - executing: executing_timestamp, - }); - } - - // Ensure the message has not expired by the time of execution - let expiry_window = self.dependency_set.get_message_expiry_window(); - let expires_at = initiating_timestamp.saturating_add(expiry_window); - let execution_deadline = executing_timestamp.saturating_add(timeout.unwrap_or(0)); - - if expires_at < execution_deadline { - return Err(InteropValidationError::InvalidInteropTimestamp(executing_timestamp)); - } - - Ok(()) - } - - fn is_post_interop(&self, chain_id: ChainId, timestamp: u64) -> bool { - self.rollup_config_set.is_post_interop(chain_id, timestamp) - } - - fn is_interop_activation_block(&self, chain_id: ChainId, block: BlockInfo) -> bool { - self.rollup_config_set.is_interop_activation_block(chain_id, block) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::config::RollupConfig; - use kona_interop::DependencySet; - use std::{collections::HashMap, net::SocketAddr, path::PathBuf}; - - fn mock_rollup_config_set() -> RollupConfigSet { - let chain1 = - RollupConfig { genesis: Default::default(), block_time: 2, interop_time: Some(100) }; - let chain2 = - RollupConfig { 
genesis: Default::default(), block_time: 2, interop_time: Some(105) }; - let mut config_set = HashMap::::new(); - config_set.insert(1, chain1); - config_set.insert(2, chain2); - - RollupConfigSet { rollups: config_set } - } - - fn mock_config() -> Config { - Config { - l1_rpc: Default::default(), - l2_consensus_nodes_config: vec![], - datadir: PathBuf::new(), - rpc_addr: SocketAddr::from(([127, 0, 0, 1], 8545)), - enable_admin_api: false, - dependency_set: DependencySet { - dependencies: Default::default(), - override_message_expiry_window: Some(10), - }, - rollup_config_set: mock_rollup_config_set(), - } - } - - #[test] - fn test_valid_case() { - let cfg = mock_config(); - let res = cfg.validate_interop_timestamps(1, 200, 2, 202, None); - assert_eq!(res, Ok(())); - } - #[test] - fn test_valid_with_timeout() { - let cfg = mock_config(); - let res = cfg.validate_interop_timestamps(1, 200, 2, 202, Some(5)); - assert_eq!(res, Ok(())); - } - - #[test] - fn test_chain_id_doesnt_exist() { - let cfg = mock_config(); - let res = cfg.validate_interop_timestamps(1, 200, 3, 215, Some(20)); - assert_eq!(res, Err(InteropValidationError::InteropNotEnabled)); - } - #[test] - fn test_interop_not_enabled_chain1() { - let cfg = mock_config(); - let res = cfg.validate_interop_timestamps(1, 100, 2, 215, Some(20)); - assert_eq!(res, Err(InteropValidationError::InteropNotEnabled)); - } - - #[test] - fn test_invalid_timestamp_invariant() { - let cfg = mock_config(); - let res = cfg.validate_interop_timestamps(1, 200, 2, 195, Some(20)); - assert_eq!( - res, - Err(InteropValidationError::InvalidTimestampInvariant { - initiating: 200, - executing: 195 - }) - ); - } - - #[test] - fn test_expired_message_with_timeout() { - let cfg = mock_config(); - let res = cfg.validate_interop_timestamps(1, 200, 2, 250, Some(20)); - assert_eq!(res, Err(InteropValidationError::InvalidInteropTimestamp(250))); - } - - #[test] - fn test_expired_message_without_timeout() { - let cfg = mock_config(); - let res = 
cfg.validate_interop_timestamps(1, 200, 2, 215, None); - assert_eq!(res, Err(InteropValidationError::InvalidInteropTimestamp(215))); - } -} diff --git a/rust/kona/crates/supervisor/core/src/config/mod.rs b/rust/kona/crates/supervisor/core/src/config/mod.rs deleted file mode 100644 index 2841e240874f3..0000000000000 --- a/rust/kona/crates/supervisor/core/src/config/mod.rs +++ /dev/null @@ -1,7 +0,0 @@ -//! Configuration management for the supervisor. - -mod rollup_config_set; -pub use rollup_config_set::{Genesis, RollupConfig, RollupConfigSet}; - -mod core_config; -pub use core_config::Config; diff --git a/rust/kona/crates/supervisor/core/src/config/rollup_config_set.rs b/rust/kona/crates/supervisor/core/src/config/rollup_config_set.rs deleted file mode 100644 index 66ef32a06f40e..0000000000000 --- a/rust/kona/crates/supervisor/core/src/config/rollup_config_set.rs +++ /dev/null @@ -1,191 +0,0 @@ -use alloy_primitives::{B256, ChainId}; -use kona_genesis::ChainGenesis; -use kona_interop::DerivedRefPair; -use kona_protocol::BlockInfo; -use std::collections::HashMap; - -use crate::SupervisorError; - -/// Genesis provides the genesis information relevant for Interop. -#[derive(Debug, Default, Clone)] -pub struct Genesis { - /// The L1 [`BlockInfo`] that the rollup starts after. - pub l1: BlockInfo, - /// The L2 [`BlockInfo`] that the rollup starts from. - pub l2: BlockInfo, -} - -impl Genesis { - /// Creates a new Genesis with the given L1 and L2 block seals. - pub const fn new(l1: BlockInfo, l2: BlockInfo) -> Self { - Self { l1, l2 } - } - - /// Creates a new Genesis from a `RollupConfig`. - pub const fn new_from_rollup_genesis(genesis: ChainGenesis, l1_block: BlockInfo) -> Self { - Self { - l1: l1_block, - l2: BlockInfo::new(genesis.l2.hash, genesis.l2.number, B256::ZERO, genesis.l2_time), - } - } - - /// Returns the genesis as a [`DerivedRefPair`]. 
- pub const fn get_derived_pair(&self) -> DerivedRefPair { - DerivedRefPair { derived: self.l2, source: self.l1 } - } -} - -/// `RollupConfig` contains the configuration for the Optimism rollup. -#[derive(Debug, Default, Clone)] -pub struct RollupConfig { - /// Genesis anchor information for the rollup. - pub genesis: Genesis, - - /// The block time of the L2, in seconds. - pub block_time: u64, - - /// Activation time for the interop network upgrade. - pub interop_time: Option, -} - -impl RollupConfig { - /// Creates a new `RollupConfig` with the given genesis and block time. - pub const fn new(genesis: Genesis, block_time: u64, interop_time: Option) -> Self { - Self { genesis, block_time, interop_time } - } - - /// Creates a new [`RollupConfig`] with the given genesis and block time. - pub fn new_from_rollup_config( - config: kona_genesis::RollupConfig, - l1_block: BlockInfo, - ) -> Result { - if config.genesis.l1.number != l1_block.number { - return Err(SupervisorError::L1BlockMismatch { - expected: config.genesis.l1.number, - got: l1_block.number, - }); - } - - Ok(Self { - genesis: Genesis::new_from_rollup_genesis(config.genesis, l1_block), - block_time: config.block_time, - interop_time: config.hardforks.interop_time, - }) - } - - /// Returns `true` if the timestamp is at or after the interop activation time. - /// - /// Interop activates at [`interop_time`](Self::interop_time). This function checks whether the - /// provided timestamp is before or after interop timestamp. - /// - /// Returns `false` if `interop_time` is not configured. - pub fn is_interop(&self, timestamp: u64) -> bool { - self.interop_time.is_some_and(|t| timestamp >= t) - } - - /// Returns `true` if the timestamp is strictly after the interop activation block. - /// - /// Interop activates at [`interop_time`](Self::interop_time). This function checks whether the - /// provided timestamp is *after* that activation, skipping the activation block - /// itself. 
- /// - /// Returns `false` if `interop_time` is not configured. - pub fn is_post_interop(&self, timestamp: u64) -> bool { - self.is_interop(timestamp.saturating_sub(self.block_time)) - } - - /// Returns `true` if given block is the interop activation block. - /// - /// An interop activation block is defined as the block that is right after the - /// interop activation time. - /// - /// Returns `false` if `interop_time` is not configured. - pub fn is_interop_activation_block(&self, block: BlockInfo) -> bool { - self.is_interop(block.timestamp) && - !self.is_interop(block.timestamp.saturating_sub(self.block_time)) - } -} - -/// `RollupConfigSet` contains the configuration for multiple Optimism rollups. -#[derive(Debug, Clone, Default)] -pub struct RollupConfigSet { - /// The rollup configurations for the Optimism rollups. - pub rollups: HashMap, -} - -impl RollupConfigSet { - /// Creates a new `RollupConfigSet` with the given rollup configurations. - pub const fn new(rollups: HashMap) -> Self { - Self { rollups } - } - - /// Returns the rollup configuration for the given chain id. - pub fn get(&self, chain_id: u64) -> Option<&RollupConfig> { - self.rollups.get(&chain_id) - } - - /// adds a new rollup configuration to the set using the provided chain ID and `RollupConfig`. - pub fn add_from_rollup_config( - &mut self, - chain_id: u64, - config: kona_genesis::RollupConfig, - l1_block: BlockInfo, - ) -> Result<(), SupervisorError> { - let rollup_config = RollupConfig::new_from_rollup_config(config, l1_block)?; - self.rollups.insert(chain_id, rollup_config); - Ok(()) - } - - /// Returns `true` if interop is enabled for the chain at given timestamp. - pub fn is_post_interop(&self, chain_id: ChainId, timestamp: u64) -> bool { - self.get(chain_id).map(|cfg| cfg.is_post_interop(timestamp)).unwrap_or(false) // if config not found, return false - } - - /// Returns `true` if given block is the interop activation block for the specified chain. 
- pub fn is_interop_activation_block(&self, chain_id: ChainId, block: BlockInfo) -> bool { - self.get(chain_id).map(|cfg| cfg.is_interop_activation_block(block)).unwrap_or(false) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::ChainId; - use kona_protocol::BlockInfo; - - fn dummy_blockinfo(number: u64) -> BlockInfo { - BlockInfo::new(B256::ZERO, number, B256::ZERO, 0) - } - - #[test] - fn test_is_interop_enabled() { - let mut set = RollupConfigSet::default(); - let chain_id = ChainId::from(1u64); - - // Interop time is 100, block_time is 10 - let rollup_config = - RollupConfig::new(Genesis::new(dummy_blockinfo(0), dummy_blockinfo(0)), 10, Some(100)); - set.rollups.insert(chain_id, rollup_config); - - // Before interop time - assert!(!set.is_post_interop(chain_id, 100)); - assert!(!set.is_post_interop(chain_id, 109)); - // After interop time (should be true) - assert!(set.is_post_interop(chain_id, 110)); - assert!(set.is_post_interop(chain_id, 111)); - assert!(set.is_post_interop(chain_id, 200)); - - // Unknown chain_id returns false - assert!(!set.is_post_interop(ChainId::from(999u64), 200)); - } - - #[test] - fn test_rollup_config_is_interop_interop_time_zero() { - // Interop time is 100, block_time is 10 - let rollup_config = - RollupConfig::new(Genesis::new(dummy_blockinfo(0), dummy_blockinfo(0)), 2, Some(0)); - - assert!(rollup_config.is_interop(0)); - assert!(rollup_config.is_interop(1000)); - } -} diff --git a/rust/kona/crates/supervisor/core/src/error.rs b/rust/kona/crates/supervisor/core/src/error.rs deleted file mode 100644 index 8f188dae46e8a..0000000000000 --- a/rust/kona/crates/supervisor/core/src/error.rs +++ /dev/null @@ -1,209 +0,0 @@ -//! [`SupervisorService`](crate::SupervisorService) errors. 
- -use crate::syncnode::ManagedNodeError; -use derive_more; -use jsonrpsee::types::{ErrorCode, ErrorObjectOwned}; -use kona_supervisor_storage::StorageError; -use kona_supervisor_types::AccessListError; -use op_alloy_rpc_types::SuperchainDAError; -use thiserror::Error; - -/// Custom error type for the Supervisor core logic. -#[derive(Debug, Error)] -pub enum SupervisorError { - /// Indicates that a feature or method is not yet implemented. - #[error("functionality not implemented")] - Unimplemented, - - /// No chains are configured for supervision. - #[error("empty dependency set")] - EmptyDependencySet, - - /// Unsupported chain ID. - #[error("unsupported chain ID")] - UnsupportedChainId, - - /// Data availability errors. - /// - /// Spec . - #[error(transparent)] - SpecError(#[from] SpecError), - - /// Indicates that error occurred while interacting with the storage layer. - #[error(transparent)] - StorageError(#[from] StorageError), - - /// Indicates that managed node not found for the chain. - #[error("managed node not found for chain: {0}")] - ManagedNodeMissing(u64), - - /// Indicates the error occurred while interacting with the managed node. - #[error(transparent)] - ManagedNodeError(#[from] ManagedNodeError), - - /// Indicates the error occurred while parsing the `access_list` - #[error(transparent)] - AccessListError(#[from] AccessListError), - - /// Indicates the error occurred while serializing or deserializing JSON. - #[error(transparent)] - SerdeJson(#[from] serde_json::Error), - - /// Indicates the L1 block does not match the expected L1 block. - #[error("L1 block number mismatch. expected: {expected}, but got {got}")] - L1BlockMismatch { - /// Expected L1 block. - expected: u64, - /// Received L1 block. - got: u64, - }, - - /// Indicates that the chain ID could not be parsed from the access list. 
- #[error("failed to parse chain id from access list")] - ChainIdParseError(), -} - -impl PartialEq for SupervisorError { - fn eq(&self, other: &Self) -> bool { - use SupervisorError::{ - AccessListError, EmptyDependencySet, L1BlockMismatch, ManagedNodeError, - ManagedNodeMissing, SerdeJson, SpecError, StorageError, Unimplemented, - }; - match (self, other) { - (Unimplemented, Unimplemented) | (EmptyDependencySet, EmptyDependencySet) => true, - (SpecError(a), SpecError(b)) => a == b, - (StorageError(a), StorageError(b)) => a == b, - (ManagedNodeMissing(a), ManagedNodeMissing(b)) => a == b, - (ManagedNodeError(a), ManagedNodeError(b)) => a == b, - (AccessListError(a), AccessListError(b)) => a == b, - (SerdeJson(a), SerdeJson(b)) => a.to_string() == b.to_string(), - (L1BlockMismatch { expected: a, got: b }, L1BlockMismatch { expected: c, got: d }) => { - a == c && b == d - } - _ => false, - } - } -} - -impl Eq for SupervisorError {} - -/// Extending the [`SuperchainDAError`] to include errors not in the spec. -#[derive(Error, Debug, PartialEq, Eq, derive_more::TryFrom)] -#[repr(i32)] -#[try_from(repr)] -pub enum SpecError { - /// [`SuperchainDAError`] from the spec. - #[error(transparent)] - SuperchainDAError(#[from] SuperchainDAError), - - /// Error not in spec. - #[error("error not in spec")] - ErrorNotInSpec, -} - -impl SpecError { - /// Maps the proper error code from `SuperchainDAError`. - /// Introduced a new error code for errors not in the spec. 
- pub const fn code(&self) -> i32 { - match self { - Self::SuperchainDAError(e) => *e as i32, - Self::ErrorNotInSpec => -321300, - } - } -} - -impl From for ErrorObjectOwned { - fn from(err: SpecError) -> Self { - ErrorObjectOwned::owned(err.code(), err.to_string(), None::<()>) - } -} - -impl From for ErrorObjectOwned { - fn from(err: SupervisorError) -> Self { - match err { - // todo: handle these errors more gracefully - SupervisorError::Unimplemented | - SupervisorError::EmptyDependencySet | - SupervisorError::UnsupportedChainId | - SupervisorError::L1BlockMismatch { .. } | - SupervisorError::ManagedNodeMissing(_) | - SupervisorError::ManagedNodeError(_) | - SupervisorError::StorageError(_) | - SupervisorError::AccessListError(_) | - SupervisorError::ChainIdParseError() | - SupervisorError::SerdeJson(_) => ErrorObjectOwned::from(ErrorCode::InternalError), - SupervisorError::SpecError(err) => err.into(), - } - } -} - -impl From for SpecError { - fn from(err: StorageError) -> Self { - match err { - StorageError::Database(_) => Self::from(SuperchainDAError::DataCorruption), - StorageError::FutureData => Self::from(SuperchainDAError::FutureData), - StorageError::EntryNotFound(_) => Self::from(SuperchainDAError::MissedData), - StorageError::ConflictError => Self::from(SuperchainDAError::ConflictingData), - StorageError::BlockOutOfOrder => Self::from(SuperchainDAError::OutOfOrder), - _ => Self::ErrorNotInSpec, - } - } -} - -#[cfg(test)] -mod test { - use kona_supervisor_storage::EntryNotFoundError; - - use super::*; - - #[test] - fn test_storage_error_conversion() { - let test_err = SpecError::from(StorageError::DatabaseNotInitialised); - let expected_err = SpecError::ErrorNotInSpec; - - assert_eq!(test_err, expected_err); - } - - #[test] - fn test_unmapped_storage_error_conversion() { - let spec_err = ErrorObjectOwned::from(SpecError::ErrorNotInSpec); - let expected_err = SpecError::ErrorNotInSpec; - - assert_eq!(spec_err, expected_err.into()); - - let spec_err = 
ErrorObjectOwned::from(SpecError::from(StorageError::LockPoisoned)); - let expected_err = SpecError::ErrorNotInSpec; - - assert_eq!(spec_err, expected_err.into()); - - let spec_err = ErrorObjectOwned::from(SpecError::from(StorageError::FutureData)); - let expected_err = SpecError::SuperchainDAError(SuperchainDAError::FutureData); - - assert_eq!(spec_err, expected_err.into()); - - let spec_err = ErrorObjectOwned::from(SpecError::from(StorageError::EntryNotFound( - EntryNotFoundError::DerivedBlockNotFound(12), - ))); - let expected_err = SpecError::SuperchainDAError(SuperchainDAError::MissedData); - - assert_eq!(spec_err, expected_err.into()); - } - - #[test] - fn test_supervisor_error_conversion() { - // This will happen implicitly in server rpc response calls. - let supervisor_err = ErrorObjectOwned::from(SupervisorError::SpecError(SpecError::from( - StorageError::LockPoisoned, - ))); - let expected_err = SpecError::ErrorNotInSpec; - - assert_eq!(supervisor_err, expected_err.into()); - - let supervisor_err = ErrorObjectOwned::from(SupervisorError::SpecError(SpecError::from( - StorageError::FutureData, - ))); - let expected_err = SpecError::SuperchainDAError(SuperchainDAError::FutureData); - - assert_eq!(supervisor_err, expected_err.into()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/event/chain.rs b/rust/kona/crates/supervisor/core/src/event/chain.rs deleted file mode 100644 index 36f8b2593aed0..0000000000000 --- a/rust/kona/crates/supervisor/core/src/event/chain.rs +++ /dev/null @@ -1,60 +0,0 @@ -use kona_interop::{BlockReplacement, DerivedRefPair}; -use kona_protocol::BlockInfo; - -/// Represents chain events that are emitted from modules in the supervisor. -/// These events are used to notify the [`ChainProcessor`](crate::chain_processor::ChainProcessor) -/// about changes in block states, such as unsafe blocks, safe blocks, or block replacements. 
-/// Each event carries relevant information about the block involved, -/// allowing the supervisor to take appropriate actions based on the event type. -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum ChainEvent { - /// An unsafe block event, indicating that a new unsafe block has been detected. - UnsafeBlock { - /// The [`BlockInfo`] of the unsafe block. - block: BlockInfo, - }, - - /// A derived block event, indicating that a new derived block has been detected. - DerivedBlock { - /// The [`DerivedRefPair`] containing the derived block and its source block. - derived_ref_pair: DerivedRefPair, - }, - - /// A derivation origin update event, indicating that the origin for derived blocks has - /// changed. - DerivationOriginUpdate { - /// The [`BlockInfo`] of the block that is the new derivation origin. - origin: BlockInfo, - }, - - /// An invalidate Block event, indicating that a block has been invalidated. - InvalidateBlock { - /// The [`BlockInfo`] of the block that has been invalidated. - block: BlockInfo, - }, - - /// A block replacement event, indicating that a block has been replaced with a new one. - BlockReplaced { - /// The [`BlockReplacement`] containing the replacement block and the invalidated block - /// hash. - replacement: BlockReplacement, - }, - - /// A finalized source update event, indicating that a new source block has been finalized. - FinalizedSourceUpdate { - /// The [`BlockInfo`] of the new finalized source(l1) block. - finalized_source_block: BlockInfo, - }, - - /// A cross unsafe update event, indicating that a cross unsafe block has been promoted. - CrossUnsafeUpdate { - /// The [`BlockInfo`] of the new cross unsafe block - block: BlockInfo, - }, - - /// A cross safe update event, indicating that a cross safe block has been promoted. - CrossSafeUpdate { - /// The [`DerivedRefPair`] containing the derived block and its source block. 
- derived_ref_pair: DerivedRefPair, - }, -} diff --git a/rust/kona/crates/supervisor/core/src/event/mod.rs b/rust/kona/crates/supervisor/core/src/event/mod.rs deleted file mode 100644 index 3db9adba7f1e1..0000000000000 --- a/rust/kona/crates/supervisor/core/src/event/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -//! Event module for the chain processor and supervisor coordination. - -mod chain; -pub use chain::ChainEvent; diff --git a/rust/kona/crates/supervisor/core/src/l1_watcher/mod.rs b/rust/kona/crates/supervisor/core/src/l1_watcher/mod.rs deleted file mode 100644 index e2751f3dc502f..0000000000000 --- a/rust/kona/crates/supervisor/core/src/l1_watcher/mod.rs +++ /dev/null @@ -1,6 +0,0 @@ -//! L1 Watcher Module -//! This module provides functionality for watching L1 blocks and managing subscriptions to L1 -//! events. -mod watcher; - -pub use watcher::L1Watcher; diff --git a/rust/kona/crates/supervisor/core/src/l1_watcher/watcher.rs b/rust/kona/crates/supervisor/core/src/l1_watcher/watcher.rs deleted file mode 100644 index b2d16ceb658c7..0000000000000 --- a/rust/kona/crates/supervisor/core/src/l1_watcher/watcher.rs +++ /dev/null @@ -1,509 +0,0 @@ -use crate::event::ChainEvent; -use alloy_eips::{BlockNumHash, BlockNumberOrTag}; -use alloy_primitives::ChainId; -use alloy_rpc_client::RpcClient; -use alloy_rpc_types_eth::{Block, Header}; -use futures::StreamExt; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{DbReader, FinalizedL1Storage, StorageRewinder}; -use std::{collections::HashMap, sync::Arc, time::Duration}; -use tokio::sync::mpsc; -use tokio_util::sync::CancellationToken; -use tracing::{error, info, trace}; - -use crate::ReorgHandler; - -/// A watcher that polls the L1 chain for finalized blocks. -#[derive(Debug)] -pub struct L1Watcher { - /// The Alloy RPC client for L1. - rpc_client: RpcClient, - /// The cancellation token, shared between all tasks. - cancellation: CancellationToken, - /// The finalized L1 block storage. 
- finalized_l1_storage: Arc, - /// The event senders for each chain. - event_txs: HashMap>, - /// The reorg handler. - reorg_handler: ReorgHandler, -} - -impl L1Watcher -where - F: FinalizedL1Storage + 'static, - DB: DbReader + StorageRewinder + Send + Sync + 'static, -{ - /// Creates a new [`L1Watcher`] instance. - pub const fn new( - rpc_client: RpcClient, - finalized_l1_storage: Arc, - event_txs: HashMap>, - cancellation: CancellationToken, - reorg_handler: ReorgHandler, - ) -> Self { - Self { rpc_client, finalized_l1_storage, event_txs, cancellation, reorg_handler } - } - - /// Starts polling for finalized and latest blocks and processes them. - pub async fn run(&self) { - // TODO: Change the polling interval to 1535 seconds with mainnet config. - let finalized_head_poller = self - .rpc_client - .prepare_static_poller::<_, Block>( - "eth_getBlockByNumber", - (BlockNumberOrTag::Finalized, false), - ) - .with_poll_interval(Duration::from_secs(47)); - - let finalized_head_stream = finalized_head_poller.into_stream(); - - // TODO: Change the polling interval to 11 seconds with mainnet config. - let latest_head_poller = self - .rpc_client - .prepare_static_poller::<_, Block>( - "eth_getBlockByNumber", - (BlockNumberOrTag::Latest, false), - ) - .with_poll_interval(Duration::from_secs(2)); - - let latest_head_stream = latest_head_poller.into_stream(); - - self.poll_blocks(finalized_head_stream, latest_head_stream).await; - } - - /// Helper function to poll blocks using a provided stream and handler closure. - async fn poll_blocks(&self, mut finalized_head_stream: S, mut latest_head_stream: S) - where - S: futures::Stream + Unpin, - { - let mut finalized_number = 0; - let mut previous_latest_block: Option = None; - - loop { - tokio::select! 
{ - _ = self.cancellation.cancelled() => { - info!(target: "supervisor::l1_watcher", "L1Watcher cancellation requested, stopping polling"); - break; - } - latest_block = latest_head_stream.next() => { - if let Some(latest_block) = latest_block { - previous_latest_block = self.handle_new_latest_block(latest_block, previous_latest_block).await; - } - } - finalized_block = finalized_head_stream.next() => { - if let Some(finalized_block) = finalized_block { - finalized_number = self.handle_new_finalized_block(finalized_block, finalized_number); - } - } - } - } - } - - /// Handles a new finalized [`Block`], updating the storage and broadcasting the event. - /// - /// Arguments: - /// - `block`: The finalized block to process. - /// - `last_finalized_number`: The last finalized block number. - /// - /// Returns: - /// - `u64`: The new finalized block number. - fn handle_new_finalized_block(&self, block: Block, last_finalized_number: u64) -> u64 { - let block_number = block.header.number; - if block_number == last_finalized_number { - return last_finalized_number; - } - - let Header { - hash, - inner: alloy_consensus::Header { number, parent_hash, timestamp, .. }, - .. 
- } = block.header; - let finalized_source_block = BlockInfo::new(hash, number, parent_hash, timestamp); - - trace!( - target: "supervisor::l1_watcher", - incoming_block_number = block_number, - previous_block_number = last_finalized_number, - "Finalized L1 block received" - ); - - if let Err(err) = self.finalized_l1_storage.update_finalized_l1(finalized_source_block) { - error!(target: "supervisor::l1_watcher", %err, "Failed to update finalized L1 block"); - return last_finalized_number; - } - - self.broadcast_finalized_source_update(finalized_source_block); - - block_number - } - - fn broadcast_finalized_source_update(&self, finalized_source_block: BlockInfo) { - for (chain_id, sender) in &self.event_txs { - if let Err(err) = - sender.try_send(ChainEvent::FinalizedSourceUpdate { finalized_source_block }) - { - error!( - target: "supervisor::l1_watcher", - chain_id = %chain_id, - %err, "Failed to send finalized L1 update event", - ); - } - } - } - - /// Handles a new latest [`Block`], checking if it requires a reorg or is sequential. - /// - /// Arguments: - /// - `incoming_block`: The incoming block to process. - /// - `previous_block`: The previously stored latest block, if any. - /// - /// Returns: - /// - `Option`: The ID of the new latest block if processed successfully, or the - /// previous block if no changes were made. - async fn handle_new_latest_block( - &self, - incoming_block: Block, - previous_block: Option, - ) -> Option { - let Header { - hash, - inner: alloy_consensus::Header { number, parent_hash, timestamp, .. }, - .. 
- } = incoming_block.header; - let latest_block = BlockInfo::new(hash, number, parent_hash, timestamp); - - let prev = match previous_block { - Some(prev) => prev, - None => { - return Some(latest_block.id()); - } - }; - - trace!( - target: "l1_watcher", - block_number = latest_block.number, - block_hash = ?incoming_block.header.hash, - "New latest L1 block received" - ); - - // Early exit if the incoming block is not newer than the previous block - if latest_block.number <= prev.number { - trace!( - target: "supervisor::l1_watcher", - incoming_block_number = latest_block.number, - previous_block_number = prev.number, - "Incoming latest L1 block is not greater than the stored latest block" - ); - return previous_block; - } - - // Early exit: check if no reorg is needed (sequential block) - if latest_block.parent_hash == prev.hash { - trace!( - target: "supervisor::l1_watcher", - block_number = latest_block.number, - "Sequential block received, no reorg needed" - ); - return Some(latest_block.id()); - } - - match self.reorg_handler.handle_l1_reorg(latest_block).await { - Ok(()) => { - trace!( - target: "supervisor::l1_watcher", - block_number = latest_block.number, - "Successfully processed potential L1 reorg" - ); - } - Err(err) => { - error!( - target: "supervisor::l1_watcher", - block_number = latest_block.number, - %err, - "Failed to handle L1 reorg" - ); - } - } - - Some(latest_block.id()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::{ - SupervisorError, - syncnode::{ManagedNodeController, ManagedNodeError}, - }; - use alloy_primitives::B256; - use alloy_transport::mock::*; - use async_trait::async_trait; - use kona_supervisor_storage::{ChainDb, FinalizedL1Storage, StorageError}; - use kona_supervisor_types::BlockSeal; - use mockall::{mock, predicate}; - use std::sync::Arc; - use tokio::sync::mpsc; - // Mock the FinalizedL1Storage trait - mock! 
( - pub finalized_l1_storage {} - impl FinalizedL1Storage for finalized_l1_storage { - fn update_finalized_l1(&self, block: BlockInfo) -> Result<(), StorageError>; - fn get_finalized_l1(&self) -> Result; - } - ); - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl ManagedNodeController for Node { - async fn update_finalized( - &self, - _finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - async fn reset(&self) -> Result<(), ManagedNodeError>; - - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; - } - ); - - mock! ( - pub ReorgHandler { - fn handle_l1_reorg(&self, latest_block: BlockInfo) -> Result<(), SupervisorError>; - } - ); - - fn mock_rpc_client() -> RpcClient { - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter); - RpcClient::new(transport, false) - } - - fn mock_reorg_handler() -> ReorgHandler { - let chain_dbs_map: HashMap> = HashMap::new(); - ReorgHandler::new(mock_rpc_client(), chain_dbs_map) - } - - #[tokio::test] - async fn test_broadcast_finalized_source_update_sends_to_all() { - let (tx1, mut rx1) = mpsc::channel(1); - let (tx2, mut rx2) = mpsc::channel(1); - - let mut event_txs = HashMap::new(); - event_txs.insert(1, tx1); - event_txs.insert(2, tx2); - - let watcher = L1Watcher { - rpc_client: mock_rpc_client(), - cancellation: CancellationToken::new(), - finalized_l1_storage: Arc::new(Mockfinalized_l1_storage::new()), - event_txs, - reorg_handler: mock_reorg_handler(), - }; - - let block = BlockInfo::new(B256::ZERO, 42, B256::ZERO, 12345); - watcher.broadcast_finalized_source_update(block); - - assert!( - matches!(rx1.recv().await, Some(ChainEvent::FinalizedSourceUpdate { 
finalized_source_block }) if finalized_source_block == block) - ); - assert!( - matches!(rx2.recv().await, Some(ChainEvent::FinalizedSourceUpdate { finalized_source_block }) if finalized_source_block == block) - ); - } - - #[tokio::test] - async fn test_handle_new_finalized_block_updates_and_broadcasts() { - let (tx, mut rx) = mpsc::channel(1); - let event_txs = std::iter::once((1, tx)).collect(); - - let mut mock_storage = Mockfinalized_l1_storage::new(); - mock_storage.expect_update_finalized_l1().returning(|_block| Ok(())); - - let watcher = L1Watcher { - rpc_client: mock_rpc_client(), - cancellation: CancellationToken::new(), - finalized_l1_storage: Arc::new(mock_storage), - event_txs, - reorg_handler: mock_reorg_handler(), - }; - - let block = Block { - header: Header { - hash: B256::ZERO, - inner: alloy_consensus::Header { - number: 42, - parent_hash: B256::ZERO, - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - let mut last_finalized_number = 0; - last_finalized_number = - watcher.handle_new_finalized_block(block.clone(), last_finalized_number); - - let event = rx.recv().await.unwrap(); - let expected = BlockInfo::new( - block.header.hash, - block.header.number, - block.header.parent_hash, - block.header.timestamp, - ); - assert!( - matches!(event, ChainEvent::FinalizedSourceUpdate { ref finalized_source_block } if *finalized_source_block == expected), - "Expected FinalizedSourceUpdate with block {expected:?}, got {event:?}" - ); - assert_eq!(last_finalized_number, block.header.number); - } - - #[tokio::test] - async fn test_handle_new_finalized_block_storage_error() { - let (tx, mut rx) = mpsc::channel(1); - let event_txs = std::iter::once((1, tx)).collect(); - - let mut mock_storage = Mockfinalized_l1_storage::new(); - mock_storage - .expect_update_finalized_l1() - .returning(|_block| Err(StorageError::DatabaseNotInitialised)); - - let watcher = L1Watcher { - rpc_client: mock_rpc_client(), - 
cancellation: CancellationToken::new(), - finalized_l1_storage: Arc::new(mock_storage), - event_txs, - reorg_handler: mock_reorg_handler(), - }; - - let block = Block { - header: Header { - hash: B256::ZERO, - inner: alloy_consensus::Header { - number: 42, - parent_hash: B256::ZERO, - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - let mut last_finalized_number = 0; - last_finalized_number = watcher.handle_new_finalized_block(block, last_finalized_number); - - assert_eq!(last_finalized_number, 0); - // Should NOT broadcast if storage update fails - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_handle_new_latest_block_updates() { - let (tx, mut rx) = mpsc::channel(1); - let event_txs = std::iter::once((1, tx)).collect(); - - let watcher = L1Watcher { - rpc_client: mock_rpc_client(), - cancellation: CancellationToken::new(), - finalized_l1_storage: Arc::new(Mockfinalized_l1_storage::new()), - event_txs, - reorg_handler: mock_reorg_handler(), - }; - - let block = Block { - header: Header { - hash: B256::ZERO, - inner: alloy_consensus::Header { - number: 1, - parent_hash: B256::ZERO, - timestamp: 123456, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - let mut last_latest_number = None; - last_latest_number = watcher.handle_new_latest_block(block, last_latest_number).await; - assert_eq!(last_latest_number.unwrap().number, 1); - // Should NOT send any event for latest block - assert!(rx.try_recv().is_err()); - } - - #[tokio::test] - async fn test_trigger_reorg_handler() { - let (tx, mut rx) = mpsc::channel(1); - let event_txs = std::iter::once((1, tx)).collect(); - - let watcher = L1Watcher { - rpc_client: mock_rpc_client(), - cancellation: CancellationToken::new(), - finalized_l1_storage: Arc::new(Mockfinalized_l1_storage::new()), - event_txs, - reorg_handler: mock_reorg_handler(), - }; - - let block = Block { - header: Header { - hash: B256::ZERO, - 
inner: alloy_consensus::Header { - number: 101, - parent_hash: B256::ZERO, - timestamp: 123456, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - let mut last_latest_number = Some(BlockNumHash { number: 100, hash: B256::ZERO }); - last_latest_number = watcher.handle_new_latest_block(block, last_latest_number).await; - assert_eq!(last_latest_number.unwrap().number, 101); - - // Send previous block as latest block - let reorg_block = Block { - header: Header { - hash: B256::ZERO, - inner: alloy_consensus::Header { - number: 105, - parent_hash: B256::from([1u8; 32]), - timestamp: 123456, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - let reorg_block_info = BlockInfo::new( - reorg_block.header.hash, - reorg_block.header.number, - reorg_block.header.parent_hash, - reorg_block.header.timestamp, - ); - let mut mock_reorg_handler = MockReorgHandler::new(); - mock_reorg_handler - .expect_handle_l1_reorg() - .with(predicate::eq(reorg_block_info)) - .returning(|_| Ok(())); - - last_latest_number = watcher.handle_new_latest_block(reorg_block, last_latest_number).await; - assert_eq!(last_latest_number.unwrap().number, 105); - // Should NOT send any event for latest block - assert!(rx.try_recv().is_err()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/lib.rs b/rust/kona/crates/supervisor/core/src/lib.rs deleted file mode 100644 index bf211b7c4f347..0000000000000 --- a/rust/kona/crates/supervisor/core/src/lib.rs +++ /dev/null @@ -1,29 +0,0 @@ -//! This crate contains the core logic for the Optimism Supervisor component. - -pub mod chain_processor; -pub use chain_processor::{ChainProcessor, ChainProcessorError, ProcessorState}; - -pub mod error; -pub use error::{SpecError, SupervisorError}; - -/// Contains the main Supervisor struct and its implementation. 
-mod supervisor; -pub use supervisor::{Supervisor, SupervisorService}; - -mod logindexer; -pub use logindexer::{ - LogIndexer, LogIndexerError, log_to_log_hash, log_to_message_payload, payload_hash_to_log_hash, -}; - -pub mod rpc; - -pub mod config; -pub mod event; -pub mod l1_watcher; -pub mod syncnode; - -pub mod safety_checker; -pub use safety_checker::{CrossSafetyCheckerJob, CrossSafetyError}; - -mod reorg; -pub use reorg::{ReorgHandler, ReorgHandlerError}; diff --git a/rust/kona/crates/supervisor/core/src/logindexer/indexer.rs b/rust/kona/crates/supervisor/core/src/logindexer/indexer.rs deleted file mode 100644 index 9299f1ae2444b..0000000000000 --- a/rust/kona/crates/supervisor/core/src/logindexer/indexer.rs +++ /dev/null @@ -1,364 +0,0 @@ -use crate::{ - logindexer::{log_to_log_hash, payload_hash_to_log_hash}, - syncnode::{BlockProvider, ManagedNodeError}, -}; -use alloy_primitives::ChainId; -use kona_interop::parse_log_to_executing_message; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{LogStorageReader, LogStorageWriter, StorageError}; -use kona_supervisor_types::{ExecutingMessage, Log}; -use std::sync::Arc; -use thiserror::Error; -use tokio::sync::Mutex; -use tracing::{debug, error}; - -/// The [`LogIndexer`] is responsible for processing L2 receipts, extracting [`ExecutingMessage`]s, -/// and persisting them to the state manager. -#[derive(Debug)] -pub struct LogIndexer { - /// The chain ID of the rollup. - chain_id: ChainId, - /// Component that provides receipts for a given block hash. - block_provider: Mutex>>, - /// Component that persists parsed log entries to storage. - log_storage: Arc, - /// Protects concurrent catch-up - is_catch_up_running: Mutex, -} - -impl LogIndexer -where - P: BlockProvider + 'static, - S: LogStorageWriter + LogStorageReader + 'static, -{ - /// Creates a new [`LogIndexer`] with the given receipt provider and state manager. 
- /// - /// # Arguments - /// - `block_provider`: Shared reference to a component capable of fetching block ref and - /// receipts. - /// - `log_storage`: Shared reference to the storage layer for persisting parsed logs. - pub fn new(chain_id: ChainId, block_provider: Option>, log_storage: Arc) -> Self { - Self { - chain_id, - block_provider: Mutex::new(block_provider), - log_storage, - is_catch_up_running: Mutex::new(false), - } - } - - /// Sets the block provider - pub async fn set_block_provider(&self, block_provider: Arc

) { - let mut guard = self.block_provider.lock().await; - *guard = Some(block_provider); - } - - /// Asynchronously initiates a background task to catch up and index logs - /// starting from the latest successfully indexed block up to the specified block. - /// - /// If a catch-up job is already running, this call is ignored. - /// - /// # Arguments - /// - `block`: The target block to sync logs up to (inclusive). - pub fn sync_logs(self: Arc, block: BlockInfo) { - tokio::spawn(async move { - let mut running = self.is_catch_up_running.lock().await; - - if *running { - debug!(target: "supervisor::log_indexer", chain_id = %self.chain_id, "Catch-up running log index"); - return; - } - - *running = true; - drop(running); // release the lock while the job runs - - if let Err(err) = self.index_log_upto(&block).await { - error!( - target: "supervisor::log_indexer", - chain_id = %self.chain_id, - %err, - "Log indexer catch-up failed" - ); - } - - let mut running = self.is_catch_up_running.lock().await; - *running = false; - }); - } - - /// Performs log indexing sequentially from the latest indexed block up to the given target - /// block. - async fn index_log_upto(&self, block: &BlockInfo) -> Result<(), LogIndexerError> { - let mut current_number = self.log_storage.get_latest_block()?.number + 1; - - while current_number < block.number { - let provider = { - let guard = self.block_provider.lock().await; - guard.as_ref().ok_or(LogIndexerError::NoBlockProvider)?.clone() - }; - - let current_block = provider.block_by_number(current_number).await?; - self.process_and_store_logs(¤t_block).await?; - current_number += 1; - } - self.process_and_store_logs(block).await?; - - Ok(()) - } - - /// Processes and stores the logs of a given block in into the state manager. - /// - /// This function: - /// - Fetches all receipts for the given block from the specified chain. - /// - Iterates through all logs in all receipts. 
- /// - For each log, computes a hash from the log and optionally parses an [`ExecutingMessage`]. - /// - Records each [`Log`] including the message if found. - /// - Saves all log entries atomically using the [`LogStorageWriter`]. - /// - /// # Arguments - /// - `block`: Metadata about the block being processed. - pub async fn process_and_store_logs(&self, block: &BlockInfo) -> Result<(), LogIndexerError> { - let provider = { - let guard = self.block_provider.lock().await; - guard.as_ref().ok_or(LogIndexerError::NoBlockProvider)?.clone() - }; - - let receipts = provider.fetch_receipts(block.hash).await?; - let mut log_entries = Vec::with_capacity(receipts.len()); - let mut log_index: u32 = 0; - - for receipt in receipts { - for log in receipt.logs() { - let log_hash = log_to_log_hash(log); - - let executing_message = parse_log_to_executing_message(log).map(|msg| { - let payload_hash = - payload_hash_to_log_hash(msg.payloadHash, msg.identifier.origin); - ExecutingMessage { - chain_id: msg.identifier.chainId.try_into().unwrap(), - block_number: msg.identifier.blockNumber.try_into().unwrap(), - log_index: msg.identifier.logIndex.try_into().unwrap(), - timestamp: msg.identifier.timestamp.try_into().unwrap(), - hash: payload_hash, - } - }); - - log_entries.push(Log { index: log_index, hash: log_hash, executing_message }); - - log_index += 1; - } - } - - log_entries.shrink_to_fit(); - - self.log_storage.store_block_logs(block, log_entries)?; - Ok(()) - } -} - -/// Error type for the [`LogIndexer`]. -#[derive(Error, Debug, PartialEq, Eq)] -pub enum LogIndexerError { - /// No block provider set when attempting to index logs. - #[error("no block provider set")] - NoBlockProvider, - - /// Failed to write processed logs for a block to the state manager. - #[error(transparent)] - StateWrite(#[from] StorageError), - - /// Failed to fetch logs for a block from the state manager. 
- #[error(transparent)] - FetchReceipt(#[from] ManagedNodeError), -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::{AuthenticationError, ClientError}; - use alloy_primitives::{Address, B256, Bytes}; - use async_trait::async_trait; - use kona_interop::{ExecutingMessageBuilder, InteropProvider, SuperchainBuilder}; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::StorageError; - use kona_supervisor_types::{Log, Receipts}; - use mockall::mock; - use op_alloy_consensus::{OpReceiptEnvelope, OpTxType}; - use std::sync::Arc; - mock! ( - #[derive(Debug)] - pub BlockProvider {} - - #[async_trait] - impl BlockProvider for BlockProvider { - async fn fetch_receipts(&self, block_hash: B256) -> Result; - async fn block_by_number(&self, number: u64) -> Result; - } - ); - - mock!( - #[derive(Debug)] - pub Db {} - - impl LogStorageWriter for Db { - fn initialise_log_storage(&self, _block: BlockInfo) -> Result<(), StorageError>; - fn store_block_logs(&self, block: &BlockInfo, logs: Vec) -> Result<(), StorageError>; - } - - impl LogStorageReader for Db { - fn get_block(&self, block_number: u64) -> Result; - fn get_latest_block(&self) -> Result; - fn get_log(&self,block_number: u64,log_index: u32) -> Result; - fn get_logs(&self, block_number: u64) -> Result, StorageError>; - } - ); - - fn hash_for_number(n: u64) -> B256 { - let mut bytes = [0u8; 32]; - bytes[24..].copy_from_slice(&n.to_be_bytes()); - B256::from(bytes) - } - - async fn build_receipts() -> Receipts { - let mut builder = SuperchainBuilder::new(); - builder - .chain(10) - .with_timestamp(123456) - .add_initiating_message(Bytes::from_static(b"init-msg")) - .add_executing_message( - ExecutingMessageBuilder::default() - .with_message_hash(B256::repeat_byte(0xaa)) - .with_origin_address(Address::ZERO) - .with_origin_log_index(0) - .with_origin_block_number(1) - .with_origin_chain_id(10) - .with_origin_timestamp(123456), - ); - let (headers, _, mock_provider) = builder.build(); - let block = 
headers.get(&10).unwrap(); - - mock_provider.receipts_by_hash(10, block.hash()).await.unwrap() - } - - #[tokio::test] - async fn test_process_and_store_logs_success() { - let receipts = build_receipts().await; - let block_hash = B256::random(); - let block_info = - BlockInfo { number: 1, hash: block_hash, timestamp: 123456789, ..Default::default() }; - - let mut mock_provider = MockBlockProvider::new(); - mock_provider - .expect_fetch_receipts() - .withf(move |hash| *hash == block_hash) - .returning(move |_| Ok(receipts.clone())); - - mock_provider.expect_block_by_number().returning(|_| Ok(BlockInfo::default())); // Not used here - - let mut mock_db = MockDb::new(); - mock_db - .expect_store_block_logs() - .withf(|block, logs| block.number == 1 && logs.len() == 2) - .returning(|_, _| Ok(())); - - let log_indexer = LogIndexer::new(1, Some(Arc::new(mock_provider)), Arc::new(mock_db)); - - let result = log_indexer.process_and_store_logs(&block_info).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_process_and_store_logs_with_empty_logs() { - let block_hash = B256::random(); - let block_info = - BlockInfo { number: 2, hash: block_hash, timestamp: 111111111, ..Default::default() }; - - let empty_log_receipt = - OpReceiptEnvelope::from_parts(true, 21000, vec![], OpTxType::Eip1559, None, None); - let receipts = vec![empty_log_receipt]; - - let mut mock_provider = MockBlockProvider::new(); - mock_provider - .expect_fetch_receipts() - .withf(move |hash| *hash == block_hash) - .returning(move |_| Ok(receipts.clone())); - - mock_provider.expect_block_by_number().returning(|_| Ok(BlockInfo::default())); // Not used - - let mut mock_db = MockDb::new(); - mock_db - .expect_store_block_logs() - .withf(|block, logs| block.number == 2 && logs.is_empty()) - .returning(|_, _| Ok(())); - - let log_indexer = LogIndexer::new(1, Some(Arc::new(mock_provider)), Arc::new(mock_db)); - - let result = log_indexer.process_and_store_logs(&block_info).await; - 
assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_process_and_store_logs_receipt_fetch_fails() { - let block_hash = B256::random(); - let block_info = - BlockInfo { number: 3, hash: block_hash, timestamp: 123456, ..Default::default() }; - - let mut mock_provider = MockBlockProvider::new(); - mock_provider.expect_fetch_receipts().withf(move |hash| *hash == block_hash).returning( - |_| { - Err(ManagedNodeError::ClientError(ClientError::Authentication( - AuthenticationError::InvalidHeader, - ))) - }, - ); - - mock_provider.expect_block_by_number().returning(|_| Ok(BlockInfo::default())); // Not used - - let mock_db = MockDb::new(); // No call expected - - let log_indexer = LogIndexer::new(1, Some(Arc::new(mock_provider)), Arc::new(mock_db)); - - let result = log_indexer.process_and_store_logs(&block_info).await; - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_sync_logs_stores_all_blocks_in_range() { - let target_block = BlockInfo { - number: 5, - hash: B256::random(), - timestamp: 123456789, - ..Default::default() - }; - - // BlockProvider mock - let mut mock_provider = MockBlockProvider::new(); - mock_provider.expect_block_by_number().withf(|n| *n >= 1 && *n <= 5).returning(|n| { - Ok(BlockInfo { - number: n, - hash: hash_for_number(n), - timestamp: 0, - ..Default::default() - }) - }); - - mock_provider.expect_fetch_receipts().times(5).returning(move |_| { - Ok(vec![]) // Empty receipts - }); - - // Db mock - let mut mock_db = MockDb::new(); - mock_db - .expect_get_latest_block() - .returning(|| Ok(BlockInfo { number: 0, ..Default::default() })); - - mock_db.expect_store_block_logs().times(5).returning(move |_, _| Ok(())); - - let indexer = - Arc::new(LogIndexer::new(1, Some(Arc::new(mock_provider)), Arc::new(mock_db))); - - indexer.clone().sync_logs(target_block); - - // Let the background task complete - tokio::time::sleep(tokio::time::Duration::from_millis(300)).await; - } -} diff --git 
a/rust/kona/crates/supervisor/core/src/logindexer/mod.rs b/rust/kona/crates/supervisor/core/src/logindexer/mod.rs deleted file mode 100644 index 172de96b77aef..0000000000000 --- a/rust/kona/crates/supervisor/core/src/logindexer/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -//! Log indexing module for processing L2 receipts and extracting messages. -//! -//! This module provides functionality to extract and persist -//! [`ExecutingMessage`](kona_supervisor_types::ExecutingMessage)s and their corresponding -//! [`Log`](alloy_primitives::Log)s from L2 block receipts. It handles computing message payload -//! hashes and log hashes based on the interop messaging specification. -//! -//! # Modules -//! -//! - [`LogIndexer`] — main indexer that processes logs and persists them. -//! - [`LogIndexerError`] — error type for failures in fetching or storing logs. -//! - `util` — helper functions for computing payload and log hashes. -mod indexer; -pub use indexer::{LogIndexer, LogIndexerError}; - -mod util; -pub use util::{log_to_log_hash, log_to_message_payload, payload_hash_to_log_hash}; diff --git a/rust/kona/crates/supervisor/core/src/logindexer/util.rs b/rust/kona/crates/supervisor/core/src/logindexer/util.rs deleted file mode 100644 index 47a2a6848691b..0000000000000 --- a/rust/kona/crates/supervisor/core/src/logindexer/util.rs +++ /dev/null @@ -1,95 +0,0 @@ -use alloy_primitives::{Address, B256, Bytes, Log, keccak256}; - -/// Computes the log hash from a payload hash and log address. -/// -/// This is done by: -/// 1. Concatenating the raw 20-byte address with the 32-byte payload hash, -/// 2. Hashing the result with keccak256. -/// -/// This log hash is stored in the log storage and is used to map -/// an executing message back to the original initiating log. 
-pub fn payload_hash_to_log_hash(payload_hash: B256, addr: Address) -> B256 { - let mut buf = Vec::with_capacity(64); - buf.extend_from_slice(addr.as_slice()); // 20 bytes - buf.extend_from_slice(payload_hash.as_slice()); // 32 bytes - keccak256(&buf) -} - -/// Converts an L2 log into its raw message payload for hashing. -/// -/// This payload is defined as the concatenation of all log topics followed by the log data, -/// in accordance with the OP stack interop messaging spec. -/// -/// This data is what is hashed to produce the `payloadHash`. -pub fn log_to_message_payload(log: &Log) -> Bytes { - let mut payload = Vec::with_capacity(log.topics().len() * 32 + log.data.data.len()); - - // Append each topic in order - for topic in log.topics() { - payload.extend_from_slice(topic.as_slice()); - } - - // Append the raw data - payload.extend_from_slice(&log.data.data); - - payload.into() -} - -/// Computes the full log hash from a log using the OP Stack convention. -pub fn log_to_log_hash(log: &Log) -> B256 { - let payload = log_to_message_payload(log); - let payload_hash = keccak256(&payload); - payload_hash_to_log_hash(payload_hash, log.address) -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::{Bytes, Log, address, b256}; - - /// Creates a dummy log with fixed topics and data for testing. 
- fn sample_log() -> Log { - Log::new_unchecked( - address!("0xe0e1e2e3e4e5e6e7e8e9f0f1f2f3f4f5f6f7f8f9"), - vec![ - b256!("aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"), - b256!("bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb"), - ], - Bytes::from_static(b"example payload"), - ) - } - - #[test] - fn test_log_to_message_payload_is_correct() { - let log = sample_log(); - let payload = log_to_message_payload(&log); - - // Expect: topics + data - let mut expected = Vec::new(); - expected.extend_from_slice(&log.topics()[0].0); - expected.extend_from_slice(&log.topics()[1].0); - expected.extend_from_slice(&log.data.data); - - assert_eq!(payload.as_ref(), expected.as_slice()); - } - - #[test] - fn test_payload_hash_to_log_hash_with_known_value() { - let address = address!("0xe0e1e2e3e4e5e6e7e8e9f0f1f2f3f4f5f6f7f8f9"); - let payload_hash = keccak256(Bytes::from_static(b"example payload")); - let log_hash = payload_hash_to_log_hash(payload_hash, address); - let expected_hash = - b256!("0xf9ed05990c887d3f86718aabd7e940faaa75d6a5cd44602e89642586ce85f2aa"); - - assert_eq!(log_hash, expected_hash); - } - - #[test] - fn test_log_to_log_hash_with_known_value() { - let log = sample_log(); - let actual_log_hash = log_to_log_hash(&log); - let expected_log_hash = - b256!("0x20b21f284fb0286571fbf1cbfc20cdb1d50ea5c74c914478aee4a47b0a82a170"); - assert_eq!(actual_log_hash, expected_log_hash); - } -} diff --git a/rust/kona/crates/supervisor/core/src/reorg/error.rs b/rust/kona/crates/supervisor/core/src/reorg/error.rs deleted file mode 100644 index 6b92461bddd89..0000000000000 --- a/rust/kona/crates/supervisor/core/src/reorg/error.rs +++ /dev/null @@ -1,29 +0,0 @@ -use kona_supervisor_storage::StorageError; -use thiserror::Error; - -use crate::syncnode::ManagedNodeError; - -/// Error type for reorg handling -#[derive(Debug, Error)] -pub enum ReorgHandlerError { - /// Indicates managed node not found for the chain. 
- #[error("managed node not found for chain: {0}")] - ManagedNodeMissing(u64), - - /// Indicates an error occurred while interacting with the managed node. - #[error(transparent)] - ManagedNodeError(#[from] ManagedNodeError), - - /// Indicates an error occurred while interacting with the database. - #[error(transparent)] - StorageError(#[from] StorageError), - - /// Indicates an error occurred while interacting with the l1 RPC client. - #[error("failed to interact with l1 RPC client: {0}")] - RPCError(String), - - /// Indicates an error occurred while finding rewind target for reorg. - /// This can happen if the rewind target block is pre-interop. - #[error("rewind target is pre-interop")] - RewindTargetPreInterop, -} diff --git a/rust/kona/crates/supervisor/core/src/reorg/handler.rs b/rust/kona/crates/supervisor/core/src/reorg/handler.rs deleted file mode 100644 index c7556ca1f97ca..0000000000000 --- a/rust/kona/crates/supervisor/core/src/reorg/handler.rs +++ /dev/null @@ -1,91 +0,0 @@ -use super::metrics::Metrics; -use crate::{ReorgHandlerError, reorg::task::ReorgTask}; -use alloy_primitives::ChainId; -use alloy_rpc_client::RpcClient; -use derive_more::Constructor; -use futures::future; -use kona_protocol::BlockInfo; -use kona_supervisor_metrics::observe_metrics_for_result_async; -use kona_supervisor_storage::{DbReader, StorageRewinder}; -use std::{collections::HashMap, sync::Arc}; -use tracing::{error, info, trace}; - -/// Handles L1 reorg operations for multiple chains -#[derive(Debug, Constructor)] -pub struct ReorgHandler { - /// The Alloy RPC client for L1. - rpc_client: RpcClient, - /// Per chain dbs. 
- chain_dbs: HashMap>, -} - -impl ReorgHandler -where - DB: DbReader + StorageRewinder + Send + Sync + 'static, -{ - /// Initializes the metrics for the reorg handler - pub fn with_metrics(self) -> Self { - // Initialize metrics for all chains - for chain_id in self.chain_dbs.keys() { - Metrics::init(*chain_id); - } - - self - } - - /// Wrapper method for segregating concerns between the startup and L1 watcher reorg handlers. - pub async fn verify_l1_consistency(&self) -> Result<(), ReorgHandlerError> { - info!( - target: "supervisor::reorg_handler", - "Verifying L1 consistency for each chain..." - ); - - self.verify_and_handle_chain_reorg().await - } - - /// Processes a reorg for all chains when a new latest L1 block is received - pub async fn handle_l1_reorg(&self, latest_block: BlockInfo) -> Result<(), ReorgHandlerError> { - trace!( - target: "supervisor::reorg_handler", - l1_block_number = latest_block.number, - "Potential reorg detected, processing..." - ); - - self.verify_and_handle_chain_reorg().await - } - - /// Verifies the consistency of each chain with the L1 chain and handles any reorgs, if any. 
- async fn verify_and_handle_chain_reorg(&self) -> Result<(), ReorgHandlerError> { - let mut handles = Vec::with_capacity(self.chain_dbs.len()); - - for (chain_id, chain_db) in &self.chain_dbs { - let reorg_task = - ReorgTask::new(*chain_id, Arc::clone(chain_db), self.rpc_client.clone()); - - let chain_id = *chain_id; - - let handle = tokio::spawn(async move { - observe_metrics_for_result_async!( - Metrics::SUPERVISOR_REORG_SUCCESS_TOTAL, - Metrics::SUPERVISOR_REORG_ERROR_TOTAL, - Metrics::SUPERVISOR_REORG_DURATION_SECONDS, - Metrics::SUPERVISOR_REORG_METHOD_PROCESS_CHAIN_REORG, - async { - reorg_task.process_chain_reorg().await - }, - "chain_id" => chain_id.to_string() - ) - }); - handles.push(handle); - } - - let results = future::join_all(handles).await; - for result in results { - if let Err(err) = result { - error!(target: "supervisor::reorg_handler", %err, "Reorg task failed"); - } - } - - Ok(()) - } -} diff --git a/rust/kona/crates/supervisor/core/src/reorg/metrics.rs b/rust/kona/crates/supervisor/core/src/reorg/metrics.rs deleted file mode 100644 index de9d194621578..0000000000000 --- a/rust/kona/crates/supervisor/core/src/reorg/metrics.rs +++ /dev/null @@ -1,106 +0,0 @@ -use alloy_primitives::ChainId; - -/// Metrics for reorg operations -#[derive(Debug, Clone)] -pub(crate) struct Metrics; - -impl Metrics { - pub(crate) const SUPERVISOR_REORG_SUCCESS_TOTAL: &'static str = - "kona_supervisor_reorg_success_total"; - pub(crate) const SUPERVISOR_REORG_ERROR_TOTAL: &'static str = - "kona_supervisor_reorg_error_total"; - pub(crate) const SUPERVISOR_REORG_DURATION_SECONDS: &'static str = - "kona_supervisor_reorg_duration_seconds"; - pub(crate) const SUPERVISOR_REORG_METHOD_PROCESS_CHAIN_REORG: &'static str = - "process_chain_reorg"; - pub(crate) const SUPERVISOR_REORG_L1_DEPTH: &'static str = "kona_supervisor_reorg_l1_depth"; - pub(crate) const SUPERVISOR_REORG_L2_DEPTH: &'static str = "kona_supervisor_reorg_l2_depth"; - - pub(crate) fn init(chain_id: ChainId) { - 
Self::describe(); - Self::zero(chain_id); - } - - fn describe() { - metrics::describe_counter!( - Self::SUPERVISOR_REORG_SUCCESS_TOTAL, - metrics::Unit::Count, - "Total number of successfully processed L1 reorgs in the supervisor", - ); - - metrics::describe_counter!( - Self::SUPERVISOR_REORG_ERROR_TOTAL, - metrics::Unit::Count, - "Total number of errors encountered while processing L1 reorgs in the supervisor", - ); - - metrics::describe_histogram!( - Self::SUPERVISOR_REORG_L1_DEPTH, - metrics::Unit::Count, - "Depth of the L1 reorg in the supervisor", - ); - - metrics::describe_histogram!( - Self::SUPERVISOR_REORG_L2_DEPTH, - metrics::Unit::Count, - "Depth of the L2 reorg in the supervisor", - ); - - metrics::describe_histogram!( - Self::SUPERVISOR_REORG_DURATION_SECONDS, - metrics::Unit::Seconds, - "Latency for processing L1 reorgs in the supervisor", - ); - } - - fn zero(chain_id: ChainId) { - metrics::counter!( - Self::SUPERVISOR_REORG_SUCCESS_TOTAL, - "chain_id" => chain_id.to_string(), - "method" => Self::SUPERVISOR_REORG_METHOD_PROCESS_CHAIN_REORG, - ) - .increment(0); - - metrics::counter!( - Self::SUPERVISOR_REORG_ERROR_TOTAL, - "chain_id" => chain_id.to_string(), - "method" => Self::SUPERVISOR_REORG_METHOD_PROCESS_CHAIN_REORG, - ) - .increment(0); - - metrics::histogram!( - Self::SUPERVISOR_REORG_L1_DEPTH, - "chain_id" => chain_id.to_string(), - "method" => Self::SUPERVISOR_REORG_METHOD_PROCESS_CHAIN_REORG, - ) - .record(0); - - metrics::histogram!( - Self::SUPERVISOR_REORG_L2_DEPTH, - "chain_id" => chain_id.to_string(), - "method" => Self::SUPERVISOR_REORG_METHOD_PROCESS_CHAIN_REORG, - ) - .record(0); - - metrics::histogram!( - Self::SUPERVISOR_REORG_DURATION_SECONDS, - "chain_id" => chain_id.to_string(), - "method" => Self::SUPERVISOR_REORG_METHOD_PROCESS_CHAIN_REORG, - ) - .record(0.0); - } - - pub(crate) fn record_block_depth(chain_id: ChainId, l1_depth: u64, l2_depth: u64) { - metrics::histogram!( - Self::SUPERVISOR_REORG_L1_DEPTH, - "chain_id" => 
chain_id.to_string(), - ) - .record(l1_depth as f64); - - metrics::histogram!( - Self::SUPERVISOR_REORG_L2_DEPTH, - "chain_id" => chain_id.to_string(), - ) - .record(l2_depth as f64); - } -} diff --git a/rust/kona/crates/supervisor/core/src/reorg/mod.rs b/rust/kona/crates/supervisor/core/src/reorg/mod.rs deleted file mode 100644 index f3c49ac20d611..0000000000000 --- a/rust/kona/crates/supervisor/core/src/reorg/mod.rs +++ /dev/null @@ -1,9 +0,0 @@ -mod task; - -mod handler; -pub use handler::ReorgHandler; - -mod error; -pub use error::ReorgHandlerError; - -mod metrics; diff --git a/rust/kona/crates/supervisor/core/src/reorg/task.rs b/rust/kona/crates/supervisor/core/src/reorg/task.rs deleted file mode 100644 index 4069bbad2d40d..0000000000000 --- a/rust/kona/crates/supervisor/core/src/reorg/task.rs +++ /dev/null @@ -1,1250 +0,0 @@ -use super::metrics::Metrics; -use crate::ReorgHandlerError; -use alloy_eips::BlockNumberOrTag; -use alloy_primitives::{B256, ChainId}; -use alloy_rpc_client::RpcClient; -use alloy_rpc_types_eth::Block; -use derive_more::Constructor; -use kona_interop::DerivedRefPair; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{DbReader, StorageError, StorageRewinder}; -use std::sync::Arc; -use tracing::{debug, info, trace, warn}; - -/// Handles reorg for a single chain -#[derive(Debug, Constructor)] -pub(crate) struct ReorgTask { - chain_id: ChainId, - db: Arc, - rpc_client: RpcClient, -} - -#[derive(Debug)] -struct RewoundState { - source: BlockInfo, - derived: Option, -} - -impl ReorgTask -where - DB: DbReader + StorageRewinder + Send + Sync + 'static, -{ - /// Processes reorg for a single chain. If the chain is consistent with the L1 chain, - /// does nothing. - pub(crate) async fn process_chain_reorg(&self) -> Result<(), ReorgHandlerError> { - trace!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "Processing reorg for chain..." 
- ); - - let latest_state = self.db.latest_derivation_state()?; - - // Find last valid source block for this chain - let rewound_state = match self.find_rewind_target(latest_state).await { - Ok(Some(rewind_target_source)) => { - Some(self.rewind_to_target_source(rewind_target_source).await?) - } - Ok(None) => { - // No reorg needed, latest source block is still canonical - return Ok(()); - } - Err(ReorgHandlerError::RewindTargetPreInterop) => { - self.rewind_to_activation_block().await? - } - Err(err) => { - return Err(err); - } - }; - - // record metrics - if let Some(rewound_state) = rewound_state { - let l1_depth = latest_state.source.number - rewound_state.source.number; - let l2_depth = rewound_state - .derived - .map_or(0, |derived| latest_state.derived.number - derived.number); - Metrics::record_block_depth(self.chain_id, l1_depth, l2_depth); - } - info!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "Processed reorged successfully" - ); - Ok(()) - } - - async fn rewind_to_target_source( - &self, - rewind_target_source: BlockInfo, - ) -> Result { - info!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - rewind_target_source = rewind_target_source.number, - "Reorg detected - rewinding to target source block..." - ); - - // Call the rewinder to handle the DB rewinding - let derived_block_rewound = - self.db.rewind_to_source(&rewind_target_source.id()).inspect_err(|err| { - warn!( - target: "supervisor::reorg_handler::db", - chain_id = %self.chain_id, - %err, - "Failed to rewind DB to derived block" - ); - })?; - - Ok(RewoundState { source: rewind_target_source, derived: derived_block_rewound }) - } - - async fn rewind_to_activation_block(&self) -> Result, ReorgHandlerError> { - info!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "Reorg detected - rewinding to activation block..." 
- ); - - // If the rewind target is pre-interop, we need to rewind to the activation block - match self.db.get_activation_block() { - Ok(activation_block) => { - let activation_source_block = self.db.derived_to_source(activation_block.id())?; - self.db.rewind(&activation_block.id()).inspect_err(|err| { - warn!( - target: "supervisor::reorg_handler::db", - chain_id = %self.chain_id, - %err, - "Failed to rewind DB to activation block" - ); - })?; - Ok(Some(RewoundState { - source: activation_source_block, - derived: Some(activation_block), - })) - } - Err(StorageError::DatabaseNotInitialised) => { - debug!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "No activation block found, no rewind required" - ); - Ok(None) - } - Err(err) => Err(ReorgHandlerError::StorageError(err)), - } - } - - /// Finds the rewind target for a chain during a reorg - /// - /// Returns `None` if no rewind is needed, or the target block to rewind to. - /// Returns `ReorgHandlerError::RewindTargetPreInterop` if the rewind target is before the - /// interop activation block. - async fn find_rewind_target( - &self, - latest_state: DerivedRefPair, - ) -> Result, ReorgHandlerError> { - trace!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "Finding rewind target..." - ); - - // Check if the latest source block is still canonical - if self.is_block_canonical(latest_state.source.number, latest_state.source.hash).await? 
{ - debug!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - block_number = latest_state.source.number, - "Latest source block is still canonical, no reorg needed" - ); - return Ok(None); - } - - let common_ancestor = self.find_common_ancestor().await?; - let mut prev_source = latest_state.source; - let mut current_source = self.db.get_source_block(prev_source.number - 1)?; - - while current_source.number > common_ancestor.number { - if current_source.number % 5 == 0 { - trace!( - target: "supervisor::reorg_handler", - current_block=current_source.number, - common_ancestor=common_ancestor.number, - "Finding rewind target..." - ) - } - - // If the current source block is canonical, we found the rewind target - if self.is_block_canonical(current_source.number, current_source.hash).await? { - info!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - block_number = current_source.number, - "Found canonical block as rewind target" - ); - break; - } - - // Otherwise, walk back to the previous source block - prev_source = current_source; - current_source = self.db.get_source_block(current_source.number - 1)?; - } - - // return the previous source block as the rewind target since rewinding is inclusive - Ok(Some(prev_source)) - } - - async fn find_common_ancestor(&self) -> Result { - trace!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "Finding common ancestor." - ); - - match self.db.get_safety_head_ref(kona_interop::SafetyLevel::Finalized) { - Ok(finalized_block) => { - let common_ancestor = self.db.derived_to_source(finalized_block.id())?; - return Ok(common_ancestor); - } - Err(StorageError::FutureData) => { /* fall through to activation block */ } - Err(err) => { - return Err(ReorgHandlerError::StorageError(err)); - } - } - - debug!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "No finalized block found, checking activation block." 
- ); - - match self.db.get_activation_block() { - Ok(activation_block) => { - let activation_source_block = self.db.derived_to_source(activation_block.id())?; - if self - .is_block_canonical( - activation_source_block.number, - activation_source_block.hash, - ) - .await? - { - Ok(activation_source_block) - } else { - debug!( - target: "supervisor::reorg_handler", - chain_id = %self.chain_id, - "Activation block is not canonical, no common ancestor found" - ); - Err(ReorgHandlerError::RewindTargetPreInterop) - } - } - Err(StorageError::DatabaseNotInitialised) => { - Err(ReorgHandlerError::RewindTargetPreInterop) - } - Err(err) => Err(ReorgHandlerError::StorageError(err)), - } - } - - /// Checks if a block is canonical on L1 - async fn is_block_canonical( - &self, - block_number: u64, - expected_hash: B256, - ) -> Result { - let canonical_l1 = self - .rpc_client - .request::<_, Block>( - "eth_getBlockByNumber", - (BlockNumberOrTag::Number(block_number), false), - ) - .await - .map_err(|err| { - warn!( - target: "supervisor::reorg_handler", - block_number, - %err, - "Failed to fetch L1 block from RPC" - ); - ReorgHandlerError::RPCError(err.to_string()) - })?; - Ok(canonical_l1.hash() == expected_hash) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_eips::BlockNumHash; - use alloy_rpc_types_eth::Header; - use alloy_transport::mock::*; - use kona_interop::{DerivedRefPair, SafetyLevel}; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{ - DerivationStorageReader, HeadRefStorageReader, LogStorageReader, StorageError, - }; - use kona_supervisor_types::{Log, SuperHead}; - use mockall::{mock, predicate}; - - mock!( - #[derive(Debug)] - pub Db {} - - impl LogStorageReader for Db { - fn get_block(&self, block_number: u64) -> Result; - fn get_latest_block(&self) -> Result; - fn get_log(&self, block_number: u64,log_index: u32) -> Result; - fn get_logs(&self, block_number: u64) -> Result, StorageError>; - } - - impl DerivationStorageReader for Db { - 
fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result; - fn latest_derived_block_at_source(&self, source_block_id: BlockNumHash) -> Result; - fn latest_derivation_state(&self) -> Result; - fn get_source_block(&self, source_block_number: u64) -> Result; - fn get_activation_block(&self) -> Result; - } - - impl HeadRefStorageReader for Db { - fn get_safety_head_ref(&self, safety_level: SafetyLevel) -> Result; - fn get_super_head(&self) -> Result; - } - - impl StorageRewinder for Db { - fn rewind(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind_log_storage(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind_to_source(&self, to: &BlockNumHash) -> Result, StorageError>; - } - ); - - mock! ( - pub chain_db {} - ); - - #[tokio::test] - async fn test_process_chain_reorg_no_reorg_needed() { - let mut mock_db = MockDb::new(); - - let latest_source = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - let latest_state = DerivedRefPair { - source: latest_source, - derived: BlockInfo::new(B256::from([3u8; 32]), 50, B256::from([4u8; 32]), 12346), - }; - - // Mock the latest derivation state - mock_db.expect_latest_derivation_state().times(1).returning(move || Ok(latest_state)); - - // Mock the RPC to return the same block (no reorg) - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - - let canonical_block: Block = Block { - header: Header { - hash: latest_source.hash, - inner: alloy_consensus::Header { - number: latest_source.number, - parent_hash: latest_source.parent_hash, - timestamp: latest_source.timestamp, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - asserter.push_success(&canonical_block); - - let reorg_task = ReorgTask::new(1, Arc::new(mock_db), rpc_client); - - let result = reorg_task.process_chain_reorg().await; - - assert!(result.is_ok()); - } - - #[tokio::test] 
- async fn test_process_chain_reorg_with_rewind() { - let mut mock_db = MockDb::new(); - - let latest_source = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - let latest_state = DerivedRefPair { - source: latest_source, - derived: BlockInfo::new(B256::from([3u8; 32]), 50, B256::from([4u8; 32]), 12346), - }; - - let canonical_source = - BlockInfo::new(B256::from([5u8; 32]), 95, B256::from([6u8; 32]), 12344); - - let rewind_target_source = - BlockInfo::new(B256::from([10u8; 32]), 96, B256::from([11u8; 32]), 12340); - - let rewind_target_derived = - BlockInfo::new(B256::from([12u8; 32]), 45, B256::from([13u8; 32]), 12341); - - let finalized_block = - BlockInfo::new(B256::from([20u8; 32]), 40, B256::from([21u8; 32]), 12330); - - // Mock the latest derivation state - mock_db.expect_latest_derivation_state().times(1).returning(move || Ok(latest_state)); - - // Mock finding common ancestor - mock_db.expect_get_safety_head_ref().times(1).returning(move |_| Ok(finalized_block)); - - mock_db.expect_derived_to_source().times(1).returning(move |_| Ok(canonical_source)); - - mock_db.expect_get_source_block().times(5).returning( - move |block_number| match block_number { - 99 => Ok(BlockInfo::new(B256::from([16u8; 32]), 99, B256::from([17u8; 32]), 12344)), - 98 => Ok(BlockInfo::new(B256::from([17u8; 32]), 98, B256::from([18u8; 32]), 12343)), - 97 => Ok(BlockInfo::new(B256::from([18u8; 32]), 97, B256::from([19u8; 32]), 12342)), - 96 => Ok(rewind_target_source), - 95 => Ok(canonical_source), - _ => Err(StorageError::ConflictError), - }, - ); - - // Mock the RPC to show reorg happened - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - - // First call shows different hash (reorg detected) - let different_block: Block = Block { - header: Header { - hash: B256::from([99u8; 32]), // Different hash - inner: alloy_consensus::Header { - number: 
latest_source.number, - parent_hash: latest_source.parent_hash, - timestamp: latest_source.timestamp, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - asserter.push_success(&different_block); - asserter.push_success(&different_block); - asserter.push_success(&different_block); - asserter.push_success(&different_block); - asserter.push_success(&different_block); - - // Second call for checking if rewind target is canonical - let canonical_block: Block = Block { - header: Header { - hash: canonical_source.hash, - inner: alloy_consensus::Header { - number: canonical_source.number, - parent_hash: canonical_source.parent_hash, - timestamp: canonical_source.timestamp, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - asserter.push_success(&canonical_block); - - // Mock rewind operations - mock_db - .expect_rewind_to_source() - .times(1) - .returning(move |_| Ok(Some(rewind_target_derived))); - - let reorg_task = ReorgTask::new(1, Arc::new(mock_db), rpc_client); - - let result = reorg_task.process_chain_reorg().await; - - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_process_chain_reorg_rewind_pre_interop() { - let mut mock_db = MockDb::new(); - - let latest_source = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - let latest_state = DerivedRefPair { - source: latest_source, - derived: BlockInfo::new(B256::from([3u8; 32]), 50, B256::from([4u8; 32]), 12346), - }; - - let activation_block = - BlockInfo::new(B256::from([10u8; 32]), 1, B256::from([11u8; 32]), 12000); - - let activation_source = - BlockInfo::new(B256::from([12u8; 32]), 10, B256::from([13u8; 32]), 11999); - - // Mock the latest derivation state - mock_db.expect_latest_derivation_state().times(1).returning(move || Ok(latest_state)); - - // Mock finding common ancestor fails with pre-interop - mock_db.expect_get_safety_head_ref().times(1).returning(|_| Err(StorageError::FutureData)); - - 
mock_db - .expect_get_activation_block() - .times(2) // Once in find_common_ancestor, once in rewind_to_activation_block - .returning(move || Ok(activation_block)); - - mock_db - .expect_derived_to_source() - .times(2) // Once in find_common_ancestor, once in rewind_to_activation_block - .returning(move |_| Ok(activation_source)); - - // Mock the RPC calls - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - - // First call shows different hash (reorg detected) - let different_block: Block = Block { - header: Header { - hash: B256::from([99u8; 32]), - inner: alloy_consensus::Header { - number: latest_source.number, - parent_hash: latest_source.parent_hash, - timestamp: latest_source.timestamp, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - asserter.push_success(&different_block); - - // Activation block is not canonical - let non_canonical_activation: Block = Block { - header: Header { - hash: B256::from([99u8; 32]), // Different from expected - inner: alloy_consensus::Header { - number: activation_source.number, - parent_hash: activation_source.parent_hash, - timestamp: activation_source.timestamp, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - asserter.push_success(&non_canonical_activation); - - // Mock rewind to activation block - mock_db.expect_rewind().times(1).returning(|_| Ok(())); - - let reorg_task = ReorgTask::new(1, Arc::new(mock_db), rpc_client); - - let result = reorg_task.process_chain_reorg().await; - - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_process_chain_reorg_storage_error() { - let mut mock_db = MockDb::new(); - - // DB fails to get latest derivation state - mock_db - .expect_latest_derivation_state() - .times(1) - .returning(|| Err(StorageError::LockPoisoned)); - - let reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - 
RpcClient::new(MockTransport::new(Asserter::new()), false), - ); - - let result = reorg_task.process_chain_reorg().await; - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - ReorgHandlerError::StorageError(StorageError::LockPoisoned) - )); - } - - #[tokio::test] - async fn test_find_rewind_target_without_reorg() { - let mut mock_db = MockDb::new(); - let latest_source: Block = Block { - header: Header { - hash: B256::from([1u8; 32]), - inner: alloy_consensus::Header { - number: 42, - parent_hash: B256::ZERO, - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let latest_state = DerivedRefPair { - source: BlockInfo::new( - latest_source.header.hash, - latest_source.header.number, - latest_source.header.parent_hash, - latest_source.header.timestamp, - ), - derived: BlockInfo::new(B256::from([5u8; 32]), 200, B256::ZERO, 1100), - }; - - // Mock the latest derivation state and expect this to be called once - mock_db.expect_latest_derivation_state().times(1).returning(move || Ok(latest_state)); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - // Mock RPC response - asserter.push_success(&latest_source); - - let reorg_task = ReorgTask::new(1, Arc::new(mock_db), rpc_client); - let rewind_target = reorg_task.process_chain_reorg().await; - - // Should succeed since the latest source block is still canonical - assert!(rewind_target.is_ok()); - } - - #[tokio::test] - async fn test_find_rewind_target_with_reorg() { - let mut mock_db = MockDb::new(); - let latest_source: Block = Block { - header: Header { - hash: B256::from([1u8; 32]), - inner: alloy_consensus::Header { - number: 41, - parent_hash: B256::from([2u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let latest_state = DerivedRefPair { - source: BlockInfo::new( - 
latest_source.header.hash, - latest_source.header.number, - latest_source.header.parent_hash, - latest_source.header.timestamp, - ), - derived: BlockInfo::new(B256::from([10u8; 32]), 200, B256::ZERO, 1100), - }; - - let finalized_source: Block = Block { - header: Header { - hash: B256::from([2u8; 32]), - inner: alloy_consensus::Header { - number: 38, - parent_hash: B256::from([1u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let finalized_state = DerivedRefPair { - source: BlockInfo::new( - finalized_source.header.hash, - finalized_source.header.number, - finalized_source.header.parent_hash, - finalized_source.header.timestamp, - ), - derived: BlockInfo::new(B256::from([20u8; 32]), 200, B256::ZERO, 1100), - }; - - let reorg_source: Block = Block { - header: Header { - hash: B256::from([14u8; 32]), - inner: alloy_consensus::Header { - number: 40, - parent_hash: B256::from([13u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let reorg_source_info = BlockInfo::new( - reorg_source.header.hash, - reorg_source.header.number, - reorg_source.header.parent_hash, - reorg_source.header.timestamp, - ); - - let mut source_39: Block = reorg_source.clone(); - source_39.header.inner.number = 39; - let source_39_info = BlockInfo::new( - source_39.header.hash, - source_39.header.number, - source_39.header.parent_hash, - source_39.header.timestamp, - ); - - let incorrect_source: Block = Block { - header: Header { - hash: B256::from([15u8; 32]), - inner: alloy_consensus::Header { - number: 5000, - parent_hash: B256::from([13u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - mock_db.expect_latest_derivation_state().returning(move || Ok(latest_state)); - mock_db - .expect_get_safety_head_ref() - .times(1) - .returning(move |_| Ok(finalized_state.derived)); - 
mock_db.expect_derived_to_source().times(1).returning(move |_| Ok(finalized_state.source)); - - mock_db.expect_get_source_block().times(3).returning( - move |block_number| match block_number { - 41 => Ok(latest_state.source), - 40 => Ok(reorg_source_info), - 39 => Ok(source_39_info), - _ => Ok(finalized_state.source), - }, - ); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - - // First return the reorged block - asserter.push_success(&reorg_source); - - // Then returning some random incorrect blocks 3 times till it reaches the finalized block - asserter.push_success(&incorrect_source); - asserter.push_success(&incorrect_source); - asserter.push_success(&incorrect_source); - - // Finally returning the correct block - asserter.push_success(&finalized_source); - - let reorg_task = ReorgTask::new(1, Arc::new(mock_db), rpc_client); - let rewind_target = reorg_task.find_rewind_target(latest_state).await; - - // Should succeed since the latest source block is still canonical - assert!(rewind_target.is_ok()); - assert_eq!(rewind_target.unwrap(), Some(source_39_info)); - } - - #[tokio::test] - async fn test_find_rewind_target_with_finalized_future_activation_canonical() { - let mut mock_db = MockDb::new(); - let latest_source: Block = Block { - header: Header { - hash: B256::from([1u8; 32]), - inner: alloy_consensus::Header { - number: 41, - parent_hash: B256::from([2u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let latest_state = DerivedRefPair { - source: BlockInfo::new( - latest_source.header.hash, - latest_source.header.number, - latest_source.header.parent_hash, - latest_source.header.timestamp, - ), - derived: BlockInfo::new(B256::from([10u8; 32]), 200, B256::ZERO, 1100), - }; - - let activation_source: Block = Block { - header: Header { - hash: B256::from([2u8; 32]), - inner: alloy_consensus::Header 
{ - number: 38, - parent_hash: B256::from([1u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let activation_state = DerivedRefPair { - source: BlockInfo::new( - activation_source.header.hash, - activation_source.header.number, - activation_source.header.parent_hash, - activation_source.header.timestamp, - ), - derived: BlockInfo::new(B256::from([20u8; 32]), 200, B256::ZERO, 1100), - }; - - let reorg_source: Block = Block { - header: Header { - hash: B256::from([14u8; 32]), - inner: alloy_consensus::Header { - number: 40, - parent_hash: B256::from([13u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let reorg_source_info = BlockInfo::new( - reorg_source.header.hash, - reorg_source.header.number, - reorg_source.header.parent_hash, - reorg_source.header.timestamp, - ); - - let mut source_39: Block = reorg_source.clone(); - source_39.header.inner.number = 39; - let source_39_info = BlockInfo::new( - source_39.header.hash, - source_39.header.number, - source_39.header.parent_hash, - source_39.header.timestamp, - ); - - let incorrect_source: Block = Block { - header: Header { - hash: B256::from([15u8; 32]), - inner: alloy_consensus::Header { - number: 5000, - parent_hash: B256::from([13u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - mock_db - .expect_get_safety_head_ref() - .times(1) - .returning(move |_| Err(StorageError::FutureData)); - mock_db - .expect_get_activation_block() - .times(1) - .returning(move || Ok(activation_state.derived)); - mock_db.expect_derived_to_source().times(1).returning(move |_| Ok(activation_state.source)); - - mock_db.expect_get_source_block().times(3).returning( - move |block_number| match block_number { - 41 => Ok(latest_state.source), - 40 => Ok(reorg_source_info), - 39 => Ok(source_39_info), - _ => Ok(activation_state.source), - }, - 
); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - - // First return the reorged block - asserter.push_success(&reorg_source); - - // Return the activation block source to make sure it is canonical - // Used in `find_common_ancestor` - asserter.push_success(&activation_source); - - // Then returning some random incorrect blocks 3 times till it reaches the finalized block - asserter.push_success(&incorrect_source); - asserter.push_success(&incorrect_source); - asserter.push_success(&incorrect_source); - - // Finally returning the correct block - asserter.push_success(&activation_source); - - let reorg_task = ReorgTask::new(1, Arc::new(mock_db), rpc_client); - let rewind_target = reorg_task.find_rewind_target(latest_state).await; - - // Should succeed since the latest source block is still canonical - assert!(rewind_target.is_ok()); - assert_eq!(rewind_target.unwrap(), Some(source_39_info)); - } - - #[tokio::test] - async fn test_find_rewind_target_with_finalized_future_activation_not_canonical() { - let mut mock_db = MockDb::new(); - let latest_source: Block = Block { - header: Header { - hash: B256::from([1u8; 32]), - inner: alloy_consensus::Header { - number: 41, - parent_hash: B256::from([2u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let latest_state = DerivedRefPair { - source: BlockInfo::new( - latest_source.header.hash, - latest_source.header.number, - latest_source.header.parent_hash, - latest_source.header.timestamp, - ), - derived: BlockInfo::new(B256::from([10u8; 32]), 200, B256::ZERO, 1100), - }; - - let activation_source: Block = Block { - header: Header { - hash: B256::from([2u8; 32]), - inner: alloy_consensus::Header { - number: 38, - parent_hash: B256::from([1u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let 
activation_state = DerivedRefPair { - source: BlockInfo::new( - activation_source.header.hash, - activation_source.header.number, - activation_source.header.parent_hash, - activation_source.header.timestamp, - ), - derived: BlockInfo::new(B256::from([20u8; 32]), 200, B256::ZERO, 1100), - }; - - let reorg_source: Block = Block { - header: Header { - hash: B256::from([14u8; 32]), - inner: alloy_consensus::Header { - number: 40, - parent_hash: B256::from([13u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let incorrect_source: Block = Block { - header: Header { - hash: B256::from([15u8; 32]), - inner: alloy_consensus::Header { - number: 5000, - parent_hash: B256::from([13u8; 32]), - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - mock_db - .expect_get_safety_head_ref() - .times(1) - .returning(move |_| Err(StorageError::FutureData)); - mock_db - .expect_get_activation_block() - .times(1) - .returning(move || Ok(activation_state.derived)); - mock_db.expect_derived_to_source().times(1).returning(move |_| Ok(activation_state.source)); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - - // First return the reorged block - asserter.push_success(&reorg_source); - - // Return the incorrect source to make sure activation block is not canonical - // Used in `find_common_ancestor` - asserter.push_success(&incorrect_source); - - let reorg_task = ReorgTask::new(1, Arc::new(mock_db), rpc_client); - let rewind_target = reorg_task.find_rewind_target(latest_state).await; - - assert!(matches!(rewind_target, Err(ReorgHandlerError::RewindTargetPreInterop))); - } - - #[tokio::test] - async fn test_is_block_canonical() { - let canonical_hash = B256::from([1u8; 32]); - let non_canonical_hash = B256::from([2u8; 32]); - - let canonical_block: Block = Block { - header: 
Header { - hash: canonical_hash, - inner: alloy_consensus::Header { - number: 100, - parent_hash: B256::ZERO, - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let non_canonical_block: Block = Block { - header: Header { - hash: non_canonical_hash, - inner: alloy_consensus::Header { - number: 100, - parent_hash: B256::ZERO, - timestamp: 12345, - ..Default::default() - }, - ..Default::default() - }, - ..Default::default() - }; - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let rpc_client = RpcClient::new(transport, false); - asserter.push_success(&canonical_block); - asserter.push_success(&non_canonical_block); - - let reorg_task = ReorgTask::new(1, Arc::new(MockDb::new()), rpc_client); - - let result = reorg_task.is_block_canonical(100, canonical_hash).await; - assert!(result.is_ok()); - - // Should return false - let result = reorg_task.is_block_canonical(100, canonical_hash).await; - assert!(result.is_ok()); - assert!(!result.unwrap()); - } - - #[tokio::test] - async fn test_rewind_to_activation_block_success() { - let mut mock_db = MockDb::new(); - - let activation_block = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - let activation_source = - BlockInfo::new(B256::from([3u8; 32]), 200, B256::from([4u8; 32]), 12346); - - // Expect get_activation_block to be called - mock_db.expect_get_activation_block().times(1).returning(move || Ok(activation_block)); - - // Expect derived_to_source to be called - mock_db - .expect_derived_to_source() - .times(1) - .with(mockall::predicate::eq(activation_block.id())) - .returning(move |_| Ok(activation_source)); - - // Expect rewind to be called - mock_db - .expect_rewind() - .times(1) - .with(mockall::predicate::eq(activation_block.id())) - .returning(|_| Ok(())); - - let reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - RpcClient::new(MockTransport::new(Asserter::new()), false), - ); 
- - let result = reorg_task.rewind_to_activation_block().await; - - assert!(result.is_ok()); - let pair = result.unwrap().unwrap(); - assert_eq!(pair.source, activation_source); - assert_eq!(pair.derived.unwrap(), activation_block); - } - - #[tokio::test] - async fn test_rewind_to_activation_block_database_not_initialized() { - let mut mock_db = MockDb::new(); - - // Expect get_activation_block to return DatabaseNotInitialised - mock_db - .expect_get_activation_block() - .times(1) - .returning(|| Err(StorageError::DatabaseNotInitialised)); - - let reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - RpcClient::new(MockTransport::new(Asserter::new()), false), - ); - - let result = reorg_task.rewind_to_activation_block().await; - - // Should succeed with None (no-op case) - assert!(result.is_ok()); - assert!(result.unwrap().is_none()); - } - - #[tokio::test] - async fn test_rewind_to_activation_block_storage_error() { - let mut mock_db = MockDb::new(); - - // Expect get_activation_block to return a different storage error - mock_db - .expect_get_activation_block() - .times(1) - .returning(|| Err(StorageError::LockPoisoned)); - - let reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - RpcClient::new(MockTransport::new(Asserter::new()), false), - ); - - let result = reorg_task.rewind_to_activation_block().await; - - // Should return storage error - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - ReorgHandlerError::StorageError(StorageError::LockPoisoned) - )); - } - - #[tokio::test] - async fn test_rewind_to_activation_block_derived_to_source_fails() { - let mut mock_db = MockDb::new(); - - let activation_block = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - // Expect get_activation_block to succeed - mock_db.expect_get_activation_block().times(1).returning(move || Ok(activation_block)); - - // Expect derived_to_source to fail - mock_db.expect_derived_to_source().times(1).returning(|_| 
Err(StorageError::LockPoisoned)); - - let reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - RpcClient::new(MockTransport::new(Asserter::new()), false), - ); - - let result = reorg_task.rewind_to_activation_block().await; - - // Should return storage error - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - ReorgHandlerError::StorageError(StorageError::LockPoisoned) - )); - } - - #[tokio::test] - async fn test_rewind_to_activation_block_rewind_fails() { - let mut mock_db = MockDb::new(); - - let activation_block = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - let activation_source = - BlockInfo::new(B256::from([3u8; 32]), 200, B256::from([4u8; 32]), 12346); - - // Expect get_activation_block to succeed - mock_db.expect_get_activation_block().times(1).returning(move || Ok(activation_block)); - - // Expect derived_to_source to succeed - mock_db.expect_derived_to_source().times(1).returning(move |_| Ok(activation_source)); - - // Expect rewind to fail - mock_db.expect_rewind().times(1).returning(|_| Err(StorageError::LockPoisoned)); - - let reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - RpcClient::new(MockTransport::new(Asserter::new()), false), - ); - - let result = reorg_task.rewind_to_activation_block().await; - - // Should return storage error - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - ReorgHandlerError::StorageError(StorageError::LockPoisoned) - )); - } - - #[tokio::test] - async fn test_rewind_to_target_source_success() { - let mut mock_db = MockDb::new(); - - let rewind_target_source = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - let rewind_target_derived = - BlockInfo::new(B256::from([3u8; 32]), 50, B256::from([4u8; 32]), 12346); - - // Expect rewind to be called - mock_db - .expect_rewind_to_source() - .times(1) - .with(predicate::eq(rewind_target_source.id())) - .returning(move |_| Ok(Some(rewind_target_derived))); - - let 
reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - RpcClient::new(MockTransport::new(Asserter::new()), false), - ); - - let result = reorg_task.rewind_to_target_source(rewind_target_source).await; - - assert!(result.is_ok()); - let pair = result.unwrap(); - assert_eq!(pair.source, rewind_target_source); - assert_eq!(pair.derived.unwrap(), rewind_target_derived); - } - - #[tokio::test] - async fn test_rewind_to_target_source_rewind_fails() { - let mut mock_db = MockDb::new(); - - let rewind_target_source = - BlockInfo::new(B256::from([1u8; 32]), 100, B256::from([2u8; 32]), 12345); - - // Expect rewind to fail - mock_db.expect_rewind_to_source().times(1).returning(|_| Err(StorageError::LockPoisoned)); - - let reorg_task = ReorgTask::new( - 1, - Arc::new(mock_db), - RpcClient::new(MockTransport::new(Asserter::new()), false), - ); - - let result = reorg_task.rewind_to_target_source(rewind_target_source).await; - - assert!(result.is_err()); - assert!(matches!( - result.unwrap_err(), - ReorgHandlerError::StorageError(StorageError::LockPoisoned) - )); - } -} diff --git a/rust/kona/crates/supervisor/core/src/rpc/admin.rs b/rust/kona/crates/supervisor/core/src/rpc/admin.rs deleted file mode 100644 index ab00f720af22c..0000000000000 --- a/rust/kona/crates/supervisor/core/src/rpc/admin.rs +++ /dev/null @@ -1,216 +0,0 @@ -use crate::syncnode::ClientConfig; -use alloy_rpc_types_engine::JwtSecret; -use async_trait::async_trait; -use derive_more::Constructor; -use jsonrpsee::{ - core::RpcResult, - types::{ErrorCode, ErrorObject, ErrorObjectOwned}, -}; -use kona_supervisor_rpc::SupervisorAdminApiServer; -use std::time::Duration; -use thiserror::Error; -use tokio::{ - sync::{mpsc::Sender, oneshot}, - time::timeout, -}; -use tracing::warn; - -/// Error types for Supervisor Admin RPC operations. -#[derive(Debug, Error)] -pub enum AdminError { - /// Indicates that the JWT secret is invalid. 
- #[error("invalid jwt secret: {0}")] - InvalidJwtSecret(String), - - /// Indicates that the request to the admin channel failed to send. - #[error("failed to send admin request")] - SendFailed, - - /// Indicates that the sender dropped before a response was received. - #[error("admin request sender dropped")] - SenderDropped, - - /// Indicates that the admin request timed out. - #[error("admin request timed out")] - Timeout, - - /// Indicates a service error occurred during processing the request. - #[error("service error: {0}")] - ServiceError(String), -} - -impl From for ErrorObjectOwned { - fn from(err: AdminError) -> Self { - match err { - // todo: handle these errors more gracefully - AdminError::InvalidJwtSecret(_) => ErrorObjectOwned::from(ErrorCode::InvalidParams), - AdminError::SendFailed | - AdminError::SenderDropped | - AdminError::Timeout | - AdminError::ServiceError(_) => ErrorObjectOwned::from(ErrorCode::InternalError), - } - } -} - -// timeout for admin requests (seconds) -const ADMIN_REQUEST_TIMEOUT_SECS: u64 = 3; - -/// Represents Admin Request types -#[derive(Debug)] -pub enum AdminRequest { - /// Adds a new L2 RPC to the Supervisor. - AddL2Rpc { - /// The configuration for the L2 RPC client. - cfg: ClientConfig, - /// The response channel to send the result back. - resp: oneshot::Sender>, - }, -} - -/// Supervisor Admin RPC interface -#[derive(Debug, Constructor)] -pub struct AdminRpc { - admin_tx: Sender, -} - -#[async_trait] -impl SupervisorAdminApiServer for AdminRpc { - /// Adds L2RPC to the supervisor. 
- async fn add_l2_rpc(&self, url: String, secret: String) -> RpcResult<()> { - let (resp_tx, resp_rx) = oneshot::channel(); - - let jwt_secret = JwtSecret::from_hex(secret).map_err(|err| { - warn!(target: "supervisor::admin_rpc", %url, %err, "Failed to decode JWT secret"); - ErrorObject::from(AdminError::InvalidJwtSecret(err.to_string())) - })?; - - let request = AdminRequest::AddL2Rpc { - cfg: ClientConfig { url: url.clone(), jwt_secret }, - resp: resp_tx, - }; - - self.admin_tx.send(request).await.map_err(|err| { - warn!(target: "supervisor::admin_rpc", %url, %err, "Failed to send AdminRequest"); - ErrorObject::from(AdminError::SendFailed) - })?; - - // wait for response with a timeout - timeout(Duration::from_secs(ADMIN_REQUEST_TIMEOUT_SECS), resp_rx) - .await - .map_or_else( - |_| { - warn!(target: "supervisor::admin_rpc", %url, "AdminRequest timed out"); - Err(ErrorObject::from(AdminError::Timeout)) - }, - |res| res - .unwrap_or(Err(AdminError::SenderDropped)) - .map_err(|err| { - warn!(target: "supervisor::admin_rpc", %url, %err, "Failed to process AdminRequest"); - ErrorObject::from(err) - }), - ) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use tokio::{ - sync::mpsc, - time::{self, Duration}, - }; - - // valid 32-byte hex (64 hex chars) - const VALID_SECRET: &str = "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"; - - #[tokio::test] - async fn test_add_l2_rpc_success() { - let (tx, mut rx) = mpsc::channel::(1); - let admin = AdminRpc::new(tx.clone()); - - // spawn a task that simulates the service handling the admin request - let handler = tokio::spawn(async move { - if let Some(AdminRequest::AddL2Rpc { cfg, resp }) = rx.recv().await { - assert_eq!(cfg.url, "http://node:8545"); - // reply success - let _ = resp.send(Ok(())); - } else { - panic!("expected AddL2Rpc request"); - } - }); - - let res = admin.add_l2_rpc("http://node:8545".to_string(), VALID_SECRET.to_string()).await; - assert!(res.is_ok(), "expected successful 
response"); - - handler.await.unwrap(); - } - - #[tokio::test] - async fn test_add_l2_rpc_invalid_jwt() { - // admin with working channel (not used because parsing fails early) - let (tx, _rx) = mpsc::channel::(1); - let admin = AdminRpc::new(tx); - - let res = admin.add_l2_rpc("http://node:8545".to_string(), "zzzz".to_string()).await; - assert!(res.is_err(), "expected error for invalid jwt secret"); - } - - #[tokio::test] - async fn test_add_l2_rpc_send_failed() { - // create channel and drop the receiver to force send() -> Err - let (tx, rx) = mpsc::channel::(1); - drop(rx); - let admin = AdminRpc::new(tx); - - let res = admin.add_l2_rpc("http://node:8545".to_string(), VALID_SECRET.to_string()).await; - assert!(res.is_err(), "expected error when admin channel receiver is closed"); - } - - #[tokio::test] - async fn test_add_l2_rpc_service_response_dropped() { - let (tx, mut rx) = mpsc::channel::(1); - let admin = AdminRpc::new(tx.clone()); - - // handler drops the response sender to simulate service failure before replying - let handler = tokio::spawn(async move { - if let Some(AdminRequest::AddL2Rpc { cfg: _, resp }) = rx.recv().await { - // drop the sender without sending -> receiver side will get Err - drop(resp); - } else { - panic!("expected AddL2Rpc request"); - } - }); - - let res = admin.add_l2_rpc("http://node:8545".to_string(), VALID_SECRET.to_string()).await; - assert!(res.is_err(), "expected error when service drops response channel"); - handler.await.unwrap(); - } - - #[tokio::test] - async fn test_add_l2_rpc_timeout() { - // use a handler that receives the request but does not reply (keeps sender alive) - let (tx, mut rx) = mpsc::channel::(1); - let admin = AdminRpc::new(tx.clone()); - - let handler = tokio::spawn(async move { - if let Some(AdminRequest::AddL2Rpc { cfg: _, resp: _ }) = rx.recv().await { - // hold the sender (do nothing) so the rpc call times out - // keep the future alive long enough (we use tokio::time::advance in the test) - 
time::sleep(Duration::from_secs(ADMIN_REQUEST_TIMEOUT_SECS + 5)).await; - } else { - panic!("expected AddL2Rpc request"); - } - }); - - // call the rpc concurrently - let call = tokio::spawn(async move { - admin.add_l2_rpc("http://node:8545".to_string(), VALID_SECRET.to_string()).await - }); - - let res = call.await.unwrap(); - assert!(res.is_err(), "expected timeout error for long-running admin handler"); - - // let handler finish cleanly - handler.await.unwrap(); - } -} diff --git a/rust/kona/crates/supervisor/core/src/rpc/metrics.rs b/rust/kona/crates/supervisor/core/src/rpc/metrics.rs deleted file mode 100644 index c9f26ac2b8d98..0000000000000 --- a/rust/kona/crates/supervisor/core/src/rpc/metrics.rs +++ /dev/null @@ -1,125 +0,0 @@ -//! Metrics for the Supervisor RPC service. - -/// Container for metrics. -#[derive(Debug, Clone)] -pub(crate) struct Metrics; - -impl Metrics { - // --- Metric Names --- - /// Identifier for the counter of successful RPC requests. Labels: `method`. - pub(crate) const SUPERVISOR_RPC_REQUESTS_SUCCESS_TOTAL: &'static str = - "supervisor_rpc_requests_success_total"; - /// Identifier for the counter of failed RPC requests. Labels: `method`. - pub(crate) const SUPERVISOR_RPC_REQUESTS_ERROR_TOTAL: &'static str = - "supervisor_rpc_requests_error_total"; - /// Identifier for the histogram of RPC request durations. Labels: `method`. 
- pub(crate) const SUPERVISOR_RPC_REQUEST_DURATION_SECONDS: &'static str = - "supervisor_rpc_request_duration_seconds"; - - pub(crate) const SUPERVISOR_RPC_METHOD_CROSS_DERIVED_TO_SOURCE: &'static str = - "cross_derived_to_source"; - pub(crate) const SUPERVISOR_RPC_METHOD_DEPENDENCY_SET: &'static str = "dependency_set"; - pub(crate) const SUPERVISOR_RPC_METHOD_LOCAL_UNSAFE: &'static str = "local_unsafe"; - pub(crate) const SUPERVISOR_RPC_METHOD_LOCAL_SAFE: &'static str = "local_safe"; - pub(crate) const SUPERVISOR_RPC_METHOD_CROSS_SAFE: &'static str = "cross_safe"; - pub(crate) const SUPERVISOR_RPC_METHOD_FINALIZED: &'static str = "finalized"; - pub(crate) const SUPERVISOR_RPC_METHOD_FINALIZED_L1: &'static str = "finalized_l1"; - pub(crate) const SUPERVISOR_RPC_METHOD_SUPER_ROOT_AT_TIMESTAMP: &'static str = - "super_root_at_timestamp"; - pub(crate) const SUPERVISOR_RPC_METHOD_SYNC_STATUS: &'static str = "sync_status"; - pub(crate) const SUPERVISOR_RPC_METHOD_ALL_SAFE_DERIVED_AT: &'static str = - "all_safe_derived_at"; - pub(crate) const SUPERVISOR_RPC_METHOD_CHECK_ACCESS_LIST: &'static str = "check_access_list"; - - /// Initializes metrics for the Supervisor RPC service. - /// - /// This does two things: - /// * Describes various metrics. - /// * Initializes metrics with their labels to 0 so they can be queried immediately. - pub(crate) fn init() { - Self::describe(); - Self::zero(); - } - - /// Describes metrics used in the Supervisor RPC service. 
- fn describe() { - metrics::describe_counter!( - Self::SUPERVISOR_RPC_REQUESTS_SUCCESS_TOTAL, - metrics::Unit::Count, - "Total number of successful RPC requests processed by the supervisor" - ); - metrics::describe_counter!( - Self::SUPERVISOR_RPC_REQUESTS_ERROR_TOTAL, - metrics::Unit::Count, - "Total number of failed RPC requests processed by the supervisor" - ); - metrics::describe_histogram!( - Self::SUPERVISOR_RPC_REQUEST_DURATION_SECONDS, - metrics::Unit::Seconds, - "Duration of RPC requests processed by the supervisor" - ); - } - - fn zero_rpc_method(method: &str) { - metrics::counter!( - Self::SUPERVISOR_RPC_REQUESTS_SUCCESS_TOTAL, - "method" => method.to_string() - ) - .increment(0); - - metrics::counter!( - Self::SUPERVISOR_RPC_REQUESTS_ERROR_TOTAL, - "method" => method.to_string() - ) - .increment(0); - - metrics::histogram!( - Self::SUPERVISOR_RPC_REQUEST_DURATION_SECONDS, - "method" => method.to_string() - ) - .record(0.0); // Record a zero value to ensure the label combination is present - } - - /// Initializes metrics with their labels to `0` so they appear in Prometheus from the start. - fn zero() { - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_CROSS_DERIVED_TO_SOURCE); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_LOCAL_UNSAFE); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_LOCAL_SAFE); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_CROSS_SAFE); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_FINALIZED); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_FINALIZED_L1); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_SUPER_ROOT_AT_TIMESTAMP); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_SYNC_STATUS); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_ALL_SAFE_DERIVED_AT); - Self::zero_rpc_method(Self::SUPERVISOR_RPC_METHOD_CHECK_ACCESS_LIST); - } -} - -/// Observes an RPC call, recording its duration and outcome. 
-/// -/// # Usage -/// ```ignore -/// async fn my_rpc_method(&self, arg: u32) -> RpcResult { -/// observe_rpc_call!("my_rpc_method_name", { -/// // todo: add actual RPC logic -/// if arg == 0 { Ok("success".to_string()) } else { Err(ErrorObject::owned(1, "failure", None::<()>)) } -/// }) -/// } -/// ``` -#[macro_export] -macro_rules! observe_rpc_call { - ($method_name:expr, $block:expr) => {{ - let start_time = std::time::Instant::now(); - let result = $block; // Execute the provided code block - let duration = start_time.elapsed().as_secs_f64(); - - if result.is_ok() { - metrics::counter!($crate::rpc::metrics::Metrics::SUPERVISOR_RPC_REQUESTS_SUCCESS_TOTAL, "method" => $method_name).increment(1); - } else { - metrics::counter!($crate::rpc::metrics::Metrics::SUPERVISOR_RPC_REQUESTS_ERROR_TOTAL, "method" => $method_name).increment(1); - } - - metrics::histogram!($crate::rpc::metrics::Metrics::SUPERVISOR_RPC_REQUEST_DURATION_SECONDS, "method" => $method_name).record(duration); - result - }}; -} diff --git a/rust/kona/crates/supervisor/core/src/rpc/mod.rs b/rust/kona/crates/supervisor/core/src/rpc/mod.rs deleted file mode 100644 index 8998551b1f84c..0000000000000 --- a/rust/kona/crates/supervisor/core/src/rpc/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -//! Supervisor RPC module - -mod server; -pub use server::SupervisorRpc; - -mod admin; -pub use admin::{AdminError, AdminRequest, AdminRpc}; - -mod metrics; -pub(crate) use metrics::Metrics; diff --git a/rust/kona/crates/supervisor/core/src/rpc/server.rs b/rust/kona/crates/supervisor/core/src/rpc/server.rs deleted file mode 100644 index fddcd0e6ba2b5..0000000000000 --- a/rust/kona/crates/supervisor/core/src/rpc/server.rs +++ /dev/null @@ -1,514 +0,0 @@ -//! Server-side implementation of the Supervisor RPC API. 
- -use super::Metrics; -use crate::{SpecError, SupervisorError, SupervisorService}; -use alloy_eips::eip1898::BlockNumHash; -use alloy_primitives::{B256, ChainId, map::HashMap}; -use async_trait::async_trait; -use jsonrpsee::{core::RpcResult, types::ErrorObject}; -use kona_interop::{DependencySet, DerivedIdPair, ExecutingDescriptor, SafetyLevel}; -use kona_protocol::BlockInfo; -use kona_supervisor_rpc::{ - SuperRootOutputRpc, SupervisorApiServer, SupervisorChainSyncStatus, SupervisorSyncStatus, -}; -use kona_supervisor_types::{HexStringU64, SuperHead}; -use std::sync::Arc; -use tracing::{trace, warn}; - -/// The server-side implementation struct for the [`SupervisorApiServer`]. -/// It holds a reference to the core Supervisor logic. -#[derive(Debug)] -pub struct SupervisorRpc { - /// Reference to the core Supervisor logic. - /// Using Arc allows sharing the Supervisor instance if needed, - supervisor: Arc, -} - -impl SupervisorRpc { - /// Creates a new [`SupervisorRpc`] instance. - pub fn new(supervisor: Arc) -> Self { - Metrics::init(); - trace!(target: "supervisor::rpc", "Creating new SupervisorRpc handler"); - Self { supervisor } - } -} - -#[async_trait] -impl SupervisorApiServer for SupervisorRpc -where - T: SupervisorService + 'static, -{ - async fn cross_derived_to_source( - &self, - chain_id_hex: HexStringU64, - derived: BlockNumHash, - ) -> RpcResult { - let chain_id = ChainId::from(chain_id_hex); - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_CROSS_DERIVED_TO_SOURCE, - async { - trace!( - target: "supervisor::rpc", - %chain_id, - ?derived, - "Received cross_derived_to_source request" - ); - - let source_block = - self.supervisor.derived_to_source_block(chain_id, derived).map_err(|err| { - warn!( - target: "supervisor::rpc", - %chain_id, - ?derived, - %err, - "Failed to get source block for derived block" - ); - ErrorObject::from(err) - })?; - - Ok(source_block) - } - .await - ) - } - - async fn local_unsafe(&self, chain_id_hex: HexStringU64) 
-> RpcResult { - let chain_id = ChainId::from(chain_id_hex); - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_LOCAL_UNSAFE, - async { - trace!(target: "supervisor::rpc", - %chain_id, - "Received local_unsafe request" - ); - - Ok(self.supervisor.local_unsafe(chain_id)?.id()) - } - .await - ) - } - - async fn local_safe(&self, chain_id_hex: HexStringU64) -> RpcResult { - let chain_id = ChainId::from(chain_id_hex); - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_LOCAL_SAFE, - async { - trace!(target: "supervisor::rpc", - %chain_id, - "Received local_safe request" - ); - - let derived = self.supervisor.local_safe(chain_id)?.id(); - let source = self.supervisor.derived_to_source_block(chain_id, derived)?.id(); - - Ok(DerivedIdPair { source, derived }) - } - .await - ) - } - - async fn dependency_set_v1(&self) -> RpcResult { - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_DEPENDENCY_SET, - async { - trace!(target: "supervisor::rpc", - "Received the dependency set" - ); - - Ok(self.supervisor.dependency_set().to_owned()) - } - .await - ) - } - - async fn cross_safe(&self, chain_id_hex: HexStringU64) -> RpcResult { - let chain_id = ChainId::from(chain_id_hex); - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_CROSS_SAFE, - async { - trace!(target: "supervisor::rpc", - %chain_id, - "Received cross_safe request" - ); - - let derived = self.supervisor.cross_safe(chain_id)?.id(); - let source = self.supervisor.derived_to_source_block(chain_id, derived)?.id(); - - Ok(DerivedIdPair { source, derived }) - } - .await - ) - } - - async fn finalized(&self, chain_id_hex: HexStringU64) -> RpcResult { - let chain_id = ChainId::from(chain_id_hex); - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_FINALIZED, - async { - trace!(target: "supervisor::rpc", - %chain_id, - "Received finalized request" - ); - - Ok(self.supervisor.finalized(chain_id)?.id()) - } - .await - ) - } - - async fn finalized_l1(&self) -> RpcResult { - 
crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_FINALIZED_L1, - async { - trace!(target: "supervisor::rpc", "Received finalized_l1 request"); - Ok(self.supervisor.finalized_l1()?) - } - .await - ) - } - - async fn super_root_at_timestamp( - &self, - timestamp_hex: HexStringU64, - ) -> RpcResult { - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_SUPER_ROOT_AT_TIMESTAMP, - async { - let timestamp = u64::from(timestamp_hex); - trace!(target: "supervisor::rpc", - %timestamp, - "Received super_root_at_timestamp request" - ); - - self.supervisor.super_root_at_timestamp(timestamp) - .await - .map_err(|err| { - warn!(target: "supervisor::rpc", %err, "Error from core supervisor super_root_at_timestamp"); - ErrorObject::from(err) - }) - }.await - ) - } - - async fn check_access_list( - &self, - inbox_entries: Vec, - min_safety: SafetyLevel, - executing_descriptor: ExecutingDescriptor, - ) -> RpcResult<()> { - // TODO:: refactor, maybe build proc macro to record metrics - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_CHECK_ACCESS_LIST, - async { - trace!(target: "supervisor::rpc", - num_inbox_entries = inbox_entries.len(), - ?min_safety, - ?executing_descriptor, - "Received check_access_list request", - ); - self.supervisor - .check_access_list(inbox_entries, min_safety, executing_descriptor) - .map_err(|err| { - warn!(target: "supervisor::rpc", %err, "Error from core supervisor check_access_list"); - ErrorObject::from(err) - }) - }.await - ) - } - - async fn sync_status(&self) -> RpcResult { - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_SYNC_STATUS, - async { - trace!(target: "supervisor::rpc", "Received sync_status request"); - - let mut chains = self - .supervisor - .chain_ids() - .map(|id| (id, Default::default())) - .collect::>(); - - if chains.is_empty() { - // return error if no chains configured - // - // - // - // todo: add to spec - Err(SupervisorError::EmptyDependencySet)?; - } - - let mut min_synced_l1 = BlockInfo 
{ number: u64::MAX, ..Default::default() }; - let mut cross_safe_timestamp = u64::MAX; - let mut finalized_timestamp = u64::MAX; - let mut uninitialized_chain_db_count = 0; - - for (id, status) in &mut chains { - let head = match self.supervisor.super_head(*id) { - Ok(head) => head, - Err(SupervisorError::SpecError(SpecError::ErrorNotInSpec)) => { - uninitialized_chain_db_count += 1; - continue; - } - Err(err) => return Err(ErrorObject::from(err)), - }; - - // uses lowest safe and finalized timestamps, as well as l1 block, of all l2s - // - // - // - // todo: add to spec - let SuperHead { l1_source, cross_safe, finalized, .. } = &head; - - let default_block = BlockInfo::default(); - let l1_source = l1_source.as_ref().unwrap_or(&default_block); - let cross_safe = cross_safe.as_ref().unwrap_or(&default_block); - let finalized = finalized.as_ref().unwrap_or(&default_block); - - if l1_source.number < min_synced_l1.number { - min_synced_l1 = *l1_source; - } - if cross_safe.timestamp < cross_safe_timestamp { - cross_safe_timestamp = cross_safe.timestamp; - } - if finalized.timestamp < finalized_timestamp { - finalized_timestamp = finalized.timestamp; - } - - *status = head.into(); - } - - if uninitialized_chain_db_count == chains.len() { - warn!(target: "supervisor::rpc", "No chain db initialized"); - return Err(ErrorObject::from(SupervisorError::SpecError( - SpecError::ErrorNotInSpec, - ))); - } - - Ok(SupervisorSyncStatus { - min_synced_l1, - cross_safe_timestamp, - finalized_timestamp, - chains, - }) - } - .await - ) - } - - async fn all_safe_derived_at( - &self, - derived_from: BlockNumHash, - ) -> RpcResult> { - crate::observe_rpc_call!( - Metrics::SUPERVISOR_RPC_METHOD_ALL_SAFE_DERIVED_AT, - async { - trace!(target: "supervisor::rpc", - ?derived_from, - "Received all_safe_derived_at request" - ); - - let mut chains = self - .supervisor - .chain_ids() - .map(|id| (id, Default::default())) - .collect::>(); - - for (id, block) in &mut chains { - *block = 
self.supervisor.latest_block_from(derived_from, *id)?.id(); - } - - Ok(chains) - } - .await - ) - } -} - -impl Clone for SupervisorRpc { - fn clone(&self) -> Self { - Self { supervisor: self.supervisor.clone() } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::ChainId; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{EntryNotFoundError, StorageError}; - use mockall::*; - use std::sync::Arc; - - mock!( - #[derive(Debug)] - pub SupervisorService {} - - #[async_trait] - impl SupervisorService for SupervisorService { - fn chain_ids(&self) -> impl Iterator; - fn dependency_set(&self) -> &DependencySet; - fn super_head(&self, chain: ChainId) -> Result; - fn latest_block_from(&self, l1_block: BlockNumHash, chain: ChainId) -> Result; - fn derived_to_source_block(&self, chain: ChainId, derived: BlockNumHash) -> Result; - fn local_unsafe(&self, chain: ChainId) -> Result; - fn local_safe(&self, chain: ChainId) -> Result; - fn cross_safe(&self, chain: ChainId) -> Result; - fn finalized(&self, chain: ChainId) -> Result; - fn finalized_l1(&self) -> Result; - fn check_access_list(&self, inbox_entries: Vec, min_safety: SafetyLevel, executing_descriptor: ExecutingDescriptor) -> Result<(), SupervisorError>; - async fn super_root_at_timestamp(&self, timestamp: u64) -> Result; - } - ); - - #[tokio::test] - async fn test_sync_status_empty_chains() { - let mut mock_service = MockSupervisorService::new(); - mock_service.expect_chain_ids().returning(|| Box::new(vec![].into_iter())); - - let rpc = SupervisorRpc::new(Arc::new(mock_service)); - let result = rpc.sync_status().await; - - assert!(result.is_err()); - assert_eq!(result.unwrap_err(), ErrorObject::from(SupervisorError::EmptyDependencySet)); - } - - #[tokio::test] - async fn test_sync_status_single_chain() { - let chain_id = ChainId::from(1u64); - - let block_info = BlockInfo { number: 42, ..Default::default() }; - let super_head = SuperHead { - l1_source: Some(block_info), - cross_safe: 
Some(BlockInfo { timestamp: 100, ..Default::default() }), - finalized: Some(BlockInfo { timestamp: 50, ..Default::default() }), - ..Default::default() - }; - - let mut mock_service = MockSupervisorService::new(); - mock_service.expect_chain_ids().returning(move || Box::new(vec![chain_id].into_iter())); - mock_service.expect_super_head().returning(move |_| Ok(super_head)); - - let rpc = SupervisorRpc::new(Arc::new(mock_service)); - let result = rpc.sync_status().await.unwrap(); - - assert_eq!(result.min_synced_l1.number, 42); - assert_eq!(result.cross_safe_timestamp, 100); - assert_eq!(result.finalized_timestamp, 50); - assert_eq!(result.chains.len(), 1); - } - - #[tokio::test] - async fn test_sync_status_missing_super_head() { - let chain_id_1 = ChainId::from(1u64); - let chain_id_2 = ChainId::from(2u64); - - // Only chain_id_1 has a SuperHead, chain_id_2 is missing - let block_info = BlockInfo { number: 42, ..Default::default() }; - let super_head = SuperHead { - l1_source: Some(block_info), - cross_safe: Some(BlockInfo { timestamp: 100, ..Default::default() }), - finalized: Some(BlockInfo { timestamp: 50, ..Default::default() }), - ..Default::default() - }; - - let mut mock_service = MockSupervisorService::new(); - mock_service - .expect_chain_ids() - .returning(move || Box::new(vec![chain_id_1, chain_id_2].into_iter())); - mock_service.expect_super_head().returning(move |chain_id| { - if chain_id == chain_id_1 { - Ok(super_head) - } else { - Err(SupervisorError::StorageError(StorageError::EntryNotFound( - EntryNotFoundError::DerivedBlockNotFound(1), - ))) - } - }); - - let rpc = SupervisorRpc::new(Arc::new(mock_service)); - let result = rpc.sync_status().await; - - assert!(result.is_err()); - } - - #[tokio::test] - async fn test_sync_status_uninitialized_chain_db() { - let chain_id_1 = ChainId::from(1u64); - let chain_id_2 = ChainId::from(2u64); - - // Case 1: No chain db is initialized - let mut mock_service = MockSupervisorService::new(); - mock_service - 
.expect_chain_ids() - .returning(move || Box::new(vec![chain_id_1, chain_id_2].into_iter())); - mock_service - .expect_super_head() - .times(2) - .returning(move |_| Err(SupervisorError::SpecError(SpecError::ErrorNotInSpec))); - - let rpc = SupervisorRpc::new(Arc::new(mock_service)); - let result = rpc.sync_status().await; - assert!(result.is_err()); - assert_eq!( - result.unwrap_err(), - ErrorObject::from(SupervisorError::SpecError(SpecError::ErrorNotInSpec,)) - ); - - // Case 2: Only one chain db is initialized - let block_info = BlockInfo { number: 42, ..Default::default() }; - let super_head = SuperHead { - l1_source: Some(block_info), - cross_safe: Some(BlockInfo { timestamp: 100, ..Default::default() }), - finalized: Some(BlockInfo { timestamp: 50, ..Default::default() }), - ..Default::default() - }; - - let mut mock_service = MockSupervisorService::new(); - mock_service - .expect_chain_ids() - .returning(move || Box::new(vec![chain_id_1, chain_id_2].into_iter())); - mock_service.expect_super_head().times(2).returning(move |chain_id| { - if chain_id == chain_id_1 { - Ok(super_head) - } else { - Err(SupervisorError::SpecError(SpecError::ErrorNotInSpec)) - } - }); - - let rpc = SupervisorRpc::new(Arc::new(mock_service)); - let result = rpc.sync_status().await; - assert!(result.is_ok()); - - // Case 3: Both chain dbs are initialized - let block_info_1 = BlockInfo { number: 42, ..Default::default() }; - let super_head_1 = SuperHead { - l1_source: Some(block_info_1), - cross_safe: Some(BlockInfo { timestamp: 100, ..Default::default() }), - finalized: Some(BlockInfo { timestamp: 50, ..Default::default() }), - ..Default::default() - }; - let block_info_2 = BlockInfo { number: 43, ..Default::default() }; - let super_head_2 = SuperHead { - l1_source: Some(block_info_2), - cross_safe: Some(BlockInfo { timestamp: 110, ..Default::default() }), - finalized: Some(BlockInfo { timestamp: 60, ..Default::default() }), - ..Default::default() - }; - let mut mock_service = 
MockSupervisorService::new(); - mock_service - .expect_chain_ids() - .returning(move || Box::new(vec![chain_id_1, chain_id_2].into_iter())); - mock_service.expect_super_head().times(2).returning(move |chain_id| { - if chain_id == chain_id_1 { Ok(super_head_1) } else { Ok(super_head_2) } - }); - - let rpc = SupervisorRpc::new(Arc::new(mock_service)); - let result = rpc.sync_status().await; - assert!(result.is_ok()); - let status = result.unwrap(); - assert_eq!(status.min_synced_l1.number, 42); - assert_eq!(status.cross_safe_timestamp, 100); - assert_eq!(status.finalized_timestamp, 50); - assert_eq!(status.chains.len(), 2); - } -} diff --git a/rust/kona/crates/supervisor/core/src/safety_checker/cross.rs b/rust/kona/crates/supervisor/core/src/safety_checker/cross.rs deleted file mode 100644 index b15300b0c5b09..0000000000000 --- a/rust/kona/crates/supervisor/core/src/safety_checker/cross.rs +++ /dev/null @@ -1,841 +0,0 @@ -use crate::{ - CrossSafetyError, - safety_checker::{ValidationError, ValidationError::InitiatingMessageNotFound}, -}; -use alloy_primitives::{BlockHash, ChainId}; -use derive_more::Constructor; -use kona_interop::InteropValidator; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{CrossChainSafetyProvider, StorageError}; -use kona_supervisor_types::ExecutingMessage; -use op_alloy_consensus::interop::SafetyLevel; -use std::collections::HashSet; - -/// Uses a [`CrossChainSafetyProvider`] to verify the safety of cross-chain message dependencies. 
-#[derive(Debug, Constructor)] -pub struct CrossSafetyChecker<'a, P, V> { - chain_id: ChainId, - validator: &'a V, - provider: &'a P, - required_level: SafetyLevel, -} - -impl CrossSafetyChecker<'_, P, V> -where - P: CrossChainSafetyProvider, - V: InteropValidator, -{ - /// Verifies that all executing messages in the given block are valid based on the validity - /// checks - pub fn validate_block(&self, block: BlockInfo) -> Result<(), CrossSafetyError> { - self.map_dependent_block(&block, self.chain_id, |message, initiating_block_fetcher| { - // Step 1: Validate interop timestamps before any dependency checks - self.validator - .validate_interop_timestamps( - message.chain_id, // initiating chain id - message.timestamp, // initiating block timestamp - self.chain_id, // executing chain id - block.timestamp, // executing block timestamp - None, - ) - .map_err(ValidationError::InteropValidationError)?; - - // Step 2: Verify message dependency without fetching the initiating block. - // This avoids unnecessary I/O and ensures we skip validation when: - // - The current target head of the chain is behind the initiating block (must wait for - // that chain to process further) - // Only if the target head is ahead but the initiating block is missing, we return a - // validation error. - self.verify_message_dependency(&message)?; - - // Step 3: Lazily fetch the initiating block only after dependency checks pass. - let initiating_block = initiating_block_fetcher()?; - - // Step 4: Validate message existence and integrity. - self.validate_executing_message(initiating_block, &message)?; - - // Step 5: Perform cyclic dependency detection starting from the dependent block. - self.check_cyclic_dependency( - &block, - &initiating_block, - message.chain_id, - &mut HashSet::new(), - ) - })?; - - Ok(()) - } - - /// Ensures that the block a message depends on satisfies the given safety level. 
- fn verify_message_dependency( - &self, - message: &ExecutingMessage, - ) -> Result<(), CrossSafetyError> { - let head = self.provider.get_safety_head_ref(message.chain_id, self.required_level)?; - - if head.number < message.block_number { - return Err(CrossSafetyError::DependencyNotSafe { - chain_id: message.chain_id, - block_number: message.block_number, - }); - } - - Ok(()) - } - - /// Recursively checks for a cyclic dependency in cross-chain messages. - /// - /// # Purpose - /// This function walks backwards through message dependencies starting from a candidate block. - /// If any dependency chain leads back to the candidate itself (with the same timestamp), it is - /// considered a **cycle**, which would make the candidate block invalid for cross-safe - /// promotion. - /// - /// # How It Works - /// - It stops recursion if the block is already at required level (cannot be part of a new - /// cycle). - /// - It only follows dependencies that occur at the same timestamp as the candidate. - /// - It uses a `visited` set to avoid re-processing blocks or getting stuck in infinite loops. 
- /// It doesn't care about cycle that is created excluding the candidate block as that will be - /// detected by the specific chain's safety checker - /// - /// Example: - /// - A (candidate) → B → C → A → ❌ cycle detected (includes candidate) - /// - A → B → C → D (no return to A) → ✅ safe - /// - B → C → D → B → ❌ ignored, since candidate is not involved - fn check_cyclic_dependency( - &self, - candidate: &BlockInfo, - current: &BlockInfo, - chain_id: ChainId, - visited: &mut HashSet<(ChainId, BlockHash)>, - ) -> Result<(), CrossSafetyError> { - // Skipping different timestamps - if candidate.timestamp != current.timestamp { - return Ok(()); - } - - // Already visited, avoid infinite loop - let current_id = (chain_id, current.hash); - if !visited.insert(current_id) { - return Ok(()); - } - - // Reached back to candidate - cycle detected - if candidate.hash == current.hash && self.chain_id == chain_id { - return Err(ValidationError::CyclicDependency { block: *candidate }.into()); - } - - let head = self.provider.get_safety_head_ref(chain_id, self.required_level)?; - if head.number >= current.number { - return Ok(()); // Already at target safety level - cannot form a cycle - } - - self.map_dependent_block(current, chain_id, |message, origin_block_fetcher| { - let origin_block = origin_block_fetcher()?; - self.check_cyclic_dependency(candidate, &origin_block, message.chain_id, visited) - }) - } - - fn validate_executing_message( - &self, - init_block: BlockInfo, - message: &ExecutingMessage, - ) -> Result<(), CrossSafetyError> { - // Ensure timestamp invariant - if init_block.timestamp != message.timestamp { - return Err(ValidationError::TimestampInvariantViolation { - expected_timestamp: init_block.timestamp, - actual_timestamp: message.timestamp, - } - .into()); - } - - // Try to fetch the original log from storage - let init_msg = self - .provider - .get_log(message.chain_id, message.block_number, message.log_index) - .map_err(|err| match err { - 
StorageError::EntryNotFound(_) => { - CrossSafetyError::ValidationError(InitiatingMessageNotFound) - } - other => other.into(), - })?; - - // Verify the hash of the message against the original - // Don't need to verify the checksum as we're already verifying all the individual fields. - if init_msg.hash != message.hash { - return Err(ValidationError::InvalidMessageHash { - message_hash: message.hash, - original_hash: init_msg.hash, - } - .into()); - } - - Ok(()) - } - - /// For each executing log in the block, provide a lazy fetcher for the initiating block. - /// The callback decides if/when to fetch the initiating block. - fn map_dependent_block( - &self, - exec_block: &BlockInfo, - chain_id: ChainId, - mut f: F, - ) -> Result<(), CrossSafetyError> - where - F: for<'a> FnMut( - ExecutingMessage, - &'a dyn Fn() -> Result, - ) -> Result<(), CrossSafetyError>, - { - let logs = self.provider.get_block_logs(chain_id, exec_block.number)?; - for log in logs { - if let Some(msg) = log.executing_message { - // Capture what we need for a lazy fetch. - let provider = &self.provider; - let chain = msg.chain_id; - let number = msg.block_number; - - // Zero-arg closure that fetches the initiating block on demand. - let fetcher = - || provider.get_block(chain, number).map_err(CrossSafetyError::Storage); - - // Pass the message and the reference to the fetcher. - f(msg, &fetcher)?; - } - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::B256; - use kona_interop::{DerivedRefPair, InteropValidationError}; - use kona_supervisor_storage::{EntryNotFoundError, StorageError}; - use kona_supervisor_types::Log; - use mockall::mock; - use op_alloy_consensus::interop::SafetyLevel; - - mock! 
( - #[derive(Debug)] - pub Provider {} - - impl CrossChainSafetyProvider for Provider { - fn get_block(&self, chain_id: ChainId, block_number: u64) -> Result; - fn get_log(&self, chain_id: ChainId, block_number: u64, log_index: u32) -> Result; - fn get_block_logs(&self, chain_id: ChainId, block_number: u64) -> Result, StorageError>; - fn get_safety_head_ref(&self, chain_id: ChainId, level: SafetyLevel) -> Result; - fn update_current_cross_unsafe(&self, chain_id: ChainId, block: &BlockInfo) -> Result<(), StorageError>; - fn update_current_cross_safe(&self, chain_id: ChainId, block: &BlockInfo) -> Result; - } - ); - - mock! ( - #[derive(Debug)] - pub Validator {} - - impl InteropValidator for Validator { - fn validate_interop_timestamps( - &self, - initiating_chain_id: ChainId, - initiating_timestamp: u64, - executing_chain_id: ChainId, - executing_timestamp: u64, - timeout: Option, - ) -> Result<(), InteropValidationError>; - - fn is_post_interop(&self, chain_id: ChainId, timestamp: u64) -> bool; - - fn is_interop_activation_block(&self, chain_id: ChainId, block: BlockInfo) -> bool; - } - ); - - fn b256(n: u64) -> B256 { - let mut bytes = [0u8; 32]; - bytes[24..].copy_from_slice(&n.to_be_bytes()); - B256::from(bytes) - } - - #[test] - fn verify_message_dependency_success() { - let chain_id = 1; - let msg = ExecutingMessage { - chain_id, - block_number: 100, - log_index: 0, - timestamp: 0, - hash: b256(0), - }; - - let head_info = - BlockInfo { number: 101, hash: b256(101), parent_hash: b256(100), timestamp: 0 }; - - let mut provider = MockProvider::default(); - let validator = MockValidator::default(); - - provider - .expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::CrossSafe) - .returning(move |_, _| Ok(head_info)); - - let checker = CrossSafetyChecker::new(1, &validator, &provider, SafetyLevel::CrossSafe); - let result = checker.verify_message_dependency(&msg); - assert!(result.is_ok()); - } - - #[test] - fn 
verify_message_dependency_failed() { - let chain_id = 1; - let msg = ExecutingMessage { - chain_id, - block_number: 105, // dependency is ahead of safety head - log_index: 0, - timestamp: 0, - hash: b256(123), - }; - - let head_block = BlockInfo { - number: 100, // safety head is behind the message dependency - hash: b256(100), - parent_hash: b256(99), - timestamp: 0, - }; - - let mut provider = MockProvider::default(); - let validator = MockValidator::default(); - - provider - .expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::CrossSafe) - .returning(move |_, _| Ok(head_block)); - - let checker = CrossSafetyChecker::new(1, &validator, &provider, SafetyLevel::CrossSafe); - let result = checker.verify_message_dependency(&msg); - - assert!( - matches!(result, Err(CrossSafetyError::DependencyNotSafe { .. })), - "Expected DependencyNotSafe error" - ); - } - - #[test] - fn validate_block_success() { - let init_chain_id = 1; - let exec_chain_id = 2; - - let block = - BlockInfo { number: 101, hash: b256(101), parent_hash: b256(100), timestamp: 200 }; - - let dep_block = - BlockInfo { number: 100, hash: b256(100), parent_hash: b256(99), timestamp: 195 }; - - let exec_msg = ExecutingMessage { - chain_id: init_chain_id, - block_number: 100, - log_index: 0, - timestamp: 195, - hash: b256(999), - }; - - let init_log = Log { - index: 0, - hash: b256(999), // Matches msg.hash → passes checksum - executing_message: None, - }; - - let exec_log = Log { index: 0, hash: b256(999), executing_message: Some(exec_msg) }; - - let head = - BlockInfo { number: 101, hash: b256(101), parent_hash: b256(100), timestamp: 200 }; - - let mut provider = MockProvider::default(); - let mut validator = MockValidator::default(); - - provider - .expect_get_block_logs() - .withf(move |cid, num| *cid == exec_chain_id && *num == 101) - .returning(move |_, _| Ok(vec![exec_log.clone()])); - - provider - .expect_get_block() - .withf(move |cid, num| *cid == 
init_chain_id && *num == 100) - .returning(move |_, _| Ok(dep_block)); - - provider - .expect_get_log() - .withf(move |cid, blk, idx| *cid == init_chain_id && *blk == 100 && *idx == 0) - .returning(move |_, _, _| Ok(init_log.clone())); - - provider - .expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == init_chain_id && *lvl == SafetyLevel::CrossSafe) - .returning(move |_, _| Ok(head)); - - validator.expect_validate_interop_timestamps().returning(move |_, _, _, _, _| Ok(())); - - let checker = - CrossSafetyChecker::new(exec_chain_id, &validator, &provider, SafetyLevel::CrossSafe); - let result = checker.validate_block(block); - assert!(result.is_ok()); - } - - #[test] - fn validate_executing_message_timestamp_violation() { - let chain_id = 1; - let msg = ExecutingMessage { - chain_id, - block_number: 100, - log_index: 0, - timestamp: 1234, - hash: b256(999), - }; - - let init_block = BlockInfo { - number: 100, - hash: b256(100), - parent_hash: b256(99), - timestamp: 9999, // Different timestamp to trigger invariant violation - }; - - let provider = MockProvider::default(); - let validator = MockValidator::default(); - let checker = - CrossSafetyChecker::new(chain_id, &validator, &provider, SafetyLevel::CrossSafe); - - let result = checker.validate_executing_message(init_block, &msg); - assert!(matches!( - result, - Err(CrossSafetyError::ValidationError( - ValidationError::TimestampInvariantViolation { .. 
} - )) - )); - } - - #[test] - fn validate_executing_message_initiating_message_not_found() { - let chain_id = 1; - let msg = ExecutingMessage { - chain_id, - block_number: 100, - log_index: 0, - timestamp: 1234, - hash: b256(999), - }; - - let init_block = - BlockInfo { number: 100, hash: b256(100), parent_hash: b256(99), timestamp: 1234 }; - - let mut provider = MockProvider::default(); - provider - .expect_get_log() - .withf(move |cid, blk, idx| *cid == chain_id && *blk == 100 && *idx == 0) - .returning(|_, _, _| { - Err(StorageError::EntryNotFound(EntryNotFoundError::LogNotFound { - block_number: 100, - log_index: 0, - })) - }); - - let validator = MockValidator::default(); - - let checker = - CrossSafetyChecker::new(chain_id, &validator, &provider, SafetyLevel::CrossSafe); - let result = checker.validate_executing_message(init_block, &msg); - - assert!(matches!( - result, - Err(CrossSafetyError::ValidationError(InitiatingMessageNotFound)) - )); - } - - #[test] - fn validate_executing_message_hash_mismatch() { - let chain_id = 1; - let msg = ExecutingMessage { - chain_id, - block_number: 100, - log_index: 0, - timestamp: 1234, - hash: b256(123), - }; - - let init_block = - BlockInfo { number: 100, hash: b256(100), parent_hash: b256(99), timestamp: 1234 }; - - let init_log = Log { - index: 0, - hash: b256(990), // Checksum mismatch - executing_message: None, - }; - - let mut provider = MockProvider::default(); - provider - .expect_get_log() - .withf(move |cid, blk, idx| *cid == chain_id && *blk == 100 && *idx == 0) - .returning(move |_, _, _| Ok(init_log.clone())); - - let validator = MockValidator::default(); - let checker = - CrossSafetyChecker::new(chain_id, &validator, &provider, SafetyLevel::CrossSafe); - let result = checker.validate_executing_message(init_block, &msg); - - assert!(matches!( - result, - Err(CrossSafetyError::ValidationError(ValidationError::InvalidMessageHash { - message_hash: _, - original_hash: _ - })) - )); - } - - #[test] - fn 
validate_executing_message_success() { - let chain_id = 1; - let timestamp = 1234; - - let init_block = BlockInfo { - number: 100, - hash: b256(100), - parent_hash: b256(99), - timestamp, // Matches msg.timestamp - }; - - let init_log = Log { - index: 0, - hash: b256(999), // Matches msg.hash → passes checksum - executing_message: None, - }; - - let msg = ExecutingMessage { - chain_id, - block_number: 100, - log_index: 0, - timestamp, - hash: b256(999), - }; - - let mut provider = MockProvider::default(); - provider - .expect_get_log() - .withf(move |cid, blk, idx| *cid == chain_id && *blk == 100 && *idx == 0) - .returning(move |_, _, _| Ok(init_log.clone())); - - let validator = MockValidator::default(); - let checker = - CrossSafetyChecker::new(chain_id, &validator, &provider, SafetyLevel::CrossSafe); - - let result = checker.validate_executing_message(init_block, &msg); - assert!(result.is_ok(), "Expected successful validation"); - } - - #[test] - fn detect_cycle_when_it_loops_back_to_candidate() { - // Scenario: - // candidate: (chain 1, block 10) - // → depends on (chain 2, block 11) - // → depends on (chain 3, block 20) - // → depends on (chain 1, block 10) ← back to candidate! - // Expected result: cyclic dependency detected. 
- - let ts = 100; - let candidate = - BlockInfo { number: 10, hash: b256(10), parent_hash: b256(9), timestamp: ts }; - - let block11 = - BlockInfo { number: 11, hash: b256(11), parent_hash: b256(10), timestamp: ts }; - - let block20 = - BlockInfo { number: 20, hash: b256(20), parent_hash: b256(19), timestamp: ts }; - - let mut provider = MockProvider::default(); - let validator = MockValidator::default(); - - // All blocks are below safety head (to allow traversal) - provider.expect_get_safety_head_ref().returning(|_, _| { - Ok(BlockInfo { number: 0, hash: b256(0), parent_hash: b256(0), timestamp: 0 }) - }); - - // Define log dependencies - provider.expect_get_block_logs().returning(move |chain, number| { - match (chain.to_string().as_str(), number) { - ("1", 10) => Ok(vec![Log { - index: 0, - hash: b256(1010), - executing_message: Some(ExecutingMessage { - chain_id: 2, - block_number: 11, - log_index: 0, - timestamp: ts, - hash: b256(222), - }), - }]), - ("2", 11) => Ok(vec![Log { - index: 0, - hash: b256(1020), - executing_message: Some(ExecutingMessage { - chain_id: 3, - block_number: 20, - log_index: 0, - timestamp: ts, - hash: b256(333), - }), - }]), - ("3", 20) => Ok(vec![Log { - index: 0, - hash: b256(1030), - executing_message: Some(ExecutingMessage { - chain_id: 1, - block_number: 10, - log_index: 0, - timestamp: ts, - hash: b256(444), - }), - }]), - _ => Ok(vec![]), - } - }); - - // Define block fetch behavior - provider.expect_get_block().returning(move |chain, number| { - match (chain.to_string().as_str(), number) { - ("2", 11) => Ok(block11), - ("3", 20) => Ok(block20), - ("1", 10) => Ok(candidate), - _ => panic!("unexpected block lookup: chain={chain} num={number}"), - } - }); - - let checker = CrossSafetyChecker::new(1, &validator, &provider, SafetyLevel::CrossSafe); - - let result = checker.check_cyclic_dependency(&candidate, &block11, 2, &mut HashSet::new()); - - assert!( - matches!( - result, - 
Err(CrossSafetyError::ValidationError(ValidationError::CyclicDependency { .. })) - ), - "Expected cyclic dependency error" - ); - } - - #[test] - fn no_cycle_if_dependency_path_does_not_reach_candidate() { - // Scenario: - // candidate: (chain 1, block 10) - // → depends on (chain 2, block 11) - // → depends on (chain 3, block 20) - // But no further dependency → path ends safely without cycling back to candidate. - // Expected result: no cycle detected. - - let ts = 100; - let candidate = - BlockInfo { number: 10, hash: b256(10), parent_hash: b256(9), timestamp: ts }; - - let block11 = - BlockInfo { number: 11, hash: b256(11), parent_hash: b256(10), timestamp: ts }; - - let block20 = - BlockInfo { number: 20, hash: b256(20), parent_hash: b256(19), timestamp: ts }; - - let mut provider = MockProvider::default(); - let validator = MockValidator::default(); - - // All blocks are below safety head (to allow traversal) - provider.expect_get_safety_head_ref().returning(|_, _| { - Ok(BlockInfo { number: 0, hash: b256(0), parent_hash: b256(0), timestamp: 0 }) - }); - - // Define log dependencies - provider.expect_get_block_logs().returning(move |chain, number| { - match (chain.to_string().as_str(), number) { - ("1", 10) => Ok(vec![Log { - index: 0, - hash: b256(1010), - executing_message: Some(ExecutingMessage { - chain_id: 2, - block_number: 11, - log_index: 0, - timestamp: ts, - hash: b256(222), - }), - }]), - ("2", 11) => Ok(vec![Log { - index: 0, - hash: b256(1020), - executing_message: Some(ExecutingMessage { - chain_id: 3, - block_number: 20, - log_index: 0, - timestamp: ts, - hash: b256(333), - }), - }]), - _ => Ok(vec![]), - } - }); - - // Define block fetch behavior - provider.expect_get_block().returning(move |chain, number| { - match (chain.to_string().as_str(), number) { - ("2", 11) => Ok(block11), - ("3", 20) => Ok(block20), - _ => panic!("unexpected block lookup: chain={chain} num={number}"), - } - }); - - let checker = CrossSafetyChecker::new(1, &validator, 
&provider, SafetyLevel::CrossSafe); - - let result = checker.check_cyclic_dependency(&candidate, &block11, 2, &mut HashSet::new()); - - assert!(result.is_ok(), "Expected no cycle when dependency path does not reach candidate"); - } - - #[test] - fn ignores_cycle_that_does_not_include_candidate() { - // Scenario: - // There is a cycle between blocks: - // Chain2 block 11 → Chain3 block 20 → Chain2 block 11 (forms a cycle) - // But candidate block (Chain1 block 10) is not in the cycle. - // Expected result: cycle is ignored since it doesn't involve the candidate. - - let ts = 100; - let candidate = - BlockInfo { number: 10, hash: b256(10), parent_hash: b256(9), timestamp: ts }; - - let block11 = - BlockInfo { number: 11, hash: b256(11), parent_hash: b256(10), timestamp: ts }; - - let block20 = - BlockInfo { number: 20, hash: b256(20), parent_hash: b256(19), timestamp: ts }; - - let mut provider = MockProvider::default(); - let validator = MockValidator::default(); - - // All blocks are below safety head (so we traverse them) - provider.expect_get_safety_head_ref().returning(|_, _| { - Ok(BlockInfo { number: 0, hash: b256(0), parent_hash: b256(0), timestamp: 0 }) - }); - - // Block logs setup: - // Chain1 block 10 → Chain2 block 11 - // Chain2 block 11 → Chain3 block 20 - // Chain3 block 20 → Chain2 block 11 (cycle here, but no candidate involvement) - provider.expect_get_block_logs().returning(move |chain, number| { - match (chain.to_string().as_str(), number) { - ("1", 10) => Ok(vec![Log { - index: 0, - hash: b256(1010), - executing_message: Some(ExecutingMessage { - chain_id: 2, - block_number: 11, - log_index: 0, - timestamp: ts, - hash: b256(222), - }), - }]), - ("2", 11) => Ok(vec![Log { - index: 0, - hash: b256(1020), - executing_message: Some(ExecutingMessage { - chain_id: 3, - block_number: 20, - log_index: 0, - timestamp: ts, - hash: b256(333), - }), - }]), - ("3", 20) => Ok(vec![Log { - index: 0, - hash: b256(1030), - executing_message: 
Some(ExecutingMessage { - chain_id: 2, - block_number: 11, - log_index: 0, - timestamp: ts, - hash: b256(444), - }), - }]), - _ => Ok(vec![]), - } - }); - - // Block fetches - provider.expect_get_block().returning(move |chain, number| { - match (chain.to_string().as_str(), number) { - ("2", 11) => Ok(block11), - ("3", 20) => Ok(block20), - _ => panic!("unexpected block lookup"), - } - }); - - let checker = CrossSafetyChecker::new(1, &validator, &provider, SafetyLevel::CrossSafe); - - // Start traversal from chain2: block11 is a dependency of candidate - let result = checker.check_cyclic_dependency(&candidate, &block11, 2, &mut HashSet::new()); - - assert!( - result.is_ok(), - "Expected no cycle error because candidate is not part of the cycle" - ); - } - - #[test] - fn stops_traversal_if_timestamp_differs() { - // Scenario: - // Candidate and dependency block have different timestamps. - // Should short-circuit the check and not recurse further. - // Expected result: no cycle detected. - - let chain_id = 1; - - let candidate = - BlockInfo { number: 10, hash: b256(10), parent_hash: b256(9), timestamp: 100 }; - let dep = BlockInfo { number: 9, hash: b256(9), parent_hash: b256(8), timestamp: 50 }; - - let provider = MockProvider::default(); - let validator = MockValidator::default(); - - let checker = - CrossSafetyChecker::new(chain_id, &validator, &provider, SafetyLevel::CrossSafe); - - let result = - checker.check_cyclic_dependency(&candidate, &dep, chain_id, &mut HashSet::new()); - assert!(result.is_ok()); - } - - #[test] - fn stops_traversal_if_block_is_already_cross_safe() { - // Scenario: - // Dependency block is already cross-safe (head is ahead of it). - // Should skip further traversal. - // Expected result: no cycle detected. 
- - let chain_id = 1; - - let candidate = - BlockInfo { number: 10, hash: b256(10), parent_hash: b256(9), timestamp: 100 }; - let dep = BlockInfo { number: 9, hash: b256(9), parent_hash: b256(8), timestamp: 100 }; - - let mut provider = MockProvider::default(); - let validator = MockValidator::default(); - - provider.expect_get_safety_head_ref().returning(|_, _| { - Ok(BlockInfo { - number: 10, - hash: b256(10), - parent_hash: b256(9), - timestamp: 100, // head ahead - }) - }); - - let checker = - CrossSafetyChecker::new(chain_id, &validator, &provider, SafetyLevel::CrossSafe); - - let result = - checker.check_cyclic_dependency(&candidate, &dep, chain_id, &mut HashSet::new()); - assert!(result.is_ok()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/safety_checker/error.rs b/rust/kona/crates/supervisor/core/src/safety_checker/error.rs deleted file mode 100644 index 4201b21e55bed..0000000000000 --- a/rust/kona/crates/supervisor/core/src/safety_checker/error.rs +++ /dev/null @@ -1,81 +0,0 @@ -use alloy_primitives::{B256, ChainId}; -use kona_interop::InteropValidationError; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::StorageError; -use op_alloy_consensus::interop::SafetyLevel; -use thiserror::Error; - -/// Errors returned when validating cross-chain message dependencies. -#[derive(Debug, Error, Eq, PartialEq)] -pub enum CrossSafetyError { - /// Indicates a failure while accessing storage during dependency checking. - #[error("storage error: {0}")] - Storage(#[from] StorageError), - - /// The block that a message depends on does not meet the required safety level. - #[error( - "dependency on block {block_number} (chain {chain_id}) does not meet required safety level" - )] - DependencyNotSafe { - /// The ID of the chain containing the unsafe dependency. - chain_id: ChainId, - /// The block number of the dependency that failed the safety check - block_number: u64, - }, - - /// No candidate block is currently available for promotion. 
- #[error("no candidate block found to promote")] - NoBlockToPromote, - - /// The requested safety level is not supported for promotion. - #[error("promotion to level {0} is not supported")] - UnsupportedTargetLevel(SafetyLevel), - - /// Indicates that error occurred while validating block - #[error(transparent)] - ValidationError(#[from] ValidationError), -} - -/// Errors returned when block validation fails due to a fatal violation. -/// These errors indicate that the block must be invalidated. -#[derive(Debug, Error, PartialEq, Eq)] -pub enum ValidationError { - /// Indicates that error occurred while validating interop config for the block messages - #[error(transparent)] - InteropValidationError(#[from] InteropValidationError), - - /// Indicates a mismatch between the executing message hash and the expected original log hash. - #[error( - "executing message hash {message_hash} does not match original log hash {original_hash}" - )] - InvalidMessageHash { - /// The hash provided in the executing message. - message_hash: B256, - /// The expected hash from the original initiating log. - original_hash: B256, - }, - - /// Indicates that the timestamp in the executing message does not match the timestamp - /// of the initiating block, violating the timestamp invariant required for validation. - #[error( - "timestamp invariant violated while validating executing message: expected {expected_timestamp}, but found {actual_timestamp}" - )] - TimestampInvariantViolation { - /// The timestamp of the initiating block. - expected_timestamp: u64, - /// The timestamp found in the executing message. - actual_timestamp: u64, - }, - - /// The initiating message corresponding to the executing message could not be found in log - /// storage. 
- #[error("initiating message not found for the executing message")] - InitiatingMessageNotFound, - - /// Cyclic dependency detected involving the candidate block - #[error("cyclic dependency detected while promoting block {block}")] - CyclicDependency { - /// The candidate block which is creating cyclic dependency - block: BlockInfo, - }, -} diff --git a/rust/kona/crates/supervisor/core/src/safety_checker/mod.rs b/rust/kona/crates/supervisor/core/src/safety_checker/mod.rs deleted file mode 100644 index 5bf2a2e1c4524..0000000000000 --- a/rust/kona/crates/supervisor/core/src/safety_checker/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! # Cross-Chain Block Safety Checker -//! -//! This module is responsible for verifying that all executing messages in a block -//! are based on dependencies that have reached the required safety level (e.g., -//! [`CrossSafe`](op_alloy_consensus::interop::SafetyLevel)). -//! -//! It ensures correctness in cross-chain execution by validating that initiating blocks -//! of messages are safely committed before the messages are executed in other chains. 
-mod cross; -pub use cross::CrossSafetyChecker; -mod error; -mod task; -mod traits; -pub use traits::SafetyPromoter; -mod promoter; -pub use promoter::{CrossSafePromoter, CrossUnsafePromoter}; - -pub use task::CrossSafetyCheckerJob; - -pub use error::{CrossSafetyError, ValidationError}; diff --git a/rust/kona/crates/supervisor/core/src/safety_checker/promoter.rs b/rust/kona/crates/supervisor/core/src/safety_checker/promoter.rs deleted file mode 100644 index 3172bd738db06..0000000000000 --- a/rust/kona/crates/supervisor/core/src/safety_checker/promoter.rs +++ /dev/null @@ -1,53 +0,0 @@ -use crate::{CrossSafetyError, event::ChainEvent, safety_checker::traits::SafetyPromoter}; -use alloy_primitives::ChainId; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::CrossChainSafetyProvider; -use op_alloy_consensus::interop::SafetyLevel; - -/// `CrossUnsafePromoter` implements [`SafetyPromoter`] for [`SafetyLevel::CrossUnsafe`] -#[derive(Debug)] -pub struct CrossUnsafePromoter; - -impl SafetyPromoter for CrossUnsafePromoter { - fn target_level(&self) -> SafetyLevel { - SafetyLevel::CrossUnsafe - } - - fn lower_bound_level(&self) -> SafetyLevel { - SafetyLevel::LocalUnsafe - } - - fn update_and_emit_event( - &self, - provider: &dyn CrossChainSafetyProvider, - chain_id: ChainId, - block: &BlockInfo, - ) -> Result { - provider.update_current_cross_unsafe(chain_id, block)?; - Ok(ChainEvent::CrossUnsafeUpdate { block: *block }) - } -} - -/// `CrossSafePromoter` implements [`SafetyPromoter`] for [`SafetyLevel::CrossSafe`] -#[derive(Debug)] -pub struct CrossSafePromoter; - -impl SafetyPromoter for CrossSafePromoter { - fn target_level(&self) -> SafetyLevel { - SafetyLevel::CrossSafe - } - - fn lower_bound_level(&self) -> SafetyLevel { - SafetyLevel::LocalSafe - } - - fn update_and_emit_event( - &self, - provider: &dyn CrossChainSafetyProvider, - chain_id: ChainId, - block: &BlockInfo, - ) -> Result { - let derived_ref_pair = provider.update_current_cross_safe(chain_id, 
block)?; - Ok(ChainEvent::CrossSafeUpdate { derived_ref_pair }) - } -} diff --git a/rust/kona/crates/supervisor/core/src/safety_checker/task.rs b/rust/kona/crates/supervisor/core/src/safety_checker/task.rs deleted file mode 100644 index 0a7cae741c5bc..0000000000000 --- a/rust/kona/crates/supervisor/core/src/safety_checker/task.rs +++ /dev/null @@ -1,469 +0,0 @@ -use crate::{ - CrossSafetyError, - event::ChainEvent, - safety_checker::{CrossSafetyChecker, traits::SafetyPromoter}, -}; -use alloy_primitives::ChainId; -use derive_more::Constructor; -use kona_interop::InteropValidator; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{CrossChainSafetyProvider, StorageError}; -use op_alloy_consensus::interop::SafetyLevel; -use std::{sync::Arc, time::Duration}; -use tokio::sync::mpsc; -use tokio_util::sync::CancellationToken; -use tracing::{debug, error, info}; - -/// A background job that promotes blocks to a target safety level on a given chain. -/// -/// It uses [`CrossChainSafetyProvider`] to fetch candidate blocks and the [`CrossSafetyChecker`] -/// to validate cross-chain message dependencies. -#[derive(Debug, Constructor)] -pub struct CrossSafetyCheckerJob { - chain_id: ChainId, - provider: Arc

, - cancel_token: CancellationToken, - interval: Duration, - promoter: L, - event_tx: mpsc::Sender, - validator: Arc, -} - -impl CrossSafetyCheckerJob -where - P: CrossChainSafetyProvider + Send + Sync + 'static, - V: InteropValidator + Send + Sync + 'static, - L: SafetyPromoter, -{ - /// Runs the job loop until cancelled, promoting blocks by Promoter - /// - /// On each iteration: - /// - Tries to promote the next eligible block - /// - Waits for configured interval if promotion fails - /// - Exits when [`CancellationToken`] is triggered - pub async fn run(self) { - let target_level = self.promoter.target_level(); - let chain_id = self.chain_id; - - info!( - target: "supervisor::safety_checker", - chain_id, - %target_level, - "Started safety checker"); - - let checker = - CrossSafetyChecker::new(chain_id, &*self.validator, &*self.provider, target_level); - - loop { - tokio::select! { - _ = self.cancel_token.cancelled() => { - info!(target: "supervisor::safety_checker", chain_id, %target_level, "Canceled safety checker"); - break; - } - - _ = async { - match self.promote_next_block(&checker) { - Ok(block_info) => { - debug!( - target: "supervisor::safety_checker", - chain_id, - %target_level, - %block_info, - "Promoted next candidate block" - ); - } - Err(err) => { - match err { - // Expected / non-fatal errors: - // - no candidate is ready right now - // - validation failed (we already emitted invalidate event in promote_next_block for CrossSafe) - // - dependency not yet safe on another chain - CrossSafetyError::NoBlockToPromote | - CrossSafetyError::ValidationError(_) | - CrossSafetyError::DependencyNotSafe { .. 
} => { - debug!( - target: "supervisor::safety_checker", - chain_id, - %target_level, - %err, - "Error promoting next candidate block" - ); - }, - _ => { - error!( - target: "supervisor::safety_checker", - chain_id, - %target_level, - %err, - "Unexpected error promoting next candidate block" - ); - } - } - tokio::time::sleep(self.interval).await; - } - } - } => {} - } - } - - info!(target: "supervisor::safety_checker", chain_id = self.chain_id, %target_level, "Stopped safety checker"); - } - - // Attempts to promote the next block by the Promoter - // after validating cross-chain dependencies. - fn promote_next_block( - &self, - checker: &CrossSafetyChecker<'_, P, V>, - ) -> Result { - let candidate = self.find_next_promotable_block()?; - - match checker.validate_block(candidate) { - Ok(()) => { - // Success: promote + emit - let ev = self.promoter.update_and_emit_event( - &*self.provider, - self.chain_id, - &candidate, - )?; - self.broadcast_event(ev); - Ok(candidate) - } - - Err(err @ CrossSafetyError::ValidationError(_)) => { - // Only invalidate if we are targeting CrossSafe - if self.promoter.target_level() == SafetyLevel::CrossSafe { - info!( - target: "supervisor::safety_checker", - chain_id = self.chain_id, - target_level = %self.promoter.target_level(), - block_info = %candidate, - %err, - "Triggering block invalidation for the invalid block" - ); - self.broadcast_event(ChainEvent::InvalidateBlock { block: candidate }); - } - Err(err) // propagate the error for logging - } - Err(err) => Err(err), - } - } - - // Finds the next block that is eligible for promotion at the configured target level. 
- fn find_next_promotable_block(&self) -> Result { - let current_head = self - .provider - .get_safety_head_ref(self.chain_id, self.promoter.target_level()) - .map_err(|err| { - if matches!(err, StorageError::FutureData) { - CrossSafetyError::NoBlockToPromote - } else { - err.into() - } - })?; - - let upper_head = self - .provider - .get_safety_head_ref(self.chain_id, self.promoter.lower_bound_level()) - .map_err(|err| { - if matches!(err, StorageError::FutureData) { - CrossSafetyError::NoBlockToPromote - } else { - err.into() - } - })?; - - if current_head.number >= upper_head.number { - return Err(CrossSafetyError::NoBlockToPromote); - } - - let candidate = self.provider.get_block(self.chain_id, current_head.number + 1)?; - - Ok(candidate) - } - - fn broadcast_event(&self, event: ChainEvent) { - if let Err(err) = self.event_tx.try_send(event) { - error!( - target: "supervisor::safety_checker", - target_level = %self.promoter.target_level(), - %err, - "Failed to broadcast cross head update event", - ); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::safety_checker::promoter::{CrossSafePromoter, CrossUnsafePromoter}; - use alloy_primitives::{B256, ChainId}; - use kona_interop::{DerivedRefPair, InteropValidationError}; - use kona_supervisor_storage::{CrossChainSafetyProvider, StorageError}; - use kona_supervisor_types::{ExecutingMessage, Log}; - use mockall::mock; - use op_alloy_consensus::interop::SafetyLevel; - - mock! 
{ - #[derive(Debug)] - pub Provider {} - - impl CrossChainSafetyProvider for Provider { - fn get_block(&self, chain_id: ChainId, block_number: u64) -> Result; - fn get_log(&self, chain_id: ChainId, block_number: u64, log_index: u32) -> Result; - fn get_block_logs(&self, chain_id: ChainId, block_number: u64) -> Result, StorageError>; - fn get_safety_head_ref(&self, chain_id: ChainId, level: SafetyLevel) -> Result; - fn update_current_cross_unsafe(&self, chain_id: ChainId, block: &BlockInfo) -> Result<(), StorageError>; - fn update_current_cross_safe(&self, chain_id: ChainId, block: &BlockInfo) -> Result; - } - } - - mock! ( - #[derive(Debug)] - pub Validator {} - - impl InteropValidator for Validator { - fn validate_interop_timestamps( - &self, - initiating_chain_id: ChainId, - initiating_timestamp: u64, - executing_chain_id: ChainId, - executing_timestamp: u64, - timeout: Option, - ) -> Result<(), InteropValidationError>; - - fn is_post_interop(&self, chain_id: ChainId, timestamp: u64) -> bool; - - fn is_interop_activation_block(&self, chain_id: ChainId, block: BlockInfo) -> bool; - } - ); - - fn b256(n: u64) -> B256 { - let mut bytes = [0u8; 32]; - bytes[24..].copy_from_slice(&n.to_be_bytes()); - B256::from(bytes) - } - - fn block(n: u64) -> BlockInfo { - BlockInfo { number: n, hash: b256(n), parent_hash: b256(n - 1), timestamp: 0 } - } - - #[tokio::test] - async fn promotes_next_cross_unsafe_successfully() { - let chain_id = 1; - let mut mock = MockProvider::default(); - let mock_validator = MockValidator::default(); - let (event_tx, mut event_rx) = mpsc::channel::(10); - - mock.expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::CrossUnsafe) - .returning(|_, _| Ok(block(99))); - - mock.expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::LocalUnsafe) - .returning(|_, _| Ok(block(100))); - - mock.expect_get_block() - .withf(move |cid, num| *cid == chain_id && *num == 100) - 
.returning(|_, _| Ok(block(100))); - - mock.expect_get_block_logs() - .withf(move |cid, num| *cid == chain_id && *num == 100) - .returning(|_, _| Ok(vec![])); - - mock.expect_update_current_cross_unsafe() - .withf(move |cid, blk| *cid == chain_id && blk.number == 100) - .returning(|_, _| Ok(())); - - let job = CrossSafetyCheckerJob::new( - chain_id, - Arc::new(mock), - CancellationToken::new(), - Duration::from_secs(1), - CrossUnsafePromoter, - event_tx, - Arc::new(mock_validator), - ); - let checker = CrossSafetyChecker::new( - job.chain_id, - &*job.validator, - &*job.provider, - CrossUnsafePromoter.target_level(), - ); - let result = job.promote_next_block(&checker); - - assert!(result.is_ok()); - assert_eq!(result.unwrap().number, 100); - - // Receive and assert the correct event - let received_event = event_rx.recv().await.expect("expected event not received"); - - assert_eq!(received_event, ChainEvent::CrossUnsafeUpdate { block: block(100) }); - } - - #[tokio::test] - async fn promotes_next_cross_safe_successfully() { - let chain_id = 1; - let mut mock = MockProvider::default(); - let mock_validator = MockValidator::default(); - let (event_tx, mut event_rx) = mpsc::channel::(10); - - mock.expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::CrossSafe) - .returning(|_, _| Ok(block(99))); - - mock.expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::LocalSafe) - .returning(|_, _| Ok(block(100))); - - mock.expect_get_block() - .withf(move |cid, num| *cid == chain_id && *num == 100) - .returning(|_, _| Ok(block(100))); - - mock.expect_get_block_logs() - .withf(move |cid, num| *cid == chain_id && *num == 100) - .returning(|_, _| Ok(vec![])); - - mock.expect_update_current_cross_safe() - .withf(move |cid, blk| *cid == chain_id && blk.number == 100) - .returning(|_, _| Ok(DerivedRefPair { derived: block(100), source: block(1) })); - - let job = CrossSafetyCheckerJob::new( - chain_id, 
- Arc::new(mock), - CancellationToken::new(), - Duration::from_secs(1), - CrossSafePromoter, - event_tx, - Arc::new(mock_validator), - ); - - let checker = CrossSafetyChecker::new( - job.chain_id, - &*job.validator, - &*job.provider, - CrossSafePromoter.target_level(), - ); - let result = job.promote_next_block(&checker); - - assert!(result.is_ok()); - assert_eq!(result.unwrap().number, 100); - - // Receive and assert the correct event - let received_event = event_rx.recv().await.expect("expected event not received"); - - assert_eq!( - received_event, - ChainEvent::CrossSafeUpdate { - derived_ref_pair: DerivedRefPair { derived: block(100), source: block(1) } - } - ); - } - - #[tokio::test] - async fn promotes_next_cross_safe_triggers_block_invalidation() { - let chain_id = 1; - let mut mock = MockProvider::default(); - let mut mock_validator = MockValidator::default(); - let (event_tx, mut event_rx) = mpsc::channel::(10); - - let exec_msg = ExecutingMessage { - chain_id: 2, - block_number: 99, - log_index: 0, - timestamp: 195, - hash: b256(99), - }; - - let exec_log = Log { index: 0, hash: b256(100), executing_message: Some(exec_msg) }; - - mock.expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::CrossSafe) - .returning(|_, _| Ok(block(99))); - - mock.expect_get_safety_head_ref() - .withf(move |cid, lvl| *cid == chain_id && *lvl == SafetyLevel::LocalSafe) - .returning(|_, _| Ok(block(100))); - - mock.expect_get_block() - .withf(move |cid, num| *cid == 2 && *num == 99) - .returning(|_, _| Ok(block(99))); - - mock.expect_get_block() - .withf(move |cid, num| *cid == chain_id && *num == 100) - .returning(|_, _| Ok(block(100))); - - mock.expect_get_block_logs() - .withf(move |cid, num| *cid == chain_id && *num == 100) - .returning(move |_, _| Ok(vec![exec_log.clone()])); - - mock_validator.expect_validate_interop_timestamps().returning(move |_, _, _, _, _| { - Err(InteropValidationError::InvalidTimestampInvariant { executing: 
0, initiating: 0 }) - }); - - let job = CrossSafetyCheckerJob::new( - chain_id, - Arc::new(mock), - CancellationToken::new(), - Duration::from_secs(1), - CrossSafePromoter, - event_tx, - Arc::new(mock_validator), - ); - - let checker = CrossSafetyChecker::new( - job.chain_id, - &*job.validator, - &*job.provider, - CrossSafePromoter.target_level(), - ); - let result = job.promote_next_block(&checker); - - assert!(result.is_err()); - assert!( - matches!(result, Err(CrossSafetyError::ValidationError(_))), - "Expected validation error" - ); - - // Receive and assert the correct event - let received_event = event_rx.recv().await.expect("expected event not received"); - - assert_eq!(received_event, ChainEvent::InvalidateBlock { block: block(100) }); - } - - #[test] - fn promotes_next_cross_unsafe_failed_with_no_candidates() { - let chain_id = 1; - let mut mock = MockProvider::default(); - let mock_validator = MockValidator::default(); - let (event_tx, _) = mpsc::channel::(10); - - mock.expect_get_safety_head_ref() - .withf(|_, lvl| *lvl == SafetyLevel::CrossSafe) - .returning(|_, _| Ok(block(200))); - - mock.expect_get_safety_head_ref() - .withf(|_, lvl| *lvl == SafetyLevel::LocalSafe) - .returning(|_, _| Ok(block(200))); - - let job = CrossSafetyCheckerJob::new( - chain_id, - Arc::new(mock), - CancellationToken::new(), - Duration::from_secs(1), - CrossSafePromoter, - event_tx, - Arc::new(mock_validator), - ); - - let checker = CrossSafetyChecker::new( - job.chain_id, - &*job.validator, - &*job.provider, - CrossSafePromoter.target_level(), - ); - let result = job.promote_next_block(&checker); - - assert!(matches!(result, Err(CrossSafetyError::NoBlockToPromote))); - } -} diff --git a/rust/kona/crates/supervisor/core/src/safety_checker/traits.rs b/rust/kona/crates/supervisor/core/src/safety_checker/traits.rs deleted file mode 100644 index 7bc0504d5509e..0000000000000 --- a/rust/kona/crates/supervisor/core/src/safety_checker/traits.rs +++ /dev/null @@ -1,27 +0,0 @@ -use 
crate::{CrossSafetyError, event::ChainEvent}; -use alloy_primitives::ChainId; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::CrossChainSafetyProvider; -use op_alloy_consensus::interop::SafetyLevel; - -/// Defines the logic for promoting a block to a specific [`SafetyLevel`]. -/// -/// Each implementation handles: -/// - Which safety level it promotes to -/// - Its required lower bound -/// - Updating state and generating the corresponding [`ChainEvent`] -pub trait SafetyPromoter { - /// Target safety level this promoter upgrades to. - fn target_level(&self) -> SafetyLevel; - - /// Required lower bound level for promotion eligibility. - fn lower_bound_level(&self) -> SafetyLevel; - - /// Performs the promotion by updating state and returning the event to broadcast. - fn update_and_emit_event( - &self, - provider: &dyn CrossChainSafetyProvider, - chain_id: ChainId, - block: &BlockInfo, - ) -> Result; -} diff --git a/rust/kona/crates/supervisor/core/src/state/mod.rs b/rust/kona/crates/supervisor/core/src/state/mod.rs deleted file mode 100644 index 695a56c9a5edd..0000000000000 --- a/rust/kona/crates/supervisor/core/src/state/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -//! This module is responsible for managing and persisting the state of the supervisor. -// TODO: Implement state management logic for the supervisor. 
diff --git a/rust/kona/crates/supervisor/core/src/supervisor.rs b/rust/kona/crates/supervisor/core/src/supervisor.rs deleted file mode 100644 index 00b5cbba589f0..0000000000000 --- a/rust/kona/crates/supervisor/core/src/supervisor.rs +++ /dev/null @@ -1,384 +0,0 @@ -use alloy_eips::BlockNumHash; -use alloy_primitives::{B256, Bytes, ChainId, keccak256}; -use async_trait::async_trait; -use core::fmt::Debug; -use kona_interop::{ - DependencySet, ExecutingDescriptor, InteropValidator, OutputRootWithChain, SUPER_ROOT_VERSION, - SafetyLevel, SuperRoot, -}; -use kona_protocol::BlockInfo; -use kona_supervisor_rpc::{ChainRootInfoRpc, SuperRootOutputRpc}; -use kona_supervisor_storage::{ - ChainDb, ChainDbFactory, DerivationStorageReader, FinalizedL1Storage, HeadRefStorageReader, - LogStorageReader, -}; -use kona_supervisor_types::{SuperHead, parse_access_list}; -use op_alloy_rpc_types::SuperchainDAError; -use std::{collections::HashMap, sync::Arc}; -use tokio::sync::RwLock; -use tracing::{error, warn}; - -use crate::{ - SpecError, SupervisorError, - config::Config, - syncnode::{BlockProvider, ManagedNodeDataProvider}, -}; - -/// Defines the service for the Supervisor core logic. -#[async_trait] -#[auto_impl::auto_impl(&, &mut, Arc, Box)] -pub trait SupervisorService: Debug + Send + Sync { - /// Returns list of supervised [`ChainId`]s. - fn chain_ids(&self) -> impl Iterator; - - /// Returns mapping of supervised [`ChainId`]s to their [`ChainDependency`] config. - /// - /// [`ChainDependency`]: kona_interop::ChainDependency - fn dependency_set(&self) -> &DependencySet; - - /// Returns [`SuperHead`] of given supervised chain. - fn super_head(&self, chain: ChainId) -> Result; - - /// Returns latest block derived from given L1 block, for given chain. - fn latest_block_from( - &self, - l1_block: BlockNumHash, - chain: ChainId, - ) -> Result; - - /// Returns the L1 source block that the given L2 derived block was based on, for the specified - /// chain. 
- fn derived_to_source_block( - &self, - chain: ChainId, - derived: BlockNumHash, - ) -> Result; - - /// Returns [`LocalUnsafe`] block for the given chain. - /// - /// [`LocalUnsafe`]: SafetyLevel::LocalUnsafe - fn local_unsafe(&self, chain: ChainId) -> Result; - - /// Returns [`LocalSafe`] block for the given chain. - /// - /// [`LocalSafe`]: SafetyLevel::LocalSafe - fn local_safe(&self, chain: ChainId) -> Result; - - /// Returns [`CrossSafe`] block for the given chain. - /// - /// [`CrossSafe`]: SafetyLevel::CrossSafe - fn cross_safe(&self, chain: ChainId) -> Result; - - /// Returns [`Finalized`] block for the given chain. - /// - /// [`Finalized`]: SafetyLevel::Finalized - fn finalized(&self, chain: ChainId) -> Result; - - /// Returns the finalized L1 block that the supervisor is synced to. - fn finalized_l1(&self) -> Result; - - /// Returns the [`SuperRootOutput`] at a specified timestamp, which represents the global - /// state across all monitored chains. - /// - /// [`SuperRootOutput`]: kona_interop::SuperRootOutput - async fn super_root_at_timestamp( - &self, - timestamp: u64, - ) -> Result; - - /// Verifies if an access-list references only valid messages - fn check_access_list( - &self, - inbox_entries: Vec, - min_safety: SafetyLevel, - executing_descriptor: ExecutingDescriptor, - ) -> Result<(), SupervisorError>; -} - -/// The core Supervisor component responsible for monitoring and coordinating chain states. -#[derive(Debug)] -pub struct Supervisor { - config: Arc, - database_factory: Arc, - - // As of now supervisor only supports a single managed node per chain. - // This is a limitation of the current implementation, but it will be extended in the future. - managed_nodes: RwLock>>, -} - -impl Supervisor -where - M: ManagedNodeDataProvider + BlockProvider + Send + Sync + Debug, -{ - /// Creates a new [`Supervisor`] instance. 
- #[allow(clippy::new_without_default, clippy::missing_const_for_fn)] - pub fn new(config: Arc, database_factory: Arc) -> Self { - Self { config, database_factory, managed_nodes: RwLock::new(HashMap::new()) } - } - - /// Adds a new managed node to the [`Supervisor`]. - pub async fn add_managed_node( - &self, - chain_id: ChainId, - managed_node: Arc, - ) -> Result<(), SupervisorError> { - // todo: instead of passing the chain ID, we should get it from the managed node - if !self.config.dependency_set.dependencies.contains_key(&chain_id) { - warn!(target: "supervisor::service", %chain_id, "Unsupported chain ID"); - return Err(SupervisorError::UnsupportedChainId); - } - - let mut managed_nodes = self.managed_nodes.write().await; - if managed_nodes.contains_key(&chain_id) { - warn!(target: "supervisor::service", %chain_id, "Managed node already exists for chain"); - return Ok(()); - } - - managed_nodes.insert(chain_id, managed_node.clone()); - Ok(()) - } - - fn verify_safety_level( - &self, - chain_id: ChainId, - block: &BlockInfo, - safety: SafetyLevel, - ) -> Result<(), SupervisorError> { - let head_ref = self.database_factory.get_db(chain_id)?.get_safety_head_ref(safety)?; - - if head_ref.number < block.number { - return Err(SpecError::SuperchainDAError(SuperchainDAError::ConflictingData).into()); - } - - Ok(()) - } - - fn get_db(&self, chain: ChainId) -> Result, SupervisorError> { - self.database_factory.get_db(chain).map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get database for chain"); - SpecError::from(err).into() - }) - } -} - -#[async_trait] -impl SupervisorService for Supervisor -where - M: ManagedNodeDataProvider + BlockProvider + Send + Sync + Debug, -{ - fn chain_ids(&self) -> impl Iterator { - self.config.dependency_set.dependencies.keys().copied() - } - - fn dependency_set(&self) -> &DependencySet { - &self.config.dependency_set - } - - fn super_head(&self, chain: ChainId) -> Result { - 
Ok(self.get_db(chain)?.get_super_head().map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get super head for chain"); - SpecError::from(err) - })?) - } - - fn latest_block_from( - &self, - l1_block: BlockNumHash, - chain: ChainId, - ) -> Result { - Ok(self - .get_db(chain)? - .latest_derived_block_at_source(l1_block) - .map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get latest derived block at source for chain"); - SpecError::from(err) - })? - ) - } - - fn derived_to_source_block( - &self, - chain: ChainId, - derived: BlockNumHash, - ) -> Result { - Ok(self.get_db(chain)?.derived_to_source(derived).map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get derived to source block for chain"); - SpecError::from(err) - })?) - } - - fn local_unsafe(&self, chain: ChainId) -> Result { - Ok(self.get_db(chain)?.get_safety_head_ref(SafetyLevel::LocalUnsafe).map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get local unsafe head ref for chain"); - SpecError::from(err) - })?) - } - - fn local_safe(&self, chain: ChainId) -> Result { - Ok(self.get_db(chain)?.get_safety_head_ref(SafetyLevel::LocalSafe).map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get local safe head ref for chain"); - SpecError::from(err) - })?) - } - - fn cross_safe(&self, chain: ChainId) -> Result { - Ok(self.get_db(chain)?.get_safety_head_ref(SafetyLevel::CrossSafe).map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get cross safe head ref for chain"); - SpecError::from(err) - })?) - } - - fn finalized(&self, chain: ChainId) -> Result { - Ok(self.get_db(chain)?.get_safety_head_ref(SafetyLevel::Finalized).map_err(|err| { - error!(target: "supervisor::service", %chain, %err, "Failed to get finalized head ref for chain"); - SpecError::from(err) - })?) 
- } - - fn finalized_l1(&self) -> Result { - Ok(self.database_factory.get_finalized_l1().map_err(|err| { - error!(target: "supervisor::service", %err, "Failed to get finalized L1"); - SpecError::from(err) - })?) - } - - async fn super_root_at_timestamp( - &self, - timestamp: u64, - ) -> Result { - let mut chain_ids = self.config.dependency_set.dependencies.keys().collect::>(); - // Sorting chain ids for deterministic super root hash - chain_ids.sort(); - - let mut chain_infos = Vec::::with_capacity(chain_ids.len()); - let mut super_root_chains = Vec::::with_capacity(chain_ids.len()); - let mut cross_safe_source = BlockNumHash::default(); - - for id in chain_ids { - let managed_node = { - let guard = self.managed_nodes.read().await; - match guard.get(id) { - Some(m) => m.clone(), - None => { - error!(target: "supervisor::service", chain_id = %id, "Managed node not found for chain"); - return Err(SupervisorError::ManagedNodeMissing(*id)); - } - } - }; - let output_v0 = managed_node.output_v0_at_timestamp(timestamp).await?; - let output_v0_string = serde_json::to_string(&output_v0) - .inspect_err(|err| { - error!(target: "supervisor::service", chain_id = %id, %err, "Failed to serialize output_v0 for chain"); - })?; - let canonical_root = keccak256(output_v0_string.as_bytes()); - - let pending_output_v0 = managed_node.pending_output_v0_at_timestamp(timestamp).await?; - let pending_output_v0_string = serde_json::to_string(&pending_output_v0) - .inspect_err(|err| { - error!(target: "supervisor::service", chain_id = %id, %err, "Failed to serialize pending_output_v0 for chain"); - })?; - let pending_output_v0_bytes = - Bytes::copy_from_slice(pending_output_v0_string.as_bytes()); - - chain_infos.push(ChainRootInfoRpc { - chain_id: *id, - canonical: canonical_root, - pending: pending_output_v0_bytes, - }); - - super_root_chains - .push(OutputRootWithChain { chain_id: *id, output_root: canonical_root }); - - let l2_block = 
managed_node.l2_block_ref_by_timestamp(timestamp).await?; - let source = self - .derived_to_source_block(*id, l2_block.id()) - .inspect_err(|err| { - error!(target: "supervisor::service", %id, %err, "Failed to get derived to source block for chain"); - })?; - - if cross_safe_source.number == 0 || cross_safe_source.number < source.number { - cross_safe_source = source.id(); - } - } - - let super_root = SuperRoot { timestamp, output_roots: super_root_chains }; - let super_root_hash = super_root.hash(); - - Ok(SuperRootOutputRpc { - cross_safe_derived_from: cross_safe_source, - timestamp, - super_root: super_root_hash, - chains: chain_infos, - version: SUPER_ROOT_VERSION, - }) - } - - fn check_access_list( - &self, - inbox_entries: Vec, - min_safety: SafetyLevel, - executing_descriptor: ExecutingDescriptor, - ) -> Result<(), SupervisorError> { - let access_list = parse_access_list(inbox_entries)?; - - for access in &access_list { - // Check all the invariants for each message - // Ref: https://github.com/ethereum-optimism/specs/blob/main/specs/interop/derivation.md#invariants - - // TODO: support 32 bytes chain id and convert to u64 via dependency set to be usable - // across services - let initiating_chain_id = access.chain_id[24..32] - .try_into() - .map(u64::from_be_bytes) - .map_err(|err| { - error!(target: "supervisor::service", %err, "Failed to parse initiating chain id from access list"); - SupervisorError::ChainIdParseError() - })?; - - let executing_chain_id = executing_descriptor.chain_id.unwrap_or(initiating_chain_id); - - // Message must be valid at the time of execution. 
- self.config.validate_interop_timestamps( - initiating_chain_id, - access.timestamp, - executing_chain_id, - executing_descriptor.timestamp, - executing_descriptor.timeout, - ).map_err(|err| { - warn!(target: "supervisor::service", %err, "Failed to validate interop timestamps"); - SpecError::SuperchainDAError(SuperchainDAError::ConflictingData) - })?; - - // Verify the initiating message exists and valid for corresponding executing message. - let db = self.get_db(initiating_chain_id)?; - - let block = db.get_block(access.block_number).map_err(|err| { - warn!(target: "supervisor::service", %initiating_chain_id, %err, "Failed to get block for chain"); - SpecError::from(err) - })?; - if block.timestamp != access.timestamp { - return Err(SupervisorError::from(SpecError::SuperchainDAError( - SuperchainDAError::ConflictingData, - ))); - } - - let log = db.get_log(access.block_number, access.log_index).map_err(|err| { - warn!(target: "supervisor::service", %initiating_chain_id, %err, "Failed to get log for chain"); - SpecError::from(err) - })?; - access.verify_checksum(&log.hash).map_err(|err| { - warn!(target: "supervisor::service", %initiating_chain_id, %err, "Failed to verify checksum for access list"); - SpecError::SuperchainDAError(SuperchainDAError::ConflictingData) - })?; - - // The message must be included in a block that is at least as safe as required - // by the `min_safety` level - if min_safety != SafetyLevel::LocalUnsafe { - // The block is already unsafe as it is found in log db - self.verify_safety_level(initiating_chain_id, &block, min_safety)?; - } - } - - Ok(()) - } -} diff --git a/rust/kona/crates/supervisor/core/src/syncnode/client.rs b/rust/kona/crates/supervisor/core/src/syncnode/client.rs deleted file mode 100644 index 46b00225f8e06..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/client.rs +++ /dev/null @@ -1,406 +0,0 @@ -use super::{AuthenticationError, ClientError, metrics::Metrics}; -use alloy_primitives::{B256, ChainId}; 
-use alloy_rpc_types_engine::{Claims, JwtSecret}; -use alloy_rpc_types_eth::BlockNumHash; -use async_trait::async_trait; -use jsonrpsee::{ - core::client::Subscription, - ws_client::{HeaderMap, HeaderValue, WsClient, WsClientBuilder}, -}; -use kona_supervisor_metrics::observe_metrics_for_result_async; -use kona_supervisor_rpc::{BlockInfo, ManagedModeApiClient, jsonrpsee::SubscriptionTopic}; -use kona_supervisor_types::{BlockSeal, OutputV0, Receipts, SubscriptionEvent}; -use std::{ - fmt::Debug, - sync::{Arc, OnceLock}, -}; -use tokio::sync::Mutex; -use tracing::{error, info}; - -/// Trait for a managed node client that provides various methods to interact with the node. -#[async_trait] -pub trait ManagedNodeClient: Send + Sync + Debug { - /// Returns the [`ChainId`] of the managed node. - async fn chain_id(&self) -> Result; - - /// Subscribes to [`SubscriptionEvent`] from the managed node. - async fn subscribe_events(&self) -> Result, ClientError>; - - /// Fetches [`Receipts`] for a given block hash. - async fn fetch_receipts(&self, block_hash: B256) -> Result; - - /// Fetches the [`OutputV0`] at a specific timestamp. - async fn output_v0_at_timestamp(&self, timestamp: u64) -> Result; - - /// Fetches the pending [`OutputV0`] at a specific timestamp. - async fn pending_output_v0_at_timestamp(&self, timestamp: u64) - -> Result; - - /// Fetches the L2 [`BlockInfo`] by timestamp. - async fn l2_block_ref_by_timestamp(&self, timestamp: u64) -> Result; - - /// Fetches the [`BlockInfo`] by block number. - async fn block_ref_by_number(&self, block_number: u64) -> Result; - - /// Resets the managed node to the pre-interop state. - async fn reset_pre_interop(&self) -> Result<(), ClientError>; - - /// Resets the node state with the provided block IDs. 
- async fn reset( - &self, - unsafe_id: BlockNumHash, - cross_unsafe_id: BlockNumHash, - local_safe_id: BlockNumHash, - cross_safe_id: BlockNumHash, - finalised_id: BlockNumHash, - ) -> Result<(), ClientError>; - - /// Invalidates a block in the managed node. - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ClientError>; - - /// Provides L1 [`BlockInfo`] to the managed node. - async fn provide_l1(&self, block_info: BlockInfo) -> Result<(), ClientError>; - - /// Updates the finalized block ID in the managed node. - async fn update_finalized(&self, finalized_block_id: BlockNumHash) -> Result<(), ClientError>; - - /// Updates the cross-unsafe block ID in the managed node. - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ClientError>; - - /// Updates the cross-safe block ID in the managed node. - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ClientError>; - - /// Resets the ws-client to None when server disconnects - async fn reset_ws_client(&self); -} - -/// [`ClientConfig`] sets the configuration for the managed node client. -#[derive(Debug, Clone)] -pub struct ClientConfig { - /// The URL + port of the managed node - pub url: String, - /// jwt secret for the managed node interop rpc - pub jwt_secret: JwtSecret, -} - -/// Client for interacting with a managed node. -#[derive(Debug)] -pub struct Client { - config: ClientConfig, - /// Chain ID of the managed node - chain_id: OnceLock, - /// The attached web socket client - ws_client: Mutex>>, -} - -impl Client { - /// Creates a new [`Client`] with the given configuration. - pub fn new(config: ClientConfig) -> Self { - Metrics::init(config.url.as_ref()); - Self { config, chain_id: OnceLock::new(), ws_client: Mutex::new(None) } - } - - /// Creates authentication headers using JWT secret. 
- fn create_auth_headers(&self) -> Result { - // Create JWT claims with current time - let claims = Claims::with_current_timestamp(); - let token = self.config.jwt_secret.encode(&claims).map_err(|err| { - error!(target: "supervisor::managed_node", %err, "Failed to encode JWT claims"); - AuthenticationError::InvalidJwt - })?; - - let mut headers = HeaderMap::new(); - let auth_header = format!("Bearer {token}"); - - headers.insert( - "Authorization", - HeaderValue::from_str(&auth_header).map_err(|err| { - error!(target: "supervisor::managed_node", %err, "Invalid authorization header"); - AuthenticationError::InvalidHeader - })?, - ); - - Ok(headers) - } - - /// Returns a reference to the `WebSocket` client, creating it if it doesn't exist. - // todo: support http client as well - pub async fn get_ws_client(&self) -> Result, ClientError> { - let mut ws_client_guard = self.ws_client.lock().await; - if ws_client_guard.is_none() { - let headers = self.create_auth_headers().inspect_err(|err| { - error!(target: "supervisor::managed_node", %err, "Failed to create auth headers"); - })?; - - info!(target: "supervisor::managed_node", ws_url = self.config.url, "Creating a new web socket client"); - let client = - WsClientBuilder::default().set_headers(headers).build(&self.config.url).await?; - - *ws_client_guard = Some(Arc::new(client)); - } - Ok(ws_client_guard.clone().unwrap()) - } -} - -#[async_trait] -impl ManagedNodeClient for Client { - async fn reset_ws_client(&self) { - let mut ws_client_guard = self.ws_client.lock().await; - if ws_client_guard.is_some() { - *ws_client_guard = None; - }; - } - - async fn chain_id(&self) -> Result { - if let Some(chain_id) = self.chain_id.get() { - return Ok(*chain_id); - } - - let client = self.get_ws_client().await?; - let chain_id_str = observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - 
Metrics::RPC_METHOD_CHAIN_ID, - async { - client.chain_id().await - }, - "node" => self.config.url.clone() - ) - .inspect_err(|err| { - error!(target: "supervisor::managed_node", %err, "Failed to get chain ID"); - })?; - - let chain_id = chain_id_str.parse::().inspect_err(|err| { - error!(target: "supervisor::managed_node", %err, "Failed to parse chain ID"); - })?; - - let _ = self.chain_id.set(chain_id); - Ok(chain_id) - } - - async fn subscribe_events(&self) -> Result, ClientError> { - let client = self.get_ws_client().await?; // This returns ManagedNodeError, handled by your function - let subscription = observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_SUBSCRIBE_EVENTS, - async { - ManagedModeApiClient::subscribe_events(client.as_ref(), SubscriptionTopic::Events).await - }, - "node" => self.config.url.clone() - )?; - - Ok(subscription) - } - - async fn fetch_receipts(&self, block_hash: B256) -> Result { - let client = self.get_ws_client().await?; // This returns ManagedNodeError, handled by your function - let receipts = observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_FETCH_RECEIPTS, - async { - ManagedModeApiClient::fetch_receipts(client.as_ref(), block_hash).await - }, - "node" => self.config.url.clone() - )?; - - Ok(receipts) - } - - async fn output_v0_at_timestamp(&self, timestamp: u64) -> Result { - let client = self.get_ws_client().await?; - let output_v0 = observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_OUTPUT_V0_AT_TIMESTAMP, - async { - 
ManagedModeApiClient::output_v0_at_timestamp(client.as_ref(), timestamp).await - }, - "node" => self.config.url.clone() - )?; - - Ok(output_v0) - } - - async fn pending_output_v0_at_timestamp( - &self, - timestamp: u64, - ) -> Result { - let client = self.get_ws_client().await?; - let output_v0 = observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_PENDING_OUTPUT_V0_AT_TIMESTAMP, - async { - ManagedModeApiClient::pending_output_v0_at_timestamp(client.as_ref(), timestamp).await - }, - "node" => self.config.url.clone() - )?; - - Ok(output_v0) - } - - async fn l2_block_ref_by_timestamp(&self, timestamp: u64) -> Result { - let client = self.get_ws_client().await?; - let block_info = observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_L2_BLOCK_REF_BY_TIMESTAMP, - async { - ManagedModeApiClient::l2_block_ref_by_timestamp(client.as_ref(), timestamp).await - }, - "node" => self.config.url.clone() - )?; - - Ok(block_info) - } - - async fn block_ref_by_number(&self, block_number: u64) -> Result { - let client = self.get_ws_client().await?; - let block_info = observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_BLOCK_REF_BY_NUMBER, - async { - ManagedModeApiClient::l2_block_ref_by_number(client.as_ref(), block_number).await - }, - "node" => self.config.url.clone() - )?; - - Ok(block_info) - } - - async fn reset_pre_interop(&self) -> Result<(), ClientError> { - let client = self.get_ws_client().await?; - observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - 
Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_RESET_PRE_INTEROP, - async { - ManagedModeApiClient::reset_pre_interop(client.as_ref()).await - }, - "node" => self.config.url.clone() - )?; - Ok(()) - } - - async fn reset( - &self, - unsafe_id: BlockNumHash, - cross_unsafe_id: BlockNumHash, - local_safe_id: BlockNumHash, - cross_safe_id: BlockNumHash, - finalised_id: BlockNumHash, - ) -> Result<(), ClientError> { - let client = self.get_ws_client().await?; - observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_RESET, - async { - ManagedModeApiClient::reset(client.as_ref(), unsafe_id, cross_unsafe_id, local_safe_id, cross_safe_id, finalised_id).await - }, - "node" => self.config.url.clone() - )?; - Ok(()) - } - - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ClientError> { - let client = self.get_ws_client().await?; - observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_INVALIDATE_BLOCK, - async { - ManagedModeApiClient::invalidate_block(client.as_ref(), seal).await - }, - "node" => self.config.url.clone() - )?; - Ok(()) - } - - async fn provide_l1(&self, block_info: BlockInfo) -> Result<(), ClientError> { - let client = self.get_ws_client().await?; - observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_PROVIDE_L1, - async { - ManagedModeApiClient::provide_l1(client.as_ref(), block_info).await - }, - "node" => self.config.url.clone() - )?; - Ok(()) - } - - async fn update_finalized(&self, 
finalized_block_id: BlockNumHash) -> Result<(), ClientError> { - let client = self.get_ws_client().await?; - observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_UPDATE_FINALIZED, - async { - ManagedModeApiClient::update_finalized(client.as_ref(), finalized_block_id).await - }, - "node" => self.config.url.clone() - )?; - Ok(()) - } - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ClientError> { - let client = self.get_ws_client().await?; - observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_UPDATE_CROSS_UNSAFE, - async { - ManagedModeApiClient::update_cross_unsafe(client.as_ref(), cross_unsafe_block_id).await - }, - "node" => self.config.url.clone() - )?; - Ok(()) - } - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ClientError> { - let client = self.get_ws_client().await?; - observe_metrics_for_result_async!( - Metrics::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - Metrics::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - Metrics::RPC_METHOD_UPDATE_CROSS_SAFE, - async { - ManagedModeApiClient::update_cross_safe(client.as_ref(), derived_block_id, source_block_id).await - }, - "node" => self.config.url.clone() - )?; - Ok(()) - } -} diff --git a/rust/kona/crates/supervisor/core/src/syncnode/command.rs b/rust/kona/crates/supervisor/core/src/syncnode/command.rs deleted file mode 100644 index 92b96e6c70398..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/command.rs +++ /dev/null @@ -1,36 +0,0 @@ -use alloy_eips::BlockNumHash; -use kona_supervisor_types::BlockSeal; - 
-/// Commands for managing a node in the supervisor. -/// These commands are sent to the managed node actor to perform various operations. -#[derive(Debug, PartialEq, Eq)] -pub enum ManagedNodeCommand { - /// Updates the finalized block in the managed node. - UpdateFinalized { - /// [`BlockNumHash`] of the finalized block. - block_id: BlockNumHash, - }, - - /// Updates the cross-unsafe block in the managed node. - UpdateCrossUnsafe { - /// [`BlockNumHash`] of the cross-unsafe block. - block_id: BlockNumHash, - }, - - /// Updates the cross-safe block in the managed node. - UpdateCrossSafe { - /// [`BlockNumHash`] of the source block. - source_block_id: BlockNumHash, - /// [`BlockNumHash`] of the derived block. - derived_block_id: BlockNumHash, - }, - - /// Resets the managed node. - Reset {}, - - /// Asks managed node to invalidate the block. - InvalidateBlock { - /// [`BlockSeal`] of the block to invalidate. - seal: BlockSeal, - }, -} diff --git a/rust/kona/crates/supervisor/core/src/syncnode/error.rs b/rust/kona/crates/supervisor/core/src/syncnode/error.rs deleted file mode 100644 index 859938e01cbf9..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/error.rs +++ /dev/null @@ -1,67 +0,0 @@ -use kona_supervisor_storage::StorageError; -use thiserror::Error; - -/// Represents various errors that can occur during node management. -#[derive(Debug, Error, PartialEq, Eq)] -pub enum ManagedNodeError { - /// Represents an error that occurred while starting the managed node. - #[error(transparent)] - ClientError(#[from] ClientError), - - /// Represents an error that occurred while fetching data from the storage. - #[error(transparent)] - StorageError(#[from] StorageError), - - /// Unable to successfully fetch block. - #[error("failed to get block by number, number: {0}")] - GetBlockByNumberFailed(u64), - - /// Represents an error that occurred while sending an event to the channel. 
- #[error("failed to send event to channel: {0}")] - ChannelSendFailed(String), - - /// Represents an error that occurred while resetting the managed node. - #[error("failed to reset the managed node")] - ResetFailed, -} - -/// Error establishing authenticated connection to managed node. -#[derive(Debug, Error, PartialEq, Eq)] -pub enum AuthenticationError { - /// Missing valid JWT secret for authentication header. - #[error("jwt secret not found or invalid")] - InvalidJwt, - /// Invalid header format. - #[error("invalid authorization header")] - InvalidHeader, -} - -/// Represents errors that can occur while interacting with the managed node client. -#[derive(Debug, Error)] -pub enum ClientError { - /// Represents an error that occurred while starting the managed node. - #[error(transparent)] - Client(#[from] jsonrpsee::core::ClientError), - - /// Represents an error that occurred while authenticating to the managed node. - #[error("failed to authenticate: {0}")] - Authentication(#[from] AuthenticationError), - - /// Represents an error that occurred while parsing a chain ID from a string. - #[error(transparent)] - ChainIdParseError(#[from] std::num::ParseIntError), -} - -impl PartialEq for ClientError { - fn eq(&self, other: &Self) -> bool { - use ClientError::{Authentication, ChainIdParseError, Client}; - match (self, other) { - (Client(a), Client(b)) => a.to_string() == b.to_string(), - (Authentication(a), Authentication(b)) => a == b, - (ChainIdParseError(a), ChainIdParseError(b)) => a == b, - _ => false, - } - } -} - -impl Eq for ClientError {} diff --git a/rust/kona/crates/supervisor/core/src/syncnode/metrics.rs b/rust/kona/crates/supervisor/core/src/syncnode/metrics.rs deleted file mode 100644 index fa668173e89df..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/metrics.rs +++ /dev/null @@ -1,103 +0,0 @@ -//! Metrics for the Managed Mode RPC client. - -/// Container for metrics. 
-#[derive(Debug, Clone)] -pub(super) struct Metrics; - -impl Metrics { - // --- Metric Names --- - /// Identifier for the counter of successful RPC requests. Labels: `method`. - pub(crate) const MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL: &'static str = - "managed_node_rpc_requests_success_total"; - /// Identifier for the counter of failed RPC requests. Labels: `method`. - pub(crate) const MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL: &'static str = - "managed_node_rpc_requests_error_total"; - /// Identifier for the histogram of RPC request durations. Labels: `method`. - pub(crate) const MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS: &'static str = - "managed_node_rpc_request_duration_seconds"; - - pub(crate) const RPC_METHOD_CHAIN_ID: &'static str = "chain_id"; - pub(crate) const RPC_METHOD_SUBSCRIBE_EVENTS: &'static str = "subscribe_events"; - pub(crate) const RPC_METHOD_FETCH_RECEIPTS: &'static str = "fetch_receipts"; - pub(crate) const RPC_METHOD_OUTPUT_V0_AT_TIMESTAMP: &'static str = "output_v0_at_timestamp"; - pub(crate) const RPC_METHOD_PENDING_OUTPUT_V0_AT_TIMESTAMP: &'static str = - "pending_output_v0_at_timestamp"; - pub(crate) const RPC_METHOD_L2_BLOCK_REF_BY_TIMESTAMP: &'static str = - "l2_block_ref_by_timestamp"; - pub(crate) const RPC_METHOD_BLOCK_REF_BY_NUMBER: &'static str = "block_ref_by_number"; - pub(crate) const RPC_METHOD_RESET_PRE_INTEROP: &'static str = "reset_pre_interop"; - pub(crate) const RPC_METHOD_RESET: &'static str = "reset"; - pub(crate) const RPC_METHOD_INVALIDATE_BLOCK: &'static str = "invalidate_block"; - pub(crate) const RPC_METHOD_PROVIDE_L1: &'static str = "provide_l1"; - pub(crate) const RPC_METHOD_UPDATE_FINALIZED: &'static str = "update_finalized"; - pub(crate) const RPC_METHOD_UPDATE_CROSS_UNSAFE: &'static str = "update_cross_unsafe"; - pub(crate) const RPC_METHOD_UPDATE_CROSS_SAFE: &'static str = "update_cross_safe"; - - /// Initializes metrics for the Supervisor RPC service. 
- /// - /// This does two things: - /// * Describes various metrics. - /// * Initializes metrics with their labels to 0 so they can be queried immediately. - pub(crate) fn init(node: &str) { - Self::describe(); - Self::zero(node); - } - - /// Describes metrics used in the Supervisor RPC service. - fn describe() { - metrics::describe_counter!( - Self::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - metrics::Unit::Count, - "Total number of successful RPC requests processed by the managed mode client" - ); - metrics::describe_counter!( - Self::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - metrics::Unit::Count, - "Total number of failed RPC requests processed by the managed mode client" - ); - metrics::describe_histogram!( - Self::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - metrics::Unit::Seconds, - "Duration of RPC requests processed by the managed mode client" - ); - } - - fn zero_rpc_method(method: &str, node: &str) { - metrics::counter!( - Self::MANAGED_NODE_RPC_REQUESTS_SUCCESS_TOTAL, - "method" => method.to_string(), - "node" => node.to_string() - ) - .increment(0); - metrics::counter!( - Self::MANAGED_NODE_RPC_REQUESTS_ERROR_TOTAL, - "method" => method.to_string(), - "node" => node.to_string() - ) - .increment(0); - metrics::histogram!( - Self::MANAGED_NODE_RPC_REQUEST_DURATION_SECONDS, - "method" => method.to_string(), - "node" => node.to_string() - ) - .record(0.0); - } - - /// Initializes metrics with their labels to `0` so they appear in Prometheus from the start. 
- fn zero(node: &str) { - Self::zero_rpc_method(Self::RPC_METHOD_CHAIN_ID, node); - Self::zero_rpc_method(Self::RPC_METHOD_SUBSCRIBE_EVENTS, node); - Self::zero_rpc_method(Self::RPC_METHOD_FETCH_RECEIPTS, node); - Self::zero_rpc_method(Self::RPC_METHOD_OUTPUT_V0_AT_TIMESTAMP, node); - Self::zero_rpc_method(Self::RPC_METHOD_PENDING_OUTPUT_V0_AT_TIMESTAMP, node); - Self::zero_rpc_method(Self::RPC_METHOD_L2_BLOCK_REF_BY_TIMESTAMP, node); - Self::zero_rpc_method(Self::RPC_METHOD_BLOCK_REF_BY_NUMBER, node); - Self::zero_rpc_method(Self::RPC_METHOD_RESET_PRE_INTEROP, node); - Self::zero_rpc_method(Self::RPC_METHOD_RESET, node); - Self::zero_rpc_method(Self::RPC_METHOD_INVALIDATE_BLOCK, node); - Self::zero_rpc_method(Self::RPC_METHOD_PROVIDE_L1, node); - Self::zero_rpc_method(Self::RPC_METHOD_UPDATE_FINALIZED, node); - Self::zero_rpc_method(Self::RPC_METHOD_UPDATE_CROSS_UNSAFE, node); - Self::zero_rpc_method(Self::RPC_METHOD_UPDATE_CROSS_SAFE, node); - } -} diff --git a/rust/kona/crates/supervisor/core/src/syncnode/mod.rs b/rust/kona/crates/supervisor/core/src/syncnode/mod.rs deleted file mode 100644 index d248524e796af..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/mod.rs +++ /dev/null @@ -1,23 +0,0 @@ -//! Supervisor core syncnode module -//! This module provides the core functionality for managing nodes in the supervisor environment. 
- -mod command; -pub use command::ManagedNodeCommand; - -mod node; -pub use node::ManagedNode; - -mod error; -pub use error::{AuthenticationError, ClientError, ManagedNodeError}; - -mod traits; -pub use traits::{ - BlockProvider, ManagedNodeController, ManagedNodeDataProvider, ManagedNodeProvider, - SubscriptionHandler, -}; - -mod client; -pub use client::{Client, ClientConfig, ManagedNodeClient}; - -pub(super) mod metrics; -pub(super) mod resetter; diff --git a/rust/kona/crates/supervisor/core/src/syncnode/node.rs b/rust/kona/crates/supervisor/core/src/syncnode/node.rs deleted file mode 100644 index 610737d3a3002..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/node.rs +++ /dev/null @@ -1,943 +0,0 @@ -//! [`ManagedNode`] implementation for handling events from the managed node. - -use super::{ - BlockProvider, ManagedNodeClient, ManagedNodeController, ManagedNodeDataProvider, - ManagedNodeError, SubscriptionHandler, resetter::Resetter, -}; -use crate::event::ChainEvent; -use alloy_eips::BlockNumberOrTag; -use alloy_network::Ethereum; -use alloy_primitives::{B256, ChainId}; -use alloy_provider::{Provider, RootProvider}; -use alloy_rpc_types_eth::BlockNumHash; -use async_trait::async_trait; -use kona_interop::{BlockReplacement, DerivedRefPair}; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{DerivationStorageReader, HeadRefStorageReader, LogStorageReader}; -use kona_supervisor_types::{BlockSeal, OutputV0, Receipts}; -use std::sync::Arc; -use tokio::sync::{Mutex, mpsc}; -use tracing::{debug, error, trace, warn}; - -/// [`ManagedNode`] processes events dispatched from the managed node. -/// -/// It implements `SubscriptionHandler`, forwards resulting `ChainEvent`s to the chain -/// processor, and delegates control operations to the underlying client/resetter. -/// The `WebSocket` subscription lifecycle (subscription creation, reconnection/restart) -/// is managed by the supervisor actor and the client, not by this type. 
-#[derive(Debug)] -pub struct ManagedNode { - /// The attached web socket client - client: Arc, - /// Shared L1 provider for fetching receipts - l1_provider: RootProvider, - /// Resetter for handling node resets - resetter: Arc>, - /// Channel for sending events to the chain processor - chain_event_sender: mpsc::Sender, - - /// Cached chain ID - chain_id: Mutex>, -} - -impl ManagedNode -where - DB: LogStorageReader + DerivationStorageReader + HeadRefStorageReader + Send + Sync + 'static, - C: ManagedNodeClient + Send + Sync + 'static, -{ - /// Creates a new [`ManagedNode`] with the specified client. - pub fn new( - client: Arc, - db_provider: Arc, - l1_provider: RootProvider, - chain_event_sender: mpsc::Sender, - ) -> Self { - let resetter = Arc::new(Resetter::new(client.clone(), l1_provider.clone(), db_provider)); - - Self { client, resetter, l1_provider, chain_event_sender, chain_id: Mutex::new(None) } - } - - /// Returns the [`ChainId`] of the [`ManagedNode`]. - /// If the chain ID is already cached, it returns that. - /// If not, it fetches the chain ID from the managed node. 
- pub async fn chain_id(&self) -> Result { - // we are caching the chain ID here to avoid multiple calls to the client - // there is a possibility that chain ID might be being cached in the client already - // but we are caching it here to make sure it caches in the `ManagedNode` context - let mut cache = self.chain_id.lock().await; - if let Some(chain_id) = *cache { - Ok(chain_id) - } else { - let chain_id = self.client.chain_id().await?; - *cache = Some(chain_id); - Ok(chain_id) - } - } -} - -#[async_trait] -impl SubscriptionHandler for ManagedNode -where - DB: LogStorageReader + DerivationStorageReader + HeadRefStorageReader + Send + Sync + 'static, - C: ManagedNodeClient + Send + Sync + 'static, -{ - async fn handle_exhaust_l1( - &self, - derived_ref_pair: &DerivedRefPair, - ) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!( - target: "supervisor::managed_node", - %chain_id, - %derived_ref_pair, - "Handling L1 exhaust event" - ); - - let next_block_number = derived_ref_pair.source.number + 1; - let next_block = self - .l1_provider - .get_block_by_number(BlockNumberOrTag::Number(next_block_number)) - .await - .map_err(|err| { - error!(target: "supervisor::managed_node", %chain_id, %err, "Failed to fetch next L1 block"); - ManagedNodeError::GetBlockByNumberFailed(next_block_number) - })?; - - let block = match next_block { - Some(block) => block, - None => { - // If the block is None, it means the block is either empty or unavailable. - // ignore this case - return Ok(()); - } - }; - - let new_source = BlockInfo { - hash: block.header.hash, - number: block.header.number, - parent_hash: block.header.parent_hash, - timestamp: block.header.timestamp, - }; - - if new_source.parent_hash != derived_ref_pair.source.hash { - // this could happen due to a reorg. 
- // this case should be handled by the reorg manager - debug!( - target: "supervisor::managed_node", - %chain_id, - %new_source, - current_source = %derived_ref_pair.source, - "Parent hash mismatch. Possible reorg detected" - ); - } - - self.client.provide_l1(new_source).await.inspect_err(|err| { - error!( - target: "supervisor::managed_node", - %chain_id, - %new_source, - %err, - "Failed to provide L1 block" - ); - })?; - Ok(()) - } - - async fn handle_reset(&self, reset_id: &str) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, reset_id, "Handling reset event"); - - self.resetter.reset().await?; - Ok(()) - } - - async fn handle_unsafe_block(&self, unsafe_block: &BlockInfo) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, %unsafe_block, "Unsafe block event received"); - - self.chain_event_sender.send(ChainEvent::UnsafeBlock { block: *unsafe_block }).await.map_err(|err| { - warn!(target: "supervisor::managed_node", %chain_id, %err, "Failed to send unsafe block event"); - ManagedNodeError::ChannelSendFailed(err.to_string()) - })?; - Ok(()) - } - - async fn handle_derivation_update( - &self, - derived_ref_pair: &DerivedRefPair, - ) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, "Derivation update event received"); - - self.chain_event_sender.send(ChainEvent::DerivedBlock { derived_ref_pair: *derived_ref_pair }).await.map_err(|err| { - warn!(target: "supervisor::managed_node", %chain_id, %err, "Failed to send derivation update event"); - ManagedNodeError::ChannelSendFailed(err.to_string()) - })?; - Ok(()) - } - - async fn handle_replace_block( - &self, - replacement: &BlockReplacement, - ) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, 
%replacement, "Block replacement received"); - - self.chain_event_sender.send(ChainEvent::BlockReplaced { replacement: *replacement }).await.map_err(|err| { - warn!(target: "supervisor::managed_node", %chain_id, %err, "Failed to send block replacement event"); - ManagedNodeError::ChannelSendFailed(err.to_string()) - })?; - Ok(()) - } - - async fn handle_derivation_origin_update( - &self, - origin: &BlockInfo, - ) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, %origin, "Derivation origin update received"); - - self.chain_event_sender.send(ChainEvent::DerivationOriginUpdate { origin: *origin }).await.map_err(|err| { - warn!(target: "supervisor::managed_node", %chain_id, %err, "Failed to send derivation origin update event"); - ManagedNodeError::ChannelSendFailed(err.to_string()) - })?; - Ok(()) - } -} - -/// Implements [`BlockProvider`] for [`ManagedNode`] by delegating to the underlying `WebSocket` -/// client. 
-#[async_trait] -impl BlockProvider for ManagedNode -where - DB: LogStorageReader + DerivationStorageReader + HeadRefStorageReader + Send + Sync + 'static, - C: ManagedNodeClient + Send + Sync + 'static, -{ - async fn block_by_number(&self, block_number: u64) -> Result { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, block_number, "Fetching block by number"); - - let block = self.client.block_ref_by_number(block_number).await?; - Ok(block) - } - async fn fetch_receipts(&self, block_hash: B256) -> Result { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, %block_hash, "Fetching receipts for block"); - - let receipt = self.client.fetch_receipts(block_hash).await?; - Ok(receipt) - } -} - -#[async_trait] -impl ManagedNodeDataProvider for ManagedNode -where - DB: LogStorageReader + DerivationStorageReader + HeadRefStorageReader + Send + Sync + 'static, - C: ManagedNodeClient + Send + Sync + 'static, -{ - async fn output_v0_at_timestamp(&self, timestamp: u64) -> Result { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, timestamp, "Fetching output v0 at timestamp"); - - let outputv0 = self.client.output_v0_at_timestamp(timestamp).await?; - Ok(outputv0) - } - - async fn pending_output_v0_at_timestamp( - &self, - timestamp: u64, - ) -> Result { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, timestamp, "Fetching pending output v0 at timestamp"); - - let outputv0 = self.client.pending_output_v0_at_timestamp(timestamp).await?; - Ok(outputv0) - } - - async fn l2_block_ref_by_timestamp( - &self, - timestamp: u64, - ) -> Result { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, timestamp, "Fetching L2 block ref by timestamp"); - - let block = self.client.l2_block_ref_by_timestamp(timestamp).await?; - Ok(block) - } -} - -#[async_trait] -impl 
ManagedNodeController for ManagedNode -where - DB: LogStorageReader + DerivationStorageReader + HeadRefStorageReader + Send + Sync + 'static, - C: ManagedNodeClient + Send + Sync + 'static, -{ - async fn update_finalized( - &self, - finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!( - target: "supervisor::managed_node", - %chain_id, - finalized_block_number = finalized_block_id.number, - "Updating finalized block" - ); - - self.client.update_finalized(finalized_block_id).await?; - Ok(()) - } - - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!( - target: "supervisor::managed_node", - %chain_id, - cross_unsafe_block_number = cross_unsafe_block_id.number, - "Updating cross unsafe block", - ); - - self.client.update_cross_unsafe(cross_unsafe_block_id).await?; - Ok(()) - } - - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!( - target: "supervisor::managed_node", - %chain_id, - source_block_number = source_block_id.number, - derived_block_number = derived_block_id.number, - "Updating cross safe block" - ); - self.client.update_cross_safe(source_block_id, derived_block_id).await?; - Ok(()) - } - - async fn reset(&self) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!(target: "supervisor::managed_node", %chain_id, "Resetting managed node state"); - - self.resetter.reset().await?; - Ok(()) - } - - async fn invalidate_block(&self, block_seal: BlockSeal) -> Result<(), ManagedNodeError> { - let chain_id = self.chain_id().await?; - trace!( - target: "supervisor::managed_node", - %chain_id, - block_number = block_seal.number, - "Invalidating block" - ); - - self.client.invalidate_block(block_seal).await?; - Ok(()) - 
} -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::ClientError; - use alloy_primitives::{B256, ChainId, hex::FromHex}; - use alloy_provider::RootProvider; - use alloy_rpc_client::RpcClient; - use alloy_transport::mock::*; - use jsonrpsee::core::client::Subscription; - use kona_interop::{BlockReplacement, DerivedRefPair, SafetyLevel}; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{ - DerivationStorageReader, HeadRefStorageReader, LogStorageReader, StorageError, - }; - use kona_supervisor_types::{BlockSeal, Log, OutputV0, Receipts, SubscriptionEvent, SuperHead}; - use mockall::{mock, predicate::*}; - use std::sync::Arc; - use tokio::sync::mpsc; - - mock! { - #[derive(Debug)] - pub Client {} - - #[async_trait] - impl ManagedNodeClient for Client { - async fn chain_id(&self) -> Result; - async fn subscribe_events(&self) -> Result, ClientError>; - async fn fetch_receipts(&self, block_hash: B256) -> Result; - async fn output_v0_at_timestamp(&self, timestamp: u64) -> Result; - async fn pending_output_v0_at_timestamp(&self, timestamp: u64) -> Result; - async fn l2_block_ref_by_timestamp(&self, timestamp: u64) -> Result; - async fn block_ref_by_number(&self, block_number: u64) -> Result; - async fn reset_pre_interop(&self) -> Result<(), ClientError>; - async fn reset(&self, unsafe_id: BlockNumHash, cross_unsafe_id: BlockNumHash, local_safe_id: BlockNumHash, cross_safe_id: BlockNumHash, finalised_id: BlockNumHash) -> Result<(), ClientError>; - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ClientError>; - async fn provide_l1(&self, block_info: BlockInfo) -> Result<(), ClientError>; - async fn update_finalized(&self, finalized_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn update_cross_unsafe(&self, cross_unsafe_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn update_cross_safe(&self, source_block_id: BlockNumHash, derived_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn 
reset_ws_client(&self); - } - } - - mock! { - #[derive(Debug)] - pub Db {} - - impl LogStorageReader for Db { - fn get_block(&self, block_number: u64) -> Result; - fn get_latest_block(&self) -> Result; - fn get_log(&self, block_number: u64, log_index: u32) -> Result; - fn get_logs(&self, block_number: u64) -> Result, StorageError>; - } - - impl DerivationStorageReader for Db { - fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result; - fn latest_derived_block_at_source(&self, _source_block_id: BlockNumHash) -> Result; - fn latest_derivation_state(&self) -> Result; - fn get_source_block(&self, source_block_number: u64) -> Result; - fn get_activation_block(&self) -> Result; - } - - impl HeadRefStorageReader for Db { - fn get_safety_head_ref(&self, level: SafetyLevel) -> Result; - fn get_super_head(&self) -> Result; - } - } - - #[tokio::test] - async fn test_chain_id_caching() { - let mut client = MockClient::new(); - - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - // First call fetches from client - let id1 = node.chain_id().await.unwrap(); - assert_eq!(id1, ChainId::from(42u64)); - // Second call uses cache - let id2 = node.chain_id().await.unwrap(); - assert_eq!(id2, ChainId::from(42u64)); - } - - #[tokio::test] - async fn test_handle_unsafe_block_sends_event() { - let unsafe_block = - BlockInfo { hash: B256::ZERO, number: 1, parent_hash: B256::ZERO, timestamp: 123 }; - - let mut client = MockClient::new(); - - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = 
Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, mut rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let result = node.handle_unsafe_block(&unsafe_block).await; - assert!(result.is_ok()); - - let event = rx.recv().await.unwrap(); - match event { - ChainEvent::UnsafeBlock { block } => assert_eq!(block.number, 1), - _ => panic!("Wrong event"), - } - } - - #[tokio::test] - async fn test_handle_derivation_update_sends_event() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, mut rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let derived_ref_pair = DerivedRefPair { - source: BlockInfo::new(B256::from([0u8; 32]), 0, B256::ZERO, 0), - derived: BlockInfo::new(B256::from([1u8; 32]), 1, B256::ZERO, 0), - }; - - let result = node.handle_derivation_update(&derived_ref_pair).await; - assert!(result.is_ok()); - - let event = rx.recv().await.unwrap(); - match event { - ChainEvent::DerivedBlock { derived_ref_pair: pair } => { - assert_eq!(pair, derived_ref_pair); - } - _ => panic!("Wrong event"), - } - } - - #[tokio::test] - async fn test_handle_replace_block_sends_event() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, mut rx) = mpsc::channel(10); - let node = 
ManagedNode::new(client.clone(), db, l1_provider, tx); - - let replacement = BlockReplacement { - replacement: BlockInfo::new(B256::from([1u8; 32]), 1, B256::ZERO, 0), - invalidated: B256::from([2u8; 32]), - }; - - let result = node.handle_replace_block(&replacement).await; - assert!(result.is_ok()); - - let event = rx.recv().await.unwrap(); - match event { - ChainEvent::BlockReplaced { replacement: rep } => assert_eq!(rep, replacement), - _ => panic!("Wrong event"), - } - } - - #[tokio::test] - async fn test_handle_derivation_origin_update_sends_event() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, mut rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let origin = - BlockInfo { hash: B256::ZERO, number: 10, parent_hash: B256::ZERO, timestamp: 12345 }; - - let result = node.handle_derivation_origin_update(&origin).await; - assert!(result.is_ok()); - - let event = rx.recv().await.unwrap(); - match event { - ChainEvent::DerivationOriginUpdate { origin: block } => assert_eq!(block.number, 10), - _ => panic!("Wrong event"), - } - } - - #[tokio::test] - async fn test_handle_exhaust_l1_calls_provide_l1_on_success() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client.expect_provide_l1().times(1).returning(|_| Ok(())); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - - let derived_ref_pair = DerivedRefPair { - source: BlockInfo { - hash: B256::from_hex( - "0x1f68ac259155e2f38211ddad0f0a15394d55417b185a93923e2abe71bb7a4d6d", - ) - .unwrap(), - number: 5, - parent_hash: B256::from([14u8; 32]), - timestamp: 300, - }, - 
derived: BlockInfo { - hash: B256::from([11u8; 32]), - number: 40, - parent_hash: B256::from([12u8; 32]), - timestamp: 301, - }, - }; - - let next_block = r#"{ - "number": "6", - "hash": "0xd5f1812548be429cbdc6376b29611fc49e06f1359758c4ceaaa3b393e2239f9c", - "mixHash": "0x24900fb3da77674a861c428429dce0762707ecb6052325bbd9b3c64e74b5af9d", - "parentHash": "0x1f68ac259155e2f38211ddad0f0a15394d55417b185a93923e2abe71bb7a4d6d", - "nonce": "0x378da40ff335b070", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000100000004080000000000500000000000000020000100000000800001000000004000001000000000000000800040010000020100000000400000010000000000000000040000000000000040000000000000000000000000000000400002400000000000000000000000000000004000004000000000000840000000800000080010004000000001000000800000000000000000000000000000000000800000000000040000000020000000000000000000800000400000000000000000000000600000400000000002000000000000000000000004000000000000000100000000000000000000000000000000000040000900010000000", - "transactionsRoot":"0x4d0c8e91e16bdff538c03211c5c73632ed054d00a7e210c0eb25146c20048126", - "stateRoot": "0x91309efa7e42c1f137f31fe9edbe88ae087e6620d0d59031324da3e2f4f93233", - "receiptsRoot": "0x68461ab700003503a305083630a8fb8d14927238f0bc8b6b3d246c0c64f21f4a", - "miner":"0xb42b6c4a95406c78ff892d270ad20b22642e102d", - "difficulty": "0x66e619a", - "totalDifficulty": "0x1e875d746ae", - "extraData": "0xd583010502846765746885676f312e37856c696e7578", - "size": "0x334", - "gasLimit": "0x47e7c4", - "gasUsed": "0x37993", - "timestamp": "0x5835c54d", - "uncles": [], - "transactions": [ - "0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc", - "0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d" - ], - "baseFeePerGas": "0x7", - "withdrawalsRoot": "0x7a4ecf19774d15cf9c15adf0dd8e8a250c128b26c9e2ab2a08d6c9c8ffbd104f", - "withdrawals": [], - "blobGasUsed": "0x0", - "excessBlobGas": "0x0", 
- "parentBeaconBlockRoot": "0x95c4dbd5b19f6fe3cbc3183be85ff4e85ebe75c5b4fc911f1c91e5b7a554a685" - }"#; - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - - asserter.push(MockResponse::Success(serde_json::from_str(next_block).unwrap())); - - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let result = node.handle_exhaust_l1(&derived_ref_pair).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_exhaust_l1_calls_provide_l1_on_parent_hash_mismatch() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client.expect_provide_l1().times(1).returning(|_| Ok(())); // Should be called - - let client = Arc::new(client); - let db = MockDb::new(); - - let derived_ref_pair = DerivedRefPair { - source: BlockInfo { - hash: B256::from([1u8; 32]), // This will NOT match parent_hash below - number: 5, - parent_hash: B256::from([14u8; 32]), - timestamp: 300, - }, - derived: BlockInfo { - hash: B256::from([11u8; 32]), - number: 40, - parent_hash: B256::from([12u8; 32]), - timestamp: 301, - }, - }; - - // Block with mismatched parent_hash - let next_block = r#"{ - "number": "10", - "hash": "0xd5f1812548be429cbdc6376b29611fc49e06f1359758c4ceaaa3b393e2239f9c", - "mixHash": "0x24900fb3da77674a861c428429dce0762707ecb6052325bbd9b3c64e74b5af9d", - "parentHash": "0x1f68ac259155e2f38211ddad0f0a15394d55417b185a93923e2abe71bb7a4d6d", - "nonce": "0x378da40ff335b070", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": 
"0x00000000000000100000004080000000000500000000000000020000100000000800001000000004000001000000000000000800040010000020100000000400000010000000000000000040000000000000040000000000000000000000000000000400002400000000000000000000000000000004000004000000000000840000000800000080010004000000001000000800000000000000000000000000000000000800000000000040000000020000000000000000000800000400000000000000000000000600000400000000002000000000000000000000004000000000000000100000000000000000000000000000000000040000900010000000", - "transactionsRoot":"0x4d0c8e91e16bdff538c03211c5c73632ed054d00a7e210c0eb25146c20048126", - "stateRoot": "0x91309efa7e42c1f137f31fe9edbe88ae087e6620d0d59031324da3e2f4f93233", - "receiptsRoot": "0x68461ab700003503a305083630a8fb8d14927238f0bc8b6b3d246c0c64f21f4a", - "miner":"0xb42b6c4a95406c78ff892d270ad20b22642e102d", - "difficulty": "0x66e619a", - "totalDifficulty": "0x1e875d746ae", - "extraData": "0xd583010502846765746885676f312e37856c696e7578", - "size": "0x334", - "gasLimit": "0x47e7c4", - "gasUsed": "0x37993", - "timestamp": "0x5835c54d", - "uncles": [], - "transactions": [ - "0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc", - "0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d" - ], - "baseFeePerGas": "0x7", - "withdrawalsRoot": "0x7a4ecf19774d15cf9c15adf0dd8e8a250c128b26c9e2ab2a08d6c9c8ffbd104f", - "withdrawals": [], - "blobGasUsed": "0x0", - "excessBlobGas": "0x0", - "parentBeaconBlockRoot": "0x95c4dbd5b19f6fe3cbc3183be85ff4e85ebe75c5b4fc911f1c91e5b7a554a685" - }"#; - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - - asserter.push(MockResponse::Success(serde_json::from_str(next_block).unwrap())); - - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), Arc::new(db), l1_provider, tx); - - let result = node.handle_exhaust_l1(&derived_ref_pair).await; - 
assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_handle_reset_calls_resetter() { - let mut client = MockClient::new(); - client.expect_chain_id().times(2).returning(|| Ok(ChainId::from(42u64))); - client.expect_reset_pre_interop().times(1).returning(|| Ok(())); - - let mut db = MockDb::new(); - db.expect_latest_derivation_state() - .times(1) - .returning(|| Err(StorageError::DatabaseNotInitialised)); - - let client = Arc::new(client); - let db = Arc::new(db); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - // Just check that it completes without error - let result = node.handle_reset("reset_id").await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_block_by_number_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client.expect_block_ref_by_number().with(eq(10)).times(1).returning(|_| { - Ok(BlockInfo { - hash: B256::from([1u8; 32]), - number: 10, - parent_hash: B256::from([2u8; 32]), - timestamp: 12345, - }) - }); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let block = node.block_by_number(10).await.unwrap(); - assert_eq!(block.number, 10); - assert_eq!(block.hash, B256::from([1u8; 32])); - } - - #[tokio::test] - async fn test_fetch_receipts_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client - .expect_fetch_receipts() - 
.withf(|hash| *hash == B256::from([1u8; 32])) - .times(1) - .returning(|_| Ok(Receipts::default())); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let receipts = node.fetch_receipts(B256::from([1u8; 32])).await.unwrap(); - assert!(receipts.is_empty()); - } - - #[tokio::test] - async fn test_output_v0_at_timestamp_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client - .expect_output_v0_at_timestamp() - .with(eq(12345)) - .times(1) - .returning(|_| Ok(OutputV0::default())); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let output = node.output_v0_at_timestamp(12345).await.unwrap(); - assert_eq!(output, OutputV0::default()); - } - - #[tokio::test] - async fn test_pending_output_v0_at_timestamp_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client - .expect_pending_output_v0_at_timestamp() - .with(eq(54321)) - .times(1) - .returning(|_| Ok(OutputV0::default())); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - 
- let output = node.pending_output_v0_at_timestamp(54321).await.unwrap(); - assert_eq!(output, OutputV0::default()); - } - - #[tokio::test] - async fn test_l2_block_ref_by_timestamp_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client.expect_l2_block_ref_by_timestamp().with(eq(11111)).times(1).returning(|_| { - Ok(BlockInfo { - hash: B256::from([9u8; 32]), - number: 99, - parent_hash: B256::from([8u8; 32]), - timestamp: 11111, - }) - }); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let block = node.l2_block_ref_by_timestamp(11111).await.unwrap(); - assert_eq!(block.number, 99); - assert_eq!(block.timestamp, 11111); - } - - #[tokio::test] - async fn test_update_finalized_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client - .expect_update_finalized() - .withf(|block_id| block_id.number == 100) - .times(1) - .returning(|_| Ok(())); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let block_id = BlockNumHash { number: 100, hash: B256::from([1u8; 32]) }; - let result = node.update_finalized(block_id).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_update_cross_unsafe_delegates_to_client() { - let mut client = MockClient::new(); - 
client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client - .expect_update_cross_unsafe() - .withf(|block_id| block_id.number == 200) - .times(1) - .returning(|_| Ok(())); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let block_id = BlockNumHash { number: 200, hash: B256::from([2u8; 32]) }; - let result = node.update_cross_unsafe(block_id).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_update_cross_safe_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client - .expect_update_cross_safe() - .withf(|source, derived| source.number == 300 && derived.number == 301) - .times(1) - .returning(|_, _| Ok(())); - - let client = Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let source_block_id = BlockNumHash { number: 300, hash: B256::from([3u8; 32]) }; - let derived_block_id = BlockNumHash { number: 301, hash: B256::from([4u8; 32]) }; - let result = node.update_cross_safe(source_block_id, derived_block_id).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_invalidate_block_delegates_to_client() { - let mut client = MockClient::new(); - client.expect_chain_id().times(1).returning(|| Ok(ChainId::from(42u64))); - client - .expect_invalidate_block() - .withf(|seal| seal.number == 400) - .times(1) - .returning(|_| Ok(())); - - let client = 
Arc::new(client); - let db = Arc::new(MockDb::new()); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let block_seal = BlockSeal { number: 400, hash: B256::from([5u8; 32]), timestamp: 0 }; - let result = node.invalidate_block(block_seal).await; - assert!(result.is_ok()); - } - - #[tokio::test] - async fn test_reset_calls_resetter() { - let mut client = MockClient::new(); - client.expect_chain_id().times(2).returning(|| Ok(ChainId::from(42u64))); - client.expect_reset_pre_interop().times(1).returning(|| Ok(())); - - let mut db = MockDb::new(); - db.expect_latest_derivation_state() - .times(1) - .returning(|| Err(StorageError::DatabaseNotInitialised)); - - let client = Arc::new(client); - let db = Arc::new(db); - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let (tx, _rx) = mpsc::channel(10); - let node = ManagedNode::new(client.clone(), db, l1_provider, tx); - - let result = node.reset().await; - assert!(result.is_ok()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/syncnode/resetter.rs b/rust/kona/crates/supervisor/core/src/syncnode/resetter.rs deleted file mode 100644 index 84a83f7ded638..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/resetter.rs +++ /dev/null @@ -1,586 +0,0 @@ -use super::{ManagedNodeClient, ManagedNodeError}; -use alloy_eips::{BlockNumHash, BlockNumberOrTag}; -use alloy_network::Ethereum; -use alloy_primitives::ChainId; -use alloy_provider::{Provider, RootProvider}; -use kona_protocol::BlockInfo; -use kona_supervisor_storage::{DerivationStorageReader, HeadRefStorageReader, StorageError}; -use kona_supervisor_types::SuperHead; -use std::sync::Arc; -use tokio::sync::Mutex; -use 
tracing::{error, info, warn}; - -#[derive(Debug)] -pub(super) struct Resetter { - client: Arc, - l1_provider: RootProvider, - db_provider: Arc, - reset_guard: Mutex<()>, -} - -impl Resetter -where - DB: HeadRefStorageReader + DerivationStorageReader + Send + Sync + 'static, - C: ManagedNodeClient + Send + Sync + 'static, -{ - /// Creates a new [`Resetter`] with the specified client. - pub(super) fn new( - client: Arc, - l1_provider: RootProvider, - db_provider: Arc, - ) -> Self { - Self { client, l1_provider, db_provider, reset_guard: Mutex::new(()) } - } - - /// Resets the node using the latest super head. - pub(crate) async fn reset(&self) -> Result<(), ManagedNodeError> { - // get the chain ID to log it, this is useful for debugging - // no performance impact as it is cached in the client - let chain_id = self.client.chain_id().await?; - let _guard = self.reset_guard.lock().await; - - let local_safe = match self.get_latest_valid_local_safe(chain_id).await { - Ok(block) => block, - // todo: require refactor and corner case handling - Err(ManagedNodeError::StorageError(StorageError::DatabaseNotInitialised)) => { - self.reset_pre_interop(chain_id).await?; - return Ok(()); - } - Err(err) => { - error!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to get latest valid derived block"); - return Err(ManagedNodeError::ResetFailed); - } - }; - - // check if the source of valid local_safe is canonical - // If the source block is not canonical, it mean there is a reorg on L1 - // this makes sure that we always reset to a valid state - let source = self.db_provider.derived_to_source(local_safe.id())?; - if !self.is_canonical(chain_id, source.id()).await? { - warn!(target: "supervisor::syncnode_resetter", %chain_id, %source, "Source block for the valid local safe is not canonical"); - return Err(ManagedNodeError::ResetFailed); - } - - let SuperHead { cross_unsafe, cross_safe, finalized, .. 
} = - self.db_provider.get_super_head().inspect_err( - |err| error!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to get super head"), - )?; - - // using the local safe block as the local unsafe as well - let local_unsafe = local_safe; - - let mut cross_unsafe = cross_unsafe.unwrap_or_else(BlockInfo::default); - if cross_unsafe.number > local_unsafe.number { - cross_unsafe = local_unsafe; - } - - let mut cross_safe = cross_safe.unwrap_or_else(BlockInfo::default); - if cross_safe.number > local_safe.number { - cross_safe = local_safe; - } - - let mut finalized = match finalized { - Some(block) => block, - // fall back to activation block if finalized is None - None => self.db_provider.get_activation_block()?, - }; - - if finalized.number > local_safe.number { - finalized = local_safe; - } - - info!(target: "supervisor::syncnode_resetter", - %chain_id, - %local_unsafe, - %cross_unsafe, - %local_safe, - %cross_safe, - %finalized, - "Resetting managed node with latest information", - ); - - self.client - .reset( - local_unsafe.id(), - cross_unsafe.id(), - local_safe.id(), - cross_safe.id(), - finalized.id(), - ) - .await - .inspect_err(|err| { - error!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to reset managed node"); - })?; - Ok(()) - } - - async fn reset_pre_interop(&self, chain_id: ChainId) -> Result<(), ManagedNodeError> { - info!(target: "supervisor::syncnode_resetter", %chain_id, "Resetting the node to pre-interop state"); - - self.client.reset_pre_interop().await.inspect_err(|err| { - error!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to reset managed node to pre-interop state"); - })?; - Ok(()) - } - - async fn get_latest_valid_local_safe( - &self, - chain_id: ChainId, - ) -> Result { - let latest_derivation_state = self.db_provider.latest_derivation_state()?; - let mut local_safe = latest_derivation_state.derived; - - loop { - let node_block = 
self.client.block_ref_by_number(local_safe.number).await.inspect_err( - |err| error!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to get block by number"), - )?; - - // If the local safe block matches the node block, we can return the super - // head right away - if node_block == local_safe { - return Ok(local_safe); - } - - // Get the source block for the current local safe, this helps to skip empty source - // blocks - let source_block = self - .db_provider - .derived_to_source(local_safe.id()) - .inspect_err(|err| error!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to get source block for the local safe head ref"))?; - - // Get the previous source block id - let prev_source_id = - BlockNumHash { number: source_block.number - 1, hash: source_block.parent_hash }; - - // If the previous source block id is 0, we cannot reset further. This should not happen - // in prod, added for safety during dev environment. - if prev_source_id.number == 0 { - error!(target: "supervisor::syncnode_resetter", %chain_id, "Source block number is 0, cannot reset further"); - return Err(ManagedNodeError::ResetFailed); - } - - // Get the latest derived block at the previous source block, this helps to skip derived - // blocks. If this loop is executed, it means there is something wrong with - // derivation. Faster to go back source blocks than to go back derived - // blocks. 
- local_safe = self - .db_provider - .latest_derived_block_at_source(prev_source_id) - .inspect_err(|err| { - error!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to get latest derived block for the previous source block") - })?; - } - } - - async fn is_canonical( - &self, - chain_id: ChainId, - source: BlockNumHash, - ) -> Result { - let canonical_block = self - .l1_provider - .get_block_by_number(BlockNumberOrTag::Number(source.number)) - .await - .map_err(|err| { - warn!(target: "supervisor::syncnode_resetter", %chain_id, %err, "Failed to fetch source block from L1"); - ManagedNodeError::GetBlockByNumberFailed(source.number) - })?; - - canonical_block.map_or_else(|| Ok(false), |block| Ok(block.hash() == source.hash)) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::syncnode::{AuthenticationError, ClientError}; - use alloy_eips::BlockNumHash; - use alloy_primitives::{B256, ChainId}; - use alloy_provider::mock::{Asserter, MockResponse, MockTransport}; - use alloy_rpc_client::RpcClient; - use async_trait::async_trait; - use jsonrpsee::core::client::Subscription; - use kona_interop::{DerivedRefPair, SafetyLevel}; - use kona_protocol::BlockInfo; - use kona_supervisor_storage::{DerivationStorageReader, HeadRefStorageReader, StorageError}; - use kona_supervisor_types::{BlockSeal, OutputV0, Receipts, SubscriptionEvent, SuperHead}; - use mockall::{mock, predicate}; - - // Mock for HeadRefStorageReader - mock! 
{ - #[derive(Debug)] - pub Db {} - - impl HeadRefStorageReader for Db { - fn get_safety_head_ref(&self, level: SafetyLevel) -> Result; - fn get_super_head(&self) -> Result; - } - - impl DerivationStorageReader for Db { - fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result; - fn latest_derived_block_at_source(&self, source_block_id: BlockNumHash) -> Result; - fn latest_derivation_state(&self) -> Result; - fn get_source_block(&self, source_block_number: u64) -> Result; - fn get_activation_block(&self) -> Result; - } - } - - mock! { - #[derive(Debug)] - pub Client {} - - #[async_trait] - impl ManagedNodeClient for Client { - async fn chain_id(&self) -> Result; - async fn subscribe_events(&self) -> Result, ClientError>; - async fn fetch_receipts(&self, block_hash: B256) -> Result; - async fn output_v0_at_timestamp(&self, timestamp: u64) -> Result; - async fn pending_output_v0_at_timestamp(&self, timestamp: u64) -> Result; - async fn l2_block_ref_by_timestamp(&self, timestamp: u64) -> Result; - async fn block_ref_by_number(&self, block_number: u64) -> Result; - async fn reset_pre_interop(&self) -> Result<(), ClientError>; - async fn reset(&self, unsafe_id: BlockNumHash, cross_unsafe_id: BlockNumHash, local_safe_id: BlockNumHash, cross_safe_id: BlockNumHash, finalised_id: BlockNumHash) -> Result<(), ClientError>; - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ClientError>; - async fn provide_l1(&self, block_info: BlockInfo) -> Result<(), ClientError>; - async fn update_finalized(&self, finalized_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn update_cross_unsafe(&self, cross_unsafe_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn update_cross_safe(&self, source_block_id: BlockNumHash, derived_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn reset_ws_client(&self); - } - } - - fn make_super_head() -> SuperHead { - SuperHead { - local_unsafe: BlockInfo::new(B256::from([0u8; 32]), 5, B256::ZERO, 
0), - cross_unsafe: Some(BlockInfo::new(B256::from([1u8; 32]), 4, B256::ZERO, 0)), - local_safe: Some(BlockInfo::new(B256::from([2u8; 32]), 3, B256::ZERO, 0)), - cross_safe: Some(BlockInfo::new(B256::from([3u8; 32]), 2, B256::ZERO, 0)), - finalized: Some(BlockInfo::new(B256::from([4u8; 32]), 1, B256::ZERO, 0)), - l1_source: Some(BlockInfo::new(B256::from([54u8; 32]), 100, B256::ZERO, 0)), - } - } - - #[tokio::test] - async fn test_reset_success() { - let super_head = make_super_head(); - - let mut db = MockDb::new(); - db.expect_latest_derivation_state().returning(move || { - Ok(DerivedRefPair { - derived: super_head.local_safe.unwrap(), - source: super_head.l1_source.unwrap(), - }) - }); - db.expect_get_super_head().returning(move || Ok(super_head)); - - let mut client = MockClient::new(); - client.expect_chain_id().returning(move || Ok(1)); - client.expect_block_ref_by_number().returning(move |_| Ok(super_head.local_safe.unwrap())); - - db.expect_derived_to_source() - .with(predicate::eq(super_head.local_safe.unwrap().id())) - .returning(move |_| Ok(super_head.l1_source.unwrap())); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - - let canonical_block = r#"{ - "number": "100", - "hash": "0x3636363636363636363636363636363636363636363636363636363636363636", - "mixHash": "0x24900fb3da77674a861c428429dce0762707ecb6052325bbd9b3c64e74b5af9d", - "parentHash": "0x1f68ac259155e2f38211ddad0f0a15394d55417b185a93923e2abe71bb7a4d6d", - "nonce": "0x378da40ff335b070", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": 
"0x00000000000000100000004080000000000500000000000000020000100000000800001000000004000001000000000000000800040010000020100000000400000010000000000000000040000000000000040000000000000000000000000000000400002400000000000000000000000000000004000004000000000000840000000800000080010004000000001000000800000000000000000000000000000000000800000000000040000000020000000000000000000800000400000000000000000000000600000400000000002000000000000000000000004000000000000000100000000000000000000000000000000000040000900010000000", - "transactionsRoot":"0x4d0c8e91e16bdff538c03211c5c73632ed054d00a7e210c0eb25146c20048126", - "stateRoot": "0x91309efa7e42c1f137f31fe9edbe88ae087e6620d0d59031324da3e2f4f93233", - "receiptsRoot": "0x68461ab700003503a305083630a8fb8d14927238f0bc8b6b3d246c0c64f21f4a", - "miner":"0xb42b6c4a95406c78ff892d270ad20b22642e102d", - "difficulty": "0x66e619a", - "totalDifficulty": "0x1e875d746ae", - "extraData": "0xd583010502846765746885676f312e37856c696e7578", - "size": "0x334", - "gasLimit": "0x47e7c4", - "gasUsed": "0x37993", - "timestamp": "0x5835c54d", - "uncles": [], - "transactions": [ - "0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc", - "0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d" - ], - "baseFeePerGas": "0x7", - "withdrawalsRoot": "0x7a4ecf19774d15cf9c15adf0dd8e8a250c128b26c9e2ab2a08d6c9c8ffbd104f", - "withdrawals": [], - "blobGasUsed": "0x0", - "excessBlobGas": "0x0", - "parentBeaconBlockRoot": "0x95c4dbd5b19f6fe3cbc3183be85ff4e85ebe75c5b4fc911f1c91e5b7a554a685" - }"#; - asserter.push(MockResponse::Success(serde_json::from_str(canonical_block).unwrap())); - - client.expect_reset().returning(|_, _, _, _, _| Ok(())); - - let resetter = Resetter::new(Arc::new(client), l1_provider, Arc::new(db)); - - assert!(resetter.reset().await.is_ok()); - } - - #[tokio::test] - async fn test_reset_canonical_hash_mismatch() { - let super_head = make_super_head(); - - let mut db = MockDb::new(); - 
db.expect_latest_derivation_state().returning(move || { - Ok(DerivedRefPair { - derived: super_head.local_safe.unwrap(), - source: super_head.l1_source.unwrap(), - }) - }); - db.expect_get_super_head().returning(move || Ok(super_head)); - - let mut client = MockClient::new(); - client.expect_chain_id().returning(move || Ok(1)); - client.expect_block_ref_by_number().returning(move |_| Ok(super_head.local_safe.unwrap())); - - db.expect_derived_to_source() - .with(predicate::eq(super_head.local_safe.unwrap().id())) - .returning(move |_| Ok(super_head.l1_source.unwrap())); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - - let canonical_block = r#"{ - "number": "100", - "hash": "0x3737373737373737373737373737373737373737373737373737373737367637", - "mixHash": "0x24900fb3da77674a861c428429dce0762707ecb6052325bbd9b3c64e74b5af9d", - "parentHash": "0x1f68ac259155e2f38211ddad0f0a15394d55417b185a93923e2abe71bb7a4d6d", - "nonce": "0x378da40ff335b070", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000100000004080000000000500000000000000020000100000000800001000000004000001000000000000000800040010000020100000000400000010000000000000000040000000000000040000000000000000000000000000000400002400000000000000000000000000000004000004000000000000840000000800000080010004000000001000000800000000000000000000000000000000000800000000000040000000020000000000000000000800000400000000000000000000000600000400000000002000000000000000000000004000000000000000100000000000000000000000000000000000040000900010000000", - "transactionsRoot":"0x4d0c8e91e16bdff538c03211c5c73632ed054d00a7e210c0eb25146c20048126", - "stateRoot": "0x91309efa7e42c1f137f31fe9edbe88ae087e6620d0d59031324da3e2f4f93233", - "receiptsRoot": "0x68461ab700003503a305083630a8fb8d14927238f0bc8b6b3d246c0c64f21f4a", - 
"miner":"0xb42b6c4a95406c78ff892d270ad20b22642e102d", - "difficulty": "0x66e619a", - "totalDifficulty": "0x1e875d746ae", - "extraData": "0xd583010502846765746885676f312e37856c696e7578", - "size": "0x334", - "gasLimit": "0x47e7c4", - "gasUsed": "0x37993", - "timestamp": "0x5835c54d", - "uncles": [], - "transactions": [ - "0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc", - "0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d" - ], - "baseFeePerGas": "0x7", - "withdrawalsRoot": "0x7a4ecf19774d15cf9c15adf0dd8e8a250c128b26c9e2ab2a08d6c9c8ffbd104f", - "withdrawals": [], - "blobGasUsed": "0x0", - "excessBlobGas": "0x0", - "parentBeaconBlockRoot": "0x95c4dbd5b19f6fe3cbc3183be85ff4e85ebe75c5b4fc911f1c91e5b7a554a685" - }"#; - asserter.push(MockResponse::Success(serde_json::from_str(canonical_block).unwrap())); - - let resetter = Resetter::new(Arc::new(client), l1_provider, Arc::new(db)); - - assert!(resetter.reset().await.is_err()); - } - - #[tokio::test] - async fn test_reset_db_error() { - let mut db = MockDb::new(); - db.expect_latest_derivation_state().returning(|| Err(StorageError::LockPoisoned)); - - let mut client = MockClient::new(); - client.expect_chain_id().returning(move || Ok(1)); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let resetter = Resetter::new(Arc::new(client), l1_provider, Arc::new(db)); - - assert!(resetter.reset().await.is_err()); - } - - #[tokio::test] - async fn test_reset_block_error() { - let super_head = make_super_head(); - - let mut db = MockDb::new(); - db.expect_latest_derivation_state().returning(move || { - Ok(DerivedRefPair { - derived: super_head.local_safe.unwrap(), - source: super_head.l1_source.unwrap(), - }) - }); - let mut client = MockClient::new(); - client.expect_chain_id().returning(move || Ok(1)); - client - .expect_block_ref_by_number() - .returning(|_| 
Err(ClientError::Authentication(AuthenticationError::InvalidHeader))); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - let resetter = Resetter::new(Arc::new(client), l1_provider, Arc::new(db)); - - assert!(resetter.reset().await.is_err()); - } - - #[tokio::test] - async fn test_reset_inconsistency() { - let super_head = make_super_head(); - - let mut db = MockDb::new(); - db.expect_latest_derivation_state().returning(move || { - Ok(DerivedRefPair { - derived: super_head.local_safe.unwrap(), - source: super_head.l1_source.unwrap(), - }) - }); - - let prev_source_block = BlockInfo::new(B256::from([8u8; 32]), 101, B256::ZERO, 0); - let current_source_block = - BlockInfo::new(B256::from([7u8; 32]), 102, prev_source_block.hash, 0); - let last_valid_derived_block = BlockInfo::new(B256::from([6u8; 32]), 9, B256::ZERO, 0); - - // return expected values when get_last_valid_derived_block() is called - db.expect_derived_to_source() - .with(predicate::eq(super_head.local_safe.unwrap().id())) - .returning(move |_| Ok(current_source_block)); - db.expect_latest_derived_block_at_source() - .with(predicate::eq(prev_source_block.id())) - .returning(move |_| Ok(last_valid_derived_block)); - - let mut client = MockClient::new(); - client.expect_chain_id().returning(move || Ok(1)); - // Return a block that does not match local_safe - client - .expect_block_ref_by_number() - .with(predicate::eq(super_head.local_safe.unwrap().number)) - .returning(|_| Ok(BlockInfo::new(B256::from([4u8; 32]), 3, B256::ZERO, 0))); - // On second call, return the last valid derived block - client - .expect_block_ref_by_number() - .with(predicate::eq(last_valid_derived_block.number)) - .returning(move |_| Ok(last_valid_derived_block)); - - db.expect_derived_to_source() - .with(predicate::eq(last_valid_derived_block.id())) - .returning(move |_| Ok(prev_source_block)); - - let asserter = 
Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - - let canonical_block = r#"{ - "number": "100", - "hash": "0x0808080808080808080808080808080808080808080808080808080808080808", - "mixHash": "0x24900fb3da77674a861c428429dce0762707ecb6052325bbd9b3c64e74b5af9d", - "parentHash": "0x1f68ac259155e2f38211ddad0f0a15394d55417b185a93923e2abe71bb7a4d6d", - "nonce": "0x378da40ff335b070", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": "0x00000000000000100000004080000000000500000000000000020000100000000800001000000004000001000000000000000800040010000020100000000400000010000000000000000040000000000000040000000000000000000000000000000400002400000000000000000000000000000004000004000000000000840000000800000080010004000000001000000800000000000000000000000000000000000800000000000040000000020000000000000000000800000400000000000000000000000600000400000000002000000000000000000000004000000000000000100000000000000000000000000000000000040000900010000000", - "transactionsRoot":"0x4d0c8e91e16bdff538c03211c5c73632ed054d00a7e210c0eb25146c20048126", - "stateRoot": "0x91309efa7e42c1f137f31fe9edbe88ae087e6620d0d59031324da3e2f4f93233", - "receiptsRoot": "0x68461ab700003503a305083630a8fb8d14927238f0bc8b6b3d246c0c64f21f4a", - "miner":"0xb42b6c4a95406c78ff892d270ad20b22642e102d", - "difficulty": "0x66e619a", - "totalDifficulty": "0x1e875d746ae", - "extraData": "0xd583010502846765746885676f312e37856c696e7578", - "size": "0x334", - "gasLimit": "0x47e7c4", - "gasUsed": "0x37993", - "timestamp": "0x5835c54d", - "uncles": [], - "transactions": [ - "0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc", - "0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d" - ], - "baseFeePerGas": "0x7", - "withdrawalsRoot": "0x7a4ecf19774d15cf9c15adf0dd8e8a250c128b26c9e2ab2a08d6c9c8ffbd104f", - "withdrawals": [], - "blobGasUsed": "0x0", - 
"excessBlobGas": "0x0", - "parentBeaconBlockRoot": "0x95c4dbd5b19f6fe3cbc3183be85ff4e85ebe75c5b4fc911f1c91e5b7a554a685" - }"#; - asserter.push(MockResponse::Success(serde_json::from_str(canonical_block).unwrap())); - - db.expect_get_super_head().returning(move || Ok(super_head)); - - client.expect_reset().times(1).returning(|_, _, _, _, _| Ok(())); - - let resetter = Resetter::new(Arc::new(client), l1_provider, Arc::new(db)); - - assert!(resetter.reset().await.is_ok()); - } - - #[tokio::test] - async fn test_reset_rpc_error() { - let super_head = make_super_head(); - - let mut db = MockDb::new(); - db.expect_latest_derivation_state().returning(move || { - Ok(DerivedRefPair { - derived: super_head.local_safe.unwrap(), - source: super_head.l1_source.unwrap(), - }) - }); - - db.expect_derived_to_source() - .with(predicate::eq(super_head.local_safe.unwrap().id())) - .returning(move |_| Ok(super_head.l1_source.unwrap())); - - let asserter = Asserter::new(); - let transport = MockTransport::new(asserter.clone()); - let l1_provider = RootProvider::::new(RpcClient::new(transport, false)); - - let canonical_block = r#"{ - "number": "100", - "hash": "0x3636363636363636363636363636363636363636363636363636363636363636", - "mixHash": "0x24900fb3da77674a861c428429dce0762707ecb6052325bbd9b3c64e74b5af9d", - "parentHash": "0x1f68ac259155e2f38211ddad0f0a15394d55417b185a93923e2abe71bb7a4d6d", - "nonce": "0x378da40ff335b070", - "sha3Uncles": "0x1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347", - "logsBloom": 
"0x00000000000000100000004080000000000500000000000000020000100000000800001000000004000001000000000000000800040010000020100000000400000010000000000000000040000000000000040000000000000000000000000000000400002400000000000000000000000000000004000004000000000000840000000800000080010004000000001000000800000000000000000000000000000000000800000000000040000000020000000000000000000800000400000000000000000000000600000400000000002000000000000000000000004000000000000000100000000000000000000000000000000000040000900010000000", - "transactionsRoot":"0x4d0c8e91e16bdff538c03211c5c73632ed054d00a7e210c0eb25146c20048126", - "stateRoot": "0x91309efa7e42c1f137f31fe9edbe88ae087e6620d0d59031324da3e2f4f93233", - "receiptsRoot": "0x68461ab700003503a305083630a8fb8d14927238f0bc8b6b3d246c0c64f21f4a", - "miner":"0xb42b6c4a95406c78ff892d270ad20b22642e102d", - "difficulty": "0x66e619a", - "totalDifficulty": "0x1e875d746ae", - "extraData": "0xd583010502846765746885676f312e37856c696e7578", - "size": "0x334", - "gasLimit": "0x47e7c4", - "gasUsed": "0x37993", - "timestamp": "0x5835c54d", - "uncles": [], - "transactions": [ - "0xa0807e117a8dd124ab949f460f08c36c72b710188f01609595223b325e58e0fc", - "0xeae6d797af50cb62a596ec3939114d63967c374fa57de9bc0f4e2b576ed6639d" - ], - "baseFeePerGas": "0x7", - "withdrawalsRoot": "0x7a4ecf19774d15cf9c15adf0dd8e8a250c128b26c9e2ab2a08d6c9c8ffbd104f", - "withdrawals": [], - "blobGasUsed": "0x0", - "excessBlobGas": "0x0", - "parentBeaconBlockRoot": "0x95c4dbd5b19f6fe3cbc3183be85ff4e85ebe75c5b4fc911f1c91e5b7a554a685" - }"#; - asserter.push(MockResponse::Success(serde_json::from_str(canonical_block).unwrap())); - - db.expect_get_super_head().returning(move || Ok(super_head)); - - let mut client = MockClient::new(); - client.expect_chain_id().returning(move || Ok(1)); - client.expect_block_ref_by_number().returning(move |_| Ok(super_head.local_safe.unwrap())); - client.expect_reset().returning(|_, _, _, _, _| { - 
Err(ClientError::Authentication(AuthenticationError::InvalidJwt)) - }); - - let resetter = Resetter::new(Arc::new(client), l1_provider, Arc::new(db)); - - assert!(resetter.reset().await.is_err()); - } -} diff --git a/rust/kona/crates/supervisor/core/src/syncnode/traits.rs b/rust/kona/crates/supervisor/core/src/syncnode/traits.rs deleted file mode 100644 index 7d81aa21bfaf1..0000000000000 --- a/rust/kona/crates/supervisor/core/src/syncnode/traits.rs +++ /dev/null @@ -1,197 +0,0 @@ -use super::ManagedNodeError; -use alloy_eips::BlockNumHash; -use alloy_primitives::B256; -use async_trait::async_trait; -use kona_interop::{BlockReplacement, DerivedRefPair}; -use kona_protocol::BlockInfo; -use kona_supervisor_types::{BlockSeal, OutputV0, Receipts}; -use std::fmt::Debug; - -/// Represents a handler for subscription events. -#[async_trait] -pub trait SubscriptionHandler: Send + Sync { - /// Handles the exhaustion L1 exhaust event from the node. - async fn handle_exhaust_l1( - &self, - derived_ref_pair: &DerivedRefPair, - ) -> Result<(), ManagedNodeError>; - - /// Handles the reset event from the node. - async fn handle_reset(&self, reset_id: &str) -> Result<(), ManagedNodeError>; - - /// Handles the unsafe block event from the node. - async fn handle_unsafe_block(&self, block: &BlockInfo) -> Result<(), ManagedNodeError>; - - /// Handles the derivation update event from the node. - async fn handle_derivation_update( - &self, - derived_ref_pair: &DerivedRefPair, - ) -> Result<(), ManagedNodeError>; - - /// Handles the block replacement event from the node. - async fn handle_replace_block( - &self, - replacement: &BlockReplacement, - ) -> Result<(), ManagedNodeError>; - - /// Handles the derivation origin update event from the node. - async fn handle_derivation_origin_update( - &self, - origin: &BlockInfo, - ) -> Result<(), ManagedNodeError>; -} - -/// [`BlockProvider`] abstracts fetching blocks and receipts for a given block. 
-#[async_trait] -pub trait BlockProvider: Send + Sync + Debug { - /// Fetch all transaction receipts for the block with the given hash. - /// - /// # Arguments - /// * `block_hash` - The hash of the block whose receipts should be fetched. - /// - /// # Returns - /// [Receipts] representing all transaction receipts in the block, - /// or an error if the fetch fails. - async fn fetch_receipts(&self, block_hash: B256) -> Result; - - /// Returns the block info for the given block number - async fn block_by_number(&self, number: u64) -> Result; -} - -/// [`ManagedNodeDataProvider`] abstracts the managed node data APIs that supervisor uses to fetch -/// info from the managed node. -#[async_trait] -pub trait ManagedNodeDataProvider: Send + Sync + Debug { - /// Fetch the output v0 at a given timestamp. - /// - /// # Arguments - /// * `timestamp` - The timestamp to fetch the output v0 at. - /// - /// # Returns - /// The output v0 at the given timestamp, - /// or an error if the fetch fails. - async fn output_v0_at_timestamp(&self, timestamp: u64) -> Result; - - /// Fetch the pending output v0 at a given timestamp. - /// - /// # Arguments - /// * `timestamp` - The timestamp to fetch the pending output v0 at. - /// - /// # Returns - /// The pending output v0 at the given timestamp, - /// or an error if the fetch fails. - async fn pending_output_v0_at_timestamp( - &self, - timestamp: u64, - ) -> Result; - - /// Fetch the l2 block ref by timestamp. - /// - /// # Arguments - /// * `timestamp` - The timestamp to fetch the l2 block ref at. - /// - /// # Returns - /// The l2 block ref at the given timestamp. - async fn l2_block_ref_by_timestamp( - &self, - timestamp: u64, - ) -> Result; -} - -/// [`ManagedNodeController`] abstracts the managed node control APIs that supervisor uses to -/// control the managed node state. -#[async_trait] -pub trait ManagedNodeController: Send + Sync + Debug { - /// Update the finalized block head using the given [`BlockNumHash`]. 
- /// - /// # Arguments - /// * `finalized_block_id` - The block number and hash of the finalized block - /// - /// # Returns - /// * `Ok(())` on success - /// * `Err(ManagedNodeError)` if the update fails - async fn update_finalized( - &self, - finalized_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - /// Update the cross unsafe block head using the given [`BlockNumHash`]. - /// - /// # Arguments - /// * `cross_unsafe_block_id` - The block number and hash of the cross unsafe block - /// - /// # Returns - /// * `Ok(())` on success - /// * `Err(ManagedNodeError)` if the update fails - async fn update_cross_unsafe( - &self, - cross_unsafe_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - /// Update the cross safe block head using the given [`BlockNumHash`]. - /// - /// # Arguments - /// * `source_block_id` - The block number and hash of the L1 block - /// * `derived_block_id` - The block number and hash of the new cross safe block - /// - /// # Returns - /// * `Ok(())` on success - /// * `Err(ManagedNodeError)` if the update fails - async fn update_cross_safe( - &self, - source_block_id: BlockNumHash, - derived_block_id: BlockNumHash, - ) -> Result<(), ManagedNodeError>; - - /// Reset the managed node based on the supervisor's state. - /// This is typically used to reset the node's state - /// when the supervisor detects a misalignment - /// - /// # Returns - /// * `Ok(())` on success - /// * `Err(ManagedNodeError)` if the reset fails - async fn reset(&self) -> Result<(), ManagedNodeError>; - - /// Instructs the managed node to invalidate a block. - /// This is used when the supervisor detects an invalid block - /// and needs to roll back the node's state. - /// - /// # Arguments - /// * `seal` - The [`BlockSeal`] of the block. 
- /// - /// # Returns - /// * `Ok(())` on success - /// * `Err(ManagedNodeError)` if the invalidation fails - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; -} - -/// Composite trait for any node that provides: -/// - Event subscriptions (`NodeSubscriber`) -/// - Receipt access (`ReceiptProvider`) -/// - Managed node API access (`ManagedNodeApiProvider`) -/// -/// This is the main abstraction used for a fully-managed node -/// within the supervisor context. -#[async_trait] -pub trait ManagedNodeProvider: - SubscriptionHandler - + BlockProvider - + ManagedNodeDataProvider - + ManagedNodeController - + Send - + Sync - + Debug -{ -} - -#[async_trait] -impl ManagedNodeProvider for T where - T: SubscriptionHandler - + BlockProvider - + ManagedNodeDataProvider - + ManagedNodeController - + Send - + Sync - + Debug -{ -} diff --git a/rust/kona/crates/supervisor/metrics/Cargo.toml b/rust/kona/crates/supervisor/metrics/Cargo.toml deleted file mode 100644 index 817b1e031b74e..0000000000000 --- a/rust/kona/crates/supervisor/metrics/Cargo.toml +++ /dev/null @@ -1,17 +0,0 @@ -[package] -name = "kona-supervisor-metrics" -version = "0.1.0" -edition.workspace = true -license.workspace = true -rust-version.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -keywords.workspace = true -categories.workspace = true -exclude.workspace = true - -[dependencies] - -[lints] -workspace = true diff --git a/rust/kona/crates/supervisor/metrics/src/lib.rs b/rust/kona/crates/supervisor/metrics/src/lib.rs deleted file mode 100644 index 6bd6a301fc8d9..0000000000000 --- a/rust/kona/crates/supervisor/metrics/src/lib.rs +++ /dev/null @@ -1,5 +0,0 @@ -//! Metrics collection and reporting for the supervisor. 
-mod reporter; -pub use reporter::MetricsReporter; - -mod macros; diff --git a/rust/kona/crates/supervisor/metrics/src/macros.rs b/rust/kona/crates/supervisor/metrics/src/macros.rs deleted file mode 100644 index 9b24f96f54573..0000000000000 --- a/rust/kona/crates/supervisor/metrics/src/macros.rs +++ /dev/null @@ -1,75 +0,0 @@ -/// Macro to observe a call to a storage method and record metrics. -#[macro_export] -macro_rules! observe_metrics_for_result { - ( - $success_metric:expr, - $error_metric:expr, - $duration_metric:expr, - $method_name:expr, - $block:expr $(, $tag_key:expr => $tag_val:expr )* - ) => {{ - let start_time = std::time::Instant::now(); - let result = $block; - let duration = start_time.elapsed().as_secs_f64(); - - if result.is_ok() { - metrics::counter!( - $success_metric, - "method" => $method_name - $(, $tag_key => $tag_val )* - ).increment(1); - } else { - metrics::counter!( - $error_metric, - "method" => $method_name - $(, $tag_key => $tag_val )* - ).increment(1); - } - - metrics::histogram!( - $duration_metric, - "method" => $method_name - $(, $tag_key => $tag_val )* - ).record(duration); - - result - }}; -} - -/// Macro to observe a call to an async function and record metrics. -#[macro_export] -macro_rules! 
observe_metrics_for_result_async { - ( - $success_metric:expr, - $error_metric:expr, - $duration_metric:expr, - $method_name:expr, - $block:expr $(, $tag_key:expr => $tag_val:expr )* - ) => {{ - let start_time = std::time::Instant::now(); - let result = $block.await; - let duration = start_time.elapsed().as_secs_f64(); - - if result.is_ok() { - metrics::counter!( - $success_metric, - "method" => $method_name - $(, $tag_key => $tag_val )* - ).increment(1); - } else { - metrics::counter!( - $error_metric, - "method" => $method_name - $(, $tag_key => $tag_val )* - ).increment(1); - } - - metrics::histogram!( - $duration_metric, - "method" => $method_name - $(, $tag_key => $tag_val )* - ).record(duration); - - result - }}; -} diff --git a/rust/kona/crates/supervisor/metrics/src/reporter.rs b/rust/kona/crates/supervisor/metrics/src/reporter.rs deleted file mode 100644 index 6b0f391a32c59..0000000000000 --- a/rust/kona/crates/supervisor/metrics/src/reporter.rs +++ /dev/null @@ -1,9 +0,0 @@ -/// Defines a contract for types that can report metrics. -/// This trait is intended to be implemented by types that need to report metrics -pub trait MetricsReporter { - /// Reports metrics for the implementing type. - /// This function is intended to be called periodically to collect and report metrics. - /// The implementation should gather relevant metrics and report them to the configured metrics - /// backend. 
- fn report_metrics(&self); -} diff --git a/rust/kona/crates/supervisor/rpc/Cargo.toml b/rust/kona/crates/supervisor/rpc/Cargo.toml deleted file mode 100644 index d0fdec143c2e3..0000000000000 --- a/rust/kona/crates/supervisor/rpc/Cargo.toml +++ /dev/null @@ -1,71 +0,0 @@ -[package] -name = "kona-supervisor-rpc" -description = "Kona Supervisor RPC" -version = "0.1.1" - -edition.workspace = true -license.workspace = true -rust-version.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -keywords.workspace = true -categories.workspace = true -exclude.workspace = true - -[lints] -workspace = true - -[dependencies] -# Workspace -kona-interop.workspace = true -kona-protocol.workspace = true -kona-supervisor-types.workspace = true - -# jsonrpsee -serde.workspace = true -serde_json.workspace = true -jsonrpsee = { workspace = true, optional = true, features = ["macros", "server"] } -async-trait.workspace = true - -# Alloy -alloy-eips.workspace = true -alloy-serde.workspace = true -alloy-primitives = { workspace = true, features = ["map", "rlp", "serde"] } -op-alloy-consensus.workspace = true -alloy-rpc-types-engine = { workspace = true, features = ["jwt", "serde", "std"], optional = true } -tokio = { workspace = true, features = ["time", "sync"], optional = true } -derive_more = { workspace = true, default-features = false, features = ["display", "from", "constructor", "std"], optional = true } - -# `reqwest` feature dependencies -alloy-rpc-client = { workspace = true, features = ["reqwest"], optional = true } -thiserror = { workspace = true, optional = true } - -[features] -serde = [ - "alloy-eips/serde", - "alloy-primitives/serde", - "alloy-rpc-types-engine?/serde", - "kona-interop/serde", - "kona-protocol/serde", - "op-alloy-consensus/serde", -] -# The `jsonrpsee` feature enables the core RPC functionality. -# When it's active, we also need the `serde` feature. 
-jsonrpsee = [ "dep:jsonrpsee", "serde" ] -# Client feature builds upon the base jsonrpsee feature -client = [ "jsonrpsee", "jsonrpsee/client" ] -# Server feature for supervisor RPC server functionality -server = [ - "dep:alloy-rpc-types-engine", - "dep:derive_more", - "dep:tokio", - "jsonrpsee", -] -# reqwest client feature -reqwest = [ - "client", - "dep:alloy-rpc-client", - "dep:derive_more", - "dep:thiserror", -] diff --git a/rust/kona/crates/supervisor/rpc/README.md b/rust/kona/crates/supervisor/rpc/README.md deleted file mode 100644 index 5ac480f498c2d..0000000000000 --- a/rust/kona/crates/supervisor/rpc/README.md +++ /dev/null @@ -1 +0,0 @@ -## `kona-supervisor-rpc` \ No newline at end of file diff --git a/rust/kona/crates/supervisor/rpc/src/config.rs b/rust/kona/crates/supervisor/rpc/src/config.rs deleted file mode 100644 index 3c454b01dc96d..0000000000000 --- a/rust/kona/crates/supervisor/rpc/src/config.rs +++ /dev/null @@ -1,41 +0,0 @@ -//! Contains the Configuration for the supervisor RPC server. - -#[cfg(feature = "server")] -use alloy_rpc_types_engine::JwtSecret; -#[cfg(feature = "server")] -use std::net::SocketAddr; - -/// The RPC Config. -#[cfg(feature = "server")] -#[derive(Debug, Clone)] -pub struct SupervisorRpcConfig { - /// If the RPC is disabled. - /// By default, the RPC server is disabled. - pub rpc_disabled: bool, - /// The socket address for the RPC server. - pub socket_address: SocketAddr, - /// The JWT secret for the RPC server. - pub jwt_secret: JwtSecret, -} - -#[cfg(feature = "server")] -impl SupervisorRpcConfig { - /// Returns if the rpc is disabled. - pub const fn is_disabled(&self) -> bool { - self.rpc_disabled - } -} - -// By default, the RPC server is disabled. -// As such, the socket address and JWT secret are unused -// and can be set to random values. 
-#[cfg(feature = "server")] -impl std::default::Default for SupervisorRpcConfig { - fn default() -> Self { - Self { - rpc_disabled: true, - socket_address: SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), 9333), - jwt_secret: JwtSecret::random(), - } - } -} diff --git a/rust/kona/crates/supervisor/rpc/src/jsonrpsee.rs b/rust/kona/crates/supervisor/rpc/src/jsonrpsee.rs deleted file mode 100644 index c21c80c1ff70c..0000000000000 --- a/rust/kona/crates/supervisor/rpc/src/jsonrpsee.rs +++ /dev/null @@ -1,232 +0,0 @@ -//! The Optimism Supervisor RPC API using `jsonrpsee` - -pub use jsonrpsee::{ - core::{RpcResult, SubscriptionResult}, - types::{ErrorCode, ErrorObjectOwned}, -}; - -use crate::{SuperRootOutputRpc, SupervisorSyncStatus}; -use alloy_eips::BlockNumHash; -use alloy_primitives::{B256, BlockHash, ChainId, map::HashMap}; -use jsonrpsee::proc_macros::rpc; -use kona_interop::{ - DependencySet, DerivedIdPair, DerivedRefPair, ExecutingDescriptor, ManagedEvent, SafetyLevel, -}; -use kona_protocol::BlockInfo; -use kona_supervisor_types::{BlockSeal, HexStringU64, OutputV0, Receipts, SubscriptionEvent}; -use serde::{Deserialize, Serialize}; - -/// Supervisor API for interop. -/// -/// See spec . -// TODO:: add all the methods -#[cfg_attr(not(feature = "client"), rpc(server, namespace = "supervisor"))] -#[cfg_attr(feature = "client", rpc(server, client, namespace = "supervisor"))] -pub trait SupervisorApi { - /// Gets the source block for a given derived block - #[method(name = "crossDerivedToSource")] - async fn cross_derived_to_source( - &self, - chain_id: HexStringU64, - block_id: BlockNumHash, - ) -> RpcResult; - - /// Returns the [`LocalUnsafe`] block for given chain. - /// - /// Spec: - /// - /// [`LocalUnsafe`]: SafetyLevel::LocalUnsafe - #[method(name = "localUnsafe")] - async fn local_unsafe(&self, chain_id: HexStringU64) -> RpcResult; - - /// Returns the [`LocalSafe`] block for given chain. 
- /// - /// Spec: - /// - /// [`LocalSafe`]: SafetyLevel::LocalSafe - #[method(name = "localSafe")] - async fn local_safe(&self, chain_id: HexStringU64) -> RpcResult; - - /// Returns the [`CrossSafe`] block for given chain. - /// - /// Spec: - /// - /// [`CrossSafe`]: SafetyLevel::CrossSafe - #[method(name = "crossSafe")] - async fn cross_safe(&self, chain_id: HexStringU64) -> RpcResult; - - /// Returns the [`Finalized`] block for the given chain. - /// - /// Spec: - /// - /// [`Finalized`]: SafetyLevel::Finalized - #[method(name = "finalized")] - async fn finalized(&self, chain_id: HexStringU64) -> RpcResult; - - /// Returns the finalized L1 block that the supervisor is synced to. - /// - /// Spec: - #[method(name = "finalizedL1")] - async fn finalized_l1(&self) -> RpcResult; - - /// Returns the [`SuperRootOutput`] at a specified timestamp, which represents the global - /// state across all monitored chains. Contains the - /// - Highest L1 [`BlockNumHash`] that is cross-safe among all chains - /// - Timestamp of the super root - /// - The [`SuperRoot`] hash - /// - All chains [`ChainRootInfo`]s - /// - /// Spec: - /// - /// [`SuperRootOutput`]: kona_interop::SuperRootOutput - /// [`SuperRoot`]: kona_interop::SuperRoot - /// [`ChainRootInfo`]: kona_interop::ChainRootInfo - #[method(name = "superRootAtTimestamp")] - async fn super_root_at_timestamp( - &self, - timestamp: HexStringU64, - ) -> RpcResult; - - /// Verifies if an access-list references only valid messages w.r.t. locally configured minimum - /// [`SafetyLevel`]. - #[method(name = "checkAccessList")] - async fn check_access_list( - &self, - inbox_entries: Vec, - min_safety: SafetyLevel, - executing_descriptor: ExecutingDescriptor, - ) -> RpcResult<()>; - - /// Describes superchain sync status. - /// - /// Spec: - #[method(name = "syncStatus")] - async fn sync_status(&self) -> RpcResult; - - /// Returns the last derived block, for each chain, from the given L1 block. 
This block is at - /// least [`LocalSafe`]. - /// - /// Spec: - /// - /// [`LocalSafe`]: SafetyLevel::LocalSafe - #[method(name = "allSafeDerivedAt")] - async fn all_safe_derived_at( - &self, - derived_from: BlockNumHash, - ) -> RpcResult>; - - /// Returns the [`DependencySet`] for the supervisor. - /// - /// Spec: - /// TODO: Replace the link above after the PR is merged. - #[method(name = "dependencySetV1")] - async fn dependency_set_v1(&self) -> RpcResult; -} - -/// Supervisor API for admin operations. -#[cfg_attr(not(feature = "client"), rpc(server, namespace = "admin"))] -#[cfg_attr(feature = "client", rpc(server, client, namespace = "admin"))] -pub trait SupervisorAdminApi { - /// Adds L2RPC to the supervisor. - #[method(name = "addL2RPC")] - async fn add_l2_rpc(&self, url: String, jwt_secret: String) -> RpcResult<()>; -} - -/// Represents the topics for subscriptions in the Managed Mode API. -#[derive(Debug, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub enum SubscriptionTopic { - /// The topic for events from the managed node. - Events, -} - -/// ManagedModeApi to send control signals to a managed node from supervisor -/// And get info for syncing the state with the given L2. -/// -/// See spec -/// Using the proc_macro to generate the client and server code. -/// Default namespace separator is `_`. -#[cfg_attr(not(feature = "client"), rpc(server, namespace = "interop"))] -#[cfg_attr(feature = "client", rpc(server, client, namespace = "interop"))] -pub trait ManagedModeApi { - /// Subscribe to the events from the managed node. - /// Op-node provides the `interop-subscribe` method for subscribing to the events topic. - /// Subscription notifications are then sent via the `interop-subscription` method as - /// [`SubscriptionEvent`]s. - // Currently, the `events` topic must be explicitly passed as a parameter to the subscription - // request, even though this function is specifically intended to subscribe to the `events` - // topic. 
todo: Find a way to eliminate the need to pass the topic explicitly. - #[subscription(name = "subscribe" => "subscription", item = SubscriptionEvent, unsubscribe = "unsubscribe")] - async fn subscribe_events(&self, topic: SubscriptionTopic) -> SubscriptionResult; - - /// Pull an event from the managed node. - #[method(name = "pullEvent")] - async fn pull_event(&self) -> RpcResult; - - /// Control signals sent to the managed node from supervisor - /// Update the cross unsafe block head - #[method(name = "updateCrossUnsafe")] - async fn update_cross_unsafe(&self, id: BlockNumHash) -> RpcResult<()>; - - /// Update the cross safe block head - #[method(name = "updateCrossSafe")] - async fn update_cross_safe(&self, derived: BlockNumHash, source: BlockNumHash) - -> RpcResult<()>; - - /// Update the finalized block head - #[method(name = "updateFinalized")] - async fn update_finalized(&self, id: BlockNumHash) -> RpcResult<()>; - - /// Invalidate a block - #[method(name = "invalidateBlock")] - async fn invalidate_block(&self, seal: BlockSeal) -> RpcResult<()>; - - /// Send the next L1 block - #[method(name = "provideL1")] - async fn provide_l1(&self, next_l1: BlockInfo) -> RpcResult<()>; - - /// Get the genesis block ref for l1 and l2; Soon to be deprecated! 
- #[method(name = "anchorPoint")] - async fn anchor_point(&self) -> RpcResult; - - /// Reset the managed node to the pre-interop state - #[method(name = "resetPreInterop")] - async fn reset_pre_interop(&self) -> RpcResult<()>; - - /// Reset the managed node to the specified block heads - #[method(name = "reset")] - async fn reset( - &self, - local_unsafe: BlockNumHash, - cross_unsafe: BlockNumHash, - local_safe: BlockNumHash, - cross_safe: BlockNumHash, - finalized: BlockNumHash, - ) -> RpcResult<()>; - - /// Sync methods that supervisor uses to sync with the managed node - /// Fetch all receipts for a give block - #[method(name = "fetchReceipts")] - async fn fetch_receipts(&self, block_hash: BlockHash) -> RpcResult; - - /// Get block info for a given block number - #[method(name = "l2BlockRefByNumber")] - async fn l2_block_ref_by_number(&self, number: u64) -> RpcResult; - - /// Get the chain id - #[method(name = "chainID")] - async fn chain_id(&self) -> RpcResult; - - /// Get the `state_root`, `message_parser_storage_root`, and `block_hash` at a given timestamp - #[method(name = "outputV0AtTimestamp")] - async fn output_v0_at_timestamp(&self, timestamp: u64) -> RpcResult; - - /// Get the pending `state_root`, `message_parser_storage_root`, and `block_hash` at a given - /// timestamp - #[method(name = "pendingOutputV0AtTimestamp")] - async fn pending_output_v0_at_timestamp(&self, timestamp: u64) -> RpcResult; - - /// Get the l2 block ref for a given timestamp - #[method(name = "l2BlockRefByTimestamp")] - async fn l2_block_ref_by_timestamp(&self, timestamp: u64) -> RpcResult; -} diff --git a/rust/kona/crates/supervisor/rpc/src/lib.rs b/rust/kona/crates/supervisor/rpc/src/lib.rs deleted file mode 100644 index 459bb3e31b1f6..0000000000000 --- a/rust/kona/crates/supervisor/rpc/src/lib.rs +++ /dev/null @@ -1,30 +0,0 @@ -#![doc = include_str!("../README.md")] - -#[cfg(feature = "jsonrpsee")] -pub mod jsonrpsee; -#[cfg(all(feature = "jsonrpsee", feature = "client"))] -pub 
use jsonrpsee::{ManagedModeApiClient, SupervisorAdminApiClient, SupervisorApiClient}; -#[cfg(feature = "jsonrpsee")] -pub use jsonrpsee::{SupervisorAdminApiServer, SupervisorApiServer}; - -#[cfg(feature = "server")] -pub mod config; -#[cfg(feature = "server")] -pub use config::SupervisorRpcConfig; - -#[cfg(feature = "server")] -pub mod server; -#[cfg(feature = "server")] -pub use server::SupervisorRpcServer; - -#[cfg(feature = "reqwest")] -pub mod reqwest; -#[cfg(feature = "reqwest")] -pub use reqwest::{CheckAccessListClient, SupervisorClient, SupervisorClientError}; - -pub mod response; -pub use response::{ - ChainRootInfoRpc, SuperRootOutputRpc, SupervisorChainSyncStatus, SupervisorSyncStatus, -}; - -pub use kona_protocol::BlockInfo; diff --git a/rust/kona/crates/supervisor/rpc/src/reqwest.rs b/rust/kona/crates/supervisor/rpc/src/reqwest.rs deleted file mode 100644 index eea78aa08b464..0000000000000 --- a/rust/kona/crates/supervisor/rpc/src/reqwest.rs +++ /dev/null @@ -1,65 +0,0 @@ -//! RPC API implementation using `reqwest` - -#[cfg(feature = "reqwest")] -use alloy_primitives::B256; -#[cfg(feature = "reqwest")] -use alloy_rpc_client::ReqwestClient; -#[cfg(feature = "reqwest")] -use derive_more::Constructor; -#[cfg(feature = "reqwest")] -use kona_interop::{ExecutingDescriptor, SafetyLevel}; - -/// Error types for supervisor RPC interactions -#[cfg(feature = "reqwest")] -#[derive(Debug, thiserror::Error)] -pub enum SupervisorClientError { - /// RPC client error - #[error("RPC client error: {0}")] - Client(Box), -} - -#[cfg(feature = "reqwest")] -impl SupervisorClientError { - /// Creates a new client error - pub fn client(err: impl std::error::Error + Send + Sync + 'static) -> Self { - Self::Client(Box::new(err)) - } -} - -/// Subset of `op-supervisor` API, used for validating interop events. -#[cfg(feature = "reqwest")] -pub trait CheckAccessListClient { - /// Returns if the messages meet the minimum safety level. 
- fn check_access_list( - &self, - inbox_entries: &[B256], - min_safety: SafetyLevel, - executing_descriptor: ExecutingDescriptor, - ) -> impl std::future::Future> + Send; -} - -/// A supervisor client. -#[cfg(feature = "reqwest")] -#[derive(Debug, Clone, Constructor)] -pub struct SupervisorClient { - /// The inner RPC client. - client: ReqwestClient, -} - -#[cfg(feature = "reqwest")] -impl CheckAccessListClient for SupervisorClient { - async fn check_access_list( - &self, - inbox_entries: &[B256], - min_safety: SafetyLevel, - executing_descriptor: ExecutingDescriptor, - ) -> Result<(), SupervisorClientError> { - self.client - .request( - "supervisor_checkAccessList", - (inbox_entries, min_safety, executing_descriptor), - ) - .await - .map_err(SupervisorClientError::client) - } -} diff --git a/rust/kona/crates/supervisor/rpc/src/response.rs b/rust/kona/crates/supervisor/rpc/src/response.rs deleted file mode 100644 index 0cb500ad77190..0000000000000 --- a/rust/kona/crates/supervisor/rpc/src/response.rs +++ /dev/null @@ -1,326 +0,0 @@ -//! Supervisor RPC response types. - -use alloy_eips::BlockNumHash; -use alloy_primitives::{B256, Bytes, ChainId, map::HashMap}; -use kona_protocol::BlockInfo; -use kona_supervisor_types::SuperHead; -use serde::{Deserialize, Serialize, Serializer}; - -/// Describes superchain sync status. -/// -/// Specs: . -#[derive(Debug, Default, Clone, PartialEq, Eq)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(rename_all = "camelCase") -)] -pub struct SupervisorSyncStatus { - /// [`BlockInfo`] of highest L1 block. - pub min_synced_l1: BlockInfo, - /// Timestamp of highest cross-safe block. - /// - /// NOTE: Some fault-proof releases may already depend on `safe`, so we keep JSON field name as - /// `safe`. - #[cfg_attr(feature = "serde", serde(rename = "safeTimestamp"))] - pub cross_safe_timestamp: u64, - /// Timestamp of highest finalized block. 
- pub finalized_timestamp: u64, - /// Map of all tracked chains and their individual [`SupervisorChainSyncStatus`]. - pub chains: HashMap, -} - -/// Describes the sync status for a specific chain. -/// -/// Specs: -#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] -#[cfg_attr( - feature = "serde", - derive(serde::Serialize, serde::Deserialize), - serde(rename_all = "camelCase") -)] -pub struct SupervisorChainSyncStatus { - /// Highest [`Unsafe`] head of chain. - /// - /// [`Unsafe`]: op_alloy_consensus::interop::SafetyLevel::LocalUnsafe - pub local_unsafe: BlockInfo, - /// Highest [`CrossUnsafe`] head of chain. - /// - /// [`CrossUnsafe`]: op_alloy_consensus::interop::SafetyLevel::CrossUnsafe - pub cross_unsafe: BlockNumHash, - /// Highest [`LocalSafe`] head of chain. - /// - /// [`LocalSafe`]: op_alloy_consensus::interop::SafetyLevel::LocalSafe - pub local_safe: BlockNumHash, - /// Highest [`Safe`] head of chain [`BlockNumHash`]. - /// - /// NOTE: Some fault-proof releases may already depend on `safe`, so we keep JSON field name as - /// `safe`. - /// - /// [`Safe`]: op_alloy_consensus::interop::SafetyLevel::CrossSafe - #[cfg_attr(feature = "serde", serde(rename = "safe"))] - pub cross_safe: BlockNumHash, - /// Highest [`Finalized`] head of chain [`BlockNumHash`]. - /// - /// [`Finalized`]: op_alloy_consensus::interop::SafetyLevel::Finalized - pub finalized: BlockNumHash, -} - -impl From for SupervisorChainSyncStatus { - fn from(super_head: SuperHead) -> Self { - let SuperHead { local_unsafe, cross_unsafe, local_safe, cross_safe, finalized, .. 
} = - super_head; - - let cross_unsafe = cross_unsafe.unwrap_or_else(BlockInfo::default); - let local_safe = local_safe.unwrap_or_else(BlockInfo::default); - let cross_safe = cross_safe.unwrap_or_else(BlockInfo::default); - let finalized = finalized.unwrap_or_else(BlockInfo::default); - - Self { - local_unsafe, - local_safe: local_safe.id(), - cross_unsafe: cross_unsafe.id(), - cross_safe: cross_safe.id(), - finalized: finalized.id(), - } - } -} - -/// This is same as [`kona_interop::ChainRootInfo`] but with [`u64`] serializing as a valid hex -/// string. -/// -/// Required by -/// [`super_root_at_timestamp`](crate::jsonrpsee::SupervisorApiServer::super_root_at_timestamp) RPC -/// for marshalling and unmarshalling in GO implementation. Required for e2e tests. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct ChainRootInfoRpc { - /// The chain ID. - #[serde(rename = "chainID", with = "alloy_serde::quantity")] - pub chain_id: ChainId, - /// The canonical output root of the latest canonical block at a particular timestamp. - pub canonical: B256, - /// The pending output root. - /// - /// This is the output root preimage for the latest block at a particular timestamp prior to - /// validation of executing messages. If the original block was valid, this will be the - /// preimage of the output root from the `canonical` array. If it was invalid, it will be - /// the output root preimage from the optimistic block deposited transaction added to the - /// deposit-only block. - pub pending: Bytes, -} - -/// This is same as [`kona_interop::SuperRootOutput`] but with timestamp serializing as a valid hex -/// string. version is also serialized as an even length hex string. -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct SuperRootOutputRpc { - /// The Highest L1 Block that is cross-safe among all chains. 
- pub cross_safe_derived_from: BlockNumHash, - /// The timestamp of the super root. - #[serde(with = "alloy_serde::quantity")] - pub timestamp: u64, - /// The super root hash. - pub super_root: B256, - /// The version of the super root. - #[serde(serialize_with = "serialize_u8_as_hex")] - pub version: u8, - /// The chain root info for each chain in the dependency set. - /// It represents the state of the chain at or before the timestamp. - pub chains: Vec, -} - -/// Serializes a [u8] as a hex string. Ensure that the hex string has an even length. -/// -/// This is used to serialize the [`SuperRootOutputRpc`]'s version field as a hex string. -fn serialize_u8_as_hex(value: &u8, serializer: S) -> Result -where - S: Serializer, -{ - let hex_string = format!("0x{value:02x}"); - serializer.serialize_str(&hex_string) -} - -#[cfg(test)] -mod test { - use super::*; - use alloy_primitives::b256; - use kona_interop::SUPER_ROOT_VERSION; - - const CHAIN_STATUS: &str = r#" - { - "localUnsafe": { - "number": 100, - "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - "timestamp": 40044440000, - "parentHash": "0x111def1234567890abcdef1234567890abcdef1234500000abcdef123456aaaa" - }, - "crossUnsafe": { - "number": 90, - "hash": "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890" - }, - "localSafe": { - "number": 80, - "hash": "0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef13" - }, - "safe": { - "number": 70, - "hash": "0x567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234" - }, - "finalized": { - "number": 60, - "hash": "0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef12" - } - }"#; - - const STATUS: &str = r#" - { - "minSyncedL1": { - "number": 100, - "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - "timestamp": 40044440000, - "parentHash": "0x111def1234567890abcdef1234567890abcdef1234500000abcdef123456aaaa" - }, - "safeTimestamp": 40044450000, - 
"finalizedTimestamp": 40044460000, - "chains" : { - "1": { - "localUnsafe": { - "number": 100, - "hash": "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - "timestamp": 40044440000, - "parentHash": "0x111def1234567890abcdef1234567890abcdef1234500000abcdef123456aaaa" - }, - "crossUnsafe": { - "number": 90, - "hash": "0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890" - }, - "localSafe": { - "number": 80, - "hash": "0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef13" - }, - "safe": { - "number": 70, - "hash": "0x567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234" - }, - "finalized": { - "number": 60, - "hash": "0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef12" - } - } - } - }"#; - - #[cfg(feature = "serde")] - #[test] - fn test_serialize_supervisor_chain_sync_status() { - assert_eq!( - serde_json::from_str::(CHAIN_STATUS) - .expect("should deserialize"), - SupervisorChainSyncStatus { - local_unsafe: BlockInfo { - number: 100, - hash: b256!( - "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" - ), - timestamp: 40044440000, - parent_hash: b256!( - "0x111def1234567890abcdef1234567890abcdef1234500000abcdef123456aaaa" - ), - }, - cross_unsafe: BlockNumHash::new( - 90, - b256!("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890") - ), - local_safe: BlockNumHash::new( - 80, - b256!("0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef13") - ), - cross_safe: BlockNumHash::new( - 70, - b256!("0x567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234") - ), - finalized: BlockNumHash::new( - 60, - b256!("0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef12") - ), - } - ) - } - - #[cfg(feature = "serde")] - #[test] - fn test_serialize_supervisor_sync_status() { - let mut chains = HashMap::default(); - - chains.insert( - 1, - SupervisorChainSyncStatus { - local_unsafe: BlockInfo { - number: 100, - hash: b256!( - 
"0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" - ), - timestamp: 40044440000, - parent_hash: b256!( - "0x111def1234567890abcdef1234567890abcdef1234500000abcdef123456aaaa" - ), - }, - cross_unsafe: BlockNumHash::new( - 90, - b256!("0xabcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890"), - ), - local_safe: BlockNumHash::new( - 80, - b256!("0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef13"), - ), - cross_safe: BlockNumHash::new( - 70, - b256!("0x567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234"), - ), - finalized: BlockNumHash::new( - 60, - b256!("0x34567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef12"), - ), - }, - ); - - assert_eq!( - serde_json::from_str::(STATUS).expect("should deserialize"), - SupervisorSyncStatus { - min_synced_l1: BlockInfo { - number: 100, - hash: b256!( - "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" - ), - timestamp: 40044440000, - parent_hash: b256!( - "0x111def1234567890abcdef1234567890abcdef1234500000abcdef123456aaaa" - ), - }, - cross_safe_timestamp: 40044450000, - finalized_timestamp: 40044460000, - chains, - } - ) - } - - #[test] - fn test_super_root_version_even_length_hex() { - let root = SuperRootOutputRpc { - cross_safe_derived_from: BlockNumHash::default(), - timestamp: 0, - super_root: B256::default(), - version: SUPER_ROOT_VERSION, - chains: vec![], - }; - let json = serde_json::to_string(&root).expect("should serialize"); - let v: serde_json::Value = serde_json::from_str(&json).expect("valid json"); - let version_field = - v.get("version").expect("version field present").as_str().expect("version is string"); - let hex_part = &version_field[2..]; // remove 0x - assert_eq!(hex_part.len() % 2, 0, "Hex string should have even length"); - // For SUPER_ROOT_VERSION = 1, should be 0x01 - assert_eq!(version_field, "0x01"); - } -} diff --git a/rust/kona/crates/supervisor/rpc/src/server.rs 
b/rust/kona/crates/supervisor/rpc/src/server.rs deleted file mode 100644 index 8684e9f7d9ebb..0000000000000 --- a/rust/kona/crates/supervisor/rpc/src/server.rs +++ /dev/null @@ -1,55 +0,0 @@ -//! Minimal supervisor RPC server implementation - -#[cfg(feature = "server")] -use alloy_rpc_types_engine::JwtSecret; -#[cfg(feature = "server")] -use jsonrpsee::server::ServerHandle; -#[cfg(feature = "server")] -use kona_interop::{ControlEvent, ManagedEvent}; -#[cfg(feature = "server")] -use std::net::SocketAddr; -#[cfg(feature = "server")] -use tokio::sync::broadcast; - -/// Minimal supervisor RPC server -#[cfg(feature = "server")] -#[derive(Debug)] -pub struct SupervisorRpcServer { - /// A channel to receive [`ManagedEvent`] from the node. - #[allow(dead_code)] - managed_events: broadcast::Receiver, - /// A channel to send [`ControlEvent`]. - #[allow(dead_code)] - control_events: broadcast::Sender, - /// A JWT token for authentication. - #[allow(dead_code)] - jwt_token: JwtSecret, - /// The socket address for the RPC server. - socket: SocketAddr, -} - -#[cfg(feature = "server")] -impl SupervisorRpcServer { - /// Creates a new instance of the `SupervisorRpcServer`. - pub const fn new( - managed_events: broadcast::Receiver, - control_events: broadcast::Sender, - jwt_token: JwtSecret, - socket: SocketAddr, - ) -> Self { - Self { managed_events, control_events, jwt_token, socket } - } - - /// Returns the socket address for the RPC server. - pub const fn socket(&self) -> SocketAddr { - self.socket - } - - /// Launches the RPC server with the given socket address. 
- pub async fn launch(self) -> std::io::Result { - let server = jsonrpsee::server::ServerBuilder::default().build(self.socket).await?; - // For now, start without any RPC methods - this is a minimal implementation - let module = jsonrpsee::RpcModule::new(()); - Ok(server.start(module)) - } -} diff --git a/rust/kona/crates/supervisor/service/Cargo.toml b/rust/kona/crates/supervisor/service/Cargo.toml deleted file mode 100644 index f442af6ec5c49..0000000000000 --- a/rust/kona/crates/supervisor/service/Cargo.toml +++ /dev/null @@ -1,43 +0,0 @@ -[package] -name = "kona-supervisor-service" -version = "0.1.0" - -edition.workspace = true -license.workspace = true -rust-version.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -keywords.workspace = true -categories.workspace = true -exclude.workspace = true - -[dependencies] -# Workspace dependencies -kona-supervisor-core = { workspace = true } -kona-supervisor-rpc = { workspace = true, features = ["jsonrpsee"] } -kona-supervisor-storage = { workspace = true } -kona-supervisor-metrics = { workspace = true } -kona-interop.workspace = true -kona-supervisor-types.workspace = true - -# External dependencies -jsonrpsee = { workspace = true, features = ["macros", "server"] } -mockall = { workspace = true } -anyhow = { workspace = true } -tracing = { workspace = true} -alloy-eips = { workspace = true } -alloy-primitives = { workspace = true } -alloy-provider = { workspace = true } -alloy-rpc-types-eth = { workspace = true } -async-trait = { workspace = true } -futures = { workspace = true } -kona-genesis = { workspace = true } -kona-protocol = { workspace = true } -thiserror = { workspace = true } -tokio = { workspace = true, features = ["sync", "macros"] } -tokio-util = { workspace = true } -derive_more.workspace = true - -# Dev dependencies -alloy-rpc-client = { workspace = true } diff --git a/rust/kona/crates/supervisor/service/src/actors/metric.rs 
b/rust/kona/crates/supervisor/service/src/actors/metric.rs deleted file mode 100644 index 7b95b273b56a7..0000000000000 --- a/rust/kona/crates/supervisor/service/src/actors/metric.rs +++ /dev/null @@ -1,107 +0,0 @@ -use async_trait::async_trait; -use kona_supervisor_metrics::MetricsReporter; -use std::{io, sync::Arc, time::Duration}; -use tokio::time::sleep; -use tokio_util::sync::CancellationToken; -use tracing::info; - -use crate::SupervisorActor; - -#[derive(derive_more::Constructor)] -pub struct MetricWorker { - interval: Duration, - // list of reporters - reporters: Vec>, - cancel_token: CancellationToken, -} - -#[async_trait] -impl SupervisorActor for MetricWorker -where - R: MetricsReporter + Send + Sync + 'static, -{ - type InboundEvent = (); - type Error = io::Error; - - async fn start(mut self) -> Result<(), Self::Error> { - info!( - target: "supervisor::metric_worker", - "Starting MetricWorker with interval: {:?}", - self.interval - ); - - let reporters = self.reporters; - let interval = self.interval; - - loop { - if self.cancel_token.is_cancelled() { - info!("MetricReporter actor is stopping due to cancellation."); - break; - } - - for reporter in &reporters { - reporter.report_metrics(); - } - sleep(interval).await; - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use mockall::{mock, predicate::*}; - use std::{sync::Arc, time::Duration}; - use tokio_util::sync::CancellationToken; - - mock! 
( - #[derive(Debug)] - pub Reporter {} - - impl MetricsReporter for Reporter { - fn report_metrics(&self); - } - ); - - #[tokio::test] - async fn test_metric_worker_reports_metrics_and_stops_on_cancel() { - let mut mock_reporter = MockReporter::new(); - mock_reporter.expect_report_metrics().return_const(()); - - let reporter = Arc::new(mock_reporter); - let cancel_token = CancellationToken::new(); - - let worker = MetricWorker::new( - Duration::from_millis(50), - vec![reporter.clone()], - cancel_token.clone(), - ); - - let handle = tokio::spawn(worker.start()); - - tokio::time::sleep(Duration::from_millis(120)).await; - cancel_token.cancel(); - - let _ = handle.await; - } - - #[tokio::test] - async fn test_metric_worker_stops_immediately_on_cancel() { - let mut mock_reporter = MockReporter::new(); - mock_reporter.expect_report_metrics().times(0); - - let reporter = Arc::new(mock_reporter); - let cancel_token = CancellationToken::new(); - - let worker = MetricWorker::new( - Duration::from_millis(100), - vec![reporter.clone()], - cancel_token.clone(), - ); - - cancel_token.cancel(); - - let _ = worker.start().await; - } -} diff --git a/rust/kona/crates/supervisor/service/src/actors/mod.rs b/rust/kona/crates/supervisor/service/src/actors/mod.rs deleted file mode 100644 index dae73ffddf028..0000000000000 --- a/rust/kona/crates/supervisor/service/src/actors/mod.rs +++ /dev/null @@ -1,20 +0,0 @@ -//! [SupervisorActor] services for the supervisor. -//! -//! 
[SupervisorActor]: super::SupervisorActor - -mod traits; -pub use traits::SupervisorActor; - -mod metric; -pub use metric::MetricWorker; - -mod processor; -pub use processor::ChainProcessorActor; - -mod node; -pub use node::ManagedNodeActor; - -mod rpc; -pub use rpc::SupervisorRpcActor; - -pub(super) mod utils; diff --git a/rust/kona/crates/supervisor/service/src/actors/node.rs b/rust/kona/crates/supervisor/service/src/actors/node.rs deleted file mode 100644 index c16a14a83a235..0000000000000 --- a/rust/kona/crates/supervisor/service/src/actors/node.rs +++ /dev/null @@ -1,361 +0,0 @@ -use anyhow::Error; -use async_trait::async_trait; -use derive_more::Constructor; -use kona_interop::ManagedEvent; -use kona_supervisor_core::syncnode::{ - ManagedNodeClient, ManagedNodeCommand, ManagedNodeController, SubscriptionHandler, -}; -use std::sync::Arc; -use thiserror::Error; -use tokio::sync::mpsc; -use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; - -use crate::{SupervisorActor, actors::utils::spawn_task_with_retry}; - -/// Actor for managing a node in the supervisor environment. 
-#[derive(Debug, Constructor)] -pub struct ManagedNodeActor { - client: Arc, - node: Arc, - command_rx: mpsc::Receiver, - cancel_token: CancellationToken, -} - -#[async_trait] -impl SupervisorActor for ManagedNodeActor -where - C: ManagedNodeClient + 'static, - N: ManagedNodeController + SubscriptionHandler + 'static, -{ - type InboundEvent = ManagedNodeCommand; - type Error = SupervisorRpcActorError; - - async fn start(mut self) -> Result<(), Self::Error> { - // Task 1: Subscription handling - let node = self.node.clone(); - let client = self.client.clone(); - let cancel_token = self.cancel_token.clone(); - - spawn_task_with_retry( - move || { - let handler = node.clone(); - let client = client.clone(); - - async move { run_subscription_task(client, handler).await } - }, - cancel_token, - usize::MAX, - ); - - // Task 2: Command handling - let node = self.node.clone(); - let cancel_token = self.cancel_token.clone(); - run_command_task(node, self.command_rx, cancel_token).await?; - Ok(()) - } -} - -async fn run_command_task( - node: Arc, - mut command_rx: mpsc::Receiver, - cancel_token: CancellationToken, -) -> Result<(), SupervisorRpcActorError> -where - N: ManagedNodeController + SubscriptionHandler + 'static, -{ - info!(target: "supervisor::syncnode_actor", "Starting command task for managed node"); - loop { - tokio::select! 
{ - _ = cancel_token.cancelled() => { - info!(target: "supervisor::syncnode", "Cancellation requested, shutting down command task"); - return Ok(()); - } - maybe_cmd = command_rx.recv() => { - match maybe_cmd { - Some(cmd) => { - match cmd { - ManagedNodeCommand::UpdateFinalized { block_id } => { - let result = node.update_finalized(block_id).await; - if let Err(err) = result { - warn!( - target: "supervisor::syncnode", - %err, - "Failed to update finalized block" - ); - } - } - ManagedNodeCommand::UpdateCrossUnsafe { block_id } => { - let result = node.update_cross_unsafe(block_id).await; - if let Err(err) = result { - warn!( - target: "supervisor::syncnode", - %err, - "Failed to update cross unsafe block" - ); - } - } - ManagedNodeCommand::UpdateCrossSafe { source_block_id, derived_block_id } => { - let result = node.update_cross_safe(source_block_id, derived_block_id).await; - if let Err(err) = result { - warn!( - target: "supervisor::syncnode", - %err, - "Failed to update cross safe block" - ); - } - } - ManagedNodeCommand::Reset {} => { - let result = node.reset().await; - if let Err(err) = result { - warn!( - target: "supervisor::syncnode", - %err, - "Failed to reset managed node" - ); - } - } - ManagedNodeCommand::InvalidateBlock { seal } => { - let result = node.invalidate_block(seal).await; - if let Err(err) = result { - warn!( - target: "supervisor::syncnode", - %err, - "Failed to invalidate block" - ); - } - } - } - } - None => { - info!(target: "supervisor::syncnode", "Command channel closed, shutting down command task"); - return Err(SupervisorRpcActorError::CommandReceiverClosed); - } - } - } - } - } -} - -async fn run_subscription_task( - client: Arc, - handler: Arc, -) -> Result<(), Error> { - info!(target: "supervisor::syncnode", "Starting subscription task for managed node"); - - let mut subscription = client.subscribe_events().await.inspect_err(|err| { - error!( - target: "supervisor::syncnode", - %err, - "Failed to subscribe to node events" - ); 
- })?; - - loop { - tokio::select! { - incoming_event = subscription.next() => { - match incoming_event { - Some(Ok(subscription_event)) => { - if let Some(event) = subscription_event.data { - handle_subscription_event(&handler, event).await; - } - } - Some(Err(err)) => { - error!( - target: "supervisor::managed_event_task", - %err, - "Error in event deserialization" - ); - return Err(err.into()); - } - None => { - warn!(target: "supervisor::managed_event_task", "Subscription closed by server"); - client.reset_ws_client().await; - break; - } - } - } - } - } - Ok(()) -} - -async fn handle_subscription_event(handler: &Arc, event: ManagedEvent) { - if let Some(reset_id) = &event.reset && - let Err(err) = handler.handle_reset(reset_id).await - { - warn!( - target: "supervisor::syncnode", - %err, - %reset_id, - "Failed to handle reset event" - ); - } - - if let Some(unsafe_block) = &event.unsafe_block && - let Err(err) = handler.handle_unsafe_block(unsafe_block).await - { - warn!( - target: "supervisor::syncnode", - %err, - %unsafe_block, - "Failed to handle unsafe block event" - ); - } - - if let Some(derived_ref_pair) = &event.derivation_update && - event.derivation_origin_update.is_none() && - let Err(err) = handler.handle_derivation_update(derived_ref_pair).await - { - warn!( - target: "supervisor::syncnode", - %err, - %derived_ref_pair, - "Failed to handle derivation update event" - ); - } - - if let Some(origin) = &event.derivation_origin_update && - let Err(err) = handler.handle_derivation_origin_update(origin).await - { - warn!( - target: "supervisor::syncnode", - %err, - %origin, - "Failed to handle derivation origin update event" - ); - } - - if let Some(derived_ref_pair) = &event.exhaust_l1 && - let Err(err) = handler.handle_exhaust_l1(derived_ref_pair).await - { - warn!( - target: "supervisor::syncnode", - %err, - %derived_ref_pair, - "Failed to handle L1 exhaust event" - ); - } - - if let Some(replacement) = &event.replace_block && - let Err(err) = 
handler.handle_replace_block(replacement).await - { - warn!( - target: "supervisor::syncnode", - %err, - %replacement, - "Failed to handle block replacement event" - ); - } -} - -#[derive(Debug, Error)] -pub enum SupervisorRpcActorError { - /// Error indicating that command receiver is closed. - #[error("managed node command receiver closed")] - CommandReceiverClosed, -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_eips::BlockNumHash; - use alloy_primitives::{B256, ChainId}; - use jsonrpsee::core::client::Subscription; - use kona_interop::{BlockReplacement, DerivedRefPair}; - use kona_protocol::BlockInfo; - use kona_supervisor_core::syncnode::{ - ClientError, ManagedNodeClient, ManagedNodeCommand, ManagedNodeController, - ManagedNodeError, SubscriptionHandler, - }; - use kona_supervisor_types::{BlockSeal, OutputV0, Receipts, SubscriptionEvent}; - use mockall::{mock, predicate::*}; - use std::sync::Arc; - use tokio::sync::mpsc; - use tokio_util::sync::CancellationToken; - - // Mock the ManagedNodeController trait - mock! 
{ - #[derive(Debug)] - pub Node {} - - #[async_trait::async_trait] - impl ManagedNodeController for Node { - async fn update_finalized(&self, finalized_block_id: BlockNumHash) -> Result<(), ManagedNodeError>; - async fn update_cross_unsafe(&self, cross_unsafe_block_id: BlockNumHash) -> Result<(), ManagedNodeError>; - async fn update_cross_safe(&self,source_block_id: BlockNumHash,derived_block_id: BlockNumHash) -> Result<(), ManagedNodeError>; - async fn reset(&self) -> Result<(), ManagedNodeError>; - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ManagedNodeError>; - } - - #[async_trait::async_trait] - impl SubscriptionHandler for Node { - async fn handle_exhaust_l1(&self, derived_ref_pair: &DerivedRefPair) -> Result<(), ManagedNodeError>; - async fn handle_reset(&self, reset_id: &str) -> Result<(), ManagedNodeError>; - async fn handle_unsafe_block(&self, block: &BlockInfo) -> Result<(), ManagedNodeError>; - async fn handle_derivation_update(&self, derived_ref_pair: &DerivedRefPair) -> Result<(), ManagedNodeError>; - async fn handle_replace_block(&self, replacement: &BlockReplacement) -> Result<(), ManagedNodeError>; - async fn handle_derivation_origin_update(&self, origin: &BlockInfo) -> Result<(), ManagedNodeError>; - } - } - - mock! 
{ - #[derive(Debug)] - pub NodeClient {} - - #[async_trait::async_trait] - impl ManagedNodeClient for NodeClient { - async fn chain_id(&self) -> Result; - async fn subscribe_events(&self) -> Result, ClientError>; - async fn fetch_receipts(&self, block_hash: B256) -> Result; - async fn output_v0_at_timestamp(&self, timestamp: u64) -> Result; - async fn pending_output_v0_at_timestamp(&self, timestamp: u64)-> Result; - async fn l2_block_ref_by_timestamp(&self, timestamp: u64) -> Result; - async fn block_ref_by_number(&self, block_number: u64) -> Result; - async fn reset_pre_interop(&self) -> Result<(), ClientError>; - async fn reset( - &self, - unsafe_id: BlockNumHash, - cross_unsafe_id: BlockNumHash, - local_safe_id: BlockNumHash, - cross_safe_id: BlockNumHash, - finalised_id: BlockNumHash, - ) -> Result<(), ClientError>; - async fn invalidate_block(&self, seal: BlockSeal) -> Result<(), ClientError>; - async fn provide_l1(&self, block_info: BlockInfo) -> Result<(), ClientError>; - async fn update_finalized(&self, finalized_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn update_cross_unsafe(&self,cross_unsafe_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn update_cross_safe(&self,source_block_id: BlockNumHash,derived_block_id: BlockNumHash) -> Result<(), ClientError>; - async fn reset_ws_client(&self); - } - } - - #[tokio::test] - async fn test_run_command_task_update_finalized_and_reset() { - let mut mock_node = MockNode::new(); - mock_node.expect_update_finalized().times(1).returning(|_| Ok(())); - mock_node.expect_reset().times(1).returning(|| Ok(())); - - let node = Arc::new(mock_node); - let (tx, rx) = mpsc::channel(10); - let cancel_token = CancellationToken::new(); - - // Spawn the command task - let handle = tokio::spawn(super::run_command_task(node.clone(), rx, cancel_token.clone())); - - // Send commands - tx.send(ManagedNodeCommand::UpdateFinalized { - block_id: BlockNumHash::new(1, B256::random()), - }) - .await - .unwrap(); - 
tx.send(ManagedNodeCommand::Reset {}).await.unwrap(); - - // Drop the sender to close the channel and end the task - drop(tx); - - // Wait for the task to finish - let result = handle.await.unwrap(); - assert!(matches!(result, Err(SupervisorRpcActorError::CommandReceiverClosed))); - } -} diff --git a/rust/kona/crates/supervisor/service/src/actors/processor.rs b/rust/kona/crates/supervisor/service/src/actors/processor.rs deleted file mode 100644 index e3a39dd94bf04..0000000000000 --- a/rust/kona/crates/supervisor/service/src/actors/processor.rs +++ /dev/null @@ -1,324 +0,0 @@ -use async_trait::async_trait; -use kona_interop::InteropValidator; -use kona_supervisor_core::{ChainProcessor, event::ChainEvent, syncnode::BlockProvider}; -use kona_supervisor_storage::{ - DerivationStorage, HeadRefStorageWriter, LogStorage, StorageRewinder, -}; -use thiserror::Error; -use tokio::sync::mpsc; -use tokio_util::sync::CancellationToken; -use tracing::info; - -use crate::SupervisorActor; - -/// Represents an actor that processes chain events using the [`ChainProcessor`]. -/// It listens for [`ChainEvent`]s and handles them accordingly. -#[derive(Debug)] -pub struct ChainProcessorActor { - chain_processor: ChainProcessor, - cancel_token: CancellationToken, - event_rx: mpsc::Receiver, -} - -impl ChainProcessorActor -where - P: BlockProvider + 'static, - V: InteropValidator + 'static, - W: LogStorage + DerivationStorage + HeadRefStorageWriter + StorageRewinder + 'static, -{ - /// Creates a new [`ChainProcessorActor`]. 
- pub const fn new( - chain_processor: ChainProcessor, - cancel_token: CancellationToken, - event_rx: mpsc::Receiver, - ) -> Self { - Self { chain_processor, cancel_token, event_rx } - } -} - -#[async_trait] -impl SupervisorActor for ChainProcessorActor -where - P: BlockProvider + 'static, - V: InteropValidator + 'static, - W: LogStorage + DerivationStorage + HeadRefStorageWriter + StorageRewinder + 'static, -{ - type InboundEvent = ChainEvent; - type Error = ChainProcessorActorError; - - async fn start(mut self) -> Result<(), Self::Error> { - info!( - target: "supervisor::chain_processor_actor", - "Starting ChainProcessorActor" - ); - - loop { - tokio::select! { - maybe_event = self.event_rx.recv() => { - if let Some(event) = maybe_event { - self.chain_processor.handle_event(event).await; - } else { - info!( - target: "supervisor::chain_processor_actor", - "Chain event receiver closed, stopping ChainProcessorActor" - ); - return Err(ChainProcessorActorError::ReceiverClosed); - } - } - _ = self.cancel_token.cancelled() => { - info!( - target: "supervisor::chain_processor_actor", - "ChainProcessorActor cancellation requested, stopping..." - ); - break; - } - } - } - - Ok(()) - } -} - -#[derive(Debug, Error)] -pub enum ChainProcessorActorError { - /// Error when the chain event receiver is closed. 
- #[error("Chain event receiver closed")] - ReceiverClosed, -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::SupervisorActor; - use alloy_eips::BlockNumHash; - use alloy_primitives::{B256, ChainId}; - use kona_interop::{DerivedRefPair, InteropValidationError}; - use kona_protocol::BlockInfo; - use kona_supervisor_core::{ - LogIndexer, - syncnode::{BlockProvider, ManagedNodeCommand, ManagedNodeDataProvider, ManagedNodeError}, - }; - use kona_supervisor_storage::{ - DerivationStorageReader, DerivationStorageWriter, HeadRefStorageWriter, LogStorageReader, - LogStorageWriter, StorageError, StorageRewinder, - }; - use kona_supervisor_types::{Log, OutputV0, Receipts}; - use mockall::{mock, predicate::*}; - use std::sync::Arc; - use tokio::sync::mpsc; - use tokio_util::sync::CancellationToken; - - mock!( - #[derive(Debug)] - pub Node {} - - #[async_trait] - impl BlockProvider for Node { - async fn fetch_receipts(&self, _block_hash: B256) -> Result; - async fn block_by_number(&self, _number: u64) -> Result; - } - - #[async_trait] - impl ManagedNodeDataProvider for Node { - async fn output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn pending_output_v0_at_timestamp( - &self, - _timestamp: u64, - ) -> Result; - - async fn l2_block_ref_by_timestamp( - &self, - _timestamp: u64, - ) -> Result; - } - ); - - mock!( - #[derive(Debug)] - pub Db {} - - impl LogStorageWriter for Db { - fn initialise_log_storage( - &self, - block: BlockInfo, - ) -> Result<(), StorageError>; - - fn store_block_logs( - &self, - block: &BlockInfo, - logs: Vec, - ) -> Result<(), StorageError>; - } - - impl LogStorageReader for Db { - fn get_block(&self, block_number: u64) -> Result; - fn get_latest_block(&self) -> Result; - fn get_log(&self,block_number: u64,log_index: u32) -> Result; - fn get_logs(&self, block_number: u64) -> Result, StorageError>; - } - - impl DerivationStorageReader for Db { - fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result; 
- fn latest_derived_block_at_source(&self, source_block_id: BlockNumHash) -> Result; - fn latest_derivation_state(&self) -> Result; - fn get_source_block(&self, source_block_number: u64) -> Result; - fn get_activation_block(&self) -> Result; - } - - impl DerivationStorageWriter for Db { - fn initialise_derivation_storage( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_derived_block( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - fn save_source_block( - &self, - source: BlockInfo, - ) -> Result<(), StorageError>; - } - - impl HeadRefStorageWriter for Db { - fn update_finalized_using_source( - &self, - block_info: BlockInfo, - ) -> Result; - - fn update_current_cross_unsafe( - &self, - block: &BlockInfo, - ) -> Result<(), StorageError>; - - fn update_current_cross_safe( - &self, - block: &BlockInfo, - ) -> Result; - } - - impl StorageRewinder for Db { - fn rewind_log_storage(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind(&self, to: &BlockNumHash) -> Result<(), StorageError>; - fn rewind_to_source(&self, to: &BlockNumHash) -> Result, StorageError>; - } - ); - - mock!( - #[derive(Debug)] - pub Validator {} - - impl InteropValidator for Validator { - fn validate_interop_timestamps( - &self, - initiating_chain_id: ChainId, - initiating_timestamp: u64, - executing_chain_id: ChainId, - executing_timestamp: u64, - timeout: Option, - ) -> Result<(), InteropValidationError>; - - fn is_post_interop(&self, chain_id: ChainId, timestamp: u64) -> bool; - - fn is_interop_activation_block(&self, chain_id: ChainId, block: BlockInfo) -> bool; - } - ); - - #[tokio::test] - async fn test_actor_handles_event() { - let mock_node = MockNode::new(); - let mock_db = MockDb::new(); - let validator = MockValidator::new(); - let (mn_sender, mut mn_receiver) = mpsc::channel(1); - - let db = Arc::new(mock_db); - let log_indexer = LogIndexer::new(1, Some(Arc::new(mock_node)), db.clone()); - - let processor 
= - ChainProcessor::new(Arc::new(validator), 1, Arc::new(log_indexer), db, mn_sender); - - let cancel_token = CancellationToken::new(); - let (tx, rx) = mpsc::channel(1); - - let actor = ChainProcessorActor::new(processor, cancel_token.clone(), rx); - - // Send an event - let block = BlockInfo { - number: 1, - hash: B256::from([0; 32]), - timestamp: 1000, - ..Default::default() - }; - tx.send(ChainEvent::CrossUnsafeUpdate { block }).await.unwrap(); - - // Cancel after a short delay to exit the loop - let cancel = cancel_token.clone(); - tokio::spawn(async move { - tokio::time::sleep(std::time::Duration::from_millis(50)).await; - cancel.cancel(); - }); - - let result = actor.start().await; - assert!(result.is_ok()); - - if let Some(ManagedNodeCommand::UpdateCrossUnsafe { block_id }) = mn_receiver.recv().await { - assert_eq!(block_id, block.id()); - } else { - panic!("Expected UpdateCrossUnsafe command"); - } - } - - #[tokio::test] - async fn test_actor_receiver_closed() { - let mock_node = MockNode::new(); - let mock_db = MockDb::new(); - let validator = MockValidator::new(); - let (mn_sender, _mn_receiver) = mpsc::channel(1); - - let db = Arc::new(mock_db); - let log_indexer = LogIndexer::new(1, Some(Arc::new(mock_node)), db.clone()); - - let processor = - ChainProcessor::new(Arc::new(validator), 1, Arc::new(log_indexer), db, mn_sender); - - let cancel_token = CancellationToken::new(); - let (tx, rx) = mpsc::channel::(1); // No sender, so channel is closed - drop(tx); - - let actor = ChainProcessorActor::new(processor, cancel_token, rx); - - let result = actor.start().await; - assert!(matches!(result, Err(ChainProcessorActorError::ReceiverClosed))); - } - - #[tokio::test] - async fn test_actor_cancellation() { - let mock_node = MockNode::new(); - let mock_db = MockDb::new(); - let validator = MockValidator::new(); - let (mn_sender, _mn_receiver) = mpsc::channel(1); - - let db = Arc::new(mock_db); - let log_indexer = LogIndexer::new(1, Some(Arc::new(mock_node)), 
db.clone()); - - let processor = - ChainProcessor::new(Arc::new(validator), 1, Arc::new(log_indexer), db, mn_sender); - - let cancel_token = CancellationToken::new(); - let (_tx, rx) = mpsc::channel::(1); - - let actor = ChainProcessorActor::new(processor, cancel_token.clone(), rx); - - // Cancel immediately - cancel_token.cancel(); - - let result = actor.start().await; - assert!(result.is_ok()); - } -} diff --git a/rust/kona/crates/supervisor/service/src/actors/rpc.rs b/rust/kona/crates/supervisor/service/src/actors/rpc.rs deleted file mode 100644 index 59cce1022b17e..0000000000000 --- a/rust/kona/crates/supervisor/service/src/actors/rpc.rs +++ /dev/null @@ -1,140 +0,0 @@ -use std::{io, net::SocketAddr}; - -use async_trait::async_trait; -use derive_more::Constructor; -use jsonrpsee::{RpcModule, server::ServerBuilder}; -use thiserror::Error; -use tokio_util::sync::CancellationToken; -use tracing::{error, info}; - -use crate::SupervisorActor; - -#[derive(Debug, Constructor)] -pub struct SupervisorRpcActor { - rpc_addr: SocketAddr, - rpc_module: RpcModule, - cancel_token: CancellationToken, -} - -#[async_trait] -impl SupervisorActor for SupervisorRpcActor -where - D: Send + Sync + 'static, -{ - type InboundEvent = (); - type Error = SupervisorRpcActorError; - - async fn start(mut self) -> Result<(), Self::Error> { - info!( - target: "supervisor::rpc_actor", - addr = %self.rpc_addr, - "RPC server bound to address", - ); - - // let supervisor_rpc = SupervisorRpc::new(self.supervisor.clone()); - let server = ServerBuilder::default().build(self.rpc_addr).await?; - // let mut root = supervisor_rpc.into_rpc(); - let handle = server.start(self.rpc_module); - - let stopped = handle.clone().stopped(); - let cancelled = self.cancel_token.cancelled(); - - tokio::select! 
{ - _ = stopped => { - error!(target: "supervisor::rpc_actor", "RPC server stopped unexpectedly"); - return Err(SupervisorRpcActorError::ServerStopped); - } - _ = cancelled => { - match handle.stop() { - Ok(_) => info!(target: "supervisor::rpc_actor", "RPC server stopped gracefully"), - Err(e) => { - error!(target: "supervisor::rpc_actor", %e, "Failed to stop RPC server gracefully"); - return Err(SupervisorRpcActorError::StopFailed); - } - } - info!(target: "supervisor::rpc_actor", "Cancellation requested, stopping RPC server..."); - } - } - - Ok(()) - } -} - -#[derive(Debug, Error)] -pub enum SupervisorRpcActorError { - /// Failed to build the RPC server. - #[error(transparent)] - BuildFailed(#[from] io::Error), - - /// Indicates that the RPC server failed to start. - #[error("rpc server stopped unexpectedly")] - ServerStopped, - - /// Indicates that the RPC server failed to stop gracefully. - #[error("failed to stop the RPC server")] - StopFailed, -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_eips::BlockNumHash; - use alloy_primitives::{B256, ChainId}; - use async_trait::async_trait; - use kona_interop::{DependencySet, ExecutingDescriptor, SafetyLevel}; - use kona_protocol::BlockInfo; - use kona_supervisor_core::{SupervisorError, SupervisorService}; - use kona_supervisor_rpc::{SuperRootOutputRpc, SupervisorApiServer}; - use kona_supervisor_types::SuperHead; - use mockall::mock; - use std::{ - net::{Ipv4Addr, SocketAddr}, - sync::Arc, - }; - use tokio_util::sync::CancellationToken; - - // Mock SupervisorService - mock!( - #[derive(Debug)] - pub SupervisorService {} - - #[async_trait] - impl SupervisorService for SupervisorService { - fn chain_ids(&self) -> impl Iterator; - fn dependency_set(&self) -> &DependencySet; - fn super_head(&self, chain: ChainId) -> Result; - fn latest_block_from(&self, l1_block: BlockNumHash, chain: ChainId) -> Result; - fn derived_to_source_block(&self, chain: ChainId, derived: BlockNumHash) -> Result; - fn 
local_unsafe(&self, chain: ChainId) -> Result; - fn local_safe(&self, chain: ChainId) -> Result; - fn cross_safe(&self, chain: ChainId) -> Result; - fn finalized(&self, chain: ChainId) -> Result; - fn finalized_l1(&self) -> Result; - fn check_access_list(&self, inbox_entries: Vec, min_safety: SafetyLevel, executing_descriptor: ExecutingDescriptor) -> Result<(), SupervisorError>; - async fn super_root_at_timestamp(&self, timestamp: u64) -> Result; - } - ); - - #[tokio::test] - async fn test_supervisor_rpc_actor_stops_on_cancel() { - let addr = SocketAddr::from((Ipv4Addr::LOCALHOST, 0)); - let supervisor = Arc::new(MockSupervisorService::new()); - let cancel_token = CancellationToken::new(); - - let supervisor_rpc = kona_supervisor_core::rpc::SupervisorRpc::new(supervisor.clone()); - let rpc_module = supervisor_rpc.into_rpc(); - let actor = SupervisorRpcActor::new(addr, rpc_module, cancel_token.clone()); - - let handle = tokio::spawn(actor.start()); - - // Give the server a moment to start - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - - // Trigger cancellation - cancel_token.cancel(); - - // Await the actor and ensure it stops gracefully - let result = handle.await.unwrap(); - assert!(result.is_ok() || matches!(result, Err(SupervisorRpcActorError::StopFailed))); - } -} diff --git a/rust/kona/crates/supervisor/service/src/actors/traits.rs b/rust/kona/crates/supervisor/service/src/actors/traits.rs deleted file mode 100644 index 58a0ed48fd7c6..0000000000000 --- a/rust/kona/crates/supervisor/service/src/actors/traits.rs +++ /dev/null @@ -1,11 +0,0 @@ -/// The [`SupervisorActor`] trait is an actor-like service for the supervisor. -use async_trait::async_trait; -#[async_trait] -pub trait SupervisorActor { - /// The event type received by the actor. - type InboundEvent; - /// The error type for the actor. - type Error: std::fmt::Debug; - /// Starts the actor. 
- async fn start(mut self) -> Result<(), Self::Error>; -} diff --git a/rust/kona/crates/supervisor/service/src/actors/utils.rs b/rust/kona/crates/supervisor/service/src/actors/utils.rs deleted file mode 100644 index bba093cdb998d..0000000000000 --- a/rust/kona/crates/supervisor/service/src/actors/utils.rs +++ /dev/null @@ -1,67 +0,0 @@ -use std::{future::Future, time::Duration}; -use tokio::{select, task::JoinHandle, time::sleep}; -use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; - -/// Spawns a background task that retries the given async operation with backoff on failure. -/// -/// - `operation`: The async task to retry (must return `Result<(), E>`) -/// - `cancel_token`: Cancels the retry loop -/// - `max_retries`: Max retries before exiting (use `usize::MAX` for infinite) -pub(super) fn spawn_task_with_retry( - operation: impl Fn() -> Fut + Send + Sync + 'static, - cancel_token: CancellationToken, - max_retries: usize, -) -> JoinHandle<()> -where - Fut: Future> + Send + 'static, - E: std::fmt::Display + Send + 'static, -{ - tokio::spawn(async move { - let mut attempt = 0; - - loop { - if cancel_token.is_cancelled() { - info!(target: "supervisor::retrier", "Retry loop cancelled before starting"); - break; - } - - match operation().await { - Ok(()) => { - info!(target: "supervisor::retrier", "Task exited successfully, restarting"); - attempt = 0; // Reset attempt count on success - } - Err(err) => { - attempt += 1; - - if attempt > max_retries { - error!(target: "supervisor::retrier", %err, "Retry limit ({max_retries}) exceeded"); - break; - } - - let delay = backoff_delay(attempt); - warn!( - target: "supervisor::retrier", - %err, - ?delay, - "Attempt {attempt}/{max_retries} failed, retrying after delay" - ); - - select! 
{ - _ = sleep(delay) => {} - _ = cancel_token.cancelled() => { - warn!(target: "supervisor::retrier", "Retry loop cancelled during backoff"); - break; - } - } - } - } - } - }) -} - -/// Calculates exponential backoff delay with a max cap (30s). -fn backoff_delay(attempt: usize) -> Duration { - let secs = 2u64.saturating_pow(attempt.min(5) as u32); - Duration::from_secs(secs.min(30)) -} diff --git a/rust/kona/crates/supervisor/service/src/lib.rs b/rust/kona/crates/supervisor/service/src/lib.rs deleted file mode 100644 index a89f69594e8d7..0000000000000 --- a/rust/kona/crates/supervisor/service/src/lib.rs +++ /dev/null @@ -1,9 +0,0 @@ -//! This crate provides the runnable service layer for the Kona Supervisor. -//! It integrates the core logic with the RPC server. - -mod service; - -pub use service::Service; - -mod actors; -pub use actors::SupervisorActor; diff --git a/rust/kona/crates/supervisor/service/src/service.rs b/rust/kona/crates/supervisor/service/src/service.rs deleted file mode 100644 index 997b5d8b4fb8c..0000000000000 --- a/rust/kona/crates/supervisor/service/src/service.rs +++ /dev/null @@ -1,508 +0,0 @@ -//! Contains the main Supervisor service runner. 
- -use alloy_primitives::ChainId; -use alloy_provider::{RootProvider, network::Ethereum}; -use alloy_rpc_client::RpcClient; -use anyhow::Result; -use futures::future; -use jsonrpsee::client_transport::ws::Url; -use kona_supervisor_core::{ - ChainProcessor, CrossSafetyCheckerJob, LogIndexer, ReorgHandler, Supervisor, - config::Config, - event::ChainEvent, - l1_watcher::L1Watcher, - rpc::{AdminError, AdminRequest, AdminRpc, SupervisorRpc}, - safety_checker::{CrossSafePromoter, CrossUnsafePromoter}, - syncnode::{Client, ClientConfig, ManagedNode, ManagedNodeClient, ManagedNodeCommand}, -}; -use kona_supervisor_rpc::{SupervisorAdminApiServer, SupervisorApiServer}; -use kona_supervisor_storage::{ChainDb, ChainDbFactory, DerivationStorageWriter, LogStorageWriter}; -use std::{collections::HashMap, sync::Arc}; -use tokio::{sync::mpsc, task::JoinSet, time::Duration}; -use tokio_util::sync::CancellationToken; -use tracing::{error, info, warn}; - -use crate::actors::{ - ChainProcessorActor, ManagedNodeActor, MetricWorker, SupervisorActor, SupervisorRpcActor, -}; - -// simplify long type signature -type ManagedLogIndexer = LogIndexer, ChainDb>; - -/// The main service structure for the Kona -/// [`SupervisorService`](`kona_supervisor_core::SupervisorService`). Orchestrates the various -/// components of the supervisor. -#[derive(Debug)] -pub struct Service { - config: Arc, - - supervisor: Arc>>, - database_factory: Arc, - managed_nodes: HashMap>>, - log_indexers: HashMap>, - - // channels - chain_event_senders: HashMap>, - chain_event_receivers: HashMap>, - managed_node_senders: HashMap>, - managed_node_receivers: HashMap>, - admin_receiver: Option>, - - cancel_token: CancellationToken, - join_set: JoinSet>, -} - -impl Service { - /// Creates a new Supervisor service instance. 
- pub fn new(cfg: Config) -> Self { - let config = Arc::new(cfg); - let database_factory = Arc::new(ChainDbFactory::new(config.datadir.clone()).with_metrics()); - let supervisor = Arc::new(Supervisor::new(config.clone(), database_factory.clone())); - - Self { - config, - - supervisor, - database_factory, - managed_nodes: HashMap::new(), - log_indexers: HashMap::new(), - - chain_event_senders: HashMap::new(), - chain_event_receivers: HashMap::new(), - managed_node_senders: HashMap::new(), - managed_node_receivers: HashMap::new(), - admin_receiver: None, - - cancel_token: CancellationToken::new(), - join_set: JoinSet::new(), - } - } - - /// Initialises the Supervisor service. - pub async fn initialise(&mut self) -> Result<()> { - // create sender and receiver channels for each chain - for chain_id in self.config.rollup_config_set.rollups.keys() { - let (chain_tx, chain_rx) = mpsc::channel::(1000); - self.chain_event_senders.insert(*chain_id, chain_tx); - self.chain_event_receivers.insert(*chain_id, chain_rx); - - let (managed_node_tx, managed_node_rx) = mpsc::channel::(1000); - self.managed_node_senders.insert(*chain_id, managed_node_tx); - self.managed_node_receivers.insert(*chain_id, managed_node_rx); - } - - self.init_database().await?; - self.init_chain_processor().await?; - self.init_managed_nodes().await?; - self.init_l1_watcher()?; - self.init_cross_safety_checker().await?; - - // todo: run metric worker only if metrics are enabled - self.init_rpc_server().await?; - self.init_metric_reporter().await; - Ok(()) - } - - async fn init_database(&self) -> Result<()> { - info!(target: "supervisor::service", "Initialising databases for all chains..."); - - for (chain_id, config) in self.config.rollup_config_set.rollups.iter() { - // Initialise the database for each chain. 
- let db = self.database_factory.get_or_create_db(*chain_id)?; - let interop_time = config.interop_time; - let derived_pair = config.genesis.get_derived_pair(); - if config.is_interop(derived_pair.derived.timestamp) { - info!(target: "supervisor::service", chain_id, interop_time, %derived_pair, "Initialising database for interop activation block"); - db.initialise_log_storage(derived_pair.derived)?; - db.initialise_derivation_storage(derived_pair)?; - } - info!(target: "supervisor::service", chain_id, "Database initialized successfully"); - } - Ok(()) - } - - async fn init_managed_node(&mut self, config: &ClientConfig) -> Result<()> { - info!(target: "supervisor::service", node = %config.url, "Initialising managed node..."); - let url = Url::parse(&self.config.l1_rpc).map_err(|err| { - error!(target: "supervisor::service", %err, "Failed to parse L1 RPC URL"); - anyhow::anyhow!("failed to parse L1 RPC URL: {err}") - })?; - let provider = RootProvider::::new_http(url); - let client = Arc::new(Client::new(config.clone())); - - let chain_id = client.chain_id().await.map_err(|err| { - error!(target: "supervisor::service", %err, "Failed to get chain ID from client"); - anyhow::anyhow!("failed to get chain ID from client: {err}") - })?; - - let db = self.database_factory.get_db(chain_id)?; - - let chain_event_sender = self - .chain_event_senders - .get(&chain_id) - .ok_or(anyhow::anyhow!("no chain event sender found for chain {chain_id}"))? 
- .clone(); - - let managed_node = - ManagedNode::::new(client.clone(), db, provider, chain_event_sender); - - if self.managed_nodes.contains_key(&chain_id) { - warn!(target: "supervisor::service", %chain_id, "Managed node for chain already exists, skipping initialization"); - return Ok(()); - } - - let managed_node = Arc::new(managed_node); - // add the managed node to the supervisor service - // also checks if the chain ID is supported - self.supervisor.add_managed_node(chain_id, managed_node.clone()).await?; - - // set the managed node in the log indexer - let log_indexer = self - .log_indexers - .get(&chain_id) - .ok_or(anyhow::anyhow!("no log indexer found for chain {chain_id}"))? - .clone(); - log_indexer.set_block_provider(managed_node.clone()).await; - - self.managed_nodes.insert(chain_id, managed_node.clone()); - info!(target: "supervisor::service", - chain_id, - "Managed node for chain initialized successfully", - ); - - // start managed node actor - let managed_node_receiver = self - .managed_node_receivers - .remove(&chain_id) - .ok_or(anyhow::anyhow!("no managed node receiver found for chain {chain_id}"))?; - - let cancel_token = self.cancel_token.clone(); - self.join_set.spawn(async move { - if let Err(err) = - ManagedNodeActor::new(client, managed_node, managed_node_receiver, cancel_token) - .start() - .await - { - Err(anyhow::anyhow!(err)) - } else { - Ok(()) - } - }); - Ok(()) - } - - async fn init_managed_nodes(&mut self) -> Result<()> { - let configs = self.config.l2_consensus_nodes_config.clone(); - for config in configs.iter() { - self.init_managed_node(config).await?; - } - Ok(()) - } - - async fn init_chain_processor(&mut self) -> Result<()> { - info!(target: "supervisor::service", "Initialising chain processors for all chains..."); - - for (chain_id, _) in self.config.rollup_config_set.rollups.iter() { - let db = self.database_factory.get_db(*chain_id)?; - - let managed_node_sender = self - .managed_node_senders - .get(chain_id) - 
.ok_or(anyhow::anyhow!("no managed node sender found for chain {chain_id}"))? - .clone(); - - let log_indexer = Arc::new(LogIndexer::new(*chain_id, None, db.clone())); - self.log_indexers.insert(*chain_id, log_indexer.clone()); - - // initialise chain processor for the chain. - let mut processor = ChainProcessor::new( - self.config.clone(), - *chain_id, - log_indexer, - db, - managed_node_sender, - ); - - // todo: enable metrics only if configured - processor = processor.with_metrics(); - - // Start the chain processor actor. - let chain_event_receiver = self - .chain_event_receivers - .remove(chain_id) - .ok_or(anyhow::anyhow!("no chain event receiver found for chain {chain_id}"))?; - - let cancel_token = self.cancel_token.clone(); - self.join_set.spawn(async move { - if let Err(err) = - ChainProcessorActor::new(processor, cancel_token, chain_event_receiver) - .start() - .await - { - Err(anyhow::anyhow!(err)) - } else { - Ok(()) - } - }); - } - Ok(()) - } - - fn init_l1_watcher(&mut self) -> Result<()> { - info!(target: "supervisor::service", "Initialising L1 watcher..."); - - let l1_rpc_url = Url::parse(&self.config.l1_rpc).map_err(|err| { - error!(target: "supervisor::service", %err, "Failed to parse L1 RPC URL"); - anyhow::anyhow!("failed to parse L1 RPC URL: {err}") - })?; - let l1_rpc = RpcClient::new_http(l1_rpc_url); - - let chain_dbs_map: HashMap> = self - .config - .rollup_config_set - .rollups - .keys() - .map(|chain_id| { - self.database_factory.get_db(*chain_id) - .map(|db| (*chain_id, db)) // <-- FIX: remove Arc::new(db) - .map_err(|err| { - error!(target: "supervisor::service", %err, "Failed to get database for chain {chain_id}"); - anyhow::anyhow!("failed to get database for chain {chain_id}: {err}") - }) - }) - .collect::>>>()?; - - let database_factory = self.database_factory.clone(); - let cancel_token = self.cancel_token.clone(); - let event_senders = self.chain_event_senders.clone(); - self.join_set.spawn(async move { - let reorg_handler = 
ReorgHandler::new(l1_rpc.clone(), chain_dbs_map).with_metrics(); - - // Start the L1 watcher streaming loop. - let l1_watcher = L1Watcher::new( - l1_rpc.clone(), - database_factory, - event_senders, - cancel_token, - reorg_handler, - ); - - l1_watcher.run().await; - Ok(()) - }); - Ok(()) - } - - async fn init_cross_safety_checker(&mut self) -> Result<()> { - info!(target: "supervisor::service", "Initialising cross safety checker..."); - - for (&chain_id, config) in &self.config.rollup_config_set.rollups { - let db = Arc::clone(&self.database_factory); - let cancel = self.cancel_token.clone(); - - let chain_event_sender = self - .chain_event_senders - .get(&chain_id) - .ok_or(anyhow::anyhow!("no chain event sender found for chain {chain_id}"))? - .clone(); - - let cross_safe_job = CrossSafetyCheckerJob::new( - chain_id, - db.clone(), - cancel.clone(), - Duration::from_secs(config.block_time), - CrossSafePromoter, - chain_event_sender.clone(), - self.config.clone(), - ); - - self.join_set.spawn(async move { - cross_safe_job.run().await; - Ok(()) - }); - - let cross_unsafe_job = CrossSafetyCheckerJob::new( - chain_id, - db, - cancel, - Duration::from_secs(config.block_time), - CrossUnsafePromoter, - chain_event_sender, - self.config.clone(), - ); - - self.join_set.spawn(async move { - cross_unsafe_job.run().await; - Ok(()) - }); - } - Ok(()) - } - - async fn init_metric_reporter(&mut self) { - // Initialize the metric reporter actor. 
- let database_factory = self.database_factory.clone(); - let cancel_token = self.cancel_token.clone(); - self.join_set.spawn(async move { - if let Err(err) = - MetricWorker::new(Duration::from_secs(30), vec![database_factory], cancel_token) - .start() - .await - { - Err(anyhow::anyhow!(err)) - } else { - Ok(()) - } - }); - } - - async fn init_rpc_server(&mut self) -> Result<()> { - let supervisor_rpc = SupervisorRpc::new(self.supervisor.clone()); - - let mut rpc_module = supervisor_rpc.into_rpc(); - - if self.config.enable_admin_api { - info!(target: "supervisor::service", "Enabling Supervisor Admin API"); - - let (admin_tx, admin_rx) = mpsc::channel::(100); - let admin_rpc = AdminRpc::new(admin_tx); - rpc_module - .merge(admin_rpc.into_rpc()) - .map_err(|err| anyhow::anyhow!("failed to merge Admin RPC module: {err}"))?; - self.admin_receiver = Some(admin_rx); - } - - let rpc_addr = self.config.rpc_addr; - let cancel_token = self.cancel_token.clone(); - self.join_set.spawn(async move { - if let Err(err) = - SupervisorRpcActor::new(rpc_addr, rpc_module, cancel_token).start().await - { - Err(anyhow::anyhow!(err)) - } else { - Ok(()) - } - }); - Ok(()) - } - - async fn handle_admin_request(&mut self, req: AdminRequest) { - match req { - AdminRequest::AddL2Rpc { cfg, resp } => { - let result = match self.init_managed_node(&cfg).await { - Ok(()) => Ok(()), - Err(e) => { - tracing::error!(target: "supervisor::service", %e, "admin add_l2_rpc failed"); - Err(AdminError::ServiceError(e.to_string())) - } - }; - - let _ = resp.send(result); - } - } - } - - /// Runs the Supervisor service. - /// This function will typically run indefinitely until interrupted. - pub async fn run(&mut self) -> Result<()> { - self.initialise().await?; - - // todo: refactor this to only run the tasks completion loop - // and handle admin requests elsewhere - loop { - tokio::select! 
{ - // Admin requests (if admin_receiver was initialized) - maybe_req = async { - if let Some(rx) = self.admin_receiver.as_mut() { - rx.recv().await - } else { - // if no receiver present, never produce a value - future::pending::>().await - } - } => { - if let Some(req) = maybe_req { - self.handle_admin_request(req).await; - } - } - - // Supervisor task completions / failures - opt = self.join_set.join_next() => { - match opt { - Some(Ok(Ok(_))) => { - info!(target: "supervisor::service", "Task completed successfully."); - } - Some(Ok(Err(err))) => { - error!(target: "supervisor::service", %err, "A task encountered an error."); - self.cancel_token.cancel(); - return Err(anyhow::anyhow!("A service task failed: {err}")); - } - Some(Err(err)) => { - error!(target: "supervisor::service", %err, "A task encountered an error."); - self.cancel_token.cancel(); - return Err(anyhow::anyhow!("A service task failed: {err}")); - } - None => break, // all tasks finished - } - } - } - } - Ok(()) - } - - pub async fn shutdown(mut self) -> Result<()> { - self.cancel_token.cancel(); // Signal cancellation to all tasks - - // Wait for all tasks to finish. 
- while let Some(res) = self.join_set.join_next().await { - match res { - Ok(Ok(_)) => { - info!(target: "supervisor::service", "Task completed successfully during shutdown."); - } - Ok(Err(err)) => { - error!(target: "supervisor::service", %err, "A task encountered an error during shutdown."); - } - Err(err) => { - error!(target: "supervisor::service", %err, "A task encountered an error during shutdown."); - } - } - } - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use std::{net::SocketAddr, path::PathBuf}; - - use kona_interop::DependencySet; - use kona_supervisor_core::config::RollupConfigSet; - - use super::*; - - fn make_test_config(enable_admin: bool) -> Config { - let mut cfg = Config::new( - "http://localhost:8545".to_string(), - vec![], - PathBuf::from("/tmp/kona-supervisor"), - SocketAddr::from(([127, 0, 0, 1], 8545)), - false, - DependencySet { - dependencies: Default::default(), - override_message_expiry_window: None, - }, - RollupConfigSet { rollups: HashMap::new() }, - ); - cfg.enable_admin_api = enable_admin; - cfg - } - - #[tokio::test] - async fn test_init_rpc_server_enables_admin_receiver_when_flag_set() { - let cfg = Arc::new(make_test_config(true)); - let mut svc = Service::new((*cfg).clone()); - - svc.config = cfg.clone(); - svc.init_rpc_server().await.expect("init_rpc_server failed"); - assert!(svc.admin_receiver.is_some(), "admin_receiver must be set when admin enabled"); - } -} diff --git a/rust/kona/crates/supervisor/storage/Cargo.toml b/rust/kona/crates/supervisor/storage/Cargo.toml deleted file mode 100644 index 6239df045e5fb..0000000000000 --- a/rust/kona/crates/supervisor/storage/Cargo.toml +++ /dev/null @@ -1,54 +0,0 @@ -[package] -name = "kona-supervisor-storage" -version = "0.1.0" - -edition.workspace = true -license.workspace = true -rust-version.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -keywords.workspace = true -categories.workspace = true -exclude.workspace = true - 
-[dependencies] -# Workspace -kona-protocol.workspace = true -kona-interop.workspace = true -kona-supervisor-types.workspace = true -kona-supervisor-metrics.workspace = true - -# Alloy -alloy-primitives = { workspace = true, features = ["map", "rlp", "serde", "rand"] } -alloy-eips = { workspace = true } - -# Op-Alloy -op-alloy-consensus.workspace = true - -# Misc -serde = { workspace = true, features = ["derive"] } -derive_more.workspace = true -bytes.workspace = true -modular-bitfield.workspace = true -thiserror.workspace = true -tracing.workspace = true -eyre.workspace = true -metrics.workspace = true - -#reth -reth-db-api = { workspace = true } -reth-db = { workspace = true, features = ["mdbx"] } -reth-codecs = { workspace = true } -reth-primitives-traits = { workspace = true } - -# HTTP client and TLS for remote signer -tokio = { workspace = true, features = ["full"] } - -[dev-dependencies] -tempfile = { workspace = true } -tokio.workspace = true -kona-cli.workspace = true - -[lints] -workspace = true diff --git a/rust/kona/crates/supervisor/storage/src/chaindb.rs b/rust/kona/crates/supervisor/storage/src/chaindb.rs deleted file mode 100644 index 9eeca7416dccd..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/chaindb.rs +++ /dev/null @@ -1,1623 +0,0 @@ -//! Main database access structure and transaction contexts. 
- -use crate::{ - Metrics, StorageRewinder, - error::StorageError, - providers::{DerivationProvider, LogProvider, SafetyHeadRefProvider}, - traits::{ - DerivationStorageReader, DerivationStorageWriter, HeadRefStorageReader, - HeadRefStorageWriter, LogStorageReader, LogStorageWriter, - }, -}; -use alloy_eips::eip1898::BlockNumHash; -use alloy_primitives::ChainId; -use kona_interop::DerivedRefPair; -use kona_protocol::BlockInfo; -use kona_supervisor_metrics::{MetricsReporter, observe_metrics_for_result}; -use kona_supervisor_types::{Log, SuperHead}; -use metrics::{Label, gauge}; -use op_alloy_consensus::interop::SafetyLevel; -use reth_db::{ - DatabaseEnv, - mdbx::{DatabaseArguments, init_db_for}, -}; -use reth_db_api::database::Database; -use std::path::Path; -use tracing::warn; - -/// Manages the database environment for a single chain. -/// Provides transactional access to data via providers. -#[derive(Debug)] -pub struct ChainDb { - chain_id: ChainId, - metrics_enabled: Option, - - env: DatabaseEnv, -} - -impl ChainDb { - /// Creates or opens a database environment at the given path. - pub fn new(chain_id: ChainId, path: &Path) -> Result { - let env = init_db_for::<_, crate::models::Tables>(path, DatabaseArguments::default())?; - Ok(Self { chain_id, metrics_enabled: None, env }) - } - - /// Enables metrics on the database environment. 
- pub fn with_metrics(mut self) -> Self { - self.metrics_enabled = Some(true); - crate::Metrics::init(self.chain_id); - self - } - - fn observe_call Result>( - &self, - name: &'static str, - f: F, - ) -> Result { - if self.metrics_enabled.unwrap_or(false) { - observe_metrics_for_result!( - Metrics::STORAGE_REQUESTS_SUCCESS_TOTAL, - Metrics::STORAGE_REQUESTS_ERROR_TOTAL, - Metrics::STORAGE_REQUEST_DURATION_SECONDS, - name, - f(), - "chain_id" => self.chain_id.to_string() - ) - } else { - f() - } - } -} - -// todo: make sure all get method return DatabaseNotInitialised error if db is not initialised -impl DerivationStorageReader for ChainDb { - fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_DERIVED_TO_SOURCE, || { - self.env.view(|tx| { - DerivationProvider::new(tx, self.chain_id).derived_to_source(derived_block_id) - }) - })? - } - - fn latest_derived_block_at_source( - &self, - source_block_id: BlockNumHash, - ) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_LATEST_DERIVED_BLOCK_AT_SOURCE, || { - self.env.view(|tx| { - DerivationProvider::new(tx, self.chain_id) - .latest_derived_block_at_source(source_block_id) - }) - })? - } - - fn latest_derivation_state(&self) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_LATEST_DERIVATION_STATE, || { - self.env.view(|tx| DerivationProvider::new(tx, self.chain_id).latest_derivation_state()) - })? - } - - fn get_source_block(&self, source_block_number: u64) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_GET_SOURCE_BLOCK, || { - self.env.view(|tx| { - DerivationProvider::new(tx, self.chain_id).get_source_block(source_block_number) - }) - })? - } - - fn get_activation_block(&self) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_GET_ACTIVATION_BLOCK, || { - self.env.view(|tx| DerivationProvider::new(tx, self.chain_id).get_activation_block()) - })? 
- } -} - -impl DerivationStorageWriter for ChainDb { - fn initialise_derivation_storage( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_INITIALISE_DERIVATION_STORAGE, || { - self.env.update(|ctx| { - DerivationProvider::new(ctx, self.chain_id).initialise(incoming_pair)?; - SafetyHeadRefProvider::new(ctx, self.chain_id) - .update_safety_head_ref(SafetyLevel::LocalSafe, &incoming_pair.derived)?; - SafetyHeadRefProvider::new(ctx, self.chain_id) - .update_safety_head_ref(SafetyLevel::CrossSafe, &incoming_pair.derived) - }) - })? - } - - fn save_derived_block(&self, incoming_pair: DerivedRefPair) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_SAVE_DERIVED_BLOCK, || { - self.env.update(|ctx| { - DerivationProvider::new(ctx, self.chain_id).save_derived_block(incoming_pair)?; - - // Verify the consistency with log storage. - // The check is intentionally deferred until after saving the derived block, - // ensuring validation only triggers on the committed state to prevent false - // positives. - // Example: If the parent derived block doesn't exist, it should return error from - // derivation provider, not from log provider. 
- let derived_block = incoming_pair.derived; - let block = LogProvider::new(ctx, self.chain_id) - .get_block(derived_block.number) - .map_err(|err| match err { - StorageError::EntryNotFound(_) => { - warn!( - target: "supervisor::storage", - incoming_block = %derived_block, - "Derived block not found in log storage: {derived_block:?}" - ); - StorageError::FutureData - } - other => other, // propagate other errors as-is - })?; - if block != derived_block { - warn!( - target: "supervisor::storage", - incoming_block = %derived_block, - stored_log_block = %block, - "Derived block does not match the stored log block" - ); - return Err(StorageError::ReorgRequired); - } - - SafetyHeadRefProvider::new(ctx, self.chain_id) - .update_safety_head_ref(SafetyLevel::LocalSafe, &incoming_pair.derived) - }) - })? - } - - fn save_source_block(&self, incoming_source: BlockInfo) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_SAVE_SOURCE_BLOCK, || { - self.env.update(|ctx| { - DerivationProvider::new(ctx, self.chain_id).save_source_block(incoming_source) - }) - })? - } -} - -// todo: make sure all get method return DatabaseNotInitialised error if db is not initialised -impl LogStorageReader for ChainDb { - fn get_latest_block(&self) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_GET_LATEST_BLOCK, || { - self.env.view(|tx| LogProvider::new(tx, self.chain_id).get_latest_block()) - })? - } - - fn get_block(&self, block_number: u64) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_GET_BLOCK, || { - self.env.view(|tx| LogProvider::new(tx, self.chain_id).get_block(block_number)) - })? - } - - fn get_log(&self, block_number: u64, log_index: u32) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_GET_LOG, || { - self.env.view(|tx| LogProvider::new(tx, self.chain_id).get_log(block_number, log_index)) - })? 
- } - - fn get_logs(&self, block_number: u64) -> Result, StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_GET_LOGS, || { - self.env.view(|tx| LogProvider::new(tx, self.chain_id).get_logs(block_number)) - })? - } -} - -impl LogStorageWriter for ChainDb { - fn initialise_log_storage(&self, block: BlockInfo) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_INITIALISE_LOG_STORAGE, || { - self.env.update(|ctx| { - LogProvider::new(ctx, self.chain_id).initialise(block)?; - SafetyHeadRefProvider::new(ctx, self.chain_id) - .update_safety_head_ref(SafetyLevel::LocalUnsafe, &block)?; - SafetyHeadRefProvider::new(ctx, self.chain_id) - .update_safety_head_ref(SafetyLevel::CrossUnsafe, &block) - }) - })? - } - - fn store_block_logs(&self, block: &BlockInfo, logs: Vec) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_STORE_BLOCK_LOGS, || { - self.env.update(|ctx| { - LogProvider::new(ctx, self.chain_id).store_block_logs(block, logs)?; - - SafetyHeadRefProvider::new(ctx, self.chain_id) - .update_safety_head_ref(SafetyLevel::LocalUnsafe, block) - }) - })? - } -} - -impl HeadRefStorageReader for ChainDb { - fn get_safety_head_ref(&self, safety_level: SafetyLevel) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_GET_SAFETY_HEAD_REF, || { - self.env.view(|tx| { - SafetyHeadRefProvider::new(tx, self.chain_id).get_safety_head_ref(safety_level) - }) - })? 
- } - - /// Fetches all safety heads and current L1 state - fn get_super_head(&self) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_GET_SUPER_HEAD, || { - self.env.view(|tx| { - let sp = SafetyHeadRefProvider::new(tx, self.chain_id); - let local_unsafe = - sp.get_safety_head_ref(SafetyLevel::LocalUnsafe).map_err(|err| { - if matches!(err, StorageError::FutureData) { - StorageError::DatabaseNotInitialised - } else { - err - } - })?; - - let cross_unsafe = match sp.get_safety_head_ref(SafetyLevel::CrossUnsafe) { - Ok(block) => Some(block), - Err(StorageError::FutureData) => None, - Err(err) => return Err(err), - }; - - let local_safe = match sp.get_safety_head_ref(SafetyLevel::LocalSafe) { - Ok(block) => Some(block), - Err(StorageError::FutureData) => None, - Err(err) => return Err(err), - }; - - let cross_safe = match sp.get_safety_head_ref(SafetyLevel::CrossSafe) { - Ok(block) => Some(block), - Err(StorageError::FutureData) => None, - Err(err) => return Err(err), - }; - - let finalized = match sp.get_safety_head_ref(SafetyLevel::Finalized) { - Ok(block) => Some(block), - Err(StorageError::FutureData) => None, - Err(err) => return Err(err), - }; - - let l1_source = - match DerivationProvider::new(tx, self.chain_id).latest_derivation_state() { - Ok(pair) => Some(pair.source), - Err(StorageError::DatabaseNotInitialised) => None, - Err(err) => return Err(err), - }; - - Ok(SuperHead { - l1_source, - local_unsafe, - cross_unsafe, - local_safe, - cross_safe, - finalized, - }) - })? 
- }) - } -} - -impl HeadRefStorageWriter for ChainDb { - fn update_finalized_using_source( - &self, - finalized_source_block: BlockInfo, - ) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_UPDATE_FINALIZED_USING_SOURCE, || { - self.env.update(|tx| { - let sp = SafetyHeadRefProvider::new(tx, self.chain_id); - let safe = sp.get_safety_head_ref(SafetyLevel::CrossSafe)?; - - let dp = DerivationProvider::new(tx, self.chain_id); - let safe_block_pair = dp.get_derived_block_pair(safe.id())?; - - if finalized_source_block.number >= safe_block_pair.source.number { - // this could happen during initial sync - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - l1_finalized_block_number = finalized_source_block.number, - safe_source_block_number = safe_block_pair.source.number, - "L1 finalized block is greater than safe block", - ); - sp.update_safety_head_ref(SafetyLevel::Finalized, &safe)?; - return Ok(safe); - } - - let latest_derived = - dp.latest_derived_block_at_source(finalized_source_block.id())?; - sp.update_safety_head_ref(SafetyLevel::Finalized, &latest_derived)?; - Ok(latest_derived) - }) - })? - } - - fn update_current_cross_unsafe(&self, block: &BlockInfo) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_UPDATE_CURRENT_CROSS_UNSAFE, || { - self.env.update(|tx| { - let lp = LogProvider::new(tx, self.chain_id); - let sp = SafetyHeadRefProvider::new(tx, self.chain_id); - - // Check parent-child relationship with current CrossUnsafe head, if it exists. - let parent = sp.get_safety_head_ref(SafetyLevel::CrossUnsafe)?; - if !parent.is_parent_of(block) { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_block = %block, - latest_block = %parent, - "Incoming block is not the child of the current cross-unsafe head", - ); - return Err(StorageError::ConflictError); - } - - // Ensure the block exists in log storage and hasn't been pruned due to a re-org. 
- let stored_block = lp.get_block(block.number)?; - if stored_block.hash != block.hash { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_block_hash = %block.hash, - stored_block_hash = %stored_block.hash, - "Hash mismatch while updating CrossUnsafe head", - ); - return Err(StorageError::ConflictError); - } - - sp.update_safety_head_ref(SafetyLevel::CrossUnsafe, block)?; - Ok(()) - })? - }) - } - - fn update_current_cross_safe(&self, block: &BlockInfo) -> Result { - self.observe_call(Metrics::STORAGE_METHOD_UPDATE_CURRENT_CROSS_SAFE, || { - self.env.update(|tx| { - let dp = DerivationProvider::new(tx, self.chain_id); - let sp = SafetyHeadRefProvider::new(tx, self.chain_id); - - // Check parent-child relationship with current CrossUnsafe head, if it exists. - let parent = sp.get_safety_head_ref(SafetyLevel::CrossSafe)?; - if !parent.is_parent_of(block) { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_block = %block, - latest_block = %parent, - "Incoming block is not the child of the current cross-safe head", - ); - return Err(StorageError::ConflictError); - } - - // Ensure the block exists in derivation storage and hasn't been pruned due to a - // re-org. - let derived_pair = dp.get_derived_block_pair(block.id())?; - sp.update_safety_head_ref(SafetyLevel::CrossSafe, block)?; - - Ok(derived_pair.into()) - })? - }) - } -} - -impl StorageRewinder for ChainDb { - fn rewind_log_storage(&self, to: &BlockNumHash) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_REWIND_LOG_STORAGE, || { - self.env.update(|tx| { - let lp = LogProvider::new(tx, self.chain_id); - let hp = SafetyHeadRefProvider::new(tx, self.chain_id); - - // Ensure we don't rewind to or before the LocalSafe head. 
- match hp.get_safety_head_ref(SafetyLevel::LocalSafe) { - Ok(local_safe) => { - // If the target block is less than or equal to the local safe head, - // we cannot rewind to it, as this would mean losing logs for the safe - // blocks. The check is inclusive since the rewind - // operation removes the target block as well. - if to.number <= local_safe.number { - return Err(StorageError::RewindBeyondLocalSafeHead { - to: to.number, - local_safe: local_safe.number, - }); - } - } - Err(StorageError::FutureData) => { - // If LocalSafe is not set, we can rewind to any point. - } - Err(err) => return Err(err), - } - - lp.rewind_to(to)?; - - // get the current latest block to update the safety head refs - match lp.get_latest_block() { - Ok(latest_block) => { - hp.reset_safety_head_ref_if_ahead(SafetyLevel::LocalUnsafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::CrossUnsafe, &latest_block)?; - } - Err(StorageError::DatabaseNotInitialised) => { - // If the database returns DatabaseNotInitialised, it means we have rewound - // past the activation block - hp.remove_safety_head_ref(SafetyLevel::LocalUnsafe)?; - hp.remove_safety_head_ref(SafetyLevel::CrossUnsafe)?; - } - Err(err) => return Err(err), - }; - Ok(()) - })? 
- }) - } - - fn rewind(&self, to: &BlockNumHash) -> Result<(), StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_REWIND, || { - self.env.update(|tx| { - let lp = LogProvider::new(tx, self.chain_id); - let dp = DerivationProvider::new(tx, self.chain_id); - let hp = SafetyHeadRefProvider::new(tx, self.chain_id); - - lp.rewind_to(to)?; - dp.rewind_to(to)?; - - // get the current latest block to update the safety head refs - match lp.get_latest_block() { - Ok(latest_block) => { - hp.reset_safety_head_ref_if_ahead(SafetyLevel::LocalUnsafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::CrossUnsafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::LocalSafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::CrossSafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::Finalized, &latest_block)?; - } - Err(StorageError::DatabaseNotInitialised) => { - // If the database returns DatabaseNotInitialised, it means we have rewound - // past the activation block - hp.remove_safety_head_ref(SafetyLevel::LocalUnsafe)?; - hp.remove_safety_head_ref(SafetyLevel::CrossUnsafe)?; - hp.remove_safety_head_ref(SafetyLevel::LocalSafe)?; - hp.remove_safety_head_ref(SafetyLevel::CrossSafe)?; - hp.remove_safety_head_ref(SafetyLevel::Finalized)?; - } - Err(err) => return Err(err), - } - Ok(()) - })? 
- }) - } - - fn rewind_to_source(&self, to: &BlockNumHash) -> Result, StorageError> { - self.observe_call(Metrics::STORAGE_METHOD_REWIND_TO_SOURCE, || { - self.env.update(|tx| { - let lp = LogProvider::new(tx, self.chain_id); - let dp = DerivationProvider::new(tx, self.chain_id); - let hp = SafetyHeadRefProvider::new(tx, self.chain_id); - - let derived_target_block = dp.rewind_to_source(to)?; - if let Some(rewind_target) = derived_target_block { - lp.rewind_to(&rewind_target.id())?; - } - - // get the current latest block to update the safety head refs - match lp.get_latest_block() { - Ok(latest_block) => { - hp.reset_safety_head_ref_if_ahead(SafetyLevel::LocalUnsafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::CrossUnsafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::LocalSafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::CrossSafe, &latest_block)?; - hp.reset_safety_head_ref_if_ahead(SafetyLevel::Finalized, &latest_block)?; - } - Err(StorageError::DatabaseNotInitialised) => { - // If the database returns DatabaseNotInitialised, it means we have rewound - // past the activation block - hp.remove_safety_head_ref(SafetyLevel::LocalUnsafe)?; - hp.remove_safety_head_ref(SafetyLevel::CrossUnsafe)?; - hp.remove_safety_head_ref(SafetyLevel::LocalSafe)?; - hp.remove_safety_head_ref(SafetyLevel::CrossSafe)?; - hp.remove_safety_head_ref(SafetyLevel::Finalized)?; - } - Err(err) => return Err(err), - } - Ok(derived_target_block) - })? 
- }) - } -} - -impl MetricsReporter for ChainDb { - fn report_metrics(&self) { - let mut metrics = Vec::new(); - - let _ = self - .env - .view(|tx| { - for table in crate::models::Tables::ALL.iter().map(crate::models::Tables::name) { - let table_db = tx.inner().open_db(Some(table))?; - - let stats = tx.inner().db_stat(table_db.dbi())?; - - let page_size = stats.page_size() as usize; - let leaf_pages = stats.leaf_pages(); - let branch_pages = stats.branch_pages(); - let overflow_pages = stats.overflow_pages(); - let num_pages = leaf_pages + branch_pages + overflow_pages; - let table_size = page_size * num_pages; - let entries = stats.entries(); - - metrics.push(( - "kona_supervisor_storage.table_size", - table_size as f64, - vec![ - Label::new("table", table), - Label::new("chain_id", self.chain_id.to_string()), - ], - )); - metrics.push(( - "kona_supervisor_storage.table_pages", - leaf_pages as f64, - vec![ - Label::new("table", table), - Label::new("type", "leaf"), - Label::new("chain_id", self.chain_id.to_string()), - ], - )); - metrics.push(( - "kona_supervisor_storage.table_pages", - branch_pages as f64, - vec![ - Label::new("table", table), - Label::new("type", "branch"), - Label::new("chain_id", self.chain_id.to_string()), - ], - )); - metrics.push(( - "kona_supervisor_storage.table_pages", - overflow_pages as f64, - vec![ - Label::new("table", table), - Label::new("type", "overflow"), - Label::new("chain_id", self.chain_id.to_string()), - ], - )); - metrics.push(( - "kona_supervisor_storage.table_entries", - entries as f64, - vec![ - Label::new("table", table), - Label::new("chain_id", self.chain_id.to_string()), - ], - )); - } - - Ok::<(), eyre::Report>(()) - }) - .inspect_err(|err| { - warn!(target: "supervisor::storage", %err, "Failed to collect database metrics"); - }); - - for (name, value, labels) in metrics { - gauge!(name, labels).set(value); - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::B256; - use 
kona_supervisor_types::Log; - use tempfile::TempDir; - - #[test] - fn test_create_and_open_db() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb"); - let db = ChainDb::new(1, &db_path); - assert!(db.is_ok(), "Should create or open database"); - } - - #[test] - fn test_log_storage() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_logs"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - let anchor = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([2u8; 32]), - number: 0, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }, - }; - - db.initialise_log_storage(anchor.derived).expect("initialise log storage"); - db.initialise_derivation_storage(anchor).expect("initialise derivation storage"); - - let block = BlockInfo { - hash: B256::from([4u8; 32]), - number: 1, - parent_hash: anchor.derived.hash, - timestamp: 0, - }; - let log1 = Log { index: 0, hash: B256::from([0u8; 32]), executing_message: None }; - let log2 = Log { index: 1, hash: B256::from([1u8; 32]), executing_message: None }; - let logs = vec![log1, log2]; - - // Store logs - db.store_block_logs(&block, logs.clone()).expect("store logs"); - - // Retrieve logs - let retrieved_logs = db.get_logs(block.number).expect("get logs"); - assert_eq!(retrieved_logs.len(), 2); - assert_eq!(retrieved_logs, logs, "First log should match stored log"); - - let latest_block = db.get_latest_block().expect("latest block"); - assert_eq!(latest_block, block, "Latest block should match stored block"); - - let log = db.get_log(block.number, 1).expect("get block by log"); - assert_eq!(log, logs[1], "Block by log should match stored block"); - } - - #[test] - fn test_super_head_empty() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = 
tmp_dir.path().join("chaindb_super_head_empty"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - // Get super head when no blocks are stored - let err = db.get_super_head().unwrap_err(); - assert!(matches!(err, StorageError::DatabaseNotInitialised)); - } - - #[test] - fn test_get_super_head_populated() { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let db_path = tmp_dir.path().join("chaindb"); - let db = ChainDb::new(1, &db_path).unwrap(); - - // Prepare blocks - let block = BlockInfo { number: 1, ..Default::default() }; - let derived_pair = DerivedRefPair { source: block, derived: block }; - - // Initialise all heads - db.initialise_log_storage(block).unwrap(); - db.initialise_derivation_storage(derived_pair).unwrap(); - - let _ = db - .env - .update(|ctx| { - let sp = SafetyHeadRefProvider::new(ctx, 1); - sp.update_safety_head_ref(SafetyLevel::Finalized, &block) - }) - .unwrap(); - - // Should not error and all heads should be Some - let super_head = db.get_super_head().unwrap(); - assert_eq!(super_head.local_unsafe, block); - assert!(super_head.cross_unsafe.is_some()); - assert!(super_head.local_safe.is_some()); - assert!(super_head.cross_safe.is_some()); - assert!(super_head.finalized.is_some()); - assert!(super_head.l1_source.is_some()); - } - - #[test] - fn test_get_super_head_with_some_missing_heads() { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let db_path = tmp_dir.path().join("chaindb"); - let db = ChainDb::new(1, &db_path).unwrap(); - - // Only initialise log storage (not derivation storage) - let block = BlockInfo { number: 1, ..Default::default() }; - db.initialise_log_storage(block).unwrap(); - - let super_head = db.get_super_head().unwrap(); - assert_eq!(super_head.local_unsafe, block); - // These will be None because derivation storage was not initialised - assert!(super_head.local_safe.is_none()); - assert!(super_head.cross_safe.is_none()); - assert!(super_head.finalized.is_none()); - 
assert!(super_head.l1_source.is_none()); - } - - #[test] - fn test_latest_derivation_state_empty() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_latest_derivation_empty"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - // Get latest derivation state when no blocks are stored - let err = db.latest_derivation_state().unwrap_err(); - assert!(matches!(err, StorageError::DatabaseNotInitialised)); - } - - #[test] - fn test_get_latest_block_empty() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_latest_block_empty"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - // Get latest block when no blocks are stored - let err = db.get_latest_block().unwrap_err(); - assert!(matches!(err, StorageError::DatabaseNotInitialised)); - } - - #[test] - fn test_derivation_storage() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_derivation"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - let anchor = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([2u8; 32]), - number: 0, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }, - }; - - // Create dummy derived block pair - let derived_pair = DerivedRefPair { - source: BlockInfo { - hash: B256::from([4u8; 32]), - number: 101, - parent_hash: anchor.source.hash, - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([6u8; 32]), - number: 1, - parent_hash: anchor.derived.hash, - timestamp: 0, - }, - }; - - // Initialise the database with the anchor derived block pair - db.initialise_log_storage(anchor.derived).expect("initialise log storage"); - db.initialise_derivation_storage(anchor).expect("initialise derivation storage"); - - // Save derived block pair - should error 
BlockOutOfOrder error - let err = db.save_derived_block(derived_pair).unwrap_err(); - assert!(matches!(err, StorageError::BlockOutOfOrder)); - - db.store_block_logs( - &BlockInfo { - hash: B256::from([6u8; 32]), - number: 1, - parent_hash: anchor.derived.hash, - timestamp: 0, - }, - vec![], - ) - .expect("storing logs failed"); - - // Save derived block pair - db.save_source_block(derived_pair.source).expect("save source block"); - db.save_derived_block(derived_pair).expect("save derived pair"); - - // Retrieve latest derived block pair - let latest_pair = db.latest_derivation_state().expect("get latest derived pair"); - assert_eq!(latest_pair, derived_pair, "Latest derived pair should match saved pair"); - - // Retrieve derived to source mapping - let derived_block_id = - BlockNumHash::new(derived_pair.derived.number, derived_pair.derived.hash); - let source_block = db.derived_to_source(derived_block_id).expect("get derived to source"); - assert_eq!( - source_block, derived_pair.source, - "Source block should match derived pair source" - ); - - // Retrieve latest derived block at source - let source_block_id = - BlockNumHash::new(derived_pair.source.number, derived_pair.source.hash); - let latest_derived = db - .latest_derived_block_at_source(source_block_id) - .expect("get latest derived at source"); - assert_eq!( - latest_derived, derived_pair.derived, - "Latest derived block at source should match derived pair derived" - ); - } - - #[test] - fn test_update_current_cross_unsafe() { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let db_path = tmp_dir.path().join("chaindb"); - let db = ChainDb::new(1, &db_path).unwrap(); - - let source = BlockInfo { number: 1, ..Default::default() }; - let block1 = BlockInfo { - number: 10, - hash: B256::random(), - parent_hash: B256::random(), - timestamp: 1, - }; - let mut block2 = BlockInfo { - number: 11, - hash: B256::random(), - parent_hash: B256::random(), - timestamp: 1, - }; - - 
db.initialise_log_storage(block1).expect("initialise log storage"); - db.initialise_derivation_storage(DerivedRefPair { source, derived: block1 }) - .expect("initialise derivation storage"); - - // should error as block2 must be child of block1 - let err = db.update_current_cross_unsafe(&block2).expect_err("should return an error"); - assert!(matches!(err, StorageError::ConflictError)); - - // make block2 as child of block1 - block2.parent_hash = block1.hash; - - // block2 doesn't exist in log storage - should return not found error - let err = db.update_current_cross_unsafe(&block2).expect_err("should return an error"); - assert!(matches!(err, StorageError::EntryNotFound(_))); - - db.store_block_logs(&block2, vec![]).unwrap(); - db.update_current_cross_unsafe(&block2).unwrap(); - - let cross_unsafe_block = db.get_safety_head_ref(SafetyLevel::CrossUnsafe).unwrap(); - assert_eq!(cross_unsafe_block, block2); - } - - #[test] - fn test_update_current_cross_safe() { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let db_path = tmp_dir.path().join("chaindb"); - let db = ChainDb::new(1, &db_path).unwrap(); - - let source = BlockInfo { number: 1, ..Default::default() }; - let block1 = BlockInfo { - number: 10, - hash: B256::random(), - parent_hash: B256::random(), - timestamp: 1, - }; - let mut block2 = BlockInfo { - number: 11, - hash: B256::random(), - parent_hash: B256::random(), - timestamp: 1, - }; - - db.initialise_log_storage(block1).expect("initialise log storage"); - db.initialise_derivation_storage(DerivedRefPair { source, derived: block1 }) - .expect("initialise derivation storage"); - - // should error as block2 must be child of block1 - let err = db.update_current_cross_safe(&block2).expect_err("should return an error"); - assert!(matches!(err, StorageError::ConflictError)); - - // make block2 as child of block1 - block2.parent_hash = block1.hash; - - // block2 doesn't exist in derivation storage - should return not found error - let err = 
db.update_current_cross_safe(&block2).expect_err("should return an error"); - assert!(matches!(err, StorageError::EntryNotFound(_))); - - db.store_block_logs(&block2, vec![]).unwrap(); - db.save_derived_block(DerivedRefPair { source, derived: block2 }).unwrap(); - - let ref_pair = db.update_current_cross_safe(&block2).unwrap(); - assert_eq!(ref_pair.source, source); - assert_eq!(ref_pair.derived, block2); - - let cross_safe_block = db.get_safety_head_ref(SafetyLevel::CrossSafe).unwrap(); - assert_eq!(cross_safe_block, block2); - } - - #[test] - fn test_source_block_storage() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_source_block"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - let source1 = BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 1234, - }; - let source2 = BlockInfo { - hash: B256::from([2u8; 32]), - number: 101, - parent_hash: source1.hash, - timestamp: 5678, - }; - let derived1 = BlockInfo { - hash: B256::from([3u8; 32]), - number: 1, - parent_hash: source1.hash, - timestamp: 9101, - }; - - db.initialise_log_storage(derived1).expect("initialise log storage"); - db.initialise_derivation_storage(DerivedRefPair { source: source1, derived: derived1 }) - .expect("initialise derivation storage"); - - assert!(db.save_source_block(source2).is_ok()); - - // Retrieve latest source block - let latest = db.latest_derivation_state().expect("get latest source block"); - assert_eq!(latest.source, source2); - } - - #[test] - fn test_all_safe_derived() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_source_block"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - let anchor = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 1234, - }, - derived: BlockInfo { - hash: 
B256::from([1u8; 32]), - number: 1, - parent_hash: B256::from([2u8; 32]), - timestamp: 1234, - }, - }; - - db.initialise_log_storage(anchor.derived).expect("initialise log storage"); - db.initialise_derivation_storage(anchor).expect("initialise derivation storage"); - - let source1 = BlockInfo { - hash: B256::from([2u8; 32]), - number: 101, - parent_hash: anchor.source.hash, - timestamp: 1234, - }; - let source2 = BlockInfo { - hash: B256::from([3u8; 32]), - number: 102, - parent_hash: source1.hash, - timestamp: 1234, - }; - let derived1 = BlockInfo { - hash: B256::from([4u8; 32]), - number: 2, - parent_hash: anchor.derived.hash, - timestamp: 1234, - }; - let derived2 = BlockInfo { - hash: B256::from([5u8; 32]), - number: 3, - parent_hash: derived1.hash, - timestamp: 1234, - }; - let derived3 = BlockInfo { - hash: B256::from([7u8; 32]), - number: 4, - parent_hash: derived2.hash, - timestamp: 1234, - }; - - assert!(db.save_source_block(source1).is_ok()); - db.store_block_logs(&derived1, vec![]).expect("storing logs failed"); - db.store_block_logs(&derived2, vec![]).expect("storing logs failed"); - db.store_block_logs(&derived3, vec![]).expect("storing logs failed"); - - assert!( - db.save_derived_block(DerivedRefPair { source: source1, derived: derived1 }).is_ok() - ); - - assert!(db.save_source_block(source2).is_ok()); - assert!( - db.save_derived_block(DerivedRefPair { source: source2, derived: derived2 }).is_ok() - ); - assert!( - db.save_derived_block(DerivedRefPair { source: source2, derived: derived3 }).is_ok() - ); - - let safe_derived = db.latest_derived_block_at_source(source1.id()).expect("should exist"); - assert_eq!(safe_derived, derived1); - - let safe_derived = db.latest_derived_block_at_source(source2.id()).expect("should exist"); - assert_eq!(safe_derived, derived3); - - let source = db.derived_to_source(derived2.id()).expect("should exist"); - assert_eq!(source, source2); - - let source = db.derived_to_source(derived3.id()).expect("should exist"); - 
assert_eq!(source, source2); - - let latest_derived_pair = db.latest_derivation_state().expect("should exist"); - assert_eq!(latest_derived_pair, DerivedRefPair { source: source2, derived: derived3 }); - } - - #[test] - fn test_rewind_log_storage() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_rewind_log"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - let anchor = BlockInfo { - hash: B256::from([2u8; 32]), - number: 1, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }; - - let next_block = BlockInfo { - hash: B256::from([3u8; 32]), - number: 2, - parent_hash: anchor.hash, - timestamp: 0, - }; - - db.initialise_log_storage(anchor).unwrap(); - db.store_block_logs(&next_block, vec![]).unwrap(); - - // Add and promote next_block to CrossUnsafe and LocalUnsafe - db.update_current_cross_unsafe(&next_block).unwrap(); - - db.rewind_log_storage(&next_block.id()).expect("rewind log storage should succeed"); - - // Should be rewound to anchor - let local_unsafe = - db.get_safety_head_ref(SafetyLevel::LocalUnsafe).expect("get safety head ref"); - let cross_unsafe = - db.get_safety_head_ref(SafetyLevel::CrossUnsafe).expect("get safety head ref"); - - assert_eq!(local_unsafe, anchor); - assert_eq!(cross_unsafe, anchor); - } - - #[test] - fn test_rewind_log_storage_beyond_derivation_head_should_error() { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let db_path = tmp_dir.path().join("chaindb_rewind_beyond_derivation"); - let db = ChainDb::new(1, &db_path).unwrap(); - - // Initialise anchor derived block and derivation storage - let anchor = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([2u8; 32]), - number: 0, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }, - }; - - db.initialise_log_storage(anchor.derived).unwrap(); - 
db.initialise_derivation_storage(anchor).unwrap(); - - let block1 = BlockInfo { - hash: B256::from([3u8; 32]), - number: 1, - parent_hash: anchor.derived.hash, - timestamp: 0, - }; - let source1 = BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }; - - let result = db.store_block_logs(&block1, Vec::new()); - assert!(result.is_ok(), "Should store block logs successfully"); - let result = db.save_source_block(source1); - assert!(result.is_ok(), "Should save source block successfully"); - let result = db.save_derived_block(DerivedRefPair { source: source1, derived: block1 }); - assert!(result.is_ok(), "Should save derived block successfully"); - - let block2 = BlockInfo { - hash: B256::from([4u8; 32]), - number: 2, - parent_hash: block1.hash, - timestamp: 0, - }; - - let result = db.store_block_logs(&block2, Vec::new()); - assert!(result.is_ok(), "Should store block logs successfully"); - - // Attempt to rewind log storage beyond local safe head - let err = db.rewind_log_storage(&anchor.derived.id()).unwrap_err(); - assert!( - matches!(err, StorageError::RewindBeyondLocalSafeHead { to, local_safe } if to == anchor.derived.number && local_safe == block1.number), - "Should not allow rewinding log storage beyond derivation head" - ); - - // Attempt to rewind log storage to the local safe head - let result = db.rewind_log_storage(&block1.id()).unwrap_err(); - assert!( - matches!(result, StorageError::RewindBeyondLocalSafeHead { to, local_safe } if to == block1.number && local_safe == block1.number), - "Should not allow rewinding log storage to the local safe head" - ); - } - - #[test] - fn test_rewind_log_comprehensive() { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let db_path = tmp_dir.path().join("chaindb_rewind_beyond_derivation"); - let db = ChainDb::new(1, &db_path).unwrap(); - - // Initialise anchor derived block and derivation storage - let block0 = BlockInfo { - hash: B256::from([2u8; 32]), 
- number: 0, - parent_hash: B256::ZERO, - timestamp: 0, - }; - - let result = db.initialise_log_storage(block0); - assert!(result.is_ok(), "Should initialise log storage successfully"); - - let block1 = BlockInfo { - hash: B256::from([3u8; 32]), - number: 1, - parent_hash: block0.hash, - timestamp: 0, - }; - - let result = db.store_block_logs(&block1, Vec::new()); - assert!(result.is_ok(), "Should store block logs successfully"); - - let block2 = BlockInfo { - hash: B256::from([4u8; 32]), - number: 2, - parent_hash: block1.hash, - timestamp: 0, - }; - - let result = db.store_block_logs(&block2, Vec::new()); - assert!(result.is_ok(), "Should store block logs successfully"); - - db.update_current_cross_unsafe(&block1).expect("update cross unsafe"); - - let result = db.rewind_log_storage(&block2.id()); - assert!(result.is_ok(), "Should rewind log storage successfully"); - - let local_unsafe = - db.get_safety_head_ref(SafetyLevel::LocalUnsafe).expect("get safety head ref"); - let cross_unsafe = - db.get_safety_head_ref(SafetyLevel::CrossUnsafe).expect("get safety head ref"); - - assert_eq!(local_unsafe, block1); - assert_eq!(cross_unsafe, block1); - - let result = db.rewind_log_storage(&block1.id()); - assert!(result.is_ok(), "Should rewind log storage successfully"); - - let local_unsafe = - db.get_safety_head_ref(SafetyLevel::LocalUnsafe).expect("get safety head ref"); - let cross_unsafe = - db.get_safety_head_ref(SafetyLevel::CrossUnsafe).expect("get safety head ref"); - - assert_eq!(local_unsafe, block0); - assert_eq!(cross_unsafe, block0); - } - - #[test] - fn test_rewind_log_storage_to_activation_block() { - let tmp_dir = tempfile::TempDir::new().unwrap(); - let db_path = tmp_dir.path().join("chaindb_rewind_beyond_derivation"); - let db = ChainDb::new(1, &db_path).unwrap(); - - // Initialise anchor derived block and derivation storage - let block0 = BlockInfo { - hash: B256::from([2u8; 32]), - number: 0, - parent_hash: B256::ZERO, - timestamp: 0, - }; - - let 
result = db.initialise_log_storage(block0); - assert!(result.is_ok(), "Should initialise log storage successfully"); - - let block1 = BlockInfo { - hash: B256::from([3u8; 32]), - number: 1, - parent_hash: block0.hash, - timestamp: 0, - }; - - let result = db.store_block_logs(&block1, Vec::new()); - assert!(result.is_ok(), "Should store block logs successfully"); - - let block2 = BlockInfo { - hash: B256::from([4u8; 32]), - number: 2, - parent_hash: block1.hash, - timestamp: 0, - }; - - let result = db.store_block_logs(&block2, Vec::new()); - assert!(result.is_ok(), "Should store block logs successfully"); - - let result = db.rewind_log_storage(&block0.id()); - assert!(result.is_ok(), "Should rewind log storage successfully"); - } - - #[test] - fn test_rewind_comprehensive() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_rewind_all"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - let anchor = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([2u8; 32]), - number: 1, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }, - }; - - let pair1 = DerivedRefPair { - source: BlockInfo { - hash: B256::from([3u8; 32]), - number: 101, - parent_hash: anchor.source.hash, - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([4u8; 32]), - number: 2, - parent_hash: anchor.derived.hash, - timestamp: 1, - }, - }; - - let pair2 = DerivedRefPair { - source: BlockInfo { - hash: B256::from([4u8; 32]), - number: 102, - parent_hash: pair1.source.hash, - timestamp: 1, - }, - derived: BlockInfo { - hash: B256::from([5u8; 32]), - number: 3, - parent_hash: pair1.derived.hash, - timestamp: 2, - }, - }; - - let unsafe_block = BlockInfo { - hash: B256::from([5u8; 32]), - number: 3, - parent_hash: pair1.derived.hash, - timestamp: 2, - }; - - 
db.initialise_log_storage(anchor.derived).expect("initialise log storage"); - db.initialise_derivation_storage(anchor).expect("initialise derivation storage"); - - db.store_block_logs(&pair1.derived, vec![]).expect("store logs"); - db.store_block_logs(&unsafe_block, vec![]).expect("store logs"); - - db.save_source_block(pair1.source).expect("save source block"); - db.save_derived_block(pair1).expect("save derived block"); - - db.save_source_block(pair2.source).expect("save source block"); - db.save_derived_block(pair2).expect("save derived block"); - - db.update_current_cross_unsafe(&pair1.derived).expect("update cross unsafe"); - db.update_current_cross_safe(&pair1.derived).expect("update cross safe"); - - db.update_current_cross_unsafe(&pair2.derived).expect("update cross unsafe"); - db.update_current_cross_safe(&pair2.derived).expect("update cross safe"); - - db.update_finalized_using_source(anchor.source).expect("update finalized using source"); - - db.rewind(&pair2.derived.id()).expect("rewind should succeed"); - - // Everything should be rewound to pair1.derived - let local_unsafe = db.get_safety_head_ref(SafetyLevel::LocalUnsafe).unwrap(); - let cross_unsafe = db.get_safety_head_ref(SafetyLevel::CrossUnsafe).unwrap(); - let local_safe = db.get_safety_head_ref(SafetyLevel::LocalSafe).unwrap(); - let cross_safe = db.get_safety_head_ref(SafetyLevel::CrossSafe).unwrap(); - let latest_pair = db.latest_derivation_state().unwrap(); - let log_block = db.get_latest_block().unwrap(); - let finalized = db.get_safety_head_ref(SafetyLevel::Finalized).unwrap(); - - assert_eq!(local_unsafe, pair1.derived); - assert_eq!(cross_unsafe, pair1.derived); - assert_eq!(local_safe, pair1.derived); - assert_eq!(cross_safe, pair1.derived); - assert_eq!(latest_pair, pair1); - assert_eq!(log_block, pair1.derived); - assert_eq!(finalized, anchor.derived); - - db.update_finalized_using_source(pair1.source).expect("update finalized using source"); - 
db.rewind(&pair1.derived.id()).expect("rewind should succeed"); - - // Everything should be rewound to anchor.derived - let local_unsafe = db.get_safety_head_ref(SafetyLevel::LocalUnsafe).unwrap(); - let cross_unsafe = db.get_safety_head_ref(SafetyLevel::CrossUnsafe).unwrap(); - let local_safe = db.get_safety_head_ref(SafetyLevel::LocalSafe).unwrap(); - let cross_safe = db.get_safety_head_ref(SafetyLevel::CrossSafe).unwrap(); - let latest_pair = db.latest_derivation_state().unwrap(); - let log_block = db.get_latest_block().unwrap(); - let finalized = db.get_safety_head_ref(SafetyLevel::Finalized).unwrap(); - - assert_eq!(local_unsafe, anchor.derived); - assert_eq!(cross_unsafe, anchor.derived); - assert_eq!(local_safe, anchor.derived); - assert_eq!(cross_safe, anchor.derived); - assert_eq!(latest_pair, anchor); - assert_eq!(log_block, anchor.derived); - assert_eq!(finalized, anchor.derived); - } - - #[test] - fn test_rewind_to_activation_block() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_rewind_all"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - let pair0 = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([2u8; 32]), - number: 1, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }, - }; - - let pair1 = DerivedRefPair { - source: BlockInfo { - hash: B256::from([3u8; 32]), - number: 101, - parent_hash: pair0.source.hash, - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([4u8; 32]), - number: 2, - parent_hash: pair0.derived.hash, - timestamp: 1, - }, - }; - - let unsafe_block = BlockInfo { - hash: B256::from([5u8; 32]), - number: 3, - parent_hash: pair1.derived.hash, - timestamp: 2, - }; - - db.initialise_log_storage(pair0.derived).expect("initialise log storage"); - db.initialise_derivation_storage(pair0).expect("initialise derivation 
storage"); - - db.store_block_logs(&pair1.derived, vec![]).expect("store logs"); - db.store_block_logs(&unsafe_block, vec![]).expect("store logs"); - - db.save_source_block(pair1.source).expect("save source block"); - db.save_derived_block(pair1).expect("save derived block"); - - db.update_current_cross_unsafe(&pair1.derived).expect("update cross unsafe"); - - db.rewind(&pair0.derived.id()).expect("rewind should succeed"); - - // Everything should return error - let local_unsafe = db.get_safety_head_ref(SafetyLevel::LocalUnsafe); - assert!(matches!(local_unsafe, Err(StorageError::FutureData))); - - let cross_unsafe = db.get_safety_head_ref(SafetyLevel::CrossUnsafe); - assert!(matches!(cross_unsafe, Err(StorageError::FutureData))); - - let local_safe = db.get_safety_head_ref(SafetyLevel::LocalSafe); - assert!(matches!(local_safe, Err(StorageError::FutureData))); - - let cross_safe = db.get_safety_head_ref(SafetyLevel::CrossSafe); - assert!(matches!(cross_safe, Err(StorageError::FutureData))); - - let latest_derivation_state = db.latest_derivation_state(); - assert!(matches!(latest_derivation_state, Err(StorageError::DatabaseNotInitialised))); - - let latest_log_block = db.get_latest_block(); - assert!(matches!(latest_log_block, Err(StorageError::DatabaseNotInitialised))); - } - - #[test] - fn test_rewind_to_source_updates_logs_and_heads() { - let tmp_dir = TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_rewind_to_source"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - // Anchor (activation) - let anchor = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([2u8; 32]), - number: 0, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }, - }; - - // Initialise DB with anchor - db.initialise_log_storage(anchor.derived).expect("initialise log storage"); - 
db.initialise_derivation_storage(anchor).expect("initialise derivation storage"); - - // Build two source entries and several derived blocks - let source1 = BlockInfo { - hash: B256::from([3u8; 32]), - number: 101, - parent_hash: anchor.source.hash, - timestamp: 0, - }; - let source2 = BlockInfo { - hash: B256::from([4u8; 32]), - number: 102, - parent_hash: source1.hash, - timestamp: 0, - }; - - // Derived blocks chained off the anchor/previous derived blocks - let derived1 = BlockInfo { - hash: B256::from([10u8; 32]), - number: 1, - parent_hash: anchor.derived.hash, - timestamp: 0, - }; - let derived2 = BlockInfo { - hash: B256::from([11u8; 32]), - number: 2, - parent_hash: derived1.hash, - timestamp: 0, - }; - let derived3 = BlockInfo { - hash: B256::from([12u8; 32]), - number: 3, - parent_hash: derived2.hash, - timestamp: 0, - }; - let derived4 = BlockInfo { - hash: B256::from([13u8; 32]), - number: 4, - parent_hash: derived3.hash, - timestamp: 0, - }; - let derived5 = BlockInfo { - hash: B256::from([14u8; 32]), - number: 5, - parent_hash: derived4.hash, - timestamp: 0, - }; - - // Insert sources and derived blocks into storage (logs + derivation) - assert!(db.save_source_block(source1).is_ok()); - db.store_block_logs(&derived1, vec![]).expect("store logs derived1"); - db.save_derived_block(DerivedRefPair { source: source1, derived: derived1 }) - .expect("save derived1"); - - db.store_block_logs(&derived2, vec![]).expect("store logs derived2"); - db.save_derived_block(DerivedRefPair { source: source1, derived: derived2 }) - .expect("save derived2"); - - db.store_block_logs(&derived3, vec![]).expect("store logs derived3"); - db.save_derived_block(DerivedRefPair { source: source1, derived: derived3 }) - .expect("save derived3"); - - assert!(db.save_source_block(source2).is_ok()); - db.store_block_logs(&derived4, vec![]).expect("store logs derived4"); - db.save_derived_block(DerivedRefPair { source: source2, derived: derived4 }) - .expect("save derived4"); - - 
db.store_block_logs(&derived5, vec![]).expect("store logs derived5"); - db.save_derived_block(DerivedRefPair { source: source2, derived: derived5 }) - .expect("save derived5"); - - // Advance safety heads to be ahead of anchor so that rewind will need to reset them. - db.update_current_cross_unsafe(&derived1).expect("update cross unsafe"); - db.update_current_cross_unsafe(&derived2).expect("update cross unsafe"); - db.update_current_cross_unsafe(&derived3).expect("update cross unsafe"); - db.update_current_cross_unsafe(&derived4).expect("update cross unsafe"); - - db.update_current_cross_safe(&derived1).expect("update cross safe"); - db.update_current_cross_safe(&derived2).expect("update cross safe"); - - // Now rewind to source1: expected derived rewind target is derived1 (first derived for - // source1) - let res = db.rewind_to_source(&source1.id()).expect("rewind_to_source should succeed"); - assert!(res.is_some(), "expected a derived rewind target"); - let rewind_target = res.unwrap(); - assert_eq!(rewind_target, derived1); - - // After rewind, logs should be rewound to before derived1 -> latest block == anchor.derived - let latest_log = db.get_latest_block().expect("latest block after rewind"); - assert_eq!(latest_log, anchor.derived); - - // All safety heads that were ahead should be reset to the new latest (anchor.derived) - let local_unsafe = db.get_safety_head_ref(SafetyLevel::LocalUnsafe).expect("local unsafe"); - let cross_unsafe = db.get_safety_head_ref(SafetyLevel::CrossUnsafe).expect("cross unsafe"); - let local_safe = db.get_safety_head_ref(SafetyLevel::LocalSafe).expect("local safe"); - let cross_safe = db.get_safety_head_ref(SafetyLevel::CrossSafe).expect("cross safe"); - - assert_eq!(local_unsafe, anchor.derived); - assert_eq!(cross_unsafe, anchor.derived); - assert_eq!(local_safe, anchor.derived); - assert_eq!(cross_safe, anchor.derived); - } - - #[test] - fn test_rewind_to_source_with_empty_source_returns_none() { - let tmp_dir = 
TempDir::new().expect("create temp dir"); - let db_path = tmp_dir.path().join("chaindb_rewind_to_source_empty"); - let db = ChainDb::new(1, &db_path).expect("create db"); - - // Anchor (activation) - let anchor = DerivedRefPair { - source: BlockInfo { - hash: B256::from([0u8; 32]), - number: 100, - parent_hash: B256::from([1u8; 32]), - timestamp: 0, - }, - derived: BlockInfo { - hash: B256::from([2u8; 32]), - number: 0, - parent_hash: B256::from([3u8; 32]), - timestamp: 0, - }, - }; - - // Initialise DB with anchor - db.initialise_log_storage(anchor.derived).expect("initialise log storage"); - db.initialise_derivation_storage(anchor).expect("initialise derivation storage"); - - // Insert a source block that has no derived entries - let source = BlockInfo { - hash: B256::from([3u8; 32]), - number: 101, - parent_hash: anchor.source.hash, - timestamp: 0, - }; - db.save_source_block(source).expect("save source block"); - - // Rewind to the source with empty derived list -> should return None - let res = db.rewind_to_source(&source.id()).expect("rewind_to_source should succeed"); - assert!(res.is_none(), "Expected None when source has no derived blocks"); - - // Ensure latest log and derivation state remain at the anchor - let latest_log = db.get_latest_block().expect("latest block after noop rewind"); - assert_eq!(latest_log, anchor.derived); - - let latest_pair = db.latest_derivation_state().expect("latest derivation state"); - assert_eq!(latest_pair, anchor); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/chaindb_factory.rs b/rust/kona/crates/supervisor/storage/src/chaindb_factory.rs deleted file mode 100644 index d3ef793f9dd93..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/chaindb_factory.rs +++ /dev/null @@ -1,320 +0,0 @@ -use std::{ - collections::HashMap, - path::PathBuf, - sync::{Arc, RwLock}, -}; - -use crate::{ - CrossChainSafetyProvider, FinalizedL1Storage, HeadRefStorageReader, HeadRefStorageWriter, - LogStorageReader, Metrics, 
chaindb::ChainDb, error::StorageError, -}; -use alloy_primitives::ChainId; -use kona_interop::DerivedRefPair; -use kona_protocol::BlockInfo; -use kona_supervisor_metrics::{MetricsReporter, observe_metrics_for_result}; -use kona_supervisor_types::Log; -use op_alloy_consensus::interop::SafetyLevel; -use tracing::error; - -/// Factory for managing multiple chain databases. -/// This struct allows for the creation and retrieval of `ChainDb` instances -/// based on chain IDs, ensuring that each chain has its own database instance. -#[derive(Debug)] -pub struct ChainDbFactory { - db_path: PathBuf, - metrics_enabled: Option, - - dbs: RwLock>>, - /// Finalized L1 block reference, used for tracking the finalized L1 block. - /// In-memory only, not persisted. - finalized_l1: RwLock>, -} - -impl ChainDbFactory { - /// Create a new, empty factory. - pub fn new(db_path: PathBuf) -> Self { - Self { - db_path, - metrics_enabled: None, - dbs: RwLock::new(HashMap::new()), - finalized_l1: RwLock::new(None), - } - } - - /// Enables metrics on the database environment. - pub const fn with_metrics(mut self) -> Self { - self.metrics_enabled = Some(true); - self - } - - fn observe_call Result>( - &self, - name: &'static str, - f: F, - ) -> Result { - if self.metrics_enabled.unwrap_or(false) { - observe_metrics_for_result!( - Metrics::STORAGE_REQUESTS_SUCCESS_TOTAL, - Metrics::STORAGE_REQUESTS_ERROR_TOTAL, - Metrics::STORAGE_REQUEST_DURATION_SECONDS, - name, - f() - ) - } else { - f() - } - } - - /// Get or create a [`ChainDb`] for the given chain id. - /// - /// If the database does not exist, it will be created at the path `self.db_path/`. 
- pub fn get_or_create_db(&self, chain_id: ChainId) -> Result, StorageError> { - { - // Try to get it without locking for write - let dbs = self.dbs.read().map_err(|err| { - error!(target: "supervisor::storage", %err, "Failed to acquire read lock on databases"); - StorageError::LockPoisoned - })?; - if let Some(db) = dbs.get(&chain_id) { - return Ok(db.clone()); - } - } - - // Not found, create and insert - let mut dbs = self.dbs.write().map_err(|err| { - error!(target: "supervisor::storage", %err, "Failed to acquire write lock on databases"); - StorageError::LockPoisoned - })?; - // Double-check in case another thread inserted - if let Some(db) = dbs.get(&chain_id) { - return Ok(db.clone()); - } - - let chain_db_path = self.db_path.join(chain_id.to_string()); - let mut chain_db = ChainDb::new(chain_id, chain_db_path.as_path())?; - if self.metrics_enabled.unwrap_or(false) { - chain_db = chain_db.with_metrics(); - } - let db = Arc::new(chain_db); - dbs.insert(chain_id, db.clone()); - Ok(db) - } - - /// Get a [`ChainDb`] for the given chain id, returning an error if it doesn't exist. - /// - /// # Returns - /// * `Ok(Arc)` if the database exists. - /// * `Err(StorageError)` if the database does not exist. 
- pub fn get_db(&self, chain_id: ChainId) -> Result, StorageError> { - let dbs = self.dbs.read().map_err(|_| StorageError::LockPoisoned)?; - dbs.get(&chain_id).cloned().ok_or_else(|| StorageError::DatabaseNotInitialised) - } -} - -impl MetricsReporter for ChainDbFactory { - fn report_metrics(&self) { - let metrics_enabled = self.metrics_enabled.unwrap_or(false); - if metrics_enabled { - let dbs: Vec> = { - match self.dbs.read() { - Ok(dbs_guard) => dbs_guard.values().cloned().collect(), - Err(_) => { - error!(target: "supervisor::storage", "Failed to acquire read lock for metrics reporting"); - return; - } - } - }; - for db in dbs { - db.report_metrics(); - } - } - } -} - -impl FinalizedL1Storage for ChainDbFactory { - fn get_finalized_l1(&self) -> Result { - self.observe_call( - Metrics::STORAGE_METHOD_GET_FINALIZED_L1, - || { - let guard = self.finalized_l1.read().map_err(|err| { - error!(target: "supervisor::storage", %err, "Failed to acquire read lock on finalized_l1"); - StorageError::LockPoisoned - })?; - guard.as_ref().copied().ok_or(StorageError::FutureData) - } - ) - } - - fn update_finalized_l1(&self, block: BlockInfo) -> Result<(), StorageError> { - self.observe_call( - Metrics::STORAGE_METHOD_UPDATE_FINALIZED_L1, - || { - let mut guard = self - .finalized_l1 - .write() - .map_err(|err| { - error!(target: "supervisor::storage", %err, "Failed to acquire write lock on finalized_l1"); - StorageError::LockPoisoned - })?; - - // Check if the new block number is greater than the current finalized block - if let Some(ref current) = *guard - && block.number <= current.number - { - error!(target: "supervisor::storage", - current_block_number = current.number, - new_block_number = block.number, - "New finalized block number is not greater than current finalized block number", - ); - return Err(StorageError::BlockOutOfOrder); - } - *guard = Some(block); - Ok(()) - } - ) - } -} - -impl CrossChainSafetyProvider for ChainDbFactory { - fn get_block(&self, chain_id: 
ChainId, block_number: u64) -> Result { - self.get_db(chain_id)?.get_block(block_number) - } - - fn get_log( - &self, - chain_id: ChainId, - block_number: u64, - log_index: u32, - ) -> Result { - self.get_db(chain_id)?.get_log(block_number, log_index) - } - - fn get_block_logs( - &self, - chain_id: ChainId, - block_number: u64, - ) -> Result, StorageError> { - self.get_db(chain_id)?.get_logs(block_number) - } - - fn get_safety_head_ref( - &self, - chain_id: ChainId, - level: SafetyLevel, - ) -> Result { - self.get_db(chain_id)?.get_safety_head_ref(level) - } - - fn update_current_cross_unsafe( - &self, - chain_id: ChainId, - block: &BlockInfo, - ) -> Result<(), StorageError> { - self.get_db(chain_id)?.update_current_cross_unsafe(block) - } - - fn update_current_cross_safe( - &self, - chain_id: ChainId, - block: &BlockInfo, - ) -> Result { - self.get_db(chain_id)?.update_current_cross_safe(block) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use tempfile::TempDir; - - fn temp_factory() -> (TempDir, ChainDbFactory) { - let tmp = TempDir::new().expect("create temp dir"); - let factory = ChainDbFactory::new(tmp.path().to_path_buf()); - (tmp, factory) - } - - #[test] - fn test_get_or_create_db_creates_and_returns_db() { - let (_tmp, factory) = temp_factory(); - let db = factory.get_or_create_db(1).expect("should create db"); - assert!(Arc::strong_count(&db) >= 1); - } - - #[test] - fn test_get_or_create_db_returns_same_instance() { - let (_tmp, factory) = temp_factory(); - let db1 = factory.get_or_create_db(42).unwrap(); - let db2 = factory.get_or_create_db(42).unwrap(); - assert!(Arc::ptr_eq(&db1, &db2)); - } - - #[test] - fn test_get_db_returns_error_if_not_exists() { - let (_tmp, factory) = temp_factory(); - let err = factory.get_db(999).unwrap_err(); - assert!(matches!(err, StorageError::DatabaseNotInitialised)); - } - - #[test] - fn test_get_db_returns_existing_db() { - let (_tmp, factory) = temp_factory(); - let db = factory.get_or_create_db(7).unwrap(); - 
let db2 = factory.get_db(7).unwrap(); - assert!(Arc::ptr_eq(&db, &db2)); - } - - #[test] - fn test_db_path_is_unique_per_chain() { - let (tmp, factory) = temp_factory(); - let db1 = factory.get_or_create_db(1).unwrap(); - let db2 = factory.get_or_create_db(2).unwrap(); - assert!(!Arc::ptr_eq(&db1, &db2)); - - assert!(tmp.path().join("1").exists()); - assert!(tmp.path().join("2").exists()); - } - - #[test] - fn test_get_finalized_l1_returns_error_when_none() { - let (_tmp, factory) = temp_factory(); - let err = factory.get_finalized_l1().unwrap_err(); - assert!(matches!(err, StorageError::FutureData)); - } - - #[test] - fn test_update_and_get_finalized_l1_success() { - let (_tmp, factory) = temp_factory(); - let block1 = BlockInfo { number: 100, ..Default::default() }; - let block2 = BlockInfo { number: 200, ..Default::default() }; - - // Set first finalized block - factory.update_finalized_l1(block1).unwrap(); - assert_eq!(factory.get_finalized_l1().unwrap(), block1); - - // Update with higher block number - factory.update_finalized_l1(block2).unwrap(); - assert_eq!(factory.get_finalized_l1().unwrap(), block2); - } - - #[test] - fn test_update_finalized_l1_with_lower_block_number_errors() { - let (_tmp, factory) = temp_factory(); - let block1 = BlockInfo { number: 100, ..Default::default() }; - let block2 = BlockInfo { number: 50, ..Default::default() }; - - factory.update_finalized_l1(block1).unwrap(); - let err = factory.update_finalized_l1(block2).unwrap_err(); - assert!(matches!(err, StorageError::BlockOutOfOrder)); - } - - #[test] - fn test_update_finalized_l1_with_same_block_number_errors() { - let (_tmp, factory) = temp_factory(); - let block1 = BlockInfo { number: 100, ..Default::default() }; - let block2 = BlockInfo { number: 100, ..Default::default() }; - - factory.update_finalized_l1(block1).unwrap(); - let err = factory.update_finalized_l1(block2).unwrap_err(); - assert!(matches!(err, StorageError::BlockOutOfOrder)); - } -} diff --git 
a/rust/kona/crates/supervisor/storage/src/error.rs b/rust/kona/crates/supervisor/storage/src/error.rs deleted file mode 100644 index 909067117df28..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/error.rs +++ /dev/null @@ -1,99 +0,0 @@ -use alloy_eips::BlockNumHash; -use reth_db::DatabaseError; -use thiserror::Error; - -/// Errors that may occur while interacting with supervisor log storage. -/// -/// This enum is used across all implementations of the Storage traits. -#[derive(Debug, Error)] -pub enum StorageError { - /// Represents a database error that occurred while interacting with storage. - #[error(transparent)] - Database(#[from] DatabaseError), - - /// Represents an error that occurred while initializing the database. - #[error(transparent)] - DatabaseInit(#[from] eyre::Report), - - /// Represents an error that occurred while writing to the database. - #[error("lock poisoned")] - LockPoisoned, - - /// The expected entry was not found in the database. - #[error(transparent)] - EntryNotFound(#[from] EntryNotFoundError), - - /// Represents an error that occurred while getting data that is not yet available. - #[error("data not yet available")] - FutureData, - - /// Represents an error that occurred when database is not initialized. - #[error("database not initialized")] - DatabaseNotInitialised, - - /// Represents a conflict occurred while attempting to write to the database. - #[error("conflicting data")] - ConflictError, - - /// Represents an error that occurred while writing to log database. - #[error("latest stored block is not parent of the incoming block")] - BlockOutOfOrder, - - /// Represents an error that occurred when there is inconsistency in log storage - #[error("reorg required due to inconsistent storage state")] - ReorgRequired, - - /// Represents an error that occurred when attempting to rewind log storage beyond the local - /// safe head. - #[error("rewinding log storage beyond local safe head. 
to: {to}, local_safe: {local_safe}")] - RewindBeyondLocalSafeHead { - /// The target block number to rewind to. - to: u64, - /// The local safe head block number. - local_safe: u64, - }, -} - -impl PartialEq for StorageError { - fn eq(&self, other: &Self) -> bool { - use StorageError::{ - ConflictError, Database, DatabaseInit, DatabaseNotInitialised, EntryNotFound, - }; - match (self, other) { - (Database(a), Database(b)) => format!("{a}") == format!("{b}"), - (DatabaseInit(a), DatabaseInit(b)) => format!("{a}") == format!("{b}"), - (EntryNotFound(a), EntryNotFound(b)) => a == b, - (DatabaseNotInitialised, DatabaseNotInitialised) | (ConflictError, ConflictError) => { - true - } - _ => false, - } - } -} - -impl Eq for StorageError {} - -/// Entry not found error. -#[derive(Debug, Error, PartialEq, Eq)] -pub enum EntryNotFoundError { - /// No derived blocks found for given source block. - #[error("no derived blocks for source block, number: {}, hash: {}", .0.number, .0.hash)] - MissingDerivedBlocks(BlockNumHash), - - /// Expected source block not found. - #[error("source block not found, number: {0}")] - SourceBlockNotFound(u64), - - /// Expected derived block not found. - #[error("derived block not found, number: {0}")] - DerivedBlockNotFound(u64), - - /// Expected log not found. - #[error("log not found at block {block_number} index {log_index}")] - LogNotFound { - /// Block number. - block_number: u64, - /// Log index within the block. - log_index: u32, - }, -} diff --git a/rust/kona/crates/supervisor/storage/src/lib.rs b/rust/kona/crates/supervisor/storage/src/lib.rs deleted file mode 100644 index 24e1a0c489a5a..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/lib.rs +++ /dev/null @@ -1,44 +0,0 @@ -//! Persistent storage for the Supervisor. -//! -//! This crate provides structured, append-only storage for the Supervisor, -//! exposing high-level APIs to write and query logs, block metadata, and -//! other execution states. -//! -//! 
The storage system is built on top of `reth-db`, using MDBX, -//! and defines schemas for supervisor-specific data like: -//! - L2 log entries -//! - Block ancestry metadata -//! - Source and Derived Blocks -//! - Chain heads for safety levels: **SAFE**, **UNSAFE**, and **CROSS-SAFE** -//! -//! -//! ## Capabilities -//! -//! - Append logs emitted by L2 execution -//! - Look up logs by block number and index -//! - Rewind logs during reorgs -//! - Track sealed blocks and ancestry metadata - -pub mod models; -pub use models::SourceBlockTraversal; - -mod error; -pub use error::{EntryNotFoundError, StorageError}; - -mod providers; - -mod chaindb; -pub use chaindb::ChainDb; - -mod metrics; -pub(crate) use metrics::Metrics; - -mod chaindb_factory; -pub use chaindb_factory::ChainDbFactory; - -mod traits; -pub use traits::{ - CrossChainSafetyProvider, DbReader, DerivationStorage, DerivationStorageReader, - DerivationStorageWriter, FinalizedL1Storage, HeadRefStorage, HeadRefStorageReader, - HeadRefStorageWriter, LogStorage, LogStorageReader, LogStorageWriter, StorageRewinder, -}; diff --git a/rust/kona/crates/supervisor/storage/src/metrics.rs b/rust/kona/crates/supervisor/storage/src/metrics.rs deleted file mode 100644 index 56988aa5fab9a..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/metrics.rs +++ /dev/null @@ -1,118 +0,0 @@ -use alloy_primitives::ChainId; - -/// Container for `ChainDb` metrics. 
-#[derive(Debug, Clone)] -pub(crate) struct Metrics; - -// todo: implement this using the reth metrics for tables -impl Metrics { - pub(crate) const STORAGE_REQUESTS_SUCCESS_TOTAL: &'static str = - "kona_supervisor_storage_success_total"; - pub(crate) const STORAGE_REQUESTS_ERROR_TOTAL: &'static str = - "kona_supervisor_storage_error_total"; - pub(crate) const STORAGE_REQUEST_DURATION_SECONDS: &'static str = - "kona_supervisor_storage_duration_seconds"; - - pub(crate) const STORAGE_METHOD_DERIVED_TO_SOURCE: &'static str = "derived_to_source"; - pub(crate) const STORAGE_METHOD_LATEST_DERIVED_BLOCK_AT_SOURCE: &'static str = - "latest_derived_block_at_source"; - pub(crate) const STORAGE_METHOD_LATEST_DERIVATION_STATE: &'static str = - "latest_derivation_state"; - pub(crate) const STORAGE_METHOD_GET_SOURCE_BLOCK: &'static str = "get_source_block"; - pub(crate) const STORAGE_METHOD_GET_ACTIVATION_BLOCK: &'static str = "get_activation_block"; - pub(crate) const STORAGE_METHOD_INITIALISE_DERIVATION_STORAGE: &'static str = - "initialise_derivation_storage"; - pub(crate) const STORAGE_METHOD_SAVE_DERIVED_BLOCK: &'static str = "save_derived_block"; - pub(crate) const STORAGE_METHOD_SAVE_SOURCE_BLOCK: &'static str = "save_source_block"; - pub(crate) const STORAGE_METHOD_GET_LATEST_BLOCK: &'static str = "get_latest_block"; - pub(crate) const STORAGE_METHOD_GET_BLOCK: &'static str = "get_block"; - pub(crate) const STORAGE_METHOD_GET_LOG: &'static str = "get_log"; - pub(crate) const STORAGE_METHOD_GET_LOGS: &'static str = "get_logs"; - pub(crate) const STORAGE_METHOD_INITIALISE_LOG_STORAGE: &'static str = "initialise_log_storage"; - pub(crate) const STORAGE_METHOD_STORE_BLOCK_LOGS: &'static str = "store_block_logs"; - pub(crate) const STORAGE_METHOD_GET_SAFETY_HEAD_REF: &'static str = "get_safety_head_ref"; - pub(crate) const STORAGE_METHOD_GET_SUPER_HEAD: &'static str = "get_super_head"; - pub(crate) const STORAGE_METHOD_UPDATE_FINALIZED_USING_SOURCE: &'static str = - 
"update_finalized_using_source"; - pub(crate) const STORAGE_METHOD_UPDATE_CURRENT_CROSS_UNSAFE: &'static str = - "update_current_cross_unsafe"; - pub(crate) const STORAGE_METHOD_UPDATE_CURRENT_CROSS_SAFE: &'static str = - "update_current_cross_safe"; - pub(crate) const STORAGE_METHOD_UPDATE_FINALIZED_L1: &'static str = "update_finalized_l1"; - pub(crate) const STORAGE_METHOD_GET_FINALIZED_L1: &'static str = "get_finalized_l1"; - pub(crate) const STORAGE_METHOD_REWIND_LOG_STORAGE: &'static str = "rewind_log_storage"; - pub(crate) const STORAGE_METHOD_REWIND: &'static str = "rewind"; - pub(crate) const STORAGE_METHOD_REWIND_TO_SOURCE: &'static str = "rewind_to_source"; - - pub(crate) fn init(chain_id: ChainId) { - Self::describe(); - Self::zero(chain_id); - } - - fn describe() { - metrics::describe_counter!( - Self::STORAGE_REQUESTS_SUCCESS_TOTAL, - metrics::Unit::Count, - "Total number of successful Kona Supervisor Storage requests" - ); - metrics::describe_counter!( - Self::STORAGE_REQUESTS_ERROR_TOTAL, - metrics::Unit::Count, - "Total number of failed Kona Supervisor Storage requests" - ); - metrics::describe_histogram!( - Self::STORAGE_REQUEST_DURATION_SECONDS, - metrics::Unit::Seconds, - "Duration of Kona Supervisor Storage requests" - ); - } - - fn zero_storage_methods(chain_id: ChainId, method_name: &'static str) { - metrics::counter!( - Self::STORAGE_REQUESTS_SUCCESS_TOTAL, - "method" => method_name, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::counter!( - Self::STORAGE_REQUESTS_ERROR_TOTAL, - "method" => method_name, - "chain_id" => chain_id.to_string() - ) - .increment(0); - - metrics::histogram!( - Self::STORAGE_REQUEST_DURATION_SECONDS, - "method" => method_name, - "chain_id" => chain_id.to_string() - ) - .record(0.0); - } - - fn zero(chain_id: ChainId) { - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_DERIVED_TO_SOURCE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_LATEST_DERIVED_BLOCK_AT_SOURCE); - 
Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_LATEST_DERIVATION_STATE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_SOURCE_BLOCK); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_INITIALISE_DERIVATION_STORAGE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_SAVE_DERIVED_BLOCK); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_SAVE_SOURCE_BLOCK); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_LATEST_BLOCK); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_BLOCK); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_LOG); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_LOGS); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_INITIALISE_LOG_STORAGE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_STORE_BLOCK_LOGS); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_SAFETY_HEAD_REF); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_SUPER_HEAD); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_UPDATE_FINALIZED_USING_SOURCE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_UPDATE_CURRENT_CROSS_UNSAFE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_UPDATE_CURRENT_CROSS_SAFE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_UPDATE_FINALIZED_L1); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_GET_FINALIZED_L1); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_REWIND_LOG_STORAGE); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_REWIND); - Self::zero_storage_methods(chain_id, Self::STORAGE_METHOD_REWIND_TO_SOURCE); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/models/block.rs b/rust/kona/crates/supervisor/storage/src/models/block.rs deleted file mode 100644 index a477b95ffd182..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/models/block.rs +++ /dev/null @@ -1,127 +0,0 @@ -//! 
Models for storing block metadata in the database. -//! -//! This module defines the data structure and schema used for tracking -//! individual blocks by block number. The stored metadata includes block hash, -//! parent hash, and block timestamp. -//! -//! Unlike logs, each block is uniquely identified by its number and does not -//! require dup-sorting. - -use alloy_primitives::B256; -use derive_more::Display; -use kona_protocol::BlockInfo; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; - -/// Metadata reference for a single block. -/// -/// This struct captures essential block information required to track canonical -/// block lineage and verify ancestry. It is stored as the value -/// in the [`crate::models::BlockRefs`] table. -#[derive(Debug, Clone, Display, PartialEq, Eq, Default, Serialize, Deserialize, Compact)] -#[display("number: {number}, hash: {hash}, parent_hash: {parent_hash}, timestamp: {timestamp}")] -pub struct BlockRef { - /// The height of the block. - pub number: u64, - /// The hash of the block itself. - pub hash: B256, - /// The hash of the parent block (previous block in the chain). - pub parent_hash: B256, - /// The timestamp of the block (seconds since Unix epoch). - pub timestamp: u64, -} - -/// Converts from [`BlockInfo`] (external API format) to [`BlockRef`] (storage -/// format). -/// -/// Performs a direct field mapping. -impl From for BlockRef { - fn from(block: BlockInfo) -> Self { - Self { - number: block.number, - hash: block.hash, - parent_hash: block.parent_hash, - timestamp: block.timestamp, - } - } -} - -/// Converts from [`BlockRef`] (storage format) to [`BlockInfo`] (external API -/// format). -/// -/// This enables decoding values stored in a compact format for use in application logic. 
-impl From for BlockInfo { - fn from(block: BlockRef) -> Self { - Self { - number: block.number, - hash: block.hash, - parent_hash: block.parent_hash, - timestamp: block.timestamp, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::B256; - - fn test_b256(val: u8) -> B256 { - let mut val_bytes = [0u8; 32]; - val_bytes[0] = val; - let b256_from_val = B256::from(val_bytes); - B256::random() ^ b256_from_val - } - - #[test] - fn test_block_ref_compact_roundtrip() { - let original_ref = BlockRef { - number: 42, - hash: test_b256(10), - parent_hash: test_b256(11), - timestamp: 1678886400, - }; - - let mut buffer = Vec::new(); - let bytes_written = original_ref.to_compact(&mut buffer); - assert_eq!(bytes_written, buffer.len(), "Bytes written should match buffer length"); - - let (deserialized_ref, remaining_buf) = BlockRef::from_compact(&buffer, bytes_written); - assert_eq!(original_ref, deserialized_ref, "Original and deserialized ref should be equal"); - assert!(remaining_buf.is_empty(), "Remaining buffer should be empty after deserialization"); - } - - #[test] - fn test_from_block_info_to_block_ref() { - let block_info = BlockInfo { - number: 123, - hash: test_b256(1), - parent_hash: test_b256(2), - timestamp: 1600000000, - }; - - let block_ref: BlockRef = block_info.into(); - - assert_eq!(block_ref.number, block_info.number, "Number should match"); - assert_eq!(block_ref.hash, block_info.hash, "Hash should match"); - assert_eq!(block_ref.parent_hash, block_info.parent_hash, "Parent hash should match"); - assert_eq!(block_ref.timestamp, block_info.timestamp, "Time (timestamp) should match"); - } - - #[test] - fn test_from_block_ref_to_block_info() { - let block_ref = BlockRef { - number: 456, - hash: test_b256(3), - parent_hash: test_b256(4), - timestamp: 1700000000, - }; - - let block_info: BlockInfo = block_ref.clone().into(); - - assert_eq!(block_info.number, block_ref.number, "Number should match"); - assert_eq!(block_info.hash, 
block_ref.hash, "Hash should match"); - assert_eq!(block_info.parent_hash, block_ref.parent_hash, "Parent hash should match"); - assert_eq!(block_info.timestamp, block_ref.timestamp, "Timestamp (time) should match"); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/models/common.rs b/rust/kona/crates/supervisor/storage/src/models/common.rs deleted file mode 100644 index 968dac51f6db5..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/models/common.rs +++ /dev/null @@ -1,87 +0,0 @@ -//! Common model types used across various storage tables. - -use derive_more::{Deref, DerefMut}; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; - -/// Wrapper for `Vec` to represent a list of numbers. -// todo: add support for Vec<64> in table -#[derive( - Deref, DerefMut, Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize, Compact, -)] -pub struct U64List(pub Vec); - -#[cfg(test)] -mod tests { - use super::*; - use reth_codecs::Compact; - - #[test] - fn test_u64list_compact_empty() { - let original_list = U64List(Vec::new()); - - let mut buffer = Vec::new(); - let bytes_written = original_list.to_compact(&mut buffer); - - assert_eq!( - bytes_written, - buffer.len(), - "Bytes written should match buffer length for empty list" - ); - let (deserialized_list, remaining_buf) = U64List::from_compact(&buffer, bytes_written); - - assert_eq!( - original_list, deserialized_list, - "Original and deserialized empty lists should be equal" - ); - assert!( - remaining_buf.is_empty(), - "Remaining buffer should be empty after deserialization of empty list" - ); - } - - #[test] - fn test_u64list_compact_with_data() { - let original_list = U64List(vec![10, 20, 30, 40, 50]); - - let mut buffer = Vec::new(); - let bytes_written = original_list.to_compact(&mut buffer); - - assert_eq!( - bytes_written, - buffer.len(), - "Bytes written should match buffer length for list with data" - ); - let (deserialized_list, remaining_buf) = U64List::from_compact(&buffer, 
bytes_written); - - assert_eq!( - original_list, deserialized_list, - "Original and deserialized lists with data should be equal" - ); - assert!( - remaining_buf.is_empty(), - "Remaining buffer should be empty after deserialization of list with data" - ); - } - - #[test] - fn test_u64list_deref() { - let list = U64List(vec![1, 2, 3]); - assert_eq!(list.len(), 3); - assert_eq!(list[0], 1); - assert!(!list.is_empty()); - } - - #[test] - fn test_u64list_deref_mut() { - let mut list = U64List(vec![1, 2, 3]); - list.push(4); - assert_eq!(list.0, vec![1, 2, 3, 4]); - - list.sort(); - assert_eq!(list.0, vec![1, 2, 3, 4]); - - list.clear(); - assert!(list.is_empty()); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/models/derivation.rs b/rust/kona/crates/supervisor/storage/src/models/derivation.rs deleted file mode 100644 index c8b022bc4947a..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/models/derivation.rs +++ /dev/null @@ -1,220 +0,0 @@ -//! Models for storing blockchain derivation in the database. -//! -//! This module defines the data structure and schema used for tracking -//! how blocks are derived from source. This is particularly relevant -//! in rollup contexts, such as linking an L2 block to its originating L1 block. - -use super::{BlockRef, U64List}; -use kona_interop::DerivedRefPair; -use reth_codecs::Compact; -use serde::{Deserialize, Serialize}; - -/// Represents a pair of blocks where one block [`derived`](`Self::derived`) is derived -/// from another [`source`](`Self::source`). -/// -/// This structure is used to track the lineage of blocks where L2 blocks are derived from L1 -/// blocks. It stores the [`BlockRef`] information for both the source and the derived blocks. -/// It is stored as value in the [`DerivedBlocks`](`crate::models::DerivedBlocks`) table. 
-#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] -pub struct StoredDerivedBlockPair { - /// The block that was derived from the [`source`](`Self::source`) block. - pub derived: BlockRef, - /// The source block from which the [`derived`](`Self::derived`) block was created. - pub source: BlockRef, -} - -impl Compact for StoredDerivedBlockPair { - fn to_compact>(&self, buf: &mut B) -> usize { - let mut bytes_written = 0; - bytes_written += self.derived.to_compact(buf); - bytes_written += self.source.to_compact(buf); - bytes_written - } - - fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { - let (derived, remaining_buf) = BlockRef::from_compact(buf, buf.len()); - let (source, final_remaining_buf) = - BlockRef::from_compact(remaining_buf, remaining_buf.len()); - (Self { derived, source }, final_remaining_buf) - } -} - -/// Converts from [`StoredDerivedBlockPair`] (storage format) to [`DerivedRefPair`] (external API -/// format). -/// -/// Performs a direct field mapping. -impl From for DerivedRefPair { - fn from(pair: StoredDerivedBlockPair) -> Self { - Self { derived: pair.derived.into(), source: pair.source.into() } - } -} - -/// Converts from [`DerivedRefPair`] (external API format) to [`StoredDerivedBlockPair`] (storage -/// format). -/// -/// Performs a direct field mapping. -impl From for StoredDerivedBlockPair { - fn from(pair: DerivedRefPair) -> Self { - Self { derived: pair.derived.into(), source: pair.source.into() } - } -} - -impl StoredDerivedBlockPair { - /// Creates a new [`StoredDerivedBlockPair`] from the given [`BlockRef`]s. - /// - /// # Arguments - /// - /// * `source` - The source block reference. - /// * `derived` - The derived block reference. - pub const fn new(source: BlockRef, derived: BlockRef) -> Self { - Self { source, derived } - } -} - -/// Represents a traversal of source blocks and their derived blocks. 
-/// -/// This structure is used to track the lineage of blocks where L2 blocks are derived from L1 -/// blocks. It stores the [`BlockRef`] information for the source block and the list of derived -/// block numbers. It is stored as value in the [`BlockTraversal`](`crate::models::BlockTraversal`) -/// table. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] -pub struct SourceBlockTraversal { - /// The source block reference. - pub source: BlockRef, - /// The list of derived block numbers. - pub derived_block_numbers: U64List, -} - -impl Compact for SourceBlockTraversal { - fn to_compact>(&self, buf: &mut B) -> usize { - let mut bytes_written = 0; - bytes_written += self.source.to_compact(buf); - bytes_written += self.derived_block_numbers.to_compact(buf); - bytes_written - } - - fn from_compact(buf: &[u8], _len: usize) -> (Self, &[u8]) { - let (source, remaining_buf) = BlockRef::from_compact(buf, buf.len()); - let (derived_block_numbers, final_remaining_buf) = - U64List::from_compact(remaining_buf, remaining_buf.len()); - (Self { source, derived_block_numbers }, final_remaining_buf) - } -} - -impl SourceBlockTraversal { - /// Creates a new [`SourceBlockTraversal`] from the given [`BlockRef`] and [`U64List`]. - /// - /// # Arguments - /// - /// * `source` - The source block reference. - /// * `derived_block_numbers` - The list of derived block numbers. 
- pub const fn new(source: BlockRef, derived_block_numbers: U64List) -> Self { - Self { source, derived_block_numbers } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::models::BlockRef; - use alloy_primitives::B256; - use kona_interop::DerivedRefPair; - use kona_protocol::BlockInfo; - use reth_codecs::Compact; - - fn test_b256(val: u8) -> B256 { - let mut val_bytes = [0u8; 32]; - val_bytes[0] = val; - let b256_from_val = B256::from(val_bytes); - B256::random() ^ b256_from_val - } - - #[test] - fn test_derived_block_pair_compact_roundtrip() { - let source_ref = BlockRef { - number: 100, - hash: test_b256(1), - parent_hash: test_b256(2), - timestamp: 1000, - }; - let derived_ref = BlockRef { - number: 200, - hash: test_b256(3), - parent_hash: test_b256(4), - timestamp: 1010, - }; - - let original_pair = StoredDerivedBlockPair { source: source_ref, derived: derived_ref }; - - let mut buffer = Vec::new(); - let bytes_written = original_pair.to_compact(&mut buffer); - - assert_eq!(bytes_written, buffer.len(), "Bytes written should match buffer length"); - let (deserialized_pair, remaining_buf) = - StoredDerivedBlockPair::from_compact(&buffer, bytes_written); - - assert_eq!( - original_pair, deserialized_pair, - "Original and deserialized pairs should be equal" - ); - assert!(remaining_buf.is_empty(), "Remaining buffer should be empty after deserialization"); - } - - #[test] - fn test_from_stored_to_derived_ref_pair() { - let source_ref = - BlockRef { number: 1, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 100 }; - let derived_ref = - BlockRef { number: 2, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 200 }; - - let stored = - StoredDerivedBlockPair { source: source_ref.clone(), derived: derived_ref.clone() }; - - // Convert to DerivedRefPair - let derived: DerivedRefPair = stored.into(); - - // The conversion should map fields directly (BlockRef -> BlockInfo) - assert_eq!(BlockInfo::from(source_ref), derived.source); - 
assert_eq!(BlockInfo::from(derived_ref), derived.derived); - } - - #[test] - fn test_from_derived_ref_pair_to_stored() { - let source_info = - BlockInfo { number: 10, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 100 }; - let derived_info = - BlockInfo { number: 20, hash: B256::ZERO, parent_hash: B256::ZERO, timestamp: 200 }; - - let pair = DerivedRefPair { source: source_info, derived: derived_info }; - - // Convert to StoredDerivedBlockPair - let stored: StoredDerivedBlockPair = pair.into(); - - // The conversion should map fields directly (BlockInfo -> BlockRef) - assert_eq!(BlockRef::from(source_info), stored.source); - assert_eq!(BlockRef::from(derived_info), stored.derived); - } - - #[test] - fn test_source_block_traversal_compact_roundtrip() { - let source_ref = BlockRef { - number: 123, - hash: test_b256(10), - parent_hash: test_b256(11), - timestamp: 1111, - }; - let derived_block_numbers = U64List(vec![1, 2, 3, 4, 5]); - let original = SourceBlockTraversal { source: source_ref, derived_block_numbers }; - - let mut buffer = Vec::new(); - let bytes_written = original.to_compact(&mut buffer); - assert_eq!(bytes_written, buffer.len(), "Bytes written should match buffer length"); - let (deserialized, remaining_buf) = - SourceBlockTraversal::from_compact(&buffer, bytes_written); - assert_eq!( - original, deserialized, - "Original and deserialized SourceBlockTraversal should be equal" - ); - assert!(remaining_buf.is_empty(), "Remaining buffer should be empty after deserialization"); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/models/head_ref.rs b/rust/kona/crates/supervisor/storage/src/models/head_ref.rs deleted file mode 100644 index 10904166c3b3d..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/models/head_ref.rs +++ /dev/null @@ -1,138 +0,0 @@ -use derive_more::TryFrom; -use op_alloy_consensus::interop::SafetyLevel; -use reth_db::DatabaseError; -use reth_db_api::table; -use serde::{Deserialize, Serialize}; - -/// Key 
representing a particular head reference type. -#[derive( - Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize, TryFrom, -)] -#[try_from(repr)] -#[repr(u8)] -pub enum SafetyHeadRefKey { - /// Latest unverified or unsafe head. - Unsafe = 0, - - /// Head block considered safe via local verification. - LocalSafe = 1, - - /// Head block considered unsafe via cross-chain sync. - CrossUnsafe = 2, - - /// Head block considered safe. - Safe = 3, - - /// Finalized head block. - Finalized = 4, - - /// Invalid head reference. - Invalid = u8::MAX, -} - -/// Implementation of [`table::Encode`] for [`SafetyHeadRefKey`]. -impl table::Encode for SafetyHeadRefKey { - type Encoded = [u8; 1]; - - fn encode(self) -> Self::Encoded { - [self as u8] - } -} - -/// Implementation of [`table::Decode`] for [`SafetyHeadRefKey`]. -impl table::Decode for SafetyHeadRefKey { - fn decode(value: &[u8]) -> Result { - if value.is_empty() { - return Err(DatabaseError::Decode); - } - - value[0].try_into().map_err(|_| DatabaseError::Decode) - } -} - -/// Converts from [`SafetyHeadRefKey`] (internal storage reference) to [`SafetyLevel`] (public API -/// format). -/// -/// Performs a lossless and direct mapping from head reference level to safety level. -impl From for SafetyLevel { - fn from(key: SafetyHeadRefKey) -> Self { - match key { - SafetyHeadRefKey::Unsafe => Self::LocalUnsafe, - SafetyHeadRefKey::LocalSafe => Self::LocalSafe, - SafetyHeadRefKey::CrossUnsafe => Self::CrossUnsafe, - SafetyHeadRefKey::Safe => Self::CrossSafe, - SafetyHeadRefKey::Finalized => Self::Finalized, - SafetyHeadRefKey::Invalid => Self::Invalid, - } - } -} - -/// Converts from [`SafetyLevel`] (public API format) to [`SafetyHeadRefKey`] (internal storage -/// reference). -/// -/// Performs a direct mapping from safety level to head reference key. 
-impl From for SafetyHeadRefKey { - fn from(key: SafetyLevel) -> Self { - match key { - SafetyLevel::LocalUnsafe => Self::Unsafe, - SafetyLevel::LocalSafe => Self::LocalSafe, - SafetyLevel::CrossUnsafe => Self::CrossUnsafe, - SafetyLevel::CrossSafe => Self::Safe, - SafetyLevel::Finalized => Self::Finalized, - SafetyLevel::Invalid => Self::Invalid, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use reth_db_api::table::{Decode, Encode}; - #[test] - fn test_head_ref_key_encode_decode() { - let cases = vec![ - (SafetyHeadRefKey::Unsafe, [0]), - (SafetyHeadRefKey::LocalSafe, [1]), - (SafetyHeadRefKey::CrossUnsafe, [2]), - (SafetyHeadRefKey::Safe, [3]), - (SafetyHeadRefKey::Finalized, [4]), - (SafetyHeadRefKey::Invalid, [255]), - ]; - - for (key, expected_encoding) in &cases { - // Test encoding - let encoded = key.encode(); - assert_eq!(encoded, *expected_encoding, "Encoding failed for {key:?}"); - - // Test decoding - let decoded = SafetyHeadRefKey::decode(&encoded).expect("Decoding should succeed"); - assert_eq!(decoded, *key, "Decoding mismatch for {key:?}"); - } - } - #[test] - fn test_round_trip_conversion() { - for level in [ - SafetyLevel::LocalUnsafe, - SafetyLevel::LocalSafe, - SafetyLevel::CrossUnsafe, - SafetyLevel::CrossSafe, - SafetyLevel::Finalized, - SafetyLevel::Invalid, - ] { - let round_trip = SafetyLevel::from(SafetyHeadRefKey::from(level)); - assert_eq!(round_trip, level, "Round-trip failed for {level:?}"); - } - - for key in [ - SafetyHeadRefKey::Unsafe, - SafetyHeadRefKey::LocalSafe, - SafetyHeadRefKey::CrossUnsafe, - SafetyHeadRefKey::Safe, - SafetyHeadRefKey::Finalized, - SafetyHeadRefKey::Invalid, - ] { - let round_trip = SafetyHeadRefKey::from(SafetyLevel::from(key)); - assert_eq!(round_trip, key, "Round-trip failed for {key:?}"); - } - } -} diff --git a/rust/kona/crates/supervisor/storage/src/models/log.rs b/rust/kona/crates/supervisor/storage/src/models/log.rs deleted file mode 100644 index 0ab2787de5637..0000000000000 --- 
a/rust/kona/crates/supervisor/storage/src/models/log.rs +++ /dev/null @@ -1,299 +0,0 @@ -//! Models for storing blockchain logs in the database. -//! -//! This module defines the data structure and table mapping for logs emitted during -//! transaction execution. Each log is uniquely identified by its block number and -//! index within the block. -//! -//! The table is dup-sorted, allowing efficient grouping of multiple logs per block. -//! It supports fast appends, retrieval, and range queries ordered by log index. - -use alloy_primitives::B256; -use bytes::{Buf, BufMut}; -use kona_supervisor_types::{ExecutingMessage, Log}; -use reth_codecs::Compact; -use reth_primitives_traits::ValueWithSubKey; -use serde::{Deserialize, Serialize}; - -/// Metadata associated with a single emitted log. -/// -/// This is the value stored in the [`crate::models::LogEntries`] dup-sorted table. Each entry -/// includes: -/// - `index` - Index of the log in a block. -/// - `hash`: The keccak256 hash of the log event. -/// - `executing_message` - An optional field that may contain a cross-domain execution message. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] -pub struct LogEntry { - /// Index of the log. - pub index: u32, - /// The keccak256 hash of the emitted log event. - pub hash: B256, - /// Optional cross-domain execution message. - pub executing_message: Option, -} -/// Compact encoding and decoding implementation for [`LogEntry`]. -/// -/// This encoding is used for storing log entries in dup-sorted tables, -/// where the `index` field is treated as the subkey. The layout is optimized -/// for lexicographic ordering by `index`. -/// -/// ## Encoding Layout (ordered): -/// - `index: u32` – Log index (subkey), used for ordering within dup table. -/// - `has_msg: u8` – 1 if `executing_message` is present, 0 otherwise. -/// - `hash: B256` – 32-byte Keccak256 hash of the log. -/// - `executing_message: Option` – Compact-encoded if present. 
-impl Compact for LogEntry { - fn to_compact(&self, buf: &mut B) -> usize - where - B: BufMut + AsMut<[u8]>, - { - let start_len = buf.remaining_mut(); - - buf.put_u32(self.index); // Subkey must be at first - buf.put_u8(self.executing_message.is_some() as u8); - buf.put_slice(self.hash.as_slice()); - - if let Some(msg) = &self.executing_message { - msg.to_compact(buf); - } - - start_len - buf.remaining_mut() - } - - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { - let index = buf.get_u32(); - let has_msg = buf.get_u8() != 0; - - assert!(buf.len() >= 32, "LogEntry::from_compact: buffer too small for hash"); - let hash = B256::from_slice(&buf[..32]); - buf.advance(32); - - let executing_message = has_msg.then(|| { - let (msg, rest) = ExecutingMessageEntry::from_compact(buf, buf.len()); - buf = rest; - msg - }); - - (Self { index, hash, executing_message }, buf) - } -} - -impl ValueWithSubKey for LogEntry { - type SubKey = u32; - - fn get_subkey(&self) -> Self::SubKey { - self.index - } -} - -/// Conversion from [`Log`] to [`LogEntry`] used for internal storage. -/// -/// Maps fields 1:1, converting `executing_message` using `Into`. -impl From for LogEntry { - fn from(log: Log) -> Self { - Self { - index: log.index, - hash: log.hash, - executing_message: log.executing_message.map(Into::into), - } - } -} - -/// Conversion from [`LogEntry`] to [`Log`] for external API use. -/// -/// Mirrors the conversion from `Log`, enabling easy retrieval. -impl From for Log { - fn from(log: LogEntry) -> Self { - Self { - index: log.index, - hash: log.hash, - executing_message: log.executing_message.map(Into::into), - } - } -} - -/// Represents an entry of an executing message, containing metadata -/// about the message's origin and context within the blockchain. -/// - `chain_id` (`u64`): The unique identifier of the blockchain where the message originated. -/// - `block_number` (`u64`): The block number in the blockchain where the message originated. 
-/// - `log_index` (`u64`): The index of the log entry within the block where the message was logged. -/// - `timestamp` (`u64`): The timestamp associated with the block where the message was recorded. -/// - `hash` (`B256`): The unique hash identifier of the message. -#[derive(Debug, Clone, PartialEq, Eq, Default, Serialize, Deserialize)] -pub struct ExecutingMessageEntry { - /// Log index within the block. - pub log_index: u32, - /// ID of the chain where the message was emitted. - pub chain_id: u64, - /// Block number in the source chain. - pub block_number: u64, - /// Timestamp of the block. - pub timestamp: u64, - /// Hash of the message. - pub hash: B256, -} - -/// Compact encoding for [`ExecutingMessageEntry`] used in log storage. -/// -/// This format ensures deterministic encoding and lexicographic ordering by -/// placing `log_index` first, which is used as the subkey in dup-sorted tables. -/// -/// ## Encoding Layout (ordered): -/// - `log_index: u32` – Subkey for dup sort ordering. -/// - `chain_id: u64` -/// - `block_number: u64` -/// - `timestamp: u64` -/// - `hash: B256` – 32-byte message hash. 
-impl Compact for ExecutingMessageEntry { - fn to_compact(&self, buf: &mut B) -> usize - where - B: BufMut + AsMut<[u8]>, - { - let start_len = buf.remaining_mut(); - - buf.put_u32(self.log_index); - buf.put_u64(self.chain_id); - buf.put_u64(self.block_number); - buf.put_u64(self.timestamp); - buf.put_slice(self.hash.as_slice()); - - start_len - buf.remaining_mut() - } - - fn from_compact(mut buf: &[u8], _len: usize) -> (Self, &[u8]) { - let log_index = buf.get_u32(); - let chain_id = buf.get_u64(); - let block_number = buf.get_u64(); - let timestamp = buf.get_u64(); - - assert!(buf.len() >= 32, "ExecutingMessageEntry::from_compact: buffer too small for hash"); - let hash = B256::from_slice(&buf[..32]); - buf.advance(32); - - (Self { chain_id, block_number, timestamp, hash, log_index }, buf) - } -} - -/// Converts from [`ExecutingMessage`] (external API format) to [`ExecutingMessageEntry`] (storage -/// format). -/// -/// Performs a direct field mapping. -impl From for ExecutingMessageEntry { - fn from(msg: ExecutingMessage) -> Self { - Self { - chain_id: msg.chain_id, - block_number: msg.block_number, - log_index: msg.log_index, - timestamp: msg.timestamp, - hash: msg.hash, - } - } -} - -/// Converts from [`ExecutingMessageEntry`] (storage format) to [`ExecutingMessage`] (external API -/// format). -/// -/// This enables decoding values stored in a compact format for use in application logic. -impl From for ExecutingMessage { - fn from(msg: ExecutingMessageEntry) -> Self { - Self { - chain_id: msg.chain_id, - block_number: msg.block_number, - log_index: msg.log_index, - timestamp: msg.timestamp, - hash: msg.hash, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; // Imports LogEntry, ExecutingMessageEntry - use alloy_primitives::B256; - use reth_codecs::Compact; // For the Compact trait methods - - // Helper to create somewhat unique B256 values for testing. - // Assumes the "rand" feature for alloy-primitives is enabled for tests. 
- fn test_b256(val: u8) -> B256 { - let mut val_bytes = [0u8; 32]; - val_bytes[0] = val; - let b256_from_val = B256::from(val_bytes); - B256::random() ^ b256_from_val - } - - #[test] - fn test_log_entry_compact_roundtrip_with_message() { - let original_log_entry = LogEntry { - index: 100, - hash: test_b256(1), - executing_message: Some(ExecutingMessageEntry { - chain_id: 10, - block_number: 1001, - log_index: 5, - timestamp: 1234567890, - hash: test_b256(2), - }), - }; - - let mut buffer = Vec::new(); - let bytes_written = original_log_entry.to_compact(&mut buffer); - - assert_eq!(bytes_written, buffer.len(), "Bytes written should match buffer length"); - assert!(!buffer.is_empty(), "Buffer should not be empty after compression"); - - let (deserialized_log_entry, remaining_buf) = - LogEntry::from_compact(&buffer, bytes_written); - - assert_eq!( - original_log_entry, deserialized_log_entry, - "Original and deserialized log entries should be equal" - ); - assert!(remaining_buf.is_empty(), "Remaining buffer should be empty after deserialization"); - } - - #[test] - fn test_log_entry_compact_roundtrip_without_message() { - let original_log_entry = - LogEntry { index: 100, hash: test_b256(3), executing_message: None }; - - let mut buffer = Vec::new(); - let bytes_written = original_log_entry.to_compact(&mut buffer); - - assert_eq!(bytes_written, buffer.len(), "Bytes written should match buffer length"); - assert!(!buffer.is_empty(), "Buffer should not be empty after compression"); - - let (deserialized_log_entry, remaining_buf) = - LogEntry::from_compact(&buffer, bytes_written); - - assert_eq!( - original_log_entry, deserialized_log_entry, - "Original and deserialized log entries should be equal" - ); - assert!(remaining_buf.is_empty(), "Remaining buffer should be empty after deserialization"); - } - - #[test] - fn test_executing_message_entry_compact_roundtrip() { - let original_entry = ExecutingMessageEntry { - log_index: 42, - chain_id: 1, - block_number: 123456, - 
timestamp: 1_654_321_000, - hash: test_b256(77), - }; - - let mut buffer = Vec::new(); - let bytes_written = original_entry.to_compact(&mut buffer); - - assert_eq!(bytes_written, buffer.len(), "Bytes written should match buffer length"); - assert!(!buffer.is_empty(), "Buffer should not be empty after serialization"); - - let (decoded_entry, remaining_buf) = - ExecutingMessageEntry::from_compact(&buffer, bytes_written); - - assert_eq!( - original_entry, decoded_entry, - "Original and decoded ExecutingMessageEntry should be equal" - ); - assert!(remaining_buf.is_empty(), "Remaining buffer should be empty after decoding"); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/models/mod.rs b/rust/kona/crates/supervisor/storage/src/models/mod.rs deleted file mode 100644 index 357142fee4f42..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/models/mod.rs +++ /dev/null @@ -1,247 +0,0 @@ -//! Database table schemas used by the Supervisor. -//! -//! This module defines the value types, keys, and table layouts for all data -//! persisted by the `supervisor` component of the node. -//! -//! The tables are registered using [`reth_db_api::table::TableInfo`] and grouped into a -//! [`reth_db_api::TableSet`] for database initialization via Reth's storage-api. - -use reth_db_api::{ - TableSet, TableType, TableViewer, - table::{DupSort, TableInfo}, - tables, -}; -use std::fmt; - -mod log; -pub use log::{ExecutingMessageEntry, LogEntry}; - -mod block; -pub use block::BlockRef; - -mod derivation; -pub use derivation::{SourceBlockTraversal, StoredDerivedBlockPair}; - -mod common; -mod head_ref; -pub use head_ref::SafetyHeadRefKey; - -pub use common::U64List; - -/// Implements [`reth_db_api::table::Compress`] and [`reth_db_api::table::Decompress`] traits for -/// types that implement [`reth_codecs::Compact`]. -/// -/// This macro defines how to serialize and deserialize a type into a compressed -/// byte format using Reth's compact codec system. 
-/// -/// # Example -/// ```ignore -/// impl_compression_for_compact!(BlockRef, LogEntry); -/// ``` -macro_rules! impl_compression_for_compact { - ($($name:ident$(<$($generic:ident),*>)?),+) => { - $( - impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? reth_db_api::table::Compress for $name$(<$($generic),*>)? { - type Compressed = Vec; - - fn compress_to_buf>(&self, buf: &mut B) { - let _ = reth_codecs::Compact::to_compact(self, buf); - } - } - - impl$(<$($generic: core::fmt::Debug + Send + Sync + Compact),*>)? reth_db_api::table::Decompress for $name$(<$($generic),*>)? { - fn decompress(value: &[u8]) -> Result<$name$(<$($generic),*>)?, reth_db_api::DatabaseError> { - let (obj, _) = reth_codecs::Compact::from_compact(value, value.len()); - Ok(obj) - } - } - )+ - }; -} - -// Implement compression logic for all value types stored in tables -impl_compression_for_compact!( - BlockRef, - LogEntry, - StoredDerivedBlockPair, - U64List, - SourceBlockTraversal -); - -tables! { - /// A dup-sorted table that stores all logs emitted in a given block, sorted by their index. - /// Keyed by block number, with log index as the subkey for DupSort. - table LogEntries { - type Key = u64; // Primary key: u64 (block_number) - type Value = LogEntry; // Value: The log metadata - type SubKey = u32; // SubKey for DupSort: u32 (log_index) - } - - /// A table for storing block metadata by block number. - /// This is a standard table (not dup-sorted) where: - /// - Key: `u64` — block number - /// - Value: [`BlockRef`] — block metadata - table BlockRefs { - type Key = u64; - type Value = BlockRef; - } - - /// A table mapping a derived block number to its corresponding source and derived block reference. 
- /// - Key: `u64` — derived block number - /// - Value: [`StoredDerivedBlockPair`] — pair of source and derived block reference - table DerivedBlocks { - type Key = u64; - type Value = StoredDerivedBlockPair; - } - - /// A table mapping a source block number to a struct representing the traversal of its derived - /// block numbers. - /// - Key: `u64` — source block number - /// - Value: [`SourceBlockTraversal`] — contains the source block reference and the list of - /// derived block numbers. - table BlockTraversal { - type Key = u64; - type Value = SourceBlockTraversal; - } - - /// Stores the latest head block reference for each safety level. - /// # Key - /// - [`SafetyHeadRefKey`] — Enum variant indicating the type of head being tracked - /// (e.g., unsafe, locally safe, cross-chain safe, finalized). - /// - /// # Value - /// - [`BlockRef`] — Reference to a block including block number and hash. - table SafetyHeadRefs { - type Key = SafetyHeadRefKey; - type Value = BlockRef; - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::B256; - use reth_db_api::table::{Compress, Decompress}; - - // Helper to create somewhat unique B256 values for testing. 
- fn test_b256(val: u8) -> B256 { - let mut val_bytes = [0u8; 32]; - val_bytes[0] = val; // Place the u8 into the first byte of the array - let b256_from_val = B256::from(val_bytes); - B256::random() ^ b256_from_val - } - - #[test] - fn test_block_ref_compression_decompression() { - let original = BlockRef { - number: 1, - hash: test_b256(1), - parent_hash: test_b256(2), - timestamp: 1234567890, - }; - - let mut compressed_buf = Vec::new(); - original.compress_to_buf(&mut compressed_buf); - - // Ensure some data was written - assert!(!compressed_buf.is_empty()); - - let decompressed = BlockRef::decompress(&compressed_buf).unwrap(); - assert_eq!(original, decompressed); - } - - #[test] - fn test_log_entry_compression_decompression_with_message() { - let original = LogEntry { - index: 1, - hash: test_b256(3), - executing_message: Some(ExecutingMessageEntry { - chain_id: 1, - block_number: 100, - log_index: 2, - timestamp: 12345, - hash: test_b256(4), - }), - }; - - let mut compressed_buf = Vec::new(); - original.compress_to_buf(&mut compressed_buf); - assert!(!compressed_buf.is_empty()); - let decompressed = LogEntry::decompress(&compressed_buf).unwrap(); - assert_eq!(original, decompressed); - } - - #[test] - fn test_log_entry_compression_decompression_without_message() { - let original = LogEntry { index: 1, hash: test_b256(5), executing_message: None }; - let mut compressed_buf = Vec::new(); - original.compress_to_buf(&mut compressed_buf); - assert!(!compressed_buf.is_empty()); - let decompressed = LogEntry::decompress(&compressed_buf).unwrap(); - assert_eq!(original, decompressed); - } - - #[test] - fn test_derived_block_pair_compression_decompression() { - let source_ref = BlockRef { - number: 100, - hash: test_b256(6), - parent_hash: test_b256(7), - timestamp: 1000, - }; - let derived_ref = BlockRef { - number: 200, - hash: test_b256(8), - parent_hash: test_b256(8), // Link to source - timestamp: 1010, - }; - - let original_pair = StoredDerivedBlockPair { 
source: source_ref, derived: derived_ref }; - - let mut compressed_buf = Vec::new(); - original_pair.compress_to_buf(&mut compressed_buf); - - assert!(!compressed_buf.is_empty(), "Buffer should not be empty after compression"); - - let decompressed_pair = StoredDerivedBlockPair::decompress(&compressed_buf).unwrap(); - assert_eq!( - original_pair, decompressed_pair, - "Original and deserialized pairs should be equal" - ); - } - - #[test] - fn test_u64list_compression_decompression_empty() { - let original_list = U64List(Vec::new()); - - let mut compressed_buf = Vec::new(); - original_list.compress_to_buf(&mut compressed_buf); - - // For an empty list, the compact representation might also be empty or very small. - // The primary check is that deserialization works and results in an empty list. - let decompressed_list = U64List::decompress(&compressed_buf).unwrap(); - assert_eq!( - original_list, decompressed_list, - "Original and deserialized empty U64List should be equal" - ); - } - - #[test] - fn test_u64list_compression_decompression_with_data() { - let original_list = U64List(vec![10, 20, 30, 40, 50]); - - let mut compressed_buf = Vec::new(); - original_list.compress_to_buf(&mut compressed_buf); - - assert!( - !compressed_buf.is_empty(), - "Buffer should not be empty after compression of U64List with data" - ); - - let decompressed_list = U64List::decompress(&compressed_buf).unwrap(); - assert_eq!( - original_list, decompressed_list, - "Original and deserialized U64List with data should be equal" - ); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/providers/derivation_provider.rs b/rust/kona/crates/supervisor/storage/src/providers/derivation_provider.rs deleted file mode 100644 index af0099c35f6a4..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/providers/derivation_provider.rs +++ /dev/null @@ -1,1375 +0,0 @@ -//! Provider for derivation-related database operations. 
-use crate::{ - error::{EntryNotFoundError, StorageError}, - models::{ - BlockTraversal, DerivedBlocks, SourceBlockTraversal, StoredDerivedBlockPair, U64List, - }, -}; -use alloy_eips::eip1898::BlockNumHash; -use alloy_primitives::ChainId; -use kona_interop::DerivedRefPair; -use kona_protocol::BlockInfo; -use reth_db_api::{ - cursor::DbCursorRO, - transaction::{DbTx, DbTxMut}, -}; -use tracing::{error, info, trace, warn}; - -const DEFAULT_LOG_INTERVAL: u64 = 100; - -/// Provides access to derivation storage operations within a transaction. -#[derive(Debug)] -pub(crate) struct DerivationProvider<'tx, TX> { - tx: &'tx TX, - chain_id: ChainId, - #[doc(hidden)] - observability_interval: u64, -} - -impl<'tx, TX> DerivationProvider<'tx, TX> { - pub(crate) const fn new(tx: &'tx TX, chain_id: ChainId) -> Self { - Self::new_with_observability_interval(tx, chain_id, DEFAULT_LOG_INTERVAL) - } - - pub(crate) const fn new_with_observability_interval( - tx: &'tx TX, - chain_id: ChainId, - observability_interval: u64, - ) -> Self { - Self { tx, chain_id, observability_interval } - } -} - -impl DerivationProvider<'_, TX> -where - TX: DbTx, -{ - /// Helper to get [`StoredDerivedBlockPair`] by block number. - fn get_derived_block_pair_by_number( - &self, - derived_block_number: u64, - ) -> Result { - let derived_block_pair_opt = - self.tx.get::(derived_block_number).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - derived_block_number, - %err, - "Failed to get derived block pair" - ); - })?; - - let derived_block_pair = derived_block_pair_opt.ok_or_else(|| { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - derived_block_number, - "Derived block not found" - ); - EntryNotFoundError::DerivedBlockNotFound(derived_block_number) - })?; - - Ok(derived_block_pair) - } - - /// Helper to get [`StoredDerivedBlockPair`] by derived [`BlockNumHash`]. 
- /// This function checks if the derived block hash matches the expected hash. - /// If there is a mismatch, it logs a warning and returns [`StorageError::EntryNotFound`] error. - pub(crate) fn get_derived_block_pair( - &self, - derived_block_id: BlockNumHash, - ) -> Result { - let derived_block_pair = self.get_derived_block_pair_by_number(derived_block_id.number)?; - - if derived_block_pair.derived.hash != derived_block_id.hash { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - derived_block_number = derived_block_id.number, - expected_hash = %derived_block_id.hash, - actual_hash = %derived_block_pair.derived.hash, - "Derived block hash mismatch" - ); - return Err(StorageError::ConflictError); - } - - Ok(derived_block_pair) - } - - /// Gets the source [`BlockInfo`] for the given derived [`BlockNumHash`]. - pub(crate) fn derived_to_source( - &self, - derived_block_id: BlockNumHash, - ) -> Result { - let derived_block_pair: StoredDerivedBlockPair = - self.get_derived_block_pair(derived_block_id)?; - Ok(derived_block_pair.source.into()) - } - - /// Gets the [`SourceBlockTraversal`] for the given source block number. - /// - /// # Arguments - /// - /// * `source_block_number` - The source block number. - /// - /// Returns the [`SourceBlockTraversal`] for the given source block number. - fn get_block_traversal( - &self, - source_block_number: u64, - ) -> Result { - let block_traversal = - self.tx.get::(source_block_number).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - source_block_number, - %err, - "Failed to get block traversal info for source block" - ); - })?; - - Ok(block_traversal.ok_or_else(|| { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - source_block_number, - "source block not found" - ); - EntryNotFoundError::SourceBlockNotFound(source_block_number) - })?) - } - - /// Gets the latest derived [`BlockInfo`] at the given source [`BlockNumHash`]. 
- /// This does NOT mean to get the derived block that is "derived from" the source block. - /// It could happen that a source block has no derived blocks, in which case the latest derived - /// block is from one of the previous source blocks. - /// - /// Returns the latest derived block pair. - pub(crate) fn latest_derived_block_at_source( - &self, - source_block_id: BlockNumHash, - ) -> Result { - let block_traversal = self.get_block_traversal(source_block_id.number)?; - - if block_traversal.source.hash != source_block_id.hash { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - source_block_hash = %source_block_id.hash, - "Source block hash mismatch" - ); - return Err(StorageError::ConflictError); - } - - let mut cursor = self.tx.cursor_read::()?; - let walker = cursor.walk_back(Some(source_block_id.number))?; - - for item in walker { - let (_, block_traversal) = item?; - if let Some(latest_derived_block_number) = block_traversal.derived_block_numbers.last() - { - let derived_block_pair = - self.get_derived_block_pair_by_number(*latest_derived_block_number)?; - return Ok(derived_block_pair.derived.into()); - } - } - - Err(EntryNotFoundError::MissingDerivedBlocks(source_block_id).into()) - } - - /// Gets the latest derivation state [`DerivedRefPair`], which includes the latest source block - /// and the latest derived block. - /// - /// # Returns - /// A [`DerivedRefPair`] containing the latest source block and latest derived block. 
- pub(crate) fn latest_derivation_state(&self) -> Result { - let mut cursor = self.tx.cursor_read::().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to get cursor for DerivedBlocks" - ); - })?; - - let result = cursor.last().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to seek to last block" - ); - })?; - - let (_, block) = result.ok_or_else(|| { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - "No blocks found in storage" - ); - StorageError::DatabaseNotInitialised - })?; - - let latest_source_block = self.latest_source_block().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to get latest source block" - ); - })?; - - Ok(DerivedRefPair { source: latest_source_block, derived: block.derived.into() }) - } - - /// Gets the latest [`SourceBlockTraversal`]. - /// - /// # Returns - /// The latest [`SourceBlockTraversal`] in the database. - fn latest_source_block_traversal(&self) -> Result { - let mut cursor = self.tx.cursor_read::()?; - let result = cursor.last()?; - - let (_, block_traversal) = result.ok_or_else(|| StorageError::DatabaseNotInitialised)?; - Ok(block_traversal) - } - - /// Gets the source block for the given source block number. - pub(crate) fn get_source_block( - &self, - source_block_number: u64, - ) -> Result { - let block_traversal = self.get_block_traversal(source_block_number)?; - Ok(block_traversal.source.into()) - } - - /// Gets the latest source block, even if it has no derived blocks. 
- pub(crate) fn latest_source_block(&self) -> Result { - let block = self.latest_source_block_traversal().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to get latest source block traversal" - ); - })?; - - Ok(block.source.into()) - } - - /// Gets the activation block, which is the first block in the database. - pub(crate) fn get_activation_block(&self) -> Result { - let mut cursor = self.tx.cursor_read::()?; - let result = cursor.first()?; - - let (_, derived_block_pair) = result.ok_or_else(|| StorageError::DatabaseNotInitialised)?; - Ok(derived_block_pair.derived.into()) - } -} - -impl DerivationProvider<'_, TX> -where - TX: DbTxMut + DbTx, -{ - /// initialises the database with a derived activation block pair. - pub(crate) fn initialise(&self, activation_pair: DerivedRefPair) -> Result<(), StorageError> { - match self.get_derived_block_pair_by_number(0) { - Ok(pair) if activation_pair == pair.clone().into() => { - // Anchor matches, nothing to do - Ok(()) - } - Ok(_) => Err(StorageError::ConflictError), - Err(StorageError::EntryNotFound(_)) => { - self.save_source_block_internal(activation_pair.source)?; - self.save_derived_block_internal(activation_pair)?; - Ok(()) - } - Err(err) => Err(err), - } - } - - /// Saves a [`StoredDerivedBlockPair`] to [`DerivedBlocks`](`crate::models::DerivedBlocks`) - /// table and [`SourceBlockTraversal`] to [`BlockTraversal`](`crate::models::BlockTraversal`) - /// table in the database. 
- pub(crate) fn save_derived_block( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError> { - let latest_derivation_state = match self.latest_derivation_state() { - Ok(pair) => pair, - Err(StorageError::EntryNotFound(_)) => { - return Err(StorageError::DatabaseNotInitialised); - } - Err(e) => return Err(e), - }; - - // If the incoming derived block is not newer than the latest stored derived block, - // we do not save it, check if it is consistent with the saved state. - // If it is not consistent, we return an error. - if latest_derivation_state.derived.number >= incoming_pair.derived.number { - let stored_pair = self - .get_derived_block_pair_by_number(incoming_pair.derived.number) - .inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_derived_block_pair = %incoming_pair, - %err, - "Failed to get derived block pair" - ); - })?; - - if incoming_pair == stored_pair.into() { - return Ok(()); - } - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %latest_derivation_state, - incoming_derived_block_pair = %incoming_pair, - "Incoming derived block is not consistent with the latest stored derived block" - ); - return Err(StorageError::ConflictError); - } - - // Latest source block must be same as the incoming source block - if latest_derivation_state.source != incoming_pair.source { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - latest_source_block = %latest_derivation_state.source, - incoming_source = %incoming_pair.source, - "Latest source block does not match the incoming derived block source" - ); - return Err(StorageError::BlockOutOfOrder); - } - - if !latest_derivation_state.derived.is_parent_of(&incoming_pair.derived) { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %latest_derivation_state, - incoming_derived_block_pair = %incoming_pair, - "Latest stored derived block is not parent of the incoming derived block" - ); - 
return Err(StorageError::BlockOutOfOrder); - } - - self.save_derived_block_internal(incoming_pair) - } - - /// Internal function to save a derived block pair. - /// This function does not perform checks on the incoming derived pair, - /// it assumes that the pair is valid and the latest derived block is its parent. - fn save_derived_block_internal( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError> { - // the derived block must be derived from the latest source block - let mut block_traversal = self.latest_source_block_traversal().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_derived_block_pair = %incoming_pair, - %err, - "Failed to get latest source block traversal" - ); - })?; - - let latest_source_block = block_traversal.clone().source.into(); - if incoming_pair.source != latest_source_block { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - latest_source_block = %latest_source_block, - incoming_source = %incoming_pair.source, - "Latest source block does not match the incoming derived block source" - ); - return Err(StorageError::BlockOutOfOrder); - } - - // Add the derived block number to the list - block_traversal.derived_block_numbers.push(incoming_pair.derived.number); - - // Save the derived block pair to the database - self.tx - .put::(incoming_pair.derived.number, incoming_pair.into()) - .inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_derived_block_pair = %incoming_pair, - %err, - "Failed to save derived block pair" - ); - })?; - - // Save the SourceBlockTraversal to the database - self.tx.put::(incoming_pair.source.number, block_traversal).inspect_err( - |err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_derived_block_pair = %incoming_pair, - %err, - "Failed to save derived block numbers for source block" - ); - }, - )?; - - Ok(()) - } - - /// Saves a source 
block to the database. - /// If the source block already exists, it does nothing. - /// If the source block does not exist, it creates a new [`SourceBlockTraversal`] and saves it - /// to the database. - pub(crate) fn save_source_block(&self, incoming_source: BlockInfo) -> Result<(), StorageError> { - let latest_source_block = match self.latest_source_block() { - Ok(latest_source_block) => latest_source_block, - Err(StorageError::EntryNotFound(_)) => { - return Err(StorageError::DatabaseNotInitialised); - } - Err(err) => return Err(err), - }; - - // idempotent check: if the source block already exists, do nothing - if latest_source_block == incoming_source { - return Ok(()); - } - - // If the incoming source block is not newer than the latest source block, - // we do not save it, check if it is consistent with the saved state. - // If it is not consistent, we return an error. - if latest_source_block.number > incoming_source.number { - let source_block = - self.get_source_block(incoming_source.number).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - incoming_source = %incoming_source, - %err, - "Failed to get source block" - ); - })?; - - if source_block == incoming_source { - return Ok(()); - } - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - latest_source_block = %latest_source_block, - incoming_source = %incoming_source, - "Incoming source block is not consistent with the latest source block" - ); - return Err(StorageError::ConflictError); - } - - if !latest_source_block.is_parent_of(&incoming_source) { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - latest_source_block = %latest_source_block, - incoming_source = %incoming_source, - "Stored latest source block is not parent of the incoming source block" - ); - return Err(StorageError::BlockOutOfOrder); - } - - self.save_source_block_internal(incoming_source)?; - Ok(()) - } - - fn save_source_block_internal(&self, 
incoming_source: BlockInfo) -> Result<(), StorageError> { - let block_traversal = SourceBlockTraversal { - source: incoming_source.into(), - derived_block_numbers: U64List::default(), - }; - - self.tx.put::(incoming_source.number, block_traversal).inspect_err( - |err| { - error!(target: "supervisor::storage", chain_id = %self.chain_id, %err, "Failed to save block traversal"); - }, - )?; - - Ok(()) - } - - /// Rewinds the derivation database from the given derived block onward. - /// This removes all derived blocks with number >= the given block number - /// and updates the traversal state accordingly. - pub(crate) fn rewind_to(&self, block: &BlockNumHash) -> Result<(), StorageError> { - info!( - target: "supervisor::storage", - chain_id = %self.chain_id, - target_block_number = %block.number, - target_block_hash = %block.hash, - "Starting rewind of derivation storage" - ); - - // Validate the block exists and get the block pair - this provides hash validation - let block_pair = self.get_derived_block_pair(*block)?; - - // Get the latest block number from DerivedBlocks - let latest_block = { - let mut cursor = self.tx.cursor_read::()?; - cursor.last()?.map(|(num, _)| num).unwrap_or(block.number) - }; - - // Check for future block - if block.number > latest_block { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - target_block_number = %block.number, - latest_block, - "Cannot rewind to future block" - ); - return Err(StorageError::FutureData); - } - - // total blocks to rewind down to and including tgt block - let total_blocks = latest_block - block.number + 1; - let mut processed_blocks = 0; - - // Delete all derived blocks with number ≥ `block.number` - { - let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk(Some(block.number))?; - - trace!( - target: "supervisor::storage", - chain_id = %self.chain_id, - target_block_number = %block.number, - target_block_hash = %block.hash, - latest_block, - total_blocks, - 
observability_interval = %self.observability_interval, - "Rewinding derived block storage..." - ); - - while let Some(Ok((key, _stored_block))) = walker.next() { - // Remove the block first - walker.delete_current()?; - - // Only count as processed after successful deletion - processed_blocks += 1; - - // Log progress periodically or on last block - if processed_blocks % self.observability_interval == 0 || - processed_blocks == total_blocks - { - let percentage = if total_blocks > 0 { - (processed_blocks as f64 / total_blocks as f64 * 100.0).min(100.0) - } else { - 100.0 - }; - - info!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number = %key, - percentage = %format!("{:.2}%", percentage), - processed_blocks, - total_blocks, - "Rewind progress" - ); - } - } - - info!( - target: "supervisor::storage", - target_block_number = %block.number, - target_block_hash = %block.hash, - chain_id = %self.chain_id, - total_blocks, - "Rewind completed successfully" - ); - } - - self.rewind_block_traversal_to(&block_pair) - } - - /// Rewinds the block traversal for a given derived block pair. - /// - If only part of the derived list needs to be removed, it updates the list in-place. - /// - If later source blocks exist, they are removed entirely. - // TODO: validate the logic in block invalidation and re-org - fn rewind_block_traversal_to( - &self, - block_pair: &StoredDerivedBlockPair, - ) -> Result<(), StorageError> { - // Retain only valid derived blocks < the invalidated one - let mut traversal = self.get_block_traversal(block_pair.source.number)?; - traversal.derived_block_numbers.retain(|&num| num < block_pair.derived.number); - - let mut walk_from = block_pair.source.number; - - // If there's still something left, update the entry. Otherwise, skip — let the walker - // delete it. 
- if !traversal.derived_block_numbers.is_empty() { - self.tx.put::(block_pair.source.number, traversal).inspect_err( - |err| { - error!(target: "supervisor::storage", chain_id = %self.chain_id, %err, "Failed to update block traversal"); - }, - )?; - walk_from += 1; - } - - // Walk from (source.number) forward, deleting entries with key ≥ source.number - let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk(Some(walk_from))?; - while let Some(Ok((_, _))) = walker.next() { - walker.delete_current()?; - } - - Ok(()) - } - - /// Rewinds the derivation storage to a specific source block. - /// This will remove all derived blocks and their traversals from the given source block onward. - /// - /// # Arguments - /// * `source` - The source block number and hash to rewind to. - /// - /// # Returns - /// [`BlockInfo`] of the derived block that was rewound to, or `None` if no derived blocks - /// were found. - pub(crate) fn rewind_to_source( - &self, - source: &BlockNumHash, - ) -> Result, StorageError> { - let mut derived_rewind_target: Option = None; - { - let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk(Some(source.number))?; - while let Some(Ok((block_number, block_traversal))) = walker.next() { - if block_number == source.number && block_traversal.source.hash != source.hash { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - source_block_number = source.number, - expected_hash = %source.hash, - actual_hash = %block_traversal.source.hash, - "Source block hash mismatch during rewind" - ); - return Err(StorageError::ConflictError); - } - - if derived_rewind_target.is_none() && - !block_traversal.derived_block_numbers.is_empty() - { - let first_num = block_traversal.derived_block_numbers[0]; - let derived_block_pair = self.get_derived_block_pair_by_number(first_num)?; - derived_rewind_target = Some(derived_block_pair.derived.into()); - } - - walker.delete_current()?; - } - } - - // Delete all derived 
blocks with number ≥ `block_info.number` - if let Some(rewind_target) = derived_rewind_target { - let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk(Some(rewind_target.number))?; - while let Some(Ok((_, _))) = walker.next() { - walker.delete_current()?; // we’re already walking from the rewind point - } - } - - Ok(derived_rewind_target) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::models::Tables; - use alloy_primitives::B256; - use kona_cli::init_test_tracing; - use kona_interop::DerivedRefPair; - use kona_protocol::BlockInfo; - use reth_db::{ - Database, DatabaseEnv, - mdbx::{DatabaseArguments, init_db_for}, - }; - use tempfile::TempDir; - - static CHAIN_ID: ChainId = 1; - - fn block_info(number: u64, parent_hash: B256, timestamp: u64) -> BlockInfo { - BlockInfo { hash: B256::from([number as u8; 32]), number, parent_hash, timestamp } - } - - const fn derived_pair(source: BlockInfo, derived: BlockInfo) -> DerivedRefPair { - DerivedRefPair { source, derived } - } - - fn genesis_block() -> BlockInfo { - BlockInfo { - hash: B256::from([0u8; 32]), - number: 0, - parent_hash: B256::ZERO, - timestamp: 100, - } - } - - /// Sets up a new temp DB - fn setup_db() -> DatabaseEnv { - let temp_dir = TempDir::new().expect("Could not create temp dir"); - init_db_for::<_, Tables>(temp_dir.path(), DatabaseArguments::default()) - .expect("Failed to init database") - } - - /// Helper to initialize database in a new transaction, committing if successful. - fn initialize_db(db: &DatabaseEnv, pair: &DerivedRefPair) -> Result<(), StorageError> { - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let res = provider.initialise(*pair); - if res.is_ok() { - tx.commit().expect("Failed to commit transaction"); - } - res - } - - /// Helper to insert a pair in a new transaction, committing if successful. 
- fn insert_pair(db: &DatabaseEnv, pair: &DerivedRefPair) -> Result<(), StorageError> { - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let res = provider.save_derived_block(*pair); - if res.is_ok() { - tx.commit().expect("Failed to commit transaction"); - } - res - } - - /// Helper to insert a source block in a new transaction, committing if successful. - fn insert_source_block(db: &DatabaseEnv, source: &BlockInfo) -> Result<(), StorageError> { - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let res = provider.save_source_block(*source); - if res.is_ok() { - tx.commit().expect("Failed to commit transaction"); - } - res - } - - #[test] - fn initialise_inserts_anchor_if_not_exists() { - let db = setup_db(); - - let source = block_info(100, B256::from([100u8; 32]), 200); - let derived = block_info(0, genesis_block().hash, 200); - let anchor = derived_pair(source, derived); - - // Should succeed and insert the anchor - assert!(initialize_db(&db, &anchor).is_ok()); - - // Check that the anchor is present - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let stored = provider.get_derived_block_pair_by_number(0).expect("should exist"); - assert_eq!(stored.source.hash, anchor.source.hash); - assert_eq!(stored.derived.hash, anchor.derived.hash); - } - - #[test] - fn initialise_is_idempotent_if_anchor_matches() { - let db = setup_db(); - - let source = block_info(100, B256::from([100u8; 32]), 200); - let anchor = derived_pair(source, genesis_block()); - - // First initialise - assert!(initialize_db(&db, &anchor).is_ok()); - // Second initialise with the same anchor should succeed (idempotent) - assert!(insert_pair(&db, &anchor).is_ok()); - } - - #[test] - fn initialise_fails_if_anchor_mismatch() { - let db = setup_db(); - - let source = block_info(100, B256::from([100u8; 32]), 
200); - let anchor = derived_pair(source, genesis_block()); - - // Insert the genesis - assert!(initialize_db(&db, &anchor).is_ok()); - - // Try to initialise with a different anchor (different hash) - let wrong_derived = block_info(0, B256::from([42u8; 32]), 200); - let wrong_anchor = derived_pair(source, wrong_derived); - - let result = initialize_db(&db, &wrong_anchor); - assert!(matches!(result, Err(StorageError::ConflictError))); - } - - #[test] - fn save_derived_block_positive() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let derived2 = block_info(2, derived1.hash, 300); - let pair2 = derived_pair(source1, derived2); - assert!(insert_pair(&db, &pair2).is_ok()); - - let source3 = block_info(101, source1.hash, 400); - let derived3 = block_info(3, derived2.hash, 400); - let pair3 = derived_pair(source3, derived3); - assert!(insert_source_block(&db, &source3).is_ok()); - assert!(insert_pair(&db, &pair3).is_ok()); - } - - #[test] - fn save_derived_block_wrong_parent_should_fail() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let wrong_parent_hash = B256::from([99u8; 32]); - let derived2 = block_info(2, wrong_parent_hash, 300); - let pair2 = derived_pair(source1, derived2); - let result = insert_pair(&db, &pair2); - assert!(matches!(result, Err(StorageError::BlockOutOfOrder))); - } - - #[test] - fn save_derived_block_gap_in_number_should_fail() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, 
&pair1).is_ok()); - - let derived2 = block_info(4, derived1.hash, 400); // should be 2, not 4 - let pair2 = derived_pair(source1, derived2); - let result = insert_pair(&db, &pair2); - assert!(matches!(result, Err(StorageError::BlockOutOfOrder))); - } - - #[test] - fn duplicate_derived_block_number_should_pass() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - // Try to insert the same derived block again - let result = insert_pair(&db, &pair1); - assert!(result.is_ok(), "Should allow inserting the same derived block again"); - } - - #[test] - fn save_old_block_should_pass() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let derived2 = block_info(2, derived1.hash, 300); - let pair2 = derived_pair(source1, derived2); - assert!(insert_pair(&db, &pair2).is_ok()); - - // Try to insert a block with a lower number than the latest - let result = insert_pair(&db, &pair1); - assert!(result.is_ok(), "Should allow inserting an old derived block"); - } - - #[test] - fn non_monotonic_l2_number_should_fail() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let derived2 = block_info(2, derived1.hash, 300); - let pair2 = derived_pair(source1, derived2); - assert!(insert_pair(&db, &pair2).is_ok()); - - // Try to insert a block with a lower number than the latest - let derived_non_monotonic = block_info(1, derived2.hash, 400); - let pair_non_monotonic = derived_pair(source1, derived_non_monotonic); 
- let result = insert_pair(&db, &pair_non_monotonic); - assert!(matches!(result, Err(StorageError::ConflictError))); - } - - #[test] - fn test_latest_derived_block_at_source_returns_latest() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let derived2 = block_info(2, derived1.hash, 300); - let pair2 = derived_pair(source1, derived2); - assert!(insert_pair(&db, &pair2).is_ok()); - - let source2 = block_info(101, B256::from([100u8; 32]), 300); - let derived3 = block_info(3, derived2.hash, 400); - let pair3 = derived_pair(source2, derived3); - assert!(insert_source_block(&db, &source2).is_ok()); - assert!(insert_pair(&db, &pair3).is_ok()); - - // Now check latest_derived_block_at_source returns derived2 for source1 - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let source_id1 = BlockNumHash { number: source1.number, hash: source1.hash }; - let latest = provider.latest_derived_block_at_source(source_id1).expect("should exist"); - assert_eq!(latest.number, derived2.number); - assert_eq!(latest.hash, derived2.hash); - - // Now check latest_derived_block_at_source returns derived3 for source2 - let source_id2 = BlockNumHash { number: source2.number, hash: source2.hash }; - let latest = provider.latest_derived_block_at_source(source_id2).expect("should exist"); - assert_eq!(latest, derived3); - } - - #[test] - fn test_latest_derived_block_at_source_empty_list_returns_error() { - let db = setup_db(); - - // Use a source block that does not exist - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let source_id = BlockNumHash { number: 9999, hash: B256::from([99u8; 32]) }; - let result = provider.latest_derived_block_at_source(source_id); - assert!(matches!(result, 
Err(StorageError::EntryNotFound(_)))); - } - - #[test] - fn test_latest_derived_block_at_source_hash_mismatch_returns_error() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - // Use correct number but wrong hash - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let wrong_hash = B256::from([123u8; 32]); - let source_id = BlockNumHash { number: source1.number, hash: wrong_hash }; - let result = provider.latest_derived_block_at_source(source_id); - assert!(matches!(result, Err(StorageError::ConflictError))); - } - - #[test] - fn test_latest_derivation_state() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let derived2 = block_info(2, derived1.hash, 300); - let pair2 = derived_pair(source1, derived2); - assert!(insert_pair(&db, &pair2).is_ok()); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let latest = provider.latest_derivation_state().expect("should exist"); - assert_eq!(latest, pair2); - } - - #[test] - fn test_latest_derivation_state_empty_storage() { - let db = setup_db(); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let result = provider.latest_derivation_state(); - print!("{result:?}"); - assert!( - matches!(result, Err(StorageError::DatabaseNotInitialised)), - "Should return DatabaseNotInitialised error when no derivation state exists" - ); - } - - #[test] - fn test_latest_derivation_state_empty_source() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 
200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source2 = block_info(101, source1.hash, 300); - let derived2 = block_info(2, derived1.hash, 300); - let pair2 = derived_pair(source2, derived2); - assert!(insert_source_block(&db, &source2).is_ok()); - assert!(insert_pair(&db, &pair2).is_ok()); - - let source3 = block_info(102, source2.hash, 400); - assert!(insert_source_block(&db, &source3).is_ok()); - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let latest = provider.latest_derivation_state().expect("should exist"); - let expected_derivation_state = DerivedRefPair { source: source3, derived: derived2 }; - assert_eq!(latest, expected_derivation_state); - } - - #[test] - fn test_latest_derivation_state_empty_returns_error() { - let temp_dir = TempDir::new().expect("Could not create temp dir"); - let db = init_db_for::<_, Tables>(temp_dir.path(), DatabaseArguments::default()) - .expect("Failed to init database"); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - assert!(matches!( - provider.latest_derivation_state(), - Err(StorageError::DatabaseNotInitialised) - )); - } - - #[test] - fn test_derived_to_source_returns_correct_source() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(1, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let derived_block_id = BlockNumHash { number: derived1.number, hash: derived1.hash }; - let source = provider.derived_to_source(derived_block_id).expect("should exist"); - assert_eq!(source, source1); - } - - #[test] - fn 
test_derived_to_source_not_found_returns_error() { - let db = setup_db(); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let derived_block_id = BlockNumHash { number: 9999, hash: B256::from([9u8; 32]) }; - let result = provider.derived_to_source(derived_block_id); - assert!(matches!(result, Err(StorageError::EntryNotFound(_)))); - } - - #[test] - fn save_source_block_positive() { - let db = setup_db(); - - let derived0 = block_info(10, B256::from([10u8; 32]), 200); - let pair1 = derived_pair(genesis_block(), derived0); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source1 = block_info(1, genesis_block().hash, 200); - assert!(insert_source_block(&db, &source1).is_ok()); - } - - #[test] - fn save_source_block_idempotent_should_pass() { - let db = setup_db(); - - let derived0 = block_info(10, B256::from([10u8; 32]), 200); - let pair1 = derived_pair(genesis_block(), derived0); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source1 = block_info(1, genesis_block().hash, 200); - assert!(insert_source_block(&db, &source1).is_ok()); - // Try saving the same block again - assert!(insert_source_block(&db, &source1).is_ok()); - } - - #[test] - fn save_source_invalid_parent_should_fail() { - let db = setup_db(); - - let source0 = block_info(10, B256::from([10u8; 32]), 200); - let derived0 = genesis_block(); - let pair1 = derived_pair(source0, derived0); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source1 = block_info(11, B256::from([1u8; 32]), 200); - let result = insert_source_block(&db, &source1); - assert!( - matches!(result, Err(StorageError::BlockOutOfOrder)), - "Should fail with BlockOutOfOrder error" - ); - } - - #[test] - fn save_source_block_lower_number_should_pass() { - let db = setup_db(); - - let source0 = block_info(10, B256::from([10u8; 32]), 200); - let derived0 = genesis_block(); - let pair1 = derived_pair(source0, derived0); - assert!(initialize_db(&db, &pair1).is_ok()); 
- - let source1 = block_info(11, source0.hash, 400); - assert!(insert_source_block(&db, &source1).is_ok()); - - // Try to save a block with a lower number - let result = insert_source_block(&db, &source0); - assert!(result.is_ok(), "Should allow saving an old source block"); - } - - #[test] - fn save_inconsistent_source_block_lower_number_should_fail() { - let db = setup_db(); - - let source0 = block_info(10, B256::from([10u8; 32]), 200); - let derived0 = genesis_block(); - let pair1 = derived_pair(source0, derived0); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source1 = block_info(11, source0.hash, 400); - assert!(insert_source_block(&db, &source1).is_ok()); - - let old_source = block_info(source0.number, B256::from([1u8; 32]), 400); - // Try to save a block with a lower number - let result = insert_source_block(&db, &old_source); - assert!(matches!(result, Err(StorageError::ConflictError))); - } - - #[test] - fn save_source_block_gap_number_should_fail() { - let db = setup_db(); - - let derived0 = block_info(10, B256::from([10u8; 32]), 200); - let pair1 = derived_pair(genesis_block(), derived0); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source1 = block_info(2, genesis_block().hash, 400); - // Try to skip a block - let result = insert_source_block(&db, &source1); - assert!(matches!(result, Err(StorageError::BlockOutOfOrder))); - } - - #[test] - fn save_source_block_higher_number_should_succeed() { - let db = setup_db(); - - let derived0 = block_info(10, B256::from([10u8; 32]), 200); - let pair1 = derived_pair(genesis_block(), derived0); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source1 = block_info(1, genesis_block().hash, 200); - let source2 = block_info(2, source1.hash, 400); - assert!(insert_source_block(&db, &source1).is_ok()); - assert!(insert_source_block(&db, &source2).is_ok()); - } - - #[test] - fn save_source_block_traversal_updates_existing_traversal_positive() { - let db = setup_db(); - - let derived0 = 
block_info(10, B256::from([10u8; 32]), 200); - let pair1 = derived_pair(genesis_block(), derived0); - assert!(initialize_db(&db, &pair1).is_ok()); - - let source1 = block_info(1, genesis_block().hash, 200); - assert!(insert_source_block(&db, &source1).is_ok()); - - let derived1 = block_info(100, derived0.hash, 200); - - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let mut block_traversal = - provider.get_block_traversal(source1.number).expect("should exist"); - block_traversal.derived_block_numbers.push(derived1.number); - assert!(tx.put::(source1.number, block_traversal).is_ok()); - } - - #[test] - fn test_get_activation_block_returns_error_if_empty() { - let db = setup_db(); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let result = provider.get_activation_block(); - assert!(matches!(result, Err(StorageError::DatabaseNotInitialised))); - } - - #[test] - fn test_get_activation_block_with_multiple_blocks_returns_first() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(0, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - let derived2 = block_info(1, derived1.hash, 300); - let pair2 = derived_pair(source1, derived2); - assert!(insert_pair(&db, &pair2).is_ok()); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let activation = provider.get_activation_block().expect("should exist"); - assert_eq!(activation, derived1); - } - - #[test] - fn rewind_to_source_returns_none_when_no_source_present() { - let db = setup_db(); - let tx = db.tx_mut().expect("Failed to get mutable tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let source = BlockNumHash { number: 9999, hash: B256::from([9u8; 32]) }; - let res = 
provider.rewind_to_source(&source).expect("should succeed"); - assert!(res.is_none(), "Expected None when no source traversal exists"); - } - - #[test] - fn rewind_to_source_fails_on_source_hash_mismatch() { - let db = setup_db(); - - let source1 = block_info(100, B256::from([100u8; 32]), 200); - let derived1 = block_info(0, genesis_block().hash, 200); - let pair1 = derived_pair(source1, derived1); - assert!(initialize_db(&db, &pair1).is_ok()); - - // insert a source block at number 1 with a certain hash - let source_saved = block_info(101, source1.hash, 200); - assert!(insert_source_block(&db, &source_saved).is_ok()); - - // create provider and call rewind_to_source with same number but different hash - let tx = db.tx_mut().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let mismatched_source = BlockNumHash { number: 101, hash: B256::from([42u8; 32]) }; - let result = provider.rewind_to_source(&mismatched_source); - assert!(matches!(result, Err(StorageError::ConflictError))); - } - - #[test] - fn rewind_to_source_deletes_derived_blocks_and_returns_target() { - let db = setup_db(); - - let source0 = block_info(100, B256::from([100u8; 32]), 200); - let derived0 = block_info(0, genesis_block().hash, 200); - let pair0 = derived_pair(source0, derived0); - assert!(initialize_db(&db, &pair0).is_ok()); - - // Setup source1 with derived 10,11,12 and source2 with 13,14 - let source1 = block_info(101, source0.hash, 200); - let source2 = block_info(102, source1.hash, 300); - let derived1 = block_info(1, derived0.hash, 195); - let derived2 = block_info(2, derived1.hash, 197); - let derived3 = block_info(3, derived2.hash, 290); - let derived4 = block_info(4, derived3.hash, 292); - let derived5 = block_info(5, derived4.hash, 295); - - assert!(insert_source_block(&db, &source1).is_ok()); - assert!(insert_pair(&db, &derived_pair(source1, derived1)).is_ok()); - assert!(insert_pair(&db, &derived_pair(source1, derived2)).is_ok()); - 
assert!(insert_pair(&db, &derived_pair(source1, derived3)).is_ok()); - - assert!(insert_source_block(&db, &source2).is_ok()); - assert!(insert_pair(&db, &derived_pair(source2, derived4)).is_ok()); - assert!(insert_pair(&db, &derived_pair(source2, derived5)).is_ok()); - - // Perform rewind_to_source starting at source1 - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let source_id = BlockNumHash { number: source1.number, hash: source1.hash }; - let res = provider.rewind_to_source(&source_id).expect("rewind should succeed"); - - // derived_rewind_target should be the first derived block encountered (10) - assert!(res.is_some(), "expected a derived rewind target"); - let target = res.unwrap(); - assert_eq!(target, derived1); - - let res = provider.get_derived_block_pair_by_number(10); - assert!(matches!(res, Err(StorageError::EntryNotFound(_)))); - } - - #[test] - fn rewind_to_deletes_derived_blocks_and_returns_target() { - init_test_tracing(); - - let db = setup_db(); - - let source0 = block_info(100, B256::from([100u8; 32]), 200); - let derived0 = block_info(0, genesis_block().hash, 200); - let pair0 = derived_pair(source0, derived0); - assert!(initialize_db(&db, &pair0).is_ok()); - - // Setup source1 with derived 10,11,12 and source2 with 13,14 - let source1 = block_info(101, source0.hash, 200); - let source2 = block_info(102, source1.hash, 300); - let derived1 = block_info(1, derived0.hash, 195); - let derived2 = block_info(2, derived1.hash, 197); - let derived3 = block_info(3, derived2.hash, 290); - let derived4 = block_info(4, derived3.hash, 292); - let derived5 = block_info(5, derived4.hash, 295); - - assert!(insert_source_block(&db, &source1).is_ok()); - assert!(insert_pair(&db, &derived_pair(source1, derived1)).is_ok()); - assert!(insert_pair(&db, &derived_pair(source1, derived2)).is_ok()); - assert!(insert_pair(&db, &derived_pair(source1, derived3)).is_ok()); - - 
assert!(insert_source_block(&db, &source2).is_ok()); - assert!(insert_pair(&db, &derived_pair(source2, derived4)).is_ok()); - assert!(insert_pair(&db, &derived_pair(source2, derived5)).is_ok()); - - // Perform rewind_to_source starting at source1 - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = DerivationProvider::new_with_observability_interval(&tx, CHAIN_ID, 1); - let derived_id = BlockNumHash { number: derived1.number, hash: derived1.hash }; - provider.rewind_to(&derived_id).expect("rewind should succeed"); - - let res = provider.get_derived_block_pair_by_number(1); - assert!(matches!(res, Err(StorageError::EntryNotFound(_)))); - } - - #[test] - fn rewind_to_source_with_empty_derived_list_returns_none() { - let db = setup_db(); - - let source0 = block_info(100, B256::from([100u8; 32]), 200); - let derived0 = block_info(0, genesis_block().hash, 200); - let pair0 = derived_pair(source0, derived0); - assert!(initialize_db(&db, &pair0).is_ok()); - - // Insert a source block that has no derived_block_numbers - let source1 = block_info(101, source0.hash, 200); - assert!(insert_source_block(&db, &source1).is_ok()); - - // Call rewind_to_source on that source - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - let res = provider.rewind_to_source(&source1.id()).expect("rewind should succeed"); - - assert!(res.is_none(), "Expected None when source has empty derived list"); - - let tx = db.tx().expect("Could not get tx"); - let provider = DerivationProvider::new(&tx, CHAIN_ID); - - let activation = provider.get_activation_block().expect("activation should exist"); - assert_eq!(activation, derived0); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/providers/head_ref_provider.rs b/rust/kona/crates/supervisor/storage/src/providers/head_ref_provider.rs deleted file mode 100644 index ea048e580560d..0000000000000 --- 
a/rust/kona/crates/supervisor/storage/src/providers/head_ref_provider.rs +++ /dev/null @@ -1,331 +0,0 @@ -//! Provider for tracking block safety head reference -use crate::{StorageError, models::SafetyHeadRefs}; -use alloy_primitives::ChainId; -use derive_more::Constructor; -use kona_protocol::BlockInfo; -use op_alloy_consensus::interop::SafetyLevel; -use reth_db_api::transaction::{DbTx, DbTxMut}; -use tracing::{error, warn}; - -/// A Safety Head Reference storage that wraps transactional reference. -#[derive(Debug, Constructor)] -pub(crate) struct SafetyHeadRefProvider<'tx, TX> { - tx: &'tx TX, - chain_id: ChainId, -} - -impl SafetyHeadRefProvider<'_, TX> -where - TX: DbTx, -{ - pub(crate) fn get_safety_head_ref( - &self, - safety_level: SafetyLevel, - ) -> Result { - let head_ref_key = safety_level.into(); - let result = self.tx.get::(head_ref_key).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %safety_level, - %err, - "Failed to seek head reference" - ); - })?; - let block_ref = result.ok_or_else(|| StorageError::FutureData)?; - Ok(block_ref.into()) - } -} - -impl SafetyHeadRefProvider<'_, Tx> -where - Tx: DbTxMut + DbTx, -{ - /// Updates the safety head reference with the provided block info. - /// If the block info's number is less than the current head reference's number, - /// it will not update the head reference and will log a warning. - pub(crate) fn update_safety_head_ref( - &self, - safety_level: SafetyLevel, - incoming_head_ref: &BlockInfo, - ) -> Result<(), StorageError> { - // Ensure the block_info.number is greater than the stored head reference - // If the head reference is not set, this check will be skipped. 
- if let Ok(current_head_ref) = self.get_safety_head_ref(safety_level) && - current_head_ref.number > incoming_head_ref.number - { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %current_head_ref, - %incoming_head_ref, - %safety_level, - "Attempting to update head reference with a block that has a lower number than the current head reference", - ); - return Ok(()); - } - - self.tx - .put::(safety_level.into(), (*incoming_head_ref).into()) - .inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %incoming_head_ref, - %safety_level, - %err, - "Failed to store head reference" - ) - })?; - Ok(()) - } - - /// Forcefully resets the head reference only if the current stored head is ahead of the - /// incoming one. - /// - /// This is intended for internal use during rewinds, where the safety head needs to be directly - /// set to a previous block regardless of the current head state. - pub(crate) fn reset_safety_head_ref_if_ahead( - &self, - safety_level: SafetyLevel, - incoming_head_ref: &BlockInfo, - ) -> Result<(), StorageError> { - // Skip if the current head is behind or missing. - match self.get_safety_head_ref(safety_level) { - Ok(current_head_ref) => { - if current_head_ref.number < incoming_head_ref.number { - return Ok(()); - } - } - Err(StorageError::FutureData) => { - return Ok(()); - } - Err(err) => return Err(err), - } - - self.tx - .put::(safety_level.into(), (*incoming_head_ref).into()) - .inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %incoming_head_ref, - %safety_level, - %err, - "Failed to reset head reference" - ) - })?; - Ok(()) - } - - /// Removes the safety head reference for the specified safety level. 
- pub(crate) fn remove_safety_head_ref( - &self, - safety_level: SafetyLevel, - ) -> Result<(), StorageError> { - self.tx.delete::(safety_level.into(), None).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %safety_level, - %err, - "Failed to remove head reference" - ) - })?; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::models::Tables; - use alloy_primitives::B256; - use reth_db::{ - DatabaseEnv, - mdbx::{DatabaseArguments, init_db_for}, - }; - use reth_db_api::Database; - use tempfile::TempDir; - - static CHAIN_ID: ChainId = 1; - - fn setup_db() -> DatabaseEnv { - let temp_dir = TempDir::new().expect("Could not create temp dir"); - init_db_for::<_, Tables>(temp_dir.path(), DatabaseArguments::default()) - .expect("Failed to init database") - } - - #[test] - fn test_safety_head_ref_retrieval() { - let db = setup_db(); - - // Create write transaction first - let write_tx = db.tx_mut().expect("Failed to create write transaction"); - let write_provider = SafetyHeadRefProvider::new(&write_tx, CHAIN_ID); - - // Initially, there should be no head ref - let result = write_provider.get_safety_head_ref(SafetyLevel::CrossSafe); - assert!(result.is_err()); - - // Update head ref - let block_info = BlockInfo::default(); - write_provider - .update_safety_head_ref(SafetyLevel::CrossSafe, &block_info) - .expect("Failed to update head ref"); - - // Commit the write transaction - write_tx.commit().expect("Failed to commit the write transaction"); - - // Create a new read transaction to verify - let tx = db.tx().expect("Failed to create transaction"); - let provider = SafetyHeadRefProvider::new(&tx, CHAIN_ID); - let result = - provider.get_safety_head_ref(SafetyLevel::CrossSafe).expect("Failed to get head ref"); - assert_eq!(result, block_info); - } - - #[test] - fn test_safety_head_ref_update() { - let db = setup_db(); - let write_tx = db.tx_mut().expect("Failed to create write transaction"); - let 
write_provider = SafetyHeadRefProvider::new(&write_tx, CHAIN_ID); - - // Create initial block info - let initial_block_info = BlockInfo { - hash: Default::default(), - number: 1, - parent_hash: Default::default(), - timestamp: 100, - }; - write_provider - .update_safety_head_ref(SafetyLevel::CrossSafe, &initial_block_info) - .expect("Failed to update head ref"); - - // Create updated block info - let mut updated_block_info = BlockInfo { - hash: Default::default(), - number: 1, - parent_hash: Default::default(), - timestamp: 200, - }; - updated_block_info.number = 100; - write_provider - .update_safety_head_ref(SafetyLevel::CrossSafe, &updated_block_info) - .expect("Failed to update head ref"); - - // Commit the write transaction - write_tx.commit().expect("Failed to commit the write transaction"); - - // Verify the updated value - let tx = db.tx().expect("Failed to create transaction"); - let provider = SafetyHeadRefProvider::new(&tx, CHAIN_ID); - let result = - provider.get_safety_head_ref(SafetyLevel::CrossSafe).expect("Failed to get head ref"); - assert_eq!(result, updated_block_info); - } - - #[test] - fn test_reset_safety_head_ref_if_ahead() { - let db = setup_db(); - let tx = db.tx_mut().expect("Failed to start write tx"); - let provider = SafetyHeadRefProvider::new(&tx, CHAIN_ID); - - // Set initial head at 100 - let head_100 = BlockInfo { - number: 100, - hash: B256::from([1u8; 32]), - parent_hash: B256::ZERO, - timestamp: 1234, - }; - provider.update_safety_head_ref(SafetyLevel::CrossSafe, &head_100).expect("update failed"); - - // Try to reset to 101 (should NOT update — current is behind) - let head_101 = BlockInfo { number: 101, ..head_100 }; - provider - .reset_safety_head_ref_if_ahead(SafetyLevel::CrossSafe, &head_101) - .expect("reset failed"); - - // Should still be 100 - let current = provider.get_safety_head_ref(SafetyLevel::CrossSafe).expect("get failed"); - assert_eq!(current.number, 100); - - // Now try to reset to 90 (should update — current 
is ahead) - let head_90 = BlockInfo { number: 90, ..head_100 }; - provider - .reset_safety_head_ref_if_ahead(SafetyLevel::CrossSafe, &head_90) - .expect("reset failed"); - - // Should now be 90 - let current = provider.get_safety_head_ref(SafetyLevel::CrossSafe).expect("get failed"); - assert_eq!(current.number, 90); - - tx.commit().expect("commit failed"); - } - - #[test] - fn test_reset_safety_head_ref_should_ignore_future_data() { - let db = setup_db(); - let tx = db.tx_mut().expect("Failed to start write tx"); - let provider = SafetyHeadRefProvider::new(&tx, CHAIN_ID); - - // Set initial head at 100 - let head_100 = BlockInfo { - number: 100, - hash: B256::from([1u8; 32]), - parent_hash: B256::ZERO, - timestamp: 1234, - }; - - provider - .reset_safety_head_ref_if_ahead(SafetyLevel::CrossSafe, &head_100) - .expect("reset should succeed"); - - // check head is not updated and still returns FutureData Err - let result = provider.get_safety_head_ref(SafetyLevel::CrossSafe); - assert!(result.is_err()); - assert!(matches!(result.unwrap_err(), StorageError::FutureData)); - - tx.commit().expect("commit failed"); - } - - #[test] - fn test_remove_safety_head_ref_removes_existing() { - let db = setup_db(); - let tx = db.tx_mut().expect("Failed to start write tx"); - let provider = SafetyHeadRefProvider::new(&tx, CHAIN_ID); - - // Set a head ref - let block_info = BlockInfo { - hash: Default::default(), - number: 42, - parent_hash: Default::default(), - timestamp: 1234, - }; - provider - .update_safety_head_ref(SafetyLevel::CrossSafe, &block_info) - .expect("update failed"); - - // Remove it - provider.remove_safety_head_ref(SafetyLevel::CrossSafe).expect("remove failed"); - - // Should now return FutureData error - let result = provider.get_safety_head_ref(SafetyLevel::CrossSafe); - assert!(matches!(result, Err(StorageError::FutureData))); - } - - #[test] - fn test_remove_safety_head_ref_no_existing() { - let db = setup_db(); - let tx = db.tx_mut().expect("Failed to start 
write tx"); - let provider = SafetyHeadRefProvider::new(&tx, CHAIN_ID); - - // Remove when nothing exists - let result = provider.remove_safety_head_ref(SafetyLevel::CrossSafe); - assert!(result.is_ok()); - - // Still returns FutureData error - let result = provider.get_safety_head_ref(SafetyLevel::CrossSafe); - assert!(matches!(result, Err(StorageError::FutureData))); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/providers/log_provider.rs b/rust/kona/crates/supervisor/storage/src/providers/log_provider.rs deleted file mode 100644 index 0fb5d442685eb..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/providers/log_provider.rs +++ /dev/null @@ -1,781 +0,0 @@ -//! Reth's MDBX-backed abstraction of [`LogProvider`] for superchain state. -//! -//! This module provides the [`LogProvider`] struct, which uses the -//! `reth-db` abstraction of reth to store execution logs -//! and block metadata required by the Optimism supervisor. -//! -//! It supports: -//! - Writing full blocks of logs with metadata -//! - Retrieving block metadata by number -//! - Finding a block from a specific log (with hash/index match) -//! - Fetching logs per block using dup-sorted key layout -//! -//! Logs are stored in [`LogEntries`] under dup-sorted tables, with log index -//! used as the subkey. Block metadata is stored in [`BlockRefs`]. - -use crate::{ - error::{EntryNotFoundError, StorageError}, - models::{BlockRefs, LogEntries}, -}; -use alloy_eips::BlockNumHash; -use alloy_primitives::ChainId; -use kona_protocol::BlockInfo; -use kona_supervisor_types::Log; -use reth_db_api::{ - cursor::{DbCursorRO, DbDupCursorRO, DbDupCursorRW}, - transaction::{DbTx, DbTxMut}, -}; - -use tracing::{debug, error, info, trace, warn}; - -const DEFAULT_LOG_INTERVAL: u64 = 100; - -/// A log storage that wraps a transactional reference to the MDBX backend. 
-#[derive(Debug)] -pub(crate) struct LogProvider<'tx, TX> { - tx: &'tx TX, - chain_id: ChainId, - #[doc(hidden)] - observability_interval: u64, -} - -impl<'tx, TX> LogProvider<'tx, TX> { - pub(crate) const fn new(tx: &'tx TX, chain_id: ChainId) -> Self { - Self::new_with_observability_interval(tx, chain_id, DEFAULT_LOG_INTERVAL) - } - - pub(crate) const fn new_with_observability_interval( - tx: &'tx TX, - chain_id: ChainId, - observability_interval: u64, - ) -> Self { - Self { tx, chain_id, observability_interval } - } -} - -impl LogProvider<'_, TX> -where - TX: DbTxMut + DbTx, -{ - pub(crate) fn initialise(&self, activation_block: BlockInfo) -> Result<(), StorageError> { - match self.get_block(0) { - Ok(block) if block == activation_block => Ok(()), - Ok(_) => Err(StorageError::ConflictError), - Err(StorageError::EntryNotFound(_)) => { - self.store_block_logs_internal(&activation_block, Vec::new()) - } - - Err(err) => Err(err), - } - } - - pub(crate) fn store_block_logs( - &self, - block: &BlockInfo, - logs: Vec, - ) -> Result<(), StorageError> { - debug!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number = block.number, - "Storing logs", - ); - - let latest_block = match self.get_latest_block() { - Ok(block) => block, - Err(StorageError::EntryNotFound(_)) => { - return Err(StorageError::DatabaseNotInitialised); - } - Err(e) => return Err(e), - }; - - if latest_block.number >= block.number { - // If the latest block is ahead of the incoming block, it means - // the incoming block is old block, check if it is same as the stored block. 
- let stored_block = self.get_block(block.number)?; - if stored_block == *block { - return Ok(()); - } - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %stored_block, - incoming_block = %block, - "Incoming log block is not consistent with the stored log block", - ); - return Err(StorageError::ConflictError); - } - - if !latest_block.is_parent_of(block) { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %latest_block, - incoming_block = %block, - "Incoming block does not follow latest stored block" - ); - return Err(StorageError::BlockOutOfOrder); - } - - self.store_block_logs_internal(block, logs) - } - - fn store_block_logs_internal( - &self, - block: &BlockInfo, - logs: Vec, - ) -> Result<(), StorageError> { - self.tx.put::(block.number, (*block).into()).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number = block.number, - %err, - "Failed to insert block" - ); - })?; - - let mut cursor = self.tx.cursor_dup_write::().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to get dup cursor" - ); - })?; - - for log in logs { - cursor.append_dup(block.number, log.into()).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number = block.number, - %err, - "Failed to append logs" - ); - })?; - } - Ok(()) - } - - /// Rewinds the log storage by deleting all blocks and logs from the given block onward. - /// Fails if the given block exists with a mismatching hash (to prevent unsafe deletion). 
- pub(crate) fn rewind_to(&self, block: &BlockNumHash) -> Result<(), StorageError> { - info!( - target: "supervisor::storage", - chain_id = %self.chain_id, - target_block_number = %block.number, - target_block_hash = %block.hash, - "Starting rewind of log storage" - ); - - // Get the latest block number from BlockRefs - let latest_block = { - let mut cursor = self.tx.cursor_read::()?; - cursor.last()?.map(|(num, _)| num).unwrap_or(block.number) - }; - - // Check for future block - if block.number > latest_block { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - target_block_number = %block.number, - latest_block, - "Cannot rewind to future block" - ); - return Err(StorageError::FutureData); - } - - // total blocks to rewind down to and including tgt block - let total_blocks = latest_block - block.number + 1; - let mut processed_blocks = 0; - - // Delete all blocks and logs with number ≥ `block.number` - { - let mut cursor = self.tx.cursor_write::()?; - let mut walker = cursor.walk(Some(block.number))?; - - trace!( - target: "supervisor::storage", - chain_id = %self.chain_id, - target_block_number = %block.number, - target_block_hash = %block.hash, - latest_block, - total_blocks, - observability_interval = %self.observability_interval, - "Rewinding log storage..." 
- ); - - while let Some(Ok((key, stored_block))) = walker.next() { - if key == block.number && block.hash != stored_block.hash { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %stored_block, - incoming_block = ?block, - "Requested block to rewind does not match stored block" - ); - return Err(StorageError::ConflictError); - } - // remove the block - walker.delete_current()?; - - // remove the logs of that block - self.tx.delete::(key, None)?; - - processed_blocks += 1; - - // Log progress periodically or on last block - if processed_blocks % self.observability_interval == 0 || - processed_blocks == total_blocks - { - let percentage = if total_blocks > 0 { - (processed_blocks as f64 / total_blocks as f64 * 100.0).min(100.0) - } else { - 100.0 - }; - - info!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number = %key, - percentage = %format!("{:.2}%", percentage), - processed_blocks, - total_blocks, - "Rewind progress" - ); - } - } - - info!( - target: "supervisor::storage", - target_block_number = ?block.number, - target_block_hash = %block.hash, - chain_id = %self.chain_id, - total_blocks, - "Rewind completed successfully" - ); - } - - Ok(()) - } -} - -impl LogProvider<'_, TX> -where - TX: DbTx, -{ - pub(crate) fn get_block(&self, block_number: u64) -> Result { - debug!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - "Fetching block" - ); - - let block_option = self.tx.get::(block_number).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - %err, - "Failed to read block", - ); - })?; - - let block = block_option.ok_or_else(|| { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - "Block not found" - ); - EntryNotFoundError::DerivedBlockNotFound(block_number) - })?; - Ok(block.into()) - } - - pub(crate) fn get_latest_block(&self) -> Result { - debug!(target: "supervisor::storage", chain_id = 
%self.chain_id, "Fetching latest block"); - - let mut cursor = self.tx.cursor_read::().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to get cursor" - ); - })?; - - let result = cursor.last().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to seek to last block" - ); - })?; - - let (_, block) = result.ok_or_else(|| { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - "No blocks found in storage" - ); - StorageError::DatabaseNotInitialised - })?; - Ok(block.into()) - } - - pub(crate) fn get_log(&self, block_number: u64, log_index: u32) -> Result { - debug!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - log_index, - "Fetching block by log" - ); - - let mut cursor = self.tx.cursor_dup_read::().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to get cursor for LogEntries" - ); - })?; - - let result = cursor.seek_by_key_subkey(block_number, log_index).inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - log_index, - %err, - "Failed to read log entry" - ); - })?; - - let log_entry = result.ok_or_else(|| { - warn!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - log_index, - "Log not found" - ); - EntryNotFoundError::LogNotFound { block_number, log_index } - })?; - - Ok(Log::from(log_entry)) - } - - pub(crate) fn get_logs(&self, block_number: u64) -> Result, StorageError> { - debug!(target: "supervisor::storage", chain_id = %self.chain_id, block_number, "Fetching logs"); - - let mut cursor = self.tx.cursor_dup_read::().inspect_err(|err| { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - %err, - "Failed to get dup cursor" - ); - })?; - - let walker = cursor.walk_range(block_number..=block_number).inspect_err(|err| { - 
error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - %err, - "Failed to walk dup range", - ); - })?; - - let mut logs = Vec::new(); - for row in walker { - match row { - Ok((_, entry)) => logs.push(entry.into()), - Err(err) => { - error!( - target: "supervisor::storage", - chain_id = %self.chain_id, - block_number, - %err, - "Failed to read log entry", - ); - return Err(StorageError::Database(err)); - } - } - } - Ok(logs) - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::models::Tables; - use alloy_primitives::B256; - use kona_cli::init_test_tracing; - use kona_protocol::BlockInfo; - use kona_supervisor_types::{ExecutingMessage, Log}; - use reth_db::{ - DatabaseEnv, - mdbx::{DatabaseArguments, init_db_for}, - }; - use reth_db_api::Database; - use tempfile::TempDir; - - static CHAIN_ID: ChainId = 1; - - fn genesis_block() -> BlockInfo { - BlockInfo { - hash: B256::from([0u8; 32]), - number: 0, - parent_hash: B256::ZERO, - timestamp: 100, - } - } - - fn sample_block_info(block_number: u64, parent_hash: B256) -> BlockInfo { - BlockInfo { - number: block_number, - hash: B256::from([0x11; 32]), - parent_hash, - timestamp: 123456, - } - } - - fn sample_log(log_index: u32, with_msg: bool) -> Log { - Log { - index: log_index, - hash: B256::from([log_index as u8; 32]), - executing_message: with_msg.then_some(ExecutingMessage { - chain_id: 10, - block_number: 999, - log_index: 7, - hash: B256::from([0x44; 32]), - timestamp: 88888, - }), - } - } - - /// Sets up a new temp DB - fn setup_db() -> DatabaseEnv { - let temp_dir = TempDir::new().expect("Could not create temp dir"); - init_db_for::<_, Tables>(temp_dir.path(), DatabaseArguments::default()) - .expect("Failed to init database") - } - - /// Helper to initialize database in a new transaction, committing if successful. 
- fn initialize_db(db: &DatabaseEnv, block: &BlockInfo) -> Result<(), StorageError> { - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = LogProvider::new(&tx, CHAIN_ID); - let res = provider.initialise(*block); - if res.is_ok() { - tx.commit().expect("Failed to commit transaction"); - } else { - tx.abort(); - } - res - } - - /// Helper to insert a pair in a new transaction, committing if successful. - fn insert_block_logs( - db: &DatabaseEnv, - block: &BlockInfo, - logs: Vec, - ) -> Result<(), StorageError> { - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = LogProvider::new(&tx, CHAIN_ID); - let res = provider.store_block_logs(block, logs); - if res.is_ok() { - tx.commit().expect("Failed to commit transaction"); - } - res - } - - #[test] - fn initialise_inserts_anchor_if_not_exists() { - let db = setup_db(); - let genesis = genesis_block(); - - // Should succeed and insert the anchor - assert!(initialize_db(&db, &genesis).is_ok()); - - // Check that the anchor is present - let tx = db.tx().expect("Could not get tx"); - let provider = LogProvider::new(&tx, CHAIN_ID); - let stored = provider.get_block(genesis.number).expect("should exist"); - assert_eq!(stored.hash, genesis.hash); - } - - #[test] - fn initialise_is_idempotent_if_anchor_matches() { - let db = setup_db(); - let genesis = genesis_block(); - - // First initialise - assert!(initialize_db(&db, &genesis).is_ok()); - - // Second initialise with the same anchor should succeed (idempotent) - assert!(initialize_db(&db, &genesis).is_ok()); - } - - #[test] - fn initialise_fails_if_anchor_mismatch() { - let db = setup_db(); - - // Initialize with the genesis block - let genesis = genesis_block(); - assert!(initialize_db(&db, &genesis).is_ok()); - - // Try to initialise with a different anchor (different hash) - let mut wrong_genesis = genesis; - wrong_genesis.hash = B256::from([42u8; 32]); - - let result = initialize_db(&db, &wrong_genesis); - 
assert!(matches!(result, Err(StorageError::ConflictError))); - } - - #[test] - fn test_get_latest_block_empty() { - let db = setup_db(); - - let tx = db.tx().expect("Failed to start RO tx"); - let log_reader = LogProvider::new(&tx, CHAIN_ID); - - let result = log_reader.get_latest_block(); - assert!(matches!(result, Err(StorageError::DatabaseNotInitialised))); - } - - #[test] - fn test_storage_read_write_success() { - let db = setup_db(); - - // Initialize with genesis block - let genesis = genesis_block(); - initialize_db(&db, &genesis).expect("Failed to initialize DB with genesis block"); - - let block1 = sample_block_info(1, genesis.hash); - let logs1 = vec![ - sample_log(0, false), - sample_log(1, true), - sample_log(3, false), - sample_log(4, true), - ]; - - // Store logs for block1 - assert!(insert_block_logs(&db, &block1, logs1.clone()).is_ok()); - - let block2 = sample_block_info(2, block1.hash); - let logs2 = vec![sample_log(0, false), sample_log(1, true)]; - - // Store logs for block2 - assert!(insert_block_logs(&db, &block2, logs2.clone()).is_ok()); - - let block3 = sample_block_info(3, block2.hash); - let logs3 = vec![sample_log(0, false), sample_log(1, true), sample_log(2, true)]; - - // Store logs for block3 - assert!(insert_block_logs(&db, &block3, logs3).is_ok()); - - let tx = db.tx().expect("Failed to start RO tx"); - let log_reader = LogProvider::new(&tx, CHAIN_ID); - - // get_block - let block = log_reader.get_block(block2.number).expect("Failed to get block"); - assert_eq!(block, block2); - - // get_latest_block - let block = log_reader.get_latest_block().expect("Failed to get latest block"); - assert_eq!(block, block3); - - // get log - let log = log_reader.get_log(1, 1).expect("Failed to get block by log"); - assert_eq!(log, logs1[1]); - - // get_logs - let logs = log_reader.get_logs(block2.number).expect("Failed to get logs"); - assert_eq!(logs.len(), 2); - assert_eq!(logs[0], logs2[0]); - assert_eq!(logs[1], logs2[1]); - } - - #[test] - fn 
test_not_found_error_and_empty_results() { - let db = setup_db(); - - let tx = db.tx().expect("Failed to start RO tx"); - let log_reader = LogProvider::new(&tx, CHAIN_ID); - - let result = log_reader.get_latest_block(); - assert!(matches!(result, Err(StorageError::DatabaseNotInitialised))); - - // Initialize with genesis block - let genesis = genesis_block(); - initialize_db(&db, &genesis).expect("Failed to initialize DB with genesis block"); - - assert!( - insert_block_logs(&db, &sample_block_info(1, genesis.hash), vec![sample_log(0, true)]) - .is_ok() - ); - - let result = log_reader.get_block(2); - assert!(matches!(result, Err(StorageError::EntryNotFound(_)))); - - // should return empty logs but not an error - let logs = log_reader.get_logs(2).expect("Should not return error"); - assert_eq!(logs.len(), 0); - - let result = log_reader.get_log(1, 1); - assert!(matches!(result, Err(StorageError::EntryNotFound(_)))); - } - - #[test] - fn test_block_append_failed_on_order_mismatch() { - let db = setup_db(); - - // Initialize with genesis block - let genesis = genesis_block(); - initialize_db(&db, &genesis).expect("Failed to initialize DB with genesis block"); - - let block1 = sample_block_info(1, genesis.hash); - let logs1 = vec![sample_log(0, false)]; - - let block2 = sample_block_info(3, genesis.hash); - let logs2 = vec![sample_log(0, false), sample_log(1, true)]; - - // Store logs - assert!(insert_block_logs(&db, &block1, logs1).is_ok()); - - let result = insert_block_logs(&db, &block2, logs2); - assert!(matches!(result, Err(StorageError::BlockOutOfOrder))); - } - - #[test] - fn store_block_logs_skips_if_block_already_exists() { - let db = setup_db(); - let genesis = genesis_block(); - initialize_db(&db, &genesis).expect("Failed to initialize DB with genesis block"); - - let block1 = sample_block_info(1, genesis.hash); - let logs1 = vec![sample_log(0, false)]; - - // Store block1 for the first time - assert!(insert_block_logs(&db, &block1, 
logs1.clone()).is_ok()); - - // Try storing the same block again (should skip and succeed) - assert!(insert_block_logs(&db, &block1, logs1.clone()).is_ok()); - - // Try storing genesis block again (should skip and succeed) - assert!(insert_block_logs(&db, &genesis, Vec::new()).is_ok()); - - // Check that the logs are still present and correct - let tx = db.tx().expect("Failed to start RO tx"); - let log_reader = LogProvider::new(&tx, CHAIN_ID); - let logs = log_reader.get_logs(block1.number).expect("Should get logs"); - assert_eq!(logs, logs1); - } - - #[test] - fn store_block_logs_returns_conflict_if_block_exists_with_different_data() { - let db = setup_db(); - let genesis = genesis_block(); - initialize_db(&db, &genesis).expect("Failed to initialize DB with genesis block"); - - let block1 = sample_block_info(1, genesis.hash); - let logs1 = vec![sample_log(0, false)]; - assert!(insert_block_logs(&db, &block1, logs1).is_ok()); - - // Try storing block1 again with a different hash (simulate conflict) - let mut block1_conflict = block1; - block1_conflict.hash = B256::from([0x22; 32]); - let logs1_conflict = vec![sample_log(0, false)]; - - let result = insert_block_logs(&db, &block1_conflict, logs1_conflict); - assert!(matches!(result, Err(StorageError::ConflictError))); - - // Try storing genesis block again with a different hash (simulate conflict) - let mut genesis_conflict = genesis; - genesis_conflict.hash = B256::from([0x33; 32]); - let result = insert_block_logs(&db, &genesis_conflict, Vec::new()); - assert!(matches!(result, Err(StorageError::ConflictError))); - } - - #[test] - fn test_rewind_to() { - init_test_tracing(); - - let db = setup_db(); - let genesis = genesis_block(); - initialize_db(&db, &genesis).expect("Failed to initialize DB"); - - // Add 5 blocks with logs - let mut blocks = vec![genesis]; - for i in 1..=5 { - let prev = &blocks[i - 1]; - let block = sample_block_info(i as u64, prev.hash); - let logs = (0..3).map(|j| sample_log(j, j % 2 == 
0)).collect(); - insert_block_logs(&db, &block, logs).expect("Failed to insert logs"); - blocks.push(block); - } - - // Rewind to block 3, blocks 3, 4, 5 should be removed - let tx = db.tx_mut().expect("Could not get mutable tx"); - let provider = LogProvider::new_with_observability_interval(&tx, CHAIN_ID, 1); - provider.rewind_to(&blocks[3].id()).expect("Failed to rewind blocks"); - tx.commit().expect("Failed to commit rewind"); - - let tx = db.tx().expect("Could not get RO tx"); - let provider = LogProvider::new_with_observability_interval(&tx, CHAIN_ID, 1); - - // Blocks 0,1,2 should still exist - for i in 0..=2 { - assert!(provider.get_block(i).is_ok(), "block {i} should exist after rewind"); - } - - // Logs for blocks 1,2 should exist - for i in 1..=2 { - let logs = provider.get_logs(i).expect("logs should exist"); - assert_eq!(logs.len(), 3, "block {i} should have 3 logs"); - } - - // Blocks 3,4,5 should be gone - for i in 3..=5 { - assert!( - matches!(provider.get_block(i), Err(StorageError::EntryNotFound(_))), - "block {i} should be removed" - ); - - let logs = provider.get_logs(i).expect("get_logs should not fail"); - assert!(logs.is_empty(), "logs for block {i} should be empty"); - } - } - - #[test] - fn test_rewind_to_conflict_hash() { - let db = setup_db(); - let genesis = genesis_block(); - initialize_db(&db, &genesis).expect("Failed to initialize DB"); - - // Insert block 1 - let block1 = sample_block_info(1, genesis.hash); - insert_block_logs(&db, &block1, vec![sample_log(0, true)]).expect("insert block 1"); - - // Create a conflicting block with the same number but different hash - let mut conflicting_block1 = block1; - conflicting_block1.hash = B256::from([0xAB; 32]); // different hash - - let tx = db.tx_mut().expect("Failed to get tx"); - let provider = LogProvider::new(&tx, CHAIN_ID); - - let result = provider.rewind_to(&conflicting_block1.id()); - assert!( - matches!(result, Err(StorageError::ConflictError)), - "Expected conflict error due to 
hash mismatch" - ); - } -} diff --git a/rust/kona/crates/supervisor/storage/src/providers/mod.rs b/rust/kona/crates/supervisor/storage/src/providers/mod.rs deleted file mode 100644 index fb8aaeb54342f..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/providers/mod.rs +++ /dev/null @@ -1,15 +0,0 @@ -//! Providers for supervisor state tracking. -//! -//! This module defines and implements storage providers used by the supervisor -//! for managing L2 execution state. It includes support for reading and writing: -//! - Logs and block metadata (via [`LogProvider`]) -//! - Derivation pipeline state (via [`DerivationProvider`]) -//! - Chain head tracking and progression -mod derivation_provider; -pub(crate) use derivation_provider::DerivationProvider; - -mod log_provider; -pub(crate) use log_provider::LogProvider; - -mod head_ref_provider; -pub(crate) use head_ref_provider::SafetyHeadRefProvider; diff --git a/rust/kona/crates/supervisor/storage/src/traits.rs b/rust/kona/crates/supervisor/storage/src/traits.rs deleted file mode 100644 index 5880b913c4404..0000000000000 --- a/rust/kona/crates/supervisor/storage/src/traits.rs +++ /dev/null @@ -1,475 +0,0 @@ -use crate::StorageError; -use alloy_eips::eip1898::BlockNumHash; -use alloy_primitives::ChainId; -use kona_interop::DerivedRefPair; -use kona_protocol::BlockInfo; -use kona_supervisor_types::{Log, SuperHead}; -use op_alloy_consensus::interop::SafetyLevel; -use std::fmt::Debug; - -/// Provides an interface for supervisor storage to manage source and derived blocks. -/// -/// Defines methods to retrieve derived block information, -/// enabling the supervisor to track the derivation progress. -/// -/// Implementations are expected to provide persistent and thread-safe access to block data. -pub trait DerivationStorageReader: Debug { - /// Gets the source [`BlockInfo`] for a given derived block [`BlockNumHash`]. 
- /// - /// NOTE: [`LocalUnsafe`] block is not pushed to L1 yet, hence it cannot be part of derivation - /// storage. - /// - /// # Arguments - /// * `derived_block_id` - The identifier (number and hash) of the derived (L2) block. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the source block information if it exists. - /// * `Err(StorageError)` if there is an issue retrieving the source block. - /// - /// [`LocalUnsafe`]: SafetyLevel::LocalUnsafe - fn derived_to_source(&self, derived_block_id: BlockNumHash) -> Result; - - /// Gets the latest derived [`BlockInfo`] associated with the given source block - /// [`BlockNumHash`]. - /// - /// # Arguments - /// * `source_block_id` - The identifier (number and hash) of the L1 source block. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the latest derived block information if it exists. - /// * `Err(StorageError)` if there is an issue retrieving the derived block. - fn latest_derived_block_at_source( - &self, - source_block_id: BlockNumHash, - ) -> Result; - - /// Gets the latest derivation state [`DerivedRefPair`] from the storage, which includes the - /// latest source block and the latest derived block. - /// - /// # Returns - /// - /// * `Ok(DerivedRefPair)` containing the latest derived block pair if it exists. - /// * `Err(StorageError)` if there is an issue retrieving the pair. - fn latest_derivation_state(&self) -> Result; - - /// Gets the source block for the given source block number. - /// - /// # Arguments - /// * `source_block_number` - The number of the source block to retrieve. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the source block information if it exists. - /// * `Err(StorageError)` if there is an issue retrieving the source block. - fn get_source_block(&self, source_block_number: u64) -> Result; - - /// Gets the interop activation [`BlockInfo`]. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the activation block information if it exists. 
- /// * `Err(StorageError)` if there is an issue retrieving the activation block. - fn get_activation_block(&self) -> Result; -} - -/// Provides an interface for supervisor storage to write source and derived blocks. -/// -/// Defines methods to persist derived block information, -/// enabling the supervisor to track the derivation progress. -/// -/// Implementations are expected to provide persistent and thread-safe access to block data. -pub trait DerivationStorageWriter: Debug { - /// Initializes the derivation storage with a given [`DerivedRefPair`]. - /// This method is typically called once to set up the storage with the initial pair. - /// - /// # Arguments - /// * `incoming_pair` - The derived block pair to initialize the storage with. - /// - /// # Returns - /// * `Ok(())` if the storage was successfully initialized. - /// * `Err(StorageError)` if there is an issue initializing the storage. - fn initialise_derivation_storage( - &self, - incoming_pair: DerivedRefPair, - ) -> Result<(), StorageError>; - - /// Saves a [`DerivedRefPair`] to the storage. - /// - /// This method is **append-only**: it does not overwrite existing pairs. - /// - If a pair with the same block number already exists and is identical to the incoming pair, - /// the request is silently ignored (idempotent). - /// - If a pair with the same block number exists but differs from the incoming pair, an error - /// is returned to indicate a data inconsistency. - /// - If the pair is new and consistent, it is appended to the storage. - /// - /// Ensures that the latest stored pair is the parent of the incoming pair before saving. - /// - /// # Arguments - /// * `incoming_pair` - The derived block pair to save. - /// - /// # Returns - /// * `Ok(())` if the pair was successfully saved. - /// * `Err(StorageError)` if there is an issue saving the pair. 
- fn save_derived_block(&self, incoming_pair: DerivedRefPair) -> Result<(), StorageError>; - - /// Saves the latest incoming source [`BlockInfo`] to the storage. - /// - /// This method is **append-only**: it does not overwrite existing source blocks. - /// - If a source block with the same number already exists and is identical to the incoming - /// block, the request is silently ignored (idempotent). - /// - If a source block with the same number exists but differs from the incoming block, an - /// error is returned to indicate a data inconsistency. - /// - If the block is new and consistent, it is appended to the storage. - /// - /// Ensures that the latest stored source block is the parent of the incoming block before - /// saving. - /// - /// # Arguments - /// * `source` - The source block to save. - /// - /// # Returns - /// * `Ok(())` if the source block was successfully saved. - /// * `Err(StorageError)` if there is an issue saving the source block. - fn save_source_block(&self, source: BlockInfo) -> Result<(), StorageError>; -} - -/// Combines both reading and writing capabilities for derivation storage. -/// -/// Any type that implements both [`DerivationStorageReader`] and [`DerivationStorageWriter`] -/// automatically implements this trait. -pub trait DerivationStorage: DerivationStorageReader + DerivationStorageWriter {} - -impl DerivationStorage for T {} - -/// Provides an interface for retrieving logs associated with blocks. -/// -/// This trait defines methods to retrieve the latest block, -/// find a block by a specific log, and retrieve logs for a given block number. -/// -/// Implementations are expected to provide persistent and thread-safe access to block logs. -pub trait LogStorageReader: Debug { - /// Retrieves the latest [`BlockInfo`] from the storage. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the latest block information. - /// * `Err(StorageError)` if there is an issue retrieving the latest block. 
- fn get_latest_block(&self) -> Result; - - /// Retrieves the [`BlockInfo`] from the storage for a given block number - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the block information. - /// * `Err(StorageError)` if there is an issue retrieving the block. - fn get_block(&self, block_number: u64) -> Result; - - /// Finds a [`Log`] by `block_number` and `log_index` - /// - /// # Arguments - /// * `block_number` - The block number to search for the log. - /// * `log_index` - The index of the log within the block. - /// - /// # Returns - /// * `Ok(Log)` containing the [`Log`] object. - /// * `Err(StorageError)` if there is an issue retrieving the log or if the log is not found. - fn get_log(&self, block_number: u64, log_index: u32) -> Result; - - /// Retrieves all [`Log`]s associated with a specific block number. - /// - /// # Arguments - /// * `block_number` - The block number for which to retrieve logs. - /// - /// # Returns - /// * `Ok(Vec)` containing the logs associated with the block number. - /// * `Err(StorageError)` if there is an issue retrieving the logs or if no logs are found. - fn get_logs(&self, block_number: u64) -> Result, StorageError>; -} - -/// Provides an interface for storing blocks and logs associated with blocks. -/// -/// Implementations are expected to provide persistent and thread-safe access to block logs. -pub trait LogStorageWriter: Send + Sync + Debug { - /// Initializes the log storage with a given [`BlockInfo`]. - /// This method is typically called once to set up the storage with the initial block. - /// - /// # Arguments - /// * `block` - The [`BlockInfo`] to initialize the storage with. - /// - /// # Returns - /// * `Ok(())` if the storage was successfully initialized. - /// * `Err(StorageError)` if there is an issue initializing the storage. - fn initialise_log_storage(&self, block: BlockInfo) -> Result<(), StorageError>; - - /// Stores [`BlockInfo`] and [`Log`]s in the storage. 
- /// This method is append-only and does not overwrite existing logs. - /// Ensures that the latest stored block is the parent of the incoming block before saving. - /// - /// # Arguments - /// * `block` - [`BlockInfo`] to associate with the logs. - /// * `logs` - The [`Log`] events associated with the block. - /// - /// # Returns - /// * `Ok(())` if the logs were successfully stored. - /// * `Err(StorageError)` if there is an issue storing the logs. - fn store_block_logs(&self, block: &BlockInfo, logs: Vec) -> Result<(), StorageError>; -} - -/// Combines both reading and writing capabilities for log storage. -/// -/// Any type that implements both [`LogStorageReader`] and [`LogStorageWriter`] -/// automatically implements this trait. -pub trait LogStorage: LogStorageReader + LogStorageWriter {} - -impl LogStorage for T {} - -/// Provides an interface for retrieving head references. -/// -/// This trait defines methods to manage safety head references for different safety levels. -/// Each safety level maintains a reference to a block. -/// -/// Implementations are expected to provide persistent and thread-safe access to safety head -/// references. -pub trait HeadRefStorageReader: Debug { - /// Retrieves the current [`BlockInfo`] for a given [`SafetyLevel`]. - /// - /// # Arguments - /// * `safety_level` - The safety level for which to retrieve the head reference. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the current safety head reference. - /// * `Err(StorageError)` if there is an issue retrieving the reference. - fn get_safety_head_ref(&self, safety_level: SafetyLevel) -> Result; - - /// Retrieves the super head reference from the storage. - /// - /// # Returns - /// * `Ok(SuperHead)` containing the super head reference. - /// * `Err(StorageError)` if there is an issue retrieving the super head reference. - fn get_super_head(&self) -> Result; -} - -/// Provides an interface for storing head references. 
-/// -/// This trait defines methods to manage safety head references for different safety levels. -/// Each safety level maintains a reference to a block. -/// -/// Implementations are expected to provide persistent and thread-safe access to safety head -/// references. -pub trait HeadRefStorageWriter: Debug { - /// Updates the finalized head reference using a finalized source(l1) block. - /// - /// # Arguments - /// * `source_block` - The [`BlockInfo`] of the source block to use for the update. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the updated finalized derived(l2) block information. - /// * `Err(StorageError)` if there is an issue updating the finalized head reference. - fn update_finalized_using_source( - &self, - finalized_source_block: BlockInfo, - ) -> Result; - - /// Updates the current [`CrossUnsafe`](SafetyLevel::CrossUnsafe) head reference in storage. - /// - /// Ensures the provided block still exists in log storage and was not removed due to a re-org. - /// If the stored block's hash does not match the provided block, the update is aborted. - /// # Arguments - /// * `block` - The [`BlockInfo`] to set as the head reference - /// - /// # Returns - /// * `Ok(())` if the reference was successfully updated. - /// * `Err(StorageError)` if there is an issue updating the reference. - fn update_current_cross_unsafe(&self, block: &BlockInfo) -> Result<(), StorageError>; - - /// Updates the current [`CrossSafe`](SafetyLevel::CrossSafe) head reference in storage and - /// returns the corresponding derived pair. - /// - /// Ensures the provided block still exists in derivation storage and was not removed due to a - /// re-org. # Arguments - /// * `block` - The [`BlockInfo`] to set as the head reference - /// - /// # Returns - /// * `Ok(DerivedRefPair)` if the reference was successfully updated. - /// * `Err(StorageError)` if there is an issue updating the reference. 
- fn update_current_cross_safe(&self, block: &BlockInfo) -> Result; -} - -/// Combines both reading and writing capabilities for safety head ref storage. -/// -/// Any type that implements both [`HeadRefStorageReader`] and [`HeadRefStorageWriter`] -/// automatically implements this trait. -pub trait HeadRefStorage: HeadRefStorageReader + HeadRefStorageWriter {} - -impl HeadRefStorage for T {} - -/// Provides an interface for managing the finalized L1 block reference in the storage. -/// -/// This trait defines methods to update and retrieve the finalized L1 block reference. -pub trait FinalizedL1Storage { - /// Updates the finalized L1 block reference in the storage. - /// - /// # Arguments - /// * `block` - The new [`BlockInfo`] to set as the finalized L1 block reference. - /// - /// # Returns - /// * `Ok(())` if the reference was successfully updated. - /// * `Err(StorageError)` if there is an issue updating the reference. - fn update_finalized_l1(&self, block: BlockInfo) -> Result<(), StorageError>; - - /// Retrieves the finalized L1 block reference from the storage. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the finalized L1 block reference. - /// * `Err(StorageError)` if there is an issue retrieving the reference. - fn get_finalized_l1(&self) -> Result; -} - -/// Provides an interface for retrieving block and safety information across multiple chains. -/// -/// This trait defines methods required by the cross-chain safety checker to access -/// block metadata, logs, and safe head references for various chains. -pub trait CrossChainSafetyProvider { - /// Retrieves the [`BlockInfo`] for a given block number on the specified chain. - /// - /// # Arguments - /// * `chain_id` - The [`ChainId`] of the target chain. - /// * `block_number` - The number of the block to retrieve. - /// - /// # Returns - /// * `Ok(BlockInfo)` containing the block metadata if available. - /// * `Err(StorageError)` if there is an issue fetching the block. 
- fn get_block(&self, chain_id: ChainId, block_number: u64) -> Result; - - /// Retrieves a [`Log`] by `block_number` and `log_index` - /// - /// # Arguments - /// * `chain_id` - The [`ChainId`] of the target chain. - /// * `block_number` - The block number to search for the log. - /// * `log_index` - The index of the log within the block. - /// - /// # Returns - /// * `Ok(Log)` containing the [`Log`] object. - /// * `Err(StorageError)` if there is an issue retrieving the log or if the log is not found. - fn get_log( - &self, - chain_id: ChainId, - block_number: u64, - log_index: u32, - ) -> Result; - - /// Retrieves all logs associated with the specified block on the given chain. - /// - /// # Arguments - /// * `chain_id` - The [`ChainId`] of the target chain. - /// * `block_number` - The number of the block whose logs should be retrieved. - /// - /// # Returns - /// * `Ok(Vec)` containing all logs for the block. - /// * `Err(StorageError)` if there is an issue fetching the logs. - fn get_block_logs( - &self, - chain_id: ChainId, - block_number: u64, - ) -> Result, StorageError>; - - /// Retrieves the latest known safe head reference for a given chain at the specified safety - /// level. - /// - /// # Arguments - /// * `chain_id` - The [`ChainId`] of the target chain. - /// * `level` - The desired [`SafetyLevel`] (e.g., `CrossSafe`, `LocalSafe`). - /// - /// # Returns - /// * `Ok(BlockInfo)` representing the safe head block at the requested safety level. - /// * `Err(StorageError)` if the safe head cannot be retrieved. - fn get_safety_head_ref( - &self, - chain_id: ChainId, - level: SafetyLevel, - ) -> Result; - - /// Updates the current [`CrossUnsafe`](SafetyLevel::CrossUnsafe) head reference in storage. - /// - /// Ensures the provided block still exists in log storage and was not removed due to a re-org. - /// If the stored block's hash does not match the provided block, the update is aborted. 
- /// # Arguments - /// * `chain_id` - The [`ChainId`] of the target chain. - /// * `block` - The [`BlockInfo`] to set as the head reference - /// - /// # Returns - /// * `Ok(())` if the reference was successfully updated. - /// * `Err(StorageError)` if there is an issue updating the reference. - fn update_current_cross_unsafe( - &self, - chain_id: ChainId, - block: &BlockInfo, - ) -> Result<(), StorageError>; - - /// Updates the current [`CrossSafe`](SafetyLevel::CrossSafe) head reference in storage and - /// returns the corresponding derived pair. - /// - /// Ensures the provided block still exists in derivation storage and was not removed due to a - /// re-org. # Arguments - /// * `chain_id` - The [`ChainId`] of the target chain. - /// * `block` - The [`BlockInfo`] to set as the head reference - /// - /// # Returns - /// * `Ok(DerivedRefPair)` if the reference was successfully updated. - /// * `Err(StorageError)` if there is an issue updating the reference. - fn update_current_cross_safe( - &self, - chain_id: ChainId, - block: &BlockInfo, - ) -> Result; -} - -/// Trait for rewinding supervisor-related state in the database. -/// -/// This trait provides an interface to revert persisted log data, derivation records, -/// and safety head references from the latest block back to a specified block number (inclusive). -/// It is typically used during chain reorganizations or when invalid blocks are detected and need -/// to be rolled back. -pub trait StorageRewinder { - /// Rewinds the log storage from the latest block down to the specified block (inclusive). - /// This method ensures that log storage is never rewound to(since it's inclusive) and beyond - /// the local safe head. If the target block is beyond the local safe head, an error is - /// returned. Use [`StorageRewinder::rewind`] to rewind to and beyond the local safe head. - /// - /// # Arguments - /// * `to` - The block id to rewind to. 
- /// - /// # Errors - /// Returns a [`StorageError`] if any database operation fails during the rewind. - fn rewind_log_storage(&self, to: &BlockNumHash) -> Result<(), StorageError>; - - /// Rewinds all supervisor-managed state (log storage, derivation, and safety head refs) - /// from the latest block back to the given block (inclusive). - /// - /// This method performs a coordinated rewind across all components, ensuring consistency - /// of supervisor state after chain reorganizations or rollback of invalid blocks. - /// - /// # Arguments - /// * `to` - The target block id to rewind to. Rewind is performed from the latest block down to - /// this block. - /// - /// # Errors - /// Returns a [`StorageError`] if any part of the rewind process fails. - fn rewind(&self, to: &BlockNumHash) -> Result<(), StorageError>; - - /// Rewinds the storage to a specific source block (inclusive), ensuring that all derived blocks - /// and logs associated with that source blocks are also reverted. - /// - /// # Arguments - /// * `to` - The source block [`BlockNumHash`] to rewind to. - /// - /// # Returns - /// * [`BlockInfo`] of the derived block that was rewound to, or `None` if no derived blocks - /// were found. - /// * `Err(StorageError)` if there is an issue during the rewind operation. - fn rewind_to_source(&self, to: &BlockNumHash) -> Result, StorageError>; -} - -/// Combines the reader traits for the database. -/// -/// Any type that implements [`DerivationStorageReader`], [`HeadRefStorageReader`], and -/// [`LogStorageReader`] automatically implements this trait. 
-pub trait DbReader: DerivationStorageReader + HeadRefStorageReader + LogStorageReader {} - -impl DbReader for T {} diff --git a/rust/kona/crates/supervisor/types/Cargo.toml b/rust/kona/crates/supervisor/types/Cargo.toml deleted file mode 100644 index 8c98cd0964004..0000000000000 --- a/rust/kona/crates/supervisor/types/Cargo.toml +++ /dev/null @@ -1,39 +0,0 @@ -[package] -name = "kona-supervisor-types" -description = "Types used by the OP Stack supervisor" -version = "0.1.1" - -edition.workspace = true -license.workspace = true -rust-version.workspace = true -authors.workspace = true -homepage.workspace = true -repository.workspace = true -keywords.workspace = true -categories.workspace = true -exclude.workspace = true - -[lints] -workspace = true - -[dependencies] - -# workspace -kona-interop = { workspace = true, features = ["serde"] } -kona-protocol = { workspace = true, features = ["serde"] } - -# alloy -alloy-eips.workspace = true -alloy-primitives = { workspace = true, features = ["map", "rlp", "serde"] } -alloy-serde = { workspace = true } - -# op-alloy -op-alloy-consensus.workspace = true - -# general -serde.workspace = true -derive_more = { workspace = true, default-features = false, features = ["constructor"] } -thiserror = {workspace = true} - -[dev-dependencies] -serde_json.workspace = true diff --git a/rust/kona/crates/supervisor/types/README.md b/rust/kona/crates/supervisor/types/README.md deleted file mode 100644 index fc2d95572744d..0000000000000 --- a/rust/kona/crates/supervisor/types/README.md +++ /dev/null @@ -1 +0,0 @@ -## `kona-supervisor-types` \ No newline at end of file diff --git a/rust/kona/crates/supervisor/types/src/access_list.rs b/rust/kona/crates/supervisor/types/src/access_list.rs deleted file mode 100644 index 9928f5fa9b47e..0000000000000 --- a/rust/kona/crates/supervisor/types/src/access_list.rs +++ /dev/null @@ -1,396 +0,0 @@ -use alloy_primitives::{B256, keccak256}; -use thiserror::Error; - -/// A structured representation of a 
parsed `CrossL2Inbox` message access entry. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Access { - /// Full 256-bit chain ID (combined from lookup + extension) - pub chain_id: [u8; 32], - /// Block number in the source chain - pub block_number: u64, - /// Timestamp of the message's block - pub timestamp: u64, - /// Log index of the message within the block - pub log_index: u32, - /// Provided checksum entry (prefix 0x03) - pub checksum: B256, -} - -impl Access { - /// Constructs a new [`Access`] from a `LookupEntry`, optional `ChainIdExtensionEntry`, - /// and a `ChecksumEntry`. Used internally by the parser. - fn from_entries( - lookup: LookupEntry, - chain_id_ext: Option, - checksum: ChecksumEntry, - ) -> Self { - let mut chain_id = [0u8; 32]; - - if let Some(ext) = chain_id_ext { - chain_id[0..24].copy_from_slice(&ext.upper_bytes); - } - - chain_id[24..32].copy_from_slice(&lookup.chain_id_low); - - Self { - chain_id, - block_number: lookup.block_number, - timestamp: lookup.timestamp, - log_index: lookup.log_index, - checksum: checksum.raw, - } - } - - /// Recomputes the checksum for this access entry. - /// - /// This follows the spec: - /// - `idPacked = 12 zero bytes ++ block_number ++ timestamp ++ log_index` - /// - `idLogHash = keccak256(log_hash ++ idPacked)` - /// - `bareChecksum = keccak256(idLogHash ++ chain_id)` - /// - Prepend 0x03 to `bareChecksum[1..]` - /// - /// Returns the full 32-byte checksum with prefix 0x03. 
- /// - /// Reference: [Checksum Calculation](https://github.com/ethereum-optimism/specs/blob/main/specs/interop/predeploys.md#type-3-checksum) - pub fn recompute_checksum(&self, log_hash: &B256) -> B256 { - // Step 1: idPacked = [0u8; 12] ++ block_number ++ timestamp ++ log_index - let mut id_packed = [0u8; 12 + 8 + 8 + 4]; // 32 bytes - id_packed[12..20].copy_from_slice(&self.block_number.to_be_bytes()); - id_packed[20..28].copy_from_slice(&self.timestamp.to_be_bytes()); - id_packed[28..32].copy_from_slice(&self.log_index.to_be_bytes()); - - // Step 2: keccak256(log_hash ++ id_packed) - let id_log_hash = keccak256([log_hash.as_slice(), &id_packed].concat()); - - // Step 3: keccak256(id_log_hash ++ chain_id) - let bare_checksum = keccak256([id_log_hash.as_slice(), &self.chain_id].concat()); - - // Step 4: Prepend type byte 0x03 (overwrite first byte) - let mut checksum = bare_checksum; - checksum.0[0] = 0x03; - - checksum - } - - /// Verify the checksums after recalculation - pub fn verify_checksum(&self, log_hash: &B256) -> Result<(), AccessListError> { - if self.recompute_checksum(log_hash) != self.checksum { - return Err(AccessListError::MalformedEntry); - } - Ok(()) - } -} - -/// Represents a single entry in the access list. -#[derive(Debug, Clone)] -enum AccessListEntry { - Lookup(LookupEntry), - ChainIdExtension(ChainIdExtensionEntry), - Checksum(ChecksumEntry), -} - -/// Parsed lookup identity entry (type 0x01). -#[derive(Debug, Clone)] -struct LookupEntry { - pub chain_id_low: [u8; 8], - pub block_number: u64, - pub timestamp: u64, - pub log_index: u32, -} - -/// Parsed Chain ID extension entry (type 0x02). -#[derive(Debug, Clone)] -struct ChainIdExtensionEntry { - pub upper_bytes: [u8; 24], -} - -/// Parsed checksum entry (type 0x03). -#[derive(Debug, Clone)] -struct ChecksumEntry { - pub raw: B256, -} - -/// Error returned when access list parsing fails. 
-#[derive(Debug, Error, PartialEq, Eq)] -pub enum AccessListError { - /// Input ended before a complete message group was parsed. - #[error("unexpected end of access list")] - UnexpectedEnd, - - /// Unexpected entry type found. - #[error("expected type {expected:#x}, got {found:#x}")] - UnexpectedType { - /// The type we expected (e.g. 0x01, 0x02, or 0x03) - expected: u8, - /// The actual type byte we found - found: u8, - }, - - /// Malformed entry sequence or invalid prefix structure. - #[error("malformed entry")] - MalformedEntry, - - /// Message expired. - #[error("message expired")] - MessageExpired, - - /// Timestamp invariant violated. - #[error("executing timestamp is earlier than initiating timestamp")] - InvalidTimestampInvariant, -} - -// Access list entry type byte constants -const PREFIX_LOOKUP: u8 = 0x01; -const PREFIX_CHAIN_ID_EXTENSION: u8 = 0x02; -const PREFIX_CHECKSUM: u8 = 0x03; - -/// Parses a vector of raw `B256` access list entries into structured [`Access`] objects. -/// -/// Each `Access` group must follow the pattern: -/// - One `Lookup` entry (prefix `0x01`) -/// - Optionally one `ChainIdExtension` entry (prefix `0x02`) -/// - One `Checksum` entry (prefix `0x03`) -/// -/// Entries are consumed in order. If any group is malformed, this function returns a -/// [`AccessListError`]. -/// -/// # Arguments -/// -/// * `entries` - A `Vec` representing the raw access list entries. -/// -/// # Returns -/// -/// A vector of fully parsed [`Access`] items if all entries are valid. -/// -/// # Errors -/// -/// Returns [`AccessListError`] if entries are out-of-order, malformed, or incomplete. 
-pub fn parse_access_list(entries: Vec) -> Result, AccessListError> { - let mut list = Vec::with_capacity(entries.len() / 2); - let mut lookup_entry: Option = None; - let mut chain_id_ext: Option = None; - - for entry in entries { - let parsed = parse_entry(&entry)?; - - match parsed { - AccessListEntry::Lookup(lookup) => { - if lookup_entry.is_some() { - return Err(AccessListError::MalformedEntry); - } - lookup_entry = Some(lookup); - } - - AccessListEntry::ChainIdExtension(ext) => { - if lookup_entry.is_none() || chain_id_ext.is_some() { - return Err(AccessListError::MalformedEntry); - } - chain_id_ext = Some(ext); - } - - AccessListEntry::Checksum(checksum) => { - let lookup = lookup_entry.take().ok_or(AccessListError::MalformedEntry)?; - let access = Access::from_entries(lookup, chain_id_ext.take(), checksum); - list.push(access); - } - } - } - - if lookup_entry.is_some() { - return Err(AccessListError::UnexpectedEnd); - } - - Ok(list) -} - -/// Parses a single 32-byte access list entry into a typed [`AccessListEntry`]. -/// -/// This function performs a prefix-based decoding of the input hash: -/// -/// ### Entry Type Encoding -/// -/// | Prefix Byte | Type | Description | -/// |-------------|------------------------|-------------------------------------------------------------------| -/// | `0x01` | `LookupEntry` | Contains chain ID (low bits), block number, timestamp, log index. | -/// | `0x02` | `ChainIdExtensionEntry`| Contains upper 24 bytes of a 256-bit chain ID. | -/// | `0x03` | `ChecksumEntry` | Contains the checksum hash used for message validation. | -/// -/// ### Spec References -/// -/// - [Optimism Access List Format](https://github.com/ethereum-optimism/specs/blob/main/specs/interop/predeploys.md#access-list) -/// - Entry format and layout based on `CrossL2Inbox` access-list encoding. 
-fn parse_entry(entry: &B256) -> Result { - match entry[0] { - PREFIX_LOOKUP => { - if entry[1..4] != [0; 3] { - return Err(AccessListError::MalformedEntry); - } - Ok(AccessListEntry::Lookup(LookupEntry { - chain_id_low: entry[4..12].try_into().unwrap(), - block_number: u64::from_be_bytes(entry[12..20].try_into().unwrap()), - timestamp: u64::from_be_bytes(entry[20..28].try_into().unwrap()), - log_index: u32::from_be_bytes(entry[28..32].try_into().unwrap()), - })) - } - - PREFIX_CHAIN_ID_EXTENSION => { - if entry[1..8] != [0; 7] { - return Err(AccessListError::MalformedEntry); - } - Ok(AccessListEntry::ChainIdExtension(ChainIdExtensionEntry { - upper_bytes: entry[8..32].try_into().unwrap(), - })) - } - - PREFIX_CHECKSUM => Ok(AccessListEntry::Checksum(ChecksumEntry { raw: *entry })), - - other => Err(AccessListError::UnexpectedType { expected: PREFIX_LOOKUP, found: other }), - } -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::{B256, U256, b256}; - - fn make_lookup_entry( - block_number: u64, - timestamp: u64, - log_index: u32, - chain_id_low: [u8; 8], - ) -> B256 { - let mut buf = [0u8; 32]; - buf[0] = PREFIX_LOOKUP; - // 3 zero padding - buf[4..12].copy_from_slice(&chain_id_low); - buf[12..20].copy_from_slice(&block_number.to_be_bytes()); - buf[20..28].copy_from_slice(×tamp.to_be_bytes()); - buf[28..32].copy_from_slice(&log_index.to_be_bytes()); - B256::from(buf) - } - - fn make_chain_id_ext(upper: [u8; 24]) -> B256 { - let mut buf = [0u8; 32]; - buf[0] = PREFIX_CHAIN_ID_EXTENSION; - // 7 zero padding - buf[8..32].copy_from_slice(&upper); - B256::from(buf) - } - - fn make_checksum(access: &Access, log_hash: &B256) -> B256 { - access.recompute_checksum(log_hash) - } - - #[test] - fn test_parse_valid_access_list_with_chain_id_ext() { - let block_number = 1234; - let timestamp = 9999; - let log_index = 5; - let chain_id_low = [1u8; 8]; - let upper_bytes = [2u8; 24]; - let log_hash = keccak256([0u8; 32]); - - let lookup = 
make_lookup_entry(block_number, timestamp, log_index, chain_id_low); - let chain_ext = make_chain_id_ext(upper_bytes); - - let access = Access::from_entries( - LookupEntry { chain_id_low, block_number, timestamp, log_index }, - Some(ChainIdExtensionEntry { upper_bytes }), - ChecksumEntry { - raw: B256::default(), // will override later - }, - ); - - let checksum = make_checksum(&access, &log_hash); - - let access = Access::from_entries( - LookupEntry { chain_id_low, block_number, timestamp, log_index }, - Some(ChainIdExtensionEntry { upper_bytes }), - ChecksumEntry { raw: checksum }, - ); - - let list = vec![lookup, chain_ext, checksum]; - let parsed = parse_access_list(list).unwrap(); - assert_eq!(parsed.len(), 1); - assert_eq!(parsed[0], access); - assert!(parsed[0].verify_checksum(&log_hash).is_ok()); - } - - #[test] - fn test_parse_access_list_without_chain_id_ext() { - let block_number = 1; - let timestamp = 2; - let log_index = 3; - let chain_id_low = [0xaa; 8]; - let log_hash = keccak256([1u8; 32]); - - let lookup = make_lookup_entry(block_number, timestamp, log_index, chain_id_low); - let access = Access::from_entries( - LookupEntry { chain_id_low, block_number, timestamp, log_index }, - None, - ChecksumEntry { raw: B256::default() }, - ); - let checksum = make_checksum(&access, &log_hash); - let access = Access::from_entries( - LookupEntry { chain_id_low, block_number, timestamp, log_index }, - None, - ChecksumEntry { raw: checksum }, - ); - - let list = vec![lookup, checksum]; - let parsed = parse_access_list(list).unwrap(); - assert_eq!(parsed.len(), 1); - assert_eq!(parsed[0], access); - assert!(parsed[0].verify_checksum(&log_hash).is_ok()); - } - - #[test] - fn test_recompute_checksum_against_known_value() { - // Input data - let access = Access { - chain_id: U256::from(3).to_be_bytes(), - block_number: 2587, - timestamp: 4660, - log_index: 66, - checksum: B256::default(), // not used in this test - }; - - let log_hash = 
b256!("0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef"); - - // Expected checksum computed previously using spec logic - let expected = b256!("0x03ca886771056d8ea647bb809b888ba14986f57daaf28954d40408321717716a"); - - let computed = access.recompute_checksum(&log_hash); - assert_eq!(computed, expected, "Checksum does not match expected value"); - } - - #[test] - fn test_checksum_mismatch() { - let block_number = 1; - let timestamp = 2; - let log_index = 3; - let chain_id_low = [0xaa; 8]; - let log_hash = keccak256([1u8; 32]); - - let lookup = make_lookup_entry(block_number, timestamp, log_index, chain_id_low); - let fake_checksum = - b256!("0x03ca886771056d8ea647bb809b888ba14986f57daaf28954d40408321717716a"); - let list = vec![lookup, fake_checksum]; - - let parsed = parse_access_list(list).unwrap(); - let err = parsed[0].verify_checksum(&log_hash); - assert_eq!(err, Err(AccessListError::MalformedEntry)); - } - - #[test] - fn test_invalid_entry_order_should_fail() { - let mut raw = [0u8; 32]; - raw[0] = PREFIX_CHECKSUM; - let checksum = B256::from(raw); - - let lookup = make_lookup_entry(0, 0, 0, [0u8; 8]); - let entries = vec![checksum, lookup]; - - assert!(matches!(parse_access_list(entries), Err(AccessListError::MalformedEntry))); - } -} diff --git a/rust/kona/crates/supervisor/types/src/head.rs b/rust/kona/crates/supervisor/types/src/head.rs deleted file mode 100644 index 6fa4b5ebbf0d2..0000000000000 --- a/rust/kona/crates/supervisor/types/src/head.rs +++ /dev/null @@ -1,36 +0,0 @@ -//! Head of chain in context of superchain. - -use kona_protocol::BlockInfo; - -/// Head of a chain from superchain perspective. -/// -/// In context of a single chain, canonical head is tracked by its safe and finalized head. In -/// superchain context, earlier finality-stages (aka [`SafetyLevel`]s) are tracked too, i.e. -/// unsafe, cross-unsafe and local-safe heads. 
-/// -/// [`SafetyLevel`]: op_alloy_consensus::interop::SafetyLevel -#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)] -pub struct SuperHead { - /// Source (L1) block. - pub l1_source: Option, - /// [`Unsafe`] head of chain. - /// - /// [`Unsafe`]: op_alloy_consensus::interop::SafetyLevel::LocalUnsafe - pub local_unsafe: BlockInfo, - /// [`CrossUnsafe`] head of chain. - /// - /// [`CrossUnsafe`]: op_alloy_consensus::interop::SafetyLevel::CrossUnsafe - pub cross_unsafe: Option, - /// [`LocalSafe`] head of chain. - /// - /// [`LocalSafe`]: op_alloy_consensus::interop::SafetyLevel::LocalSafe - pub local_safe: Option, - /// [`Safe`] head of chain. - /// - /// [`Safe`]: op_alloy_consensus::interop::SafetyLevel::CrossSafe - pub cross_safe: Option, - /// [`Finalized`] head of chain. - /// - /// [`Finalized`]: op_alloy_consensus::interop::SafetyLevel::Finalized - pub finalized: Option, -} diff --git a/rust/kona/crates/supervisor/types/src/hex_string_u64.rs b/rust/kona/crates/supervisor/types/src/hex_string_u64.rs deleted file mode 100644 index 7df764180440a..0000000000000 --- a/rust/kona/crates/supervisor/types/src/hex_string_u64.rs +++ /dev/null @@ -1,63 +0,0 @@ -/// A wrapper around `u64` that supports hex string (e.g. `"0x1"`) or numeric deserialization -/// for RPC inputs. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub struct HexStringU64(pub u64); - -impl serde::Serialize for HexStringU64 { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - alloy_serde::quantity::serialize(&self.0, serializer) - } -} - -impl<'de> serde::Deserialize<'de> for HexStringU64 { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let inner = alloy_serde::quantity::deserialize(deserializer)?; - Ok(Self(inner)) - } -} - -impl From for u64 { - fn from(value: HexStringU64) -> Self { - value.0 - } -} - -impl From for HexStringU64 { - fn from(value: u64) -> Self { - Self(value) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_deserialize_from_hex_string() { - let json = r#""0x1a""#; - let parsed: HexStringU64 = serde_json::from_str(json).expect("should parse hex string"); - let chain_id: u64 = parsed.0; - assert_eq!(chain_id, 0x1a); - } - - #[test] - fn test_serialize_to_hex() { - let value = HexStringU64(26); - let json = serde_json::to_string(&value).expect("should serialize"); - assert_eq!(json, r#""0x1a""#); - } - - #[test] - fn test_round_trip() { - let original = HexStringU64(12345); - let json = serde_json::to_string(&original).unwrap(); - let parsed: HexStringU64 = serde_json::from_str(&json).unwrap(); - assert_eq!(parsed.0, original.0); - } -} diff --git a/rust/kona/crates/supervisor/types/src/lib.rs b/rust/kona/crates/supervisor/types/src/lib.rs deleted file mode 100644 index 55e27f33fb42f..0000000000000 --- a/rust/kona/crates/supervisor/types/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -//! Core types shared across supervisor components. -//! -//! This crate defines the fundamental data structures used within the -//! Optimism supervisor. 
- -pub mod head; -pub use head::SuperHead; - -mod log; -pub use log::Log; - -mod message; -pub use message::ExecutingMessage; - -mod receipt; -pub use receipt::Receipts; - -mod access_list; -pub use access_list::{Access, AccessListError, parse_access_list}; - -mod hex_string_u64; -mod types; - -pub use hex_string_u64::HexStringU64; - -pub use types::{BlockSeal, OutputV0, SubscriptionEvent}; diff --git a/rust/kona/crates/supervisor/types/src/log.rs b/rust/kona/crates/supervisor/types/src/log.rs deleted file mode 100644 index 7a3ec19a71616..0000000000000 --- a/rust/kona/crates/supervisor/types/src/log.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::message::ExecutingMessage; -use alloy_primitives::B256; - -/// A reference entry representing a log observed in an L2 receipt. -/// -/// This struct does **not** store the actual log content. Instead: -/// - `index` is the index of the log. -/// - `hash` is the hash of the log, which uniquely identifies the log entry and can be used for -/// lookups or comparisons. -/// - `executing_message` is present if the log represents an `ExecutingMessage` emitted by the -/// `CrossL2Inbox` contract. -/// -/// This is the unit persisted by the log indexer into the database for later validation. -#[derive(Debug, Clone, PartialEq, Eq)] -pub struct Log { - /// The index of the log. - pub index: u32, - /// The hash of the log, derived from the log address and payload. - pub hash: B256, - /// The parsed message, if the log matches an `ExecutingMessage` event. - pub executing_message: Option, -} diff --git a/rust/kona/crates/supervisor/types/src/message.rs b/rust/kona/crates/supervisor/types/src/message.rs deleted file mode 100644 index 5c185552ac1fa..0000000000000 --- a/rust/kona/crates/supervisor/types/src/message.rs +++ /dev/null @@ -1,17 +0,0 @@ -use alloy_primitives::B256; - -/// A parsed executing message extracted from a log emitted by the -/// `CrossL2Inbox` contract on an L2 chain. 
-#[derive(Debug, Clone, PartialEq, Eq)] -pub struct ExecutingMessage { - /// The chain ID where the message was observed. - pub chain_id: u64, - /// The block number that contained the log. - pub block_number: u64, - /// The log index within the block. - pub log_index: u32, - /// The timestamp of the block. - pub timestamp: u64, - /// A unique hash identifying the log (based on payload and origin). - pub hash: B256, -} diff --git a/rust/kona/crates/supervisor/types/src/receipt.rs b/rust/kona/crates/supervisor/types/src/receipt.rs deleted file mode 100644 index 327f5a4df3e2c..0000000000000 --- a/rust/kona/crates/supervisor/types/src/receipt.rs +++ /dev/null @@ -1,4 +0,0 @@ -use op_alloy_consensus::OpReceiptEnvelope; - -/// Collection of transaction receipts. -pub type Receipts = Vec; diff --git a/rust/kona/crates/supervisor/types/src/types.rs b/rust/kona/crates/supervisor/types/src/types.rs deleted file mode 100644 index 5bd183911e6dd..0000000000000 --- a/rust/kona/crates/supervisor/types/src/types.rs +++ /dev/null @@ -1,100 +0,0 @@ -//! Types for communication between supervisor and op-node. -//! -//! This module defines the data structures used for communicating between the supervisor -//! and the op-node components in the rollup system. It includes block references, -//! block seals, derivation events, and event notifications. - -use alloy_primitives::B256; -use kona_interop::ManagedEvent; -use serde::{Deserialize, Serialize}; - -// todo:: Determine appropriate locations for these structs and move them accordingly. -// todo:: Link these structs to the spec documentation after the related PR is merged. - -/// Represents a sealed block with its hash, number, and timestamp. 
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -#[serde(rename_all = "camelCase")] -pub struct BlockSeal { - /// The block's hash - pub hash: B256, - /// The block number - pub number: u64, - /// The block's timestamp - pub timestamp: u64, -} - -impl BlockSeal { - /// Creates a new [`BlockSeal`] with the given hash, number, and timestamp. - pub const fn new(hash: B256, number: u64, timestamp: u64) -> Self { - Self { hash, number, timestamp } - } -} -/// Output data for version 0 of the protocol. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] -#[serde(rename_all = "camelCase")] -pub struct OutputV0 { - /// The state root hash - pub state_root: B256, - /// Storage root of the message passer contract - pub message_passer_storage_root: B256, - /// The block hash - pub block_hash: B256, -} - -impl OutputV0 { - /// Creates a new [`OutputV0`] instance. - pub const fn new( - state_root: B256, - message_passer_storage_root: B256, - block_hash: B256, - ) -> Self { - Self { state_root, message_passer_storage_root, block_hash } - } -} - -/// Represents the events structure sent by the node to the supervisor. 
-#[derive(Debug, Serialize, Deserialize)] -pub struct SubscriptionEvent { - /// Represents the event data sent by the node - pub data: Option, -} - -#[cfg(test)] -mod tests { - use super::*; - use alloy_primitives::B256; - use serde_json::{Value, json}; - - #[test] - fn test_output_v0_serialize_camel_case() { - let output = OutputV0 { - state_root: B256::from([1u8; 32]), - message_passer_storage_root: B256::from([2u8; 32]), - block_hash: B256::from([3u8; 32]), - }; - - let json_str = serde_json::to_string(&output).unwrap(); - let v: Value = serde_json::from_str(&json_str).unwrap(); - - // Check that keys are camelCase - assert!(v.get("stateRoot").is_some()); - assert!(v.get("messagePasserStorageRoot").is_some()); - assert!(v.get("blockHash").is_some()); - } - - #[test] - fn test_output_v0_deserialize_camel_case() { - let json_obj = json!({ - "stateRoot": "0x0101010101010101010101010101010101010101010101010101010101010101", - "messagePasserStorageRoot": "0x0202020202020202020202020202020202020202020202020202020202020202", - "blockHash": "0x0303030303030303030303030303030303030303030303030303030303030303" - }); - - let json_str = serde_json::to_string(&json_obj).unwrap(); - let output: OutputV0 = serde_json::from_str(&json_str).unwrap(); - - assert_eq!(output.state_root, B256::from([1u8; 32])); - assert_eq!(output.message_passer_storage_root, B256::from([2u8; 32])); - assert_eq!(output.block_hash, B256::from([3u8; 32])); - } -} diff --git a/rust/kona/docker/README.md b/rust/kona/docker/README.md index 6904eabec0602..63a8223508a5a 100644 --- a/rust/kona/docker/README.md +++ b/rust/kona/docker/README.md @@ -50,7 +50,6 @@ docker buildx create --name kona-builder --use Nightly Docker images are automatically built and published every day at 2 AM UTC for: - `kona-node` - `kona-host` -- `kona-supervisor` ### Using Nightly Images @@ -58,7 +57,6 @@ Nightly Docker images are automatically built and published every day at 2 AM UT # Pull the latest nightly build 
(multi-platform: linux/amd64, linux/arm64) docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:nightly docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-host:nightly -docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-supervisor:nightly # Pull a specific date's nightly build docker pull us-docker.pkg.dev/oplabs-tools-artifacts/images/kona-node:nightly-2024-12-10 diff --git a/rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/dashboard.yml b/rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/dashboard.yml deleted file mode 100644 index 2d95a2149527e..0000000000000 --- a/rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/dashboard.yml +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: 1 - -providers: - - name: 'Kona Supervisor' - allowUiUpdates: true - options: - path: /etc/grafana/provisioning/dashboards \ No newline at end of file diff --git a/rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/kona-supervisor.json b/rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/kona-supervisor.json deleted file mode 100644 index 093b0caab2f23..0000000000000 --- a/rust/kona/docker/recipes/kona-supervisor/grafana/dashboards/kona-supervisor.json +++ /dev/null @@ -1,4772 +0,0 @@ -{ - "__inputs": [ - { - "name": "DS_PROMETHEUS", - "label": "Prometheus", - "description": "", - "type": "datasource", - "pluginId": "prometheus", - "pluginName": "Prometheus" - } - ], - "__elements": {}, - "__requires": [ - { - "type": "panel", - "id": "bargauge", - "name": "Bar gauge", - "version": "" - }, - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "11.5.0" - }, - { - "type": "datasource", - "id": "prometheus", - "name": "Prometheus", - "version": "1.0.0" - }, - { - "type": "panel", - "id": "stat", - "name": "Stat", - "version": "" - }, - { - "type": "panel", - "id": "timeseries", - "name": "Time series", - "version": "" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - 
"datasource": { - "type": "grafana", - "uid": "-- Grafana --" - }, - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "fiscalYearStartMonth": 0, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "dark-yellow", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 4, - "x": 0, - "y": 0 - }, - "id": 48, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "text": {}, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "exemplar": false, - "expr": "kona_supervisor_info{namespace=~\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": true, - "legendFormat": "{{version}}", - "range": false, - "refId": "A", - "useBackend": false - } - ], - "title": "Version", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "dark-yellow", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - 
"h": 4, - "w": 4, - "x": 4, - "y": 0 - }, - "id": 49, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "exemplar": false, - "expr": "kona_supervisor_info{namespace=~\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": true, - "legendFormat": "{{build_timestamp}}", - "range": false, - "refId": "A", - "useBackend": false - } - ], - "title": "Build Timestamp", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 4, - "x": 8, - "y": 0 - }, - "id": 50, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "exemplar": false, - "expr": "kona_supervisor_info{namespace=~\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": 
true, - "legendFormat": "{{cargo_features}}", - "range": false, - "refId": "A", - "useBackend": false - } - ], - "title": "Cargo Features", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "text", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 4, - "x": 12, - "y": 0 - }, - "id": 51, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "kona_supervisor_info{namespace=~\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{git_sha}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Git SHA", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 4, - "x": 16, - "y": 0 - }, - "id": 52, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - 
"lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "kona_supervisor_info{namespace=~\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{target_triple}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Target Triple", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "semi-dark-green", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 4, - "x": 20, - "y": 0 - }, - "id": 53, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "name", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "kona_supervisor_info{namespace=~\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{build_profile}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Build Profile", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Time since the supervisor service has 
started", - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "yellow", - "mode": "fixed" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "dthms" - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 4, - "x": 0, - "y": 4 - }, - "id": 42, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "time() - process_start_time_seconds{namespace=\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Elapsed Time", - "range": true, - "refId": "Elapsed Time", - "useBackend": false - } - ], - "title": "Elapsed Time", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Does not include swapped out pages. 
", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 4, - "y": 4 - }, - "id": 41, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "process_resident_memory_bytes{namespace=\"kona-supervisor-supervisor-superchain\"} / 1000000", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{namespace}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "RAM Usage (in MB)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "If the % is > 100, it means it is using multiple cores", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": 
"text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 8, - "x": 12, - "y": 4 - }, - "id": 40, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "code", - "expr": "rate(process_cpu_seconds_total{namespace=\"kona-supervisor-supervisor-superchain\"}[1m]) * 100", - "fullMetaSearch": false, - "includeNullMetadata": false, - "legendFormat": "{{namespace}}", - "range": true, - "refId": "CPU Usage", - "useBackend": false - } - ], - "title": "CPU usage (%)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 4, - "x": 20, - "y": 4 - }, - "id": 43, - "options": { - "colorMode": "value", - "graphMode": "none", - 
"justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "process_threads{namespace=\"kona-supervisor-supervisor-superchain\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{namespace}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "No. of Threads", - "type": "stat" - }, - { - "collapsed": false, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 11 - }, - "id": 35, - "panels": [], - "title": "Supervisor Overview", - "type": "row" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 5 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 4, - "w": 12, - "x": 0, - "y": 12 - }, - "id": 36, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "kona_supervisor_reorg_error_total{namespace=~\"kona-supervisor-supervisor-superchain\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - 
"includeNullMetadata": true, - "instant": false, - "legendFormat": "Failed ({{chain_id}})", - "range": true, - "refId": "Error", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "kona_supervisor_reorg_success_total{namespace=\"kona-supervisor-supervisor-superchain\", method!=\"process_chain_reorg\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Successful ({{chain_id}})", - "range": true, - "refId": "Success", - "useBackend": false - } - ], - "title": "Reorg: $chain_id", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Total number of blocks processed, including reorgs, reset, etc.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 7, - "w": 12, - "x": 12, - "y": 12 - }, - "id": 37, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_block_processing_success_total{type=~\"finalized\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Finalized ({{chain_id}})", - "range": true, - "refId": "Finalized", - "useBackend": false - }, - { - "datasource": 
{ - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_block_processing_success_total{type=~\"cross_safe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Cross-safe ({{chain_id}})", - "range": true, - "refId": "Cross-safe", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_block_processing_success_total{type=~\"cross_unsafe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Cross-unsafe ({{chain_id}})", - "range": true, - "refId": "Cross-unsafe", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_block_processing_success_total{type=~\"local_safe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Local-safe ({{chain_id}})", - "range": true, - "refId": "Local-safe", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_block_processing_success_total{type=~\"local_unsafe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Local-unsafe ({{chain_id}})", - "range": true, - "refId": "Local-unsafe", - "useBackend": false - } - ], - "title": "Total Blocks Processed: $chain_id", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "fixedColor": "purple", - "mode": "shades" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - 
"steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 3, - "w": 12, - "x": 0, - "y": 16 - }, - "id": 38, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "sum by(namespace) (supervisor_rpc_requests_error_total{namespace=\"kona-supervisor-supervisor-superchain\"})", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Failed Requests", - "range": true, - "refId": "Error", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "sum by(namespace) (supervisor_rpc_requests_success_total{namespace=\"kona-supervisor-supervisor-superchain\"})", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "Successful Requests", - "range": true, - "refId": "Success", - "useBackend": false - } - ], - "title": "Supervisor RPC", - "type": "stat" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Shows the latest safety head ref block number for each chain", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 19 - }, - "id": 54, - "options": { - "displayMode": "lcd", - "legend": 
{ - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "maxVizHeight": 300, - "minVizHeight": 16, - "minVizWidth": 8, - "namePlacement": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showUnfilled": true, - "sizing": "auto", - "valueMode": "text" - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_safety_head_ref_labels{type=~\"local_unsafe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{type}}: {{chain_id}}", - "range": true, - "refId": "Local Unsafe", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_safety_head_ref_labels{type=~\"cross_unsafe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "{{type}}: {{chain_id}}", - "range": true, - "refId": "Cross Unsafe", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_safety_head_ref_labels{type=~\"local_safe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "{{type}}: {{chain_id}}", - "range": true, - "refId": "Local Safe", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_safety_head_ref_labels{type=~\"cross_safe\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "{{type}}: 
{{chain_id}}", - "range": true, - "refId": "Cross Safe", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_safety_head_ref_labels{type=~\"finalized\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "{{type}}: {{chain_id}}", - "range": true, - "refId": "Finalized", - "useBackend": false - } - ], - "title": "Safety Head Refs: $chain_id", - "type": "bargauge" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Total Requests", - "fieldConfig": { - "defaults": { - "color": { - "mode": "thresholds" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 19 - }, - "id": 39, - "options": { - "colorMode": "value", - "graphMode": "none", - "justifyMode": "auto", - "orientation": "auto", - "percentChangeColorMode": "standard", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showPercentChange": false, - "textMode": "auto", - "wideLayout": true - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "sum by(node) (managed_node_rpc_requests_error_total{namespace=\"kona-supervisor-supervisor-superchain\", node=~\".*$chain_id.*\"})", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": true, - "legendFormat": "Failed Req ({{node}})", - "range": true, - "refId": "Error", - "useBackend": false - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "sum by(node) 
(managed_node_rpc_requests_success_total{namespace=\"kona-supervisor-supervisor-superchain\", node=~\".*$chain_id.*\"})", - "fullMetaSearch": false, - "includeNullMetadata": false, - "legendFormat": "{{node}}", - "range": true, - "refId": "Success", - "useBackend": false - } - ], - "title": "Managed Node RPC: $chain_id", - "transformations": [ - { - "id": "renameByRegex", - "options": { - "regex": "^([^0-9]*)(\\d+)(.*)$", - "renamePattern": "$2" - } - } - ], - "type": "stat" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 27 - }, - "id": 29, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Shows the no. of source blocks that the reorg happened for a given chain", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 28 - }, - "id": 33, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - 
"datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "kona_supervisor_reorg_l1_depth{quantile=\"0.95\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "L1 Depth: $chain_id", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Shows the no. of derived blocks reorg happened for the given chain", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 28 - }, - "id": 34, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": 
"kona_supervisor_reorg_l2_depth{quantile=\"0.95\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "L2 Depth: $chain_id", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Shows the success rate of all L2 chains when an L1 reorg occurs.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 52 - }, - "id": 30, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(kona_supervisor_reorg_success_total{namespace=\"kona-supervisor-supervisor-superchain\", chain_id=~\"$chain_id\"}[5m])", - "fullMetaSearch": false, - "includeNullMetadata": 
false, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Reorg Success Rate: $chain_id", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Shows the time taken for all L2 chains to reorg when an L1 reorg occurs.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 52 - }, - "id": 31, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "exemplar": false, - "expr": "kona_supervisor_reorg_duration_seconds{quantile=\"0.95\", method!=\"process_chain_reorg\", chain_id=~\"$chain_id\"}", - "format": "time_series", - "fullMetaSearch": false, - "includeNullMetadata": true, - "instant": false, - "legendFormat": "{{chain_id}}", - "range": 
true, - "refId": "A", - "useBackend": false - } - ], - "title": "Reorg Duration in seconds: $chain_id", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Shows the error rate of all L2 chains when an L1 reorg occurs.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 60 - }, - "id": 32, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(kona_supervisor_reorg_error_total{namespace=\"kona-supervisor-supervisor-superchain\", chain_id=~\"$chain_id\"}[5m])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Reorg Error Rate: $chain_id", - "type": 
"timeseries" - } - ], - "title": "Reorg", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 28 - }, - "id": 1, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Rate of successful blocks processed (per 5 min) ", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 29 - }, - "id": 2, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "rate(supervisor_block_processing_success_total{type=\"local_safe\",chain_id=~\"$chain_id\"}[5m])", - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Block Processing Speed: local_safe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - 
"description": "Rate of successful blocks processed (per 5 min) ", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 29 - }, - "id": 3, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "rate(supervisor_block_processing_success_total{type=\"local_unsafe\",chain_id=~\"$chain_id\"}[5m])", - "legendFormat": "{{chain_id}}", - "refId": "A" - } - ], - "title": "Block Processing Speed: local_unsafe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Rate of successful blocks processed (per 5 min) ", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - 
"barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 53 - }, - "id": 4, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "code", - "expr": "rate(supervisor_block_processing_success_total{type=\"finalized\",chain_id=~\"$chain_id\"}[5m])", - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Block Processing Speed: finalized", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Rate of successful blocks processed (per 5 min) ", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - 
"pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 53 - }, - "id": 5, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "rate(supervisor_block_processing_success_total{type=\"cross_safe\",chain_id=~\"$chain_id\"}[5m])", - "legendFormat": "{{chain_id}}", - "refId": "A" - } - ], - "title": "Block Processing Speed: cross_safe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Rate of successful blocks processed (per 5 min) ", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - 
"value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 61 - }, - "id": 6, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "rate(supervisor_block_processing_success_total{type=\"cross_unsafe\",chain_id=~\"$chain_id\"}[5m])", - "legendFormat": "{{chain_id}}", - "refId": "A" - } - ], - "title": "Block Processing Speed: cross_unsafe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Amount of time taken to process the blocks by supervisor since its block time", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 61 - }, - "id": 7, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": 
true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "supervisor_block_processing_latency_seconds{quantile=\"0.95\",type=\"local_safe\",chain_id=~\"$chain_id\"}", - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Block Processing Latency p95: local_safe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Amount of time taken to process the blocks by supervisor since its block time", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 69 - }, - "id": 8, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - 
"expr": "supervisor_block_processing_latency_seconds{quantile=\"0.95\",type=\"local_unsafe\",chain_id=~\"$chain_id\"}", - "legendFormat": "{{chain_id}}", - "refId": "A" - } - ], - "title": "Block Processing Latency p95: local_unsafe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Amount of time taken to process the blocks by supervisor since its block time", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 69 - }, - "id": 9, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "supervisor_block_processing_latency_seconds{quantile=\"0.95\",type=\"finalized\",chain_id=~\"$chain_id\"}", - "legendFormat": "{{chain_id}}", - "refId": "A" - } - ], - "title": "Block Processing Latency p95: finalized", - "type": 
"timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Amount of time taken to process the blocks by supervisor since its block time", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 77 - }, - "id": 10, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "supervisor_block_processing_latency_seconds{quantile=\"0.95\",type=\"cross_safe\",chain_id=~\"$chain_id\"}", - "legendFormat": "{{chain_id}}", - "refId": "A" - } - ], - "title": "Block Processing Latency p95: cross_safe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Amount of time taken to process the blocks by supervisor since its block time", - "fieldConfig": { - "defaults": { - "color": { - 
"mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 77 - }, - "id": 11, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "supervisor_block_processing_latency_seconds{quantile=\"0.95\",type=\"cross_unsafe\",chain_id=~\"$chain_id\"}", - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A" - } - ], - "title": "Block Processing Latency p95: cross_unsafe", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Rate of successful block invalidation (per 5 min) over time for each chain", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - 
"barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 85 - }, - "id": 44, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(supervisor_block_invalidation_success_total{namespace=\"kona-supervisor-supervisor-superchain\", chain_id=~\"$chain_id\"}[5m])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Block Invalidation Speed: $chain_id", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Amount of time taken in seconds to process the block invalidation query.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - 
"hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 85 - }, - "id": 46, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_block_invalidation_latency_seconds{quantile=\"0.95\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Block Invalidation Latency p95", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Rate of successful block replacement (per 5 min) over time for each chain", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - 
"lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 93 - }, - "id": 45, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(supervisor_block_replacement_success_total{namespace=\"kona-supervisor-supervisor-superchain\", chain_id=~\"$chain_id\"}[5m])", - "fullMetaSearch": false, - "includeNullMetadata": false, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Block Replacement Speed", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "description": "Amount of time taken in seconds to process the block replacement query.", - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - 
"spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - } - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 93 - }, - "id": 47, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "supervisor_block_replacement_latency_seconds{quantile=\"0.95\", chain_id=~\"$chain_id\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{chain_id}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Block Replacement Latency p95", - "type": "timeseries" - } - ], - "title": "Block Processing", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 29 - }, - "id": 12, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - 
}, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 30 - }, - "id": 13, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "rate(kona_supervisor_storage_success_total{chain_id=~\"$chain_id\"}[5m])", - "legendFormat": "{{method}}", - "range": true, - "refId": "A" - } - ], - "title": "Storage Success Rate (per Method)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 30 - }, - "id": 14, - "options": 
{ - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "rate(kona_supervisor_storage_error_total{chain_id=~\"$chain_id\"}[5m])", - "legendFormat": "{{method}}", - "refId": "A" - } - ], - "title": "Storage Error Rate (per Method)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 54 - }, - "id": 15, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": 
"kona_supervisor_storage_duration_seconds{quantile=\"0.95\",chain_id=~\"$chain_id\",method=~\"derived_to_source|latest_derived_block_at_source|latest_derivation_state|save_derived_block\"}", - "legendFormat": "{{method}}", - "refId": "A" - } - ], - "title": "Derivation Storage Latency p95", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 54 - }, - "id": 16, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "kona_supervisor_storage_duration_seconds{quantile=\"0.95\",chain_id=~\"$chain_id\",method=~\"get_latest_block|get_block|get_log|get_logs|store_block_logs\"}", - "legendFormat": "{{method}}", - "refId": "A" - } - ], - "title": "Log Storage Latency p95", - "type": "timeseries" - 
}, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 62 - }, - "id": 17, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "editorMode": "builder", - "expr": "kona_supervisor_storage_duration_seconds{quantile=\"0.95\",chain_id=~\"$chain_id\",method=~\"get_current_l1|get_safety_head_ref|get_super_head|update_current_l1|update_finalized_using_source|update_current_cross_unsafe|update_current_cross_safe\"}", - "legendFormat": "{{method}}", - "range": true, - "refId": "A" - } - ], - "title": "Ref Storage Latency p95", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - 
"axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 62 - }, - "id": 18, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "kona_supervisor_storage_duration_seconds{quantile=\"0.95\",chain_id=~\"$chain_id\",method=~\"update_finalized_l1|get_finalized_l1\"}", - "legendFormat": "{{method}}", - "refId": "A" - } - ], - "title": "Finalized Storage Latency p95", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "none" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 70 - }, - "id": 19, - "options": { - "displayMode": "basic", - "legend": { - "calcs": 
[], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "maxVizHeight": 300, - "minVizHeight": 16, - "minVizWidth": 8, - "namePlacement": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showUnfilled": true, - "sizing": "auto", - "valueMode": "text" - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "kona_supervisor_storage_table_entries{chain_id=~\"$chain_id\"}", - "legendFormat": "{{table}} - {{chain_id}}", - "refId": "A" - } - ], - "title": "Storage Table Entries (Bar Gauge, per Chain)", - "type": "bargauge" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "bytes" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 78 - }, - "id": 20, - "options": { - "displayMode": "basic", - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": false - }, - "maxVizHeight": 300, - "minVizHeight": 16, - "minVizWidth": 8, - "namePlacement": "auto", - "orientation": "horizontal", - "reduceOptions": { - "calcs": [ - "lastNotNull" - ], - "fields": "", - "values": false - }, - "showUnfilled": true, - "sizing": "auto", - "valueMode": "text" - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "kona_supervisor_storage_table_size{chain_id=~\"$chain_id\"}", - "legendFormat": "{{table}}", - "refId": "A" - } - ], - "title": "Storage Table Size (Bar Gauge, per Chain)", - "type": "bargauge" - } - ], - "title": "Storage", - "type": "row" - }, - { - 
"collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 30 - }, - "id": 21, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 31 - }, - "id": 22, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "rate(supervisor_rpc_requests_success_total[5m])", - "legendFormat": "{{method}}", - "refId": "A" - } - ], - "title": "Supervisor RPC Success Rate (per Method)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", 
- "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 31 - }, - "id": 23, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "rate(supervisor_rpc_requests_error_total[5m])", - "legendFormat": "{{method}}", - "refId": "A" - } - ], - "title": "Supervisor RPC Error Rate (per Method)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - 
"mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 39 - }, - "id": 24, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "expr": "supervisor_rpc_request_duration_seconds{quantile=\"0.95\"}", - "legendFormat": "{{method}}", - "refId": "A" - } - ], - "title": "Supervisor RPC Latency p95 (per Method)", - "type": "timeseries" - } - ], - "title": "Supervisor RPC", - "type": "row" - }, - { - "collapsed": true, - "gridPos": { - "h": 1, - "w": 24, - "x": 0, - "y": 31 - }, - "id": 25, - "panels": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - 
"unit": "ops" - }, - "overrides": [ - { - "__systemRef": "hideSeriesFrom", - "matcher": { - "id": "byNames", - "options": { - "mode": "exclude", - "names": [ - "block_ref_by_number: ws://op-cl-2151909-node1-op-node:9645" - ], - "prefix": "All except:", - "readOnly": true - } - }, - "properties": [ - { - "id": "custom.hideFrom", - "value": { - "legend": false, - "tooltip": false, - "viz": true - } - } - ] - } - ] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 0, - "y": 32 - }, - "id": 26, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(managed_node_rpc_requests_success_total{node=~\".*$chain_id.*\"}[5m])", - "fullMetaSearch": false, - "hide": false, - "includeNullMetadata": false, - "instant": false, - "legendFormat": "{{method}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Managed Node RPC Success Rate (per Method)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - 
"mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "ops" - }, - "overrides": [] - }, - "gridPos": { - "h": 8, - "w": 12, - "x": 12, - "y": 32 - }, - "id": 27, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "rate(managed_node_rpc_requests_error_total{node=~\".*$chain_id.*\"}[5m])", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{method}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Managed Node RPC Error Rate (per Method)", - "type": "timeseries" - }, - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "fieldConfig": { - "defaults": { - "color": { - "mode": "palette-classic" - }, - "custom": { - "axisBorderShow": false, - "axisCenteredZero": false, - "axisColorMode": "text", - "axisLabel": "", - "axisPlacement": "auto", - "barAlignment": 0, - "barWidthFactor": 0.6, - "drawStyle": "line", - "fillOpacity": 0, - "gradientMode": "none", - "hideFrom": { - "legend": false, - "tooltip": false, - "viz": false - }, - "insertNulls": false, - "lineInterpolation": "linear", - "lineWidth": 1, - "pointSize": 5, - "scaleDistribution": { - "type": "linear" - }, - "showPoints": "auto", - "spanNulls": false, - "stacking": { - "group": "A", - "mode": "none" - }, - "thresholdsStyle": { - "mode": "off" - } - }, - "mappings": [], - "thresholds": { - "mode": "absolute", - "steps": [ - { - "color": "green", - "value": null - }, - { - "color": "red", - "value": 80 - } - ] - }, - "unit": "s" - }, - "overrides": [] - }, 
- "gridPos": { - "h": 8, - "w": 24, - "x": 0, - "y": 40 - }, - "id": 28, - "options": { - "legend": { - "calcs": [], - "displayMode": "list", - "placement": "bottom", - "showLegend": true - }, - "tooltip": { - "hideZeros": false, - "mode": "single", - "sort": "none" - } - }, - "pluginVersion": "11.5.0", - "targets": [ - { - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "disableTextWrap": false, - "editorMode": "builder", - "expr": "managed_node_rpc_request_duration_seconds{quantile=\"0.95\", node=~\".*$chain_id.*\"}", - "fullMetaSearch": false, - "includeNullMetadata": true, - "legendFormat": "{{method}}", - "range": true, - "refId": "A", - "useBackend": false - } - ], - "title": "Managed Node RPC Latency p95 (per Method)", - "type": "timeseries" - } - ], - "title": "Managed Node RPC", - "type": "row" - } - ], - "refresh": "10s", - "schemaVersion": 40, - "tags": [], - "templating": { - "list": [ - { - "current": {}, - "datasource": { - "type": "prometheus", - "uid": "${DS_PROMETHEUS}" - }, - "definition": "label_values(chain_id)", - "includeAll": true, - "label": "Chain ID", - "multi": true, - "name": "chain_id", - "options": [], - "query": { - "qryType": 1, - "query": "label_values(chain_id)", - "refId": "PrometheusVariableQueryEditor-VariableQuery" - }, - "refresh": 2, - "type": "query" - } - ] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": {}, - "timezone": "", - "title": "Kona Supervisor Dashboard", - "uid": "fevgnwku1evi8b", - "version": 3, - "weekStart": "" -} \ No newline at end of file diff --git a/rust/kona/justfile b/rust/kona/justfile index eee5570d9c24c..aec749755c5f1 100644 --- a/rust/kona/justfile +++ b/rust/kona/justfile @@ -23,10 +23,6 @@ default: build-node: cargo build --release --bin kona-node -# Build the supervisor -build-supervisor: - cargo build --release --bin kona-supervisor - # Run all tests (excluding online tests) tests: test test-docs diff --git a/rust/kona/tests/justfile 
b/rust/kona/tests/justfile index e95516d0ba7d3..b1c4ede6f2f75 100644 --- a/rust/kona/tests/justfile +++ b/rust/kona/tests/justfile @@ -5,8 +5,8 @@ DEFAULT_OP_PACKAGE_PATH := "github.com/ethpandaops/optimism-package@998796c0f3bb build-devnet BINARY: #!/usr/bin/env bash - if [ {{BINARY}} != "node" ] && [ {{BINARY}} != "supervisor" ]; then - echo "Invalid binary specified. Must be either 'node' or 'supervisor'." + if [ {{BINARY}} != "node" ]; then + echo "Invalid binary specified. Must be 'node'." exit 1 fi @@ -65,9 +65,6 @@ test-e2e-sysgo BINARY="node" GO_PKG_NAME="node/common" DEVNET="simple-kona" FILT just build-kona echo "Building op-reth..." just build-reth - elif [ "{{BINARY}}" = "supervisor" ]; then - echo "Building supervisor..." - cd {{SOURCE}}/.. && just build-supervisor fi just test-e2e-sysgo-run {{BINARY}} {{GO_PKG_NAME}} {{DEVNET}} {{FILTER}} @@ -88,11 +85,8 @@ test-e2e-sysgo-run BINARY="node" GO_PKG_NAME="node/common" DEVNET="simple-kona" if [ $OP_RETH_EXEC_PATH == "" ]; then export OP_RETH_EXEC_PATH="{{SOURCE}}/../../reth/target/debug/op-reth" fi - elif [ "{{BINARY}}" = "supervisor" ]; then - export DEVSTACK_SUPERVISOR_KIND=kona - export KONA_SUPERVISOR_EXEC_PATH="{{SOURCE}}/../target/release/kona-supervisor" else - echo "Invalid BINARY specified. Must be either 'node' or 'supervisor'." + echo "Invalid BINARY specified. Must be 'node'." 
exit 1 fi diff --git a/rust/kona/tests/supervisor/l1reorg/init_test.go b/rust/kona/tests/supervisor/l1reorg/init_test.go deleted file mode 100644 index 10cfb13f2b953..0000000000000 --- a/rust/kona/tests/supervisor/l1reorg/init_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package reorgl1 - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleInterop()) -} diff --git a/rust/kona/tests/supervisor/l1reorg/reorg_test.go b/rust/kona/tests/supervisor/l1reorg/reorg_test.go deleted file mode 100644 index 57d09359b5f35..0000000000000 --- a/rust/kona/tests/supervisor/l1reorg/reorg_test.go +++ /dev/null @@ -1,150 +0,0 @@ -package reorgl1 - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum-optimism/optimism/rust/kona/tests/supervisor/utils" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type checksFunc func(t devtest.T, sys *presets.SimpleInterop) - -func TestL1Reorg(gt *testing.T) { - gt.Run("unsafe reorg", func(gt *testing.T) { - var crossSafeRef, localSafeRef, unsafeRef, reorgAfter eth.BlockID - pre := func(t devtest.T, sys *presets.SimpleInterop) { - ss := sys.Supervisor.FetchSyncStatus() - - crossSafeRef = ss.Chains[sys.L2ChainA.ChainID()].CrossSafe - localSafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalSafe - unsafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalUnsafe.ID() - gt.Logf("Pre:: CrossSafe: %s, LocalSafe: %s, Unsafe: %s", crossSafeRef, localSafeRef, unsafeRef) - - // Calculate the divergent block - blockRef, err := sys.Supervisor.Escape().QueryAPI().CrossDerivedToSource(t.Ctx(), sys.L2ChainA.ChainID(), 
localSafeRef) - assert.Nil(gt, err, "Failed to query cross derived to source") - reorgAfter = blockRef.ID() - } - post := func(t devtest.T, sys *presets.SimpleInterop) { - require.True(t, sys.L2ELA.IsCanonical(crossSafeRef), "Previous cross-safe block should still be canonical") - require.True(t, sys.L2ELA.IsCanonical(localSafeRef), "Previous local-safe block should still be canonical") - require.False(t, sys.L2ELA.IsCanonical(unsafeRef), "Previous unsafe block should have been reorged") - } - testL2ReorgAfterL1Reorg(gt, &reorgAfter, pre, post) - }) -} - -func testL2ReorgAfterL1Reorg(gt *testing.T, reorgAfter *eth.BlockID, preChecks, postChecks checksFunc) { - t := devtest.SerialT(gt) - ctx := t.Ctx() - - sys := presets.NewSimpleInterop(t) - trm := utils.NewTestReorgManager(t) - - sys.L1Network.WaitForBlock() - - trm.StopL1CL() - - // sequence some l1 blocks initially - for range 10 { - trm.GetBlockBuilder().BuildBlock(ctx, nil) - time.Sleep(5 * time.Second) - } - - // pre reorg trigger validations and checks - preChecks(t, sys) - - tip := sys.L1EL.BlockRefByLabel(eth.Unsafe).Number - - // create at least 5 blocks after the divergence point - for tip-reorgAfter.Number < 5 { - trm.GetBlockBuilder().BuildBlock(ctx, nil) - time.Sleep(5 * time.Second) - tip++ - } - - // Give some time so that those block are derived - time.Sleep(time.Second * 10) - - divergence := sys.L1EL.BlockRefByNumber(reorgAfter.Number + 1) - - tipL2_preReorg := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - - // reorg the L1 chain -- sequence an alternative L1 block from divergence block parent - t.Log("Building Divergence Chain from:", divergence) - trm.GetBlockBuilder().BuildBlock(ctx, &divergence.ParentHash) - - t.Log("Stopping the batchers") - sys.L2BatcherA.Stop() - sys.L2BatcherB.Stop() - - t.Log("Starting the batchers again") - sys.L2BatcherA.Start() - sys.L2BatcherB.Start() - - // Give some time to batcher catch up - time.Sleep(5 * time.Second) - - // Start sequential block building - err := 
trm.GetPOS().Start() - require.NoError(t, err, "Expected to be able to start POS") - - // Wait sometime(5*5 = 25 at least) so that pos can create required - time.Sleep(30 * time.Second) - - // confirm L1 reorged - sys.L1EL.ReorgTriggered(divergence, 5) - - // wait until L2 chain A cross-safe ref caught up to where it was before the reorg - sys.L2CLA.Reached(types.CrossSafe, tipL2_preReorg.Number, 100) - - // test that latest chain A unsafe is not referencing a reorged L1 block (through the L1Origin field) - require.Eventually(t, func() bool { - unsafe := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - - block, err := sys.L1EL.Escape().EthClient().InfoByNumber(ctx, unsafe.L1Origin.Number) - if err != nil { - sys.Log.Warn("failed to get L1 block info by number", "number", unsafe.L1Origin.Number, "err", err) - return false - } - - sys.Log.Info("current unsafe ref", "tip", unsafe, "tip_origin", unsafe.L1Origin, "l1blk", eth.InfoToL1BlockRef(block)) - - return block.Hash() == unsafe.L1Origin.Hash - }, 120*time.Second, 7*time.Second, "L1 block origin hash should match hash of block on L1 at that number. 
If not, it means there was a reorg, and L2 blocks L1Origin field is referencing a reorged block.") - - // confirm all L1Origin fields point to canonical blocks - require.Eventually(t, func() bool { - ref := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - var err error - - // wait until L2 chains' L1Origin points to a L1 block after the one that was reorged - if ref.L1Origin.Number < divergence.Number { - return false - } - - sys.Log.Info("L2 chain progressed, pointing to newer L1 block", "ref", ref, "ref_origin", ref.L1Origin, "divergence", divergence) - - for i := ref.Number; i > 0 && ref.L1Origin.Number >= divergence.Number; i-- { - ref, err = sys.L2ELA.Escape().L2EthClient().L2BlockRefByNumber(ctx, i) - if err != nil { - return false - } - - if !sys.L1EL.IsCanonical(ref.L1Origin) { - return false - } - } - - return true - }, 120*time.Second, 5*time.Second, "all L1Origin fields should point to canonical L1 blocks") - - // post reorg test validations and checks - postChecks(t, sys) -} diff --git a/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go b/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go deleted file mode 100644 index 2acdf2e5f2761..0000000000000 --- a/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go +++ /dev/null @@ -1,248 +0,0 @@ -package l2reorg - -import ( - "math/rand" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-service/bigs" - "github.com/ethereum-optimism/optimism/op-service/eth" - 
"github.com/ethereum-optimism/optimism/op-service/txintent" - "github.com/ethereum-optimism/optimism/op-service/txplan" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/stretchr/testify/require" -) - -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go#L25 -func TestReorgInitExecMsg(gt *testing.T) { - t := devtest.SerialT(gt) - ctx := t.Ctx() - - sys := presets.NewSimpleInterop(t) - l := sys.Log - - ia := sys.TestSequencer.Escape().ControlAPI(sys.L2ChainA.ChainID()) - - // three EOAs for triggering the init and exec interop txs, as well as a simple transfer tx - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - cathrine := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - - sys.L1Network.WaitForBlock() - sys.L2ChainA.WaitForBlock() - - // stop batchers on chain A and on chain B - sys.L2BatcherA.Stop() - sys.L2BatcherB.Stop() - - // deploy event logger on chain A - var eventLoggerAddress common.Address - { - tx := txplan.NewPlannedTx(txplan.Combine( - alice.Plan(), - txplan.WithData(common.FromHex(bindings.EventloggerBin)), - )) - res, err := tx.Included.Eval(ctx) - require.NoError(t, err) - - eventLoggerAddress = res.ContractAddress - l.Info("deployed EventLogger", "chainID", tx.ChainID.Value(), "address", eventLoggerAddress) - } - - sys.L1Network.WaitForBlock() - - var initTrigger *txintent.InitTrigger - // prepare init trigger (i.e. 
what logs to emit on chain A) - { - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - nTopics := 3 - lenData := 10 - initTrigger = interop.RandomInitTrigger(rng, eventLoggerAddress, nTopics, lenData) - - l.Info("created init trigger", "address", eventLoggerAddress, "topics", nTopics, "lenData", lenData) - } - - // wait for chain B to catch up to chain A if necessary - sys.L2ChainB.CatchUpTo(sys.L2ChainA) - - var initTx *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput] - var initReceipt *types.Receipt - // prepare and include initiating message on chain A - { - initTx = txintent.NewIntent[*txintent.InitTrigger, *txintent.InteropOutput](alice.Plan()) - initTx.Content.Set(initTrigger) - var err error - initReceipt, err = initTx.PlannedTx.Included.Eval(ctx) - require.NoError(t, err) - - l.Info("initiating message included", "chain", sys.L2ChainA.ChainID(), "block_number", initReceipt.BlockNumber, "block_hash", initReceipt.BlockHash, "now", time.Now().Unix()) - } - - // stop sequencer on chain A so that we later force a reorg/removal of the init msg - sys.L2CLA.StopSequencer() - - // at least one block between the init tx on chain A and the exec tx on chain B - sys.L2ChainB.WaitForBlock() - - var execTx *txintent.IntentTx[*txintent.ExecTrigger, *txintent.InteropOutput] - var execReceipt *types.Receipt - // prepare and include executing message on chain B - { - execTx = txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) - execTx.Content.DependOn(&initTx.Result) - // single event in tx so index is 0. 
ExecuteIndexed returns a lambda to transform InteropOutput to a new ExecTrigger - execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initTx.Result, 0)) - var err error - execReceipt, err = execTx.PlannedTx.Included.Eval(ctx) - require.NoError(t, err) - require.Equal(t, 1, len(execReceipt.Logs)) - - l.Info("executing message included", "chain", sys.L2ChainB.ChainID(), "block_number", execReceipt.BlockNumber, "block_hash", execReceipt.BlockHash, "now", time.Now().Unix()) - } - - // record divergence block numbers and original refs for future validation checks - var divergenceBlockNumber_A, divergenceBlockNumber_B uint64 - var originalRef_A, originalRef_B eth.L2BlockRef - - // sequence a conflicting block with a simple transfer tx, based on the parent of the parent of the unsafe head - { - var err error - divergenceBlockNumber_B = bigs.Uint64Strict(execReceipt.BlockNumber) - originalRef_B, err = sys.L2ELB.Escape().L2EthClient().L2BlockRefByHash(ctx, execReceipt.BlockHash) - require.NoError(t, err, "Expected to be able to call L2BlockRefByHash API, but got error") - - headToReorgA := initReceipt.BlockHash - headToReorgARef, err := sys.L2ELA.Escape().L2EthClient().L2BlockRefByHash(ctx, headToReorgA) - require.NoError(t, err, "Expected to be able to call L2BlockRefByHash API, but got error") - - divergenceBlockNumber_A = headToReorgARef.Number - originalRef_A = headToReorgARef - - parentOfHeadToReorgA := headToReorgARef.ParentID() - parentsL1Origin, err := sys.L2ELA.Escape().L2EthClient().L2BlockRefByHash(ctx, parentOfHeadToReorgA.Hash) - require.NoError(t, err, "Expected to be able to call L2BlockRefByHash API, but got error") - - nextL1Origin := parentsL1Origin.L1Origin.Number + 1 - l1Origin, err := sys.L1Network.Escape().L1ELNode(match.FirstL1EL).EthClient().InfoByNumber(ctx, nextL1Origin) - require.NoError(t, err, "Expected to get block number %v from L1 execution client", nextL1Origin) - l1OriginHash := l1Origin.Hash() - - l.Info("Sequencing a 
conflicting block", "chain", sys.L2ChainA.ChainID(), "newL1Origin", eth.ToBlockID(l1Origin), "headToReorgA", headToReorgARef, "parent", parentOfHeadToReorgA, "parent_l1_origin", parentsL1Origin.L1Origin) - - err = ia.New(ctx, seqtypes.BuildOpts{ - Parent: parentOfHeadToReorgA.Hash, - L1Origin: &l1OriginHash, - }) - require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") - - // include simple transfer tx in opened block - { - to := cathrine.PlanTransfer(alice.Address(), eth.OneGWei) - opt := txplan.Combine(to) - ptx := txplan.NewPlannedTx(opt) - signed_tx, err := ptx.Signed.Eval(ctx) - require.NoError(t, err, "Expected to be able to evaluate a planned transaction on op-test-sequencer, but got error") - txdata, err := signed_tx.MarshalBinary() - require.NoError(t, err, "Expected to be able to marshal a signed transaction on op-test-sequencer, but got error") - - err = ia.IncludeTx(ctx, txdata) - require.NoError(t, err, "Expected to be able to include a signed transaction on op-test-sequencer, but got error") - } - - err = ia.Next(ctx) - require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") - } - - // sequence a second block with op-test-sequencer - { - unsafe := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - l.Info("Current unsafe ref", "unsafeHead", unsafe) - err := ia.New(ctx, seqtypes.BuildOpts{ - Parent: unsafe.Hash, - L1Origin: nil, - }) - require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") - - err = ia.Next(ctx) - require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") - } - - // continue sequencing with op-node - sys.L2CLA.StartSequencer() - - // start batchers on chain A and on chain B - sys.L2BatcherA.Start() - sys.L2BatcherB.Start() - - // wait and confirm reorgs on chain A and B - dsl.CheckAll(t, - 
sys.L2ELA.ReorgTriggeredFn(eth.L2BlockRef{ - Number: divergenceBlockNumber_A, - Hash: originalRef_A.Hash, - ParentHash: originalRef_A.ParentID().Hash, - }, 30), - sys.L2ELB.ReorgTriggeredFn(eth.L2BlockRef{ - Number: divergenceBlockNumber_B, - Hash: originalRef_B.Hash, - ParentHash: originalRef_B.ParentID().Hash, - }, 30), - ) - - // executing tx should eventually be no longer confirmed on chain B - require.Eventually(t, func() bool { - receipt, err := sys.L2ELB.Escape().EthClient().TransactionReceipt(ctx, execReceipt.TxHash) - if err == nil || err.Error() != "not found" { // want to get "not found" error - return false - } - if receipt != nil { // want to get nil receipt - return false - } - return true - }, 60*time.Second, 3*time.Second, "Expected for the executing tx to be removed from chain B") - - err := wait.For(ctx, 5*time.Second, func() (bool, error) { - safeL2Head_supervisor_A := sys.Supervisor.SafeBlockID(sys.L2ChainA.ChainID()).Hash - safeL2Head_supervisor_B := sys.Supervisor.SafeBlockID(sys.L2ChainB.ChainID()).Hash - safeL2Head_sequencer_A := sys.L2CLA.SafeL2BlockRef() - safeL2Head_sequencer_B := sys.L2CLB.SafeL2BlockRef() - - if safeL2Head_sequencer_A.Number < divergenceBlockNumber_A { - l.Info("Safe ref number is still behind divergence block A number", "divergence", divergenceBlockNumber_A, "safe", safeL2Head_sequencer_A.Number) - return false, nil - } - - if safeL2Head_sequencer_B.Number < divergenceBlockNumber_B { - l.Info("Safe ref number is still behind divergence block B number", "divergence", divergenceBlockNumber_B, "safe", safeL2Head_sequencer_B.Number) - return false, nil - } - - if safeL2Head_sequencer_A.Hash.Cmp(safeL2Head_supervisor_A) != 0 { - l.Info("Safe ref still not the same on supervisor and sequencer A", "supervisor", safeL2Head_supervisor_A, "sequencer", safeL2Head_sequencer_A.Hash) - return false, nil - } - - if safeL2Head_sequencer_B.Hash.Cmp(safeL2Head_supervisor_B) != 0 { - l.Info("Safe ref still not the same on supervisor and 
sequencer B", "supervisor", safeL2Head_supervisor_B, "sequencer", safeL2Head_sequencer_B.Hash) - return false, nil - } - - l.Info("Safe ref the same across supervisor and sequencers", - "supervisor_A", safeL2Head_supervisor_A, - "sequencer_A", safeL2Head_sequencer_A.Hash, - "supervisor_B", safeL2Head_supervisor_B, - "sequencer_B", safeL2Head_sequencer_B.Hash) - - return true, nil - }) - require.NoError(t, err, "Expected to get same safe ref on both supervisor and sequencer eventually") -} diff --git a/rust/kona/tests/supervisor/l2reorg/init_test.go b/rust/kona/tests/supervisor/l2reorg/init_test.go deleted file mode 100644 index a20170b148c4d..0000000000000 --- a/rust/kona/tests/supervisor/l2reorg/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package l2reorg - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // Other setups may be added here, hydrated from the same orchestrator - presets.DoMain(m, presets.WithSimpleInterop()) -} diff --git a/rust/kona/tests/supervisor/l2reorg/invalid_exec_msgs_test.go b/rust/kona/tests/supervisor/l2reorg/invalid_exec_msgs_test.go deleted file mode 100644 index e082a6cc4b1f3..0000000000000 --- a/rust/kona/tests/supervisor/l2reorg/invalid_exec_msgs_test.go +++ /dev/null @@ -1,244 +0,0 @@ -package l2reorg - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/txintent" - 
"github.com/ethereum-optimism/optimism/op-service/txplan" - suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/stretchr/testify/require" -) - -// TestReorgInvalidExecMsgs tests that the supervisor reorgs the chain when an invalid exec msg is included -// Each subtest runs a test with a different invalid message, by modifying the message in the txModifierFn -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go#L28 -func TestReorgInvalidExecMsgs(gt *testing.T) { - gt.Run("invalid log index", func(gt *testing.T) { - testReorgInvalidExecMsg(gt, func(msg *suptypes.Message) { - msg.Identifier.LogIndex = 1024 - }) - }) - - gt.Run("invalid block number", func(gt *testing.T) { - testReorgInvalidExecMsg(gt, func(msg *suptypes.Message) { - msg.Identifier.BlockNumber = msg.Identifier.BlockNumber - 1 - }) - }) - - gt.Run("invalid chain id", func(gt *testing.T) { - testReorgInvalidExecMsg(gt, func(msg *suptypes.Message) { - msg.Identifier.ChainID = eth.ChainIDFromUInt64(1024) - }) - }) -} - -func testReorgInvalidExecMsg(gt *testing.T, txModifierFn func(msg *suptypes.Message)) { - t := devtest.SerialT(gt) - ctx := t.Ctx() - - sys := presets.NewSimpleInterop(t) - l := sys.Log - - ia := sys.TestSequencer.Escape().ControlAPI(sys.L2ChainA.ChainID()) - - // three EOAs for triggering the init and exec interop txs, as well as a simple transfer tx - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - cathrine := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - - sys.L1Network.WaitForBlock() - sys.L2ChainA.WaitForBlock() - - // stop batcher on chain A - sys.L2BatcherA.Stop() - - // deploy event logger on chain B - var 
eventLoggerAddress common.Address - { - tx := txplan.NewPlannedTx(txplan.Combine( - bob.Plan(), - txplan.WithData(common.FromHex(bindings.EventloggerBin)), - )) - res, err := tx.Included.Eval(ctx) - require.NoError(t, err) - - eventLoggerAddress = res.ContractAddress - l.Info("deployed EventLogger", "chainID", tx.ChainID.Value(), "address", eventLoggerAddress) - } - - sys.L1Network.WaitForBlock() - - var initTrigger *txintent.InitTrigger - // prepare init trigger (i.e. what logs to emit on chain A) - { - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - nTopics := 3 - lenData := 10 - initTrigger = interop.RandomInitTrigger(rng, eventLoggerAddress, nTopics, lenData) - - l.Info("created init trigger", "address", eventLoggerAddress, "topics", nTopics, "lenData", lenData) - } - - // wait for chain B to catch up to chain A if necessary - sys.L2ChainB.CatchUpTo(sys.L2ChainA) - - var initTx *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput] - var initReceipt *types.Receipt - // prepare and include initiating message on chain B - { - initTx = txintent.NewIntent[*txintent.InitTrigger, *txintent.InteropOutput](bob.Plan()) - initTx.Content.Set(initTrigger) - var err error - initReceipt, err = initTx.PlannedTx.Included.Eval(ctx) - require.NoError(t, err) - - l.Info("initiating message included in chain B", "chain", sys.L2ChainB.ChainID(), "block_number", initReceipt.BlockNumber, "block_hash", initReceipt.BlockHash, "now", time.Now().Unix()) - } - - // at least one block between the init tx on chain B and the exec tx on chain A - sys.L2ChainA.WaitForBlock() - - // stop sequencer on chain A so that we later force include an invalid exec msg - latestUnsafe_A := sys.L2CLA.StopSequencer() - - var execTx *txintent.IntentTx[*txintent.ExecTrigger, *txintent.InteropOutput] - var execSignedTx *types.Transaction - var execTxEncoded []byte - // prepare and include invalid executing message on chain B via the op-test-sequencer (no other way to force-include an 
invalid message) - { - execTx = txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](alice.Plan()) - execTx.Content.DependOn(&initTx.Result) - // single event in tx so index is 0. - index := 0 - // lambda to transform InteropOutput to a new broken ExecTrigger - execTx.Content.Fn(func(ctx context.Context) (*txintent.ExecTrigger, error) { - events := initTx.Result.Value() - if x := len(events.Entries); x <= index { - return nil, fmt.Errorf("invalid index: %d, only have %d events", index, x) - } - msg := events.Entries[index] - // modify the message in order to make it invalid - txModifierFn(&msg) - return &txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, - Msg: msg, - }, nil - }) - - var err error - execSignedTx, err = execTx.PlannedTx.Signed.Eval(ctx) - require.NoError(t, err) - - l.Info("executing message signed", "to", execSignedTx.To(), "nonce", execSignedTx.Nonce(), "data", len(execSignedTx.Data())) - - execTxEncoded, err = execSignedTx.MarshalBinary() - require.NoError(t, err, "Expected to be able to marshal a signed transaction on op-test-sequencer, but got error") - } - - // sequence a new block with an invalid executing msg on chain A - { - l.Info("Building chain A with op-test-sequencer, and include invalid exec msg", "chain", sys.L2ChainA.ChainID(), "unsafeHead", latestUnsafe_A) - - err := ia.New(ctx, seqtypes.BuildOpts{ - Parent: latestUnsafe_A, - L1Origin: nil, - }) - require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") - - // include invalid executing msg in opened block - err = ia.IncludeTx(ctx, execTxEncoded) - require.NoError(t, err, "Expected to be able to include a signed transaction on op-test-sequencer, but got error") - - err = ia.Next(ctx) - require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") - } - - // record divergence block numbers and original refs for future validation checks - var 
divergenceBlockNumber_A uint64 - var originalHash_A common.Hash - var originalParentHash_A common.Hash - // sequence a second block with op-test-sequencer - { - currentUnsafeRef := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - - l.Info("Unsafe head after invalid exec msg has been included in chain A", "chain", sys.L2ChainA.ChainID(), "unsafeHead", currentUnsafeRef, "parent", currentUnsafeRef.ParentID()) - - divergenceBlockNumber_A = currentUnsafeRef.Number - originalHash_A = currentUnsafeRef.Hash - originalParentHash_A = currentUnsafeRef.ParentHash - l.Info("Continue building chain A with another block with op-test-sequencer", "chain", sys.L2ChainA.ChainID(), "unsafeHead", currentUnsafeRef, "parent", currentUnsafeRef.ParentID()) - err := ia.New(ctx, seqtypes.BuildOpts{ - Parent: currentUnsafeRef.Hash, - L1Origin: nil, - }) - require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") - time.Sleep(2 * time.Second) - - // include simple transfer tx in opened block - { - to := cathrine.PlanTransfer(alice.Address(), eth.OneGWei) - opt := txplan.Combine(to) - ptx := txplan.NewPlannedTx(opt) - signed_tx, err := ptx.Signed.Eval(ctx) - require.NoError(t, err, "Expected to be able to evaluate a planned transaction on op-test-sequencer, but got error") - txdata, err := signed_tx.MarshalBinary() - require.NoError(t, err, "Expected to be able to marshal a signed transaction on op-test-sequencer, but got error") - - err = ia.IncludeTx(ctx, txdata) - require.NoError(t, err, "Expected to be able to include a signed transaction on op-test-sequencer, but got error") - } - - err = ia.Next(ctx) - require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") - time.Sleep(2 * time.Second) - } - - // continue sequencing with op-node - sys.L2CLA.StartSequencer() - - // start batcher on chain A - sys.L2BatcherA.Start() - - // wait for reorg on chain A - 
sys.L2ELA.ReorgTriggered(eth.L2BlockRef{ - Number: divergenceBlockNumber_A, - Hash: originalHash_A, - ParentHash: originalParentHash_A, - }, 30) - - err := wait.For(ctx, 5*time.Second, func() (bool, error) { - safeL2Head_supervisor_A := sys.Supervisor.SafeBlockID(sys.L2ChainA.ChainID()).Hash - safeL2Head_sequencer_A := sys.L2CLA.SafeL2BlockRef() - - if safeL2Head_sequencer_A.Number < divergenceBlockNumber_A { - l.Info("Safe ref number is still behind divergence block A number", "divergence", divergenceBlockNumber_A, "safe", safeL2Head_sequencer_A.Number) - return false, nil - } - - if safeL2Head_sequencer_A.Hash.Cmp(safeL2Head_supervisor_A) != 0 { - l.Info("Safe ref still not the same on supervisor and sequencer A", "supervisor", safeL2Head_supervisor_A, "sequencer", safeL2Head_sequencer_A.Hash) - return false, nil - } - - l.Info("Safe ref the same across supervisor and sequencers", - "supervisor_A", safeL2Head_supervisor_A, - "sequencer_A", safeL2Head_sequencer_A.Hash) - - return true, nil - }) - require.NoError(t, err, "Expected to get same safe ref on both supervisor and sequencer eventually") -} diff --git a/rust/kona/tests/supervisor/l2reorg/unsafe_head_test.go b/rust/kona/tests/supervisor/l2reorg/unsafe_head_test.go deleted file mode 100644 index a6c1411952f3c..0000000000000 --- a/rust/kona/tests/supervisor/l2reorg/unsafe_head_test.go +++ /dev/null @@ -1,134 +0,0 @@ -package l2reorg - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/txplan" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" - "github.com/stretchr/testify/require" -) - -// TestReorgUnsafeHead starts an interop chain with an op-test-sequencer, which takes control over sequencing the L2 chain 
and introduces a reorg on the unsafe head -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/reorgs/unsafe_head_test.go#L17 -func TestReorgUnsafeHead(gt *testing.T) { - t := devtest.SerialT(gt) - ctx := t.Ctx() - - sys := presets.NewSimpleInterop(t) - l := sys.Log - - ia := sys.TestSequencer.Escape().ControlAPI(sys.L2ChainA.ChainID()) - - // stop batcher on chain A - sys.L2BatcherA.Stop() - - // two EOAs for a sample transfer tx used later in a conflicting block - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.Wallet.NewEOA(sys.L2ELA) - - sys.L1Network.WaitForBlock() - - sys.L2ChainA.WaitForBlock() - // waiting for two blocks in order to make sure we are not jumping ahead of a L1 origin (i.e. can't build a chain with L1Origin gaps) - sys.L2ChainA.WaitForBlock() - sys.L2ChainA.WaitForBlock() - - unsafeHead := sys.L2CLA.StopSequencer() - - var divergenceBlockNumber_A uint64 - var originalRef_A eth.L2BlockRef - // prepare and sequence a conflicting block for the L2A chain - { - unsafeHeadRef := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - - l.Info("Current unsafe ref", "unsafeHead", unsafeHead, "parent", unsafeHeadRef.ParentID().Hash, "l1_origin", unsafeHeadRef.L1Origin) - - l.Info("Expect to reorg the chain on current unsafe block", "number", unsafeHeadRef.Number, "head", unsafeHead, "parent", unsafeHeadRef.ParentID().Hash) - divergenceBlockNumber_A = unsafeHeadRef.Number - originalRef_A = unsafeHeadRef - - parentOfUnsafeHead := unsafeHeadRef.ParentID() - - l.Info("Sequencing a conflicting block", "unsafeHead", unsafeHeadRef, "parent", parentOfUnsafeHead) - - // sequence a conflicting block with a simple transfer tx, based on the parent of the parent of the unsafe head - { - err := ia.New(ctx, seqtypes.BuildOpts{ - Parent: parentOfUnsafeHead.Hash, - L1Origin: nil, - }) - require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got 
error") - - // include simple transfer tx in opened block - { - to := alice.PlanTransfer(bob.Address(), eth.OneGWei) - opt := txplan.Combine(to) - ptx := txplan.NewPlannedTx(opt) - signed_tx, err := ptx.Signed.Eval(ctx) - require.NoError(t, err, "Expected to be able to evaluate a planned transaction on op-test-sequencer, but got error") - txdata, err := signed_tx.MarshalBinary() - require.NoError(t, err, "Expected to be able to marshal a signed transaction on op-test-sequencer, but got error") - - err = ia.IncludeTx(ctx, txdata) - require.NoError(t, err, "Expected to be able to include a signed transaction on op-test-sequencer, but got error") - } - - err = ia.Next(ctx) - require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") - } - } - - // start batcher on chain A - sys.L2BatcherA.Start() - - // sequence a second block with op-test-sequencer (no L1 origin override) - { - l.Info("Sequencing with op-test-sequencer (no L1 origin override)") - err := ia.New(ctx, seqtypes.BuildOpts{ - Parent: sys.L2ELA.BlockRefByLabel(eth.Unsafe).Hash, - L1Origin: nil, - }) - require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") - time.Sleep(2 * time.Second) - - err = ia.Next(ctx) - require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") - time.Sleep(2 * time.Second) - } - - // continue sequencing with consensus node (op-node) - sys.L2CLA.StartSequencer() - - sys.L2ChainA.WaitForBlock() - - reorgedRef_A, err := sys.L2ELA.Escape().EthClient().BlockRefByNumber(ctx, divergenceBlockNumber_A) - require.NoError(t, err, "Expected to be able to call BlockRefByNumber API, but got error") - - l.Info("Reorged chain A on divergence block number (prior the reorg)", "number", divergenceBlockNumber_A, "head", originalRef_A.Hash, "parent", originalRef_A.ParentID().Hash) - l.Info("Reorged chain A on divergence block number 
(after the reorg)", "number", divergenceBlockNumber_A, "head", reorgedRef_A.Hash, "parent", reorgedRef_A.ParentID().Hash) - require.NotEqual(t, originalRef_A.Hash, reorgedRef_A.Hash, "Expected to get different heads on divergence block number, but got the same hash, so no reorg happened on chain A") - require.Equal(t, originalRef_A.ParentID().Hash, reorgedRef_A.ParentHash, "Expected to get same parent hashes on divergence block number, but got different hashes") - - err = wait.For(ctx, 5*time.Second, func() (bool, error) { - safeL2Head_A_supervisor := sys.Supervisor.SafeBlockID(sys.L2ChainA.ChainID()).Hash - safeL2Head_A_sequencer := sys.L2CLA.SafeL2BlockRef() - - if safeL2Head_A_sequencer.Number <= divergenceBlockNumber_A { - l.Info("Safe ref number is still behind divergence block number", "divergence", divergenceBlockNumber_A, "safe", safeL2Head_A_sequencer.Number) - return false, nil - } - if safeL2Head_A_sequencer.Hash.Cmp(safeL2Head_A_supervisor) != 0 { - l.Info("Safe ref still not the same on supervisor and sequencer", "supervisor", safeL2Head_A_supervisor, "sequencer", safeL2Head_A_sequencer.Hash) - return false, nil - } - l.Info("Safe ref is the same on both supervisor and sequencer", "supervisor", safeL2Head_A_supervisor, "sequencer", safeL2Head_A_sequencer.Hash) - - return true, nil - }) - require.NoError(t, err, "Expected to get same safe ref on both supervisor and sequencer eventually") -} diff --git a/rust/kona/tests/supervisor/l2reorgAfterL1reorg/init_test.go b/rust/kona/tests/supervisor/l2reorgAfterL1reorg/init_test.go deleted file mode 100644 index c2ebd6ccd5924..0000000000000 --- a/rust/kona/tests/supervisor/l2reorgAfterL1reorg/init_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package sysgo - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - spresets "github.com/ethereum-optimism/optimism/rust/kona/tests/supervisor/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m 
*testing.M) { - // Other setups may be added here, hydrated from the same orchestrator - presets.DoMain(m, spresets.WithSimpleInteropMinimal()) -} diff --git a/rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go b/rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go deleted file mode 100644 index 7a99149d3359f..0000000000000 --- a/rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go +++ /dev/null @@ -1,165 +0,0 @@ -package sysgo - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -type checksFunc func(t devtest.T, sys *presets.SimpleInterop) - -func TestL2ReorgAfterL1Reorg(gt *testing.T) { - gt.Run("unsafe reorg", func(gt *testing.T) { - var crossSafeRef, localSafeRef, unsafeRef eth.BlockID - pre := func(t devtest.T, sys *presets.SimpleInterop) { - ss := sys.Supervisor.FetchSyncStatus() - crossSafeRef = ss.Chains[sys.L2ChainA.ChainID()].CrossSafe - localSafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalSafe - unsafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalUnsafe.ID() - } - post := func(t devtest.T, sys *presets.SimpleInterop) { - require.True(t, sys.L2ELA.IsCanonical(crossSafeRef), "Previous cross-safe block should still be canonical") - require.True(t, sys.L2ELA.IsCanonical(localSafeRef), "Previous local-safe block should still be canonical") - require.False(t, sys.L2ELA.IsCanonical(unsafeRef), "Previous unsafe block should have been reorged") - } - 
testL2ReorgAfterL1Reorg(gt, 3, pre, post) - }) - - gt.Run("unsafe, local-safe, cross-unsafe, cross-safe reorgs", func(gt *testing.T) { - var crossSafeRef, crossUnsafeRef, localSafeRef, unsafeRef eth.BlockID - pre := func(t devtest.T, sys *presets.SimpleInterop) { - ss := sys.Supervisor.FetchSyncStatus() - crossUnsafeRef = ss.Chains[sys.L2ChainA.ChainID()].CrossUnsafe - crossSafeRef = ss.Chains[sys.L2ChainA.ChainID()].CrossSafe - localSafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalSafe - unsafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalUnsafe.ID() - } - post := func(t devtest.T, sys *presets.SimpleInterop) { - require.False(t, sys.L2ELA.IsCanonical(crossSafeRef), "Previous cross-safe block should have been reorged") - require.False(t, sys.L2ELA.IsCanonical(crossUnsafeRef), "Previous cross-unsafe block should have been reorged") - require.False(t, sys.L2ELA.IsCanonical(localSafeRef), "Previous local-safe block should have been reorged") - require.False(t, sys.L2ELA.IsCanonical(unsafeRef), "Previous unsafe block should have been reorged") - } - testL2ReorgAfterL1Reorg(gt, 10, pre, post) - }) -} - -// testL2ReorgAfterL1Reorg tests that the L2 chain reorgs after an L1 reorg, and takes n, number of blocks to reorg, as parameter -// for unsafe reorgs - n must be at least >= confDepth, which is 2 in our test deployments -// for cross-safe reorgs - n must be at least >= safe distance, which is 10 in our test deployments (set in -// op-e2e/e2eutils/geth/geth.go when initialising FakePoS) -// pre- and post-checks are sanity checks to ensure that the blocks we expected to be reorged were indeed reorged or not -func testL2ReorgAfterL1Reorg(gt *testing.T, n int, preChecks, postChecks checksFunc) { - t := devtest.SerialT(gt) - ctx := t.Ctx() - - sys := presets.NewSimpleInterop(t) - ts := sys.TestSequencer.Escape().ControlAPI(sys.L1Network.ChainID()) - - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - - sys.L1Network.WaitForBlock() - - 
sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) - - // sequence a few L1 and L2 blocks - for range n + 1 { - sequenceL1Block(t, ts, common.Hash{}) - - sys.L2ChainA.WaitForBlock() - sys.L2ChainA.WaitForBlock() - } - - // select a divergence block to reorg from - var divergence eth.L1BlockRef - { - tip := sys.L1EL.BlockRefByLabel(eth.Unsafe) - require.Greater(t, tip.Number, uint64(n), "n is larger than L1 tip, cannot reorg out block number `tip-n`") - - divergence = sys.L1EL.BlockRefByNumber(tip.Number - uint64(n)) - } - - // print the chains before sequencing an alternative L1 block - sys.L2ChainA.PrintChain() - sys.L1Network.PrintChain() - - // pre reorg trigger validations and checks - preChecks(t, sys) - - tipL2_preReorg := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - - // reorg the L1 chain -- sequence an alternative L1 block from divergence block parent - sequenceL1Block(t, ts, divergence.ParentHash) - - // continue building on the alternative L1 chain - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) - - // confirm L1 reorged - sys.L1EL.ReorgTriggered(divergence, 5) - - // wait until L2 chain A cross-safe ref caught up to where it was before the reorg - sys.L2CLA.Reached(types.CrossSafe, tipL2_preReorg.Number, 50) - - // test that latest chain A unsafe is not referencing a reorged L1 block (through the L1Origin field) - require.Eventually(t, func() bool { - unsafe := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - - block, err := sys.L1EL.Escape().EthClient().InfoByNumber(ctx, unsafe.L1Origin.Number) - if err != nil { - sys.Log.Warn("failed to get L1 block info by number", "number", unsafe.L1Origin.Number, "err", err) - return false - } - - sys.Log.Info("current unsafe ref", "tip", unsafe, "tip_origin", unsafe.L1Origin, "l1blk", eth.InfoToL1BlockRef(block)) - - // print the chains so we have information to debug if the test fails - sys.L2ChainA.PrintChain() - sys.L1Network.PrintChain() - - return block.Hash() == unsafe.L1Origin.Hash - }, 120*time.Second, 
7*time.Second, "L1 block origin hash should match hash of block on L1 at that number. If not, it means there was a reorg, and L2 blocks L1Origin field is referencing a reorged block.") - - // confirm all L1Origin fields point to canonical blocks - require.Eventually(t, func() bool { - ref := sys.L2ELA.BlockRefByLabel(eth.Unsafe) - var err error - - // wait until L2 chains' L1Origin points to a L1 block after the one that was reorged - if ref.L1Origin.Number < divergence.Number { - return false - } - - sys.Log.Info("L2 chain progressed, pointing to newer L1 block", "ref", ref, "ref_origin", ref.L1Origin, "divergence", divergence) - - for i := ref.Number; i > 0 && ref.L1Origin.Number >= divergence.Number; i-- { - ref, err = sys.L2ELA.Escape().L2EthClient().L2BlockRefByNumber(ctx, i) - if err != nil { - return false - } - - if !sys.L1EL.IsCanonical(ref.L1Origin) { - return false - } - } - - return true - }, 120*time.Second, 5*time.Second, "all L1Origin fields should point to canonical L1 blocks") - - // post reorg test validations and checks - postChecks(t, sys) -} - -func sequenceL1Block(t devtest.T, ts apis.TestSequencerControlAPI, parent common.Hash) { - require.NoError(t, ts.New(t.Ctx(), seqtypes.BuildOpts{Parent: parent})) - require.NoError(t, ts.Next(t.Ctx())) -} diff --git a/rust/kona/tests/supervisor/message/init_test.go b/rust/kona/tests/supervisor/message/init_test.go deleted file mode 100644 index d51f54da12053..0000000000000 --- a/rust/kona/tests/supervisor/message/init_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package message - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -const ( - // SLEEP_BACKEND_READY is the time to wait for the backend to be ready - SLEEP_BACKEND_READY = 60 * time.Second -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // sleep to ensure the backend is ready - time.Sleep(SLEEP_BACKEND_READY) - - presets.DoMain(m, 
presets.WithSimpleInterop()) -} diff --git a/rust/kona/tests/supervisor/message/interop_contract_test.go b/rust/kona/tests/supervisor/message/interop_contract_test.go deleted file mode 100644 index 659f84ea7fe65..0000000000000 --- a/rust/kona/tests/supervisor/message/interop_contract_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package message - -import ( - "math/rand" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testutils" - "github.com/ethereum-optimism/optimism/op-service/txintent" - "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" - "github.com/ethereum/go-ethereum/common" -) - -// TODO: Run the test directly from the https://github.com/ethereum-optimism/optimism/tree/develop/op-acceptance-tests - -// TestRegularMessage checks that messages can be sent and relayed via L2ToL2CrossDomainMessenger -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/contract/interop_contract_test.go -func TestRegularMessage(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - require := sys.T.Require() - logger := t.Logger() - rng := rand.New(rand.NewSource(1234)) - - alice, bob := sys.FunderA.NewFundedEOA(eth.OneTenthEther), sys.FunderB.NewFundedEOA(eth.OneTenthEther) - - // deploy event logger at chain B - eventLoggerAddress := bob.DeployEventLogger() - // only use the binding to generate calldata - eventLogger := bindings.NewBindings[bindings.EventLogger]() - // manually build topics and data for EventLogger - topics := []eth.Bytes32{} - for range rng.Intn(5) { - var topic [32]byte - copy(topic[:], testutils.RandomData(rng, 32)) - topics = append(topics, topic) - } - data := testutils.RandomData(rng, 
rng.Intn(30)) - - calldata, err := eventLogger.EmitLog(topics, data).EncodeInputLambda() - require.NoError(err, "failed to prepare calldata") - - logger.Info("Send message", "address", eventLoggerAddress, "topicCnt", len(topics), "dataLen", len(data)) - trigger := &txintent.SendTrigger{ - Emitter: constants.L2ToL2CrossDomainMessenger, - DestChainID: bob.ChainID(), - Target: eventLoggerAddress, - RelayedCalldata: calldata, - } - // Intent to send message on chain A - txA := txintent.NewIntent[*txintent.SendTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(trigger) - - sendMsgReceipt, err := txA.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err, "send msg receipt not found") - require.Equal(1, len(sendMsgReceipt.Logs)) // SentMessage event - require.Equal(constants.L2ToL2CrossDomainMessenger, sendMsgReceipt.Logs[0].Address) - - // Make sure supervisor syncs the chain A events - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Intent to relay message on chain B - txB := txintent.NewIntent[*txintent.RelayTrigger, *txintent.InteropOutput](bob.Plan()) - txB.Content.DependOn(&txA.Result) - idx := 0 - txB.Content.Fn(txintent.RelayIndexed(constants.L2ToL2CrossDomainMessenger, &txA.Result, &txA.PlannedTx.Included, idx)) - - relayMsgReceipt, err := txB.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err, "relay msg receipt not found") - - // ExecutingMessage, EventLogger, RelayedMessage Events - require.Equal(3, len(relayMsgReceipt.Logs)) - for logIdx, addr := range []common.Address{constants.CrossL2Inbox, eventLoggerAddress, constants.L2ToL2CrossDomainMessenger} { - require.Equal(addr, relayMsgReceipt.Logs[logIdx].Address) - } - // EventLogger topics and data - eventLog := relayMsgReceipt.Logs[1] - require.Equal(len(topics), len(eventLog.Topics)) - for topicIdx := range len(eventLog.Topics) { - require.Equal(topics[topicIdx][:], eventLog.Topics[topicIdx].Bytes()) - } - require.Equal(data, eventLog.Data) -} diff --git 
a/rust/kona/tests/supervisor/message/interop_happy_tx_test.go b/rust/kona/tests/supervisor/message/interop_happy_tx_test.go deleted file mode 100644 index 44250895a72e9..0000000000000 --- a/rust/kona/tests/supervisor/message/interop_happy_tx_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package message - -import ( - "math/rand" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/eth" - stypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// TODO: Run the test directly from the https://github.com/ethereum-optimism/optimism/tree/develop/op-acceptance-tests - -// TestInteropHappyTx is testing that a valid init message, followed by a valid exec message are correctly -// included in two L2 chains and that the cross-safe ref for both of them progresses as expected beyond -// the block number where the messages were included -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go -func TestInteropHappyTx(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - - // two EOAs for triggering the init and exec interop txs - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - - eventLoggerAddress := alice.DeployEventLogger() - - // wait for chain B to catch up to chain A if necessary - sys.L2ChainB.CatchUpTo(sys.L2ChainA) - - // send initiating message on chain A - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - initMsg := alice.SendInitMessage(interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(3), rng.Intn(10))) - - // at least one block between the init tx on chain A and the exec 
tx on chain B - sys.L2ChainB.WaitForBlock() - - // send executing message on chain B - execMsg := bob.SendExecMessage(initMsg) - - // confirm that the cross-safe safety passed init and exec receipts and that blocks were not reorged - dsl.CheckAll(t, - sys.L2CLA.ReachedRefFn(stypes.CrossSafe, initMsg.BlockID(), 500), - sys.L2CLB.ReachedRefFn(stypes.CrossSafe, execMsg.BlockID(), 500), - ) -} diff --git a/rust/kona/tests/supervisor/message/interop_msg_test.go b/rust/kona/tests/supervisor/message/interop_msg_test.go deleted file mode 100644 index 5f18eb2352449..0000000000000 --- a/rust/kona/tests/supervisor/message/interop_msg_test.go +++ /dev/null @@ -1,646 +0,0 @@ -package message - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-core/predeploys" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/dsl/contract" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/plan" - "github.com/ethereum-optimism/optimism/op-service/testutils" - "github.com/ethereum-optimism/optimism/op-service/txintent" - "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" - "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" - "github.com/ethereum-optimism/optimism/op-service/txplan" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "golang.org/x/sync/errgroup" - - "github.com/ethereum-optimism/optimism/op-service/bigs" - suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// TODO: Run the test 
directly from the https://github.com/ethereum-optimism/optimism/tree/develop/op-acceptance-tests - -// TestInitExecMsg tests basic interop messaging -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L33 -func TestInitExecMsg(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - rng := rand.New(rand.NewSource(1234)) - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - - eventLoggerAddress := alice.DeployEventLogger() - // Trigger random init message at chain A - initMsg := alice.SendInitMessage(interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(5), rng.Intn(30))) - // Make sure supervisor indexes block which includes init message - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - // Single event in tx so index is 0 - bob.SendExecMessage(initMsg) -} - -// TestInitExecMsgWithDSL tests basic interop messaging with contract DSL -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L50 -func TestInitExecMsgWithDSL(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - rng := rand.New(rand.NewSource(1234)) - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - require := t.Require() - - eventLoggerAddress := alice.DeployEventLogger() - - clientA := sys.L2ELA.Escape().EthClient() - clientB := sys.L2ELB.Escape().EthClient() - - // Initialize eventLogger binding - eventLogger := bindings.NewBindings[bindings.EventLogger](bindings.WithClient(clientA), bindings.WithTest(t), bindings.WithTo(eventLoggerAddress)) - // Initialize crossL2Inbox binding - crossL2Inbox := bindings.NewBindings[bindings.CrossL2Inbox](bindings.WithClient(clientB), bindings.WithTest(t), 
bindings.WithTo(common.HexToAddress(predeploys.CrossL2Inbox))) - - // manually build topics and data for EventLogger - topics := []eth.Bytes32{} - for range rng.Intn(5) { - var topic [32]byte - copy(topic[:], testutils.RandomData(rng, 32)) - topics = append(topics, topic) - } - data := testutils.RandomData(rng, rng.Intn(30)) - - // Write: Alice triggers initiating message - receipt := contract.Write(alice, eventLogger.EmitLog(topics, data)) - block, err := clientA.BlockRefByNumber(t.Ctx(), bigs.Uint64Strict(receipt.BlockNumber)) - require.NoError(err) - - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Manually build identifier, message, accesslist for executing message - // Single event in tx so index is 0 - logIdx := uint32(0) - payload := suptypes.LogToMessagePayload(receipt.Logs[logIdx]) - identifier := suptypes.Identifier{ - Origin: eventLoggerAddress, - BlockNumber: bigs.Uint64Strict(receipt.BlockNumber), - LogIndex: logIdx, - Timestamp: block.Time, - ChainID: sys.L2ELA.ChainID(), - } - payloadHash := crypto.Keccak256Hash(payload) - msgHash := eth.Bytes32(payloadHash) - msg := suptypes.Message{ - Identifier: identifier, PayloadHash: payloadHash, - } - accessList := types.AccessList{{ - Address: predeploys.CrossL2InboxAddr, - StorageKeys: suptypes.EncodeAccessList([]suptypes.Access{msg.Access()}), - }} - - call := crossL2Inbox.ValidateMessage(identifier, msgHash) - - // Read not using the DSL. 
Therefore you need to manually error handle and also set context - _, err = contractio.Read(call, t.Ctx()) - // Will revert because access list not provided - require.Error(err) - // Provide access list using txplan - _, err = contractio.Read(call, t.Ctx(), txplan.WithAccessList(accessList)) - // Success because access list made storage slot warm - require.NoError(err) - - // Read: Trigger executing message - contract.Read(call, txplan.WithAccessList(accessList)) - - // Write: Bob triggers executing message - contract.Write(bob, call, txplan.WithAccessList(accessList)) -} - -// TestRandomDirectedGraph tests below scenario: -// Construct random directed graph of messages. -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L125 -func TestRandomDirectedGraph(gt *testing.T) { - t := devtest.SerialT(gt) - - sys := presets.NewSimpleInterop(t) - logger := sys.Log.With("Test", "TestRandomDirectedGraph") - rng := rand.New(rand.NewSource(1234)) - require := sys.T.Require() - - // interop network has at least two chains - l2ChainNum := 2 - - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - - // Deploy eventLoggers per every L2 chains because initiating messages can happen on any L2 chains - eventLoggerAddresses := []common.Address{alice.DeployEventLogger(), bob.DeployEventLogger()} - - // pubSubPairCnt is the count of (publisher, subscriber) pairs which - // - publisher initiates messages - // - subscriber validates messages - pubSubPairCnt := 5 - // txCnt is the count of transactions that each publisher emits - txCnt := 3 - // fundAmount is the ETH amount to fund publishers and subscribers - fundAmount := eth.OneTenthEther - - // jitter randomizes tx - jitter := func(rng *rand.Rand) { - time.Sleep(time.Duration(rng.Intn(250)) * time.Millisecond) - } - - // fund EOAs per chain - eoasPerChain := make([][]*dsl.EOA, 
l2ChainNum) - for chainIdx, funder := range []*dsl.Funder{sys.FunderA, sys.FunderB} { - eoas := funder.NewFundedEOAs(pubSubPairCnt, fundAmount) - eoasPerChain[chainIdx] = eoas - } - - // runPubSubPair spawns publisher goroutine, paired with subscriber goroutine - runPubSubPair := func(pubEOA, subEOA *dsl.EOA, eventLoggerAddress common.Address, localRng *rand.Rand) error { - ctx, cancel := context.WithCancel(t.Ctx()) - defer cancel() - - g, ctx := errgroup.WithContext(ctx) - - ch := make(chan *txintent.IntentTx[*txintent.MultiTrigger, *txintent.InteropOutput]) - - publisherRng := rand.New(rand.NewSource(localRng.Int63())) - subscriberRng := rand.New(rand.NewSource(localRng.Int63())) - - // publisher initiates txCnt transactions that includes multiple random messages - g.Go(func() error { - defer close(ch) - for range txCnt { - select { - case <-ctx.Done(): - return ctx.Err() - default: - tx, receipt, err := pubEOA.SendPackedRandomInitMessages(publisherRng, eventLoggerAddress) - if err != nil { - return fmt.Errorf("publisher error: %w", err) - } - logger.Info("Initiate messages included", "chainID", tx.PlannedTx.ChainID.Value(), "blockNumber", receipt.BlockNumber, "block", receipt.BlockHash) - select { - case ch <- tx: - case <-ctx.Done(): - return ctx.Err() - } - jitter(publisherRng) - } - } - return nil - }) - - // subscriber validates every messages that was initiated by the publisher - g.Go(func() error { - for { - select { - case <-ctx.Done(): - return ctx.Err() - case dependsOn, ok := <-ch: - if !ok { - return nil - } - tx, receipt, err := subEOA.SendPackedExecMessages(dependsOn) - if err != nil { - return fmt.Errorf("subscriber error: %w", err) - } - logger.Info("Validate messages included", "blockNumber", receipt.BlockNumber, "block", receipt.BlockHash) - logger.Info("Message dependency", - "sourceChainID", dependsOn.PlannedTx.ChainID.Value(), - "destChainID", tx.PlannedTx.ChainID.Value(), - "sourceBlockNum", dependsOn.PlannedTx.IncludedBlock.Value().Number, 
- "destBlockNum", receipt.BlockNumber) - jitter(subscriberRng) - } - } - }) - return g.Wait() - } - - var g errgroup.Group - - runPubSubPairWrapper := func(sourceIdx, destIdx, pairIdx int, localRng *rand.Rand) error { - return runPubSubPair(eoasPerChain[sourceIdx][pairIdx], eoasPerChain[destIdx][pairIdx], eventLoggerAddresses[sourceIdx], localRng) - } - - for pairIdx := range pubSubPairCnt { - // randomize source and destination L2 chain - sourceIdx := rng.Intn(2) - destIdx := 1 - sourceIdx - // localRng is needed per pubsub pair because rng cannot be shared without mutex - localRng := rand.New(rand.NewSource(rng.Int63())) - g.Go(func() error { - return runPubSubPairWrapper(sourceIdx, destIdx, pairIdx, localRng) - }) - } - require.NoError(g.Wait()) -} - -// TestInitExecMultipleMsg tests below scenario: -// Transaction initiates and executes multiple messages of self -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L247 -func TestInitExecMultipleMsg(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - require := sys.T.Require() - logger := t.Logger() - - rng := rand.New(rand.NewSource(1234)) - alice, bob := sys.FunderA.NewFundedEOA(eth.OneTenthEther), sys.FunderB.NewFundedEOA(eth.OneTenthEther) - - eventLoggerAddress := alice.DeployEventLogger() - // Intent to initiate two message(or emit event) on chain A - initCalls := []txintent.Call{ - interop.RandomInitTrigger(rng, eventLoggerAddress, 1, 15), - interop.RandomInitTrigger(rng, eventLoggerAddress, 2, 13), - } - txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) - - // Trigger two events - receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Initiate messages included", "block", receiptA.BlockHash) - require.Equal(2, 
len(receiptA.Logs)) - - // Make sure supervisor syncs the chain A events - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Intent to validate messages on chain B - txB := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](bob.Plan()) - txB.Content.DependOn(&txA.Result) - - // Two events in tx so use every index - indexes := []int{0, 1} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) - - receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Validate messages included", "block", receiptB.BlockHash) - - // Check two ExecutingMessage triggered - require.Equal(2, len(receiptB.Logs)) -} - -// TestExecSameMsgTwice tests below scenario: -// Transaction that executes the same message twice. -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L292 -func TestExecSameMsgTwice(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - require := sys.T.Require() - logger := t.Logger() - - rng := rand.New(rand.NewSource(1234)) - alice, bob := sys.FunderA.NewFundedEOA(eth.OneTenthEther), sys.FunderB.NewFundedEOA(eth.OneTenthEther) - - eventLoggerAddress := alice.DeployEventLogger() - - // Intent to initiate message(or emit event) on chain A - txA := txintent.NewIntent[*txintent.InitTrigger, *txintent.InteropOutput](alice.Plan()) - randomInitTrigger := interop.RandomInitTrigger(rng, eventLoggerAddress, 3, 10) - txA.Content.Set(randomInitTrigger) - - // Trigger single event - receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Initiate message included", "block", receiptA.BlockHash) - - // Make sure supervisor syncs the chain A events - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Intent to validate same message two times on chain B - txB := 
txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](bob.Plan()) - txB.Content.DependOn(&txA.Result) - - // Single event in tx so indexes are 0, 0 - indexes := []int{0, 0} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) - - receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Validate messages included", "block", receiptB.BlockHash) - - // Check two ExecutingMessage triggered - require.Equal(2, len(receiptB.Logs)) - // Check two messages are identical - require.Equal(receiptB.Logs[0].Topics, receiptB.Logs[1].Topics) -} - -// TestExecDifferentTopicCount tests below scenario: -// Execute message that links with initiating message with: 0, 1, 2, 3, or 4 topics in it -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L336 -func TestExecDifferentTopicCount(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - require := sys.T.Require() - logger := t.Logger() - - rng := rand.New(rand.NewSource(1234)) - alice, bob := sys.FunderA.NewFundedEOA(eth.OneTenthEther), sys.FunderB.NewFundedEOA(eth.OneTenthEther) - - eventLoggerAddress := alice.DeployEventLogger() - - // Intent to initiate message with different topic counts on chain A - initCalls := make([]txintent.Call, 5) - for topicCnt := range 5 { - initCalls[topicCnt] = interop.RandomInitTrigger(rng, eventLoggerAddress, topicCnt, 10) - } - txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) - - // Trigger five events, each have {0, 1, 2, 3, 4} topics in it - receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Initiate messages included", "block", receiptA.BlockHash) - require.Equal(5, len(receiptA.Logs)) - - for topicCnt := 
range 5 { - require.Equal(topicCnt, len(receiptA.Logs[topicCnt].Topics)) - } - - // Make sure supervisor syncs the chain A events - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Intent to validate message on chain B - txB := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](bob.Plan()) - txB.Content.DependOn(&txA.Result) - - // Five events in tx so use every index - indexes := []int{0, 1, 2, 3, 4} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) - - receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Validate message included", "block", receiptB.BlockHash) - - // Check five ExecutingMessage triggered - require.Equal(5, len(receiptB.Logs)) -} - -// TestExecMsgOpaqueData tests below scenario: -// Execute message that links with initiating message with: 0, 10KB of opaque event data in it -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L386 -func TestExecMsgOpaqueData(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - require := sys.T.Require() - logger := t.Logger() - - rng := rand.New(rand.NewSource(1234)) - alice, bob := sys.FunderA.NewFundedEOA(eth.OneTenthEther), sys.FunderB.NewFundedEOA(eth.OneTenthEther) - - eventLoggerAddress := alice.DeployEventLogger() - - // Intent to initiate message with two messages: 0, 10KB of opaque event data - initCalls := make([]txintent.Call, 2) - emptyInitTrigger := interop.RandomInitTrigger(rng, eventLoggerAddress, 2, 0) // 0B - largeInitTrigger := interop.RandomInitTrigger(rng, eventLoggerAddress, 3, 10_000) // 10KB - initCalls[0] = emptyInitTrigger - initCalls[1] = largeInitTrigger - - txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) - 
- // Trigger two events - receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Initiate messages included", "block", receiptA.BlockHash) - require.Equal(2, len(receiptA.Logs)) - require.Equal(emptyInitTrigger.OpaqueData, receiptA.Logs[0].Data) - require.Equal(largeInitTrigger.OpaqueData, receiptA.Logs[1].Data) - - // Make sure supervisor syncs the chain A events - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Intent to validate messages on chain B - txB := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](bob.Plan()) - txB.Content.DependOn(&txA.Result) - - // Two events in tx so use every index - indexes := []int{0, 1} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) - - receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Validate messages included", "block", receiptB.BlockHash) - - // Check two ExecutingMessage triggered - require.Equal(2, len(receiptB.Logs)) -} - -// TestExecMsgDifferEventIndexInSingleTx tests below scenario: -// Execute message that links with initiating message with: first, random or last event of a tx. 
-// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L436 -func TestExecMsgDifferEventIndexInSingleTx(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - require := sys.T.Require() - logger := t.Logger() - - rng := rand.New(rand.NewSource(1234)) - alice, bob := sys.FunderA.NewFundedEOA(eth.OneTenthEther), sys.FunderB.NewFundedEOA(eth.OneTenthEther) - - eventLoggerAddress := alice.DeployEventLogger() - - // Intent to initiate message with multiple messages, all included in single tx - eventCnt := 10 - initCalls := make([]txintent.Call, eventCnt) - for index := range eventCnt { - initCalls[index] = interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(5), rng.Intn(100)) - } - - txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) - - // Trigger multiple events - receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Initiate messages included", "block", receiptA.BlockHash) - require.Equal(eventCnt, len(receiptA.Logs)) - - // Make sure supervisor syncs the chain A events - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Intent to validate messages on chain B - txB := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](bob.Plan()) - txB.Content.DependOn(&txA.Result) - - // first, random or last event of a tx. 
- indexes := []int{0, 1 + rng.Intn(eventCnt-1), eventCnt - 1} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) - - receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Validate messages included", "block", receiptB.BlockHash) - - // Check three ExecutingMessage triggered - require.Equal(len(indexes), len(receiptB.Logs)) -} - -type invalidAttributeType string - -const ( - randomOrigin invalidAttributeType = "randomOrigin" - randomBlockNumber invalidAttributeType = "randomBlockNumber" - randomLogIndex invalidAttributeType = "randomLogIndex" - randomTimestamp invalidAttributeType = "randomTimestamp" - randomChainID invalidAttributeType = "randomChainID" - mismatchedLogIndex invalidAttributeType = "mismatchedLogIndex" - mismatchedTimestamp invalidAttributeType = "mismatchedTimestamp" - msgNotPresent invalidAttributeType = "msgNotPresent" - logIndexGreaterOrEqualToEventCnt invalidAttributeType = "logIndexGreaterOrEqualToEventCnt" -) - -// executeIndexedFault builds on top of txintent.ExecuteIndexed to inject a fault for the identifier of message -func executeIndexedFault( - executor common.Address, - events *plan.Lazy[*txintent.InteropOutput], - index int, - rng *rand.Rand, - faults []invalidAttributeType, - destChainID eth.ChainID, -) func(ctx context.Context) (*txintent.ExecTrigger, error) { - return func(ctx context.Context) (*txintent.ExecTrigger, error) { - execTrigger, err := txintent.ExecuteIndexed(executor, events, index)(ctx) - if err != nil { - return nil, err - } - newMsg := execTrigger.Msg - for _, fault := range faults { - switch fault { - case randomOrigin: - newMsg.Identifier.Origin = testutils.RandomAddress(rng) - case randomBlockNumber: - // make sure that the faulty blockNumber does not exceed type(uint64).max for CrossL2Inbox check - newMsg.Identifier.BlockNumber = rng.Uint64() / 2 - case randomLogIndex: - // make sure that the faulty logIndex does not 
exceed type(uint32).max for CrossL2Inbox check - newMsg.Identifier.LogIndex = rng.Uint32() / 2 - case randomTimestamp: - // make sure that the faulty Timestamp does not exceed type(uint64).max for CrossL2Inbox check - newMsg.Identifier.Timestamp = rng.Uint64() / 2 - case randomChainID: - newMsg.Identifier.ChainID = eth.ChainIDFromBytes32([32]byte(testutils.RandomData(rng, 32))) - case mismatchedLogIndex: - // valid msg within block, but mismatching event index - newMsg.Identifier.LogIndex += 1 - case mismatchedTimestamp: - // within time window, but mismatching block - newMsg.Identifier.Timestamp += 2 - case msgNotPresent: - // valid chain but msg not there - // use destination chain ID because initiating message is not present in dest chain - newMsg.Identifier.ChainID = destChainID - case logIndexGreaterOrEqualToEventCnt: - // execute implied-conflict message: point to event-index >= number of logs - // number of logs == number of entries - // so set the invalid logindex to number of entries - newMsg.Identifier.LogIndex = uint32(len(events.Value().Entries)) - default: - panic("invalid type") - } - } - return &txintent.ExecTrigger{ - Executor: executor, - Msg: newMsg, - }, nil - } -} - -// TestExecMessageInvalidAttributes tests below scenario: -// Execute message, but with one or more invalid attributes inside identifiers -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/message/interop_msg_test.go#L554 -func TestExecMessageInvalidAttributes(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - require := sys.T.Require() - logger := t.Logger() - - rng := rand.New(rand.NewSource(1234)) - // honest EOA which initiates messages - alice := sys.FunderA.NewFundedEOA(eth.OneTenthEther) - // honest EOA which executes messages - bob := sys.FunderB.NewFundedEOA(eth.OneTenthEther) - // malicious EOA which creates executing messages with invalid attributes - chuck := 
sys.FunderB.NewFundedEOA(eth.OneTenthEther) - - eventLoggerAddress := alice.DeployEventLogger() - - // Intent to initiate messages(or emit events) on chain A - initCalls := []txintent.Call{ - interop.RandomInitTrigger(rng, eventLoggerAddress, 3, 10), - interop.RandomInitTrigger(rng, eventLoggerAddress, 2, 95), - interop.RandomInitTrigger(rng, eventLoggerAddress, 1, 50), - } - txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) - - // Trigger multiple events - receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Initiate messages included", "block", receiptA.BlockHash) - - // Make sure supervisor syncs the chain A events - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - faultsLists := [][]invalidAttributeType{ - // test each identifier attributes to be faulty for upper bound tests - {randomOrigin}, {randomBlockNumber}, {randomLogIndex}, {randomTimestamp}, {randomChainID}, - // test for every attributes to be faulty for upper bound tests - {randomOrigin, randomBlockNumber, randomLogIndex, randomTimestamp, randomChainID}, - // test for non-random invalid attributes - {mismatchedLogIndex}, {mismatchedTimestamp}, {msgNotPresent}, {logIndexGreaterOrEqualToEventCnt}, - } - - for _, faults := range faultsLists { - logger.Info("Attempt to validate message with invalid attribute", "faults", faults) - // Intent to validate message on chain B - txC := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](chuck.Plan()) - txC.Content.DependOn(&txA.Result) - - // Random select event index in tx for injecting faults - eventIdx := rng.Intn(len(initCalls)) - txC.Content.Fn(executeIndexedFault(constants.CrossL2Inbox, &txA.Result, eventIdx, rng, faults, chuck.ChainID())) - - // make sure that the transaction is not reverted by CrossL2Inbox... 
- gas, err := txC.PlannedTx.Gas.Eval(t.Ctx()) - require.NoError(err) - require.Greater(gas, uint64(0)) - - // but rather not included at chain B because of supervisor check - // chain B L2 EL will query supervisor to check whether given message is valid - // supervisor will throw ErrConflict(conflicting data), and L2 EL will drop tx - _, err = txC.PlannedTx.Included.Eval(t.Ctx()) - require.Error(err) - logger.Info("Validate message not included") - } - - // we now attempt to execute msg correctly - // Intent to validate message on chain B - txB := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](bob.Plan()) - txB.Content.DependOn(&txA.Result) - - // Three events in tx so use every index - indexes := []int{0, 1, 2} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) - - receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) - require.NoError(err) - logger.Info("Validate message included", "block", receiptB.BlockHash) - - // Check three ExecutingMessage triggered - require.Equal(3, len(receiptB.Logs)) -} diff --git a/rust/kona/tests/supervisor/pre_interop/init_test.go b/rust/kona/tests/supervisor/pre_interop/init_test.go deleted file mode 100644 index c173e8ce9e7b9..0000000000000 --- a/rust/kona/tests/supervisor/pre_interop/init_test.go +++ /dev/null @@ -1,20 +0,0 @@ -package preinterop - -// todo: add tests -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - spresets "github.com/ethereum-optimism/optimism/rust/kona/tests/supervisor/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // sleep to ensure the backend is ready - - presets.DoMain(m, - spresets.WithSimpleInteropMinimal(), - presets.WithSuggestedInteropActivationOffset(30), - presets.WithInteropNotAtGenesis()) - -} diff --git a/rust/kona/tests/supervisor/pre_interop/post_test.go b/rust/kona/tests/supervisor/pre_interop/post_test.go 
deleted file mode 100644 index 00057286054da..0000000000000 --- a/rust/kona/tests/supervisor/pre_interop/post_test.go +++ /dev/null @@ -1,209 +0,0 @@ -package preinterop - -import ( - "math/rand" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" - "github.com/ethereum-optimism/optimism/op-core/forks" - "github.com/ethereum-optimism/optimism/op-core/predeploys" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/eth" - stypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/common" -) - -func testSupervisorActivationBlock(t devtest.T, sys *presets.SimpleInterop, net *dsl.L2Network, activationBlock eth.BlockID) { - require := t.Require() - - // wait for some time to ensure the interop activation block is become cross-safe - t.Logger().Info("Waiting for interop activation block to be cross-safe") - sys.Supervisor.WaitForL2HeadToAdvanceTo(net.ChainID(), stypes.CrossSafe, activationBlock) - - interopTime := net.Escape().ChainConfig().InteropTime - pre := net.LatestBlockBeforeTimestamp(t, *interopTime) - require.NotNil(pre, "Pre-interop block should be found before interop time") - - // make sure pre-interop block is parent of activation block - require.Equal(pre.Number, activationBlock.Number-1, "Activation block should be one after pre-interop block") - - // fetching the source for the pre-interop block should return the error - // this is to make sure that we only store the blocks after interop - _, err := sys.Supervisor.Escape().QueryAPI().CrossDerivedToSource(t.Ctx(), net.ChainID(), pre.ID()) - require.Error(err, "CrossDerivedToSource should error before 
interop") - - // fetch the source for the activation block - derivedFrom, err := sys.Supervisor.Escape().QueryAPI().CrossDerivedToSource(t.Ctx(), net.ChainID(), activationBlock) - require.NoError(err, "CrossDerivedToSource should not error after interop") - require.NotNil(derivedFrom, "CrossDerivedToSource should return a valid source block") -} - -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/upgrade/post_test.go -// test case modified to check the correctness of the supervisor activation block as well -func TestPostInbox(gt *testing.T) { - t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) - devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { - require := t.Require() - activationBlock := net.AwaitActivation(t, forks.Interop) - - el := net.Escape().L2ELNode(match.FirstL2EL) - implAddrBytes, err := el.EthClient().GetStorageAt(t.Ctx(), predeploys.CrossL2InboxAddr, - genesis.ImplementationSlot, activationBlock.Hash.String()) - require.NoError(err) - implAddr := common.BytesToAddress(implAddrBytes[:]) - require.NotEqual(common.Address{}, implAddr) - code, err := el.EthClient().CodeAtHash(t.Ctx(), implAddr, activationBlock.Hash) - require.NoError(err) - require.NotEmpty(code) - - testSupervisorActivationBlock(t, sys, net, activationBlock) - }) -} - -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/upgrade/post_test.go -func TestPostInteropUpgradeComprehensive(gt *testing.T) { - t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) - require := t.Require() - logger := t.Logger() - - // Wait for networks to be online by waiting for blocks - sys.L1Network.WaitForBlock() - sys.L2ChainA.WaitForBlock() - sys.L2ChainB.WaitForBlock() - - // Get interop activation time - interopTime := sys.L2ChainA.Escape().ChainConfig().InteropTime - require.NotNil(interopTime, "InteropTime must be set") - - 
logger.Info("Starting comprehensive post-interop upgrade tests", "interopTime", *interopTime) - - // 1. Check that anchor block of supervisor matches the activation block - logger.Info("Checking supervisor anchor block matches activation block") - testSupervisorAnchorBlock(t, sys) - - // 2. Check that the supervisor has safety progression for each level - logger.Info("Checking supervisor safety progression") - testSupervisorSafetyProgression(t, sys) - - // 3. Confirms that interop message can be included - logger.Info("Testing interop message inclusion") - testInteropMessageInclusion(t, sys) - - logger.Info("All comprehensive post-interop upgrade tests completed successfully") -} - -// testSupervisorAnchorBlock checks that the supervisor's anchor block has been set and matches the upgrade timestamp -func testSupervisorAnchorBlock(t devtest.T, sys *presets.SimpleInterop) { - logger := t.Logger() - - // Use the DSL helper for anchor block validation - logger.Info("Testing supervisor anchor block functionality") - - // Phase 1: Wait for L2 chains to reach interop activation time - logger.Info("Phase 1: Waiting for L2 chains to reach interop activation time") - - devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { - - // Gate test to not time out before upgrade happens - forkTimestamp := net.Escape().ChainConfig().InteropTime - t.Gate().NotNil(forkTimestamp, "Must have fork configured") - t.Gate().Greater(*forkTimestamp, uint64(0), "Must not start fork at genesis") - upgradeTime := time.Unix(int64(*forkTimestamp), 0) - if deadline, hasDeadline := t.Deadline(); hasDeadline { - t.Gate().True(upgradeTime.Before(deadline), "test must not time out before upgrade happens") - } - - activationBlock := net.AwaitActivation(t, forks.Interop) - sys.Supervisor.WaitForL2HeadToAdvanceTo(net.ChainID(), stypes.CrossSafe, activationBlock) - - logger.Info("Validating anchor block timing", - "chainID", net.ChainID(), - "derivedBlockNumber", 
activationBlock.Number, - "interopTime", *forkTimestamp) - }) - - logger.Info("Supervisor anchor block validation completed successfully") -} - -// testSupervisorSafetyProgression checks that supervisor has safety progression for each level -func testSupervisorSafetyProgression(t devtest.T, sys *presets.SimpleInterop) { - logger := t.Logger() - logger.Info("Testing supervisor safety progression") - - delta := uint64(3) // Minimum blocks of progression expected - dsl.CheckAll(t, - sys.L2CLA.AdvancedFn(stypes.LocalUnsafe, delta, 30), - sys.L2CLB.AdvancedFn(stypes.LocalUnsafe, delta, 30), - - sys.L2CLA.AdvancedFn(stypes.LocalSafe, delta, 30), - sys.L2CLB.AdvancedFn(stypes.LocalSafe, delta, 30), - - sys.L2CLA.AdvancedFn(stypes.CrossUnsafe, delta, 30), - sys.L2CLB.AdvancedFn(stypes.CrossUnsafe, delta, 30), - - sys.L2CLA.AdvancedFn(stypes.CrossSafe, delta, 60), - sys.L2CLB.AdvancedFn(stypes.CrossSafe, delta, 60), - ) - - logger.Info("Supervisor safety progression validation completed successfully") -} - -// testInteropMessageInclusion confirms that interop messages can be included post-upgrade -func testInteropMessageInclusion(t devtest.T, sys *presets.SimpleInterop) { - logger := t.Logger() - logger.Info("Starting interop message inclusion test") - - // Phase 1: Setup test accounts and contracts - alice, bob, eventLoggerAddress := setupInteropTestEnvironment(sys) - - // Phase 2: Send init message on chain A - rng := rand.New(rand.NewSource(1234)) - initMsg := alice.SendInitMessage(interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(5), rng.Intn(30))) - - // Make sure supervisor indexes block which includes init message - sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) - - // Single event in tx so index is 0 - execMsg := bob.SendExecMessage(initMsg) - - // Phase 5: Verify cross-safe progression - verifyInteropMessagesProgression(t, sys, initMsg, execMsg) - - logger.Info("Interop message inclusion test completed successfully") -} - -// 
setupInteropTestEnvironment creates test accounts and deploys necessary contracts -func setupInteropTestEnvironment(sys *presets.SimpleInterop) (alice, bob *dsl.EOA, eventLoggerAddress common.Address) { - - // Create EOAs for interop messaging - alice = sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob = sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - - // Deploy event logger contract on chain A - eventLoggerAddress = alice.DeployEventLogger() - - // Wait for chains to catch up - sys.L2ChainB.CatchUpTo(sys.L2ChainA) - - return alice, bob, eventLoggerAddress -} - -// verifyInteropMessagesProgression verifies cross-safe progression for both init and exec messages -func verifyInteropMessagesProgression(t devtest.T, sys *presets.SimpleInterop, initMsg *dsl.InitMessage, execMsg *dsl.ExecMessage) { - logger := t.Logger() - - // Verify cross-safe progression for both messages - dsl.CheckAll(t, - sys.L2CLA.ReachedRefFn(stypes.CrossSafe, initMsg.BlockID(), 60), - sys.L2CLB.ReachedRefFn(stypes.CrossSafe, execMsg.BlockID(), 60), - ) - - logger.Info("Cross-safe progression verified for both init and exec messages") -} diff --git a/rust/kona/tests/supervisor/pre_interop/pre_test.go b/rust/kona/tests/supervisor/pre_interop/pre_test.go deleted file mode 100644 index 143a9baed75b5..0000000000000 --- a/rust/kona/tests/supervisor/pre_interop/pre_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package preinterop - -import ( - "math/rand" - "testing" - "time" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" - "github.com/ethereum-optimism/optimism/op-core/predeploys" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - 
"github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/txintent" - stypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/upgrade/pre_test.go -func TestPreNoInbox(gt *testing.T) { - t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) - require := t.Require() - - t.Logger().Info("Starting") - - devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { - interopTime := net.Escape().ChainConfig().InteropTime - t.Require().NotNil(interopTime) - pre := net.LatestBlockBeforeTimestamp(t, *interopTime) - el := net.Escape().L2ELNode(match.FirstL2EL) - codeAddr := common.HexToAddress("0xC0D3C0d3C0D3C0d3c0d3c0D3c0D3C0d3C0D30022") - implCode, err := el.EthClient().CodeAtHash(t.Ctx(), codeAddr, pre.Hash) - require.NoError(err) - require.Len(implCode, 0, "needs to be empty") - implAddrBytes, err := el.EthClient().GetStorageAt(t.Ctx(), predeploys.CrossL2InboxAddr, - genesis.ImplementationSlot, pre.Hash.String()) - require.NoError(err) - require.Equal(common.Address{}, common.BytesToAddress(implAddrBytes[:])) - }) - - // try access the sync-status of the supervisor, assert that the sync-status returns the expected error - devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { - interopTime := net.Escape().ChainConfig().InteropTime - - _, err := sys.Supervisor.Escape().QueryAPI().SyncStatus(t.Ctx()) - require.ErrorContains(err, "chain database is not initialized") - - // confirm we are still pre-interop - require.False(net.IsActivated(*interopTime)) - t.Logger().Info("Timestamps", "interopTime", *interopTime, "now", time.Now().Unix()) - }) - - var initMsg *dsl.InitMessage - // try interop before the upgrade, confirm that messages do not get included - { - // two EOAs 
for triggering the init and exec interop txs - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - - interopTimeA := sys.L2ChainA.Escape().ChainConfig().InteropTime - interopTimeB := sys.L2ChainB.Escape().ChainConfig().InteropTime - - eventLoggerAddress := alice.DeployEventLogger() - - // wait for chain B to catch up to chain A if necessary - sys.L2ChainB.CatchUpTo(sys.L2ChainA) - - // send initiating message on chain A - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - initMsg = alice.SendInitMessage(interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(3), rng.Intn(10))) - - // at least one block between the init tx on chain A and the exec tx on chain B - sys.L2ChainB.WaitForBlock() - - // send executing message on chain B and confirm we got an error - execTx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) - execTx.Content.DependOn(&initMsg.Tx.Result) - execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)) - execReceipt, err := execTx.PlannedTx.Included.Eval(sys.T.Ctx()) - require.ErrorContains(err, "implementation not initialized", "error did not contain expected string") - require.Nil(execReceipt) - - t.Logger().Info("initReceipt", "blocknum", initMsg.Receipt.BlockNumber, "txhash", initMsg.Receipt.TxHash) - - // confirm we are still pre-interop - require.False(sys.L2ChainA.IsActivated(*interopTimeA)) - require.False(sys.L2ChainB.IsActivated(*interopTimeB)) - t.Logger().Info("Timestamps", "interopTimeA", *interopTimeA, "interopTimeB", *interopTimeB, "now", time.Now().Unix()) - } - - // check that log events from a block before activation, when converted into an access-list, fail the check-access-list RPC check - { - ctx := sys.T.Ctx() - - execTrigger, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)(ctx) - require.NoError(err) - - ed := stypes.ExecutingDescriptor{Timestamp: 
uint64(time.Now().Unix())} - accessEntries := []stypes.Access{execTrigger.Msg.Access()} - accessList := stypes.EncodeAccessList(accessEntries) - - err = sys.Supervisor.Escape().QueryAPI().CheckAccessList(ctx, accessList, stypes.CrossSafe, ed) - require.ErrorContains(err, "conflicting data") - } - - t.Logger().Info("Done") -} diff --git a/rust/kona/tests/supervisor/rpc/init_test.go b/rust/kona/tests/supervisor/rpc/init_test.go deleted file mode 100644 index b11738839ec9b..0000000000000 --- a/rust/kona/tests/supervisor/rpc/init_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package rpc - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -const ( - // SLEEP_BACKEND_READY is the time to wait for the backend to be ready - SLEEP_BACKEND_READY = 90 * time.Second -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // sleep to ensure the backend is ready - time.Sleep(SLEEP_BACKEND_READY) - - presets.DoMain(m, presets.WithSimpleInterop()) -} diff --git a/rust/kona/tests/supervisor/rpc/rpc_test.go b/rust/kona/tests/supervisor/rpc/rpc_test.go deleted file mode 100644 index 7d3105f5d6b73..0000000000000 --- a/rust/kona/tests/supervisor/rpc/rpc_test.go +++ /dev/null @@ -1,333 +0,0 @@ -package rpc - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/bigs" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - gethTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" -) - -// TODO: add test for dependencySetV1 after devstack support is added to the QueryAPI - -func TestRPCLocalUnsafe(gt *testing.T) { - t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) - client := sys.Supervisor.Escape() - - t.Run("fails with invalid chain ID", func(gt devtest.T) { - _, err := client.QueryAPI().LocalUnsafe(context.Background(), eth.ChainIDFromUInt64(100)) - require.Error(t, err, "expected LocalUnsafe to fail with raw chain ID") - }) - - for _, chainID := range []eth.ChainID{sys.L2ChainA.ChainID(), sys.L2ChainB.ChainID()} { - t.Run(fmt.Sprintf("succeeds with valid chain ID %d", chainID), func(gt devtest.T) { - safe, err := client.QueryAPI().LocalUnsafe(context.Background(), chainID) - require.NoError(t, err) - assert.Greater(t, safe.Number, uint64(0)) - assert.Len(t, safe.Hash, 32) - }) - } -} - -func TestRPCCrossSafe(gt *testing.T) { - t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) - client := sys.Supervisor.Escape() - - t.Run("fails with invalid chain ID", func(gt devtest.T) { - _, err := client.QueryAPI().CrossSafe(context.Background(), eth.ChainIDFromUInt64(100)) - require.Error(t, err, "expected CrossSafe to fail with invalid chain") - }) - - for _, chainID := range []eth.ChainID{sys.L2ChainA.ChainID(), sys.L2ChainB.ChainID()} { - t.Run(fmt.Sprintf("succeeds with valid chain ID %d", chainID), func(gt devtest.T) { - blockPair, err := client.QueryAPI().CrossSafe(context.Background(), chainID) - require.NoError(t, err) - assert.Greater(t, blockPair.Derived.Number, uint64(0)) - assert.Len(t, blockPair.Derived.Hash, 32) - - assert.Greater(t, blockPair.Source.Number, uint64(0)) - assert.Len(t, blockPair.Source.Hash, 32) - }) - } -} - -func TestRPCFinalized(gt *testing.T) { - gt.Skip() - t := devtest.ParallelT(gt) - - sys := presets.NewSimpleInterop(t) - client := sys.Supervisor.Escape() - - t.Run("fails with invalid chain ID", func(gt devtest.T) { - _, err := 
client.QueryAPI().Finalized(context.Background(), eth.ChainIDFromUInt64(100)) - require.Error(t, err, "expected Finalized to fail with invalid chain") - }) - - for _, chainID := range []eth.ChainID{sys.L2ChainA.ChainID(), sys.L2ChainB.ChainID()} { - t.Run(fmt.Sprintf("succeeds with valid chain ID %d", chainID), func(gt devtest.T) { - safe, err := client.QueryAPI().Finalized(context.Background(), chainID) - require.NoError(t, err) - assert.Greater(t, safe.Number, uint64(0)) - assert.Len(t, safe.Hash, 32) - }) - } -} - -func TestRPCFinalizedL1(gt *testing.T) { - t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) - client := sys.Supervisor.Escape() - t.Run("succeeds to get finalized L1 block", func(gt devtest.T) { - block, err := client.QueryAPI().FinalizedL1(context.Background()) - require.NoError(t, err) - assert.Greater(t, block.Number, uint64(0)) - assert.Less(t, block.Time, uint64(time.Now().Unix()+5)) - assert.Len(t, block.Hash, 32) - }) -} - -func TestRPCSuperRootAtTimestamp(gt *testing.T) { - t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) - client := sys.Supervisor.Escape() - - t.Run("fails with invalid timestamp", func(gt devtest.T) { - _, err := client.QueryAPI().SuperRootAtTimestamp(context.Background(), 0) - require.Error(t, err) - }) - - t.Run("succeeds with valid timestamp", func(gt devtest.T) { - timeNow := uint64(time.Now().Unix()) - root, err := client.QueryAPI().SuperRootAtTimestamp(context.Background(), hexutil.Uint64(timeNow-90)) - require.NoError(t, err) - assert.Len(t, root.SuperRoot, 32) - assert.Len(t, root.Chains, 2) - - for _, chain := range root.Chains { - assert.Len(t, chain.Canonical, 32) - assert.Contains(t, []eth.ChainID{sys.L2ChainA.ChainID(), sys.L2ChainB.ChainID()}, chain.ChainID) - } - }) -} - -func TestRPCAllSafeDerivedAt(gt *testing.T) { - t := devtest.ParallelT(gt) - - sys := presets.NewSimpleInterop(t) - client := sys.Supervisor.Escape() - - t.Run("fails with invalid L1 block hash", func(gt 
devtest.T) { - _, err := client.QueryAPI().AllSafeDerivedAt(context.Background(), eth.BlockID{ - Number: 100, - Hash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), - }) - require.Error(t, err) - }) - - t.Run("succeeds with valid synced L1 block hash", func(gt devtest.T) { - sync, err := client.QueryAPI().SyncStatus(context.Background()) - require.NoError(t, err) - - allSafe, err := client.QueryAPI().AllSafeDerivedAt(context.Background(), eth.BlockID{ - Number: sync.MinSyncedL1.Number, - Hash: sync.MinSyncedL1.Hash, - }) - require.NoError(t, err) - - require.Equal(t, 2, len(allSafe)) - for key, value := range allSafe { - require.Contains(t, []eth.ChainID{sys.L2ChainA.ChainID(), sys.L2ChainB.ChainID()}, key) - require.Len(t, value.Hash, 32) - } - }) -} - -func TestRPCCrossDerivedToSource(gt *testing.T) { - t := devtest.ParallelT(gt) - - sys := presets.NewSimpleInterop(t) - client := sys.Supervisor.Escape() - - t.Run("fails with invalid chain ID", func(gt devtest.T) { - _, err := client.QueryAPI().CrossDerivedToSource(context.Background(), eth.ChainIDFromUInt64(100), eth.BlockID{Number: 25}) - require.Error(t, err, "expected CrossDerivedToSource to fail with invalid chain") - }) - - safe, err := client.QueryAPI().CrossSafe(context.Background(), sys.L2ChainA.ChainID()) - require.NoError(t, err) - - t.Run(fmt.Sprintf("succeeds with valid chain ID %d", sys.L2ChainA.ChainID()), func(gt devtest.T) { - source, err := client.QueryAPI().CrossDerivedToSource( - context.Background(), - sys.L2ChainA.ChainID(), - eth.BlockID{ - Number: safe.Derived.Number, - Hash: safe.Derived.Hash, - }, - ) - require.NoError(t, err) - assert.Greater(t, source.Number, uint64(0)) - assert.Len(t, source.Hash, 32) - assert.Equal(t, source.Number, safe.Source.Number) - assert.Equal(t, source.Hash, safe.Source.Hash) - }) - -} - -func TestRPCCheckAccessList(gt *testing.T) { - t := devtest.ParallelT(gt) - - sys := presets.NewSimpleInterop(t) - client := 
sys.Supervisor.Escape() - ctx := sys.T.Ctx() - - alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) - bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) - - eventLoggerAddress := alice.DeployEventLogger() - sys.L2ChainB.CatchUpTo(sys.L2ChainA) - - rng := rand.New(rand.NewSource(time.Now().UnixNano())) - initMsg := alice.SendInitMessage( - interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(3), rng.Intn(10)), - ) - initReceipt := initMsg.Receipt - - logToAccess := func(chainID eth.ChainID, log *gethTypes.Log, timestamp uint64) types.Access { - msgPayload := make([]byte, 0) - for _, topic := range log.Topics { - msgPayload = append(msgPayload, topic.Bytes()...) - } - msgPayload = append(msgPayload, log.Data...) - - msgHash := crypto.Keccak256Hash(msgPayload) - args := types.ChecksumArgs{ - BlockNumber: log.BlockNumber, - Timestamp: timestamp, - LogIndex: uint32(log.Index), - ChainID: chainID, - LogHash: types.PayloadHashToLogHash(msgHash, log.Address), - } - return args.Access() - } - - blockRef := sys.L2ChainA.PublicRPC().BlockRefByNumber(bigs.Uint64Strict(initReceipt.BlockNumber)) - - var accessEntries []types.Access - for _, evLog := range initReceipt.Logs { - accessEntries = append(accessEntries, logToAccess(alice.ChainID(), evLog, blockRef.Time)) - } - - cloneAccessEntries := func() []types.Access { - clone := make([]types.Access, len(accessEntries)) - copy(clone, accessEntries) - return clone - } - - sys.L2ChainB.WaitForBlock() - - t.Run("succeeds with valid access list", func(gt devtest.T) { - accessList := types.EncodeAccessList(cloneAccessEntries()) - timestamp := uint64(time.Now().Unix()) - ed := types.ExecutingDescriptor{ - Timestamp: timestamp, - ChainID: bob.ChainID(), - } - - err := client.QueryAPI().CheckAccessList(ctx, accessList, types.LocalUnsafe, ed) - require.NoError(t, err, "CheckAccessList should succeed with valid access list and chain ID") - }) - - t.Run("fails with invalid chain ID", func(gt devtest.T) { - accessList := 
types.EncodeAccessList(cloneAccessEntries()) - timestamp := uint64(time.Now().Unix()) - ed := types.ExecutingDescriptor{ - Timestamp: timestamp, - ChainID: eth.ChainIDFromUInt64(99999999), - } - - err := client.QueryAPI().CheckAccessList(ctx, accessList, types.LocalUnsafe, ed) - require.Error(t, err, "CheckAccessList should fail with invalid chain ID") - }) - - t.Run("fails with invalid timestamp", func(gt devtest.T) { - accessList := types.EncodeAccessList(cloneAccessEntries()) - ed := types.ExecutingDescriptor{ - Timestamp: blockRef.Time - 1, - ChainID: bob.ChainID(), - } - - err := client.QueryAPI().CheckAccessList(ctx, accessList, types.LocalUnsafe, ed) - require.Error(t, err, "CheckAccessList should fail with invalid timestamp") - }) - - t.Run("fails with conflicting data - log index mismatch", func(gt devtest.T) { - entries := cloneAccessEntries() - entries[0].LogIndex = 10 - accessList := types.EncodeAccessList(entries) - timestamp := uint64(time.Now().Unix()) - ed := types.ExecutingDescriptor{ - Timestamp: timestamp, - ChainID: bob.ChainID(), - } - - err := client.QueryAPI().CheckAccessList(ctx, accessList, types.LocalUnsafe, ed) - require.Error(t, err, "CheckAccessList should fail with conflicting log index") - }) - - t.Run("fails with conflicting data - invalid block number", func(gt devtest.T) { - entries := cloneAccessEntries() - entries[0].BlockNumber = entries[0].BlockNumber - 1 - accessList := types.EncodeAccessList(entries) - timestamp := uint64(time.Now().Unix()) - ed := types.ExecutingDescriptor{ - Timestamp: timestamp, - ChainID: bob.ChainID(), - } - - err := client.QueryAPI().CheckAccessList(ctx, accessList, types.LocalUnsafe, ed) - require.Error(t, err, "CheckAccessList should fail with invalid block number") - }) - - t.Run("fails with conflicting data - invalid checksum", func(gt devtest.T) { - entries := cloneAccessEntries() - // Corrupt the checksum - entries[0].Checksum[10] ^= 0xFF - accessList := types.EncodeAccessList(entries) - timestamp 
:= uint64(time.Now().Unix()) - ed := types.ExecutingDescriptor{ - Timestamp: timestamp, - ChainID: bob.ChainID(), - } - - err := client.QueryAPI().CheckAccessList(ctx, accessList, types.LocalUnsafe, ed) - require.Error(t, err, "CheckAccessList should fail with invalid checksum") - }) - - t.Run("fails with safety violation", func(gt devtest.T) { - accessList := types.EncodeAccessList(cloneAccessEntries()) - timestamp := uint64(time.Now().Unix()) - ed := types.ExecutingDescriptor{ - Timestamp: timestamp, - ChainID: bob.ChainID(), - } - - err := client.QueryAPI().CheckAccessList(ctx, accessList, types.Finalized, ed) - require.Error(t, err, "CheckAccessList should fail due to safety level violation") - }) -} diff --git a/rust/kona/tests/supervisor/sync/init_test.go b/rust/kona/tests/supervisor/sync/init_test.go deleted file mode 100644 index 3562c1cbf47fc..0000000000000 --- a/rust/kona/tests/supervisor/sync/init_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package sync - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -const ( - // SLEEP_BACKEND_READY is the time to wait for the backend to be ready - SLEEP_BACKEND_READY = 90 * time.Second -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // sleep to ensure the backend is ready - time.Sleep(SLEEP_BACKEND_READY) - - presets.DoMain(m, presets.WithSimpleInterop()) -} diff --git a/rust/kona/tests/supervisor/sync/resync_test.go b/rust/kona/tests/supervisor/sync/resync_test.go deleted file mode 100644 index 442181c8d3bfe..0000000000000 --- a/rust/kona/tests/supervisor/sync/resync_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package sync - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-service/eth" - 
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// TestL2CLResync checks that unsafe head advances after restarting L2CL. -// Resync is only possible when supervisor and L2CL reconnects. -// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/sync/simple_interop/interop_sync_test.go -func TestL2CLResync(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - logger := sys.Log.With("Test", "TestL2CLResync") - - logger.Info("Check unsafe chains are advancing") - dsl.CheckAll(t, - sys.L2ELA.AdvancedFn(eth.Unsafe, 5), - sys.L2ELB.AdvancedFn(eth.Unsafe, 5), - ) - - logger.Info("Stop L2CL nodes") - sys.L2CLA.Stop() - sys.L2CLB.Stop() - - logger.Info("Make sure L2ELs does not advance") - dsl.CheckAll(t, - sys.L2ELA.NotAdvancedFn(eth.Unsafe, 30), - sys.L2ELB.NotAdvancedFn(eth.Unsafe, 30), - ) - - logger.Info("Restart L2CL nodes") - sys.L2CLA.Start() - sys.L2CLB.Start() - - // L2CL may advance a few blocks without supervisor connection, but eventually it will stop without the connection - // we must check that unsafe head is advancing due to reconnection - logger.Info("Boot up L2CL nodes") - - dsl.CheckAll(t, - sys.L2ELA.AdvancedFn(eth.Unsafe, 30), - sys.L2ELB.AdvancedFn(eth.Unsafe, 30), - ) - - // supervisor will attempt to reconnect with L2CLs at this point because L2CL ws endpoint is recovered - logger.Info("Check unsafe chains are advancing again") - dsl.CheckAll(t, - sys.L2ELA.AdvancedFn(eth.Unsafe, 10), - sys.L2ELB.AdvancedFn(eth.Unsafe, 10), - ) - - // supervisor successfully connected with managed L2CLs -} - -// TestSupervisorResync checks that heads advances after restarting the Supervisor. 
-func TestSupervisorResync(gt *testing.T) { - t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) - logger := sys.Log.With("Test", "TestSupervisorResync") - - logger.Info("Check unsafe chains are advancing") - - for _, level := range []types.SafetyLevel{types.LocalUnsafe, types.LocalSafe, types.CrossUnsafe, types.CrossSafe} { - sys.Supervisor.WaitForL2HeadToAdvance(sys.L2ChainA.ChainID(), 2, level, 20) - sys.Supervisor.WaitForL2HeadToAdvance(sys.L2ChainB.ChainID(), 2, level, 20) - } - - logger.Info("Stop Supervisor node") - sys.Supervisor.Stop() - - logger.Info("Restart Supervisor node") - sys.Supervisor.Start() - - logger.Info("Boot up Supervisor node") - - // Re check syncing is not blocked - for _, level := range []types.SafetyLevel{types.LocalUnsafe, types.LocalSafe, types.CrossUnsafe, types.CrossSafe} { - sys.Supervisor.WaitForL2HeadToAdvance(sys.L2ChainA.ChainID(), 2, level, 20) - sys.Supervisor.WaitForL2HeadToAdvance(sys.L2ChainB.ChainID(), 2, level, 20) - } -} diff --git a/rust/kona/tests/supervisor/sync/sync_test.go b/rust/kona/tests/supervisor/sync/sync_test.go deleted file mode 100644 index ddec52b0a366e..0000000000000 --- a/rust/kona/tests/supervisor/sync/sync_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package sync - -import ( - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" -) - -const ( - // UnSafeHeadAdvanceRetries is the number of retries for unsafe head advancement - UnSafeHeadAdvanceRetries = 15 - - // CrossUnsafeHeadAdvanceRetries is the number of retries for cross-unsafe head advancement - CrossUnsafeHeadAdvanceRetries = 15 - - // LocalSafeHeadAdvanceRetries is the number of retries for safe head advancement - LocalSafeHeadAdvanceRetries = 15 - - // SafeHeadAdvanceRetries is the number of retries for safe head advancement - SafeHeadAdvanceRetries = 25 - - // 
FinalizedHeadAdvanceRetries is the number of retries for finalized head advancement - FinalizedHeadAdvanceRetries = 100 -) - -func TestLocalUnsafeHeadAdvancing(gt *testing.T) { - t := devtest.SerialT(gt) - - out := presets.NewSimpleInterop(t) - l2aChainID := out.L2CLA.ChainID() - l2bChainID := out.L2CLB.ChainID() - - supervisorStatus := out.Supervisor.FetchSyncStatus() - - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainA.ChainID(), 2, "unsafe", UnSafeHeadAdvanceRetries) - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainB.ChainID(), 2, "unsafe", UnSafeHeadAdvanceRetries) - - // Wait and check if the local unsafe head has advanced on L2A - err := wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLA.SyncStatus() - return status.UnsafeL2.Number > supervisorStatus.Chains[l2aChainID].LocalUnsafe.Number, nil - }) - t.Require().NoError(err) - - // Wait and check if the local unsafe head has advanced on L2B - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLB.SyncStatus() - return status.UnsafeL2.Number > supervisorStatus.Chains[l2bChainID].LocalUnsafe.Number, nil - }) - t.Require().NoError(err) - - // Wait and cross check the supervisor unsafe heads to advance on both chains - err = wait.For(t.Ctx(), 5*time.Second, func() (bool, error) { - latestSupervisorStatus := out.Supervisor.FetchSyncStatus() - return latestSupervisorStatus.Chains[l2aChainID].LocalUnsafe.Number > supervisorStatus.Chains[l2aChainID].LocalUnsafe.Number && - latestSupervisorStatus.Chains[l2bChainID].LocalUnsafe.Number >= supervisorStatus.Chains[l2bChainID].LocalUnsafe.Number, nil - }) - t.Require().NoError(err) -} - -func TestCrossUnsafeHeadAdvancing(gt *testing.T) { - t := devtest.SerialT(gt) - - out := presets.NewSimpleInterop(t) - l2aChainID := out.L2CLA.ChainID() - l2bChainID := out.L2CLB.ChainID() - - supervisorStatus := out.Supervisor.FetchSyncStatus() - - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainA.ChainID(), 2, "cross-unsafe", 
CrossUnsafeHeadAdvanceRetries) - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainB.ChainID(), 2, "cross-unsafe", CrossUnsafeHeadAdvanceRetries) - - // Wait and cross check the supervisor cross unsafe heads to advance on both chains - err := wait.For(t.Ctx(), 5*time.Second, func() (bool, error) { - latestSupervisorStatus := out.Supervisor.FetchSyncStatus() - return latestSupervisorStatus.Chains[l2aChainID].LocalUnsafe.Number > supervisorStatus.Chains[l2aChainID].LocalUnsafe.Number && - latestSupervisorStatus.Chains[l2bChainID].LocalUnsafe.Number >= supervisorStatus.Chains[l2bChainID].LocalUnsafe.Number, nil - }) - t.Require().NoError(err) - // Wait and check if the cross unsafe head has advanced on L2A - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLA.SyncStatus() - return status.CrossUnsafeL2.Number > supervisorStatus.Chains[l2aChainID].CrossUnsafe.Number, nil - }) - t.Require().NoError(err) - // Wait and check if the cross unsafe head has advanced on L2B - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLB.SyncStatus() - return status.CrossUnsafeL2.Number > supervisorStatus.Chains[l2bChainID].CrossUnsafe.Number, nil - }) - - t.Require().NoError(err) -} - -func TestLocalSafeHeadAdvancing(gt *testing.T) { - t := devtest.SerialT(gt) - - out := presets.NewSimpleInterop(t) - l2aChainID := out.L2CLA.ChainID() - l2bChainID := out.L2CLB.ChainID() - - supervisorStatus := out.Supervisor.FetchSyncStatus() - - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainA.ChainID(), 1, "local-safe", LocalSafeHeadAdvanceRetries) - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainB.ChainID(), 1, "local-safe", LocalSafeHeadAdvanceRetries) - - // Wait and check if the local safe head has advanced on L2A - err := wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLA.SyncStatus() - return status.LocalSafeL2.Number > supervisorStatus.Chains[l2aChainID].LocalSafe.Number, nil - }) - 
t.Require().NoError(err) - // Wait and check if the local safe head has advanced on L2B - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLB.SyncStatus() - return status.LocalSafeL2.Number > supervisorStatus.Chains[l2bChainID].LocalSafe.Number, nil - }) - t.Require().NoError(err) - // Wait and cross check the supervisor local safe heads to advance on both chains - err = wait.For(t.Ctx(), 5*time.Second, func() (bool, error) { - latestSupervisorStatus := out.Supervisor.FetchSyncStatus() - return latestSupervisorStatus.Chains[l2aChainID].LocalSafe.Number > supervisorStatus.Chains[l2aChainID].LocalSafe.Number && - latestSupervisorStatus.Chains[l2bChainID].LocalSafe.Number >= supervisorStatus.Chains[l2bChainID].LocalSafe.Number, nil - }) - t.Require().NoError(err) -} - -func TestCrossSafeHeadAdvancing(gt *testing.T) { - t := devtest.SerialT(gt) - - out := presets.NewSimpleInterop(t) - l2aChainID := out.L2CLA.ChainID() - l2bChainID := out.L2CLB.ChainID() - - supervisorStatus := out.Supervisor.FetchSyncStatus() - - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainA.ChainID(), 1, "safe", SafeHeadAdvanceRetries) - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainB.ChainID(), 1, "safe", SafeHeadAdvanceRetries) - - // Wait and cross check the supervisor cross safe heads to advance on both chains - err := wait.For(t.Ctx(), 5*time.Second, func() (bool, error) { - latestSupervisorStatus := out.Supervisor.FetchSyncStatus() - return latestSupervisorStatus.Chains[l2aChainID].CrossSafe.Number > supervisorStatus.Chains[l2aChainID].CrossSafe.Number && - latestSupervisorStatus.Chains[l2bChainID].CrossSafe.Number >= supervisorStatus.Chains[l2bChainID].CrossSafe.Number, nil - }) - t.Require().NoError(err) - // Wait and check if the cross safe head has advanced on L2A - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLA.SyncStatus() - return status.SafeL2.Number > supervisorStatus.Chains[l2aChainID].CrossSafe.Number, nil - 
}) - t.Require().NoError(err) - // Wait and check if the cross safe head has advanced on L2B - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLB.SyncStatus() - return status.SafeL2.Number > supervisorStatus.Chains[l2bChainID].CrossSafe.Number, nil - }) - - t.Require().NoError(err) -} - -func TestMinSyncedL1Advancing(gt *testing.T) { - t := devtest.SerialT(gt) - - out := presets.NewSimpleInterop(t) - supervisorStatus := out.Supervisor.FetchSyncStatus() - - out.Supervisor.AwaitMinL1(supervisorStatus.MinSyncedL1.Number + 1) - - // Wait and check if the currentL1 head has advanced on L2A - err := wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLA.SyncStatus() - return status.CurrentL1.Number > supervisorStatus.MinSyncedL1.Number, nil - }) - t.Require().NoError(err) - // Wait and check if the currentL1 head has advanced on L2B - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLB.SyncStatus() - return status.CurrentL1.Number > supervisorStatus.MinSyncedL1.Number, nil - }) - t.Require().NoError(err) - // Wait and check if the min synced L1 has advanced - err = wait.For(t.Ctx(), 5*time.Second, func() (bool, error) { - latestSupervisorStatus := out.Supervisor.FetchSyncStatus() - return latestSupervisorStatus.MinSyncedL1.Number > supervisorStatus.MinSyncedL1.Number, nil - }) - t.Require().NoError(err) -} - -func TestFinalizedHeadAdvancing(gt *testing.T) { - t := devtest.SerialT(gt) - - out := presets.NewSimpleInterop(t) - l2aChainID := out.L2CLA.ChainID() - l2bChainID := out.L2CLB.ChainID() - - supervisorStatus := out.Supervisor.FetchSyncStatus() - - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainA.ChainID(), 1, "finalized", FinalizedHeadAdvanceRetries) - out.Supervisor.WaitForL2HeadToAdvance(out.L2ChainB.ChainID(), 1, "finalized", FinalizedHeadAdvanceRetries) - - // Wait and cross check the supervisor finalized heads to advance on both chains - err := wait.For(t.Ctx(), 
5*time.Second, func() (bool, error) { - latestSupervisorStatus := out.Supervisor.FetchSyncStatus() - return latestSupervisorStatus.Chains[l2aChainID].Finalized.Number > supervisorStatus.Chains[l2aChainID].Finalized.Number && - latestSupervisorStatus.Chains[l2bChainID].Finalized.Number >= supervisorStatus.Chains[l2bChainID].Finalized.Number, nil - }) - t.Require().NoError(err) - // Wait and check if the finalized head has advanced on L2A - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLA.SyncStatus() - return status.FinalizedL1.Time > supervisorStatus.FinalizedTimestamp && - status.FinalizedL2.Number > supervisorStatus.Chains[l2aChainID].Finalized.Number, nil - }) - t.Require().NoError(err) - // Wait and check if the finalized head has advanced on L2B - err = wait.For(t.Ctx(), 2*time.Second, func() (bool, error) { - status := out.L2CLB.SyncStatus() - return status.FinalizedL1.Time > supervisorStatus.FinalizedTimestamp && - status.FinalizedL2.Number > supervisorStatus.Chains[l2bChainID].Finalized.Number, nil - }) - t.Require().NoError(err) -} - -func TestDerivationPipeline(gt *testing.T) { - t := devtest.SerialT(gt) - - out := presets.NewSimpleInterop(t) - l2BlockHead := out.Supervisor.L2HeadBlockID(out.L2ChainA.ChainID(), "local-safe") - - // Get current L1 at which L2 is at and wait for new L1 to be synced in supervisor. - current_l1_at_l2 := out.L2CLA.SyncStatus().CurrentL1 - out.Supervisor.AwaitMinL1(current_l1_at_l2.Number + 1) - new_l1 := out.Supervisor.FetchSyncStatus().MinSyncedL1 - - t.Require().NotEqual(current_l1_at_l2.Hash, new_l1.Hash) - t.Require().Greater(new_l1.Number, current_l1_at_l2.Number) - - // Wait for the L2 chain to sync to the new L1 block. 
- err := wait.For(t.Ctx(), 5*time.Second, func() (bool, error) { - new_l1_at_l2 := out.L2CLA.SyncStatus().CurrentL1 - return new_l1_at_l2.Number >= new_l1.Number, nil - }) - t.Require().NoError(err) - - new_l2BlockHead := out.Supervisor.L2HeadBlockID(out.L2ChainA.ChainID(), "local-safe") - t.Require().Greater(new_l2BlockHead.Number, l2BlockHead.Number) -} diff --git a/rust/kona/tests/supervisor/utils/builder.go b/rust/kona/tests/supervisor/utils/builder.go deleted file mode 100644 index 382d059c5db27..0000000000000 --- a/rust/kona/tests/supervisor/utils/builder.go +++ /dev/null @@ -1,353 +0,0 @@ -package utils - -import ( - "bytes" - "context" - "crypto/hmac" - "crypto/sha256" - "encoding/base64" - "encoding/binary" - "encoding/hex" - "encoding/json" - "fmt" - "io" - "math/big" - "math/rand" - "net/http" - "strings" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - opeth "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testutils" - "github.com/ethereum/go-ethereum/beacon/engine" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/rpc" -) - -type rpcRequest struct { - Jsonrpc string `json:"jsonrpc"` - Method string `json:"method"` - Params interface{} `json:"params"` - ID int `json:"id"` -} - -type rpcResponse struct { - Jsonrpc string `json:"jsonrpc"` - ID int `json:"id"` - Result json.RawMessage `json:"result"` - Error *rpcError `json:"error,omitempty"` -} - -type rpcError struct { - Code int `json:"code"` - Message string `json:"message"` -} - -type TestBlockBuilderConfig struct { - safeBlockDistance uint64 - finalizedBlockDistance uint64 - - GethRPC string - - EngineRPC string - JWTSecret string -} - -type TestBlockBuilder struct { - t devtest.CommonT - - withdrawalsIndex uint64 - - cfg TestBlockBuilderConfig - ethClient 
*ethclient.Client -} - -func NewTestBlockBuilder(t devtest.CommonT, cfg TestBlockBuilderConfig) *TestBlockBuilder { - ethClient, err := ethclient.Dial(cfg.GethRPC) - if err != nil { - t.Errorf("failed to connect to Geth RPC: %v", err) - return nil - } - - return &TestBlockBuilder{t, 1001, cfg, ethClient} -} - -func createJWT(secret []byte) (string, error) { - // try to decode hex string (support "0x..." or plain hex), fall back to raw bytes - secretStr := string(secret) - secretStr = strings.TrimPrefix(secretStr, "0x") - key, err := hex.DecodeString(secretStr) - if err != nil { - key = secret - } - - // typos:disable - header := base64.RawURLEncoding.EncodeToString([]byte(`{"alg":"HS256","typ":"JWT"}`)) - // typos:enable - payload := fmt.Sprintf(`{"iat":%d}`, time.Now().Unix()) - payloadEnc := base64.RawURLEncoding.EncodeToString([]byte(payload)) - toSign := header + "." + payloadEnc - h := hmac.New(sha256.New, key) - h.Write([]byte(toSign)) - sig := base64.RawURLEncoding.EncodeToString(h.Sum(nil)) - return toSign + "." 
+ sig, nil -} - -func (s *TestBlockBuilder) rpcCallWithJWT(url, method string, params interface{}) (*rpcResponse, error) { - reqBody, _ := json.Marshal(rpcRequest{Jsonrpc: "2.0", Method: method, Params: params, ID: 1}) - req, _ := http.NewRequest("POST", url, bytes.NewBuffer(reqBody)) - req.Header.Set("Content-Type", "application/json") - - // Create JWT token - jwtToken, err := createJWT([]byte(s.cfg.JWTSecret)) - if err != nil { - return nil, fmt.Errorf("failed to create JWT: %w", err) - } - req.Header.Set("Authorization", "Bearer "+jwtToken) - - client := &http.Client{} - resp, err := client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - bodyBytes, _ := io.ReadAll(resp.Body) - - // Non-200 -> surface the body for debugging - if resp.StatusCode != http.StatusOK { - return nil, fmt.Errorf("non-200 response %d from %s: %s", resp.StatusCode, url, string(bodyBytes)) - } - - var rpcResp rpcResponse - if err := json.Unmarshal(bodyBytes, &rpcResp); err != nil { - // include raw body to help debugging the bad payload - return nil, fmt.Errorf("failed to parse RPC response: %w; body: %s", err, string(bodyBytes)) - } - if rpcResp.Error != nil { - return nil, fmt.Errorf("RPC error: %s", rpcResp.Error.Message) - } - return &rpcResp, nil -} - -func (s *TestBlockBuilder) rpcCall(url, method string, params interface{}) (*rpcResponse, error) { - return s.rpcCallWithJWT(url, method, params) -} - -func (s *TestBlockBuilder) rewindTo(ctx context.Context, blockHash common.Hash) (*types.Block, error) { - s.t.Logf("Rewinding to block %s", blockHash.Hex()) - - block, err := s.ethClient.BlockByHash(ctx, blockHash) - if err != nil { - s.t.Errorf("failed to fetch block by hash %s: %v", blockHash.Hex(), err) - return nil, fmt.Errorf("failed to fetch block by hash: %w", err) - } - - // Attempt rewind using debug_setHead - _, err = s.rpcCall(s.cfg.GethRPC, "debug_setHead", []interface{}{fmt.Sprintf("0x%x", block.NumberU64())}) - if err != nil { - 
s.t.Errorf("failed to rewind to block %s: %v", blockHash.Hex(), err) - return nil, fmt.Errorf("rewind failed: %w", err) - } - - // Confirm head matches requested parent - head, err := s.ethClient.BlockByNumber(ctx, big.NewInt(int64(rpc.LatestBlockNumber))) - if err != nil { - s.t.Errorf("failed to fetch latest block: %w", err) - return nil, fmt.Errorf("failed to fetch latest block: %w", err) - } - - if head.Hash() != blockHash { - s.t.Errorf("head mismatch after rewind: expected %s, got %s", blockHash.Hex(), head.Hash().Hex()) - return nil, fmt.Errorf("head mismatch after rewind") - } - - s.t.Logf("Successfully rewound to block %s", blockHash.Hex()) - return block, nil -} - -func (s *TestBlockBuilder) BuildBlock(ctx context.Context, parentHash *common.Hash) { - var head *types.Block - var err error - if parentHash != nil { - head, err = s.rewindTo(ctx, *parentHash) - if err != nil { - s.t.Errorf("failed to rewind to parent block: %v", err) - return - } - } else { - head, err = s.ethClient.BlockByNumber(ctx, big.NewInt(int64(rpc.LatestBlockNumber))) - if err != nil { - s.t.Errorf("failed to fetch latest block: %v", err) - return - } - } - - finalizedBlock, _ := s.ethClient.BlockByNumber(ctx, big.NewInt(rpc.FinalizedBlockNumber.Int64())) - if finalizedBlock == nil { - // set sb to genesis if safe block is not set - finalizedBlock, err = s.ethClient.BlockByNumber(ctx, big.NewInt(0)) - if err != nil { - s.t.Errorf("failed to fetch genesis block: %v", err) - return - } - } - - // progress finalised block - if head.NumberU64() > uint64(s.cfg.finalizedBlockDistance) { - finalizedBlock, err = s.ethClient.BlockByNumber(ctx, big.NewInt(int64(head.NumberU64()-s.cfg.finalizedBlockDistance))) - if err != nil { - s.t.Errorf("failed to fetch safe block: %v", err) - return - } - } - - safeBlock, _ := s.ethClient.BlockByNumber(ctx, big.NewInt(rpc.SafeBlockNumber.Int64())) - if safeBlock == nil { - safeBlock = finalizedBlock - } - - // progress safe block - if head.NumberU64() > 
uint64(s.cfg.safeBlockDistance) { - safeBlock, err = s.ethClient.BlockByNumber(ctx, big.NewInt(int64(head.NumberU64()-s.cfg.safeBlockDistance))) - if err != nil { - s.t.Errorf("failed to fetch safe block: %v", err) - return - } - } - - fcState := engine.ForkchoiceStateV1{ - HeadBlockHash: head.Hash(), - SafeBlockHash: safeBlock.Hash(), - FinalizedBlockHash: finalizedBlock.Hash(), - } - - newBlockTimestamp := head.Time() + 6 - nonce := time.Now().UnixNano() - var nonceBytes [8]byte - binary.LittleEndian.PutUint64(nonceBytes[:], uint64(nonce)) - randomHash := crypto.Keccak256Hash(nonceBytes[:]) - payloadAttrs := engine.PayloadAttributes{ - Timestamp: uint64(newBlockTimestamp), - Random: randomHash, - SuggestedFeeRecipient: head.Coinbase(), - Withdrawals: randomWithdrawals(s.withdrawalsIndex), - BeaconRoot: fakeBeaconBlockRoot(uint64(head.Time())), - } - - // Start payload build - fcResp, err := s.rpcCallWithJWT(s.cfg.EngineRPC, "engine_forkchoiceUpdatedV3", - []interface{}{fcState, payloadAttrs}) - if err != nil { - s.t.Errorf("forkchoiceUpdated failed: %v", err) - return - } - - var fcResult engine.ForkChoiceResponse - err = json.Unmarshal(fcResp.Result, &fcResult) - if err != nil { - s.t.Errorf("failed to unmarshal forkchoiceUpdated response: %v", err) - return - } - if fcResult.PayloadStatus.Status != "VALID" && fcResult.PayloadStatus.Status != "SYNCING" { - s.t.Errorf("forkchoiceUpdated returned invalid status: %s", fcResult.PayloadStatus.Status) - return - } - - if fcResult.PayloadID == nil { - s.t.Errorf("forkchoiceUpdated did not return a payload ID") - return - } - - time.Sleep(150 * time.Millisecond) - - // Get payload - plResp, err := s.rpcCallWithJWT(s.cfg.EngineRPC, "engine_getPayloadV3", []interface{}{fcResult.PayloadID}) - if err != nil { - s.t.Errorf("getPayload failed: %v", err) - return - } - - var envelope engine.ExecutionPayloadEnvelope - err = json.Unmarshal(plResp.Result, &envelope) - if err != nil { - s.t.Errorf("failed to unmarshal getPayload 
response: %v", err) - return - } - if envelope.ExecutionPayload == nil { - s.t.Errorf("getPayload returned empty execution payload") - return - } - - blobHashes := make([]common.Hash, 0) - if envelope.BlobsBundle != nil { - for _, commitment := range envelope.BlobsBundle.Commitments { - if len(commitment) != 48 { - break - } - blobHashes = append(blobHashes, opeth.KZGToVersionedHash(*(*[48]byte)(commitment))) - } - if len(blobHashes) != len(envelope.BlobsBundle.Commitments) { - s.t.Errorf("blob hashes length mismatch: expected %d, got %d", len(envelope.BlobsBundle.Commitments), len(blobHashes)) - return - } - } - - // Insert - newPayloadResp, err := s.rpcCallWithJWT(s.cfg.EngineRPC, "engine_newPayloadV3", []interface{}{envelope.ExecutionPayload, blobHashes, payloadAttrs.BeaconRoot}) - if err != nil { - s.t.Errorf("newPayload failed: %v", err) - return - } - - var npRes engine.PayloadStatusV1 - err = json.Unmarshal(newPayloadResp.Result, &npRes) - if err != nil { - s.t.Errorf("failed to unmarshal newPayload response: %v", err) - return - } - if npRes.Status != "VALID" && npRes.Status != "ACCEPTED" { - s.t.Errorf("newPayload returned invalid status: %s", npRes.Status) - return - } - - // Update forkchoice - updateFc := engine.ForkchoiceStateV1{ - HeadBlockHash: envelope.ExecutionPayload.BlockHash, - SafeBlockHash: safeBlock.Hash(), - FinalizedBlockHash: finalizedBlock.Hash(), - } - _, err = s.rpcCallWithJWT(s.cfg.EngineRPC, "engine_forkchoiceUpdatedV3", []interface{}{updateFc, nil}) - if err != nil { - s.t.Errorf("forkchoiceUpdated failed after newPayload: %v", err) - return - } - - s.withdrawalsIndex += uint64(len(envelope.ExecutionPayload.Withdrawals)) - - s.t.Logf("Successfully built block %s:%d at timestamp %d", envelope.ExecutionPayload.BlockHash.Hex(), envelope.ExecutionPayload.Number, newBlockTimestamp) -} - -func fakeBeaconBlockRoot(time uint64) *common.Hash { - var dat [8]byte - binary.LittleEndian.PutUint64(dat[:], time) - hash := 
crypto.Keccak256Hash(dat[:]) - return &hash -} - -func randomWithdrawals(startIndex uint64) []*types.Withdrawal { - r := rand.New(rand.NewSource(time.Now().UnixNano())) - withdrawals := make([]*types.Withdrawal, r.Intn(4)) - for i := 0; i < len(withdrawals); i++ { - withdrawals[i] = &types.Withdrawal{ - Index: startIndex + uint64(i), - Validator: r.Uint64() % 100_000_000, // 100 million fake validators - Address: testutils.RandomAddress(r), - Amount: uint64(r.Intn(50_000_000_000) + 1), - } - } - return withdrawals -} diff --git a/rust/kona/tests/supervisor/utils/pos.go b/rust/kona/tests/supervisor/utils/pos.go deleted file mode 100644 index cbcfa1177e55c..0000000000000 --- a/rust/kona/tests/supervisor/utils/pos.go +++ /dev/null @@ -1,82 +0,0 @@ -package utils - -import ( - "context" - "math/big" - "sync" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/rpc" -) - -type TestPOS struct { - t devtest.CommonT - - ethClient *ethclient.Client - blockBuilder *TestBlockBuilder - - // background management - ctx context.Context - cancel context.CancelFunc - wg sync.WaitGroup -} - -func NewTestPOS(t devtest.CommonT, rpcURL string, blockBuilder *TestBlockBuilder) *TestPOS { - ethClient, err := ethclient.Dial(rpcURL) - if err != nil { - t.Errorf("failed to connect to RPC: %v", err) - return nil - } - - return &TestPOS{t: t, ethClient: ethClient, blockBuilder: blockBuilder} -} - -// Starts a background process to build blocks -func (p *TestPOS) Start() error { - p.t.Log("Starting sequential block builder") - // already started - if p.ctx != nil { - return nil - } - - p.ctx, p.cancel = context.WithCancel(context.Background()) - p.wg.Add(1) - - go func() { - defer p.wg.Done() - ticker := time.NewTicker(time.Second * 5) - defer ticker.Stop() - - for { - select { - case <-p.ctx.Done(): - return - case <-ticker.C: - _, err := p.ethClient.BlockByNumber(p.ctx, 
big.NewInt(rpc.LatestBlockNumber.Int64())) - if err != nil { - p.t.Errorf("failed to fetch latest block: %v", err) - } - - // Build a new block - p.blockBuilder.BuildBlock(p.ctx, nil) - } - } - }() - - return nil -} - -// Stops the background process -func (p *TestPOS) Stop() { - // cancel the context to signal the goroutine to exit - if p.cancel != nil { - p.cancel() - p.cancel = nil - } - // wait for goroutine to finish - p.wg.Wait() - // clear the context to mark stopped - p.ctx = nil -} diff --git a/rust/kona/tests/supervisor/utils/reorg.go b/rust/kona/tests/supervisor/utils/reorg.go deleted file mode 100644 index b53da9bd10a32..0000000000000 --- a/rust/kona/tests/supervisor/utils/reorg.go +++ /dev/null @@ -1,116 +0,0 @@ -package utils - -import ( - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" -) - -type TestReorgManager struct { - t devtest.CommonT - env *env.DevnetEnv - blockBuilder *TestBlockBuilder - pos *TestPOS -} - -func NewTestReorgManager(t devtest.CommonT) *TestReorgManager { - url := os.Getenv(env.EnvURLVar) - if url == "" { - t.Errorf("environment variable %s is not set", env.EnvURLVar) - return nil - } - - env, err := env.LoadDevnetFromURL(url) - if err != nil { - t.Errorf("failed to load devnet environment from URL %s: %v", url, err) - return nil - } - - var engineURL, rpcURL string - for _, node := range env.Env.L1.Nodes { - el, ok := node.Services["el"] - if !ok { - continue - } - - engine, ok := el.Endpoints["engine-rpc"] - if !ok { - continue - } - - rpc, ok := el.Endpoints["rpc"] - if !ok { - continue - } - - engineURL = fmt.Sprintf("http://%s:%d", engine.Host, engine.Port) - rpcURL = fmt.Sprintf("http://%s:%d", rpc.Host, rpc.Port) - break - } - - if engineURL == "" || rpcURL == "" { - t.Errorf("could not find engine or RPC endpoints in the devnet environment") - return nil - } - - blockBuilder := NewTestBlockBuilder(t, TestBlockBuilderConfig{ - 
GethRPC: rpcURL, - EngineRPC: engineURL, - JWTSecret: env.Env.L1.JWT, - safeBlockDistance: 10, - finalizedBlockDistance: 20, - }) - - pos := NewTestPOS(t, rpcURL, blockBuilder) - return &TestReorgManager{t, env, blockBuilder, pos} -} - -func (m *TestReorgManager) StopL1CL() { - m.t.Log("Stopping L1 CL services") - - panic("not implemented. TODO(op-rs/kona#3174): implement this `https://github.com/op-rs/kona/issues/3174`") - - // kurtosisCtx, err := kurtosis_context.NewKurtosisContextFromLocalEngine() - // if err != nil { - // m.t.Errorf("failed to create kurtosis context: %v", err) - // return - // } - - // // Use a bounded context to avoid hanging tests if Kurtosis call stalls. - // ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second) - // defer cancel() - // enclaveCtx, err := kurtosisCtx.GetEnclaveContext(ctx, m.env.Env.Name) - // if err != nil { - // m.t.Errorf("failed to get enclave context: %v", err) - // return - // } - - // for _, node := range m.env.Env.L1.Nodes { - // cl, ok := node.Services["cl"] - // if !ok { - // continue - // } - - // svcCtx, err := enclaveCtx.GetServiceContext(cl.Name) - // if err != nil { - // m.t.Errorf("failed to get service context for %s: %v", cl.Name, err) - // return - // } - - // _, _, err = svcCtx.ExecCommand([]string{"sh", "-c", "kill 1"}) - // if err != nil { - // m.t.Errorf("failed to stop service %s: %v", cl.Name, err) - // return - // } - // } -} - -func (m *TestReorgManager) GetBlockBuilder() *TestBlockBuilder { - return m.blockBuilder -} - -func (m *TestReorgManager) GetPOS() *TestPOS { - return m.pos -} From 319a1a31c9a8820d0f4fb025ee832b008628d606 Mon Sep 17 00:00:00 2001 From: Maurelian Date: Mon, 9 Mar 2026 14:58:15 -0400 Subject: [PATCH 080/201] feat(kona): add Karst hardfork support (#19372) * feat(kona): add Karst hardfork support Add Karst between Jovian and Interop in the fork ordering. 
Karst has an empty NUT bundle and requires empty activation blocks, matching the Go implementation in op-node (PR #19250). Co-Authored-By: Claude Opus 4.6 * fix(kona): chain Karst to Interop in op_fork_activation When karst_time is None, op_fork_activation should chain to Interop rather than returning Never. This maintains consistency between the is_karst_active() method and op_fork_activation(). Co-Authored-By: Claude Opus 4.6 * fix(kona): don't chain Karst to Interop in op_fork_activation Revert ebd614d. Interop is an independent fork that doesn't participate in the sequential activation chain. Karst is the terminal fork before Interop, matching how Jovian was terminal before Karst was added. Co-Authored-By: Claude Opus 4.6 * fix rust formatting * Chain karst activation to interop * fix(kona): address Karst review feedback * fix(kona): add missing karst_time to registry test configs and fix doc link --------- Co-authored-by: Claude Opus 4.6 --- rust/alloy-op-hardforks/src/lib.rs | 17 +++++- rust/kona/bin/node/src/flags/overrides.rs | 12 +++- .../service/src/actors/sequencer/actor.rs | 7 +++ .../derive/src/attributes/stateful.rs | 5 ++ .../protocol/genesis/src/chain/hardfork.rs | 14 ++++- .../crates/protocol/genesis/src/rollup.rs | 55 +++++++++++++++++-- .../protocol/genesis/src/superchain/chain.rs | 1 + .../protocol/genesis/src/superchain/chains.rs | 1 + .../protocol/genesis/src/superchain/config.rs | 2 + .../crates/protocol/hardforks/src/forks.rs | 8 ++- .../crates/protocol/hardforks/src/karst.rs | 34 ++++++++++++ .../kona/crates/protocol/hardforks/src/lib.rs | 3 + .../protocol/protocol/src/batch/single.rs | 6 +- .../registry/src/test_utils/base_mainnet.rs | 1 + .../registry/src/test_utils/base_sepolia.rs | 1 + .../registry/src/test_utils/op_mainnet.rs | 1 + .../registry/src/test_utils/op_sepolia.rs | 1 + .../utilities/cli/src/flags/overrides.rs | 12 +++- 18 files changed, 163 insertions(+), 18 deletions(-) create mode 100644 
rust/kona/crates/protocol/hardforks/src/karst.rs diff --git a/rust/alloy-op-hardforks/src/lib.rs b/rust/alloy-op-hardforks/src/lib.rs index c4dffbcde530b..756ed70e4263a 100644 --- a/rust/alloy-op-hardforks/src/lib.rs +++ b/rust/alloy-op-hardforks/src/lib.rs @@ -44,10 +44,12 @@ hardfork!( /// Holocene: Holocene, /// Isthmus: - #[default] Isthmus, /// Jovian: + #[default] Jovian, + /// Karst: + Karst, /// TODO: add interop hardfork overview when available Interop, } @@ -242,6 +244,12 @@ pub trait OpHardforks: EthereumHardforks { self.op_fork_activation(OpHardfork::Jovian).active_at_timestamp(timestamp) } + /// Returns `true` if [`Karst`](OpHardfork::Karst) is active at given block + /// timestamp. + fn is_karst_active_at_timestamp(&self, timestamp: u64) -> bool { + self.op_fork_activation(OpHardfork::Karst).active_at_timestamp(timestamp) + } + /// Returns `true` if [`Interop`](OpHardfork::Interop) is active at given block /// timestamp. fn is_interop_active_at_timestamp(&self, timestamp: u64) -> bool { @@ -341,7 +349,8 @@ impl Index for OpChainHardforks { fn index(&self, hf: OpHardfork) -> &Self::Output { use OpHardfork::{ - Bedrock, Canyon, Ecotone, Fjord, Granite, Holocene, Interop, Isthmus, Jovian, Regolith, + Bedrock, Canyon, Ecotone, Fjord, Granite, Holocene, Interop, Isthmus, Jovian, Karst, + Regolith, }; match hf { @@ -354,6 +363,7 @@ impl Index for OpChainHardforks { Holocene => &self.forks[Holocene.idx()].1, Isthmus => &self.forks[Isthmus.idx()].1, Jovian => &self.forks[Jovian.idx()].1, + Karst => &self.forks[Karst.idx()].1, Interop => &self.forks[Interop.idx()].1, } } @@ -406,7 +416,7 @@ mod tests { fn check_op_hardfork_from_str() { let hardfork_str = [ "beDrOck", "rEgOlITH", "cAnYoN", "eCoToNe", "FJorD", "GRaNiTe", "hOlOcEnE", "isthMUS", - "jOvIaN", "inTerOP", + "jOvIaN", "kArSt", "inTerOP", ]; let expected_hardforks = [ OpHardfork::Bedrock, @@ -418,6 +428,7 @@ mod tests { OpHardfork::Holocene, OpHardfork::Isthmus, OpHardfork::Jovian, + OpHardfork::Karst, 
OpHardfork::Interop, ]; diff --git a/rust/kona/bin/node/src/flags/overrides.rs b/rust/kona/bin/node/src/flags/overrides.rs index 15305850b4e8e..48f03c7ecd094 100644 --- a/rust/kona/bin/node/src/flags/overrides.rs +++ b/rust/kona/bin/node/src/flags/overrides.rs @@ -30,6 +30,9 @@ pub struct OverrideArgs { /// Manually specify the timestamp for the Jovian fork, overriding the bundled setting. #[arg(long, env = "KONA_NODE_OVERRIDE_JOVIAN")] pub jovian_override: Option, + /// Manually specify the timestamp for the Karst fork, overriding the bundled setting. + #[arg(long, env = "KONA_NODE_OVERRIDE_KARST")] + pub karst_override: Option, /// Manually specify the timestamp for the pectra blob schedule, overriding the bundled /// setting. #[arg(long, env = "KONA_NODE_OVERRIDE_PECTRA_BLOB_SCHEDULE")] @@ -67,6 +70,7 @@ impl OverrideArgs { .unwrap_or(config.hardforks.pectra_blob_schedule_time), isthmus_time: self.isthmus_override.map(Some).unwrap_or(config.hardforks.isthmus_time), jovian_time: self.jovian_override.map(Some).unwrap_or(config.hardforks.jovian_time), + karst_time: self.karst_override.map(Some).unwrap_or(config.hardforks.karst_time), interop_time: self.interop_override.map(Some).unwrap_or(config.hardforks.interop_time), }; RollupConfig { hardforks, ..config } @@ -108,8 +112,10 @@ mod tests { "1740000000", "--jovian-override", "1745000001", - "--interop-override", + "--karst-override", "1750000000", + "--interop-override", + "1755000000", ]); let config = RollupConfig::default(); let updated_config = args.override_flags.apply(config); @@ -126,7 +132,8 @@ mod tests { pectra_blob_schedule_time: Some(1745000000), isthmus_time: Some(1740000000), jovian_time: Some(1745000001), - interop_time: Some(1750000000), + karst_time: Some(1750000000), + interop_time: Some(1755000000), } ); } @@ -159,6 +166,7 @@ mod tests { pectra_blob_schedule_override: None, isthmus_override: None, jovian_override: None, + karst_override: None, interop_override: None, } ); diff --git 
a/rust/kona/crates/node/service/src/actors/sequencer/actor.rs b/rust/kona/crates/node/service/src/actors/sequencer/actor.rs index 194766cf01d18..0722c031683db 100644 --- a/rust/kona/crates/node/service/src/actors/sequencer/actor.rs +++ b/rust/kona/crates/node/service/src/actors/sequencer/actor.rs @@ -345,6 +345,13 @@ where return false; } + // Do not include transactions in the first Karst block. + // See: `` + if self.rollup_config.is_first_karst_block(attributes.payload_attributes.timestamp) { + info!(target: "sequencer", "Sequencing karst upgrade block"); + return false; + } + // Do not include transactions in the first Interop block. if self.rollup_config.is_first_interop_block(attributes.payload_attributes.timestamp) { info!(target: "sequencer", "Sequencing interop upgrade block"); diff --git a/rust/kona/crates/protocol/derive/src/attributes/stateful.rs b/rust/kona/crates/protocol/derive/src/attributes/stateful.rs index 8a9f9b744379e..82717f566ffcc 100644 --- a/rust/kona/crates/protocol/derive/src/attributes/stateful.rs +++ b/rust/kona/crates/protocol/derive/src/attributes/stateful.rs @@ -161,6 +161,11 @@ where { upgrade_transactions.append(&mut Hardforks::JOVIAN.txs().collect()); } + if self.rollup_cfg.is_karst_active(next_l2_time) && + !self.rollup_cfg.is_karst_active(l2_parent.block_info.timestamp) + { + upgrade_transactions.append(&mut Hardforks::KARST.txs().collect()); + } if self.rollup_cfg.is_interop_active(next_l2_time) && !self.rollup_cfg.is_interop_active(l2_parent.block_info.timestamp) { diff --git a/rust/kona/crates/protocol/genesis/src/chain/hardfork.rs b/rust/kona/crates/protocol/genesis/src/chain/hardfork.rs index 7bd2e2bf00189..58a712efd0500 100644 --- a/rust/kona/crates/protocol/genesis/src/chain/hardfork.rs +++ b/rust/kona/crates/protocol/genesis/src/chain/hardfork.rs @@ -67,6 +67,11 @@ pub struct HardForkConfig { /// otherwise. 
#[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] pub jovian_time: Option, + /// `karst_time` sets the activation time for the Karst network upgrade. + /// Active if `karst_time` != None && L2 block timestamp >= `Some(karst_time)`, inactive + /// otherwise. + #[cfg_attr(feature = "serde", serde(skip_serializing_if = "Option::is_none"))] + pub karst_time: Option, /// `interop_time` sets the activation time for the Interop network upgrade. /// Active if `interop_time` != None && L2 block timestamp >= `Some(interop_time)`, inactive /// otherwise. @@ -103,6 +108,7 @@ impl HardForkConfig { ("Pectra Blob Schedule", self.pectra_blob_schedule_time), ("Isthmus", self.isthmus_time), ("Jovian", self.jovian_time), + ("Karst", self.karst_time), ("Interop", self.interop_time), ] .into_iter() @@ -138,6 +144,7 @@ mod tests { pectra_blob_schedule_time: None, isthmus_time: None, jovian_time: None, + karst_time: None, interop_time: None, }; @@ -185,6 +192,7 @@ mod tests { pectra_blob_schedule_time: None, isthmus_time: None, jovian_time: None, + karst_time: None, interop_time: None, }; @@ -219,7 +227,8 @@ mod tests { pectra_blob_schedule_time: Some(8), isthmus_time: Some(9), jovian_time: Some(10), - interop_time: Some(11), + karst_time: Some(11), + interop_time: Some(12), }; let mut iter = hardforks.iter(); @@ -233,7 +242,8 @@ mod tests { assert_eq!(iter.next(), Some(("Pectra Blob Schedule", Some(8)))); assert_eq!(iter.next(), Some(("Isthmus", Some(9)))); assert_eq!(iter.next(), Some(("Jovian", Some(10)))); - assert_eq!(iter.next(), Some(("Interop", Some(11)))); + assert_eq!(iter.next(), Some(("Karst", Some(11)))); + assert_eq!(iter.next(), Some(("Interop", Some(12)))); assert_eq!(iter.next(), None); } } diff --git a/rust/kona/crates/protocol/genesis/src/rollup.rs b/rust/kona/crates/protocol/genesis/src/rollup.rs index 8691197f5cd87..ff8ff646f78fa 100644 --- a/rust/kona/crates/protocol/genesis/src/rollup.rs +++ 
b/rust/kona/crates/protocol/genesis/src/rollup.rs @@ -302,7 +302,7 @@ impl RollupConfig { /// Returns true if Jovian is active at the given timestamp. pub fn is_jovian_active(&self, timestamp: u64) -> bool { self.hardforks.jovian_time.is_some_and(|t| timestamp >= t) || - self.is_interop_active(timestamp) + self.is_karst_active(timestamp) } /// Returns true if the timestamp marks the first Jovian block. @@ -311,6 +311,18 @@ impl RollupConfig { !self.is_jovian_active(timestamp.saturating_sub(self.block_time)) } + /// Returns true if Karst is active at the given timestamp. + pub fn is_karst_active(&self, timestamp: u64) -> bool { + self.hardforks.karst_time.is_some_and(|t| timestamp >= t) || + self.is_interop_active(timestamp) + } + + /// Returns true if the timestamp marks the first Karst block. + pub fn is_first_karst_block(&self, timestamp: u64) -> bool { + self.is_karst_active(timestamp) && + !self.is_karst_active(timestamp.saturating_sub(self.block_time)) + } + /// Returns true if Interop is active at the given timestamp. 
pub fn is_interop_active(&self, timestamp: u64) -> bool { self.hardforks.interop_time.is_some_and(|t| timestamp >= t) @@ -460,7 +472,12 @@ impl OpHardforks for RollupConfig { .hardforks .jovian_time .map(ForkCondition::Timestamp) - .unwrap_or(ForkCondition::Never), + .unwrap_or_else(|| self.op_fork_activation(OpHardfork::Karst)), + OpHardfork::Karst => self + .hardforks + .karst_time + .map(ForkCondition::Timestamp) + .unwrap_or_else(|| self.op_fork_activation(OpHardfork::Interop)), OpHardfork::Interop => self .hardforks .interop_time @@ -648,6 +665,25 @@ mod tests { assert!(!config.is_jovian_active(9)); } + #[test] + fn test_karst_active() { + let mut config = RollupConfig::default(); + assert!(!config.is_karst_active(0)); + config.hardforks.karst_time = Some(10); + assert!(config.is_regolith_active(10)); + assert!(config.is_canyon_active(10)); + assert!(config.is_delta_active(10)); + assert!(config.is_ecotone_active(10)); + assert!(config.is_fjord_active(10)); + assert!(config.is_granite_active(10)); + assert!(config.is_holocene_active(10)); + assert!(!config.is_pectra_blob_schedule_active(10)); + assert!(config.is_isthmus_active(10)); + assert!(config.is_jovian_active(10)); + assert!(config.is_karst_active(10)); + assert!(!config.is_karst_active(9)); + } + #[test] fn test_interop_active() { let mut config = RollupConfig::default(); @@ -662,6 +698,7 @@ mod tests { assert!(config.is_holocene_active(10)); assert!(!config.is_pectra_blob_schedule_active(10)); assert!(config.is_isthmus_active(10)); + assert!(config.is_karst_active(10)); assert!(config.is_interop_active(10)); assert!(!config.is_interop_active(9)); } @@ -680,7 +717,8 @@ mod tests { pectra_blob_schedule_time: Some(80), isthmus_time: Some(90), jovian_time: Some(100), - interop_time: Some(110), + karst_time: Some(110), + interop_time: Some(120), }, block_time: 2, ..Default::default() @@ -736,10 +774,15 @@ mod tests { assert!(cfg.is_first_jovian_block(100)); assert!(!cfg.is_first_jovian_block(102)); + // 
Karst + assert!(!cfg.is_first_karst_block(108)); + assert!(cfg.is_first_karst_block(110)); + assert!(!cfg.is_first_karst_block(112)); + // Interop - assert!(!cfg.is_first_interop_block(108)); - assert!(cfg.is_first_interop_block(110)); - assert!(!cfg.is_first_interop_block(112)); + assert!(!cfg.is_first_interop_block(118)); + assert!(cfg.is_first_interop_block(120)); + assert!(!cfg.is_first_interop_block(122)); } #[test] diff --git a/rust/kona/crates/protocol/genesis/src/superchain/chain.rs b/rust/kona/crates/protocol/genesis/src/superchain/chain.rs index 861af4dcd069e..03fc914847245 100644 --- a/rust/kona/crates/protocol/genesis/src/superchain/chain.rs +++ b/rust/kona/crates/protocol/genesis/src/superchain/chain.rs @@ -98,6 +98,7 @@ mod tests { pectra_blob_schedule_time: None, isthmus_time: None, jovian_time: None, + karst_time: None, interop_time: None, }, protocol_versions_addr: None, diff --git a/rust/kona/crates/protocol/genesis/src/superchain/chains.rs b/rust/kona/crates/protocol/genesis/src/superchain/chains.rs index 1166748ca4a44..dea0ac218c4e6 100644 --- a/rust/kona/crates/protocol/genesis/src/superchain/chains.rs +++ b/rust/kona/crates/protocol/genesis/src/superchain/chains.rs @@ -105,6 +105,7 @@ mod tests { pectra_blob_schedule_time: None, isthmus_time: None, jovian_time: None, + karst_time: None, interop_time: None, }, protocol_versions_addr: None, diff --git a/rust/kona/crates/protocol/genesis/src/superchain/config.rs b/rust/kona/crates/protocol/genesis/src/superchain/config.rs index 518e79d4401ed..0377670a18fbe 100644 --- a/rust/kona/crates/protocol/genesis/src/superchain/config.rs +++ b/rust/kona/crates/protocol/genesis/src/superchain/config.rs @@ -67,6 +67,7 @@ mod tests { pectra_blob_schedule_time: None, isthmus_time: None, jovian_time: None, + karst_time: None, interop_time: None, }, protocol_versions_addr: None, @@ -181,6 +182,7 @@ mod tests { pectra_blob_schedule_time: None, isthmus_time: None, jovian_time: None, + karst_time: None, 
interop_time: None, }, protocol_versions_addr: None, diff --git a/rust/kona/crates/protocol/hardforks/src/forks.rs b/rust/kona/crates/protocol/hardforks/src/forks.rs index c8c368ac52528..137580aeab26e 100644 --- a/rust/kona/crates/protocol/hardforks/src/forks.rs +++ b/rust/kona/crates/protocol/hardforks/src/forks.rs @@ -1,6 +1,6 @@ //! Contains all hardforks represented in the [`crate::Hardfork`] type. -use crate::{Ecotone, Fjord, Interop, Isthmus, Jovian}; +use crate::{Ecotone, Fjord, Interop, Isthmus, Jovian, Karst}; /// Optimism Hardforks /// @@ -54,6 +54,9 @@ impl Hardforks { /// The Jovian hardfork upgrade transactions. pub const JOVIAN: Jovian = Jovian; + /// The Karst hardfork upgrade transactions. + pub const KARST: Karst = Karst; + /// The Interop hardfork upgrade transactions. pub const INTEROP: Interop = Interop; } @@ -78,6 +81,9 @@ mod tests { let jovian_upgrade_tx = Hardforks::JOVIAN.txs(); assert_eq!(jovian_upgrade_tx.collect::>().len(), 5); + let karst_upgrade_tx = Hardforks::KARST.txs(); + assert_eq!(karst_upgrade_tx.collect::>().len(), 0); + let interop_upgrade_tx = Hardforks::INTEROP.txs(); assert_eq!(interop_upgrade_tx.collect::>().len(), 4); } diff --git a/rust/kona/crates/protocol/hardforks/src/karst.rs b/rust/kona/crates/protocol/hardforks/src/karst.rs new file mode 100644 index 0000000000000..7b4c1cbc70ed3 --- /dev/null +++ b/rust/kona/crates/protocol/hardforks/src/karst.rs @@ -0,0 +1,34 @@ +//! Module containing a `TxDeposit` builder for the Karst network upgrade transactions. +//! +//! Karst network upgrade transactions are defined in the [OP Stack Specs][specs]. +//! +//! [specs]: https://github.com/ethereum-optimism/specs/tree/main/specs/protocol/karst + +use alloy_primitives::Bytes; + +use crate::Hardfork; + +/// The Karst network upgrade transactions. +#[derive(Debug, Default, Clone, Copy)] +pub struct Karst; + +impl Hardfork for Karst { + /// Constructs the network upgrade transactions. 
+ /// Karst has no upgrade transactions (empty NUT bundle). + fn txs(&self) -> impl Iterator + '_ { + core::iter::empty() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use alloc::vec::Vec; + + #[test] + fn test_karst_no_upgrade_txs() { + let karst = Karst; + let txs: Vec<_> = karst.txs().collect(); + assert!(txs.is_empty()); + } +} diff --git a/rust/kona/crates/protocol/hardforks/src/lib.rs b/rust/kona/crates/protocol/hardforks/src/lib.rs index 044e92c83a405..4e1bc4c7b7e20 100644 --- a/rust/kona/crates/protocol/hardforks/src/lib.rs +++ b/rust/kona/crates/protocol/hardforks/src/lib.rs @@ -30,6 +30,9 @@ pub use interop::Interop; mod jovian; pub use jovian::Jovian; +mod karst; +pub use karst::Karst; + mod utils; pub(crate) use utils::upgrade_to_calldata; diff --git a/rust/kona/crates/protocol/protocol/src/batch/single.rs b/rust/kona/crates/protocol/protocol/src/batch/single.rs index aff2d263f6876..ffc7fdf24a559 100644 --- a/rust/kona/crates/protocol/protocol/src/batch/single.rs +++ b/rust/kona/crates/protocol/protocol/src/batch/single.rs @@ -155,12 +155,14 @@ impl SingleBatch { // If this is the first block in the jovian or interop hardfork, and the batch contains any // transactions, it must be dropped. - if (cfg.is_first_jovian_block(self.timestamp) || cfg.is_first_interop_block(self.timestamp)) && + if (cfg.is_first_jovian_block(self.timestamp) || + cfg.is_first_karst_block(self.timestamp) || + cfg.is_first_interop_block(self.timestamp)) && !self.transactions.is_empty() { warn!( target: "single_batch", - "Sequencer included user transactions in jovian or interop transition block. Dropping batch." + "Sequencer included user transactions in jovian, karst, or interop transition block. Dropping batch." 
); return BatchValidity::Drop(BatchDropReason::NonEmptyTransitionBlock); } diff --git a/rust/kona/crates/protocol/registry/src/test_utils/base_mainnet.rs b/rust/kona/crates/protocol/registry/src/test_utils/base_mainnet.rs index 0082ad633ba63..08e04ef42884a 100644 --- a/rust/kona/crates/protocol/registry/src/test_utils/base_mainnet.rs +++ b/rust/kona/crates/protocol/registry/src/test_utils/base_mainnet.rs @@ -58,6 +58,7 @@ pub const BASE_MAINNET_CONFIG: RollupConfig = RollupConfig { pectra_blob_schedule_time: None, isthmus_time: Some(BASE_MAINNET_ISTHMUS_TIMESTAMP), jovian_time: Some(BASE_MAINNET_JOVIAN_TIMESTAMP), + karst_time: None, interop_time: None, }, batch_inbox_address: address!("ff00000000000000000000000000000000008453"), diff --git a/rust/kona/crates/protocol/registry/src/test_utils/base_sepolia.rs b/rust/kona/crates/protocol/registry/src/test_utils/base_sepolia.rs index 5f5b667d475e9..81e19b9358a28 100644 --- a/rust/kona/crates/protocol/registry/src/test_utils/base_sepolia.rs +++ b/rust/kona/crates/protocol/registry/src/test_utils/base_sepolia.rs @@ -60,6 +60,7 @@ pub const BASE_SEPOLIA_CONFIG: RollupConfig = RollupConfig { pectra_blob_schedule_time: Some(1742486400), isthmus_time: Some(BASE_SEPOLIA_ISTHMUS_TIMESTAMP), jovian_time: Some(BASE_SEPOLIA_JOVIAN_TIMESTAMP), + karst_time: None, interop_time: None, }, batch_inbox_address: address!("ff00000000000000000000000000000000084532"), diff --git a/rust/kona/crates/protocol/registry/src/test_utils/op_mainnet.rs b/rust/kona/crates/protocol/registry/src/test_utils/op_mainnet.rs index b6e3eea7417e8..809c005e203da 100644 --- a/rust/kona/crates/protocol/registry/src/test_utils/op_mainnet.rs +++ b/rust/kona/crates/protocol/registry/src/test_utils/op_mainnet.rs @@ -60,6 +60,7 @@ pub const OP_MAINNET_CONFIG: RollupConfig = RollupConfig { pectra_blob_schedule_time: None, isthmus_time: Some(OP_MAINNET_ISTHMUS_TIMESTAMP), jovian_time: Some(OP_MAINNET_JOVIAN_TIMESTAMP), + karst_time: None, interop_time: None, }, 
batch_inbox_address: address!("ff00000000000000000000000000000000000010"), diff --git a/rust/kona/crates/protocol/registry/src/test_utils/op_sepolia.rs b/rust/kona/crates/protocol/registry/src/test_utils/op_sepolia.rs index 9c42a29c8c830..efbc8d4a9ca25 100644 --- a/rust/kona/crates/protocol/registry/src/test_utils/op_sepolia.rs +++ b/rust/kona/crates/protocol/registry/src/test_utils/op_sepolia.rs @@ -60,6 +60,7 @@ pub const OP_SEPOLIA_CONFIG: RollupConfig = RollupConfig { pectra_blob_schedule_time: Some(1742486400), isthmus_time: Some(OP_SEPOLIA_ISTHMUS_TIMESTAMP), jovian_time: Some(OP_SEPOLIA_JOVIAN_TIMESTAMP), + karst_time: None, interop_time: None, }, batch_inbox_address: address!("ff00000000000000000000000000000011155420"), diff --git a/rust/kona/crates/utilities/cli/src/flags/overrides.rs b/rust/kona/crates/utilities/cli/src/flags/overrides.rs index dd804fa691bbf..7fc24e3604bd6 100644 --- a/rust/kona/crates/utilities/cli/src/flags/overrides.rs +++ b/rust/kona/crates/utilities/cli/src/flags/overrides.rs @@ -30,6 +30,9 @@ pub struct OverrideArgs { /// Manually specify the timestamp for the Jovian fork, overriding the bundled setting. #[arg(long, env = "KONA_OVERRIDE_JOVIAN")] pub jovian_override: Option, + /// Manually specify the timestamp for the Karst fork, overriding the bundled setting. + #[arg(long, env = "KONA_OVERRIDE_KARST")] + pub karst_override: Option, /// Manually specify the timestamp for the pectra blob schedule, overriding the bundled /// setting. 
#[arg(long, env = "KONA_OVERRIDE_PECTRA_BLOB_SCHEDULE")] @@ -67,6 +70,7 @@ impl OverrideArgs { .unwrap_or(config.hardforks.pectra_blob_schedule_time), isthmus_time: self.isthmus_override.map(Some).unwrap_or(config.hardforks.isthmus_time), jovian_time: self.jovian_override.map(Some).unwrap_or(config.hardforks.jovian_time), + karst_time: self.karst_override.map(Some).unwrap_or(config.hardforks.karst_time), interop_time: self.interop_override.map(Some).unwrap_or(config.hardforks.interop_time), }; RollupConfig { hardforks, ..config } @@ -108,8 +112,10 @@ mod tests { "1740000000", "--jovian-override", "1745000001", - "--interop-override", + "--karst-override", "1750000000", + "--interop-override", + "1755000000", ]); let config = RollupConfig::default(); let updated_config = args.override_flags.apply(config); @@ -126,7 +132,8 @@ mod tests { pectra_blob_schedule_time: Some(1745000000), isthmus_time: Some(1740000000), jovian_time: Some(1745000001), - interop_time: Some(1750000000), + karst_time: Some(1750000000), + interop_time: Some(1755000000), } ); } @@ -159,6 +166,7 @@ mod tests { pectra_blob_schedule_override: None, isthmus_override: None, jovian_override: None, + karst_override: None, interop_override: None, } ); From 62a55e1a74a05a2849a342a5a24c1de8fb5b5513 Mon Sep 17 00:00:00 2001 From: niha <205694301+0xniha@users.noreply.github.com> Date: Mon, 9 Mar 2026 16:10:33 -0300 Subject: [PATCH 081/201] feat: add NUT Bundle generation (#19248) * feat: add nut lib * refactor: remove newTx function from nut lib * feat: add GenerateNUTBundle script * fix: pre-pr fixes * test: add generate bundle utils tests * refactor: replace custom computeCreate2Address by forge's * docs: fix and add natspec * fix: remove script inheritance from utils contract * refactor: remove unnecesary fields from nut tx struct & replace sourcehash by intent * test: add create tx empry args equivalence and different salts assumption * refactor: remove fork, salt and cgt from bundle script input & 
consolidate utils into UpgradeUtils * refactor: remove fork, salt and cgt from bundle script input * refactor: comments and constants * refactor: consolidate nut bundle utilities into UpgradeUtils * fix: proxy admin get code * fix: remove unused import Fork * fix: rm L2ContractsManagerTypes * fix: update impl struct and make lib functions internal * feat: remove TODO for L2CM merge and add TODO for OptimismMintableERC721Factory initializable upgrade * test: complete nuts structure testing * feat: add metadata struct to bundle * feat: make OptimismMintableERC721Factory initializable * fix: replace jovian for karst * fix: add Initialized event to IOptimismMintableERC721Factory * fix: remove check immutable in apply tests * fix: outdated safety invariants * fix: add OptimismMintableERC721FactoryLegacyMapping * refactor: rm upgrade name from intent, add upgradeBundlePath helper and add TODO for karst nuts removal * feat: add invariant #2 to L2ProxyAdmin * feat: add nut bundle check, just script and ci check * feat: add implementation count check * docs: add array string clarification comment * feat: add regex check for metadata version and 'to' field comment * fix: remove salt standard todo comment * fix: remove old upgrade OptimismMintableERC721Factory call and fix initializable slot * feat: add IOptimismMintableERC721FactoryLegacyMapping * fix: run pre-pr * fix: run pre-pr * fix: remove owner argument for L2PA constructor and set owner to zero address * fix: remove gitkeep * fix: storage gap in OptimismMintableERC721Factory * refactor: make bundle file name generic * fix: remove duplicated assert * fix: l2genesis proxy admin impl owner test * fix: l2pa createAdmin with manual storage setting test --- .circleci/continue/main.yml | 2 + .../deployer/integration_test/apply_test.go | 13 - packages/contracts-bedrock/.gitignore | 3 + .../interfaces/L2/IL2ProxyAdmin.sol | 2 +- .../L2/IOptimismMintableERC721Factory.sol | 10 +- ...mismMintableERC721FactoryLegacyMapping.sol | 6 
+ .../staking/IPolicyEngineStaking.sol | 6 +- packages/contracts-bedrock/justfile | 14 + .../contracts-bedrock/scripts/L2Genesis.s.sol | 41 +- .../scripts/checks/interfaces/main.go | 3 + .../scripts/checks/nut-bundle-check/main.go | 247 ++++++++ .../scripts/libraries/UpgradeUtils.sol | 219 +++++++ .../scripts/upgrade/GenerateNUTBundle.s.sol | 577 ++++++++++++++++++ .../snapshots/abi/L2ProxyAdmin.json | 8 +- .../abi/OptimismMintableERC721Factory.json | 44 +- ...ismMintableERC721FactoryLegacyMapping.json | 21 + .../snapshots/semver-lock.json | 12 +- .../OptimismMintableERC721Factory.json | 35 ++ ...ismMintableERC721FactoryLegacyMapping.json | 9 + .../upgrades/current-upgrade-bundle.json | 238 ++++++++ .../src/L2/L2ContractsManager.sol | 23 +- .../contracts-bedrock/src/L2/L2ProxyAdmin.sol | 5 +- .../src/L2/OptimismMintableERC721Factory.sol | 64 +- .../src/libraries/L2ContractsManagerTypes.sol | 7 + .../src/libraries/NetworkUpgradeTxns.sol | 103 ++++ .../test/L2/L2ContractsManager.t.sol | 25 +- .../test/L2/L2ProxyAdmin.t.sol | 58 +- .../L2/OptimismMintableERC721Factory.t.sol | 10 +- .../test/libraries/NetworkUpgradeTxns.t.sol | 296 +++++++++ .../test/scripts/GenerateNUTBundle.t.sol | 129 ++++ .../test/scripts/L2Genesis.t.sol | 5 +- 31 files changed, 2115 insertions(+), 120 deletions(-) create mode 100644 packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721FactoryLegacyMapping.sol create mode 100644 packages/contracts-bedrock/scripts/checks/nut-bundle-check/main.go create mode 100644 packages/contracts-bedrock/scripts/libraries/UpgradeUtils.sol create mode 100644 packages/contracts-bedrock/scripts/upgrade/GenerateNUTBundle.s.sol create mode 100644 packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721FactoryLegacyMapping.json create mode 100644 packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721FactoryLegacyMapping.json create mode 100644 packages/contracts-bedrock/snapshots/upgrades/current-upgrade-bundle.json create mode 
100644 packages/contracts-bedrock/src/libraries/NetworkUpgradeTxns.sol create mode 100644 packages/contracts-bedrock/test/libraries/NetworkUpgradeTxns.t.sol create mode 100644 packages/contracts-bedrock/test/scripts/GenerateNUTBundle.t.sol diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 155a5809b41fd..86587ce1c371f 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -1586,6 +1586,8 @@ jobs: command: lint - run-contracts-check: command: snapshots-check-no-build + - run-contracts-check: + command: nut-bundle-check-no-build - run-contracts-check: command: interfaces-check-no-build - run-contracts-check: diff --git a/op-deployer/pkg/deployer/integration_test/apply_test.go b/op-deployer/pkg/deployer/integration_test/apply_test.go index cae2024ae685f..9a2803bf448a3 100644 --- a/op-deployer/pkg/deployer/integration_test/apply_test.go +++ b/op-deployer/pkg/deployer/integration_test/apply_test.go @@ -1214,7 +1214,6 @@ func validateOPChainDeployment(t *testing.T, cg codeGetter, st *state.State, int alloc := chainState.Allocs.Data.Accounts chainIntent := intent.Chains[i] - checkImmutableBehindProxy(t, alloc, predeploys.OptimismMintableERC721FactoryAddr, common.BigToHash(new(big.Int).SetUint64(intent.L1ChainID))) // ownership slots var addrAsSlot common.Hash @@ -1243,22 +1242,10 @@ func validateOPChainDeployment(t *testing.T, cg codeGetter, st *state.State, int } } -func getEIP1967ImplementationAddress(t *testing.T, allocations types.GenesisAlloc, proxyAddress common.Address) common.Address { - storage := allocations[proxyAddress].Storage - storageValue := storage[genesis.ImplementationSlot] - require.NotEmpty(t, storageValue, "Implementation address for %s should be set", proxyAddress) - return common.HexToAddress(storageValue.Hex()) -} - type bytesMarshaler interface { Bytes() []byte } -func checkImmutableBehindProxy(t *testing.T, allocations types.GenesisAlloc, proxyContract common.Address, thing bytesMarshaler) { - 
implementationAddress := getEIP1967ImplementationAddress(t, allocations, proxyContract) - checkImmutable(t, allocations, implementationAddress, thing) -} - func checkImmutable(t *testing.T, allocations types.GenesisAlloc, implementationAddress common.Address, thing bytesMarshaler) { account, ok := allocations[implementationAddress] require.True(t, ok, "%s not found in allocations", implementationAddress) diff --git a/packages/contracts-bedrock/.gitignore b/packages/contracts-bedrock/.gitignore index 1e2b6f844e203..9eb65c677e65f 100644 --- a/packages/contracts-bedrock/.gitignore +++ b/packages/contracts-bedrock/.gitignore @@ -40,3 +40,6 @@ deployments/1-deploy.json # Getting Started guide deploy config deploy-config/getting-started.json + +# Nut test deployment +deployments/nut-*.json \ No newline at end of file diff --git a/packages/contracts-bedrock/interfaces/L2/IL2ProxyAdmin.sol b/packages/contracts-bedrock/interfaces/L2/IL2ProxyAdmin.sol index 8f5f3f0dd9868..2f7f11060f66f 100644 --- a/packages/contracts-bedrock/interfaces/L2/IL2ProxyAdmin.sol +++ b/packages/contracts-bedrock/interfaces/L2/IL2ProxyAdmin.sol @@ -17,7 +17,7 @@ interface IL2ProxyAdmin is IProxyAdmin, ISemver { /// @notice Thrown when the upgrade fails. error L2ProxyAdmin__UpgradeFailed(bytes data); - function __constructor__(address _owner) external; + function __constructor__() external; /// @notice Upgrades the predeploys via delegatecall to the L2ContractsManager contract. /// @param _l2ContractsManager Address of the L2ContractsManager contract. 
function upgradePredeploys(address _l2ContractsManager) external; diff --git a/packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721Factory.sol b/packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721Factory.sol index be3e8ad054250..eef5269ef3acb 100644 --- a/packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721Factory.sol +++ b/packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721Factory.sol @@ -1,9 +1,14 @@ // SPDX-License-Identifier: MIT pragma solidity ^0.8.0; -interface IOptimismMintableERC721Factory { +import { IOptimismMintableERC721FactoryLegacyMapping } from + "interfaces/L2/IOptimismMintableERC721FactoryLegacyMapping.sol"; + +interface IOptimismMintableERC721Factory is IOptimismMintableERC721FactoryLegacyMapping { event OptimismMintableERC721Created(address indexed localToken, address indexed remoteToken, address deployer); + event Initialized(uint8 version); + function BRIDGE() external view returns (address); function REMOTE_CHAIN_ID() external view returns (uint256); function bridge() external view returns (address); @@ -14,9 +19,8 @@ interface IOptimismMintableERC721Factory { ) external returns (address); - function isOptimismMintableERC721(address) external view returns (bool); function remoteChainID() external view returns (uint256); function version() external view returns (string memory); - function __constructor__(address _bridge, uint256 _remoteChainId) external; + function initialize(address _bridge, uint256 _remoteChainID) external; } diff --git a/packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721FactoryLegacyMapping.sol b/packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721FactoryLegacyMapping.sol new file mode 100644 index 0000000000000..66395c2eb926f --- /dev/null +++ b/packages/contracts-bedrock/interfaces/L2/IOptimismMintableERC721FactoryLegacyMapping.sol @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +interface 
IOptimismMintableERC721FactoryLegacyMapping { + function isOptimismMintableERC721(address) external view returns (bool); +} diff --git a/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol b/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol index 50a42d481207a..288af54c3b9c7 100644 --- a/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol +++ b/packages/contracts-bedrock/interfaces/periphery/staking/IPolicyEngineStaking.sol @@ -81,13 +81,13 @@ interface IPolicyEngineStaking is ISemver { function PE_DATA_SLOT() external view returns (bytes32); /// @notice Returns Policy Engine data for an account. - function peData(address _account) external view returns (uint128 effectiveStake_, uint128 lastUpdate_); + function peData(address account) external view returns (uint128 effectiveStake, uint128 lastUpdate); /// @notice Returns allowlist entry for a beneficiary-staker pair. - function allowlist(address _beneficiary, address _staker) external view returns (bool allowed_); + function allowlist(address beneficiary, address staker) external view returns (bool allowed); /// @notice Returns staking data for an account. - function stakingData(address _account) external view returns (uint128 stakedAmount_, address beneficiary_); + function stakingData(address account) external view returns (uint128 stakedAmount, address beneficiary); /// @notice Returns the ERC20 token used for staking. function stakingToken() external view returns (IERC20); diff --git a/packages/contracts-bedrock/justfile b/packages/contracts-bedrock/justfile index 608871b8c31c0..5b1dbc38de59f 100644 --- a/packages/contracts-bedrock/justfile +++ b/packages/contracts-bedrock/justfile @@ -213,6 +213,10 @@ coverage-lcov-all *ARGS: genesis: forge script scripts/L2Genesis.s.sol:L2Genesis --sig 'runWithStateDump()' +# Generates the Network Upgrade Transaction (NUT) bundle. 
+generate-nut-bundle: + @forge script scripts/upgrade/GenerateNUTBundle.s.sol:GenerateNUTBundle --sig 'run()' > /dev/null + # Deploys the contracts. deploy: ./scripts/deploy/deploy.sh @@ -360,6 +364,14 @@ validate-spacers: build validate-spacers-no-build check-kontrol-summaries-unchanged: ./scripts/checks/check-kontrol-summaries-unchanged.sh +# Checks that the Network Upgrade Transaction (NUT) bundle is up to date. +# Does not build contracts. +nut-bundle-check-no-build: + go run ./scripts/checks/nut-bundle-check + +# Checks that the Network Upgrade Transaction (NUT) bundle is up to date. +nut-bundle-check: build-source nut-bundle-check-no-build + # Runs semgrep on the contracts. semgrep: cd ../../ && semgrep scan --config .semgrep/rules/ ./packages/contracts-bedrock @@ -374,6 +386,7 @@ check: semgrep \ lint-check \ snapshots-check-no-build \ + nut-bundle-check-no-build \ unused-imports-check-no-build \ strict-pragma-check-no-build \ valid-semver-check-no-build \ @@ -422,6 +435,7 @@ pre-pr *ARGS: just lint just build-source + just generate-nut-bundle just check diff --git a/packages/contracts-bedrock/scripts/L2Genesis.s.sol b/packages/contracts-bedrock/scripts/L2Genesis.s.sol index 2385d962c4314..c100225b862fc 100644 --- a/packages/contracts-bedrock/scripts/L2Genesis.s.sol +++ b/packages/contracts-bedrock/scripts/L2Genesis.s.sol @@ -283,19 +283,21 @@ contract L2Genesis is Script { function setInteropPredeployProxies() internal { } + /// @notice This predeploy is following the safety invariant #2. + /// Follows invariant #2 since the constructor transfers ownership to the input owner, + /// and therefore requires setting the storage manually here. function setL2ProxyAdmin(Input memory _input) internal { // Note the L2ProxyAdmin implementation itself is behind a proxy that owns itself. 
- address impl = _setImplementationCode(Predeploys.PROXY_ADMIN); + _setImplementationCode(Predeploys.PROXY_ADMIN); bytes32 _ownerSlot = bytes32(0); // TODO(#19182): Remove this once the L2ProxyAdmin is initializable. // there is no initialize() function, so we just set the storage manually. vm.store(Predeploys.PROXY_ADMIN, _ownerSlot, bytes32(uint256(uint160(_input.opChainProxyAdminOwner)))); - // update the proxy to not be uninitialized (although not standard initialize pattern) - vm.store(impl, _ownerSlot, bytes32(uint256(uint160(_input.opChainProxyAdminOwner)))); } + /// @notice This predeploy is following the safety invariant #1. function setL2ToL1MessagePasser(bool _useCustomGasToken) internal { if (_useCustomGasToken) { string memory cname = "L2ToL1MessagePasserCGT"; @@ -337,7 +339,7 @@ contract L2Genesis is Script { IL2ERC721Bridge(Predeploys.L2_ERC721_BRIDGE).initialize({ _l1ERC721Bridge: payable(_l1ERC721BridgeProxy) }); } - /// @notice This predeploy is following the safety invariant #2, + /// @notice This predeploy is following the safety invariant #1. function setSequencerFeeVault(Input memory _input) internal { _setFeeVault({ _vaultAddr: Predeploys.SEQUENCER_FEE_WALLET, @@ -360,25 +362,16 @@ contract L2Genesis is Script { }); } - /// @notice This predeploy is following the safety invariant #2, + /// @notice This predeploy is following the safety invariant #1. 
function setOptimismMintableERC721Factory(Input memory _input) internal { - IOptimismMintableERC721Factory factory = IOptimismMintableERC721Factory( - DeployUtils.create1({ - _name: "OptimismMintableERC721Factory", - _args: DeployUtils.encodeConstructor( - abi.encodeCall( - IOptimismMintableERC721Factory.__constructor__, (Predeploys.L2_ERC721_BRIDGE, _input.l1ChainID) - ) - ) - }) - ); + address impl = _setImplementationCode(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY); - address impl = Predeploys.predeployToCodeNamespace(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY); - vm.etch(impl, address(factory).code); + IOptimismMintableERC721Factory(impl).initialize({ _bridge: address(0), _remoteChainID: 0 }); - /// Reset so its not included state dump - vm.etch(address(factory), ""); - vm.resetNonce(address(factory)); + IOptimismMintableERC721Factory(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY).initialize({ + _bridge: Predeploys.L2_ERC721_BRIDGE, + _remoteChainID: _input.l1ChainID + }); } /// @notice This predeploy is following the safety invariant #1. @@ -425,7 +418,7 @@ contract L2Genesis is Script { _setImplementationCode(Predeploys.LEGACY_MESSAGE_PASSER); } - /// @notice This predeploy is following the safety invariant #2. + /// @notice This predeploy is following the safety invariant #1. function setBaseFeeVault(Input memory _input) internal { _setFeeVault({ _vaultAddr: Predeploys.BASE_FEE_VAULT, @@ -437,7 +430,7 @@ contract L2Genesis is Script { }); } - /// @notice This predeploy is following the safety invariant #2. + /// @notice This predeploy is following the safety invariant #1. function setL1FeeVault(Input memory _input) internal { _setFeeVault({ _vaultAddr: Predeploys.L1_FEE_VAULT, @@ -449,7 +442,7 @@ contract L2Genesis is Script { }); } - /// @notice This predeploy is following the safety invariant #2. + /// @notice This predeploy is following the safety invariant #1. 
function setOperatorFeeVault(Input memory _input) internal { _setFeeVault({ _vaultAddr: Predeploys.OPERATOR_FEE_VAULT, @@ -707,7 +700,7 @@ contract L2Genesis is Script { } /// @notice Helper function to set up a fee vault predeploy with revenue sharing support. - /// This follows safety invariant #2 (initializable contracts). + /// This follows safety invariant #1. /// @param _vaultAddr The predeploy address of the fee vault. /// @param _useRevenueShare Whether revenue sharing is enabled. /// @param _recipient The recipient address (ignored if revenue sharing is enabled). diff --git a/packages/contracts-bedrock/scripts/checks/interfaces/main.go b/packages/contracts-bedrock/scripts/checks/interfaces/main.go index fb2bdf4051201..b2ed175cc1571 100644 --- a/packages/contracts-bedrock/scripts/checks/interfaces/main.go +++ b/packages/contracts-bedrock/scripts/checks/interfaces/main.go @@ -30,6 +30,9 @@ var excludeContracts = []string{ // Misc stuff that can be ignored "IOPContractsManagerLegacyUpgrade", + // Constructor inheritance differences + "IL2ProxyAdmin", + // TODO: Interfaces that need to be fixed "IInitializable", "IOptimismMintableERC20", "ILegacyMintableERC20", "KontrolCheatsBase", "IResolvedDelegateProxy", diff --git a/packages/contracts-bedrock/scripts/checks/nut-bundle-check/main.go b/packages/contracts-bedrock/scripts/checks/nut-bundle-check/main.go new file mode 100644 index 0000000000000..f456214f9282a --- /dev/null +++ b/packages/contracts-bedrock/scripts/checks/nut-bundle-check/main.go @@ -0,0 +1,247 @@ +package main + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "fmt" + "os" + "os/exec" + "regexp" + "strings" +) + +const ( + DEPOSITOR_ACCOUNT = "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001" + PROXY_ADMIN = "0x4200000000000000000000000000000000000018" + CONDITIONAL_DEPLOYER = "0x420000000000000000000000000000000000002C" + ZERO_ADDRESS = "0x0000000000000000000000000000000000000000" +) + +type BundleMetadata struct { + Version string 
`json:"version"` +} + +type NetworkUpgradeTxn struct { + Data string `json:"data"` + From string `json:"from"` + GasLimit uint64 `json:"gasLimit"` + Intent string `json:"intent"` + To string `json:"to"` +} + +type UpgradeBundle struct { + Metadata BundleMetadata `json:"metadata"` + Transactions []NetworkUpgradeTxn `json:"transactions"` +} + +func main() { + if err := run(); err != nil { + fmt.Fprintf(os.Stderr, "❌ %v\n", err) + os.Exit(1) + } +} + +func run() error { + // Construct bundle path dynamically + bundlePath := "snapshots/upgrades/current-upgrade-bundle.json" + fmt.Printf("Bundle path: %s\n", bundlePath) + + if _, err := os.Stat(bundlePath); os.IsNotExist(err) { + return fmt.Errorf("bundle file not found at %s", bundlePath) + } + + // Read the committed bundle + committedData, err := os.ReadFile(bundlePath) + if err != nil { + return fmt.Errorf("failed to read bundle file: %w", err) + } + + var committedBundle UpgradeBundle + if err := json.Unmarshal(committedData, &committedBundle); err != nil { + return fmt.Errorf("failed to parse bundle JSON: %w", err) + } + + fmt.Printf("Found %d transactions in bundle (version: %s)\n", + len(committedBundle.Transactions), committedBundle.Metadata.Version) + + // 1. Bundle Structure Validation + fmt.Println("\nValidating bundle structure...") + if err := validateBundleStructure(&committedBundle); err != nil { + return fmt.Errorf("bundle structure validation failed: %w", err) + } + fmt.Println(" ✅ Bundle structure is valid") + + // 2. Transaction Validation + fmt.Println("\nValidating transactions...") + if err := validateTransactions(committedBundle.Transactions); err != nil { + return fmt.Errorf("transaction validation failed: %w", err) + } + fmt.Printf(" ✅ All %d transactions are valid\n", len(committedBundle.Transactions)) + + // 3. 
Address Validation + fmt.Println("\nValidating addresses...") + if err := validateAddresses(committedBundle.Transactions); err != nil { + return fmt.Errorf("address validation failed: %w", err) + } + fmt.Println(" ✅ All addresses are valid") + + // 4. Bundle Up-to-Date Check + fmt.Println("\nChecking if bundle is up-to-date...") + fmt.Println(" (generating fresh bundle to compare...)") + if err := checkBundleUpToDate(committedData, bundlePath); err != nil { + return fmt.Errorf("bundle up-to-date check failed: %w", err) + } + fmt.Println(" ✅ Bundle is up-to-date") + + fmt.Println("\n✅ NUT bundle validation passed") + return nil +} + +func validateBundleStructure(bundle *UpgradeBundle) error { + // Check metadata exists and has version + if bundle.Metadata.Version == "" { + return fmt.Errorf("metadata.version is empty") + } + + // Validate version format (must be valid semver: X.Y.Z with optional pre-release/build metadata) + semverPattern := regexp.MustCompile(`^v?(\d+)\.(\d+)\.(\d+)(-[0-9A-Za-z\-\.]+)?(\+[0-9A-Za-z\-\.]+)?$`) + if !semverPattern.MatchString(bundle.Metadata.Version) { + return fmt.Errorf("metadata.version has invalid semver format: %s (expected format: X.Y.Z[-prerelease][+build])", bundle.Metadata.Version) + } + + // Check transactions array exists + if bundle.Transactions == nil { + return fmt.Errorf("transactions array is nil") + } + + if len(bundle.Transactions) == 0 { + return fmt.Errorf("transactions array is empty") + } + + return nil +} + +func validateTransactions(txns []NetworkUpgradeTxn) error { + for i, txn := range txns { + // Check all required fields are present + if txn.Data == "" { + return fmt.Errorf("transaction %d (%s): data field is empty", i, txn.Intent) + } + + // Validate data is hex string + if !strings.HasPrefix(txn.Data, "0x") { + return fmt.Errorf("transaction %d (%s): data is not a hex string", i, txn.Intent) + } + + if _, err := hex.DecodeString(txn.Data[2:]); err != nil { + return fmt.Errorf("transaction %d (%s): data 
is not valid hex: %w", i, txn.Intent, err) + } + + if txn.To == "" { + return fmt.Errorf("transaction %d (%s): to address is empty", i, txn.Intent) + } + + if txn.From == "" { + return fmt.Errorf("transaction %d (%s): from address is empty", i, txn.Intent) + } + + if txn.GasLimit == 0 { + return fmt.Errorf("transaction %d (%s): gasLimit is zero", i, txn.Intent) + } + + if txn.Intent == "" { + return fmt.Errorf("transaction %d: intent is empty", i) + } + + // Intent should be descriptive (at least 5 characters) + if len(txn.Intent) < 5 { + return fmt.Errorf("transaction %d: intent too short: %s", i, txn.Intent) + } + } + + return nil +} + +func validateAddresses(txns []NetworkUpgradeTxn) error { + for i, txn := range txns { + // Validate 'to' address + if !isValidAddressFormat(txn.To) { + return fmt.Errorf("transaction %d (%s): invalid 'to' address format: %s", i, txn.Intent, txn.To) + } + + // All the deployments are done via ConditionalDeployer predeploy, so 'to' cannot be zero address. 
+ if strings.EqualFold(txn.To, ZERO_ADDRESS) { + return fmt.Errorf("transaction %d (%s): 'to' address is zero address", i, txn.Intent) + } + + // Validate 'from' address + if !isValidAddressFormat(txn.From) { + return fmt.Errorf("transaction %d (%s): invalid 'from' address format: %s", i, txn.Intent, txn.From) + } + } + + return nil +} + +func isValidAddressFormat(addr string) bool { + if !strings.HasPrefix(addr, "0x") { + return false + } + + if len(addr) != 42 { // 0x + 40 hex characters + return false + } + + // Check if all characters after 0x are valid hex + _, err := hex.DecodeString(addr[2:]) + return err == nil +} + +func checkBundleUpToDate(committedData []byte, bundlePath string) error { + // Temporarily move the committed bundle + originalPath := bundlePath + backupPath := bundlePath + ".backup" + + if err := os.Rename(originalPath, backupPath); err != nil { + return fmt.Errorf("failed to backup bundle: %w", err) + } + + // Ensure we restore the bundle even if generation fails + defer func() { + _ = os.Rename(backupPath, originalPath) + }() + + // Generate new bundle using forge script + cmd := exec.Command("forge", "script", + "scripts/upgrade/GenerateNUTBundle.s.sol:GenerateNUTBundle", + "--sig", "run()") + + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + + if err := cmd.Run(); err != nil { + return fmt.Errorf("failed to generate bundle:\nstdout: %s\nstderr: %s\nerror: %w", + stdout.String(), stderr.String(), err) + } + + // Read the newly generated bundle + generatedData, err := os.ReadFile(originalPath) + if err != nil { + return fmt.Errorf("failed to read generated bundle: %w", err) + } + + // Restore the original bundle + if err := os.Rename(backupPath, originalPath); err != nil { + return fmt.Errorf("failed to restore original bundle: %w", err) + } + + // Compare the two bundles + if !bytes.Equal(committedData, generatedData) { + // Pretty print the difference hint + return fmt.Errorf("bundle is out of date\n\nThe 
committed bundle differs from the generated bundle.\nRun 'just generate-nut-bundle' to update it") + } + + return nil +} diff --git a/packages/contracts-bedrock/scripts/libraries/UpgradeUtils.sol b/packages/contracts-bedrock/scripts/libraries/UpgradeUtils.sol new file mode 100644 index 0000000000000..fc05ead8297ce --- /dev/null +++ b/packages/contracts-bedrock/scripts/libraries/UpgradeUtils.sol @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.0; + +// Libraries +import { Vm } from "forge-std/Vm.sol"; +import { NetworkUpgradeTxns } from "src/libraries/NetworkUpgradeTxns.sol"; +import { Preinstalls } from "src/libraries/Preinstalls.sol"; +import { Predeploys } from "src/libraries/Predeploys.sol"; +import { Constants } from "src/libraries/Constants.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Interfaces +import { IProxy } from "interfaces/universal/IProxy.sol"; + +// Contracts +import { ConditionalDeployer } from "src/L2/ConditionalDeployer.sol"; + +/// @title UpgradeUtils +/// @notice Utility library for L2 hardfork upgrade transaction generation. +library UpgradeUtils { + Vm private constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code"))))); + + /// @notice The number of implementations deployed in every upgrade. + /// Includes: + /// - 1 StorageSetter + /// - 16 base predeploys + /// - 7 INTEROP predeploys + /// - 2 CGT predeploys (NativeAssetLiquidity, LiquidityController) + /// - 2 CGT variants (L1BlockCGT, L2ToL1MessagePasserCGT) + /// Total: 28 implementations + uint256 internal constant IMPLEMENTATION_COUNT = 28; + + /// @notice The default gas limit for a deployment transaction. + uint64 internal constant DEFAULT_DEPLOYMENT_GAS = 375_000; + + /// @notice The default gas limit for an upgrade transaction. + uint64 internal constant DEFAULT_UPGRADE_GAS = 50_000; + + /// @notice Gas limits for different types of upgrade transactions. 
+ /// @param l2cmDeployment Gas for deploying L2ContractsManager + /// @param upgradeExecution Gas for L2ProxyAdmin.upgradePredeploys() call + /// @param conditionalDeployerDeployment Gas for deploying ConditionalDeployer + /// @param conditionalDeployerUpgrade Gas for upgrading ConditionalDeployer proxy + /// @param proxyAdminUpgrade Gas for upgrading ProxyAdmin implementation + struct GasLimits { + // Fixed + uint64 l2cmDeployment; + uint64 upgradeExecution; + // Karst + uint64 conditionalDeployerDeployment; + uint64 conditionalDeployerUpgrade; + uint64 proxyAdminUpgrade; + } + + /// @notice Returns the total number of transactions for the current upgrade. + /// @dev Total count: + /// - 28 implementation deployments + /// - [KARST] 2 ConditionalDeployer (deployment + upgrade) + /// - [KARST] 1 ProxyAdmin upgrade + /// - 1 L2CM deployment + /// - 1 Upgrade Predeploys call + function getTransactionCount() internal pure returns (uint256 txnCount_) { + if (IMPLEMENTATION_COUNT != 28) { + revert( + "UpgradeUtils: implementation count changed, ensure that the txnCount_ calculation is still correct." + ); + } + txnCount_ = IMPLEMENTATION_COUNT + 5; + } + + /// @notice Returns the gas limits for all upgrade transaction types. + /// @dev Gas limits are chosen to provide sufficient headroom while being + /// conservative enough to fit within the upgrade block gas allocation. + /// Rationale for each limit: + /// - TODO: Add rationale here + /// @return Gas limits struct. + function gasLimits() internal pure returns (GasLimits memory) { + return GasLimits({ + // Fixed + l2cmDeployment: DEFAULT_DEPLOYMENT_GAS, + upgradeExecution: type(uint64).max, + // Karst + conditionalDeployerDeployment: DEFAULT_DEPLOYMENT_GAS, + conditionalDeployerUpgrade: DEFAULT_UPGRADE_GAS, + proxyAdminUpgrade: DEFAULT_UPGRADE_GAS + }); + } + + /// @notice Returns the array of predeploy names to upgrade. + /// @dev Exception: StorageSetter is not a predeploy, but is upgraded in L2CM too. 
+ /// @return implementations_ Array of implementation names to upgrade. + function getImplementationsNamesToUpgrade() internal pure returns (string[] memory implementations_) { + implementations_ = new string[](IMPLEMENTATION_COUNT); + + // StorageSetter + implementations_[0] = "StorageSetter"; + + // Base predeploys + implementations_[1] = "L2CrossDomainMessenger"; + implementations_[2] = "GasPriceOracle"; + implementations_[3] = "L2StandardBridge"; + implementations_[4] = "SequencerFeeVault"; + implementations_[5] = "OptimismMintableERC20Factory"; + implementations_[6] = "L2ERC721Bridge"; + implementations_[7] = "L1Block"; + implementations_[8] = "L2ToL1MessagePasser"; + implementations_[9] = "OptimismMintableERC721Factory"; + implementations_[10] = "L2ProxyAdmin"; + implementations_[11] = "BaseFeeVault"; + implementations_[12] = "L1FeeVault"; + implementations_[13] = "OperatorFeeVault"; + implementations_[14] = "SchemaRegistry"; + implementations_[15] = "EAS"; + implementations_[16] = "FeeSplitter"; + + // INTEROP predeploys + implementations_[17] = "CrossL2Inbox"; + implementations_[18] = "L2ToL2CrossDomainMessenger"; + implementations_[19] = "SuperchainETHBridge"; + implementations_[20] = "OptimismSuperchainERC20Factory"; + implementations_[21] = "OptimismSuperchainERC20Beacon"; + implementations_[22] = "SuperchainTokenBridge"; + implementations_[23] = "ETHLiquidity"; + + // CGT predeploys + implementations_[24] = "L1BlockCGT"; + implementations_[25] = "L2ToL1MessagePasserCGT"; + implementations_[26] = "LiquidityController"; + implementations_[27] = "NativeAssetLiquidity"; + } + + /// @notice Uses vm.computeCreate2Address to compute the CREATE2 address for given initcode and salt. + /// @dev Uses the DeterministicDeploymentProxy address as the deployer. + /// @param _code The contract initcode (creation bytecode). + /// @param _salt The CREATE2 salt. + /// @return expected_ The computed contract address. 
+ function computeCreate2Address(bytes memory _code, bytes32 _salt) internal pure returns (address expected_) { + expected_ = vm.computeCreate2Address(_salt, keccak256(_code), Preinstalls.DeterministicDeploymentProxy); + } + + /// @notice Creates a deployment transaction via ConditionalDeployer. + /// @dev The transaction calls ConditionalDeployer.deploy(salt, code) which performs + /// idempotent CREATE2 deployment via the DeterministicDeploymentProxy. + /// @param _name Human-readable name for the contract being deployed. + /// @param _artifactPath Forge artifact path (e.g., "MyContract.sol:MyContract"). + /// @param _salt CREATE2 salt for address computation. + /// @param _gasLimit Gas limit for the deployment transaction. + /// @return txn_ The constructed deployment transaction. + function createDeploymentTxn( + string memory _name, + string memory _artifactPath, + bytes32 _salt, + uint64 _gasLimit + ) + internal + view + returns (NetworkUpgradeTxns.NetworkUpgradeTxn memory txn_) + { + return createDeploymentTxnWithArgs(_name, _artifactPath, "", _salt, _gasLimit); + } + + /// @notice Creates a deployment transaction via ConditionalDeployer with constructor arguments. + /// @dev The transaction calls ConditionalDeployer.deploy(salt, code) which performs + /// idempotent CREATE2 deployment via the DeterministicDeploymentProxy. + /// @param _name Human-readable name for the contract being deployed. + /// @param _artifactPath Forge artifact path (e.g., "MyContract.sol:MyContract"). + /// @param _args ABI-encoded constructor arguments. + /// @param _salt CREATE2 salt for address computation. + /// @param _gasLimit Gas limit for the deployment transaction. + /// @return txn_ The constructed deployment transaction. 
+ function createDeploymentTxnWithArgs( + string memory _name, + string memory _artifactPath, + bytes memory _args, + bytes32 _salt, + uint64 _gasLimit + ) + internal + view + returns (NetworkUpgradeTxns.NetworkUpgradeTxn memory txn_) + { + bytes memory code = abi.encodePacked(DeployUtils.getCode(_artifactPath), _args); + txn_ = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: string.concat("Deploy ", _name, " Implementation"), + from: Constants.DEPOSITOR_ACCOUNT, + to: Predeploys.CONDITIONAL_DEPLOYER, + gasLimit: _gasLimit, + data: abi.encodeCall(ConditionalDeployer.deploy, (_salt, code)) + }); + } + + /// @notice Creates an upgrade transaction for a proxy contract. + /// @dev The transaction calls IProxy(proxy).upgradeTo(implementation). + /// For the ProxyAdmin upgrade, the sender must be address(0) to use the + /// zero-address upgrade path in the Proxy.sol implementation. + /// @param _name Human-readable name for the contract being upgraded. + /// @param _proxy Address of the proxy contract. + /// @param _implementation Address of the new implementation. + /// @param _gasLimit Gas limit for the upgrade transaction. + /// @return txn_ The constructed upgrade transaction. 
+ function createUpgradeTxn( + string memory _name, + address _proxy, + address _implementation, + uint64 _gasLimit + ) + internal + pure + returns (NetworkUpgradeTxns.NetworkUpgradeTxn memory txn_) + { + txn_ = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: string.concat("Upgrade ", _name, " Implementation"), + from: address(0), + to: _proxy, + gasLimit: _gasLimit, + data: abi.encodeCall(IProxy.upgradeTo, (_implementation)) + }); + } +} diff --git a/packages/contracts-bedrock/scripts/upgrade/GenerateNUTBundle.s.sol b/packages/contracts-bedrock/scripts/upgrade/GenerateNUTBundle.s.sol new file mode 100644 index 0000000000000..38b476c0c8744 --- /dev/null +++ b/packages/contracts-bedrock/scripts/upgrade/GenerateNUTBundle.s.sol @@ -0,0 +1,577 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Utilities +import { Script } from "forge-std/Script.sol"; + +// Libraries +import { Predeploys } from "src/libraries/Predeploys.sol"; +import { Preinstalls } from "src/libraries/Preinstalls.sol"; +import { Constants } from "src/libraries/Constants.sol"; +import { NetworkUpgradeTxns } from "src/libraries/NetworkUpgradeTxns.sol"; +import { L2ContractsManagerTypes } from "src/libraries/L2ContractsManagerTypes.sol"; +import { UpgradeUtils } from "scripts/libraries/UpgradeUtils.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +// Interfaces +import { IL2ProxyAdmin } from "interfaces/L2/IL2ProxyAdmin.sol"; + +/// @title GenerateNUTBundle +/// @notice Generates Network Upgrade Transaction (NUT) bundles for L2 hardfork upgrades. +/// @dev This script creates deterministic upgrade transaction bundles for L2 hardfork upgrades +/// using the L2ContractsManager (L2CM) system. +contract GenerateNUTBundle is Script { + /// @notice CREATE2 salt for deterministic deployments. + bytes32 internal constant SALT = bytes32(uint256(keccak256("optimism.network-upgrade"))); + + /// @notice Name of the upgrade. 
+ string internal constant UPGRADE_NAME = "karst"; + + /// @notice Version of the upgrade bundle. + string internal constant BUNDLE_VERSION = "1.0.0"; + + /// @notice Output containing generated transactions. + /// @param txns Array of Network Upgrade Transactions to execute. + struct Output { + NetworkUpgradeTxns.NetworkUpgradeTxn[] txns; + } + + /// @notice Configuration for an implementation contract deployment. + /// @param implementation Expected implementation address after deployment. + /// @param deploymentGasLimit Gas limit for the deployment transaction. + /// @param artifactPath Forge artifact path (e.g., "MyContract.sol:MyContract"). + /// @param name Human-readable name for the contract. + /// @dev Constructor arguments, if any, are supplied via createDeploymentTxnWithArgs and are not stored here. + struct ImplementationConfig { + address implementation; + uint64 deploymentGasLimit; + string name; + string artifactPath; + } + + /// @notice Gas limits for the upgrade. + UpgradeUtils.GasLimits internal gasLimits; + + /// @notice Expected implementations for the upgrade. + L2ContractsManagerTypes.Implementations internal implementations; + + /// @notice Implementation configurations. + mapping(string => ImplementationConfig) internal implementationConfigs; + + /// @notice Array of generated transactions. + NetworkUpgradeTxns.NetworkUpgradeTxn[] internal txns; + + function setUp() public { + // Clear previous txns: Transactions are pushed to a dynamic array, so we need + // to delete the array to avoid pushing duplicates. + delete txns; + + gasLimits = UpgradeUtils.gasLimits(); + } + + /// @notice Generates the complete upgrade transaction bundle. + /// @dev Executes 5 phases in fixed order: + /// 1. Pre-implementation deployments [CUSTOM] + /// 2. Implementation deployments [FIXED] + /// 3. Pre-L2CM deployment [CUSTOM] + /// 4. L2CM deployment [FIXED] + /// 5. Upgrade execution [FIXED] + /// @dev Only modify phases 1 and 3 for fork-specific logic. Other phases must remain unchanged.
+ /// @return output_ Output containing all generated transactions in execution order. + function run() public returns (Output memory output_) { + setUp(); + + // Build implementation deployment configurations + _buildImplementationDeploymentConfigs(); + + // Phase 1: Pre-implementation deployments + // Add fork-specific deployment or upgrade txns that must occur prior to the implementation deployments + // phase. + _preImplementationDeployments(); + + // Phase 2: Implementation deployments + _generateImplementationDeployments(); + + // Build the implementations struct + implementations = _getImplementations(); + + // Phase 3: Pre-L2CM deployment + // Add fork-specific deployment or upgrade logic that must occur between the implementation deployment + // phase and the L2ContractsManager deployment phase. + _preL2CMDeployment(); + + // Phase 4: L2ContractsManager deployment + _generateL2CMDeployment(); + + // Phase 5: Upgrade execution + _generateUpgradeExecution(); + + // Copy storage array to memory array for return + uint256 txnsLength = txns.length; + output_.txns = new NetworkUpgradeTxns.NetworkUpgradeTxn[](txnsLength); + for (uint256 i = 0; i < txnsLength; i++) { + output_.txns[i] = txns[i]; + } + + _assertValidOutput(output_); + + // Write transactions to artifact with metadata + NetworkUpgradeTxns.BundleMetadata memory metadata = + NetworkUpgradeTxns.BundleMetadata({ version: BUNDLE_VERSION }); + NetworkUpgradeTxns.writeArtifact(txns, metadata, upgradeBundlePath()); + } + + /// @notice Asserts the output is valid. + /// @param _output The output to assert. 
+ function _assertValidOutput(Output memory _output) internal pure { + uint256 transactionCount = UpgradeUtils.getTransactionCount(); + uint256 txnsLength = _output.txns.length; + require(txnsLength == transactionCount, "GenerateNUTBundle: invalid transaction count"); + + for (uint256 i = 0; i < txnsLength; i++) { + require(_output.txns[i].data.length > 0, "GenerateNUTBundle: invalid transaction data"); + require(bytes(_output.txns[i].intent).length > 0, "GenerateNUTBundle: invalid transaction intent"); + require(_output.txns[i].to != address(0), "GenerateNUTBundle: invalid transaction to"); + require(_output.txns[i].gasLimit > 0, "GenerateNUTBundle: invalid transaction gasLimit"); + + if (_output.txns[i].from == address(0)) { + // Transactions must have a from address except for ProxyAdmin and ConditionalDeployer upgrades + if ( + _output.txns[i].to != Predeploys.PROXY_ADMIN + && _output.txns[i].to != Predeploys.CONDITIONAL_DEPLOYER + ) { + revert("GenerateNUTBundle: invalid transaction from"); + } + } + } + } + + /// @notice Asserts the implementation config is valid. + /// @param _config The implementation config to assert. + function _assertValidImplementationConfig(ImplementationConfig memory _config) internal pure { + require(bytes(_config.name).length > 0, "GenerateNUTBundle: invalid implementation name"); + require(bytes(_config.artifactPath).length > 0, "GenerateNUTBundle: invalid implementation artifact path"); + require(_config.deploymentGasLimit > 0, "GenerateNUTBundle: invalid implementation deployment gas limit"); + require(_config.implementation != address(0), "GenerateNUTBundle: invalid implementation address"); + } + + // ======================================== + // CUSTOM NUT OPERATIONS + // ======================================== + + /// @notice Pre-implementation deployment phase for fork-specific setup. 
+ /// @dev Any transactions added to the txns array within this function will be executed BEFORE + /// any predeploy implementations are deployed. This is the designated location for adding + /// fork-specific deployment or upgrade logic that must occur prior to the standard + /// implementation deployment phase. The rest of the script follows a fixed structure and + /// should not be modified. + function _preImplementationDeployments() internal { + if (keccak256(abi.encodePacked(UPGRADE_NAME)) == keccak256(abi.encodePacked("karst"))) { + // TODO(#19369): Remove these steps once Karst upgrade is deployed in all chains. + // ConditionalDeployer deployment + upgrade + _generateConditionalDeployerTxns(); + } + } + + /// @notice Pre-L2CM deployment phase for fork-specific setup. + /// @dev This function executes AFTER implementations are deployed but BEFORE the L2ContractsManager + /// is deployed. It is the designated location for adding fork-specific deployment or upgrade + /// logic that must occur between these two phases. The rest of the script follows a fixed + /// structure and should not be modified. + /// @dev IMPORTANT: This is one of only TWO extension points in this script. Do not modify + /// the core deployment flow in _generateL2CMDeployment, _generateUpgradeExecution, or other + /// fixed phases. + function _preL2CMDeployment() internal { + if (keccak256(abi.encodePacked(UPGRADE_NAME)) == keccak256(abi.encodePacked("karst"))) { + // TODO(#19369): Remove these steps once Karst upgrade is deployed in all chains. + // L2ProxyAdmin upgrade + _generateL2ProxyAdminUpgrade(implementations.proxyAdminImpl); + } + } + + // ======================================== + // KARST-ONLY NUTs + // ======================================== + + /// @notice Generates ConditionalDeployer deployment and upgrade transactions. + /// @dev TODO(#19369): Remove this function once Karst upgrade is deployed in all chains. 
+ function _generateConditionalDeployerTxns() internal { + // 1. Deploy ConditionalDeployer implementation + bytes memory conditionalDeployerCode = + abi.encodePacked(DeployUtils.getCode("ConditionalDeployer.sol:ConditionalDeployer")); + + txns.push( + NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: "ConditionalDeployer Deployment", + from: Constants.DEPOSITOR_ACCOUNT, + to: Preinstalls.DeterministicDeploymentProxy, + gasLimit: gasLimits.conditionalDeployerDeployment, + data: abi.encodePacked(SALT, conditionalDeployerCode) + }) + ); + + // 2. Upgrade ConditionalDeployer proxy + address newConditionalDeployerImpl = UpgradeUtils.computeCreate2Address(conditionalDeployerCode, SALT); + txns.push( + UpgradeUtils.createUpgradeTxn( + "ConditionalDeployer", + Predeploys.CONDITIONAL_DEPLOYER, + newConditionalDeployerImpl, + gasLimits.conditionalDeployerUpgrade + ) + ); + } + + /// @notice Generates L2ProxyAdmin upgrade transaction. + /// @dev It upgrades the L2ProxyAdmin to add the upgradePredeploys() function. + /// @param _proxyAdminImpl Address of the new L2ProxyAdmin implementation. + /// @dev TODO(#19369): Remove this function once Karst upgrade is deployed in all chains. + function _generateL2ProxyAdminUpgrade(address _proxyAdminImpl) internal { + txns.push( + UpgradeUtils.createUpgradeTxn( + "L2ProxyAdmin", Predeploys.PROXY_ADMIN, _proxyAdminImpl, gasLimits.proxyAdminUpgrade + ) + ); + } + + // ======================================== + // FIXED NUT OPERATIONS + // ======================================== + + /// @notice Generates implementation deployment transactions for all the implementations to upgrade. + /// @dev This function is called for all upgrades. It deploys implementation contracts + /// via ConditionalDeployer.deploy(), which ensures idempotent deployments. + /// @dev IMPORTANT: Only modify this function if you need to add or modify a fixed implementation deployment. 
+ function _generateImplementationDeployments() internal { + // Get all implementations to upgrade + string[] memory implementationsToUpgrade = UpgradeUtils.getImplementationsNamesToUpgrade(); + + for (uint256 i = 0; i < implementationsToUpgrade.length; i++) { + // Get implementation config + ImplementationConfig memory config = implementationConfigs[implementationsToUpgrade[i]]; + + _assertValidImplementationConfig(config); + + txns.push( + UpgradeUtils.createDeploymentTxn(config.name, config.artifactPath, SALT, config.deploymentGasLimit) + ); + } + } + + /// @notice Generates L2ContractsManager deployment transaction. + /// @dev This function is called for all upgrades. The L2ContractsManager is deployed + /// with all implementation addresses encoded in its constructor. + function _generateL2CMDeployment() internal { + // Encode constructor arguments + bytes memory l2cmArgs = abi.encode(implementations); + + // Deploy L2ContractsManager with encoded implementation addresses + txns.push( + UpgradeUtils.createDeploymentTxnWithArgs( + "L2ContractsManager", + "L2ContractsManager.sol:L2ContractsManager", + l2cmArgs, + SALT, + gasLimits.l2cmDeployment + ) + ); + } + + /// @notice Generates the final upgrade execution transaction. + /// @dev This function is called for all upgrades. It creates the transaction that calls + /// L2ProxyAdmin.upgradePredeploys(l2cm), which executes a DELEGATECALL to the + /// L2ContractsManager.upgrade() function to perform the actual upgrades. 
+ function _generateUpgradeExecution() internal { + // Encode constructor arguments + bytes memory l2cmArgs = abi.encode(implementations); + + // Compute L2ContractsManager address + address l2cm = UpgradeUtils.computeCreate2Address( + abi.encodePacked(DeployUtils.getCode("L2ContractsManager.sol:L2ContractsManager"), l2cmArgs), SALT + ); + + // Create upgrade execution transaction + txns.push( + NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: "L2ProxyAdmin Upgrade Predeploys", + from: Constants.DEPOSITOR_ACCOUNT, + to: Predeploys.PROXY_ADMIN, + gasLimit: gasLimits.upgradeExecution, + data: abi.encodeCall(IL2ProxyAdmin.upgradePredeploys, (l2cm)) + }) + ); + } + + // ======================================== + // HELPERS + // ======================================== + + /// @notice Returns the path to the upgrade bundle. + function upgradeBundlePath() public pure returns (string memory) { + return string.concat("snapshots/upgrades/current-upgrade-bundle.json"); + } + + /// @notice Retrieves all expected implementation addresses for the upgrade. + /// @dev All addresses are looked up from the implementationConfigs mapping, which contains + /// deterministically computed CREATE2 addresses using the hardcoded salt. This ensures + /// identical addresses across all chains executing the upgrade. + /// @return implementations_ Struct containing all implementation addresses. 
+ function _getImplementations() + internal + view + returns (L2ContractsManagerTypes.Implementations memory implementations_) + { + implementations_ = L2ContractsManagerTypes.Implementations({ + storageSetterImpl: implementationConfigs["StorageSetter"].implementation, + l2CrossDomainMessengerImpl: implementationConfigs["L2CrossDomainMessenger"].implementation, + gasPriceOracleImpl: implementationConfigs["GasPriceOracle"].implementation, + l2StandardBridgeImpl: implementationConfigs["L2StandardBridge"].implementation, + sequencerFeeWalletImpl: implementationConfigs["SequencerFeeVault"].implementation, + optimismMintableERC20FactoryImpl: implementationConfigs["OptimismMintableERC20Factory"].implementation, + l2ERC721BridgeImpl: implementationConfigs["L2ERC721Bridge"].implementation, + l1BlockImpl: implementationConfigs["L1Block"].implementation, + l1BlockCGTImpl: implementationConfigs["L1BlockCGT"].implementation, + l2ToL1MessagePasserImpl: implementationConfigs["L2ToL1MessagePasser"].implementation, + l2ToL1MessagePasserCGTImpl: implementationConfigs["L2ToL1MessagePasserCGT"].implementation, + optimismMintableERC721FactoryImpl: implementationConfigs["OptimismMintableERC721Factory"].implementation, + proxyAdminImpl: implementationConfigs["L2ProxyAdmin"].implementation, + baseFeeVaultImpl: implementationConfigs["BaseFeeVault"].implementation, + l1FeeVaultImpl: implementationConfigs["L1FeeVault"].implementation, + operatorFeeVaultImpl: implementationConfigs["OperatorFeeVault"].implementation, + schemaRegistryImpl: implementationConfigs["SchemaRegistry"].implementation, + easImpl: implementationConfigs["EAS"].implementation, + crossL2InboxImpl: implementationConfigs["CrossL2Inbox"].implementation, + l2ToL2CrossDomainMessengerImpl: implementationConfigs["L2ToL2CrossDomainMessenger"].implementation, + superchainETHBridgeImpl: implementationConfigs["SuperchainETHBridge"].implementation, + ethLiquidityImpl: implementationConfigs["ETHLiquidity"].implementation, + 
optimismSuperchainERC20FactoryImpl: implementationConfigs["OptimismSuperchainERC20Factory"].implementation, + optimismSuperchainERC20BeaconImpl: implementationConfigs["OptimismSuperchainERC20Beacon"].implementation, + superchainTokenBridgeImpl: implementationConfigs["SuperchainTokenBridge"].implementation, + nativeAssetLiquidityImpl: implementationConfigs["NativeAssetLiquidity"].implementation, + liquidityControllerImpl: implementationConfigs["LiquidityController"].implementation, + feeSplitterImpl: implementationConfigs["FeeSplitter"].implementation, + conditionalDeployerImpl: implementationConfigs["ConditionalDeployer"].implementation + }); + } + + /// @notice Builds the implementation configuration mapping for all contracts to be deployed. + /// @dev IMPORTANT: Only modify this function if you need to add or modify a deployment implementation + /// configuration. + /// @dev An array of strings is used to add contracts that are not predeploys (StorageSetter) or have + /// feature-specific variants (e.g. CGT). 
+ function _buildImplementationDeploymentConfigs() internal { + implementationConfigs["StorageSetter"] = ImplementationConfig({ + name: "StorageSetter", + artifactPath: "StorageSetter.sol:StorageSetter", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("StorageSetter.sol:StorageSetter"), SALT) + }); + implementationConfigs["L2CrossDomainMessenger"] = ImplementationConfig({ + name: "L2CrossDomainMessenger", + artifactPath: "L2CrossDomainMessenger.sol:L2CrossDomainMessenger", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("L2CrossDomainMessenger.sol:L2CrossDomainMessenger"), SALT + ) + }); + implementationConfigs["GasPriceOracle"] = ImplementationConfig({ + name: "GasPriceOracle", + artifactPath: "GasPriceOracle.sol:GasPriceOracle", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("GasPriceOracle.sol:GasPriceOracle"), SALT + ) + }); + implementationConfigs["L2StandardBridge"] = ImplementationConfig({ + name: "L2StandardBridge", + artifactPath: "L2StandardBridge.sol:L2StandardBridge", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("L2StandardBridge.sol:L2StandardBridge"), SALT + ) + }); + implementationConfigs["SequencerFeeVault"] = ImplementationConfig({ + name: "SequencerFeeVault", + artifactPath: "SequencerFeeVault.sol:SequencerFeeVault", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("SequencerFeeVault.sol:SequencerFeeVault"), SALT + ) + }); + implementationConfigs["OptimismMintableERC20Factory"] = ImplementationConfig({ + name: "OptimismMintableERC20Factory", + artifactPath: "OptimismMintableERC20Factory.sol:OptimismMintableERC20Factory", 
+ deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("OptimismMintableERC20Factory.sol:OptimismMintableERC20Factory"), SALT + ) + }); + implementationConfigs["L2ERC721Bridge"] = ImplementationConfig({ + name: "L2ERC721Bridge", + artifactPath: "L2ERC721Bridge.sol:L2ERC721Bridge", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("L2ERC721Bridge.sol:L2ERC721Bridge"), SALT + ) + }); + implementationConfigs["L1Block"] = ImplementationConfig({ + name: "L1Block", + artifactPath: "L1Block.sol:L1Block", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("L1Block.sol:L1Block"), SALT) + }); + implementationConfigs["L1BlockCGT"] = ImplementationConfig({ + name: "L1BlockCGT", + artifactPath: "L1BlockCGT.sol:L1BlockCGT", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("L1BlockCGT.sol:L1BlockCGT"), SALT) + }); + implementationConfigs["L2ToL1MessagePasser"] = ImplementationConfig({ + name: "L2ToL1MessagePasser", + artifactPath: "L2ToL1MessagePasser.sol:L2ToL1MessagePasser", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("L2ToL1MessagePasser.sol:L2ToL1MessagePasser"), SALT + ) + }); + implementationConfigs["L2ToL1MessagePasserCGT"] = ImplementationConfig({ + name: "L2ToL1MessagePasserCGT", + artifactPath: "L2ToL1MessagePasserCGT.sol:L2ToL1MessagePasserCGT", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("L2ToL1MessagePasserCGT.sol:L2ToL1MessagePasserCGT"), SALT + ) + }); + + implementationConfigs["OptimismMintableERC721Factory"] = ImplementationConfig({ + name: 
"OptimismMintableERC721Factory", + artifactPath: "OptimismMintableERC721Factory.sol:OptimismMintableERC721Factory", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("OptimismMintableERC721Factory.sol:OptimismMintableERC721Factory"), SALT + ) + }); + implementationConfigs["L2ProxyAdmin"] = ImplementationConfig({ + name: "L2ProxyAdmin", + artifactPath: "L2ProxyAdmin.sol:L2ProxyAdmin", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("L2ProxyAdmin.sol:L2ProxyAdmin"), SALT) + }); + implementationConfigs["BaseFeeVault"] = ImplementationConfig({ + name: "BaseFeeVault", + artifactPath: "BaseFeeVault.sol:BaseFeeVault", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("BaseFeeVault.sol:BaseFeeVault"), SALT) + }); + implementationConfigs["L1FeeVault"] = ImplementationConfig({ + name: "L1FeeVault", + artifactPath: "L1FeeVault.sol:L1FeeVault", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("L1FeeVault.sol:L1FeeVault"), SALT) + }); + implementationConfigs["OperatorFeeVault"] = ImplementationConfig({ + name: "OperatorFeeVault", + artifactPath: "OperatorFeeVault.sol:OperatorFeeVault", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("OperatorFeeVault.sol:OperatorFeeVault"), SALT + ) + }); + implementationConfigs["SchemaRegistry"] = ImplementationConfig({ + name: "SchemaRegistry", + artifactPath: "SchemaRegistry.sol:SchemaRegistry", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("SchemaRegistry.sol:SchemaRegistry"), SALT + ) + }); + implementationConfigs["EAS"] = 
ImplementationConfig({ + name: "EAS", + artifactPath: "EAS.sol:EAS", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("EAS.sol:EAS"), SALT) + }); + implementationConfigs["CrossL2Inbox"] = ImplementationConfig({ + name: "CrossL2Inbox", + artifactPath: "CrossL2Inbox.sol:CrossL2Inbox", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("CrossL2Inbox.sol:CrossL2Inbox"), SALT) + }); + implementationConfigs["L2ToL2CrossDomainMessenger"] = ImplementationConfig({ + name: "L2ToL2CrossDomainMessenger", + artifactPath: "L2ToL2CrossDomainMessenger.sol:L2ToL2CrossDomainMessenger", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("L2ToL2CrossDomainMessenger.sol:L2ToL2CrossDomainMessenger"), SALT + ) + }); + implementationConfigs["SuperchainETHBridge"] = ImplementationConfig({ + name: "SuperchainETHBridge", + artifactPath: "SuperchainETHBridge.sol:SuperchainETHBridge", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("SuperchainETHBridge.sol:SuperchainETHBridge"), SALT + ) + }); + implementationConfigs["ETHLiquidity"] = ImplementationConfig({ + name: "ETHLiquidity", + artifactPath: "ETHLiquidity.sol:ETHLiquidity", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("ETHLiquidity.sol:ETHLiquidity"), SALT) + }); + implementationConfigs["OptimismSuperchainERC20Factory"] = ImplementationConfig({ + name: "OptimismSuperchainERC20Factory", + artifactPath: "OptimismSuperchainERC20Factory.sol:OptimismSuperchainERC20Factory", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + 
DeployUtils.getCode("OptimismSuperchainERC20Factory.sol:OptimismSuperchainERC20Factory"), SALT + ) + }); + implementationConfigs["OptimismSuperchainERC20Beacon"] = ImplementationConfig({ + name: "OptimismSuperchainERC20Beacon", + artifactPath: "OptimismSuperchainERC20Beacon.sol:OptimismSuperchainERC20Beacon", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("OptimismSuperchainERC20Beacon.sol:OptimismSuperchainERC20Beacon"), SALT + ) + }); + implementationConfigs["SuperchainTokenBridge"] = ImplementationConfig({ + name: "SuperchainTokenBridge", + artifactPath: "SuperchainTokenBridge.sol:SuperchainTokenBridge", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("SuperchainTokenBridge.sol:SuperchainTokenBridge"), SALT + ) + }); + implementationConfigs["NativeAssetLiquidity"] = ImplementationConfig({ + name: "NativeAssetLiquidity", + artifactPath: "NativeAssetLiquidity.sol:NativeAssetLiquidity", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("NativeAssetLiquidity.sol:NativeAssetLiquidity"), SALT + ) + }); + implementationConfigs["LiquidityController"] = ImplementationConfig({ + name: "LiquidityController", + artifactPath: "LiquidityController.sol:LiquidityController", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("LiquidityController.sol:LiquidityController"), SALT + ) + }); + implementationConfigs["FeeSplitter"] = ImplementationConfig({ + name: "FeeSplitter", + artifactPath: "FeeSplitter.sol:FeeSplitter", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address(DeployUtils.getCode("FeeSplitter.sol:FeeSplitter"), SALT) + }); + implementationConfigs["ConditionalDeployer"] = 
ImplementationConfig({ + name: "ConditionalDeployer", + artifactPath: "ConditionalDeployer.sol:ConditionalDeployer", + deploymentGasLimit: UpgradeUtils.DEFAULT_DEPLOYMENT_GAS, + implementation: UpgradeUtils.computeCreate2Address( + DeployUtils.getCode("ConditionalDeployer.sol:ConditionalDeployer"), SALT + ) + }); + } +} diff --git a/packages/contracts-bedrock/snapshots/abi/L2ProxyAdmin.json b/packages/contracts-bedrock/snapshots/abi/L2ProxyAdmin.json index 28eecaa8bee59..447a0109b331c 100644 --- a/packages/contracts-bedrock/snapshots/abi/L2ProxyAdmin.json +++ b/packages/contracts-bedrock/snapshots/abi/L2ProxyAdmin.json @@ -1,12 +1,6 @@ [ { - "inputs": [ - { - "internalType": "address", - "name": "_owner", - "type": "address" - } - ], + "inputs": [], "stateMutability": "nonpayable", "type": "constructor" }, diff --git a/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721Factory.json b/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721Factory.json index e6ecac1d93814..1bddfc5bc283f 100644 --- a/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721Factory.json +++ b/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721Factory.json @@ -1,17 +1,6 @@ [ { - "inputs": [ - { - "internalType": "address", - "name": "_bridge", - "type": "address" - }, - { - "internalType": "uint256", - "name": "_remoteChainId", - "type": "uint256" - } - ], + "inputs": [], "stateMutability": "nonpayable", "type": "constructor" }, @@ -83,6 +72,24 @@ "stateMutability": "nonpayable", "type": "function" }, + { + "inputs": [ + { + "internalType": "address", + "name": "_bridge", + "type": "address" + }, + { + "internalType": "uint256", + "name": "_remoteChainID", + "type": "uint256" + } + ], + "name": "initialize", + "outputs": [], + "stateMutability": "nonpayable", + "type": "function" + }, { "inputs": [ { @@ -128,6 +135,19 @@ "stateMutability": "view", "type": "function" }, + { + "anonymous": false, + "inputs": [ + { + "indexed": false, + "internalType": 
"uint8", + "name": "version", + "type": "uint8" + } + ], + "name": "Initialized", + "type": "event" + }, { "anonymous": false, "inputs": [ diff --git a/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721FactoryLegacyMapping.json b/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721FactoryLegacyMapping.json new file mode 100644 index 0000000000000..36f0ba347ba29 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/abi/OptimismMintableERC721FactoryLegacyMapping.json @@ -0,0 +1,21 @@ +[ + { + "inputs": [ + { + "internalType": "address", + "name": "", + "type": "address" + } + ], + "name": "isOptimismMintableERC721", + "outputs": [ + { + "internalType": "bool", + "name": "", + "type": "bool" + } + ], + "stateMutability": "view", + "type": "function" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index cac22ae7dab56..f0743b3e55829 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -96,8 +96,8 @@ "sourceCodeHash": "0x7e438cbbe9a8248887b8c21f68c811f90a5cae4902cbbf7b0a1f6cd644dc42d9" }, "src/L2/L2ContractsManager.sol:L2ContractsManager": { - "initCodeHash": "0xc6953fefa5142a37061fc6e96d0ec251a8ff8bcc2d09e8fdeb023e8677ff17c7", - "sourceCodeHash": "0xa4fba8f6dd5f7e1cfcba63ca8b9d0fbe621d1fe33aeb6147a185045fcded7c14" + "initCodeHash": "0xf16ab1061ba6d4205583c0136fedecd7db78740c2376b68d5123a7a001d89d6b", + "sourceCodeHash": "0x5f076c6770a0ef56921a7ba347965d3d7f85daa3a6f9d5737823ec22283fbbe5" }, "src/L2/L2CrossDomainMessenger.sol:L2CrossDomainMessenger": { "initCodeHash": "0xe160be403df12709c371c33195d1b9c3b5e9499e902e86bdabc8eed749c3fd61", @@ -108,8 +108,8 @@ "sourceCodeHash": "0xc05bfcfadfd09a56cfea68e7c1853faa36d114d9a54cd307348be143e442c35a" }, "src/L2/L2ProxyAdmin.sol:L2ProxyAdmin": { - "initCodeHash": 
"0x85b054c8105191d272014459858020a90fcf7db401ef0fb028999f967461d25a", - "sourceCodeHash": "0x0d402be0c35dcdd3f6642c2949932705dd09c8e2d08a06c328ccbf8ed6c65808" + "initCodeHash": "0x18107f86dcabd2b72764a3c4bc58d6dd653b2d1ccca2a8df7f61327f495eec8a", + "sourceCodeHash": "0x2a52ab5b0bc836b2d215380ce1fd4751cc29e50a416282f4d4ba7a007be03a91" }, "src/L2/L2StandardBridge.sol:L2StandardBridge": { "initCodeHash": "0xba5b288a396b34488ba7be68473305529c7da7c43e5f1cfc48d6a4aecd014103", @@ -148,8 +148,8 @@ "sourceCodeHash": "0xd93a8d5de6fd89ebf503976511065f0c2414814affdb908f26a867ffdd0f9fbe" }, "src/L2/OptimismMintableERC721Factory.sol:OptimismMintableERC721Factory": { - "initCodeHash": "0xa692a3fc4a71eb3381a59d1ab655bbc02e8b507add7c3f560ee24b001d88ae6e", - "sourceCodeHash": "0xb0be3deac32956251adb37d3ca61f619ca4348a1355a41c856a3a95adde0e4ff" + "initCodeHash": "0x4883e8b18af472308931a7fda8b41f874efe844c11da08af45b40125f89d955a", + "sourceCodeHash": "0xeb7718e9882254215b467daa2f61c80932470cc23c98ab25d76856038bb2ba95" }, "src/L2/OptimismSuperchainERC20.sol:OptimismSuperchainERC20": { "initCodeHash": "0x1ad4b7c19d10f80559bad15063ac1fd420f36d76853eb6d846b0acd52fb93acb", diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721Factory.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721Factory.json index 2c0076337f50b..afba6fd9ca5f1 100644 --- a/packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721Factory.json +++ b/packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721Factory.json @@ -5,5 +5,40 @@ "offset": 0, "slot": "0", "type": "mapping(address => bool)" + }, + { + "bytes": "1", + "label": "_initialized", + "offset": 0, + "slot": "1", + "type": "uint8" + }, + { + "bytes": "1", + "label": "_initializing", + "offset": 1, + "slot": "1", + "type": "bool" + }, + { + "bytes": "20", + "label": "bridge", + "offset": 2, + "slot": "1", + "type": "address" + }, + { + "bytes": "32", + "label": 
"remoteChainID", + "offset": 0, + "slot": "2", + "type": "uint256" + }, + { + "bytes": "1472", + "label": "__gap", + "offset": 0, + "slot": "3", + "type": "uint256[46]" } ] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721FactoryLegacyMapping.json b/packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721FactoryLegacyMapping.json new file mode 100644 index 0000000000000..2c0076337f50b --- /dev/null +++ b/packages/contracts-bedrock/snapshots/storageLayout/OptimismMintableERC721FactoryLegacyMapping.json @@ -0,0 +1,9 @@ +[ + { + "bytes": "32", + "label": "isOptimismMintableERC721", + "offset": 0, + "slot": "0", + "type": "mapping(address => bool)" + } +] \ No newline at end of file diff --git a/packages/contracts-bedrock/snapshots/upgrades/current-upgrade-bundle.json b/packages/contracts-bedrock/snapshots/upgrades/current-upgrade-bundle.json new file mode 100644 index 0000000000000..5693dcd668023 --- /dev/null +++ b/packages/contracts-bedrock/snapshots/upgrades/current-upgrade-bundle.json @@ -0,0 +1,238 @@ +{ + "metadata": { + "version": "1.0.0" + }, + "transactions": [ + { + "data": 
"0x9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4608060405234801561001057600080fd5b506105b8806100206000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806354fd4d5014610046578063cdcb760a14610098578063e0145f5c146100d0575b600080fd5b6100826040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b60405161008f91906103f7565b60405180910390f35b6100ab6100a6366004610440565b6100ea565b60405173ffffffffffffffffffffffffffffffffffffffff909116815260200161008f565b734e59b44847b379578588920ca78fbf26c0b4956c6100ab565b8051602080830191909120604080517fff00000000000000000000000000000000000000000000000000000000000000818501527f4e59b44847b379578588920ca78fbf26c0b4956c000000000000000000000000602182015260358101869052605580820184905282518083039091018152607590910190915280519201919091206000919073ffffffffffffffffffffffffffffffffffffffff81163b156101d85760405173ffffffffffffffffffffffffffffffffffffffff8216907ffbe57d889a7f75a4e0c7da304cd158fcaddc4b925cdd9f4cfb115c0f9e48009b90600090a291506103779050565b600080734e59b44847b379578588920ca78fbf26c0b4956c73ffffffffffffffffffffffffffffffffffffffff168787604051602001610219929190610519565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152908290526102519161053f565b6000604051808303816000865af19150503d806000811461028e576040519150601f19603f3d011682016040523d82523d6000602084013e610293565b606091505b5091509150806102a29061055b565b60601c94508115806102e057508273ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff1614155b1561032257806040517fcb0fc6f700000000000000000000000000000000000000000000000000000000815260040161031991906103f7565b60405180910390fd5b8473ffffffffffffffffffffffffffffffffffffffff167f9b7318127ed899f286ea9ddd7925ed8ad24a682b6a825c3b5b3d88a3f00bc1d28860405161036a91815260200190565b60405180910390a2505050505b92915050565b60005b83811015610398578181015183820152602001610380565b838111156103a75760008484015
25b50505050565b600081518084526103c581602086016020860161037d565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061040a60208301846103ad565b9392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806040838503121561045357600080fd5b82359150602083013567ffffffffffffffff8082111561047257600080fd5b818501915085601f83011261048657600080fd5b81358181111561049857610498610411565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156104de576104de610411565b816040528281528860208487010111156104f757600080fd5b8260208601602083013760006020848301015280955050505050509250929050565b8281526000825161053181602085016020870161037d565b919091016020019392505050565b6000825161055181846020870161037d565b9190910192915050565b6000815160208301517fffffffffffffffffffffffffffffffffffffffff000000000000000000000000808216935060148310156105a35780818460140360031b1b83161693505b50505091905056fea164736f6c634300080f000a", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "ConditionalDeployer Deployment", + "to": "0x4e59b44847b379578588920cA78FbF26c0B4956C" + }, + { + "data": "0x3659cfe6000000000000000000000000906835344844979ffd3a752eaa23728d513db00b", + "from": "0x0000000000000000000000000000000000000000", + "gasLimit": 50000, + "intent": "Upgrade ConditionalDeployer Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca40000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000048a608060405234801561001057600080fd5b5061046a806100206000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c8063a6ed563e11610076578063bd02d0f51161005b578063bd02d0f51461018e578063ca446dd9146101b8578063e2a4853a1461011557600080fd5b8063a6ed563e1461018e578063abfdcced146101aa57600080fd5b80634e91db08116100a75780634e91db081461011557806354fd4d50146101275780637ae1cfca1461017057600080fd5b80630528afe2146100c357806321f8a721146100d8575b600080fd5b6100d66100d1366004610239565b6101c6565b005b6100eb6100e63660046102ae565b610229565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100d66101233660046102c7565b9055565b6101636040518060400160405280600581526020017f312e322e3200000000000000000000000000000000000000000000000000000081525081565b60405161010c91906102e9565b61017e6100e63660046102ae565b604051901515815260200161010c565b61019c6100e63660046102ae565b60405190815260200161010c565b6100d661012336600461035c565b6100d6610123366004610391565b8060005b81811015610223576102118484838181106101e7576101e76103cf565b90506040020160000135858584818110610203576102036103cf565b905060400201602001359055565b8061021b816103fe565b9150506101ca565b50505050565b6000610233825490565b92915050565b6000806020838503121561024c57600080fd5b823567ffffffffffffffff8082111561026457600080fd5b818501915085601f83011261027857600080fd5b81358181111561028757600080fd5b8660208260061b850101111561029c57600080fd5b60209290920196919550909350505050565b6000602082840312156102c057600080fd5b5035919050565b600080604083850312156102da57600080fd5b50508035926020909101359150565b600060208083528351808285015260005b81811015610316578581018301518582016040015282016102fa565b81811115610328576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60008060408
38503121561036f57600080fd5b823591506020830135801515811461038657600080fd5b809150509250929050565b600080604083850312156103a457600080fd5b82359150602083013573ffffffffffffffffffffffffffffffffffffffff8116811461038657600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203610456577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b506001019056fea164736f6c634300080f000a00000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy StorageSetter Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000001f25608060405234801561001057600080fd5b5061001961001e565b6100eb565b600054600160a81b900460ff161561008c5760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b60005460ff600160a01b909104811610156100e9576000805460ff60a01b191660ff60a01b17905560405160ff81527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b611e2b806100fa6000396000f3fe60806040526004361061018b5760003560e01c80638cbeeef2116100d6578063c4d66de81161007f578063ddd5a40f11610059578063ddd5a40f1461043e578063e46e245a14610454578063ecc704281461046957600080fd5b8063c4d66de8146103de578063d764ad0b146103fe578063db505d801461041157600080fd5b8063a7119869116100b0578063a711986914610333578063b1b1b2091461038e578063b28ade25146103be57600080fd5b80638cbeeef2146102405780639fce812c14610333578063a4e7f8bd1461035e57600080fd5b80634c1d6a69116101385780635c975abb116101125780635c975abb146102c25780636e296e45146102e2578063
83a740741461031c57600080fd5b80634c1d6a691461024057806354fd4d50146102565780635644cfdf146102ac57600080fd5b80632f7d3922116101695780632f7d3922146101ed5780633dbb202b146102035780633f827a5a1461021857600080fd5b8063028f85f7146101905780630c568498146101c35780632828d7e8146101d8575b600080fd5b34801561019c57600080fd5b506101a5601081565b60405167ffffffffffffffff90911681526020015b60405180910390f35b3480156101cf57600080fd5b506101a5603f81565b3480156101e457600080fd5b506101a5604081565b3480156101f957600080fd5b506101a561520881565b610216610211366004611861565b6104ce565b005b34801561022457600080fd5b5061022d600181565b60405161ffff90911681526020016101ba565b34801561024c57600080fd5b506101a5619c4081565b34801561026257600080fd5b5061029f6040518060400160405280600581526020017f322e322e3000000000000000000000000000000000000000000000000000000081525081565b6040516101ba9190611933565b3480156102b857600080fd5b506101a561138881565b3480156102ce57600080fd5b5060005b60405190151581526020016101ba565b3480156102ee57600080fd5b506102f7610761565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101ba565b34801561032857600080fd5b506101a562030d4081565b34801561033f57600080fd5b5060cf5473ffffffffffffffffffffffffffffffffffffffff166102f7565b34801561036a57600080fd5b506102d2610379366004611946565b60ce6020526000908152604090205460ff1681565b34801561039a57600080fd5b506102d26103a9366004611946565b60cb6020526000908152604090205460ff1681565b3480156103ca57600080fd5b506101a56103d936600461198e565b61084d565b3480156103ea57600080fd5b506102166103f9366004611a6e565b61090e565b61021661040c366004611a8b565b610b0d565b34801561041d57600080fd5b5060cf546102f79073ffffffffffffffffffffffffffffffffffffffff1681565b34801561044a57600080fd5b506101a561010481565b34801561046057600080fd5b506101a5602881565b34801561047557600080fd5b506104c060cd547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b6040519081526020016101ba565b60cf54604080516020601f86018190048102820181019092528481526106
369273ffffffffffffffffffffffffffffffffffffffff169161052c9190879087908190840183828082843760009201919091525087925061084d915050565b347fd764ad0b0000000000000000000000000000000000000000000000000000000061059860cd547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b338a34898c8c6040516024016105b49796959493929190611b5a565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff00000000000000000000000000000000000000000000000000000000909316929092179091526113f2565b8373ffffffffffffffffffffffffffffffffffffffff167fcb0f7ffd78f9aee47a248fae8db181db6eee833039123e026dcbff529522e52a3385856106bb60cd547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b866040516106cd959493929190611bb9565b60405180910390a260405134815233907f8ebb2ec2465bdb2a06a66fc37a0963af8a2a6a1479d81d56fdb8cbb98096d5469060200160405180910390a2505060cd80547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff808216600101167fffff0000000000000000000000000000000000000000000000000000000000009091161790555050565b60cc5460009073ffffffffffffffffffffffffffffffffffffffff167fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff215301610830576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603560248201527f43726f7373446f6d61696e4d657373656e6765723a2078446f6d61696e4d657360448201527f7361676553656e646572206973206e6f7420736574000000000000000000000060648201526084015b60405180910390fd5b5060cc5473ffffffffffffffffffffffffffffffffffffffff1690565b600080603f610863604063ffffffff8616611c36565b61086d9190611c66565b611388619c406108808162030d40611cb4565b61088a9190611cb4565b6108949190611cb4565b61089e9190611cb4565b9050600061010467ffffffffffffffff1685516108bb9190611ce0565b90506108f96108cb601083611c36565b6108d59084611cb456
5b67ffffffffffffffff166108ea602884611c36565b67ffffffffffffffff16611480565b61090590615208611cb4565b95945050505050565b6000547501000000000000000000000000000000000000000000900460ff1615808015610959575060005460017401000000000000000000000000000000000000000090910460ff16105b8061098b5750303b15801561098b575060005474010000000000000000000000000000000000000000900460ff166001145b610a17576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a65640000000000000000000000000000000000006064820152608401610827565b600080547fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff16740100000000000000000000000000000000000000001790558015610a9d57600080547fffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffff1675010000000000000000000000000000000000000000001790555b610aa682611499565b8015610b0957600080547fffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffffff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b5050565b60f087901c60028110610bc8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604d60248201527f43726f7373446f6d61696e4d657373656e6765723a206f6e6c7920766572736960448201527f6f6e2030206f722031206d657373616765732061726520737570706f7274656460648201527f20617420746869732074696d6500000000000000000000000000000000000000608482015260a401610827565b8061ffff16600003610cbd576000610c19878986868080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152508f92506115d5915050565b600081815260cb602052604090205490915060ff1615610cbb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603760248201527f43726f7373446f6d61696e4d657373656e6765723a206c65676163792077697460448201527f6864726177616c20616c72656164792072656c617965640000000000000000006064820152608401610827565b50
5b6000610d03898989898989898080601f0160208091040260200160405190810160405280939291908181526020018383808284376000920191909152506115f492505050565b9050610d4c60cf54337fffffffffffffffffffffffffeeeeffffffffffffffffffffffffffffffffeeef0173ffffffffffffffffffffffffffffffffffffffff90811691161490565b15610d8457853414610d6057610d60611cf8565b600081815260ce602052604090205460ff1615610d7f57610d7f611cf8565b610ed6565b3415610e38576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152605060248201527f43726f7373446f6d61696e4d657373656e6765723a2076616c7565206d75737460448201527f206265207a65726f20756e6c657373206d6573736167652069732066726f6d2060648201527f612073797374656d206164647265737300000000000000000000000000000000608482015260a401610827565b600081815260ce602052604090205460ff16610ed6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f43726f7373446f6d61696e4d657373656e6765723a206d65737361676520636160448201527f6e6e6f74206265207265706c61796564000000000000000000000000000000006064820152608401610827565b610edf87611617565b15610f92576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604360248201527f43726f7373446f6d61696e4d657373656e6765723a2063616e6e6f742073656e60448201527f64206d65737361676520746f20626c6f636b65642073797374656d206164647260648201527f6573730000000000000000000000000000000000000000000000000000000000608482015260a401610827565b600081815260cb602052604090205460ff1615611031576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603660248201527f43726f7373446f6d61696e4d657373656e6765723a206d65737361676520686160448201527f7320616c7265616479206265656e2072656c61796564000000000000000000006064820152608401610827565b61105285611043611388619c40611cb4565b67ffffffffffffffff1661166c565b1580611078575060cc5473ffffffffffffffffffffffffffffffffffffffff1661dead14155b1561119157600081815260ce602052604080822080547fffffffffffffffffffffffffffffffffffffffffff
ffffffffffffffffffff001660011790555182917f99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f91a27fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff320161118a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f43726f7373446f6d61696e4d657373656e6765723a206661696c656420746f2060448201527f72656c6179206d657373616765000000000000000000000000000000000000006064820152608401610827565b50506113e9565b60cc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff8a16179055600061122288619c405a6111e59190611d27565b8988888080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061168a92505050565b60cc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001661dead179055905080156112d857600082815260cb602052604090205460ff161561127557611275611cf8565b600082815260cb602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555183917f4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c91a26113e5565b600082815260ce602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790555183917f99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f91a27fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff32016113e5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f43726f7373446f6d61696e4d657373656e6765723a206661696c656420746f2060448201527f72656c6179206d657373616765000000000000000000000000000000000000006064820152608401610827565b5050505b50505050505050565b6040517fc2b3e5ac0000000000000000000000000000000000000000000000000000000081527342000000000000000000000000000000000000169063c2b3e5ac90849061144890889088908790600401611d3e565b6000604051808303818588803b15801561146157600080fd5b505af1158015611475573d6000803e3d6000fd5b505050505050505050565b600081831015611490578161
1492565b825b9392505050565b6000547501000000000000000000000000000000000000000000900460ff16611544576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e670000000000000000000000000000000000000000006064820152608401610827565b60cc5473ffffffffffffffffffffffffffffffffffffffff1661158e5760cc80547fffffffffffffffffffffffff00000000000000000000000000000000000000001661dead1790555b60cf80547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b60006115e3858585856116a2565b805190602001209050949350505050565b600061160487878787878761173b565b8051906020012090509695505050505050565b600073ffffffffffffffffffffffffffffffffffffffff8216301480611666575073ffffffffffffffffffffffffffffffffffffffff8216734200000000000000000000000000000000000016145b92915050565b600080603f83619c4001026040850201603f5a021015949350505050565b6000806000835160208501868989f195945050505050565b6060848484846040516024016116bb9493929190611d7d565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fcbd4ece9000000000000000000000000000000000000000000000000000000001790529050949350505050565b606086868686868660405160240161175896959493929190611dc7565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fd764ad0b0000000000000000000000000000000000000000000000000000000017905290509695505050505050565b73ffffffffffffffffffffffffffffffffffffffff811681146117fc57600080fd5b50565b60008083601f84011261181157600080fd5b50813567ffffffffffffffff81111561182957600080fd5b60208301915083602082850101111561184157600080fd5b9250929050565b803563ffffffff8116811461185c57600080fd5b919050565b60008060008060608587031215611877576000
80fd5b8435611882816117da565b9350602085013567ffffffffffffffff81111561189e57600080fd5b6118aa878288016117ff565b90945092506118bd905060408601611848565b905092959194509250565b6000815180845260005b818110156118ee576020818501810151868301820152016118d2565b81811115611900576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061149260208301846118c8565b60006020828403121561195857600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600080604083850312156119a157600080fd5b823567ffffffffffffffff808211156119b957600080fd5b818501915085601f8301126119cd57600080fd5b8135818111156119df576119df61195f565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715611a2557611a2561195f565b81604052828152886020848701011115611a3e57600080fd5b826020860160208301376000602084830101528096505050505050611a6560208401611848565b90509250929050565b600060208284031215611a8057600080fd5b8135611492816117da565b600080600080600080600060c0888a031215611aa657600080fd5b873596506020880135611ab8816117da565b95506040880135611ac8816117da565b9450606088013593506080880135925060a088013567ffffffffffffffff811115611af257600080fd5b611afe8a828b016117ff565b989b979a50959850939692959293505050565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b878152600073ffffffffffffffffffffffffffffffffffffffff808916602084015280881660408401525085606083015263ffffffff8516608083015260c060a0830152611bac60c083018486611b11565b9998505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff86168152608060208201526000611be9608083018688611b11565b905083604083015263ffffffff831660608301529695505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b600067ffffffffffffffff80831681851681830481118215151615611c5d57611c5d611c0756
5b02949350505050565b600067ffffffffffffffff80841680611ca8577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b92169190910492915050565b600067ffffffffffffffff808316818516808303821115611cd757611cd7611c07565b01949350505050565b60008219821115611cf357611cf3611c07565b500190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052600160045260246000fd5b600082821015611d3957611d39611c07565b500390565b73ffffffffffffffffffffffffffffffffffffffff8416815267ffffffffffffffff8316602082015260606040820152600061090560608301846118c8565b600073ffffffffffffffffffffffffffffffffffffffff808716835280861660208401525060806040830152611db660808301856118c8565b905082606083015295945050505050565b868152600073ffffffffffffffffffffffffffffffffffffffff808816602084015280871660408401525084606083015283608083015260c060a0830152611e1260c08301846118c8565b9897505050505050505056fea164736f6c634300080f000a000000000000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2CrossDomainMessenger Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000001ec8608060405234801561001057600080fd5b50611ea8806100206000396000f3fe608060405234801561001057600080fd5b506004361061018d5760003560e01c806368d5dca6116100e3578063c59859181161008c578063f45e65d811610066578063f45e65d8146102fc578063f820614014610304578063fe173b971461029357600080fd5b8063c5985918146102ce578063de26c4a1146102d6578063f1c7a58b146102e957600080fd5b8063960e3a23116100bd578063960e3a23146102a1578063b3d72079146102b3578063b54501bc146102bb57600080fd5b806368d5dca6146102765780636ef25c3a146102935780638e98b1061461029957600080fd5b80632e0f2625116101455780634ef6e2241161011f5780634ef6e22414610218578063519b4bd31461022557806354fd4d501461022d57600080fd5b80632e0f2625146101f6578063313ce567146101fe57806349948e0e1461020557600080fd5b806322b90ab31161017657806322b90ab3146101d1578063275aedd2146101db578063291b0383146101ee57600080fd5b80630c18c16214610192578063105d0b81146101ad575b600080fd5b61019a61030c565b6040519081526020015b60405180910390f35b6000546101c1906301000000900460ff1681565b60405190151581526020016101a4565b6101d961042d565b005b61019a6101e93660046118fa565b6105b6565b6101d9610776565b61019a600681565b600661019a565b61019a610213366004611942565b61099e565b6000546101c19060ff1681565b61019a6109db565b6102696040518060400160405280600581526020017f312e362e3000000000000000000000000000000000000000000000000000000081525081565b6040516101a49190611a11565b61027e610a3c565b60405163ffffffff90911681526020016101a4565b4861019a565b6101d9610ac1565b6000546101c190610100900460ff1681565b6101d9610cbb565b6000546101c19062010000900460ff1681565b61027e610ec2565b61019a6102e4366004611942565b610f23565b61019a6102f73660046118fa565b61101d565b61019a6110f1565b61019a6111e4565b6000805460ff16156103a5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602860248201527f47617350726963654f7261636c653a206f7665726
8656164282920697320646560448201527f707265636174656400000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b73420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16638b239f736040518163ffffffff1660e01b8152600401602060405180830381865afa158015610404573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104289190611a84565b905090565b3373deaddeaddeaddeaddeaddeaddeaddeaddead0001146104f6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604160248201527f47617350726963654f7261636c653a206f6e6c7920746865206465706f73697460448201527f6f72206163636f756e742063616e2073657420697345636f746f6e6520666c6160648201527f6700000000000000000000000000000000000000000000000000000000000000608482015260a40161039c565b60005460ff1615610589576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f47617350726963654f7261636c653a2045636f746f6e6520616c72656164792060448201527f6163746976650000000000000000000000000000000000000000000000000000606482015260840161039c565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055565b6000805462010000900460ff166105cf57506000919050565b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16634d5d9a2a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610630573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106549190611a9d565b63ffffffff169050600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff166316d3bc7f6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156106bd573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106e19190611ac3565b67ffffffffffffffff169050600060039054906101000a900460ff161561072a578061070d8386611b1c565b610718906064611b1c565b6107229190611b59565b949350505050565b610722620f424083860286810485148715177ffffffffffffffffffffffffff
fffffffffffffffffffffffffffffffffffffff01176107699190611b71565b8281019081106000031790565b3373deaddeaddeaddeaddeaddeaddeaddeaddead00011461083f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604160248201527f47617350726963654f7261636c653a206f6e6c7920746865206465706f73697460448201527f6f72206163636f756e742063616e20736574206973497374686d757320666c6160648201527f6700000000000000000000000000000000000000000000000000000000000000608482015260a40161039c565b600054610100900460ff166108d6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603960248201527f47617350726963654f7261636c653a20497374686d75732063616e206f6e6c7960448201527f2062652061637469766174656420616674657220466a6f726400000000000000606482015260840161039c565b60005462010000900460ff161561096f576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f47617350726963654f7261636c653a20497374686d757320616c72656164792060448201527f6163746976650000000000000000000000000000000000000000000000000000606482015260840161039c565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffff1662010000179055565b60008054610100900460ff16156109be576109b882611245565b92915050565b60005460ff16156109d2576109b882611264565b6109b882611308565b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16635cf249696040518163ffffffff1660e01b8152600401602060405180830381865afa158015610404573d6000803e3d6000fd5b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff166368d5dca66040518163ffffffff1660e01b8152600401602060405180830381865afa158015610a9d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906104289190611a9d565b3373deaddeaddeaddeaddeaddeaddeaddeaddead000114610b64576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603f60248201527f47617350726963654f7261636c653a206f6e6c7920746865206465706f7369746
0448201527f6f72206163636f756e742063616e20736574206973466a6f726420666c616700606482015260840161039c565b60005460ff16610bf6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603960248201527f47617350726963654f7261636c653a20466a6f72642063616e206f6e6c79206260448201527f65206163746976617465642061667465722045636f746f6e6500000000000000606482015260840161039c565b600054610100900460ff1615610c8d576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f47617350726963654f7261636c653a20466a6f726420616c726561647920616360448201527f7469766500000000000000000000000000000000000000000000000000000000606482015260840161039c565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff16610100179055565b3373deaddeaddeaddeaddeaddeaddeaddeaddead000114610d6057604080517f08c379a00000000000000000000000000000000000000000000000000000000081526020600482015260248101919091527f47617350726963654f7261636c653a206f6e6c7920746865206465706f73697460448201527f6f72206163636f756e742063616e207365742069734a6f7669616e20666c6167606482015260840161039c565b60005462010000900460ff16610df8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603a60248201527f47617350726963654f7261636c653a204a6f7669616e2063616e206f6e6c792060448201527f62652061637469766174656420616674657220497374686d7573000000000000606482015260840161039c565b6000546301000000900460ff1615610e92576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f47617350726963654f7261636c653a204a6f7669616e20616c7265616479206160448201527f6374697665000000000000000000000000000000000000000000000000000000606482015260840161039c565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ffffff166301000000179055565b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff1663c59859186040518163ffffffff1660e01b8152600401602060405180830381865afa158015610a9d573
d6000803e3d6000fd5b60008054610100900460ff1615610f6a57620f4240610f55610f448461145c565b51610f50906044611b59565b611779565b610f60906010611b1c565b6109b89190611b71565b6000610f75836117d8565b60005490915060ff1615610f895792915050565b73420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16638b239f736040518163ffffffff1660e01b8152600401602060405180830381865afa158015610fe8573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061100c9190611a84565b6110169082611b59565b9392505050565b60008054610100900460ff166110b5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603660248201527f47617350726963654f7261636c653a206765744c314665655570706572426f7560448201527f6e64206f6e6c7920737570706f72747320466a6f726400000000000000000000606482015260840161039c565b60006110c2836044611b59565b905060006110d160ff83611b71565b6110db9083611b59565b6110e6906010611b59565b905061072281611868565b6000805460ff1615611185576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f47617350726963654f7261636c653a207363616c61722829206973206465707260448201527f6563617465640000000000000000000000000000000000000000000000000000606482015260840161039c565b73420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16639e8c49666040518163ffffffff1660e01b8152600401602060405180830381865afa158015610404573d6000803e3d6000fd5b600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff1663f82061406040518163ffffffff1660e01b8152600401602060405180830381865afa158015610404573d6000803e3d6000fd5b60006109b86112538361145c565b5161125f906044611b59565b611868565b600080611270836117d8565b9050600061127c6109db565b611284610ec2565b61128f906010611bac565b63ffffffff1661129f9190611b1c565b905060006112ab6111e4565b6112b3610a3c565b63ffffffff166112c39190611b1c565b905060006112d18284611b59565b6112db9085611b1c565b90506112e96006600a611cf8565b6112f4906010611b1c565b6112fe9082611b71565b9695505
050505050565b600080611314836117d8565b9050600073420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16639e8c49666040518163ffffffff1660e01b8152600401602060405180830381865afa158015611377573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061139b9190611a84565b6113a36109db565b73420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff16638b239f736040518163ffffffff1660e01b8152600401602060405180830381865afa158015611402573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906114269190611a84565b6114309085611b59565b61143a9190611b1c565b6114449190611b1c565b90506114526006600a611cf8565b6107229082611b71565b60606115eb565b818153600101919050565b600082840393505b838110156110165782810151828201511860001a1590930292600101611476565b825b602082106114e35782516114ae601f83611463565b52602092909201917fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe090910190602101611499565b81156110165782516114f86001840383611463565b520160010192915050565b60006001830392505b6101078210611544576115368360ff1661153160fd6115318760081c60e00189611463565b611463565b93506101068203915061150c565b600782106115715761156a8360ff16611531600785036115318760081c60e00189611463565b9050611016565b6107228360ff166115318560081c8560051b0187611463565b6115e38282036115c76115b784600081518060001a8160011a60081b178160021a60101b17915050919050565b639e3779b90260131c611fff1690565b8060021b6040510182815160e01c1860e01b8151188152505050565b600101919050565b6180003860405139618000604051016020830180600d8551820103826002015b8181101561171e576000805b50508051604051600082901a600183901a60081b1760029290921a60101b91909117639e3779b9810260111c617ffc16909101805160e081811c878603811890911b9091189091528401908183039084841061167357506116ae565b600184019350611fff82116116a8578251600081901a600182901a60081b1760029190911a60101b1781036116a857506116ae565b50611617565b8383106116bc57505061171e565b600183039250858311156116da576116d78787888603611497565b96505b6116ee6009850160038501600385016
1146e565b91506116fb878284611503565b9650506117138461170e8684860161158a565b61158a565b91505080935061160b565b50506117308383848851850103611497565b925050506040519150618000820180820391508183526020830160005b8381101561176557828101518282015260200161174d565b506000920191825250602001604052919050565b60008061178983620cc394611b1c565b6117b3907ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffd763200611d04565b90506117c36064620f4240611d78565b8112156109b8576110166064620f4240611d78565b80516000908190815b8181101561185b578481815181106117fb576117fb611e34565b01602001517fff000000000000000000000000000000000000000000000000000000000000001660000361183b57611834600484611b59565b9250611849565b611846601084611b59565b92505b8061185381611e63565b9150506117e1565b5061072282610440611b59565b60008061187483611779565b905060006118806111e4565b611888610a3c565b63ffffffff166118989190611b1c565b6118a06109db565b6118a8610ec2565b6118b3906010611bac565b63ffffffff166118c39190611b1c565b6118cd9190611b59565b90506118db60066002611b1c565b6118e690600a611cf8565b6118f08284611b1c565b6107229190611b71565b60006020828403121561190c57600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60006020828403121561195457600080fd5b813567ffffffffffffffff8082111561196c57600080fd5b818401915084601f83011261198057600080fd5b81358181111561199257611992611913565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156119d8576119d8611913565b816040528281528760208487010111156119f157600080fd5b826020860160208301376000928101602001929092525095945050505050565b600060208083528351808285015260005b81811015611a3e57858101830151858201604001528201611a22565b81811115611a50576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b600060208284031215611a9657600080fd5b5051919050565b600060208284031215611aaf57600080fd5b815163ffffffff8116811461101657600080fd5b600060208284031215611ad557600
080fd5b815167ffffffffffffffff8116811461101657600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0483118215151615611b5457611b54611aed565b500290565b60008219821115611b6c57611b6c611aed565b500190565b600082611ba7577f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b500490565b600063ffffffff80831681851681830481118215151615611bcf57611bcf611aed565b02949350505050565b600181815b80851115611c3157817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611c1757611c17611aed565b80851615611c2457918102915b93841c9390800290611bdd565b509250929050565b600082611c48575060016109b8565b81611c55575060006109b8565b8160018114611c6b5760028114611c7557611c91565b60019150506109b8565b60ff841115611c8657611c86611aed565b50506001821b6109b8565b5060208310610133831016604e8410600b8410161715611cb4575081810a6109b8565b611cbe8383611bd8565b807fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff04821115611cf057611cf0611aed565b029392505050565b60006110168383611c39565b6000808212827f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff03841381151615611d3e57611d3e611aed565b827f8000000000000000000000000000000000000000000000000000000000000000038412811615611d7257611d72611aed565b50500190565b60007f7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600084136000841385830485118282161615611db957611db9611aed565b7f80000000000000000000000000000000000000000000000000000000000000006000871286820588128184161615611df457611df4611aed565b60008712925087820587128484161615611e1057611e10611aed565b87850587128184161615611e2657611e26611aed565b505050929093029392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8203611e9457611e94611aed565b506001019056fea164736f6c634300080f000a0000000000000000000000000000000000000
00000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy GasPriceOracle Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000002b8d60806040523480156200001157600080fd5b506200001c62000022565b620000e4565b600054610100900460ff16156200008f5760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b60005460ff9081161015620000e2576000805460ff191660ff9081179091556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b612a9980620000f46000396000f3fe6080604052600436106101125760003560e01c80635c975abb116100a5578063927ede2d11610074578063c4d66de811610059578063c4d66de8146103ee578063c89701a21461040e578063e11013dd1461043b57600080fd5b8063927ede2d146103b0578063a3a79548146103db57600080fd5b80635c975abb1461032e5780637f46ddb214610244578063870876231461034a5780638f601f661461036a57600080fd5b806336c717c1116100e157806336c717c1146102445780633cb747bf14610295578063540abf73146102c257806354fd4d50146102e257600080fd5b80630166a07a146101eb57806309fc88431461020b5780631635f5fd1461021e57806332b7006d1461023157600080fd5b366101e65761011f61044e565b6101b0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603760248201527f5374616e646172644272696467653a2066756e6374696f6e2063616e206f6e6c60448201527f792062652063616c6c65642066726f6d20616e20454f4100000000000000000060648201526084015b60405180910390fd5b6101e473deaddeaddeaddeaddeaddeaddeaddeaddead000033333462030d406040518060200160405280600081525061048b565b005b600080fd5b3480156101f757600080fd5b506101e461020636600461248c565b610566565b6101e461021936600461253d565b610908565b6101e46
1022c366004612590565b6109e4565b6101e461023f366004612603565b610e36565b34801561025057600080fd5b5060045473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b3480156102a157600080fd5b5060035461026b9073ffffffffffffffffffffffffffffffffffffffff1681565b3480156102ce57600080fd5b506101e46102dd366004612657565b610f15565b3480156102ee57600080fd5b50604080518082018252600681527f312e31332e3000000000000000000000000000000000000000000000000000006020820152905161028c9190612744565b34801561033a57600080fd5b506040516000815260200161028c565b34801561035657600080fd5b506101e4610365366004612757565b610f5a565b34801561037657600080fd5b506103a26103853660046127da565b600260209081526000928352604080842090915290825290205481565b60405190815260200161028c565b3480156103bc57600080fd5b5060035473ffffffffffffffffffffffffffffffffffffffff1661026b565b6101e46103e9366004612757565b611033565b3480156103fa57600080fd5b506101e4610409366004612813565b611077565b34801561041a57600080fd5b5060045461026b9073ffffffffffffffffffffffffffffffffffffffff1681565b6101e4610449366004612830565b611220565b600032330361045d5750600190565b333b60170361048557604051602081016040526020600082333c5160e81c62ef010014905090565b50600090565b7fffffffffffffffffffffffff215221522152215221522152215221522153000073ffffffffffffffffffffffffffffffffffffffff8716016104da576104d58585858585611269565b61055e565b60008673ffffffffffffffffffffffffffffffffffffffff1663c01e1bd66040518163ffffffff1660e01b8152600401602060405180830381865afa158015610527573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061054b9190612893565b905061055c87828888888888611433565b505b505050505050565b60035473ffffffffffffffffffffffffffffffffffffffff1633148015610639575060048054600354604080517f6e296e45000000000000000000000000000000000000000000000000000000008152905173ffffffffffffffffffffffffffffffffffffffff938416949390921692636e296e459282820192602092908290030181865afa1580156105fd573d6000803e3d6000fd5b505050506040513d601f19601
f820116820180604052508101906106219190612893565b73ffffffffffffffffffffffffffffffffffffffff16145b6106eb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604160248201527f5374616e646172644272696467653a2066756e6374696f6e2063616e206f6e6c60448201527f792062652063616c6c65642066726f6d20746865206f7468657220627269646760648201527f6500000000000000000000000000000000000000000000000000000000000000608482015260a4016101a7565b6106f4876117ec565b1561084257610703878761184e565b6107b5576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f5374616e646172644272696467653a2077726f6e672072656d6f746520746f6b60448201527f656e20666f72204f7074696d69736d204d696e7461626c65204552433230206c60648201527f6f63616c20746f6b656e00000000000000000000000000000000000000000000608482015260a4016101a7565b6040517f40c10f1900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8581166004830152602482018590528816906340c10f1990604401600060405180830381600087803b15801561082557600080fd5b505af1158015610839573d6000803e3d6000fd5b505050506108c4565b73ffffffffffffffffffffffffffffffffffffffff8088166000908152600260209081526040808320938a16835292905220546108809084906128df565b73ffffffffffffffffffffffffffffffffffffffff8089166000818152600260209081526040808320948c16835293905291909120919091556108c490858561196e565b61055c878787878787878080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250611a4292505050565b61091061044e565b61099c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603760248201527f5374616e646172644272696467653a2066756e6374696f6e2063616e206f6e6c60448201527f792062652063616c6c65642066726f6d20616e20454f4100000000000000000060648201526084016101a7565b6109df3333348686868080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061126992505050565b505050565b60035473fffffffffffffffffff
fffffffffffffffffffff1633148015610ab7575060048054600354604080517f6e296e45000000000000000000000000000000000000000000000000000000008152905173ffffffffffffffffffffffffffffffffffffffff938416949390921692636e296e459282820192602092908290030181865afa158015610a7b573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610a9f9190612893565b73ffffffffffffffffffffffffffffffffffffffff16145b610b69576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604160248201527f5374616e646172644272696467653a2066756e6374696f6e2063616e206f6e6c60448201527f792062652063616c6c65642066726f6d20746865206f7468657220627269646760648201527f6500000000000000000000000000000000000000000000000000000000000000608482015260a4016101a7565b823414610bf8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603a60248201527f5374616e646172644272696467653a20616d6f756e742073656e7420646f657360448201527f206e6f74206d6174636820616d6f756e7420726571756972656400000000000060648201526084016101a7565b3073ffffffffffffffffffffffffffffffffffffffff851603610c9d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f5374616e646172644272696467653a2063616e6e6f742073656e6420746f207360448201527f656c66000000000000000000000000000000000000000000000000000000000060648201526084016101a7565b60035473ffffffffffffffffffffffffffffffffffffffff90811690851603610d48576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602860248201527f5374616e646172644272696467653a2063616e6e6f742073656e6420746f206d60448201527f657373656e67657200000000000000000000000000000000000000000000000060648201526084016101a7565b610d8a85858585858080601f016020809104026020016040519081016040528093929190818152602001838380828437600092019190915250611ad092505050565b6000610da7855a8660405180602001604052806000815250611b71565b90508061055e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820
152602360248201527f5374616e646172644272696467653a20455448207472616e736665722066616960448201527f6c6564000000000000000000000000000000000000000000000000000000000060648201526084016101a7565b610e3e61044e565b610eca576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603760248201527f5374616e646172644272696467653a2066756e6374696f6e2063616e206f6e6c60448201527f792062652063616c6c65642066726f6d20616e20454f4100000000000000000060648201526084016101a7565b610f0e853333878787878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061048b92505050565b5050505050565b61055c87873388888888888080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061143392505050565b610f6261044e565b610fee576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603760248201527f5374616e646172644272696467653a2066756e6374696f6e2063616e206f6e6c60448201527f792062652063616c6c65642066726f6d20616e20454f4100000000000000000060648201526084016101a7565b61055e86863333888888888080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061143392505050565b61055e863387878787878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061048b92505050565b600054610100900460ff16158080156110975750600054600160ff909116105b806110b15750303b1580156110b1575060005460ff166001145b61113d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084016101a7565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055801561119b57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b6111b973420000000000000000000000000000000000000783611b89565b801561121c5760008
0547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b5050565b6112633385348686868080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201919091525061126992505050565b50505050565b8234146112f8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603e60248201527f5374616e646172644272696467653a206272696467696e6720455448206d757360448201527f7420696e636c7564652073756666696369656e74204554482076616c7565000060648201526084016101a7565b61130485858584611c73565b60035460045460405173ffffffffffffffffffffffffffffffffffffffff92831692633dbb202b9287929116907f1635f5fd0000000000000000000000000000000000000000000000000000000090611367908b908b9086908a906024016128f6565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009485161790525160e086901b90921682526113fa9291889060040161293f565b6000604051808303818588803b15801561141357600080fd5b505af1158015611427573d6000803e3d6000fd5b50505050505050505050565b34156114c1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f5374616e646172644272696467653a2063616e6e6f742073656e642076616c7560448201527f650000000000000000000000000000000000000000000000000000000000000060648201526084016101a7565b6114ca876117ec565b15611618576114d9878761184e565b61158b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f5374616e646172644272696467653a2077726f6e672072656d6f746520746f6b60448201527f656e20666f72204f7074696d69736d204d696e7461626c65204552433230206c60648201527f6f63616c20746f6b656e00000000000000000000000000000000000000000000608482015260a4016101a7565b6040517f9dc29fac000000000000000000000000000000000000000000000
00000000000815273ffffffffffffffffffffffffffffffffffffffff868116600483015260248201859052881690639dc29fac90604401600060405180830381600087803b1580156115fb57600080fd5b505af115801561160f573d6000803e3d6000fd5b505050506116ac565b61163a73ffffffffffffffffffffffffffffffffffffffff8816863086611d14565b73ffffffffffffffffffffffffffffffffffffffff8088166000908152600260209081526040808320938a1683529290522054611678908490612984565b73ffffffffffffffffffffffffffffffffffffffff8089166000908152600260209081526040808320938b16835292905220555b6116ba878787878786611d72565b60035460045460405173ffffffffffffffffffffffffffffffffffffffff92831692633dbb202b9216907f0166a07a000000000000000000000000000000000000000000000000000000009061171e908b908d908c908c908c908b9060240161299c565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009485161790525160e085901b90921682526117b19291879060040161293f565b600060405180830381600087803b1580156117cb57600080fd5b505af11580156117df573d6000803e3d6000fd5b5050505050505050505050565b6000611818827f1d1d8b6300000000000000000000000000000000000000000000000000000000611e00565b806118485750611848827fec4fc8e300000000000000000000000000000000000000000000000000000000611e00565b92915050565b600061187a837f1d1d8b6300000000000000000000000000000000000000000000000000000000611e00565b15611923578273ffffffffffffffffffffffffffffffffffffffff1663c01e1bd66040518163ffffffff1660e01b8152600401602060405180830381865afa1580156118ca573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906118ee9190612893565b73ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16149050611848565b8273ffffffffffffffffffffffffffffffffffffffff1663d6c0b2c46040518163ffffffff1660e01b8152600401602060405180830381865afa1580156118ca573d6000803e3d6000fd5b60405173ffffffffffffffffffffffffffffffffffffffff83166024820152604481018
290526109df9084907fa9059cbb00000000000000000000000000000000000000000000000000000000906064015b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff0000000000000000000000000000000000000000000000000000000090931692909217909152611e23565b8373ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff167fb0444523268717a02698be47d0803aa7468c00acbed2f8bd93a0459cde61dd89868686604051611aba939291906129f7565b60405180910390a461055e868686868686611f2f565b8373ffffffffffffffffffffffffffffffffffffffff1673deaddeaddeaddeaddeaddeaddeaddeaddead000073ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167fb0444523268717a02698be47d0803aa7468c00acbed2f8bd93a0459cde61dd89868686604051611b5d939291906129f7565b60405180910390a461126384848484611fb7565b6000806000835160208501868989f195945050505050565b600054610100900460ff16611c20576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016101a7565b6003805473ffffffffffffffffffffffffffffffffffffffff9384167fffffffffffffffffffffffff00000000000000000000000000000000000000009182161790915560048054929093169116179055565b8373ffffffffffffffffffffffffffffffffffffffff1673deaddeaddeaddeaddeaddeaddeaddeaddead000073ffffffffffffffffffffffffffffffffffffffff16600073ffffffffffffffffffffffffffffffffffffffff167f73d170910aba9e6d50b102db522b1dbcd796216f5128b445aa2135272886497e868686604051611d00939291906129f7565b60405180910390a461126384848484612024565b60405173ffffffffffffffffffffffffffffffffffffffff808516602483015283166044820152606481018290526112639085907f23b872dd00000000000000000000000000000000000000000000000000000000906084016119c0565b8373fffffffffff
fffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff167f73d170910aba9e6d50b102db522b1dbcd796216f5128b445aa2135272886497e868686604051611dea939291906129f7565b60405180910390a461055e868686868686612083565b6000611e0b836120fb565b8015611e1c5750611e1c838361215f565b9392505050565b6000611e85826040518060400160405280602081526020017f5361666545524332303a206c6f772d6c6576656c2063616c6c206661696c65648152508573ffffffffffffffffffffffffffffffffffffffff1661222e9092919063ffffffff16565b8051909150156109df5780806020019051810190611ea39190612a35565b6109df576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f5361666545524332303a204552433230206f7065726174696f6e20646964206e60448201527f6f7420737563636565640000000000000000000000000000000000000000000060648201526084016101a7565b8373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff168773ffffffffffffffffffffffffffffffffffffffff167fd59c65b35445225835c83f50b6ede06a7be047d22e357073e250d9af537518cd868686604051611fa7939291906129f7565b60405180910390a4505050505050565b8273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167f31b2166ff604fc5672ea5df08a78081d2bc6d746cadce880747f3643d819e83d8484604051612016929190612a57565b60405180910390a350505050565b8273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167f2849b43074093a05396b6f2a937dee8565b15a48a7b3d4bffb732a5017380af58484604051612016929190612a57565b8373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff168773ffffffffffffffffffffffffffffffffffffffff167f7ff126db8024424bbfd9826e8ab82ff59136289ea440b04b39a0df1b03b9cabf868686604051611fa7939291906129f7565b6000612127827f01ffc9a70000000000000000000000000000000000000000000000000000000061215f565b80156118485750612158827fffffffff0000000000000000000000000000000000000000000000000000000061215f565b1592915050565b60408
0517fffffffff000000000000000000000000000000000000000000000000000000008316602480830191909152825180830390910181526044909101909152602080820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f01ffc9a700000000000000000000000000000000000000000000000000000000178152825160009392849283928392918391908a617530fa92503d91506000519050828015612217575060208210155b80156122235750600081115b979650505050505050565b606061223d8484600085612245565b949350505050565b6060824710156122d7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f416464726573733a20696e73756666696369656e742062616c616e636520666f60448201527f722063616c6c000000000000000000000000000000000000000000000000000060648201526084016101a7565b73ffffffffffffffffffffffffffffffffffffffff85163b612355576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a2063616c6c20746f206e6f6e2d636f6e747261637400000060448201526064016101a7565b6000808673ffffffffffffffffffffffffffffffffffffffff16858760405161237e9190612a70565b60006040518083038185875af1925050503d80600081146123bb576040519150601f19603f3d011682016040523d82523d6000602084013e6123c0565b606091505b5091509150612223828286606083156123da575081611e1c565b8251156123ea5782518084602001fd5b816040517f08c379a00000000000000000000000000000000000000000000000000000000081526004016101a79190612744565b73ffffffffffffffffffffffffffffffffffffffff8116811461244057600080fd5b50565b60008083601f84011261245557600080fd5b50813567ffffffffffffffff81111561246d57600080fd5b60208301915083602082850101111561248557600080fd5b9250929050565b600080600080600080600060c0888a0312156124a757600080fd5b87356124b28161241e565b965060208801356124c28161241e565b955060408801356124d28161241e565b945060608801356124e28161241e565b93506080880135925060a088013567ffffffffffffffff81111561250557600080fd5b6125118a828b01612443565b989b979a50959850939692959293505050565b803563ffffffff8116811461253857600080fd5b919050565b6000806000604084860312156
1255257600080fd5b61255b84612524565b9250602084013567ffffffffffffffff81111561257757600080fd5b61258386828701612443565b9497909650939450505050565b6000806000806000608086880312156125a857600080fd5b85356125b38161241e565b945060208601356125c38161241e565b935060408601359250606086013567ffffffffffffffff8111156125e657600080fd5b6125f288828901612443565b969995985093965092949392505050565b60008060008060006080868803121561261b57600080fd5b85356126268161241e565b94506020860135935061263b60408701612524565b9250606086013567ffffffffffffffff8111156125e657600080fd5b600080600080600080600060c0888a03121561267257600080fd5b873561267d8161241e565b9650602088013561268d8161241e565b9550604088013561269d8161241e565b9450606088013593506126b260808901612524565b925060a088013567ffffffffffffffff81111561250557600080fd5b60005b838110156126e95781810151838201526020016126d1565b838111156112635750506000910152565b600081518084526127128160208601602086016126ce565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000611e1c60208301846126fa565b60008060008060008060a0878903121561277057600080fd5b863561277b8161241e565b9550602087013561278b8161241e565b9450604087013593506127a060608801612524565b9250608087013567ffffffffffffffff8111156127bc57600080fd5b6127c889828a01612443565b979a9699509497509295939492505050565b600080604083850312156127ed57600080fd5b82356127f88161241e565b915060208301356128088161241e565b809150509250929050565b60006020828403121561282557600080fd5b8135611e1c8161241e565b6000806000806060858703121561284657600080fd5b84356128518161241e565b935061285f60208601612524565b9250604085013567ffffffffffffffff81111561287b57600080fd5b61288787828801612443565b95989497509550505050565b6000602082840312156128a557600080fd5b8151611e1c8161241e565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000828210156128f1576128f16128b0565b500390565b600073ffffffffffffffffffffffffffffffffffffffff8087168352808616602084015250836040830152608060608301526129356080830184612
6fa565b9695505050505050565b73ffffffffffffffffffffffffffffffffffffffff8416815260606020820152600061296e60608301856126fa565b905063ffffffff83166040830152949350505050565b60008219821115612997576129976128b0565b500190565b600073ffffffffffffffffffffffffffffffffffffffff80891683528088166020840152808716604084015280861660608401525083608083015260c060a08301526129eb60c08301846126fa565b98975050505050505050565b73ffffffffffffffffffffffffffffffffffffffff84168152826020820152606060408201526000612a2c60608301846126fa565b95945050505050565b600060208284031215612a4757600080fd5b81518015158114611e1c57600080fd5b82815260406020820152600061223d60408301846126fa565b60008251612a828184602087016126ce565b919091019291505056fea164736f6c634300080f000a00000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2StandardBridge Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000f6e6080604052348015600e575f80fd5b5060156019565b60c9565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000900460ff161560685760405163f92ee8a960e01b815260040160405180910390fd5b80546001600160401b039081161460c65780546001600160401b0319166001600160401b0390811782556040519081527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b50565b610e98806100d65f395ff3fe6080604052600436106100dc575f3560e01c80638312f1491161007c578063b49dc74111610057578063b49dc741146102a0578063d0e12f90146102bf578063d3e5792b146102ee578063d4ff9218146100e7575f80fd5b80638312f1491461025857806384411d651461026d57806385b5b14d14610281575f80fd5b80633ccfd60b116100b75780633ccfd60b1461017757806354fd4d501461019957806366d003ac146101ee57806382356d8a1461021a575f80fd5b80630d9019e1146100e7578063307f296214610
1375780633bbed4a014610158575f80fd5b366100e357005b5f80fd5b3480156100f2575f80fd5b5060025473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b348015610142575f80fd5b50610156610151366004610c65565b610302565b005b348015610163575f80fd5b50610156610172366004610ca2565b610485565b348015610182575f80fd5b5061018b6105e9565b60405190815260200161012e565b3480156101a4575f80fd5b506101e16040518060400160405280600581526020017f312e362e3000000000000000000000000000000000000000000000000000000081525081565b60405161012e9190610cbd565b3480156101f9575f80fd5b5060025461010d9073ffffffffffffffffffffffffffffffffffffffff1681565b348015610225575f80fd5b5060025461024b9074010000000000000000000000000000000000000000900460ff1681565b60405161012e9190610d76565b348015610263575f80fd5b5061018b60015481565b348015610278575f80fd5b5061018b5f5481565b34801561028c575f80fd5b5061015661029b366004610d8a565b6108f7565b3480156102ab575f80fd5b506101566102ba366004610da1565b610a1a565b3480156102ca575f80fd5b5060025474010000000000000000000000000000000000000000900460ff1661024b565b3480156102f9575f80fd5b5060015461018b565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561035f573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906103839190610ddc565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146103e7576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600280547401000000000000000000000000000000000000000080820460ff1692849290917fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff9091169083600181111561044357610443610d10565b02179055507ff2ec44eb1c3b3acd547b76333eb2c4b27eee311860c57a9fdb04c95f62398fc88183604051610479929190610df7565b60405180910390a15050565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da
5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156104e2573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906105069190610ddc565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461056a576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6002805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff000000000000000000000000000000000000000083168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e45939101610479565b5f6001544710156106a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d207769746864726160648201527f77616c20616d6f756e7400000000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b479050805f808282546106ba9190610e12565b90915550506002546040805183815273ffffffffffffffffffffffffffffffffffffffff909216602083018190523383830152905190917fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba919081900360600190a16002546040517f38e04cbeb8c10f8f568618aa75be0f10b6729b8b4237743b4de20cbcde2839ee9161076f9185918591339174010000000000000000000000000000000000000000900460ff1690610e4a565b60405180910390a1600160025474010000000000000000000000000000000000000000900460ff1660018111156107a8576107a8610d10565b0361084c575f6107b88284610c2e565b905080610847576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f4665655661756c743a206661696c656420746f2073656e642045544820746f2060448201527f4c322066656520726563697069656e7400000000000000000000000000000000606482015260840161069e565b505090565b6040517fc2b3e5ac00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8216600482015262061a8060248201526060604
48201525f60648201527342000000000000000000000000000000000000169063c2b3e5ac9084906084015f604051808303818588803b1580156108dc575f80fd5b505af11580156108ee573d5f803e3d5ffd5b50505050505090565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610954573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906109789190610ddc565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109dc576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180549082905560408051828152602081018490527f895a067c78583e800418fabf3da26a9496aab2ff3429cebdf7fefa642b2e42039101610479565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000810460ff16159067ffffffffffffffff165f81158015610a645750825b90505f8267ffffffffffffffff166001148015610a805750303b155b905081158015610a8e575080155b15610ac5576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001660011785558315610b265784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff16680100000000000000001785555b6002805473ffffffffffffffffffffffffffffffffffffffff8a167fffffffffffffffffffffffff000000000000000000000000000000000000000082168117835560018a81558993927fffffffffffffffffffffff000000000000000000000000000000000000000000169091179074010000000000000000000000000000000000000000908490811115610bbe57610bbe610d10565b02179055508315610c245784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff168555604051600181527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b5050505050505050565b5f610c3a835a84610c41565b9392505050565b5f805f805f858888f1949350505050565b803560028110610c60575f80fd5b919050565b5f60208284031215610c75575f80fd5b610c3a82610c52565b73fffffff
fffffffffffffffffffffffffffffffff81168114610c9f575f80fd5b50565b5f60208284031215610cb2575f80fd5b8135610c3a81610c7e565b602081525f82518060208401528060208501604085015e5f6040828501015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011684010191505092915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b60028110610d72577f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b9052565b60208101610d848284610d3d565b92915050565b5f60208284031215610d9a575f80fd5b5035919050565b5f805f60608486031215610db3575f80fd5b8335610dbe81610c7e565b925060208401359150610dd360408501610c52565b90509250925092565b5f60208284031215610dec575f80fd5b8151610c3a81610c7e565b60408101610e058285610d3d565b610c3a6020830184610d3d565b80820180821115610d84577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b84815273ffffffffffffffffffffffffffffffffffffffff84811660208301528316604082015260808101610e826060830184610d3d565b9594505050505056fea164736f6c6343000819000a000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy SequencerFeeVault Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000002b6d608060405234801561001057600080fd5b5061001961001e565b6100de565b600054610100900460ff161561008a5760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b60005460ff90811610156100dc576000805460ff191660ff9081179091556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b612a80806100ed6000396000f3fe60806040523480156200001157600080fd5b5060043610620000935760003560e01c8063c4d66de81162000062578063c4d66de81462000175578063ce5ac90f146200018e578063e78cea9214620001a5578063ee9a31a214620001c657600080fd5b8063316b3739146200009857806354fd4d5014620000fb578063896f93d114620001475780638cf0629c146200015e575b600080fd5b620000d1620000a936600462000636565b60026020526000908152604090205473ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b620001386040518060400160405280600681526020017f312e31302e32000000000000000000000000000000000000000000000000000081525081565b604051620000f29190620006c9565b620000d162000158366004620007c0565b620001e5565b620000d16200016f3660046200083d565b620001fc565b6200018c6200018636600462000636565b6200041b565b005b620000d16200019f366004620007c0565b620005ed565b600154620000d19073ffffffffffffffffffffffffffffffffffffffff1681565b60015473ffffffffffffffffffffffffffffffffffffffff16620000d1565b6000620001f4848484620005ed565b949350505050565b600073ffffffffffffffffffffffffffffffffffffffff8516620002a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603f60248201527f4f7074696d69736d4d696e7461626c654552433230466163746f72793a206d7560448201527f73742070726f766964652072656d6f746520746f6b656e20616464726573730
060648201526084015b60405180910390fd5b600085858585604051602001620002c29493929190620008d4565b604051602081830303815290604052805190602001209050600081600160009054906101000a900473ffffffffffffffffffffffffffffffffffffffff16888888886040516200031290620005fe565b620003229594939291906200092e565b8190604051809103906000f590508015801562000343573d6000803e3d6000fd5b5073ffffffffffffffffffffffffffffffffffffffff81811660008181526002602052604080822080547fffffffffffffffffffffffff000000000000000000000000000000000000000016948d1694851790555193945090927fceeb8e7d520d7f3b65fc11a262b91066940193b05d4f93df07cfdced0eb551cf9190a360405133815273ffffffffffffffffffffffffffffffffffffffff80891691908316907f52fe89dd5930f343d25650b62fd367bae47088bcddffd2a88350a6ecdd620cdb9060200160405180910390a39695505050505050565b600054610100900460ff16158080156200043c5750600054600160ff909116105b80620004585750303b15801562000458575060005460ff166001145b620004e6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084016200029e565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905580156200054557600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b600180547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff84161790558015620005e957600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b5050565b6000620001f48484846012620001fc565b6120e0806200099483390190565b803573ffffffffffffffffffffffffffffffffffffffff811681146200063157600080fd5b919050565b6000602082840312156200064957600080fd5b62000654826200060c565b9392505050565b6000815180845260005b81811015620006835760208185018101518683018201520
162000665565b8181111562000696576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006200065460208301846200065b565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f8301126200071f57600080fd5b813567ffffffffffffffff808211156200073d576200073d620006de565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715620007865762000786620006de565b81604052838152866020858801011115620007a057600080fd5b836020870160208301376000602085830101528094505050505092915050565b600080600060608486031215620007d657600080fd5b620007e1846200060c565b9250602084013567ffffffffffffffff80821115620007ff57600080fd5b6200080d878388016200070d565b935060408601359150808211156200082457600080fd5b5062000833868287016200070d565b9150509250925092565b600080600080608085870312156200085457600080fd5b6200085f856200060c565b9350602085013567ffffffffffffffff808211156200087d57600080fd5b6200088b888389016200070d565b94506040870135915080821115620008a257600080fd5b50620008b1878288016200070d565b925050606085013560ff81168114620008c957600080fd5b939692955090935050565b73ffffffffffffffffffffffffffffffffffffffff851681526080602082015260006200090560808301866200065b565b82810360408401526200091981866200065b565b91505060ff8316606083015295945050505050565b600073ffffffffffffffffffffffffffffffffffffffff808816835280871660208401525060a060408301526200096960a08301866200065b565b82810360608401526200097d81866200065b565b91505060ff83166080830152969550505050505056fe6101a06040523480156200001257600080fd5b50604051620020e0380380620020e0833981016040819052620000359162000215565b6040805180820190915260018152603160f81b6020820152839081908185600362000061838262000350565b50600462000070828262000350565b5050825160208085019190912083518483012060e08290526101008190524660a0818152604080517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f81880181905281830187905260608201869052608082019
490945230818401528151808203909301835260c0019052805194019390932091935091906080523060c05261012052505050506001600160a01b0394851661014052509390921661016052505060ff16610180526200041c565b80516001600160a01b03811681146200014357600080fd5b919050565b634e487b7160e01b600052604160045260246000fd5b600082601f8301126200017057600080fd5b81516001600160401b03808211156200018d576200018d62000148565b604051601f8301601f19908116603f01168101908282118183101715620001b857620001b862000148565b81604052838152602092508683858801011115620001d557600080fd5b600091505b83821015620001f95785820183015181830184015290820190620001da565b838211156200020b5760008385830101525b9695505050505050565b600080600080600060a086880312156200022e57600080fd5b62000239866200012b565b945062000249602087016200012b565b60408701519094506001600160401b03808211156200026757600080fd5b6200027589838a016200015e565b945060608801519150808211156200028c57600080fd5b506200029b888289016200015e565b925050608086015160ff81168114620002b357600080fd5b809150509295509295909350565b600181811c90821680620002d657607f821691505b602082108103620002f757634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200034b57600081815260208120601f850160051c81016020861015620003265750805b601f850160051c820191505b81811015620003475782815560010162000332565b5050505b505050565b81516001600160401b038111156200036c576200036c62000148565b62000384816200037d8454620002c1565b84620002fd565b602080601f831160018114620003bc5760008415620003a35750858301515b600019600386901b1c1916600185901b17855562000347565b600085815260208120601f198616915b82811015620003ed57888601518255948401946001909101908401620003cc565b50858210156200040c5787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b60805160a05160c05160e0516101005161012051610140516101605161018051611c37620004a960003960006102700152600081816103a70152818161041c0152818161064801526107aa0152600081816101d501526103cd01526000611174015260006111c30152600061119e015260006110f7015260006111210152600061114b0152611c376000f3fe608060405234801561001057600080fd5
b50600436106101a35760003560e01c806370a08231116100ee578063ae1f6aaf11610097578063d6c0b2c411610071578063d6c0b2c4146103cb578063dd62ed3e14610404578063e78cea92146103a5578063ee9a31a21461041757600080fd5b8063ae1f6aaf146103a5578063c01e1bd6146103cb578063d505accf146103f157600080fd5b80639dc29fac116100c85780639dc29fac1461036c578063a457c2d71461037f578063a9059cbb1461039257600080fd5b806370a082311461031b5780637ecebe001461035157806395d89b411461036457600080fd5b8063313ce5671161015057806340c10f191161012a57806340c10f19146102b557806354fd4d50146102ca5780636afdd8501461030657600080fd5b8063313ce567146102695780633644e5151461029a57806339509351146102a257600080fd5b8063095ea7b311610181578063095ea7b31461023157806318160ddd1461024457806323b872dd1461025657600080fd5b806301ffc9a7146101a8578063033964be146101d057806306fdde031461021c575b600080fd5b6101bb6101b636600461194b565b61043e565b60405190151581526020015b60405180910390f35b6101f77f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101c7565b61022461052f565b6040516101c7919061198d565b6101bb61023f366004611a29565b6105c1565b6002545b6040519081526020016101c7565b6101bb610264366004611a53565b6105db565b60405160ff7f00000000000000000000000000000000000000000000000000000000000000001681526020016101c7565b6102486105ff565b6101bb6102b0366004611a29565b61060e565b6102c86102c3366004611a29565b610630565b005b6102246040518060400160405280600581526020017f312e342e3100000000000000000000000000000000000000000000000000000081525081565b6e22d473030f116ddee9f6b43ac78ba36101f7565b610248610329366004611a8f565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b61024861035f366004611a8f565b610758565b610224610783565b6102c861037a366004611a29565b610792565b6101bb61038d366004611a29565b6108a9565b6101bb6103a0366004611a29565b610956565b7f00000000000000000000000000000000000000000000000000000000000000006101f7565b7f00000000000000000000000000000000000000000000000000000000000000006101f7565b6102c86103ff3
66004611aaa565b610964565b610248610412366004611b1d565b610b23565b6101f77f000000000000000000000000000000000000000000000000000000000000000081565b60007f01ffc9a7000000000000000000000000000000000000000000000000000000007f1d1d8b63000000000000000000000000000000000000000000000000000000007fec4fc8e3000000000000000000000000000000000000000000000000000000007fffffffff0000000000000000000000000000000000000000000000000000000085168314806104f757507fffffffff00000000000000000000000000000000000000000000000000000000858116908316145b8061052657507fffffffff00000000000000000000000000000000000000000000000000000000858116908216145b95945050505050565b60606003805461053e90611b50565b80601f016020809104026020016040519081016040528092919081815260200182805461056a90611b50565b80156105b75780601f1061058c576101008083540402835291602001916105b7565b820191906000526020600020905b81548152906001019060200180831161059a57829003601f168201915b5050505050905090565b6000336105cf818585610bc4565b60019150505b92915050565b6000336105e9858285610d78565b6105f4858585610e2a565b506001949350505050565b60006106096110dd565b905090565b6000336105cf8185856106218383610b23565b61062b9190611bcc565b610bc4565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000000000000000000000000000000000000000016146106fa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603460248201527f4f7074696d69736d4d696e7461626c6545524332303a206f6e6c79206272696460448201527f67652063616e206d696e7420616e64206275726e00000000000000000000000060648201526084015b60405180910390fd5b6107048282611211565b8173ffffffffffffffffffffffffffffffffffffffff167f0f6798a560793a54c3bcfe86a93cde1e73087d944c0ea20544137d41213968858260405161074c91815260200190565b60405180910390a25050565b73ffffffffffffffffffffffffffffffffffffffff81166000908152600560205260408120546105d5565b60606004805461053e90611b50565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610857576040517f08c379a000000000000000000
000000000000000000000000000000000000000815260206004820152603460248201527f4f7074696d69736d4d696e7461626c6545524332303a206f6e6c79206272696460448201527f67652063616e206d696e7420616e64206275726e00000000000000000000000060648201526084016106f1565b6108618282611331565b8173ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca58260405161074c91815260200190565b600033816108b78286610b23565b905083811015610949576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a2064656372656173656420616c6c6f77616e63652062656c6f7760448201527f207a65726f00000000000000000000000000000000000000000000000000000060648201526084016106f1565b6105f48286868403610bc4565b6000336105cf818585610e2a565b834211156109ce576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332305065726d69743a206578706972656420646561646c696e6500000060448201526064016106f1565b60007f6e71edae12b1b97f4d1f60370fef10105fa2faae0126114a169c64845d6126c98888886109fd8c611516565b60408051602081019690965273ffffffffffffffffffffffffffffffffffffffff94851690860152929091166060840152608083015260a082015260c0810186905260e0016040516020818303038152906040528051906020012090506000610a658261154b565b90506000610a75828787876115b4565b90508973ffffffffffffffffffffffffffffffffffffffff168173ffffffffffffffffffffffffffffffffffffffff1614610b0c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f45524332305065726d69743a20696e76616c6964207369676e6174757265000060448201526064016106f1565b610b178a8a8a610bc4565b50505050505050505050565b60007fffffffffffffffffffffffffffffffffffdd2b8cfcf0ee922116094bc538745d73ffffffffffffffffffffffffffffffffffffffff831601610b8957507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6105d5565b73ffffffffffffffffffffffffffffffffffffffff8084166000908152600160209081526040808320938616835292905220545b9392505050565b73fffff
fffffffffffffffffffffffffffffffffff8316610c66576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f45524332303a20617070726f76652066726f6d20746865207a65726f2061646460448201527f726573730000000000000000000000000000000000000000000000000000000060648201526084016106f1565b73ffffffffffffffffffffffffffffffffffffffff8216610d09576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a20617070726f766520746f20746865207a65726f20616464726560448201527f737300000000000000000000000000000000000000000000000000000000000060648201526084016106f1565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526001602090815260408083209487168084529482529182902085905590518481527f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92591015b60405180910390a3505050565b6000610d848484610b23565b90507fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8114610e245781811015610e17576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f45524332303a20696e73756666696369656e7420616c6c6f77616e636500000060448201526064016106f1565b610e248484848403610bc4565b50505050565b73ffffffffffffffffffffffffffffffffffffffff8316610ecd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f45524332303a207472616e736665722066726f6d20746865207a65726f20616460448201527f647265737300000000000000000000000000000000000000000000000000000060648201526084016106f1565b73ffffffffffffffffffffffffffffffffffffffff8216610f70576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602360248201527f45524332303a207472616e7366657220746f20746865207a65726f206164647260448201527f657373000000000000000000000000000000000000000000000000000000000060648201526084016106f1565b73ffffffffffffffffffffffffffffffffffffffff831660009081526020819052604090205481811015611026576040517f08c379a00000000000000000000
0000000000000000000000000000000000000815260206004820152602660248201527f45524332303a207472616e7366657220616d6f756e742065786365656473206260448201527f616c616e6365000000000000000000000000000000000000000000000000000060648201526084016106f1565b73ffffffffffffffffffffffffffffffffffffffff80851660009081526020819052604080822085850390559185168152908120805484929061106a908490611bcc565b925050819055508273ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff167fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef846040516110d091815260200190565b60405180910390a3610e24565b60003073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614801561114357507f000000000000000000000000000000000000000000000000000000000000000046145b1561116d57507f000000000000000000000000000000000000000000000000000000000000000090565b50604080517f00000000000000000000000000000000000000000000000000000000000000006020808301919091527f0000000000000000000000000000000000000000000000000000000000000000828401527f000000000000000000000000000000000000000000000000000000000000000060608301524660808301523060a0808401919091528351808403909101815260c0909201909252805191012090565b73ffffffffffffffffffffffffffffffffffffffff821661128e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f45524332303a206d696e7420746f20746865207a65726f20616464726573730060448201526064016106f1565b80600260008282546112a09190611bcc565b909155505073ffffffffffffffffffffffffffffffffffffffff8216600090815260208190526040812080548392906112da908490611bcc565b909155505060405181815273ffffffffffffffffffffffffffffffffffffffff8316906000907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef9060200160405180910390a35050565b73ffffffffffffffffffffffffffffffffffffffff82166113d4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f45524332303a206275726e2066726f6d20746865207a6
5726f2061646472657360448201527f730000000000000000000000000000000000000000000000000000000000000060648201526084016106f1565b73ffffffffffffffffffffffffffffffffffffffff82166000908152602081905260409020548181101561148a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45524332303a206275726e20616d6f756e7420657863656564732062616c616e60448201527f636500000000000000000000000000000000000000000000000000000000000060648201526084016106f1565b73ffffffffffffffffffffffffffffffffffffffff831660009081526020819052604081208383039055600280548492906114c6908490611be4565b909155505060405182815260009073ffffffffffffffffffffffffffffffffffffffff8516907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef90602001610d6b565b73ffffffffffffffffffffffffffffffffffffffff811660009081526005602052604090208054600181018255905b50919050565b60006105d56115586110dd565b836040517f19010000000000000000000000000000000000000000000000000000000000006020820152602281018390526042810182905260009060620160405160208183030381529060405280519060200120905092915050565b60008060006115c5878787876115dc565b915091506115d2816116f4565b5095945050505050565b6000807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a083111561161357506000905060036116eb565b8460ff16601b1415801561162b57508460ff16601c14155b1561163c57506000905060046116eb565b6040805160008082526020820180845289905260ff881692820192909252606081018690526080810185905260019060a0016020604051602081039080840390855afa158015611690573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015191505073ffffffffffffffffffffffffffffffffffffffff81166116e4576000600192509250506116eb565b9150600090505b94509492505050565b600081600481111561170857611708611bfb565b036117105750565b600181600481111561172457611724611bfb565b0361178b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f45434453413a20696e76616c6964207369676e61747572650000000000000000604
48201526064016106f1565b600281600481111561179f5761179f611bfb565b03611806576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601f60248201527f45434453413a20696e76616c6964207369676e6174757265206c656e6774680060448201526064016106f1565b600381600481111561181a5761181a611bfb565b036118a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202773272076616c60448201527f756500000000000000000000000000000000000000000000000000000000000060648201526084016106f1565b60048160048111156118bb576118bb611bfb565b03611948576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602260248201527f45434453413a20696e76616c6964207369676e6174757265202776272076616c60448201527f756500000000000000000000000000000000000000000000000000000000000060648201526084016106f1565b50565b60006020828403121561195d57600080fd5b81357fffffffff0000000000000000000000000000000000000000000000000000000081168114610bbd57600080fd5b600060208083528351808285015260005b818110156119ba5785810183015185820160400152820161199e565b818111156119cc576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b803573ffffffffffffffffffffffffffffffffffffffff81168114611a2457600080fd5b919050565b60008060408385031215611a3c57600080fd5b611a4583611a00565b946020939093013593505050565b600080600060608486031215611a6857600080fd5b611a7184611a00565b9250611a7f60208501611a00565b9150604084013590509250925092565b600060208284031215611aa157600080fd5b610bbd82611a00565b600080600080600080600060e0888a031215611ac557600080fd5b611ace88611a00565b9650611adc60208901611a00565b95506040880135945060608801359350608088013560ff81168114611b0057600080fd5b9699959850939692959460a0840135945060c09093013592915050565b60008060408385031215611b3057600080fd5b611b3983611a00565b9150611b4760208401611a00565b90509250929050565b600181811c90821680611b6457607f821691505b6020821
08103611545577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b60008219821115611bdf57611bdf611b9d565b500190565b600082821015611bf657611bf6611b9d565b500390565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fdfea164736f6c634300080f000aa164736f6c634300080f000a00000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy OptimismMintableERC20Factory Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca40000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000170d608060405234801561001057600080fd5b5061001961001e565b6100de565b600054610100900460ff161561008a5760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b60005460ff90811610156100dc576000805460ff191660ff9081179091556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b611620806100ed6000396000f3fe608060405234801561001057600080fd5b50600436106100be5760003560e01c80637f46ddb211610076578063aa5574521161005b578063aa557452146101c9578063c4d66de8146101dc578063c89701a2146101ef57600080fd5b80637f46ddb21461018d578063927ede2d146101ab57600080fd5b806354fd4d50116100a757806354fd4d50146101225780635c975abb1461016b578063761f44931461017a57600080fd5b80633687011a146100c35780633cb747bf146100d8575b600080fd5b6100d66100d136600461129e565b61020f565b005b6001546100f89073ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b61015e6040518060400160405280600681526020017f312
e31302e30000000000000000000000000000000000000000000000000000081525081565b604051610119919061138c565b60405160008152602001610119565b6100d661018836600461139f565b6102c0565b60025473ffffffffffffffffffffffffffffffffffffffff166100f8565b60015473ffffffffffffffffffffffffffffffffffffffff166100f8565b6100d66101d7366004611437565b6107de565b6100d66101ea3660046114ae565b61089a565b6002546100f89073ffffffffffffffffffffffffffffffffffffffff1681565b610217610a43565b6102a8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602d60248201527f4552433732314272696467653a206163636f756e74206973206e6f742065787460448201527f65726e616c6c79206f776e65640000000000000000000000000000000000000060648201526084015b60405180910390fd5b6102b88686333388888888610a80565b505050505050565b60015473ffffffffffffffffffffffffffffffffffffffff16331480156103955750600254600154604080517f6e296e45000000000000000000000000000000000000000000000000000000008152905173ffffffffffffffffffffffffffffffffffffffff9384169390921691636e296e45916004808201926020929091908290030181865afa158015610359573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061037d91906114cb565b73ffffffffffffffffffffffffffffffffffffffff16145b610421576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603f60248201527f4552433732314272696467653a2066756e6374696f6e2063616e206f6e6c792060448201527f62652063616c6c65642066726f6d20746865206f746865722062726964676500606482015260840161029f565b3073ffffffffffffffffffffffffffffffffffffffff8816036104c6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602a60248201527f4c324552433732314272696467653a206c6f63616c20746f6b656e2063616e6e60448201527f6f742062652073656c6600000000000000000000000000000000000000000000606482015260840161029f565b6104f0877faecafc2300000000000000000000000000000000000000000000000000000000610fd6565b61057c576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526
03660248201527f4c324552433732314272696467653a206c6f63616c20746f6b656e20696e746560448201527f7266616365206973206e6f7420636f6d706c69616e7400000000000000000000606482015260840161029f565b8673ffffffffffffffffffffffffffffffffffffffff1663d6c0b2c46040518163ffffffff1660e01b8152600401602060405180830381865afa1580156105c7573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906105eb91906114cb565b73ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff16146106cb576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604b60248201527f4c324552433732314272696467653a2077726f6e672072656d6f746520746f6b60448201527f656e20666f72204f7074696d69736d204d696e7461626c65204552433732312060648201527f6c6f63616c20746f6b656e000000000000000000000000000000000000000000608482015260a40161029f565b6040517fa144819400000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff85811660048301526024820185905288169063a144819490604401600060405180830381600087803b15801561073b57600080fd5b505af115801561074f573d6000803e3d6000fd5b505050508473ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff168873ffffffffffffffffffffffffffffffffffffffff167f1f39bf6707b5d608453e0ae4c067b562bcc4c85c0f562ef5d2c774d2e7f131ac878787876040516107cd9493929190611531565b60405180910390a450505050505050565b73ffffffffffffffffffffffffffffffffffffffff8516610881576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f4552433732314272696467653a206e667420726563697069656e742063616e6e60448201527f6f74206265206164647265737328302900000000000000000000000000000000606482015260840161029f565b6108918787338888888888610a80565b50505050505050565b600054610100900460ff16158080156108ba5750600054600160ff909116105b806108d45750303b1580156108d4575060005460ff166001145b610960576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248
201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a6564000000000000000000000000000000000000606482015260840161029f565b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905580156109be57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b6109dc73420000000000000000000000000000000000000783610ff9565b8015610a3f57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b5050565b6000323303610a525750600190565b333b601703610a7a57604051602081016040526020600082333c5160e81c62ef010014905090565b50600090565b73ffffffffffffffffffffffffffffffffffffffff8716610b23576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603160248201527f4c324552433732314272696467653a2072656d6f746520746f6b656e2063616e60448201527f6e6f742062652061646472657373283029000000000000000000000000000000606482015260840161029f565b6040517f6352211e0000000000000000000000000000000000000000000000000000000081526004810185905273ffffffffffffffffffffffffffffffffffffffff891690636352211e90602401602060405180830381865afa158015610b8e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610bb291906114cb565b73ffffffffffffffffffffffffffffffffffffffff168673ffffffffffffffffffffffffffffffffffffffff1614610c6c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603e60248201527f4c324552433732314272696467653a205769746864726177616c206973206e6f60448201527f74206265696e6720696e69746961746564206279204e4654206f776e65720000606482015260840161029f565b60008873ffffffffffffffffffffffffffffffffffffffff1663d6c0b2c46040518163ffffffff1660e01b8152600401602060405180830381865afa158015610cb9573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610cdd91906114cb565b90508773fffffffffffffffffffffffffffffffff
fffffff168173ffffffffffffffffffffffffffffffffffffffff1614610d9a576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603760248201527f4c324552433732314272696467653a2072656d6f746520746f6b656e20646f6560448201527f73206e6f74206d6174636820676976656e2076616c7565000000000000000000606482015260840161029f565b6040517f9dc29fac00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8881166004830152602482018790528a1690639dc29fac90604401600060405180830381600087803b158015610e0a57600080fd5b505af1158015610e1e573d6000803e3d6000fd5b505050506000818a8989898888604051602401610e419796959493929190611571565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f761f44930000000000000000000000000000000000000000000000000000000017905260015460025491517f3dbb202b00000000000000000000000000000000000000000000000000000000815292935073ffffffffffffffffffffffffffffffffffffffff90811692633dbb202b92610f1692169085908a906004016115ce565b600060405180830381600087803b158015610f3057600080fd5b505af1158015610f44573d6000803e3d6000fd5b505050508773ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff168b73ffffffffffffffffffffffffffffffffffffffff167fb7460e2a880f256ebef3406116ff3eee0cee51ebccdc2a40698f87ebb2e9c1a58a8a8989604051610fc29493929190611531565b60405180910390a450505050505050505050565b6000610fe1836110e3565b8015610ff25750610ff28383611148565b9392505050565b600054610100900460ff16611090576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e67000000000000000000000000000000000000000000606482015260840161029f565b6001805473ffffffffffffffffffffffffffffffffffffffff9384167fffffffffffffffffffffffff00000000000000000000000000000000000000009182161790915560028054929
093169116179055565b600061110f827f01ffc9a700000000000000000000000000000000000000000000000000000000611148565b80156111425750611140827fffffffff00000000000000000000000000000000000000000000000000000000611148565b155b92915050565b604080517fffffffff000000000000000000000000000000000000000000000000000000008316602480830191909152825180830390910181526044909101909152602080820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f01ffc9a700000000000000000000000000000000000000000000000000000000178152825160009392849283928392918391908a617530fa92503d91506000519050828015611200575060208210155b801561120c5750600081115b979650505050505050565b73ffffffffffffffffffffffffffffffffffffffff8116811461123957600080fd5b50565b803563ffffffff8116811461125057600080fd5b919050565b60008083601f84011261126757600080fd5b50813567ffffffffffffffff81111561127f57600080fd5b60208301915083602082850101111561129757600080fd5b9250929050565b60008060008060008060a087890312156112b757600080fd5b86356112c281611217565b955060208701356112d281611217565b9450604087013593506112e76060880161123c565b9250608087013567ffffffffffffffff81111561130357600080fd5b61130f89828a01611255565b979a9699509497509295939492505050565b6000815180845260005b818110156113475760208185018101518683018201520161132b565b81811115611359576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b602081526000610ff26020830184611321565b600080600080600080600060c0888a0312156113ba57600080fd5b87356113c581611217565b965060208801356113d581611217565b955060408801356113e581611217565b945060608801356113f581611217565b93506080880135925060a088013567ffffffffffffffff81111561141857600080fd5b6114248a828b01611255565b989b979a50959850939692959293505050565b600080600080600080600060c0888a03121561145257600080fd5b873561145d81611217565b9650602088013561146d81611217565b9550604088013561147d81611217565b9450606088013593506114926080890161123c565b925060a088013567ffffffffffffffff81111561141857600080fd5b6000602082840312156114c057600080fd5b813
5610ff281611217565b6000602082840312156114dd57600080fd5b8151610ff281611217565b8183528181602085013750600060208284010152600060207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f840116840101905092915050565b73ffffffffffffffffffffffffffffffffffffffff851681528360208201526060604082015260006115676060830184866114e8565b9695505050505050565b600073ffffffffffffffffffffffffffffffffffffffff808a1683528089166020840152808816604084015280871660608401525084608083015260c060a08301526115c160c0830184866114e8565b9998505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff841681526060602082015260006115fd6060830185611321565b905063ffffffff8316604083015294935050505056fea164736f6c634300080f000a00000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2ERC721Bridge Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000735608060405234801561001057600080fd5b50610715806100206000396000f3fe608060405234801561001057600080fd5b50600436106101985760003560e01c806364ca23ef116100e3578063c59859181161008c578063e81b2c6d11610066578063e81b2c6d146103f0578063f8206140146103f9578063fe3d57101461040257600080fd5b8063c598591814610375578063d844471514610395578063e591b282146103ce57600080fd5b80638b239f73116100bd5780638b239f73146103435780639e8c49661461034c578063b80777ea1461035557600080fd5b806364ca23ef146102ff57806368d5dca6146103135780638381f58a1461032f57600080fd5b80634397dfef1161014557806354fd4d501161011f57806354fd4d501461027b578063550fcdc9146102bd5780635cf24969146102f657600080fd5b80634397dfef1461021a578063440a5e20146102425780634d5d9a2a1461024a57600080fd5b806316d3bc7f1161017657806316d3bc7f146101d657806321326849146102035780633db6be2b1461021257600080fd5b8063015d8eb91461019d578063098999be146101b
257806309bd5a60146101ba575b600080fd5b6101b06101ab366004610623565b610433565b005b6101b0610572565b6101c360025481565b6040519081526020015b60405180910390f35b6008546101ea9067ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016101cd565b604051600081526020016101cd565b6101b0610585565b6040805173eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee815260126020820152016101cd565b6101b06105af565b6008546102669068010000000000000000900463ffffffff1681565b60405163ffffffff90911681526020016101cd565b60408051808201909152600581527f312e382e3000000000000000000000000000000000000000000000000000000060208201525b6040516101cd9190610695565b60408051808201909152600381527f455448000000000000000000000000000000000000000000000000000000000060208201526102b0565b6101c360015481565b6003546101ea9067ffffffffffffffff1681565b6003546102669068010000000000000000900463ffffffff1681565b6000546101ea9067ffffffffffffffff1681565b6101c360055481565b6101c360065481565b6000546101ea9068010000000000000000900467ffffffffffffffff1681565b600354610266906c01000000000000000000000000900463ffffffff1681565b60408051808201909152600581527f457468657200000000000000000000000000000000000000000000000000000060208201526102b0565b60405173deaddeaddeaddeaddeaddeaddeaddeaddead000181526020016101cd565b6101c360045481565b6101c360075481565b600854610420906c01000000000000000000000000900461ffff1681565b60405161ffff90911681526020016101cd565b3373deaddeaddeaddeaddeaddeaddeaddeaddead0001146104da576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603b60248201527f4c31426c6f636b3a206f6e6c7920746865206465706f7369746f72206163636f60448201527f756e742063616e20736574204c3120626c6f636b2076616c7565730000000000606482015260840160405180910390fd5b6000805467ffffffffffffffff98891668010000000000000000027fffffffffffffffffffffffffffffffff00000000000000000000000000000000909116998916999099179890981790975560019490945560029290925560038054919094167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009190911617909255600491909155600555600655565b6
1057a6105af565b60a43560a01c600855565b61058d6105af565b6dffff00000000000000000000000060b03560901c1660a43560a01c17600855565b73deaddeaddeaddeaddeaddeaddeaddeaddead00013381146105d957633cc50b456000526004601cfd5b60043560801c60035560143560801c60005560243560015560443560075560643560025560843560045550565b803567ffffffffffffffff8116811461061e57600080fd5b919050565b600080600080600080600080610100898b03121561064057600080fd5b61064989610606565b975061065760208a01610606565b9650604089013595506060890135945061067360808a01610606565b979a969950949793969560a0850135955060c08501359460e001359350915050565b600060208083528351808285015260005b818110156106c2578581018301518582016040015282016106a6565b818111156106d4576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201604001939250505056fea164736f6c634300080f000a0000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L1Block Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000006e9608060405234801561001057600080fd5b506106c9806100206000396000f3fe6080604052600436106100695760003560e01c806382e3702d1161004357806382e3702d14610120578063c2b3e5ac14610160578063ecc704281461017357600080fd5b80633f827a5a1461009257806344df8e70146100bf57806354fd4d50146100d457600080fd5b3661008d5761008b33620186a0604051806020016040528060008152506101d8565b005b600080fd5b34801561009e57600080fd5b506100a7600181565b60405161ffff90911681526020015b60405180910390f35b3480156100cb57600080fd5b5061008b61039c565b3480156100e057600080fd5b50604080518082018252600581527f312e322e30000000000000000000000000000000000000000000000000000000602082015290516100b691906104c7565b34801561012c57600080fd5b5061015061013b3660046104e1565b60006020819052908152604090205460ff1681565b60405190151581526020016100b6565b61008b61016e366004610529565b6101d8565b34801561017f57600080fd5b506101ca6001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b6040519081526020016100b6565b600061026e6040518060c001604052806102326001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b815233602082015273ffffffffffffffffffffffffffffffffffffffff871660408201523460608201526080810186905260a0018490526103d4565b600081815260208190526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055905073ffffffffffffffffffffffffffffffffffffffff8416336103096001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b7f02a52367d10742d8032712c1bb8e0144ff1ec5ffda1ed7d70bb05a27449550543487878760405161033e949392919061062d565b60405180910390a45050600180547dfffffffffffffffffffffffffffffffffffffffff
fffffffffffffffffff8082168301167fffff0000000000000000000000000000000000000000000000000000000000009091161790555050565b476103a681610421565b60405181907f7967de617a5ac1cc7eba2d6f37570a0135afa950d8bb77cdd35f0d0b4e85a16f90600090a250565b80516020808301516040808501516060860151608087015160a0880151935160009761040497909695910161065d565b604051602081830303815290604052805190602001209050919050565b8060405161042e90610450565b6040518091039082f090508015801561044b573d6000803e3d6000fd5b505050565b6008806106b583390190565b6000815180845260005b8181101561048257602081850181015186830182015201610466565b81811115610494576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006104da602083018461045c565b9392505050565b6000602082840312156104f357600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60008060006060848603121561053e57600080fd5b833573ffffffffffffffffffffffffffffffffffffffff8116811461056257600080fd5b925060208401359150604084013567ffffffffffffffff8082111561058657600080fd5b818601915086601f83011261059a57600080fd5b8135818111156105ac576105ac6104fa565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156105f2576105f26104fa565b8160405282815289602084870101111561060b57600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b84815283602082015260806040820152600061064c608083018561045c565b905082606083015295945050505050565b868152600073ffffffffffffffffffffffffffffffffffffffff808816602084015280871660408401525084606083015283608083015260c060a08301526106a860c083018461045c565b9897505050505050505056fe608060405230fffea164736f6c634300080f000a0000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2ToL1MessagePasser Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000003b91608060405234801561001057600080fd5b5061001961001e565b6100de565b600154610100900460ff161561008a5760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b60015460ff90811610156100dc576001805460ff191660ff9081179091556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b613aa4806100ed6000396000f3fe60806040523480156200001157600080fd5b5060043610620000935760003560e01c8063d23822421162000062578063d23822421462000150578063d97df652146200015a578063e78cea921462000197578063ee9a31a214620001be57600080fd5b806354fd4d5014620000985780635572acae14620000ed5780637d1d0c5b1462000124578063cd6dc6871462000137575b600080fd5b620000d56040518060400160405280600581526020017f312e352e3000000000000000000000000000000000000000000000000000000081525081565b604051620000e4919062000638565b60405180910390f35b62000113620000fe3660046200067e565b60006020819052908152604090205460ff1681565b6040519015158152602001620000e4565b6002545b604051908152602001620000e4565b6200014e620001483660046200069c565b620001e3565b005b6200012860025481565b620001716200016b366004620007ab565b620003be565b60405173ffffffffffffffffffffffffffffffffffffffff9091168152602001620000e4565b600154620001719062010000900473ffffffffffffffffffffffffffffffffffffffff1681565b60015462010000900473ffffffffffffffffffffffffffffffffffffffff1662000171565b600154610100900460ff16158080156200020157506001805460ff16105b806200021c5750303b1580156200021c57506001805460ff16145b620002ae576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a6564000000000000000000000
00000000000000060648201526084015b60405180910390fd5b600180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00168117905580156200030c57600180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b600180547fffffffffffffffffffff0000000000000000000000000000000000000000ffff166201000073ffffffffffffffffffffffffffffffffffffffff86160217905560028290558015620003b957600180547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff1681556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b505050565b600073ffffffffffffffffffffffffffffffffffffffff84166200048c576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526044602482018190527f4f7074696d69736d4d696e7461626c65455243373231466163746f72793a204c908201527f3120746f6b656e20616464726573732063616e6e6f742062652061646472657360648201527f7328302900000000000000000000000000000000000000000000000000000000608482015260a401620002a5565b6000848484604051602001620004a59392919062000828565b604051602081830303815290604052805190602001209050600081600160029054906101000a900473ffffffffffffffffffffffffffffffffffffffff16600254888888604051620004f790620005bc565b6200050795949392919062000877565b8190604051809103906000f590508015801562000528573d6000803e3d6000fd5b5073ffffffffffffffffffffffffffffffffffffffff8181166000818152602081815260409182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0016600117905590513381529394509189169290917fe72783bb8e0ca31286b85278da59684dd814df9762a52f0837f89edd1483b299910160405180910390a395945050505050565b6131bf80620008d983390190565b6000815180845260005b81811015620005f257602081850181015186830182015201620005d4565b8181111562000605576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006200064d6020830184620005ca565b9392505050565b803573ffffffffffffffffffffffffffffffffffffffff811681146200067957600080fd5b9190505
65b6000602082840312156200069157600080fd5b6200064d8262000654565b60008060408385031215620006b057600080fd5b620006bb8362000654565b946020939093013593505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f8301126200070a57600080fd5b813567ffffffffffffffff80821115620007285762000728620006c9565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715620007715762000771620006c9565b816040528381528660208588010111156200078b57600080fd5b836020870160208301376000602085830101528094505050505092915050565b600080600060608486031215620007c157600080fd5b620007cc8462000654565b9250602084013567ffffffffffffffff80821115620007ea57600080fd5b620007f887838801620006f8565b935060408601359150808211156200080f57600080fd5b506200081e86828701620006f8565b9150509250925092565b73ffffffffffffffffffffffffffffffffffffffff84168152606060208201526000620008596060830185620005ca565b82810360408401526200086d8185620005ca565b9695505050505050565b600073ffffffffffffffffffffffffffffffffffffffff808816835286602084015280861660408401525060a06060830152620008b860a0830185620005ca565b8281036080840152620008cc8185620005ca565b9897505050505050505056fe60e06040523480156200001157600080fd5b50604051620031bf380380620031bf83398101604081905262000034916200062d565b8181600062000044838262000756565b50600162000053828262000756565b5050506001600160a01b038516620000d85760405162461bcd60e51b815260206004820152603360248201527f4f7074696d69736d4d696e7461626c654552433732313a20627269646765206360448201527f616e6e6f7420626520616464726573732830290000000000000000000000000060648201526084015b60405180910390fd5b83600003620001505760405162461bcd60e51b815260206004820152603660248201527f4f7074696d69736d4d696e7461626c654552433732313a2072656d6f7465206360448201527f6861696e2069642063616e6e6f74206265207a65726f000000000000000000006064820152608401620000cf565b6001600160a01b038316620001ce5760405162461bcd60e51b815260206004820152603960248201527f4f7074696d69736d4d696e7461626c65455
2433732313a2072656d6f7465207460448201527f6f6b656e2063616e6e6f742062652061646472657373283029000000000000006064820152608401620000cf565b60808490526001600160a01b0383811660a081905290861660c0526200020290601462000256602090811b62000eed17901c565b62000218856200041660201b620011301760201c565b6040516020016200022b92919062000822565b604051602081830303815290604052600a90816200024a919062000756565b50505050505062000993565b6060600062000267836002620008ac565b62000274906002620008ce565b6001600160401b038111156200028e576200028e62000553565b6040519080825280601f01601f191660200182016040528015620002b9576020820181803683370190505b509050600360fc1b81600081518110620002d757620002d7620008e9565b60200101906001600160f81b031916908160001a905350600f60fb1b81600181518110620003095762000309620008e9565b60200101906001600160f81b031916908160001a90535060006200032f846002620008ac565b6200033c906001620008ce565b90505b6001811115620003be576f181899199a1a9b1b9c1cb0b131b232b360811b85600f1660108110620003745762000374620008e9565b1a60f81b8282815181106200038d576200038d620008e9565b60200101906001600160f81b031916908160001a90535060049490941c93620003b681620008ff565b90506200033f565b5083156200040f5760405162461bcd60e51b815260206004820181905260248201527f537472696e67733a20686578206c656e67746820696e73756666696369656e746044820152606401620000cf565b9392505050565b6060816000036200043e5750506040805180820190915260018152600360fc1b602082015290565b8160005b81156200046e5780620004558162000919565b9150620004669050600a836200094b565b915062000442565b6000816001600160401b038111156200048b576200048b62000553565b6040519080825280601f01601f191660200182016040528015620004b6576020820181803683370190505b5090505b84156200052e57620004ce60018362000962565b9150620004dd600a866200097c565b620004ea906030620008ce565b60f81b818381518110620005025762000502620008e9565b60200101906001600160f81b031916908160001a90535062000526600a866200094b565b9450620004ba565b949350505050565b80516001600160a01b03811681146200054e57600080fd5b919050565b634e487b7160e01b600052604160045260246000fd5b60005b838110156200058
65781810151838201526020016200056c565b8381111562000596576000848401525b50505050565b600082601f830112620005ae57600080fd5b81516001600160401b0380821115620005cb57620005cb62000553565b604051601f8301601f19908116603f01168101908282118183101715620005f657620005f662000553565b816040528381528660208588010111156200061057600080fd5b6200062384602083016020890162000569565b9695505050505050565b600080600080600060a086880312156200064657600080fd5b620006518662000536565b945060208601519350620006686040870162000536565b60608701519093506001600160401b03808211156200068657600080fd5b6200069489838a016200059c565b93506080880151915080821115620006ab57600080fd5b50620006ba888289016200059c565b9150509295509295909350565b600181811c90821680620006dc57607f821691505b602082108103620006fd57634e487b7160e01b600052602260045260246000fd5b50919050565b601f8211156200075157600081815260208120601f850160051c810160208610156200072c5750805b601f850160051c820191505b818110156200074d5782815560010162000738565b5050505b505050565b81516001600160401b0381111562000772576200077262000553565b6200078a81620007838454620006c7565b8462000703565b602080601f831160018114620007c25760008415620007a95750858301515b600019600386901b1c1916600185901b1785556200074d565b600085815260208120601f198616915b82811015620007f357888601518255948401946001909101908401620007d2565b5085821015620008125787850151600019600388901b60f8161c191681555b5050505050600190811b01905550565b6832ba3432b932bab69d60b91b8152600083516200084881600985016020880162000569565b600160fe1b60099184019182015283516200086b81600a84016020880162000569565b712f746f6b656e5552493f75696e743235363d60701b600a9290910191820152601c01949350505050565b634e487b7160e01b600052601160045260246000fd5b6000816000190483118215151615620008c957620008c962000896565b500290565b60008219821115620008e457620008e462000896565b500190565b634e487b7160e01b600052603260045260246000fd5b60008162000911576200091162000896565b506000190190565b6000600182016200092e576200092e62000896565b5060010190565b634e487b7160e01b600052601260045260246000fd5b6000826200095d576200095d620009355
65b500490565b60008282101562000977576200097762000896565b500390565b6000826200098e576200098e62000935565b500690565b60805160a05160c0516127d9620009e6600039600081816103e20152818161047a01528181610b210152610c430152600081816101e001526103bc015260008181610329015261040801526127d96000f3fe608060405234801561001057600080fd5b50600436106101ae5760003560e01c80637d1d0c5b116100ee578063c87b56dd11610097578063e78cea9211610071578063e78cea92146103e0578063e951819614610406578063e985e9c51461042c578063ee9a31a21461047557600080fd5b8063c87b56dd1461039f578063d547cfb7146103b2578063d6c0b2c4146103ba57600080fd5b8063a1448194116100c8578063a144819414610366578063a22cb46514610379578063b88d4fde1461038c57600080fd5b80637d1d0c5b1461032457806395d89b411461034b5780639dc29fac1461035357600080fd5b806323b872dd1161015b5780634f6ccce7116101355780634f6ccce7146102af57806354fd4d50146102c25780636352211e146102fe57806370a082311461031157600080fd5b806323b872dd146102765780632f745c591461028957806342842e0e1461029c57600080fd5b8063081812fc1161018c578063081812fc1461023c578063095ea7b31461024f57806318160ddd1461026457600080fd5b806301ffc9a7146101b3578063033964be146101db57806306fdde0314610227575b600080fd5b6101c66101c1366004612226565b61049c565b60405190151581526020015b60405180910390f35b6102027f000000000000000000000000000000000000000000000000000000000000000081565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101d2565b61022f6104fa565b6040516101d291906122b9565b61020261024a3660046122cc565b61058c565b61026261025d36600461230e565b6105c0565b005b6008545b6040519081526020016101d2565b610262610284366004612338565b610751565b61026861029736600461230e565b6107f2565b6102626102aa366004612338565b6108c1565b6102686102bd3660046122cc565b6108dc565b61022f6040518060400160405280600581526020017f312e332e3200000000000000000000000000000000000000000000000000000081525081565b61020261030c3660046122cc565b61099a565b61026861031f366004612374565b610a2c565b6102687f000000000000000000000000000000000000000000000000000000000000000081565b61022f610afa565b610262610361366004612
30e565b610b09565b61026261037436600461230e565b610c2b565b61026261038736600461238f565b610d42565b61026261039a3660046123fa565b610d51565b61022f6103ad3660046122cc565b610df9565b61022f610e5f565b7f0000000000000000000000000000000000000000000000000000000000000000610202565b7f0000000000000000000000000000000000000000000000000000000000000000610202565b7f0000000000000000000000000000000000000000000000000000000000000000610268565b6101c661043a3660046124f4565b73ffffffffffffffffffffffffffffffffffffffff918216600090815260056020908152604080832093909416825291909152205460ff1690565b6102027f000000000000000000000000000000000000000000000000000000000000000081565b60007faecafc23000000000000000000000000000000000000000000000000000000007fffffffff0000000000000000000000000000000000000000000000000000000083168114806104f357506104f38361126d565b9392505050565b60606000805461050990612527565b80601f016020809104026020016040519081016040528092919081815260200182805461053590612527565b80156105825780601f1061055757610100808354040283529160200191610582565b820191906000526020600020905b81548152906001019060200180831161056557829003601f168201915b5050505050905090565b6000610597826112c3565b5060009081526004602052604090205473ffffffffffffffffffffffffffffffffffffffff1690565b60006105cb8261099a565b90508073ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff160361068d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602160248201527f4552433732313a20617070726f76616c20746f2063757272656e74206f776e6560448201527f720000000000000000000000000000000000000000000000000000000000000060648201526084015b60405180910390fd5b3373ffffffffffffffffffffffffffffffffffffffff821614806106b657506106b6813361043a565b610742576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603e60248201527f4552433732313a20617070726f76652063616c6c6572206973206e6f7420746f60448201527f6b656e206f776e6572206e6f7220617070726f76656420666f7220616c6c00006064820152608401610684565b61074c8
383611351565b505050565b61075b33826113f1565b6107e7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f4552433732313a2063616c6c6572206973206e6f7420746f6b656e206f776e6560448201527f72206e6f7220617070726f7665640000000000000000000000000000000000006064820152608401610684565b61074c8383836114b0565b60006107fd83610a2c565b821061088b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f455243373231456e756d657261626c653a206f776e657220696e646578206f7560448201527f74206f6620626f756e64730000000000000000000000000000000000000000006064820152608401610684565b5073ffffffffffffffffffffffffffffffffffffffff919091166000908152600660209081526040808320938352929052205490565b61074c83838360405180602001604052806000815250610d51565b60006108e760085490565b8210610975576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602c60248201527f455243373231456e756d657261626c653a20676c6f62616c20696e646578206f60448201527f7574206f6620626f756e647300000000000000000000000000000000000000006064820152608401610684565b600882815481106109885761098861257a565b90600052602060002001549050919050565b60008181526002602052604081205473ffffffffffffffffffffffffffffffffffffffff1680610a26576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601860248201527f4552433732313a20696e76616c696420746f6b656e20494400000000000000006044820152606401610684565b92915050565b600073ffffffffffffffffffffffffffffffffffffffff8216610ad1576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602960248201527f4552433732313a2061646472657373207a65726f206973206e6f74206120766160448201527f6c6964206f776e657200000000000000000000000000000000000000000000006064820152608401610684565b5073ffffffffffffffffffffffffffffffffffffffff1660009081526003602052604090205490565b60606001805461050990612527565b3373ffffffffffffffffffffffffffffffffffffffff7f000000000000000000000000000
00000000000000000000000000000000000001614610bce576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603a60248201527f4f7074696d69736d4d696e7461626c654552433732313a206f6e6c792062726960448201527f6467652063616e2063616c6c20746869732066756e6374696f6e0000000000006064820152608401610684565b610bd781611722565b8173ffffffffffffffffffffffffffffffffffffffff167fcc16f5dbb4873280815c1ee09dbd06736cffcc184412cf7a71a0fdb75d397ca582604051610c1f91815260200190565b60405180910390a25050565b3373ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614610cf0576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603a60248201527f4f7074696d69736d4d696e7461626c654552433732313a206f6e6c792062726960448201527f6467652063616e2063616c6c20746869732066756e6374696f6e0000000000006064820152608401610684565b610cfa82826117fb565b8173ffffffffffffffffffffffffffffffffffffffff167f0f6798a560793a54c3bcfe86a93cde1e73087d944c0ea20544137d412139688582604051610c1f91815260200190565b610d4d338383611815565b5050565b610d5b33836113f1565b610de7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f4552433732313a2063616c6c6572206973206e6f7420746f6b656e206f776e6560448201527f72206e6f7220617070726f7665640000000000000000000000000000000000006064820152608401610684565b610df384848484611942565b50505050565b6060610e04826112c3565b6000610e0e6119e5565b90506000815111610e2e57604051806020016040528060008152506104f3565b80610e3884611130565b604051602001610e499291906125a9565b6040516020818303038152906040529392505050565b600a8054610e6c90612527565b80601f0160208091040260200160405190810160405280929190818152602001828054610e9890612527565b8015610ee55780601f10610eba57610100808354040283529160200191610ee5565b820191906000526020600020905b815481529060010190602001808311610ec857829003601f168201915b505050505081565b60606000610efc836002612607565b610f07906002612644565b67ffffffffffffffff811115610f1
f57610f1f6123cb565b6040519080825280601f01601f191660200182016040528015610f49576020820181803683370190505b5090507f300000000000000000000000000000000000000000000000000000000000000081600081518110610f8057610f8061257a565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053507f780000000000000000000000000000000000000000000000000000000000000081600181518110610fe357610fe361257a565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350600061101f846002612607565b61102a906001612644565b90505b60018111156110c7577f303132333435363738396162636465660000000000000000000000000000000085600f166010811061106b5761106b61257a565b1a60f81b8282815181106110815761108161257a565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535060049490941c936110c08161265c565b905061102d565b5083156104f3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f537472696e67733a20686578206c656e67746820696e73756666696369656e746044820152606401610684565b60608160000361117357505060408051808201909152600181527f3000000000000000000000000000000000000000000000000000000000000000602082015290565b8160005b811561119d578061118781612691565b91506111969050600a836126f8565b9150611177565b60008167ffffffffffffffff8111156111b8576111b86123cb565b6040519080825280601f01601f1916602001820160405280156111e2576020820181803683370190505b5090505b8415611265576111f760018361270c565b9150611204600a86612723565b61120f906030612644565b60f81b8183815181106112245761122461257a565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a90535061125e600a866126f8565b94506111e6565b949350505050565b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f780e9d63000000000000000000000000000000000000000000000000000000001480610a265750610a26826119f4565b60008181526002602052604090205473ffffffffffffffffffffffffffffffffffffffff1661134e576040517f08c379a00000000000000000000
0000000000000000000000000000000000000815260206004820152601860248201527f4552433732313a20696e76616c696420746f6b656e20494400000000000000006044820152606401610684565b50565b600081815260046020526040902080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff841690811790915581906113ab8261099a565b73ffffffffffffffffffffffffffffffffffffffff167f8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b92560405160405180910390a45050565b6000806113fd8361099a565b90508073ffffffffffffffffffffffffffffffffffffffff168473ffffffffffffffffffffffffffffffffffffffff16148061146b575073ffffffffffffffffffffffffffffffffffffffff80821660009081526005602090815260408083209388168352929052205460ff165b8061126557508373ffffffffffffffffffffffffffffffffffffffff166114918461058c565b73ffffffffffffffffffffffffffffffffffffffff1614949350505050565b8273ffffffffffffffffffffffffffffffffffffffff166114d08261099a565b73ffffffffffffffffffffffffffffffffffffffff1614611573576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602560248201527f4552433732313a207472616e736665722066726f6d20696e636f72726563742060448201527f6f776e65720000000000000000000000000000000000000000000000000000006064820152608401610684565b73ffffffffffffffffffffffffffffffffffffffff8216611615576040517f08c379a0000000000000000000000000000000000000000000000000000000008152602060048201526024808201527f4552433732313a207472616e7366657220746f20746865207a65726f2061646460448201527f72657373000000000000000000000000000000000000000000000000000000006064820152608401610684565b611620838383611ad7565b61162b600082611351565b73ffffffffffffffffffffffffffffffffffffffff8316600090815260036020526040812080546001929061166190849061270c565b909155505073ffffffffffffffffffffffffffffffffffffffff8216600090815260036020526040812080546001929061169c908490612644565b909155505060008181526002602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673fffffffffffffffffffffffffffffffff
fffffff86811691821790925591518493918716917fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef91a4505050565b600061172d8261099a565b905061173b81600084611ad7565b611746600083611351565b73ffffffffffffffffffffffffffffffffffffffff8116600090815260036020526040812080546001929061177c90849061270c565b909155505060008281526002602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001690555183919073ffffffffffffffffffffffffffffffffffffffff8416907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef908390a45050565b610d4d828260405180602001604052806000815250611bdd565b8173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff16036118aa576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601960248201527f4552433732313a20617070726f766520746f2063616c6c6572000000000000006044820152606401610684565b73ffffffffffffffffffffffffffffffffffffffff83811660008181526005602090815260408083209487168084529482529182902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001686151590811790915591519182527f17307eab39ab6107e8899845ad3d59bd9653f200f220920489ca2b5937696c31910160405180910390a3505050565b61194d8484846114b0565b61195984848484611c80565b610df3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603260248201527f4552433732313a207472616e7366657220746f206e6f6e20455243373231526560448201527f63656976657220696d706c656d656e74657200000000000000000000000000006064820152608401610684565b6060600a805461050990612527565b60007fffffffff0000000000000000000000000000000000000000000000000000000082167f80ac58cd000000000000000000000000000000000000000000000000000000001480611a8757507fffffffff0000000000000000000000000000000000000000000000000000000082167f5b5e139f00000000000000000000000000000000000000000000000000000000145b80610a2657507f01ffc9a7000000000000000000000000000000000000000000000000000000007fffffffff00000000000000000000000000000000000000000000000000000
000831614610a26565b73ffffffffffffffffffffffffffffffffffffffff8316611b3f57611b3a81600880546000838152600960205260408120829055600182018355919091527ff3f7a9fe364faab93b216da50a3214154f22a0a2b415b23a84c8169e8b636ee30155565b611b7c565b8173ffffffffffffffffffffffffffffffffffffffff168373ffffffffffffffffffffffffffffffffffffffff1614611b7c57611b7c8382611e73565b73ffffffffffffffffffffffffffffffffffffffff8216611ba05761074c81611f2a565b8273ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff161461074c5761074c8282611fd9565b611be7838361202a565b611bf46000848484611c80565b61074c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603260248201527f4552433732313a207472616e7366657220746f206e6f6e20455243373231526560448201527f63656976657220696d706c656d656e74657200000000000000000000000000006064820152608401610684565b600073ffffffffffffffffffffffffffffffffffffffff84163b15611e68576040517f150b7a0200000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff85169063150b7a0290611cf7903390899088908890600401612737565b6020604051808303816000875af1925050508015611d50575060408051601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201909252611d4d91810190612780565b60015b611e1d573d808015611d7e576040519150601f19603f3d011682016040523d82523d6000602084013e611d83565b606091505b508051600003611e15576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603260248201527f4552433732313a207472616e7366657220746f206e6f6e20455243373231526560448201527f63656976657220696d706c656d656e74657200000000000000000000000000006064820152608401610684565b805181602001fd5b7fffffffff00000000000000000000000000000000000000000000000000000000167f150b7a0200000000000000000000000000000000000000000000000000000000149050611265565b506001949350505050565b60006001611e8084610a2c565b611e8a919061270c565b600083815260076020526040902054909150808214611eea5773fffffffffffffffffffffffffffffffffff
fffff841660009081526006602090815260408083208584528252808320548484528184208190558352600790915290208190555b50600091825260076020908152604080842084905573ffffffffffffffffffffffffffffffffffffffff9094168352600681528383209183525290812055565b600854600090611f3c9060019061270c565b60008381526009602052604081205460088054939450909284908110611f6457611f6461257a565b906000526020600020015490508060088381548110611f8557611f8561257a565b6000918252602080832090910192909255828152600990915260408082208490558582528120556008805480611fbd57611fbd61279d565b6001900381819060005260206000200160009055905550505050565b6000611fe483610a2c565b73ffffffffffffffffffffffffffffffffffffffff9093166000908152600660209081526040808320868452825280832085905593825260079052919091209190915550565b73ffffffffffffffffffffffffffffffffffffffff82166120a7576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4552433732313a206d696e7420746f20746865207a65726f20616464726573736044820152606401610684565b60008181526002602052604090205473ffffffffffffffffffffffffffffffffffffffff1615612133576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601c60248201527f4552433732313a20746f6b656e20616c7265616479206d696e746564000000006044820152606401610684565b61213f60008383611ad7565b73ffffffffffffffffffffffffffffffffffffffff82166000908152600360205260408120805460019290612175908490612644565b909155505060008181526002602052604080822080547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff861690811790915590518392907fddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef908290a45050565b7fffffffff000000000000000000000000000000000000000000000000000000008116811461134e57600080fd5b60006020828403121561223857600080fd5b81356104f3816121f8565b60005b8381101561225e578181015183820152602001612246565b83811115610df35750506000910152565b60008151808452612287816020860160208601612243565b601f017ffffffffffffffffffffffffffffffffffffff
fffffffffffffffffffffffffe0169290920160200192915050565b6020815260006104f3602083018461226f565b6000602082840312156122de57600080fd5b5035919050565b803573ffffffffffffffffffffffffffffffffffffffff8116811461230957600080fd5b919050565b6000806040838503121561232157600080fd5b61232a836122e5565b946020939093013593505050565b60008060006060848603121561234d57600080fd5b612356846122e5565b9250612364602085016122e5565b9150604084013590509250925092565b60006020828403121561238657600080fd5b6104f3826122e5565b600080604083850312156123a257600080fd5b6123ab836122e5565b9150602083013580151581146123c057600080fd5b809150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b6000806000806080858703121561241057600080fd5b612419856122e5565b9350612427602086016122e5565b925060408501359150606085013567ffffffffffffffff8082111561244b57600080fd5b818701915087601f83011261245f57600080fd5b813581811115612471576124716123cb565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f011681019083821181831017156124b7576124b76123cb565b816040528281528a60208487010111156124d057600080fd5b82602086016020830137600060208483010152809550505050505092959194509250565b6000806040838503121561250757600080fd5b612510836122e5565b915061251e602084016122e5565b90509250929050565b600181811c9082168061253b57607f821691505b602082108103612574577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600083516125bb818460208801612243565b8351908301906125cf818360208801612243565b01949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561263f5761263f6125d8565b500290565b60008219821115612657576126576125d8565b500190565b60008161266b5761266b6125d8565b507ffffffffffffffffffffffffffffffffffffffffffffffffff
fffffffffffffff0190565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036126c2576126c26125d8565b5060010190565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601260045260246000fd5b600082612707576127076126c9565b500490565b60008282101561271e5761271e6125d8565b500390565b600082612732576127326126c9565b500690565b600073ffffffffffffffffffffffffffffffffffffffff808716835280861660208401525083604083015260806060830152612776608083018461226f565b9695505050505050565b60006020828403121561279257600080fd5b81516104f3816121f8565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603160045260246000fdfea164736f6c634300080f000aa164736f6c634300080f000a000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy OptimismMintableERC721Factory Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000001c34608060405234801561001057600080fd5b50600061001c3361002b565b6100258161002b565b5061007b565b600080546001600160a01b038381166001600160a01b0319831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b611baa8061008a6000396000f3fe6080604052600436106101445760003560e01c80637eff275e116100c057806399a88ec411610074578063b794726211610059578063b7947262146103c8578063f2fde38b14610403578063f3b7dead1461042357600080fd5b806399a88ec4146103885780639b2ea4bd146103a857600080fd5b80638d52d4a0116100a55780638d52d4a01461032a5780638da5cb5b1461034a5780639623609d1461037557600080fd5b80637eff275e146102ea578063860f7cda1461030a57600080fd5b80633ab76e9f116101175780636bd9f516116100fc5780636bd9f51614610278578063715018a6146102b55780637c36f37e146102ca57600080fd5b80633ab76e9f1461020257806354fd4d501461022f57600080fd5b80630652b5
7a1461014957806307c8f7b01461016b578063204e1c7a1461018b578063238181ae146101d5575b600080fd5b34801561015557600080fd5b50610169610164366004611427565b610443565b005b34801561017757600080fd5b50610169610186366004611444565b610492565b34801561019757600080fd5b506101ab6101a6366004611427565b6104e4565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b3480156101e157600080fd5b506101f56101f0366004611427565b61070a565b6040516101cc91906114dc565b34801561020e57600080fd5b506003546101ab9073ffffffffffffffffffffffffffffffffffffffff1681565b34801561023b57600080fd5b506101f56040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b34801561028457600080fd5b506102a8610293366004611427565b60016020526000908152604090205460ff1681565b6040516101cc919061151e565b3480156102c157600080fd5b506101696107a4565b3480156102d657600080fd5b506101696102e5366004611427565b6107b8565b3480156102f657600080fd5b5061016961030536600461155f565b610947565b34801561031657600080fd5b506101696103253660046116ba565b610afa565b34801561033657600080fd5b5061016961034536600461170a565b610b31565b34801561035657600080fd5b5060005473ffffffffffffffffffffffffffffffffffffffff166101ab565b61016961038336600461173c565b610ba5565b34801561039457600080fd5b506101696103a336600461155f565b610dbc565b3480156103b457600080fd5b506101696103c33660046117b2565b61104c565b3480156103d457600080fd5b5060035474010000000000000000000000000000000000000000900460ff1660405190151581526020016101cc565b34801561040f57600080fd5b5061016961041e366004611427565b6110e2565b34801561042f57600080fd5b506101ab61043e366004611427565b611199565b61044b61130f565b600380547fffffffffffffffffffffffff00000000000000000000000000000000000000001673ffffffffffffffffffffffffffffffffffffffff92909216919091179055565b61049a61130f565b6003805491151574010000000000000000000000000000000000000000027fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff909216919091179055565b73ffffffffffffffffffffffffffffffffffffffff8116600090815260016020
52604081205460ff1681816002811115610520576105206114ef565b0361059b578273ffffffffffffffffffffffffffffffffffffffff16635c60da1b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610570573d6000803e3d6000fd5b505050506040513d601f19601f8201168201806040525081019061059491906117f9565b9392505050565b60018160028111156105af576105af6114ef565b036105ff578273ffffffffffffffffffffffffffffffffffffffff1663aaf10f426040518163ffffffff1660e01b8152600401602060405180830381865afa158015610570573d6000803e3d6000fd5b6002816002811115610613576106136114ef565b0361069d5760035473ffffffffffffffffffffffffffffffffffffffff8481166000908152600260205260409081902090517fbf40fac1000000000000000000000000000000000000000000000000000000008152919092169163bf40fac1916106809190600401611863565b602060405180830381865afa158015610570573d6000803e3d6000fd5b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601e60248201527f50726f787941646d696e3a20756e6b6e6f776e2070726f78792074797065000060448201526064015b60405180910390fd5b50919050565b6002602052600090815260409020805461072390611816565b80601f016020809104026020016040519081016040528092919081815260200182805461074f90611816565b801561079c5780601f106107715761010080835404028352916020019161079c565b820191906000526020600020905b81548152906001019060200180831161077f57829003601f168201915b505050505081565b6107ac61130f565b6107b66000611390565b565b3373deaddeaddeaddeaddeaddeaddeaddeaddead000114610805576040517fcde661e700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60408051600481526024810182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fd55ec697000000000000000000000000000000000000000000000000000000001790529051600091829173ffffffffffffffffffffffffffffffffffffffff8516916108839161190c565b600060405180830381855af49150503d80600081146108be576040519150601f19603f3d011682016040523d82523d6000602084013e6108c3565b606091505b50915091508161090157806040517f1c0a89cc00000000000000000000000000000000000000000000
00000000000081526004016106fb91906114dc565b60405173ffffffffffffffffffffffffffffffffffffffff8416907f14e22d69ea30aab5b2220164345b33bdb5125e9c77a7d5fe12e23a1c691bd13990600090a2505050565b61094f61130f565b73ffffffffffffffffffffffffffffffffffffffff821660009081526001602052604081205460ff169081600281111561098b5761098b6114ef565b03610a17576040517f8f28397000000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8381166004830152841690638f283970906024015b600060405180830381600087803b1580156109fa57600080fd5b505af1158015610a0e573d6000803e3d6000fd5b50505050505050565b6001816002811115610a2b57610a2b6114ef565b03610a84576040517f13af403500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff83811660048301528416906313af4035906024016109e0565b6002816002811115610a9857610a986114ef565b0361069d576003546040517ff2fde38b00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84811660048301529091169063f2fde38b906024016109e0565b505050565b610b0261130f565b73ffffffffffffffffffffffffffffffffffffffff82166000908152600260205260409020610af5828261196e565b610b3961130f565b73ffffffffffffffffffffffffffffffffffffffff82166000908152600160208190526040909120805483927fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff0090911690836002811115610b9c57610b9c6114ef565b02179055505050565b610bad61130f565b73ffffffffffffffffffffffffffffffffffffffff831660009081526001602052604081205460ff1690816002811115610be957610be96114ef565b03610caf576040517f4f1ef28600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff851690634f1ef286903490610c449087908790600401611a88565b60006040518083038185885af1158015610c62573d6000803e3d6000fd5b50505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052610ca99190810190611abf565b50610db6565b610cb98484610dbc565b60008473ffffffffffffffffffffffffffffffffffffffff163484
604051610ce1919061190c565b60006040518083038185875af1925050503d8060008114610d1e576040519150601f19603f3d011682016040523d82523d6000602084013e610d23565b606091505b5050905080610db4576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f50726f787941646d696e3a2063616c6c20746f2070726f78792061667465722060448201527f75706772616465206661696c656400000000000000000000000000000000000060648201526084016106fb565b505b50505050565b610dc461130f565b73ffffffffffffffffffffffffffffffffffffffff821660009081526001602052604081205460ff1690816002811115610e0057610e006114ef565b03610e59576040517f3659cfe600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8381166004830152841690633659cfe6906024016109e0565b6001816002811115610e6d57610e6d6114ef565b03610eec576040517f9b0b0fda0000000000000000000000000000000000000000000000000000000081527f360894a13ba1a3210667c828492db98dca3e2076cc3735a920a3ca505d382bbc600482015273ffffffffffffffffffffffffffffffffffffffff8381166024830152841690639b0b0fda906044016109e0565b6002816002811115610f0057610f006114ef565b036110445773ffffffffffffffffffffffffffffffffffffffff831660009081526002602052604081208054610f3590611816565b80601f0160208091040260200160405190810160405280929190818152602001828054610f6190611816565b8015610fae5780601f10610f8357610100808354040283529160200191610fae565b820191906000526020600020905b815481529060010190602001808311610f9157829003601f168201915b50506003546040517f9b2ea4bd00000000000000000000000000000000000000000000000000000000815294955073ffffffffffffffffffffffffffffffffffffffff1693639b2ea4bd935061100c92508591508790600401611b36565b600060405180830381600087803b15801561102657600080fd5b505af115801561103a573d6000803e3d6000fd5b5050505050505050565b610af5611b6e565b61105461130f565b6003546040517f9b2ea4bd00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff90911690639b2ea4bd906110ac9085908590600401611b36565b600060405180830381600087803b1580
156110c657600080fd5b505af11580156110da573d6000803e3d6000fd5b505050505050565b6110ea61130f565b73ffffffffffffffffffffffffffffffffffffffff811661118d576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016106fb565b61119681611390565b50565b73ffffffffffffffffffffffffffffffffffffffff811660009081526001602052604081205460ff16818160028111156111d5576111d56114ef565b03611225578273ffffffffffffffffffffffffffffffffffffffff1663f851a4406040518163ffffffff1660e01b8152600401602060405180830381865afa158015610570573d6000803e3d6000fd5b6001816002811115611239576112396114ef565b03611289578273ffffffffffffffffffffffffffffffffffffffff1663893d20e86040518163ffffffff1660e01b8152600401602060405180830381865afa158015610570573d6000803e3d6000fd5b600281600281111561129d5761129d6114ef565b0361069d57600360009054906101000a900473ffffffffffffffffffffffffffffffffffffffff1673ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610570573d6000803e3d6000fd5b60005473ffffffffffffffffffffffffffffffffffffffff1633146107b6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260448201526064016106fb565b6000805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681178455604051919092169283917f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e09190a35050565b73ffffffffffffffffffffffffffffffffffffffff8116811461119657600080fd5b60006020828403121561143957600080fd5b813561059481611405565b60006020828403121561145657600080fd5b8135801515811461059457600080fd5b60005b83811015611481578181015183820152602001611469565b83811115610db65750506000910152565b600081518084526114aa8160208601602086
01611466565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006105946020830184611492565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b6020810160038310611559577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b91905290565b6000806040838503121561157257600080fd5b823561157d81611405565b9150602083013561158d81611405565b809150509250929050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561160e5761160e611598565b604052919050565b600067ffffffffffffffff82111561163057611630611598565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600061166f61166a84611616565b6115c7565b905082815283838301111561168357600080fd5b828260208301376000602084830101529392505050565b600082601f8301126116ab57600080fd5b6105948383356020850161165c565b600080604083850312156116cd57600080fd5b82356116d881611405565b9150602083013567ffffffffffffffff8111156116f457600080fd5b6117008582860161169a565b9150509250929050565b6000806040838503121561171d57600080fd5b823561172881611405565b915060208301356003811061158d57600080fd5b60008060006060848603121561175157600080fd5b833561175c81611405565b9250602084013561176c81611405565b9150604084013567ffffffffffffffff81111561178857600080fd5b8401601f8101861361179957600080fd5b6117a88682356020840161165c565b9150509250925092565b600080604083850312156117c557600080fd5b823567ffffffffffffffff8111156117dc57600080fd5b6117e88582860161169a565b925050602083013561158d81611405565b60006020828403121561180b57600080fd5b815161059481611405565b600181811c9082168061182a57607f821691505b602082108103610704577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b600060208083526000845461187781611816565b808487015260406001808416600081146118985760
0181146118d0576118fe565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff008516838a01528284151560051b8a010195506118fe565b896000528660002060005b858110156118f65781548b82018601529083019088016118db565b8a0184019650505b509398975050505050505050565b6000825161191e818460208701611466565b9190910192915050565b601f821115610af557600081815260208120601f850160051c8101602086101561194f5750805b601f850160051c820191505b818110156110da5782815560010161195b565b815167ffffffffffffffff81111561198857611988611598565b61199c816119968454611816565b84611928565b602080601f8311600181146119ef57600084156119b95750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b1785556110da565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015611a3c57888601518255948401946001909101908401611a1d565b5085821015611a7857878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b73ffffffffffffffffffffffffffffffffffffffff83168152604060208201526000611ab76040830184611492565b949350505050565b600060208284031215611ad157600080fd5b815167ffffffffffffffff811115611ae857600080fd5b8201601f81018413611af957600080fd5b8051611b0761166a82611616565b818152856020838501011115611b1c57600080fd5b611b2d826020830160208601611466565b95945050505050565b604081526000611b496040830185611492565b905073ffffffffffffffffffffffffffffffffffffffff831660208301529392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052600160045260246000fdfea164736f6c634300080f000a000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2ProxyAdmin Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000f636080604052348015600e575f80fd5b5060156019565b60c9565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000900460ff161560685760405163f92ee8a960e01b815260040160405180910390fd5b80546001600160401b039081161460c65780546001600160401b0319166001600160401b0390811782556040519081527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b50565b610e8d806100d65f395ff3fe6080604052600436106100d1575f3560e01c806382356d8a1161007c57806385b5b14d1161005757806385b5b14d14610276578063b49dc74114610295578063d0e12f90146102b4578063d3e5792b146102e3575f80fd5b806382356d8a1461020f5780638312f1491461024d57806384411d6514610262575f80fd5b80633ccfd60b116100ac5780633ccfd60b1461016c57806354fd4d501461018e57806366d003ac146101e3575f80fd5b80630d9019e1146100dc578063307f29621461012c5780633bbed4a01461014d575f80fd5b366100d857005b5f80fd5b3480156100e7575f80fd5b5060025473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b348015610137575f80fd5b5061014b610146366004610c5a565b6102f7565b005b348015610158575f80fd5b5061014b610167366004610c97565b61047a565b348015610177575f80fd5b506101806105de565b604051908152602001610123565b348015610199575f80fd5b506101d66040518060400160405280600581526020017f312e362e3000000000000000000000000000000000000000000000000000000081525081565b6040516101239190610cb2565b3480156101ee575f80fd5b506002546101029073ffffffffffffffffffffffffffffffffffffffff1681565b34801561021a575f80fd5b506002546102409074010000000000000000000000000000000000000000900460ff1681565b6040516101239190610d6b565b348015610258575f80fd5b5061018060015481565b34801561026d575f80fd5b506101805f5481565b348015610281575f80fd5b5061014b610290366004610d7f565b6108ec565b3480156102a0575f80fd5b5061014b6102af366004610
d96565b610a0f565b3480156102bf575f80fd5b5060025474010000000000000000000000000000000000000000900460ff16610240565b3480156102ee575f80fd5b50600154610180565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610354573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906103789190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146103dc576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600280547401000000000000000000000000000000000000000080820460ff1692849290917fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff9091169083600181111561043857610438610d05565b02179055507ff2ec44eb1c3b3acd547b76333eb2c4b27eee311860c57a9fdb04c95f62398fc8818360405161046e929190610dec565b60405180910390a15050565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156104d7573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104fb9190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461055f576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6002805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff000000000000000000000000000000000000000083168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e4593910161046e565b5f60015447101561069c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d207769746864726160648201527f77616c20616d6f756e7400000000000000000000000000000000000000000000608482015260a4015b6
0405180910390fd5b479050805f808282546106af9190610e07565b90915550506002546040805183815273ffffffffffffffffffffffffffffffffffffffff909216602083018190523383830152905190917fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba919081900360600190a16002546040517f38e04cbeb8c10f8f568618aa75be0f10b6729b8b4237743b4de20cbcde2839ee916107649185918591339174010000000000000000000000000000000000000000900460ff1690610e3f565b60405180910390a1600160025474010000000000000000000000000000000000000000900460ff16600181111561079d5761079d610d05565b03610841575f6107ad8284610c23565b90508061083c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f4665655661756c743a206661696c656420746f2073656e642045544820746f2060448201527f4c322066656520726563697069656e74000000000000000000000000000000006064820152608401610693565b505090565b6040517fc2b3e5ac00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8216600482015262061a806024820152606060448201525f60648201527342000000000000000000000000000000000000169063c2b3e5ac9084906084015f604051808303818588803b1580156108d1575f80fd5b505af11580156108e3573d5f803e3d5ffd5b50505050505090565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610949573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061096d9190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109d1576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180549082905560408051828152602081018490527f895a067c78583e800418fabf3da26a9496aab2ff3429cebdf7fefa642b2e4203910161046e565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000810460ff16159067ffffffffffffffff165f81158015610a595750825b90505f8267ffffffffffffffff166001148015610a755750303b155b9050811580156
10a83575080155b15610aba576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001660011785558315610b1b5784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff16680100000000000000001785555b6002805473ffffffffffffffffffffffffffffffffffffffff8a167fffffffffffffffffffffffff000000000000000000000000000000000000000082168117835560018a81558993927fffffffffffffffffffffff000000000000000000000000000000000000000000169091179074010000000000000000000000000000000000000000908490811115610bb357610bb3610d05565b02179055508315610c195784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff168555604051600181527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b5050505050505050565b5f610c2f835a84610c36565b9392505050565b5f805f805f858888f1949350505050565b803560028110610c55575f80fd5b919050565b5f60208284031215610c6a575f80fd5b610c2f82610c47565b73ffffffffffffffffffffffffffffffffffffffff81168114610c94575f80fd5b50565b5f60208284031215610ca7575f80fd5b8135610c2f81610c73565b602081525f82518060208401528060208501604085015e5f6040828501015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011684010191505092915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b60028110610d67577f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b9052565b60208101610d798284610d32565b92915050565b5f60208284031215610d8f575f80fd5b5035919050565b5f805f60608486031215610da8575f80fd5b8335610db381610c73565b925060208401359150610dc860408501610c47565b90509250925092565b5f60208284031215610de1575f80fd5b8151610c2f81610c73565b60408101610dfa8285610d32565b610c2f6020830184610d32565b80820180821115610d79577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b84815273ffffffffffffffffffffffffffffffffffffffff8481166020830152831660408
2015260808101610e776060830184610d32565b9594505050505056fea164736f6c6343000819000a0000000000000000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy BaseFeeVault Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000f636080604052348015600e575f80fd5b5060156019565b60c9565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000900460ff161560685760405163f92ee8a960e01b815260040160405180910390fd5b80546001600160401b039081161460c65780546001600160401b0319166001600160401b0390811782556040519081527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b50565b610e8d806100d65f395ff3fe6080604052600436106100d1575f3560e01c806382356d8a1161007c57806385b5b14d1161005757806385b5b14d14610276578063b49dc74114610295578063d0e12f90146102b4578063d3e5792b146102e3575f80fd5b806382356d8a1461020f5780638312f1491461024d57806384411d6514610262575f80fd5b80633ccfd60b116100ac5780633ccfd60b1461016c57806354fd4d501461018e57806366d003ac146101e3575f80fd5b80630d9019e1146100dc578063307f29621461012c5780633bbed4a01461014d575f80fd5b366100d857005b5f80fd5b3480156100e7575f80fd5b5060025473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b348015610137575f80fd5b5061014b610146366004610c5a565b6102f7565b005b348015610158575f80fd5b5061014b610167366004610c97565b61047a565b348015610177575f80fd5b506101806105de565b604051908152602001610123565b348015610199575f80fd5b506101d66040518060400160405280600581526020017f312e362e3000000000000000000000000000000000000000000000000000000081525081565b6040516101239190610cb2565b3480156101ee575f80fd5b506002546101029073fffffffffffffffffff
fffffffffffffffffffff1681565b34801561021a575f80fd5b506002546102409074010000000000000000000000000000000000000000900460ff1681565b6040516101239190610d6b565b348015610258575f80fd5b5061018060015481565b34801561026d575f80fd5b506101805f5481565b348015610281575f80fd5b5061014b610290366004610d7f565b6108ec565b3480156102a0575f80fd5b5061014b6102af366004610d96565b610a0f565b3480156102bf575f80fd5b5060025474010000000000000000000000000000000000000000900460ff16610240565b3480156102ee575f80fd5b50600154610180565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610354573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906103789190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146103dc576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600280547401000000000000000000000000000000000000000080820460ff1692849290917fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff9091169083600181111561043857610438610d05565b02179055507ff2ec44eb1c3b3acd547b76333eb2c4b27eee311860c57a9fdb04c95f62398fc8818360405161046e929190610dec565b60405180910390a15050565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156104d7573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104fb9190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461055f576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6002805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff000000000000000000000000000000000000000083168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e4593910161046e565b5f60015447101561069c5
76040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d207769746864726160648201527f77616c20616d6f756e7400000000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b479050805f808282546106af9190610e07565b90915550506002546040805183815273ffffffffffffffffffffffffffffffffffffffff909216602083018190523383830152905190917fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba919081900360600190a16002546040517f38e04cbeb8c10f8f568618aa75be0f10b6729b8b4237743b4de20cbcde2839ee916107649185918591339174010000000000000000000000000000000000000000900460ff1690610e3f565b60405180910390a1600160025474010000000000000000000000000000000000000000900460ff16600181111561079d5761079d610d05565b03610841575f6107ad8284610c23565b90508061083c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f4665655661756c743a206661696c656420746f2073656e642045544820746f2060448201527f4c322066656520726563697069656e74000000000000000000000000000000006064820152608401610693565b505090565b6040517fc2b3e5ac00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8216600482015262061a806024820152606060448201525f60648201527342000000000000000000000000000000000000169063c2b3e5ac9084906084015f604051808303818588803b1580156108d1575f80fd5b505af11580156108e3573d5f803e3d5ffd5b50505050505090565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610949573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061096d9190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109d1576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5
b600180549082905560408051828152602081018490527f895a067c78583e800418fabf3da26a9496aab2ff3429cebdf7fefa642b2e4203910161046e565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000810460ff16159067ffffffffffffffff165f81158015610a595750825b90505f8267ffffffffffffffff166001148015610a755750303b155b905081158015610a83575080155b15610aba576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001660011785558315610b1b5784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff16680100000000000000001785555b6002805473ffffffffffffffffffffffffffffffffffffffff8a167fffffffffffffffffffffffff000000000000000000000000000000000000000082168117835560018a81558993927fffffffffffffffffffffff000000000000000000000000000000000000000000169091179074010000000000000000000000000000000000000000908490811115610bb357610bb3610d05565b02179055508315610c195784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff168555604051600181527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b5050505050505050565b5f610c2f835a84610c36565b9392505050565b5f805f805f858888f1949350505050565b803560028110610c55575f80fd5b919050565b5f60208284031215610c6a575f80fd5b610c2f82610c47565b73ffffffffffffffffffffffffffffffffffffffff81168114610c94575f80fd5b50565b5f60208284031215610ca7575f80fd5b8135610c2f81610c73565b602081525f82518060208401528060208501604085015e5f6040828501015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011684010191505092915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b60028110610d67577f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b9052565b60208101610d798284610d32565b92915050565b5f60208284031215610d8f575f80fd5b5035919050565b5f805f60608486031215610da8575f80fd5b8335610db381610c73565b92506020840
1359150610dc860408501610c47565b90509250925092565b5f60208284031215610de1575f80fd5b8151610c2f81610c73565b60408101610dfa8285610d32565b610c2f6020830184610d32565b80820180821115610d79577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b84815273ffffffffffffffffffffffffffffffffffffffff84811660208301528316604082015260808101610e776060830184610d32565b9594505050505056fea164736f6c6343000819000a0000000000000000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L1FeeVault Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000f636080604052348015600e575f80fd5b5060156019565b60c9565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000900460ff161560685760405163f92ee8a960e01b815260040160405180910390fd5b80546001600160401b039081161460c65780546001600160401b0319166001600160401b0390811782556040519081527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b50565b610e8d806100d65f395ff3fe6080604052600436106100d1575f3560e01c806382356d8a1161007c57806385b5b14d1161005757806385b5b14d14610276578063b49dc74114610295578063d0e12f90146102b4578063d3e5792b146102e3575f80fd5b806382356d8a1461020f5780638312f1491461024d57806384411d6514610262575f80fd5b80633ccfd60b116100ac5780633ccfd60b1461016c57806354fd4d501461018e57806366d003ac146101e3575f80fd5b80630d9019e1146100dc578063307f29621461012c5780633bbed4a01461014d575f80fd5b366100d857005b5f80fd5b3480156100e7575f80fd5b5060025473ffffffffffffffffffffffffffffffffffffffff165b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b348015610137575f80fd5b5061014b610146366004610c5a565b6102f7565b005b348015610158575f80f
d5b5061014b610167366004610c97565b61047a565b348015610177575f80fd5b506101806105de565b604051908152602001610123565b348015610199575f80fd5b506101d66040518060400160405280600581526020017f312e312e3000000000000000000000000000000000000000000000000000000081525081565b6040516101239190610cb2565b3480156101ee575f80fd5b506002546101029073ffffffffffffffffffffffffffffffffffffffff1681565b34801561021a575f80fd5b506002546102409074010000000000000000000000000000000000000000900460ff1681565b6040516101239190610d6b565b348015610258575f80fd5b5061018060015481565b34801561026d575f80fd5b506101805f5481565b348015610281575f80fd5b5061014b610290366004610d7f565b6108ec565b3480156102a0575f80fd5b5061014b6102af366004610d96565b610a0f565b3480156102bf575f80fd5b5060025474010000000000000000000000000000000000000000900460ff16610240565b3480156102ee575f80fd5b50600154610180565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610354573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906103789190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146103dc576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600280547401000000000000000000000000000000000000000080820460ff1692849290917fffffffffffffffffffffff00ffffffffffffffffffffffffffffffffffffffff9091169083600181111561043857610438610d05565b02179055507ff2ec44eb1c3b3acd547b76333eb2c4b27eee311860c57a9fdb04c95f62398fc8818360405161046e929190610dec565b60405180910390a15050565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156104d7573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906104fb9190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff161461055f576040517f7cd7e09f0000000000000000000
0000000000000000000000000000000000000815260040160405180910390fd5b6002805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff000000000000000000000000000000000000000083168117909355604080519190921680825260208201939093527f62e69886a5df0ba8ffcacbfc1388754e7abd9bde24b036354c561f1acd4e4593910161046e565b5f60015447101561069c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604a60248201527f4665655661756c743a207769746864726177616c20616d6f756e74206d75737460448201527f2062652067726561746572207468616e206d696e696d756d207769746864726160648201527f77616c20616d6f756e7400000000000000000000000000000000000000000000608482015260a4015b60405180910390fd5b479050805f808282546106af9190610e07565b90915550506002546040805183815273ffffffffffffffffffffffffffffffffffffffff909216602083018190523383830152905190917fc8a211cc64b6ed1b50595a9fcb1932b6d1e5a6e8ef15b60e5b1f988ea9086bba919081900360600190a16002546040517f38e04cbeb8c10f8f568618aa75be0f10b6729b8b4237743b4de20cbcde2839ee916107649185918591339174010000000000000000000000000000000000000000900460ff1690610e3f565b60405180910390a1600160025474010000000000000000000000000000000000000000900460ff16600181111561079d5761079d610d05565b03610841575f6107ad8284610c23565b90508061083c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603060248201527f4665655661756c743a206661696c656420746f2073656e642045544820746f2060448201527f4c322066656520726563697069656e74000000000000000000000000000000006064820152608401610693565b505090565b6040517fc2b3e5ac00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8216600482015262061a806024820152606060448201525f60648201527342000000000000000000000000000000000000169063c2b3e5ac9084906084015f604051808303818588803b1580156108d1575f80fd5b505af11580156108e3573d5f803e3d5ffd5b50505050505090565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff166
0e01b8152600401602060405180830381865afa158015610949573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061096d9190610dd1565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146109d1576040517f7cd7e09f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180549082905560408051828152602081018490527f895a067c78583e800418fabf3da26a9496aab2ff3429cebdf7fefa642b2e4203910161046e565b7ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a00805468010000000000000000810460ff16159067ffffffffffffffff165f81158015610a595750825b90505f8267ffffffffffffffff166001148015610a755750303b155b905081158015610a83575080155b15610aba576040517ff92ee8a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b84547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001660011785558315610b1b5784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff16680100000000000000001785555b6002805473ffffffffffffffffffffffffffffffffffffffff8a167fffffffffffffffffffffffff000000000000000000000000000000000000000082168117835560018a81558993927fffffffffffffffffffffff000000000000000000000000000000000000000000169091179074010000000000000000000000000000000000000000908490811115610bb357610bb3610d05565b02179055508315610c195784547fffffffffffffffffffffffffffffffffffffffffffffff00ffffffffffffffff168555604051600181527fc7f505b2f371ae2175ee4913f4499e1f2633a7b5936321eed1cdaeb6115181d29060200160405180910390a15b5050505050505050565b5f610c2f835a84610c36565b9392505050565b5f805f805f858888f1949350505050565b803560028110610c55575f80fd5b919050565b5f60208284031215610c6a575f80fd5b610c2f82610c47565b73ffffffffffffffffffffffffffffffffffffffff81168114610c94575f80fd5b50565b5f60208284031215610ca7575f80fd5b8135610c2f81610c73565b602081525f82518060208401528060208501604085015e5f6040828501015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011684010191505092915050565b7f4e487b71000000000
000000000000000000000000000000000000000000000005f52602160045260245ffd5b60028110610d67577f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b9052565b60208101610d798284610d32565b92915050565b5f60208284031215610d8f575f80fd5b5035919050565b5f805f60608486031215610da8575f80fd5b8335610db381610c73565b925060208401359150610dc860408501610c47565b90509250925092565b5f60208284031215610de1575f80fd5b8151610c2f81610c73565b60408101610dfa8285610d32565b610c2f6020830184610d32565b80820180821115610d79577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b84815273ffffffffffffffffffffffffffffffffffffffff84811660208301528316604082015260808101610e776060830184610d32565b9594505050505056fea164736f6c6343000819000a0000000000000000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy OperatorFeeVault Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca40000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000081e608060405234801561001057600080fd5b506107fe806100206000396000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806354fd4d501461004657806360d7a27814610098578063a2ea7c6e146100b9575b600080fd5b6100826040518060400160405280600c81526020017f312e332e312d626574612e32000000000000000000000000000000000000000081525081565b60405161008f9190610473565b60405180910390f35b6100ab6100a636600461048d565b6100d9565b60405190815260200161008f565b6100cc6100c736600461053f565b61029d565b60405161008f9190610558565b60008060405180608001604052806000801b81526020018573ffffffffffffffffffffffffffffffffffffffff168152602001841515815260200187878080601f01602080910402602001604051908101604052809392919081815260200183838082843760009201829052509390945250929350915061015b9050826103c5565b600081815260208190526040902054909150156101a4576040517f23369fa600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80825260008181526020818152604091829020845181559084015160018201805493860151151574010000000000000000000000000000000000000000027fffffffffffffffffffffff00000000000000000000000000000000000000000090941673ffffffffffffffffffffffffffffffffffffffff9092169190911792909217909155606083015183919060028201906102409082610682565b509050503373ffffffffffffffffffffffffffffffffffffffff16817fd0b86852e21f9e5fa4bc3b0cff9757ffe243d50c4b43968a42202153d651ea5e8460405161028b9190610558565b60405180910390a39695505050505050565b604080516080810182526000808252602082018190529181019190915260608082015260008281526020818152604091829020825160808101845281548152600182015473ffffffffffffffffffffffffffffffffffffffff8116938201939093527401000000000000000000000000000000000000000090920460ff1615159282019290925260028201805491929160608401919061033c906105e0565b80601f016020809104026020016040519081016040528092919081815260200182805461036
8906105e0565b80156103b55780601f1061038a576101008083540402835291602001916103b5565b820191906000526020600020905b81548152906001019060200180831161039857829003601f168201915b5050505050815250509050919050565b60008160600151826020015183604001516040516020016103e89392919061079c565b604051602081830303815290604052805190602001209050919050565b60005b83811015610420578181015183820152602001610408565b50506000910152565b60008151808452610441816020860160208601610405565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006104866020830184610429565b9392505050565b600080600080606085870312156104a357600080fd5b843567ffffffffffffffff808211156104bb57600080fd5b818701915087601f8301126104cf57600080fd5b8135818111156104de57600080fd5b8860208285010111156104f057600080fd5b6020928301965094505085013573ffffffffffffffffffffffffffffffffffffffff8116811461051f57600080fd5b91506040850135801515811461053457600080fd5b939692955090935050565b60006020828403121561055157600080fd5b5035919050565b602081528151602082015273ffffffffffffffffffffffffffffffffffffffff6020830151166040820152604082015115156060820152600060608301516080808401526105a960a0840182610429565b949350505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600181811c908216806105f457607f821691505b60208210810361062d577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f82111561067d57600081815260208120601f850160051c8101602086101561065a5750805b601f850160051c820191505b8181101561067957828155600101610666565b5050505b505050565b815167ffffffffffffffff81111561069c5761069c6105b1565b6106b0816106aa84546105e0565b84610633565b602080601f83116001811461070357600084156106cd5750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555610679565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b8281101561075057888601518255948401946001909101908
401610731565b508582101561078c57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b600084516107ae818460208901610405565b60609490941b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000000169190930190815290151560f81b60148201526015019291505056fea164736f6c6343000813000a0000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy SchemaRegistry Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000046c961016060405234801561001157600080fd5b50604080518082018252600381526245415360e81b60208083019182528351808501855260058152640312e332e360dc1b908201529151812060e08190527f6a08c3e203132c561752255a4d52ffae85bb9c5d33cb3291520dea1b843563896101008190524660a081815286517f8b73c3c69bb8fe3d512ecc4cf759cc79239f7b179b0ffacaa9a75d522b39400f818801819052818901959095526060810193909352608080840192909252308382018190528751808503909201825260c093840190975280519501949094209093529290915261012091909152516101405260805160a05160c05160e05161010051610120516101405161457e61014b600039600061073701526000612784015260006127d3015260006127ae01526000612707015260006127310152600061275b015261457e6000f3fe60806040526004361061018b5760003560e01c806395411525116100d6578063d45c44351161007f578063ed24911d11610059578063ed24911d146104fd578063f10b5cc814610512578063f17325e71461054157600080fd5b8063d45c443514610467578063e30bb5631461049e578063e71ff365146104dd57600080fd5b8063b469318d116100b0578063b469318d146103ba578063b83010d314610414578063cf190f341461044757600080fd5b80639541152514610367578063a3112a641461037a578063a6d4dbc7146103a757600080fd5b806344adc90e116101385780634d003070116101125780634d003070146102de57806354fd4d50146102fe57806379f7573a1461034757600080fd5b806344adc90e14610
29857806346926267146102b85780634cb7e9e5146102cb57600080fd5b806317d7de7c1161016957806317d7de7c146102205780632d0335ab146102425780633c0427151461028557600080fd5b80630eabf6601461019057806312b11a17146101a557806313893f61146101e7575b600080fd5b6101a361019e3660046134c8565b610554565b005b3480156101b157600080fd5b507ffeb2925a02bae3dae48d424a0437a2b6ac939aa9230ddc55a1a76f065d9880765b6040519081526020015b60405180910390f35b3480156101f357600080fd5b506102076102023660046134c8565b6106eb565b60405167ffffffffffffffff90911681526020016101de565b34801561022c57600080fd5b50610235610730565b6040516101de9190613578565b34801561024e57600080fd5b506101d461025d3660046135bd565b73ffffffffffffffffffffffffffffffffffffffff1660009081526020819052604090205490565b6101d46102933660046135da565b610760565b6102ab6102a63660046134c8565b610863565b6040516101de9190613615565b6101a36102c6366004613659565b6109e4565b6101a36102d93660046134c8565b610a68565b3480156102ea57600080fd5b506102076102f9366004613671565b610b4b565b34801561030a57600080fd5b506102356040518060400160405280600c81526020017f312e342e312d626574612e33000000000000000000000000000000000000000081525081565b34801561035357600080fd5b506101a3610362366004613671565b610b58565b6102ab6103753660046134c8565b610bef565b34801561038657600080fd5b5061039a610395366004613671565b610e62565b6040516101de9190613771565b6101a36103b5366004613784565b611025565b3480156103c657600080fd5b506102076103d5366004613797565b73ffffffffffffffffffffffffffffffffffffffff919091166000908152603460209081526040808320938352929052205467ffffffffffffffff1690565b34801561042057600080fd5b507fb5d556f07587ec0f08cf386545cc4362c702a001650c2058002615ee5c9d1e756101d4565b34801561045357600080fd5b50610207610462366004613671565b6110ca565b34801561047357600080fd5b50610207610482366004613671565b60009081526033602052604090205467ffffffffffffffff1690565b3480156104aa57600080fd5b506104cd6104b9366004613671565b600090815260326020526040902054151590565b60405190151581526020016101de565b3480156104e957600080fd5b506102076104f83660046134c8565b6110d8565b34801561050
957600080fd5b506101d4611110565b34801561051e57600080fd5b5060405173420000000000000000000000000000000000002081526020016101de565b6101d461054f3660046137c3565b61111a565b348160005b818110156106e4577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82018114600086868481811061059a5761059a6137fe565b90506020028101906105ac919061382d565b6105b590613ac3565b60208101518051919250908015806105d257508260400151518114155b15610609576040517f947d5a8400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b818110156106ad576106a56040518060a001604052808660000151815260200185848151811061063e5761063e6137fe565b6020026020010151815260200186604001518481518110610661576106616137fe565b60200260200101518152602001866060015173ffffffffffffffffffffffffffffffffffffffff168152602001866080015167ffffffffffffffff168152506111d8565b60010161060c565b506106c383600001518385606001518a886113e9565b6106cd9088613bed565b9650505050506106dd8160010190565b9050610559565b5050505050565b60004282825b818110156107245761071c3387878481811061070f5761070f6137fe565b9050602002013585611a18565b6001016106f1565b50909150505b92915050565b606061075b7f0000000000000000000000000000000000000000000000000000000000000000611b17565b905090565b600061077361076e83613d22565b611ca5565b604080516001808252818301909252600091816020015b6040805160c081018252600080825260208083018290529282018190526060808301829052608083015260a082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff90920191018161078a5790505090506107f86020840184613d9d565b61080190613dd1565b81600081518110610814576108146137fe565b602090810291909101015261083d83358261083560c0870160a088016135bd565b346001611e2f565b60200151600081518110610853576108536137fe565b6020026020010151915050919050565b60608160008167ffffffffffffffff8111156108815761088161386b565b6040519080825280602002602001820160405280156108b457816020015b606081526020019060019003908161089f5790505b509050600034815b848110156109ce577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff85018114368
989848181106108fc576108fc6137fe565b905060200281019061090e9190613ddd565b905061091d6020820182613e11565b9050600003610958576040517f947d5a8400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600061097d823561096c6020850185613e11565b61097591613e79565b338887611e2f565b805190915061098c9086613bed565b945080602001518785815181106109a5576109a56137fe565b6020026020010181905250806020015151860195505050506109c78160010190565b90506108bc565b506109d98383612541565b979650505050505050565b604080516001808252818301909252600091816020015b60408051808201909152600080825260208201528152602001906001900390816109fb579050509050610a3636839003830160208401613eed565b81600081518110610a4957610a496137fe565b6020908102919091010152610a63823582333460016113e9565b505050565b348160005b818110156106e4577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8201811436868684818110610aad57610aad6137fe565b9050602002810190610abf9190613ddd565b9050610b2c8135610ad36020840184613f09565b808060200260200160405190810160405280939291908181526020016000905b82821015610b1f57610b1060408302860136819003810190613eed565b81526020019060010190610af3565b50505050503388866113e9565b610b369086613bed565b94505050610b448160010190565b9050610a6d565b60004261072a838261262b565b33600090815260208190526040902054808211610ba1576040517f756688fe00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b336000908152602081815260409182902084905581518381529081018490527f57b09af877df9068fd60a69d7b21f5576b8b38955812d6ae4ac52942f1e38fb7910160405180910390a15050565b60608160008167ffffffffffffffff811115610c0d57610c0d61386b565b604051908082528060200260200182016040528015610c4057816020015b6060815260200190600190039081610c2b5790505b509050600034815b848110156109ce577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8501811436898984818110610c8857610c886137fe565b9050602002810190610c9a919061382d565b9050366000610cac6020840184613e11565b909250905080801580610ccd5750610cc76040850185613f71565b90508114155b15610d04576
040517f947d5a8400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60005b81811015610de557610ddd6040518060a0016040528087600001358152602001868685818110610d3957610d396137fe565b9050602002810190610d4b9190613d9d565b610d5490613dd1565b8152602001610d666040890189613f71565b85818110610d7657610d766137fe565b905060600201803603810190610d8c9190613fd8565b8152602001610da16080890160608a016135bd565b73ffffffffffffffffffffffffffffffffffffffff168152602001610dcc60a0890160808a01613ff4565b67ffffffffffffffff169052611ca5565b600101610d07565b506000610e0e8535610df78587613e79565b610e076080890160608a016135bd565b8b8a611e2f565b8051909150610e1d9089613bed565b975080602001518a8881518110610e3657610e366137fe565b602002602001018190525080602001515189019850505050505050610e5b8160010190565b9050610c48565b604080516101408101825260008082526020820181905291810182905260608082018390526080820183905260a0820183905260c0820183905260e0820183905261010082019290925261012081019190915260008281526032602090815260409182902082516101408101845281548152600182015492810192909252600281015467ffffffffffffffff808216948401949094526801000000000000000081048416606084015270010000000000000000000000000000000090049092166080820152600382015460a0820152600482015473ffffffffffffffffffffffffffffffffffffffff90811660c0830152600583015490811660e083015274010000000000000000000000000000000000000000900460ff16151561010082015260068201805491929161012084019190610f9c9061400f565b80601f0160208091040260200160405190810160405280929190818152602001828054610fc89061400f565b80156110155780601f10610fea57610100808354040283529160200191611015565b820191906000526020600020905b815481529060010190602001808311610ff857829003601f168201915b5050505050815250509050919050565b61103c6110373683900383018361405c565b6111d8565b604080516001808252818301909252600091816020015b604080518082019091526000808252602082015281526020019060019003908161105357905050905061108e36839003830160208401613eed565b816000815181106110a1576110a16137fe565b6020908102919091010152610a638235826110c260e0860160c
087016135bd565b3460016113e9565b60004261072a338483611a18565b60004282825b81811015610724576111088686838181106110fb576110fb6137fe565b905060200201358461262b565b6001016110de565b600061075b6126ed565b604080516001808252818301909252600091829190816020015b6040805160c081018252600080825260208083018290529282018190526060808301829052608083015260a082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9092019101816111345790505090506111a26020840184613d9d565b6111ab90613dd1565b816000815181106111be576111be6137fe565b602090810291909101015261083d83358233346001611e2f565b608081015167ffffffffffffffff161580159061120c57504267ffffffffffffffff16816080015167ffffffffffffffff16105b15611243576040517f1ab7da6b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6020808201516040808401516060850151855184518587015173ffffffffffffffffffffffffffffffffffffffff84166000908152978890529487208054969794969495611337957fb5d556f07587ec0f08cf386545cc4362c702a001650c2058002615ee5c9d1e7595949392886112ba836140ca565b909155506080808c015160408051602081019990995273ffffffffffffffffffffffffffffffffffffffff9097169688019690965260608701949094529285019190915260a084015260c083015267ffffffffffffffff1660e0820152610100015b60405160208183030381529060405280519060200120612821565b90506113ad84606001518284602001518560400151866000015160405160200161139993929190928352602083019190915260f81b7fff0000000000000000000000000000000000000000000000000000000000000016604082015260410190565b604051602081830303815290604052612834565b6113e3576040517f8baa579f00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50505050565b6040517fa2ea7c6e0000000000000000000000000000000000000000000000000000000081526004810186905260009081907342000000000000000000000000000000000000209063a2ea7c6e90602401600060405180830381865afa158015611457573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261149d9190810190614102565b80519091506
114d8576040517fbf37b20e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b855160008167ffffffffffffffff8111156114f5576114f561386b565b60405190808252806020026020018201604052801561159457816020015b60408051610140810182526000808252602080830182905292820181905260608083018290526080830182905260a0830182905260c0830182905260e0830182905261010083019190915261012082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff9092019101816115135790505b50905060008267ffffffffffffffff8111156115b2576115b261386b565b6040519080825280602002602001820160405280156115db578160200160208202803683370190505b50905060005b838110156119fa5760008a82815181106115fd576115fd6137fe565b6020908102919091018101518051600090815260329092526040909120805491925090611656576040517fc5723b5100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8c816001015414611693576040517fbf37b20e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600581015473ffffffffffffffffffffffffffffffffffffffff8c81169116146116e9576040517f4ca8886700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600581015474010000000000000000000000000000000000000000900460ff1661173f576040517f157bd4c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6002810154700100000000000000000000000000000000900467ffffffffffffffff1615611799576040517f905e710700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b426002820180547fffffffffffffffff0000000000000000ffffffffffffffffffffffffffffffff811670010000000000000000000000000000000067ffffffffffffffff948516810291821793849055604080516101408101825287548152600188015460208201529386169286169290921791830191909152680100000000000000008304841660608301529091049091166080820152600382015460a0820152600482015473ffffffffffffffffffffffffffffffffffffffff90811660c0830152600583015490811660e083015274010000000000000000000000000000000000000000900460ff16151561010
082015260068201805483916101208401916118a59061400f565b80601f01602080910402602001604051908101604052809291908181526020018280546118d19061400f565b801561191e5780601f106118f35761010080835404028352916020019161191e565b820191906000526020600020905b81548152906001019060200180831161190157829003601f168201915b505050505081525050858481518110611939576119396137fe565b6020026020010181905250816020015184848151811061195b5761195b6137fe565b6020026020010181815250508c8b73ffffffffffffffffffffffffffffffffffffffff16868581518110611991576119916137fe565b602002602001015160c0015173ffffffffffffffffffffffffffffffffffffffff167ff930a6e2523c9cc298691873087a740550b8fc85a0680830414c148ed927f61585600001516040516119e891815260200190565b60405180910390a450506001016115e1565b50611a0a84838360018b8b612a03565b9a9950505050505050505050565b73ffffffffffffffffffffffffffffffffffffffff83166000908152603460209081526040808320858452918290529091205467ffffffffffffffff1615611a8c576040517fec9d6eeb00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008381526020829052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff861690811790915590519091859173ffffffffffffffffffffffffffffffffffffffff8816917f92a1f7a41a7c585a8b09e25b195e225b1d43248daca46b0faf9e0792777a222991a450505050565b604080516020808252818301909252606091600091906020820181803683370190505090506000805b6020811015611be2576000858260208110611b5d57611b5d6137fe565b1a60f81b90507fff000000000000000000000000000000000000000000000000000000000000008116600003611b935750611be2565b80848481518110611ba657611ba66137fe565b60200101907effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1916908160001a9053505060019182019101611b40565b5060008167ffffffffffffffff811115611bfe57611bfe61386b565b6040519080825280601f01601f191660200182016040528015611c28576020820181803683370190505b50905060005b82811015611c9c57838181518110611c4857611c486137fe565b602001015160f81c60f81b828281518110611c6557611c656137fe565b60200101907efffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffff1916908160001a905350600101611c2e565b50949350505050565b608081015167ffffffffffffffff1615801590611cd957504267ffffffffffffffff16816080015167ffffffffffffffff16105b15611d10576040517f1ab7da6b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6020808201516040808401516060808601518651855186880151868801519488015160808901518051908b012060a08a015173ffffffffffffffffffffffffffffffffffffffff871660009081529b8c9052988b2080549a9b989a9899611337997ffeb2925a02bae3dae48d424a0437a2b6ac939aa9230ddc55a1a76f065d988076999493928c611da0836140ca565b919050558e6080015160405160200161131c9b9a999897969594939291909a8b5273ffffffffffffffffffffffffffffffffffffffff998a1660208c015260408b019890985295909716606089015267ffffffffffffffff938416608089015291151560a088015260c087015260e0860152610100850193909352610120840152166101408201526101600190565b60408051808201909152600081526060602082015284516040805180820190915260008152606060208201528167ffffffffffffffff811115611e7457611e7461386b565b604051908082528060200260200182016040528015611e9d578160200160208202803683370190505b5060208201526040517fa2ea7c6e000000000000000000000000000000000000000000000000000000008152600481018990526000907342000000000000000000000000000000000000209063a2ea7c6e90602401600060405180830381865afa158015611f0f573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611f559190810190614102565b8051909150611f90576040517fbf37b20e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008367ffffffffffffffff811115611fab57611fab61386b565b60405190808252806020026020018201604052801561204a57816020015b60408051610140810182526000808252602080830182905292820181905260608083018290526080830182905260a0830182905260c0830182905260e0830182905261010083019190915261012082015282527fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff909201910181611fc95790505b50905060008467ffffffffffffffff811
1156120685761206861386b565b604051908082528060200260200182016040528015612091578160200160208202803683370190505b50905060005b858110156125205760008b82815181106120b3576120b36137fe565b60200260200101519050600067ffffffffffffffff16816020015167ffffffffffffffff16141580156120fe57504267ffffffffffffffff16816020015167ffffffffffffffff1611155b15612135576040517f08e8b93700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8460400151158015612148575080604001515b1561217f576040517f157bd4c300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006040518061014001604052806000801b81526020018f81526020016121a34290565b67ffffffffffffffff168152602001836020015167ffffffffffffffff168152602001600067ffffffffffffffff16815260200183606001518152602001836000015173ffffffffffffffffffffffffffffffffffffffff1681526020018d73ffffffffffffffffffffffffffffffffffffffff16815260200183604001511515815260200183608001518152509050600080600090505b6122458382612df4565b600081815260326020526040902054909250156122645760010161223b565b81835260008281526032602090815260409182902085518155908501516001820155908401516002820180546060870151608088015167ffffffffffffffff908116700100000000000000000000000000000000027fffffffffffffffff0000000000000000ffffffffffffffffffffffffffffffff92821668010000000000000000027fffffffffffffffffffffffffffffffff000000000000000000000000000000009094169190951617919091171691909117905560a0840151600382015560c084015160048201805473ffffffffffffffffffffffffffffffffffffffff9283167fffffffffffffffffffffffff000000000000000000000000000000000000000090911617905560e0850151600583018054610100880151151574010000000000000000000000000000000000000000027fffffffffffffffffffffff000000000000000000000000000000000000000000909116929093169190911791909117905561012084015184919060068201906123e49082614228565b50505060608401511561243b57606084015160009081526032602052604090205461243b576040517fc5723b5100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b828786815181106
1244e5761244e6137fe565b60200260200101819052508360a00151868681518110612470576124706137fe565b6020026020010181815250508189602001518681518110612493576124936137fe565b6020026020010181815250508f8e73ffffffffffffffffffffffffffffffffffffffff16856000015173ffffffffffffffffffffffffffffffffffffffff167f8bf46bf4cfd674fa735a3d63ec1c9ad4153f033c290341f3a588b75685141b358560405161250391815260200190565b60405180910390a4505050506125198160010190565b9050612097565b5061253083838360008c8c612a03565b845250919998505050505050505050565b606060008267ffffffffffffffff81111561255e5761255e61386b565b604051908082528060200260200182016040528015612587578160200160208202803683370190505b508451909150600090815b818110156126205760008782815181106125ae576125ae6137fe565b6020026020010151905060008151905060005b8181101561260c578281815181106125db576125db6137fe565b60200260200101518787815181106125f5576125f56137fe565b6020908102919091010152600195860195016125c1565b5050506126198160010190565b9050612592565b509195945050505050565b60008281526033602052604090205467ffffffffffffffff161561267b576040517f2e26794600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008281526033602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000001667ffffffffffffffff85169081179091559051909184917f5aafceeb1c7ad58e4a84898bdee37c02c0fc46e7d24e6b60e8209449f183459f9190a35050565b60003073ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001614801561275357507f000000000000000000000000000000000000000000000000000000000000000046145b1561277d57507f000000000000000000000000000000000000000000000000000000000000000090565b50604080517f00000000000000000000000000000000000000000000000000000000000000006020808301919091527f0000000000000000000000000000000000000000000000000000000000000000828401527f000000000000000000000000000000000000000000000000000000000000000060608301524660808301523060a0808401919091528351808403909101815260c0909201909252805191012090565b600061072a61282e612
6ed565b83612e53565b60008060006128438585612e95565b9092509050600081600481111561285c5761285c614342565b14801561289457508573ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff16145b156128a4576001925050506129fc565b6000808773ffffffffffffffffffffffffffffffffffffffff16631626ba7e60e01b88886040516024016128d9929190614371565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529181526020820180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fffffffff000000000000000000000000000000000000000000000000000000009094169390931790925290516129629190614392565b600060405180830381855afa9150503d806000811461299d576040519150601f19603f3d011682016040523d82523d6000602084013e6129a2565b606091505b50915091508180156129b5575080516020145b80156129f5575080517f1626ba7e00000000000000000000000000000000000000000000000000000000906129f390830160209081019084016143a4565b145b9450505050505b9392505050565b84516000906001819003612a5b57612a538888600081518110612a2857612a286137fe565b602002602001015188600081518110612a4357612a436137fe565b6020026020010151888888612eda565b915050612dea565b602088015173ffffffffffffffffffffffffffffffffffffffff8116612afc5760005b82811015612ae157878181518110612a9857612a986137fe565b6020026020010151600014612ad9576040517f1574f9f300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600101612a7e565b508315612af157612af1856131f9565b600092505050612dea565b6000808273ffffffffffffffffffffffffffffffffffffffff1663ce46e0466040518163ffffffff1660e01b8152600401602060405180830381865afa158015612b4a573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612b6e91906143bd565b905060005b84811015612c2b5760008a8281518110612b8f57612b8f6137fe565b6020026020010151905080600003612ba75750612c23565b82612bde576040517f1574f9f300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b88811115612c18576040517f110112940000000000000000000000000000000000000000000000000000000081526004016040518
0910390fd5b978890039792909201915b600101612b73565b508715612d06576040517f88e5b2d900000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8416906388e5b2d9908490612c88908e908e906004016143da565b60206040518083038185885af1158015612ca6573d6000803e3d6000fd5b50505050506040513d601f19601f82011682018060405250810190612ccb91906143bd565b612d01576040517fbf2f3a8b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b612dd5565b6040517f91db0b7e00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8416906391db0b7e908490612d5c908e908e906004016143da565b60206040518083038185885af1158015612d7a573d6000803e3d6000fd5b50505050506040513d601f19601f82011682018060405250810190612d9f91906143bd565b612dd5576040517fe8bee83900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8515612de457612de4876131f9565b50925050505b9695505050505050565b60208083015160c084015160e0850151604080870151606088015161010089015160a08a01516101208b01519451600099612e3599989796918c9101614493565b60405160208183030381529060405280519060200120905092915050565b6040517f190100000000000000000000000000000000000000000000000000000000000060208201526022810183905260428101829052600090606201612e35565b6000808251604103612ecb5760208301516040840151606085015160001a612ebf8782858561320c565b94509450505050612ed3565b506000905060025b9250929050565b602086015160009073ffffffffffffffffffffffffffffffffffffffff8116612f4e578515612f35576040517f1574f9f300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b8215612f4457612f44846131f9565b6000915050612dea565b8515613039578073ffffffffffffffffffffffffffffffffffffffff1663ce46e0466040518163ffffffff1660e01b8152600401602060405180830381865afa158015612f9f573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190612fc391906143bd565b612ff9576040517f1574f9f300000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b838
61115613033576040517f1101129400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b85840393505b8415613111576040517fe49617e100000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82169063e49617e1908890613093908b90600401613771565b60206040518083038185885af11580156130b1573d6000803e3d6000fd5b50505050506040513d601f19601f820116820180604052508101906130d691906143bd565b61310c576040517fccf3bb2700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6131de565b6040517fe60c350500000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff82169063e60c3505908890613165908b90600401613771565b60206040518083038185885af1158015613183573d6000803e3d6000fd5b50505050506040513d601f19601f820116820180604052508101906131a891906143bd565b6131de576040517fbd8ba84d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82156131ed576131ed846131f9565b50939695505050505050565b8015613209576132093382613324565b50565b6000807f7fffffffffffffffffffffffffffffff5d576e7357a4501ddfe92f46681b20a0831115613243575060009050600361331b565b8460ff16601b1415801561325b57508460ff16601c14155b1561326c575060009050600461331b565b6040805160008082526020820180845289905260ff881692820192909252606081018690526080810185905260019060a0016020604051602081039080840390855afa1580156132c0573d6000803e3d6000fd5b50506040517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0015191505073ffffffffffffffffffffffffffffffffffffffff81166133145760006001925092505061331b565b9150600090505b94509492505050565b80471015613393576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601d60248201527f416464726573733a20696e73756666696369656e742062616c616e636500000060448201526064015b60405180910390fd5b60008273ffffffffffffffffffffffffffffffffffffffff168260405160006040518083038185875af1925050503d80600081146133ed576040519150601f19603f3d011682016040523d82523d600
0602084013e6133f2565b606091505b5050905080610a63576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603a60248201527f416464726573733a20756e61626c6520746f2073656e642076616c75652c207260448201527f6563697069656e74206d61792068617665207265766572746564000000000000606482015260840161338a565b60008083601f84011261349557600080fd5b50813567ffffffffffffffff8111156134ad57600080fd5b6020830191508360208260051b8501011115612ed357600080fd5b600080602083850312156134db57600080fd5b823567ffffffffffffffff8111156134f257600080fd5b6134fe85828601613483565b90969095509350505050565b60005b8381101561352557818101518382015260200161350d565b50506000910152565b6000815180845261354681602086016020860161350a565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006129fc602083018461352e565b73ffffffffffffffffffffffffffffffffffffffff8116811461320957600080fd5b80356135b88161358b565b919050565b6000602082840312156135cf57600080fd5b81356129fc8161358b565b6000602082840312156135ec57600080fd5b813567ffffffffffffffff81111561360357600080fd5b820160e081850312156129fc57600080fd5b6020808252825182820181905260009190848201906040850190845b8181101561364d57835183529284019291840191600101613631565b50909695505050505050565b60006060828403121561366b57600080fd5b50919050565b60006020828403121561368357600080fd5b5035919050565b6000610140825184526020830151602085015260408301516136b8604086018267ffffffffffffffff169052565b5060608301516136d4606086018267ffffffffffffffff169052565b5060808301516136f0608086018267ffffffffffffffff169052565b5060a083015160a085015260c083015161372260c086018273ffffffffffffffffffffffffffffffffffffffff169052565b5060e083015161374a60e086018273ffffffffffffffffffffffffffffffffffffffff169052565b506101008381015115159085015261012080840151818601839052612dea8387018261352e565b6020815260006129fc602083018461368a565b6000610100828403121561366b57600080fd5b600080604083850312156137aa57600080fd5b82356137b58161358b565b946020939093013593505050565b600060208284031
2156137d557600080fd5b813567ffffffffffffffff8111156137ec57600080fd5b8201604081850312156129fc57600080fd5b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fd5b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff6183360301811261386157600080fd5b9190910192915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60405160a0810167ffffffffffffffff811182821017156138bd576138bd61386b565b60405290565b60405160c0810167ffffffffffffffff811182821017156138bd576138bd61386b565b6040516080810167ffffffffffffffff811182821017156138bd576138bd61386b565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff811182821017156139505761395061386b565b604052919050565b600067ffffffffffffffff8211156139725761397261386b565b5060051b60200190565b60006040828403121561398e57600080fd5b6040516040810181811067ffffffffffffffff821117156139b1576139b161386b565b604052823581526020928301359281019290925250919050565b6000606082840312156139dd57600080fd5b6040516060810181811067ffffffffffffffff82111715613a0057613a0061386b565b604052905080823560ff81168114613a1757600080fd5b8082525060208301356020820152604083013560408201525092915050565b600082601f830112613a4757600080fd5b81356020613a5c613a5783613958565b613909565b82815260609283028501820192828201919087851115613a7b57600080fd5b8387015b85811015613a9e57613a9189826139cb565b8452928401928101613a7f565b5090979650505050505050565b803567ffffffffffffffff811681146135b857600080fd5b600060a08236031215613ad557600080fd5b613add61389a565b8235815260208084013567ffffffffffffffff80821115613afd57600080fd5b9085019036601f830112613b1057600080fd5b8135613b1e613a5782613958565b81815260069190911b83018401908481019036831115613b3d57600080fd5b938501935b82851015613b6657613b54368661397c565b82528582019150604085019450613b42565b80868801525050506040860135925080831115613b8257600080fd5b5050613b9036828601613a36565b604083015250613ba2606084016135ad565b6060820152613bb360808401613aab565
b608082015292915050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b8181038181111561072a5761072a613bbe565b801515811461320957600080fd5b600067ffffffffffffffff821115613c2857613c2861386b565b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01660200190565b600060c08284031215613c6657600080fd5b613c6e6138c3565b90508135613c7b8161358b565b81526020613c8a838201613aab565b818301526040830135613c9c81613c00565b604083015260608381013590830152608083013567ffffffffffffffff811115613cc557600080fd5b8301601f81018513613cd657600080fd5b8035613ce4613a5782613c0e565b8181528684838501011115613cf857600080fd5b818484018583013760008483830101528060808601525050505060a082013560a082015292915050565b600060e08236031215613d3457600080fd5b613d3c61389a565b82358152602083013567ffffffffffffffff811115613d5a57600080fd5b613d6636828601613c54565b602083015250613d7936604085016139cb565b604082015260a0830135613d8c8161358b565b6060820152613bb360c08401613aab565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff4183360301811261386157600080fd5b600061072a3683613c54565b600082357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffc183360301811261386157600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112613e4657600080fd5b83018035915067ffffffffffffffff821115613e6157600080fd5b6020019150600581901b3603821315612ed357600080fd5b6000613e87613a5784613958565b80848252602080830192508560051b850136811115613ea557600080fd5b855b81811015613ee157803567ffffffffffffffff811115613ec75760008081fd5b613ed336828a01613c54565b865250938201938201613ea7565b50919695505050505050565b600060408284031215613eff57600080fd5b6129fc838361397c565b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe1843603018112613f3e57600080fd5b83018035915067ffffffffffffffff821115613f5957600080fd5b6020019150600681901b3603821315612ed357600080fd5b60008083357fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe18436030
18112613fa657600080fd5b83018035915067ffffffffffffffff821115613fc157600080fd5b6020019150606081023603821315612ed357600080fd5b600060608284031215613fea57600080fd5b6129fc83836139cb565b60006020828403121561400657600080fd5b6129fc82613aab565b600181811c9082168061402357607f821691505b60208210810361366b577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b6000610100828403121561406f57600080fd5b61407761389a565b82358152614088846020850161397c565b602082015261409a84606085016139cb565b604082015260c08301356140ad8161358b565b60608201526140be60e08401613aab565b60808201529392505050565b60007fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff82036140fb576140fb613bbe565b5060010190565b6000602080838503121561411557600080fd5b825167ffffffffffffffff8082111561412d57600080fd5b908401906080828703121561414157600080fd5b6141496138e6565b825181528383015161415a8161358b565b81850152604083015161416c81613c00565b604082015260608301518281111561418357600080fd5b80840193505086601f84011261419857600080fd5b825191506141a8613a5783613c0e565b82815287858486010111156141bc57600080fd5b6141cb8386830187870161350a565b60608201529695505050505050565b601f821115610a6357600081815260208120601f850160051c810160208610156142015750805b601f850160051c820191505b818110156142205782815560010161420d565b505050505050565b815167ffffffffffffffff8111156142425761424261386b565b61425681614250845461400f565b846141da565b602080601f8311600181146142a957600084156142735750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555614220565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b828110156142f6578886015182559484019460019091019084016142d7565b508582101561433257878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b01905550565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b82815260406020820152600061438a60408
3018461352e565b949350505050565b6000825161386181846020870161350a565b6000602082840312156143b657600080fd5b5051919050565b6000602082840312156143cf57600080fd5b81516129fc81613c00565b6000604082016040835280855180835260608501915060608160051b8601019250602080880160005b8381101561444f577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffa088870301855261443d86835161368a565b95509382019390820190600101614403565b50508584038187015286518085528782019482019350915060005b828110156144865784518452938101939281019260010161446a565b5091979650505050505050565b89815260007fffffffffffffffffffffffffffffffffffffffff000000000000000000000000808b60601b166020840152808a60601b166034840152507fffffffffffffffff000000000000000000000000000000000000000000000000808960c01b166048840152808860c01b1660508401525085151560f81b6058830152846059830152835161452c81607985016020880161350a565b80830190507fffffffff000000000000000000000000000000000000000000000000000000008460e01b166079820152607d81019150509a995050505050505050505056fea164736f6c6343000813000a0000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy EAS Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000013e96080604052348015600e575f80fd5b5060156019565b60d4565b5f54610100900460ff161560835760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b5f5460ff908116101560d2575f805460ff191660ff9081179091556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b611308806100e15f395ff3fe608060405260043610610096575f3560e01c80637dfbd04911610066578063b87ea8d41161004c578063b87ea8d414610284578063c4d66de814610298578063d61a398b146102b7575f80fd5b80637dfbd0491461024e5780637fc81bb714610265575f80fd5b80630a7617b31461014e5780630c0544a31461016f578063394d2731146101d157806354fd4d50146101f9575f80fd5b3661014a573373ffffffffffffffffffffffffffffffffffffffff7f21346dddac42cc163a6523eefc19df981df7352c870dc3b0b17a6a92fc6fe8135c161461010b576040517f14885cf900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040805134815247602082018190529133917f213e72af0d3613bd643cff3059f872c1015e6541624e37872bf95eefbaf220a8910160405180910390a2005b5f80fd5b348015610159575f80fd5b5061016d610168366004610f49565b61030d565b005b34801561017a575f80fd5b506001546101ab9070010000000000000000000000000000000090046fffffffffffffffffffffffffffffffff1681565b6040516fffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b3480156101dc575f80fd5b506001546101ab906fffffffffffffffffffffffffffffffff1681565b348015610204575f80fd5b506102416040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516101c89190610f64565b348015610259575f80fd5b506101ab6301e1338081565b348015610270575f80fd5b5061016d61027f366004610fb7565b6104cf565b34801561028f575f80fd5b5061016d6106c2565b3480156102a3575f8
0fd5b5061016d6102b2366004610f49565b610a9a565b3480156102c2575f80fd5b505f546102e89062010000900473ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016101c8565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561036a573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061038e9190610fe6565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146103f2576040517f38bac74200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73ffffffffffffffffffffffffffffffffffffffff811661043f576040517f99c6ec0800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f805473ffffffffffffffffffffffffffffffffffffffff838116620100008181027fffffffffffffffffffff0000000000000000000000000000000000000000ffff85161790945560408051949093049091168084526020840191909152917f16417cc372deec0caee5f52e2ad77a5f07b4591fd56b4ff31b6e20f817d4daeb91015b60405180910390a15050565b73420000000000000000000000000000000000001873ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561052c573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906105509190610fe6565b73ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff16146105b4576040517f38bac74200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b806fffffffffffffffffffffffffffffffff165f036105ff576040517fcf85916100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6301e133806fffffffffffffffffffffffffffffffff8216111561064f576040517f30b9f35e00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180546fffffffffffffffffffffffffffffffff8381167001000000000000000000000000000000008181028385161790945560408051949
093049091168084526020840191909152917f4492086b630ed3846eec0979dd87a71c814ceb1c6dab80ab81e3450b21e4de2891016104c3565b6001546106f7906fffffffffffffffffffffffffffffffff70010000000000000000000000000000000082048116911661102e565b6fffffffffffffffffffffffffffffffff16421015610742576040517f1e4a9f3a00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b600180547fffffffffffffffffffffffffffffffff0000000000000000000000000000000016426fffffffffffffffffffffffffffffffff161790555f61079c734200000000000000000000000000000000000011610c8e565b90505f6107bc734200000000000000000000000000000000000019610c8e565b90505f6107dc73420000000000000000000000000000000000001a610c8e565b90505f6107fc73420000000000000000000000000000000000001b610c8e565b90506108075f610edb565b5f8282610814868861105e565b61081e919061105e565b610828919061105e565b9050805f03610863576040517fc8972e5200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f80546040517f54e7f42d000000000000000000000000000000000000000000000000000000008152600481018890526024810187905260448101859052606481018690526201000090910473ffffffffffffffffffffffffffffffffffffffff16906354e7f42d906084015f60405180830381865afa1580156108e9573d5f803e3d5ffd5b505050506040513d5f823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261092e919081019061111c565b80519091505f81900361096d576040517f763970d600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f805b82811015610a1d575f84828151811061098b5761098b6111eb565b6020026020010151602001519050805f036109a65750610a15565b5f6109cd8684815181106109bc576109bc6111eb565b60200260200101515f015183610f01565b905080610a06576040517fd68d1b1800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610a10828561105e565b935050505b600101610970565b50838114610a57576040517f9c01eac000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7f73f9a13241a1848ec157967f3a85601709353e616f1f2605d
818c0f2d21774df8385604051610a88929190611218565b60405180910390a15050505050505050565b5f54610100900460ff1615808015610ab857505f54600160ff909116105b80610ad15750303b158015610ad157505f5460ff166001145b610b61576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c697a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a6564000000000000000000000000000000000000606482015260840160405180910390fd5b5f80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790558015610bbd575f80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b5f80547fffffffffffffffffffff0000000000000000000000000000000000000000ffff166201000073ffffffffffffffffffffffffffffffffffffffff85160217905572015180000000000000000000000000000000006fffffffffffffffffffffffffffffffff4216176001558015610c8a575f80547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb3847402498906020016104c3565b5050565b5f60018273ffffffffffffffffffffffffffffffffffffffff166382356d8a6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610cda573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610cfe91906112b3565b6001811115610d0f57610d0f611286565b14610d46576040517fb4726cbe00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b3073ffffffffffffffffffffffffffffffffffffffff168273ffffffffffffffffffffffffffffffffffffffff166366d003ac6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610da6573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610dca9190610fe6565b73ffffffffffffffffffffffffffffffffffffffff1614610e17576040517fc3380cef00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b47610e2183610edb565b8273ffffffffffffffffffffffffffffffffffffffff16633ccfd60b6040518163ffffffff1660e01b81526004016020604051808303815
f875af1158015610e6b573d5f803e3d5ffd5b505050506040513d601f19601f82011682018060405250810190610e8f91906112d1565b91504782610e9d83836112e8565b14610ed4576040517f87c91c5c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5050919050565b807f21346dddac42cc163a6523eefc19df981df7352c870dc3b0b17a6a92fc6fe8135d50565b5f610f0d835a84610f14565b9392505050565b5f805f805f858888f1949350505050565b73ffffffffffffffffffffffffffffffffffffffff81168114610f46575f80fd5b50565b5f60208284031215610f59575f80fd5b8135610f0d81610f25565b602081525f82518060208401528060208501604085015e5f6040828501015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011684010191505092915050565b5f60208284031215610fc7575f80fd5b81356fffffffffffffffffffffffffffffffff81168114610f0d575f80fd5b5f60208284031215610ff6575f80fd5b8151610f0d81610f25565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b6fffffffffffffffffffffffffffffffff81811683821601908082111561105757611057611001565b5092915050565b8082018082111561107157611071611001565b92915050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b6040805190810167ffffffffffffffff811182821017156110c7576110c7611077565b60405290565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016810167ffffffffffffffff8111828210171561111457611114611077565b604052919050565b5f602080838503121561112d575f80fd5b825167ffffffffffffffff80821115611144575f80fd5b818501915085601f830112611157575f80fd5b81518181111561116957611169611077565b611177848260051b016110cd565b818152848101925060069190911b830184019087821115611196575f80fd5b928401925b818410156111e057604084890312156111b2575f80fd5b6111ba6110a4565b84516111c581610f25565b8152848601518682015283526040909301929184019161119b565b979650505050505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52603260045260245ffd5b604080825283518282018190525f91906020906060850190828801855b82811015611270578151805
173ffffffffffffffffffffffffffffffffffffffff168552850151858501529285019290840190600101611235565b5050508093505050508260208301529392505050565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52602160045260245ffd5b5f602082840312156112c3575f80fd5b815160028110610f0d575f80fd5b5f602082840312156112e1575f80fd5b5051919050565b818103818111156110715761107161100156fea164736f6c6343000819000a0000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy FeeSplitter Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca40000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000069e6080604052348015600e575f80fd5b506106828061001c5f395ff3fe608060405234801561000f575f80fd5b506004361061003f575f3560e01c8063331b637f1461004357806354fd4d5014610069578063ab4d6f75146100b2575b5f80fd5b610056610051366004610512565b6100c7565b6040519081526020015b60405180910390f35b6100a56040518060400160405280600581526020017f312e302e3200000000000000000000000000000000000000000000000000000081525081565b604051610060919061053b565b6100c56100c036600461058e565b61039e565b005b5f67ffffffffffffffff801683602001511115610110576040517fd1f79e8200000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b604083015163ffffffff1015610152576040517f94338eba00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b606083015167ffffffffffffffff1015610198576040517f596a19a900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b82516040515f916101dd91859060200160609290921b7fffffffffffffffffffffffffffffffffffffffff000000000000000000000000168252601482015260340190565b604080518083037fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe00181528282528051602091820120878201516060890151898501515f9487
018590527fffffffffffffffff00000000000000000000000000000000000000000000000060c084811b8216602c8a015283901b1660348801527fffffffff0000000000000000000000000000000000000000000000000000000060e082901b16603c88015292965090949093919291016040516020818303038152906040526102ac906105bc565b90505f85826040516020016102cb929190918252602082015260400190565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152828252805160209182012060808d01519184018190529183015291505f90606001604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe081840301815291905280516020909101207effffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f0300000000000000000000000000000000000000000000000000000000000000179a9950505050505050505050565b5f6103b76103b136859003850185610601565b836100c7565b90505f6103c38261043b565b509050806103fd576040517fe3c0081600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b827f5c37832d2e8d10e346e55ad62071a6a2f9fa5130614ef2ec6617555c6f467ba78560405161042d9190610622565b60405180910390a250505050565b5f805a835491505a6103e891031115939092509050565b803573ffffffffffffffffffffffffffffffffffffffff81168114610475575f80fd5b919050565b5f60a0828403121561048a575f80fd5b60405160a0810181811067ffffffffffffffff821117156104d2577f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b6040529050806104e183610452565b8152602083013560208201526040830135604082015260608301356060820152608083013560808201525092915050565b5f8060c08385031215610523575f80fd5b61052d848461047a565b9460a0939093013593505050565b602081525f82518060208401528060208501604085015e5f6040828501015260407fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011684010191505092915050565b5f8082840360c08112156105a0575f80fd5b60a08112156105ad575f80fd5b50919360a08501359350915050565b805160208083015191908110156105fb577fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8160200360031b1b821691505b50919050565b5f60a082840312156106
11575f80fd5b61061b838361047a565b9392505050565b60a0810173ffffffffffffffffffffffffffffffffffffffff61064484610452565b168252602083013560208301526040830135604083015260608301356060830152608083013560808301529291505056fea164736f6c6343000819000a0000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy CrossL2Inbox Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000011836080604052348015600e575f80fd5b506111678061001c5f395ff3fe6080604052600436106100b8575f3560e01c80637056f41f11610071578063b1b1b2091161004c578063b1b1b20914610228578063bc294d7d14610266578063ecc7042814610291575f80fd5b80637056f41f146101b65780637936cbee146101d55780638d1d298f14610215575f80fd5b806352617f3c116100a157806352617f3c1461011c57806354fd4d50146101425780636b0c3c5e14610197575f80fd5b806324794462146100bc57806338ffde18146100e3575b5f80fd5b3480156100c7575f80fd5b506100d06102c5565b6040519081526020015b60405180910390f35b3480156100ee575f80fd5b506100f7610344565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020016100da565b348015610127575f80fd5b5061012f5f81565b60405161ffff90911681526020016100da565b34801561014d575f80fd5b5061018a6040518060400160405280600581526020017f312e332e3000000000000000000000000000000000000000000000000000000081525081565b6040516100da9190610c7e565b3480156101a2575f80fd5b506100d06101b1366004610d00565b6103c3565b3480156101c1575f80fd5b506100d06101d0366004610d77565b6104ae565b3480156101e0575f80fd5b506101e96106ba565b6040805173ffffffffffffffffffffffffffffffffffffffff90931683526020830191909152016100da565b61018a610223366004610dcf565b61075e565b348015610233575f80fd5b50610256610242366004610e25565b5f6020819052908152604090205460ff1681565b60405190151581526020016100da565b348015610271575f80fd5b506100d0610280366004610e25565b60026020525f9081
52604090205481565b34801561029c575f80fd5b506001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff166100d0565b5f7ff13569814868ede994184d5a425471fb19e869768a33421cb701a2ba3d420c0a5c61031e576040517fbca35af600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b507f711dfa3259c842fffc17d6e1f1e0fc5927756133a2345ca56b4cb8178589fee75c90565b5f7ff13569814868ede994184d5a425471fb19e869768a33421cb701a2ba3d420c0a5c61039d576040517fbca35af600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b507fb83444d07072b122e2e72a669ce32857d892345c19856f4e7142d06a167ab3f35c90565b5f610407874688888888888080601f0160208091040260200160405190810160405280939291908181526020018383808284375f92019190915250610ae192505050565b5f878152600260205260409020549091508114610450576040517f6eca2e4b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b858473ffffffffffffffffffffffffffffffffffffffff16887f382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f32088878760405161049c93929190610e3c565b60405180910390a49695505050505050565b5f4685036104e8576040517f8ed9a95d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b7fffffffffffffffffffffffffbdffffffffffffffffffffffffffffffffffffdd73ffffffffffffffffffffffffffffffffffffffff851601610557576040517f4faa250900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f6105816001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff1690565b90506105c6864683338989898080601f0160208091040260200160405190810160405280939291908181526020018383808284375f92019190915250610ae192505050565b5f828152600260205260408120829055600180549294507dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff909216919061060a83610ea5565b91906101000a8154817dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff02191690837dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff16021790555050808573ffffffffffffffffffffffffffffffff
ffffffff16877f382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f3203388886040516106a993929190610e3c565b60405180910390a450949350505050565b5f807ff13569814868ede994184d5a425471fb19e869768a33421cb701a2ba3d420c0a5c610714576040517fbca35af600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b50507fb83444d07072b122e2e72a669ce32857d892345c19856f4e7142d06a167ab3f35c907f711dfa3259c842fffc17d6e1f1e0fc5927756133a2345ca56b4cb8178589fee75c90565b60607ff13569814868ede994184d5a425471fb19e869768a33421cb701a2ba3d420c0a5c156107b9576040517f37ed32e800000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60017ff13569814868ede994184d5a425471fb19e869768a33421cb701a2ba3d420c0a5d7342000000000000000000000000000000000000236107ff6020860186610f06565b73ffffffffffffffffffffffffffffffffffffffff161461084c576040517f7987c15700000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73420000000000000000000000000000000000002273ffffffffffffffffffffffffffffffffffffffff1663ab4d6f7585858560405161088d929190610f21565b6040519081900381207fffffffff0000000000000000000000000000000000000000000000000000000060e085901b1682526108cc9291600401610f30565b5f604051808303815f87803b1580156108e3575f80fd5b505af11580156108f5573d5f803e3d5ffd5b505050505f805f805f6109088888610b1f565b9450945094509450945046851461094b576040517f31ac221100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60808901355f61095f878387878a88610ae1565b5f8181526020819052604090205490915060ff16156109aa576040517f9ca9480b00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f81815260208190526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff001660011790556109ea8285610be8565b5f8673ffffffffffffffffffffffffffffffffffffffff163485604051610a119190610f89565b5f6040518083038185875af1925050503d805f8114610a4b576040519150601f19603f3d011682016040523d82523d5f602084013e610a50565b606091505b509950905080610a
6257885189602001fd5b8186847fc270d73e26d2d39dee7ef92093555927e344e243415547ecc350b2b5385b68a28c80519060200120604051610a9d91815260200190565b60405180910390a4610aaf5f80610be8565b50505050505050505f7ff13569814868ede994184d5a425471fb19e869768a33421cb701a2ba3d420c0a5d9392505050565b5f868686868686604051602001610afd96959493929190610f9f565b6040516020818303038152906040528051906020012090509695505050505050565b5f808080606081610b33602082898b610ff5565b810190610b409190610e25565b90507f382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f3208114610b9b576040517fdf1eb58600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b610ba960806020898b610ff5565b810190610bb6919061101c565b91975095509350610bca876080818b610ff5565b810190610bd7919061107e565b969995985093965092949392505050565b817f711dfa3259c842fffc17d6e1f1e0fc5927756133a2345ca56b4cb8178589fee75d807fb83444d07072b122e2e72a669ce32857d892345c19856f4e7142d06a167ab3f35d5050565b5f81518084528060208401602086015e5f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f610c906020830184610c32565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff81168114610cb8575f80fd5b50565b5f8083601f840112610ccb575f80fd5b50813567ffffffffffffffff811115610ce2575f80fd5b602083019150836020828501011115610cf9575f80fd5b9250929050565b5f805f805f8060a08789031215610d15575f80fd5b86359550602087013594506040870135610d2e81610c97565b93506060870135610d3e81610c97565b9250608087013567ffffffffffffffff811115610d59575f80fd5b610d6589828a01610cbb565b979a9699509497509295939492505050565b5f805f8060608587031215610d8a575f80fd5b843593506020850135610d9c81610c97565b9250604085013567ffffffffffffffff811115610db7575f80fd5b610dc387828801610cbb565b95989497509550505050565b5f805f83850360c0811215610de2575f80fd5b60a0811215610def575f80fd5b5083925060a084013567ffffffffffffffff811115610e0c575f80fd5b610e1886828701610cbb565b9497909650939450505050565b5f60208284031215610e35575f80fd5b5035919050565b73ffffff
ffffffffffffffffffffffffffffffffff8416815260406020820152816040820152818360608301375f818301606090810191909152601f9092017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016010192915050565b5f7dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff808316818103610efc577f4e487b71000000000000000000000000000000000000000000000000000000005f52601160045260245ffd5b6001019392505050565b5f60208284031215610f16575f80fd5b8135610c9081610c97565b818382375f9101908152919050565b60c081018335610f3f81610c97565b73ffffffffffffffffffffffffffffffffffffffff1682526020848101359083015260408085013590830152606080850135908301526080938401359382019390935260a0015290565b5f82518060208501845e5f920191825250919050565b8681528560208201528460408201525f73ffffffffffffffffffffffffffffffffffffffff808616606084015280851660808401525060c060a0830152610fe960c0830184610c32565b98975050505050505050565b5f8085851115611003575f80fd5b8386111561100f575f80fd5b5050820193919092039150565b5f805f6060848603121561102e575f80fd5b83359250602084013561104081610c97565b929592945050506040919091013590565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f806040838503121561108f575f80fd5b823561109a81610c97565b9150602083013567ffffffffffffffff808211156110b6575f80fd5b818501915085601f8301126110c9575f80fd5b8135818111156110db576110db611051565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561112157611121611051565b81604052828152886020848701011115611139575f80fd5b826020860160208301375f602084830101528095505050505050925092905056fea164736f6c6343000819000a0000000000000000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2ToL2CrossDomainMessenger Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000007ab608060405234801561001057600080fd5b5061078b806100206000396000f3fe6080604052600436106100345760003560e01c80634f0edcc91461003957806354fd4d501461005b57806364a197f3146100ba575b600080fd5b34801561004557600080fd5b506100596100543660046105ae565b6100db565b005b34801561006757600080fd5b506100a46040518060400160405280600581526020017f312e302e3100000000000000000000000000000000000000000000000000000081525081565b6040516100b1919061065a565b60405180910390f35b6100cd6100c8366004610674565b610340565b6040519081526020016100b1565b3373420000000000000000000000000000000000002314610128576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60008073420000000000000000000000000000000000002373ffffffffffffffffffffffffffffffffffffffff16637936cbee6040518163ffffffff1660e01b81526004016040805180830381865afa158015610189573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906101ad91906106a0565b909250905073ffffffffffffffffffffffffffffffffffffffff82163014610201576040517fbc22e2aa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517fa0712d68000000000000000000000000000000000000000000000000000000008152600481018490527342000000000000000000000000000000000000259063a0712d6890602401600060405180830381600087803b15801561026757600080fd5b505af115801561027b573d6000803e3d6000fd5b50505050828460405161028d9061057d565b73ffffffffffffffffffffffffffffffffffffffff90911681526020016040518091039082f09050801580156102c7573d6000803e3d6000fd5b50508373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff167fe5479bb8ebad3b9ac81f55f424a6289cf0a54ff2641708f41dcb2b26f264d3598584604051610331929190918252602082015260400190565b60405180910390a35050505050565b600073ffffffffffffffffffffffffffffffffffffffff831661038f576040517fd
92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73420000000000000000000000000000000000002573ffffffffffffffffffffffffffffffffffffffff166344df8e70346040518263ffffffff1660e01b81526004016000604051808303818588803b1580156103eb57600080fd5b505af11580156103ff573d6000803e3d6000fd5b50506040805133602482015273ffffffffffffffffffffffffffffffffffffffff881660448201523460648083019190915282518083039091018152608490910182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f4f0edcc90000000000000000000000000000000000000000000000000000000017905290517f7056f41f0000000000000000000000000000000000000000000000000000000081527342000000000000000000000000000000000000239450637056f41f93506104de9250869130916004016106ce565b6020604051808303816000875af11580156104fd573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610521919061070c565b604080513481526020810185905291925073ffffffffffffffffffffffffffffffffffffffff85169133917fed98a2ff78833375c368471a747cdf0633024dde3f870feb08a934ac5be83402910160405180910390a392915050565b60598061072683390190565b73ffffffffffffffffffffffffffffffffffffffff811681146105ab57600080fd5b50565b6000806000606084860312156105c357600080fd5b83356105ce81610589565b925060208401356105de81610589565b929592945050506040919091013590565b6000815180845260005b81811015610615576020818501810151868301820152016105f9565b81811115610627576000602083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b60208152600061066d60208301846105ef565b9392505050565b6000806040838503121561068757600080fd5b823561069281610589565b946020939093013593505050565b600080604083850312156106b357600080fd5b82516106be81610589565b6020939093015192949293505050565b83815273ffffffffffffffffffffffffffffffffffffffff8316602082015260606040820152600061070360608301846105ef565b95945050505050565b60006020828403121561071e57600080fd5b505191905056fe608060405260405160593803806059833981016040819052601e91602a565b80600160016
0a01b0316ff5b600060208284031215603b57600080fd5b81516001600160a01b0381168114605157600080fd5b939250505056fea164736f6c634300080f000a000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy SuperchainETHBridge Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000eab6080604052348015600e575f80fd5b50610e8f8061001c5f395ff3fe608060405234801561000f575f80fd5b506004361061003f575f3560e01c8063316b37391461004357806354fd4d50146100a2578063ef26a315146100eb575b5f80fd5b610078610051366004610681565b5f6020819052908152604090205473ffffffffffffffffffffffffffffffffffffffff1681565b60405173ffffffffffffffffffffffffffffffffffffffff90911681526020015b60405180910390f35b6100de6040518060400160405280600581526020017f312e302e3100000000000000000000000000000000000000000000000000000081525081565b60405161009991906106e6565b6100786100f93660046107cc565b5f80858585856040516024016101129493929190610853565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0818403018152918152602080830180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167ff6d2ee860000000000000000000000000000000000000000000000000000000017905290519192505f919061019c90820161064c565b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe082820381018352601f9091011660408190526101f3907342000000000000000000000000000000000000279085906020016108a8565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529082905261022f92916020016108f5565b60405160208183030381529060405290505f878787876040516020016102589493929190610853565b60405160208183030381529060405280519060200120905061027b81835f610310565b73ffffffffffffffffffffffffffffffffffffffff8181165f818152602081815260409182902080547fffff
ffffffffffffffffffff000000000000000000000000000000000000000016948e1694851790559051338152939750919290917fc4dc49b8346732c19a032264daf2bebf08be752a393ea2a8519cd1953ef4d932910160405180910390a3505050949350505050565b5f806040518060400160405280601081526020017f67363d3d37363d34f03d5260086018f30000000000000000000000000000000081525090505f858251602084015ff5905073ffffffffffffffffffffffffffffffffffffffff81166103d8576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601160248201527f4445504c4f594d454e545f4641494c454400000000000000000000000000000060448201526064015b60405180910390fd5b6103e1866104e0565b92505f8173ffffffffffffffffffffffffffffffffffffffff16858760405161040a9190610909565b5f6040518083038185875af1925050503d805f8114610444576040519150601f19603f3d011682016040523d82523d5f602084013e610449565b606091505b50509050808015610470575073ffffffffffffffffffffffffffffffffffffffff84163b15155b6104d6576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601560248201527f494e495449414c495a4154494f4e5f4641494c4544000000000000000000000060448201526064016103cf565b5050509392505050565b604080518082018252601081527f67363d3d37363d34f03d5260086018f30000000000000000000000000000000060209182015290517fff00000000000000000000000000000000000000000000000000000000000000918101919091527fffffffffffffffffffffffffffffffffffffffff0000000000000000000000003060601b166021820152603581018290527f21c35dbe1b344a2488cf3321d6ce542f8e9f305544ff09e4993a62319a497c1f60558201525f9081906105bb906075015b6040516020818303038152906040528051906020012090565b6040517fd69400000000000000000000000000000000000000000000000000000000000060208201527fffffffffffffffffffffffffffffffffffffffff000000000000000000000000606083901b1660228201527f01000000000000000000000000000000000000000000000000000000000000006036820152909150610645906037016105a2565b9392505050565b61056e8061091583390190565b803573ffffffffffffffffffffffffffffffffffffffff8116811461067c575f80fd5b919050565b5f60208284031215610691575f80
fd5b61064582610659565b5f81518084528060208401602086015e5f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f610645602083018461069a565b7f4e487b71000000000000000000000000000000000000000000000000000000005f52604160045260245ffd5b5f82601f830112610734575f80fd5b813567ffffffffffffffff8082111561074f5761074f6106f8565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715610795576107956106f8565b816040528381528660208588010111156107ad575f80fd5b836020870160208301375f602085830101528094505050505092915050565b5f805f80608085870312156107df575f80fd5b6107e885610659565b9350602085013567ffffffffffffffff80821115610804575f80fd5b61081088838901610725565b94506040870135915080821115610825575f80fd5b5061083287828801610725565b925050606085013560ff81168114610848575f80fd5b939692955090935050565b73ffffffffffffffffffffffffffffffffffffffff85168152608060208201525f610881608083018661069a565b8281036040840152610893818661069a565b91505060ff8316606083015295945050505050565b73ffffffffffffffffffffffffffffffffffffffff83168152604060208201525f6108d6604083018461069a565b949350505050565b5f81518060208401855e5f93019283525090919050565b5f6108d661090383866108de565b846108de565b5f61064582846108de56fe60a060405260405161056e38038061056e83398101604081905261002291610354565b61002c828261003e565b506001600160a01b031660805261043f565b610047826100fb565b6040516001600160a01b038316907f1cf3b03a6cf19fa2baba4df148e9dcabedea7f8a5c07840e207e5c089be95d3e905f90a28051156100ef576100ea826001600160a01b0316635c60da1b6040518163ffffffff1660e01b8152600401602060405180830381865afa1580156100c0573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906100e49190610410565b82610209565b505050565b6100f761027c565b5050565b806001600160a01b03163b5f0361013557604051631933b43b60e21b81526001600160a01b03821660048201526024015b60405180910390fd5b807fa3f0ad74e5423aebfd80d3ef4346578335a9a72aeaee59ff6cb3582b35133d5080546001600160a01b0319
166001600160a01b0392831617905560408051635c60da1b60e01b815290515f92841691635c60da1b9160048083019260209291908290030181865afa1580156101ae573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906101d29190610410565b9050806001600160a01b03163b5f036100f757604051634c9c8ce360e01b81526001600160a01b038216600482015260240161012c565b60605f80846001600160a01b0316846040516102259190610429565b5f60405180830381855af49150503d805f811461025d576040519150601f19603f3d011682016040523d82523d5f602084013e610262565b606091505b50909250905061027385838361029d565b95945050505050565b341561029b5760405163b398979f60e01b815260040160405180910390fd5b565b6060826102b2576102ad826102fc565b6102f5565b81511580156102c957506001600160a01b0384163b155b156102f257604051639996b31560e01b81526001600160a01b038516600482015260240161012c565b50805b9392505050565b80511561030c5780518082602001fd5b604051630a12f52160e11b815260040160405180910390fd5b80516001600160a01b038116811461033b575f80fd5b919050565b634e487b7160e01b5f52604160045260245ffd5b5f8060408385031215610365575f80fd5b61036e83610325565b60208401519092506001600160401b038082111561038a575f80fd5b818501915085601f83011261039d575f80fd5b8151818111156103af576103af610340565b604051601f8201601f19908116603f011681019083821181831017156103d7576103d7610340565b816040528281528860208487010111156103ef575f80fd5b8260208601602083015e5f6020848301015280955050505050509250929050565b5f60208284031215610420575f80fd5b6102f582610325565b5f82518060208501845e5f920191825250919050565b6080516101186104565f395f602301526101185ff3fe608060405261000c61000e565b005b61001e610019610020565b6100b3565b565b5f7f000000000000000000000000000000000000000000000000000000000000000073ffffffffffffffffffffffffffffffffffffffff16635c60da1b6040518163ffffffff1660e01b8152600401602060405180830381865afa15801561008a573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906100ae91906100d1565b905090565b365f80375f80365f845af43d5f803e8080156100cd573d5ff35b3d5ffd5b5f602082840312156100e1575f80fd5b815173ffffffffffffffffffffffffffffffff
ffffffff81168114610104575f80fd5b939250505056fea164736f6c6343000819000aa164736f6c6343000819000a000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy OptimismSuperchainERC20Factory Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca40000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000014f608060405234801561001057600080fd5b5061012f806100206000396000f3fe608060405234801561001057600080fd5b50600436106100365760003560e01c806354fd4d501461003b5780635c60da1b1461008d575b600080fd5b6100776040518060400160405280600581526020017f312e302e3100000000000000000000000000000000000000000000000000000081525081565b60405161008491906100af565b60405180910390f35b60405173b9415c6ca93bdc545d4c5177512fcc22efa38f288152602001610084565b600060208083528351808285015260005b818110156100dc578581018301518582016040015282016100c0565b818111156100ee576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201604001939250505056fea164736f6c634300080f000a0000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy OptimismSuperchainERC20Beacon Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000008496080604052348015600e575f80fd5b5061082d8061001c5f395ff3fe608060405234801561000f575f80fd5b506004361061003f575f3560e01c806354fd4d50146100435780637cfd6dbc14610095578063c1a433d8146100aa575b5f80fd5b61007f6040518060400160405280600581526020017f312e302e3100000000000000000000000000000000000000000000000000000081525081565b60405161008c91906106b3565b60405180910390f35b6100a86100a33660046106f0565b6100cb565b005b6100bd6100b836600461073e565b610300565b60405190815260200161008c565b3373420000000000000000000000000000000000002314610118576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b5f8073420000000000000000000000000000000000002373ffffffffffffffffffffffffffffffffffffffff16637936cbee6040518163ffffffff1660e01b81526004016040805180830381865afa158015610176573d5f803e3d5ffd5b505050506040513d601f19601f8201168201806040525081019061019a9190610781565b909250905073ffffffffffffffffffffffffffffffffffffffff821630146101ee576040517fbc22e2aa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f18bf507700000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8581166004830152602482018590528716906318bf5077906044015f604051808303815f87803b15801561025b575f80fd5b505af115801561026d573d5f803e3d5ffd5b505050508373ffffffffffffffffffffffffffffffffffffffff168573ffffffffffffffffffffffffffffffffffffffff168773ffffffffffffffffffffffffffffffffffffffff167f434965d7426acf45a548f00783c067e9ad789c8c66444f0a5ad8941d5005be9386856040516102f0929190918252602082015260400190565b60405180910390a4505050505050565b5f73ffffffffffffffffffffffffffffffffffffffff841661034e576040517fd92e233d00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f01ffc9a70000000000000000000000000000000
000000000000000000000000081527f3333199400000000000000000000000000000000000000000000000000000000600482015273ffffffffffffffffffffffffffffffffffffffff8616906301ffc9a790602401602060405180830381865afa1580156103d6573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906103fa91906107ad565b610430576040517f0ed63dae00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f2b8c49e30000000000000000000000000000000000000000000000000000000081523360048201526024810184905273ffffffffffffffffffffffffffffffffffffffff861690632b8c49e3906044015f604051808303815f87803b15801561049b575f80fd5b505af11580156104ad573d5f803e3d5ffd5b50506040805173ffffffffffffffffffffffffffffffffffffffff89811660248301523360448301528816606482015260848082018890528251808303909101815260a490910182526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f7cfd6dbc0000000000000000000000000000000000000000000000000000000017905290517f7056f41f0000000000000000000000000000000000000000000000000000000081529092507342000000000000000000000000000000000000239150637056f41f90610595908690309086906004016107cc565b6020604051808303815f875af11580156105b1573d5f803e3d5ffd5b505050506040513d601f19601f820116820180604052508101906105d59190610809565b91508473ffffffffffffffffffffffffffffffffffffffff163373ffffffffffffffffffffffffffffffffffffffff168773ffffffffffffffffffffffffffffffffffffffff167f0247bfe63a1aaa59e073e20b172889babfda8d3273b5798e0e9ac4388e6dd11c8787604051610656929190918252602082015260400190565b60405180910390a450949350505050565b5f81518084528060208401602086015e5f6020828601015260207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0601f83011685010191505092915050565b602081525f6106c56020830184610667565b9392505050565b73ffffffffffffffffffffffffffffffffffffffff811681146106ed575f80fd5b50565b5f805f8060808587031215610703575f80fd5b843561070e816106cc565b9350602085013561071e816106cc565b9250604085013561072e816106cc565b9396929550929360600135925050565b5f805f8060808587031215610751575
f80fd5b843561075c816106cc565b9350602085013561076c816106cc565b93969395505050506040820135916060013590565b5f8060408385031215610792575f80fd5b825161079d816106cc565b6020939093015192949293505050565b5f602082840312156107bd575f80fd5b815180151581146106c5575f80fd5b83815273ffffffffffffffffffffffffffffffffffffffff83166020820152606060408201525f6108006060830184610667565b95945050505050565b5f60208284031215610819575f80fd5b505191905056fea164736f6c6343000819000a0000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy SuperchainTokenBridge Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca4000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000003b3608060405234801561001057600080fd5b50610393806100206000396000f3fe60806040526004361061003f5760003560e01c806344df8e701461004457806354fd4d501461004e578063a0712d68146100ad578063b60d4288146100cd575b600080fd5b61004c6100d5565b005b34801561005a57600080fd5b506100976040518060400160405280600581526020017f312e312e3000000000000000000000000000000000000000000000000000000081525081565b6040516100a491906102a1565b60405180910390f35b3480156100b957600080fd5b5061004c6100c8366004610314565b61015a565b61004c610229565b3373420000000000000000000000000000000000002414610122576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405134815233907f875e07afd7ce17c6531b1a6b7b34829dcd8b7e6639448afbd6a8e29fa1422b82906020015b60405180910390a2565b33734200000000000000000000000000000000000024146101a7576040517f82b4290000000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80336040516101b590610295565b73ffffffffffffffffffffffffffffffffffffffff90911681526020016040518091039082f09050801580156101ef573d6000803e3d6000fd5b505060405181815233907f85719716ac5bd274
4ae7ed3d16702129383049b97123b506320e7a5826ebbbba9060200160405180910390a250565b34600003610263576040517f2c5211c600000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405134815233907fbb9e497a5b82d1a37f9496dd70c6efb97ba0d98c66c3422d05010105d063359890602001610150565b60598061032e83390190565b600060208083528351808285015260005b818110156102ce578581018301518582016040015282016102b2565b818111156102e0576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b60006020828403121561032657600080fd5b503591905056fe608060405260405160593803806059833981016040819052601e91602a565b806001600160a01b0316ff5b600060208284031215603b57600080fd5b81516001600160a01b0381168114605157600080fd5b939250505056fea164736f6c634300080f000a00000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy ETHLiquidity Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000c6e608060405234801561001057600080fd5b50610c4e806100206000396000f3fe608060405234801561001057600080fd5b50600436106101a35760003560e01c806364ca23ef116100ee578063c598591811610097578063e591b28211610071578063e591b28214610383578063e81b2c6d146103a5578063f8206140146103ae578063fe3d5710146103b757600080fd5b8063c598591814610353578063c6d5301614610373578063d84447151461037b57600080fd5b80638b239f73116100c85780638b239f73146103215780639e8c49661461032a578063b80777ea1461033357600080fd5b806364ca23ef146102dd57806368d5dca6146102f15780638381f58a1461030d57600080fd5b80634397dfef1161015057806354fd4d501161012a57806354fd4d50146102b7578063550fcdc9146102cc5780635cf24969146102d457600080fd5b80634397dfef14610248578063440a5e201461027e5780634d5d9a2a1461028657600080fd5b806316d3bc7f1161018157806316d3bc7f146101e1578063213268491461020e5780633db6be2b1461024057600080fd5b8063015d8eb9146101a8578063098999be146101bd57806309bd5a60146101c5575b600080fd5b6101bb6101b6366004610a13565b6103e8565b005b6101bb610528565b6101ce60025481565b6040519081526020015b60405180910390f35b6008546101f59067ffffffffffffffff1681565b60405167ffffffffffffffff90911681526020016101d8565b7fd2ff82c9b477ff6a09f530b1c627ffb4b0b81e2ae2ba427f824162e8dad020aa5460405190151581526020016101d8565b6101bb61053b565b610250610565565b6040805173ffffffffffffffffffffffffffffffffffffffff909316835260ff9091166020830152016101d8565b6101bb6105cc565b6008546102a29068010000000000000000900463ffffffff1681565b60405163ffffffff90911681526020016101d8565b6102bf610623565b6040516101d89190610ab5565b6102bf610683565b6101ce60015481565b6003546101f59067ffffffffffffffff1681565b6003546102a29068010000000000000000900463ffffffff1681565b6000546101f59067ffffffffffffffff1681565b6101ce60055481565b6101ce60065481565b6000546101f59068010000000000000000900467ffffffffffffffff1681565b6003546102a2906c0100000000000
0000000000000900463ffffffff1681565b6101bb610793565b6102bf610931565b60405173deaddeaddeaddeaddeaddeaddeaddeaddead000181526020016101d8565b6101ce60045481565b6101ce60075481565b6008546103d5906c01000000000000000000000000900461ffff1681565b60405161ffff90911681526020016101d8565b3373deaddeaddeaddeaddeaddeaddeaddeaddead000114610490576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152603b60248201527f4c31426c6f636b3a206f6e6c7920746865206465706f7369746f72206163636f60448201527f756e742063616e20736574204c3120626c6f636b2076616c756573000000000060648201526084015b60405180910390fd5b6000805467ffffffffffffffff98891668010000000000000000027fffffffffffffffffffffffffffffffff00000000000000000000000000000000909116998916999099179890981790975560019490945560029290925560038054919094167fffffffffffffffffffffffffffffffffffffffffffffffff00000000000000009190911617909255600491909155600555600655565b6105306105cc565b60a43560a01c600855565b6105436105cc565b6dffff00000000000000000000000060b03560901c1660a43560a01c17600855565b6040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152601660248201527f4c31426c6f636b4347543a20646570726563617465640000000000000000000060448201526000908190606401610487565b73deaddeaddeaddeaddeaddeaddeaddeaddead00013381146105f657633cc50b456000526004601cfd5b60043560801c60035560143560801c60005560243560015560443560075560643560025560843560045550565b606061065f60408051808201909152600581527f312e382e30000000000000000000000000000000000000000000000000000000602082015290565b60405160200161066f9190610b06565b604051602081830303815290604052905090565b60606106ad7fd2ff82c9b477ff6a09f530b1c627ffb4b0b81e2ae2ba427f824162e8dad020aa5490565b6106e9575060408051808201909152600381527f4554480000000000000000000000000000000000000000000000000000000000602082015290565b73420000000000000000000000000000000000002a73ffffffffffffffffffffffffffffffffffffffff1663550fcdc96040518163ffffffff1660e01b8152600401600060405180830381865afa158015610748573d6000803e3d6000fd5b505
050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016820160405261078e9190810190610b76565b905090565b3373deaddeaddeaddeaddeaddeaddeaddeaddead00011461085c576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152604160248201527f4c31426c6f636b3a206f6e6c7920746865206465706f7369746f72206163636f60448201527f756e742063616e20736574206973437573746f6d476173546f6b656e20666c6160648201527f6700000000000000000000000000000000000000000000000000000000000000608482015260a401610487565b7fd2ff82c9b477ff6a09f530b1c627ffb4b0b81e2ae2ba427f824162e8dad020aa541561090b576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4c31426c6f636b3a20437573746f6d476173546f6b656e20616c72656164792060448201527f61637469766500000000000000000000000000000000000000000000000000006064820152608401610487565b60017fd2ff82c9b477ff6a09f530b1c627ffb4b0b81e2ae2ba427f824162e8dad020aa55565b606061095b7fd2ff82c9b477ff6a09f530b1c627ffb4b0b81e2ae2ba427f824162e8dad020aa5490565b610997575060408051808201909152600581527f4574686572000000000000000000000000000000000000000000000000000000602082015290565b73420000000000000000000000000000000000002a73ffffffffffffffffffffffffffffffffffffffff1663d84447156040518163ffffffff1660e01b8152600401600060405180830381865afa158015610748573d6000803e3d6000fd5b803567ffffffffffffffff81168114610a0e57600080fd5b919050565b600080600080600080600080610100898b031215610a3057600080fd5b610a39896109f6565b9750610a4760208a016109f6565b96506040890135955060608901359450610a6360808a016109f6565b979a969950949793969560a0850135955060c08501359460e001359350915050565b60005b83811015610aa0578181015183820152602001610a88565b83811115610aaf576000848401525b50505050565b6020815260008251806020840152610ad4816040850160208701610a85565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169190910160400192915050565b60008251610b18818460208701610a85565b7f2b637573746f6d2d6761732d746f6b656e000000000000000
000000000000000920191825250601101919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600060208284031215610b8857600080fd5b815167ffffffffffffffff80821115610ba057600080fd5b818401915084601f830112610bb457600080fd5b815181811115610bc657610bc6610b47565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908382118183101715610c0c57610c0c610b47565b81604052828152876020848701011115610c2557600080fd5b610c36836020830160208801610a85565b97965050505050505056fea164736f6c634300080f000a000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L1BlockCGT Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000862608060405234801561001057600080fd5b50610842806100206000396000f3fe6080604052600436106100695760003560e01c806382e3702d1161004357806382e3702d146100f6578063c2b3e5ac14610136578063ecc704281461014957600080fd5b80633f827a5a1461009257806344df8e70146100bf57806354fd4d50146100d457600080fd5b3661008d5761008b33620186a0604051806020016040528060008152506101ae565b005b600080fd5b34801561009e57600080fd5b506100a7600181565b60405161ffff90911681526020015b60405180910390f35b3480156100cb57600080fd5b5061008b610284565b3480156100e057600080fd5b506100e96102bc565b6040516100b691906105dd565b34801561010257600080fd5b506101266101113660046105f7565b60006020819052908152604090205460ff1681565b60405190151581526020016100b6565b61008b61014436600461063f565b6101ae565b34801561015557600080fd5b506101a06001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b6040519081526020016100b6565b73420000000000000000000000000000000000001573fffffffffffffffffffffff
fffffffffffffffff1663213268496040518163ffffffff1660e01b8152600401602060405180830381865afa15801561020d573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906102319190610743565b801561023d5750600034115b15610274576040517fcdfaa11100000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b61027f83838361031c565b505050565b4761028e816104e0565b60405181907f7967de617a5ac1cc7eba2d6f37570a0135afa950d8bb77cdd35f0d0b4e85a16f90600090a250565b60606102f860408051808201909152600581527f312e322e30000000000000000000000000000000000000000000000000000000602082015290565b6040516020016103089190610765565b604051602081830303815290604052905090565b60006103b26040518060c001604052806103766001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b815233602082015273ffffffffffffffffffffffffffffffffffffffff871660408201523460608201526080810186905260a00184905261050a565b600081815260208190526040902080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055905073ffffffffffffffffffffffffffffffffffffffff84163361044d6001547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff167e010000000000000000000000000000000000000000000000000000000000001790565b7f02a52367d10742d8032712c1bb8e0144ff1ec5ffda1ed7d70bb05a27449550543487878760405161048294939291906107a6565b60405180910390a45050600180547dffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff8082168301167fffff0000000000000000000000000000000000000000000000000000000000009091161790555050565b806040516104ed90610557565b6040518091039082f090508015801561027f573d6000803e3d6000fd5b80516020808301516040808501516060860151608087015160a0880151935160009761053a9790969591016107d6565b604051602081830303815290604052805190602001209050919050565b60088061082e83390190565b60005b8381101561057e578181015183820152602001610566565b8381111561058d576000848401525b50505050565b600081518084526105ab816020860160208601610563565b601f017ffffffffffffffffff
fffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006105f06020830184610593565b9392505050565b60006020828403121561060957600080fd5b5035919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b60008060006060848603121561065457600080fd5b833573ffffffffffffffffffffffffffffffffffffffff8116811461067857600080fd5b925060208401359150604084013567ffffffffffffffff8082111561069c57600080fd5b818601915086601f8301126106b057600080fd5b8135818111156106c2576106c2610610565b604051601f82017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f0116810190838211818310171561070857610708610610565b8160405282815289602084870101111561072157600080fd5b8260208601602083013760006020848301015280955050505050509250925092565b60006020828403121561075557600080fd5b815180151581146105f057600080fd5b60008251610777818460208701610563565b7f2b637573746f6d2d6761732d746f6b656e000000000000000000000000000000920191825250601101919050565b8481528360208201526080604082015260006107c56080830185610593565b905082606083015295945050505050565b868152600073ffffffffffffffffffffffffffffffffffffffff808816602084015280871660408401525084606083015283608083015260c060a083015261082160c0830184610593565b9897505050505050505056fe608060405230fffea164736f6c634300080f000a000000000000000000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2ToL1MessagePasserCGT Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000001082608060405234801561001057600080fd5b5061001961001e565b6100de565b600054610100900460ff161561008a5760405162461bcd60e51b815260206004820152602760248201527f496e697469616c697a61626c653a20636f6e747261637420697320696e697469604482015266616c697a696e6760c81b606482015260840160405180910390fd5b60005460ff90811610156100dc576000805460ff191660ff9081179091556040519081527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b565b610f95806100ed6000396000f3fe6080604052600436106100c75760003560e01c80638da5cb5b11610074578063d84447151161004e578063d844471514610214578063f2fde38b14610229578063f46eccc41461024957600080fd5b80638da5cb5b1461019f57806390657147146101d4578063c6f69fbb146101f457600080fd5b806354fd4d50116100a557806354fd4d5014610116578063550fcdc914610175578063715018a61461018a57600080fd5b80630c984832146100cc57806340c10f19146100ee57806344df8e701461010e575b600080fd5b3480156100d857600080fd5b506100ec6100e7366004610b66565b610289565b005b3480156100fa57600080fd5b506100ec610109366004610b88565b610308565b6100ec61046a565b34801561012257600080fd5b5061015f6040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b60405161016c9190610bb2565b60405180910390f35b34801561018157600080fd5b5061015f610560565b34801561019657600080fd5b506100ec6105ee565b3480156101ab57600080fd5b5060335460405173ffffffffffffffffffffffffffffffffffffffff909116815260200161016c565b3480156101e057600080fd5b506100ec6101ef366004610cff565b610602565b34801561020057600080fd5b506100ec61020f366004610b66565b6107bf565b34801561022057600080fd5b5061015f61083b565b34801561023557600080fd5b506100ec610244366004610b66565b610848565b34801561025557600080fd5b50610279610264366004610b66565b60656020526000908152604090205460ff1681565b604051901515815260200161016c565b6102916108ff565b73fffff
fffffffffffffffffffffffffffffffffff811660008181526065602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055517f83b05b6735acd4b85e3bded8e72c851d1a87718f81e3c8e6f0c9d9a2baa88e469190a250565b3360009081526065602052604090205460ff16610351576040517f5fbc4ede00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f2e1a7d4d0000000000000000000000000000000000000000000000000000000081526004810182905273420000000000000000000000000000000000002990632e1a7d4d90602401600060405180830381600087803b1580156103b757600080fd5b505af11580156103cb573d6000803e3d6000fd5b5050505080826040516103dd90610b31565b73ffffffffffffffffffffffffffffffffffffffff90911681526020016040518091039082f0905080158015610417573d6000803e3d6000fd5b505060405181815273ffffffffffffffffffffffffffffffffffffffff83169033907fec89d80a36947288037745287dde87d62cd8c141d5323130b3d26d97d84004c79060200160405180910390a35050565b3360009081526065602052604090205460ff166104b3576040517f5fbc4ede00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b73420000000000000000000000000000000000002973ffffffffffffffffffffffffffffffffffffffff1663d0e30db0346040518263ffffffff1660e01b81526004016000604051808303818588803b15801561050f57600080fd5b505af1158015610523573d6000803e3d6000fd5b50506040513481523393507f875e07afd7ce17c6531b1a6b7b34829dcd8b7e6639448afbd6a8e29fa1422b829250602001905060405180910390a2565b6067805461056d90610d73565b80601f016020809104026020016040519081016040528092919081815260200182805461059990610d73565b80156105e65780601f106105bb576101008083540402835291602001916105e6565b820191906000526020600020905b8154815290600101906020018083116105c957829003601f168201915b505050505081565b6105f66108ff565b6106006000610980565b565b600054610100900460ff16158080156106225750600054600160ff909116105b8061063c5750303b15801561063c575060005460ff166001145b6106cd576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602e60248201527f496e697469616c6
97a61626c653a20636f6e747261637420697320616c72656160448201527f647920696e697469616c697a656400000000000000000000000000000000000060648201526084015b60405180910390fd5b600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00166001179055801561072b57600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff166101001790555b6107336109f7565b61073c84610848565b60666107488482610e15565b5060676107558382610e15565b5080156107b957600080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00ff169055604051600181527f7f26b83ff96e1f2b6a682f133852f6798a09c465da95921460cefb38474024989060200160405180910390a15b50505050565b6107c76108ff565b73ffffffffffffffffffffffffffffffffffffffff811660008181526065602052604080822080547fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff00169055517fdf6bf03dfab5b4ccec3ba95544b98d7ecc9d4b9293d8673e86cb6edb5ac0cb629190a250565b6066805461056d90610d73565b6108506108ff565b73ffffffffffffffffffffffffffffffffffffffff81166108f3576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602660248201527f4f776e61626c653a206e6577206f776e657220697320746865207a65726f206160448201527f646472657373000000000000000000000000000000000000000000000000000060648201526084016106c4565b6108fc81610980565b50565b60335473ffffffffffffffffffffffffffffffffffffffff163314610600576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820181905260248201527f4f776e61626c653a2063616c6c6572206973206e6f7420746865206f776e657260448201526064016106c4565b6033805473ffffffffffffffffffffffffffffffffffffffff8381167fffffffffffffffffffffffff0000000000000000000000000000000000000000831681179093556040519116919082907f8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e090600090a35050565b600054610100900460ff16610a8e576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e69746
9616c697a696e6700000000000000000000000000000000000000000060648201526084016106c4565b610600600054610100900460ff16610b28576040517f08c379a000000000000000000000000000000000000000000000000000000000815260206004820152602b60248201527f496e697469616c697a61626c653a20636f6e7472616374206973206e6f74206960448201527f6e697469616c697a696e6700000000000000000000000000000000000000000060648201526084016106c4565b61060033610980565b605980610f3083390190565b803573ffffffffffffffffffffffffffffffffffffffff81168114610b6157600080fd5b919050565b600060208284031215610b7857600080fd5b610b8182610b3d565b9392505050565b60008060408385031215610b9b57600080fd5b610ba483610b3d565b946020939093013593505050565b600060208083528351808285015260005b81811015610bdf57858101830151858201604001528201610bc3565b81811115610bf1576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe016929092016040019392505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600082601f830112610c6557600080fd5b813567ffffffffffffffff80821115610c8057610c80610c25565b604051601f83017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0908116603f01168101908282118183101715610cc657610cc6610c25565b81604052838152866020858801011115610cdf57600080fd5b836020870160208301376000602085830101528094505050505092915050565b600080600060608486031215610d1457600080fd5b610d1d84610b3d565b9250602084013567ffffffffffffffff80821115610d3a57600080fd5b610d4687838801610c54565b93506040860135915080821115610d5c57600080fd5b50610d6986828701610c54565b9150509250925092565b600181811c90821680610d8757607f821691505b602082108103610dc0577f4e487b7100000000000000000000000000000000000000000000000000000000600052602260045260246000fd5b50919050565b601f821115610e1057600081815260208120601f850160051c81016020861015610ded5750805b601f850160051c820191505b81811015610e0c57828155600101610df9565b5050505b505050565b815167ffffffffffffffff811115610e2f57610e2f610c25565b610e4381610e3d8454610d73565b84610dc6565b602080601f831160018
114610e965760008415610e605750858301515b7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600386901b1c1916600185901b178555610e0c565b6000858152602081207fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08616915b82811015610ee357888601518255948401946001909101908401610ec4565b5085821015610f1f57878501517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff600388901b60f8161c191681555b5050505050600190811b0190555056fe608060405260405160593803806059833981016040819052601e91602a565b806001600160a01b0316ff5b600060208284031215603b57600080fd5b81516001600160a01b0381168114605157600080fd5b939250505056fea164736f6c634300080f000a000000000000000000000000000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy LiquidityController Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca40000000000000000000000000000000000000000000000000000000000000040000000000000000000000000000000000000000000000000000000000000036d608060405234801561001057600080fd5b5061034d806100206000396000f3fe6080604052600436106100345760003560e01c80632e1a7d4d1461003957806354fd4d501461005b578063d0e30db0146100ba575b600080fd5b34801561004557600080fd5b5061005961005436600461025b565b6100c2565b005b34801561006757600080fd5b506100a46040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b6040516100b19190610274565b60405180910390f35b6100596101cb565b3373420000000000000000000000000000000000002a1461010f576040517f565369fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b47811115610149576040517f7b7f21e900000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b80336040516101579061024f565b73ffffffffffffffffffffffffffffffffffffffff90911681526020016040518091039082f0905080158015610191573d6000803e3d6000fd5b5050
60405181815233907fb1cce8684b4ffa8667b4577654e61ee3480d661ee9c27522ac80e211f6bd4d259060200160405180910390a250565b3373420000000000000000000000000000000000002a14610218576040517f565369fa00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60405134815233907f7ff07ce9a287649537e4b012e45cf012d90228b12e2b56bb03515a6b5436fcdf9060200160405180910390a2565b6059806102e883390190565b60006020828403121561026d57600080fd5b5035919050565b600060208083528351808285015260005b818110156102a157858101830151858201604001528201610285565b818111156102b3576000604083870101525b50601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01692909201604001939250505056fe608060405260405160593803806059833981016040819052601e91602a565b806001600160a01b0316ff5b600060208284031215603b57600080fd5b81516001600160a01b0381168114605157600080fd5b939250505056fea164736f6c634300080f000a00000000000000000000000000000000000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy NativeAssetLiquidity Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0x3659cfe6000000000000000000000000db7bae515c7d179ccacd3b4e0a1937eb84b69910", + "from": "0x0000000000000000000000000000000000000000", + "gasLimit": 50000, + "intent": "Upgrade L2ProxyAdmin Implementation", + "to": "0x4200000000000000000000000000000000000018" + }, + { + "data": 
"0xcdcb760a9b217f1b15f9c04316d04d42f550c340c5b2ee8e5ae05cab4f8cd9cb21970ca400000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000003c5c6104406040523480156200001257600080fd5b50604051620038bc380380620038bc8339810160408190526200003591620001ec565b30608090815281516001600160a01b0390811660a09081526020840151821660e09081526040850151831660c0908152606086015184166101009081529486015184166101209081529286015184166101409081529086015184166101609081529186015184166101809081529486015184166101a09081529286015184166101c09081529086015184166101e09081529186015184166102009081529486015184166102209081529286015184166102409081529086015184166102609081529186015184166102809081529486015184166102a09081529286015184166102c09081529086015184166102e09081529186015184166103009081529486015184166103209081529286015184166103409081529086015184166103609081529186015184166103809081529486015184166103a0529185015183166103c0529084015182166103e052830151811661040052910151166104205262000447565b6040516103a081016001600160401b0381118282101715620001c957634e487b7160e01b600052604160045260246000fd5b60405290565b80516001600160a01b0381168114620001e757600080fd5b919050565b60006103a082840312156200020057600080fd5b6200020a62000197565b6200021583620001cf565b81526200022560208401620001cf565b60208201526200023860408401620001cf565b60408201526200024b60608401620001cf565b60608201526200025e60808401620001cf565b60808201526200027160a08401620001cf565b60a08201526200028460c08401620001cf565b60c08201526200029760e08401620001cf565b60e0820152610100620002ac818501620001cf565b90820152610120620002c0848201620001cf565b90820152610140620002d4848201620001cf565b90820152610160620002e8848201620001cf565b90820152610180620002fc848201620001cf565b908201526101a062000310848201620001cf565b908201526101c062000324848201620001cf565b908201526101e062000338848201620001cf565b908201526102006200034c848201620001cf565b9082015261022062000360848201620001cf565b9082015261024062000374848201620001cf565b908201526102606200038884820
1620001cf565b908201526102806200039c848201620001cf565b908201526102a0620003b0848201620001cf565b908201526102c0620003c4848201620001cf565b908201526102e0620003d8848201620001cf565b90820152610300620003ec848201620001cf565b9082015261032062000400848201620001cf565b9082015261034062000414848201620001cf565b9082015261036062000428848201620001cf565b908201526103806200043c848201620001cf565b908201529392505050565b60805160a05160c05160e05161010051610120516101405161016051610180516101a0516101c0516101e05161020051610220516102405161026051610280516102a0516102c0516102e05161030051610320516103405161036051610380516103a0516103c0516103e051610400516104205161320e620006ae6000396000818161052a01526119dd01526000818161050101526113580152600081816104d8015261120a0152600081816104af01526112f1015260008181610486015261192301526000818161045d01526118e501526000818161043401526118a701526000818161040b01526118690152600081816103e2015261182b0152600081816103b901526117ed01526000818161039001526117af015260008181610367015261199f01526000818161033e015261196101526000818161031501526115ee0152600081816102ec015261156e0152600081816102c301526114ef01526000818161029a0152611771015260008181610271015261110a015260008181610248015261173301526000818161021f015261170d0152600081816101f601526116c50152600081816101ce015261169f0152600081816101a60152610fe601526000818161017e015261107201526000818161015601526113d201526000818161012e0152610ee001526000818160df0152610dda015260008181610107015261165701526000818160ba01528181610dfc01528181610f0201528181611008015281816110940152818161112c0152818161122c0152818161137a015281816113f4015281816115110152818161159001526116100152600061057d015261320e6000f3fe608060405234801561001057600080fd5b50600436106100415760003560e01c806354fd4d5014610046578063615f64fe14610098578063d55ec6971461055c575b600080fd5b6100826040518060400160405280600581526020017f312e302e3000000000000000000000000000000000000000000000000000000081525081565b60405161008f9190612c34565b60405180910390f35b604080516103a08101825273ffffffffffffffffffffffffffffffffffffffff7f00000
00000000000000000000000000000000000000000000000000000000000811682527f0000000000000000000000000000000000000000000000000000000000000000811660208301527f00000000000000000000000000000000000000000000000000000000000000008116828401527f0000000000000000000000000000000000000000000000000000000000000000811660608301527f0000000000000000000000000000000000000000000000000000000000000000811660808301527f0000000000000000000000000000000000000000000000000000000000000000811660a08301527f0000000000000000000000000000000000000000000000000000000000000000811660c08301527f0000000000000000000000000000000000000000000000000000000000000000811660e08301527f000000000000000000000000000000000000000000000000000000000000000081166101008301527f000000000000000000000000000000000000000000000000000000000000000081166101208301527f000000000000000000000000000000000000000000000000000000000000000081166101408301527f000000000000000000000000000000000000000000000000000000000000000081166101608301527f000000000000000000000000000000000000000000000000000000000000000081166101808301527f000000000000000000000000000000000000000000000000000000000000000081166101a08301527f000000000000000000000000000000000000000000000000000000000000000081166101c08301527f000000000000000000000000000000000000000000000000000000000000000081166101e08301527f000000000000000000000000000000000000000000000000000000000000000081166102008301527f000000000000000000000000000000000000000000000000000000000000000081166102208301527f000000000000000000000000000000000000000000000000000000000000000081166102408301527f000000000000000000000000000000000000000000000000000000000000000081166102608301527f000000000000000000000000000000000000000000000000000000000000000081166102808301527f000000000000000000000000000000000000000000000000000000000000000081166102a08301527f000000000000000000000000000000000000000000000000000000000000000081166102c08301527f000000000000000000000000000000000000000000000000000000000000000081166102e08301527f0000000000000000000000000000000000000000000000000000000000000
00081166103008301527f000000000000000000000000000000000000000000000000000000000000000081166103208301527f000000000000000000000000000000000000000000000000000000000000000081166103408301527f000000000000000000000000000000000000000000000000000000000000000081166103608301527f000000000000000000000000000000000000000000000000000000000000000016610380820152905161008f9190612c47565b610564610566565b005b73ffffffffffffffffffffffffffffffffffffffff7f00000000000000000000000000000000000000000000000000000000000000001630036105d5576040517fada337cf00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006105df6105ed565b90506105ea81610d9b565b50565b6105f5612a85565b73420000000000000000000000000000000000001573ffffffffffffffffffffffffffffffffffffffff1663213268496040518163ffffffff1660e01b8152600401602060405180830381865afa158015610654573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906106789190612eaf565b15156101608201526040805160208082018084527fdb505d800000000000000000000000000000000000000000000000000000000090529151909182917342000000000000000000000000000000000000079163db505d809160248086019291908187030181865afa1580156106f2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107169190612ef3565b73ffffffffffffffffffffffffffffffffffffffff16905281526040805160208082018084527fc89701a20000000000000000000000000000000000000000000000000000000090529151909182917342000000000000000000000000000000000000109163c89701a29160248086019291908187030181865afa1580156107a2573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906107c69190612ef3565b73ffffffffffffffffffffffffffffffffffffffff168152508160200181905250604051806020016040528073420000000000000000000000000000000000001473ffffffffffffffffffffffffffffffffffffffff1663c89701a26040518163ffffffff1660e01b8152600401602060405180830381865afa158015610851573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906108759190612ef3565b73ffffffffffffffffffffffffffffffffffffffff168
152508160400181905250604051806020016040528073420000000000000000000000000000000000001273ffffffffffffffffffffffffffffffffffffffff1663e78cea926040518163ffffffff1660e01b8152600401602060405180830381865afa158015610900573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109249190612ef3565b73ffffffffffffffffffffffffffffffffffffffff1690526060820152604080518082018083527fe78cea92000000000000000000000000000000000000000000000000000000009052905181907342000000000000000000000000000000000000179063e78cea92906044808501916020918187030181865afa1580156109b0573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906109d49190612ef3565b73ffffffffffffffffffffffffffffffffffffffff16815260200173420000000000000000000000000000000000001773ffffffffffffffffffffffffffffffffffffffff1663d23822426040518163ffffffff1660e01b8152600401602060405180830381865afa158015610a4e573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610a729190612f10565b90526080820152610a96734200000000000000000000000000000000000011611a01565b60a0820152610ab8734200000000000000000000000000000000000019611a01565b60c0820152610ada73420000000000000000000000000000000000001a611a01565b60e0820152610afc73420000000000000000000000000000000000001b611a01565b61010082015261016081015115610cec57600073420000000000000000000000000000000000002a905060405180606001604052808273ffffffffffffffffffffffffffffffffffffffff16638da5cb5b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610b7c573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610ba09190612ef3565b73ffffffffffffffffffffffffffffffffffffffff1681526020018273ffffffffffffffffffffffffffffffffffffffff1663d84447156040518163ffffffff1660e01b8152600401600060405180830381865afa158015610c06573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052610c4c9190810190612fea565b81526020018273ffffffffffffffffffffffffffffffffffffffff1663550fcdc96040518163fff
fffff1660e01b8152600401600060405180830381865afa158015610c9c573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052610ce29190810190612fea565b9052610120830152505b604051806020016040528073420000000000000000000000000000000000002b73ffffffffffffffffffffffffffffffffffffffff1663d61a398b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015610d56573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190610d7a9190612ef3565b73ffffffffffffffffffffffffffffffffffffffff16905261014082015290565b80515160405173ffffffffffffffffffffffffffffffffffffffff9091166024820152610e9e90734200000000000000000000000000000000000007907f0000000000000000000000000000000000000000000000000000000000000000907f000000000000000000000000000000000000000000000000000000000000000090604401604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fc4d66de80000000000000000000000000000000000000000000000000000000017905260006014611bb9565b60208101515160405173ffffffffffffffffffffffffffffffffffffffff9091166024820152610fa490734200000000000000000000000000000000000010907f0000000000000000000000000000000000000000000000000000000000000000907f0000000000000000000000000000000000000000000000000000000000000000906044015b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fc4d66de800000000000000000000000000000000000000000000000000000000179052600080611bb9565b60408082015151905173ffffffffffffffffffffffffffffffffffffffff909116602482015261103090734200000000000000000000000000000000000014907f0000000000000000000000000000000000000000000000000000000000000000907f000000000000000000000000000000000000000000000000000000000000000090604401610f26565b60608101515160405173ffffffffffffffffffffffffffffffffffffffff90911660248201526110bc90734
200000000000000000000000000000000000012907f0000000000000000000000000000000000000000000000000000000000000000907f000000000000000000000000000000000000000000000000000000000000000090604401610f26565b6080810151805160209091015160405173ffffffffffffffffffffffffffffffffffffffff909216602483015260448201526111ce90734200000000000000000000000000000000000017907f0000000000000000000000000000000000000000000000000000000000000000907f000000000000000000000000000000000000000000000000000000000000000090606401604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fcd6dc6870000000000000000000000000000000000000000000000000000000017905260016000611bb9565b80610160015115611315576101208101518051602082015160409283015192516112d79373420000000000000000000000000000000000002a937f0000000000000000000000000000000000000000000000000000000000000000937f000000000000000000000000000000000000000000000000000000000000000093611259939060240161303b565b604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167f9065714700000000000000000000000000000000000000000000000000000000179052600080611bb9565b6113157342000000000000000000000000000000000000297f0000000000000000000000000000000000000000000000000000000000000000612249565b6101408101515160405173ffffffffffffffffffffffffffffffffffffffff90911660248201526113a29073420000000000000000000000000000000000002b907f0000000000000000000000000000000000000000000000000000000000000000907f000000000000000000000000000000000000000000000000000000000000000090604401610f26565b60a08101518051602082015160409283015192516114bf93734200000000000000000000000000000000000011937f0000000000000000000000000000000000000000000000000000000000000000937f00000000000000000000000000000000000000000000000000000000000000009361142193906024016130b5565b604080517ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff
fffe08184030181529190526020810180517bffffffffffffffffffffffffffffffffffffffffffffffffffffffff167fb49dc741000000000000000000000000000000000000000000000000000000001790527ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a006000611bb9565b60c081015180516020820151604092830151925161153e93734200000000000000000000000000000000000019937f0000000000000000000000000000000000000000000000000000000000000000937f00000000000000000000000000000000000000000000000000000000000000009361142193906024016130b5565b60e08101518051602082015160409283015192516115bd9373420000000000000000000000000000000000001a937f0000000000000000000000000000000000000000000000000000000000000000937f00000000000000000000000000000000000000000000000000000000000000009361142193906024016130b5565b61010081015180516020820151604092830151925161163d9373420000000000000000000000000000000000001b937f0000000000000000000000000000000000000000000000000000000000000000937f00000000000000000000000000000000000000000000000000000000000000009361142193906024016130b5565b61167b73420000000000000000000000000000000000000f7f0000000000000000000000000000000000000000000000000000000000000000612249565b6116e97342000000000000000000000000000000000000158261016001516116c3577f0000000000000000000000000000000000000000000000000000000000000000612249565b7f0000000000000000000000000000000000000000000000000000000000000000612249565b611757734200000000000000000000000000000000000016826101600151611731577f0000000000000000000000000000000000000000000000000000000000000000612249565b7f0000000000000000000000000000000000000000000000000000000000000000612249565b6117957342000000000000000000000000000000000000187f0000000000000000000000000000000000000000000000000000000000000000612249565b6117d37342000000000000000000000000000000000000227f0000000000000000000000000000000000000000000000000000000000000000612249565b6118117342000000000000000000000000000000000000237f0000000000000000000000000000000000000000000000000000000000000000612249565b61184f7342000000000000000000000000000000000000247f000
0000000000000000000000000000000000000000000000000000000000000612249565b61188d7342000000000000000000000000000000000000257f0000000000000000000000000000000000000000000000000000000000000000612249565b6118cb7342000000000000000000000000000000000000267f0000000000000000000000000000000000000000000000000000000000000000612249565b6119097342000000000000000000000000000000000000277f0000000000000000000000000000000000000000000000000000000000000000612249565b6119477342000000000000000000000000000000000000287f0000000000000000000000000000000000000000000000000000000000000000612249565b6119857342000000000000000000000000000000000000207f0000000000000000000000000000000000000000000000000000000000000000612249565b6119c37342000000000000000000000000000000000000217f0000000000000000000000000000000000000000000000000000000000000000612249565b6105ea73420000000000000000000000000000000000002c7f0000000000000000000000000000000000000000000000000000000000000000612249565b611a2260408051606081018252600080825260208201819052909182015290565b600082905060405180606001604052808273ffffffffffffffffffffffffffffffffffffffff16630d9019e16040518163ffffffff1660e01b8152600401602060405180830381865afa158015611a7d573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611aa19190612ef3565b73ffffffffffffffffffffffffffffffffffffffff1681526020018273ffffffffffffffffffffffffffffffffffffffff1663d3e5792b6040518163ffffffff1660e01b8152600401602060405180830381865afa158015611b07573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b2b9190612f10565b81526020018273ffffffffffffffffffffffffffffffffffffffff1663d0e12f906040518163ffffffff1660e01b8152600401602060405180830381865afa158015611b7b573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611b9f919061311e565b6001811115611bb057611bb0613086565b90529392505050565b611bc28661250c565b15612241576040517f204e1c7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff87166004820152600090734200000000000
0000000000000000000000000189063204e1c7a90602401602060405180830381865afa158015611c48573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611c6c9190612ef3565b905073ffffffffffffffffffffffffffffffffffffffff871673420000000000000000000000000000000000001814801590611cbe575073ffffffffffffffffffffffffffffffffffffffff81163b15155b8015611def5750611def8773ffffffffffffffffffffffffffffffffffffffff166354fd4d506040518163ffffffff1660e01b8152600401600060405180830381865afa158015611d13573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611d599190810190612fea565b8773ffffffffffffffffffffffffffffffffffffffff166354fd4d506040518163ffffffff1660e01b8152600401600060405180830381865afa158015611da4573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0168201604052611dea9190810190612fea565b612555565b15611e43576040517ff8ce5d1600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff881660048201526024015b60405180910390fd5b6040517f3659cfe600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8681166004830152881690633659cfe690602401600060405180830381600087803b158015611eac57600080fd5b505af1158015611ec0573d6000803e3d6000fd5b50506040517fa6ed563e000000000000000000000000000000000000000000000000000000008152600481018690526000925073ffffffffffffffffffffffffffffffffffffffff8a16915063a6ed563e90602401602060405180830381865afa158015611f32573d6000803e3d6000fd5b505050506040513d601f19601f82011682018060405250810190611f569190612f10565b90506000611f6860ff8516600861313f565b6040517f4e91db080000000000000000000000000000000000000000000000000000000081526004810187905260ff90911b198381166024830152915073ffffffffffffffffffffffffffffffffffffffff8a1690634e91db0890604401600060405180830381600087803b158015611fe057600080fd5b505af1158015611ff4573d6000803e3d6000fd5b50506040517fa6e
d563e0000000000000000000000000000000000000000000000000000000081527ff0c57e16840df040f15088dc2f81fe391c3923bec73e23a9662efc9c229c6a006004820181905292506000915073ffffffffffffffffffffffffffffffffffffffff8c169063a6ed563e90602401602060405180830381865afa158015612088573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906120ac9190612f10565b90508060ff604082901c16156120ee576040517fc996d78400000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b6040517f4e91db08000000000000000000000000000000000000000000000000000000008152600481018490527fffffffffffffffffffffffffffffffffffffffffffffffff000000000000000082811660248301529073ffffffffffffffffffffffffffffffffffffffff8e1690634e91db0890604401600060405180830381600087803b15801561218057600080fd5b505af1158015612194573d6000803e3d6000fd5b505050508c73ffffffffffffffffffffffffffffffffffffffff16634f1ef2868d8c6040518363ffffffff1660e01b81526004016121d39291906131a3565b6000604051808303816000875af11580156121f2573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526122389190810190612fea565b50505050505050505b505050505050565b6122528261250c565b61225a575050565b6040517f204e1c7a00000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff831660048201526000907342000000000000000000000000000000000000189063204e1c7a90602401602060405180830381865afa1580156122db573d6000803e3d6000fd5b505050506040513d601f19601f820116820180604052508101906122ff9190612ef3565b905073ffffffffffffffffffffffffffffffffffffffff831673420000000000000000000000000000000000001814801590612351575073ffffffffffffffffffffffffffffffffffffffff81163b15155b801561243757506124378373ffffffffffffffffffffffffffffffffffffffff166354fd4d506040518163ffffffff1660e01b8152600401600060405180830381865afa1580156123a6573d6000803e3d6000fd5b505050506040513d6000823e601f3d9081017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe01682016040526123ec919
0810190612fea565b8373ffffffffffffffffffffffffffffffffffffffff166354fd4d506040518163ffffffff1660e01b8152600401600060405180830381865afa158015611da4573d6000803e3d6000fd5b15612486576040517ff8ce5d1600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff84166004820152602401611e3a565b6040517f3659cfe600000000000000000000000000000000000000000000000000000000815273ffffffffffffffffffffffffffffffffffffffff8381166004830152841690633659cfe690602401600060405180830381600087803b1580156124ef57600080fd5b505af1158015612503573d6000803e3d6000fd5b50505050505050565b60007208400000000000000000000000000000000000600b83901c721fffffffffffffffffffffffffffffffffffff1614801561254f575061254d8261257c565b155b92915050565b600061256183836125e4565b15801561257557506125738383612634565b155b9392505050565b600073ffffffffffffffffffffffffffffffffffffffff8216734200000000000000000000000000000000000042148061254f575073ffffffffffffffffffffffffffffffffffffffff82167342000000000000000000000000000000000000061492915050565b6000806125f0846126a9565b905060006125fd846126a9565b80518351919250148015612618575080602001518260200151145b801561262b575080604001518260400151145b95945050505050565b600080612640846126a9565b9050600061264d846126a9565b805183519192501180612673575080518251148015612673575080602001518260200151105b8061262b575080518251148015612691575080602001518260200151145b801561262b5750604090810151910151109392505050565b6126cd60405180606001604052806000815260200160008152602001600081525090565b600061270e836040518060400160405280600181526020017f2e00000000000000000000000000000000000000000000000000000000000000815250612875565b905060038151101561274c576040517f9eda858c00000000000000000000000000000000000000000000000000000000815260040160405180910390fd5b60006127a782600281518110612764576127646131d2565b60200260200101516040518060400160405280600181526020017f2d00000000000000000000000000000000000000000000000000000000000000815250612875565b90506000612804826000815181106127c1576127c16131d2565b602002602001015160405
18060400160405280600181526020017f2b00000000000000000000000000000000000000000000000000000000000000815250612875565b9050604051806060016040528061283485600081518110612827576128276131d2565b6020026020010151612920565b815260200161284f85600181518110612827576128276131d2565b815260200161286a83600081518110612827576128276131d2565b905295945050505050565b606060006128838484612998565b9050601f1960208201600183510160051b81018651838201526001845101845260005b8251606084528181146128eb5760405182820380825286601f8201165b8b8501810151838201528701806128c35750600082820160200152603f018616810160405284525b8751602094909401930190508183106128a65750505050809150825161291957602081019150600281510382525b5092915050565b80516000907f1999999999999999999999999999999999999999999999999999999999999999825b600181019050603060ff82870151160382851185600a028281019650600983118188108317171586029550505050828110612948575050806129925763101827966000526004601cfd5b50919050565b606082518251818111612a7d5760208501945060208401935060206040510192508460018284880103016000602084106129d157508286205b601f841660200360031b87515b8951818118831c612a33578315612a115783878c2014612a115760018b019a50848b10612a0b5750612a42565b506129de565b858b038952998601996020909801978615612a3357848b10612a0b5750612a42565b5060018a019950838a106129de575b5050604080517fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe08189030160051c8152602090970190525050505b505092915050565b604080516101a08101825260006101808201818152825282516020808201855282825280840191909152835180820185528281528385015283518082018552828152606084015283518085019094528184528301529060808201908152602001612b0660408051606081018252600080825260208201819052909182015290565b8152602001612b2c60408051606081018252600080825260208201819052909182015290565b8152602001612b5260408051606081018252600080825260208201819052909182015290565b8152602001612b7860408051606081018252600080825260208201819052909182015290565b81526040805160608082018352600082526020828101829052928201529101908152604080516020818101909252600081529101908152600060209
091015290565b60005b83811015612bd5578181015183820152602001612bbd565b83811115612be4576000848401525b50505050565b60008151808452612c02816020860160208601612bba565b601f017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe0169290920160200192915050565b6020815260006125756020830184612bea565b815173ffffffffffffffffffffffffffffffffffffffff1681526103a081016020830151612c8d602084018273ffffffffffffffffffffffffffffffffffffffff169052565b506040830151612cb5604084018273ffffffffffffffffffffffffffffffffffffffff169052565b506060830151612cdd606084018273ffffffffffffffffffffffffffffffffffffffff169052565b506080830151612d05608084018273ffffffffffffffffffffffffffffffffffffffff169052565b5060a0830151612d2d60a084018273ffffffffffffffffffffffffffffffffffffffff169052565b5060c0830151612d5560c084018273ffffffffffffffffffffffffffffffffffffffff169052565b5060e0830151612d7d60e084018273ffffffffffffffffffffffffffffffffffffffff169052565b506101008381015173ffffffffffffffffffffffffffffffffffffffff90811691840191909152610120808501518216908401526101408085015182169084015261016080850151821690840152610180808501518216908401526101a0808501518216908401526101c0808501518216908401526101e08085015182169084015261020080850151821690840152610220808501518216908401526102408085015182169084015261026080850151821690840152610280808501518216908401526102a0808501518216908401526102c0808501518216908401526102e08085015182169084015261030080850151821690840152610320808501518216908401526103408085015182169084015261036080850151821690840152610380808501519182168185015290612a7d565b600060208284031215612ec157600080fd5b8151801515811461257557600080fd5b73ffffffffffffffffffffffffffffffffffffffff811681146105ea57600080fd5b600060208284031215612f0557600080fd5b815161257581612ed1565b600060208284031215612f2257600080fd5b5051919050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052604160045260246000fd5b600067ffffffffffffffff80841115612f7357612f73612f29565b604051601f85017fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe09
08116603f01168101908282118183101715612fb957612fb9612f29565b81604052809350858152868686011115612fd257600080fd5b612fe0866020830187612bba565b5050509392505050565b600060208284031215612ffc57600080fd5b815167ffffffffffffffff81111561301357600080fd5b8201601f8101841361302457600080fd5b61303384825160208401612f58565b949350505050565b73ffffffffffffffffffffffffffffffffffffffff8416815260606020820152600061306a6060830185612bea565b828103604084015261307c8185612bea565b9695505050505050565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b73ffffffffffffffffffffffffffffffffffffffff84168152602081018390526060810160028310613110577f4e487b7100000000000000000000000000000000000000000000000000000000600052602160045260246000fd5b826040830152949350505050565b60006020828403121561313057600080fd5b81516002811061257557600080fd5b6000817fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff048311821515161561319e577f4e487b7100000000000000000000000000000000000000000000000000000000600052601160045260246000fd5b500290565b73ffffffffffffffffffffffffffffffffffffffff831681526040602082015260006130336040830184612bea565b7f4e487b7100000000000000000000000000000000000000000000000000000000600052603260045260246000fdfea164736f6c634300080f000a0000000000000000000000002a5a3eabb9fd571a3af0299eebdf8eaafe29a9140000000000000000000000004e384c0418917d263a44531b8a0d3840f7486963000000000000000000000000547d0fba434877d7237d511cf87fabe2ee26b15200000000000000000000000094d9d712a447c0f5d2a12c656dcabaed41a7709f000000000000000000000000bf0265a2420c5f4a328b715570cc6d681ed92d21000000000000000000000000c053fc0155bf8bda5b568af53276e538f0ea4d58000000000000000000000000c1a46399783394e621f658bae23d8bdd58ac1236000000000000000000000000f57b0b99a3d5c28515477c51e9e1d17492835ac8000000000000000000000000dde1b9d8022e9e9c81e82a63cee81384acc820aa00000000000000000000000027e51b2254433a3284d9ba73ea551c397db2a124000000000000000000000000a0f4ffff79a0a3e039fcbef738751efba8e84f96000000000000000000000000ddb52cd2a61e383cc052b
6b14a1a1eda89f61749000000000000000000000000db7bae515c7d179ccacd3b4e0a1937eb84b69910000000000000000000000000cae7e3cb9ee182494c9c6014abe70dba5259a404000000000000000000000000cae7e3cb9ee182494c9c6014abe70dba5259a404000000000000000000000000dd85b8b8dfcec0d4d695956f28291ffe18ea6a4100000000000000000000000070de55bc0bfbc52c5d0cca1da5816c2428886a34000000000000000000000000bec660b456b84a081e90af29be43385bda5bf7b600000000000000000000000093a8a7a9c98cb998d88dba3373a6c7f8ee2e8a4600000000000000000000000037dc2fe754052a9fac35f17282599fafbeb9f423000000000000000000000000784f1fae11f1c3a9c413423fe1b370a3636b8d560000000000000000000000002f76618143d9d2731c56778192d3893864b423d700000000000000000000000033b5beb844f8a500b3d3a88d1562af8669b88cad0000000000000000000000004c45f68f1f8c84cd6c8d8b9cdea4d6e485ac1d28000000000000000000000000e15df75abe345199eb6ddcb65b77e2e160ca9827000000000000000000000000dda87ef358082ab3f4ba8982290c671efdc4d159000000000000000000000000400f88f8ac1af3b6e6689738710100f51bf50402000000000000000000000000bdf761cb756fd9f39f1c45164dfaffaa32bfe4ca000000000000000000000000906835344844979ffd3a752eaa23728d513db00b00000000", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 375000, + "intent": "Deploy L2ContractsManager Implementation", + "to": "0x420000000000000000000000000000000000002C" + }, + { + "data": "0x7c36f37e0000000000000000000000000eefea7ca613bd5c0353833778ac9b6d676abd29", + "from": "0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001", + "gasLimit": 18446744073709551615, + "intent": "L2ProxyAdmin Upgrade Predeploys", + "to": "0x4200000000000000000000000000000000000018" + } + ] +} \ No newline at end of file diff --git a/packages/contracts-bedrock/src/L2/L2ContractsManager.sol b/packages/contracts-bedrock/src/L2/L2ContractsManager.sol index c4f8b3c0ffe14..056d32d093244 100644 --- a/packages/contracts-bedrock/src/L2/L2ContractsManager.sol +++ b/packages/contracts-bedrock/src/L2/L2ContractsManager.sol @@ -7,6 +7,7 @@ import { ICrossDomainMessenger } from 
"interfaces/universal/ICrossDomainMessenge import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; import { IERC721Bridge } from "interfaces/universal/IERC721Bridge.sol"; import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IOptimismMintableERC721Factory } from "interfaces/L2/IOptimismMintableERC721Factory.sol"; import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; import { ILiquidityController } from "interfaces/L2/ILiquidityController.sol"; import { IFeeSplitter } from "interfaces/L2/IFeeSplitter.sol"; @@ -185,6 +186,12 @@ contract L2ContractsManager is ISemver { bridge: IOptimismMintableERC20Factory(Predeploys.OPTIMISM_MINTABLE_ERC20_FACTORY).bridge() }); + // OptimismMintableERC721Factory + fullConfig_.mintableERC721Factory = L2ContractsManagerTypes.MintableERC721FactoryConfig({ + bridge: IOptimismMintableERC721Factory(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY).bridge(), + remoteChainID: IOptimismMintableERC721Factory(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY).remoteChainID() + }); + // SequencerFeeVault fullConfig_.sequencerFeeVault = L2ContractsManagerUtils.readFeeVaultConfig(Predeploys.SEQUENCER_FEE_WALLET); @@ -259,6 +266,19 @@ contract L2ContractsManager is ISemver { 0 ); + // OptimismMintableERC721Factory + L2ContractsManagerUtils.upgradeToAndCall( + Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY, + OPTIMISM_MINTABLE_ERC721_FACTORY_IMPL, + STORAGE_SETTER_IMPL, + abi.encodeCall( + IOptimismMintableERC721Factory.initialize, + (_config.mintableERC721Factory.bridge, _config.mintableERC721Factory.remoteChainID) + ), + bytes32(uint256(1)), // Initializable storage is at slot 1 due to mapping at slot 0 + 0 + ); + // LiquidityController (only on custom gas token networks) if (_config.isCustomGasToken) { L2ContractsManagerUtils.upgradeToAndCall( @@ -369,9 +389,6 @@ contract L2ContractsManager is ISemver { Predeploys.L2_TO_L1_MESSAGE_PASSER, _config.isCustomGasToken ? 
L2_TO_L1_MESSAGE_PASSER_CGT_IMPL : L2_TO_L1_MESSAGE_PASSER_IMPL ); - L2ContractsManagerUtils.upgradeTo( - Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY, OPTIMISM_MINTABLE_ERC721_FACTORY_IMPL - ); L2ContractsManagerUtils.upgradeTo(Predeploys.PROXY_ADMIN, PROXY_ADMIN_IMPL); // TODO(#18838): Add dev flagging for CrossL2Inbox and L2ToL2CrossDomainMessenger once DevFeatures is // implemented for L2. diff --git a/packages/contracts-bedrock/src/L2/L2ProxyAdmin.sol b/packages/contracts-bedrock/src/L2/L2ProxyAdmin.sol index 7d7dfe2579ac4..f44bead1127dc 100644 --- a/packages/contracts-bedrock/src/L2/L2ProxyAdmin.sol +++ b/packages/contracts-bedrock/src/L2/L2ProxyAdmin.sol @@ -34,8 +34,9 @@ contract L2ProxyAdmin is ProxyAdmin, ISemver { string public constant version = "1.0.0"; /// @notice The constructor for the L2ProxyAdmin contract. - /// @param _owner Address of the initial owner of this contract. - constructor(address _owner) ProxyAdmin(_owner) { } + /// @dev The owner can be set to address(0), since this contract is deployed behind a proxy. + /// The proxy's owner is set via storage manipulation in the L2Genesis. + constructor() ProxyAdmin(address(0)) { } /// @notice Upgrades the predeploys via delegatecall to the l2ContractsManager contract. /// @param _l2ContractsManager Address of the l2ContractsManager contract. 
diff --git a/packages/contracts-bedrock/src/L2/OptimismMintableERC721Factory.sol b/packages/contracts-bedrock/src/L2/OptimismMintableERC721Factory.sol index 3838e590587dd..407a5c2e6b61e 100644 --- a/packages/contracts-bedrock/src/L2/OptimismMintableERC721Factory.sol +++ b/packages/contracts-bedrock/src/L2/OptimismMintableERC721Factory.sol @@ -2,26 +2,35 @@ pragma solidity 0.8.15; // Contracts +import { Initializable } from "@openzeppelin/contracts/proxy/utils/Initializable.sol"; import { OptimismMintableERC721 } from "src/L2/OptimismMintableERC721.sol"; // Interfaces import { ISemver } from "interfaces/universal/ISemver.sol"; +/// @notice Legacy mapping storage layout for OptimismMintableERC721Factory. +contract OptimismMintableERC721FactoryLegacyMapping { + /// @notice Tracks addresses created by this factory. + mapping(address => bool) public isOptimismMintableERC721; +} + /// @custom:proxied true /// @custom:predeploy 0x4200000000000000000000000000000000000017 /// @title OptimismMintableERC721Factory /// @notice Factory contract for creating OptimismMintableERC721 contracts. -contract OptimismMintableERC721Factory is ISemver { - /// @custom:legacy true +contract OptimismMintableERC721Factory is ISemver, OptimismMintableERC721FactoryLegacyMapping, Initializable { /// @notice Address of the ERC721 bridge on this network. - address public immutable BRIDGE; + /// @custom:network-specific + address public bridge; - /// @custom:legacy true /// @notice Chain ID for the remote network. - uint256 public immutable REMOTE_CHAIN_ID; + /// @custom:network-specific + uint256 public remoteChainID; - /// @notice Tracks addresses created by this factory. - mapping(address => bool) public isOptimismMintableERC721; + /// @notice Reserve extra slots in the storage layout for future upgrades. + /// A gap size of 46 was chosen here, so that the first slot used in a child contract + /// would be a multiple of 50. 
+ uint256[46] private __gap; /// @notice Emitted whenever a new OptimismMintableERC721 contract is created. /// @param localToken Address of the token on the this domain. @@ -30,27 +39,36 @@ contract OptimismMintableERC721Factory is ISemver { event OptimismMintableERC721Created(address indexed localToken, address indexed remoteToken, address deployer); /// @notice Semantic version. - /// @custom:semver 1.4.2 - string public constant version = "1.4.2"; + /// @custom:semver 1.5.0 + string public constant version = "1.5.0"; + + /// @notice Constructs the OptimismMintableERC721Factory contract. + constructor() { + _disableInitializers(); + } - /// @notice The semver MUST be bumped any time that there is a change in - /// the OptimismMintableERC721 token contract since this contract - /// is responsible for deploying OptimismMintableERC721 contracts. + /// @notice Initializes the contract. /// @param _bridge Address of the ERC721 bridge on this network. - /// @param _remoteChainId Chain ID for the remote network. - constructor(address _bridge, uint256 _remoteChainId) { - BRIDGE = _bridge; - REMOTE_CHAIN_ID = _remoteChainId; + /// @param _remoteChainID Chain ID for the remote network. + function initialize(address _bridge, uint256 _remoteChainID) external initializer { + bridge = _bridge; + remoteChainID = _remoteChainID; } - /// @notice Address of the ERC721 bridge on this network. - function bridge() external view returns (address) { - return BRIDGE; + /// @notice Getter function for the address of the ERC721 bridge on this network. + /// Public getter is legacy and will be removed in the future. Use `bridge` instead. + /// @return Address of the ERC721 bridge on this network. + /// @custom:legacy + function BRIDGE() external view returns (address) { + return bridge; } - /// @notice Chain ID for the remote network. - function remoteChainID() external view returns (uint256) { - return REMOTE_CHAIN_ID; + /// @notice Getter function for the chain ID of the remote network. 
+ /// Public getter is legacy and will be removed in the future. Use `remoteChainID` instead. + /// @return Chain ID for the remote network. + /// @custom:legacy + function REMOTE_CHAIN_ID() external view returns (uint256) { + return remoteChainID; } /// @notice Creates an instance of the standard ERC721. @@ -69,7 +87,7 @@ contract OptimismMintableERC721Factory is ISemver { bytes32 salt = keccak256(abi.encode(_remoteToken, _name, _symbol)); address localToken = - address(new OptimismMintableERC721{ salt: salt }(BRIDGE, REMOTE_CHAIN_ID, _remoteToken, _name, _symbol)); + address(new OptimismMintableERC721{ salt: salt }(bridge, remoteChainID, _remoteToken, _name, _symbol)); isOptimismMintableERC721[localToken] = true; emit OptimismMintableERC721Created(localToken, _remoteToken, msg.sender); diff --git a/packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol b/packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol index 21ff9181fcfab..7d200c5fe37cb 100644 --- a/packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol +++ b/packages/contracts-bedrock/src/libraries/L2ContractsManagerTypes.sol @@ -30,6 +30,12 @@ library L2ContractsManagerTypes { address bridge; } + /// @notice Configuration for OptimismMintableERC721Factory. + struct MintableERC721FactoryConfig { + address bridge; + uint256 remoteChainID; + } + /// @notice Configuration for a FeeVault contract. 
struct FeeVaultConfig { address recipient; @@ -56,6 +62,7 @@ library L2ContractsManagerTypes { StandardBridgeConfig standardBridge; ERC721BridgeConfig erc721Bridge; MintableERC20FactoryConfig mintableERC20Factory; + MintableERC721FactoryConfig mintableERC721Factory; FeeVaultConfig sequencerFeeVault; FeeVaultConfig baseFeeVault; FeeVaultConfig l1FeeVault; diff --git a/packages/contracts-bedrock/src/libraries/NetworkUpgradeTxns.sol b/packages/contracts-bedrock/src/libraries/NetworkUpgradeTxns.sol new file mode 100644 index 0000000000000..06d405aff40ab --- /dev/null +++ b/packages/contracts-bedrock/src/libraries/NetworkUpgradeTxns.sol @@ -0,0 +1,103 @@ +// SPDX-License-Identifier: MIT +pragma solidity ^0.8.15; + +// Utilities +import { Vm } from "forge-std/Vm.sol"; +import { stdJson } from "forge-std/StdJson.sol"; + +/// @title NetworkUpgradeTxns +/// @notice Standard library for generating Network Upgrade Transaction (NUT) artifacts. +/// Generates simplified JSON format that is converted to deposit transactions by op-node. +library NetworkUpgradeTxns { + using stdJson for string; + + Vm private constant vm = Vm(address(uint160(uint256(keccak256("hevm cheat code"))))); + + /// @notice Metadata for the Network Upgrade Transaction bundle. + /// @param version Bundle format version for compatibility tracking. + struct BundleMetadata { + string version; + } + + /// @notice Represents a single Network Upgrade Transaction + /// This struct is serialized to JSON and later converted to a DepositTx by op-node. + /// See op-node/rollup/derive/parse_upgrade_transactions.go for conversion logic. + /// @dev Fields MUST be in alphabetical order for JSON parseJson/abi.decode to work. + /// @param data The calldata for the transaction. + /// @param from The address of the sender of the transaction. + /// @param gasLimit The gas limit for the transaction. + /// @param intent Human-readable description of the transaction's purpose. 
+ /// @param to The address of the recipient of the transaction. + struct NetworkUpgradeTxn { + bytes data; + address from; + uint64 gasLimit; + string intent; + address to; + } + + /// @notice Writes the nut bundle to a JSON file. + /// @param _txns The array of upgrade transactions. + /// @param _metadata The bundle metadata. + /// @param _outputPath The file path for the output JSON. + function writeArtifact( + NetworkUpgradeTxn[] memory _txns, + BundleMetadata memory _metadata, + string memory _outputPath + ) + internal + { + // Build transactions array + string memory txnsArray = "["; + + for (uint256 i = 0; i < _txns.length; i++) { + string memory txnJson = serializeTxn(_txns[i], i); + txnsArray = string.concat(txnsArray, txnJson); + if (i < _txns.length - 1) { + txnsArray = string.concat(txnsArray, ","); + } + } + + txnsArray = string.concat(txnsArray, "]"); + + // Build metadata object + string memory metadataKey = "metadata"; + string memory metadataJson = vm.serializeString(metadataKey, "version", _metadata.version); + + // Build final bundle manually to avoid string escaping of the transactions array + string memory finalJson = string.concat("{\"metadata\":", metadataJson, ",\"transactions\":", txnsArray, "}"); + + // Writes the final serialized JSON bundle to file + vm.writeJson(finalJson, _outputPath); + } + + /// @notice Serializes a single transaction to JSON. + /// @param _txn The transaction to serialize. + /// @param _index The transaction index. + /// @return serializedJson_ The serialized JSON string. 
+ function serializeTxn( + NetworkUpgradeTxn memory _txn, + uint256 _index + ) + internal + returns (string memory serializedJson_) + { + string memory key = vm.toString(_index); + + vm.serializeBytes(key, "data", _txn.data); + vm.serializeAddress(key, "from", _txn.from); + vm.serializeUint(key, "gasLimit", uint256(_txn.gasLimit)); + vm.serializeString(key, "intent", _txn.intent); + serializedJson_ = vm.serializeAddress(key, "to", _txn.to); + } + + /// @notice Reads upgrade transactions from a JSON file. + /// @param _inputPath The file path for the input JSON. + /// @return txns_ The array of upgrade transactions. + function readArtifact(string memory _inputPath) internal view returns (NetworkUpgradeTxn[] memory txns_) { + string memory json = vm.readFile(_inputPath); + // Parse the transactions array from the bundle structure + bytes memory parsedData = vm.parseJson(json, ".transactions"); + txns_ = abi.decode(parsedData, (NetworkUpgradeTxns.NetworkUpgradeTxn[])); + } +} diff --git a/packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol b/packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol index 9aca3b03832e5..bb5d595275bbf 100644 --- a/packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ContractsManager.t.sol @@ -19,6 +19,7 @@ import { ICrossDomainMessenger } from "interfaces/universal/ICrossDomainMessenge import { IStandardBridge } from "interfaces/universal/IStandardBridge.sol"; import { IERC721Bridge } from "interfaces/universal/IERC721Bridge.sol"; import { IOptimismMintableERC20Factory } from "interfaces/universal/IOptimismMintableERC20Factory.sol"; +import { IOptimismMintableERC721Factory } from "interfaces/L2/IOptimismMintableERC721Factory.sol"; import { IFeeVault } from "interfaces/L2/IFeeVault.sol"; import { IFeeSplitter } from "interfaces/L2/IFeeSplitter.sol"; import { IProxyAdmin } from "interfaces/universal/IProxyAdmin.sol"; @@ -116,7 +117,7 @@ contract L2ContractsManager_Upgrade_Test is 
CommonTest { implementations.l1BlockCGTImpl = address(new L1BlockCGT()); implementations.l2ToL1MessagePasserImpl = address(new L2ToL1MessagePasser()); implementations.l2ToL1MessagePasserCGTImpl = address(new L2ToL1MessagePasserCGT()); - implementations.optimismMintableERC721FactoryImpl = address(new OptimismMintableERC721Factory(address(0), 0)); + implementations.optimismMintableERC721FactoryImpl = address(new OptimismMintableERC721Factory()); implementations.proxyAdminImpl = address(new ProxyAdmin(address(0))); implementations.superchainETHBridgeImpl = address(new SuperchainETHBridge()); implementations.ethLiquidityImpl = address(new ETHLiquidity()); @@ -282,6 +283,16 @@ contract L2ContractsManager_Upgrade_Test is CommonTest { _state2.config.mintableERC20Factory.bridge, "MintableERC20Factory config mismatch" ); + assertEq( + _state1.config.mintableERC721Factory.bridge, + _state2.config.mintableERC721Factory.bridge, + "MintableERC721Factory bridge mismatch" + ); + assertEq( + _state1.config.mintableERC721Factory.remoteChainID, + _state2.config.mintableERC721Factory.remoteChainID, + "MintableERC721Factory remoteChainID mismatch" + ); assertEq( _state1.config.sequencerFeeVault.recipient, _state2.config.sequencerFeeVault.recipient, @@ -374,6 +385,18 @@ contract L2ContractsManager_Upgrade_Test is CommonTest { "OptimismMintableERC20Factory.bridge not preserved" ); + // OptimismMintableERC721Factory + assertEq( + address(IOptimismMintableERC721Factory(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY).bridge()), + address(preUpgradeConfig.mintableERC721Factory.bridge), + "OptimismMintableERC721Factory.bridge not preserved" + ); + assertEq( + IOptimismMintableERC721Factory(Predeploys.OPTIMISM_MINTABLE_ERC721_FACTORY).remoteChainID(), + preUpgradeConfig.mintableERC721Factory.remoteChainID, + "OptimismMintableERC721Factory.remoteChainID not preserved" + ); + // SequencerFeeVault assertEq( IFeeVault(payable(Predeploys.SEQUENCER_FEE_WALLET)).recipient(), diff --git 
a/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol b/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol index e1f00008c029a..cbfb39fee14a1 100644 --- a/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol +++ b/packages/contracts-bedrock/test/L2/L2ProxyAdmin.t.sol @@ -50,11 +50,11 @@ abstract contract L2ProxyAdmin_TestInit is CommonTest, MockHelper { /// @notice Tests the `constructor` function of the `L2ProxyAdmin` contract. contract L2ProxyAdmin_Constructor_Test is L2ProxyAdmin_TestInit { /// @notice Tests that the `constructor` function succeeds. - function test_constructor_succeeds(address _owner) public { + function test_constructor_succeeds() public { // Deploy the L2ProxyAdmin contract - l2ProxyAdmin = IL2ProxyAdmin(address(new L2ProxyAdmin(_owner))); - // It sets the owner to the correct address - assertEq(l2ProxyAdmin.owner(), _owner); + l2ProxyAdmin = IL2ProxyAdmin(address(new L2ProxyAdmin())); + // It sets the owner to address(0) + assertEq(l2ProxyAdmin.owner(), address(0)); } } @@ -126,7 +126,10 @@ contract L2ProxyAdmin_UpgradePredeploys_Test is L2ProxyAdmin_TestInit { /// @notice Tests the `setProxyType` function of the `L2ProxyAdmin` contract for backwards compatibility. contract L2ProxyAdmin_SetProxyType_Test is ProxyAdmin_SetProxyType_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -134,7 +137,10 @@ contract L2ProxyAdmin_SetProxyType_Test is ProxyAdmin_SetProxyType_Test { /// @notice Tests the `setImplementationName` function of the `L2ProxyAdmin` contract for backwards compatibility. 
contract L2ProxyAdmin_SetImplementationName_Test is ProxyAdmin_SetImplementationName_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -142,7 +148,10 @@ contract L2ProxyAdmin_SetImplementationName_Test is ProxyAdmin_SetImplementation /// @notice Tests the `setAddressManager` function of the `L2ProxyAdmin` contract for backwards compatibility. contract L2ProxyAdmin_SetAddressManager_Test is ProxyAdmin_SetAddressManager_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -150,7 +159,10 @@ contract L2ProxyAdmin_SetAddressManager_Test is ProxyAdmin_SetAddressManager_Tes /// @notice Tests the `isUpgrading` function of the `L2ProxyAdmin` contract for backwards compatibility. contract L2ProxyAdmin_IsUpgrading_Test is ProxyAdmin_IsUpgrading_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -158,7 +170,10 @@ contract L2ProxyAdmin_IsUpgrading_Test is ProxyAdmin_IsUpgrading_Test { /// @notice Tests the `getProxyImplementation` function of the `L2ProxyAdmin` contract for backwards compatibility. 
contract L2ProxyAdmin_GetProxyImplementation_Test is ProxyAdmin_GetProxyImplementation_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -166,7 +181,10 @@ contract L2ProxyAdmin_GetProxyImplementation_Test is ProxyAdmin_GetProxyImplemen /// @notice Tests the `getProxyAdmin` function of the `L2ProxyAdmin` contract for backwards compatibility. contract L2ProxyAdmin_GetProxyAdmin_Test is ProxyAdmin_GetProxyAdmin_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -174,7 +192,10 @@ contract L2ProxyAdmin_GetProxyAdmin_Test is ProxyAdmin_GetProxyAdmin_Test { /// @notice Tests the `changeProxyAdmin` function of the `L2ProxyAdmin` contract for backwards compatibility. contract L2ProxyAdmin_ChangeProxyAdmin_Test is ProxyAdmin_ChangeProxyAdmin_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -182,7 +203,10 @@ contract L2ProxyAdmin_ChangeProxyAdmin_Test is ProxyAdmin_ChangeProxyAdmin_Test /// @notice Tests the `upgrade` function of the `L2ProxyAdmin` contract for backwards compatibility. 
contract L2ProxyAdmin_Upgrade_Test is ProxyAdmin_Upgrade_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -190,7 +214,10 @@ contract L2ProxyAdmin_Upgrade_Test is ProxyAdmin_Upgrade_Test { /// @notice Tests the `upgradeAndCall` function of the `L2ProxyAdmin` contract for backwards compatibility. contract L2ProxyAdmin_UpgradeAndCall_Test is ProxyAdmin_UpgradeAndCall_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. + vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } @@ -198,6 +225,9 @@ contract L2ProxyAdmin_UpgradeAndCall_Test is ProxyAdmin_UpgradeAndCall_Test { /// @notice General backwards-compatibility tests for the `L2ProxyAdmin` contract. contract L2ProxyAdmin_Uncategorized_Test is ProxyAdmin_Uncategorized_Test { function _createAdmin(address _owner) internal override returns (IProxyAdmin) { - return IProxyAdmin(address(new L2ProxyAdmin(_owner))); + IProxyAdmin admin = IProxyAdmin(address(new L2ProxyAdmin())); + // Manually set the owner in the storage slot 0. 
+ vm.store(address(admin), bytes32(0), bytes32(uint256(uint160(_owner)))); + return admin; } } diff --git a/packages/contracts-bedrock/test/L2/OptimismMintableERC721Factory.t.sol b/packages/contracts-bedrock/test/L2/OptimismMintableERC721Factory.t.sol index 76e1ca7cdc9a9..81907d4fdc902 100644 --- a/packages/contracts-bedrock/test/L2/OptimismMintableERC721Factory.t.sol +++ b/packages/contracts-bedrock/test/L2/OptimismMintableERC721Factory.t.sol @@ -29,11 +29,11 @@ abstract contract OptimismMintableERC721Factory_TestInit is CommonTest { } } -/// @title OptimismMintableERC721Factory_Constructor_Test -/// @notice Tests the `constructor` of the `OptimismMintableERC721Factory` contract. -contract OptimismMintableERC721Factory_Constructor_Test is OptimismMintableERC721Factory_TestInit { - /// @notice Tests that the constructor sets the correct values. - function test_constructor_succeeds() external view { +/// @title OptimismMintableERC721Factory_Initialize_Test +/// @notice Tests the initialization of the `OptimismMintableERC721Factory` contract. +contract OptimismMintableERC721Factory_Initialize_Test is OptimismMintableERC721Factory_TestInit { + /// @notice Tests that the initialization sets the correct values. 
+ function test_initialize_succeeds() external view { assertEq(l2OptimismMintableERC721Factory.BRIDGE(), address(l2ERC721Bridge)); assertEq(l2OptimismMintableERC721Factory.bridge(), address(l2ERC721Bridge)); assertEq(l2OptimismMintableERC721Factory.REMOTE_CHAIN_ID(), deploy.cfg().l1ChainID()); diff --git a/packages/contracts-bedrock/test/libraries/NetworkUpgradeTxns.t.sol b/packages/contracts-bedrock/test/libraries/NetworkUpgradeTxns.t.sol new file mode 100644 index 0000000000000..2ead1c2e3f579 --- /dev/null +++ b/packages/contracts-bedrock/test/libraries/NetworkUpgradeTxns.t.sol @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Interfaces +import { IGasPriceOracle } from "interfaces/L2/IGasPriceOracle.sol"; +import { IProxy } from "interfaces/universal/IProxy.sol"; + +// Testing +import { Test } from "test/setup/Test.sol"; + +// Libraries +import { NetworkUpgradeTxns } from "src/libraries/NetworkUpgradeTxns.sol"; +import { Predeploys } from "src/libraries/Predeploys.sol"; +import { DeployUtils } from "scripts/libraries/DeployUtils.sol"; + +/// @title NetworkUpgradeTxns_TestInit +/// @notice Reusable test initialization for `NetworkUpgradeTxns` tests. 
+abstract contract NetworkUpgradeTxns_TestInit is Test { + // Test constants matching Go implementation + address constant L1_BLOCK_DEPLOYER = 0x4210000000000000000000000000000000000000; + address constant GAS_PRICE_ORACLE_DEPLOYER = 0x4210000000000000000000000000000000000001; + address constant DEPOSITOR_ACCOUNT = 0xDeaDDEaDDeAdDeAdDEAdDEaddeAddEAdDEAd0001; + + // Intent strings from Ecotone upgrade (ecotone_upgrade_transactions.go:27-32) + string constant INTENT_DEPLOY_L1_BLOCK = "Ecotone: L1 Block Deployment"; + string constant INTENT_DEPLOY_GAS_PRICE_ORACLE = "Ecotone: Gas Price Oracle Deployment"; + string constant INTENT_UPDATE_L1_BLOCK_PROXY = "Ecotone: L1 Block Proxy Update"; + string constant INTENT_UPDATE_GAS_PRICE_ORACLE = "Ecotone: Gas Price Oracle Proxy Update"; + string constant INTENT_ENABLE_ECOTONE = "Ecotone: Gas Price Oracle Set Ecotone"; + string constant INTENT_BEACON_ROOTS = "Ecotone: beacon block roots contract deployment"; +} + +/// @title NetworkUpgradeTxns_SerializeTxn_Test +/// @notice Tests the `serializeTxn` function. +contract NetworkUpgradeTxns_SerializeTxn_Test is NetworkUpgradeTxns_TestInit { + /// @notice Test that serializeTxn correctly serializes all fields with fuzzed inputs. + function testFuzz_serializeTxn_succeeds( + address _from, + address _to, + uint64 _gasLimit, + bytes calldata _data, + string calldata _intent + ) + public + { + vm.assume(_gasLimit > 0); + vm.assume(bytes(_intent).length > 0); + + NetworkUpgradeTxns.NetworkUpgradeTxn memory txn = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: _intent, + from: _from, + to: _to, + gasLimit: _gasLimit, + data: _data + }); + + string memory json = NetworkUpgradeTxns.serializeTxn(txn, 0); + + // Verify JSON is not empty + assertGt(bytes(json).length, 0, "Serialized JSON should not be empty"); + } + + /// @notice Test that serializeTxn is deterministic for the same input with fuzzed parameters. 
+ function testFuzz_serializeTxn_deterministic_succeeds( + address _from, + address _to, + uint64 _gasLimit, + bytes calldata _data, + string calldata _intent + ) + public + { + vm.assume(_gasLimit > 0); + vm.assume(bytes(_intent).length > 0); + + NetworkUpgradeTxns.NetworkUpgradeTxn memory txn = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: _intent, + from: _from, + to: _to, + gasLimit: _gasLimit, + data: _data + }); + + string memory json1 = NetworkUpgradeTxns.serializeTxn(txn, 0); + string memory json2 = NetworkUpgradeTxns.serializeTxn(txn, 0); + + assertEq(keccak256(bytes(json1)), keccak256(bytes(json2)), "Serialization should be deterministic"); + } + + /// @notice Test that serializeTxn handles different indices with fuzzed transaction. + /// @dev The index parameter is used internally by forge-std but doesn't affect the output JSON. + function testFuzz_serializeTxn_differentIndices_succeeds( + address _from, + address _to, + uint64 _gasLimit, + bytes calldata _data, + string calldata _intent, + uint256 _index1, + uint256 _index2 + ) + public + { + vm.assume(_gasLimit > 0); + vm.assume(bytes(_intent).length > 0); + + NetworkUpgradeTxns.NetworkUpgradeTxn memory txn = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: _intent, + from: _from, + to: _to, + gasLimit: _gasLimit, + data: _data + }); + + string memory json1 = NetworkUpgradeTxns.serializeTxn(txn, _index1); + string memory json2 = NetworkUpgradeTxns.serializeTxn(txn, _index2); + + // Verify both produce non-empty JSON + assertGt(bytes(json1).length, 0, "First serialization should produce valid JSON"); + assertGt(bytes(json2).length, 0, "Second serialization should produce valid JSON"); + + // Note: The index is used internally by forge-std's serialization but the output + // JSON is the same since it only contains the transaction data, not the index. 
+ assertEq(keccak256(bytes(json1)), keccak256(bytes(json2)), "Same transaction produces same JSON"); + } +} + +/// @title NetworkUpgradeTxns_WriteArtifact_Test +/// @notice Tests the `writeArtifact` function. +contract NetworkUpgradeTxns_WriteArtifact_Test is NetworkUpgradeTxns_TestInit { + /// @notice Test writeArtifact with empty array + function test_writeArtifact_emptyArray_succeeds() public { + NetworkUpgradeTxns.NetworkUpgradeTxn[] memory txns = new NetworkUpgradeTxns.NetworkUpgradeTxn[](0); + string memory outputPath = "deployments/nut-test-empty.json"; + NetworkUpgradeTxns.BundleMetadata memory metadata = NetworkUpgradeTxns.BundleMetadata({ version: "" }); + NetworkUpgradeTxns.writeArtifact(txns, metadata, outputPath); + } + + /// @notice Test writeArtifact creates valid JSON file + function test_writeArtifact_succeeds() public { + NetworkUpgradeTxns.NetworkUpgradeTxn[] memory txns = new NetworkUpgradeTxns.NetworkUpgradeTxn[](2); + + txns[0] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_DEPLOY_L1_BLOCK, + from: L1_BLOCK_DEPLOYER, + to: address(0), + gasLimit: 375_000, + data: DeployUtils.getCode("L1Block.sol:L1Block") + }); + + txns[1] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_ENABLE_ECOTONE, + from: DEPOSITOR_ACCOUNT, + to: Predeploys.GAS_PRICE_ORACLE, + gasLimit: 50_000, + data: abi.encodeCall(IGasPriceOracle.setEcotone, ()) + }); + + string memory outputPath = "deployments/nut-test.json"; + NetworkUpgradeTxns.BundleMetadata memory metadata = NetworkUpgradeTxns.BundleMetadata({ version: "1.0.0" }); + NetworkUpgradeTxns.writeArtifact(txns, metadata, outputPath); + + // Read json file and validate the transactions + NetworkUpgradeTxns.NetworkUpgradeTxn[] memory readTxns = NetworkUpgradeTxns.readArtifact(outputPath); + assertEq(readTxns.length, txns.length, "Transaction count mismatch"); + for (uint256 i = 0; i < txns.length; i++) { + assertEq(readTxns[i].intent, txns[i].intent, "'intent' doesn't match"); + 
assertEq(readTxns[i].from, txns[i].from, "'from' doesn't match"); + assertEq(readTxns[i].to, txns[i].to, "'to' doesn't match"); + assertEq(readTxns[i].gasLimit, txns[i].gasLimit, "'gasLimit' doesn't match"); + assertEq(readTxns[i].data, txns[i].data, "'data' doesn't match"); + } + } +} + +/// @title NetworkUpgradeTxns_Uncategorized_Test +/// @notice Tests that the artifact produced by the library matches the expected values. +contract NetworkUpgradeTxns_Uncategorized_Test is NetworkUpgradeTxns_TestInit { + /// @notice EIP-4788 beacon roots contract deployment data from EIP spec + /// Obtained from https://eips.ethereum.org/EIPS/eip-4788#deployment + bytes constant EIP4788_CREATION_DATA = + hex"60618060095f395ff33373fffffffffffffffffffffffffffffffffffffffe14604d57602036146024575f5ffd5b5f35801560495762001fff810690815414603c575f5ffd5b62001fff01545f5260205ff35b5f5ffd5b62001fff42064281555f359062001fff015500"; + + /// @notice Test constructing Ecotone upgrade transactions, writing to file and reading back. + function test_ecotoneUpgrade_roundtrip_succeeds() public { + NetworkUpgradeTxns.NetworkUpgradeTxn[] memory txns = new NetworkUpgradeTxns.NetworkUpgradeTxn[](6); + + // 1. Deploy L1Block + // ecotone_upgrade_transactions.go:47 + txns[0] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_DEPLOY_L1_BLOCK, + from: L1_BLOCK_DEPLOYER, + to: address(0), + gasLimit: 375_000, + data: DeployUtils.getCode("L1Block.sol:L1Block") + }); + + // 2. Deploy GasPriceOracle + // ecotone_upgrade_transactions.go:64 + txns[1] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_DEPLOY_GAS_PRICE_ORACLE, + from: GAS_PRICE_ORACLE_DEPLOYER, + to: address(0), + gasLimit: 1_000_000, + data: DeployUtils.getCode("GasPriceOracle.sol:GasPriceOracle") + }); + + // 3. 
Update L1Block proxy + // ecotone_upgrade_transactions.go:81 + // Calculate the deployed L1Block address + address newL1BlockAddress = vm.computeCreateAddress(L1_BLOCK_DEPLOYER, 0); + txns[2] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_UPDATE_L1_BLOCK_PROXY, + from: address(0), + to: Predeploys.L1_BLOCK_ATTRIBUTES, + gasLimit: 50_000, + data: abi.encodeCall(IProxy.upgradeTo, (newL1BlockAddress)) + }); + + // 4. Update GasPriceOracle proxy + // ecotone_upgrade_transactions.go:98 + // Calculate the deployed GasPriceOracle address + address newGasPriceOracleAddress = vm.computeCreateAddress(GAS_PRICE_ORACLE_DEPLOYER, 0); + txns[3] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_UPDATE_GAS_PRICE_ORACLE, + from: address(0), + to: Predeploys.GAS_PRICE_ORACLE, + gasLimit: 50_000, + data: abi.encodeCall(IProxy.upgradeTo, (newGasPriceOracleAddress)) + }); + + // 5. Enable Ecotone on GasPriceOracle + // ecotone_upgrade_transactions.go:115 + txns[4] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_ENABLE_ECOTONE, + from: DEPOSITOR_ACCOUNT, + to: Predeploys.GAS_PRICE_ORACLE, + gasLimit: 80_000, + data: abi.encodeCall(IGasPriceOracle.setEcotone, ()) + }); + + // 6. 
Deploy EIP-4788 beacon block roots contract + // ecotone_upgrade_transactions.go:130 + txns[5] = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: INTENT_BEACON_ROOTS, + from: 0x0B799C86a49DEeb90402691F1041aa3AF2d3C875, + to: address(0), // Contract deployment + gasLimit: 250_000, // hex constant 0x3d090, as defined in EIP-4788 (250_000 in decimal) + data: EIP4788_CREATION_DATA + }); + + // Write transactions to JSON file + string memory outputPath = "deployments/nut-ecotone-upgrade-test.json"; + NetworkUpgradeTxns.BundleMetadata memory metadata = NetworkUpgradeTxns.BundleMetadata({ version: "1.0.0" }); + NetworkUpgradeTxns.writeArtifact(txns, metadata, outputPath); + + // Read back the transactions + NetworkUpgradeTxns.NetworkUpgradeTxn[] memory readTxns = NetworkUpgradeTxns.readArtifact(outputPath); + + // Validate array length matches + assertEq(readTxns.length, txns.length, "Transaction count mismatch"); + + // Validate each transaction matches + for (uint256 i = 0; i < txns.length; i++) { + assertEq(readTxns[i].intent, txns[i].intent, "'intent' doesn't match"); + assertEq(readTxns[i].from, txns[i].from, "'from' doesn't match"); + assertEq(readTxns[i].to, txns[i].to, "'to' doesn't match"); + assertEq(readTxns[i].gasLimit, txns[i].gasLimit, "'gasLimit' doesn't match"); + assertEq(readTxns[i].data, txns[i].data, "'data' doesn't match"); + } + } + + function testFuzz_txnStruct_succeeds( + address _from, + address _to, + uint64 _gasLimit, + bytes memory _data, + string calldata _intent + ) + public + pure + { + NetworkUpgradeTxns.NetworkUpgradeTxn memory txn = NetworkUpgradeTxns.NetworkUpgradeTxn({ + intent: _intent, + from: _from, + to: _to, + gasLimit: _gasLimit, + data: _data + }); + + assertEq(txn.intent, _intent); + assertEq(txn.from, _from); + assertEq(txn.to, _to); + assertEq(txn.gasLimit, _gasLimit); + assertEq(txn.data, _data); + } +} diff --git a/packages/contracts-bedrock/test/scripts/GenerateNUTBundle.t.sol 
b/packages/contracts-bedrock/test/scripts/GenerateNUTBundle.t.sol new file mode 100644 index 0000000000000..3580a25f0e880 --- /dev/null +++ b/packages/contracts-bedrock/test/scripts/GenerateNUTBundle.t.sol @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.15; + +// Testing +import { Test } from "test/setup/Test.sol"; + +// Scripts +import { GenerateNUTBundle } from "scripts/upgrade/GenerateNUTBundle.s.sol"; + +// Libraries +import { NetworkUpgradeTxns } from "src/libraries/NetworkUpgradeTxns.sol"; +import { UpgradeUtils } from "scripts/libraries/UpgradeUtils.sol"; + +/// @title GenerateNUTBundleTest +/// @notice Tests that GenerateNUTBundle correctly generates Network Upgrade Transaction bundles +/// for L2 hardfork upgrades. +contract GenerateNUTBundleTest is Test { + GenerateNUTBundle script; + + uint256 constant TEST_L1_CHAIN_ID = 1; + + function setUp() public { + script = new GenerateNUTBundle(); + script.setUp(); + } + + /// @notice Tests that run succeeds. + function test_run_succeeds() public { + GenerateNUTBundle.Output memory output = script.run(); + + // Verify artifact written correctly + NetworkUpgradeTxns.NetworkUpgradeTxn[] memory readTxns = + NetworkUpgradeTxns.readArtifact(script.upgradeBundlePath()); + assertEq(readTxns.length, output.txns.length, "Transaction count mismatch"); + for (uint256 i = 0; i < readTxns.length; i++) { + assertEq(readTxns[i].intent, output.txns[i].intent, "Intent mismatch"); + assertEq(readTxns[i].from, output.txns[i].from, "From mismatch"); + assertEq(readTxns[i].to, output.txns[i].to, "To mismatch"); + assertEq(readTxns[i].gasLimit, uint256(output.txns[i].gasLimit), "Gas limit mismatch"); + assertEq(keccak256(readTxns[i].data), keccak256(output.txns[i].data), "Data mismatch"); + } + } + + /// @notice Tests that transactions have correct structure. + /// @dev Includes ConditionalDeployer and ProxyAdmin upgrades. 
+ function test_run_transactionStructure_succeeds() public { + GenerateNUTBundle.Output memory output = script.run(); + + // Should include: + // 1. ConditionalDeployer deployment + // 2. ConditionalDeployer upgrade + // 3. All implementation deployments (StorageSetter + predeploys) + // 4. L2ProxyAdmin upgrade + // 5. L2ContractsManager deployment + // 6. Upgrade execution + + // Verify ConditionalDeployer deployment + assertEq( + output.txns[0].intent, + "ConditionalDeployer Deployment", + "First transaction should be ConditionalDeployer deployment" + ); + + // Verify ConditionalDeployer upgrade + assertEq( + output.txns[1].intent, + "Upgrade ConditionalDeployer Implementation", + "Second transaction should be ConditionalDeployer upgrade" + ); + + // Verify implementation deployments + string[] memory implementationsToUpgrade = UpgradeUtils.getImplementationsNamesToUpgrade(); + for (uint256 i = 0; i < implementationsToUpgrade.length; i++) { + assertEq( + output.txns[i + 2].intent, + string.concat("Deploy ", implementationsToUpgrade[i], " Implementation"), + string.concat("Transaction should be ", implementationsToUpgrade[i], " deployment") + ); + } + + // Verify L2ProxyAdmin upgrade + assertEq( + output.txns[output.txns.length - 3].intent, + "Upgrade L2ProxyAdmin Implementation", + "Third to last transaction should be L2ProxyAdmin upgrade" + ); + + // Verify L2ContractsManager deployment + assertEq( + output.txns[output.txns.length - 2].intent, + "Deploy L2ContractsManager Implementation", + "Second to last transaction should be L2ContractsManager implementation deployment" + ); + + // Verify upgrade execution + assertEq( + output.txns[output.txns.length - 1].intent, + "L2ProxyAdmin Upgrade Predeploys", + "Last transaction should be L2ProxyAdmin upgrade predeploys" + ); + } + + /// @notice Tests that multiple runs produce deterministic results. 
+ function test_run_deterministicOutput_succeeds() public { + GenerateNUTBundle.Output memory output1 = script.run(); + GenerateNUTBundle.Output memory output2 = script.run(); + + _compareTransactions(output1, output2); + } + + function _compareTransactions( + GenerateNUTBundle.Output memory _output1, + GenerateNUTBundle.Output memory _output2 + ) + internal + pure + { + assertEq(_output1.txns.length, _output2.txns.length, "Should produce same number of transactions"); + for (uint256 i = 0; i < _output1.txns.length; i++) { + assertEq(_output1.txns[i].intent, _output2.txns[i].intent, "Transaction intent should match"); + assertEq(_output1.txns[i].from, _output2.txns[i].from, "Transaction from should match"); + assertEq(_output1.txns[i].to, _output2.txns[i].to, "Transaction to should match"); + assertEq(_output1.txns[i].gasLimit, _output2.txns[i].gasLimit, "Transaction gasLimit should match"); + assertEq( + keccak256(_output1.txns[i].data), keccak256(_output2.txns[i].data), "Transaction data should match" + ); + } + } +} diff --git a/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol b/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol index 7750eff501e9d..d6676a00957df 100644 --- a/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol +++ b/packages/contracts-bedrock/test/scripts/L2Genesis.t.sol @@ -48,10 +48,9 @@ abstract contract L2Genesis_TestInit is Test { // Verify owner in the implementation to catch storage shifting issues // The implementation is stored in the code namespace address proxyAdminImpl = Predeploys.predeployToCodeNamespace(Predeploys.PROXY_ADMIN); + assertEq( - input.opChainProxyAdminOwner, - IL2ProxyAdmin(proxyAdminImpl).owner(), - "ProxyAdmin implementation owner should match expected" + IL2ProxyAdmin(proxyAdminImpl).owner(), address(0), "ProxyAdmin implementation owner should match expected" ); } From a535847e2e22939bbc89fc0dea2bb9b37833e266 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 10 Mar 2026 05:22:07 +1000 Subject: 
[PATCH 082/201] Revert "op-e2e: Simplify output game creation by auto-deriving rootClaim (#19294)" (#19445) This reverts commit cb794fb85e59f64c622736ec18838f6d7ab12449. --- op-e2e/e2eutils/disputegame/helper.go | 62 +++++++++------------- op-e2e/faultproofs/multi_test.go | 4 +- op-e2e/faultproofs/output_alphabet_test.go | 18 +++---- op-e2e/faultproofs/output_cannon_test.go | 40 +++++++------- op-e2e/faultproofs/permissioned_test.go | 2 +- op-e2e/faultproofs/precompile_test.go | 2 +- op-e2e/faultproofs/preimages_test.go | 2 +- op-e2e/faultproofs/response_delay_test.go | 4 +- 8 files changed, 61 insertions(+), 73 deletions(-) diff --git a/op-e2e/e2eutils/disputegame/helper.go b/op-e2e/e2eutils/disputegame/helper.go index 868169652556f..d5021d7314de6 100644 --- a/op-e2e/e2eutils/disputegame/helper.go +++ b/op-e2e/e2eutils/disputegame/helper.go @@ -56,7 +56,6 @@ type GameCfg struct { allowUnsafe bool superOutputRoots []eth.Bytes32 super eth.Super - outputRoot common.Hash } type GameOpt interface { Apply(cfg *GameCfg) @@ -93,13 +92,6 @@ func WithSuper(super eth.Super) GameOpt { }) } -// WithOutputRoot allows specifying a custom output root. -func WithOutputRoot(outputRoot common.Hash) GameOpt { - return gameOptFn(func(c *GameCfg) { - c.outputRoot = outputRoot - }) -} - type DisputeSystem interface { L1BeaconEndpoint() endpoint.RestHTTP SupervisorClient() *sources.SupervisorClient @@ -191,31 +183,29 @@ func NewGameCfg(opts ...GameOpt) *GameCfg { return cfg } -func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputCannonGameHelper { - return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, cannonGameType, opts...) +func (h *FactoryHelper) StartOutputCannonGameWithCorrectRoot(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputCannonGameHelper { + cfg := NewGameCfg(opts...) 
+ h.WaitForBlock(l2Node, l2BlockNumber, cfg) + output, err := h.System.RollupClient(l2Node).OutputAtBlock(ctx, l2BlockNumber) + h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) + return h.StartOutputCannonGame(ctx, l2Node, l2BlockNumber, common.Hash(output.OutputRoot), opts...) +} + +func (h *FactoryHelper) StartOutputCannonGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputCannonGameHelper { + return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, rootClaim, cannonGameType, opts...) } -func (h *FactoryHelper) StartPermissionedGame(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputCannonGameHelper { - return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, permissionedGameType, opts...) +func (h *FactoryHelper) StartPermissionedGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputCannonGameHelper { + return h.startOutputCannonGameOfType(ctx, l2Node, l2BlockNumber, rootClaim, permissionedGameType, opts...) } -func (h *FactoryHelper) startOutputCannonGameOfType(ctx context.Context, l2Node string, l2BlockNumber uint64, gameType uint32, opts ...GameOpt) *OutputCannonGameHelper { +func (h *FactoryHelper) startOutputCannonGameOfType(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, gameType uint32, opts ...GameOpt) *OutputCannonGameHelper { cfg := NewGameCfg(opts...) 
logger := testlog.Logger(h.T, log.LevelInfo).New("role", "OutputCannonGameHelper") rollupClient := h.System.RollupClient(l2Node) l2Client := h.System.NodeClient(l2Node) - extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber, cfg) - - // If a custom output root was provided via options, use it; otherwise derive from extraData - var rootClaim common.Hash - if cfg.outputRoot != (common.Hash{}) { - rootClaim = cfg.outputRoot - } else { - output, err := rollupClient.OutputAtBlock(ctx, l2BlockNumber) - h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) - rootClaim = common.Hash(output.OutputRoot) - } + extraData := h.CreateBisectionGameExtraData(l2Node, l2BlockNumber, cfg) ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() @@ -317,23 +307,21 @@ func (h *FactoryHelper) GetL1Head(ctx context.Context, game contracts.FaultDispu return l1Head } -func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputAlphabetGameHelper { +func (h *FactoryHelper) StartOutputAlphabetGameWithCorrectRoot(ctx context.Context, l2Node string, l2BlockNumber uint64, opts ...GameOpt) *OutputAlphabetGameHelper { + cfg := NewGameCfg(opts...) + h.WaitForBlock(l2Node, l2BlockNumber, cfg) + output, err := h.System.RollupClient(l2Node).OutputAtBlock(ctx, l2BlockNumber) + h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) + return h.StartOutputAlphabetGame(ctx, l2Node, l2BlockNumber, common.Hash(output.OutputRoot)) +} + +func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node string, l2BlockNumber uint64, rootClaim common.Hash, opts ...GameOpt) *OutputAlphabetGameHelper { cfg := NewGameCfg(opts...) 
logger := testlog.Logger(h.T, log.LevelInfo).New("role", "OutputAlphabetGameHelper") rollupClient := h.System.RollupClient(l2Node) l2Client := h.System.NodeClient(l2Node) - extraData := h.createBisectionGameExtraData(l2Node, l2BlockNumber, cfg) - - // If a custom output root was provided via options, use it; otherwise derive from extraData - var rootClaim common.Hash - if cfg.outputRoot != (common.Hash{}) { - rootClaim = cfg.outputRoot - } else { - output, err := rollupClient.OutputAtBlock(ctx, l2BlockNumber) - h.Require.NoErrorf(err, "Failed to get output at block %v", l2BlockNumber) - rootClaim = common.Hash(output.OutputRoot) - } + extraData := h.CreateBisectionGameExtraData(l2Node, l2BlockNumber, cfg) ctx, cancel := context.WithTimeout(ctx, 1*time.Minute) defer cancel() @@ -364,7 +352,7 @@ func (h *FactoryHelper) StartOutputAlphabetGame(ctx context.Context, l2Node stri } } -func (h *FactoryHelper) createBisectionGameExtraData(l2Node string, l2BlockNumber uint64, cfg *GameCfg) []byte { +func (h *FactoryHelper) CreateBisectionGameExtraData(l2Node string, l2BlockNumber uint64, cfg *GameCfg) []byte { h.WaitForBlock(l2Node, l2BlockNumber, cfg) h.T.Logf("Creating game with l2 block number: %v", l2BlockNumber) extraData := make([]byte, 32) diff --git a/op-e2e/faultproofs/multi_test.go b/op-e2e/faultproofs/multi_test.go index cde98203fdbce..e8e87268ea52a 100644 --- a/op-e2e/faultproofs/multi_test.go +++ b/op-e2e/faultproofs/multi_test.go @@ -20,8 +20,8 @@ func TestMultipleGameTypes(t *testing.T) { gameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game1 := gameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) - game2 := gameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xbb})) + game1 := gameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) + game2 := gameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xbb}) 
latestClaim1 := game1.DisputeLastBlock(ctx) latestClaim2 := game2.DisputeLastBlock(ctx) diff --git a/op-e2e/faultproofs/output_alphabet_test.go b/op-e2e/faultproofs/output_alphabet_test.go index ba246f41b87a4..135ab233a8815 100644 --- a/op-e2e/faultproofs/output_alphabet_test.go +++ b/op-e2e/faultproofs/output_alphabet_test.go @@ -24,7 +24,7 @@ func TestOutputAlphabetGame_ChallengerWins(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0xff})) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, common.Hash{0xff}) correctTrace := game.CreateHonestActor(ctx, "sequencer") game.LogGameData(ctx) @@ -81,7 +81,7 @@ func TestOutputAlphabetGame_ReclaimBond(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0xff})) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, common.Hash{0xff}) game.LogGameData(ctx) // The dispute game should have a zero balance @@ -151,7 +151,7 @@ func TestOutputAlphabetGame_ValidOutputRoot(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2) + game := disputeGameFactory.StartOutputAlphabetGameWithCorrectRoot(ctx, "sequencer", 2) correctTrace := game.CreateHonestActor(ctx, "sequencer") game.LogGameData(ctx) claim := game.DisputeLastBlock(ctx) @@ -190,9 +190,9 @@ func TestChallengerCompleteExhaustiveDisputeGame(t *testing.T) { disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) var game *disputegame.OutputAlphabetGameHelper if isRootCorrect { - game = disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1) + game = 
disputeGameFactory.StartOutputAlphabetGameWithCorrectRoot(ctx, "sequencer", 1) } else { - game = disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa, 0xbb, 0xcc})) + game = disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) } claim := game.DisputeLastBlock(ctx) @@ -256,7 +256,7 @@ func TestOutputAlphabetGame_FreeloaderEarnsNothing(t *testing.T) { require.Nil(t, err) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2) + game := disputeGameFactory.StartOutputAlphabetGameWithCorrectRoot(ctx, "sequencer", 2) correctTrace := game.CreateHonestActor(ctx, "sequencer") game.LogGameData(ctx) claim := game.DisputeLastBlock(ctx) @@ -319,14 +319,14 @@ func TestHighestActedL1BlockMetric(t *testing.T) { disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) honestChallenger := disputeGameFactory.StartChallenger(ctx, "Honest", challenger.WithAlphabet(), challenger.WithPrivKey(sys.Cfg.Secrets.Alice)) - game1 := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) + game1 := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa}) sys.AdvanceTime(game1.MaxClockDuration(ctx)) require.NoError(t, wait.ForNextBlock(ctx, l1Client)) game1.WaitForGameStatus(ctx, types.GameStatusDefenderWon) - disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2, disputegame.WithOutputRoot(common.Hash{0xaa})) - disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0xaa})) + disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 2, common.Hash{0xaa}) + disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 3, common.Hash{0xaa}) honestChallenger.WaitL1HeadActedOn(ctx, l1Client) diff --git a/op-e2e/faultproofs/output_cannon_test.go 
b/op-e2e/faultproofs/output_cannon_test.go index 5685d48a325d7..9e758590b5e6a 100644 --- a/op-e2e/faultproofs/output_cannon_test.go +++ b/op-e2e/faultproofs/output_cannon_test.go @@ -30,7 +30,7 @@ func testOutputCannonGame(t *testing.T, allocType config.AllocType) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 4, disputegame.WithOutputRoot(common.Hash{0x01})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 4, common.Hash{0x01}) arena := createOutputGameArena(t, sys, game) testCannonGame(t, ctx, arena, &game.SplitGameHelper) } @@ -46,7 +46,7 @@ func testOutputCannonChallengeAllZeroClaim(t *testing.T, allocType config.AllocT t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, common.Hash{}) arena := createOutputGameArena(t, sys, game) testCannonChallengeAllZeroClaim(t, ctx, arena, &game.SplitGameHelper) } @@ -68,7 +68,7 @@ func TestOutputCannon_PublishCannonRootClaim(t *testing.T) { sys, _ := StartFaultDisputeSystem(t, WithAllocType(allocType)) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", test.disputeL2BlockNumber, disputegame.WithOutputRoot(common.Hash{0x01})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", test.disputeL2BlockNumber, common.Hash{0x01}) game.DisputeLastBlock(ctx) game.LogGameData(ctx) @@ -99,7 +99,7 @@ func TestOutputCannonDisputeGame(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := 
disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) require.NotNil(t, game) game.LogGameData(ctx) @@ -137,7 +137,7 @@ func testOutputCannonDefendStep(t *testing.T, allocType config.AllocType) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) arena := createOutputGameArena(t, sys, game) testCannonDefendStep(t, ctx, arena, &game.SplitGameHelper) } @@ -163,7 +163,7 @@ func testOutputCannonStepWithLargePreimage(t *testing.T, allocType config.AllocT l2BlockNumber := safeHead.NumberU64() disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Dispute any block - it will have to read the L1 batches to see if the block is reached - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", l2BlockNumber, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", l2BlockNumber, common.Hash{0x01, 0xaa}) require.NotNil(t, game) outputRootClaim := game.DisputeBlock(ctx, l2BlockNumber) game.LogGameData(ctx) @@ -250,7 +250,7 @@ func testPreimageStep(t *testing.T, allocType config.AllocType, preimageOptConfi t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) @@ -299,7 +299,7 @@ func testOutputCannonStepWithKzgPointEvaluation(t *testing.T, allocType config.A t.Logf("KZG Point Evaluation block number: %d", precompileBlock) disputeGameFactory := 
disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", bigs.Uint64Strict(precompileBlock), disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", bigs.Uint64Strict(precompileBlock), common.Hash{0x01, 0xaa}) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) @@ -337,7 +337,7 @@ func testOutputCannonProposedOutputRootValid_AttackWithCorrectTrace(t *testing.T t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1) + game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", 1) arena := createOutputGameArena(t, sys, game) testCannonProposalValid_AttackWithCorrectTrace(t, ctx, arena, &game.SplitGameHelper) } @@ -352,7 +352,7 @@ func testOutputCannonProposedOutputRootValid_DefendWithCorrectTrace(t *testing.T t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1) + game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", 1) arena := createOutputGameArena(t, sys, game) testCannonProposalValid_DefendWithCorrectTrace(t, ctx, arena, &game.SplitGameHelper) } @@ -368,7 +368,7 @@ func testOutputCannonPoisonedPostState(t *testing.T, allocType config.AllocType) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0xaa}) arena := createOutputGameArena(t, sys, game) testCannonPoisonedPostState(t, ctx, arena, &game.SplitGameHelper) } @@ -384,7 +384,7 @@ func testDisputeOutputRootBeyondProposedBlockValidOutputRoot(t *testing.T, alloc 
disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1) + game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", 1) arena := createOutputGameArena(t, sys, game) testDisputeRootBeyondProposedBlockValidOutputRoot(t, ctx, arena, &game.SplitGameHelper) } @@ -400,7 +400,7 @@ func testDisputeOutputRootBeyondProposedBlockInvalidOutputRoot(t *testing.T, all disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0xaa}) arena := createOutputGameArena(t, sys, game) testDisputeRootBeyondProposedBlockInvalidOutputRoot(t, ctx, arena, &game.SplitGameHelper) } @@ -416,7 +416,7 @@ func testTestDisputeOutputRootChangeClaimedOutputRoot(t *testing.T, allocType co disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is dishonest - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 1, common.Hash{0xaa}) arena := createOutputGameArena(t, sys, game) testDisputeRootChangeClaimedRoot(t, ctx, arena, &game.SplitGameHelper) } @@ -460,7 +460,7 @@ func TestInvalidateUnsafeProposal(t *testing.T) { blockNum := uint64(1) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is _dishonest_ because the required data is not available on L1 - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", blockNum, disputegame.WithUnsafeProposal()) + game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", blockNum, disputegame.WithUnsafeProposal()) correctTrace := game.CreateHonestActor(ctx, "sequencer", 
disputegame.WithPrivKey(sys.Cfg.Secrets.Alice)) @@ -521,7 +521,7 @@ func TestInvalidateProposalForFutureBlock(t *testing.T) { farFutureBlockNum := uint64(10_000_000) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Root claim is _dishonest_ because the required data is not available on L1 - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", farFutureBlockNum, disputegame.WithOutputRoot(common.Hash{0xaa}), disputegame.WithFutureProposal()) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", farFutureBlockNum, common.Hash{0xaa}, disputegame.WithFutureProposal()) correctTrace := game.CreateHonestActor(ctx, "sequencer", disputegame.WithPrivKey(sys.Cfg.Secrets.Alice)) @@ -562,7 +562,7 @@ func testInvalidateCorrectProposalFutureBlock(t *testing.T, allocType config.All require.NoError(t, err, "Failed to get output at safe head") // Create a dispute game with an output root that is valid at `safeHead`, but that claims to correspond to block // `safeHead.Number + 10000`. This is dishonest, because this block does not exist yet. - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 10_000, disputegame.WithOutputRoot(common.Hash(output.OutputRoot)), disputegame.WithFutureProposal()) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 10_000, common.Hash(output.OutputRoot), disputegame.WithFutureProposal()) // Start the honest challenger. 
game.StartChallenger(ctx, "Honest", challenger.WithPrivKey(sys.Cfg.Secrets.Bob)) @@ -595,7 +595,7 @@ func testOutputCannonHonestSafeTraceExtensionValidRoot(t *testing.T, allocType c // Create a dispute game with an honest claim disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", safeHeadNum-1) + game := disputeGameFactory.StartOutputCannonGameWithCorrectRoot(ctx, "sequencer", safeHeadNum-1) require.NotNil(t, game) // Create a correct trace actor with an honest trace extending to L2 block #4 @@ -649,7 +649,7 @@ func testOutputCannonHonestSafeTraceExtensionInvalidRoot(t *testing.T, allocType // Create a dispute game with a dishonest claim @ L2 block #4 disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", safeHeadNum-1, disputegame.WithOutputRoot(common.Hash{0xCA, 0xFE})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", safeHeadNum-1, common.Hash{0xCA, 0xFE}) require.NotNil(t, game) // Create a correct trace actor with an honest trace extending to L2 block #5 @@ -699,7 +699,7 @@ func testAgreeFirstBlockWithOriginOf1(t *testing.T, allocType config.AllocType) // Create a dispute game with a dishonest claim @ L2 block #4 disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) // Make the agreed block the first one with L1 origin of block 1 so the claim is blockNum+1 - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", blockNum+1, disputegame.WithOutputRoot(common.Hash{0xCA, 0xFE})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", blockNum+1, common.Hash{0xCA, 0xFE}) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) diff --git a/op-e2e/faultproofs/permissioned_test.go b/op-e2e/faultproofs/permissioned_test.go index f7193a81788f2..f6b0920b107b5 100644 --- a/op-e2e/faultproofs/permissioned_test.go +++ 
b/op-e2e/faultproofs/permissioned_test.go @@ -20,7 +20,7 @@ func TestPermissionedGameType(t *testing.T) { gameFactory := disputegame.NewFactoryHelper(t, ctx, sys, disputegame.WithFactoryPrivKey(sys.Cfg.Secrets.Proposer)) - game := gameFactory.StartPermissionedGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := gameFactory.StartPermissionedGame(ctx, "sequencer", 1, common.Hash{0x01, 0xaa}) // Start a challenger with both cannon and alphabet support gameFactory.StartChallenger(ctx, "TowerDefense", diff --git a/op-e2e/faultproofs/precompile_test.go b/op-e2e/faultproofs/precompile_test.go index f7570107c2eaf..226716be1c37c 100644 --- a/op-e2e/faultproofs/precompile_test.go +++ b/op-e2e/faultproofs/precompile_test.go @@ -117,7 +117,7 @@ func TestDisputePrecompile(t *testing.T) { }) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", bigs.Uint64Strict(receipt.BlockNumber), disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", bigs.Uint64Strict(receipt.BlockNumber), common.Hash{0x01, 0xaa}) require.NotNil(t, game) outputRootClaim := game.DisputeLastBlock(ctx) game.LogGameData(ctx) diff --git a/op-e2e/faultproofs/preimages_test.go b/op-e2e/faultproofs/preimages_test.go index 01a0674f63db8..aef8671f0613e 100644 --- a/op-e2e/faultproofs/preimages_test.go +++ b/op-e2e/faultproofs/preimages_test.go @@ -38,7 +38,7 @@ func TestLocalPreimages(t *testing.T) { t.Cleanup(sys.Close) disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, disputegame.WithOutputRoot(common.Hash{0x01, 0xaa})) + game := disputeGameFactory.StartOutputCannonGame(ctx, "sequencer", 3, common.Hash{0x01, 0xaa}) require.NotNil(t, game) claim := game.DisputeLastBlock(ctx) diff --git a/op-e2e/faultproofs/response_delay_test.go 
b/op-e2e/faultproofs/response_delay_test.go index 69eabade65d5a..2e809345aca33 100644 --- a/op-e2e/faultproofs/response_delay_test.go +++ b/op-e2e/faultproofs/response_delay_test.go @@ -49,7 +49,7 @@ func TestChallengerResponseDelay(t *testing.T) { // Create a dispute game with incorrect root to trigger challenger response disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa, 0xbb, 0xcc})) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) // Make an invalid claim that the honest challenger should counter invalidClaim := game.RootClaim(ctx) @@ -95,7 +95,7 @@ func TestChallengerResponseDelayWithMultipleActions(t *testing.T) { responseDelay := 2 * time.Second disputeGameFactory := disputegame.NewFactoryHelper(t, ctx, sys) - game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, disputegame.WithOutputRoot(common.Hash{0xaa, 0xbb, 0xcc})) + game := disputeGameFactory.StartOutputAlphabetGame(ctx, "sequencer", 1, common.Hash{0xaa, 0xbb, 0xcc}) // Start challenger with response delay game.StartChallenger(ctx, "sequencer", "DelayedChallenger", From caeef61cc458633fa6a2af4803e8ec2d5301cbf5 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 10 Mar 2026 06:26:10 +1000 Subject: [PATCH 083/201] supernode: Return whatever optimistic blocks are available (#19348) * op-supernode: return partial optimistic blocks in superroot_atTimestamp Previously, when VerifiedAt returned NotFound for a chain, the code would skip optimistic data collection for that chain entirely. This meant OptimisticAtTimestamp would be missing entries for chains that lacked verified data, even if optimistic data was available. Now the handler always attempts to collect optimistic data regardless of whether verified data is present. 
Chains where both verified and optimistic data are unavailable are absent from the map (as documented by the response type comment). The Data field remains nil whenever any chain lacks verified data. New tests cover: - verified NotFound but optimistic available -> chain present in map - verified NotFound and optimistic NotFound -> chain absent from map - verified NotFound and optimistic errors non-NotFound -> returns error https://claude.ai/code/session_01SMasfxHWzRezDREXkV2TzR * op-acceptance-tests: unskip interop fault proof tests (#19180) These tests were blocked on supernode not returning optimistic blocks for chains that hadn't yet been fully verified. Now that the superroot_atTimestamp RPC returns partial optimistic data (whatever blocks are available), the skip condition is resolved. https://claude.ai/code/session_01SMasfxHWzRezDREXkV2TzR * Attempt to fix test. * Fix test. * Rename method. * Better name * Simplify superroot code. --------- Co-authored-by: Claude --- .../tests/interop/proofs/fpp/fpp_test.go | 2 - .../serial/interop_fault_proofs_test.go | 2 - .../tests/superfaultproofs/singlechain.go | 7 +- .../superfaultproofs/superfaultproofs.go | 80 ++++++++----------- op-devstack/dsl/l2_cl.go | 19 +++++ .../supernode/activity/superroot/superroot.go | 19 +++-- .../activity/superroot/superroot_test.go | 43 ++++++++++ 7 files changed, 113 insertions(+), 59 deletions(-) diff --git a/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go b/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go index 2fcd6b8eff2d3..ba4d858b8a76c 100644 --- a/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go +++ b/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go @@ -22,8 +22,6 @@ func TestFPP(gt *testing.T) { func TestNextSuperRootNotFound(gt *testing.T) { t := devtest.SerialT(gt) - // TODO(#19180): Unskip this once supernode is updated. 
- t.Skip("Supernode does not yet return optimistic blocks until blocks are fully validated") sys := presets.NewSimpleInterop(t) blockTime := sys.L2ChainA.Escape().RollupConfig().BlockTime diff --git a/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go b/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go index 63498eba2397a..2071744bd86dc 100644 --- a/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go +++ b/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go @@ -10,8 +10,6 @@ import ( func TestInteropFaultProofs(gt *testing.T) { t := devtest.SerialT(gt) - // TODO(#19180): Unskip this once supernode is updated. - t.Skip("Supernode does not yet return optimistic blocks until blocks are fully validated") sys := presets.NewSimpleInterop(t) sfp.RunSuperFaultProofTest(t, sys) } diff --git a/op-acceptance-tests/tests/superfaultproofs/singlechain.go b/op-acceptance-tests/tests/superfaultproofs/singlechain.go index 4750e87b54141..e59081143669a 100644 --- a/op-acceptance-tests/tests/superfaultproofs/singlechain.go +++ b/op-acceptance-tests/tests/superfaultproofs/singlechain.go @@ -6,6 +6,7 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/crypto" ) @@ -34,7 +35,7 @@ func RunSingleChainSuperFaultProofSmokeTest(t devtest.T, sys *presets.SingleChai // Stop batch submission so safe head stalls, then we have a known boundary. 
c.Batcher.Stop() t.Cleanup(c.Batcher.Start) - awaitSafeHeadsStalled(t, sys.L2CLA) + sys.L2CLA.WaitForStall(types.CrossSafe) endTimestamp := nextTimestampAfterSafeHeads(t, chains) startTimestamp := endTimestamp - 1 @@ -45,9 +46,7 @@ func RunSingleChainSuperFaultProofSmokeTest(t devtest.T, sys *presets.SingleChai c.EL.Reached(eth.Unsafe, target, 60) // L1 head where chain has no batch data at endTimestamp. - respBefore := awaitOptimisticPattern(t, sys.SuperRoots, endTimestamp, - nil, []eth.ChainID{c.ID}) - l1HeadBefore := respBefore.CurrentL1 + l1HeadBefore := l1BlockWithLocalSafeBlocks(t, sys.L1EL, sys.SuperRoots, endTimestamp, nil, []eth.ChainID{c.ID}) // Resume batching so the chain's data at endTimestamp becomes available. c.Batcher.Start() diff --git a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go index 0445b7dcacc16..166cba86ff296 100644 --- a/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go +++ b/op-acceptance-tests/tests/superfaultproofs/superfaultproofs.go @@ -2,6 +2,7 @@ package superfaultproofs import ( "context" + "math" "math/big" "math/rand" "os" @@ -24,6 +25,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/crypto" ) @@ -122,47 +124,37 @@ func latestRequiredL1(resp eth.SuperRootAtTimestampResponse) eth.BlockID { return latest } -// awaitSafeHeadsStalled waits until every node's safe head has stopped advancing -// for at least 10 seconds. -func awaitSafeHeadsStalled(t devtest.T, nodes ...*dsl.L2CLNode) { - var last []eth.BlockID - var stableSince time.Time +// l1BlockWithLocalSafeBlocks finds an L1 block where the specified chains either do or do not have safe blocks. 
+func l1BlockWithLocalSafeBlocks(t devtest.T, l1El *dsl.L1ELNode, sn *dsl.Supernode, timestamp uint64, hasSafe, notSafe []eth.ChainID) eth.BlockID { + t.Logf("Finding L1 block where %v have safe blocks and %v do not", hasSafe, notSafe) + var l1Block eth.BlockID t.Require().Eventually(func() bool { - cur := make([]eth.BlockID, len(nodes)) - for i, n := range nodes { - cur[i] = n.SyncStatus().SafeL2.ID() - } - if slices.Equal(cur, last) { - if stableSince.IsZero() { - stableSince = time.Now() + resp := sn.SuperRootAtTimestamp(timestamp) + + candidate := uint64(math.MaxUint64) + for _, id := range notSafe { + if optimistic, has := resp.OptimisticAtTimestamp[id]; has && optimistic.RequiredL1.Number <= candidate { + candidate = optimistic.RequiredL1.Number - 1 // We need this chain to not have a safe block, so L1 head must be the block before it. } - return time.Since(stableSince) >= 10*time.Second } - last = cur - stableSince = time.Time{} - return false - }, 2*time.Minute, 2*time.Second, "safe heads did not stall in time") -} + // If we didn't have any notSafe chains, we can use the current L1 block. + if candidate == math.MaxUint64 { + candidate = resp.CurrentL1.Number + } -// awaitOptimisticPattern polls the supernode until every chain in mustHave has -// optimistic data and every chain in mustMiss does not. -func awaitOptimisticPattern(t devtest.T, sn *dsl.Supernode, timestamp uint64, mustHave, mustMiss []eth.ChainID) eth.SuperRootAtTimestampResponse { - var resp eth.SuperRootAtTimestampResponse - t.Require().Eventually(func() bool { - resp = sn.SuperRootAtTimestamp(timestamp) - for _, id := range mustHave { - if _, has := resp.OptimisticAtTimestamp[id]; !has { + // Now verify that all the required chains have a safe block at the candidate L1 block. 
+ for _, id := range hasSafe { + if optimistic, has := resp.OptimisticAtTimestamp[id]; !has { return false - } - } - for _, id := range mustMiss { - if _, has := resp.OptimisticAtTimestamp[id]; has { + } else if optimistic.RequiredL1.Number > candidate { return false } } + + l1Block = l1El.BlockRefByNumber(candidate).ID() return true - }, 2*time.Minute, 2*time.Second, "timed out waiting for optimistic pattern") - return resp + }, 2*time.Minute, 2*time.Second, "timed out waiting for l1 block") + return l1Block } // runKonaInteropProgram runs the kona interop fault proof program and checks the result. @@ -485,7 +477,7 @@ func RunUnsafeProposalTest(t devtest.T, sys *presets.SimpleInterop) { // that timestamp maps to a block at or below every chain's safe head. chains[0].Batcher.Stop() defer chains[0].Batcher.Start() - awaitSafeHeadsStalled(t, chains[0].CLNode) + chains[0].CLNode.WaitForStall(types.LocalSafe) stalledStatus, err := chains[0].Rollup.SyncStatus(t.Ctx()) t.Require().NoError(err) @@ -502,7 +494,7 @@ func RunUnsafeProposalTest(t devtest.T, sys *presets.SimpleInterop) { chains[1].Batcher.Stop() defer chains[1].Batcher.Start() - awaitSafeHeadsStalled(t, chains[1].CLNode) + chains[1].CLNode.WaitForStall(types.LocalSafe) endTimestamp := chains[0].Cfg.TimestampForBlock(stalledSafeHead + 1) agreedTimestamp := endTimestamp - 1 @@ -570,13 +562,13 @@ func RunSuperFaultProofTest(t devtest.T, sys *presets.SimpleInterop) { t.Require().Len(chains, 2, "expected exactly 2 interop chains") // -- Stage 1: Freeze batch submission ---------------------------------- + chains[1].Batcher.Stop() // Stop chain 1 first and wait for chains[0] to have at least that local safe head. 
+ t.Cleanup(chains[1].Batcher.Start) + // Wait for safe heads to stall (local safe will continue on chains[0] but interop validation can't progress because chains[1] local safe has stalled) + chains[1].CLNode.WaitForStall(types.CrossSafe) chains[0].Batcher.Stop() - chains[1].Batcher.Stop() - t.Cleanup(func() { - chains[0].Batcher.Start() - chains[1].Batcher.Start() - }) - awaitSafeHeadsStalled(t, sys.L2CLA, sys.L2CLB) + t.Cleanup(chains[0].Batcher.Start) + chains[0].CLNode.WaitForStall(types.LocalSafe) // Wait for chains[0] local safe head to stall endTimestamp := nextTimestampAfterSafeHeads(t, chains) startTimestamp := endTimestamp - 1 @@ -591,15 +583,11 @@ func RunSuperFaultProofTest(t devtest.T, sys *presets.SimpleInterop) { // -- Stage 2: Capture L1 heads at different batch-availability points -- // L1 head where neither chain has batch data at endTimestamp. - respBefore := awaitOptimisticPattern(t, sys.SuperRoots, endTimestamp, - nil, []eth.ChainID{chains[0].ID, chains[1].ID}) - l1HeadBefore := respBefore.CurrentL1 + l1HeadBefore := l1BlockWithLocalSafeBlocks(t, sys.L1EL, sys.SuperRoots, endTimestamp, nil, []eth.ChainID{chains[0].ID, chains[1].ID}) // L1 head where only the first chain has batch data. chains[0].Batcher.Start() - respAfterFirst := awaitOptimisticPattern(t, sys.SuperRoots, endTimestamp, - []eth.ChainID{chains[0].ID}, []eth.ChainID{chains[1].ID}) - l1HeadAfterFirst := respAfterFirst.CurrentL1 + l1HeadAfterFirst := l1BlockWithLocalSafeBlocks(t, sys.L1EL, sys.SuperRoots, endTimestamp, []eth.ChainID{chains[0].ID}, []eth.ChainID{chains[1].ID}) chains[0].Batcher.Stop() // L1 head where both chains have batch data (fully validated). 
diff --git a/op-devstack/dsl/l2_cl.go b/op-devstack/dsl/l2_cl.go index bfd4f562015be..b4b6b4ee77023 100644 --- a/op-devstack/dsl/l2_cl.go +++ b/op-devstack/dsl/l2_cl.go @@ -168,6 +168,25 @@ func (cl *L2CLNode) NotAdvancedFn(lvl types.SafetyLevel, attempts int) CheckFunc } } +// WaitForStall waits until this node's head at the given safety level has stopped advancing +// for at least 10 seconds. +func (cl *L2CLNode) WaitForStall(lvl types.SafetyLevel) { + var last eth.BlockID + var stableSince time.Time + cl.require.Eventuallyf(func() bool { + cur := cl.HeadBlockRef(lvl).ID() + if cur == last { + if stableSince.IsZero() { + stableSince = time.Now() + } + return time.Since(stableSince) >= 10*time.Second + } + last = cur + stableSince = time.Time{} + return false + }, 2*time.Minute, 2*time.Second, "expected %v head to stall", lvl) +} + // ReachedFn returns a lambda that checks the L2CL chain head with given safety level reaches the target block number // Composable with other lambdas to wait in parallel func (cl *L2CLNode) ReachedFn(lvl types.SafetyLevel, target uint64, attempts int) CheckFunc { diff --git a/op-supernode/supernode/activity/superroot/superroot.go b/op-supernode/supernode/activity/superroot/superroot.go index d0717a4982375..9710c406971dd 100644 --- a/op-supernode/supernode/activity/superroot/superroot.go +++ b/op-supernode/supernode/activity/superroot/superroot.go @@ -98,18 +98,19 @@ func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (eth.Supe notFound := false chainIDs := make([]eth.ChainID, 0, len(s.chains)) - // collect verified and optimistic L2 and L1 blocks at the given timestamp + // Collect verified L2 and L1 blocks at the given timestamp for chainID, chain := range s.chains { chainIDs = append(chainIDs, chainID) // verifiedAt returns the L2 block which is fully verified at the given timestamp, and the minimum L1 block at which verification is possible verifiedL2, verifiedL1, err := chain.VerifiedAt(ctx, timestamp) if errors.Is(err,
ethereum.NotFound) { notFound = true - continue // To allow other chains to populate unverified blocks + continue } else if err != nil { s.log.Warn("failed to get verified block", "chain_id", chainID.String(), "err", err) return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to get verified block: %w", err) } + // Verified data is available: update min required L1 and collect the output root if verifiedL1.Number < minVerifiedRequiredL1.Number || minVerifiedRequiredL1 == (eth.BlockID{}) { minVerifiedRequiredL1 = verifiedL1 } @@ -120,15 +121,23 @@ func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (eth.Supe return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to compute output root at L2 block %d for chain ID %v: %w", verifiedL2.Number, chainID, err) } chainOutputs = append(chainOutputs, eth.ChainIDAndOutput{ChainID: chainID, Output: outRoot}) - // Optimistic output is the full output at the optimistic L2 block for the timestamp + } + + // Collect optimistic data for all chains regardless of whether verified data is available. + for chainID, chain := range s.chains { optimisticOut, err := chain.OptimisticOutputAtTimestamp(ctx, timestamp) - if err != nil { + if errors.Is(err, ethereum.NotFound) { + // If optimistic data is also absent, the chain is simply excluded from OptimisticAtTimestamp. 
+ continue + } else if err != nil { s.log.Warn("failed to get optimistic block", "chain_id", chainID.String(), "err", err) return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to get optimistic block at timestamp %v for chain ID %v: %w", timestamp, chainID, err) } // Also include the source L1 for context _, optimisticL1, err := chain.OptimisticAt(ctx, timestamp) - if err != nil { + if errors.Is(err, ethereum.NotFound) { + continue + } else if err != nil { s.log.Warn("failed to get optimistic source L1", "chain_id", chainID.String(), "err", err) return eth.SuperRootAtTimestampResponse{}, fmt.Errorf("failed to get optimistic source L1 at timestamp %v for chain ID %v: %w", timestamp, chainID, err) } diff --git a/op-supernode/supernode/activity/superroot/superroot_test.go b/op-supernode/supernode/activity/superroot/superroot_test.go index b39ae70e79230..771805f312f5b 100644 --- a/op-supernode/supernode/activity/superroot/superroot_test.go +++ b/op-supernode/supernode/activity/superroot/superroot_test.go @@ -218,6 +218,8 @@ func TestSuperroot_AtTimestamp_NotFoundOnVerifiedAt(t *testing.T) { chains := map[eth.ChainID]cc.ChainContainer{ eth.ChainIDFromUInt64(10): &mockCC{ verifiedErr: fmt.Errorf("nope: %w", ethereum.NotFound), + optL2: eth.BlockID{Number: 100}, + optL1: eth.BlockID{Number: 1000}, }, eth.ChainIDFromUInt64(11): &mockCC{ verL2: eth.BlockID{Number: 200}, @@ -233,10 +235,51 @@ func TestSuperroot_AtTimestamp_NotFoundOnVerifiedAt(t *testing.T) { actual, err := api.AtTimestamp(context.Background(), 123) require.NoError(t, err) require.Nil(t, actual.Data) + // Chain 10 has no verified data but optimistic data is available, so it should be present + require.Contains(t, actual.OptimisticAtTimestamp, eth.ChainIDFromUInt64(10)) + require.Contains(t, actual.OptimisticAtTimestamp, eth.ChainIDFromUInt64(11)) +} + +func TestSuperroot_AtTimestamp_NotFoundOnVerifiedAtAndOptimisticAt(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + 
eth.ChainIDFromUInt64(10): &mockCC{ + verifiedErr: fmt.Errorf("nope: %w", ethereum.NotFound), + optimisticErr: ethereum.NotFound, + }, + eth.ChainIDFromUInt64(11): &mockCC{ + verL2: eth.BlockID{Number: 200}, + verL1: eth.BlockID{Number: 1100}, + optL2: eth.BlockID{Number: 200}, + optL1: eth.BlockID{Number: 1100}, + output: eth.Bytes32{0x12}, + status: ð.SyncStatus{CurrentL1: eth.L1BlockRef{Number: 2100}}, + }, + } + s := New(gethlog.New(), chains) + api := &superrootAPI{s: s} + actual, err := api.AtTimestamp(context.Background(), 123) + require.NoError(t, err) + require.Nil(t, actual.Data) + // Chain 10 has neither verified nor optimistic data, so it should be absent require.NotContains(t, actual.OptimisticAtTimestamp, eth.ChainIDFromUInt64(10)) require.Contains(t, actual.OptimisticAtTimestamp, eth.ChainIDFromUInt64(11)) } +func TestSuperroot_AtTimestamp_ErrorOnOptimisticAtWhenVerifiedNotFound(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + verifiedErr: fmt.Errorf("nope: %w", ethereum.NotFound), + optimisticErr: assertErr(), + }, + } + s := New(gethlog.New(), chains) + api := &superrootAPI{s: s} + _, err := api.AtTimestamp(context.Background(), 123) + require.Error(t, err) +} + func TestSuperroot_AtTimestamp_ErrorOnOutputRoot(t *testing.T) { t.Parallel() chains := map[eth.ChainID]cc.ChainContainer{ From 22faa753713d9603713da27050b4d0ef6acdb43e Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 10 Mar 2026 07:34:05 +1000 Subject: [PATCH 084/201] fix(supernode): consider verifier L1 progress in superroot CurrentL1 (#19441) * op-supernode: return partial optimistic blocks in superroot_atTimestamp Previously, when VerifiedAt returned NotFound for a chain, the code would skip optimistic data collection for that chain entirely. This meant OptimisticAtTimestamp would be missing entries for chains that lacked verified data, even if optimistic data was available. 
Now the handler always attempts to collect optimistic data regardless of whether verified data is present. Chains where both verified and optimistic data are unavailable are absent from the map (as documented by the response type comment). The Data field remains nil whenever any chain lacks verified data. New tests cover: - verified NotFound but optimistic available -> chain present in map - verified NotFound and optimistic NotFound -> chain absent from map - verified NotFound and optimistic errors non-NotFound -> returns error https://claude.ai/code/session_01SMasfxHWzRezDREXkV2TzR * op-acceptance-tests: unskip interop fault proof tests (#19180) These tests were blocked on supernode not returning optimistic blocks for chains that hadn't yet been fully verified. Now that the superroot_atTimestamp RPC returns partial optimistic data (whatever blocks are available), the skip condition is resolved. https://claude.ai/code/session_01SMasfxHWzRezDREXkV2TzR * Attempt to fix test. * Fix test. * Rename method. * Better name * Simplify superroot code. * fix(supernode): consider verifier L1 progress in superroot CurrentL1 The superroot_atTimestamp RPC was only considering derivation pipeline CurrentL1 when computing the minimum L1 block. This adds verifier L1 progress to the minimum calculation so callers know that both derivation and verification have processed up to the reported L1 block. 
Closes #18651 Co-Authored-By: Claude Opus 4.6 * fix: remove goimports alignment spacing in mock methods Co-Authored-By: Claude Opus 4.6 * fix: apply goimports formatting to realign mock method blocks Co-Authored-By: Claude Opus 4.6 * fix: goimports formatting in superroot_test.go struct fields Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude --- .../supernode/activity/interop/algo_test.go | 1 + .../activity/interop/interop_test.go | 14 ++-- .../supernode/activity/interop/logdb_test.go | 14 ++-- .../supernode/activity/superroot/superroot.go | 12 ++-- .../activity/superroot/superroot_test.go | 65 +++++++++++++++++-- .../chain_container/chain_container.go | 11 ++++ 6 files changed, 92 insertions(+), 25 deletions(-) diff --git a/op-supernode/supernode/activity/interop/algo_test.go b/op-supernode/supernode/activity/interop/algo_test.go index 7e921e74f3dea..b23d1ac4e1d7a 100644 --- a/op-supernode/supernode/activity/interop/algo_test.go +++ b/op-supernode/supernode/activity/interop/algo_test.go @@ -897,6 +897,7 @@ func (m *algoMockChain) Stop(ctx context.Context) error { retu func (m *algoMockChain) Pause(ctx context.Context) error { return nil } func (m *algoMockChain) Resume(ctx context.Context) error { return nil } func (m *algoMockChain) RegisterVerifier(v activity.VerificationActivity) {} +func (m *algoMockChain) VerifierCurrentL1s() []eth.BlockID { return nil } func (m *algoMockChain) LocalSafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { return eth.L2BlockRef{}, nil } diff --git a/op-supernode/supernode/activity/interop/interop_test.go b/op-supernode/supernode/activity/interop/interop_test.go index 8bfc8d6351e12..59ac6f56676c7 100644 --- a/op-supernode/supernode/activity/interop/interop_test.go +++ b/op-supernode/supernode/activity/interop/interop_test.go @@ -1251,13 +1251,13 @@ func newMockChainContainer(id uint64) *mockChainContainer { return &mockChainContainer{id: eth.ChainIDFromUInt64(id)} } -func (m 
*mockChainContainer) ID() eth.ChainID { return m.id } -func (m *mockChainContainer) Start(ctx context.Context) error { return nil } -func (m *mockChainContainer) Stop(ctx context.Context) error { return nil } -func (m *mockChainContainer) Pause(ctx context.Context) error { return nil } -func (m *mockChainContainer) Resume(ctx context.Context) error { return nil } -func (m *mockChainContainer) RegisterVerifier(v activity.VerificationActivity) { -} +func (m *mockChainContainer) ID() eth.ChainID { return m.id } +func (m *mockChainContainer) Start(ctx context.Context) error { return nil } +func (m *mockChainContainer) Stop(ctx context.Context) error { return nil } +func (m *mockChainContainer) Pause(ctx context.Context) error { return nil } +func (m *mockChainContainer) Resume(ctx context.Context) error { return nil } +func (m *mockChainContainer) RegisterVerifier(v activity.VerificationActivity) {} +func (m *mockChainContainer) VerifierCurrentL1s() []eth.BlockID { return nil } func (m *mockChainContainer) LocalSafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { // Simulate slow chains. Sleep is outside the lock so it doesn't block other // concurrent mock operations during tests. 
diff --git a/op-supernode/supernode/activity/interop/logdb_test.go b/op-supernode/supernode/activity/interop/logdb_test.go index d9170632ee2ba..4ed65b2cafd9f 100644 --- a/op-supernode/supernode/activity/interop/logdb_test.go +++ b/op-supernode/supernode/activity/interop/logdb_test.go @@ -643,13 +643,13 @@ type statefulMockChainContainer struct { fetchReceiptsFn func(blockID eth.BlockID) (eth.BlockInfo, types.Receipts, error) } -func (m *statefulMockChainContainer) ID() eth.ChainID { return m.id } -func (m *statefulMockChainContainer) Start(ctx context.Context) error { return nil } -func (m *statefulMockChainContainer) Stop(ctx context.Context) error { return nil } -func (m *statefulMockChainContainer) Pause(ctx context.Context) error { return nil } -func (m *statefulMockChainContainer) Resume(ctx context.Context) error { return nil } -func (m *statefulMockChainContainer) RegisterVerifier(v activity.VerificationActivity) { -} +func (m *statefulMockChainContainer) ID() eth.ChainID { return m.id } +func (m *statefulMockChainContainer) Start(ctx context.Context) error { return nil } +func (m *statefulMockChainContainer) Stop(ctx context.Context) error { return nil } +func (m *statefulMockChainContainer) Pause(ctx context.Context) error { return nil } +func (m *statefulMockChainContainer) Resume(ctx context.Context) error { return nil } +func (m *statefulMockChainContainer) RegisterVerifier(v activity.VerificationActivity) {} +func (m *statefulMockChainContainer) VerifierCurrentL1s() []eth.BlockID { return nil } func (m *statefulMockChainContainer) LocalSafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { return m.blockAtTimestampFn(ts) } diff --git a/op-supernode/supernode/activity/superroot/superroot.go b/op-supernode/supernode/activity/superroot/superroot.go index 9710c406971dd..8c922a6a6ce9f 100644 --- a/op-supernode/supernode/activity/superroot/superroot.go +++ b/op-supernode/supernode/activity/superroot/superroot.go @@ -57,10 +57,8 @@ 
func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (eth.Supe finalizedInitialized bool chainOutputs = make([]eth.ChainIDAndOutput, 0, len(s.chains)) ) - // Get current l1s - // this informs callers that the chains local views have considered at least up to this L1 block - // TODO(#18651): Currently there are no verifiers to consider, but once there are, this needs to be updated to consider if - // they have also processed the L1 data. + // Get current L1s — the minimum L1 block that all derivation pipelines and verifiers have processed. + // This informs callers that the chains' local views have considered at least up to this L1 block. for chainID, chain := range s.chains { status, err := chain.SyncStatus(ctx) if err != nil { @@ -75,6 +73,12 @@ func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (eth.Supe if currentL1.Number < minCurrentL1.Number || minCurrentL1 == (eth.BlockID{}) { minCurrentL1 = currentL1 } + // Also consider the L1 progress of any registered verifiers. + for _, verifierL1 := range chain.VerifierCurrentL1s() { + if verifierL1.Number < minCurrentL1.Number || minCurrentL1 == (eth.BlockID{}) { + minCurrentL1 = verifierL1 + } + } // Conservative aggregation across chains: take the minimum timestamps. // If any chain has a zero timestamp (not initialized), the aggregate is zero. 
if !safeInitialized { diff --git a/op-supernode/supernode/activity/superroot/superroot_test.go b/op-supernode/supernode/activity/superroot/superroot_test.go index 771805f312f5b..348a45df2bc49 100644 --- a/op-supernode/supernode/activity/superroot/superroot_test.go +++ b/op-supernode/supernode/activity/superroot/superroot_test.go @@ -17,12 +17,13 @@ import ( ) type mockCC struct { - verL2 eth.BlockID - verL1 eth.BlockID - optL2 eth.BlockID - optL1 eth.BlockID - output eth.Bytes32 - status *eth.SyncStatus + verL2 eth.BlockID + verL1 eth.BlockID + optL2 eth.BlockID + optL1 eth.BlockID + output eth.Bytes32 + status *eth.SyncStatus + verifierL1s []eth.BlockID verifiedErr error outputErr error @@ -35,7 +36,9 @@ func (m *mockCC) Stop(ctx context.Context) error { return nil } func (m *mockCC) Pause(ctx context.Context) error { return nil } func (m *mockCC) Resume(ctx context.Context) error { return nil } -func (m *mockCC) RegisterVerifier(v activity.VerificationActivity) { +func (m *mockCC) RegisterVerifier(v activity.VerificationActivity) {} +func (m *mockCC) VerifierCurrentL1s() []eth.BlockID { + return m.verifierL1s } func (m *mockCC) LocalSafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { @@ -319,5 +322,53 @@ func TestSuperroot_AtTimestamp_EmptyChains(t *testing.T) { require.Len(t, out.OptimisticAtTimestamp, 0) } +func TestSuperroot_AtTimestamp_VerifierL1ReducesCurrentL1(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + verL2: eth.BlockID{Number: 100}, + verL1: eth.BlockID{Number: 1000}, + optL2: eth.BlockID{Number: 100}, + optL1: eth.BlockID{Number: 1000}, + output: eth.Bytes32{}, + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 2000}, + }, + // Verifier has only processed up to L1 block 1500, which is less than derivation's 2000 + verifierL1s: []eth.BlockID{{Number: 1500}}, + }, + } + s := New(gethlog.New(), chains) + api := &superrootAPI{s: s} + out, err := 
api.AtTimestamp(context.Background(), 123) + require.NoError(t, err) + // CurrentL1 should be 1500 (verifier), not 2000 (derivation) + require.Equal(t, uint64(1500), out.CurrentL1.Number) +} + +func TestSuperroot_AtTimestamp_VerifierL1HigherThanDerivationDoesNotIncrease(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + verL2: eth.BlockID{Number: 100}, + verL1: eth.BlockID{Number: 1000}, + optL2: eth.BlockID{Number: 100}, + optL1: eth.BlockID{Number: 1000}, + output: eth.Bytes32{}, + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 2000}, + }, + // Verifier is ahead of derivation — should not increase the minimum + verifierL1s: []eth.BlockID{{Number: 3000}}, + }, + } + s := New(gethlog.New(), chains) + api := &superrootAPI{s: s} + out, err := api.AtTimestamp(context.Background(), 123) + require.NoError(t, err) + // CurrentL1 should still be 2000 (derivation), since verifier is ahead + require.Equal(t, uint64(2000), out.CurrentL1.Number) +} + // assertErr returns a generic error instance used to signal mock failures. func assertErr() error { return fmt.Errorf("mock error") } diff --git a/op-supernode/supernode/chain_container/chain_container.go b/op-supernode/supernode/chain_container/chain_container.go index e5c1308e23d00..1631c12f1cdf9 100644 --- a/op-supernode/supernode/chain_container/chain_container.go +++ b/op-supernode/supernode/chain_container/chain_container.go @@ -46,6 +46,9 @@ type ChainContainer interface { // invalidatedBlock is the block that triggered the rewind and is passed to reset callbacks. RewindEngine(ctx context.Context, timestamp uint64, invalidatedBlock eth.BlockRef) error RegisterVerifier(v activity.VerificationActivity) + // VerifierCurrentL1s returns the CurrentL1 from each registered verifier. + // This allows callers to determine the minimum L1 block that all verifiers have processed. 
+ VerifierCurrentL1s() []eth.BlockID // FetchReceipts fetches the receipts for a given block by hash. // Returns block info and receipts, or an error if the block or receipts cannot be fetched. FetchReceipts(ctx context.Context, blockHash eth.BlockID) (eth.BlockInfo, types.Receipts, error) @@ -158,6 +161,14 @@ func (c *simpleChainContainer) RegisterVerifier(v activity.VerificationActivity) c.verifiers = append(c.verifiers, v) } +func (c *simpleChainContainer) VerifierCurrentL1s() []eth.BlockID { + result := make([]eth.BlockID, len(c.verifiers)) + for i, v := range c.verifiers { + result[i] = v.CurrentL1() + } + return result +} + // defaultVirtualNodeFactory is the default factory that creates a real VirtualNode func defaultVirtualNodeFactory(cfg *opnodecfg.Config, log gethlog.Logger, initOverload *rollupNode.InitializationOverrides, appVersion string, superAuthority rollup.SuperAuthority) virtual_node.VirtualNode { initOverload.SuperAuthority = superAuthority From f5dc94934f7a099af0c5edb5f26999662cc271dc Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Mon, 9 Mar 2026 19:53:16 -0400 Subject: [PATCH 085/201] op-devstack: fix flashblocks preset sequencer EL selection (#19458) --- op-devstack/presets/flashblocks.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/op-devstack/presets/flashblocks.go b/op-devstack/presets/flashblocks.go index 06baffbe287fc..2ce8b1c522d49 100644 --- a/op-devstack/presets/flashblocks.go +++ b/op-devstack/presets/flashblocks.go @@ -52,7 +52,11 @@ func NewSingleChainWithFlashblocks(t devtest.T) *SingleChainWithFlashblocks { l1Net := system.L1Network(match.FirstL1Network) l2 := system.L2Network(match.Assume(t, match.L2ChainA)) sequencerCL := l2.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - sequencerEL := l2.L2ELNode(match.Assume(t, match.EngineFor(sequencerCL))) + sequencerEL := l2.L2ELNode(match.Assume(t, match.MatchElemFn[stack.L2ELNode](func(el stack.L2ELNode) bool { + // In flashblocks topologies 
the active CL may be linked through rollup-boost. + // Selecting by sequencer key keeps us on the sequencer EL in both direct and proxied setups. + return el.ID().Key() == sequencerCL.ID().Key() + }))) var challengerCfg *challengerConfig.Config if len(l2.L2Challengers()) > 0 { challengerCfg = l2.L2Challengers()[0].Config() From f7650d58f968e876891d2e1b00f4e2bdd6ecc68f Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 10 Mar 2026 10:30:44 +1000 Subject: [PATCH 086/201] fix(ci): capture vm-compat findings JSON as CI artifact (#19442) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(ci): capture vm-compat findings JSON as CI artifact When the analyze-op-program-client job fails, CircleCI truncates the Docker build log output, losing the beginning of the findings JSON array. This makes it impossible to review the full findings. Instead of relying on stdout, the analysis now writes findings to a file that gets extracted from the Docker image and stored as a CI artifact. Changes: - run-static-analysis.sh: write findings to $FINDINGS_OUTPUT_PATH if set - Dockerfile.vmcompat: capture analysis exit code instead of failing the build, so findings can be extracted from the image - Makefile run-vm-compat: tag image, extract findings via docker cp, then check exit code and fail appropriately - CircleCI: add store_artifacts step (when: always) for findings JSON Co-Authored-By: Claude Opus 4.6 * refactor(op-program): use docker export stage instead of docker cp Use a FROM scratch export stage with --output to write findings files directly to the host, matching the pattern in Dockerfile.repro. This replaces the docker create/cp/rm dance with a simpler approach. Co-Authored-By: Claude Opus 4.6 * refactor(op-program): move exit code check into Dockerfile Add a `check` stage that fails the build if analysis had a non-zero exit code. 
The Makefile now runs two cached builds: `--target export` to extract findings, then `--target check` to fail on errors. This removes all the shell exit-code parsing from the Makefile. Co-Authored-By: Claude Opus 4.6 * refactor(op-program): single docker invocation with check stage Chain export → check → export so a single docker build --output handles both: the check stage fails if analysis found issues, and the export stage (depending on check) only writes findings on success. Restores the commented-out go1.25 build line. Co-Authored-By: Claude Opus 4.6 * refactor(op-program): always export findings, check exit code after Export both findings and exit code via the scratch export stage, then check the exit code in the Makefile after docker build completes. This ensures findings are always available as CI artifacts even when analysis fails. Co-Authored-By: Claude Opus 4.6 * fix(op-program): add commented exit check for go1.25 vm-compat build Co-Authored-By: Claude Opus 4.6 * fix(ci): use FINDINGS_OUTPUT_PATH env var instead of hardcoded path Co-Authored-By: Claude Opus 4.6 * fix(ci): revert COPY to hardcoded path — ENV not visible across stages $FINDINGS_OUTPUT_PATH is set in the builder stage but the scratch export stage doesn't inherit it. COPY expanded to an empty source, copying the entire builder filesystem and hitting permission errors on Go module cache directories. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .circleci/continue/main.yml | 4 ++++ op-program/Dockerfile.vmcompat | 11 +++++++++-- op-program/Makefile | 14 ++++++++++++-- op-program/scripts/run-static-analysis.sh | 4 ++++ 4 files changed, 29 insertions(+), 4 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 86587ce1c371f..a307355684b61 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -2534,6 +2534,10 @@ jobs: command: | make run-vm-compat working_directory: op-program + - store_artifacts: + path: op-program/bin/vm-compat-output/vm-compat-findings.json + destination: vm-compat-findings.json + when: always op-program-compat: docker: diff --git a/op-program/Dockerfile.vmcompat b/op-program/Dockerfile.vmcompat index 4045e300dcb5d..96c4caaad4f01 100644 --- a/op-program/Dockerfile.vmcompat +++ b/op-program/Dockerfile.vmcompat @@ -25,7 +25,14 @@ COPY ./go.mod ./go.sum ./ RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build go mod download COPY . /app -# Run the op-program analysis +# Run the op-program analysis, capturing findings even on failure. +# The exit code is saved so the caller can extract findings before failing. WORKDIR /app/op-program +ENV FINDINGS_OUTPUT_PATH=/app/op-program/vm-compat-findings.json RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \ - make "analyze-op-program-client-${VM_TARGET}" + touch $FINDINGS_OUTPUT_PATH && \ + make "analyze-op-program-client-${VM_TARGET}"; echo $? > /app/op-program/vm-compat-exit-code + +FROM scratch AS export +COPY --from=builder /app/op-program/vm-compat-findings.json . +COPY --from=builder /app/op-program/vm-compat-exit-code . 
diff --git a/op-program/Makefile b/op-program/Makefile index a3c93af29fd3c..0da54691a4a0f 100644 --- a/op-program/Makefile +++ b/op-program/Makefile @@ -146,10 +146,20 @@ analyze-op-program-client-current: analyze-op-program-client-next: ./scripts/run-static-analysis.sh ./vm-profiles/cannon-multithreaded-64-next.yaml ./compatibility-test/baseline-cannon-multithreaded-64-next.json +VM_COMPAT_OUTPUT_DIR := bin/vm-compat-output + run-vm-compat: - @docker build --build-arg GO_VERSION=1.24.10-alpine3.21 --build-arg VM_TARGET=current --progress plain -f Dockerfile.vmcompat ../ + @rm -rf "$(VM_COMPAT_OUTPUT_DIR)" && mkdir -p "$(VM_COMPAT_OUTPUT_DIR)" + @docker build \ + --build-arg GO_VERSION=1.24.10-alpine3.21 \ + --build-arg VM_TARGET=current \ + --progress plain \ + --output "$(VM_COMPAT_OUTPUT_DIR)" \ + -f Dockerfile.vmcompat ../ + @exit $$(cat "$(VM_COMPAT_OUTPUT_DIR)/vm-compat-exit-code") # TODO(#18334): Uncomment once vm-compat supports go1.25 - #@docker build --build-arg GO_VERSION=1.25.4-alpine3.21 --build-arg VM_TARGET=next --progress plain -f Dockerfile.vmcompat ../ + #@docker build --build-arg GO_VERSION=1.25.4-alpine3.21 --build-arg VM_TARGET=next --progress plain --output "$(VM_COMPAT_OUTPUT_DIR)" -f Dockerfile.vmcompat ../ + #@exit $$(cat "$(VM_COMPAT_OUTPUT_DIR)/vm-compat-exit-code") .PHONY: \ op-program \ diff --git a/op-program/scripts/run-static-analysis.sh b/op-program/scripts/run-static-analysis.sh index 210af63b4fd0d..a47e968c9136e 100755 --- a/op-program/scripts/run-static-analysis.sh +++ b/op-program/scripts/run-static-analysis.sh @@ -57,6 +57,10 @@ ISSUE_COUNT=$(jq 'length' "$OUTPUT_FILE") if [ "$ISSUE_COUNT" -gt 0 ]; then echo "❌ Analysis found $ISSUE_COUNT issues!" 
cat "$OUTPUT_FILE" + if [ -n "$FINDINGS_OUTPUT_PATH" ]; then + cp "$OUTPUT_FILE" "$FINDINGS_OUTPUT_PATH" + echo "Findings written to $FINDINGS_OUTPUT_PATH" + fi rm -f "$OUTPUT_FILE" exit 1 else From b894aa134c39c1dcdc91ea073caf027a9cce4177 Mon Sep 17 00:00:00 2001 From: smartcontracts <14298799+smartcontracts@users.noreply.github.com> Date: Mon, 9 Mar 2026 21:39:47 -0400 Subject: [PATCH 087/201] chore(ci): skip kona-build-release on PRs that don't touch Rust (#19459) The kona-build-release job takes ~9 minutes even when no Rust files changed, because cargo rebuilds from scratch on every fresh checkout (mtime-based fingerprinting defeats the target cache). Fix: use CircleCI path-filtering to detect rust/ changes. On feature branches where rust/ is untouched, the build step checks for cached binaries in the restored target cache and exits early if found. Safety: - develop/main always build fresh (never skip) - If cached binaries are missing, falls through to cargo build - All other rust-build-binary invocations default to building (opt-in) - Flake-shake workflow is unaffected (doesn't pass the parameter) Co-authored-by: smartcontracts Co-authored-by: Claude Opus 4.6 --- .circleci/config.yml | 1 + .circleci/continue/main.yml | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) diff --git a/.circleci/config.yml b/.circleci/config.yml index 73ff5875102aa..a9efa3d40b1c6 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -123,6 +123,7 @@ workflows: .* c-flake-shake-iterations << pipeline.parameters.flake-shake-iterations >> .circleci/continue/main.yml .* c-flake-shake-workers << pipeline.parameters.flake-shake-workers >> .circleci/continue/main.yml .* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/main.yml + rust/.* c-rust_files_changed true .circleci/continue/main.yml (rust|\.circleci)/.* c-rust_ci_dispatch << pipeline.parameters.rust_ci_dispatch >> .circleci/continue/rust-ci.yml (rust|\.circleci)/.* 
c-default_docker_image << pipeline.parameters.default_docker_image >> .circleci/continue/rust-ci.yml diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index a307355684b61..14318aa8d7352 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -71,6 +71,12 @@ parameters: c-flake-shake-workers: type: integer default: 50 + # Set to true by path-filtering when files under rust/ change. + # When false on feature branches, kona-build-release skips cargo build and + # reuses cached binaries — saving ~9 minutes on PRs that don't touch Rust. + c-rust_files_changed: + type: boolean + default: false # go-cache-version can be used as a cache buster when making breaking changes to caching strategy c-go-cache-version: type: string @@ -576,6 +582,10 @@ commands: description: "Whether to save the cache at the end of the build" type: boolean default: false + rust_files_changed: + description: "Set to false to skip cargo build and reuse cached binaries (feature branches only)." + type: boolean + default: true steps: - utils/checkout-with-mise: checkout-method: blobless @@ -596,6 +606,22 @@ commands: PWD=$(pwd) export CARGO_TARGET_DIR="$PWD/<< parameters.directory >>/target" echo "CARGO_TARGET_DIR: $CARGO_TARGET_DIR" + + # On feature branches where rust/ hasn't changed, reuse binaries from + # the restored target cache instead of running cargo build (~9 min saving). + # Always build on develop/main as a safety backstop. 
+ if [ "<< parameters.rust_files_changed >>" = "false" ] \ + && [ "$CIRCLE_BRANCH" != "develop" ] \ + && [ "$CIRCLE_BRANCH" != "main" ]; then + binary_dir="$CARGO_TARGET_DIR/<< parameters.profile >>" + if [ -d "$binary_dir" ] && ls "$binary_dir"/* >/dev/null 2>&1; then + echo "No rust/ changes on feature branch — skipping cargo build" + ls -lh "$binary_dir"/ | head -20 + exit 0 + fi + echo "WARNING: no cached binaries found, building from source" + fi + export PROFILE="--profile << parameters.profile >>" # Debug profile is specified as "debug" in the config/target, but cargo build expects "dev" @@ -676,6 +702,10 @@ jobs: description: "Whether to persist the built binaries to the CircleCI workspace" type: boolean default: false + rust_files_changed: + description: "Set to false to skip cargo build and reuse cached binaries (feature branches only)." + type: boolean + default: true steps: - rust-build: directory: << parameters.directory >> @@ -687,6 +717,7 @@ jobs: binary: << parameters.binary >> toolchain: << parameters.toolchain >> save_cache: << parameters.save_cache >> + rust_files_changed: << parameters.rust_files_changed >> - when: condition: << parameters.persist_to_workspace >> steps: @@ -3140,6 +3171,7 @@ workflows: features: "default" save_cache: true persist_to_workspace: true + rust_files_changed: << pipeline.parameters.c-rust_files_changed >> context: - circleci-repo-readonly-authenticated-github-token - rust-build-submodule: &rust-build-op-rbuilder From 855027e76080bb6d1d1ad83210313b4fe89f7c77 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Mon, 9 Mar 2026 23:57:29 -0400 Subject: [PATCH 088/201] chore(ci): gate acceptance-tests in rust. 
add jovian in base acceptance tests set (#19222) --- .circleci/config.yml | 6 +- .circleci/continue/main.yml | 102 ++++++++++++++++-- op-acceptance-tests/acceptance-tests.yaml | 17 +-- .../tests/base/eth_simulate_test.go | 15 +-- op-devstack/sysgo/l1_nodes_subprocess.go | 17 +-- 5 files changed, 118 insertions(+), 39 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a9efa3d40b1c6..a873095572747 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -130,9 +130,9 @@ workflows: (rust|\.circleci)/.* c-base_image << pipeline.parameters.base_image >> .circleci/continue/rust-ci.yml (rust|\.circleci)/.* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/rust-ci.yml - (rust|\.circleci)/.* c-rust_e2e_dispatch << pipeline.parameters.rust_e2e_dispatch >> .circleci/continue/rust-e2e.yml - (rust|\.circleci)/.* c-default_docker_image << pipeline.parameters.default_docker_image >> .circleci/continue/rust-e2e.yml - (rust|\.circleci)/.* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/rust-e2e.yml + (rust|op-e2e|\.circleci)/.* c-rust_e2e_dispatch << pipeline.parameters.rust_e2e_dispatch >> .circleci/continue/rust-e2e.yml + (rust|op-e2e|\.circleci)/.* c-default_docker_image << pipeline.parameters.default_docker_image >> .circleci/continue/rust-e2e.yml + (rust|op-e2e|\.circleci)/.* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/rust-e2e.yml setup-tag: when: diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 14318aa8d7352..b7eacabad9aa7 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -415,6 +415,7 @@ commands: ROOT_DIR="$(pwd)" BIN_DIR="$ROOT_DIR/.circleci-cache/rust-binaries" echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" + echo "export OP_RETH_EXEC_PATH=$ROOT_DIR/rust/target/release/op-reth" >> "$BASH_ENV" echo "export 
RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" @@ -1943,6 +1944,14 @@ jobs: description: The gate to run the acceptance tests against. This gate should be defined in op-acceptance-tests/acceptance-tests.yaml. type: string default: "" + l2_cl_kind: + description: "L2 consensus layer client (op-node or kona)" + type: string + default: "op-node" + l2_el_kind: + description: "L2 execution layer client (op-geth or op-reth)" + type: string + default: "op-geth" no_output_timeout: description: Timeout for when CircleCI kills the job if there's no output type: string @@ -1985,6 +1994,11 @@ jobs: command: | echo 'export CIRCLECI_PIPELINE_SCHEDULE_NAME="<< pipeline.schedule.name >>"' >> $BASH_ENV echo 'export CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH="<< pipeline.parameters.c-sync_test_op_node_dispatch >>"' >> $BASH_ENV + - run: + name: Configure L2 stack + command: | + echo "export DEVSTACK_L2CL_KIND=<< parameters.l2_cl_kind >>" >> "$BASH_ENV" + echo "export DEVSTACK_L2EL_KIND=<< parameters.l2_el_kind >>" >> "$BASH_ENV" # Run the acceptance tests - run: name: Run acceptance tests (gate=<< parameters.gate >>) @@ -2051,6 +2065,14 @@ jobs: description: The gate to run the acceptance tests against. This gate should be defined in op-acceptance-tests/acceptance-tests.yaml. 
type: string default: "" + l2_cl_kind: + description: "L2 consensus layer client (op-node or kona)" + type: string + default: "op-node" + l2_el_kind: + description: "L2 execution layer client (op-geth or op-reth)" + type: string + default: "op-geth" no_output_timeout: description: Timeout for when CircleCI kills the job if there's no output type: string @@ -2070,9 +2092,16 @@ jobs: command: | ROOT_DIR="$(pwd)" BIN_DIR="$ROOT_DIR/.circleci-cache/rust-binaries" + echo "export RUST_BINARY_PATH_KONA_NODE=$ROOT_DIR/rust/target/release/kona-node" >> "$BASH_ENV" + echo "export OP_RETH_EXEC_PATH=$ROOT_DIR/rust/target/release/op-reth" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" + - run: + name: Configure L2 stack + command: | + echo "export DEVSTACK_L2CL_KIND=<< parameters.l2_cl_kind >>" >> "$BASH_ENV" + echo "export DEVSTACK_L2EL_KIND=<< parameters.l2_el_kind >>" >> "$BASH_ENV" # Restore cached Go modules - restore_cache: keys: @@ -2158,6 +2187,14 @@ jobs: gate: type: string default: "flake-shake" + l2_cl_kind: + description: "L2 consensus layer client (op-node or kona)" + type: string + default: "op-node" + l2_el_kind: + description: "L2 execution layer client (op-geth or op-reth)" + type: string + default: "op-geth" machine: image: ubuntu-2404:current resource_class: xlarge @@ -2174,8 +2211,14 @@ jobs: ROOT_DIR="$(pwd)" BIN_DIR="$ROOT_DIR/.circleci-cache/rust-binaries" echo "export RUST_BINARY_PATH_KONA_NODE=$BIN_DIR/kona-node" >> "$BASH_ENV" + echo "export OP_RETH_EXEC_PATH=$ROOT_DIR/rust/target/release/op-reth" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_OP_RBUILDER=$BIN_DIR/op-rbuilder" >> "$BASH_ENV" echo "export RUST_BINARY_PATH_ROLLUP_BOOST=$BIN_DIR/rollup-boost" >> "$BASH_ENV" + - run: + name: Configure L2 stack + command: | + echo "export DEVSTACK_L2CL_KIND=<< parameters.l2_cl_kind >>" >> "$BASH_ENV" + echo "export DEVSTACK_L2EL_KIND=<< parameters.l2_el_kind >>" >> "$BASH_ENV" - restore_cache: keys: - go-mod-v1-{{ checksum "go.sum" 
}} @@ -2392,6 +2435,16 @@ jobs: - "op-program/bin/meta*" - "rust/kona/prestate-artifacts-*/" + # Aggregator job - allows downstream jobs to depend on a single memory-all job. + memory-all: + docker: + - image: <> + resource_class: small + steps: + - run: + name: All memory-all tests passed + command: echo "All memory-all acceptance test variants passed" + # Aggregator job - allows downstream jobs to depend on a single job instead of listing all build jobs. rust-binaries-for-sysgo: docker: @@ -3164,11 +3217,11 @@ workflows: ignore-dirs: ./packages/contracts-bedrock/lib context: - circleci-repo-readonly-authenticated-github-token - - rust-build-binary: &kona-build-release - name: kona-build-release + # Acceptance test jobs (formerly in separate acceptance-tests workflow) + - rust-build-binary: + name: rust-workspace-binaries directory: rust profile: "release" - features: "default" save_cache: true persist_to_workspace: true rust_files_changed: << pipeline.parameters.c-rust_files_changed >> @@ -3191,15 +3244,15 @@ workflows: - circleci-repo-readonly-authenticated-github-token - rust-binaries-for-sysgo: requires: - - kona-build-release + - rust-workspace-binaries - rust-build-op-rbuilder - rust-build-rollup-boost - go-binaries-for-sysgo: context: - circleci-repo-readonly-authenticated-github-token - # IN-MEMORY (all) + # IN-MEMORY (all) - op-node/op-geth - op-acceptance-tests: - name: memory-all + name: memory-all-opn-op-geth gate: "" # Empty gate = gateless mode no_output_timeout: 120m # Allow longer runs for memory-all gate context: @@ -3211,6 +3264,43 @@ workflows: - cannon-prestate - rust-binaries-for-sysgo - go-binaries-for-sysgo + # IN-MEMORY (all) - op-node/op-reth + - op-acceptance-tests: + name: memory-all-opn-op-reth + gate: "base" + l2_el_kind: op-reth + no_output_timeout: 120m # Allow longer runs for memory-all gate + context: + - circleci-repo-readonly-authenticated-github-token + - slack + - discord + requires: + - contracts-bedrock-build + - 
cannon-prestate + - rust-binaries-for-sysgo + - go-binaries-for-sysgo + # IN-MEMORY (all) - kona/op-reth + - op-acceptance-tests: + name: memory-all-kona-op-reth + gate: "base" + l2_cl_kind: kona + l2_el_kind: op-reth + no_output_timeout: 120m # Allow longer runs for memory-all gate + context: + - circleci-repo-readonly-authenticated-github-token + - slack + - discord + requires: + - contracts-bedrock-build + - cannon-prestate + - rust-binaries-for-sysgo + - go-binaries-for-sysgo + # Aggregator for all memory-all acceptance test variants + - memory-all: + requires: + - memory-all-opn-op-geth + - memory-all-opn-op-reth + - memory-all-kona-op-reth # Generate flaky test report - generate-flaky-report: name: generate-flaky-tests-report diff --git a/op-acceptance-tests/acceptance-tests.yaml b/op-acceptance-tests/acceptance-tests.yaml index ead1240580fd8..504083590a455 100644 --- a/op-acceptance-tests/acceptance-tests.yaml +++ b/op-acceptance-tests/acceptance-tests.yaml @@ -86,6 +86,14 @@ gates: owner: "anton evangelatov" target_gate: "depreqres" + - id: jovian + description: "Jovian network tests." + tests: + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian/bpo2 + timeout: 10m + - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian/pectra + timeout: 10m + - id: isthmus description: "Isthmus network tests." tests: @@ -104,6 +112,7 @@ gates: description: "Sanity/smoke acceptance tests for all networks." inherits: - isthmus + - jovian tests: - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/deposit @@ -177,14 +186,6 @@ gates: - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el timeout: 30m - - id: jovian - inherits: - - base - description: "Jovian network tests." 
- tests: - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian/... - timeout: 10m - - id: cgt description: "Custom Gas Token (CGT) network tests." tests: diff --git a/op-acceptance-tests/tests/base/eth_simulate_test.go b/op-acceptance-tests/tests/base/eth_simulate_test.go index 0d9fd4e17ec46..4bfaa808d98ad 100644 --- a/op-acceptance-tests/tests/base/eth_simulate_test.go +++ b/op-acceptance-tests/tests/base/eth_simulate_test.go @@ -15,7 +15,7 @@ func TestEthSimulateV1(gt *testing.T) { ctx := t.Ctx() type SimulateParams struct { - ReturnFullTransactions bool + ReturnFullTransactions bool `json:"returnFullTransactions"` BlockStateCalls []any `json:"blockStateCalls"` } @@ -35,9 +35,6 @@ func TestEthSimulateV1(gt *testing.T) { } // wait until the chain mines at least one block - // (known limitation that we cannot simulate on top of the genesis block, - // Since the EL will just reuse the l1 attributes tx from the previous block - // and there is no such transaction for the genesis block). 
sys.L1Network.WaitForBlock() // Require the RPC call to succeed @@ -68,14 +65,4 @@ func TestEthSimulateV1(gt *testing.T) { bgu, err := hexutil.DecodeUint64(respBlock["blobGasUsed"].(string)) require.NoError(t, err) require.NotZero(t, bgu) - - err = rpcClient.CallContext( - ctx, - &resp, - "eth_simulateV1", - params, - "0x0", // Genesis block - ) - t.Log("resp", resp) - require.Error(t, err, "eth_simulateV1 cannot be used on the genesis block") } diff --git a/op-devstack/sysgo/l1_nodes_subprocess.go b/op-devstack/sysgo/l1_nodes_subprocess.go index 43268d6618ca6..041fe5c4e015d 100644 --- a/op-devstack/sysgo/l1_nodes_subprocess.go +++ b/op-devstack/sysgo/l1_nodes_subprocess.go @@ -87,7 +87,7 @@ func (n *ExternalL1Geth) Start() { n.p.Cleanup(func() { n.userProxy.Close() }) - n.userRPC = "ws://" + n.userProxy.Addr() + n.userRPC = "http://" + n.userProxy.Addr() } logOut := logpipe.ToLogger(n.p.Logger().New("src", "stdout")) logErr := logpipe.ToLogger(n.p.Logger().New("src", "stderr")) @@ -95,17 +95,18 @@ func (n *ExternalL1Geth) Start() { authRPC := make(chan string, 1) onLogEntry := func(e logpipe.LogEntry) { switch e.LogMessage() { - case "WebSocket enabled": - select { - case userRPC <- e.FieldValue("url").(string): - default: - } case "HTTP server started": - if e.FieldValue("auth").(bool) { + auth, _ := e.FieldValue("auth").(bool) + if auth { select { case authRPC <- "http://" + e.FieldValue("endpoint").(string): default: } + } else { + select { + case userRPC <- "http://" + e.FieldValue("endpoint").(string): + default: + } } } } @@ -182,7 +183,7 @@ func WithL1NodesSubprocess(id stack.ComponentID, clID stack.ComponentID) stack.O args := []string{ "--log.format", "json", "--datadir", dataDirPath, - "--ws", "--ws.addr", "127.0.0.1", "--ws.port", "0", "--ws.origins", "*", "--ws.api", "admin,debug,eth,net,txpool", + "--http", "--http.addr", "127.0.0.1", "--http.port", "0", "--http.api", "admin,debug,eth,net,txpool", "--authrpc.addr", "127.0.0.1", "--authrpc.port", "0", 
"--authrpc.jwtsecret", jwtPath, "--ipcdisable", "--port", "0", From 2601115337986d2d577a5935ea85c9f522310d93 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Tue, 10 Mar 2026 14:26:35 +1000 Subject: [PATCH 089/201] fix(ci): fix SIGPIPE exit code 141 in kona-build-release skip logic (#19467) The `ls | head -20` pipe in the early-exit path fails with exit code 141 (SIGPIPE) when ls output exceeds 20 lines, because CircleCI runs bash with `-eo pipefail`. Add `|| true` to suppress the broken pipe. Co-authored-by: Claude Opus 4.6 --- .circleci/continue/main.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index b7eacabad9aa7..8b44d578b6853 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -617,7 +617,7 @@ commands: binary_dir="$CARGO_TARGET_DIR/<< parameters.profile >>" if [ -d "$binary_dir" ] && ls "$binary_dir"/* >/dev/null 2>&1; then echo "No rust/ changes on feature branch — skipping cargo build" - ls -lh "$binary_dir"/ | head -20 + ls -lh "$binary_dir"/ | head -20 || true exit 0 fi echo "WARNING: no cached binaries found, building from source" From 4b692e4cf6d21a3b8059a5e90f52f8dbd8241b7e Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Tue, 10 Mar 2026 10:39:21 -0400 Subject: [PATCH 090/201] feat(ci): add required gate jobs for Rust CI (#19453) * feat(ci): add required gate jobs for Rust CI and E2E workflows Always include rust-ci.yml and rust-e2e.yml in the continuation pipeline by changing the path-filtering mapping from conditional inclusion to always-include. Gate individual job execution behind a new c-rust_changes_detected boolean parameter, set to true only when (rust|\.circleci)/.* paths change. Add required-rust-ci and required-rust-e2e gate jobs that always run and fan-in on required Rust jobs. 
When no Rust changes are detected, the required jobs are skipped and CircleCI treats the dependencies as satisfied, so the gate jobs pass immediately. This enables GitHub required status checks for Rust CI. Also gate kona-publish-prestates with c-rust_changes_detected to preserve current behavior (only publish on develop with Rust changes). Co-Authored-By: Claude Opus 4.6 * fix(ci): use workflow-level gating instead of unsupported job-level when CircleCI does not support `when` as a key on job references within a workflow (error: "Unexpected argument(s): when"). Move the gating from job-level to workflow-level by splitting each workflow into two mutually exclusive workflows: - rust-ci: runs when c-rust_changes_detected=true OR dispatch (real jobs + gate) - rust-ci-gate-skip: runs when webhook AND NOT c-rust_changes_detected (gate only) Same pattern for rust-e2e-ci / rust-e2e-gate-skip. The gate job (required-rust-ci / required-rust-e2e) runs in exactly one workflow per pipeline, ensuring GitHub always sees the status check. Co-Authored-By: Claude Opus 4.6 * test: temporarily remove .circleci from rust change detection path Remove .circleci from the path filter so only rust/ changes trigger rust CI. This lets us verify the gate-skip workflows work correctly on this PR (since .circleci/ files are in the diff vs develop). This commit will be reverted after verification. Co-Authored-By: Claude Opus 4.6 * Revert "test: temporarily remove .circleci from rust change detection path" This reverts commit 9512bf70ae96d9a6af6ad23fb953b87a48446539. 
--------- Co-authored-by: Claude Opus 4.6 --- .circleci/config.yml | 18 +++++--- .circleci/continue/rust-ci.yml | 80 ++++++++++++++++++++++++++++----- .circleci/continue/rust-e2e.yml | 59 +++++++++++++++++++++++- 3 files changed, 139 insertions(+), 18 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index a873095572747..8638b5d6acea5 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -125,14 +125,18 @@ workflows: .* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/main.yml rust/.* c-rust_files_changed true .circleci/continue/main.yml - (rust|\.circleci)/.* c-rust_ci_dispatch << pipeline.parameters.rust_ci_dispatch >> .circleci/continue/rust-ci.yml - (rust|\.circleci)/.* c-default_docker_image << pipeline.parameters.default_docker_image >> .circleci/continue/rust-ci.yml - (rust|\.circleci)/.* c-base_image << pipeline.parameters.base_image >> .circleci/continue/rust-ci.yml - (rust|\.circleci)/.* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/rust-ci.yml + # Rust CI — always include config, gate jobs via c-rust_changes_detected + .* c-default_docker_image << pipeline.parameters.default_docker_image >> .circleci/continue/rust-ci.yml + .* c-base_image << pipeline.parameters.base_image >> .circleci/continue/rust-ci.yml + .* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/rust-ci.yml + .* c-rust_ci_dispatch << pipeline.parameters.rust_ci_dispatch >> .circleci/continue/rust-ci.yml + (rust|\.circleci)/.* c-rust_changes_detected true .circleci/continue/rust-ci.yml - (rust|op-e2e|\.circleci)/.* c-rust_e2e_dispatch << pipeline.parameters.rust_e2e_dispatch >> .circleci/continue/rust-e2e.yml - (rust|op-e2e|\.circleci)/.* c-default_docker_image << pipeline.parameters.default_docker_image >> .circleci/continue/rust-e2e.yml - (rust|op-e2e|\.circleci)/.* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/rust-e2e.yml + # 
Rust E2E — always include config, gate jobs via c-rust_changes_detected + .* c-default_docker_image << pipeline.parameters.default_docker_image >> .circleci/continue/rust-e2e.yml + .* c-go-cache-version << pipeline.parameters.go-cache-version >> .circleci/continue/rust-e2e.yml + .* c-rust_e2e_dispatch << pipeline.parameters.rust_e2e_dispatch >> .circleci/continue/rust-e2e.yml + (rust|op-e2e|\.circleci)/.* c-rust_changes_detected true .circleci/continue/rust-e2e.yml setup-tag: when: diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index e89cefd435760..3cc7cc53a607b 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -21,6 +21,9 @@ parameters: c-go-cache-version: type: string default: "v0.0" + c-rust_changes_detected: + type: boolean + default: false # ============================================================================ # COMMANDS @@ -1026,18 +1029,27 @@ jobs: echo "Successfully published prestates artifacts to GCS" - rust-save-build-cache: *kona-publish-prestate-cache + required-rust-ci: + docker: + - image: <> + resource_class: small + steps: + - run: echo "Required Rust CI checks passed" + # ============================================================================ # WORKFLOWS # ============================================================================ workflows: # ========================================================================== # Unified Rust CI workflow - # Runs on any rust/.* change or manual dispatch with rust_ci_dispatch=true + # Runs on rust path changes or manual dispatch with rust_ci_dispatch=true # ========================================================================== rust-ci: when: or: - - equal: ["webhook", << pipeline.trigger_source >>] + - and: + - equal: ["webhook", << pipeline.trigger_source >>] + - << pipeline.parameters.c-rust_changes_detected >> - and: - equal: [true, <>] - equal: ["api", << pipeline.trigger_source >>] @@ -1171,17 +1183,13 @@ workflows: # Kona 
crate-specific jobs (lint, FPVM builds, benches, coverage) # ----------------------------------------------------------------------- - kona-cargo-lint: - name: kona-lint-<> - matrix: - parameters: - target: ["cannon"] + name: kona-lint-cannon + target: "cannon" context: *rust-ci-context - kona-build-fpvm: - name: kona-build-fpvm-<> - matrix: - parameters: - target: ["cannon-client"] + name: kona-build-fpvm-cannon-client + target: "cannon-client" context: *rust-ci-context - kona-coverage: @@ -1193,6 +1201,57 @@ workflows: name: kona-host-client-offline-cannon context: *rust-ci-context + # ----------------------------------------------------------------------- + # Required gate — fans in on required Rust CI jobs + # ----------------------------------------------------------------------- + - required-rust-ci: + requires: + - rust-tests + - rust-clippy + - rust-docs + + # ========================================================================== + # Required Rust CI gate (skip) — runs when no rust changes and no dispatch + # Just runs the rust cargo tests for the different packages in the repo and try to build the fpvm-prestates + # ========================================================================== + rust-ci-gate-short: + when: + and: + - equal: ["webhook", << pipeline.trigger_source >>] + - not: << pipeline.parameters.c-rust_changes_detected >> + jobs: + - rust-ci-cargo-tests: + name: rust-tests + directory: rust + context: *rust-ci-context + + - rust-ci-cargo-tests: + name: op-reth-integration-tests + directory: rust + command: "--justfile op-reth/justfile test-integration" + cache_profile: debug + context: *rust-ci-context + + - rust-ci-cargo-tests: + name: op-reth-tests-edge + directory: rust + command: "--justfile op-reth/justfile test" + flags: "edge" + cache_profile: debug + context: *rust-ci-context + + - kona-build-fpvm: + name: kona-build-fpvm-cannon-client + target: "cannon-client" + context: *rust-ci-context + + - required-rust-ci: + requires: + - 
rust-tests + - op-reth-integration-tests + - op-reth-tests-edge + - kona-build-fpvm-cannon-client + # ========================================================================== # Kona scheduled workflows @@ -1211,6 +1270,7 @@ workflows: and: - equal: ["develop", <>] - equal: ["webhook", << pipeline.trigger_source >>] # Only trigger on push to develop, not scheduled runs + - << pipeline.parameters.c-rust_changes_detected >> # Only publish when rust paths changed jobs: - kona-publish-prestate-artifacts: name: kona-publish-<> diff --git a/.circleci/continue/rust-e2e.yml b/.circleci/continue/rust-e2e.yml index 1cd90095834b1..eb20cc392a05b 100644 --- a/.circleci/continue/rust-e2e.yml +++ b/.circleci/continue/rust-e2e.yml @@ -16,6 +16,9 @@ parameters: c-go-cache-version: type: string default: "v0.0" + c-rust_changes_detected: + type: boolean + default: false # Commands used by rust-e2e jobs commands: @@ -164,14 +167,24 @@ jobs: - go-save-cache: namespace: kona-ci + required-rust-e2e: + docker: + - image: <> + resource_class: small + steps: + - run: echo "Required Rust E2E checks passed" + # ============================================================================ # RUST E2E WORKFLOWS # ============================================================================ workflows: + # Rust E2E CI — runs on rust path changes or manual dispatch rust-e2e-ci: when: or: - - equal: ["webhook", << pipeline.trigger_source >>] + - and: + - equal: ["webhook", << pipeline.trigger_source >>] + - << pipeline.parameters.c-rust_changes_detected >> - and: - equal: [true, <>] - equal: ["api", << pipeline.trigger_source >>] @@ -233,4 +246,48 @@ workflows: - contracts-bedrock-build context: - circleci-repo-readonly-authenticated-github-token + # ----------------------------------------------------------------------- + # Required gate — fans in on all E2E test jobs + # ----------------------------------------------------------------------- + - required-rust-e2e: + requires: + - 
rust-e2e-simple-kona + - rust-e2e-simple-kona-geth + - rust-e2e-simple-kona-sequencer + - rust-e2e-large-kona-sequencer + - rust-e2e-restart + - kona-proof-action-single + + # Required Rust E2E gate (skip) — runs when no rust changes and no dispatch + # Just runs action tests + rust-e2e-gate-skip: + when: + and: + - equal: ["webhook", << pipeline.trigger_source >>] + - not: << pipeline.parameters.c-rust_changes_detected >> + jobs: + - contracts-bedrock-build: + build_args: --skip test + context: + - circleci-repo-readonly-authenticated-github-token + - rust-build-binary: + name: kona-build-release + directory: rust + profile: release + context: + - circleci-repo-readonly-authenticated-github-token + # Proof tests - single kind only, interop excluded per original config + - kona-proof-action-tests: + name: kona-proof-action-single + kind: single + requires: + - kona-build-release + - contracts-bedrock-build + context: + - circleci-repo-readonly-authenticated-github-token + + - required-rust-e2e: + requires: + - kona-proof-action-single + From ce74ec85111911dffba44e81cdff7539e91fe485 Mon Sep 17 00:00:00 2001 From: steven <12021290+stevennevins@users.noreply.github.com> Date: Tue, 10 Mar 2026 12:01:58 -0400 Subject: [PATCH 091/201] feat: enable setting CANNON_KONA as respected game type (#19117) * test: add coverage for CANNON_KONA respected game type paths Add tests verifying that the existing override key (overrides.cfg.startingRespectedGameType) can upgrade a chain's respected game type to CANNON_KONA. Also add deploy-path tests for PERMISSIONED_CANNON, CANNON_KONA, and invalid game types. 
* chore: format * fix: hack for forklive being out of date * fix: add assert for game type change * feat: add feature flag enforcement * Update OPContractsManagerV2.t.sol Co-authored-by: graphite-app[bot] <96075541+graphite-app[bot]@users.noreply.github.com> * chore: bump semver * fix: skip CANNON_KONA revert test when feature is enabled --------- Co-authored-by: graphite-app[bot] <96075541+graphite-app[bot]@users.noreply.github.com> --- .circleci/continue/main.yml | 1 + .../scripts/libraries/Config.sol | 5 + .../snapshots/semver-lock.json | 4 +- .../src/L1/opcm/OPContractsManagerV2.sol | 8 +- .../src/libraries/DevFeatures.sol | 4 +- .../test/L1/opcm/OPContractsManagerV2.t.sol | 138 +++++++++++++++++- .../test/setup/FeatureFlags.sol | 6 + 7 files changed, 158 insertions(+), 8 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 8b44d578b6853..37e215a40a0ef 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -2935,6 +2935,7 @@ workflows: - OPCM_V2,CUSTOM_GAS_TOKEN - OPCM_V2,OPTIMISM_PORTAL_INTEROP - OPCM_V2,ZK_DISPUTE_GAME + - OPCM_V2,CANNON_KONA context: - circleci-repo-readonly-authenticated-github-token - slack diff --git a/packages/contracts-bedrock/scripts/libraries/Config.sol b/packages/contracts-bedrock/scripts/libraries/Config.sol index 52e6ce05734c7..c29084a1ad2fd 100644 --- a/packages/contracts-bedrock/scripts/libraries/Config.sol +++ b/packages/contracts-bedrock/scripts/libraries/Config.sol @@ -308,6 +308,11 @@ library Config { return vm.envOr("DEV_FEATURE__ZK_DISPUTE_GAME", false); } + /// @notice Returns true if the development feature cannon_kona is enabled. + function devFeatureCannonKona() internal view returns (bool) { + return vm.envOr("DEV_FEATURE__CANNON_KONA", false); + } + /// @notice Returns true if the system feature custom_gas_token is enabled. 
function sysFeatureCustomGasToken() internal view returns (bool) { return vm.envOr("SYS_FEATURE__CUSTOM_GAS_TOKEN", false); diff --git a/packages/contracts-bedrock/snapshots/semver-lock.json b/packages/contracts-bedrock/snapshots/semver-lock.json index f0743b3e55829..c3dd415e65d95 100644 --- a/packages/contracts-bedrock/snapshots/semver-lock.json +++ b/packages/contracts-bedrock/snapshots/semver-lock.json @@ -52,8 +52,8 @@ "sourceCodeHash": "0xb3184aa5d95a82109e7134d1f61941b30e25f655b9849a0e303d04bbce0cde0b" }, "src/L1/opcm/OPContractsManagerV2.sol:OPContractsManagerV2": { - "initCodeHash": "0x0c5d72873002f8ac44e92e6cca44d4916da3af53af6efe1d63a59ae5a752b221", - "sourceCodeHash": "0x2962c31609cf23c2656d16522328f01dda6be65bd03c44a4ad570266714dde0a" + "initCodeHash": "0xa8f3bc8a242bda17c40a396e7adbc63ec040c2ac70faf4a4521c587b12022b8b", + "sourceCodeHash": "0xb751280c5f7e9b50f48cc9a77da3a6bc5db5bc11fed07026f8711df48821a321" }, "src/L2/BaseFeeVault.sol:BaseFeeVault": { "initCodeHash": "0x838bbd7f381e84e21887f72bd1da605bfc4588b3c39aed96cbce67c09335b3ee", diff --git a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol index 9a4db3305a3e1..89898aa481344 100644 --- a/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol +++ b/packages/contracts-bedrock/src/L1/opcm/OPContractsManagerV2.sol @@ -153,9 +153,9 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { /// - Major bump: New required sequential upgrade /// - Minor bump: Replacement OPCM for same upgrade /// - Patch bump: Development changes (expected for normal dev work) - /// @custom:semver 7.0.11 + /// @custom:semver 7.0.12 function version() public pure returns (string memory) { - return "7.0.11"; + return "7.0.12"; } /// @param _standardValidator The standard validator for this OPCM release. 
@@ -346,6 +346,10 @@ contract OPContractsManagerV2 is ISemver, OPContractsManagerUtilsCaller { // disabling the currently-respected game type, since the validation requires the starting // respected game type to correspond to an enabled game config. if (_isMatchingInstructionByKey(_instruction, "overrides.cfg.startingRespectedGameType")) { + GameType gameType = abi.decode(_instruction.data, (GameType)); + if (gameType.raw() == GameTypes.CANNON_KONA.raw()) { + return isDevFeatureEnabled(DevFeatures.CANNON_KONA); + } return true; } diff --git a/packages/contracts-bedrock/src/libraries/DevFeatures.sol b/packages/contracts-bedrock/src/libraries/DevFeatures.sol index f14c47048b249..d0749bbb42455 100644 --- a/packages/contracts-bedrock/src/libraries/DevFeatures.sol +++ b/packages/contracts-bedrock/src/libraries/DevFeatures.sol @@ -14,9 +14,7 @@ library DevFeatures { bytes32 public constant OPTIMISM_PORTAL_INTEROP = bytes32(0x0000000000000000000000000000000000000000000000000000000000000001); - /// @notice The feature that enables deployment of the CANNON_KONA fault dispute game. - /// @custom:legacy - /// This feature is no longer used, but is kept here for legacy reasons. + /// @notice The feature that gates the respected game type override for CANNON_KONA during upgrades. bytes32 public constant CANNON_KONA = bytes32(0x0000000000000000000000000000000000000000000000000000000000000010); /// @notice The feature that enables deployment of V2 dispute game contracts. diff --git a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol index 6116e3dce86e8..6ffcd6ff7a958 100644 --- a/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol +++ b/packages/contracts-bedrock/test/L1/opcm/OPContractsManagerV2.t.sol @@ -818,6 +818,110 @@ contract OPContractsManagerV2_Upgrade_Test is OPContractsManagerV2_Upgrade_TestI // Verify the system is still paused after the upgrade. 
assertTrue(superchainConfig.paused(address(0)), "System should still be paused after upgrade"); } + + /// @notice Tests upgrading the respected game type to CANNON_KONA via the override key. + function test_upgrade_respectedGameTypeCannonToKona_succeeds() public { + vm.mockCall( + address(opcmV2.contractsContainer()), + abi.encodeCall(IOPContractsManagerContainer.isDevFeatureEnabled, (DevFeatures.CANNON_KONA)), + abi.encode(true) + ); + /// This is a hack because fork live has an outdated superchain registry reference that it + /// pulls the addresses from + IAnchorStateRegistry anchorStateRegistry = optimismPortal2.anchorStateRegistry(); + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: "overrides.cfg.startingRespectedGameType", + data: abi.encode(GameTypes.CANNON_KONA) + }) + ); + runCurrentUpgradeV2(chainPAO); + assertEq( + anchorStateRegistry.respectedGameType().raw(), + GameTypes.CANNON_KONA.raw(), + "respected game type should remain CANNON_KONA" + ); + } + + /// @notice Tests that overriding to CANNON_KONA is a no-op when already CANNON_KONA.
+ function test_upgrade_respectedGameTypeAlreadyKona_succeeds() public { + vm.mockCall( + address(opcmV2.contractsContainer()), + abi.encodeCall(IOPContractsManagerContainer.isDevFeatureEnabled, (DevFeatures.CANNON_KONA)), + abi.encode(true) + ); + vm.mockCall( + address(anchorStateRegistry), + abi.encodeCall(IAnchorStateRegistry.respectedGameType, ()), + abi.encode(GameTypes.CANNON_KONA) + ); + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: "overrides.cfg.startingRespectedGameType", + data: abi.encode(GameTypes.CANNON_KONA) + }) + ); + runCurrentUpgradeV2(chainPAO); + assertEq( + anchorStateRegistry.respectedGameType().raw(), + GameTypes.CANNON_KONA.raw(), + "respected game type should remain CANNON_KONA" + ); + } + + function test_upgrade_respectedGameTypeUnchangedWithoutOverride_succeeds() public { + /// This is a hack because fork live has an outdated superchain registry reference that it pulls the addresses + /// from + IAnchorStateRegistry anchorStateRegistry = optimismPortal2.anchorStateRegistry(); + GameType before = anchorStateRegistry.respectedGameType(); + runCurrentUpgradeV2(chainPAO); + assertEq( + anchorStateRegistry.respectedGameType().raw(), + before.raw(), + "respected game type should be unchanged without override" + ); + } + + /// @notice Tests that overriding to a disabled game type reverts during upgrade.
+ function test_upgrade_respectedGameTypeOverrideToDisabled_reverts() public { + address opcmV2Container = address(opcmV2.contractsContainer()); + vm.mockCall( + opcmV2Container, + abi.encodeCall(IOPContractsManagerContainer.isDevFeatureEnabled, (DevFeatures.CANNON_KONA)), + abi.encode(true) + ); + v2UpgradeInput.disputeGameConfigs[2].enabled = false; + v2UpgradeInput.disputeGameConfigs[2].initBond = 0; + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: "overrides.cfg.startingRespectedGameType", + data: abi.encode(GameTypes.CANNON_KONA) + }) + ); + // nosemgrep: sol-style-use-abi-encodecall + runCurrentUpgradeV2( + chainPAO, abi.encodeWithSelector(IOPContractsManagerV2.OPContractsManagerV2_InvalidGameConfigs.selector) + ); + } + + /// @notice Tests that the respected game type override is rejected when CANNON_KONA dev feature is off. + function test_upgrade_respectedGameTypeOverrideWithoutDevFeature_reverts() public { + skipIfDevFeatureEnabled(DevFeatures.CANNON_KONA); + v2UpgradeInput.extraInstructions.push( + IOPContractsManagerUtils.ExtraInstruction({ + key: "overrides.cfg.startingRespectedGameType", + data: abi.encode(GameTypes.CANNON_KONA) + }) + ); + // nosemgrep: sol-style-use-abi-encodecall + runCurrentUpgradeV2( + chainPAO, + abi.encodeWithSelector( + IOPContractsManagerV2.OPContractsManagerV2_InvalidUpgradeInstruction.selector, + "overrides.cfg.startingRespectedGameType" + ) + ); + } } /// @title OPContractsManagerV2_IsPermittedUpgradeSequence_Test @@ -1063,7 +1167,7 @@ contract OPContractsManagerV2_Deploy_Test is OPContractsManagerV2_TestInit { IOPContractsManagerV2.FullConfig deployConfig; /// @notice Sets up the test. - function setUp() public override { + function setUp() public virtual override { super.setUp(); // Set up default deploy config. 
@@ -1259,6 +1363,38 @@ contract OPContractsManagerV2_Deploy_Test is OPContractsManagerV2_TestInit { deployConfig, abi.encodeWithSelector(IOPContractsManagerV2.OPContractsManagerV2_InvalidGameConfigs.selector) ); } + + /// @notice PERMISSIONED_CANNON as respected game type succeeds during deploy. + function test_deploy_permissionedCannonRespectedGameType_succeeds() public { + deployConfig.startingRespectedGameType = GameTypes.PERMISSIONED_CANNON; + // We expect PLDG-10 and CKDG-10 validator errors because CANNON and CANNON_KONA are + // disabled during initial deployment (no implementations registered). + IOPContractsManagerV2.ChainContracts memory cts = runDeployV2(deployConfig, bytes(""), "PLDG-10,CKDG-10"); + assertEq( + cts.anchorStateRegistry.respectedGameType().raw(), + GameTypes.PERMISSIONED_CANNON.raw(), + "respected game type should be PERMISSIONED_CANNON" + ); + } + + /// @notice CANNON_KONA as respected game type reverts because its dispute game is not enabled + /// during initial deployment. + function test_deploy_cannonKonaRespectedGameType_reverts() public { + deployConfig.startingRespectedGameType = GameTypes.CANNON_KONA; + // nosemgrep: sol-style-use-abi-encodecall + runDeployV2( + deployConfig, abi.encodeWithSelector(IOPContractsManagerV2.OPContractsManagerV2_InvalidGameConfigs.selector) + ); + } + + /// @notice An invalid game type reverts. 
+ function test_deploy_invalidRespectedGameType_reverts() public { + deployConfig.startingRespectedGameType = GameType.wrap(255); + // nosemgrep: sol-style-use-abi-encodecall + runDeployV2( + deployConfig, abi.encodeWithSelector(IOPContractsManagerV2.OPContractsManagerV2_InvalidGameConfigs.selector) + ); + } } /// @title OPContractsManagerV2_DevFeatureBitmap_Test diff --git a/packages/contracts-bedrock/test/setup/FeatureFlags.sol b/packages/contracts-bedrock/test/setup/FeatureFlags.sol index c28957556112e..f0f98543575e9 100644 --- a/packages/contracts-bedrock/test/setup/FeatureFlags.sol +++ b/packages/contracts-bedrock/test/setup/FeatureFlags.sol @@ -53,6 +53,10 @@ abstract contract FeatureFlags { console.log("Setup: DEV_FEATURE__ZK_DISPUTE_GAME is enabled"); devFeatureBitmap |= DevFeatures.ZK_DISPUTE_GAME; } + if (Config.devFeatureCannonKona()) { + console.log("Setup: DEV_FEATURE__CANNON_KONA is enabled"); + devFeatureBitmap |= DevFeatures.CANNON_KONA; + } } /// @notice Returns the string name of a feature. 
@@ -67,6 +71,8 @@ abstract contract FeatureFlags { return "DEV_FEATURE__L2CM"; } else if (_feature == DevFeatures.ZK_DISPUTE_GAME) { return "DEV_FEATURE__ZK_DISPUTE_GAME"; + } else if (_feature == DevFeatures.CANNON_KONA) { + return "DEV_FEATURE__CANNON_KONA"; } else if (_feature == Features.CUSTOM_GAS_TOKEN) { return "SYS_FEATURE__CUSTOM_GAS_TOKEN"; } else if (_feature == Features.ETH_LOCKBOX) { From 9d34793384c13f8cc71301ad584e00a66b4d7999 Mon Sep 17 00:00:00 2001 From: wwared Date: Tue, 10 Mar 2026 15:55:31 -0300 Subject: [PATCH 092/201] chore: Remove remnants of kona-supervisor (#19470) * chore: Remove remnants of kona-supervisor * chore: Add kona-node back to justfile --------- Co-authored-by: wwared <541936+wwared@users.noreply.github.com> --- justfile | 2 +- op-acceptance-tests/justfile | 2 +- op-devstack/sysgo/supervisor.go | 2 +- op-devstack/sysgo/supervisor_kona.go | 185 --------------------------- 4 files changed, 3 insertions(+), 188 deletions(-) delete mode 100644 op-devstack/sysgo/supervisor_kona.go diff --git a/justfile b/justfile index a06616305dfed..d5ed8c55affa0 100644 --- a/justfile +++ b/justfile @@ -1,6 +1,6 @@ # Build all Rust binaries (release) for sysgo tests. build-rust-release: - cd rust && cargo build --release --bin kona-node --bin kona-supervisor + cd rust && cargo build --release --bin kona-node cd op-rbuilder && cargo build --release -p op-rbuilder --bin op-rbuilder cd rollup-boost && cargo build --release -p rollup-boost --bin rollup-boost diff --git a/op-acceptance-tests/justfile b/op-acceptance-tests/justfile index 306134ae94a40..befa509184c05 100644 --- a/op-acceptance-tests/justfile +++ b/op-acceptance-tests/justfile @@ -63,7 +63,7 @@ acceptance-test devnet="" gate="base": make cannon-prestates fi - echo "Building Rust binaries (kona-node, kona-supervisor, op-rbuilder, rollup-boost)..." + echo "Building Rust binaries (kona-node, op-rbuilder, rollup-boost)..." 
cd {{REPO_ROOT}} just build-rust-release fi diff --git a/op-devstack/sysgo/supervisor.go b/op-devstack/sysgo/supervisor.go index 08b8d37e2a924..4a2ab1a00abc3 100644 --- a/op-devstack/sysgo/supervisor.go +++ b/op-devstack/sysgo/supervisor.go @@ -18,7 +18,7 @@ type Supervisor interface { func WithSupervisor(supervisorID stack.ComponentID, clusterID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { switch os.Getenv("DEVSTACK_SUPERVISOR_KIND") { case "kona": - return WithKonaSupervisor(supervisorID, clusterID, l1ELID) + panic("kona-supervisor has been removed") default: return WithOPSupervisor(supervisorID, clusterID, l1ELID) } diff --git a/op-devstack/sysgo/supervisor_kona.go b/op-devstack/sysgo/supervisor_kona.go deleted file mode 100644 index 8ae222b4c5cb6..0000000000000 --- a/op-devstack/sysgo/supervisor_kona.go +++ /dev/null @@ -1,185 +0,0 @@ -package sysgo - -import ( - "encoding/json" - "os" - "sync" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/logpipe" - "github.com/ethereum-optimism/optimism/op-service/tasks" - "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" -) - -type KonaSupervisor struct { - mu sync.Mutex - - id stack.ComponentID - userRPC string - - userProxy *tcpproxy.Proxy - - execPath string - args []string - // Each entry is of the form "key=value". 
- env []string - - p devtest.P - - sub *SubProcess -} - -var _ stack.Lifecycle = (*OpSupervisor)(nil) - -func (s *KonaSupervisor) hydrate(sys stack.ExtensibleSystem) { - tlog := sys.Logger().New("id", s.id) - supClient, err := client.NewRPC(sys.T().Ctx(), tlog, s.userRPC, client.WithLazyDial()) - sys.T().Require().NoError(err) - sys.T().Cleanup(supClient.Close) - - sys.AddSupervisor(shim.NewSupervisor(shim.SupervisorConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: s.id, - Client: supClient, - })) -} - -func (s *KonaSupervisor) UserRPC() string { - return s.userRPC -} - -func (s *KonaSupervisor) Start() { - s.mu.Lock() - defer s.mu.Unlock() - if s.sub != nil { - s.p.Logger().Warn("Kona-supervisor already started") - return - } - - // Create a proxy for the user RPC, - // so other services can connect, and stay connected, across restarts. - if s.userProxy == nil { - s.userProxy = tcpproxy.New(s.p.Logger()) - s.p.Require().NoError(s.userProxy.Start()) - s.p.Cleanup(func() { - s.userProxy.Close() - }) - s.userRPC = "http://" + s.userProxy.Addr() - } - - // Create the sub-process. - // We pipe sub-process logs to the test-logger. - // And inspect them along the way, to get the RPC server address. 
- logOut := logpipe.ToLogger(s.p.Logger().New("src", "stdout")) - logErr := logpipe.ToLogger(s.p.Logger().New("src", "stderr")) - userRPC := make(chan string, 1) - onLogEntry := func(e logpipe.LogEntry) { - switch e.LogMessage() { - case "RPC server bound to address": - userRPC <- "http://" + e.FieldValue("addr").(string) - } - } - stdOutLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseRustStructuredLogs(line) - logOut(e) - onLogEntry(e) - }) - stdErrLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseRustStructuredLogs(line) - logErr(e) - }) - - s.sub = NewSubProcess(s.p, stdOutLogs, stdErrLogs) - err := s.sub.Start(s.execPath, s.args, s.env) - s.p.Require().NoError(err, "Must start") - - var userRPCAddr string - s.p.Require().NoError(tasks.Await(s.p.Ctx(), userRPC, &userRPCAddr), "need user RPC") - - s.userProxy.SetUpstream(ProxyAddr(s.p.Require(), userRPCAddr)) -} - -func (s *KonaSupervisor) Stop() { - s.mu.Lock() - defer s.mu.Unlock() - if s.sub == nil { - s.p.Logger().Warn("kona-supervisor already stopped") - return - } - err := s.sub.Stop(true) - s.p.Require().NoError(err, "Must stop") - s.sub = nil -} - -func WithKonaSupervisor(supervisorID stack.ComponentID, clusterID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) - require := p.Require() - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok, "need L1 EL node to connect supervisor to") - - cluster, ok := orch.GetCluster(clusterID) - require.True(ok, "need cluster to determine dependency set") - - require.NotNil(cluster.cfgset, "need a full config set") - require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") - - tempDataDir := p.TempDir() - - cfgDir := p.TempDir() - - depsetCfgPath := cfgDir + "/depset.json" - depsetData, err := cluster.DepSet().MarshalJSON() - require.NoError(err, "failed to 
marshal dependency set") - p.Require().NoError(err, os.WriteFile(depsetCfgPath, depsetData, 0o644)) - - rollupCfgPath := cfgDir + "/rollup-config-*.json" - for _, l2NetID := range orch.registry.IDsByKind(stack.KindL2Network) { - l2Net, ok := orch.GetL2Network(l2NetID) - require.True(ok, "need l2 network") - chainID := l2Net.id.ChainID() - rollupData, err := json.Marshal(l2Net.rollupCfg) - require.NoError(err, "failed to marshal rollup config") - p.Require().NoError(err, os.WriteFile(cfgDir+"/rollup-config-"+chainID.String()+".json", rollupData, 0o644)) - } - - envVars := []string{ - "RPC_ADDR=127.0.0.1", - "DATADIR=" + tempDataDir, - "DEPENDENCY_SET=" + depsetCfgPath, - "ROLLUP_CONFIG_PATHS=" + rollupCfgPath, - "L1_RPC=" + l1EL.UserRPC(), - "RPC_ENABLE_ADMIN=true", - "L2_CONSENSUS_NODES=", - "L2_CONSENSUS_JWT_SECRET=", - "KONA_LOG_LEVEL=3", // info level, consistent with l2_cl_kona.go - "KONA_LOG_STDOUT_FORMAT=json", - } - - execPath, err := EnsureRustBinary(p, RustBinarySpec{ - SrcDir: "kona", - Package: "kona-supervisor", - Binary: "kona-supervisor", - }) - p.Require().NoError(err, "prepare kona-supervisor binary") - p.Require().NotEmpty(execPath, "kona-supervisor binary path resolved") - - konaSupervisor := &KonaSupervisor{ - id: supervisorID, - userRPC: "", // retrieved from logs - execPath: execPath, - args: []string{}, - env: envVars, - p: p, - } - orch.registry.Register(supervisorID, konaSupervisor) - p.Logger().Info("Starting kona-supervisor") - konaSupervisor.Start() - p.Cleanup(konaSupervisor.Stop) - p.Logger().Info("Kona-supervisor is up", "rpc", konaSupervisor.UserRPC()) - }) -} From 504232c0d0a47612ce4a92bb52adc20dae42969d Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 11 Mar 2026 05:54:23 +1000 Subject: [PATCH 093/201] kona(derive): remove dead EthereumDataSourceVariant file (#19341) `variant.rs` defined `EthereumDataSourceVariant` and implemented `AsyncIterator` for it, but `AsyncIterator` is not defined anywhere in the kona codebase and the 
file was never included in `sources/mod.rs`. The type was therefore completely unreachable and unusable at runtime. `EthereumDataSource` in `ethereum.rs` already covers both the calldata and blob cases through the `DataAvailabilityProvider` trait, making `EthereumDataSourceVariant` redundant. Fixes https://github.com/ethereum-optimism/optimism/issues/19340 Co-authored-by: Claude Sonnet 4.6 --- .../protocol/derive/src/sources/variant.rs | 78 ------------------- 1 file changed, 78 deletions(-) delete mode 100644 rust/kona/crates/protocol/derive/src/sources/variant.rs diff --git a/rust/kona/crates/protocol/derive/src/sources/variant.rs b/rust/kona/crates/protocol/derive/src/sources/variant.rs deleted file mode 100644 index 6d76c34bf31d9..0000000000000 --- a/rust/kona/crates/protocol/derive/src/sources/variant.rs +++ /dev/null @@ -1,78 +0,0 @@ -//! Data source - -use crate::{ - BlobSource, CalldataSource, - AsyncIterator, BlobProvider, ChainProvider, - PipelineResult, -}; -use alloc::boxed::Box; -use alloy_primitives::Bytes; -use async_trait::async_trait; - -/// An enum over the various data sources. -#[derive(Debug, Clone)] -pub enum EthereumDataSourceVariant -where - CP: ChainProvider + Send, - B: BlobProvider + Send, -{ - /// A calldata source. - Calldata(CalldataSource), - /// A blob source. 
- Blob(BlobSource), -} - -#[async_trait] -impl AsyncIterator for EthereumDataSourceVariant -where - CP: ChainProvider + Send, - B: BlobProvider + Send, -{ - type Item = Bytes; - - async fn next(&mut self) -> PipelineResult { - match self { - Self::Calldata(c) => c.next().await, - Self::Blob(b) => b.next().await, - } - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::test_utils::TestChainProvider; - use kona_protocol::BlockInfo; - - use crate::{ - BlobData, EthereumDataSourceVariant, - test_utils::TestBlobProvider, - }; - - #[tokio::test] - async fn test_variant_next_calldata() { - let chain = TestChainProvider::default(); - let block_ref = BlockInfo::default(); - let mut source = - CalldataSource::new(chain, Default::default(), block_ref, Default::default()); - source.open = true; - source.calldata.push_back(Default::default()); - let mut variant: EthereumDataSourceVariant = - EthereumDataSourceVariant::Calldata(source); - assert!(variant.next().await.is_ok()); - } - - #[tokio::test] - async fn test_variant_next_blob() { - let chain = TestChainProvider::default(); - let blob = TestBlobProvider::default(); - let block_ref = BlockInfo::default(); - let mut source = - BlobSource::new(chain, blob, Default::default(), block_ref, Default::default()); - source.open = true; - source.data.push(BlobData { calldata: Some(Default::default()), ..Default::default() }); - let mut variant: EthereumDataSourceVariant = - EthereumDataSourceVariant::Blob(source); - assert!(variant.next().await.is_ok()); - } -} From 8b8639bab90707240606d6d8911dfe3a6ef8bd5c Mon Sep 17 00:00:00 2001 From: Maurelian Date: Tue, 10 Mar 2026 16:20:49 -0400 Subject: [PATCH 094/201] chore: bump just to 1.46.0 (#19469) Keep tooling current with upstream releases. 
--- mise.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mise.toml b/mise.toml index 0c666d9b25dbc..69921cecc617d 100644 --- a/mise.toml +++ b/mise.toml @@ -14,7 +14,7 @@ yq = "4.44.5" shellcheck = "0.10.0" shfmt = "3.11.0" direnv = "2.35.0" -just = "1.37.0" +just = "1.46.0" make = "4.4.1" svm-rs = "0.5.19" From d25685cb6189c10907ec12ba6172ccbeb2cfb8ec Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 11 Mar 2026 08:47:47 +1000 Subject: [PATCH 095/201] fix(kona/derive): accept EIP-4844 type-3 batcher txs in CalldataSource (#19355) The `load_calldata` match on `TxEnvelope` used a catch-all `_ => return None` that silently dropped EIP-4844 (type 3) transactions. The derivation spec (derivation.md:504) and op-node's `isValidBatchTx` both explicitly accept type-3 transactions, so kona must do the same to avoid divergent L2 state in the pre-Ecotone window. Add a `TxEnvelope::Eip4844` match arm that extracts `to` and `input` like the other accepted transaction types, and update the corresponding test from asserting the tx is ignored to asserting its calldata is included. 
Closes #19352 Co-authored-by: Claude Opus 4.6 --- rust/kona/crates/protocol/derive/src/sources/calldata.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/rust/kona/crates/protocol/derive/src/sources/calldata.rs b/rust/kona/crates/protocol/derive/src/sources/calldata.rs index be9ad3675b44b..84265c1df739a 100644 --- a/rust/kona/crates/protocol/derive/src/sources/calldata.rs +++ b/rust/kona/crates/protocol/derive/src/sources/calldata.rs @@ -49,6 +49,7 @@ impl CalldataSource { TxEnvelope::Legacy(tx) => (tx.tx().to(), tx.tx().input()), TxEnvelope::Eip2930(tx) => (tx.tx().to(), tx.tx().input()), TxEnvelope::Eip1559(tx) => (tx.tx().to(), tx.tx().input()), + TxEnvelope::Eip4844(tx) => (tx.tx().to(), tx.tx().input()), _ => return None, }; let to = tx_kind?; @@ -237,7 +238,7 @@ mod tests { } #[tokio::test] - async fn test_load_calldata_blob_tx_ignored() { + async fn test_load_calldata_valid_blob_tx() { let batch_inbox_address = address!("0123456789012345678901234567890123456789"); let mut source = default_test_calldata_source(); source.batch_inbox_address = batch_inbox_address; @@ -248,7 +249,7 @@ mod tests { assert!( source.load_calldata(&BlockInfo::default(), tx.recover_signer().unwrap()).await.is_ok() ); - assert!(source.calldata.is_empty()); + assert!(!source.calldata.is_empty()); // Calldata is NOT empty. assert!(source.open); } From 37c98925e438efa476c6b598b5113b73dc6dbe76 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 11 Mar 2026 09:25:44 +1000 Subject: [PATCH 096/201] fix(kona/protocol): add bounds checks in span batch decode to prevent panics on truncated input (#19361) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(kona/protocol): add bounds checks in span batch decode to prevent panics on truncated input Multiple span batch decode functions panic on truncated input instead of returning an error. 
This adds explicit length checks before each unsafe slice operation: - prefix.rs: decode_parent_check and decode_l1_origin_check now check r.len() >= 20 before split_at(20) - transactions.rs: decode_tx_sigs now checks r.len() >= 64 before indexing r[..32] and r[32..64] - transactions.rs: decode_tx_tos now checks r.len() >= 20 before indexing r[..20] On short input, each function returns SpanBatchError::Decoding(...) instead of panicking, allowing the batch to be dropped gracefully — consistent with Go op-node's io.ReadFull behavior. Co-Authored-By: Claude Opus 4.6 * style: fix rustfmt formatting in span batch decode Co-Authored-By: Claude Opus 4.6 * style: fix clippy field_reassign_with_default in span batch tests Use struct initialization syntax instead of Default::default() followed by field reassignment. Co-Authored-By: Claude Opus 4.6 * style: fix rustfmt formatting for struct initialization Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .../protocol/protocol/src/batch/prefix.rs | 29 +++++++++++ .../protocol/src/batch/transactions.rs | 50 +++++++++++++++++++ 2 files changed, 79 insertions(+) diff --git a/rust/kona/crates/protocol/protocol/src/batch/prefix.rs b/rust/kona/crates/protocol/protocol/src/batch/prefix.rs index a95a54995afd2..0c0f6fa69a3fe 100644 --- a/rust/kona/crates/protocol/protocol/src/batch/prefix.rs +++ b/rust/kona/crates/protocol/protocol/src/batch/prefix.rs @@ -47,6 +47,9 @@ impl SpanBatchPrefix { /// Decodes the parent check from a reader. pub fn decode_parent_check(&mut self, r: &mut &[u8]) -> Result<(), SpanBatchError> { + if r.len() < 20 { + return Err(SpanBatchError::Decoding(SpanDecodingError::ParentCheck)); + } let (parent_check, remaining) = r.split_at(20); let parent_check = FixedBytes::<20>::from_slice(parent_check); *r = remaining; @@ -56,6 +59,9 @@ impl SpanBatchPrefix { /// Decodes the L1 origin check from a reader. 
pub fn decode_l1_origin_check(&mut self, r: &mut &[u8]) -> Result<(), SpanBatchError> { + if r.len() < 20 { + return Err(SpanBatchError::Decoding(SpanDecodingError::L1OriginCheck)); + } let (l1_origin_check, remaining) = r.split_at(20); let l1_origin_check = FixedBytes::<20>::from_slice(l1_origin_check); *r = remaining; @@ -79,6 +85,29 @@ mod test { use alloc::vec::Vec; use alloy_primitives::address; + #[test] + fn test_decode_parent_check_truncated() { + let mut prefix = SpanBatchPrefix::default(); + let buf = [0u8; 19]; // one byte short + let result = prefix.decode_parent_check(&mut buf.as_slice()); + assert_eq!(result, Err(SpanBatchError::Decoding(SpanDecodingError::ParentCheck))); + } + + #[test] + fn test_decode_l1_origin_check_truncated() { + let mut prefix = SpanBatchPrefix::default(); + let buf = [0u8; 19]; // one byte short + let result = prefix.decode_l1_origin_check(&mut buf.as_slice()); + assert_eq!(result, Err(SpanBatchError::Decoding(SpanDecodingError::L1OriginCheck))); + } + + #[test] + fn test_decode_parent_check_empty() { + let mut prefix = SpanBatchPrefix::default(); + let result = prefix.decode_parent_check(&mut [].as_slice()); + assert_eq!(result, Err(SpanBatchError::Decoding(SpanDecodingError::ParentCheck))); + } + #[test] fn test_span_batch_prefix_encoding_roundtrip() { let expected = SpanBatchPrefix { diff --git a/rust/kona/crates/protocol/protocol/src/batch/transactions.rs b/rust/kona/crates/protocol/protocol/src/batch/transactions.rs index 3938d31aa1efb..81f638b582cff 100644 --- a/rust/kona/crates/protocol/protocol/src/batch/transactions.rs +++ b/rust/kona/crates/protocol/protocol/src/batch/transactions.rs @@ -153,6 +153,11 @@ impl SpanBatchTransactions { let mut sigs = Vec::with_capacity(self.total_block_tx_count as usize); for i in 0..self.total_block_tx_count { let y_parity = y_parity_bits.get_bit(i as usize).expect("same length"); + if r.len() < 64 { + return Err(SpanBatchError::Decoding( + 
SpanDecodingError::InvalidTransactionSignature, + )); + } let r_val = U256::from_be_slice(&r[..32]); let s_val = U256::from_be_slice(&r[32..64]); sigs.push(Signature::new(r_val, s_val, y_parity == 1)); @@ -193,6 +198,9 @@ impl SpanBatchTransactions { let mut tos = Vec::with_capacity(self.total_block_tx_count as usize); let contract_creation_count = self.contract_creation_count(); for _ in 0..(self.total_block_tx_count - contract_creation_count) { + if r.len() < 20 { + return Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData)); + } let to = Address::from_slice(&r[..20]); tos.push(to); r.advance(20); @@ -356,6 +364,48 @@ mod tests { use alloy_consensus::{Signed, TxEip1559, TxEip2930, TxEip7702}; use alloy_primitives::{Signature, TxKind, address}; + #[test] + fn test_decode_tx_sigs_truncated() { + let mut txs = SpanBatchTransactions { total_block_tx_count: 1, ..Default::default() }; + // Provide a valid y_parity bitfield (1 bit = 1 byte) but truncated signature data + // SpanBatchBits::decode for 1 bit needs 1 byte for the bitfield + let buf = vec![0u8]; // y_parity byte, but no r/s signature bytes + let result = txs.decode_tx_sigs(&mut buf.as_slice()); + assert_eq!( + result, + Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionSignature)) + ); + } + + #[test] + fn test_decode_tx_tos_truncated() { + let mut txs = SpanBatchTransactions { + total_block_tx_count: 1, + contract_creation_bits: SpanBatchBits::default(), + ..Default::default() + }; + let buf = [0u8; 19]; // one byte short of a 20-byte address + let result = txs.decode_tx_tos(&mut buf.as_slice()); + assert_eq!( + result, + Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData)) + ); + } + + #[test] + fn test_decode_tx_tos_empty() { + let mut txs = SpanBatchTransactions { + total_block_tx_count: 1, + contract_creation_bits: SpanBatchBits::default(), + ..Default::default() + }; + let result = txs.decode_tx_tos(&mut [].as_slice()); + assert_eq!( + result, + 
Err(SpanBatchError::Decoding(SpanDecodingError::InvalidTransactionData)) + ); + } + #[test] fn test_span_batch_transactions_add_empty_txs() { let mut span_batch_txs = SpanBatchTransactions::default(); From 7da40558c66893624a7a79bca1964836ebdffba4 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 11 Mar 2026 10:03:28 +1000 Subject: [PATCH 097/201] =?UTF-8?q?fix(kona/derive):=20copy=20origin=20fie?= =?UTF-8?q?ld=20during=20Holocene=20BatchQueue=E2=86=92BatchValidator=20tr?= =?UTF-8?q?ansition=20(#19360)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(kona/derive): copy origin field during Holocene BatchQueue→BatchValidator transition (BQ-12) On Holocene activation BatchProvider::attempt_update() transitions from BatchQueue to BatchValidator. It was copying l1_blocks but not the origin field, leaving BatchValidator.origin as None. Go's BatchMux.TransformHolocene() copies both fields (batch_mux.go:67-68): bs.l1Blocks = slices.Clone(bp.l1Blocks) bs.origin = bp.origin Without the origin copy, the first update_origins() call after transition always enters the self.origin != self.prev.origin() branch (None != Some(…)). This causes two failure modes: 1. Normal case: current L1 block is pushed onto l1_blocks again, creating a duplicate entry that corrupts the two-slot epoch window. 2. Lagging case: l1_blocks.clear() discards all transferred history, then next_batch() returns MissingOrigin.crit(), halting derivation. Fix: copy batch_queue.origin into bv.origin alongside l1_blocks, matching the Go reference implementation. Closes https://github.com/ethereum-optimism/optimism/issues/19356 * fix: apply rustfmt formatting fix Wrap long comment line in batch_provider.rs to satisfy nightly rustfmt check. Co-Authored-By: Claude Sonnet 4.6 * fix: rename test to reflect expected behavior The test verifies that origin IS transferred during Holocene transition, so rename from _not_transferred to _transferred. 
Co-Authored-By: Claude Opus 4.6 * Remove dangling reference to random issue ID. --------- Co-authored-by: Claude Sonnet 4.6 --- .../derive/src/stages/batch/batch_provider.rs | 58 ++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/rust/kona/crates/protocol/derive/src/stages/batch/batch_provider.rs b/rust/kona/crates/protocol/derive/src/stages/batch/batch_provider.rs index d6036e283cdca..1d1a12fd91f71 100644 --- a/rust/kona/crates/protocol/derive/src/stages/batch/batch_provider.rs +++ b/rust/kona/crates/protocol/derive/src/stages/batch/batch_provider.rs @@ -18,7 +18,7 @@ use kona_protocol::{BlockInfo, L2BlockInfo, SingleBatch}; /// When Holocene is active, the [`BatchValidator`] is used. /// /// When transitioning between the two stages, the mux will reset the active stage, but -/// retain `l1_blocks`. +/// retain `l1_blocks` and `origin`. #[derive(Debug)] pub struct BatchProvider where @@ -74,6 +74,7 @@ where let batch_queue = self.batch_queue.take().expect("Must have batch queue"); let mut bv = BatchValidator::new(self.cfg.clone(), batch_queue.prev); bv.l1_blocks = batch_queue.l1_blocks; + bv.origin = batch_queue.origin; self.batch_validator = Some(bv); } else if self.batch_validator.is_some() && !self.cfg.is_holocene_active(origin.timestamp) { // If the batch validator is active, and Holocene is not active, it indicates an L1 @@ -306,4 +307,59 @@ mod test { }; assert!(bv.l1_blocks.len() == 1); } + + // On Holocene activation, BatchProvider.attempt_update() must copy BOTH l1_blocks + // AND origin from the old BatchQueue to the new BatchValidator. + // + // Without copying origin, BatchValidator.origin starts as None. The first + // update_origins() call always enters the `self.origin != self.prev.origin()` branch + // (None != Some(...)), causing either duplicate l1_block insertion (normal case) or + // l1_blocks.clear() followed by MissingOrigin.crit() halt (lagging case). 
+ #[test] + fn test_spec_batch_provider_holocene_transition_origin_transferred() { + let provider = TestNextBatchProvider::new(vec![]); + let l2_provider = TestL2ChainProvider::default(); + // Holocene activates at timestamp 2. + let cfg = Arc::new(RollupConfig { + hardforks: HardForkConfig { holocene_time: Some(2), ..Default::default() }, + ..Default::default() + }); + let mut batch_provider = BatchProvider::new(cfg, provider, l2_provider); + + // Initialize with BatchQueue (Holocene not yet active at timestamp 0). + batch_provider.attempt_update().unwrap(); + assert!(batch_provider.batch_queue.is_some(), "Expected BatchQueue pre-Holocene"); + + // Seed BatchQueue.l1_blocks with two blocks (as would happen during normal derivation). + let block_a = BlockInfo { number: 1, timestamp: 0, ..Default::default() }; + let block_b = BlockInfo { number: 2, timestamp: 2, ..Default::default() }; + { + let bq = batch_provider.batch_queue.as_mut().unwrap(); + bq.l1_blocks.push(block_a); + bq.l1_blocks.push(block_b); + bq.origin = Some(block_b); // BatchQueue.origin set to the current L1 head + // Advance the mock prev origin to Holocene activation timestamp. + bq.prev.origin = Some(block_b); + } + + // Trigger Holocene transition via attempt_update(). + batch_provider.attempt_update().unwrap(); + assert!(batch_provider.batch_queue.is_none(), "BatchQueue should be gone post-Holocene"); + assert!(batch_provider.batch_validator.is_some(), "Expected BatchValidator post-Holocene"); + + let bv = batch_provider.batch_validator.as_ref().unwrap(); + + // Verify l1_blocks were transferred. + assert_eq!(bv.l1_blocks.len(), 2, "l1_blocks must be transferred from BatchQueue"); + assert_eq!(bv.l1_blocks[0], block_a); + assert_eq!(bv.l1_blocks[1], block_b); + + // Verify origin was transferred: BatchValidator.origin must equal the + // origin from the old BatchQueue, matching Go's TransformHolocene (batch_mux.go:68). 
+ assert_eq!( + bv.origin, + Some(block_b), + "BatchValidator.origin must be copied from BatchQueue on Holocene transition" + ); + } } From 9f3b9093547e15b8399b227d4a231d31e986e8ca Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 11 Mar 2026 12:41:19 +1000 Subject: [PATCH 098/201] fix(kona/derive): return Reset instead of Critical on blob under-fill in BlobData::fill (#19362) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(kona/derive): return Reset instead of Critical on blob under-fill in BlobData::fill When the blob provider returns fewer blobs than there are blob-hash placeholders (under-fill), BlobData::fill previously returned BlobDecodingError::InvalidLength which mapped to PipelineErrorKind::Critical — permanently terminating the pipeline and requiring operator restart. This is wrong. Under-fill is a transient condition: - The provider may not have all blobs cached/synced yet for a newly confirmed block - The L1 block may have been reorged out, causing the provider to return no blobs In both cases the correct response is to reset the pipeline and retry from the last safe L1 block, exactly as op-node does: fillBlobPointers wraps the same error as NewResetError at blob_data_source.go:110. Changes: - Add ResetError::BlobsUnderFill(usize, usize) to pipeline.rs - Add BlobProviderError::NotEnoughBlobs(usize, usize) to sources.rs, mapped to ResetError::BlobsUnderFill.reset() in From<BlobProviderError> for PipelineErrorKind - Change BlobData::fill return type from Result<bool, BlobDecodingError> to Result<bool, BlobProviderError>, returning NotEnoughBlobs on under-fill - Update blobs.rs caller to use the simplified .into() conversion - Update tests to assert under-fill produces a Reset-level error Fixes https://github.com/ethereum-optimism/optimism/issues/19359 * fix(kona/derive): remove unused BlobProviderError import The refactor to use Into::<PipelineErrorKind>::into made the direct BlobProviderError import unnecessary, triggering a clippy warning.
Co-Authored-By: Claude Opus 4.6 * fix: apply cargo fmt to blobs.rs import ordering Co-Authored-By: Claude Opus 4.6 * fix(kona/derive): address review feedback on blob under-fill PR - Wrap BlobProviderError in ResetError::BlobsUnderFill instead of duplicating fields - Use named fields for BlobProviderError::NotEnoughBlobs - Remove unnecessary map_err in BlobSource fill loop - Add Clone to BlobDecodingError and BlobProviderError Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .../protocol/derive/src/errors/pipeline.rs | 9 ++++++++- .../protocol/derive/src/errors/sources.rs | 19 +++++++++++++++++-- .../protocol/derive/src/sources/blob_data.rs | 13 ++++++++----- .../protocol/derive/src/sources/blobs.rs | 8 +++----- 4 files changed, 36 insertions(+), 13 deletions(-) diff --git a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs index 3a8936ebe3fe1..a537dcc97c924 100644 --- a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs +++ b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs @@ -1,6 +1,6 @@ //! This module contains derivation errors thrown within the pipeline. -use crate::BuilderError; +use crate::{BlobProviderError, BuilderError}; use alloc::string::String; use alloy_eips::BlockId; use alloy_primitives::B256; @@ -353,6 +353,9 @@ pub enum ResetError { /// typically because an L1 reorg removed it. The pipeline must reset to recover. #[error("Block not found: {0}")] BlockNotFound(BlockId), + /// The blob provider returned fewer blobs than expected (under-fill). 
+ #[error("Blob provider under-fill: {0}")] + BlobsUnderFill(BlobProviderError), } impl ResetError { @@ -442,6 +445,10 @@ mod tests { ResetError::HoloceneActivation, ResetError::BlobsUnavailable(0), ResetError::BlockNotFound(B256::default().into()), + ResetError::BlobsUnderFill(BlobProviderError::NotEnoughBlobs { + expected: 0, + actual: 0, + }), ]; for error in reset_errors { let expected = PipelineErrorKind::Reset(error.clone()); diff --git a/rust/kona/crates/protocol/derive/src/errors/sources.rs b/rust/kona/crates/protocol/derive/src/errors/sources.rs index ba9932edfcee9..8c38e2676d7f3 100644 --- a/rust/kona/crates/protocol/derive/src/errors/sources.rs +++ b/rust/kona/crates/protocol/derive/src/errors/sources.rs @@ -5,7 +5,7 @@ use alloc::string::{String, ToString}; use thiserror::Error; /// Blob Decoding Error -#[derive(Error, Debug, PartialEq, Eq)] +#[derive(Error, Clone, Debug, PartialEq, Eq)] pub enum BlobDecodingError { /// Invalid field element #[error("Invalid field element")] @@ -22,7 +22,7 @@ pub enum BlobDecodingError { } /// An error returned by the [`BlobProviderError`]. -#[derive(Error, Debug, PartialEq, Eq)] +#[derive(Error, Clone, Debug, PartialEq, Eq)] pub enum BlobProviderError { /// The number of specified blob hashes did not match the number of returned sidecars. #[error("Blob sidecar length mismatch: expected {0}, got {1}")] @@ -33,6 +33,16 @@ pub enum BlobProviderError { /// Blob decoding error. #[error("Blob decoding error: {0}")] BlobDecoding(#[from] BlobDecodingError), + /// The blob provider returned fewer blobs than requested (under-fill). + #[error( + "Not enough blobs: expected blob at index {expected} but provider returned only {actual} blobs" + )] + NotEnoughBlobs { + /// The blob index that was expected. + expected: usize, + /// The actual number of blobs returned by the provider. + actual: usize, + }, /// The beacon node returned a 404 for the requested slot, indicating the slot was missed or /// orphaned. 
Blobs for missed/orphaned slots will never become available, so the pipeline /// must reset to move past the L1 block that referenced them. @@ -54,6 +64,7 @@ impl From for PipelineErrorKind { BlobProviderError::SidecarLengthMismatch(_, _) | BlobProviderError::SlotDerivation | BlobProviderError::BlobDecoding(_) => PipelineError::Provider(val.to_string()).crit(), + BlobProviderError::NotEnoughBlobs { .. } => ResetError::BlobsUnderFill(val).reset(), BlobProviderError::BlobNotFound { slot, .. } => { ResetError::BlobsUnavailable(slot).reset() } @@ -97,5 +108,9 @@ mod tests { matches!(err, PipelineErrorKind::Reset(_)), "BlobNotFound must map to Reset so the pipeline moves past the missed slot" ); + + let err: PipelineErrorKind = + BlobProviderError::NotEnoughBlobs { expected: 2, actual: 1 }.into(); + assert!(matches!(err, PipelineErrorKind::Reset(_))); } } diff --git a/rust/kona/crates/protocol/derive/src/sources/blob_data.rs b/rust/kona/crates/protocol/derive/src/sources/blob_data.rs index 62b7e8282b45b..4ce4cd0a396e2 100644 --- a/rust/kona/crates/protocol/derive/src/sources/blob_data.rs +++ b/rust/kona/crates/protocol/derive/src/sources/blob_data.rs @@ -1,6 +1,6 @@ //! Contains the `BlobData` struct. 
-use crate::BlobDecodingError; +use crate::{BlobDecodingError, BlobProviderError}; use alloc::{boxed::Box, vec}; use alloy_eips::eip4844::{BYTES_PER_BLOB, Blob, VERSIONED_HASH_VERSION_KZG}; use alloy_primitives::Bytes; @@ -146,18 +146,18 @@ impl BlobData { &mut self, blobs: &[Box], index: usize, - ) -> Result { + ) -> Result { // Do not fill if there is calldata here if self.calldata.is_some() { return Ok(false); } if index >= blobs.len() { - return Err(BlobDecodingError::InvalidLength); + return Err(BlobProviderError::NotEnoughBlobs { expected: index, actual: blobs.len() }); } if blobs[index].is_empty() { - return Err(BlobDecodingError::MissingData); + return Err(BlobDecodingError::MissingData.into()); } self.data = Some(Bytes::from(*blobs[index])); @@ -190,7 +190,10 @@ mod tests { fn test_fill_oob_index() { let mut blob_data = BlobData::default(); let blobs = vec![Box::new(Blob::with_last_byte(1u8))]; - assert_eq!(blob_data.fill(&blobs, 1), Err(BlobDecodingError::InvalidLength)); + assert_eq!( + blob_data.fill(&blobs, 1), + Err(BlobProviderError::NotEnoughBlobs { expected: 1, actual: 1 }) + ); } #[test] diff --git a/rust/kona/crates/protocol/derive/src/sources/blobs.rs b/rust/kona/crates/protocol/derive/src/sources/blobs.rs index 367ffe763292e..f8661f660f153 100644 --- a/rust/kona/crates/protocol/derive/src/sources/blobs.rs +++ b/rust/kona/crates/protocol/derive/src/sources/blobs.rs @@ -1,8 +1,8 @@ //! Blob Data Source use crate::{ - BlobData, BlobProvider, BlobProviderError, ChainProvider, DataAvailabilityProvider, - PipelineError, PipelineErrorKind, PipelineResult, + BlobData, BlobProvider, ChainProvider, DataAvailabilityProvider, PipelineError, + PipelineErrorKind, PipelineResult, }; use alloc::{boxed::Box, vec::Vec}; use alloy_consensus::{ @@ -165,9 +165,7 @@ where // Fill the blob pointers. 
let mut blob_index = 0; for blob in &mut data { - let should_increment = blob - .fill(&blobs, blob_index) - .map_err(|e| -> PipelineErrorKind { BlobProviderError::from(e).into() })?; + let should_increment = blob.fill(&blobs, blob_index)?; if should_increment { blob_index += 1; } From 444427759c643a24d0b644ba85bdb1dc9051e514 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Wed, 11 Mar 2026 12:57:24 +1000 Subject: [PATCH 099/201] fix(ci): add passthrough parameter declarations to continuation configs (#19462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CircleCI forwards all explicitly-passed pipeline parameters to continuation configs during dynamic config (setup: true). When parameters are only declared in the setup config (config.yml) but not in the continuation configs, manually triggered pipelines fail with "Unexpected argument(s)". This was introduced by #19142 which renamed continuation parameters with a c- prefix to fix "Conflicting pipeline parameters" errors. The c- prefix solved the conflict but broke manual dispatch because the original parameter names are still forwarded to continuation. Add all setup config parameters as passthrough declarations in each continuation config (main.yml, rust-ci.yml, rust-e2e.yml). These are not referenced by any job — the c- prefixed versions remain in use. Co-authored-by: Claude Opus 4.6 --- .circleci/continue/main.yml | 76 +++++++++++++++++++++++++++++++++ .circleci/continue/rust-ci.yml | 76 +++++++++++++++++++++++++++++++++ .circleci/continue/rust-e2e.yml | 76 +++++++++++++++++++++++++++++++++ 3 files changed, 228 insertions(+) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 37e215a40a0ef..e4af363bda8f9 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -81,6 +81,82 @@ parameters: c-go-cache-version: type: string default: "v0.0" + # Passthrough declarations for setup config parameters. 
+ # CircleCI forwards all explicitly-passed pipeline parameters to continuation configs. + # Without these declarations, manually triggered pipelines fail with "Unexpected argument(s)". + # These are not referenced by any job — the c- prefixed versions above are used instead. + default_docker_image: + type: string + default: cimg/base:2026.03 + base_image: + type: string + default: default + main_dispatch: + type: boolean + default: true + fault_proofs_dispatch: + type: boolean + default: false + reproducibility_dispatch: + type: boolean + default: false + kontrol_dispatch: + type: boolean + default: false + cannon_full_test_dispatch: + type: boolean + default: false + sdk_dispatch: + type: boolean + default: false + docker_publish_dispatch: + type: boolean + default: false + stale_check_dispatch: + type: boolean + default: false + contracts_coverage_dispatch: + type: boolean + default: false + heavy_fuzz_dispatch: + type: boolean + default: false + sync_test_op_node_dispatch: + type: boolean + default: false + ai_contracts_test_dispatch: + type: boolean + default: false + rust_ci_dispatch: + type: boolean + default: false + rust_e2e_dispatch: + type: boolean + default: false + github-event-type: + type: string + default: "__not_set__" + github-event-action: + type: string + default: "__not_set__" + github-event-base64: + type: string + default: "__not_set__" + devnet-metrics-collect: + type: boolean + default: false + flake-shake-dispatch: + type: boolean + default: false + flake-shake-iterations: + type: integer + default: 300 + flake-shake-workers: + type: integer + default: 50 + go-cache-version: + type: string + default: "v0.0" orbs: go: circleci/go@1.8.0 diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index 3cc7cc53a607b..2db59a0c8b7d7 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -24,6 +24,82 @@ parameters: c-rust_changes_detected: type: boolean default: false + # Passthrough declarations for 
setup config parameters. + # CircleCI forwards all explicitly-passed pipeline parameters to continuation configs. + # Without these declarations, manually triggered pipelines fail with "Unexpected argument(s)". + # These are not referenced by any job — the c- prefixed versions above are used instead. + default_docker_image: + type: string + default: cimg/base:2026.03 + base_image: + type: string + default: default + main_dispatch: + type: boolean + default: true + fault_proofs_dispatch: + type: boolean + default: false + reproducibility_dispatch: + type: boolean + default: false + kontrol_dispatch: + type: boolean + default: false + cannon_full_test_dispatch: + type: boolean + default: false + sdk_dispatch: + type: boolean + default: false + docker_publish_dispatch: + type: boolean + default: false + stale_check_dispatch: + type: boolean + default: false + contracts_coverage_dispatch: + type: boolean + default: false + heavy_fuzz_dispatch: + type: boolean + default: false + sync_test_op_node_dispatch: + type: boolean + default: false + ai_contracts_test_dispatch: + type: boolean + default: false + rust_ci_dispatch: + type: boolean + default: false + rust_e2e_dispatch: + type: boolean + default: false + github-event-type: + type: string + default: "__not_set__" + github-event-action: + type: string + default: "__not_set__" + github-event-base64: + type: string + default: "__not_set__" + devnet-metrics-collect: + type: boolean + default: false + flake-shake-dispatch: + type: boolean + default: false + flake-shake-iterations: + type: integer + default: 300 + flake-shake-workers: + type: integer + default: 50 + go-cache-version: + type: string + default: "v0.0" # ============================================================================ # COMMANDS diff --git a/.circleci/continue/rust-e2e.yml b/.circleci/continue/rust-e2e.yml index eb20cc392a05b..7887cc9c985a0 100644 --- a/.circleci/continue/rust-e2e.yml +++ b/.circleci/continue/rust-e2e.yml @@ -19,6 +19,82 @@ 
parameters: c-rust_changes_detected: type: boolean default: false + # Passthrough declarations for setup config parameters. + # CircleCI forwards all explicitly-passed pipeline parameters to continuation configs. + # Without these declarations, manually triggered pipelines fail with "Unexpected argument(s)". + # These are not referenced by any job — the c- prefixed versions above are used instead. + default_docker_image: + type: string + default: cimg/base:2026.03 + base_image: + type: string + default: default + main_dispatch: + type: boolean + default: true + fault_proofs_dispatch: + type: boolean + default: false + reproducibility_dispatch: + type: boolean + default: false + kontrol_dispatch: + type: boolean + default: false + cannon_full_test_dispatch: + type: boolean + default: false + sdk_dispatch: + type: boolean + default: false + docker_publish_dispatch: + type: boolean + default: false + stale_check_dispatch: + type: boolean + default: false + contracts_coverage_dispatch: + type: boolean + default: false + heavy_fuzz_dispatch: + type: boolean + default: false + sync_test_op_node_dispatch: + type: boolean + default: false + ai_contracts_test_dispatch: + type: boolean + default: false + rust_ci_dispatch: + type: boolean + default: false + rust_e2e_dispatch: + type: boolean + default: false + github-event-type: + type: string + default: "__not_set__" + github-event-action: + type: string + default: "__not_set__" + github-event-base64: + type: string + default: "__not_set__" + devnet-metrics-collect: + type: boolean + default: false + flake-shake-dispatch: + type: boolean + default: false + flake-shake-iterations: + type: integer + default: 300 + flake-shake-workers: + type: integer + default: 50 + go-cache-version: + type: string + default: "v0.0" # Commands used by rust-e2e jobs commands: From ebb6baeb15e48fc434cc527263b931c8715fa7ea Mon Sep 17 00:00:00 2001 From: wwared Date: Wed, 11 Mar 2026 09:45:08 -0300 Subject: [PATCH 100/201] op-challenger: Replace 
supervisor with supernode in vm runner (#19438) * op-challenger: Replace supervisor with supernode in vm runner Adds a new supernode RPC endpoint `superroot_syncStatus` which returns the aggregate values of all individual op-node's current `SyncStatus`es. The new method is very similar to `superroot_atTimestamp` but this keeps them separate for now, though they could be combined in the future. * op-supernode: Move `superroot_syncStatus` to `supernode_syncStatus` * op-supernode: Use the minimum value for CurrentL1 in syncStatus * op-supernode: Consider `chain.VerifierCurrentL1s` in `supernode_syncStatus` * op-supernode: Deduplicate common code between `supernode_syncStatus` and `superroot_atTimestamp` * op-supernode: Address review comments - Add `CurrentLocalSafeTimestamp` to `superroot_atTimestamp` result - Make `CurrentSafeTimestamp` be derived from the safe heads instead of local-safe for `superroot_atTimestamp` - Remove unnecessary `Result` type in helper - Build `chainIDs` in the loop inside `Aggregate` - Remove superfluous `else if` case - Update corresponding tests --------- Co-authored-by: wwared <541936+wwared@users.noreply.github.com> --- op-challenger/runner/game_inputs.go | 18 +- op-challenger/runner/runner.go | 20 +- op-service/apis/supernode.go | 1 + op-service/eth/supernode_status.go | 26 ++ op-service/eth/superroot_at_timestamp.go | 4 + op-service/sources/supernode_client.go | 5 + op-service/sources/supernode_client_test.go | 57 ++++ op-supernode/README.md | 2 + .../internal/syncstatus/syncstatus.go | 85 ++++++ .../supernode/activity/supernode/supernode.go | 45 +++ .../activity/supernode/supernode_test.go | 263 ++++++++++++++++++ .../supernode/activity/superroot/superroot.go | 68 +---- .../activity/superroot/superroot_test.go | 5 +- op-supernode/supernode/supernode.go | 2 + 14 files changed, 524 insertions(+), 77 deletions(-) create mode 100644 op-service/eth/supernode_status.go create mode 100644
op-supernode/supernode/activity/internal/syncstatus/syncstatus.go create mode 100644 op-supernode/supernode/activity/supernode/supernode.go create mode 100644 op-supernode/supernode/activity/supernode/supernode_test.go diff --git a/op-challenger/runner/game_inputs.go b/op-challenger/runner/game_inputs.go index bbd0c7039279d..ef2c5dbd7c5ad 100644 --- a/op-challenger/runner/game_inputs.go +++ b/op-challenger/runner/game_inputs.go @@ -16,13 +16,13 @@ import ( "github.com/ethereum/go-ethereum/log" ) -func createGameInputs(ctx context.Context, log log.Logger, rollupClient *sources.RollupClient, supervisorClient *sources.SupervisorClient, typeName string, gameType gameTypes.GameType) (utils.LocalGameInputs, error) { +func createGameInputs(ctx context.Context, log log.Logger, rollupClient *sources.RollupClient, superNodeClient *sources.SuperNodeClient, typeName string, gameType gameTypes.GameType) (utils.LocalGameInputs, error) { switch gameType { case gameTypes.SuperCannonGameType, gameTypes.SuperPermissionedGameType, gameTypes.SuperCannonKonaGameType: - if supervisorClient == nil { - return utils.LocalGameInputs{}, fmt.Errorf("game type %s requires supervisor rpc to be set", gameType) + if superNodeClient == nil { + return utils.LocalGameInputs{}, fmt.Errorf("game type %s requires supernode rpc to be set", gameType) } - return createGameInputsInterop(ctx, log, supervisorClient, typeName) + return createGameInputsInterop(ctx, log, superNodeClient, typeName) default: if rollupClient == nil { return utils.LocalGameInputs{}, fmt.Errorf("game type %s requires rollup rpc to be set", gameType) @@ -76,10 +76,10 @@ func createGameInputsSingle(ctx context.Context, log log.Logger, client *sources return localInputs, nil } -func createGameInputsInterop(ctx context.Context, log log.Logger, client *sources.SupervisorClient, typeName string) (utils.LocalGameInputs, error) { +func createGameInputsInterop(ctx context.Context, log log.Logger, client *sources.SuperNodeClient, typeName 
string) (utils.LocalGameInputs, error) { status, err := client.SyncStatus(ctx) if err != nil { - return utils.LocalGameInputs{}, fmt.Errorf("failed to get supervisor sync status: %w", err) + return utils.LocalGameInputs{}, fmt.Errorf("failed to get supernode sync status: %w", err) } log.Info("Got sync status", "status", status, "type", typeName) @@ -88,15 +88,15 @@ func createGameInputsInterop(ctx context.Context, log log.Logger, client *source if claimTimestamp == 0 { return utils.LocalGameInputs{}, errors.New("finalized timestamp is 0") } - l1Head := status.MinSyncedL1 + l1Head := status.CurrentL1 log.Info("Using L1 head", "head", l1Head, "type", typeName) if l1Head.Number == 0 { return utils.LocalGameInputs{}, errors.New("l1 head is 0") } - prestateProvider := super.NewSuperRootPrestateProvider(client, agreedTimestamp) + prestateProvider := super.NewSuperNodePrestateProvider(client, agreedTimestamp) gameDepth := types.Depth(30) - provider := super.NewSupervisorSuperTraceProvider(log, nil, prestateProvider, client, l1Head.ID(), gameDepth, agreedTimestamp, claimTimestamp+10) + provider := super.NewSuperNodeTraceProvider(log, prestateProvider, client, l1Head, gameDepth, agreedTimestamp, claimTimestamp+10) var agreedPrestate []byte var claim common.Hash switch rand.IntN(3) { diff --git a/op-challenger/runner/runner.go b/op-challenger/runner/runner.go index f3de6125d9d25..9c4d70f03f006 100644 --- a/op-challenger/runner/runner.go +++ b/op-challenger/runner/runner.go @@ -113,14 +113,14 @@ func (r *Runner) Start(ctx context.Context) error { } rollupClient = cl } - var supervisorClient *sources.SupervisorClient + var superNodeClient *sources.SuperNodeClient if r.cfg.SuperRPC != "" { - r.log.Info("Dialling supervisor client", "url", r.cfg.SuperRPC) - cl, err := dial.DialSupervisorClientWithTimeout(ctx, r.log, r.cfg.SuperRPC) + r.log.Info("Dialling supernode client", "url", r.cfg.SuperRPC) + cl, err := dial.DialSuperNodeClientWithTimeout(ctx, r.log, r.cfg.SuperRPC) if err 
!= nil { - return fmt.Errorf("failed to dial supervisor: %w", err) + return fmt.Errorf("failed to dial supernode: %w", err) } - supervisorClient = cl + superNodeClient = cl } l1Client, err := dial.DialRPCClientWithTimeout(ctx, r.log, r.cfg.L1EthRpc) @@ -131,20 +131,20 @@ func (r *Runner) Start(ctx context.Context) error { for _, runConfig := range r.runConfigs { r.wg.Add(1) - go r.loop(ctx, runConfig, rollupClient, supervisorClient, caller) + go r.loop(ctx, runConfig, rollupClient, superNodeClient, caller) } r.log.Info("Runners started", "num", len(r.runConfigs)) return nil } -func (r *Runner) loop(ctx context.Context, runConfig RunConfig, rollupClient *sources.RollupClient, supervisorClient *sources.SupervisorClient, caller *batching.MultiCaller) { +func (r *Runner) loop(ctx context.Context, runConfig RunConfig, rollupClient *sources.RollupClient, superNodeClient *sources.SuperNodeClient, caller *batching.MultiCaller) { defer r.wg.Done() t := time.NewTicker(1 * time.Minute) defer t.Stop() for { baseLog := r.log.New("run_id", generateRunID()) - r.runAndRecordOnce(ctx, baseLog, runConfig, rollupClient, supervisorClient, caller) + r.runAndRecordOnce(ctx, baseLog, runConfig, rollupClient, superNodeClient, caller) select { case <-t.C: case <-ctx.Done(): @@ -153,7 +153,7 @@ func (r *Runner) loop(ctx context.Context, runConfig RunConfig, rollupClient *so } } -func (r *Runner) runAndRecordOnce(ctx context.Context, rlog log.Logger, runConfig RunConfig, rollupClient *sources.RollupClient, supervisorClient *sources.SupervisorClient, caller *batching.MultiCaller) { +func (r *Runner) runAndRecordOnce(ctx context.Context, rlog log.Logger, runConfig RunConfig, rollupClient *sources.RollupClient, superNodeClient *sources.SuperNodeClient, caller *batching.MultiCaller) { recordError := func(err error, configName string, m Metricer, log log.Logger) { if errors.Is(err, ErrUnexpectedStatusCode) { log.Error("Incorrect status code", "type", runConfig.Name, "err", err) @@ -194,7 +194,7 
@@ func (r *Runner) runAndRecordOnce(ctx context.Context, rlog log.Logger, runConfi prestateSource = &HashPrestateFetcher{prestateHash: runConfig.Prestate} } - localInputs, err := createGameInputs(ctx, rlog, rollupClient, supervisorClient, runConfig.Name, runConfig.GameType) + localInputs, err := createGameInputs(ctx, rlog, rollupClient, superNodeClient, runConfig.Name, runConfig.GameType) if err != nil { recordError(err, runConfig.Name, r.m, rlog) return diff --git a/op-service/apis/supernode.go b/op-service/apis/supernode.go index 753356b4292ba..6f5ee28c86f4f 100644 --- a/op-service/apis/supernode.go +++ b/op-service/apis/supernode.go @@ -10,4 +10,5 @@ import ( // It is intentionally small and can be expanded as needed. type SupernodeQueryAPI interface { SuperRootAtTimestamp(ctx context.Context, timestamp uint64) (eth.SuperRootAtTimestampResponse, error) + SyncStatus(ctx context.Context) (eth.SuperNodeSyncStatusResponse, error) } diff --git a/op-service/eth/supernode_status.go b/op-service/eth/supernode_status.go new file mode 100644 index 0000000000000..2d90b9f2bf06e --- /dev/null +++ b/op-service/eth/supernode_status.go @@ -0,0 +1,26 @@ +package eth + +// SuperNodeSyncStatusResponse is the response returned by supernode_syncStatus. +type SuperNodeSyncStatusResponse struct { + // Chains contains the per-chain op-node sync status. + Chains map[ChainID]SyncStatus `json:"chains"` + + // ChainIDs are the chain IDs in the dependency set, sorted ascending. + ChainIDs []ChainID `json:"chain_ids"` + + // CurrentL1 is the highest L1 block ID that has been fully derived and verified by all chains. + // This value is derived from the minimum per-chain current L1 block IDs, including validators. + CurrentL1 BlockID `json:"current_l1"` + + // SafeTimestamp is the highest L2 timestamp that is safe across the dependency set at the CurrentL1. + // This value is derived from the minimum per-chain safe L2 head timestamp. 
+ SafeTimestamp uint64 `json:"safe_timestamp"` + + // LocalSafeTimestamp is the highest L2 timestamp that is local-safe across the dependency set at the CurrentL1. + // This value is derived from the minimum per-chain local safe L2 head timestamp. + LocalSafeTimestamp uint64 `json:"local_safe_timestamp"` + + // FinalizedTimestamp is the highest L2 timestamp that is finalized across the dependency set at the CurrentL1. + // This value is derived from the minimum per-chain finalized L2 head timestamp. + FinalizedTimestamp uint64 `json:"finalized_timestamp"` +} diff --git a/op-service/eth/superroot_at_timestamp.go b/op-service/eth/superroot_at_timestamp.go index fc90ef36a7671..0a058253dd982 100644 --- a/op-service/eth/superroot_at_timestamp.go +++ b/op-service/eth/superroot_at_timestamp.go @@ -58,6 +58,10 @@ type SuperRootAtTimestampResponse struct { // This value is derived from the minimum per-chain safe L2 head timestamp. CurrentSafeTimestamp uint64 `json:"safe_timestamp"` + // CurrentSafeTimestamp is the highest L2 timestamp that is safe across the dependency set at the CurrentL1. + // This value is derived from the minimum per-chain local-safe L2 head timestamp. + CurrentLocalSafeTimestamp uint64 `json:"local_safe_timestamp"` + // CurrentFinalizedTimestamp is the highest L2 timestamp that is finalized across the dependency set at the CurrentL1. // This value is derived from the minimum per-chain finalized L2 head timestamp. 
CurrentFinalizedTimestamp uint64 `json:"finalized_timestamp"` diff --git a/op-service/sources/supernode_client.go b/op-service/sources/supernode_client.go index c201145c42fba..d8d271abb0248 100644 --- a/op-service/sources/supernode_client.go +++ b/op-service/sources/supernode_client.go @@ -23,6 +23,11 @@ func (c *SuperNodeClient) SuperRootAtTimestamp(ctx context.Context, timestamp ui return } +func (c *SuperNodeClient) SyncStatus(ctx context.Context) (result eth.SuperNodeSyncStatusResponse, err error) { + err = c.rpc.CallContext(ctx, &result, "supernode_syncStatus") + return +} + func (cl *SuperNodeClient) Close() { cl.rpc.Close() } diff --git a/op-service/sources/supernode_client_test.go b/op-service/sources/supernode_client_test.go index 951268cebd13f..d6d0ad9d14577 100644 --- a/op-service/sources/supernode_client_test.go +++ b/op-service/sources/supernode_client_test.go @@ -146,3 +146,60 @@ func TestSuperNodeClient_SuperRootAtTimestamp(t *testing.T) { require.NotNil(t, err) }) } + +func TestSuperNodeClient_SyncStatus(t *testing.T) { + t.Run("Success", func(t *testing.T) { + ctx := context.Background() + rpc := new(mockRPC) + defer rpc.AssertExpectations(t) + client := NewSuperNodeClient(rpc) + + chainA := eth.ChainIDFromUInt64(1) + chainB := eth.ChainIDFromUInt64(2) + expected := eth.SuperNodeSyncStatusResponse{ + Chains: map[eth.ChainID]eth.SyncStatus{ + chainA: { + CurrentL1: eth.L1BlockRef{Number: 100}, + UnsafeL2: eth.L2BlockRef{Number: 10, Time: 200}, + CrossUnsafeL2: eth.L2BlockRef{Number: 9, Time: 190}, + LocalSafeL2: eth.L2BlockRef{Number: 9, Time: 175}, + SafeL2: eth.L2BlockRef{Number: 8, Time: 160}, + FinalizedL2: eth.L2BlockRef{Number: 6, Time: 120}, + }, + chainB: { + CurrentL1: eth.L1BlockRef{Number: 100}, + UnsafeL2: eth.L2BlockRef{Number: 11, Time: 210}, + CrossUnsafeL2: eth.L2BlockRef{Number: 10, Time: 200}, + LocalSafeL2: eth.L2BlockRef{Number: 9, Time: 170}, + SafeL2: eth.L2BlockRef{Number: 8, Time: 160}, + FinalizedL2: eth.L2BlockRef{Number: 
5, Time: 100}, + }, + }, + CurrentL1: eth.BlockID{Number: 100}, + ChainIDs: []eth.ChainID{chainA, chainB}, + SafeTimestamp: 160, + LocalSafeTimestamp: 170, + FinalizedTimestamp: 100, + } + rpc.On("CallContext", ctx, new(eth.SuperNodeSyncStatusResponse), + "supernode_syncStatus", []any(nil)).Run(func(args mock.Arguments) { + *args[1].(*eth.SuperNodeSyncStatusResponse) = expected + }).Return([]error{nil}) + + result, err := client.SyncStatus(ctx) + require.NoError(t, err) + require.Equal(t, expected, result) + }) + + t.Run("Error", func(t *testing.T) { + ctx := context.Background() + rpc := new(mockRPC) + defer rpc.AssertExpectations(t) + client := NewSuperNodeClient(rpc) + + rpc.On("CallContext", ctx, new(eth.SuperNodeSyncStatusResponse), + "supernode_syncStatus", []any(nil)).Return([]error{errors.New("boom")}) + _, err := client.SyncStatus(ctx) + require.Error(t, err) + }) +} diff --git a/op-supernode/README.md b/op-supernode/README.md index fcdb47e1d6fde..177155215765f 100644 --- a/op-supernode/README.md +++ b/op-supernode/README.md @@ -79,6 +79,8 @@ Components which expose Start/Stop are given a goroutine to work during `op-supe - Runtime: emits a simple heartbeat message to the logs to show liveness. - `SuperRoot` - RPC: `superroot_atTimestamp` produces a SuperRoot from Verified L2 blocks, and includes sync/derivation information for Proofs. +- `Supernode` + - RPC: `supernode_syncStatus` returns aggregate per-chain sync status across the dependency set. 
### Quickstart Build: diff --git a/op-supernode/supernode/activity/internal/syncstatus/syncstatus.go b/op-supernode/supernode/activity/internal/syncstatus/syncstatus.go new file mode 100644 index 0000000000000..4005ee676fb85 --- /dev/null +++ b/op-supernode/supernode/activity/internal/syncstatus/syncstatus.go @@ -0,0 +1,85 @@ +package syncstatus + +import ( + "context" + "slices" + + "github.com/ethereum-optimism/optimism/op-service/eth" + cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + gethlog "github.com/ethereum/go-ethereum/log" +) + +func Aggregate(ctx context.Context, log gethlog.Logger, chains map[eth.ChainID]cc.ChainContainer) (eth.SuperNodeSyncStatusResponse, error) { + var ( + statuses map[eth.ChainID]eth.SyncStatus + minCurrentL1 eth.BlockID + minLocalSafeTimestamp uint64 + minSafeTimestamp uint64 + minFinalizedTimestamp uint64 + safeInitialized bool + localSafeInitialized bool + finalizedInitialized bool + ) + statuses = make(map[eth.ChainID]eth.SyncStatus, len(chains)) + chainIDs := make([]eth.ChainID, 0, len(chains)) + + for chainID, chain := range chains { + chainIDs = append(chainIDs, chainID) + status, err := chain.SyncStatus(ctx) + if err != nil { + log.Warn("failed to get sync status", "chain_id", chainID.String(), "err", err) + return eth.SuperNodeSyncStatusResponse{}, err + } + if status == nil { + status = ð.SyncStatus{} + } + statuses[chainID] = *status + + // Get current L1s — the minimum L1 block that all derivation pipelines and verifiers have processed. + // This informs callers that the chains' local views have considered at least up to this L1 block. + currentL1 := status.CurrentL1.ID() + if currentL1.Number < minCurrentL1.Number || minCurrentL1 == (eth.BlockID{}) { + minCurrentL1 = currentL1 + } + // Also consider the L1 progress of any registered verifiers. 
+ for _, verifierL1 := range chain.VerifierCurrentL1s() { + if verifierL1.Number < minCurrentL1.Number || minCurrentL1 == (eth.BlockID{}) { + minCurrentL1 = verifierL1 + } + } + + // Conservative aggregation across chains: take the minimum timestamps. + // If any chain has a zero timestamp (not initialized), the aggregate is zero. + if !localSafeInitialized { + minLocalSafeTimestamp = status.LocalSafeL2.Time + localSafeInitialized = true + } else if status.LocalSafeL2.Time < minLocalSafeTimestamp { + minLocalSafeTimestamp = status.LocalSafeL2.Time + } + + if !safeInitialized { + minSafeTimestamp = status.SafeL2.Time + safeInitialized = true + } else if status.SafeL2.Time < minSafeTimestamp { + minSafeTimestamp = status.SafeL2.Time + } + + if !finalizedInitialized { + minFinalizedTimestamp = status.FinalizedL2.Time + finalizedInitialized = true + } else if status.FinalizedL2.Time < minFinalizedTimestamp { + minFinalizedTimestamp = status.FinalizedL2.Time + } + } + + slices.SortFunc(chainIDs, func(a, b eth.ChainID) int { return a.Cmp(b) }) + + return eth.SuperNodeSyncStatusResponse{ + Chains: statuses, + ChainIDs: chainIDs, + CurrentL1: minCurrentL1, + SafeTimestamp: minSafeTimestamp, + LocalSafeTimestamp: minLocalSafeTimestamp, + FinalizedTimestamp: minFinalizedTimestamp, + }, nil +} diff --git a/op-supernode/supernode/activity/supernode/supernode.go b/op-supernode/supernode/activity/supernode/supernode.go new file mode 100644 index 0000000000000..1ea98f6798cc6 --- /dev/null +++ b/op-supernode/supernode/activity/supernode/supernode.go @@ -0,0 +1,45 @@ +package supernode + +import ( + "context" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity/internal/syncstatus" + cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + gethlog "github.com/ethereum/go-ethereum/log" +) + +var _ 
activity.RPCActivity = (*Activity)(nil) + +type Activity struct { + log gethlog.Logger + chains map[eth.ChainID]cc.ChainContainer +} + +func New(log gethlog.Logger, chains map[eth.ChainID]cc.ChainContainer) *Activity { + return &Activity{ + log: log, + chains: chains, + } +} + +func (a *Activity) Name() string { return "supernode" } + +func (a *Activity) Reset(chainID eth.ChainID, timestamp uint64, invalidatedBlock eth.BlockRef) { + // No-op: sync status queries chain containers directly. +} + +func (a *Activity) RPCNamespace() string { return "supernode" } +func (a *Activity) RPCService() interface{} { return &api{a: a} } + +type api struct{ a *Activity } + +// SyncStatus returns all the per-node SyncStatus responses and computes the current localsafe/safe/finalized timestamps. +func (api *api) SyncStatus(ctx context.Context) (eth.SuperNodeSyncStatusResponse, error) { + return api.a.syncStatus(ctx) +} + +func (a *Activity) syncStatus(ctx context.Context) (eth.SuperNodeSyncStatusResponse, error) { + return syncstatus.Aggregate(ctx, a.log, a.chains) +} diff --git a/op-supernode/supernode/activity/supernode/supernode_test.go b/op-supernode/supernode/activity/supernode/supernode_test.go new file mode 100644 index 0000000000000..faebab7cdcb5d --- /dev/null +++ b/op-supernode/supernode/activity/supernode/supernode_test.go @@ -0,0 +1,263 @@ +package supernode + +import ( + "context" + "fmt" + "testing" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" + cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" + "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + gethlog "github.com/ethereum/go-ethereum/log" + "github.com/stretchr/testify/require" +) + +type mockCC struct { + status *eth.SyncStatus + verifierL1s []eth.BlockID + + verifiedErr error + outputErr error + syncStatusErr error +} + 
+func (m *mockCC) Start(ctx context.Context) error { return nil } +func (m *mockCC) Stop(ctx context.Context) error { return nil } +func (m *mockCC) Pause(ctx context.Context) error { return nil } +func (m *mockCC) Resume(ctx context.Context) error { return nil } + +func (m *mockCC) RegisterVerifier(v activity.VerificationActivity) {} +func (m *mockCC) VerifierCurrentL1s() []eth.BlockID { + return m.verifierL1s +} + +func (m *mockCC) LocalSafeBlockAtTimestamp(ctx context.Context, ts uint64) (eth.L2BlockRef, error) { + return eth.L2BlockRef{}, nil +} + +func (m *mockCC) SyncStatus(ctx context.Context) (*eth.SyncStatus, error) { + if m.syncStatusErr != nil { + return nil, m.syncStatusErr + } + if m.status == nil { + return ð.SyncStatus{}, nil + } + return m.status, nil +} + +func (m *mockCC) SafeHeadAtL1(ctx context.Context, l1BlockNum uint64) (eth.BlockID, eth.BlockID, error) { + return eth.BlockID{}, eth.BlockID{}, nil +} + +func (m *mockCC) L1AtSafeHead(ctx context.Context, l2 eth.BlockID) (eth.BlockID, error) { + return eth.BlockID{}, nil +} + +func (m *mockCC) VerifiedAt(ctx context.Context, ts uint64) (eth.BlockID, eth.BlockID, error) { + if m.verifiedErr != nil { + return eth.BlockID{}, eth.BlockID{}, m.verifiedErr + } + return eth.BlockID{}, eth.BlockID{}, nil +} + +func (m *mockCC) OptimisticAt(ctx context.Context, ts uint64) (eth.BlockID, eth.BlockID, error) { + return eth.BlockID{}, eth.BlockID{}, nil +} + +func (m *mockCC) OutputRootAtL2BlockNumber(ctx context.Context, l2BlockNum uint64) (eth.Bytes32, error) { + if m.outputErr != nil { + return eth.Bytes32{}, m.outputErr + } + return eth.Bytes32{}, nil +} + +func (m *mockCC) OptimisticOutputAtTimestamp(ctx context.Context, ts uint64) (*eth.OutputResponse, error) { + return ð.OutputResponse{}, nil +} + +func (m *mockCC) RewindEngine(ctx context.Context, timestamp uint64, invalidatedBlock eth.BlockRef) error { + return nil +} + +func (m *mockCC) L1ForL2(ctx context.Context, l2Block eth.BlockID) 
(eth.BlockID, error) { + return eth.BlockID{}, nil +} + +func (m *mockCC) FetchReceipts(ctx context.Context, blockID eth.BlockID) (eth.BlockInfo, types.Receipts, error) { + return nil, nil, nil +} + +func (m *mockCC) ID() eth.ChainID { + return eth.ChainIDFromUInt64(10) +} + +func (m *mockCC) BlockTime() uint64 { return 1 } + +func (m *mockCC) InvalidateBlock(ctx context.Context, height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} + +func (m *mockCC) IsDenied(height uint64, payloadHash common.Hash) (bool, error) { + return false, nil +} + +func (m *mockCC) SetResetCallback(cb cc.ResetCallback) {} + +var _ cc.ChainContainer = (*mockCC)(nil) + +func TestSupernode_SyncStatus_Succeeds(t *testing.T) { + t.Parallel() + chainA := eth.ChainIDFromUInt64(10) + chainB := eth.ChainIDFromUInt64(420) + + chains := map[eth.ChainID]cc.ChainContainer{ + chainA: &mockCC{ + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 2000}, + UnsafeL2: eth.L2BlockRef{Number: 120, Time: 220}, + CrossUnsafeL2: eth.L2BlockRef{Number: 118, Time: 205}, + SafeL2: eth.L2BlockRef{Number: 110, Time: 170}, + LocalSafeL2: eth.L2BlockRef{Number: 111, Time: 180}, + FinalizedL2: eth.L2BlockRef{Number: 100, Time: 140}, + }, + }, + chainB: &mockCC{ + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 2000}, + UnsafeL2: eth.L2BlockRef{Number: 130, Time: 230}, + CrossUnsafeL2: eth.L2BlockRef{Number: 128, Time: 215}, + SafeL2: eth.L2BlockRef{Number: 112, Time: 175}, + LocalSafeL2: eth.L2BlockRef{Number: 113, Time: 190}, + FinalizedL2: eth.L2BlockRef{Number: 101, Time: 150}, + }, + }, + } + + s := New(gethlog.New(), chains) + api := &api{a: s} + out, err := api.SyncStatus(context.Background()) + require.NoError(t, err) + + require.Len(t, out.Chains, 2) + require.Contains(t, out.Chains, chainA) + require.Contains(t, out.Chains, chainB) + require.Equal(t, []eth.ChainID{chainA, chainB}, out.ChainIDs) + require.Equal(t, uint64(2000), out.CurrentL1.Number) + require.Equal(t, 
out.Chains[chainA].CurrentL1.ID(), out.CurrentL1) + require.Equal(t, uint64(170), out.SafeTimestamp) + require.Equal(t, uint64(180), out.LocalSafeTimestamp) + require.Equal(t, uint64(140), out.FinalizedTimestamp) + + statusA := out.Chains[chainA] + require.Greater(t, statusA.UnsafeL2.Number, statusA.SafeL2.Number) + require.Greater(t, statusA.SafeL2.Number, statusA.FinalizedL2.Number) + require.Greater(t, statusA.LocalSafeL2.Number, statusA.SafeL2.Number) +} + +func TestSupernode_SyncStatus_UsesMinimumCurrentL1(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 100, Hash: common.Hash{0x11}}, + }, + }, + eth.ChainIDFromUInt64(11): &mockCC{ + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 101, Hash: common.Hash{0x22}}, + }, + }, + } + s := New(gethlog.New(), chains) + api := &api{a: s} + out, err := api.SyncStatus(context.Background()) + require.NoError(t, err) + require.Equal(t, eth.BlockID{Number: 100, Hash: common.Hash{0x11}}, out.CurrentL1) +} + +func TestSupernode_SyncStatus_UsesMinimumVerifierCurrentL1(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 200, Hash: common.Hash{0x11}}, + }, + verifierL1s: []eth.BlockID{ + {Number: 150, Hash: common.Hash{0x33}}, + {Number: 175, Hash: common.Hash{0x44}}, + }, + }, + eth.ChainIDFromUInt64(11): &mockCC{ + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 180, Hash: common.Hash{0x22}}, + }, + verifierL1s: []eth.BlockID{ + {Number: 190, Hash: common.Hash{0x55}}, + }, + }, + } + s := New(gethlog.New(), chains) + api := &api{a: s} + out, err := api.SyncStatus(context.Background()) + require.NoError(t, err) + require.Equal(t, eth.BlockID{Number: 150, Hash: common.Hash{0x33}}, out.CurrentL1) +} + +func TestSupernode_SyncStatus_ErrorOnCurrentL1(t *testing.T) { 
+ t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + syncStatusErr: assertErr(), + }, + } + s := New(gethlog.New(), chains) + api := &api{a: s} + _, err := api.SyncStatus(context.Background()) + require.Error(t, err) +} + +func TestSupernode_SyncStatus_IgnoresUnsafeOutputRootErrors(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{ + eth.ChainIDFromUInt64(10): &mockCC{ + verifiedErr: fmt.Errorf("not available: %w", ethereum.NotFound), + outputErr: assertErr(), + status: ð.SyncStatus{ + CurrentL1: eth.L1BlockRef{Number: 100}, + UnsafeL2: eth.L2BlockRef{Number: 10, Time: 20}, + LocalSafeL2: eth.L2BlockRef{Number: 9, Time: 18}, + FinalizedL2: eth.L2BlockRef{Number: 8, Time: 16}, + }, + }, + } + s := New(gethlog.New(), chains) + api := &api{a: s} + out, err := api.SyncStatus(context.Background()) + require.NoError(t, err) + require.Equal(t, []eth.ChainID{eth.ChainIDFromUInt64(10)}, out.ChainIDs) + require.Equal(t, eth.BlockID{Number: 100}, out.CurrentL1) + require.Equal(t, out.Chains[eth.ChainIDFromUInt64(10)].CurrentL1.ID(), out.CurrentL1) + require.Equal(t, uint64(0), out.SafeTimestamp) + require.Equal(t, uint64(18), out.LocalSafeTimestamp) +} + +func TestSupernode_SyncStatus_EmptyChains(t *testing.T) { + t.Parallel() + chains := map[eth.ChainID]cc.ChainContainer{} + s := New(gethlog.New(), chains) + api := &api{a: s} + + out, err := api.SyncStatus(context.Background()) + require.NoError(t, err) + require.Len(t, out.Chains, 0) + require.Len(t, out.ChainIDs, 0) + require.Equal(t, eth.BlockID{}, out.CurrentL1) + require.Equal(t, uint64(0), out.SafeTimestamp) + require.Equal(t, uint64(0), out.LocalSafeTimestamp) + require.Equal(t, uint64(0), out.FinalizedTimestamp) +} + +func assertErr() error { return fmt.Errorf("mock error") } diff --git a/op-supernode/supernode/activity/superroot/superroot.go b/op-supernode/supernode/activity/superroot/superroot.go index 8c922a6a6ce9f..0c3661317049a 100644 
--- a/op-supernode/supernode/activity/superroot/superroot.go +++ b/op-supernode/supernode/activity/superroot/superroot.go @@ -4,9 +4,9 @@ import ( "context" "errors" "fmt" - "slices" "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity/internal/syncstatus" cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common/hexutil" @@ -47,64 +47,20 @@ func (api *superrootAPI) AtTimestamp(ctx context.Context, timestamp hexutil.Uint } func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (eth.SuperRootAtTimestampResponse, error) { + aggregate, err := syncstatus.Aggregate(ctx, s.log, s.chains) + if err != nil { + return eth.SuperRootAtTimestampResponse{}, err + } + var ( optimistic = make(map[eth.ChainID]eth.OutputWithRequiredL1, len(s.chains)) - minCurrentL1 eth.BlockID minVerifiedRequiredL1 eth.BlockID - minSafeTimestamp uint64 - minFinalizedTimestamp uint64 - safeInitialized bool - finalizedInitialized bool chainOutputs = make([]eth.ChainIDAndOutput, 0, len(s.chains)) ) - // Get current L1s — the minimum L1 block that all derivation pipelines and verifiers have processed. - // This informs callers that the chains' local views have considered at least up to this L1 block. - for chainID, chain := range s.chains { - status, err := chain.SyncStatus(ctx) - if err != nil { - s.log.Warn("failed to get sync status", "chain_id", chainID.String(), "err", err) - return eth.SuperRootAtTimestampResponse{}, err - } - if status == nil { // defensive - status = ð.SyncStatus{} - } - - currentL1 := status.CurrentL1.ID() - if currentL1.Number < minCurrentL1.Number || minCurrentL1 == (eth.BlockID{}) { - minCurrentL1 = currentL1 - } - // Also consider the L1 progress of any registered verifiers. 
- for _, verifierL1 := range chain.VerifierCurrentL1s() { - if verifierL1.Number < minCurrentL1.Number || minCurrentL1 == (eth.BlockID{}) { - minCurrentL1 = verifierL1 - } - } - // Conservative aggregation across chains: take the minimum timestamps. - // If any chain has a zero timestamp (not initialized), the aggregate is zero. - if !safeInitialized { - minSafeTimestamp = status.LocalSafeL2.Time - safeInitialized = true - } else if minSafeTimestamp == 0 || status.LocalSafeL2.Time == 0 { - minSafeTimestamp = 0 - } else if status.LocalSafeL2.Time < minSafeTimestamp { - minSafeTimestamp = status.LocalSafeL2.Time - } - - if !finalizedInitialized { - minFinalizedTimestamp = status.FinalizedL2.Time - finalizedInitialized = true - } else if minFinalizedTimestamp == 0 || status.FinalizedL2.Time == 0 { - minFinalizedTimestamp = 0 - } else if status.FinalizedL2.Time < minFinalizedTimestamp { - minFinalizedTimestamp = status.FinalizedL2.Time - } - } notFound := false - chainIDs := make([]eth.ChainID, 0, len(s.chains)) // Collect verified L2 and L1 blocks at the given timestamp for chainID, chain := range s.chains { - chainIDs = append(chainIDs, chainID) // verifiedAt returns the L2 block which is fully verified at the given timestamp, and the minimum L1 block at which verification is possible verifiedL2, verifiedL1, err := chain.VerifiedAt(ctx, timestamp) if errors.Is(err, ethereum.NotFound) { @@ -151,15 +107,13 @@ func (s *Superroot) atTimestamp(ctx context.Context, timestamp uint64) (eth.Supe } } - slices.SortFunc(chainIDs, func(a, b eth.ChainID) int { - return a.Cmp(b) - }) response := eth.SuperRootAtTimestampResponse{ - CurrentL1: minCurrentL1, - CurrentSafeTimestamp: minSafeTimestamp, - CurrentFinalizedTimestamp: minFinalizedTimestamp, + CurrentL1: aggregate.CurrentL1, + CurrentSafeTimestamp: aggregate.SafeTimestamp, + CurrentLocalSafeTimestamp: aggregate.LocalSafeTimestamp, + CurrentFinalizedTimestamp: aggregate.FinalizedTimestamp, OptimisticAtTimestamp: optimistic, - 
ChainIDs: chainIDs, + ChainIDs: aggregate.ChainIDs, } if !notFound { // Build super root from collected outputs diff --git a/op-supernode/supernode/activity/superroot/superroot_test.go b/op-supernode/supernode/activity/superroot/superroot_test.go index 348a45df2bc49..5a6cab262afe7 100644 --- a/op-supernode/supernode/activity/superroot/superroot_test.go +++ b/op-supernode/supernode/activity/superroot/superroot_test.go @@ -122,6 +122,7 @@ func TestSuperroot_AtTimestamp_Succeeds(t *testing.T) { output: eth.Bytes32{}, status: ð.SyncStatus{ CurrentL1: eth.L1BlockRef{Number: 2000}, + SafeL2: eth.L2BlockRef{Time: 190}, LocalSafeL2: eth.L2BlockRef{Time: 200}, FinalizedL2: eth.L2BlockRef{Time: 150}, }, @@ -134,6 +135,7 @@ func TestSuperroot_AtTimestamp_Succeeds(t *testing.T) { output: eth.Bytes32{}, status: ð.SyncStatus{ CurrentL1: eth.L1BlockRef{Number: 2100}, + SafeL2: eth.L2BlockRef{Time: 170}, LocalSafeL2: eth.L2BlockRef{Time: 180}, FinalizedL2: eth.L2BlockRef{Time: 140}, }, @@ -146,7 +148,8 @@ func TestSuperroot_AtTimestamp_Succeeds(t *testing.T) { require.Len(t, out.OptimisticAtTimestamp, 2) // min values require.Equal(t, uint64(2000), out.CurrentL1.Number) - require.Equal(t, uint64(180), out.CurrentSafeTimestamp) + require.Equal(t, uint64(170), out.CurrentSafeTimestamp) + require.Equal(t, uint64(180), out.CurrentLocalSafeTimestamp) require.Equal(t, uint64(140), out.CurrentFinalizedTimestamp) require.Equal(t, uint64(1000), out.Data.VerifiedRequiredL1.Number) // With zero outputs, the superroot will be deterministic, just ensure it's set diff --git a/op-supernode/supernode/supernode.go b/op-supernode/supernode/supernode.go index dceb971dc556b..bbcf1c05cd627 100644 --- a/op-supernode/supernode/supernode.go +++ b/op-supernode/supernode/supernode.go @@ -18,6 +18,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity" "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity/heartbeat" 
"github.com/ethereum-optimism/optimism/op-supernode/supernode/activity/interop" + supernodeactivity "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity/supernode" "github.com/ethereum-optimism/optimism/op-supernode/supernode/activity/superroot" cc "github.com/ethereum-optimism/optimism/op-supernode/supernode/chain_container" "github.com/ethereum-optimism/optimism/op-supernode/supernode/resources" @@ -88,6 +89,7 @@ func New(ctx context.Context, log gethlog.Logger, version string, requestStop co // Initialize fixed activities s.activities = []activity.Activity{ heartbeat.New(log.New("activity", "heartbeat"), 10*time.Second), + supernodeactivity.New(log.New("activity", "supernode"), s.chains), superroot.New(log.New("activity", "superroot"), s.chains), } From d40fb204ed556c2d7d272d2d505de53d4bec14f7 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Wed, 11 Mar 2026 09:15:50 -0400 Subject: [PATCH 101/201] chore(rust/op-reth): bump op-reth to 1.11.2 (#19472) --- rust/Cargo.lock | 252 ++++++++++----------- rust/Cargo.toml | 144 ++++++------ rust/op-reth/bin/Cargo.toml | 2 +- rust/op-reth/crates/chainspec/Cargo.toml | 2 +- rust/op-reth/crates/cli/Cargo.toml | 2 +- rust/op-reth/crates/consensus/Cargo.toml | 2 +- rust/op-reth/crates/evm/Cargo.toml | 2 +- rust/op-reth/crates/exex/Cargo.toml | 2 +- rust/op-reth/crates/flashblocks/Cargo.toml | 2 +- rust/op-reth/crates/hardforks/Cargo.toml | 2 +- rust/op-reth/crates/node/Cargo.toml | 2 +- rust/op-reth/crates/payload/Cargo.toml | 2 +- rust/op-reth/crates/primitives/Cargo.toml | 2 +- rust/op-reth/crates/reth/Cargo.toml | 2 +- rust/op-reth/crates/rpc/Cargo.toml | 2 +- rust/op-reth/crates/storage/Cargo.toml | 2 +- rust/op-reth/crates/trie/Cargo.toml | 2 +- rust/op-reth/crates/txpool/Cargo.toml | 2 +- 18 files changed, 214 insertions(+), 214 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index bb4e5922c355b..06541dc35f0cc 100644 --- a/rust/Cargo.lock +++ 
b/rust/Cargo.lock @@ -7838,7 +7838,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.11.1" +version = "1.11.2" dependencies = [ "clap", "reth-cli-util", @@ -9274,7 +9274,7 @@ checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth-basic-payload-builder" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9298,7 +9298,7 @@ dependencies = [ [[package]] name = "reth-chain-state" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9330,7 +9330,7 @@ dependencies = [ [[package]] name = "reth-chainspec" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9350,7 +9350,7 @@ dependencies = [ [[package]] name = "reth-cli" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-genesis", "clap", @@ -9364,7 +9364,7 @@ dependencies = [ [[package]] name = "reth-cli-commands" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", 
"alloy-consensus", @@ -9450,7 +9450,7 @@ dependencies = [ [[package]] name = "reth-cli-runner" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "reth-tasks", "tokio", @@ -9460,7 +9460,7 @@ dependencies = [ [[package]] name = "reth-cli-util" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9480,7 +9480,7 @@ dependencies = [ [[package]] name = "reth-codecs" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9500,7 +9500,7 @@ dependencies = [ [[package]] name = "reth-codecs-derive" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "proc-macro2", "quote", @@ -9510,7 +9510,7 @@ dependencies = [ [[package]] name = "reth-config" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "eyre", "humantime-serde", @@ -9526,7 +9526,7 @@ dependencies = [ [[package]] name = "reth-consensus" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9539,7 +9539,7 @@ dependencies = [ [[package]] name = "reth-consensus-common" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9551,7 +9551,7 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9577,7 +9577,7 @@ dependencies = [ [[package]] name = "reth-db" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "derive_more", @@ -9604,7 +9604,7 @@ dependencies = [ [[package]] name = "reth-db-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9633,7 +9633,7 @@ dependencies = [ [[package]] name = "reth-db-common" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9663,7 +9663,7 @@ dependencies = [ [[package]] name = 
"reth-db-models" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9678,7 +9678,7 @@ dependencies = [ [[package]] name = "reth-discv4" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9703,7 +9703,7 @@ dependencies = [ [[package]] name = "reth-discv5" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9727,7 +9727,7 @@ dependencies = [ [[package]] name = "reth-dns-discovery" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "dashmap", @@ -9751,7 +9751,7 @@ dependencies = [ [[package]] name = "reth-downloaders" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9786,7 +9786,7 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9844,7 +9844,7 @@ dependencies = [ [[package]] name = "reth-ecies" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "aes", "alloy-primitives", @@ -9872,7 +9872,7 @@ dependencies = [ [[package]] name = "reth-engine-local" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9896,7 +9896,7 @@ dependencies = [ [[package]] name = "reth-engine-primitives" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9921,7 +9921,7 @@ dependencies = [ [[package]] name = "reth-engine-service" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "futures", "pin-project", @@ -9943,7 +9943,7 @@ dependencies = [ [[package]] name = "reth-engine-tree" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eip7928", @@ -10000,7 +10000,7 @@ dependencies = [ [[package]] name = 
"reth-engine-util" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -10028,7 +10028,7 @@ dependencies = [ [[package]] name = "reth-era" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10043,7 +10043,7 @@ dependencies = [ [[package]] name = "reth-era-downloader" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "bytes", @@ -10059,7 +10059,7 @@ dependencies = [ [[package]] name = "reth-era-utils" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10081,7 +10081,7 @@ dependencies = [ [[package]] name = "reth-errors" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -10092,7 +10092,7 @@ dependencies = [ [[package]] name = "reth-eth-wire" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10121,7 +10121,7 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10145,7 +10145,7 @@ dependencies = [ [[package]] name = "reth-ethereum" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -10186,7 +10186,7 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "clap", "eyre", @@ -10209,7 +10209,7 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10225,7 +10225,7 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10243,7 +10243,7 @@ dependencies 
= [ [[package]] name = "reth-ethereum-forks" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -10257,7 +10257,7 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10286,7 +10286,7 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10306,7 +10306,7 @@ dependencies = [ [[package]] name = "reth-etl" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "rayon", "reth-db-api", @@ -10316,7 +10316,7 @@ dependencies = [ [[package]] name = "reth-evm" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10340,7 +10340,7 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10362,7 +10362,7 @@ dependencies = [ [[package]] name = "reth-execution-errors" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-evm", "alloy-primitives", @@ -10375,7 +10375,7 @@ dependencies = [ [[package]] name = "reth-execution-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10393,7 +10393,7 @@ dependencies = [ [[package]] name = "reth-exex" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10431,7 +10431,7 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "eyre", @@ -10463,7 +10463,7 @@ dependencies = [ [[package]] name = "reth-exex-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10477,7 +10477,7 @@ dependencies = [ [[package]] name = 
"reth-fs-util" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "serde", "serde_json", @@ -10487,7 +10487,7 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10515,7 +10515,7 @@ dependencies = [ [[package]] name = "reth-ipc" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "bytes", "futures", @@ -10535,7 +10535,7 @@ dependencies = [ [[package]] name = "reth-libmdbx" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "bitflags 2.11.0", "byteorder", @@ -10551,7 +10551,7 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "bindgen", "cc", @@ -10560,7 +10560,7 @@ dependencies = [ [[package]] name = "reth-metrics" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" 
dependencies = [ "futures", "metrics", @@ -10572,7 +10572,7 @@ dependencies = [ [[package]] name = "reth-net-banlist" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "ipnet", @@ -10581,7 +10581,7 @@ dependencies = [ [[package]] name = "reth-net-nat" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "futures-util", "if-addrs 0.14.0", @@ -10595,7 +10595,7 @@ dependencies = [ [[package]] name = "reth-network" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10652,7 +10652,7 @@ dependencies = [ [[package]] name = "reth-network-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10677,7 +10677,7 @@ dependencies = [ [[package]] name = "reth-network-p2p" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10700,7 +10700,7 @@ dependencies = [ [[package]] name = "reth-network-peers" version = "1.11.1" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10715,7 +10715,7 @@ dependencies = [ [[package]] name = "reth-network-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -10729,7 +10729,7 @@ dependencies = [ [[package]] name = "reth-nippy-jar" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "anyhow", "bincode 1.3.3", @@ -10746,7 +10746,7 @@ dependencies = [ [[package]] name = "reth-node-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -10770,7 +10770,7 @@ dependencies = [ [[package]] name = "reth-node-builder" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10839,7 +10839,7 @@ dependencies = [ [[package]] name = "reth-node-core" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = 
[ "alloy-consensus", "alloy-eips", @@ -10894,7 +10894,7 @@ dependencies = [ [[package]] name = "reth-node-ethereum" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-network", @@ -10932,7 +10932,7 @@ dependencies = [ [[package]] name = "reth-node-ethstats" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10956,7 +10956,7 @@ dependencies = [ [[package]] name = "reth-node-events" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10980,7 +10980,7 @@ dependencies = [ [[package]] name = "reth-node-metrics" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "bytes", "eyre", @@ -11009,7 +11009,7 @@ dependencies = [ [[package]] name = "reth-node-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "reth-chainspec", "reth-db-api", @@ -11020,7 +11020,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-primitives", "reth-chainspec", @@ -11061,7 
+11061,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -11089,7 +11089,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11139,7 +11139,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-chains", "alloy-consensus", @@ -11170,7 +11170,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11199,7 +11199,7 @@ dependencies = [ [[package]] name = "reth-optimism-exex" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11226,7 +11226,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11271,7 +11271,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -11281,7 +11281,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11348,7 +11348,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11387,7 +11387,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11414,7 +11414,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11485,7 +11485,7 @@ dependencies = [ [[package]] name = 
"reth-optimism-storage" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "reth-codecs", @@ -11497,7 +11497,7 @@ dependencies = [ [[package]] name = "reth-optimism-trie" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11542,7 +11542,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.11.1" +version = "1.11.2" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11581,7 +11581,7 @@ dependencies = [ [[package]] name = "reth-payload-builder" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11602,7 +11602,7 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "pin-project", "reth-payload-primitives", @@ -11614,7 +11614,7 @@ dependencies = [ [[package]] name = "reth-payload-primitives" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11637,7 +11637,7 @@ dependencies = [ [[package]] name = "reth-payload-util" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11647,7 +11647,7 @@ dependencies = [ [[package]] name = 
"reth-payload-validator" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -11657,7 +11657,7 @@ dependencies = [ [[package]] name = "reth-primitives" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "once_cell", @@ -11670,7 +11670,7 @@ dependencies = [ [[package]] name = "reth-primitives-traits" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11704,7 +11704,7 @@ dependencies = [ [[package]] name = "reth-provider" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11749,7 +11749,7 @@ dependencies = [ [[package]] name = "reth-prune" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11778,7 +11778,7 @@ dependencies = [ [[package]] name = "reth-prune-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "arbitrary", @@ -11794,7 +11794,7 @@ dependencies = [ [[package]] name = "reth-revm" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "reth-primitives-traits", @@ -11807,7 +11807,7 @@ dependencies = [ [[package]] name = "reth-rpc" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -11884,7 +11884,7 @@ dependencies = [ [[package]] name = "reth-rpc-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eip7928", "alloy-eips", @@ -11914,7 +11914,7 @@ dependencies = [ [[package]] name = "reth-rpc-builder" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-network", "alloy-provider", @@ -11955,7 +11955,7 @@ dependencies = [ [[package]] name = "reth-rpc-convert" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-evm", @@ -11979,7 +11979,7 @@ dependencies = [ [[package]] name = 
"reth-rpc-engine-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12009,7 +12009,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -12053,7 +12053,7 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12101,7 +12101,7 @@ dependencies = [ [[package]] name = "reth-rpc-layer" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-rpc-types-engine", "http", @@ -12115,7 +12115,7 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12131,7 +12131,7 @@ dependencies = [ [[package]] name = "reth-stages" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12181,7 +12181,7 @@ dependencies = [ [[package]] name = "reth-stages-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12208,7 +12208,7 @@ dependencies = [ [[package]] name = "reth-stages-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "arbitrary", @@ -12222,7 +12222,7 @@ dependencies = [ [[package]] name = "reth-static-file" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "parking_lot", @@ -12242,7 +12242,7 @@ dependencies = [ [[package]] name = "reth-static-file-types" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "clap", @@ -12257,7 +12257,7 @@ dependencies = [ [[package]] name = "reth-storage-api" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12281,7 +12281,7 @@ dependencies = [ [[package]] name = 
"reth-storage-errors" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12298,7 +12298,7 @@ dependencies = [ [[package]] name = "reth-tasks" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "auto_impl", "dyn-clone", @@ -12316,7 +12316,7 @@ dependencies = [ [[package]] name = "reth-testing-utils" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12332,7 +12332,7 @@ dependencies = [ [[package]] name = "reth-tokio-util" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "tokio", "tokio-stream", @@ -12342,7 +12342,7 @@ dependencies = [ [[package]] name = "reth-tracing" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "clap", "eyre", @@ -12361,7 +12361,7 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "clap", "eyre", @@ -12379,7 +12379,7 @@ dependencies = [ [[package]] name = "reth-transaction-pool" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12425,7 +12425,7 @@ dependencies = [ [[package]] name = "reth-trie" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12451,7 +12451,7 @@ dependencies = [ [[package]] name = "reth-trie-common" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -12478,7 +12478,7 @@ dependencies = [ [[package]] name = "reth-trie-db" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "metrics", @@ -12498,7 +12498,7 @@ dependencies = [ [[package]] name = "reth-trie-parallel" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -12523,7 +12523,7 @@ dependencies = [ [[package]] name = "reth-trie-sparse" 
version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -12542,7 +12542,7 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.1#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" dependencies = [ "zstd", ] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 69f2fd6832bd0..b46e8f85db595 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -297,78 +297,78 @@ alloy-op-evm = { version = "0.26.3", path = "alloy-op-evm/", default-features = alloy-op-hardforks = { version = "0.4.7", path = "alloy-op-hardforks/", default-features = false } # ==================== RETH CRATES (from git rev 564ffa58 / main) ==================== -reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-basic-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-cli-runner = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-cli-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-consensus-common = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.11.1", default-features = false } -reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-downloaders = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-e2e-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-engine-local = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-eth-wire = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-ethereum-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-ethereum-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-execution-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-execution-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-fs-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-metrics = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-network = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-node-events = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-payload-builder-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-payload-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-payload-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-payload-validator = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-prune = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-rpc-builder = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-rpc-server-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-stages = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-static-file = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-static-file-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-storage-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } -reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1" } -reth-zstd-compressors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.1", default-features = false } +reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-basic-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-chainspec = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-cli-runner = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-cli-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-consensus-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-downloaders = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-e2e-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-engine-local = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-eth-wire = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-ethereum-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-ethereum-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-evm = 
{ git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-execution-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-execution-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-fs-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-network = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-node-events = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-payload-builder-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-payload-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-payload-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-payload-validator = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } 
+reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-prune = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-rpc-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-rpc-server-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-stages = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-static-file = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-static-file-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-storage-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.11.2" } +reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } +reth-zstd-compressors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } # ==================== REVM (latest: op-reth versions) ==================== revm = { version = "34.0.0", default-features = false } diff --git a/rust/op-reth/bin/Cargo.toml b/rust/op-reth/bin/Cargo.toml index 37ca6c20df29b..b86dc0b9aed47 100644 --- a/rust/op-reth/bin/Cargo.toml +++ b/rust/op-reth/bin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "op-reth" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/chainspec/Cargo.toml b/rust/op-reth/crates/chainspec/Cargo.toml index e218e5cb9d3a6..cfdb01ca7215d 100644 --- a/rust/op-reth/crates/chainspec/Cargo.toml +++ b/rust/op-reth/crates/chainspec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-chainspec" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/cli/Cargo.toml b/rust/op-reth/crates/cli/Cargo.toml index 729974f192b91..1f5554d8a505c 100644 --- a/rust/op-reth/crates/cli/Cargo.toml +++ b/rust/op-reth/crates/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-cli" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/consensus/Cargo.toml b/rust/op-reth/crates/consensus/Cargo.toml index 9d9a8bbf392c7..f386066883f23 100644 --- a/rust/op-reth/crates/consensus/Cargo.toml +++ b/rust/op-reth/crates/consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-consensus" -version = "1.11.1" +version = "1.11.2" 
edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/evm/Cargo.toml b/rust/op-reth/crates/evm/Cargo.toml index fa69e1188d858..9c0efeffc8545 100644 --- a/rust/op-reth/crates/evm/Cargo.toml +++ b/rust/op-reth/crates/evm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-evm" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/exex/Cargo.toml b/rust/op-reth/crates/exex/Cargo.toml index 9ad570d2800a8..bc2992733eb94 100644 --- a/rust/op-reth/crates/exex/Cargo.toml +++ b/rust/op-reth/crates/exex/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-exex" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/flashblocks/Cargo.toml b/rust/op-reth/crates/flashblocks/Cargo.toml index ddd793c309d97..0febb66c629c8 100644 --- a/rust/op-reth/crates/flashblocks/Cargo.toml +++ b/rust/op-reth/crates/flashblocks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-flashblocks" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/hardforks/Cargo.toml b/rust/op-reth/crates/hardforks/Cargo.toml index 70a95fafb334e..6a709cdc1182c 100644 --- a/rust/op-reth/crates/hardforks/Cargo.toml +++ b/rust/op-reth/crates/hardforks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-forks" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/node/Cargo.toml b/rust/op-reth/crates/node/Cargo.toml index f9c43ea759c17..0f521c1f58a82 100644 --- a/rust/op-reth/crates/node/Cargo.toml +++ b/rust/op-reth/crates/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-node" -version = "1.11.1" +version = "1.11.2" 
edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/payload/Cargo.toml b/rust/op-reth/crates/payload/Cargo.toml index 775375d2b472b..7248c36e80e51 100644 --- a/rust/op-reth/crates/payload/Cargo.toml +++ b/rust/op-reth/crates/payload/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-payload-builder" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/primitives/Cargo.toml b/rust/op-reth/crates/primitives/Cargo.toml index 2fd6b889b57f6..3c2709b59ac19 100644 --- a/rust/op-reth/crates/primitives/Cargo.toml +++ b/rust/op-reth/crates/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-primitives" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/reth/Cargo.toml b/rust/op-reth/crates/reth/Cargo.toml index 0636f01a7af2d..66731edc26f45 100644 --- a/rust/op-reth/crates/reth/Cargo.toml +++ b/rust/op-reth/crates/reth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-op" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/rpc/Cargo.toml b/rust/op-reth/crates/rpc/Cargo.toml index 5777562808ef6..b8ef3fb9a3552 100644 --- a/rust/op-reth/crates/rpc/Cargo.toml +++ b/rust/op-reth/crates/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-rpc" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/storage/Cargo.toml b/rust/op-reth/crates/storage/Cargo.toml index d11679f35256b..5d2b58c9d3915 100644 --- a/rust/op-reth/crates/storage/Cargo.toml +++ b/rust/op-reth/crates/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-storage" -version = "1.11.1" +version = "1.11.2" 
edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/trie/Cargo.toml b/rust/op-reth/crates/trie/Cargo.toml index 2120d9ddc4dc8..987b5ac20500d 100644 --- a/rust/op-reth/crates/trie/Cargo.toml +++ b/rust/op-reth/crates/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-trie" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/txpool/Cargo.toml b/rust/op-reth/crates/txpool/Cargo.toml index 0bd7474cbe32e..7bcb17bd7e710 100644 --- a/rust/op-reth/crates/txpool/Cargo.toml +++ b/rust/op-reth/crates/txpool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-txpool" -version = "1.11.1" +version = "1.11.2" edition.workspace = true rust-version.workspace = true license.workspace = true From 0c675ceee1ad2fa1a28bb8a20304d749526428f2 Mon Sep 17 00:00:00 2001 From: wwared Date: Wed, 11 Mar 2026 10:41:55 -0300 Subject: [PATCH 102/201] op-devstack: Add P2P config for supernode follow mode CLs (#19439) Co-authored-by: wwared <541936+wwared@users.noreply.github.com> --- .../supernode/interop/follow_l2/sync_test.go | 68 ++++++++++++++--- op-devstack/sysgo/l2_cl_opnode.go | 63 ++++------------ op-devstack/sysgo/l2_cl_p2p_config.go | 74 +++++++++++++++++++ op-devstack/sysgo/l2_cl_supernode.go | 22 +++++- op-devstack/sysgo/system_two_l2_follow_l2.go | 5 +- 5 files changed, 168 insertions(+), 64 deletions(-) create mode 100644 op-devstack/sysgo/l2_cl_p2p_config.go diff --git a/op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go b/op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go index 264017492c01e..e6bc4e9719ec6 100644 --- a/op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go +++ b/op-acceptance-tests/tests/supernode/interop/follow_l2/sync_test.go @@ -10,7 +10,7 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) -func 
TestFollowSource_LocalSafeDivergesThenConverges(gt *testing.T) { +func TestFollowSource_HeadsDivergeThenConverge(gt *testing.T) { t := devtest.SerialT(gt) require := t.Require() sys := presets.NewTwoL2SupernodeFollowL2(t, 0) @@ -21,6 +21,15 @@ func TestFollowSource_LocalSafeDivergesThenConverges(gt *testing.T) { follower *dsl.L2CLNode } + type headSnapshot struct { + sourceUnsafe uint64 + sourceLocalSafe uint64 + sourceCrossSafe uint64 + followerUnsafe uint64 + followerLocalSafe uint64 + followerCrossSafe uint64 + } + chains := []chainPair{ {name: "A", source: sys.L2ACL, follower: sys.L2AFollowCL}, {name: "B", source: sys.L2BCL, follower: sys.L2BFollowCL}, @@ -39,26 +48,66 @@ func TestFollowSource_LocalSafeDivergesThenConverges(gt *testing.T) { pausedAt := sys.Supernode.EnsureInteropPaused(sys.L2ACL, sys.L2BCL, 10) t.Logger().Info("interop paused", "timestamp", pausedAt) - // While interop is paused, local-safe should continue to advance and lead cross-safe. + baselines := make(map[string]headSnapshot, len(chains)) + for _, chain := range chains { + sourceStatus := chain.source.SyncStatus() + followerStatus := chain.follower.SyncStatus() + baselines[chain.name] = headSnapshot{ + sourceUnsafe: sourceStatus.UnsafeL2.Number, + sourceLocalSafe: sourceStatus.LocalSafeL2.Number, + sourceCrossSafe: sourceStatus.SafeL2.Number, + followerUnsafe: followerStatus.UnsafeL2.Number, + followerLocalSafe: followerStatus.LocalSafeL2.Number, + followerCrossSafe: followerStatus.SafeL2.Number, + } + } + + // While interop is paused, unsafe should advance independently ahead of local-safe, and local-safe ahead of cross-safe. 
require.Eventually(func() bool { for _, chain := range chains { + baseline := baselines[chain.name] sourceStatus := chain.source.SyncStatus() followerStatus := chain.follower.SyncStatus() - if sourceStatus.LocalSafeL2.Number <= sourceStatus.SafeL2.Number+1 { + if sourceStatus.UnsafeL2.Number <= baseline.sourceUnsafe { + return false + } + if followerStatus.UnsafeL2.Number <= baseline.followerUnsafe { + return false + } + if sourceStatus.UnsafeL2.Number <= sourceStatus.LocalSafeL2.Number { + return false + } + if followerStatus.UnsafeL2.Number <= followerStatus.LocalSafeL2.Number { + return false + } + if sourceStatus.LocalSafeL2.Number <= sourceStatus.SafeL2.Number { + return false + } + if followerStatus.LocalSafeL2.Number <= followerStatus.SafeL2.Number { + return false + } + if sourceStatus.LocalSafeL2.Number <= baseline.sourceLocalSafe { + return false + } + if followerStatus.LocalSafeL2.Number <= baseline.followerLocalSafe { + return false + } + if sourceStatus.SafeL2.Number < baseline.sourceCrossSafe { return false } - if followerStatus.LocalSafeL2.Number <= followerStatus.SafeL2.Number+1 { + if followerStatus.SafeL2.Number < baseline.followerCrossSafe { return false } } return true - }, 2*time.Minute, 2*time.Second, "expected local-safe to lead cross-safe on source and follower") + }, 2*time.Minute, 2*time.Second, "expected unsafe > local-safe > cross-safe with unsafe advancing on source and follower") - // Core follow-source checks: follower must match source local-safe and cross-safe independently. - divergenceChecks := make([]dsl.CheckFunc, 0, len(chains)*2) + // Core follow-source checks: follower must match source unsafe, local-safe, and cross-safe independently. 
+ divergenceChecks := make([]dsl.CheckFunc, 0, len(chains)*3) for _, chain := range chains { divergenceChecks = append(divergenceChecks, + chain.follower.MatchedFn(chain.source, types.LocalUnsafe, 20), chain.follower.MatchedFn(chain.source, types.LocalSafe, 20), chain.follower.MatchedFn(chain.source, types.CrossSafe, 20), ) @@ -85,10 +134,11 @@ func TestFollowSource_LocalSafeDivergesThenConverges(gt *testing.T) { return true }, 3*time.Minute, 2*time.Second, "expected local-safe and cross-safe to converge on followers") - // Final sanity: follower and source converge to the same local-safe and cross-safe heads. - finalChecks := make([]dsl.CheckFunc, 0, len(chains)*2) + // Final sanity: follower and source converge to the same unsafe, local-safe, and cross-safe heads. + finalChecks := make([]dsl.CheckFunc, 0, len(chains)*3) for _, chain := range chains { finalChecks = append(finalChecks, + chain.follower.MatchedFn(chain.source, types.LocalUnsafe, 20), chain.follower.MatchedFn(chain.source, types.LocalSafe, 20), chain.follower.MatchedFn(chain.source, types.CrossSafe, 20), ) diff --git a/op-devstack/sysgo/l2_cl_opnode.go b/op-devstack/sysgo/l2_cl_opnode.go index 2b272cfaee973..80511ea53ea86 100644 --- a/op-devstack/sysgo/l2_cl_opnode.go +++ b/op-devstack/sysgo/l2_cl_opnode.go @@ -3,13 +3,10 @@ package sysgo import ( "context" "encoding/hex" - "flag" "fmt" "sync" "time" - "github.com/urfave/cli/v2" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" @@ -21,9 +18,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode" "github.com/ethereum-optimism/optimism/op-node/config" - opNodeFlags "github.com/ethereum-optimism/optimism/op-node/flags" - "github.com/ethereum-optimism/optimism/op-node/p2p" - p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" "github.com/ethereum-optimism/optimism/op-node/rollup/driver" 
"github.com/ethereum-optimism/optimism/op-node/rollup/interop" nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" @@ -224,48 +218,20 @@ func withOpNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack logger := p.Logger() - var p2pSignerSetup p2p.SignerSetup - var p2pConfig *p2p.Config - // code block for P2P setup - { - // make a dummy flagset since p2p config initialization helpers only input cli context - fs := flag.NewFlagSet("", flag.ContinueOnError) - // use default flags - for _, f := range opNodeFlags.P2PFlags(opNodeFlags.EnvVarPrefix) { - require.NoError(f.Apply(fs)) - } - // mandatory P2P flags - require.NoError(fs.Set(opNodeFlags.AdvertiseIPName, "127.0.0.1")) - require.NoError(fs.Set(opNodeFlags.AdvertiseTCPPortName, "0")) - require.NoError(fs.Set(opNodeFlags.AdvertiseUDPPortName, "0")) - require.NoError(fs.Set(opNodeFlags.ListenIPName, "127.0.0.1")) - require.NoError(fs.Set(opNodeFlags.ListenTCPPortName, "0")) - require.NoError(fs.Set(opNodeFlags.ListenUDPPortName, "0")) - // avoid resource unavailable error by using memorydb - require.NoError(fs.Set(opNodeFlags.DiscoveryPathName, "memory")) - require.NoError(fs.Set(opNodeFlags.PeerstorePathName, "memory")) - // For peer ID - networkPrivKey, err := crypto.GenerateKey() - require.NoError(err) - networkPrivKeyHex := hex.EncodeToString(crypto.FromECDSA(networkPrivKey)) - require.NoError(fs.Set(opNodeFlags.P2PPrivRawName, networkPrivKeyHex)) - // Explicitly set to empty; do not default to resolving DNS of external bootnodes - require.NoError(fs.Set(opNodeFlags.BootnodesName, "")) - - cliCtx := cli.NewContext(&cli.App{}, fs, nil) - if cfg.IsSequencer { - p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) - require.NoError(err, "need p2p key for sequencer") - p2pKeyHex := hex.EncodeToString(crypto.FromECDSA(p2pKey)) - require.NoError(fs.Set(opNodeFlags.SequencerP2PKeyName, p2pKeyHex)) - p2pSignerSetup, err = p2pcli.LoadSignerSetup(cliCtx, 
logger) - require.NoError(err, "failed to load p2p signer") - logger.Info("Sequencer key acquired") - } - p2pConfig, err = p2pcli.NewConfig(cliCtx, l2Net.rollupCfg.BlockTime) - require.NoError(err, "failed to load p2p config") - p2pConfig.NoDiscovery = cfg.NoDiscovery + sequencerP2PKeyHex := "" + if cfg.IsSequencer { + p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) + require.NoError(err, "need p2p key for sequencer") + sequencerP2PKeyHex = hex.EncodeToString(crypto.FromECDSA(p2pKey)) } + p2pConfig, p2pSignerSetup := newDevstackP2PConfig( + p, + logger, + l2Net.rollupCfg.BlockTime, + cfg.NoDiscovery, + cfg.EnableReqRespSync, + sequencerP2PKeyHex, + ) // specify interop config, but do not configure anything, to disable indexing mode interopCfg := &interop.Config{} @@ -280,9 +246,6 @@ func withOpNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack } } - // Set the req-resp sync flag as per config - p2pConfig.EnableReqRespSync = cfg.EnableReqRespSync - nodeCfg := &config.Config{ L1: &config.L1EndpointConfig{ L1NodeAddr: l1EL.UserRPC(), diff --git a/op-devstack/sysgo/l2_cl_p2p_config.go b/op-devstack/sysgo/l2_cl_p2p_config.go new file mode 100644 index 0000000000000..2644ab05ada73 --- /dev/null +++ b/op-devstack/sysgo/l2_cl_p2p_config.go @@ -0,0 +1,74 @@ +package sysgo + +import ( + "encoding/hex" + "flag" + + "github.com/urfave/cli/v2" + + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + opNodeFlags "github.com/ethereum-optimism/optimism/op-node/flags" + "github.com/ethereum-optimism/optimism/op-node/p2p" + p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" +) + +func newDevstackP2PConfig( + p devtest.P, + logger log.Logger, + blockTime uint64, + noDiscovery bool, + enableReqRespSync bool, + sequencerP2PKeyHex string, +) (*p2p.Config, p2p.SignerSetup) { + require := p.Require() + + // make a dummy 
flagset since p2p config initialization helpers only input cli context + fs := flag.NewFlagSet("", flag.ContinueOnError) + // use default flags + for _, f := range opNodeFlags.P2PFlags(opNodeFlags.EnvVarPrefix) { + require.NoError(f.Apply(fs)) + } + // mandatory P2P flags + require.NoError(fs.Set(opNodeFlags.AdvertiseIPName, "127.0.0.1")) + require.NoError(fs.Set(opNodeFlags.AdvertiseTCPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.AdvertiseUDPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.ListenIPName, "127.0.0.1")) + require.NoError(fs.Set(opNodeFlags.ListenTCPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.ListenUDPPortName, "0")) + // avoid resource unavailable error by using memorydb + require.NoError(fs.Set(opNodeFlags.DiscoveryPathName, "memory")) + require.NoError(fs.Set(opNodeFlags.PeerstorePathName, "memory")) + // Explicitly set to empty; do not default to resolving DNS of external bootnodes + require.NoError(fs.Set(opNodeFlags.BootnodesName, "")) + // For peer ID + networkPrivKey, err := crypto.GenerateKey() + networkPrivKeyHex := hex.EncodeToString(crypto.FromECDSA(networkPrivKey)) + require.NoError(err) + require.NoError(fs.Set(opNodeFlags.P2PPrivRawName, networkPrivKeyHex)) + if noDiscovery { + require.NoError(fs.Set(opNodeFlags.NoDiscoveryName, "true")) + } + if enableReqRespSync { + require.NoError(fs.Set(opNodeFlags.SyncReqRespName, "true")) + } + + cliCtx := cli.NewContext(&cli.App{}, fs, nil) + + var p2pSignerSetup p2p.SignerSetup + if sequencerP2PKeyHex != "" { + require.NoError(fs.Set(opNodeFlags.SequencerP2PKeyName, sequencerP2PKeyHex)) + p2pSignerSetup, err = p2pcli.LoadSignerSetup(cliCtx, logger) + require.NoError(err, "failed to load p2p signer") + logger.Info("Sequencer key acquired") + } + + p2pConfig, err := p2pcli.NewConfig(cliCtx, blockTime) + require.NoError(err, "failed to load p2p config") + p2pConfig.NoDiscovery = noDiscovery + p2pConfig.EnableReqRespSync = enableReqRespSync + + return p2pConfig, p2pSignerSetup 
+} diff --git a/op-devstack/sysgo/l2_cl_supernode.go b/op-devstack/sysgo/l2_cl_supernode.go index b10f17dd742df..adecee0333615 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -2,15 +2,18 @@ package sysgo import ( "context" + "encoding/hex" "fmt" "sort" "strconv" "sync" "time" + "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" altda "github.com/ethereum-optimism/optimism/op-alt-da" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" @@ -315,6 +318,20 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI if cluster, ok := orch.ClusterForL2(l2ChainID); ok { depSet = cluster.DepSet() } + sequencerP2PKeyHex := "" + if isSequencer { + p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2ChainID.ToBig())) + require.NoError(err, "need p2p key for supernode virtual sequencer") + sequencerP2PKeyHex = hex.EncodeToString(crypto.FromECDSA(p2pKey)) + } + p2pConfig, p2pSignerSetup := newDevstackP2PConfig( + p, + logger.New("chain_id", l2ChainID.String(), "component", "supernode-p2p"), + l2Net.rollupCfg.BlockTime, + false, + true, + sequencerP2PKeyHex, + ) return &config.Config{ L1: &config.L1EndpointConfig{ L1NodeAddr: l1EL.UserRPC(), @@ -335,12 +352,13 @@ func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeI Beacon: &config.L1BeaconEndpointConfig{BeaconAddr: l1CL.beaconHTTPAddr}, Driver: driver.Config{SequencerEnabled: isSequencer, SequencerConfDepth: 2}, Rollup: *l2Net.rollupCfg, + P2PSigner: p2pSignerSetup, RPC: oprpc.CLIConfig{ListenAddr: "127.0.0.1", ListenPort: 0, EnableAdmin: true}, InteropConfig: interopCfg, - P2P: nil, + P2P: p2pConfig, L1EpochPollInterval: 2 * time.Second, RuntimeConfigReloadInterval: 0, - Sync: nodeSync.Config{SyncMode: 
nodeSync.CLSync}, + Sync: nodeSync.Config{SyncMode: nodeSync.CLSync, SyncModeReqResp: true}, ConfigPersistence: config.DisabledConfigPersistence{}, Metrics: opmetrics.CLIConfig{}, Pprof: oppprof.CLIConfig{}, diff --git a/op-devstack/sysgo/system_two_l2_follow_l2.go b/op-devstack/sysgo/system_two_l2_follow_l2.go index 01c01c8dabc8e..7bf5b9ad11c01 100644 --- a/op-devstack/sysgo/system_two_l2_follow_l2.go +++ b/op-devstack/sysgo/system_two_l2_follow_l2.go @@ -43,14 +43,13 @@ func DefaultTwoL2SupernodeFollowL2System(dest *DefaultTwoL2SupernodeFollowL2Syst // Chain A follower opt.Add(WithL2ELNode(ids.L2AFollowerEL)) opt.Add(WithOpNodeFollowL2(ids.L2AFollowerCL, ids.L1CL, ids.L1EL, ids.L2AFollowerEL, ids.L2ACL)) - // TODO(#19379): The chain source is a supernode proxy CL, which does not implement opp2p_* RPCs. - // Skip CL P2P wiring and rely on follow-source + EL P2P for data availability. - // opt.Add(WithL2CLP2PConnection(ids.L2ACL, ids.L2AFollowerCL)) + opt.Add(WithL2CLP2PConnection(ids.L2ACL, ids.L2AFollowerCL)) opt.Add(WithL2ELP2PConnection(ids.L2AEL, ids.L2AFollowerEL, false)) // Chain B follower opt.Add(WithL2ELNode(ids.L2BFollowerEL)) opt.Add(WithOpNodeFollowL2(ids.L2BFollowerCL, ids.L1CL, ids.L1EL, ids.L2BFollowerEL, ids.L2BCL)) + opt.Add(WithL2CLP2PConnection(ids.L2BCL, ids.L2BFollowerCL)) opt.Add(WithL2ELP2PConnection(ids.L2BEL, ids.L2BFollowerEL, false)) opt.Add(stack.Finally(func(orch *Orchestrator) { From 86f1d29ac38e1dbbf2403aa8b71d4e40b1921810 Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Wed, 11 Mar 2026 18:08:41 +0100 Subject: [PATCH 103/201] README: Document monorepo downloading & shallow cloning (#19485) --- README.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/README.md b/README.md index a7693b417e731..bdd480d522e01 100644 --- a/README.md +++ b/README.md @@ -145,6 +145,33 @@ Some exceptions to this rule exist for cases in which we absolutely must deploy If you're changing or adding a contract and you're 
unsure about which branch to make a PR into, default to using a feature branch. Feature branches are typically used when there are conflicts between 2 projects touching the same code, to avoid conflicts from merging both into `develop`. +## Downloading & Shallow-Cloning the Monorepo + +If you want to use the monorepo as a dependency, e.g. in CI, you can greatly speed up the fetching process by either downloading it directly as an archive from Github instead of cloning as a git repository or shallow-cloning it. +This avoids downloading the full monorepo git history, which is unfortunately a few GBs in size, but which also isn't needed for many use cases, like CI. + +To fetch the monorepo at a specific commit/branch/tag `$REF`, download and unpack with +``` +curl -L https://github.com/ethereum-optimism/optimism/archive/$REF.tar.gz | tar xz +``` +Note that if you need any of its submodules, you'd need to manually download those too. + +If you want a shallow git clone of latest `develop`, you can just do +``` +git clone --depth 1 --shallow-submodules https://github.com/ethereum-optimism/optimism.git +``` +which takes only a few seconds on a good internet connection. + +If you want to shallow-checkout a specific branch or tag `$REF`, do + +``` +git clone --no-checkout --depth 1 --shallow-submodules https://github.com/ethereum-optimism/optimism.git +cd optimism +git fetch --depth 1 origin "$REF" +git checkout "$REF" +``` +which should also only take a few seconds. + ## License All other files within this repository are licensed under the [MIT License](https://github.com/ethereum-optimism/optimism/blob/master/LICENSE) unless stated otherwise. 
From 648778dea27e62a146cdcedb1fc68b88c0a0dc07 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 12 Mar 2026 07:34:11 +1000 Subject: [PATCH 104/201] fix(kona/derive): make syscfg update failure non-fatal in L1Traversal (#19358) * fix(kona/derive): make syscfg update failure non-fatal in L1Traversal When `update_with_receipts` fails to apply system config changes from L1 receipts, both `PollingTraversal::advance_origin` and `IndexedTraversal::provide_next_block` were returning a Critical pipeline error, permanently halting the pipeline. op-node's reference implementation (`l1_traversal.go:78-82`) treats this failure as non-fatal with the comment: "failure to apply is just informational, so we just log the error and continue." This commit aligns kona's behaviour with op-node: on syscfg update error, log a warning and continue advancing the origin rather than halting. Fixes https://github.com/ethereum-optimism/optimism/issues/19353 Co-Authored-By: Claude Sonnet 4.6 * fix: apply rustfmt to indexed.rs test line added in this fix The block2 struct literal added in this fix exceeds the 100-char line width limit. Wrap it to comply with rustfmt requirements. 
Co-Authored-By: Claude Sonnet 4.6 --------- Co-authored-by: Claude Sonnet 4.6 --- .../derive/src/stages/traversal/indexed.rs | 17 +++-- .../derive/src/stages/traversal/polling.rs | 65 +++++++++++++++---- 2 files changed, 63 insertions(+), 19 deletions(-) diff --git a/rust/kona/crates/protocol/derive/src/stages/traversal/indexed.rs b/rust/kona/crates/protocol/derive/src/stages/traversal/indexed.rs index 5b940d8a3b033..fd70ea9660ea2 100644 --- a/rust/kona/crates/protocol/derive/src/stages/traversal/indexed.rs +++ b/rust/kona/crates/protocol/derive/src/stages/traversal/indexed.rs @@ -100,13 +100,14 @@ impl IndexedTraversal { } Ok(false) => { /* Ignore, no update applied */ } Err(err) => { - error!(target: "traversal", ?err, "Failed to update system config at block {}", block_info.number); + // Failure to update the system config is non-fatal: one or more receipts may be + // malformed or invalid. Log a warning and continue. + warn!(target: "traversal", ?err, "Failed to update system config at block {} (non-fatal, continuing)", block_info.number); kona_macros::set!( gauge, crate::Metrics::PIPELINE_SYS_CONFIG_UPDATE_ERROR, block_info.number as f64 ); - return Err(PipelineError::SystemConfigUpdate(err).crit()); } } @@ -315,15 +316,19 @@ mod tests { let first = b256!("3333333333333333333333333333333333333333333333333333333333333333"); let second = b256!("4444444444444444444444444444444444444444444444444444444444444444"); let block1 = BlockInfo { hash: first, ..BlockInfo::default() }; - let block2 = BlockInfo { number: 1, hash: second, ..BlockInfo::default() }; + let block2 = + BlockInfo { number: 1, hash: second, parent_hash: first, ..BlockInfo::default() }; let blocks = vec![block1, block2]; let receipts = new_receipts(); let mut traversal = new_test_managed(blocks, receipts); traversal.block = Some(block1); assert_eq!(traversal.next_l1_block().await.unwrap(), Some(block1)); - // provide_next_block will fail due to system config update error - let err = 
traversal.provide_next_block(block2).await.unwrap_err(); - matches!(err, PipelineErrorKind::Critical(PipelineError::SystemConfigUpdate(_))); + // A system config update error is now non-fatal (matches op-node behaviour): + // provide_next_block returns Ok(()) and origin advances to block2. + assert!( + traversal.provide_next_block(block2).await.is_ok(), + "system config update failure should be non-fatal (warn + continue)" + ); } #[tokio::test] diff --git a/rust/kona/crates/protocol/derive/src/stages/traversal/polling.rs b/rust/kona/crates/protocol/derive/src/stages/traversal/polling.rs index c31160fa2501e..ed46625c33ee4 100644 --- a/rust/kona/crates/protocol/derive/src/stages/traversal/polling.rs +++ b/rust/kona/crates/protocol/derive/src/stages/traversal/polling.rs @@ -108,13 +108,14 @@ impl OriginAdvancer for PollingTraversal { } Ok(false) => { /* Ignore, no update applied */ } Err(err) => { - error!(target: "l1_traversal", ?err, "Failed to update system config at block {}", next_l1_origin.number); + // Failure to update the system config is non-fatal: one or more receipts may be + // malformed or invalid. Log a warning and continue. 
+ warn!(target: "l1_traversal", ?err, "Failed to update system config at block {} (non-fatal, continuing)", next_l1_origin.number); kona_macros::set!( gauge, crate::Metrics::PIPELINE_SYS_CONFIG_UPDATE_ERROR, next_l1_origin.number as f64 ); - return Err(PipelineError::SystemConfigUpdate(err).crit()); } } @@ -175,9 +176,14 @@ impl SignalReceiver for PollingTraversal { #[cfg(test)] pub(crate) mod tests { use super::*; - use crate::{errors::PipelineErrorKind, test_utils::TraversalTestHelper}; + use crate::{ + errors::PipelineErrorKind, + test_utils::{TestChainProvider, TraversalTestHelper}, + }; use alloc::vec; - use alloy_primitives::{address, b256}; + use alloy_consensus::Receipt; + use alloy_primitives::{Bytes, Log, LogData, address, b256}; + use kona_genesis::CONFIG_UPDATE_TOPIC; #[test] fn test_l1_traversal_batcher_address() { @@ -285,18 +291,51 @@ pub(crate) mod tests { #[tokio::test] async fn test_l1_traversal_system_config_update_fails() { + // Build a 3-node chain: genesis (hash=0x0) → block1 → block2. + // block2 has a receipt with a log from L1_SYS_CONFIG_ADDR that has only + // 1 topic instead of the required >= 3, triggering a syscfg update error. + // The fix under test makes this error non-fatal: advance_origin warns and + // continues (matching op-node's l1_traversal.go:78-82 behaviour). 
let first = b256!("3333333333333333333333333333333333333333333333333333333333333333"); let second = b256!("4444444444444444444444444444444444444444444444444444444444444444"); - let block1 = BlockInfo { hash: first, ..BlockInfo::default() }; - let block2 = BlockInfo { hash: second, ..BlockInfo::default() }; - let blocks = vec![block1, block2]; - let receipts = TraversalTestHelper::new_receipts(); - let mut traversal = TraversalTestHelper::new_from_blocks(blocks, receipts); + // block1: child of genesis (parent_hash = 0x0 = genesis.hash) + let block1 = BlockInfo { number: 1, hash: first, ..BlockInfo::default() }; + // block2: child of block1, with a receipt that triggers syscfg update failure + let block2 = + BlockInfo { number: 2, hash: second, parent_hash: first, ..BlockInfo::default() }; + + let mut provider = TestChainProvider::default(); + let rollup_config = RollupConfig { + l1_system_config_address: TraversalTestHelper::L1_SYS_CONFIG_ADDR, + ..RollupConfig::default() + }; + provider.insert_block(1, block1); + provider.insert_block(2, block2); + // block1 gets an empty receipt (no syscfg updates). + provider.insert_receipts(first, vec![Receipt::default()]); + // block2 gets a malformed log from L1_SYS_CONFIG_ADDR (only 1 topic instead of 3). + // update_with_receipts returns Err(InvalidTopicLen(1)) → non-fatal with the fix. 
+ let bad_log = Log { + address: TraversalTestHelper::L1_SYS_CONFIG_ADDR, + data: LogData::new_unchecked(vec![CONFIG_UPDATE_TOPIC], Bytes::default()), + }; + let bad_receipt = Receipt { + status: alloy_consensus::Eip658Value::Eip658(true), + logs: vec![bad_log], + ..Receipt::default() + }; + provider.insert_receipts(second, vec![bad_receipt]); + + let mut traversal = PollingTraversal::new(provider, Arc::new(rollup_config)); + + // First advance_origin: genesis → block1 (empty receipt, no syscfg update) assert!(traversal.advance_origin().await.is_ok()); - // Only the second block should fail since the second receipt - // contains invalid logs that will error for a system config update. - let err = traversal.advance_origin().await.unwrap_err(); - matches!(err, PipelineErrorKind::Critical(PipelineError::SystemConfigUpdate(_))); + // Second advance_origin: block1 → block2 (bad receipt triggers syscfg update + // error, but it is now non-fatal: warn + continue = Ok(())). + assert!( + traversal.advance_origin().await.is_ok(), + "system config update failure should be non-fatal (warn + continue)" + ); } #[tokio::test] From 6eb557fd9aa7fe05e2470d34ce34386ba2b1ee8f Mon Sep 17 00:00:00 2001 From: serpixel <5087962+serpixel@users.noreply.github.com> Date: Wed, 11 Mar 2026 22:49:11 +0100 Subject: [PATCH 105/201] feat(teams): rename core team (#19436) --- .github/CODEOWNERS | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 39c66c4853f7a..8a8428ba09bc0 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -6,8 +6,8 @@ /op-node/rollup/derive @ethereum-optimism/consensus # exclusive /rust/kona/crates/protocol @ethereum-optimism/consensus # exclusive -/op-deployer @ethereum-optimism/platforms-team @ethereum-optimism/monorepo-reviewers -/op-validator @ethereum-optimism/platforms-team @ethereum-optimism/monorepo-reviewers +/op-deployer @ethereum-optimism/core-team @ethereum-optimism/monorepo-reviewers 
+/op-validator @ethereum-optimism/core-team @ethereum-optimism/monorepo-reviewers /op-conductor @ethereum-optimism/op-conductor @ethereum-optimism/monorepo-reviewers From 38f07c551a47205a5fd0e2006421bc34020726f1 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Wed, 11 Mar 2026 19:10:13 -0400 Subject: [PATCH 106/201] refactor(op-devstack): simplify acceptance testing around direct preset constructors (#19452) * refactor(op-devstack): simplify acceptance testing around direct preset constructors fix acceptance and kona e2e test harness regressions fix same-timestamp cycle test and kona shared runtime wrappers address review feedback and restore devstack test coverage fix(ci): stabilize rust e2e websocket tests * refactor(op-devstack): validate preset option support * fix(ci): reduce rust subprocess log volume --- .circleci/config.yml | 4 - .circleci/continue/main.yml | 174 +--- PR.md | 92 ++ op-acceptance-tests/README.md | 94 +- op-acceptance-tests/acceptance-tests.yaml | 8 +- op-acceptance-tests/cmd/main.go | 315 ------ op-acceptance-tests/justfile | 118 +-- .../tests/base/advance_test.go | 2 +- .../tests/base/chain/init_test.go | 13 - .../tests/base/conductor/init_test.go | 11 - .../conductor/leadership_transfer_test.go | 7 +- .../tests/base/deposit/init_test.go | 11 - .../tests/base/eth_simulate_test.go | 2 +- op-acceptance-tests/tests/base/init_test.go | 12 - .../tests/base/public_rpc_advance_test.go | 2 +- .../tests/base/withdrawal/cannon/init_test.go | 12 - .../base/withdrawal/cannon_kona/init_test.go | 12 - .../base/withdrawal/permissioned/init_test.go | 12 - .../base/withdrawal/withdrawal_test_helper.go | 42 +- .../tests/batcher/batcher_test.go | 35 +- .../tests/batcher/init_test.go | 33 - .../tests/batcher/throttling/init_test.go | 33 - .../batcher/throttling/throttling_test.go | 17 +- .../cgt_introspection_test.go | 3 +- .../cgt_l1_portal_introspection_test.go | 3 +- .../cgt_native_payment_test.go | 3 +- .../cgt_portal_reverts_test.go | 3 +- 
.../custom_gas_token/cgt_reverts_test.go | 8 +- .../custom_gas_token/cgt_systemconfig_test.go | 5 +- .../tests/custom_gas_token/helpers.go | 16 + .../tests/custom_gas_token/init_test.go | 24 - .../tests/depreqres/common/common.go | 66 +- .../reqressyncdisabled/clsync/clsync_test.go | 6 +- .../reqressyncdisabled/clsync/init_test.go | 23 - .../reqressyncdisabled/depreqres_test.go | 4 +- .../divergence/divergence_test.go | 39 +- .../reqressyncdisabled/elsync/elsync_test.go | 6 +- .../reqressyncdisabled/elsync/init_test.go | 23 - .../depreqres/reqressyncdisabled/init_test.go | 23 - .../syncmodereqressync/clsync/clsync_test.go | 6 +- .../syncmodereqressync/clsync/init_test.go | 23 - .../syncmodereqressync/elsync/elsync_test.go | 6 +- .../syncmodereqressync/elsync/init_test.go | 23 - .../tests/ecotone/init_test.go | 13 - op-acceptance-tests/tests/fjord/init_test.go | 13 - .../flashblocks/flashblocks_stream_test.go | 10 +- .../tests/flashblocks/init_test.go | 12 - .../tests/fusaka/fusaka_test.go | 4 +- op-acceptance-tests/tests/fusaka/helpers.go | 4 +- .../fusaka/{init_test.go => setup_test.go} | 17 +- .../tests/interop/contract/init_test.go | 11 - .../tests/interop/loadtest/doc.go | 2 +- .../interop/loadtest/interop_load_test.go | 12 - .../tests/interop/message/init_test.go | 11 - .../interop/message/interop_happy_tx_test.go | 8 - .../tests/interop/prep/init_test.go | 14 - .../tests/interop/prep/prep_test.go | 11 +- .../interop/proofs-singlechain/init_test.go | 17 - .../interop_fault_proofs_test.go | 2 +- .../tests/interop/proofs/challenger_test.go | 6 +- .../tests/interop/proofs/fpp/fpp_test.go | 6 +- .../tests/interop/proofs/fpp/init_test.go | 16 - .../tests/interop/proofs/init_test.go | 16 - .../tests/interop/proofs/proposer_test.go | 2 +- .../tests/interop/proofs/serial/init_test.go | 16 - .../serial/interop_fault_proofs_test.go | 6 +- .../interop/proofs/withdrawal/init_test.go | 11 - .../proofs/withdrawal/withdrawal_test.go | 2 +- 
.../interop/reorgs/init_exec_msg_test.go | 3 +- .../tests/interop/reorgs/init_test.go | 13 - .../reorgs/l2_reorgs_after_l1_reorg_test.go | 8 +- .../tests/interop/seqwindow/expiry_test.go | 13 +- .../tests/interop/seqwindow/init_test.go | 26 - .../tests/interop/smoke/init_test.go | 11 - .../sync/multisupervisor_interop/init_test.go | 19 - .../interop/sync/simple_interop/init_test.go | 11 - .../upgrade-no-supervisor/init_test.go | 13 - .../upgrade-singlechain/crossl2inbox_test.go | 14 +- .../interop/upgrade-singlechain/init_test.go | 16 - .../tests/interop/upgrade/init_test.go | 14 - .../tests/interop/upgrade/post_test.go | 7 +- .../tests/interop/upgrade/pre_test.go | 6 +- .../tests/interop/upgrade/setup_test.go | 22 + .../tests/isthmus/erc20_bridge/init_test.go | 13 - .../tests/isthmus/operator_fee/init_test.go | 13 - .../tests/isthmus/pectra/init_test.go | 13 - .../isthmus/pectra/pectra_features_test.go | 14 +- .../preinterop-singlechain/init_test.go | 17 - .../interop_fault_proofs_test.go | 5 +- .../isthmus/preinterop/challenger_test.go | 3 +- .../tests/isthmus/preinterop/init_test.go | 16 - .../preinterop/interop_fault_proofs_test.go | 7 +- .../preinterop/interop_readiness_test.go | 2 +- .../tests/isthmus/preinterop/proposer_test.go | 3 +- .../tests/isthmus/preinterop/setup_test.go | 13 + .../isthmus/withdrawal_root/init_test.go | 13 - .../tests/jovian/bpo2/bpo2_test.go | 27 +- .../tests/jovian/bpo2/joviantest/cases.go | 400 ++++++++ .../tests/jovian/pectra/pectra_test.go | 25 +- .../tests/proofs/cannon/init_test.go | 21 - .../tests/proofs/cannon/setup_test.go | 23 + .../tests/proofs/cannon/smoke_test.go | 3 +- .../tests/proofs/cannon/step_test.go | 5 +- op-acceptance-tests/tests/rules/init_test.go | 12 +- op-acceptance-tests/tests/rules/rules_test.go | 11 +- .../tests/safeheaddb_clsync/init_test.go | 17 - .../safeheaddb_clsync/safeheaddb_test.go | 10 +- .../tests/safeheaddb_elsync/init_test.go | 17 - .../safeheaddb_elsync/safeheaddb_test.go | 15 +- 
.../tests/sequencer/init_test.go | 17 - .../tests/supernode/advance_multiple_test.go | 2 +- .../tests/supernode/init_test.go | 14 - .../activation_after_genesis_test.go | 4 + .../supernode/interop/activation/init_test.go | 22 - .../interop/activation_at_genesis_test.go | 3 +- .../supernode/interop/cross_message_test.go | 3 +- .../supernode/interop/follow_l2/init_test.go | 18 - .../interop/head_progression_test.go | 5 +- .../tests/supernode/interop/helpers_test.go | 12 + .../tests/supernode/interop/init_test.go | 19 - .../supernode/interop/reorg/init_test.go | 15 - .../same_timestamp_invalid/init_test.go | 16 - .../same_timestamp_test.go | 11 +- .../interop/timestamp_progression_test.go | 5 +- .../tests/sync/clsync/gap_clp2p/init_test.go | 21 +- .../tests/sync/clsync/gap_clp2p/sync_test.go | 3 +- .../tests/sync/elsync/gap_clp2p/init_test.go | 26 +- .../tests/sync/elsync/gap_clp2p/sync_test.go | 5 +- .../tests/sync/elsync/gap_elp2p/init_test.go | 21 +- .../tests/sync/elsync/gap_elp2p/sync_test.go | 9 +- .../tests/sync/elsync/reorg/init_test.go | 21 +- .../tests/sync/elsync/reorg/sync_test.go | 27 +- .../tests/sync/follow_l2/init_test.go | 16 - .../tests/sync/follow_l2/setup_test.go | 25 + .../tests/sync/follow_l2/sync_test.go | 15 +- .../tests/sync/manual/init_test.go | 22 - .../tests/sync/manual/sync_test.go | 9 +- .../sync_tester/sync_tester_e2e/init_test.go | 14 - .../sync_tester_e2e/sync_tester_e2e_test.go | 4 +- .../sync_tester_elsync/elsync_test.go | 15 +- .../sync_tester_elsync/init_test.go | 17 - .../sync_tester_elsync_multi/init_test.go | 24 - .../sync_tester_elsync_multi/sync_test.go | 20 +- .../sync_tester_ext_el/ext_config.go | 88 -- .../sync_tester_ext_el_test.go | 183 ---- .../sync_tester/sync_tester_hfs/init_test.go | 26 - .../sync_tester_hfs/sync_tester_hfs_test.go | 25 +- .../sync_tester_hfs_ext_test.go | 289 ------ op-chain-ops/script/cheatcodes_external.go | 23 + .../script/cheatcodes_external_test.go | 41 + op-devstack/README.md | 91 +- 
op-devstack/compat/compat.go | 11 - op-devstack/dsl/bridge.go | 3 +- op-devstack/dsl/cluster.go | 26 - op-devstack/dsl/conductor.go | 2 +- op-devstack/dsl/ecotone_fees.go | 3 +- op-devstack/dsl/faucet.go | 2 +- op-devstack/dsl/fb_ws_client.go | 174 ---- op-devstack/dsl/fjord_fees.go | 5 +- op-devstack/dsl/funder.go | 2 +- op-devstack/dsl/l1_cl.go | 14 +- op-devstack/dsl/l1_el.go | 12 +- op-devstack/dsl/l1_network.go | 29 +- op-devstack/dsl/l2_batcher.go | 6 +- op-devstack/dsl/l2_challenger.go | 2 +- op-devstack/dsl/l2_cl.go | 63 +- op-devstack/dsl/l2_el.go | 45 +- op-devstack/dsl/l2_network.go | 93 +- op-devstack/dsl/l2_op_rbuilder.go | 20 +- op-devstack/dsl/l2_proposer.go | 2 +- op-devstack/dsl/multi_client.go | 2 +- op-devstack/dsl/operator_fee.go | 5 +- .../dsl/proofs/dispute_game_factory.go | 5 +- op-devstack/dsl/rollup_boost.go | 12 +- op-devstack/dsl/sequencer.go | 2 +- op-devstack/dsl/superchain.go | 26 - op-devstack/dsl/supernode.go | 6 +- op-devstack/dsl/supervisor.go | 16 +- op-devstack/example/init_test.go | 27 - op-devstack/presets/cl_config.go | 58 -- op-devstack/presets/flashblocks.go | 180 +++- op-devstack/presets/interop.go | 299 ++---- op-devstack/presets/interop_from_runtime.go | 213 ++++ op-devstack/presets/logging.go | 81 -- op-devstack/presets/minimal.go | 64 +- op-devstack/presets/minimal_external_el.go | 62 -- op-devstack/presets/minimal_from_runtime.go | 76 ++ .../presets/minimal_with_conductors.go | 45 +- .../presets/minimal_with_synctester.go | 34 - op-devstack/presets/mixed_frontends.go | 132 +++ op-devstack/presets/networks.go | 328 ++++++ op-devstack/presets/op_rbuilder_rules.go | 43 +- op-devstack/presets/option_validation.go | 150 +++ op-devstack/presets/options.go | 246 +++++ op-devstack/presets/options_test.go | 121 +++ op-devstack/presets/orchestrator.go | 186 ---- op-devstack/presets/proof.go | 157 ++- op-devstack/presets/rpc_frontends.go | 592 +++++++++++ op-devstack/presets/simple_with_synctester.go | 39 +- 
.../presets/singlechain_from_runtime.go | 187 ++++ op-devstack/presets/singlechain_multinode.go | 103 +- .../presets/singlechain_twoverifiers.go | 40 +- .../presets/superproofs_from_runtime.go | 156 +++ op-devstack/presets/sync_tester_config.go | 24 - op-devstack/presets/sysgo_runtime.go | 175 ++++ op-devstack/presets/timetravel.go | 20 +- op-devstack/presets/twol2.go | 129 +-- op-devstack/presets/twol2_follow_l2.go | 52 +- op-devstack/presets/twol2_from_runtime.go | 209 ++++ op-devstack/shim/cluster.go | 39 - op-devstack/shim/common.go | 68 -- op-devstack/shim/conductor.go | 41 - op-devstack/shim/el.go | 61 -- op-devstack/shim/faucet.go | 42 - op-devstack/shim/fb_ws_client.go | 50 - op-devstack/shim/keyring.go | 37 - op-devstack/shim/l1_cl.go | 39 - op-devstack/shim/l1_el.go | 31 - op-devstack/shim/l1_network.go | 105 -- op-devstack/shim/l2_batcher.go | 39 - op-devstack/shim/l2_challenger.go | 37 - op-devstack/shim/l2_cl.go | 117 --- op-devstack/shim/l2_el.go | 64 -- op-devstack/shim/l2_network.go | 360 ------- op-devstack/shim/l2_proposer.go | 33 - op-devstack/shim/matcher.go | 41 - op-devstack/shim/network.go | 135 --- op-devstack/shim/op_rbuilder.go | 58 -- op-devstack/shim/rollup_boost.go | 64 -- op-devstack/shim/superchain.go | 36 - op-devstack/shim/supernode.go | 42 - op-devstack/shim/supervisor.go | 46 - op-devstack/shim/sync_tester.go | 56 -- op-devstack/shim/system.go | 327 ------ op-devstack/shim/test_sequencer.go | 59 -- op-devstack/stack/cluster.go | 14 - op-devstack/stack/common.go | 1 + op-devstack/stack/component_id.go | 382 ------- op-devstack/stack/component_id_test.go | 289 ------ op-devstack/stack/component_registry.go | 243 ----- op-devstack/stack/conductor.go | 3 +- op-devstack/stack/context.go | 48 - op-devstack/stack/context_test.go | 54 +- op-devstack/stack/ext_network_config.go | 11 - op-devstack/stack/faucet.go | 3 +- op-devstack/stack/fb_ws_client.go | 15 - op-devstack/stack/l1_cl.go | 3 +- op-devstack/stack/l1_el.go | 2 - 
op-devstack/stack/l1_network.go | 14 - op-devstack/stack/l2_batcher.go | 3 +- op-devstack/stack/l2_challenger.go | 3 +- op-devstack/stack/l2_cl.go | 9 +- op-devstack/stack/l2_el.go | 1 - op-devstack/stack/l2_network.go | 34 - op-devstack/stack/l2_proposer.go | 4 +- op-devstack/stack/lifecycle.go | 6 + op-devstack/stack/match/archive.go | 30 - op-devstack/stack/match/core.go | 40 - op-devstack/stack/match/doc.go | 16 - op-devstack/stack/match/engine.go | 55 -- op-devstack/stack/match/first.go | 27 - op-devstack/stack/match/gate.go | 31 - op-devstack/stack/match/gate_test.go | 52 - op-devstack/stack/match/interop.go | 14 - op-devstack/stack/match/labels.go | 38 - op-devstack/stack/match/second.go | 8 - op-devstack/stack/match/sequencer.go | 21 - op-devstack/stack/match/util.go | 171 ---- op-devstack/stack/match/util_test.go | 88 -- op-devstack/stack/matcher.go | 60 -- op-devstack/stack/network.go | 12 - op-devstack/stack/op_rbuilder.go | 1 - op-devstack/stack/orchestrator.go | 276 ------ op-devstack/stack/registry.go | 364 ------- op-devstack/stack/registry_test.go | 597 ----------- op-devstack/stack/rollup_boost.go | 1 - op-devstack/stack/superchain.go | 8 - op-devstack/stack/supernode.go | 37 +- op-devstack/stack/supervisor.go | 1 - op-devstack/stack/sync_tester.go | 3 +- op-devstack/stack/system.go | 65 -- op-devstack/stack/test_sequencer.go | 1 - op-devstack/sysext/addrbook.go | 66 -- op-devstack/sysext/control_plane.go | 53 - op-devstack/sysext/helpers.go | 139 --- op-devstack/sysext/l1.go | 75 -- op-devstack/sysext/l2.go | 553 ----------- op-devstack/sysext/orchestrator.go | 132 --- op-devstack/sysext/system.go | 125 --- op-devstack/sysgo/add_game_type.go | 151 +-- op-devstack/sysgo/cluster.go | 28 - op-devstack/sysgo/component_target.go | 26 + op-devstack/sysgo/control_plane.go | 56 -- op-devstack/sysgo/control_plane_test.go | 210 ---- op-devstack/sysgo/default_chain_ids.go | 9 + op-devstack/sysgo/deployer.go | 136 +-- op-devstack/sysgo/fakepos.go | 2 +- 
op-devstack/sysgo/faucet.go | 144 --- op-devstack/sysgo/keys.go | 15 - op-devstack/sysgo/l1_network.go | 26 +- op-devstack/sysgo/l1_nodes.go | 144 +-- op-devstack/sysgo/l1_nodes_subprocess.go | 246 ----- op-devstack/sysgo/l1_runtime.go | 229 +++++ op-devstack/sysgo/l2_batcher.go | 137 +-- op-devstack/sysgo/l2_challenger.go | 237 +---- op-devstack/sysgo/l2_cl.go | 54 +- op-devstack/sysgo/l2_cl_kona.go | 190 +--- op-devstack/sysgo/l2_cl_opnode.go | 245 +---- op-devstack/sysgo/l2_cl_p2p_config.go | 2 +- op-devstack/sysgo/l2_cl_p2p_util.go | 45 - op-devstack/sysgo/l2_cl_supernode.go | 330 +------ op-devstack/sysgo/l2_conductor.go | 28 + op-devstack/sysgo/l2_el.go | 62 +- op-devstack/sysgo/l2_el_opgeth.go | 91 +- op-devstack/sysgo/l2_el_opreth.go | 198 +--- op-devstack/sysgo/l2_el_p2p_util.go | 26 - op-devstack/sysgo/l2_el_synctester.go | 104 +- op-devstack/sysgo/l2_metrics_dashboard.go | 313 +----- .../sysgo/l2_metrics_dashboard_test.go | 36 - op-devstack/sysgo/l2_network.go | 50 +- .../sysgo/l2_network_superchain_registry.go | 75 -- op-devstack/sysgo/l2_proposer.go | 156 +-- op-devstack/sysgo/mixed_runtime.go | 623 ++++++++++++ op-devstack/sysgo/multichain_proofs.go | 327 ++++++ .../sysgo/multichain_supernode_runtime.go | 852 ++++++++++++++++ .../sysgo/multichain_supervisor_runtime.go | 472 +++++++++ op-devstack/sysgo/op_rbuilder.go | 109 +- op-devstack/sysgo/orchestrator.go | 178 ---- op-devstack/sysgo/orchestrator_getters.go | 48 - op-devstack/sysgo/preset_config.go | 123 +++ op-devstack/sysgo/rollup_boost.go | 153 +-- op-devstack/sysgo/runtime_state.go | 120 +++ op-devstack/sysgo/rust_binary.go | 73 +- op-devstack/sysgo/singlechain_build.go | 682 +++++++++++++ op-devstack/sysgo/singlechain_flashblocks.go | 127 +++ op-devstack/sysgo/singlechain_interop.go | 60 ++ op-devstack/sysgo/singlechain_runtime.go | 415 ++++++++ op-devstack/sysgo/singlechain_variants.go | 386 ++++++++ op-devstack/sysgo/subproc.go | 4 +- op-devstack/sysgo/superchain.go | 15 - 
op-devstack/sysgo/superroot.go | 462 ++++----- op-devstack/sysgo/supervisor.go | 41 +- op-devstack/sysgo/supervisor_kona.go | 99 ++ op-devstack/sysgo/supervisor_op.go | 84 +- op-devstack/sysgo/sync_tester.go | 133 +-- op-devstack/sysgo/system.go | 934 ------------------ .../sysgo/system_singlechain_multinode.go | 83 -- .../sysgo/system_singlechain_twoverifiers.go | 79 -- op-devstack/sysgo/system_synctester.go | 79 -- op-devstack/sysgo/system_synctester_ext.go | 103 -- op-devstack/sysgo/system_test.go | 138 --- op-devstack/sysgo/system_two_l2_follow_l2.go | 61 -- op-devstack/sysgo/test_sequencer.go | 321 ------ op-devstack/sysgo/util.go | 2 +- op-devstack/sysgo/world.go | 155 +++ op-service/logpipe/pipe.go | 7 + op-service/logpipe/pipe_test.go | 27 + op-up/main.go | 233 +++-- rust/kona/tests/node/common/conductor_test.go | 5 +- rust/kona/tests/node/common/engine_test.go | 23 +- rust/kona/tests/node/common/init_test.go | 17 - rust/kona/tests/node/common/p2p_test.go | 21 +- rust/kona/tests/node/common/rpc_test.go | 14 +- rust/kona/tests/node/common/setup_test.go | 11 + rust/kona/tests/node/common/sync_test.go | 6 +- rust/kona/tests/node/common/sync_ws_test.go | 20 +- .../tests/node/common/tx_inclusion_test.go | 3 +- .../{init_test.go => helpers_test.go} | 12 +- .../node/long-running/tx_producer_test.go | 2 +- rust/kona/tests/node/reorgs/init_test.go | 18 - .../reorgs/l2_reorg_after_l1_reorgs_test.go | 12 +- rust/kona/tests/node/reorgs/l2_reorg_test.go | 2 +- .../kona/tests/node/restart/conn_drop_test.go | 16 +- rust/kona/tests/node/restart/helpers_test.go | 18 + rust/kona/tests/node/restart/init_test.go | 21 - rust/kona/tests/node/restart/restart_test.go | 14 +- .../node/restart/sequencer_restart_test.go | 13 +- rust/kona/tests/node/restart/setup_test.go | 51 + rust/kona/tests/node/utils/mixed_preset.go | 514 ++-------- .../node/utils/mixed_preset_with_conductor.go | 28 +- rust/kona/tests/node/utils/package_scope.go | 166 ++++ 
.../tests/node/utils/test_sequencer_preset.go | 80 +- .../node/utils/test_sequencer_preset_test.go | 38 + rust/kona/tests/node/utils/ws.go | 5 +- .../supervisor/l2reorg/init_exec_msg_test.go | 247 +++++ .../l2reorgAfterL1reorg/reorg_test.go | 162 +++ .../supervisor/pre_interop/helpers_test.go | 14 + .../tests/supervisor/pre_interop/post_test.go | 208 ++++ .../tests/supervisor/pre_interop/pre_test.go | 110 +++ .../supervisor/presets/interop_minimal.go | 93 +- rust/op-reth/crates/tests/Makefile | 36 +- rust/op-reth/crates/tests/README.md | 51 +- .../tests/devnets/opgeth-seq-opreth-val.yaml | 74 -- .../tests/devnets/opreth-seq-opgeth-val.yaml | 74 -- .../crates/tests/proofs/core/init_test.go | 14 - .../crates/tests/proofs/prune/init_test.go | 14 - .../crates/tests/proofs/reorg/init_test.go | 14 - .../crates/tests/proofs/utils/preset.go | 343 ++----- 398 files changed, 11271 insertions(+), 17843 deletions(-) create mode 100644 PR.md delete mode 100644 op-acceptance-tests/cmd/main.go delete mode 100644 op-acceptance-tests/tests/base/chain/init_test.go delete mode 100644 op-acceptance-tests/tests/base/conductor/init_test.go delete mode 100644 op-acceptance-tests/tests/base/deposit/init_test.go delete mode 100644 op-acceptance-tests/tests/base/init_test.go delete mode 100644 op-acceptance-tests/tests/base/withdrawal/cannon/init_test.go delete mode 100644 op-acceptance-tests/tests/base/withdrawal/cannon_kona/init_test.go delete mode 100644 op-acceptance-tests/tests/base/withdrawal/permissioned/init_test.go delete mode 100644 op-acceptance-tests/tests/batcher/init_test.go delete mode 100644 op-acceptance-tests/tests/batcher/throttling/init_test.go delete mode 100644 op-acceptance-tests/tests/custom_gas_token/init_test.go delete mode 100644 op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go delete mode 100644 op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go delete mode 100644 
op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go delete mode 100644 op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go delete mode 100644 op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go delete mode 100644 op-acceptance-tests/tests/ecotone/init_test.go delete mode 100644 op-acceptance-tests/tests/fjord/init_test.go delete mode 100644 op-acceptance-tests/tests/flashblocks/init_test.go rename op-acceptance-tests/tests/fusaka/{init_test.go => setup_test.go} (67%) delete mode 100644 op-acceptance-tests/tests/interop/contract/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/message/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/prep/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/proofs/fpp/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/proofs/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/proofs/serial/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/proofs/withdrawal/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/reorgs/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/seqwindow/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/smoke/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/sync/simple_interop/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/upgrade-no-supervisor/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/upgrade-singlechain/init_test.go delete mode 100644 op-acceptance-tests/tests/interop/upgrade/init_test.go create mode 100644 op-acceptance-tests/tests/interop/upgrade/setup_test.go delete mode 100644 op-acceptance-tests/tests/isthmus/erc20_bridge/init_test.go delete mode 100644 
op-acceptance-tests/tests/isthmus/operator_fee/init_test.go delete mode 100644 op-acceptance-tests/tests/isthmus/pectra/init_test.go delete mode 100644 op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go delete mode 100644 op-acceptance-tests/tests/isthmus/preinterop/init_test.go create mode 100644 op-acceptance-tests/tests/isthmus/preinterop/setup_test.go delete mode 100644 op-acceptance-tests/tests/isthmus/withdrawal_root/init_test.go create mode 100644 op-acceptance-tests/tests/jovian/bpo2/joviantest/cases.go delete mode 100644 op-acceptance-tests/tests/proofs/cannon/init_test.go create mode 100644 op-acceptance-tests/tests/proofs/cannon/setup_test.go delete mode 100644 op-acceptance-tests/tests/safeheaddb_clsync/init_test.go delete mode 100644 op-acceptance-tests/tests/safeheaddb_elsync/init_test.go delete mode 100644 op-acceptance-tests/tests/sequencer/init_test.go delete mode 100644 op-acceptance-tests/tests/supernode/init_test.go delete mode 100644 op-acceptance-tests/tests/supernode/interop/activation/init_test.go delete mode 100644 op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go create mode 100644 op-acceptance-tests/tests/supernode/interop/helpers_test.go delete mode 100644 op-acceptance-tests/tests/supernode/interop/init_test.go delete mode 100644 op-acceptance-tests/tests/supernode/interop/reorg/init_test.go delete mode 100644 op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go delete mode 100644 op-acceptance-tests/tests/sync/follow_l2/init_test.go create mode 100644 op-acceptance-tests/tests/sync/follow_l2/setup_test.go delete mode 100644 op-acceptance-tests/tests/sync/manual/init_test.go delete mode 100644 op-acceptance-tests/tests/sync_tester/sync_tester_e2e/init_test.go delete mode 100644 op-acceptance-tests/tests/sync_tester/sync_tester_elsync/init_test.go delete mode 100644 op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go delete mode 100644 
op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/ext_config.go delete mode 100644 op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go delete mode 100644 op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go delete mode 100644 op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext/sync_tester_hfs_ext_test.go create mode 100644 op-chain-ops/script/cheatcodes_external_test.go delete mode 100644 op-devstack/compat/compat.go delete mode 100644 op-devstack/dsl/cluster.go delete mode 100644 op-devstack/dsl/fb_ws_client.go delete mode 100644 op-devstack/dsl/superchain.go delete mode 100644 op-devstack/example/init_test.go delete mode 100644 op-devstack/presets/cl_config.go create mode 100644 op-devstack/presets/interop_from_runtime.go delete mode 100644 op-devstack/presets/logging.go delete mode 100644 op-devstack/presets/minimal_external_el.go create mode 100644 op-devstack/presets/minimal_from_runtime.go delete mode 100644 op-devstack/presets/minimal_with_synctester.go create mode 100644 op-devstack/presets/mixed_frontends.go create mode 100644 op-devstack/presets/networks.go create mode 100644 op-devstack/presets/option_validation.go create mode 100644 op-devstack/presets/options.go create mode 100644 op-devstack/presets/options_test.go delete mode 100644 op-devstack/presets/orchestrator.go create mode 100644 op-devstack/presets/rpc_frontends.go create mode 100644 op-devstack/presets/singlechain_from_runtime.go create mode 100644 op-devstack/presets/superproofs_from_runtime.go delete mode 100644 op-devstack/presets/sync_tester_config.go create mode 100644 op-devstack/presets/sysgo_runtime.go create mode 100644 op-devstack/presets/twol2_from_runtime.go delete mode 100644 op-devstack/shim/cluster.go delete mode 100644 op-devstack/shim/common.go delete mode 100644 op-devstack/shim/conductor.go delete mode 100644 op-devstack/shim/el.go delete mode 100644 op-devstack/shim/faucet.go delete mode 100644 
op-devstack/shim/fb_ws_client.go delete mode 100644 op-devstack/shim/keyring.go delete mode 100644 op-devstack/shim/l1_cl.go delete mode 100644 op-devstack/shim/l1_el.go delete mode 100644 op-devstack/shim/l1_network.go delete mode 100644 op-devstack/shim/l2_batcher.go delete mode 100644 op-devstack/shim/l2_challenger.go delete mode 100644 op-devstack/shim/l2_cl.go delete mode 100644 op-devstack/shim/l2_el.go delete mode 100644 op-devstack/shim/l2_network.go delete mode 100644 op-devstack/shim/l2_proposer.go delete mode 100644 op-devstack/shim/matcher.go delete mode 100644 op-devstack/shim/network.go delete mode 100644 op-devstack/shim/op_rbuilder.go delete mode 100644 op-devstack/shim/rollup_boost.go delete mode 100644 op-devstack/shim/superchain.go delete mode 100644 op-devstack/shim/supernode.go delete mode 100644 op-devstack/shim/supervisor.go delete mode 100644 op-devstack/shim/sync_tester.go delete mode 100644 op-devstack/shim/system.go delete mode 100644 op-devstack/shim/test_sequencer.go delete mode 100644 op-devstack/stack/cluster.go delete mode 100644 op-devstack/stack/component_id.go delete mode 100644 op-devstack/stack/component_id_test.go delete mode 100644 op-devstack/stack/component_registry.go delete mode 100644 op-devstack/stack/ext_network_config.go delete mode 100644 op-devstack/stack/fb_ws_client.go create mode 100644 op-devstack/stack/lifecycle.go delete mode 100644 op-devstack/stack/match/archive.go delete mode 100644 op-devstack/stack/match/core.go delete mode 100644 op-devstack/stack/match/doc.go delete mode 100644 op-devstack/stack/match/engine.go delete mode 100644 op-devstack/stack/match/first.go delete mode 100644 op-devstack/stack/match/gate.go delete mode 100644 op-devstack/stack/match/gate_test.go delete mode 100644 op-devstack/stack/match/interop.go delete mode 100644 op-devstack/stack/match/labels.go delete mode 100644 op-devstack/stack/match/second.go delete mode 100644 op-devstack/stack/match/sequencer.go delete mode 100644 
op-devstack/stack/match/util.go delete mode 100644 op-devstack/stack/match/util_test.go delete mode 100644 op-devstack/stack/matcher.go delete mode 100644 op-devstack/stack/orchestrator.go delete mode 100644 op-devstack/stack/registry.go delete mode 100644 op-devstack/stack/registry_test.go delete mode 100644 op-devstack/stack/system.go delete mode 100644 op-devstack/sysext/addrbook.go delete mode 100644 op-devstack/sysext/control_plane.go delete mode 100644 op-devstack/sysext/helpers.go delete mode 100644 op-devstack/sysext/l1.go delete mode 100644 op-devstack/sysext/l2.go delete mode 100644 op-devstack/sysext/orchestrator.go delete mode 100644 op-devstack/sysext/system.go delete mode 100644 op-devstack/sysgo/cluster.go create mode 100644 op-devstack/sysgo/component_target.go delete mode 100644 op-devstack/sysgo/control_plane.go delete mode 100644 op-devstack/sysgo/control_plane_test.go create mode 100644 op-devstack/sysgo/default_chain_ids.go delete mode 100644 op-devstack/sysgo/faucet.go delete mode 100644 op-devstack/sysgo/keys.go delete mode 100644 op-devstack/sysgo/l1_nodes_subprocess.go create mode 100644 op-devstack/sysgo/l1_runtime.go create mode 100644 op-devstack/sysgo/l2_conductor.go delete mode 100644 op-devstack/sysgo/l2_metrics_dashboard_test.go delete mode 100644 op-devstack/sysgo/l2_network_superchain_registry.go create mode 100644 op-devstack/sysgo/mixed_runtime.go create mode 100644 op-devstack/sysgo/multichain_proofs.go create mode 100644 op-devstack/sysgo/multichain_supernode_runtime.go create mode 100644 op-devstack/sysgo/multichain_supervisor_runtime.go delete mode 100644 op-devstack/sysgo/orchestrator.go delete mode 100644 op-devstack/sysgo/orchestrator_getters.go create mode 100644 op-devstack/sysgo/preset_config.go create mode 100644 op-devstack/sysgo/runtime_state.go create mode 100644 op-devstack/sysgo/singlechain_build.go create mode 100644 op-devstack/sysgo/singlechain_flashblocks.go create mode 100644 
op-devstack/sysgo/singlechain_interop.go create mode 100644 op-devstack/sysgo/singlechain_runtime.go create mode 100644 op-devstack/sysgo/singlechain_variants.go create mode 100644 op-devstack/sysgo/supervisor_kona.go delete mode 100644 op-devstack/sysgo/system.go delete mode 100644 op-devstack/sysgo/system_singlechain_multinode.go delete mode 100644 op-devstack/sysgo/system_singlechain_twoverifiers.go delete mode 100644 op-devstack/sysgo/system_synctester.go delete mode 100644 op-devstack/sysgo/system_synctester_ext.go delete mode 100644 op-devstack/sysgo/system_test.go delete mode 100644 op-devstack/sysgo/system_two_l2_follow_l2.go delete mode 100644 op-devstack/sysgo/test_sequencer.go create mode 100644 op-devstack/sysgo/world.go delete mode 100644 rust/kona/tests/node/common/init_test.go create mode 100644 rust/kona/tests/node/common/setup_test.go rename rust/kona/tests/node/long-running/{init_test.go => helpers_test.go} (76%) delete mode 100644 rust/kona/tests/node/reorgs/init_test.go create mode 100644 rust/kona/tests/node/restart/helpers_test.go delete mode 100644 rust/kona/tests/node/restart/init_test.go create mode 100644 rust/kona/tests/node/restart/setup_test.go create mode 100644 rust/kona/tests/node/utils/package_scope.go create mode 100644 rust/kona/tests/node/utils/test_sequencer_preset_test.go create mode 100644 rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go create mode 100644 rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go create mode 100644 rust/kona/tests/supervisor/pre_interop/helpers_test.go create mode 100644 rust/kona/tests/supervisor/pre_interop/post_test.go create mode 100644 rust/kona/tests/supervisor/pre_interop/pre_test.go delete mode 100644 rust/op-reth/crates/tests/devnets/opgeth-seq-opreth-val.yaml delete mode 100644 rust/op-reth/crates/tests/devnets/opreth-seq-opgeth-val.yaml delete mode 100644 rust/op-reth/crates/tests/proofs/core/init_test.go delete mode 100644 
rust/op-reth/crates/tests/proofs/prune/init_test.go delete mode 100644 rust/op-reth/crates/tests/proofs/reorg/init_test.go diff --git a/.circleci/config.yml b/.circleci/config.yml index 8638b5d6acea5..3131d497ba60b 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -50,9 +50,6 @@ parameters: heavy_fuzz_dispatch: type: boolean default: false - sync_test_op_node_dispatch: - type: boolean - default: false ai_contracts_test_dispatch: type: boolean default: false @@ -113,7 +110,6 @@ workflows: .* c-stale_check_dispatch << pipeline.parameters.stale_check_dispatch >> .circleci/continue/main.yml .* c-contracts_coverage_dispatch << pipeline.parameters.contracts_coverage_dispatch >> .circleci/continue/main.yml .* c-heavy_fuzz_dispatch << pipeline.parameters.heavy_fuzz_dispatch >> .circleci/continue/main.yml - .* c-sync_test_op_node_dispatch << pipeline.parameters.sync_test_op_node_dispatch >> .circleci/continue/main.yml .* c-ai_contracts_test_dispatch << pipeline.parameters.ai_contracts_test_dispatch >> .circleci/continue/main.yml .* c-github-event-type << pipeline.parameters.github-event-type >> .circleci/continue/main.yml .* c-github-event-action << pipeline.parameters.github-event-action >> .circleci/continue/main.yml diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index e4af363bda8f9..42df66bd9b8e2 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -44,9 +44,6 @@ parameters: c-heavy_fuzz_dispatch: type: boolean default: false - c-sync_test_op_node_dispatch: - type: boolean - default: false c-ai_contracts_test_dispatch: type: boolean default: false @@ -2014,133 +2011,12 @@ jobs: - notify-failures-on-develop: mentions: "<>" - op-acceptance-sync-tests-docker: - parameters: - gate: - description: The gate to run the acceptance tests against. This gate should be defined in op-acceptance-tests/acceptance-tests.yaml. 
- type: string - default: "" - l2_cl_kind: - description: "L2 consensus layer client (op-node or kona)" - type: string - default: "op-node" - l2_el_kind: - description: "L2 execution layer client (op-geth or op-reth)" - type: string - default: "op-geth" - no_output_timeout: - description: Timeout for when CircleCI kills the job if there's no output - type: string - default: 30m - # Optional sync test configuration parameters - network_preset: - description: Network preset - type: string - default: "" - l2_cl_syncmode: - description: L2 CL Sync mode - can be EL Sync or CL Sync - type: string - default: "" - resource_class: xlarge - docker: - - image: <> - circleci_ip_ranges: true - steps: - - utils/checkout-with-mise: - checkout-method: blobless - enable-mise-cache: true - # Restore cached Go modules - - restore_cache: - keys: - - go-mod-v1-{{ checksum "go.sum" }} - - go-mod-v1- - # Download Go dependencies - - run: - name: Download Go dependencies - working_directory: op-acceptance-tests - command: go mod download - - run: - name: Lint/Vet/Build op-acceptance-tests/cmd - working_directory: op-acceptance-tests - command: | - just cmd-check - # Persist schedule name into env var - - run: - name: Persist schedule name into env var - command: | - echo 'export CIRCLECI_PIPELINE_SCHEDULE_NAME="<< pipeline.schedule.name >>"' >> $BASH_ENV - echo 'export CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH="<< pipeline.parameters.c-sync_test_op_node_dispatch >>"' >> $BASH_ENV - - run: - name: Configure L2 stack - command: | - echo "export DEVSTACK_L2CL_KIND=<>" >> "$BASH_ENV" - echo "export DEVSTACK_L2EL_KIND=<>" >> "$BASH_ENV" - # Run the acceptance tests - - run: - name: Run acceptance tests (gate=<>) - working_directory: op-acceptance-tests - no_output_timeout: 1h - environment: - GOFLAGS: "-mod=mod" - GO111MODULE: "on" - GOGC: "0" - # Optional sync test configuration environment variables (only set if parameters are provided) - NETWORK_PRESET: "<>" - L2_CL_SYNCMODE: "<>" - 
command: | - # Run the tests - LOG_LEVEL=debug just acceptance-test "" "<>" - - run: - name: Print results (summary) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/summary.log" || true - - run: - name: Print results (failures) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/failed/*.log" || true - when: on_fail - - run: - name: Print results (all) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/all.log" || true - - run: - name: Generate JUnit XML test report for CircleCI - working_directory: op-acceptance-tests - when: always - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - gotestsum --junitfile results/results.xml --raw-command cat $LOG_DIR/raw_go_events.log || true - # Save the module cache for future runs - - save_cache: - key: go-mod-v1-{{ checksum "go.sum" }} - paths: - - "/go/pkg/mod" - # Store test results and artifacts - - when: - condition: always - steps: - - store_test_results: - path: ./op-acceptance-tests/results - - when: - condition: always - steps: - - store_artifacts: - path: ./op-acceptance-tests/logs - - notify-failures-on-develop: - mentions: "@changwan <@U08L5U8070U>" # @changwan @Anton Evangelatov - op-acceptance-tests: parameters: gate: description: The gate to run the acceptance tests against. This gate should be defined in op-acceptance-tests/acceptance-tests.yaml. type: string - default: "" + default: "base" l2_cl_kind: description: "L2 consensus layer client (op-node or kona)" type: string @@ -2149,6 +2025,10 @@ jobs: description: "L2 execution layer client (op-geth or op-reth)" type: string default: "op-geth" + run_all: + description: When true, run all tests in gateless mode. 
+ type: boolean + default: false no_output_timeout: description: Timeout for when CircleCI kills the job if there's no output type: string @@ -2200,16 +2080,17 @@ jobs: command: go test -v -c -o /dev/null $(go list -f '{{if .TestGoFiles}}{{.ImportPath}}{{end}}' ./tests/...) # Run the acceptance tests (if the devnet is running) - run: - name: Run acceptance tests (gate=<>) + name: Run acceptance tests working_directory: op-acceptance-tests no_output_timeout: 1h command: | - if [[ "<>" == "" ]]; then + if <>; then echo "Running in gateless mode - auto-discovering all tests in ./op-acceptance-tests/..." + LOG_LEVEL=info just acceptance-test-all else echo "Running in gate mode (gate=<>)" + LOG_LEVEL=info just acceptance-test "<>" fi - LOG_LEVEL=info just acceptance-test "" "<>" - run: name: Print results (summary) working_directory: op-acceptance-tests @@ -2326,7 +2207,6 @@ jobs: --flake-shake \ --flake-shake-iterations "$FLAKE_SHAKE_ITERATIONS" \ --log.level debug \ - --orchestrator sysgo \ --logdir "./$OUTPUT_DIR" - persist_to_workspace: root: op-acceptance-tests @@ -3330,7 +3210,7 @@ workflows: # IN-MEMORY (all) - op-node/op-geth - op-acceptance-tests: name: memory-all-opn-op-geth - gate: "" # Empty gate = gateless mode + run_all: true no_output_timeout: 120m # Allow longer runs for memory-all gate context: - circleci-repo-readonly-authenticated-github-token @@ -3723,40 +3603,6 @@ workflows: context: - circleci-repo-optimism - scheduled-sync-test-op-node: - when: - or: - - equal: [build_daily, <>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.c-sync_test_op_node_dispatch >>] - jobs: - - contracts-bedrock-build: # needed for sysgo tests - build_args: --skip test - context: - - circleci-repo-readonly-authenticated-github-token - - cannon-prestate: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - - op-acceptance-sync-tests-docker: - name: "sync-test-<>-daily-<>" - gate: 
sync-test-op-node - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - slack - requires: - - contracts-bedrock-build - - cannon-prestate - matrix: - parameters: - network_preset: - [ - "op-sepolia", - "unichain-sepolia", - "op-mainnet", - ] - l2_cl_syncmode: ["consensus-layer", "execution-layer"] - scheduled-heavy-fuzz-tests: when: or: diff --git a/PR.md b/PR.md new file mode 100644 index 0000000000000..dd51d0e764af4 --- /dev/null +++ b/PR.md @@ -0,0 +1,92 @@ +# PR1: Flashblocks Runtime Constructor (No Orchestrator Path) + +## Summary + +This PR makes `presets.NewSingleChainWithFlashblocks(...)` run through a real constructor DAG instead of the legacy orchestrator/system wiring path. + +It is the first concrete preset migration where: + +- the preset no longer calls `DefaultSingleChainSystemWithFlashblocks()`, +- construction happens via direct hierarchical constructor calls in `sysgo`, and +- acceptance tests still consume the same preset API surface. + +## What Changed + +### 1) New runtime for flashblocks + +Added: + +- `op-devstack/sysgo/flashblocks_runtime.go` + +This runtime now constructs and boots the flashblocks test target directly: + +1. build L1/L2 intent world, +2. start L1 (EL + fake beacon CL), +3. start sequencer EL, +4. start builder EL (`op-rbuilder`), +5. wire EL P2P peering, +6. start rollup-boost, +7. start sequencer CL (`op-node`), +8. start faucet service for L1 and L2. + +The runtime exports topology + endpoint data needed by presets (L1/L2 configs, deployment, node RPCs, flashblocks WS URLs, faucet endpoints). + +### 2) `NewSingleChainWithFlashblocks` now uses the runtime + +Updated: + +- `op-devstack/presets/flashblocks.go` + +Changes: + +- `NewSingleChainWithFlashblocks` now instantiates `sysgo.NewFlashblocksRuntime(...)`. +- It assembles DSL/shim frontends directly from runtime references. +- It no longer routes through orchestrator/system constructor chains. 
+- It rejects orchestrator options for this preset (`opts` must be empty). + +### 3) Removed dead flashblocks orchestrator adapter + +Updated: + +- `op-devstack/presets/sysgo_runtime.go` + +Removed: + +- `singleChainWithFlashblocksRuntime` type +- `singleChainWithFlashblocksRuntimeFromOrchestrator(...)` + +This deletes the now-unused preset-specific flashblocks orchestrator hydration path. + +### 4) Added runtime test-sequencer startup + +The runtime now starts an in-process test-sequencer service directly (no orchestrator path), configures L1 + L2 sequencing backends, and exports: + +- admin RPC endpoint, +- JWT secret, +- per-chain control RPC endpoints. + +The preset wires this into `dsl.TestSequencer` via the existing frontend constructor. + +### 5) Flashblocks tests are back to strict test-sequencer usage + +Updated: + +- `op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go` + +`driveViaTestSequencer(...)` now requires the test-sequencer to exist again (fallback removed), so the test behavior matches the prior deterministic sequencing model. + +## Validation + +Executed: + +- `go test ./op-devstack/sysgo -run '^$'` +- `go test ./op-devstack/presets -run '^$'` +- `go test ./op-acceptance-tests/tests/flashblocks -count=1` + +All passed. + +## PR2 Proposal + +1. Move shared constructor primitives into a dedicated package (e.g. runtime builders for L1/L2/faucet/sequencer services). +2. Migrate next preset(s) to runtime assembly (`minimal`, then `base/conductor` path). +3. Start deleting flashblocks legacy sysgo constructor plumbing in `system.go` once no call-sites remain. 
diff --git a/op-acceptance-tests/README.md b/op-acceptance-tests/README.md index 3b81565d39837..11e1ac0df909a 100644 --- a/op-acceptance-tests/README.md +++ b/op-acceptance-tests/README.md @@ -19,29 +19,18 @@ This process helps maintain high-quality standards across all networks in the OP ## Architecture -The acceptance testing system supports two orchestrator modes: +The acceptance testing system uses `sysgo` in-process orchestration: ### **sysgo (In-process)** - **Use case**: Fast, isolated testing without external dependencies - **Benefits**: Quick startup, no external infrastructure needed - **Dependencies**: None (pure Go services) -### **sysext (External)** -- **Use case**: Testing against Kurtosis-managed devnets or persistent networks -- **Benefits**: Testing against realistic network conditions -- **Dependencies**: Docker, Kurtosis (for Kurtosis devnets) - -The system automatically selects the appropriate orchestrator based on your usage pattern. - ## Dependencies ### Basic Dependencies * Mise (install as instructed in CONTRIBUTING.md) -### Additional Dependencies (for external devnet testing) -* Docker -* Kurtosis - Dependencies are managed using the repo-wide `mise` config. Run `mise install` at the repo root to install `op-acceptor` and other tools. ## Usage @@ -50,41 +39,37 @@ Dependencies are managed using the repo-wide `mise` config. 
Run `mise install` a ```bash # Run in-process tests (fast, no external dependencies) -just acceptance-test "" base - -# Run against Kurtosis devnets (requires Docker + Kurtosis) -just acceptance-test simple base -just acceptance-test interop interop +just acceptance-test base ``` ### Available Commands ```bash -# Default: Run tests against simple devnet with base gate +# Default: run in-process tests with base gate just -# Run specific devnet and gate combinations -just acceptance-test +# Run a specific gate +just acceptance-test -# Use specific op-acceptor version -ACCEPTOR_VERSION=v1.0.0 just acceptance-test "" base +# Run all tests (gateless) +just acceptance-test-all ``` ### Direct CLI Usage -You can also run the acceptance test wrapper directly: +You can also run `op-acceptor` directly: ```bash cd op-acceptance-tests -# In-process testing (sysgo orchestrator) -go run cmd/main.go --orchestrator sysgo --gate base --testdir .. --validators ./acceptance-tests.yaml --acceptor op-acceptor - -# External devnet testing (sysext orchestrator) -go run cmd/main.go --orchestrator sysext --devnet simple --gate base --testdir .. --validators ./acceptance-tests.yaml --kurtosis-dir ../kurtosis-devnet --acceptor op-acceptor - -# Remote network testing -go run cmd/main.go --orchestrator sysext --devnet "kt://my-network" --gate base --testdir .. --validators ./acceptance-tests.yaml --acceptor op-acceptor +# In-process testing +op-acceptor \ + --gate base \ + --testdir .. \ + --validators ./acceptance-tests.yaml \ + --log.level info \ + --allow-skips \ + --exclude-gates flake-shake ``` ## Development Usage @@ -95,43 +80,13 @@ For rapid test development, use in-process testing: ```bash cd op-acceptance-tests -# Not providing a network uses the sysgo orchestrator (in-memory network) which is faster and easier to iterate with. -just acceptance-test "" base +just acceptance-test base ``` -### Testing Against External Devnets - -For integration testing against realistic networks: - -1. 
**Automated approach** (rebuilds devnet each time): - ```bash - just acceptance-test interop interop - ``` - -2. **Manual approach** (once-off) - ```bash - cd op-acceptance-tests - # This spins up a devnet, then runs op-acceptor - go run cmd/main.go --orchestrator sysext --devnet "interop" --gate interop --testdir .. --validators ./acceptance-tests.yaml - ``` - -3. **Manual approach** (faster for repeated testing): - ```bash - # Deploy devnet once - cd kurtosis-devnet - just isthmus-devnet - - # Run tests multiple times against the same devnet - cd op-acceptance-tests - # This runs op-acceptor (devnet spin up is skipped due to `--reuse-devnet`) - go run cmd/main.go --orchestrator sysext --devnet "interop" --gate interop --testdir .. --validators ./acceptance-tests.yaml --reuse-devnet - ``` - ### Configuration - `acceptance-tests.yaml`: Defines the validation gates and the suites and tests that should be run for each gate. - `justfile`: Contains the commands for running the acceptance tests. -- `cmd/main.go`: Wrapper binary that handles orchestrator selection and devnet management. 
### Logging Configuration @@ -189,16 +144,14 @@ op-acceptor \ --validators ./acceptance-tests.yaml \ --gate flake-shake \ --flake-shake \ - --flake-shake-iterations 10 \ - --orchestrator sysgo + --flake-shake-iterations 10 # Run with more iterations for thorough testing op-acceptor \ --validators ./acceptance-tests.yaml \ --gate flake-shake \ --flake-shake \ - --flake-shake-iterations 100 \ - --orchestrator sysgo + --flake-shake-iterations 100 ``` ### Adding Tests to Flake-Shake @@ -261,16 +214,13 @@ For rapid development and testing: ```bash cd op-acceptance-tests -# Run all tests (sysgo gateless mode) - most comprehensive coverage -just acceptance-test "" "" +# Run all tests (gateless mode) - most comprehensive coverage +just acceptance-test-all # Run specific gate-based tests (traditional mode) -just acceptance-test "" base # In-process (sysgo) with gate -just acceptance-test simple base # External devnet (sysext) with gate +just acceptance-test base ``` -Using an empty gate (`""`) triggers gateless mode with the sysgo orchestrator, auto-discovering all tests. - ## Further Information For more details about `op-acceptor` and the acceptance testing process, refer to the main documentation or ask the team for guidance. diff --git a/op-acceptance-tests/acceptance-tests.yaml b/op-acceptance-tests/acceptance-tests.yaml index 504083590a455..1a3fea772fee0 100644 --- a/op-acceptance-tests/acceptance-tests.yaml +++ b/op-acceptance-tests/acceptance-tests.yaml @@ -121,7 +121,7 @@ gates: timeout: 10m - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/ecotone timeout: 10m - # TODO(infra#401): Re-enable the test when the sysext missing toolset is implemented + # TODO(infra#401): Re-enable the test once the remaining infra gap is resolved. 
#- package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/withdrawal # timeout: 10m - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/fjord @@ -180,12 +180,6 @@ gates: - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/flashblocks timeout: 5m - - id: sync-test-op-node - description: "Sync tests for op-node with external networks via the op-sync-tester - tests run daily." - tests: - - package: github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el - timeout: 30m - - id: cgt description: "Custom Gas Token (CGT) network tests." tests: diff --git a/op-acceptance-tests/cmd/main.go b/op-acceptance-tests/cmd/main.go deleted file mode 100644 index 6bb15b918e8df..0000000000000 --- a/op-acceptance-tests/cmd/main.go +++ /dev/null @@ -1,315 +0,0 @@ -package main - -import ( - "context" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/telemetry" - "github.com/honeycombio/otel-config-go/otelconfig" - "github.com/urfave/cli/v2" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -const ( - // Default values - defaultDevnet = "" // empty string means use 'sysgo' orchestrator (in-memory Go devnet) - defaultGate = "holocene" - defaultAcceptor = "op-acceptor" -) - -// AcceptorConfig holds all configuration for running op-acceptor -type AcceptorConfig struct { - Orchestrator string - Devnet string - Gate string - TestDir string - Validators string - LogLevel string - Acceptor string - Serial bool - ShowProgress bool -} - -var ( - // Command line flags - orchestratorFlag = &cli.StringFlag{ - Name: "orchestrator", - Usage: "Orchestrator type: 'sysgo' (in-process) or 'sysext' (external devnet)", - Value: "sysext", - EnvVars: []string{"ORCHESTRATOR"}, - Required: false, - } - devnetFlag = &cli.StringFlag{ - Name: "devnet", - Usage: "Devnet specification: name (e.g. 
'isthmus' → 'kt://isthmus-devnet'), URL (e.g. 'kt://isthmus-devnet'), or file path (e.g. '/path/to/persistent-devnet-env.json'). Ignored when orchestrator=sysgo.", - Value: "", - EnvVars: []string{"DEVNET"}, - } - gateFlag = &cli.StringFlag{ - Name: "gate", - Usage: "The gate to use", - Value: defaultGate, - EnvVars: []string{"GATE"}, - } - testDirFlag = &cli.StringFlag{ - Name: "testdir", - Usage: "Path to the test directory", - Required: true, - EnvVars: []string{"TEST_DIR"}, - } - validatorsFlag = &cli.StringFlag{ - Name: "validators", - Usage: "Path to the validators YAML file", - Required: true, - EnvVars: []string{"VALIDATORS"}, - } - logLevelFlag = &cli.StringFlag{ - Name: "log.level", - Usage: "Log level for op-acceptor", - Value: "debug", - EnvVars: []string{"LOG_LEVEL"}, - } - kurtosisDirFlag = &cli.StringFlag{ - Name: "kurtosis-dir", - Usage: "Path to the kurtosis-devnet directory (required for Kurtosisnets)", - Required: false, - EnvVars: []string{"KURTOSIS_DIR"}, - } - acceptorFlag = &cli.StringFlag{ - Name: "acceptor", - Usage: "Path to the op-acceptor binary", - Value: defaultAcceptor, - EnvVars: []string{"ACCEPTOR"}, - } - reuseDevnetFlag = &cli.BoolFlag{ - Name: "reuse-devnet", - Usage: "Reuse the devnet if it already exists (only applies to Kurtosisnets)", - Value: false, - EnvVars: []string{"REUSE_DEVNET"}, - } - serialFlag = &cli.BoolFlag{ - Name: "serial", - Usage: "Run the acceptance tests in serial mode", - Value: false, - EnvVars: []string{"SERIAL"}, - } - showProgressFlag = &cli.BoolFlag{ - Name: "show-progress", - Usage: "Show progress information during test execution", - Value: false, - EnvVars: []string{"SHOW_PROGRESS"}, - } -) - -func main() { - app := &cli.App{ - Name: "op-acceptance-test", - Usage: "Run Optimism acceptance tests", - Flags: []cli.Flag{ - orchestratorFlag, - devnetFlag, - gateFlag, - testDirFlag, - validatorsFlag, - logLevelFlag, - kurtosisDirFlag, - acceptorFlag, - reuseDevnetFlag, - serialFlag, - showProgressFlag, - 
}, - Action: runAcceptanceTest, - } - - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func runAcceptanceTest(c *cli.Context) error { - // Get command line arguments - orchestrator := c.String(orchestratorFlag.Name) - devnet := c.String(devnetFlag.Name) - gate := c.String(gateFlag.Name) - testDir := c.String(testDirFlag.Name) - validators := c.String(validatorsFlag.Name) - logLevel := c.String(logLevelFlag.Name) - kurtosisDir := c.String(kurtosisDirFlag.Name) - acceptor := c.String(acceptorFlag.Name) - reuseDevnet := c.Bool(reuseDevnetFlag.Name) - serial := c.Bool(serialFlag.Name) - showProgress := c.Bool(showProgressFlag.Name) - - // Validate inputs based on orchestrator type - if orchestrator != "sysgo" && orchestrator != "sysext" { - return fmt.Errorf("orchestrator must be 'sysgo' or 'sysext', got: %s", orchestrator) - } - - if orchestrator == "sysext" && devnet == "" { - return fmt.Errorf("devnet is required when orchestrator=sysext") - } - - // We need kurtosis-dir for devnet deployment when: - // 1. Using sysext orchestrator with a devnet - // 2. The devnet is a simple name (not a full URL) - // 3. 
We're not reusing an existing devnet - isSimpleName := devnet != "" && !strings.HasPrefix(devnet, "kt://") && !strings.HasPrefix(devnet, "ktnative://") && !strings.HasPrefix(devnet, "/") - needsDeployment := orchestrator == "sysext" && isSimpleName && !reuseDevnet - if needsDeployment && kurtosisDir == "" { - return fmt.Errorf("kurtosis-dir is required for Kurtosis devnet deployment") - } - - // Get the absolute path of the test directory - absTestDir, err := filepath.Abs(testDir) - if err != nil { - return fmt.Errorf("failed to get absolute path of test directory: %w", err) - } - - // Get the absolute path of the validators file - absValidators, err := filepath.Abs(validators) - if err != nil { - return fmt.Errorf("failed to get absolute path of validators file: %w", err) - } - - // Get the absolute path of the kurtosis directory (only if provided) - var absKurtosisDir string - if kurtosisDir != "" { - absKurtosisDir, err = filepath.Abs(kurtosisDir) - if err != nil { - return fmt.Errorf("failed to get absolute path of kurtosis directory: %w", err) - } - } - - ctx := c.Context - ctx, shutdown, err := telemetry.SetupOpenTelemetry( - ctx, - otelconfig.WithServiceName("op-acceptance-tests"), - ) - if err != nil { - return fmt.Errorf("failed to setup OpenTelemetry: %w", err) - } - defer shutdown() - - tracer := otel.Tracer("op-acceptance-tests") - ctx, span := tracer.Start(ctx, "op-acceptance-tests") - defer span.End() - - steps := []func(ctx context.Context) error{} - - // Deploy devnet if needed (simple name devnets only, when not reusing) - if needsDeployment { - steps = append(steps, - func(ctx context.Context) error { - return deployDevnet(ctx, tracer, devnet, absKurtosisDir) - }, - ) - } - - // Run acceptance tests - steps = append(steps, - func(ctx context.Context) error { - config := AcceptorConfig{ - Orchestrator: orchestrator, - Devnet: devnet, - Gate: gate, - TestDir: absTestDir, - Validators: absValidators, - LogLevel: logLevel, - Acceptor: acceptor, - 
Serial: serial, - ShowProgress: showProgress, - } - return runOpAcceptor(ctx, tracer, config) - }, - ) - - for _, step := range steps { - if err := step(ctx); err != nil { - return fmt.Errorf("failed to run step: %w", err) - } - } - - return nil -} - -func deployDevnet(ctx context.Context, tracer trace.Tracer, devnet string, kurtosisDir string) error { - ctx, span := tracer.Start(ctx, "deploy devnet") - defer span.End() - - env := telemetry.InstrumentEnvironment(ctx, os.Environ()) - // Kurtosis recipes follow the pattern: -devnet - devnetRecipe := fmt.Sprintf("%s-devnet", devnet) - devnetCmd := exec.CommandContext(ctx, "just", devnetRecipe) - devnetCmd.Dir = kurtosisDir - devnetCmd.Stdout = os.Stdout - devnetCmd.Stderr = os.Stderr - devnetCmd.Env = env - if err := devnetCmd.Run(); err != nil { - return fmt.Errorf("failed to deploy devnet: %w", err) - } - return nil -} - -func runOpAcceptor(ctx context.Context, tracer trace.Tracer, config AcceptorConfig) error { - ctx, span := tracer.Start(ctx, "run acceptance test") - defer span.End() - - env := telemetry.InstrumentEnvironment(ctx, os.Environ()) - - // Build the command arguments - args := []string{ - "--testdir", config.TestDir, - "--gate", config.Gate, - "--validators", config.Validators, - "--log.level", config.LogLevel, - "--orchestrator", config.Orchestrator, - } - if config.Serial { - args = append(args, "--serial") - } - if config.ShowProgress { - args = append(args, "--show-progress") - args = append(args, "--progress-interval", "20s") - } - - // Handle devnet parameter based on orchestrator type - if config.Orchestrator == "sysext" && config.Devnet != "" { - var devnetEnvURL string - - if strings.HasPrefix(config.Devnet, "kt://") || strings.HasPrefix(config.Devnet, "ktnative://") { - // Already a URL or file path - use directly - devnetEnvURL = config.Devnet - } else { - // Simple name - wrap as Kurtosis URL - devnetEnvURL = fmt.Sprintf("kt://%s-devnet", config.Devnet) - } - - args = append(args, 
"--devnet-env-url", devnetEnvURL) - } - - // For sysgo, we allow skips - if config.Orchestrator == "sysgo" { - args = append(args, "--allow-skips") - } - - // Exclude quarantined tests by default in all runs except when explicitly running the flake-shake gate - if config.Gate != "flake-shake" { - args = append(args, "--exclude-gates", "flake-shake") - } - - acceptorCmd := exec.CommandContext(ctx, config.Acceptor, args...) - acceptorCmd.Env = env - acceptorCmd.Stdout = os.Stdout - acceptorCmd.Stderr = os.Stderr - - if err := acceptorCmd.Run(); err != nil { - return fmt.Errorf("failed to run acceptance test: %w", err) - } - return nil -} diff --git a/op-acceptance-tests/justfile b/op-acceptance-tests/justfile index befa509184c05..5547fb3c13d3a 100644 --- a/op-acceptance-tests/justfile +++ b/op-acceptance-tests/justfile @@ -6,43 +6,53 @@ ACCEPTOR_IMAGE := env_var_or_default("ACCEPTOR_IMAGE", DOCKER_REGISTRY + "/op-ac # Default recipe - runs acceptance tests default: - @just acceptance-test "" base + @just acceptance-test base jovian: - @just acceptance-test jovian jovian + @just acceptance-test jovian interop: - @just acceptance-test "" interop + @just acceptance-test interop cgt: - @just acceptance-test "" cgt + @just acceptance-test cgt +acceptance-test-all: + @just _acceptance-test base all # Run acceptance tests with mise-managed binary -# Usage: just acceptance-test [devnet] [gate] +# Usage: just acceptance-test [gate] # Examples: -# just acceptance-test "" base # In-process (sysgo) with specific gate -# just acceptance-test "" "" # In-process gateless mode (all tests) -# just acceptance-test "simple" base # External devnet with specific gate -# just acceptance-test "simple" "" # External devnet gateless mode (all tests) -acceptance-test devnet="" gate="base": +# just acceptance-test base # In-process with specific gate +# just acceptance-test-all # In-process all-tests mode +acceptance-test gate="base": + @just _acceptance-test "{{gate}}" gate + 
+_acceptance-test gate mode: #!/usr/bin/env bash set -euo pipefail - # Determine mode and orchestrator - GATELESS_MODE=$([[ "{{gate}}" == "" ]] && echo "true" || echo "false") - ORCHESTRATOR=$([[ "{{devnet}}" == "" ]] && echo "sysgo" || echo "sysext") + MODE="{{mode}}" + + if [[ "$MODE" != "gate" && "$MODE" != "all" ]]; then + echo "error: invalid mode '$MODE' (expected gate|all)." >&2 + exit 1 + fi + + if [[ "$MODE" == "gate" && "{{gate}}" == "" ]]; then + echo "error: gate must be non-empty for gate mode; use 'just acceptance-test-all' for all tests." >&2 + exit 1 + fi - # Display mode information - if [[ "$GATELESS_MODE" == "true" ]]; then - echo -e "DEVNET: $([[ "$ORCHESTRATOR" == "sysgo" ]] && echo "in-memory" || echo "{{devnet}}") ($ORCHESTRATOR), MODE: gateless (all tests)\n" + if [[ "$MODE" == "all" ]]; then + echo -e "DEVNET: in-memory, MODE: all tests\n" else - echo -e "DEVNET: $([[ "$ORCHESTRATOR" == "sysgo" ]] && echo "in-memory" || echo "{{devnet}}") ($ORCHESTRATOR), GATE: {{gate}}\n" + echo -e "DEVNET: in-memory, GATE: {{gate}}\n" fi - # Build dependencies for sysgo (in-process) mode if not in CI + # Build dependencies for in-process mode if not in CI. # In CI jobs already take care of this, so we skip it. - if [[ "$ORCHESTRATOR" == "sysgo" && -z "${CIRCLECI:-}" ]]; then + if [[ -z "${CIRCLECI:-}" ]]; then echo "Building contracts (local build)..." cd {{REPO_ROOT}} echo " - Updating submodules..." @@ -70,97 +80,53 @@ acceptance-test devnet="" gate="base": cd {{REPO_ROOT}}/op-acceptance-tests - # Check mise installation and fallback to Docker if needed if ! command -v mise >/dev/null; then - echo "Mise not installed, falling back to Docker..." - just acceptance-test-docker {{devnet}} {{gate}} - exit 0 + echo "error: mise is required for acceptance-test runs." >&2 + exit 1 fi - # Install op-acceptor using mise if ! mise install op-acceptor; then - echo "WARNING: Failed to install op-acceptor with mise, falling back to Docker..." 
- just acceptance-test-docker {{devnet}} {{gate}} - exit 0 + echo "error: failed to install op-acceptor with mise." >&2 + exit 1 fi - # Set binary path and log level BINARY_PATH=$(mise which op-acceptor) echo "Using mise-managed binary: $BINARY_PATH" LOG_LEVEL="$(echo "${LOG_LEVEL:-info}" | grep -E '^(debug|info|warn|error)$' || echo 'info')" echo "LOG_LEVEL: $LOG_LEVEL" - # Deploy devnet for sysext if it's a simple name - if [[ "$ORCHESTRATOR" == "sysext" && ! "{{devnet}}" =~ ^(kt://|ktnative://|/) ]]; then - echo "Deploying devnet {{devnet}}..." - just {{KURTOSIS_DIR}}/{{devnet}}-devnet || true - fi - - # Build command arguments based on mode - if [[ "$GATELESS_MODE" == "true" ]]; then - # Gateless mode - use binary directly + if [[ "$MODE" == "all" ]]; then CMD_ARGS=( "$BINARY_PATH" + "--orchestrator" "sysgo" "--testdir" "{{REPO_ROOT}}/op-acceptance-tests/..." "--validators" "./acceptance-tests.yaml" + "--log.level" "${LOG_LEVEL}" "--exclude-gates" "flake-shake" "--allow-skips" "--timeout" "120m" - "--orchestrator" "$ORCHESTRATOR" "--show-progress" ) else - # Gate mode - use go run with acceptor binary CMD_ARGS=( - "go" "run" "cmd/main.go" + "$BINARY_PATH" + "--orchestrator" "sysgo" "--gate" "{{gate}}" "--testdir" "{{REPO_ROOT}}" "--validators" "./acceptance-tests.yaml" - "--acceptor" "$BINARY_PATH" "--log.level" "${LOG_LEVEL}" - "--orchestrator" "$ORCHESTRATOR" + "--allow-skips" "--show-progress" ) - fi - # Add sysext-specific arguments - if [[ "$ORCHESTRATOR" == "sysext" ]]; then - CMD_ARGS+=("--devnet" "{{devnet}}" "--kurtosis-dir" "{{KURTOSIS_DIR}}" "--serial") + if [[ "{{gate}}" != "flake-shake" ]]; then + CMD_ARGS+=("--exclude-gates" "flake-shake") + fi fi - # Execute the command "${CMD_ARGS[@]}" - - -# Run acceptance tests against a devnet using Docker (fallback if needed) -acceptance-test-docker devnet="simple" gate="base": - #!/usr/bin/env bash - set -euo pipefail - - echo -e "DEVNET: {{devnet}}, GATE: {{gate}}\n" - - # First run the appropriate devnet 
from the kurtosis-devnet directory if needed. - just {{KURTOSIS_DIR}}/{{ devnet }}-devnet - - # Print which image is being used (for debugging) - echo "Using acceptor image: {{ACCEPTOR_IMAGE}}" - - # Run op-acceptor with the repository mounted at the correct Go module path - docker run \ - -v "$(pwd)/acceptance-tests.yaml:/acceptance-tests.yaml" \ - -v "{{REPO_ROOT}}:/go/src/github.com/ethereum-optimism/optimism" \ - {{ACCEPTOR_IMAGE}} \ - --testdir "/go/src/github.com/ethereum-optimism/optimism" \ - --gate {{gate}} \ - --validators /acceptance-tests.yaml \ - $( [[ "{{gate}}" != "flake-shake" ]] && echo --exclude-gates flake-shake ) \ - --log.level debug - - - clean: - kurtosis clean --all rm -rf tests/interop/loadtest/artifacts diff --git a/op-acceptance-tests/tests/base/advance_test.go b/op-acceptance-tests/tests/base/advance_test.go index 2c37e4bd4ad21..7852774b0e0e0 100644 --- a/op-acceptance-tests/tests/base/advance_test.go +++ b/op-acceptance-tests/tests/base/advance_test.go @@ -10,7 +10,7 @@ import ( ) func TestCLAdvance(gt *testing.T) { - t := devtest.ParallelT(gt) + t := devtest.SerialT(gt) sys := presets.NewMinimal(t) tracer := t.Tracer() ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/base/chain/init_test.go b/op-acceptance-tests/tests/base/chain/init_test.go deleted file mode 100644 index c1ecc9ea1af35..0000000000000 --- a/op-acceptance-tests/tests/base/chain/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package chain - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - ) -} diff --git a/op-acceptance-tests/tests/base/conductor/init_test.go b/op-acceptance-tests/tests/base/conductor/init_test.go deleted file mode 100644 index 1740b4e464408..0000000000000 --- a/op-acceptance-tests/tests/base/conductor/init_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package conductor - -import ( - "testing" - - 
"github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMinimalWithConductors()) -} diff --git a/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go b/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go index 985d9760c2864..af1ab12a22fb1 100644 --- a/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go +++ b/op-acceptance-tests/tests/base/conductor/leadership_transfer_test.go @@ -3,7 +3,6 @@ package conductor import ( "context" "fmt" - "strings" "testing" "time" @@ -11,7 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" @@ -31,6 +29,7 @@ func TestConductorLeadershipTransfer(gt *testing.T) { tracer := t.Tracer() ctx := t.Ctx() logger.Info("Started Conductor Leadership Transfer test") + require.NotEmpty(t, sys.ConductorSets, "expected at least one L2 conductor set") ctx, span := tracer.Start(ctx, "test chains") defer span.End() @@ -40,6 +39,7 @@ func TestConductorLeadershipTransfer(gt *testing.T) { // Test all L2 chains in the system for l2Chain, conductors := range sys.ConductorSets { + require.NotEmpty(t, conductors, "expected conductors in L2 chain", "chainId", l2Chain.String()) chainId := l2Chain.String() _, span = tracer.Start(ctx, fmt.Sprintf("test chain %s", chainId)) @@ -50,8 +50,7 @@ func TestConductorLeadershipTransfer(gt *testing.T) { idToConductor := make(map[string]conductorWithInfo) for _, conductor := range conductors { - conductorId := strings.TrimPrefix(conductor.String(), stack.KindConductor.String()+"-") - idToConductor[conductorId] = conductorWithInfo{conductor, consensus.ServerInfo{}} + 
idToConductor[conductor.String()] = conductorWithInfo{conductor, consensus.ServerInfo{}} } for _, memberInfo := range membership.Servers { conductor, ok := idToConductor[memberInfo.ID] diff --git a/op-acceptance-tests/tests/base/deposit/init_test.go b/op-acceptance-tests/tests/base/deposit/init_test.go deleted file mode 100644 index 22f5bb598421d..0000000000000 --- a/op-acceptance-tests/tests/base/deposit/init_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package deposit - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMinimal()) -} diff --git a/op-acceptance-tests/tests/base/eth_simulate_test.go b/op-acceptance-tests/tests/base/eth_simulate_test.go index 4bfaa808d98ad..5c35984ccaeb4 100644 --- a/op-acceptance-tests/tests/base/eth_simulate_test.go +++ b/op-acceptance-tests/tests/base/eth_simulate_test.go @@ -10,7 +10,7 @@ import ( ) func TestEthSimulateV1(gt *testing.T) { - t := devtest.ParallelT(gt) + t := devtest.SerialT(gt) sys := presets.NewMinimal(t) ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/base/init_test.go b/op-acceptance-tests/tests/base/init_test.go deleted file mode 100644 index bbea98422c0c7..0000000000000 --- a/op-acceptance-tests/tests/base/init_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package base - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMinimal()) -} diff --git a/op-acceptance-tests/tests/base/public_rpc_advance_test.go b/op-acceptance-tests/tests/base/public_rpc_advance_test.go index fd3ba51bf7a5b..ce30ca15e75d1 100644 --- a/op-acceptance-tests/tests/base/public_rpc_advance_test.go +++ b/op-acceptance-tests/tests/base/public_rpc_advance_test.go @@ -9,7 +9,7 @@ import ( ) func TestPublicRpcAdvance(gt *testing.T) { - t := devtest.ParallelT(gt) + t := 
devtest.SerialT(gt) sys := presets.NewMinimal(t) sys.L2Chain.PublicRPC().Advanced(eth.Unsafe, 5) diff --git a/op-acceptance-tests/tests/base/withdrawal/cannon/init_test.go b/op-acceptance-tests/tests/base/withdrawal/cannon/init_test.go deleted file mode 100644 index 693c20e9253b5..0000000000000 --- a/op-acceptance-tests/tests/base/withdrawal/cannon/init_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package withdrawal - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/withdrawal" - gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" -) - -func TestMain(m *testing.M) { - withdrawal.InitWithGameType(m, gameTypes.CannonGameType) -} diff --git a/op-acceptance-tests/tests/base/withdrawal/cannon_kona/init_test.go b/op-acceptance-tests/tests/base/withdrawal/cannon_kona/init_test.go deleted file mode 100644 index 80e12341b1960..0000000000000 --- a/op-acceptance-tests/tests/base/withdrawal/cannon_kona/init_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package withdrawal - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/withdrawal" - gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" -) - -func TestMain(m *testing.M) { - withdrawal.InitWithGameType(m, gameTypes.CannonKonaGameType) -} diff --git a/op-acceptance-tests/tests/base/withdrawal/permissioned/init_test.go b/op-acceptance-tests/tests/base/withdrawal/permissioned/init_test.go deleted file mode 100644 index 0017d530c31be..0000000000000 --- a/op-acceptance-tests/tests/base/withdrawal/permissioned/init_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package withdrawal - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/base/withdrawal" - gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" -) - -func TestMain(m *testing.M) { - withdrawal.InitWithGameType(m, gameTypes.PermissionedGameType) -} diff --git 
a/op-acceptance-tests/tests/base/withdrawal/withdrawal_test_helper.go b/op-acceptance-tests/tests/base/withdrawal/withdrawal_test_helper.go index c7d087104d4cd..6ff415cc71705 100644 --- a/op-acceptance-tests/tests/base/withdrawal/withdrawal_test_helper.go +++ b/op-acceptance-tests/tests/base/withdrawal/withdrawal_test_helper.go @@ -4,30 +4,42 @@ import ( "testing" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" - "github.com/ethereum-optimism/optimism/op-devstack/compat" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + ps "github.com/ethereum-optimism/optimism/op-proposer/proposer" "github.com/ethereum-optimism/optimism/op-service/eth" ) -func InitWithGameType(m *testing.M, gameType gameTypes.GameType) { - presets.DoMain(m, - presets.WithCompatibleTypes(compat.SysGo), - presets.WithMinimal(), - presets.WithTimeTravel(), - presets.WithFinalizationPeriodSeconds(1), - // Satisfy OptimismPortal2 PROOF_MATURITY_DELAY_SECONDS check, avoid OptimismPortal_ProofNotOldEnough() revert - presets.WithProofMaturityDelaySeconds(2), - // Satisfy AnchorStateRegistry DISPUTE_GAME_FINALITY_DELAY_SECONDS check, avoid OptimismPortal_InvalidRootClaim() revert - presets.WithDisputeGameFinalityDelaySeconds(2), - presets.WithAddedGameType(gameType), - presets.WithRespectedGameType(gameType), - ) +func withdrawalOpts(gameType gameTypes.GameType) []presets.Option { + opts := []presets.Option{ + presets.WithTimeTravelEnabled(), + presets.WithDeployerOptions( + sysgo.WithFinalizationPeriodSeconds(1), + // Satisfy OptimismPortal2 PROOF_MATURITY_DELAY_SECONDS check, avoid OptimismPortal_ProofNotOldEnough() revert + sysgo.WithProofMaturityDelaySeconds(2), + // Satisfy AnchorStateRegistry DISPUTE_GAME_FINALITY_DELAY_SECONDS check, avoid OptimismPortal_InvalidRootClaim() revert + sysgo.WithDisputeGameFinalityDelaySeconds(2), + ), + 
presets.WithGameTypeAdded(gameType), + presets.WithRespectedGameTypeOverride(gameType), + presets.WithProposerOption(func(_ sysgo.ComponentTarget, cfg *ps.CLIConfig) { + cfg.DisputeGameType = uint32(gameType) + }), + } + if gameType == gameTypes.CannonKonaGameType { + opts = append(opts, presets.WithChallengerCannonKonaEnabled()) + } + return opts +} + +func newSystem(t devtest.T, gameType gameTypes.GameType) *presets.Minimal { + return presets.NewMinimal(t, withdrawalOpts(gameType)...) } func TestWithdrawal(gt *testing.T, gameType gameTypes.GameType) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newSystem(t, gameType) require := sys.T.Require() bridge := sys.StandardBridge() diff --git a/op-acceptance-tests/tests/batcher/batcher_test.go b/op-acceptance-tests/tests/batcher/batcher_test.go index 40c4fb698b7ac..11caedd99a177 100644 --- a/op-acceptance-tests/tests/batcher/batcher_test.go +++ b/op-acceptance-tests/tests/batcher/batcher_test.go @@ -5,11 +5,12 @@ import ( "time" "github.com/davecgh/go-spew/spew" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txplan" @@ -23,16 +24,36 @@ func TestBatcherFullChannelsAfterDowntime(gt *testing.T) { gt.Skip("Skipping test until we fix nonce too high error: tx: 177 state: 176") t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithTestSeq(t) + opts := presets.Combine( + // Keep verifier EL-sync 
behavior and no-discovery from the old package-level TestMain. + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.VerifierSyncMode = sync.ELSync + cfg.NoDiscovery = true + })), + // The test advances L1/L2 time aggressively, so enable orchestrator time travel. + presets.WithTimeTravelEnabled(), + presets.WithBatcherOption(func(id sysgo.ComponentTarget, cfg *bss.CLIConfig) { + cfg.Stopped = true + + // Set the blob max size to 40_000 bytes for test purposes. + cfg.MaxL1TxSize = 40_000 + cfg.TestUseMaxTxSizeForBlobs = true + + cfg.PollInterval = 1000 * time.Millisecond + + cfg.MaxChannelDuration = 50 + cfg.MaxPendingTransactions = 7 + }), + ) + sys := presets.NewSingleChainMultiNodeWithTestSeq(t, opts...) + sys.AdvanceTime(0) // assert time-travel support is available in this constructor path l := t.Logger() ts_L2 := sys.TestSequencer.Escape().ControlAPI(sys.L2EL.ChainID()) alice := sys.FunderL2.NewFundedEOA(eth.OneWei) cathrine := sys.FunderL2.NewFundedEOA(eth.OneTenthEther) - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - - sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) + sys.L1CL.Stop() latestUnsafe_A := sys.L2CL.StopSequencer() l.Info("Latest unsafe block after stopping the L2 sequencer", "latestUnsafe", latestUnsafe_A) @@ -61,7 +82,7 @@ func TestBatcherFullChannelsAfterDowntime(gt *testing.T) { sys.L2CL.StartSequencer() l.Info("Current L1 unsafe block", "currentL1Unsafe", sys.L1EL.BlockRefByLabel(eth.Unsafe)) - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) + sys.L1CL.Start() sys.L2Batcher.Start() diff --git a/op-acceptance-tests/tests/batcher/init_test.go b/op-acceptance-tests/tests/batcher/init_test.go deleted file mode 100644 index a7f4e665550d5..0000000000000 --- a/op-acceptance-tests/tests/batcher/init_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package batcher - -import ( - "testing" - "time" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - 
"github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithNoDiscovery(), - presets.WithTimeTravel(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - cfg.Stopped = true - - // set the blob max size to 40_000 bytes for test purposes - cfg.MaxL1TxSize = 40_000 - cfg.TestUseMaxTxSizeForBlobs = true - - cfg.PollInterval = 1000 * time.Millisecond - - cfg.MaxChannelDuration = 50 - cfg.MaxPendingTransactions = 7 - })), - ) -} diff --git a/op-acceptance-tests/tests/batcher/throttling/init_test.go b/op-acceptance-tests/tests/batcher/throttling/init_test.go deleted file mode 100644 index f27364fe952d8..0000000000000 --- a/op-acceptance-tests/tests/batcher/throttling/init_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package throttling - -import ( - "testing" - "time" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - batcherConfig "github.com/ethereum-optimism/optimism/op-batcher/config" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -const blockSizeLimit = 5_000 - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - // Enable throttling with step controller for predictable behavior - cfg.ThrottleConfig.LowerThreshold = 99 // > 0 enables the throttling loop. 
- cfg.ThrottleConfig.UpperThreshold = 100 - cfg.ThrottleConfig.ControllerType = batcherConfig.StepControllerType - - cfg.ThrottleConfig.BlockSizeLowerLimit = blockSizeLimit - 1 - cfg.ThrottleConfig.BlockSizeUpperLimit = blockSizeLimit - - cfg.PollInterval = 500 * time.Millisecond // Fast poll for quicker test feedback - })), - ) -} diff --git a/op-acceptance-tests/tests/batcher/throttling/throttling_test.go b/op-acceptance-tests/tests/batcher/throttling/throttling_test.go index 2aedd64be12c5..880b65be05ad1 100644 --- a/op-acceptance-tests/tests/batcher/throttling/throttling_test.go +++ b/op-acceptance-tests/tests/batcher/throttling/throttling_test.go @@ -7,22 +7,37 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop/loadtest" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + batcherConfig "github.com/ethereum-optimism/optimism/op-batcher/config" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txinclude" "github.com/ethereum-optimism/optimism/op-service/txplan" ) +const blockSizeLimit = 5_000 + // TestDABlockThrottling verifies that the execution client respects the block size limit set via // miner_setMaxDASize. It spams transactions to saturate block space and asserts that blocks are // filled to near capacity without exceeding the limit. 
func TestDABlockThrottling(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := presets.NewMinimal(t, presets.WithBatcherOption(func(_ sysgo.ComponentTarget, cfg *bss.CLIConfig) { + // Enable throttling with step controller for predictable behavior. + cfg.ThrottleConfig.LowerThreshold = 99 // > 0 enables the throttling loop. + cfg.ThrottleConfig.UpperThreshold = 100 + cfg.ThrottleConfig.ControllerType = batcherConfig.StepControllerType + + cfg.ThrottleConfig.BlockSizeLowerLimit = blockSizeLimit - 1 + cfg.ThrottleConfig.BlockSizeUpperLimit = blockSizeLimit + + cfg.PollInterval = 500 * time.Millisecond // Fast poll for quicker test feedback. + })) spamCtx, cancelSpam := context.WithCancel(t.Ctx()) defer cancelSpam() diff --git a/op-acceptance-tests/tests/custom_gas_token/cgt_introspection_test.go b/op-acceptance-tests/tests/custom_gas_token/cgt_introspection_test.go index 06a77a3878d8b..9afbb76406dc6 100644 --- a/op-acceptance-tests/tests/custom_gas_token/cgt_introspection_test.go +++ b/op-acceptance-tests/tests/custom_gas_token/cgt_introspection_test.go @@ -4,14 +4,13 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" ) // TestCGT_IntrospectionViaL1Block verifies that the L2 L1Block predeploy reports // that CGT mode is enabled and exposes non-empty token metadata (name, symbol). 
func TestCGT_IntrospectionViaL1Block(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) name, symbol := ensureCGTOrSkip(t, sys) diff --git a/op-acceptance-tests/tests/custom_gas_token/cgt_l1_portal_introspection_test.go b/op-acceptance-tests/tests/custom_gas_token/cgt_l1_portal_introspection_test.go index 173476ca3eafb..bc8838e824895 100644 --- a/op-acceptance-tests/tests/custom_gas_token/cgt_l1_portal_introspection_test.go +++ b/op-acceptance-tests/tests/custom_gas_token/cgt_l1_portal_introspection_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -18,7 +17,7 @@ import ( // a valid SystemConfig address via its systemConfig() view. func TestCGT_L1PortalIntrospection(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) // Skip if this devnet is not CGT-enabled (uses your existing gate). ensureCGTOrSkip(t, sys) diff --git a/op-acceptance-tests/tests/custom_gas_token/cgt_native_payment_test.go b/op-acceptance-tests/tests/custom_gas_token/cgt_native_payment_test.go index 2d6ab79b5a61c..33d8d5f04f1cd 100644 --- a/op-acceptance-tests/tests/custom_gas_token/cgt_native_payment_test.go +++ b/op-acceptance-tests/tests/custom_gas_token/cgt_native_payment_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -13,7 +12,7 @@ import ( // recipient +amount and sender > amount decrease (amount + gas). 
func TestCGT_ValueTransferPaysGasInToken(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) ensureCGTOrSkip(t, sys) diff --git a/op-acceptance-tests/tests/custom_gas_token/cgt_portal_reverts_test.go b/op-acceptance-tests/tests/custom_gas_token/cgt_portal_reverts_test.go index fa1b3a982554b..6969273f1cd9a 100644 --- a/op-acceptance-tests/tests/custom_gas_token/cgt_portal_reverts_test.go +++ b/op-acceptance-tests/tests/custom_gas_token/cgt_portal_reverts_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" ) @@ -15,7 +14,7 @@ import ( // (receive() -> depositTransaction) reverts under CGT, preventing ETH from getting stuck. func TestCGT_PortalReceiveReverts(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) ensureCGTOrSkip(t, sys) l1c := sys.L1EL.EthClient() diff --git a/op-acceptance-tests/tests/custom_gas_token/cgt_reverts_test.go b/op-acceptance-tests/tests/custom_gas_token/cgt_reverts_test.go index 30a2277ffafcc..e17be86d8cf1d 100644 --- a/op-acceptance-tests/tests/custom_gas_token/cgt_reverts_test.go +++ b/op-acceptance-tests/tests/custom_gas_token/cgt_reverts_test.go @@ -2,15 +2,13 @@ package custom_gas_token import ( "context" + "math/big" "testing" "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" - "math/big" - "github.com/ethereum/go-ethereum" "github.com/lmittmann/w3" ) @@ -19,7 +17,7 @@ import ( // L2CrossDomainMessenger reverts under CGT (non-payable path). 
func TestCGT_MessengerRejectsValue(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) ensureCGTOrSkip(t, sys) ctx, cancel := context.WithTimeout(t.Ctx(), 30*time.Second) @@ -41,7 +39,7 @@ func TestCGT_MessengerRejectsValue(gt *testing.T) { // ETH-specific withdraw path on L2StandardBridge reverts under CGT. func TestCGT_L2StandardBridge_LegacyWithdrawReverts(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) ensureCGTOrSkip(t, sys) ctx, cancel := context.WithTimeout(t.Ctx(), 30*time.Second) diff --git a/op-acceptance-tests/tests/custom_gas_token/cgt_systemconfig_test.go b/op-acceptance-tests/tests/custom_gas_token/cgt_systemconfig_test.go index a350b8d60f6be..bbc11de26dce2 100644 --- a/op-acceptance-tests/tests/custom_gas_token/cgt_systemconfig_test.go +++ b/op-acceptance-tests/tests/custom_gas_token/cgt_systemconfig_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -18,7 +17,7 @@ import ( // CGT=true via isCustomGasToken(). Skips if the devnet does not wire this flag. func TestCGT_SystemConfigFlagOnL1(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) ensureCGTOrSkip(t, sys) l1c := sys.L1EL.EthClient() @@ -63,7 +62,7 @@ func TestCGT_SystemConfigFlagOnL1(gt *testing.T) { // using locally encoded calls (mirrors the previous test structure). Skips on devnets without the flag. func TestCGT_SystemConfigFeatureFlag(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newCGTMinimal(t) // Skip if not in CGT mode (uses L2 L1Block.isCustomGasToken()). 
ensureCGTOrSkip(t, sys) diff --git a/op-acceptance-tests/tests/custom_gas_token/helpers.go b/op-acceptance-tests/tests/custom_gas_token/helpers.go index f353e192e5a7c..fad3f91968f1f 100644 --- a/op-acceptance-tests/tests/custom_gas_token/helpers.go +++ b/op-acceptance-tests/tests/custom_gas_token/helpers.go @@ -3,10 +3,12 @@ package custom_gas_token import ( "context" + "math/big" "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum/go-ethereum" "github.com/ethereum/go-ethereum/common" @@ -23,6 +25,20 @@ var ( l2BridgeAddr = common.HexToAddress("0x4200000000000000000000000000000000000010") ) +func cgtOpts() []presets.Option { + // Create a CGT-enabled devnet with 1M tokens of liquidity. + liq := new(big.Int).Mul(big.NewInt(1_000_000), big.NewInt(1e18)) + return []presets.Option{ + presets.WithDeployerOptions( + sysgo.WithCustomGasToken("Custom Gas Token", "CGT", liq, common.Address{}), + ), + } +} + +func newCGTMinimal(t devtest.T) *presets.Minimal { + return presets.NewMinimal(t, cgtOpts()...) +} + // isCGTEnabled checks if CGT mode is enabled without skipping the test. // Returns true if CGT is enabled, false if native ETH mode, and false if the check fails. 
func isCGTEnabled(t devtest.T, sys *presets.Minimal) bool { diff --git a/op-acceptance-tests/tests/custom_gas_token/init_test.go b/op-acceptance-tests/tests/custom_gas_token/init_test.go deleted file mode 100644 index 30291aa205c3f..0000000000000 --- a/op-acceptance-tests/tests/custom_gas_token/init_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package custom_gas_token - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - // Create a CGT-enabled devnet with 1M tokens of liquidity - liq := new(big.Int).Mul(big.NewInt(1_000_000), big.NewInt(1e18)) // 1M tokens * 18 decimals - - presets.DoMain(m, - presets.WithMinimal(), - stack.MakeCommon(sysgo.WithDeployerOptions( - sysgo.WithCustomGasToken("Custom Gas Token", "CGT", liq, common.Address{}), - )), - ) -} diff --git a/op-acceptance-tests/tests/depreqres/common/common.go b/op-acceptance-tests/tests/depreqres/common/common.go index 42ac2185fb6b6..5e77615a4dc50 100644 --- a/op-acceptance-tests/tests/depreqres/common/common.go +++ b/op-acceptance-tests/tests/depreqres/common/common.go @@ -4,15 +4,73 @@ import ( "testing" "time" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testreq" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +func syncModeOpt(syncMode sync.Mode) presets.Option { + return presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + 
func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + if syncMode == sync.CLSync { + cfg.SequencerSyncMode = sync.CLSync + } + cfg.VerifierSyncMode = syncMode + })) +} + +func reqRespSyncDisabledOpt() presets.Option { + return presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.EnableReqRespSync = false + cfg.UseReqRespSync = false + })) +} + +func syncModeReqRespSyncOpt() presets.Option { + return presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.UseReqRespSync = true + })) +} + +func noDiscoveryOpt() presets.Option { + return presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.NoDiscovery = true + })) +} + +func batcherStoppedOpt() presets.Option { + return presets.WithBatcherOption(func(_ sysgo.ComponentTarget, cfg *bss.CLIConfig) { + cfg.Stopped = true + }) +} + +func ReqRespSyncDisabledOpts(syncMode sync.Mode) []presets.Option { + return []presets.Option{ + syncModeOpt(syncMode), + reqRespSyncDisabledOpt(), + noDiscoveryOpt(), + batcherStoppedOpt(), + } +} + +func SyncModeReqRespSyncOpts(syncMode sync.Mode) []presets.Option { + return []presets.Option{ + syncModeOpt(syncMode), + syncModeReqRespSyncOpt(), + noDiscoveryOpt(), + batcherStoppedOpt(), + } +} + // stableSyncStatus returns the sync status of node after any in-flight gossip messages // have been drained. 
DisconnectPeer closes the libp2p connection but a buffered gossip // payload can still arrive and be processed via AddUnsafePayload (SyncModeReqResp=true @@ -29,9 +87,9 @@ func stableSyncStatus(require *testreq.Assertions, node *dsl.L2CLNode) *eth.Sync return ss } -func UnsafeChainNotStalling_Disconnect(gt *testing.T, syncMode sync.Mode, sleep time.Duration) { +func UnsafeChainNotStalling_Disconnect(gt *testing.T, syncMode sync.Mode, sleep time.Duration, opts ...presets.Option) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := presets.NewSingleChainMultiNodeWithoutCheck(t, opts...) require := t.Require() l := t.Logger().With("syncmode", syncMode) @@ -72,9 +130,9 @@ func UnsafeChainNotStalling_Disconnect(gt *testing.T, syncMode sync.Mode, sleep sys.L2ELB.Reached(eth.Unsafe, ssA_after.UnsafeL2.Number, 30) } -func UnsafeChainNotStalling_RestartOpNode(gt *testing.T, syncMode sync.Mode, sleep time.Duration) { +func UnsafeChainNotStalling_RestartOpNode(gt *testing.T, syncMode sync.Mode, sleep time.Duration, opts ...presets.Option) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := presets.NewSingleChainMultiNodeWithoutCheck(t, opts...) require := t.Require() l := t.Logger().With("syncmode", syncMode) diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/clsync_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/clsync_test.go index 1080f90f4b23c..f09fe4e442ced 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/clsync_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/clsync_test.go @@ -9,13 +9,13 @@ import ( ) func TestUnsafeChainNotStalling_CLSync_Short(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 20*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 20*time.Second, common.ReqRespSyncDisabledOpts(sync.CLSync)...) 
} func TestUnsafeChainNotStalling_CLSync_Long(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 95*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 95*time.Second, common.ReqRespSyncDisabledOpts(sync.CLSync)...) } func TestUnsafeChainNotStalling_CLSync_RestartOpNode_Long(gt *testing.T) { - common.UnsafeChainNotStalling_RestartOpNode(gt, sync.CLSync, 95*time.Second) + common.UnsafeChainNotStalling_RestartOpNode(gt, sync.CLSync, 95*time.Second, common.ReqRespSyncDisabledOpts(sync.CLSync)...) } diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go deleted file mode 100644 index 1463cb3bbeabb..0000000000000 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/clsync/init_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package clsync - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithConsensusLayerSync(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithReqRespSyncDisabled(), - presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - cfg.Stopped = true - })), - ) -} diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/depreqres_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/depreqres_test.go index 37fccdbbb92b4..fe5412fe7b57b 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/depreqres_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/depreqres_test.go @@ -3,16 +3,18 @@ package depreqres 
import ( "testing" + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/depreqres/common" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) func TestUnsafeChainNotStalling_DisabledReqRespSync(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := presets.NewSingleChainMultiNodeWithoutCheck(t, common.ReqRespSyncDisabledOpts(sync.ELSync)...) // We don't want the safe head to move, as this can also progress the unsafe head sys.L2Batcher.Stop() l := t.Logger() diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go index 48c4b5d2f99c9..ce60a83251c4c 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/divergence/divergence_test.go @@ -2,35 +2,21 @@ package divergence import ( "testing" + "time" - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/depreqres/common" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum" ) -func 
TestMain(m *testing.M) { - // No ELP2P, CLP2P to control the supply of unsafe payload to the CL - presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithReqRespSyncDisabled(), - presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - cfg.Stopped = true - })), - ) -} - // TestCLELDivergence tests that the CL and EL diverge when the CL advances the unsafe head, due to accepting SYNCING response from the EL, but the EL cannot validate the block (yet), does not canonicalize it, and doesn't serve it. func TestCLELDivergence(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := presets.NewSingleChainMultiNodeWithoutP2PWithoutCheck(t, common.ReqRespSyncDisabledOpts(sync.ELSync)...) require := t.Require() l := t.Logger() @@ -49,19 +35,22 @@ func TestCLELDivergence(gt *testing.T) { for _, delta := range []uint64{3, 4, 5} { targetNumber := startNum + delta + targetBlock := sys.L2EL.BlockRefByNumber(targetNumber) + l.Info("Sending payload ", "target", targetNumber, "startNum", startNum) sys.L2CLB.SignalTarget(sys.L2EL, targetNumber) // Canonical unsafe head never advances because of the gap require.Equal(startNum+1, sys.L2ELB.BlockRefByLabel(eth.Unsafe).Number) - // Unsafe head on CL advanced, but on EL we cannot fetch state for the unsafe block hash yet - targetBlock := sys.L2EL.BlockRefByNumber(targetNumber) - - // Confirm that L2CLB SyncStatus returns the newest unsafe block number and hash - ss := sys.L2CLB.SyncStatus() - require.Equal(targetNumber, ss.UnsafeL2.Number) - require.Equal(targetBlock.Hash, ss.UnsafeL2.Hash) + // EL-sync can quickly reset the status tracker after exposing the posted + // unsafe head, so poll tightly and without extra RPCs between the post and + // the SyncStatus check. 
+ var ss *eth.SyncStatus + require.Eventually(func() bool { + ss = sys.L2CLB.SyncStatus() + return ss.UnsafeL2.Number == targetNumber && ss.UnsafeL2.Hash == targetBlock.Hash + }, 2*time.Second, 10*time.Millisecond, "L2CLB unsafe head did not expose target block") // Confirm that L2ELB cannot fetch the block by hash yet, because the block is not canonicalized, even though the CL reference is set to it. _, err := sys.L2ELB.Escape().L2EthClient().L2BlockRefByHash(t.Ctx(), ss.UnsafeL2.Hash) diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/elsync_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/elsync_test.go index 09bdb325de538..aa1cddce4a61e 100644 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/elsync_test.go +++ b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/elsync_test.go @@ -9,13 +9,13 @@ import ( ) func TestUnsafeChainNotStalling_ELSync_Short(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 20*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 20*time.Second, common.ReqRespSyncDisabledOpts(sync.ELSync)...) } func TestUnsafeChainNotStalling_ELSync_Long(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 95*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 95*time.Second, common.ReqRespSyncDisabledOpts(sync.ELSync)...) } func TestUnsafeChainNotStalling_ELSync_RestartOpNode_Long(gt *testing.T) { - common.UnsafeChainNotStalling_RestartOpNode(gt, sync.ELSync, 95*time.Second) + common.UnsafeChainNotStalling_RestartOpNode(gt, sync.ELSync, 95*time.Second, common.ReqRespSyncDisabledOpts(sync.ELSync)...) 
} diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go deleted file mode 100644 index 47899a6ab8380..0000000000000 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/elsync/init_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package elsync - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithReqRespSyncDisabled(), - presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - cfg.Stopped = true - })), - ) -} diff --git a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go b/op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go deleted file mode 100644 index 41bc97589e3aa..0000000000000 --- a/op-acceptance-tests/tests/depreqres/reqressyncdisabled/init_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package depreqres - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithReqRespSyncDisabled(), - presets.WithNoDiscovery(), - 
stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - cfg.Stopped = true - })), - ) -} diff --git a/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/clsync_test.go b/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/clsync_test.go index 1080f90f4b23c..b6ca0bf73a89c 100644 --- a/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/clsync_test.go +++ b/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/clsync_test.go @@ -9,13 +9,13 @@ import ( ) func TestUnsafeChainNotStalling_CLSync_Short(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 20*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 20*time.Second, common.SyncModeReqRespSyncOpts(sync.CLSync)...) } func TestUnsafeChainNotStalling_CLSync_Long(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 95*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.CLSync, 95*time.Second, common.SyncModeReqRespSyncOpts(sync.CLSync)...) } func TestUnsafeChainNotStalling_CLSync_RestartOpNode_Long(gt *testing.T) { - common.UnsafeChainNotStalling_RestartOpNode(gt, sync.CLSync, 95*time.Second) + common.UnsafeChainNotStalling_RestartOpNode(gt, sync.CLSync, 95*time.Second, common.SyncModeReqRespSyncOpts(sync.CLSync)...) 
} diff --git a/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go b/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go deleted file mode 100644 index b7a96209aa3cb..0000000000000 --- a/op-acceptance-tests/tests/depreqres/syncmodereqressync/clsync/init_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package clsync - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithConsensusLayerSync(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithSyncModeReqRespSync(), - presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - cfg.Stopped = true - })), - ) -} diff --git a/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/elsync_test.go b/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/elsync_test.go index 09bdb325de538..90a1818303450 100644 --- a/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/elsync_test.go +++ b/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/elsync_test.go @@ -9,13 +9,13 @@ import ( ) func TestUnsafeChainNotStalling_ELSync_Short(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 20*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 20*time.Second, common.SyncModeReqRespSyncOpts(sync.ELSync)...) } func TestUnsafeChainNotStalling_ELSync_Long(gt *testing.T) { - common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 95*time.Second) + common.UnsafeChainNotStalling_Disconnect(gt, sync.ELSync, 95*time.Second, common.SyncModeReqRespSyncOpts(sync.ELSync)...) 
} func TestUnsafeChainNotStalling_ELSync_RestartOpNode_Long(gt *testing.T) { - common.UnsafeChainNotStalling_RestartOpNode(gt, sync.ELSync, 95*time.Second) + common.UnsafeChainNotStalling_RestartOpNode(gt, sync.ELSync, 95*time.Second, common.SyncModeReqRespSyncOpts(sync.ELSync)...) } diff --git a/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go b/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go deleted file mode 100644 index b35036a13c4be..0000000000000 --- a/op-acceptance-tests/tests/depreqres/syncmodereqressync/elsync/init_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package elsync - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithSyncModeReqRespSync(), - presets.WithNoDiscovery(), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - cfg.Stopped = true - })), - ) -} diff --git a/op-acceptance-tests/tests/ecotone/init_test.go b/op-acceptance-tests/tests/ecotone/init_test.go deleted file mode 100644 index 43f8df195d299..0000000000000 --- a/op-acceptance-tests/tests/ecotone/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package ecotone - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - ) -} diff --git a/op-acceptance-tests/tests/fjord/init_test.go b/op-acceptance-tests/tests/fjord/init_test.go deleted file mode 100644 index c66034f06f195..0000000000000 --- 
a/op-acceptance-tests/tests/fjord/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package fjord - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - ) -} diff --git a/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go b/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go index debdac098c73e..53b3e34404c8f 100644 --- a/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go +++ b/op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go @@ -53,7 +53,6 @@ func TestFlashblocksStream(gt *testing.T) { logger.Info("Flashblocks stream rate", "rate", flashblocksStreamRateMs) - // Test all L2 chains in the system oprbuilderNode := sys.L2OPRBuilder rollupBoostNode := sys.L2RollupBoost _, span = tracer.Start(ctx, "test chain") @@ -64,15 +63,11 @@ func TestFlashblocksStream(gt *testing.T) { DriveViaTestSequencer(t, sys, 3) - // Test the presence / absence of a flashblocks stream operating at a 250ms rate from a flashblocks-websocket-proxy node. - // Allow a generous window for first flashblocks to appear. testDuration := time.Duration(int64(flashblocksStreamRateMs*maxExpectedFlashblocks*2)) * time.Millisecond - // Allow up to 15% of expected flashblocks to be missing due to timing variations failureTolerance := int(0.15 * float64(maxExpectedFlashblocks)) logger.Debug("Test duration", "duration", testDuration, "failure tolerance (of flashblocks)", failureTolerance) - // Instrument builder stream separately to confirm flashblocks emission upstream. 
builderOutput := make(chan []byte, maxExpectedFlashblocks) defer close(builderOutput) builderDone := make(chan struct{}) @@ -158,13 +153,12 @@ func evaluateFlashblocksStream(t devtest.T, logger log.Logger, streamedMessages require.Greater(t, lastIndex, -1, "some bug: last index should be greater than -1 by now") require.Greater(t, currentIndex, -1, "some bug: current index should be greater than -1 by now") - // same block number, just the flashblock incremented if currentBlockNumber == lastBlockNumber { require.Greater(t, currentIndex, lastIndex, "some bug: current index should be greater than last index from the stream") totalFlashblocksProduced += (currentIndex - lastIndex) - } else if currentBlockNumber > lastBlockNumber { // new block number - totalFlashblocksProduced += (currentIndex + 1) // assuming it's a new block number whose flashblocks begin from 0th-index + } else if currentBlockNumber > lastBlockNumber { + totalFlashblocksProduced += (currentIndex + 1) } lastIndex = currentIndex diff --git a/op-acceptance-tests/tests/flashblocks/init_test.go b/op-acceptance-tests/tests/flashblocks/init_test.go deleted file mode 100644 index 6d2759e47bd8b..0000000000000 --- a/op-acceptance-tests/tests/flashblocks/init_test.go +++ /dev/null @@ -1,12 +0,0 @@ -package flashblocks - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainSystemWithFlashblocks()) -} diff --git a/op-acceptance-tests/tests/fusaka/fusaka_test.go b/op-acceptance-tests/tests/fusaka/fusaka_test.go index 2118cde85173a..42fb21c179d59 100644 --- a/op-acceptance-tests/tests/fusaka/fusaka_test.go +++ b/op-acceptance-tests/tests/fusaka/fusaka_test.go @@ -27,7 +27,7 @@ import ( func TestSafeHeadAdvancesAfterOsaka(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newMinimalFusaka(t) l1Config := 
sys.L1Network.Escape().ChainConfig() t.Log("Waiting for Osaka to activate") t.Require().NotNil(l1Config.OsakaTime) @@ -51,7 +51,7 @@ func TestSafeHeadAdvancesAfterOsaka(gt *testing.T) { func TestBlobBaseFeeIsCorrectAfterBPOFork(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newMinimalFusaka(t) t.Log("Waiting for BPO1 to activate") t.Require().NotNil(sys.L1Network.Escape().ChainConfig().BPO1Time) sys.L1EL.WaitForTime(*sys.L1Network.Escape().ChainConfig().BPO1Time) diff --git a/op-acceptance-tests/tests/fusaka/helpers.go b/op-acceptance-tests/tests/fusaka/helpers.go index cf67b7a63560f..b0c657894821e 100644 --- a/op-acceptance-tests/tests/fusaka/helpers.go +++ b/op-acceptance-tests/tests/fusaka/helpers.go @@ -12,8 +12,8 @@ import ( // ConfigureDevstackEnvVars sets the appropriate env vars to use a mise-installed geth binary for // the L1 EL. This is useful in Osaka acceptance tests since op-geth does not include full Osaka -// support. This is meant to run before presets.DoMain in a TestMain function. It will log to -// stdout. ResetDevstackEnvVars should be used to reset the environment variables when TestMain +// support. This is meant to run before constructing a devstack target in the test. It will log to +// stdout. ResetDevstackEnvVars should be used to reset the environment variables when the test // exits. 
// // Note that this is a no-op if either [sysgo.DevstackL1ELKindVar] or [sysgo.GethExecPathEnvVar] diff --git a/op-acceptance-tests/tests/fusaka/init_test.go b/op-acceptance-tests/tests/fusaka/setup_test.go similarity index 67% rename from op-acceptance-tests/tests/fusaka/init_test.go rename to op-acceptance-tests/tests/fusaka/setup_test.go index 61b00b94cb07e..16df5a29ee967 100644 --- a/op-acceptance-tests/tests/fusaka/init_test.go +++ b/op-acceptance-tests/tests/fusaka/setup_test.go @@ -1,32 +1,29 @@ package fusaka import ( - "testing" - "github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-batcher/flags" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum/go-ethereum/params/forks" ) -func TestMain(m *testing.M) { +func newMinimalFusaka(t devtest.T) *presets.Minimal { resetEnvVars := ConfigureDevstackEnvVars() - defer resetEnvVars() + t.Cleanup(resetEnvVars) - presets.DoMain(m, stack.MakeCommon(stack.Combine[*sysgo.Orchestrator]( - sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{}), - sysgo.WithDeployerOptions( + return presets.NewMinimal(t, + presets.WithDeployerOptions( sysgo.WithDefaultBPOBlobSchedule, // Make the BPO fork happen after Osaka so we can easily use geth's eip4844.CalcBlobFee // to calculate the blob base fee using the Osaka parameters. 
sysgo.WithForkAtL1Offset(forks.Osaka, 0), sysgo.WithForkAtL1Offset(forks.BPO1, 1), ), - sysgo.WithBatcherOption(func(_ stack.ComponentID, cfg *batcher.CLIConfig) { + presets.WithBatcherOption(func(_ sysgo.ComponentTarget, cfg *batcher.CLIConfig) { cfg.DataAvailabilityType = flags.BlobsType cfg.TxMgrConfig.CellProofTime = 0 // Force cell proofs to be used }), - ))) + ) } diff --git a/op-acceptance-tests/tests/interop/contract/init_test.go b/op-acceptance-tests/tests/interop/contract/init_test.go deleted file mode 100644 index 4947d860a8099..0000000000000 --- a/op-acceptance-tests/tests/interop/contract/init_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package contract - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleInterop()) -} diff --git a/op-acceptance-tests/tests/interop/loadtest/doc.go b/op-acceptance-tests/tests/interop/loadtest/doc.go index 2ccc1b0ef9818..57eeaf1454320 100644 --- a/op-acceptance-tests/tests/interop/loadtest/doc.go +++ b/op-acceptance-tests/tests/interop/loadtest/doc.go @@ -1,4 +1,4 @@ -// Package loadtest contains interop load tests that run against sysgo and sysext networks +// Package loadtest contains interop load tests that run against sysgo networks // satisfying the SimpleInterop spec. 
// // Configure test behavior with the following environment variables: diff --git a/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go b/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go index 40801f85b0958..e1fd27cb5e812 100644 --- a/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go +++ b/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go @@ -3,7 +3,6 @@ package loadtest import ( "context" "fmt" - "log/slog" "math/big" "os" "path/filepath" @@ -18,7 +17,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/accounting" "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/log/logfilter" "github.com/ethereum-optimism/optimism/op-service/plan" "github.com/ethereum-optimism/optimism/op-service/sources/batching" "github.com/ethereum-optimism/optimism/op-service/sources/batching/rpcblock" @@ -32,16 +30,6 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleInterop(), - presets.WithLogFilter( - logfilter.DefaultMute( - logfilter.Level(slog.LevelWarn).Show(), - ), - ), - ) -} - // TODO(16371) every txintent.Call implementation should probably just be a txplan.Option. 
func planCall(t devtest.T, call txintent.Call) txplan.Option { plan := make([]txplan.Option, 0) diff --git a/op-acceptance-tests/tests/interop/message/init_test.go b/op-acceptance-tests/tests/interop/message/init_test.go deleted file mode 100644 index 246c6f1d892fd..0000000000000 --- a/op-acceptance-tests/tests/interop/message/init_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package msg - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleInterop()) -} diff --git a/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go b/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go index 391e24ba5695a..e16ab6951a75e 100644 --- a/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go +++ b/op-acceptance-tests/tests/interop/message/interop_happy_tx_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" - "github.com/ethereum-optimism/optimism/op-devstack/compat" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" @@ -50,11 +49,4 @@ func TestInteropHappyTx(gt *testing.T) { // TODO(#16598): Make this relative to the block time 500), ) - - orch := presets.Orchestrator() - // Do not print the chain on persistent devnets - if orch.Type() != compat.Persistent { - sys.L2ChainA.PrintChain() - sys.L2ChainB.PrintChain() - } } diff --git a/op-acceptance-tests/tests/interop/prep/init_test.go b/op-acceptance-tests/tests/interop/prep/init_test.go deleted file mode 100644 index ad5ad19077828..0000000000000 --- a/op-acceptance-tests/tests/interop/prep/init_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package msg - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - // Configure a system without interop 
activation. - // We just want to test if we can sync with a supervisor before interop is configured. - // The supervisor will not be indexing data yet before interop, and has to handle that interop is not scheduled. - presets.DoMain(m, presets.WithSimpleInterop(), presets.WithUnscheduledInterop()) -} diff --git a/op-acceptance-tests/tests/interop/prep/prep_test.go b/op-acceptance-tests/tests/interop/prep/prep_test.go index 7e1047f51b120..38f758dfa2101 100644 --- a/op-acceptance-tests/tests/interop/prep/prep_test.go +++ b/op-acceptance-tests/tests/interop/prep/prep_test.go @@ -3,9 +3,12 @@ package msg import ( "testing" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-core/forks" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -15,7 +18,13 @@ import ( func TestUnscheduledInterop(gt *testing.T) { gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInterop(t, presets.WithDeployerOptions( + func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { + for _, l2 := range builder.L2s() { + l2.WithForkAtOffset(forks.Interop, nil) + } + }, + )) t.Logger().Info("Checking that chain A and B can sync, even though interop is not scheduled") dsl.CheckAll(t, sys.L2CLA.AdvancedFn(types.Finalized, 5, 100), diff --git a/op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go b/op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go deleted file mode 100644 index a3563ca58af3e..0000000000000 --- a/op-acceptance-tests/tests/interop/proofs-singlechain/init_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package proofs_singlechain - -import ( - "testing" - - 
"github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSingleChainSuperInteropSupernode(), - presets.WithL2NetworkCount(1), - stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), - ) -} diff --git a/op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go b/op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go index 0d8faa8db4e7b..3e02cd30f9fc0 100644 --- a/op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go +++ b/op-acceptance-tests/tests/interop/proofs-singlechain/interop_fault_proofs_test.go @@ -10,6 +10,6 @@ import ( func TestInteropSingleChainFaultProofs(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainInterop(t) + sys := presets.NewSingleChainInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) sfp.RunSingleChainSuperFaultProofSmokeTest(t, sys) } diff --git a/op-acceptance-tests/tests/interop/proofs/challenger_test.go b/op-acceptance-tests/tests/interop/proofs/challenger_test.go index 034538a9833aa..2f8b2d1cf9111 100644 --- a/op-acceptance-tests/tests/interop/proofs/challenger_test.go +++ b/op-acceptance-tests/tests/interop/proofs/challenger_test.go @@ -16,7 +16,7 @@ import ( func TestChallengerPlaysGame(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) dsl.CheckAll(t, sys.L2CLA.AdvancedFn(types.CrossSafe, 1, 30), sys.L2CLB.AdvancedFn(types.CrossSafe, 1, 30), @@ -38,7 +38,7 @@ func TestChallengerPlaysGame(gt *testing.T) { func TestChallengerRespondsToMultipleInvalidClaims(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInteropSupernodeProofs(t, 
presets.WithChallengerCannonKonaEnabled()) dsl.CheckAll(t, sys.L2CLA.AdvancedFn(types.CrossSafe, 1, 30), sys.L2CLB.AdvancedFn(types.CrossSafe, 1, 30), @@ -61,7 +61,7 @@ func TestChallengerRespondsToMultipleInvalidClaims(gt *testing.T) { func TestChallengerRespondsToMultipleInvalidClaimsEOA(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) dsl.CheckAll(t, sys.L2CLA.AdvancedFn(types.CrossSafe, 1, 30), sys.L2CLB.AdvancedFn(types.CrossSafe, 1, 30), diff --git a/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go b/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go index ba4d858b8a76c..386b9e3982ee9 100644 --- a/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go +++ b/op-acceptance-tests/tests/interop/proofs/fpp/fpp_test.go @@ -10,7 +10,7 @@ import ( func TestFPP(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) startTimestamp := max(sys.L2ChainA.Escape().RollupConfig().TimestampForBlock(1), sys.L2ChainB.Escape().RollupConfig().TimestampForBlock(1)) endTimestamp := sys.L2ChainA.Escape().RollupConfig().TimestampForBlock(5) @@ -22,7 +22,9 @@ func TestFPP(gt *testing.T) { func TestNextSuperRootNotFound(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + // TODO(#19180): Unskip this once supernode is updated. + t.Skip("Supernode does not yet return optimistic blocks until blocks are fully validated") + sys := presets.NewSimpleInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) blockTime := sys.L2ChainA.Escape().RollupConfig().BlockTime // Need to setup situation where the next super root is not found but the next block is safe on the first chain, but not safe on the second. 
diff --git a/op-acceptance-tests/tests/interop/proofs/fpp/init_test.go b/op-acceptance-tests/tests/interop/proofs/fpp/init_test.go deleted file mode 100644 index 5219e3f5cfa47..0000000000000 --- a/op-acceptance-tests/tests/interop/proofs/fpp/init_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package fpp - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSuperInteropSupernode(), - stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), - ) -} diff --git a/op-acceptance-tests/tests/interop/proofs/init_test.go b/op-acceptance-tests/tests/interop/proofs/init_test.go deleted file mode 100644 index 4ee350a536fc7..0000000000000 --- a/op-acceptance-tests/tests/interop/proofs/init_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package proofs - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSuperInteropSupernode(), - stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), - ) -} diff --git a/op-acceptance-tests/tests/interop/proofs/proposer_test.go b/op-acceptance-tests/tests/interop/proofs/proposer_test.go index c4806c3b3615a..ed9152e0634d3 100644 --- a/op-acceptance-tests/tests/interop/proofs/proposer_test.go +++ b/op-acceptance-tests/tests/interop/proofs/proposer_test.go @@ -9,7 +9,7 @@ import ( func TestProposer(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) dgf := sys.DisputeGameFactory() diff --git a/op-acceptance-tests/tests/interop/proofs/serial/init_test.go 
b/op-acceptance-tests/tests/interop/proofs/serial/init_test.go deleted file mode 100644 index 0a8471073a80f..0000000000000 --- a/op-acceptance-tests/tests/interop/proofs/serial/init_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package serial - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSuperInteropSupernode(), - stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), - ) -} diff --git a/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go b/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go index 2071744bd86dc..0ebd26efa614f 100644 --- a/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go +++ b/op-acceptance-tests/tests/interop/proofs/serial/interop_fault_proofs_test.go @@ -10,12 +10,14 @@ import ( func TestInteropFaultProofs(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + // TODO(#19180): Unskip this once supernode is updated. 
+ t.Skip("Supernode does not yet return optimistic blocks until blocks are fully validated") + sys := presets.NewSimpleInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) sfp.RunSuperFaultProofTest(t, sys) } func TestInteropFaultProofs_ConsolidateValidCrossChainMessage(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInteropSupernodeProofs(t, presets.WithChallengerCannonKonaEnabled()) sfp.RunConsolidateValidCrossChainMessageTest(t, sys) } diff --git a/op-acceptance-tests/tests/interop/proofs/withdrawal/init_test.go b/op-acceptance-tests/tests/interop/proofs/withdrawal/init_test.go deleted file mode 100644 index d944f3fd7836a..0000000000000 --- a/op-acceptance-tests/tests/interop/proofs/withdrawal/init_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package withdrawal - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSuperInterop(), presets.WithTimeTravel()) -} diff --git a/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go b/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go index eff0185c1659e..711e6091e658c 100644 --- a/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go +++ b/op-acceptance-tests/tests/interop/proofs/withdrawal/withdrawal_test.go @@ -11,7 +11,7 @@ import ( func TestSuperRootWithdrawal(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInteropSuperProofs(t, presets.WithTimeTravelEnabled()) sys.L1Network.WaitForOnline() initialL1Balance := eth.HalfEther diff --git a/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go b/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go index b70efccc35df9..d35b58ede0987 100644 --- a/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go 
@@ -11,7 +11,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -133,7 +132,7 @@ func TestReorgInitExecMsg(gt *testing.T) { require.NoError(t, err, "Expected to be able to call L2BlockRefByHash API, but got error") nextL1Origin := parentsL1Origin.L1Origin.Number + 1 - l1Origin, err := sys.L1Network.Escape().L1ELNode(match.FirstL1EL).EthClient().InfoByNumber(ctx, nextL1Origin) + l1Origin, err := sys.L1EL.EthClient().InfoByNumber(ctx, nextL1Origin) require.NoError(t, err, "Expected to get block number %v from L1 execution client", nextL1Origin) l1OriginHash := l1Origin.Hash() diff --git a/op-acceptance-tests/tests/interop/reorgs/init_test.go b/op-acceptance-tests/tests/interop/reorgs/init_test.go deleted file mode 100644 index df1ce435a198a..0000000000000 --- a/op-acceptance-tests/tests/interop/reorgs/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package reorgs - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // Other setups may be added here, hydrated from the same orchestrator - presets.DoMain(m, presets.WithSimpleInterop()) -} diff --git a/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go b/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go index a46eb0a7de812..51e7e5ed0c6aa 100644 --- a/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/l2_reorgs_after_l1_reorg_test.go @@ -6,8 +6,6 @@ import ( 
"github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/common" @@ -65,11 +63,9 @@ func testL2ReorgAfterL1Reorg(gt *testing.T, n int, preChecks, postChecks checksF sys := presets.NewSimpleInterop(t) - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - sys.L1Network.WaitForBlock() - sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) + sys.L1CL.Stop() // sequence a few L1 and L2 blocks for range n + 1 { @@ -101,7 +97,7 @@ func testL2ReorgAfterL1Reorg(gt *testing.T, n int, preChecks, postChecks checksF sys.TestSequencer.SequenceBlock(t, sys.L1Network.ChainID(), divergence.ParentHash) // continue building on the alternative L1 chain - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) + sys.L1CL.Start() // confirm L1 reorged sys.L1EL.ReorgTriggered(divergence, 5) diff --git a/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go b/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go index 197db3f54a804..261bcbf12d8f9 100644 --- a/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go +++ b/op-acceptance-tests/tests/interop/seqwindow/expiry_test.go @@ -6,9 +6,12 @@ import ( "github.com/ethereum/go-ethereum/common" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" 
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -21,7 +24,15 @@ func TestSequencingWindowExpiry(gt *testing.T) { gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := presets.NewSimpleInterop(t, + presets.WithDeployerOptions(sysgo.WithSequencingWindow(10)), + presets.WithBatcherOption(func(id sysgo.ComponentTarget, cfg *bss.CLIConfig) { + // Span-batches during recovery don't appear to align well with the starting-point. + // It can be off by ~6 L2 blocks, possibly due to off-by-one in L1 block sync + // considerations in batcher stop or start. + cfg.BatchType = derive.SingularBatchType + }), + ) require := t.Require() alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) diff --git a/op-acceptance-tests/tests/interop/seqwindow/init_test.go b/op-acceptance-tests/tests/interop/seqwindow/init_test.go deleted file mode 100644 index b4110b7123f69..0000000000000 --- a/op-acceptance-tests/tests/interop/seqwindow/init_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package seqwindow - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSimpleInterop(), - // Short enough that we can run the test, - // long enough that the batcher can still submit something before we make things expire. - presets.WithSequencingWindow(10, 30), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - // Span-batches during recovery don't appear to align well with the starting-point. - // It can be off by ~6 L2 blocks, possibly due to off-by-one - // in L1 block sync considerations in batcher stop or start. 
- // So we end up having to encode block by block, so the full batch data does not get dropped. - cfg.BatchType = derive.SingularBatchType - }))) -} diff --git a/op-acceptance-tests/tests/interop/smoke/init_test.go b/op-acceptance-tests/tests/interop/smoke/init_test.go deleted file mode 100644 index 10ba3a9457fba..0000000000000 --- a/op-acceptance-tests/tests/interop/smoke/init_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package smoke - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMinimal()) -} diff --git a/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go b/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go deleted file mode 100644 index 5c198171bd9e2..0000000000000 --- a/op-acceptance-tests/tests/interop/sync/multisupervisor_interop/init_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package sync - -import ( - "testing" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/log/logfilter" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMultiSupervisorInterop(), - presets.WithLogFilter(logfilter.DefaultMute( - stack.KindSelector(stack.KindSupervisor).And(logfilter.Level(log.LevelInfo)).Show(), - logfilter.Level(log.LevelError).Show(), - ))) -} diff --git a/op-acceptance-tests/tests/interop/sync/simple_interop/init_test.go b/op-acceptance-tests/tests/interop/sync/simple_interop/init_test.go deleted file mode 100644 index 72d1350b9ebc4..0000000000000 --- a/op-acceptance-tests/tests/interop/sync/simple_interop/init_test.go +++ /dev/null @@ -1,11 +0,0 @@ -package sync - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleInterop()) -} 
diff --git a/op-acceptance-tests/tests/interop/upgrade-no-supervisor/init_test.go b/op-acceptance-tests/tests/interop/upgrade-no-supervisor/init_test.go deleted file mode 100644 index dffadc30f826d..0000000000000 --- a/op-acceptance-tests/tests/interop/upgrade-no-supervisor/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package upgrade - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimalInteropNoSupervisor(), - ) -} diff --git a/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go b/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go index 8083675cd36e7..2d09fa5eab2d6 100644 --- a/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go +++ b/op-acceptance-tests/tests/interop/upgrade-singlechain/crossl2inbox_test.go @@ -3,13 +3,14 @@ package upgrade import ( "testing" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-core/forks" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" ) @@ -17,10 +18,17 @@ import ( func TestPostInbox(gt *testing.T) { gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) - sys := presets.NewSingleChainInterop(t) + offset := uint64(30) + sys := presets.NewSingleChainInterop(t, presets.WithDeployerOptions( + func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { + for _, l2Cfg := range builder.L2s() { + 
l2Cfg.WithForkAtOffset(forks.Interop, &offset) + } + }, + )) devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { require := t.Require() - el := net.Escape().L2ELNode(match.FirstL2EL) + el := net.PrimaryEL() activationBlock := net.AwaitActivation(t, forks.Interop) require.NotZero(activationBlock, "must not activate interop at genesis") diff --git a/op-acceptance-tests/tests/interop/upgrade-singlechain/init_test.go b/op-acceptance-tests/tests/interop/upgrade-singlechain/init_test.go deleted file mode 100644 index ae7ba46c9c921..0000000000000 --- a/op-acceptance-tests/tests/interop/upgrade-singlechain/init_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package upgrade - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSingleChainInterop(), - presets.WithSuggestedInteropActivationOffset(30), - presets.WithInteropNotAtGenesis(), - presets.WithL2NetworkCount(1), // Specifically testing dependency set of 1 upgrade - ) -} diff --git a/op-acceptance-tests/tests/interop/upgrade/init_test.go b/op-acceptance-tests/tests/interop/upgrade/init_test.go deleted file mode 100644 index c3da262c6a52f..0000000000000 --- a/op-acceptance-tests/tests/interop/upgrade/init_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package upgrade - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSimpleInterop(), - presets.WithSuggestedInteropActivationOffset(60), - presets.WithInteropNotAtGenesis()) -} diff --git a/op-acceptance-tests/tests/interop/upgrade/post_test.go b/op-acceptance-tests/tests/interop/upgrade/post_test.go index 680639b270464..3a7e056bb98d0 100644 --- a/op-acceptance-tests/tests/interop/upgrade/post_test.go +++ b/op-acceptance-tests/tests/interop/upgrade/post_test.go @@ -14,7 +14,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" 
"github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/eth" stypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum/common" @@ -23,12 +22,12 @@ import ( func TestPostInbox(gt *testing.T) { gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInterop(t) devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { require := t.Require() activationBlock := net.AwaitActivation(t, forks.Interop) - el := net.Escape().L2ELNode(match.FirstL2EL) + el := net.PrimaryEL() implAddrBytes, err := el.EthClient().GetStorageAt(t.Ctx(), predeploys.CrossL2InboxAddr, genesis.ImplementationSlot, activationBlock.Hash.String()) require.NoError(err) @@ -43,7 +42,7 @@ func TestPostInbox(gt *testing.T) { func TestPostInteropUpgradeComprehensive(gt *testing.T) { gt.Skip("Skipping Interop Acceptance Test") t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInterop(t) require := t.Require() logger := t.Logger() diff --git a/op-acceptance-tests/tests/interop/upgrade/pre_test.go b/op-acceptance-tests/tests/interop/upgrade/pre_test.go index e058bc9fce4c1..e89ba2e0d0be3 100644 --- a/op-acceptance-tests/tests/interop/upgrade/pre_test.go +++ b/op-acceptance-tests/tests/interop/upgrade/pre_test.go @@ -15,8 +15,6 @@ import ( "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txintent" stypes 
"github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -27,7 +25,7 @@ import ( func TestPreNoInbox(gt *testing.T) { gt.Skip("Skipping Interop Acceptance Test") t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInterop(t) require := t.Require() t.Logger().Info("Starting") @@ -36,7 +34,7 @@ func TestPreNoInbox(gt *testing.T) { interopTime := net.Escape().ChainConfig().InteropTime t.Require().NotNil(interopTime) pre := net.LatestBlockBeforeTimestamp(t, *interopTime) - el := net.Escape().L2ELNode(match.FirstL2EL) + el := net.PrimaryEL() codeAddr := common.HexToAddress("0xC0D3C0d3C0D3C0d3c0d3c0D3c0D3C0d3C0D30022") implCode, err := el.EthClient().CodeAtHash(t.Ctx(), codeAddr, pre.Hash) require.NoError(err) diff --git a/op-acceptance-tests/tests/interop/upgrade/setup_test.go b/op-acceptance-tests/tests/interop/upgrade/setup_test.go new file mode 100644 index 0000000000000..38278e71aa9b7 --- /dev/null +++ b/op-acceptance-tests/tests/interop/upgrade/setup_test.go @@ -0,0 +1,22 @@ +//go:build !ci + +package upgrade + +import ( + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-core/forks" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" +) + +func newSimpleInterop(t devtest.T) *presets.SimpleInterop { + offset := uint64(60) + return presets.NewSimpleInterop(t, presets.WithDeployerOptions( + func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { + for _, l2Cfg := range builder.L2s() { + l2Cfg.WithForkAtOffset(forks.Interop, &offset) + } + }, + )) +} diff --git a/op-acceptance-tests/tests/isthmus/erc20_bridge/init_test.go b/op-acceptance-tests/tests/isthmus/erc20_bridge/init_test.go deleted file mode 100644 index 3d01653ea3141..0000000000000 --- a/op-acceptance-tests/tests/isthmus/erc20_bridge/init_test.go +++ 
/dev/null @@ -1,13 +0,0 @@ -package erc20bridge - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - ) -} diff --git a/op-acceptance-tests/tests/isthmus/operator_fee/init_test.go b/op-acceptance-tests/tests/isthmus/operator_fee/init_test.go deleted file mode 100644 index 60dfbf22aef0b..0000000000000 --- a/op-acceptance-tests/tests/isthmus/operator_fee/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package operatorfee - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - ) -} diff --git a/op-acceptance-tests/tests/isthmus/pectra/init_test.go b/op-acceptance-tests/tests/isthmus/pectra/init_test.go deleted file mode 100644 index 3664b18b2a9af..0000000000000 --- a/op-acceptance-tests/tests/isthmus/pectra/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package pectra - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - ) -} diff --git a/op-acceptance-tests/tests/isthmus/pectra/pectra_features_test.go b/op-acceptance-tests/tests/isthmus/pectra/pectra_features_test.go index 7693b2cce63a9..9a45177f71279 100644 --- a/op-acceptance-tests/tests/isthmus/pectra/pectra_features_test.go +++ b/op-acceptance-tests/tests/isthmus/pectra/pectra_features_test.go @@ -10,8 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" 
"github.com/ethereum-optimism/optimism/op-service/txplan" @@ -36,19 +34,15 @@ type testSystem struct { } func newSystem(t devtest.T) *testSystem { - system := shim.NewSystem(t) - orch := presets.Orchestrator() - orch.Hydrate(system) - - l2 := dsl.NewL2Network(system.L2Network(match.Assume(t, match.L2ChainA)), orch.ControlPlane()) + preset := presets.NewMinimal(t) + l2 := preset.L2Chain t.Require().True(l2.IsForkActive(forks.Isthmus), "Isthmus fork must be active for Pectra features") - l2EL := dsl.NewL2ELNode(l2.Escape().L2ELNode(match.WithArchive(t.Ctx())), orch.ControlPlane()) + l2EL := l2.ArchiveEL() wallet := dsl.NewRandomHDWallet(t, 30) - l2Faucet := dsl.NewFaucet(l2.Escape().Faucet(match.FirstFaucet)) return &testSystem{ - FunderL2: dsl.NewFunder(wallet, l2Faucet, l2EL), + FunderL2: dsl.NewFunder(wallet, preset.FaucetL2, l2EL), L2EL: l2EL, L2Chain: l2, } diff --git a/op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go b/op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go deleted file mode 100644 index 6cff962a06aca..0000000000000 --- a/op-acceptance-tests/tests/isthmus/preinterop-singlechain/init_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package preinterop_singlechain - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithSingleChainIsthmusSuperSupernode(), - presets.WithL2NetworkCount(1), - stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), - ) -} diff --git a/op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go b/op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go index 422bd109c68f1..50cc8d0f89ecb 100644 --- a/op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go +++ 
b/op-acceptance-tests/tests/isthmus/preinterop-singlechain/interop_fault_proofs_test.go @@ -10,6 +10,9 @@ import ( func TestPreinteropSingleChainFaultProofs(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainInterop(t) + sys := presets.NewSingleChainInteropIsthmusSuper( + t, + presets.WithChallengerCannonKonaEnabled(), + ) sfp.RunSingleChainSuperFaultProofSmokeTest(t, sys) } diff --git a/op-acceptance-tests/tests/isthmus/preinterop/challenger_test.go b/op-acceptance-tests/tests/isthmus/preinterop/challenger_test.go index 30ea9ceb0b0b7..984dad199b288 100644 --- a/op-acceptance-tests/tests/isthmus/preinterop/challenger_test.go +++ b/op-acceptance-tests/tests/isthmus/preinterop/challenger_test.go @@ -9,13 +9,12 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" ) func TestChallengerPlaysGame(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInteropPreinterop(t) dsl.CheckAll(t, sys.L2CLA.AdvancedFn(types.CrossSafe, 1, 30), sys.L2CLB.AdvancedFn(types.CrossSafe, 1, 30), diff --git a/op-acceptance-tests/tests/isthmus/preinterop/init_test.go b/op-acceptance-tests/tests/isthmus/preinterop/init_test.go deleted file mode 100644 index 6251765104e21..0000000000000 --- a/op-acceptance-tests/tests/isthmus/preinterop/init_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package preinterop - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithIsthmusSuperSupernode(), - stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), - ) -} diff --git a/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go 
b/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go index 5d0630b4a4f1c..f1f820607364b 100644 --- a/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go +++ b/op-acceptance-tests/tests/isthmus/preinterop/interop_fault_proofs_test.go @@ -5,23 +5,22 @@ import ( sfp "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/superfaultproofs" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" ) func TestPreinteropFaultProofs(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInteropPreinterop(t) sfp.RunSuperFaultProofTest(t, sys) } func TestPreinteropFaultProofs_TraceExtensionActivation(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInteropPreinterop(t) sfp.RunTraceExtensionActivationTest(t, sys) } func TestPreinteropFaultProofs_UnsafeProposal(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInteropPreinterop(t) sfp.RunUnsafeProposalTest(t, sys) } diff --git a/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go b/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go index 04c134ecc0de9..8d7cd0c771fb2 100644 --- a/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go +++ b/op-acceptance-tests/tests/isthmus/preinterop/interop_readiness_test.go @@ -104,7 +104,7 @@ func init() { func TestInteropReadiness(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInteropPreinterop(t) t.Logger().Info("Started test") diff --git a/op-acceptance-tests/tests/isthmus/preinterop/proposer_test.go b/op-acceptance-tests/tests/isthmus/preinterop/proposer_test.go index 508531038001a..ec381fe816f16 100644 --- a/op-acceptance-tests/tests/isthmus/preinterop/proposer_test.go +++ 
b/op-acceptance-tests/tests/isthmus/preinterop/proposer_test.go @@ -4,12 +4,11 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" ) func TestProposer(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleInterop(t) + sys := newSimpleInteropPreinterop(t) dgf := sys.DisputeGameFactory() diff --git a/op-acceptance-tests/tests/isthmus/preinterop/setup_test.go b/op-acceptance-tests/tests/isthmus/preinterop/setup_test.go new file mode 100644 index 0000000000000..e4bf0aa9bf922 --- /dev/null +++ b/op-acceptance-tests/tests/isthmus/preinterop/setup_test.go @@ -0,0 +1,13 @@ +package preinterop + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func newSimpleInteropPreinterop(t devtest.T) *presets.SimpleInterop { + return presets.NewSimpleInteropIsthmusSuper( + t, + presets.WithChallengerCannonKonaEnabled(), + ) +} diff --git a/op-acceptance-tests/tests/isthmus/withdrawal_root/init_test.go b/op-acceptance-tests/tests/isthmus/withdrawal_root/init_test.go deleted file mode 100644 index b158de725cd57..0000000000000 --- a/op-acceptance-tests/tests/isthmus/withdrawal_root/init_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package withdrawal - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithMinimal(), - ) -} diff --git a/op-acceptance-tests/tests/jovian/bpo2/bpo2_test.go b/op-acceptance-tests/tests/jovian/bpo2/bpo2_test.go index c6701efe93468..c4349254b9cdd 100644 --- a/op-acceptance-tests/tests/jovian/bpo2/bpo2_test.go +++ b/op-acceptance-tests/tests/jovian/bpo2/bpo2_test.go @@ -4,34 +4,33 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/fusaka" - jovian "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian" + 
"github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian/bpo2/joviantest" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum/go-ethereum/params/forks" ) -func TestMain(m *testing.M) { +func setupBPO2(t devtest.T) *presets.Minimal { resetEnvVars := fusaka.ConfigureDevstackEnvVars() - defer resetEnvVars() - presets.DoMain(m, stack.MakeCommon(stack.Combine( - sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{}), - sysgo.WithDeployerOptions( + t.Cleanup(resetEnvVars) + return presets.NewMinimal(t, + presets.WithDeployerOptions( sysgo.WithJovianAtGenesis, sysgo.WithDefaultBPOBlobSchedule, sysgo.WithForkAtL1Genesis(forks.BPO2), ), - ))) + ) } -func TestDAFootprint(t *testing.T) { - jovian.TestDAFootprint(t) +func TestDAFootprint(gt *testing.T) { + joviantest.RunDAFootprint(gt, setupBPO2) } -func TestMinBaseFee(t *testing.T) { - jovian.TestMinBaseFee(t) +func TestMinBaseFee(gt *testing.T) { + joviantest.RunMinBaseFee(gt, setupBPO2) } -func TestOperatorFee(t *testing.T) { - jovian.TestOperatorFee(t) +func TestOperatorFee(gt *testing.T) { + joviantest.RunOperatorFee(gt, setupBPO2) } diff --git a/op-acceptance-tests/tests/jovian/bpo2/joviantest/cases.go b/op-acceptance-tests/tests/jovian/bpo2/joviantest/cases.go new file mode 100644 index 0000000000000..d5e3bcf79142a --- /dev/null +++ b/op-acceptance-tests/tests/jovian/bpo2/joviantest/cases.go @@ -0,0 +1,400 @@ +package joviantest + +import ( + "context" + "crypto/rand" + "encoding/binary" + "math/big" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop/loadtest" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + opforks "github.com/ethereum-optimism/optimism/op-core/forks" + 
"github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + "github.com/ethereum-optimism/optimism/op-service/bigs" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txinclude" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" + "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/consensus/misc/eip1559" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/rlp" +) + +type SetupFn func(t devtest.T) *presets.Minimal + +type calldataSpammer struct { + eoa *loadtest.SyncEOA +} + +func newCalldataSpammer(eoa *loadtest.SyncEOA) *calldataSpammer { + return &calldataSpammer{ + eoa: eoa, + } +} + +func (s *calldataSpammer) Spam(t devtest.T) error { + data := make([]byte, 50_000) + _, err := rand.Read(data) + t.Require().NoError(err) + _, err = s.eoa.Include(t, txplan.WithTo(&common.Address{}), txplan.WithData(data)) + return err +} + +type daFootprintSystemConfig struct { + SetDAFootprintGasScalar func(scalar uint16) bindings.TypedCall[any] `sol:"setDAFootprintGasScalar"` + DAFootprintGasScalar func() bindings.TypedCall[uint16] `sol:"daFootprintGasScalar"` +} + +type daFootprintL1Block struct { + DAFootprintGasScalar func() bindings.TypedCall[uint16] `sol:"daFootprintGasScalar"` +} + +type daFootprintEnv struct { + l1Client *dsl.L1ELNode + l2Network *dsl.L2Network + l2EL *dsl.L2ELNode + systemConfig daFootprintSystemConfig + l1Block daFootprintL1Block +} + +func newDAFootprintEnv(t devtest.T, l2Network *dsl.L2Network, l1EL *dsl.L1ELNode, l2EL *dsl.L2ELNode) *daFootprintEnv { + systemConfig := 
bindings.NewBindings[daFootprintSystemConfig]( + bindings.WithClient(l1EL.EthClient()), + bindings.WithTo(l2Network.Escape().Deployment().SystemConfigProxyAddr()), + bindings.WithTest(t), + ) + + l1Block := bindings.NewBindings[daFootprintL1Block]( + bindings.WithClient(l2EL.Escape().EthClient()), + bindings.WithTo(common.HexToAddress("0x4200000000000000000000000000000000000015")), + bindings.WithTest(t), + ) + + return &daFootprintEnv{ + l1Client: l1EL, + l2Network: l2Network, + l2EL: l2EL, + systemConfig: systemConfig, + l1Block: l1Block, + } +} + +func (env *daFootprintEnv) checkCompatibility(t devtest.T) { + // Ensure getters exist on both L1 SystemConfig and L2 L1Block. + _, err := contractio.Read(env.systemConfig.DAFootprintGasScalar(), t.Ctx()) + t.Require().NoError(err) + _, err = contractio.Read(env.l1Block.DAFootprintGasScalar(), t.Ctx()) + t.Require().NoError(err) +} + +func (env *daFootprintEnv) getSystemConfigOwner(t devtest.T) *dsl.EOA { + priv := env.l2Network.Escape().Keys().Secret(devkeys.SystemConfigOwner.Key(env.l2Network.ChainID().ToBig())) + return dsl.NewKey(t, priv).User(env.l1Client) +} + +func (env *daFootprintEnv) setDAFootprintGasScalarViaSystemConfig(t devtest.T, scalar uint16) *types.Receipt { + owner := env.getSystemConfigOwner(t) + rec, err := contractio.Write(env.systemConfig.SetDAFootprintGasScalar(scalar), t.Ctx(), owner.Plan()) + t.Require().NoError(err, "SetDAFootprintGasScalar transaction failed") + t.Logf("Set DA footprint gas scalar on L1: scalar=%d", scalar) + return rec +} + +func (env *daFootprintEnv) getDAFootprintGasScalarOfSystemConfig(t devtest.T) uint16 { + scalar, err := contractio.Read(env.systemConfig.DAFootprintGasScalar(), t.Ctx()) + t.Require().NoError(err) + return scalar +} + +// expectL1BlockDAFootprintGasScalar expects the given DA footprint gas scalar to be set in the L1Block contract. 
+func (env *daFootprintEnv) expectL1BlockDAFootprintGasScalar(t devtest.T, expected uint16) { + current, err := contractio.Read(env.l1Block.DAFootprintGasScalar(), t.Ctx()) + t.Require().NoError(err, "Failed to read DA footprint gas scalar from L1Block") + t.Require().Equal(expected, current) +} + +type minBaseFeeEnv struct { + l1Client *dsl.L1ELNode + l2Network *dsl.L2Network + l2EL *dsl.L2ELNode + systemConfig minBaseFeeSystemConfig +} + +type minBaseFeeSystemConfig struct { + SetMinBaseFee func(minBaseFee uint64) bindings.TypedCall[any] `sol:"setMinBaseFee"` + MinBaseFee func() bindings.TypedCall[uint64] `sol:"minBaseFee"` +} + +func newMinBaseFee(t devtest.T, l2Network *dsl.L2Network, l1EL *dsl.L1ELNode, l2EL *dsl.L2ELNode) *minBaseFeeEnv { + systemConfig := bindings.NewBindings[minBaseFeeSystemConfig]( + bindings.WithClient(l1EL.EthClient()), + bindings.WithTo(l2Network.Escape().Deployment().SystemConfigProxyAddr()), + bindings.WithTest(t), + ) + + return &minBaseFeeEnv{ + l1Client: l1EL, + l2Network: l2Network, + l2EL: l2EL, + systemConfig: systemConfig, + } +} + +func (mbf *minBaseFeeEnv) checkCompatibility(t devtest.T) { + _, err := contractio.Read(mbf.systemConfig.MinBaseFee(), t.Ctx()) + if err != nil { + t.Fail() + } +} + +func (mbf *minBaseFeeEnv) getSystemConfigOwner(t devtest.T) *dsl.EOA { + priv := mbf.l2Network.Escape().Keys().Secret(devkeys.SystemConfigOwner.Key(mbf.l2Network.ChainID().ToBig())) + return dsl.NewKey(t, priv).User(mbf.l1Client) +} + +func (mbf *minBaseFeeEnv) setMinBaseFeeViaSytemConfigOnL1(t devtest.T, minBaseFee uint64) { + owner := mbf.getSystemConfigOwner(t) + + _, err := contractio.Write(mbf.systemConfig.SetMinBaseFee(minBaseFee), t.Ctx(), owner.Plan()) + t.Require().NoError(err, "SetMinBaseFee transaction failed") + + t.Logf("Set min base fee on L1: minBaseFee=%d", minBaseFee) +} + +func (mbf *minBaseFeeEnv) verifyMinBaseFee(t devtest.T, minBase *big.Int) { + // Wait for the next block. 
+ _ = mbf.l2EL.WaitForBlock() + el := mbf.l2EL.Escape().EthClient() + info, err := el.InfoByLabel(t.Ctx(), "latest") + t.Require().NoError(err) + + // Verify base fee is clamped. + t.Require().True(info.BaseFee().Cmp(minBase) >= 0, "expected base fee to be >= minBaseFee") + t.Logf("base fee %s, minBase %s", info.BaseFee(), minBase) +} + +// waitForMinBaseFeeConfigChangeOnL2 waits until the L2 latest payload extra-data encodes the expected min base fee. +func (mbf *minBaseFeeEnv) waitForMinBaseFeeConfigChangeOnL2(t devtest.T, expected uint64) { + client := mbf.l2EL.Escape().L2EthClient() + expectedExtraData := eth.BytesMax32(eip1559.EncodeJovianExtraData(250, 6, expected)) + + // Check extradata in block header (for all clients). + var actualBlockExtraData []byte + t.Require().Eventually(func() bool { + info, err := client.InfoByLabel(t.Ctx(), "latest") + if err != nil { + return false + } + + // Get header RLP and decode to access Extra field. + headerRLP, err := info.HeaderRLP() + if err != nil { + return false + } + + var header types.Header + if err := rlp.DecodeBytes(headerRLP, &header); err != nil { + return false + } + + if len(header.Extra) != 17 { + return false + } + + got := binary.BigEndian.Uint64(header.Extra[9:]) + actualBlockExtraData = header.Extra + return got == expected + }, 2*time.Minute, 5*time.Second, "L2 min base fee in block header did not sync within timeout") + + t.Require().Equal(expectedExtraData, eth.BytesMax32(actualBlockExtraData), "block header extradata doesnt match") +} + +func RunDAFootprint(gt *testing.T, setup SetupFn) { + t := devtest.SerialT(gt) + sys := setup(t) + require := t.Require() + + require.True(sys.L2Chain.IsForkActive(opforks.Jovian), "Jovian fork must be active for this test") + + env := newDAFootprintEnv(t, sys.L2Chain, sys.L1EL, sys.L2EL) + env.checkCompatibility(t) + + systemOwner := env.getSystemConfigOwner(t) + sys.FunderL1.FundAtLeast(systemOwner, eth.HalfEther) + l2BlockTime := 
time.Duration(sys.L2Chain.Escape().RollupConfig().BlockTime) * time.Second + sys.L2EL.WaitForOnline() + ethClient := sys.L2EL.Escape().EthClient() + + s1000 := uint16(1000) + s0 := uint16(0) + cases := []struct { + name string + setScalar *uint16 + expected uint16 + }{ + {"DefaultScalar", nil, uint16(derive.DAFootprintGasScalarDefault)}, + {"Scalar1000", &s1000, uint16(1000)}, + {"ScalarZeroUsesDefault", &s0, uint16(derive.DAFootprintGasScalarDefault)}, + } + + for _, tc := range cases { + t.Run(tc.name, func(t devtest.T) { + require := t.Require() + if tc.setScalar != nil { + rec := env.setDAFootprintGasScalarViaSystemConfig(t, *tc.setScalar) + // Wait for change to propagate to L2. + // Retrying up to 100 times is overkill, but lower values may not work on + // persistent networks. See the following issue for more details. + // https://github.com/ethereum-optimism/optimism/issues/18061 + env.l2EL.WaitL1OriginReached(eth.Unsafe, bigs.Uint64Strict(rec.BlockNumber), 100) + } else { + scalar := env.getDAFootprintGasScalarOfSystemConfig(t) + if scalar != 0 { + t.Skipf("Skipping default scalar test because SystemConfig DA footprint gas scalar is set to %d != 0", scalar) + } + sys.L2EL.WaitForBlockNumber(1) // make sure we don't assert on genesis. 
+ } + env.expectL1BlockDAFootprintGasScalar(t, tc.expected) + + var wg sync.WaitGroup + defer wg.Wait() + + ctx, cancel := context.WithTimeout(t.Ctx(), time.Minute) + defer cancel() + t = t.WithCtx(ctx) + + wg.Add(1) + go func() { + defer wg.Done() + eoa := sys.FunderL2.NewFundedEOA(eth.OneTenthEther) + includer := txinclude.NewPersistent(txinclude.NewPkSigner(eoa.Key().Priv(), eoa.ChainID().ToBig()), struct { + *txinclude.Resubmitter + *txinclude.Monitor + }{ + txinclude.NewResubmitter(ethClient, l2BlockTime), + txinclude.NewMonitor(ethClient, l2BlockTime), + }) + loadtest.NewBurst(l2BlockTime).Run(t, newCalldataSpammer(loadtest.NewSyncEOA(includer, eoa.Plan()))) + }() + + rollupCfg := sys.L2Chain.Escape().RollupConfig() + gasTarget := rollupCfg.Genesis.SystemConfig.GasLimit / rollupCfg.ChainOpConfig.EIP1559Elasticity + + var blockDAFootprint uint64 + info := sys.L2EL.WaitForUnsafe(func(info eth.BlockInfo) (bool, error) { + blockGasUsed := info.GasUsed() + blobGasUsed := info.BlobGasUsed() + require.NotNil(blobGasUsed, "blobGasUsed must not be nil for Jovian chains") + blockDAFootprint = *blobGasUsed + if blockDAFootprint <= blockGasUsed { + t.Logf("Block %s has DA footprint (%d) <= gasUsed (%d), trying next...", + eth.ToBlockID(info), blockDAFootprint, blockGasUsed) + return false, nil + } + if blockDAFootprint <= gasTarget { + t.Logf("Block %s has DA footprint (%d) <= gasTarget (%d), trying next...", + eth.ToBlockID(info), blockDAFootprint, gasTarget) + return false, nil + } + return true, nil + }) + + _, txs, err := ethClient.InfoAndTxsByHash(t.Ctx(), info.Hash()) + require.NoError(err) + _, receipts, err := sys.L2EL.Escape().L2EthClient().FetchReceipts(t.Ctx(), info.Hash()) + require.NoError(err) + + var totalDAFootprint uint64 + for i, tx := range txs { + if tx.IsDepositTx() { + continue + } + recScalar := receipts[i].DAFootprintGasScalar + require.NotNil(recScalar, "nil receipt DA footprint gas scalar") + require.EqualValues(tc.expected, *recScalar, "DA 
footprint gas scalar mismatch in receipt") + + txDAFootprint := bigs.Uint64Strict(tx.RollupCostData().EstimatedDASize()) * uint64(tc.expected) + require.Equal(txDAFootprint, receipts[i].BlobGasUsed, "tx DA footprint mismatch with receipt") + totalDAFootprint += txDAFootprint + } + t.Logf("Block %s has header/calculated DA footprint %d/%d", + eth.ToBlockID(info), blockDAFootprint, totalDAFootprint) + require.Equal(totalDAFootprint, blockDAFootprint, "Calculated DA footprint doesn't match block header DA footprint") + + // Check base fee calculation of next block. + // Calculate expected base fee as: + // parentBaseFee + max(1, parentBaseFee * gasUsedDelta / parentGasTarget / baseFeeChangeDenominator) + var ( + baseFee = new(big.Int) + denom = new(big.Int) + ) + baseFee.SetUint64(blockDAFootprint - gasTarget) // gasUsedDelta + baseFee.Mul(baseFee, info.BaseFee()) + baseFee.Div(baseFee, denom.SetUint64(gasTarget)) + baseFee.Div(baseFee, denom.SetUint64(*rollupCfg.ChainOpConfig.EIP1559DenominatorCanyon)) + if baseFee.Cmp(common.Big1) < 0 { + baseFee.Add(info.BaseFee(), common.Big1) + } else { + baseFee.Add(info.BaseFee(), baseFee) + } + t.Logf("Expected base fee: %s", baseFee) + + next := sys.L2EL.WaitForBlockNumber(info.NumberU64() + 1) + require.Equal(baseFee, next.BaseFee(), "Wrong base fee") + }) + } +} + +func RunMinBaseFee(gt *testing.T, setup SetupFn) { + t := devtest.SerialT(gt) + sys := setup(t) + require := t.Require() + + require.True(sys.L2Chain.IsForkActive(opforks.Jovian), "Jovian fork must be active for this test") + + minBaseFee := newMinBaseFee(t, sys.L2Chain, sys.L1EL, sys.L2EL) + minBaseFee.checkCompatibility(t) + + systemOwner := minBaseFee.getSystemConfigOwner(t) + sys.FunderL1.FundAtLeast(systemOwner, eth.OneTenthEther) + + testCases := []struct { + name string + minBaseFee uint64 + }{ + // High minimum base fee. + {"MinBaseFeeHigh", 2_000_000_000}, + // Medium minimum base fee. 
+ {"MinBaseFeeMedium", 1_000_000_000}, + // Zero minimum base fee (not enforced). + {"MinBaseFeeZero", 0}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t devtest.T) { + minBaseFee.setMinBaseFeeViaSytemConfigOnL1(t, tc.minBaseFee) + minBaseFee.waitForMinBaseFeeConfigChangeOnL2(t, tc.minBaseFee) + + minBaseFee.verifyMinBaseFee(t, big.NewInt(int64(tc.minBaseFee))) + + t.Log("Test completed successfully:", + "testCase", tc.name, + "minBaseFee", tc.minBaseFee) + }) + } +} + +func RunOperatorFee(gt *testing.T, setup SetupFn) { + t := devtest.SerialT(gt) + sys := setup(t) + t.Require().True(sys.L2Chain.IsForkActive(opforks.Jovian), "Jovian fork must be active for this test") + dsl.RunOperatorFeeTest(t, sys.L2Chain, sys.L1EL, sys.FunderL1, sys.FunderL2) +} diff --git a/op-acceptance-tests/tests/jovian/pectra/pectra_test.go b/op-acceptance-tests/tests/jovian/pectra/pectra_test.go index f2e80642e613a..ec8fbefd58399 100644 --- a/op-acceptance-tests/tests/jovian/pectra/pectra_test.go +++ b/op-acceptance-tests/tests/jovian/pectra/pectra_test.go @@ -3,27 +3,26 @@ package pectra import ( "testing" - "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian" + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/jovian/bpo2/joviantest" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) -func TestMain(m *testing.M) { - presets.DoMain(m, stack.Combine(stack.MakeCommon(stack.Combine( - sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{}), - sysgo.WithDeployerOptions(sysgo.WithJovianAtGenesis), - )))) +func setupPectra(t devtest.T) *presets.Minimal { + return presets.NewMinimal(t, + presets.WithDeployerOptions(sysgo.WithJovianAtGenesis), + ) } -func TestDAFootprint(t *testing.T) { - jovian.TestDAFootprint(t) +func TestDAFootprint(gt *testing.T) { + 
joviantest.RunDAFootprint(gt, setupPectra) } -func TestMinBaseFee(t *testing.T) { - jovian.TestMinBaseFee(t) +func TestMinBaseFee(gt *testing.T) { + joviantest.RunMinBaseFee(gt, setupPectra) } -func TestOperatorFee(t *testing.T) { - jovian.TestOperatorFee(t) +func TestOperatorFee(gt *testing.T) { + joviantest.RunOperatorFee(gt, setupPectra) } diff --git a/op-acceptance-tests/tests/proofs/cannon/init_test.go b/op-acceptance-tests/tests/proofs/cannon/init_test.go deleted file mode 100644 index bdba42260d304..0000000000000 --- a/op-acceptance-tests/tests/proofs/cannon/init_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package cannon - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithProofs(), - stack.MakeCommon(sysgo.WithDeployerOptions(sysgo.WithJovianAtGenesis)), - presets.WithSafeDBEnabled(), - // Requires access to a challenger config which only sysgo provides - // These tests would also be exceptionally slow on real L1s - presets.WithCompatibleTypes(compat.SysGo), - ) -} diff --git a/op-acceptance-tests/tests/proofs/cannon/setup_test.go b/op-acceptance-tests/tests/proofs/cannon/setup_test.go new file mode 100644 index 0000000000000..5180476e20cb2 --- /dev/null +++ b/op-acceptance-tests/tests/proofs/cannon/setup_test.go @@ -0,0 +1,23 @@ +package cannon + +import ( + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func cannonOpts() []presets.Option { + return []presets.Option{ + presets.WithGameTypeAdded(gameTypes.CannonGameType), + presets.WithCannonKonaGameTypeAdded(), + 
presets.WithDeployerOptions(sysgo.WithJovianAtGenesis), + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(p devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.SafeDBPath = p.TempDir() + })), + } +} + +func newSystem(t devtest.T) *presets.Minimal { + return presets.NewMinimal(t, cannonOpts()...) +} diff --git a/op-acceptance-tests/tests/proofs/cannon/smoke_test.go b/op-acceptance-tests/tests/proofs/cannon/smoke_test.go index ed890f8baad15..22d6517d01e9a 100644 --- a/op-acceptance-tests/tests/proofs/cannon/smoke_test.go +++ b/op-acceptance-tests/tests/proofs/cannon/smoke_test.go @@ -6,12 +6,11 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/gameargs" gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" ) func TestSmoke(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewMinimal(t) + sys := newSystem(t) require := t.Require() dgf := sys.DisputeGameFactory() diff --git a/op-acceptance-tests/tests/proofs/cannon/step_test.go b/op-acceptance-tests/tests/proofs/cannon/step_test.go index fce594728f605..addf858771d5c 100644 --- a/op-acceptance-tests/tests/proofs/cannon/step_test.go +++ b/op-acceptance-tests/tests/proofs/cannon/step_test.go @@ -5,14 +5,13 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl/proofs" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) func TestExecuteStep_Cannon(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewMinimal(t) + sys := newSystem(t) l1User := sys.FunderL1.NewFundedEOA(eth.ThousandEther) blockNum := uint64(3) @@ -29,7 +28,7 @@ func TestExecuteStep_Cannon(gt *testing.T) { func 
TestExecuteStep_CannonKona(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewMinimal(t) + sys := newSystem(t) l1User := sys.FunderL1.NewFundedEOA(eth.ThousandEther) blockNum := uint64(3) diff --git a/op-acceptance-tests/tests/rules/init_test.go b/op-acceptance-tests/tests/rules/init_test.go index 12e9e05606920..2f2f5889ef003 100644 --- a/op-acceptance-tests/tests/rules/init_test.go +++ b/op-acceptance-tests/tests/rules/init_test.go @@ -2,10 +2,9 @@ package rules import ( "os" - "testing" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum/go-ethereum/common" ) @@ -15,13 +14,8 @@ func rulesEnabled() bool { return os.Getenv(RULES_TEST_ENABLE_ENV) == "1" } -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - options := presets.WithSingleChainSystemWithFlashblocks() - if rulesEnabled() { - options = stack.Combine(options, presets.WithOPRBuilderRules(TestRulesYAML, TestRefreshInterval)) - } - presets.DoMain(m, options) +func newRulesSystem(t devtest.T) *presets.SingleChainWithFlashblocks { + return presets.NewSingleChainWithFlashblocks(t, presets.WithOPRBuilderRules(TestRulesYAML, TestRefreshInterval)) } // BoostedRecipient is the well-known address that receives boosted transactions in tests. 
diff --git a/op-acceptance-tests/tests/rules/rules_test.go b/op-acceptance-tests/tests/rules/rules_test.go index 52c00e6fbd81a..70de70471e1d4 100644 --- a/op-acceptance-tests/tests/rules/rules_test.go +++ b/op-acceptance-tests/tests/rules/rules_test.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/flashblocks" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/retry" @@ -42,7 +41,7 @@ func TestBoostPriorityOrdering(gt *testing.T) { tracer := t.Tracer() ctx := t.Ctx() - sys := presets.NewSingleChainWithFlashblocks(t) + sys := newRulesSystem(t) topLevelCtx, span := tracer.Start(ctx, "test boost priority ordering") defer span.End() @@ -170,7 +169,7 @@ func TestBoostedVsNonBoostedOrdering(gt *testing.T) { tracer := t.Tracer() ctx := t.Ctx() - sys := presets.NewSingleChainWithFlashblocks(t) + sys := newRulesSystem(t) topLevelCtx, span := tracer.Start(ctx, "test boosted vs non-boosted ordering") defer span.End() @@ -263,7 +262,7 @@ func TestSameSenderNonceOrdering(gt *testing.T) { tracer := t.Tracer() ctx := t.Ctx() - sys := presets.NewSingleChainWithFlashblocks(t) + sys := newRulesSystem(t) topLevelCtx, span := tracer.Start(ctx, "test same sender nonce ordering") defer span.End() @@ -363,7 +362,7 @@ func TestMultipleSendersWithMixedPriorities(gt *testing.T) { tracer := t.Tracer() ctx := t.Ctx() - sys := presets.NewSingleChainWithFlashblocks(t) + sys := newRulesSystem(t) topLevelCtx, span := tracer.Start(ctx, "test multiple senders mixed priorities") defer span.End() @@ -515,7 +514,7 @@ func TestSingleSenderRandomNonceOrderWithRandomScores(gt *testing.T) { tracer := t.Tracer() ctx := t.Ctx() - sys := presets.NewSingleChainWithFlashblocks(t) + 
sys := newRulesSystem(t) topLevelCtx, span := tracer.Start(ctx, "test single sender random nonce order with random scores") defer span.End() diff --git a/op-acceptance-tests/tests/safeheaddb_clsync/init_test.go b/op-acceptance-tests/tests/safeheaddb_clsync/init_test.go deleted file mode 100644 index d914f08106068..0000000000000 --- a/op-acceptance-tests/tests/safeheaddb_clsync/init_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package safeheaddb_elsync - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithConsensusLayerSync(), - presets.WithSafeDBEnabled(), - // Destructive test that requiring an in-memory only geth database - presets.WithCompatibleTypes(compat.SysGo), - ) -} diff --git a/op-acceptance-tests/tests/safeheaddb_clsync/safeheaddb_test.go b/op-acceptance-tests/tests/safeheaddb_clsync/safeheaddb_test.go index 8adee38f982b4..31a07ed6c7c13 100644 --- a/op-acceptance-tests/tests/safeheaddb_clsync/safeheaddb_test.go +++ b/op-acceptance-tests/tests/safeheaddb_clsync/safeheaddb_test.go @@ -6,12 +6,20 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) func TestPreserveDatabaseOnCLResync(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNode(t) + sys := presets.NewSingleChainMultiNode(t, + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(p devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.SequencerSyncMode = sync.CLSync + cfg.VerifierSyncMode = sync.CLSync + cfg.SafeDBPath = p.TempDir() + })), + ) 
startSafeBlock := sys.L2CLB.SafeL2BlockRef().Number dsl.CheckAll(t, diff --git a/op-acceptance-tests/tests/safeheaddb_elsync/init_test.go b/op-acceptance-tests/tests/safeheaddb_elsync/init_test.go deleted file mode 100644 index 3673da02e3d20..0000000000000 --- a/op-acceptance-tests/tests/safeheaddb_elsync/init_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package safeheaddb_elsync - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainMultiNode(), - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithSafeDBEnabled(), - // Destructive test that requiring an in-memory only geth database - presets.WithCompatibleTypes(compat.SysGo), - ) -} diff --git a/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go b/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go index 5bb992e531951..e6ee101e26fea 100644 --- a/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go +++ b/op-acceptance-tests/tests/safeheaddb_elsync/safeheaddb_test.go @@ -6,12 +6,23 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +func newSingleChainMultiNodeELSync(t devtest.T) *presets.SingleChainMultiNode { + return presets.NewSingleChainMultiNode(t, + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(p devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.VerifierSyncMode = sync.ELSync + cfg.SafeDBPath = p.TempDir() + })), + ) +} + func TestTruncateDatabaseOnELResync(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNode(t) + sys := 
newSingleChainMultiNodeELSync(t) dsl.CheckAll(t, sys.L2CL.AdvancedFn(types.LocalSafe, 1, 30), @@ -40,7 +51,7 @@ func TestTruncateDatabaseOnELResync(gt *testing.T) { func TestNotTruncateDatabaseOnRestartWithExistingDatabase(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNode(t) + sys := newSingleChainMultiNodeELSync(t) dsl.CheckAll(t, sys.L2CL.AdvancedFn(types.LocalSafe, 1, 30), diff --git a/op-acceptance-tests/tests/sequencer/init_test.go b/op-acceptance-tests/tests/sequencer/init_test.go deleted file mode 100644 index ed9fb2d955542..0000000000000 --- a/op-acceptance-tests/tests/sequencer/init_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package sequencer - -import ( - "log/slog" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMinimal(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithLogLevel(slog.LevelDebug), - ) -} diff --git a/op-acceptance-tests/tests/supernode/advance_multiple_test.go b/op-acceptance-tests/tests/supernode/advance_multiple_test.go index afdc7e2f37d3e..b9bea1e26b170 100644 --- a/op-acceptance-tests/tests/supernode/advance_multiple_test.go +++ b/op-acceptance-tests/tests/supernode/advance_multiple_test.go @@ -17,7 +17,7 @@ import ( // - the two CLs are advancing unsafe and local safe heads func TestTwoChainProgress(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewTwoL2(t) + sys := presets.NewTwoL2Supernode(t) blockTime := sys.L2A.Escape().RollupConfig().BlockTime waitTime := time.Duration(blockTime+1) * time.Second diff --git a/op-acceptance-tests/tests/supernode/init_test.go b/op-acceptance-tests/tests/supernode/init_test.go deleted file mode 100644 index b75a9dfe93774..0000000000000 --- a/op-acceptance-tests/tests/supernode/init_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package 
supernode - -import ( - "os" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates a two-L2 setup against the shared backend -func TestMain(m *testing.M) { - _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") - presets.DoMain(m, presets.WithTwoL2Supernode()) -} diff --git a/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go b/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go index 9b3f351a5c8bc..95f0cf68f4ce8 100644 --- a/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go +++ b/op-acceptance-tests/tests/supernode/interop/activation/activation_after_genesis_test.go @@ -9,6 +9,10 @@ import ( "github.com/ethereum-optimism/optimism/op-service/eth" ) +// InteropActivationDelay is the delay in seconds from genesis to interop activation. +// This is set to 20 seconds to allow several blocks to be produced before interop kicks in. +const InteropActivationDelay = uint64(20) + // TestSupernodeInteropActivationAfterGenesis tests behavior when interop is activated // AFTER genesis. This verifies that VerifiedAt (via superroot_atTimestamp) returns // verified data for timestamps both before and after the activation boundary. diff --git a/op-acceptance-tests/tests/supernode/interop/activation/init_test.go b/op-acceptance-tests/tests/supernode/interop/activation/init_test.go deleted file mode 100644 index dfc13d262dc44..0000000000000 --- a/op-acceptance-tests/tests/supernode/interop/activation/init_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package activation - -import ( - "os" - "testing" -) - -// InteropActivationDelay is the delay in seconds from genesis to interop activation. -// This is set to 20 seconds to allow several blocks to be produced before interop kicks in. 
-const InteropActivationDelay = uint64(20) - -// TestMain creates a two-L2 setup with a shared supernode that has interop enabled -// AFTER genesis (delayed by InteropActivationDelay seconds). -// This allows testing that safety proceeds normally before interop activation. -func TestMain(m *testing.M) { - // Set the L2CL kind to supernode for all tests in this package - _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") - // TODO https://github.com/ethereum-optimism/optimism/issues/19403 - // invoking presets.WithTwoL2SupernodeInterop with a nonzero interop activation delay - // results in an unstable test setup due to bugs in op-supernode (it will hang when shutting down) - // presets.DoMain(m, presets.WithTwoL2SupernodeInterop(InteropActivationDelay)) -} diff --git a/op-acceptance-tests/tests/supernode/interop/activation_at_genesis_test.go b/op-acceptance-tests/tests/supernode/interop/activation_at_genesis_test.go index 2407128ac20f1..4be0dbd225f28 100644 --- a/op-acceptance-tests/tests/supernode/interop/activation_at_genesis_test.go +++ b/op-acceptance-tests/tests/supernode/interop/activation_at_genesis_test.go @@ -5,7 +5,6 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -15,7 +14,7 @@ import ( // Also verifies that VerifiedAt (via superroot_atTimestamp) works correctly. 
func TestSupernodeInteropActivationAtGenesis(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewTwoL2SupernodeInterop(t, 0) + sys := newSupernodeInteropWithTimeTravel(t, 0) genesisTime := sys.L2A.Escape().RollupConfig().Genesis.L2Time blockTime := sys.L2A.Escape().RollupConfig().BlockTime diff --git a/op-acceptance-tests/tests/supernode/interop/cross_message_test.go b/op-acceptance-tests/tests/supernode/interop/cross_message_test.go index fb29899c78eaa..59e96f290ac0d 100644 --- a/op-acceptance-tests/tests/supernode/interop/cross_message_test.go +++ b/op-acceptance-tests/tests/supernode/interop/cross_message_test.go @@ -6,7 +6,6 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" ) @@ -16,7 +15,7 @@ import ( // All messages are valid, and no interruptions to the chains are expected. 
func TestSupernodeInteropBidirectionalMessages(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewTwoL2SupernodeInterop(t, 0) + sys := newSupernodeInteropWithTimeTravel(t, 0) // Create funded EOAs on both chains alice := sys.FunderA.NewFundedEOA(eth.OneEther) diff --git a/op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go b/op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go deleted file mode 100644 index 4bc98d0f47f58..0000000000000 --- a/op-acceptance-tests/tests/supernode/interop/follow_l2/init_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package follow_l2 - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain( - m, - presets.WithTwoL2SupernodeFollowL2(0), - presets.WithReqRespSyncDisabled(), - presets.WithNoDiscovery(), - presets.WithCompatibleTypes(compat.SysGo), - ) -} diff --git a/op-acceptance-tests/tests/supernode/interop/head_progression_test.go b/op-acceptance-tests/tests/supernode/interop/head_progression_test.go index fb5b8f7d19284..2442db2f44704 100644 --- a/op-acceptance-tests/tests/supernode/interop/head_progression_test.go +++ b/op-acceptance-tests/tests/supernode/interop/head_progression_test.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -27,7 +26,7 @@ import ( // - Finalized L2 blocks have sane L1 origins (behind the L1 finalized head) func TestSupernodeInterop_SafeHeadProgression(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewTwoL2SupernodeInterop(t, 0) + sys := newSupernodeInteropWithTimeTravel(t, 0) attempts := 15 // each attempt is hardcoded with a 2s by the 
DSL. finalTargetBlockNum := uint64(10) @@ -154,7 +153,7 @@ func TestSupernodeInterop_SafeHeadProgression(gt *testing.T) { // - Safe head advances after slower chain catches up func TestSupernodeInterop_SafeHeadWithUnevenProgress(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewTwoL2SupernodeInterop(t, 0) + sys := newSupernodeInteropWithTimeTravel(t, 0) attempts := 15 initialTargetBlockNum := uint64(5) diff --git a/op-acceptance-tests/tests/supernode/interop/helpers_test.go b/op-acceptance-tests/tests/supernode/interop/helpers_test.go new file mode 100644 index 0000000000000..130ff227850f0 --- /dev/null +++ b/op-acceptance-tests/tests/supernode/interop/helpers_test.go @@ -0,0 +1,12 @@ +package interop + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" +) + +func newSupernodeInteropWithTimeTravel(t devtest.T, delaySeconds uint64) *presets.TwoL2SupernodeInterop { + return presets.NewTwoL2SupernodeInterop(t, delaySeconds, + presets.WithTimeTravelEnabled(), + ) +} diff --git a/op-acceptance-tests/tests/supernode/interop/init_test.go b/op-acceptance-tests/tests/supernode/interop/init_test.go deleted file mode 100644 index 96b31eb0ae955..0000000000000 --- a/op-acceptance-tests/tests/supernode/interop/init_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package interop - -import ( - "os" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates a two-L2 setup with a shared supernode that has interop enabled. -// This allows testing of cross-chain message verification at each timestamp. 
-func TestMain(m *testing.M) { - // Set the L2CL kind to supernode for all tests in this package - _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") - presets.DoMain(m, - presets.WithTwoL2SupernodeInterop(0), - presets.WithTimeTravel(), // Enable time travel for faster tests - ) -} diff --git a/op-acceptance-tests/tests/supernode/interop/reorg/init_test.go b/op-acceptance-tests/tests/supernode/interop/reorg/init_test.go deleted file mode 100644 index 8cc9d75999607..0000000000000 --- a/op-acceptance-tests/tests/supernode/interop/reorg/init_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package reorg - -import ( - "os" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates an isolated two-L2 setup with a shared supernode that has interop enabled. -// This package tests block invalidation and reorg scenarios that would pollute other tests if run on a shared devnet. -func TestMain(m *testing.M) { - _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") - presets.DoMain(m, presets.WithTwoL2SupernodeInterop(0)) -} diff --git a/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go b/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go deleted file mode 100644 index 1762d11f2adcf..0000000000000 --- a/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/init_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package same_timestamp_invalid - -import ( - "os" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -// TestMain creates an isolated two-L2 setup with a shared supernode that has interop enabled. -// This package tests that executing messages referencing initiating messages from the same -// timestamp are correctly detected as invalid and replaced. 
-func TestMain(m *testing.M) { - _ = os.Setenv("DEVSTACK_L2CL_KIND", "supernode") - presets.DoMain(m, presets.WithTwoL2SupernodeInterop(0)) -} diff --git a/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go b/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go index e4f8b58e8a9de..1ef8326dd370c 100644 --- a/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go +++ b/op-acceptance-tests/tests/supernode/interop/same_timestamp_invalid/same_timestamp_test.go @@ -46,12 +46,15 @@ func TestSupernodeSameTimestampCycle(gt *testing.T) { sys := presets.NewTwoL2SupernodeInterop(t, 0).ForSameTimestampTesting(t) rng := rand.New(rand.NewSource(55555)) - pairA := sys.PrepareInitA(rng, 0) - pairB := sys.PrepareInitB(rng, 0) + // Create the actual cycle shape: each chain executes the other chain's init + // before emitting its own init in the same block. That means the init lands at + // log index 1, not 0. 
+ pairA := sys.PrepareInitA(rng, 1) + pairB := sys.PrepareInitB(rng, 1) sys.IncludeAndValidate( - []*txplan.PlannedTx{pairA.SubmitInit(), pairB.SubmitExecTo(sys.Alice)}, - []*txplan.PlannedTx{pairB.SubmitInit(), pairA.SubmitExecTo(sys.Bob)}, + []*txplan.PlannedTx{pairB.SubmitExecTo(sys.Alice), pairA.SubmitInit()}, + []*txplan.PlannedTx{pairA.SubmitExecTo(sys.Bob), pairB.SubmitInit()}, true, true, // both replaced (cycle detected) ) } diff --git a/op-acceptance-tests/tests/supernode/interop/timestamp_progression_test.go b/op-acceptance-tests/tests/supernode/interop/timestamp_progression_test.go index 82cade4ac5587..772c6868bbf12 100644 --- a/op-acceptance-tests/tests/supernode/interop/timestamp_progression_test.go +++ b/op-acceptance-tests/tests/supernode/interop/timestamp_progression_test.go @@ -5,14 +5,13 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" ) // TestSupernodeInteropVerifiedAt tests that the VerifiedAt endpoint returns // correct data after the interop activity has processed timestamps. func TestSupernodeInteropVerifiedAt(gt *testing.T) { t := devtest.ParallelT(gt) - sys := presets.NewTwoL2SupernodeInterop(t, 0) + sys := newSupernodeInteropWithTimeTravel(t, 0) blockTime := sys.L2A.Escape().RollupConfig().BlockTime genesisTime := sys.L2A.Escape().RollupConfig().Genesis.L2Time @@ -55,7 +54,7 @@ func TestSupernodeInteropVerifiedAt(gt *testing.T) { // This proves the supernode waits for all chains' local safe heads before verifying. 
func TestSupernodeInteropChainLag(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewTwoL2SupernodeInterop(t, 0) + sys := newSupernodeInteropWithTimeTravel(t, 0) blockTime := sys.L2A.Escape().RollupConfig().BlockTime ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go b/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go index 40343a01fa647..5b074f1674545 100644 --- a/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go +++ b/op-acceptance-tests/tests/sync/clsync/gap_clp2p/init_test.go @@ -1,22 +1,21 @@ package gap_clp2p import ( - "testing" - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) -func TestMain(m *testing.M) { - // No ELP2P, CLP2P to control the supply of unsafe payload to the CL - presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), - presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { +func gapCLP2POpts() []presets.Option { + return []presets.Option{ + presets.WithBatcherOption(func(_ sysgo.ComponentTarget, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true - })), - ) + }), + } +} + +func newGapCLP2PSystem(t devtest.T) *presets.SingleChainMultiNode { + return presets.NewSingleChainMultiNodeWithoutP2PWithoutCheck(t, gapCLP2POpts()...) 
} diff --git a/op-acceptance-tests/tests/sync/clsync/gap_clp2p/sync_test.go b/op-acceptance-tests/tests/sync/clsync/gap_clp2p/sync_test.go index f4cc41ac27d1d..c00d71a53f9a3 100644 --- a/op-acceptance-tests/tests/sync/clsync/gap_clp2p/sync_test.go +++ b/op-acceptance-tests/tests/sync/clsync/gap_clp2p/sync_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) @@ -12,7 +11,7 @@ import ( // TestSyncAfterInitialELSync tests that blocks received out of order would be processed in order when running in CL sync mode. Note that this is not going to happen when running in EL sync mode, which relies on healthy ELP2P, something that is disabled in this test. func TestSyncAfterInitialELSync(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := newGapCLP2PSystem(t) require := t.Require() sys.L2CL.Advanced(types.LocalUnsafe, 7, 30) diff --git a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go index 83745675957de..4453bf43b23b6 100644 --- a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go +++ b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/init_test.go @@ -1,23 +1,25 @@ package gap_clp2p import ( - "testing" - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" ) -func TestMain(m *testing.M) { - // No ELP2P, CLP2P to control the 
supply of unsafe payload to the CL - presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { +func gapCLP2POpts() []presets.Option { + return []presets.Option{ + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.VerifierSyncMode = nodeSync.ELSync + })), + presets.WithBatcherOption(func(_ sysgo.ComponentTarget, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true - })), - ) + }), + } +} + +func newGapCLP2PSystem(t devtest.T) *presets.SingleChainMultiNode { + return presets.NewSingleChainMultiNodeWithoutP2PWithoutCheck(t, gapCLP2POpts()...) } diff --git a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/sync_test.go b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/sync_test.go index edb32f84bcef6..349544b548bfc 100644 --- a/op-acceptance-tests/tests/sync/elsync/gap_clp2p/sync_test.go +++ b/op-acceptance-tests/tests/sync/elsync/gap_clp2p/sync_test.go @@ -5,14 +5,13 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) func TestReachUnsafeTipByAppendingUnsafePayload(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := newGapCLP2PSystem(t) logger := t.Logger() sys.L2CL.Advanced(types.LocalUnsafe, 7, 30) @@ -47,7 +46,7 @@ func TestReachUnsafeTipByAppendingUnsafePayload(gt *testing.T) { // while maintaining correct Engine API semantics. 
func TestCLUnsafeNotRewoundOnInvalidDuringELSync(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := newGapCLP2PSystem(t) logger := t.Logger() require := t.Require() diff --git a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go index 43dec39747583..b58eefce6b0d9 100644 --- a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go +++ b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/init_test.go @@ -1,22 +1,21 @@ package gap_elp2p import ( - "testing" - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) -func TestMain(m *testing.M) { - // No ELP2P, CLP2P to control the supply of unsafe payload to the CL - presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), - presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { +func gapELP2POpts() []presets.Option { + return []presets.Option{ + presets.WithBatcherOption(func(_ sysgo.ComponentTarget, cfg *bss.CLIConfig) { // For stopping derivation, not to advance safe heads cfg.Stopped = true - })), - ) + }), + } +} + +func newGapELP2PSystem(t devtest.T) *presets.SingleChainMultiNode { + return presets.NewSingleChainMultiNodeWithoutP2PWithoutCheck(t, gapELP2POpts()...) 
} diff --git a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/sync_test.go b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/sync_test.go index 796958797fd92..63d9d8a70fab7 100644 --- a/op-acceptance-tests/tests/sync/elsync/gap_elp2p/sync_test.go +++ b/op-acceptance-tests/tests/sync/elsync/gap_elp2p/sync_test.go @@ -4,7 +4,6 @@ import ( "testing" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum" @@ -56,7 +55,7 @@ import ( // - With ELP2P enabled, repeated FCU attempts eventually validate and advance the canonical chain. func TestL2ELP2PCanonicalChainAdvancedByFCU(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := newGapELP2PSystem(t) require := t.Require() logger := t.Logger() @@ -251,7 +250,7 @@ func TestL2ELP2PCanonicalChainAdvancedByFCU(gt *testing.T) { // and by avoiding advancement of the chain on invalid data. func TestELP2PFCUUnavailableHash(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := newGapELP2PSystem(t) logger := t.Logger() genesis := sys.L2ELB.BlockRefByNumber(0) @@ -307,7 +306,7 @@ func TestELP2PFCUUnavailableHash(gt *testing.T) { // appendability/sync checks first, per Engine API behavior. func TestSafeDoesNotAdvanceWhenUnsafeIsSyncing_NoELP2P(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := newGapELP2PSystem(t) logger := t.Logger() // Advance few blocks to make sure reference node advanced @@ -394,7 +393,7 @@ func TestSafeDoesNotAdvanceWhenUnsafeIsSyncing_NoELP2P(gt *testing.T) { // invalid payloads—whether rejected at the CL or EL—do not advance the chain. 
func TestInvalidPayloadThroughCLP2P(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := newGapELP2PSystem(t) logger := t.Logger() require := t.Require() ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/sync/elsync/reorg/init_test.go b/op-acceptance-tests/tests/sync/elsync/reorg/init_test.go index d0a15b5d4d01b..c687960261f48 100644 --- a/op-acceptance-tests/tests/sync/elsync/reorg/init_test.go +++ b/op-acceptance-tests/tests/sync/elsync/reorg/init_test.go @@ -1,16 +1,19 @@ package reorg import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithNewSingleChainMultiNodeWithTestSeq(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithNoDiscovery(), - ) +func reorgOpts() []presets.Option { + return []presets.Option{ + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.NoDiscovery = true + })), + } +} + +func newReorgSystem(t devtest.T) *presets.SingleChainMultiNodeWithTestSeq { + return presets.NewSingleChainMultiNodeWithTestSeq(t, reorgOpts()...) 
} diff --git a/op-acceptance-tests/tests/sync/elsync/reorg/sync_test.go b/op-acceptance-tests/tests/sync/elsync/reorg/sync_test.go index d7b10d8c47ee9..eb154a4a46227 100644 --- a/op-acceptance-tests/tests/sync/elsync/reorg/sync_test.go +++ b/op-acceptance-tests/tests/sync/elsync/reorg/sync_test.go @@ -5,9 +5,6 @@ import ( "time" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" @@ -21,19 +18,17 @@ import ( // 4. CLP2P is restored, the verifier backfills and the unsafe gap is closed. func TestUnsafeGapFillAfterSafeReorg(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithTestSeq(t) + sys := newReorgSystem(t) require := t.Require() logger := t.Logger() ctx := t.Ctx() ts := sys.TestSequencer.Escape().ControlAPI(sys.L1Network.ChainID()) - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - // Pass the L1 genesis sys.L1Network.WaitForBlock() // Stop auto advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) + sys.L1CL.Stop() startL1Block := sys.L1EL.BlockRefByLabel(eth.Unsafe) @@ -68,7 +63,7 @@ func TestUnsafeGapFillAfterSafeReorg(gt *testing.T) { require.NoError(ts.Next(ctx)) // Start advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) + sys.L1CL.Start() // Make sure L1 reorged sys.L1EL.WaitForBlockNumber(l1BlockBeforeReorg.Number) @@ -153,7 +148,7 @@ func TestUnsafeGapFillAfterSafeReorg(gt *testing.T) { // 4. Verifier then backfills and closes the unsafe gap once reconnected via CLP2P. 
func TestUnsafeGapFillAfterUnsafeReorg_RestartL2CL(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithTestSeq(t) + sys := newReorgSystem(t) require := t.Require() logger := t.Logger() ctx := t.Ctx() @@ -162,13 +157,11 @@ func TestUnsafeGapFillAfterUnsafeReorg_RestartL2CL(gt *testing.T) { sys.L2Batcher.Stop() ts := sys.TestSequencer.Escape().ControlAPI(sys.L1Network.ChainID()) - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - // Pass the L1 genesis sys.L1Network.WaitForBlock() // Stop auto advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) + sys.L1CL.Stop() startL1Block := sys.L1EL.BlockRefByLabel(eth.Unsafe) @@ -203,7 +196,7 @@ func TestUnsafeGapFillAfterUnsafeReorg_RestartL2CL(gt *testing.T) { require.NoError(ts.Next(ctx)) // Start advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) + sys.L1CL.Start() // Make sure L1 reorged sys.L1EL.WaitForBlockNumber(l1BlockBeforeReorg.Number) @@ -277,7 +270,7 @@ func TestUnsafeGapFillAfterUnsafeReorg_RestartL2CL(gt *testing.T) { // 4. CLP2P is restored Verifier, the verifier backfills and the unsafe gap is closed. 
func TestUnsafeGapFillAfterUnsafeReorg_RestartCLP2P(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainMultiNodeWithTestSeq(t) + sys := newReorgSystem(t) require := t.Require() logger := t.Logger() ctx := t.Ctx() @@ -286,13 +279,11 @@ func TestUnsafeGapFillAfterUnsafeReorg_RestartCLP2P(gt *testing.T) { sys.L2Batcher.Stop() ts := sys.TestSequencer.Escape().ControlAPI(sys.L1Network.ChainID()) - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - // Pass the L1 genesis sys.L1Network.WaitForBlock() // Stop auto advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) + sys.L1CL.Stop() startL1Block := sys.L1EL.BlockRefByLabel(eth.Unsafe) @@ -331,7 +322,7 @@ func TestUnsafeGapFillAfterUnsafeReorg_RestartCLP2P(gt *testing.T) { require.NoError(ts.Next(ctx)) // Start advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) + sys.L1CL.Start() // Make sure L1 reorged sys.L1EL.WaitForBlockNumber(l1BlockBeforeReorg.Number) diff --git a/op-acceptance-tests/tests/sync/follow_l2/init_test.go b/op-acceptance-tests/tests/sync/follow_l2/init_test.go deleted file mode 100644 index 47024bf47db28..0000000000000 --- a/op-acceptance-tests/tests/sync/follow_l2/init_test.go +++ /dev/null @@ -1,16 +0,0 @@ -package follow_l2 - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSingleChainTwoVerifiersFollowL2(), - presets.WithReqRespSyncDisabled(), - presets.WithNoDiscovery(), - presets.WithCompatibleTypes(compat.SysGo), - ) -} diff --git a/op-acceptance-tests/tests/sync/follow_l2/setup_test.go b/op-acceptance-tests/tests/sync/follow_l2/setup_test.go new file mode 100644 index 0000000000000..75e7bcca7a7ed --- /dev/null +++ b/op-acceptance-tests/tests/sync/follow_l2/setup_test.go @@ -0,0 +1,25 @@ +package follow_l2 + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" 
+ "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func followL2Opts() []presets.Option { + return []presets.Option{ + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.EnableReqRespSync = false + cfg.UseReqRespSync = false + })), + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn( + func(_ devtest.T, _ sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.NoDiscovery = true + })), + } +} + +func newSingleChainTwoVerifiersFollowL2(t devtest.T) *presets.SingleChainTwoVerifiers { + return presets.NewSingleChainTwoVerifiersWithoutCheck(t, followL2Opts()...) +} diff --git a/op-acceptance-tests/tests/sync/follow_l2/sync_test.go b/op-acceptance-tests/tests/sync/follow_l2/sync_test.go index 11c38822e3472..db5589644bca2 100644 --- a/op-acceptance-tests/tests/sync/follow_l2/sync_test.go +++ b/op-acceptance-tests/tests/sync/follow_l2/sync_test.go @@ -6,9 +6,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" @@ -17,7 +14,7 @@ import ( func TestFollowL2_Safe_Finalized_CurrentL1(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainTwoVerifiersWithoutCheck(t) + sys := newSingleChainTwoVerifiersFollowL2(t) logger := t.Logger() // Takes about 2 minutes for L1 finalization @@ -59,7 +56,7 @@ func TestFollowL2_Safe_Finalized_CurrentL1(gt *testing.T) { func TestFollowL2_ReorgRecovery(gt *testing.T) { t := devtest.SerialT(gt) - sys := 
presets.NewSingleChainTwoVerifiersWithoutCheck(t) + sys := newSingleChainTwoVerifiersFollowL2(t) require := t.Require() logger := t.Logger() ctx := t.Ctx() @@ -67,13 +64,11 @@ func TestFollowL2_ReorgRecovery(gt *testing.T) { // L2CLB is the verifier without follow source, derivation enabled ts := sys.TestSequencer.Escape().ControlAPI(sys.L1Network.ChainID()) - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - // Pass the L1 genesis sys.L1Network.WaitForBlock() // Stop auto advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) + sys.L1CL.Stop() startL1Block := sys.L1EL.BlockRefByLabel(eth.Unsafe) @@ -102,7 +97,7 @@ func TestFollowL2_ReorgRecovery(gt *testing.T) { require.NoError(ts.Next(ctx)) // Start advancing L1 - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) + sys.L1CL.Start() // Make sure L1 reorged sys.L1EL.WaitForBlockNumber(l1BlockBeforeReorg.Number) @@ -130,7 +125,7 @@ func TestFollowL2_ReorgRecovery(gt *testing.T) { func TestFollowL2_WithoutCLP2P(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSingleChainTwoVerifiersWithoutCheck(t) + sys := newSingleChainTwoVerifiersFollowL2(t) require := t.Require() logger := t.Logger() diff --git a/op-acceptance-tests/tests/sync/manual/init_test.go b/op-acceptance-tests/tests/sync/manual/init_test.go deleted file mode 100644 index a3c8bdff18d33..0000000000000 --- a/op-acceptance-tests/tests/sync/manual/init_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package manual - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - // No ELP2P, CLP2P to control the supply of unsafe payload to the CL - presets.DoMain(m, presets.WithSingleChainMultiNodeWithoutP2P(), - 
presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - // For stopping derivation, not to advance safe heads - cfg.Stopped = true - })), - ) -} diff --git a/op-acceptance-tests/tests/sync/manual/sync_test.go b/op-acceptance-tests/tests/sync/manual/sync_test.go index 65317f3c8a92e..d86b7a4b082d9 100644 --- a/op-acceptance-tests/tests/sync/manual/sync_test.go +++ b/op-acceptance-tests/tests/sync/manual/sync_test.go @@ -3,8 +3,10 @@ package manual import ( "testing" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum/go-ethereum" @@ -14,7 +16,12 @@ func TestVerifierManualSync(gt *testing.T) { t := devtest.SerialT(gt) // Disable ELP2P and Batcher - sys := presets.NewSingleChainMultiNodeWithoutCheck(t) + sys := presets.NewSingleChainMultiNodeWithoutP2PWithoutCheck(t, + presets.WithBatcherOption(func(_ sysgo.ComponentTarget, cfg *bss.CLIConfig) { + // For stopping derivation, not to advance safe heads + cfg.Stopped = true + }), + ) require := t.Require() logger := t.Logger() diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/init_test.go deleted file mode 100644 index 171ff454e4c2c..0000000000000 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/init_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package sync_tester_e2e - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleWithSyncTester(), - 
presets.WithCompatibleTypes(compat.SysGo), - ) -} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/sync_tester_e2e_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/sync_tester_e2e_test.go index f3e493e77e71a..b7239f9007a2e 100644 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/sync_tester_e2e_test.go +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_e2e/sync_tester_e2e_test.go @@ -21,10 +21,10 @@ func TestSyncTesterE2E(gt *testing.T) { ctx := t.Ctx() // Test that we can get chain IDs from both L2CL nodes - l2CLChainID := sys.L2CL.ID().ChainID() + l2CLChainID := sys.L2CL.ChainID() require.Equal(eth.ChainIDFromUInt64(901), l2CLChainID, "first L2CL should be on chain 901") - l2CL2ChainID := sys.L2CL2.ID().ChainID() + l2CL2ChainID := sys.L2CL2.ChainID() require.Equal(eth.ChainIDFromUInt64(901), l2CL2ChainID, "second L2CL should be on chain 901") // Test that the network started successfully diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/elsync_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/elsync_test.go index 82d6d9e4f016e..6b74436707df6 100644 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/elsync_test.go +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/elsync_test.go @@ -6,13 +6,26 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +func simpleWithSyncTesterOpts() []presets.Option { + return []presets.Option{ + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(_ devtest.T, id sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.VerifierSyncMode = sync.ELSync + 
})), + presets.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn(func(_ devtest.T, id sysgo.ComponentTarget, cfg *sysgo.SyncTesterELConfig) { + cfg.ELSyncActive = true + })), + } +} + func TestSyncTesterELSync(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleWithSyncTester(t) + sys := presets.NewSimpleWithSyncTester(t, simpleWithSyncTesterOpts()...) require := t.Require() logger := t.Logger() ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/init_test.go deleted file mode 100644 index 8125324dabaf6..0000000000000 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync/init_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package sync_tester_elsync - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithSimpleWithSyncTester(), - presets.WithELSyncActive(), - presets.WithCompatibleTypes(compat.SysGo), - ) -} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go deleted file mode 100644 index 6b4a82e8ddc5d..0000000000000 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/init_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package multi - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithSimpleWithSyncTester(), - 
presets.WithELSyncActive(), - presets.WithCompatibleTypes(compat.SysGo), - stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - // For stopping derivation, not to advance safe heads - cfg.Stopped = true - })), - ) -} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/sync_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/sync_test.go index b8ae7dcd2c256..a8e54fa2568dc 100644 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/sync_test.go +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_elsync_multi/sync_test.go @@ -3,15 +3,33 @@ package multi import ( "testing" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/sync" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +func simpleWithSyncTesterOpts() []presets.Option { + return []presets.Option{ + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(_ devtest.T, id sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.VerifierSyncMode = sync.ELSync + })), + presets.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn(func(_ devtest.T, id sysgo.ComponentTarget, cfg *sysgo.SyncTesterELConfig) { + cfg.ELSyncActive = true + })), + presets.WithBatcherOption(func(id sysgo.ComponentTarget, cfg *bss.CLIConfig) { + // For stopping derivation, not to advance safe heads + cfg.Stopped = true + }), + } +} + func TestMultiELSync(gt *testing.T) { t := devtest.SerialT(gt) - sys := presets.NewSimpleWithSyncTester(t) + sys := presets.NewSimpleWithSyncTester(t, simpleWithSyncTesterOpts()...) 
require := t.Require() // Stop L2CL2 to control SyncTesterL2EL manually diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/ext_config.go b/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/ext_config.go deleted file mode 100644 index f41811d7815f6..0000000000000 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/ext_config.go +++ /dev/null @@ -1,88 +0,0 @@ -package sync_tester_ext_el - -import ( - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// Configuration defaults for op-sepolia -const ( - DefaultNetworkPreset = "op-sepolia" - - // Tailscale networking endpoints - DefaultL2ELEndpointTailscale = "https://proxyd-l2-sepolia.primary.client.dev.oplabs.cloud" - DefaultL1CLBeaconEndpointTailscale = "https://beacon-api-proxy-sepolia.primary.client.dev.oplabs.cloud" - DefaultL1ELEndpointTailscale = "https://proxyd-l1-sepolia.primary.client.dev.oplabs.cloud" -) - -var ( - // Network presets for different networks against which we test op-node syncing - networkPresets = map[string]stack.ExtNetworkConfig{ - "op-sepolia": { - L2NetworkName: "op-sepolia", - L1ChainID: eth.ChainIDFromUInt64(11155111), - L2ELEndpoint: "https://ci-sepolia-l2.optimism.io", - L1CLBeaconEndpoint: "https://ci-sepolia-beacon.optimism.io", - L1ELEndpoint: "https://ci-sepolia-l1.optimism.io", - }, - "base-sepolia": { - L2NetworkName: "base-sepolia", - L1ChainID: eth.ChainIDFromUInt64(11155111), - L2ELEndpoint: "https://base-sepolia-rpc.optimism.io", - L1CLBeaconEndpoint: "https://ci-sepolia-beacon.optimism.io", - L1ELEndpoint: "https://ci-sepolia-l1.optimism.io", - }, - "unichain-sepolia": { - L2NetworkName: "unichain-sepolia", - L1ChainID: eth.ChainIDFromUInt64(11155111), - L2ELEndpoint: "https://unichain-sepolia-rpc.optimism.io", - L1CLBeaconEndpoint: "https://ci-sepolia-beacon.optimism.io", - L1ELEndpoint: "https://ci-sepolia-l1.optimism.io", - }, - "op-mainnet": { - 
L2NetworkName: "op-mainnet", - L1ChainID: eth.ChainIDFromUInt64(1), - L2ELEndpoint: "https://op-mainnet-rpc.optimism.io", - L1CLBeaconEndpoint: "https://ci-mainnet-beacon.optimism.io", - L1ELEndpoint: "https://ci-mainnet-l1.optimism.io", - }, - "base-mainnet": { - L2NetworkName: "base-mainnet", - L1ChainID: eth.ChainIDFromUInt64(1), - L2ELEndpoint: "https://base-mainnet-rpc.optimism.io", - L1CLBeaconEndpoint: "https://ci-mainnet-beacon.optimism.io", - L1ELEndpoint: "https://ci-mainnet-l1.optimism.io", - }, - } -) - -func GetNetworkPreset(name string) (stack.ExtNetworkConfig, error) { - var config stack.ExtNetworkConfig - if name == "" { - config = networkPresets[DefaultNetworkPreset] - } else { - var ok bool - config, ok = networkPresets[name] - if !ok { - return stack.ExtNetworkConfig{}, fmt.Errorf("NETWORK_PRESET %s not found", name) - } - } - // Override configuration with Tailscale endpoints if Tailscale networking is enabled - if os.Getenv("TAILSCALE_NETWORKING") == "true" { - config.L2ELEndpoint = getEnvOrDefault("L2_EL_ENDPOINT_TAILSCALE", DefaultL2ELEndpointTailscale) - config.L1CLBeaconEndpoint = getEnvOrDefault("L1_CL_BEACON_ENDPOINT_TAILSCALE", DefaultL1CLBeaconEndpointTailscale) - config.L1ELEndpoint = getEnvOrDefault("L1_EL_ENDPOINT_TAILSCALE", DefaultL1ELEndpointTailscale) - } - return config, nil -} - -// getEnvOrDefault returns the environment variable value or the default if not set -func getEnvOrDefault(envVar, defaultValue string) string { - if value := os.Getenv(envVar); value != "" { - return value - } - return defaultValue -} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go deleted file mode 100644 index 33253bba88f81..0000000000000 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_ext_el/sync_tester_ext_el_test.go +++ /dev/null @@ -1,183 +0,0 @@ -package sync_tester_ext_el - -import ( - "fmt" - "os" - 
"testing" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-node/chaincfg" - "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" - - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" -) - -var L2CLSyncMode = getSyncMode("L2_CL_SYNCMODE") - -func TestSyncTesterExtEL(gt *testing.T) { - t := devtest.SerialT(gt) - - if os.Getenv("CIRCLECI_PIPELINE_SCHEDULE_NAME") != "build_daily" && os.Getenv("CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH") != "true" { - t.Skipf("TestSyncTesterExtEL only runs on daily scheduled pipeline jobs: schedule=%s dispatch=%s", os.Getenv("CIRCLECI_PIPELINE_SCHEDULE_NAME"), os.Getenv("CIRCLECI_PARAMETERS_SYNC_TEST_OP_NODE_DISPATCH")) - } - - l := t.Logger() - require := t.Require() - blocksToSync := uint64(20) - sys, target := setupSystem(gt, t, blocksToSync) - - attempts := 500 - if L2CLSyncMode == sync.ELSync { - // After EL Sync is finished, the FCU state will advance to target immediately so less attempts - attempts = 5 - // Signal L2CL for triggering EL Sync - // Must send consecutive three payloads due to default EL Sync policy - for i := 2; i >= 0; i-- { - sys.L2CL.SignalTarget(sys.L2ELReadOnly, target-uint64(i)) - } - } - - // Test that we can get sync status from L2CL node - l2CLSyncStatus := sys.L2CL.SyncStatus() - require.NotNil(l2CLSyncStatus, "L2CL should have sync status") - - 
sys.L2CL.Reached(types.LocalUnsafe, target, attempts) - - l2CLSyncStatus = sys.L2CL.SyncStatus() - require.NotNil(l2CLSyncStatus, "L2CL should have sync status") - - unsafeL2Ref := l2CLSyncStatus.UnsafeL2 - blk := sys.L2EL.BlockRefByNumber(unsafeL2Ref.Number) - require.Equal(unsafeL2Ref.Hash, blk.Hash, "L2EL should be on the same block as L2CL") - - stSessions := sys.SyncTester.ListSessions() - require.Equal(len(stSessions), 1, "expect exactly one session") - - stSession := sys.SyncTester.GetSession(stSessions[0]) - require.GreaterOrEqual(stSession.CurrentState.Latest, stSession.InitialState.Latest+blocksToSync, "SyncTester session Latest should be on the same block as L2CL") - require.GreaterOrEqual(stSession.CurrentState.Safe, stSession.InitialState.Safe+blocksToSync, "SyncTester session Safe should be on the same block as L2CL") - - l.Info("SyncTester ExtEL test completed successfully", "l2cl_chain_id", sys.L2CL.ID().ChainID(), "l2cl_sync_status", l2CLSyncStatus) -} - -// setupSystem initializes the system for the test and returns the system and the target block number of the session -func setupSystem(gt *testing.T, t devtest.T, blocksToSync uint64) (*presets.MinimalExternalEL, uint64) { - // Initialize orchestrator - orch, target := setupOrchestrator(gt, t, blocksToSync) - system := shim.NewSystem(t) - orch.Hydrate(system) - - // Extract the system components - l2 := system.L2Network(match.L2ChainA) - verifierCL := l2.L2CLNode(match.FirstL2CL) - syncTester := l2.SyncTester(match.FirstSyncTester) - - sys := &presets.MinimalExternalEL{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), - L1EL: dsl.NewL1ELNode(system.L1Network(match.FirstL1Network).L1ELNode(match.FirstL1EL)), - L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), - L2CL: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), - L2ELReadOnly: dsl.NewL2ELNode(l2.L2ELNode(match.FirstL2EL), orch.ControlPlane()), - L2EL: 
dsl.NewL2ELNode(l2.L2ELNode(match.SecondL2EL), orch.ControlPlane()), - SyncTester: dsl.NewSyncTester(syncTester), - } - - return sys, target -} - -// setupOrchestrator initializes and configures the orchestrator for the test and returns the orchestrator and the target block number of the session -func setupOrchestrator(gt *testing.T, t devtest.T, blocksToSync uint64) (*sysgo.Orchestrator, uint64) { - l := t.Logger() - ctx := t.Ctx() - require := t.Require() - - config, err := GetNetworkPreset(os.Getenv("NETWORK_PRESET")) - require.NoError(err, "failed to initialize network preset") - - // Runtime configuration values - l.Info("Runtime configuration values for TestSyncTesterExtEL") - l.Info("NETWORK_PRESET", "value", os.Getenv("NETWORK_PRESET")) - l.Info("L2_NETWORK_NAME", "value", config.L2NetworkName) - l.Info("L1_CHAIN_ID", "value", config.L1ChainID) - l.Info("L2_EL_ENDPOINT", "value", config.L2ELEndpoint) - l.Info("L1_CL_BEACON_ENDPOINT", "value", config.L1CLBeaconEndpoint) - l.Info("L1_EL_ENDPOINT", "value", config.L1ELEndpoint) - l.Info("TAILSCALE_NETWORKING", "value", os.Getenv("TAILSCALE_NETWORKING")) - l.Info("L2_CL_SYNCMODE", "value", L2CLSyncMode) - - // Setup orchestrator - logger := testlog.Logger(gt, log.LevelInfo) - onFail := func(now bool) { - if now { - gt.FailNow() - } else { - gt.Fail() - } - } - onSkipNow := func() { - gt.SkipNow() - } - p := devtest.NewP(ctx, logger, onFail, onSkipNow) - gt.Cleanup(p.Close) - - // Fetch the latest block number from the remote L2EL node - cl, err := ethclient.DialContext(ctx, config.L2ELEndpoint) - require.NoError(err) - latestBlock, err := cl.BlockByNumber(ctx, nil) - require.NoError(err) - - initial := latestBlock.NumberU64() - 1000 - target := initial + blocksToSync - l.Info("LATEST_BLOCK", "latest_block", latestBlock.NumberU64(), "session_initial_block", initial, "target_block", target) - - opt := presets.WithExternalELWithSuperchainRegistry(config) - if L2CLSyncMode == sync.ELSync { - chainCfg := 
chaincfg.ChainByName(config.L2NetworkName) - if chainCfg == nil { - panic(fmt.Sprintf("network %s not found in superchain registry", config.L2NetworkName)) - } - opt = stack.Combine(opt, - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithELSyncActive(), - presets.WithSyncTesterELInitialState(eth.FCUState{ - Latest: initial, - Safe: 0, - // Need to set finalized to genesis to unskip EL Sync - Finalized: chainCfg.Genesis.L2.Number, - }), - ) - } else { - opt = stack.Combine(opt, - presets.WithSyncTesterELInitialState(eth.FCUState{ - Latest: initial, - Safe: initial, - Finalized: initial, - }), - ) - } - - var orch stack.Orchestrator = sysgo.NewOrchestrator(p, stack.SystemHook(opt)) - stack.ApplyOptionLifecycle(opt, orch) - - return orch.(*sysgo.Orchestrator), target -} - -func getSyncMode(envVar string) sync.Mode { - if value := os.Getenv(envVar); value == sync.ELSyncString { - return sync.ELSync - } - return sync.CLSync -} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go deleted file mode 100644 index f01e0f6781521..0000000000000 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/init_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package sync_tester_hfs - -import ( - "testing" - - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - "github.com/ethereum-optimism/optimism/op-core/forks" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" -) - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleWithSyncTester(), - presets.WithCompatibleTypes(compat.SysGo), - presets.WithHardforkSequentialActivation(forks.Bedrock, forks.Jovian, 6), - presets.WithNoDiscovery(), - 
stack.MakeCommon(sysgo.WithBatcherOption(func(id stack.ComponentID, cfg *bss.CLIConfig) { - // For supporting pre-delta batches - cfg.BatchType = derive.SingularBatchType - // For supporting pre-Fjord batches - cfg.CompressionAlgo = derive.Zlib - }))) -} diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/sync_tester_hfs_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/sync_tester_hfs_test.go index 26599966e70d9..187dda2388c90 100644 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/sync_tester_hfs_test.go +++ b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs/sync_tester_hfs_test.go @@ -3,17 +3,40 @@ package sync_tester_hfs import ( "testing" + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + "github.com/ethereum-optimism/optimism/op-core/forks" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) +func ptrToUint64(v uint64) *uint64 { + return &v +} + +func simpleWithSyncTesterOpts() []presets.Option { + return []presets.Option{ + presets.WithDeployerOptions(sysgo.WithHardforkSequentialActivation(forks.Bedrock, forks.Jovian, ptrToUint64(6))), + presets.WithGlobalL2CLOption(sysgo.L2CLOptionFn(func(_ devtest.T, id sysgo.ComponentTarget, cfg *sysgo.L2CLConfig) { + cfg.NoDiscovery = true + })), + presets.WithBatcherOption(func(id sysgo.ComponentTarget, cfg *bss.CLIConfig) { + // For supporting pre-delta batches + cfg.BatchType = derive.SingularBatchType + // For supporting pre-Fjord batches + cfg.CompressionAlgo = derive.Zlib + }), + } +} + func TestSyncTesterHardforks(gt *testing.T) { t := devtest.SerialT(gt) - sys := 
presets.NewSimpleWithSyncTester(t) + sys := presets.NewSimpleWithSyncTester(t, simpleWithSyncTesterOpts()...) require := t.Require() logger := t.Logger() ctx := t.Ctx() diff --git a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext/sync_tester_hfs_ext_test.go b/op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext/sync_tester_hfs_ext_test.go deleted file mode 100644 index 6f411509690ff..0000000000000 --- a/op-acceptance-tests/tests/sync_tester/sync_tester_hfs_ext/sync_tester_hfs_ext_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package sync_tester_hfs_ext - -import ( - "context" - "fmt" - "os" - "strconv" - "testing" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-core/forks" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-node/chaincfg" - "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" -) - -// Configuration defaults for op-sepolia -const ( - DefaultL2NetworkName = "op-sepolia" - DefaultL1ChainID = 11155111 - DefaultL2ELEndpoint = "https://ci-sepolia-l2.optimism.io" - DefaultL1CLBeaconEndpoint = "https://ci-sepolia-beacon.optimism.io" - DefaultL1ELEndpoint = "https://ci-sepolia-l1.optimism.io" - - // Tailscale networking endpoints - DefaultL2ELEndpointTailscale = "https://proxyd-l2-sepolia.primary.client.dev.oplabs.cloud" - DefaultL1CLBeaconEndpointTailscale = 
"https://beacon-api-proxy-sepolia.primary.client.dev.oplabs.cloud" - DefaultL1ELEndpointTailscale = "https://proxyd-l1-sepolia.primary.client.dev.oplabs.cloud" -) - -var ( - // Network upgrade block numbers for op-sepolia - networkUpgradeBlocks = map[forks.Name]uint64{ - forks.Canyon: 4089330, - forks.Delta: 5700330, - forks.Ecotone: 8366130, - forks.Fjord: 12597930, - forks.Granite: 15837930, - forks.Holocene: 20415330, - forks.Isthmus: 26551530, - } - - // Load configuration from environment variables with defaults - L2NetworkName = getEnvOrDefault("L2_NETWORK_NAME", DefaultL2NetworkName) - L1ChainID = eth.ChainIDFromUInt64(getEnvUint64OrDefault("L1_CHAIN_ID", DefaultL1ChainID)) - - // Default endpoints - L2ELEndpoint = getEnvOrDefault("L2_EL_ENDPOINT", DefaultL2ELEndpoint) - L1CLBeaconEndpoint = getEnvOrDefault("L1_CL_BEACON_ENDPOINT", DefaultL1CLBeaconEndpoint) - L1ELEndpoint = getEnvOrDefault("L1_EL_ENDPOINT", DefaultL1ELEndpoint) -) - -func TestSyncTesterHFS_Canyon_CLSync(gt *testing.T) { - hfsExt(gt, forks.Canyon, sync.CLSync) -} - -func TestSyncTesterHFS_Canyon_ELSync(gt *testing.T) { - hfsExt(gt, forks.Canyon, sync.ELSync) -} - -func TestSyncTesterHFS_Delta_CLSync(gt *testing.T) { - hfsExt(gt, forks.Delta, sync.CLSync) -} - -func TestSyncTesterHFS_Delta_ELSync(gt *testing.T) { - hfsExt(gt, forks.Delta, sync.ELSync) -} - -func TestSyncTesterHFS_Ecotone_CLSync(gt *testing.T) { - hfsExt(gt, forks.Ecotone, sync.CLSync) -} - -func TestSyncTesterHFS_Ecotone_ELSync(gt *testing.T) { - hfsExt(gt, forks.Ecotone, sync.ELSync) -} - -func TestSyncTesterHFS_Fjord_CLSync(gt *testing.T) { - hfsExt(gt, forks.Fjord, sync.CLSync) -} - -func TestSyncTesterHFS_Fjord_ELSync(gt *testing.T) { - hfsExt(gt, forks.Fjord, sync.ELSync) -} - -func TestSyncTesterHFS_Granite_CLSync(gt *testing.T) { - hfsExt(gt, forks.Granite, sync.CLSync) -} - -func TestSyncTesterHFS_Granite_ELSync(gt *testing.T) { - hfsExt(gt, forks.Granite, sync.ELSync) -} - -func TestSyncTesterHFS_Holocene_CLSync(gt 
*testing.T) { - hfsExt(gt, forks.Holocene, sync.CLSync) -} - -func TestSyncTesterHFS_Holocene_ELSync(gt *testing.T) { - hfsExt(gt, forks.Holocene, sync.ELSync) -} - -func TestSyncTesterHFS_Isthmus_CLSync(gt *testing.T) { - hfsExt(gt, forks.Isthmus, sync.CLSync) -} - -func TestSyncTesterHFS_Isthmus_ELSync(gt *testing.T) { - hfsExt(gt, forks.Isthmus, sync.ELSync) -} - -// getEnvOrDefault returns the environment variable value or the default if not set -func getEnvOrDefault(envVar, defaultValue string) string { - if value := os.Getenv(envVar); value != "" { - return value - } - return defaultValue -} - -// getEnvUint64OrDefault returns the environment variable value as uint64 or the default if not set -func getEnvUint64OrDefault(envVar string, defaultValue uint64) uint64 { - if value := os.Getenv(envVar); value != "" { - if parsed, err := strconv.ParseUint(value, 10, 64); err == nil { - return parsed - } - } - return defaultValue -} - -// setupOrchestrator initializes and configures the orchestrator for the test -func setupOrchestrator(gt *testing.T, t devtest.T, blk, targetBlock uint64, l2CLSyncMode sync.Mode) *sysgo.Orchestrator { - l := t.Logger() - - // Override configuration with Tailscale endpoints if Tailscale networking is enabled - l2ELEndpoint := L2ELEndpoint - l1CLBeaconEndpoint := L1CLBeaconEndpoint - l1ELEndpoint := L1ELEndpoint - - if os.Getenv("TAILSCALE_NETWORKING") == "true" { - l2ELEndpoint = getEnvOrDefault("L2_EL_ENDPOINT_TAILSCALE", DefaultL2ELEndpointTailscale) - l1CLBeaconEndpoint = getEnvOrDefault("L1_CL_BEACON_ENDPOINT_TAILSCALE", DefaultL1CLBeaconEndpointTailscale) - l1ELEndpoint = getEnvOrDefault("L1_EL_ENDPOINT_TAILSCALE", DefaultL1ELEndpointTailscale) - } - - // Setup orchestrator directly without TestMain - logger := testlog.Logger(gt, log.LevelInfo) - onFail := func(now bool) { - if now { - gt.FailNow() - } else { - gt.Fail() - } - } - onSkipNow := func() { - gt.SkipNow() - } - p := devtest.NewP(context.Background(), logger, onFail, 
onSkipNow) - gt.Cleanup(p.Close) - - // Runtime configuration values - l.Info("Runtime configuration values for TestSyncTesterExtEL") - l.Info("L2_NETWORK_NAME", "value", L2NetworkName) - l.Info("L1_CHAIN_ID", "value", L1ChainID) - l.Info("L2_EL_ENDPOINT", "value", l2ELEndpoint) - l.Info("L1_CL_BEACON_ENDPOINT", "value", l1CLBeaconEndpoint) - l.Info("L1_EL_ENDPOINT", "value", l1ELEndpoint) - l.Info("TAILSCALE_NETWORKING", "value", os.Getenv("TAILSCALE_NETWORKING")) - l.Info("L2_CL_SYNCMODE", "value", l2CLSyncMode) - - config := stack.ExtNetworkConfig{ - L2NetworkName: L2NetworkName, - L1ChainID: L1ChainID, - L2ELEndpoint: L2ELEndpoint, - L1CLBeaconEndpoint: L1CLBeaconEndpoint, - L1ELEndpoint: L1ELEndpoint, - } - - // Create orchestrator with the same configuration that was in TestMain - opt := presets.WithExternalELWithSuperchainRegistry(config) - if l2CLSyncMode == sync.ELSync { - chainCfg := chaincfg.ChainByName(config.L2NetworkName) - if chainCfg == nil { - panic(fmt.Sprintf("network %s not found in superchain registry", config.L2NetworkName)) - } - opt = stack.Combine(opt, - presets.WithExecutionLayerSyncOnVerifiers(), - presets.WithELSyncActive(), - presets.WithSyncTesterELInitialState(eth.FCUState{ - Latest: blk, - Safe: 0, - // Need to set finalized to genesis to unskip EL Sync - Finalized: chainCfg.Genesis.L2.Number, - }), - ) - } else { - opt = stack.Combine(opt, - presets.WithSyncTesterELInitialState(eth.FCUState{ - Latest: blk, - Safe: blk, - Finalized: blk, - }), - ) - } - - var orch stack.Orchestrator = sysgo.NewOrchestrator(p, stack.SystemHook(opt)) - stack.ApplyOptionLifecycle(opt, orch) - - return orch.(*sysgo.Orchestrator) -} - -func hfsExt(gt *testing.T, upgradeName forks.Name, l2CLSyncMode sync.Mode) { - t := devtest.ParallelT(gt) - l := t.Logger() - - // Initial block number to sync from before the upgrade - blk := networkUpgradeBlocks[upgradeName] - 5 - - blocksToSync := uint64(10) - targetBlock := blk + blocksToSync - // Initialize 
orchestrator - - orch := setupOrchestrator(gt, t, blk, targetBlock, l2CLSyncMode) - system := shim.NewSystem(t) - orch.Hydrate(system) - - l2 := system.L2Network(match.L2ChainA) - verifierCL := l2.L2CLNode(match.FirstL2CL) - syncTester := l2.SyncTester(match.FirstSyncTester) - - sys := &struct { - L2CL *dsl.L2CLNode - L2ELReadOnly *dsl.L2ELNode - L2EL *dsl.L2ELNode - SyncTester *dsl.SyncTester - L2 *dsl.L2Network - }{ - L2CL: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), - L2ELReadOnly: dsl.NewL2ELNode(l2.L2ELNode(match.FirstL2EL), orch.ControlPlane()), - L2EL: dsl.NewL2ELNode(l2.L2ELNode(match.SecondL2EL), orch.ControlPlane()), - SyncTester: dsl.NewSyncTester(syncTester), - L2: dsl.NewL2Network(l2, orch.ControlPlane()), - } - require := t.Require() - - ft := sys.L2.Escape().RollupConfig().ActivationTime(upgradeName) - var l2CLSyncStatus *eth.SyncStatus - attempts := 1000 - if l2CLSyncMode == sync.ELSync { - // After EL Sync is finished, the FCU state will advance to target immediately so less attempts - attempts = 5 - // Signal L2CL for finishing EL Sync - // Must send consecutive three payloads due to default EL Sync policy - for i := 2; i >= 0; i-- { - sys.L2CL.SignalTarget(sys.L2ELReadOnly, targetBlock-uint64(i)) - } - } else { - l2CLSyncStatus := sys.L2CL.WaitForNonZeroUnsafeTime(t.Ctx()) - require.Less(l2CLSyncStatus.UnsafeL2.Time, *ft, "L2CL unsafe time should be less than fork timestamp before upgrade") - } - - sys.L2CL.Reached(types.LocalUnsafe, targetBlock, attempts) - l.Info("L2CL unsafe reached", "targetBlock", targetBlock, "upgrade_name", upgradeName) - sys.L2CL.Reached(types.LocalSafe, targetBlock, attempts) - l.Info("L2CL safe reached", "targetBlock", targetBlock, "upgrade_name", upgradeName) - - l2CLSyncStatus = sys.L2CL.SyncStatus() - require.NotNil(l2CLSyncStatus, "L2CL should have sync status") - require.Greater(l2CLSyncStatus.UnsafeL2.Time, *ft, "L2CL unsafe time should be greater than fork timestamp after upgrade") - - unsafeL2Ref := 
l2CLSyncStatus.UnsafeL2 - ref := sys.L2EL.BlockRefByNumber(unsafeL2Ref.Number) - require.Equal(unsafeL2Ref.Hash, ref.Hash, "L2EL should be on the same block as L2CL") - - stSessions := sys.SyncTester.ListSessions() - require.Equal(len(stSessions), 1, "expect exactly one session") - - stSession := sys.SyncTester.GetSession(stSessions[0]) - require.GreaterOrEqualf(stSession.CurrentState.Latest, stSession.InitialState.Latest+blocksToSync, "SyncTester session CurrentState.Latest only advanced %d", stSession.CurrentState.Latest-stSession.InitialState.Latest) - require.GreaterOrEqualf(stSession.CurrentState.Safe, stSession.InitialState.Safe+blocksToSync, "SyncTester session CurrentState.Safe only advanced %d", stSession.CurrentState.Safe-stSession.InitialState.Safe) - - l.Info("SyncTester HFS Ext test completed successfully", "l2cl_chain_id", sys.L2CL.ID().ChainID(), "l2cl_sync_status", l2CLSyncStatus, "upgrade_name", upgradeName) -} diff --git a/op-chain-ops/script/cheatcodes_external.go b/op-chain-ops/script/cheatcodes_external.go index a193b6596ad73..c29608fe1ede3 100644 --- a/op-chain-ops/script/cheatcodes_external.go +++ b/op-chain-ops/script/cheatcodes_external.go @@ -6,6 +6,7 @@ import ( "errors" "fmt" "math/big" + "path" "strconv" "strings" "time" @@ -37,6 +38,9 @@ func (c *CheatCodesPrecompile) ProjectRoot() string { } func (c *CheatCodesPrecompile) getArtifact(input string) (*foundry.Artifact, error) { + if name, contract, ok := parseArtifactPathInput(input); ok { + return c.h.af.ReadArtifact(name, contract) + } // fetching by relative file path, or using a contract version, is not supported parts := strings.SplitN(input, ":", 2) name := parts[0] + ".sol" @@ -48,6 +52,25 @@ func (c *CheatCodesPrecompile) getArtifact(input string) (*foundry.Artifact, err return c.h.af.ReadArtifact(name, contract) } +func parseArtifactPathInput(input string) (name string, contract string, ok bool) { + clean := strings.TrimPrefix(path.Clean(strings.TrimSpace(input)), "./") + if 
!strings.HasSuffix(clean, ".json") { + return "", "", false + } + + for _, prefix := range []string{"forge-artifacts/", "out/"} { + clean = strings.TrimPrefix(clean, prefix) + } + + name, file := path.Split(clean) + name = strings.TrimSuffix(name, "/") + contract = strings.TrimSuffix(file, ".json") + if name == "" || contract == "" { + return "", "", false + } + return name, contract, true +} + // GetCode implements https://book.getfoundry.sh/cheatcodes/get-code func (c *CheatCodesPrecompile) GetCode(input string) ([]byte, error) { artifact, err := c.getArtifact(input) diff --git a/op-chain-ops/script/cheatcodes_external_test.go b/op-chain-ops/script/cheatcodes_external_test.go new file mode 100644 index 0000000000000..d0d0e5c7497e9 --- /dev/null +++ b/op-chain-ops/script/cheatcodes_external_test.go @@ -0,0 +1,41 @@ +package script + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" + "github.com/stretchr/testify/require" +) + +func TestParseArtifactPathInput(t *testing.T) { + t.Run("qualified path with forge artifacts prefix", func(t *testing.T) { + name, contract, ok := parseArtifactPathInput("forge-artifacts/ScriptExample.s.sol/ScriptExample.json") + require.True(t, ok) + require.Equal(t, "ScriptExample.s.sol", name) + require.Equal(t, "ScriptExample", contract) + }) + + t.Run("qualified path without prefix", func(t *testing.T) { + name, contract, ok := parseArtifactPathInput("ScriptExample.s.sol/FooBar.json") + require.True(t, ok) + require.Equal(t, "ScriptExample.s.sol", name) + require.Equal(t, "FooBar", contract) + }) + + t.Run("legacy contract identifier", func(t *testing.T) { + _, _, ok := parseArtifactPathInput("ScriptExample") + require.False(t, ok) + }) +} + +func TestGetCodeSupportsExplicitArtifactPath(t *testing.T) { + af := foundry.OpenArtifactsDir("./testdata/test-artifacts") + c := &CheatCodesPrecompile{h: &Host{af: af}} + + legacyCode, err := c.GetCode("ScriptExample.s.sol:ScriptExample") + require.NoError(t, 
err) + + pathCode, err := c.GetCode("forge-artifacts/ScriptExample.s.sol/ScriptExample.json") + require.NoError(t, err) + require.Equal(t, legacyCode, pathCode) +} diff --git a/op-devstack/README.md b/op-devstack/README.md index 39503c6723a6d..665f5fdddf1db 100644 --- a/op-devstack/README.md +++ b/op-devstack/README.md @@ -1,35 +1,25 @@ # op-devstack -Devstack provides a flexible test-frontend, optimized for integration and network acceptance testing. +Devstack provides typed test presets and DSL helpers for integration and acceptance testing. ## Overview ### Packages -- `devtest`: `T` (test-scope) and `P` (package-scope) test handles. -- `stack`: interfaces, IDs, common typing, core building blocks. -- `shim`: implementations to turn RPC clients / config objects into objects fitting the `stack`. -- `sysgo`: backend, hydrates a `stack.System` with `shim` objects that link to in-process Go services. -- `sysext`: backend, hydrates a `stack.System` with `shim` objects that link to a devnet-descriptor, like Kurtosis-managed services. -- `presets`: provides options that: - - configure an orchestrator (e.g. validate contents or add new contents) - - hydrate DSL test setups (e.g. turn a test handle in system with DSL utils) -- `dsl`: makes test-interactions with the `stack` more convenient and readable. +- `devtest`: test handles and lifecycle helpers. +- `stack`: shared interfaces and common types used across DSL and runtime code. +- `sysgo`: process-backed and in-process runtime constructors for local test systems. +- `presets`: typed constructors that build fresh systems per test and expose convenient frontends. +- `dsl`: test interaction helpers built on top of preset frontends. 
```mermaid graph TD - shim --implements interfaces--> stack - sysgo --hydrates system with shims--> shim - sysext --hydrates system with shims--> shim + sysgo --builds runtimes--> presets + presets --wraps runtimes--> dsl + dsl --uses shared interfaces--> stack - dsl --interacts with system--> stack - - presets --uses orchestrator--> sysgo - presets --uses orchestrator--> sysext - presets --creates DSL around system--> dsl - - userMain -- creates test setup --> presets - userTest -- uses test setup --> presets + userMain --creates test setup--> presets + userTest --uses test setup--> presets ``` @@ -38,20 +28,16 @@ graph TD There are some common patterns in this package: - `stack.X` (interface): presents a component -- `stack.X`-`Kind`: to identify the typing of the component. -- `stack.X`-`ID`: to identify the component. May be a combination of a name and chain-ID, e.g. there may be a default `sequencer` on each L2 chain. -- `shim.X`-`Config`: to provide data when instantiating a default component. -- `shim.New`-`X`: creates a default component (generally a shim, using RPC to wrap around the actual service) to implement an interface. -- `stack.Extensible`-`X` (interface): extension-interface, used during setup to add sub-components to a thing. - E.g. register and additional batch-submitter to an `ExtensibleL2Network`. +- `stack.X` (interface): stable protocol for a component category. +- `sysgo`: builds explicit runtime graphs for local test systems. +- `presets`: translate runtime references into typed preset outputs and DSL helpers. +- Preset constructors build explicit component graphs directly and return those references to tests. ### Components Available components: - `System`: a collection of chains and other components -- `Superchain`: a definition of L2s that share protocol rules -- `Cluster`: a definition of an interop dependency set. 
- `L1Network`: a L1 chain configuration and registered L1 components - `L1ELNode`: L1 execution-layer node, like geth or reth. - `L1CLNode`: L1 consensus-layer node. A full beacon node or a mock consensus replacement for testing. @@ -61,7 +47,7 @@ Available components: - `L2Batcher`: op-batcher, or equivalent - `L2Proposer`: op-proposer, or equivalent - `L2Challenger`: op-challenger, or equivalent - - `L2MetricsDashboard`: runs prometheus and grafana instances if any component registers metrics endpoints with the `Orchestrator` + - `L2MetricsDashboard`: runs prometheus and grafana instances if any component registers metrics endpoints - `Supervisor`: op-supervisor service, or equivalent - `Faucet`: util to fund eth to test accounts @@ -77,48 +63,14 @@ Available components: This is a `Key` coupled to an `ELNode` (L1 or L2). - `Funder`: a wallet combined with a faucet and EL node, to create pre-funded `EOA`s -### `Orchestrator` interface - -The `Orchestrator` is an intentionally minimalist interface. -This is implemented by different external packages, to provide backend-specific functionality, -and focused on creating and maintaining shared resources for tests, - -The orchestrator holds on to its own package-level test-handle and logger. -This package-level handle is not like the regular go-test variant, but rather meant for non-test-scoped contexts, -e.g. when running in tools or when running as global orchestrator inside a package-level `TestMain` function. - -The global orchestrator is set up with: -```go -var MyTestSetup presets.TestSetup[*MyTestResources] - -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithMyExampleResources(&MyTestSetup)) -} - -func TestMain(t *testing.T) { - resources := MyTestSetup(devtest.NewT(t)) - // resources.Sequencer.DoThing() -} -``` - -The preferred orchestrator kind is configured with env-var `DEVSTACK_ORCHESTRATOR`: -- `sysgo` instantiates an in-process Go backend, ready to spawn services on demand. 
-- `sysext` instantiates a devnet-descriptor based backend, - and attaches to the network (selection is configured with `DEVNET_ENV_URL`). - This may be a local kurtosis network, or a descriptor of an external network. - - #### `presets`, `Option`, `TestSetup` -In addition to `DoMain`, the `presets` package provides options, generally named `With...`. +The `presets` package provides options, generally named `With...`. Each `Option` may apply changes to one or more of the setup stages. E.g. some options may customize contract deployments, others may customize nodes, and others may do post-validation of test setups. -The `stack` package provides helper functions to sequence options, -and compose options with different stages. - A `TestSetup` is a function that prepares the frontend specific to a test, and returns a typed output that the test then may use. @@ -138,10 +90,9 @@ and returns a typed output that the test then may use. - Action-test - Implementations should take `client.RPC` (or equivalent), not raw endpoints. Dialing is best done by the system composer, which can customize retries, in-process RPC pipes, lazy-dialing, etc. as needed. - The system composer is responsible for tracking raw RPC URLs. These are not portable, and would expose too much low-level detail in the System interface. -- The system compose is responsible for the lifecycle of each component. E.g. kurtosis will keep running, but an in-process system will couple to the test lifecycle and shutdown via `t.Cleanup`. -- Test gates do not have direct access to the `Orchestrator`, since tests may share an orchestrator and should not critically modify what the orchestrator does. -- Orchestrators are shared: assuming a relatively static external kurtosis devnet or live network, the default operation for a package is to run against a single shared system. -- Orchestrators are configured in the `TestMain`, with generic presets, such that the different backends can support the preset or not. 
+- The system composer is responsible for the lifecycle of each component; in-process systems couple to the test lifecycle and shut down via `t.Cleanup`. +- Test gates do not have direct access to the `Orchestrator`; tests interact through typed preset outputs. +- Tests are isolated by default: each test constructs its own fresh system target. - There are no "chains": the word "chain" is reserved for the protocol typing of the onchain / state-transition related logic. Instead, there are "networks", which include the offchain resources and attached services of a chain. - Do not abbreviate "client" to "cl", to avoid confusion with "consensus-layer". @@ -149,9 +100,7 @@ and returns a typed output that the test then may use. ### The following environment variables can be used to configure devstack: -- `DEVSTACK_ORCHESTRATOR`: Configures the preferred orchestrator kind (see Orchestrator interface section above). - `DEVSTACK_KEYS_SALT`: Seeds the keys generated with `NewHDWallet`. This is useful for "isolating" test runs, and might be needed to reproduce CI and/or acceptance test runs. It can be any string, including the empty one to use the "usual" devkeys. -- `DEVNET_ENV_URL`: Used when `DEVSTACK_ORCHESTRATOR=sysext` to specify the network descriptor URL. - `DEVNET_EXPECT_PRECONDITIONS_MET`: This can be set of force test failures when their pre-conditions are not met, which would otherwise result in them being skipped. This is helpful in particular for runs that do intend to run specific tests (as opposed to whatever is available). `op-acceptor` does set that variable, for example. 
### Rust stack env vars: diff --git a/op-devstack/compat/compat.go b/op-devstack/compat/compat.go deleted file mode 100644 index 966fe2769f8bc..0000000000000 --- a/op-devstack/compat/compat.go +++ /dev/null @@ -1,11 +0,0 @@ -package compat - -type Type string - -const ( - SysGo Type = "sysgo" - Kurtosis Type = "kurtosis" - Persistent Type = "persistent" - - CompatErrorCode = 42 -) diff --git a/op-devstack/dsl/bridge.go b/op-devstack/dsl/bridge.go index 3b228b67a6e0f..b1bd6a88596d8 100644 --- a/op-devstack/dsl/bridge.go +++ b/op-devstack/dsl/bridge.go @@ -12,7 +12,6 @@ import ( gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" nodebindings "github.com/ethereum-optimism/optimism/op-node/bindings" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -66,7 +65,7 @@ func NewStandardBridge(t devtest.T, l2Network *L2Network, l1EL *L1ELNode) *Stand bindings.WithClient(l1Client), bindings.WithTo(l1PortalAddr), bindings.WithTest(t)) - l2Client := l2Network.inner.L2ELNode(match.FirstL2EL).EthClient() + l2Client := l2Network.PrimaryEL().EthClient() l2tol1MessagePasser := bindings.NewBindings[bindings.L2ToL1MessagePasser]( bindings.WithClient(l2Client), bindings.WithTo(predeploys.L2ToL1MessagePasserAddr), diff --git a/op-devstack/dsl/cluster.go b/op-devstack/dsl/cluster.go deleted file mode 100644 index ae0509192b664..0000000000000 --- a/op-devstack/dsl/cluster.go +++ /dev/null @@ -1,26 +0,0 @@ -package dsl - -import "github.com/ethereum-optimism/optimism/op-devstack/stack" - -// Cluster wraps a stack.Cluster interface for DSL operations -type Cluster struct { - commonImpl - inner stack.Cluster -} - -// NewCluster creates a new Cluster DSL wrapper -func NewCluster(inner stack.Cluster) *Cluster { - return 
&Cluster{ - commonImpl: commonFromT(inner.T()), - inner: inner, - } -} - -func (c *Cluster) String() string { - return c.inner.ID().String() -} - -// Escape returns the underlying stack.Cluster -func (c *Cluster) Escape() stack.Cluster { - return c.inner -} diff --git a/op-devstack/dsl/conductor.go b/op-devstack/dsl/conductor.go index 56298880f44f9..fb94fc17cfb77 100644 --- a/op-devstack/dsl/conductor.go +++ b/op-devstack/dsl/conductor.go @@ -32,7 +32,7 @@ func NewConductor(inner stack.Conductor) *Conductor { } func (c *Conductor) String() string { - return c.inner.ID().String() + return c.inner.Name() } func (c *Conductor) Escape() stack.Conductor { diff --git a/op-devstack/dsl/ecotone_fees.go b/op-devstack/dsl/ecotone_fees.go index 3323b42a0b0f5..898ac06787f5e 100644 --- a/op-devstack/dsl/ecotone_fees.go +++ b/op-devstack/dsl/ecotone_fees.go @@ -5,7 +5,6 @@ import ( "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -45,7 +44,7 @@ func NewEcotoneFees(t devtest.T, l2Network *L2Network) *EcotoneFees { } func (ef *EcotoneFees) ValidateTransaction(from *EOA, to *EOA, amount *big.Int) EcotoneFeesValidationResult { - client := ef.l2Network.inner.L2ELNode(match.FirstL2EL).EthClient() + client := ef.l2Network.PrimaryEL().EthClient() startBalance := from.GetBalance() vaultsBefore := ef.getVaultBalances(client) diff --git a/op-devstack/dsl/faucet.go b/op-devstack/dsl/faucet.go index 1dbcccecf29cd..6ace03b466827 100644 --- a/op-devstack/dsl/faucet.go +++ b/op-devstack/dsl/faucet.go @@ -26,7 +26,7 @@ func NewFaucet(inner stack.Faucet) *Faucet { } func (f *Faucet) String() string { - return f.inner.ID().String() + return f.inner.Name() } // Escape returns the underlying 
stack.Faucet diff --git a/op-devstack/dsl/fb_ws_client.go b/op-devstack/dsl/fb_ws_client.go deleted file mode 100644 index be6149888feba..0000000000000 --- a/op-devstack/dsl/fb_ws_client.go +++ /dev/null @@ -1,174 +0,0 @@ -package dsl - -import ( - "context" - "errors" - "fmt" - "io" - "net" - "net/http" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum/go-ethereum/log" - "github.com/gorilla/websocket" -) - -type FlashblocksWSClientSet []*FlashblocksWSClient - -func NewFlashblocksWSClientSet(inner []stack.FlashblocksWSClient) FlashblocksWSClientSet { - flashblocksWSClients := make([]*FlashblocksWSClient, len(inner)) - for i, c := range inner { - flashblocksWSClients[i] = NewFlashblocksWSClient(c) - } - return flashblocksWSClients -} - -type FlashblocksWSClient struct { - commonImpl - inner stack.FlashblocksWSClient -} - -func NewFlashblocksWSClient(inner stack.FlashblocksWSClient) *FlashblocksWSClient { - return &FlashblocksWSClient{ - commonImpl: commonFromT(inner.T()), - inner: inner, - } -} - -func (c *FlashblocksWSClient) String() string { - return c.inner.ID().String() -} - -func (c *FlashblocksWSClient) Escape() stack.FlashblocksWSClient { - return c.inner -} - -func (c *FlashblocksWSClient) ListenFor(ctx context.Context, logger log.Logger, duration time.Duration, output chan<- []byte, done chan<- struct{}) error { - wsURL := c.Escape().WsUrl() - headers := c.Escape().WsHeaders() - return websocketListenFor(ctx, logger, wsURL, headers, duration, output, done) -} - -func websocketListenFor(ctx context.Context, logger log.Logger, wsURL string, headers http.Header, duration time.Duration, output chan<- []byte, done chan<- struct{}) error { - defer close(done) - - listenCtx, cancel := context.WithTimeout(ctx, duration) - defer cancel() - - logger.Debug("Testing WebSocket connection to", "url", wsURL, "headers", headers) - - // Log the headers for debug purposes - if headers != nil { - for key, values := range headers { 
- logger.Debug("Header", "key", key, "values", values) - } - } else { - logger.Debug("No headers provided") - } - - dialer := &websocket.Dialer{ - HandshakeTimeout: 6 * time.Second, - } - - // Always close the response body to prevent resource leaks - logger.Debug("Attempting WebSocket connection", "url", wsURL) - conn, resp, err := dialer.DialContext(listenCtx, wsURL, headers) - if err != nil { - if listenCtx.Err() != nil { - logger.Info("Context completed before WebSocket connection established", "reason", listenCtx.Err()) - return nil - } - logger.Error("WebSocket connection failed", "url", wsURL, "error", err) - if resp != nil { - logger.Error("HTTP response details", "status", resp.Status, "headers", resp.Header) - resp.Body.Close() - } - return fmt.Errorf("failed to connect to Flashblocks WebSocket endpoint %s: %w", wsURL, err) - } - - if resp != nil { - defer resp.Body.Close() - } - defer conn.Close() - - logger.Info("WebSocket connection established successfully", "url", wsURL, "reading_stream_for", duration) - go func() { - <-listenCtx.Done() - _ = conn.Close() - }() - - messageCount := 0 - for { - select { - case <-listenCtx.Done(): - logListenStop(logger, listenCtx.Err(), messageCount) - return nil - default: - } - - _, message, err := conn.ReadMessage() - if err != nil { - if listenCtx.Err() != nil { - logListenStop(logger, listenCtx.Err(), messageCount) - return nil - } - - // Check for close errors first - these are normal termination - if websocket.IsCloseError(err, websocket.CloseNormalClosure, websocket.CloseGoingAway, websocket.CloseNoStatusReceived) { - logger.Info("WebSocket connection closed by peer", "total_messages", messageCount) - return nil - } - - // Check if this is an unexpected close error (any close error not handled above) - var closeErr *websocket.CloseError - if errors.As(err, &closeErr) { - logger.Info("WebSocket connection closed unexpectedly", "code", closeErr.Code, "text", closeErr.Text, "total_messages", messageCount) - return 
nil - } - - // Check for EOF errors - connection was closed - if errors.Is(err, io.EOF) || errors.Is(err, io.ErrUnexpectedEOF) { - logger.Info("WebSocket connection closed (EOF)", "total_messages", messageCount) - return nil - } - - // Check for use of closed network connection - if errors.Is(err, net.ErrClosed) { - logger.Info("WebSocket connection closed (network)", "total_messages", messageCount) - return nil - } - - // Timeout errors are recoverable - keep reading - var netErr net.Error - if errors.As(err, &netErr) && netErr.Timeout() { - continue - } - - logger.Error("Error reading WebSocket message", "error", err, "message_count", messageCount) - return fmt.Errorf("error reading WebSocket message: %w", err) - } - - messageCount++ - logger.Debug("Received WebSocket message", "message_count", messageCount, "message_length", len(message)) - - select { - case output <- message: - logger.Debug("Message sent to output channel", "message_count", messageCount) - case <-listenCtx.Done(): - logListenStop(logger, listenCtx.Err(), messageCount) - return nil - } - } -} - -func logListenStop(logger log.Logger, reason error, messageCount int) { - switch { - case errors.Is(reason, context.DeadlineExceeded): - logger.Info("WebSocket read duration reached", "total_messages", messageCount) - case errors.Is(reason, context.Canceled): - logger.Info("WebSocket listener canceled", "total_messages", messageCount) - default: - logger.Info("WebSocket listener stopping", "total_messages", messageCount) - } -} diff --git a/op-devstack/dsl/fjord_fees.go b/op-devstack/dsl/fjord_fees.go index e6011819f9579..5e83055568c4e 100644 --- a/op-devstack/dsl/fjord_fees.go +++ b/op-devstack/dsl/fjord_fees.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/apis" 
"github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -48,7 +47,7 @@ func NewFjordFees(t devtest.T, l2Network *L2Network) *FjordFees { // ValidateTransaction validates the transaction and returns the validation result func (ff *FjordFees) ValidateTransaction(from *EOA, to *EOA, amount *big.Int) FjordFeesValidationResult { - client := ff.l2Network.inner.L2ELNode(match.FirstL2EL).EthClient() + client := ff.l2Network.PrimaryEL().EthClient() startBalance := from.GetBalance() vaultsBefore := ff.getVaultBalances(client) @@ -148,7 +147,7 @@ func (ff *FjordFees) validateFjordFeatures(receipt *types.Receipt, l1Fee *big.In ff.require.NotNil(receipt.L1Fee, "L1 fee should be present in Fjord") ff.require.True(l1Fee.Cmp(big.NewInt(0)) > 0, "L1 fee should be greater than 0 in Fjord") - client := ff.l2Network.inner.L2ELNode(match.FirstL2EL).EthClient() + client := ff.l2Network.PrimaryEL().EthClient() _, txs, err := client.InfoAndTxsByHash(ff.ctx, receipt.BlockHash) ff.require.NoError(err) diff --git a/op-devstack/dsl/funder.go b/op-devstack/dsl/funder.go index 525d716a97280..da40b463b888b 100644 --- a/op-devstack/dsl/funder.go +++ b/op-devstack/dsl/funder.go @@ -14,7 +14,7 @@ type Funder struct { } func NewFunder(w *HDWallet, f *Faucet, el ELNode) *Funder { - f.t.Require().Equal(f.inner.ID().ChainID(), el.ChainID(), "faucet and EL must be on same chain") + f.t.Require().Equal(f.inner.ChainID(), el.ChainID(), "faucet and EL must be on same chain") return &Funder{ commonImpl: commonFromT(w.t), wallet: w, diff --git a/op-devstack/dsl/l1_cl.go b/op-devstack/dsl/l1_cl.go index 2945caa6b1a80..13ccbbb4fc881 100644 --- a/op-devstack/dsl/l1_cl.go +++ b/op-devstack/dsl/l1_cl.go @@ -17,10 +17,22 @@ func NewL1CLNode(inner stack.L1CLNode) *L1CLNode { } func (cl *L1CLNode) String() string { - return cl.inner.ID().String() + return cl.inner.Name() } // Escape returns the underlying stack.L1CLNode func (cl *L1CLNode) Escape() 
stack.L1CLNode { return cl.inner } + +func (cl *L1CLNode) Start() { + lifecycle, ok := cl.inner.(stack.Lifecycle) + cl.require.Truef(ok, "L1CL node %s is not lifecycle-controllable", cl.inner.Name()) + lifecycle.Start() +} + +func (cl *L1CLNode) Stop() { + lifecycle, ok := cl.inner.(stack.Lifecycle) + cl.require.Truef(ok, "L1CL node %s is not lifecycle-controllable", cl.inner.Name()) + lifecycle.Stop() +} diff --git a/op-devstack/dsl/l1_el.go b/op-devstack/dsl/l1_el.go index 207c8fb2e5759..9cff65d45171d 100644 --- a/op-devstack/dsl/l1_el.go +++ b/op-devstack/dsl/l1_el.go @@ -27,7 +27,7 @@ func NewL1ELNode(inner stack.L1ELNode) *L1ELNode { } func (el *L1ELNode) String() string { - return el.inner.ID().String() + return el.inner.Name() } // Escape returns the underlying stack.L1ELNode @@ -87,20 +87,20 @@ func (el *L1ELNode) BlockRefByNumber(number uint64) eth.L1BlockRef { // Composable with other lambdas to wait in parallel func (el *L1ELNode) ReorgTriggeredFn(target eth.L1BlockRef, attempts int) CheckFunc { return func() error { - el.log.Info("expecting chain to reorg on block ref", "id", el.inner.ID(), "chain", el.inner.ID().ChainID(), "target", target) + el.log.Info("expecting chain to reorg on block ref", "name", el.inner.Name(), "chain", el.inner.ChainID(), "target", target) return retry.Do0(el.ctx, attempts, &retry.FixedStrategy{Dur: 7 * time.Second}, func() error { reorged, err := el.inner.EthClient().BlockRefByNumber(el.ctx, target.Number) if err != nil { if strings.Contains(err.Error(), "not found") { // reorg is happening wait a bit longer - el.log.Info("chain still hasn't been reorged", "chain", el.inner.ID().ChainID(), "error", err) + el.log.Info("chain still hasn't been reorged", "chain", el.inner.ChainID(), "error", err) return err } return err } if target.Hash == reorged.Hash { // want not equal - el.log.Info("chain still hasn't been reorged", "chain", el.inner.ID().ChainID(), "ref", reorged) + el.log.Info("chain still hasn't been reorged", "chain", 
el.inner.ChainID(), "ref", reorged) return fmt.Errorf("expected head to reorg %s, but got %s", target, reorged) } @@ -108,8 +108,8 @@ func (el *L1ELNode) ReorgTriggeredFn(target eth.L1BlockRef, attempts int) CheckF return fmt.Errorf("expected parent of target to be the same as the parent of the reorged head, but they are different") } - el.log.Info("reorg on divergence block", "chain", el.inner.ID().ChainID(), "pre_blockref", target) - el.log.Info("reorg on divergence block", "chain", el.inner.ID().ChainID(), "post_blockref", reorged) + el.log.Info("reorg on divergence block", "chain", el.inner.ChainID(), "pre_blockref", target) + el.log.Info("reorg on divergence block", "chain", el.inner.ChainID(), "post_blockref", reorged) return nil }) diff --git a/op-devstack/dsl/l1_network.go b/op-devstack/dsl/l1_network.go index c3815ad626278..09085238985d4 100644 --- a/op-devstack/dsl/l1_network.go +++ b/op-devstack/dsl/l1_network.go @@ -5,26 +5,29 @@ import ( "github.com/davecgh/go-spew/spew" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/eth" ) // L1Network wraps a stack.L1Network interface for DSL operations type L1Network struct { commonImpl - inner stack.L1Network + inner stack.L1Network + primaryEL *L1ELNode + primaryCL *L1CLNode } // NewL1Network creates a new L1Network DSL wrapper -func NewL1Network(inner stack.L1Network) *L1Network { +func NewL1Network(inner stack.L1Network, primaryEL *L1ELNode, primaryCL *L1CLNode) *L1Network { return &L1Network{ commonImpl: commonFromT(inner.T()), inner: inner, + primaryEL: primaryEL, + primaryCL: primaryCL, } } func (n *L1Network) String() string { - return n.inner.ID().String() + return n.inner.Name() } func (n *L1Network) ChainID() eth.ChainID { @@ -36,13 +39,23 @@ func (n *L1Network) Escape() stack.L1Network { return n.inner } +func (n *L1Network) PrimaryEL() *L1ELNode { + n.require.NotNil(n.primaryEL, 
"l1 network %s is missing a primary EL node", n.String()) + return n.primaryEL +} + +func (n *L1Network) PrimaryCL() *L1CLNode { + n.require.NotNil(n.primaryCL, "l1 network %s is missing a primary CL node", n.String()) + return n.primaryCL +} + func (n *L1Network) WaitForBlock() eth.BlockRef { - return NewL1ELNode(n.inner.L1ELNode(match.FirstL1EL)).WaitForBlock() + return n.PrimaryEL().WaitForBlock() } // PrintChain is used for testing/debugging, it prints the blockchain hashes and parent hashes to logs, which is useful when developing reorg tests func (n *L1Network) PrintChain() { - l1_el := n.inner.L1ELNode(match.FirstL1EL) + l1_el := n.PrimaryEL().Escape() unsafeHeadRef, err := l1_el.EthClient().InfoByLabel(n.ctx, "latest") n.require.NoError(err, "Expected to get latest block from L1 execution client") @@ -60,9 +73,9 @@ func (n *L1Network) PrintChain() { } func (n *L1Network) WaitForFinalization() eth.BlockRef { - return NewL1ELNode(n.inner.L1ELNode(match.FirstL1EL)).WaitForFinalization() + return n.PrimaryEL().WaitForFinalization() } func (n *L1Network) WaitForOnline() { - NewL1ELNode(n.inner.L1ELNode(match.FirstL1EL)).WaitForOnline() + n.PrimaryEL().WaitForOnline() } diff --git a/op-devstack/dsl/l2_batcher.go b/op-devstack/dsl/l2_batcher.go index 0a24b6166dcae..62b292b8ad6f3 100644 --- a/op-devstack/dsl/l2_batcher.go +++ b/op-devstack/dsl/l2_batcher.go @@ -25,7 +25,7 @@ func NewL2Batcher(inner stack.L2Batcher) *L2Batcher { } func (b *L2Batcher) String() string { - return b.inner.ID().String() + return b.inner.Name() } // Escape returns the underlying stack.L2Batcher @@ -45,12 +45,12 @@ func (b *L2Batcher) Stop() { } return err }) - require.NoError(b.t, err, fmt.Sprintf("Expected to be able to call StopBatcher API on chain %s, but got error", b.inner.ID().ChainID())) + require.NoError(b.t, err, fmt.Sprintf("Expected to be able to call StopBatcher API on chain %s, but got error", b.inner.ChainID())) } func (b *L2Batcher) Start() { err := retry.Do0(b.ctx, 3, 
retry.Exponential(), func() error { return b.inner.ActivityAPI().StartBatcher(b.ctx) }) - require.NoError(b.t, err, fmt.Sprintf("Expected to be able to call StartBatcher API on chain %s, but got error", b.inner.ID().ChainID())) + require.NoError(b.t, err, fmt.Sprintf("Expected to be able to call StartBatcher API on chain %s, but got error", b.inner.ChainID())) } diff --git a/op-devstack/dsl/l2_challenger.go b/op-devstack/dsl/l2_challenger.go index bc3d6bd9e38f6..cafa071cedd26 100644 --- a/op-devstack/dsl/l2_challenger.go +++ b/op-devstack/dsl/l2_challenger.go @@ -17,7 +17,7 @@ func NewL2Challenger(inner stack.L2Challenger) *L2Challenger { } func (c *L2Challenger) String() string { - return c.inner.ID().String() + return c.inner.Name() } // Escape returns the underlying stack.L2Challenger diff --git a/op-devstack/dsl/l2_cl.go b/op-devstack/dsl/l2_cl.go index b4b6b4ee77023..83c9586623530 100644 --- a/op-devstack/dsl/l2_cl.go +++ b/op-devstack/dsl/l2_cl.go @@ -19,25 +19,25 @@ import ( // L2CLNode wraps a stack.L2CLNode interface for DSL operations type L2CLNode struct { commonImpl - inner stack.L2CLNode - control stack.ControlPlane + inner stack.L2CLNode + managedPeers map[string]*L2CLNode } // NewL2CLNode creates a new L2CLNode DSL wrapper -func NewL2CLNode(inner stack.L2CLNode, control stack.ControlPlane) *L2CLNode { +func NewL2CLNode(inner stack.L2CLNode) *L2CLNode { return &L2CLNode{ - commonImpl: commonFromT(inner.T()), - inner: inner, - control: control, + commonImpl: commonFromT(inner.T()), + inner: inner, + managedPeers: make(map[string]*L2CLNode), } } -func (cl *L2CLNode) ID() stack.ComponentID { - return cl.inner.ID() +func (cl *L2CLNode) Name() string { + return cl.inner.Name() } func (cl *L2CLNode) String() string { - return cl.inner.ID().String() + return cl.inner.Name() } // Escape returns the underlying stack.L2CLNode @@ -50,11 +50,28 @@ func (cl *L2CLNode) SafeL2BlockRef() eth.L2BlockRef { } func (cl *L2CLNode) Start() { - 
cl.control.L2CLNodeState(cl.inner.ID(), stack.Start) + lifecycle, ok := cl.inner.(stack.Lifecycle) + cl.require.Truef(ok, "L2CL node %s is not lifecycle-controllable", cl.inner.Name()) + lifecycle.Start() + cl.restoreManagedPeers() } func (cl *L2CLNode) Stop() { - cl.control.L2CLNodeState(cl.inner.ID(), stack.Stop) + lifecycle, ok := cl.inner.(stack.Lifecycle) + cl.require.Truef(ok, "L2CL node %s is not lifecycle-controllable", cl.inner.Name()) + lifecycle.Stop() +} + +func (cl *L2CLNode) ManagePeer(peer *L2CLNode) { + cl.managedPeers[peer.Name()] = peer + peer.managedPeers[cl.Name()] = cl +} + +func (cl *L2CLNode) restoreManagedPeers() { + for _, peer := range cl.managedPeers { + cl.connectPeerRaw(peer) + peer.connectPeerRaw(cl) + } } func (cl *L2CLNode) StartSequencer() { @@ -125,7 +142,7 @@ func (cl *L2CLNode) HeadBlockRef(lvl types.SafetyLevel) eth.L2BlockRef { } func (cl *L2CLNode) ChainID() eth.ChainID { - return cl.inner.ID().ChainID() + return cl.inner.ChainID() } func (cl *L2CLNode) AwaitMinL1Processed(minL1 uint64) { @@ -144,7 +161,7 @@ func (cl *L2CLNode) AdvancedFn(lvl types.SafetyLevel, delta uint64, attempts int return func() error { initial := cl.HeadBlockRef(lvl) target := initial.Number + delta - cl.log.Info("Expecting chain to advance", "id", cl.inner.ID(), "chain", cl.ChainID(), "label", lvl, "delta", delta) + cl.log.Info("Expecting chain to advance", "name", cl.inner.Name(), "chain", cl.ChainID(), "label", lvl, "delta", delta) return cl.ReachedFn(lvl, target, attempts)() } } @@ -152,7 +169,7 @@ func (cl *L2CLNode) AdvancedFn(lvl types.SafetyLevel, delta uint64, attempts int func (cl *L2CLNode) NotAdvancedFn(lvl types.SafetyLevel, attempts int) CheckFunc { return func() error { initial := cl.HeadBlockRef(lvl) - logger := cl.log.With("id", cl.inner.ID(), "chain", cl.ChainID(), "label", lvl, "target", initial.Number) + logger := cl.log.With("name", cl.inner.Name(), "chain", cl.ChainID(), "label", lvl, "target", initial.Number) 
logger.Info("Expecting chain not to advance") for range attempts { time.Sleep(2 * time.Second) @@ -191,7 +208,7 @@ func (cl *L2CLNode) WaitForStall(lvl types.SafetyLevel) { // Composable with other lambdas to wait in parallel func (cl *L2CLNode) ReachedFn(lvl types.SafetyLevel, target uint64, attempts int) CheckFunc { return func() error { - logger := cl.log.With("id", cl.inner.ID(), "chain", cl.ChainID(), "label", lvl, "target", target) + logger := cl.log.With("name", cl.inner.Name(), "chain", cl.ChainID(), "label", lvl, "target", target) logger.Info("Expecting chain to reach") return retry.Do0(cl.ctx, attempts, &retry.FixedStrategy{Dur: 2 * time.Second}, func() error { @@ -234,7 +251,7 @@ func (cl *L2CLNode) RewindedFn(lvl types.SafetyLevel, delta uint64, attempts int initial := cl.HeadBlockRef(lvl) cl.require.GreaterOrEqual(initial.Number, delta, "cannot rewind before genesis") target := initial.Number - delta - logger := cl.log.With("id", cl.inner.ID(), "chain", cl.ChainID(), "label", lvl) + logger := cl.log.With("name", cl.inner.Name(), "chain", cl.ChainID(), "label", lvl) logger.Info("Expecting chain to rewind", "target", target, "delta", delta) // check rewind more aggressively, in shorter interval return retry.Do0(cl.ctx, attempts, &retry.FixedStrategy{Dur: 250 * time.Millisecond}, @@ -284,7 +301,7 @@ func (cl *L2CLNode) Rewinded(lvl types.SafetyLevel, delta uint64, attempts int) // ChainSyncStatus satisfies that the L2CLNode can provide sync status per chain func (cl *L2CLNode) ChainSyncStatus(chainID eth.ChainID, lvl types.SafetyLevel) eth.BlockID { - cl.require.Equal(chainID, cl.inner.ID().ChainID(), "chain ID mismatch") + cl.require.Equal(chainID, cl.inner.ChainID(), "chain ID mismatch") return cl.HeadBlockRef(lvl).ID() } @@ -338,6 +355,12 @@ func (cl *L2CLNode) Peers() *apis.PeerDump { } func (cl *L2CLNode) DisconnectPeer(peer *L2CLNode) { + delete(cl.managedPeers, peer.Name()) + delete(peer.managedPeers, cl.Name()) + cl.disconnectPeerRaw(peer) +} + 
+func (cl *L2CLNode) disconnectPeerRaw(peer *L2CLNode) { peerInfo := peer.PeerInfo() err := retry.Do0(cl.ctx, 3, retry.Exponential(), func() error { return cl.inner.P2PAPI().DisconnectPeer(cl.ctx, peerInfo.PeerID) @@ -346,6 +369,12 @@ func (cl *L2CLNode) DisconnectPeer(peer *L2CLNode) { } func (cl *L2CLNode) ConnectPeer(peer *L2CLNode) { + cl.managedPeers[peer.Name()] = peer + peer.managedPeers[cl.Name()] = cl + cl.connectPeerRaw(peer) +} + +func (cl *L2CLNode) connectPeerRaw(peer *L2CLNode) { peerInfo := peer.PeerInfo() cl.require.NotZero(len(peerInfo.Addresses), "failed to get peer address") // graceful backoff for p2p connection, to avoid dial backoff or connection refused error diff --git a/op-devstack/dsl/l2_el.go b/op-devstack/dsl/l2_el.go index e26a7e93bb6e7..a2d2b5f975159 100644 --- a/op-devstack/dsl/l2_el.go +++ b/op-devstack/dsl/l2_el.go @@ -11,6 +11,7 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/retry" suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -23,21 +24,19 @@ var emptyHash = common.Hash{} // L2ELNode wraps a stack.L2ELNode interface for DSL operations type L2ELNode struct { *elNode - inner stack.L2ELNode - control stack.ControlPlane + inner stack.L2ELNode } // NewL2ELNode creates a new L2ELNode DSL wrapper -func NewL2ELNode(inner stack.L2ELNode, control stack.ControlPlane) *L2ELNode { +func NewL2ELNode(inner stack.L2ELNode) *L2ELNode { return &L2ELNode{ - elNode: newELNode(commonFromT(inner.T()), inner), - inner: inner, - control: control, + elNode: newELNode(commonFromT(inner.T()), inner), + inner: inner, } } func (el *L2ELNode) String() string { - return el.inner.ID().String() + return el.inner.Name() } // 
Escape returns the underlying stack.L2ELNode @@ -45,8 +44,8 @@ func (el *L2ELNode) Escape() stack.L2ELNode { return el.inner } -func (el *L2ELNode) ID() stack.ComponentID { - return el.inner.ID() +func (el *L2ELNode) EthClient() apis.EthClient { + return el.inner.EthClient() } func (el *L2ELNode) BlockRefByLabel(label eth.BlockLabel) eth.L2BlockRef { @@ -103,7 +102,7 @@ func (el *L2ELNode) NotAdvancedFn(label eth.BlockLabel, attempts int) CheckFunc func (el *L2ELNode) ReachedFn(label eth.BlockLabel, target uint64, attempts int) CheckFunc { return func() error { - logger := el.log.With("id", el.inner.ID(), "chain", el.ChainID(), "label", label, "target", target) + logger := el.log.With("name", el.inner.Name(), "chain", el.ChainID(), "label", label, "target", target) logger.Info("Expecting L2EL to reach") return retry.Do0(el.ctx, attempts, &retry.FixedStrategy{Dur: 2 * time.Second}, func() error { @@ -130,20 +129,20 @@ func (el *L2ELNode) BlockRefByNumber(num uint64) eth.L2BlockRef { // Composable with other lambdas to wait in parallel func (el *L2ELNode) ReorgTriggeredFn(target eth.L2BlockRef, attempts int) CheckFunc { return func() error { - el.log.Info("expecting chain to reorg on block ref", "id", el.inner.ID(), "chain", el.inner.ID().ChainID(), "target", target) + el.log.Info("expecting chain to reorg on block ref", "name", el.inner.Name(), "chain", el.inner.ChainID(), "target", target) return retry.Do0(el.ctx, attempts, &retry.FixedStrategy{Dur: 2 * time.Second}, func() error { reorged, err := el.inner.EthClient().BlockRefByNumber(el.ctx, target.Number) if err != nil { if strings.Contains(err.Error(), "not found") { // reorg is happening wait a bit longer - el.log.Info("chain still hasn't been reorged", "chain", el.inner.ID().ChainID(), "error", err) + el.log.Info("chain still hasn't been reorged", "chain", el.inner.ChainID(), "error", err) return err } return err } if target.Hash == reorged.Hash { // want not equal - el.log.Info("chain still hasn't been 
reorged", "chain", el.inner.ID().ChainID(), "ref", reorged) + el.log.Info("chain still hasn't been reorged", "chain", el.inner.ChainID(), "ref", reorged) return fmt.Errorf("expected head to reorg %s, but got %s", target, reorged) } @@ -151,8 +150,8 @@ func (el *L2ELNode) ReorgTriggeredFn(target eth.L2BlockRef, attempts int) CheckF return fmt.Errorf("expected parent of target to be the same as the parent of the reorged head, but they are different") } - el.log.Info("reorg on divergence block", "chain", el.inner.ID().ChainID(), "pre_blockref", target) - el.log.Info("reorg on divergence block", "chain", el.inner.ID().ChainID(), "post_blockref", reorged) + el.log.Info("reorg on divergence block", "chain", el.inner.ChainID(), "pre_blockref", target) + el.log.Info("reorg on divergence block", "chain", el.inner.ChainID(), "post_blockref", reorged) return nil }) @@ -186,7 +185,7 @@ func (el *L2ELNode) TransactionTimeout() time.Duration { // L1OriginReachedFn returns a lambda that waits for the L1 origin to reach the target block number. 
func (el *L2ELNode) L1OriginReachedFn(label eth.BlockLabel, l1OriginTarget uint64, attempts int) CheckFunc { return func() error { - logger := el.log.With("id", el.inner.ID(), "chain", el.ChainID(), "label", label, "l1OriginTarget", l1OriginTarget) + logger := el.log.With("name", el.inner.Name(), "chain", el.ChainID(), "label", label, "l1OriginTarget", l1OriginTarget) logger.Info("Expecting L2EL to reach L1 origin") return retry.Do0(el.ctx, attempts, &retry.FixedStrategy{Dur: 1 * time.Second}, func() error { @@ -236,12 +235,16 @@ func (el *L2ELNode) VerifyWithdrawalHashChangedIn(blockHash common.Hash) { } func (el *L2ELNode) Stop() { - el.log.Info("Stopping", "id", el.inner.ID()) - el.control.L2ELNodeState(el.inner.ID(), stack.Stop) + el.log.Info("Stopping", "name", el.inner.Name()) + lifecycle, ok := el.inner.(stack.Lifecycle) + el.require.Truef(ok, "L2EL node %s is not lifecycle-controllable", el.inner.Name()) + lifecycle.Stop() } func (el *L2ELNode) Start() { - el.control.L2ELNodeState(el.inner.ID(), stack.Start) + lifecycle, ok := el.inner.(stack.Lifecycle) + el.require.Truef(ok, "L2EL node %s is not lifecycle-controllable", el.inner.Name()) + lifecycle.Start() } func (el *L2ELNode) PeerWith(peer *L2ELNode) { @@ -331,7 +334,7 @@ func (el *L2ELNode) FinishedELSync(refNode *L2ELNode, unsafe, safe, finalized ui } func (el *L2ELNode) ChainSyncStatus(chainID eth.ChainID, lvl suptypes.SafetyLevel) eth.BlockID { - el.require.Equal(chainID, el.inner.ID().ChainID(), "chain ID mismatch") + el.require.Equal(chainID, el.inner.ChainID(), "chain ID mismatch") var blockRef eth.L2BlockRef switch lvl { case suptypes.Finalized: @@ -376,7 +379,7 @@ func (el *L2ELNode) MatchedUnsafe(refNode SyncStatusProvider, attempts int) { // WaitForPendingNonceMatchFn returns a lambda that waits for the pending nonce of an account to match the provided reference nonce func (el *L2ELNode) WaitForPendingNonceMatchFn(account common.Address, nonce uint64, attempts int, duration time.Duration) 
CheckFunc { return func() error { - logger := el.log.With("id", el.inner.ID(), "account", account) + logger := el.log.With("name", el.inner.Name(), "account", account) logger.Debug("Expecting pending nonce to match with reference nonce", "nonce", nonce) return retry.Do0(el.ctx, attempts, &retry.FixedStrategy{Dur: duration}, func() error { diff --git a/op-devstack/dsl/l2_network.go b/op-devstack/dsl/l2_network.go index 9a80d2bfe2ba9..bed0d91f790de 100644 --- a/op-devstack/dsl/l2_network.go +++ b/op-devstack/dsl/l2_network.go @@ -11,7 +11,6 @@ import ( "github.com/ethereum-optimism/optimism/op-core/forks" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -24,21 +23,35 @@ import ( // L2Network wraps a stack.L2Network interface for DSL operations type L2Network struct { commonImpl - inner stack.L2Network - control stack.ControlPlane + inner stack.L2Network + primaryEL *L2ELNode + primaryCL *L2CLNode + primaryL1 *L1ELNode + archiveEL *L2ELNode + publicRPC *L2ELNode } // NewL2Network creates a new L2Network DSL wrapper -func NewL2Network(inner stack.L2Network, control stack.ControlPlane) *L2Network { +func NewL2Network(inner stack.L2Network, primaryEL *L2ELNode, primaryCL *L2CLNode, primaryL1 *L1ELNode, archiveEL *L2ELNode, publicRPC *L2ELNode) *L2Network { + if archiveEL == nil { + archiveEL = primaryEL + } + if publicRPC == nil { + publicRPC = primaryEL + } return &L2Network{ commonImpl: commonFromT(inner.T()), inner: inner, - control: control, + primaryEL: primaryEL, + primaryCL: primaryCL, + primaryL1: primaryL1, + archiveEL: archiveEL, + publicRPC: publicRPC, } } func (n *L2Network) String() string { - return n.inner.ID().String() + return n.inner.Name() } 
func (n *L2Network) ChainID() eth.ChainID { @@ -55,18 +68,38 @@ func (n *L2Network) Escape() stack.L2Network { return n.inner } +func (n *L2Network) PrimaryEL() *L2ELNode { + n.require.NotNil(n.primaryEL, "l2 network %s is missing a primary EL node", n.String()) + return n.primaryEL +} + +func (n *L2Network) ArchiveEL() *L2ELNode { + n.require.NotNil(n.archiveEL, "l2 network %s is missing an archive EL node", n.String()) + return n.archiveEL +} + +func (n *L2Network) PrimaryCL() *L2CLNode { + n.require.NotNil(n.primaryCL, "l2 network %s is missing a primary CL node", n.String()) + return n.primaryCL +} + +func (n *L2Network) PrimaryL1EL() *L1ELNode { + n.require.NotNil(n.primaryL1, "l2 network %s is missing a primary L1 EL node", n.String()) + return n.primaryL1 +} + func (n *L2Network) L2ELNodes() []*L2ELNode { innerNodes := n.inner.L2ELNodes() nodes := make([]*L2ELNode, len(innerNodes)) for i, inner := range innerNodes { - nodes[i] = NewL2ELNode(inner, n.control) + nodes[i] = NewL2ELNode(inner) } return nodes } func (n *L2Network) CatchUpTo(o *L2Network) { - this := n.inner.L2ELNode(match.FirstL2EL) - other := o.inner.L2ELNode(match.FirstL2EL) + this := n.PrimaryEL().Escape() + other := o.PrimaryEL().Escape() err := wait.For(n.ctx, 5*time.Second, func() (bool, error) { a, err := this.EthClient().InfoByLabel(n.ctx, "latest") @@ -91,26 +124,19 @@ func (n *L2Network) CatchUpTo(o *L2Network) { } func (n *L2Network) WaitForBlock() eth.BlockRef { - return NewL2ELNode(n.inner.L2ELNode(match.FirstL2EL), n.control).WaitForBlock() + return n.PrimaryEL().WaitForBlock() } func (n *L2Network) PublicRPC() *L2ELNode { - if proxyds := match.Proxyd.Match(n.Escape().L2ELNodes()); len(proxyds) > 0 { - n.log.Info("PublicRPC - Using proxyd", "network", n.String()) - return NewL2ELNode(proxyds[0], n.control) - } - - n.log.Info("PublicRPC - Using fallback instead of proxyd", "network", n.String()) - // Fallback since sysgo doesn't have proxyd support at the moment, and may never get 
it. - return NewL2ELNode(n.inner.L2ELNode(match.FirstL2EL), n.control) + n.require.NotNil(n.publicRPC, "l2 network %s is missing a public RPC node", n.String()) + return n.publicRPC } // PrintChain is used for testing/debugging, it prints the blockchain hashes and parent hashes to logs, which is useful when developing reorg tests func (n *L2Network) PrintChain() { - l2_el := n.inner.L2ELNode(match.FirstL2EL) - l2_cl := n.inner.L2CLNode(match.FirstL2CL) - - l1_el := n.inner.L1().L1ELNode(match.FirstL1EL) + l2_el := n.PrimaryEL().Escape() + l2_cl := n.PrimaryCL().Escape() + l1_el := n.PrimaryL1EL().Escape() biAddr := n.inner.RollupConfig().BatchInboxAddress dgfAddr := n.inner.Deployment().DisputeGameFactoryProxyAddr() @@ -179,7 +205,7 @@ func (n *L2Network) PrintChain() { } func (n *L2Network) unsafeHeadRef() eth.L2BlockRef { - l2_el := n.inner.L2ELNode(match.FirstL2EL) + l2_el := n.PrimaryEL().Escape() unsafeHead, err := l2_el.EthClient().InfoByLabel(n.ctx, eth.Unsafe) n.require.NoError(err, "Expected to get latest block from L2 execution client") @@ -195,16 +221,14 @@ func (n *L2Network) IsActivated(timestamp uint64) bool { blockNum, err := n.Escape().RollupConfig().TargetBlockNumber(timestamp) n.require.NoError(err) - el := n.Escape().L2ELNode(match.FirstL2EL) - head, err := el.EthClient().BlockRefByLabel(n.ctx, eth.Unsafe) + head, err := n.PrimaryEL().EthClient().BlockRefByLabel(n.ctx, eth.Unsafe) n.require.NoError(err) return head.Number >= blockNum } func (n *L2Network) IsForkActive(fork forks.Name) bool { - el := NewL2ELNode(n.inner.L2ELNode(match.FirstL2EL), n.control) - timestamp := el.BlockRefByLabel(eth.Unsafe).Time + timestamp := n.PrimaryEL().BlockRefByLabel(eth.Unsafe).Time return n.IsForkActiveAt(fork, timestamp) } @@ -221,8 +245,7 @@ func (n *L2Network) LatestBlockBeforeTimestamp(t devtest.T, timestamp uint64) et blockNum, err := n.Escape().RollupConfig().TargetBlockNumber(timestamp) require.NoError(err) - el := n.Escape().L2ELNode(match.FirstL2EL) - 
head, err := el.EthClient().BlockRefByLabel(t.Ctx(), eth.Unsafe) + head, err := n.PrimaryEL().EthClient().BlockRefByLabel(t.Ctx(), eth.Unsafe) require.NoError(err) t.Logger().Info("Preparing", @@ -234,7 +257,7 @@ func (n *L2Network) LatestBlockBeforeTimestamp(t devtest.T, timestamp uint64) et return head } else { t.Logger().Info("Reached block already, proceeding with last block before timestamp") - v, err := el.EthClient().BlockRefByNumber(t.Ctx(), blockNum-1) + v, err := n.PrimaryEL().EthClient().BlockRefByNumber(t.Ctx(), blockNum-1) require.NoError(err) return v } @@ -244,20 +267,18 @@ func (n *L2Network) LatestBlockBeforeTimestamp(t devtest.T, timestamp uint64) et func (n *L2Network) AwaitActivation(t devtest.T, forkName rollup.ForkName) eth.BlockID { require := t.Require() - el := n.Escape().L2ELNode(match.FirstL2EL) - rollupCfg := n.Escape().RollupConfig() maybeActivationTime := rollupCfg.ActivationTime(forkName) require.NotNil(maybeActivationTime, "Required fork is not scheduled for activation") activationTime := *maybeActivationTime if activationTime == 0 { - block, err := el.EthClient().BlockRefByNumber(t.Ctx(), 0) + block, err := n.PrimaryEL().EthClient().BlockRefByNumber(t.Ctx(), 0) require.NoError(err, "Fork activated at genesis, but failed to get genesis block") return block.ID() } blockNum, err := rollupCfg.TargetBlockNumber(activationTime) require.NoError(err) - activationBlock := eth.ToBlockID(NewL2ELNode(el, n.control).WaitForBlockNumber(blockNum)) + activationBlock := eth.ToBlockID(n.PrimaryEL().WaitForBlockNumber(blockNum)) t.Logger().Info("Activation block", "block", activationBlock) return activationBlock @@ -282,7 +303,7 @@ func (n *L2Network) DeriveData(blocks int) (channels []derive.ChannelID, channel rollupCfg := n.inner.RollupConfig() batchInboxAddr := rollupCfg.BatchInboxAddress - l1EC := n.inner.L1().L1ELNode(match.FirstL1EL).EthClient() + l1EC := n.PrimaryL1EL().EthClient() // Get current L1 block number before starting to monitor 
startBlockRef, err := l1EC.BlockRefByLabel(ctx, eth.Unsafe) @@ -293,7 +314,7 @@ func (n *L2Network) DeriveData(blocks int) (channels []derive.ChannelID, channel // Monitor L1 blocks for batch transactions for range blocks { - NewL1ELNode(n.inner.L1().L1ELNode(match.FirstL1EL)).WaitForBlock() + n.PrimaryL1EL().WaitForBlock() // Get current block number currentBlockRef, err := l1EC.BlockRefByLabel(ctx, eth.Unsafe) diff --git a/op-devstack/dsl/l2_op_rbuilder.go b/op-devstack/dsl/l2_op_rbuilder.go index fef26ccfde583..4dc222e612715 100644 --- a/op-devstack/dsl/l2_op_rbuilder.go +++ b/op-devstack/dsl/l2_op_rbuilder.go @@ -8,10 +8,10 @@ import ( type OPRBuilderNodeSet []*OPRBuilderNode -func NewOPRBuilderNodeSet(inner []stack.OPRBuilderNode, control stack.ControlPlane) OPRBuilderNodeSet { +func NewOPRBuilderNodeSet(inner []stack.OPRBuilderNode) OPRBuilderNodeSet { oprbuilders := make([]*OPRBuilderNode, len(inner)) for i, c := range inner { - oprbuilders[i] = NewOPRBuilderNode(c, control) + oprbuilders[i] = NewOPRBuilderNode(c) } return oprbuilders } @@ -20,20 +20,18 @@ type OPRBuilderNode struct { commonImpl inner stack.OPRBuilderNode wsClient *opclient.WSClient - control stack.ControlPlane } -func NewOPRBuilderNode(inner stack.OPRBuilderNode, control stack.ControlPlane) *OPRBuilderNode { +func NewOPRBuilderNode(inner stack.OPRBuilderNode) *OPRBuilderNode { return &OPRBuilderNode{ commonImpl: commonFromT(inner.T()), inner: inner, wsClient: inner.FlashblocksClient(), - control: control, } } func (c *OPRBuilderNode) String() string { - return c.inner.ID().String() + return c.inner.Name() } func (c *OPRBuilderNode) Escape() stack.OPRBuilderNode { @@ -45,10 +43,14 @@ func (c *OPRBuilderNode) FlashblocksClient() *opclient.WSClient { } func (el *OPRBuilderNode) Stop() { - el.log.Info("Stopping", "id", el.inner.ID()) - el.control.OPRBuilderNodeState(el.inner.ID(), stack.Stop) + el.log.Info("Stopping", "name", el.inner.Name()) + lifecycle, ok := el.inner.(stack.Lifecycle) + 
el.require.Truef(ok, "op-rbuilder node %s is not lifecycle-controllable", el.inner.Name()) + lifecycle.Stop() } func (el *OPRBuilderNode) Start() { - el.control.OPRBuilderNodeState(el.inner.ID(), stack.Start) + lifecycle, ok := el.inner.(stack.Lifecycle) + el.require.Truef(ok, "op-rbuilder node %s is not lifecycle-controllable", el.inner.Name()) + lifecycle.Start() } diff --git a/op-devstack/dsl/l2_proposer.go b/op-devstack/dsl/l2_proposer.go index 655c07cf7ebef..679659db79470 100644 --- a/op-devstack/dsl/l2_proposer.go +++ b/op-devstack/dsl/l2_proposer.go @@ -17,7 +17,7 @@ func NewL2Proposer(inner stack.L2Proposer) *L2Proposer { } func (p *L2Proposer) String() string { - return p.inner.ID().String() + return p.inner.Name() } // Escape returns the underlying stack.L2Proposer diff --git a/op-devstack/dsl/multi_client.go b/op-devstack/dsl/multi_client.go index f9e2e9c2aa56e..0bae50cc0bc4e 100644 --- a/op-devstack/dsl/multi_client.go +++ b/op-devstack/dsl/multi_client.go @@ -47,7 +47,7 @@ func getEthClientsFromL2Network(network *L2Network) ([]HeaderProvider, error) { hps := make([]HeaderProvider, 0, len(stackNetwork.L2ELNodes())) for _, n := range stackNetwork.L2ELNodes() { ethClient := n.L2EthClient() - if !regexp.MustCompile(`snapsync-\d+$`).MatchString(n.ID().Key()) { + if !regexp.MustCompile(`snapsync-\d+$`).MatchString(n.Name()) { hps = append(hps, ethClient) } } diff --git a/op-devstack/dsl/operator_fee.go b/op-devstack/dsl/operator_fee.go index 98ab6cdf824ad..6d217b8cab164 100644 --- a/op-devstack/dsl/operator_fee.go +++ b/op-devstack/dsl/operator_fee.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum-optimism/optimism/op-core/forks" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" 
"github.com/ethereum-optimism/optimism/op-service/txintent/contractio" @@ -42,12 +41,12 @@ func NewOperatorFee(t devtest.T, l2Network *L2Network, l1EL *L1ELNode) *Operator bindings.WithTest(t)) l1Block := bindings.NewBindings[bindings.L1Block]( - bindings.WithClient(l2Network.inner.L2ELNode(match.FirstL2EL).EthClient()), + bindings.WithClient(l2Network.PrimaryEL().EthClient()), bindings.WithTo(predeploys.L1BlockAddr), bindings.WithTest(t)) gasPriceOracle := bindings.NewBindings[bindings.GasPriceOracle]( - bindings.WithClient(l2Network.inner.L2ELNode(match.FirstL2EL).EthClient()), + bindings.WithClient(l2Network.PrimaryEL().EthClient()), bindings.WithTo(predeploys.GasPriceOracleAddr), bindings.WithTest(t)) diff --git a/op-devstack/dsl/proofs/dispute_game_factory.go b/op-devstack/dsl/proofs/dispute_game_factory.go index 99fab3021345e..7727294866134 100644 --- a/op-devstack/dsl/proofs/dispute_game_factory.go +++ b/op-devstack/dsl/proofs/dispute_game_factory.go @@ -32,7 +32,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/dsl/contract" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" @@ -49,7 +48,7 @@ type DisputeGameFactory struct { addr common.Address l2CL *dsl.L2CLNode l2EL *dsl.L2ELNode - superNode stack.Supernode + superNode *dsl.Supernode gameHelper *GameHelper challengerCfg *challengerConfig.Config @@ -63,7 +62,7 @@ func NewDisputeGameFactory( dgfAddr common.Address, l2CL *dsl.L2CLNode, l2EL *dsl.L2ELNode, - superNode stack.Supernode, + superNode *dsl.Supernode, challengerCfg *challengerConfig.Config, ) *DisputeGameFactory { dgf := bindings.NewDisputeGameFactory(bindings.WithClient(ethClient), bindings.WithTo(dgfAddr), bindings.WithTest(t)) 
diff --git a/op-devstack/dsl/rollup_boost.go b/op-devstack/dsl/rollup_boost.go index 37c98803d383c..733a9e094e4d5 100644 --- a/op-devstack/dsl/rollup_boost.go +++ b/op-devstack/dsl/rollup_boost.go @@ -8,18 +8,17 @@ import ( type RollupBoostNodesSet []*RollupBoostNode -func NewRollupBoostNodesSet(inner []stack.RollupBoostNode, control stack.ControlPlane) RollupBoostNodesSet { +func NewRollupBoostNodesSet(inner []stack.RollupBoostNode) RollupBoostNodesSet { rollupBoostNodes := make([]*RollupBoostNode, len(inner)) for i, c := range inner { - rollupBoostNodes[i] = NewRollupBoostNode(c, control) + rollupBoostNodes[i] = NewRollupBoostNode(c) } return rollupBoostNodes } // RollupBoostNode wraps a stack.RollupBoostNode interface for DSL operations type RollupBoostNode struct { - inner stack.RollupBoostNode - control stack.ControlPlane + inner stack.RollupBoostNode } func (r *RollupBoostNode) Escape() stack.RollupBoostNode { @@ -27,10 +26,9 @@ func (r *RollupBoostNode) Escape() stack.RollupBoostNode { } // NewRollupBoostNode creates a new RollupBoostNode DSL wrapper -func NewRollupBoostNode(inner stack.RollupBoostNode, control stack.ControlPlane) *RollupBoostNode { +func NewRollupBoostNode(inner stack.RollupBoostNode) *RollupBoostNode { return &RollupBoostNode{ - inner, - control, + inner: inner, } } diff --git a/op-devstack/dsl/sequencer.go b/op-devstack/dsl/sequencer.go index 658b59ab7670b..04e398e538ca5 100644 --- a/op-devstack/dsl/sequencer.go +++ b/op-devstack/dsl/sequencer.go @@ -25,7 +25,7 @@ func NewTestSequencer(inner stack.TestSequencer) *TestSequencer { } func (s *TestSequencer) String() string { - return s.inner.ID().String() + return s.inner.Name() } func (s *TestSequencer) Escape() stack.TestSequencer { diff --git a/op-devstack/dsl/superchain.go b/op-devstack/dsl/superchain.go deleted file mode 100644 index d04ba437295b0..0000000000000 --- a/op-devstack/dsl/superchain.go +++ /dev/null @@ -1,26 +0,0 @@ -package dsl - -import 
"github.com/ethereum-optimism/optimism/op-devstack/stack" - -// Superchain wraps a stack.Superchain interface for DSL operations -type Superchain struct { - commonImpl - inner stack.Superchain -} - -// NewSuperchain creates a new Superchain DSL wrapper -func NewSuperchain(inner stack.Superchain) *Superchain { - return &Superchain{ - commonImpl: commonFromT(inner.T()), - inner: inner, - } -} - -func (s *Superchain) String() string { - return s.inner.ID().String() -} - -// Escape returns the underlying stack.Superchain -func (s *Superchain) Escape() stack.Superchain { - return s.inner -} diff --git a/op-devstack/dsl/supernode.go b/op-devstack/dsl/supernode.go index 7f4334d76b4ca..6e938abdae515 100644 --- a/op-devstack/dsl/supernode.go +++ b/op-devstack/dsl/supernode.go @@ -36,12 +36,12 @@ func NewSupernodeWithTestControl(inner stack.Supernode, testControl stack.Intero } } -func (s *Supernode) ID() stack.SupernodeID { - return s.inner.ID() +func (s *Supernode) Name() string { + return s.inner.Name() } func (s *Supernode) String() string { - return s.inner.ID().String() + return s.inner.Name() } // Escape returns the underlying stack.Supernode diff --git a/op-devstack/dsl/supervisor.go b/op-devstack/dsl/supervisor.go index 786136ae6f83a..04f5ba5ac6d1e 100644 --- a/op-devstack/dsl/supervisor.go +++ b/op-devstack/dsl/supervisor.go @@ -18,20 +18,18 @@ import ( type Supervisor struct { commonImpl - inner stack.Supervisor - control stack.ControlPlane + inner stack.Supervisor } -func NewSupervisor(inner stack.Supervisor, control stack.ControlPlane) *Supervisor { +func NewSupervisor(inner stack.Supervisor) *Supervisor { return &Supervisor{ commonImpl: commonFromT(inner.T()), inner: inner, - control: control, } } func (s *Supervisor) String() string { - return s.inner.ID().String() + return s.inner.Name() } func (s *Supervisor) Escape() stack.Supervisor { @@ -212,11 +210,15 @@ func (s *Supervisor) FetchSuperRootAtTimestamp(timestamp uint64) eth.SuperRootRe } func (s 
*Supervisor) Start() { - s.control.SupervisorState(s.inner.ID(), stack.Start) + lifecycle, ok := s.inner.(stack.Lifecycle) + s.require.Truef(ok, "supervisor %s is not lifecycle-controllable", s.inner.Name()) + lifecycle.Start() } func (s *Supervisor) Stop() { - s.control.SupervisorState(s.inner.ID(), stack.Stop) + lifecycle, ok := s.inner.(stack.Lifecycle) + s.require.Truef(ok, "supervisor %s is not lifecycle-controllable", s.inner.Name()) + lifecycle.Stop() } func (s *Supervisor) AddManagedL2CL(cl *L2CLNode) { diff --git a/op-devstack/example/init_test.go b/op-devstack/example/init_test.go deleted file mode 100644 index 79c83311d0e03..0000000000000 --- a/op-devstack/example/init_test.go +++ /dev/null @@ -1,27 +0,0 @@ -package example - -import ( - "testing" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/log/logfilter" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - presets.DoMain(m, presets.WithSimpleInterop(), - // Logging can be adjusted with filters globally - presets.WithPkgLogFilter( - logfilter.DefaultShow( // Random configuration - stack.KindSelector(stack.KindL2Proposer).Mute(), - stack.KindSelector(stack.KindL2Batcher).And(logfilter.Level(log.LevelError)).Show(), - stack.KindSelector(stack.KindL2CLNode).Mute(), - ), - // E.g. 
allow test interactions through while keeping background resource logs quiet - ), - presets.WithTestLogFilter(logfilter.DefaultMute(logfilter.Level(log.LevelInfo).Show())), - ) -} diff --git a/op-devstack/presets/cl_config.go b/op-devstack/presets/cl_config.go deleted file mode 100644 index 2e2fbc5168739..0000000000000 --- a/op-devstack/presets/cl_config.go +++ /dev/null @@ -1,58 +0,0 @@ -package presets - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-node/rollup/sync" -) - -func WithExecutionLayerSyncOnVerifiers() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.VerifierSyncMode = sync.ELSync - }))) -} - -func WithConsensusLayerSync() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.SequencerSyncMode = sync.CLSync - cfg.VerifierSyncMode = sync.CLSync - }))) -} - -func WithSafeDBEnabled() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.SafeDBPath = p.TempDir() - }))) -} - -func WithReqRespSyncDisabled() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.EnableReqRespSync = false - cfg.UseReqRespSync = false - }))) -} - -func WithSyncModeReqRespSync() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.UseReqRespSync = true - }))) -} - -func WithNoDiscovery() stack.CommonOption { - return 
stack.MakeCommon( - sysgo.WithGlobalL2CLOption(sysgo.L2CLOptionFn( - func(_ devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.NoDiscovery = true - }))) -} diff --git a/op-devstack/presets/flashblocks.go b/op-devstack/presets/flashblocks.go index 2ce8b1c522d49..9df4e9dce0c81 100644 --- a/op-devstack/presets/flashblocks.go +++ b/op-devstack/presets/flashblocks.go @@ -3,14 +3,13 @@ package presets import ( "time" - challengerConfig "github.com/ethereum-optimism/optimism/op-challenger/config" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/dsl/proofs" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-faucet/faucet" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type SingleChainWithFlashblocks struct { @@ -36,54 +35,141 @@ func (m *SingleChainWithFlashblocks) DisputeGameFactory() *proofs.DisputeGameFac } func (m *SingleChainWithFlashblocks) AdvanceTime(amount time.Duration) { - ttSys, ok := m.system.(stack.TimeTravelSystem) - m.T.Require().True(ok, "attempting to advance time on incompatible system") - ttSys.AdvanceTime(amount) + m.Minimal.AdvanceTime(amount) } -func WithSingleChainSystemWithFlashblocks() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainSystemWithFlashblocks(&sysgo.SingleChainSystemWithFlashblocksIDs{})) +func NewSingleChainWithFlashblocks(t devtest.T, opts ...Option) *SingleChainWithFlashblocks { + presetCfg, _ := collectSupportedPresetConfig(t, "NewSingleChainWithFlashblocks", opts, singleChainWithFlashblocksPresetSupportedOptionKinds) + runtime := sysgo.NewFlashblocksRuntimeWithConfig(t, 
presetCfg) + return singleChainWithFlashblocksFromRuntime(t, runtime) } -func NewSingleChainWithFlashblocks(t devtest.T) *SingleChainWithFlashblocks { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - l1Net := system.L1Network(match.FirstL1Network) - l2 := system.L2Network(match.Assume(t, match.L2ChainA)) - sequencerCL := l2.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - sequencerEL := l2.L2ELNode(match.Assume(t, match.MatchElemFn[stack.L2ELNode](func(el stack.L2ELNode) bool { - // In flashblocks topologies the active CL may be linked through rollup-boost. - // Selecting by sequencer key keeps us on the sequencer EL in both direct and proxied setups. - return el.ID().Key() == sequencerCL.ID().Key() - }))) - var challengerCfg *challengerConfig.Config - if len(l2.L2Challengers()) > 0 { - challengerCfg = l2.L2Challengers()[0].Config() +func singleChainWithFlashblocksFromRuntime(t devtest.T, runtime *sysgo.SingleChainRuntime) *SingleChainWithFlashblocks { + t.Require().NotNil(runtime.Flashblocks, "missing flashblocks support") + l1ChainID := runtime.L1Network.ChainID() + l2ChainID := runtime.L2Network.ChainID() + + l1Network := newPresetL1Network(t, "l1", runtime.L1Network.ChainConfig()) + l1EL := newL1ELFrontend(t, "l1", l1ChainID, runtime.L1EL.UserRPC()) + l1CL := newL1CLFrontend(t, "l1", l1ChainID, runtime.L1CL.BeaconHTTPAddr(), runtime.L1CL.FakePoS()) + l1Network.AddL1ELNode(l1EL) + l1Network.AddL1CLNode(l1CL) + + l2Chain := newPresetL2Network( + t, + "l2a", + runtime.L2Network.ChainConfig(), + runtime.L2Network.RollupConfig(), + runtime.L2Network.Deployment(), + newKeyring(runtime.Keys, t.Require()), + l1Network, + ) + + l2EL := newL2ELFrontend( + t, + "sequencer", + l2ChainID, + runtime.L2EL.UserRPC(), + runtime.L2EL.EngineRPC(), + runtime.L2EL.JWTPath(), + runtime.L2Network.RollupConfig(), + ) + l2CL := newL2CLFrontend( + t, + "sequencer", + l2ChainID, + runtime.L2CL.UserRPC(), + runtime.L2CL, + ) + + l2OPRBuilder 
:= newOPRBuilderFrontend( + t, + "sequencer-builder", + l2ChainID, + runtime.Flashblocks.Builder.UserRPC(), + runtime.Flashblocks.Builder.FlashblocksWSURL(), + runtime.L2Network.RollupConfig(), + runtime.Flashblocks.Builder, + ) + l2RollupBoost := newRollupBoostFrontend( + t, + "rollup-boost", + l2ChainID, + runtime.Flashblocks.RollupBoost.UserRPC(), + runtime.Flashblocks.RollupBoost.FlashblocksWSURL(), + runtime.L2Network.RollupConfig(), + runtime.Flashblocks.RollupBoost, + ) + testSequencer := newTestSequencerFrontend( + t, + runtime.TestSequencer.Name, + runtime.TestSequencer.AdminRPC, + runtime.TestSequencer.ControlRPC, + runtime.TestSequencer.JWTSecret, + ) + + l2Chain.AddL2ELNode(l2EL) + l2Chain.AddL2CLNode(l2CL) + l2Chain.AddOPRBuilderNode(l2OPRBuilder) + l2Chain.AddRollupBoostNode(l2RollupBoost) + l2CL.attachEL(l2EL) + l2CL.attachOPRBuilderNode(l2OPRBuilder) + l2CL.attachRollupBoostNode(l2RollupBoost) + + faucetL1Frontend := newFaucetFrontendForChain(t, runtime.FaucetService, l1ChainID) + faucetL2Frontend := newFaucetFrontendForChain(t, runtime.FaucetService, l2ChainID) + l1Network.AddFaucet(faucetL1Frontend) + l2Chain.AddFaucet(faucetL2Frontend) + faucetL1 := dsl.NewFaucet(faucetL1Frontend) + faucetL2 := dsl.NewFaucet(faucetL2Frontend) + + l1ELDSL := dsl.NewL1ELNode(l1EL) + l1CLDSL := dsl.NewL1CLNode(l1CL) + l2ELDSL := dsl.NewL2ELNode(l2EL) + l2CLDSL := dsl.NewL2CLNode(l2CL) + + minimal := &Minimal{ + Log: t.Logger(), + T: t, + L1Network: dsl.NewL1Network(l1Network, l1ELDSL, l1CLDSL), + L1EL: l1ELDSL, + L1CL: l1CLDSL, + L2Chain: dsl.NewL2Network(l2Chain, l2ELDSL, l2CLDSL, l1ELDSL, nil, nil), + L2EL: l2ELDSL, + L2CL: l2CLDSL, + Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation + FaucetL1: faucetL1, + FaucetL2: faucetL2, } + minimal.FunderL1 = dsl.NewFunder(minimal.Wallet, minimal.FaucetL1, minimal.L1EL) + minimal.FunderL2 = dsl.NewFunder(minimal.Wallet, minimal.FaucetL2, minimal.L2EL) - out := &SingleChainWithFlashblocks{ - L2OPRBuilder: 
dsl.NewOPRBuilderNode(l2.OPRBuilderNode(match.Assume(t, match.FirstOPRBuilderNode)), orch.ControlPlane()), - L2RollupBoost: dsl.NewRollupBoostNode(l2.RollupBoostNode(match.Assume(t, match.FirstRollupBoostNode)), orch.ControlPlane()), - Minimal: &Minimal{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - system: system, - L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), - L2Batcher: dsl.NewL2Batcher(l2.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - L2EL: dsl.NewL2ELNode(sequencerEL, orch.ControlPlane()), - L2CL: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), - Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation - FaucetL2: dsl.NewFaucet(l2.Faucet(match.Assume(t, match.FirstFaucet))), - challengerConfig: challengerCfg, - }, - TestSequencer: dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), + return &SingleChainWithFlashblocks{ + L2OPRBuilder: dsl.NewOPRBuilderNode(l2OPRBuilder), + L2RollupBoost: dsl.NewRollupBoostNode(l2RollupBoost), + Minimal: minimal, + TestSequencer: dsl.NewTestSequencer(testSequencer), + } +} + +func newFaucetFrontendForChain(t devtest.T, faucetService *faucet.Service, chainID eth.ChainID) *faucetFrontend { + faucetName, faucetRPC, ok := defaultFaucetForChain(faucetService, chainID) + t.Require().Truef(ok, "missing default faucet for chain %s", chainID) + + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), faucetRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + + return newPresetFaucet(t, faucetName, chainID, rpcCl) +} + +func defaultFaucetForChain(faucetService *faucet.Service, chainID eth.ChainID) (string, string, bool) { + if faucetService == nil { + return "", "", false + } + faucetID, ok := faucetService.Defaults()[chainID] + if !ok { + return "", "", false } - out.FaucetL1 = 
dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) - out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) - out.FunderL2 = dsl.NewFunder(out.Wallet, out.FaucetL2, out.L2EL) - return out + return faucetID.String(), faucetService.FaucetEndpoint(faucetID), true } diff --git a/op-devstack/presets/interop.go b/op-devstack/presets/interop.go index dfb8b76ba33ac..cc55d7d413c6d 100644 --- a/op-devstack/presets/interop.go +++ b/op-devstack/presets/interop.go @@ -11,25 +11,23 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/dsl/proofs" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + "github.com/ethereum-optimism/optimism/op-service/clock" ) type SingleChainInterop struct { - Log log.Logger - T devtest.T - system stack.ExtensibleSystem + Log log.Logger + T devtest.T + timeTravel *clock.AdvancingClock Supervisor *dsl.Supervisor SuperRoots *dsl.Supernode TestSequencer *dsl.TestSequencer - ControlPlane stack.ControlPlane L1Network *dsl.L1Network L1EL *dsl.L1ELNode + L1CL *dsl.L1CLNode L2ChainA *dsl.L2Network L2BatcherA *dsl.L2Batcher @@ -47,58 +45,13 @@ type SingleChainInterop struct { challengerConfig *challengerConfig.Config } -func NewSingleChainInterop(t devtest.T) *SingleChainInterop { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - - // At this point, either an op-supervisor (legacy) or op-supernode (replacement) is acceptable. - // The proof DSL depends only on super-roots and can be backed by either source. 
- t.Gate().True(len(system.Supervisors()) > 0 || len(system.Supernodes()) > 0, "expected at least one supervisor or supernode") - - t.Gate().Equal(len(system.TestSequencers()), 1, "expected exactly one test sequencer") - - l1Net := system.L1Network(match.FirstL1Network) - l2A := system.L2Network(match.Assume(t, match.L2ChainA)) - - var supervisor *dsl.Supervisor - var superRoots *dsl.Supernode - switch { - case len(system.Supervisors()) > 0: - supervisor = dsl.NewSupervisor(system.Supervisor(match.Assume(t, match.FirstSupervisor)), orch.ControlPlane()) - case len(system.Supernodes()) > 0: - supernode := system.Supernode(match.Assume(t, match.FirstSupernode)) - superRoots = dsl.NewSupernode(supernode) - default: - t.Gate().True(false, "expected at least one supervisor or supernode") - } - - var challengerCfg *challengerConfig.Config - if len(l2A.L2Challengers()) > 0 { - challengerCfg = l2A.L2Challengers()[0].Config() - } - - out := &SingleChainInterop{ - Log: t.Logger(), - T: t, - system: system, - TestSequencer: dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), - Supervisor: supervisor, - SuperRoots: superRoots, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(l1Net), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2ChainA: dsl.NewL2Network(l2A, orch.ControlPlane()), - L2ELA: dsl.NewL2ELNode(l2A.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), - L2CLA: dsl.NewL2CLNode(l2A.L2CLNode(match.Assume(t, match.FirstL2CL)), orch.ControlPlane()), - Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation - FaucetA: dsl.NewFaucet(l2A.Faucet(match.Assume(t, match.FirstFaucet))), - L2BatcherA: dsl.NewL2Batcher(l2A.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - challengerConfig: challengerCfg, - } - out.FaucetL1 = dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) - out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) - 
out.FunderA = dsl.NewFunder(out.Wallet, out.FaucetA, out.L2ELA) +// NewSingleChainInterop creates a fresh SingleChainInterop target for the current test. +// +// The target is created from the single-chain interop runtime plus any additional preset options. +func NewSingleChainInterop(t devtest.T, opts ...Option) *SingleChainInterop { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSingleChainInterop", opts, singleChainInteropPresetSupportedOptionKinds) + out := singleChainInteropFromRuntime(t, sysgo.NewSingleChainInteropRuntimeWithConfig(t, presetCfg)) + presetOpts.applyPreset(out) return out } @@ -109,19 +62,17 @@ func (s *SingleChainInterop) L2Networks() []*dsl.L2Network { } func (s *SingleChainInterop) DisputeGameFactory() *proofs.DisputeGameFactory { - supernode := s.system.Supernode(match.Assume(s.T, match.FirstSupernode)) - return proofs.NewDisputeGameFactory(s.T, s.L1Network, s.L1EL.EthClient(), s.L2ChainA.DisputeGameFactoryProxyAddr(), nil, nil, supernode, s.challengerConfig) + s.T.Require().NotNil(s.SuperRoots, "supernode not configured for this preset") + return proofs.NewDisputeGameFactory(s.T, s.L1Network, s.L1EL.EthClient(), s.L2ChainA.DisputeGameFactoryProxyAddr(), nil, nil, s.SuperRoots, s.challengerConfig) } func (s *SingleChainInterop) AdvanceTime(amount time.Duration) { - ttSys, ok := s.system.(stack.TimeTravelSystem) - s.T.Require().True(ok, "attempting to advance time on incompatible system") - ttSys.AdvanceTime(amount) + s.T.Require().NotNil(s.timeTravel, "attempting to advance time on incompatible system") + s.timeTravel.AdvanceTime(amount) } -// WithSingleChainInterop specifies a system that meets the SingleChainInterop criteria. 
-func WithSingleChainInterop() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainInteropSystem(&sysgo.DefaultSingleChainInteropSystemIDs{})) +func (s *SingleChainInterop) proofValidationContext() (devtest.T, *dsl.L1ELNode, []*dsl.L2Network) { + return s.T, s.L1EL, []*dsl.L2Network{s.L2ChainA} } type SimpleInterop struct { @@ -142,133 +93,89 @@ func (s *SimpleInterop) L2Networks() []*dsl.L2Network { } } -func (s *SimpleInterop) DisputeGameFactory() *proofs.DisputeGameFactory { - supernode := s.system.Supernode(match.Assume(s.T, match.FirstSupernode)) - return proofs.NewDisputeGameFactory(s.T, s.L1Network, s.L1EL.EthClient(), s.L2ChainA.DisputeGameFactoryProxyAddr(), nil, nil, supernode, s.challengerConfig) +func (s *SimpleInterop) proofValidationContext() (devtest.T, *dsl.L1ELNode, []*dsl.L2Network) { + return s.T, s.L1EL, s.L2Networks() } func (s *SingleChainInterop) StandardBridge(l2Chain *dsl.L2Network) *dsl.StandardBridge { return dsl.NewStandardBridge(s.T, l2Chain, s.L1EL) } -// WithSimpleInterop specifies a system that meets the SimpleInterop criteria. -func WithSimpleInterop() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultInteropSystem(&sysgo.DefaultInteropSystemIDs{})) -} - -// WithSuperInterop specifies a super root system that meets the SimpleInterop criteria. -func WithSuperInterop() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultInteropProofsSystem(&sysgo.DefaultInteropSystemIDs{})) +// NewSimpleInteropSuperProofs creates a fresh SimpleInterop target for the current test +// using the default super-root proofs system. 
+func NewSimpleInteropSuperProofs(t devtest.T, opts ...Option) *SimpleInterop { + presetCfg, _ := collectSupportedPresetConfig(t, "NewSimpleInteropSuperProofs", opts, simpleInteropSuperProofsPresetSupportedOptionKinds) + return simpleInteropFromRuntime(t, sysgo.NewSimpleInteropSuperProofsRuntimeWithConfig(t, presetCfg)) } -// WithSuperInteropSupernode specifies a super root system (for proofs) that sources super-roots via op-supernode -func WithSuperInteropSupernode() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSupernodeInteropProofsSystem(&sysgo.DefaultSupernodeInteropProofsSystemIDs{})) +// NewSimpleInteropSupernodeProofs creates a fresh SimpleInterop target for the current +// test using the super-root proofs system backed by op-supernode. +func NewSimpleInteropSupernodeProofs(t devtest.T, opts ...Option) *SimpleInterop { + presetCfg, _ := collectSupportedPresetConfig(t, "NewSimpleInteropSupernodeProofs", opts, supernodeProofsPresetSupportedOptionKinds) + return simpleInteropFromSupernodeProofsRuntime(t, sysgo.NewTwoL2SupernodeProofsRuntimeWithConfig(t, true, presetCfg)) } -// WithIsthmusSuperSupernode specifies a super root system (for proofs) that sources super-roots via op-supernode -func WithIsthmusSuperSupernode() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSupernodeIsthmusSuperProofsSystem(&sysgo.DefaultSupernodeInteropProofsSystemIDs{})) +// NewSingleChainInteropSupernodeProofs creates a fresh SingleChainInterop target for the +// current test using the single-chain super-root proofs system backed by op-supernode. 
+func NewSingleChainInteropSupernodeProofs(t devtest.T, opts ...Option) *SingleChainInterop { + presetCfg, _ := collectSupportedPresetConfig(t, "NewSingleChainInteropSupernodeProofs", opts, supernodeProofsPresetSupportedOptionKinds) + return singleChainInteropFromSupernodeProofsRuntime(t, sysgo.NewSingleChainSupernodeProofsRuntimeWithConfig(t, true, presetCfg)) } -func WithIsthmusSuper() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultIsthmusSuperProofsSystem(&sysgo.DefaultInteropSystemIDs{})) +// NewSimpleInteropIsthmusSuper creates a fresh SimpleInterop target for the current test +// using the Isthmus super-root system backed by op-supernode. +func NewSimpleInteropIsthmusSuper(t devtest.T, opts ...Option) *SimpleInterop { + presetCfg, _ := collectSupportedPresetConfig(t, "NewSimpleInteropIsthmusSuper", opts, supernodeProofsPresetSupportedOptionKinds) + return simpleInteropFromSupernodeProofsRuntime(t, sysgo.NewTwoL2SupernodeProofsRuntimeWithConfig(t, false, presetCfg)) } -// WithSingleChainIsthmusSuperSupernode specifies a single-chain super root system -// (for proofs) that sources super-roots via op-supernode, without interop at genesis. -func WithSingleChainIsthmusSuperSupernode() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainSupernodeIsthmusSuperProofsSystem(&sysgo.DefaultSingleChainSupernodeProofsSystemIDs{})) +// NewSingleChainInteropIsthmusSuper creates a fresh SingleChainInterop target for the +// current test using the single-chain Isthmus super-root system backed by op-supernode. 
+func NewSingleChainInteropIsthmusSuper(t devtest.T, opts ...Option) *SingleChainInterop { + presetCfg, _ := collectSupportedPresetConfig(t, "NewSingleChainInteropIsthmusSuper", opts, supernodeProofsPresetSupportedOptionKinds) + return singleChainInteropFromSupernodeProofsRuntime(t, sysgo.NewSingleChainSupernodeProofsRuntimeWithConfig(t, false, presetCfg)) } -// WithSingleChainSuperInteropSupernode specifies a single-chain super root system -// (for proofs) that sources super-roots via op-supernode, with interop at genesis. -func WithSingleChainSuperInteropSupernode() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainSupernodeInteropProofsSystem(&sysgo.DefaultSingleChainSupernodeProofsSystemIDs{})) -} - -// WithUnscheduledInterop adds a test-gate to not run the test if the interop upgrade is scheduled. -// If the backend is sysgo, it will disable the interop configuration -func WithUnscheduledInterop() stack.CommonOption { - return stack.Combine( - stack.MakeCommon(sysgo.WithDeployerOptions(func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { - for _, l2 := range builder.L2s() { - l2.WithForkAtOffset(forks.Interop, nil) - } - })), - stack.PostHydrate[stack.Orchestrator](func(sys stack.System) { - for _, l2Net := range sys.L2Networks() { - sys.T().Gate().Nil(l2Net.ChainConfig().InteropTime, "L2 (%s) must not have scheduled interop in chain config", l2Net.ID()) - sys.T().Gate().Nil(l2Net.RollupConfig().InteropTime, "L2 (%s) must not have scheduled interop in rollup config", l2Net.ID()) - } - }), - ) -} - -func NewSimpleInterop(t devtest.T) *SimpleInterop { - singleChain := NewSingleChainInterop(t) - orch := Orchestrator() - l2B := singleChain.system.L2Network(match.Assume(t, match.L2ChainB)) - out := &SimpleInterop{ - SingleChainInterop: *singleChain, - L2ChainB: dsl.NewL2Network(l2B, orch.ControlPlane()), - L2ELB: dsl.NewL2ELNode(l2B.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), - L2CLB: 
dsl.NewL2CLNode(l2B.L2CLNode(match.Assume(t, match.FirstL2CL)), orch.ControlPlane()), - FaucetB: dsl.NewFaucet(l2B.Faucet(match.Assume(t, match.FirstFaucet))), - L2BatcherB: dsl.NewL2Batcher(l2B.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - } - out.FunderB = dsl.NewFunder(out.Wallet, out.FaucetB, out.L2ELB) +// NewSimpleInterop creates a fresh SimpleInterop target for the current test. +// +// The target is created from the interop runtime plus any additional preset options. +func NewSimpleInterop(t devtest.T, opts ...Option) *SimpleInterop { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSimpleInterop", opts, singleChainInteropPresetSupportedOptionKinds) + out := simpleInteropFromRuntime(t, sysgo.NewSimpleInteropRuntimeWithConfig(t, presetCfg)) + presetOpts.applyPreset(out) return out } // WithSuggestedInteropActivationOffset suggests a hardfork time offset to use. // This is applied e.g. to the deployment if running against sysgo. -func WithSuggestedInteropActivationOffset(offset uint64) stack.CommonOption { - return stack.MakeCommon(sysgo.WithDeployerOptions( - func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { +func WithSuggestedInteropActivationOffset(offset uint64) Option { + return WithDeployerOptions( + func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { for _, l2Cfg := range builder.L2s() { l2Cfg.WithForkAtOffset(forks.Interop, &offset) } }, - )) + ) } // WithSequencingWindow suggests a sequencing window to use, and checks the maximum sequencing window. // The sequencing windows are expressed in number of L1 execution-layer blocks till sequencing window expiry. -// This is applied e.g. to the chain configuration setup if running against sysgo. 
-func WithSequencingWindow(suggestedSequencingWindow uint64, maxSequencingWindow uint64) stack.CommonOption { - return stack.Combine( - stack.MakeCommon(sysgo.WithDeployerOptions( - sysgo.WithSequencingWindow(suggestedSequencingWindow), - )), - // We can't configure sysext sequencing window, so we go with whatever is configured. - // The post-hydrate function will check that the sequencing window is within expected bounds. - stack.PostHydrate[stack.Orchestrator](func(sys stack.System) { - for _, l2Net := range sys.L2Networks() { - cfg := l2Net.RollupConfig() - l2Net.T().Gate().LessOrEqual(cfg.SeqWindowSize, maxSequencingWindow, - "sequencing window of chain %s must fit in max sequencing window size", l2Net.ChainID()) - } - }), - ) +// This is applied to runtime deployment/config validation. +func WithSequencingWindow(suggestedSequencingWindow uint64, maxSequencingWindow uint64) Option { + return option{ + kinds: optionKindDeployer | optionKindMaxSequencingWindow, + applyFn: func(cfg *sysgo.PresetConfig) { + cfg.DeployerOptions = append(cfg.DeployerOptions, sysgo.WithSequencingWindow(suggestedSequencingWindow)) + v := maxSequencingWindow + cfg.MaxSequencingWindow = &v + }, + } } // WithInteropNotAtGenesis adds a test-gate that checks // if the interop hardfork is configured at a non-genesis time. 
-func WithInteropNotAtGenesis() stack.CommonOption { - return stack.PostHydrate[stack.Orchestrator](func(sys stack.System) { - for _, l2Net := range sys.L2Networks() { - interopTime := l2Net.ChainConfig().InteropTime - sys.T().Gate().NotNil(interopTime, "must have interop") - sys.T().Gate().NotZero(*interopTime, "must not be at genesis") - } - }) -} - -func WithL2NetworkCount(count int) stack.CommonOption { - return stack.PostHydrate[stack.Orchestrator](func(sys stack.System) { - sys.T().Gate().Lenf(sys.L2Networks(), count, "Must have exactly %v chains", count) - }) +func WithInteropNotAtGenesis() Option { + return WithRequireInteropNotAtGenesis() } type MultiSupervisorInterop struct { @@ -283,30 +190,11 @@ type MultiSupervisorInterop struct { L2CLB2 *dsl.L2CLNode } -func WithMultiSupervisorInterop() stack.CommonOption { - return stack.MakeCommon(sysgo.MultiSupervisorInteropSystem(&sysgo.MultiSupervisorInteropSystemIDs{})) -} - -// NewMultiSupervisorInterop initializes below scenario: -// Two supervisor initialized, each managing two L2CLs per chains. -// Primary supervisor manages sequencer L2CLs for chain A, B. -// Secondary supervisor manages verifier L2CLs for chain A, B. -// Each L2CLs per chain is connected via P2P. 
-func NewMultiSupervisorInterop(t devtest.T) *MultiSupervisorInterop { - simpleInterop := NewSimpleInterop(t) - orch := Orchestrator() - - l2A := simpleInterop.system.L2Network(match.Assume(t, match.L2ChainA)) - l2B := simpleInterop.system.L2Network(match.Assume(t, match.L2ChainB)) - out := &MultiSupervisorInterop{ - SimpleInterop: *simpleInterop, - SupervisorSecondary: dsl.NewSupervisor(simpleInterop.system.Supervisor(match.Assume(t, match.SecondSupervisor)), orch.ControlPlane()), - L2ELA2: dsl.NewL2ELNode(l2A.L2ELNode(match.Assume(t, match.SecondL2EL)), orch.ControlPlane()), - L2CLA2: dsl.NewL2CLNode(l2A.L2CLNode(match.Assume(t, match.SecondL2CL)), orch.ControlPlane()), - L2ELB2: dsl.NewL2ELNode(l2B.L2ELNode(match.Assume(t, match.SecondL2EL)), orch.ControlPlane()), - L2CLB2: dsl.NewL2CLNode(l2B.L2CLNode(match.Assume(t, match.SecondL2CL)), orch.ControlPlane()), - } - return out +// NewMultiSupervisorInterop initializes a fresh multi-supervisor interop target for the +// current test. +func NewMultiSupervisorInterop(t devtest.T, opts ...Option) *MultiSupervisorInterop { + _, _ = collectSupportedPresetConfig(t, "NewMultiSupervisorInterop", opts, 0) + return multiSupervisorInteropFromRuntime(t, sysgo.NewMultiSupervisorInteropRuntime(t)) } // MinimalInteropNoSupervisor is like Minimal but with interop contracts deployed. @@ -315,40 +203,11 @@ type MinimalInteropNoSupervisor struct { Minimal } -// WithMinimalInteropNoSupervisor specifies a minimal system with interop contracts but no supervisor. -func WithMinimalInteropNoSupervisor() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultMinimalInteropSystem(&sysgo.DefaultMinimalSystemIDs{})) -} - -// NewMinimalInteropNoSupervisor creates a MinimalInteropNoSupervisor preset for acceptance tests. 
-func NewMinimalInteropNoSupervisor(t devtest.T) *MinimalInteropNoSupervisor { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - - l1Net := system.L1Network(match.FirstL1Network) - l2 := system.L2Network(match.Assume(t, match.L2ChainA)) - sequencerCL := l2.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - sequencerEL := l2.L2ELNode(match.Assume(t, match.EngineFor(sequencerCL))) - - out := &MinimalInteropNoSupervisor{ - Minimal: Minimal{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - system: system, - L1Network: dsl.NewL1Network(l1Net), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), - L2Batcher: dsl.NewL2Batcher(l2.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - L2EL: dsl.NewL2ELNode(sequencerEL, orch.ControlPlane()), - L2CL: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), - Wallet: dsl.NewRandomHDWallet(t, 30), - FaucetL2: dsl.NewFaucet(l2.Faucet(match.Assume(t, match.FirstFaucet))), - }, +// NewMinimalInteropNoSupervisor creates a fresh MinimalInteropNoSupervisor target for the +// current test. 
+func NewMinimalInteropNoSupervisor(t devtest.T, opts ...Option) *MinimalInteropNoSupervisor { + _, _ = collectSupportedPresetConfig(t, "NewMinimalInteropNoSupervisor", opts, 0) + return &MinimalInteropNoSupervisor{ + Minimal: *minimalFromRuntime(t, sysgo.NewMinimalInteropNoSupervisorRuntime(t)), } - out.FaucetL1 = dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) - out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) - out.FunderL2 = dsl.NewFunder(out.Wallet, out.FaucetL2, out.L2EL) - return out } diff --git a/op-devstack/presets/interop_from_runtime.go b/op-devstack/presets/interop_from_runtime.go new file mode 100644 index 0000000000000..c9e917ce09119 --- /dev/null +++ b/op-devstack/presets/interop_from_runtime.go @@ -0,0 +1,213 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func singleChainInteropFromRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *SingleChainInterop { + chainA := runtime.Chains["l2a"] + t.Require().NotNil(chainA, "missing l2a interop chain") + l1ChainID := runtime.L1Network.ChainID() + l2ChainID := chainA.Network.ChainID() + + l1Network := newPresetL1Network(t, "l1", runtime.L1Network.ChainConfig()) + l1EL := newL1ELFrontend(t, "l1", l1ChainID, runtime.L1EL.UserRPC()) + l1CL := newL1CLFrontend(t, "l1", l1ChainID, runtime.L1CL.BeaconHTTPAddr(), runtime.L1CL.FakePoS()) + l1Network.AddL1ELNode(l1EL) + l1Network.AddL1CLNode(l1CL) + + l2Chain := newPresetL2Network( + t, + "l2a", + chainA.Network.ChainConfig(), + chainA.Network.RollupConfig(), + chainA.Network.Deployment(), + newKeyring(runtime.Keys, t.Require()), + l1Network, + ) + + l2EL := newL2ELFrontend( + t, + "sequencer", + l2ChainID, + chainA.EL.UserRPC(), + chainA.EL.EngineRPC(), + chainA.EL.JWTPath(), + chainA.Network.RollupConfig(), + chainA.EL, + ) + l2CL := 
newL2CLFrontend( + t, + "sequencer", + l2ChainID, + chainA.CL.UserRPC(), + chainA.CL, + ) + l2CL.attachEL(l2EL) + l2Batcher := newL2BatcherFrontend(t, "main", l2ChainID, chainA.Batcher.UserRPC()) + l2Chain.AddL2ELNode(l2EL) + l2Chain.AddL2CLNode(l2CL) + l2Chain.AddL2Batcher(l2Batcher) + + supervisor := newSupervisorFrontend(t, "1-primary", runtime.PrimarySupervisor.UserRPC(), runtime.PrimarySupervisor) + testSequencer := newTestSequencerFrontend( + t, + runtime.TestSequencer.Name, + runtime.TestSequencer.AdminRPC, + runtime.TestSequencer.ControlRPC, + runtime.TestSequencer.JWTSecret, + ) + l1ELDSL := dsl.NewL1ELNode(l1EL) + l1CLDSL := dsl.NewL1CLNode(l1CL) + l2ELDSL := dsl.NewL2ELNode(l2EL) + l2CLDSL := dsl.NewL2CLNode(l2CL) + + faucetAFrontend := newFaucetFrontendForChain(t, runtime.FaucetService, l2ChainID) + faucetL1Frontend := newFaucetFrontendForChain(t, runtime.FaucetService, l1ChainID) + out := &SingleChainInterop{ + Log: t.Logger(), + T: t, + timeTravel: runtime.TimeTravel, + Supervisor: dsl.NewSupervisor(supervisor), + SuperRoots: nil, + TestSequencer: dsl.NewTestSequencer(testSequencer), + L1Network: dsl.NewL1Network(l1Network, l1ELDSL, l1CLDSL), + L1EL: l1ELDSL, + L1CL: l1CLDSL, + L2ChainA: dsl.NewL2Network(l2Chain, l2ELDSL, l2CLDSL, l1ELDSL, nil, nil), + L2BatcherA: dsl.NewL2Batcher(l2Batcher), + L2ELA: l2ELDSL, + L2CLA: l2CLDSL, + Wallet: dsl.NewRandomHDWallet(t, 30), + FaucetA: dsl.NewFaucet(faucetAFrontend), + FaucetL1: dsl.NewFaucet(faucetL1Frontend), + challengerConfig: runtime.L2ChallengerConfig, + } + l1Network.AddFaucet(faucetL1Frontend) + l2Chain.AddFaucet(faucetAFrontend) + out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) + out.FunderA = dsl.NewFunder(out.Wallet, out.FaucetA, out.L2ELA) + return out +} + +func simpleInteropFromRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *SimpleInterop { + singleChain := singleChainInteropFromRuntime(t, runtime) + chainB := runtime.Chains["l2b"] + t.Require().NotNil(chainB, "missing 
l2b interop chain") + l2BChainID := chainB.Network.ChainID() + + l1Network, ok := singleChain.L1Network.Escape().(*presetL1Network) + t.Require().True(ok, "expected preset L1 network") + + l2B := newPresetL2Network( + t, + "l2b", + chainB.Network.ChainConfig(), + chainB.Network.RollupConfig(), + chainB.Network.Deployment(), + newKeyring(runtime.Keys, t.Require()), + l1Network, + ) + + l2BEL := newL2ELFrontend( + t, + "sequencer", + l2BChainID, + chainB.EL.UserRPC(), + chainB.EL.EngineRPC(), + chainB.EL.JWTPath(), + chainB.Network.RollupConfig(), + chainB.EL, + ) + l2BCL := newL2CLFrontend(t, "sequencer", l2BChainID, chainB.CL.UserRPC(), chainB.CL) + l2BCL.attachEL(l2BEL) + l2BBatcher := newL2BatcherFrontend(t, "main", l2BChainID, chainB.Batcher.UserRPC()) + l2B.AddL2ELNode(l2BEL) + l2B.AddL2CLNode(l2BCL) + l2B.AddL2Batcher(l2BBatcher) + + l2BELDSL := dsl.NewL2ELNode(l2BEL) + l2BCLDSL := dsl.NewL2CLNode(l2BCL) + + faucetBFrontend := newFaucetFrontendForChain(t, runtime.FaucetService, l2BChainID) + out := &SimpleInterop{ + SingleChainInterop: *singleChain, + L2ChainB: dsl.NewL2Network(l2B, l2BELDSL, l2BCLDSL, singleChain.L1EL, nil, nil), + L2BatcherB: dsl.NewL2Batcher(l2BBatcher), + L2ELB: l2BELDSL, + L2CLB: l2BCLDSL, + FaucetB: dsl.NewFaucet(faucetBFrontend), + } + l2B.AddFaucet(faucetBFrontend) + out.FunderB = dsl.NewFunder(out.Wallet, out.FaucetB, out.L2ELB) + return out +} + +func multiSupervisorInteropFromRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *MultiSupervisorInterop { + simpleInterop := simpleInteropFromRuntime(t, runtime) + chainA := runtime.Chains["l2a"] + chainB := runtime.Chains["l2b"] + t.Require().NotNil(chainA, "missing l2a interop chain") + t.Require().NotNil(chainB, "missing l2b interop chain") + t.Require().NotNil(chainA.Followers, "missing l2a followers") + t.Require().NotNil(chainB.Followers, "missing l2b followers") + l2A2 := chainA.Followers["verifier"] + l2B2 := chainB.Followers["verifier"] + t.Require().NotNil(l2A2, "missing l2a 
verifier follower") + t.Require().NotNil(l2B2, "missing l2b verifier follower") + l2AChainID := chainA.Network.ChainID() + l2BChainID := chainB.Network.ChainID() + + l2ELA2 := newL2ELFrontend( + t, + "verifier", + l2AChainID, + l2A2.EL.UserRPC(), + l2A2.EL.EngineRPC(), + l2A2.EL.JWTPath(), + chainA.Network.RollupConfig(), + l2A2.EL, + ) + l2CLA2 := newL2CLFrontend(t, "verifier", l2AChainID, l2A2.CL.UserRPC(), l2A2.CL) + l2CLA2.attachEL(l2ELA2) + + l2ELB2 := newL2ELFrontend( + t, + "verifier", + l2BChainID, + l2B2.EL.UserRPC(), + l2B2.EL.EngineRPC(), + l2B2.EL.JWTPath(), + chainB.Network.RollupConfig(), + l2B2.EL, + ) + l2CLB2 := newL2CLFrontend(t, "verifier", l2BChainID, l2B2.CL.UserRPC(), l2B2.CL) + l2CLB2.attachEL(l2ELB2) + + l2ANet, ok := simpleInterop.L2ChainA.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network A") + l2ANet.AddL2ELNode(l2ELA2) + l2ANet.AddL2CLNode(l2CLA2) + l2BNet, ok := simpleInterop.L2ChainB.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network B") + l2BNet.AddL2ELNode(l2ELB2) + l2BNet.AddL2CLNode(l2CLB2) + + supervisorSecondary := newSupervisorFrontend( + t, + "2-secondary", + runtime.SecondarySupervisor.UserRPC(), + runtime.SecondarySupervisor, + ) + + return &MultiSupervisorInterop{ + SimpleInterop: *simpleInterop, + SupervisorSecondary: dsl.NewSupervisor(supervisorSecondary), + L2ELA2: dsl.NewL2ELNode(l2ELA2), + L2CLA2: dsl.NewL2CLNode(l2CLA2), + L2ELB2: dsl.NewL2ELNode(l2ELB2), + L2CLB2: dsl.NewL2CLNode(l2CLB2), + } +} diff --git a/op-devstack/presets/logging.go b/op-devstack/presets/logging.go deleted file mode 100644 index 70a8a06ac46e7..0000000000000 --- a/op-devstack/presets/logging.go +++ /dev/null @@ -1,81 +0,0 @@ -package presets - -import ( - "fmt" - "log/slog" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/log/logfilter" - "github.com/ethereum-optimism/optimism/op-service/logmods" -) - -// WithLogLevel sets the 
global minimum log-level. -func WithLogLevel(minLevel slog.Level) stack.CommonOption { - fn := func(h logfilter.FilterHandler) { - h.Set(logfilter.DefaultMute(logfilter.Level(minLevel).Show())) - } - return stack.Combine( - withPkgLogFiltering(fn), - withTestLogFiltering(fn), - ) -} - -// WithLogFilter replaces the default log filter with the provided additive -// filters. This completely overrides the default INFO-level filtering. -func WithLogFilter(filter logfilter.Filter) stack.CommonOption { - fn := func(h logfilter.FilterHandler) { - h.Set(filter) - } - return stack.Combine( - withPkgLogFiltering(fn), - withTestLogFiltering(fn), - ) -} - -// WithPkgLogFilter applies the log filters to package-scope interactions -// (i.e. to things like DSL interactions, not to background services). -// Calling this overrides the default INFO-level filtering. -func WithPkgLogFilter(filter logfilter.Filter) stack.CommonOption { - fn := func(h logfilter.FilterHandler) { - h.Set(filter) - } - return withPkgLogFiltering(fn) -} - -// WithTestLogFilter applies the log filters to test-scope interactions -// (i.e. to things like DSL interactions, not to background services). -// Calling this overrides the default INFO-level filtering. -func WithTestLogFilter(filter logfilter.Filter) stack.CommonOption { - fn := func(h logfilter.FilterHandler) { - h.Set(filter) - } - return withTestLogFiltering(fn) -} - -// withPkgLogFiltering creates an option to apply changes to the log-handlers of -// package-level logger and test-scopes. 
-func withPkgLogFiltering(fn func(h logfilter.FilterHandler)) stack.CommonOption { - return stack.BeforeDeploy(func(orch stack.Orchestrator) { - logger := orch.P().Logger() - h := logger.Handler() - filterHandler, ok := logmods.FindHandler[logfilter.FilterHandler](h) - if !ok { - logger.Warn("Cannot apply log-filters to pkg-scope log-handler", "type", fmt.Sprintf("%T", h)) - return - } - fn(filterHandler) - }) -} - -func withTestLogFiltering(fn func(h logfilter.FilterHandler)) stack.CommonOption { - return stack.PreHydrate[stack.Orchestrator](func(sys stack.System) { - logger := sys.T().Logger() - h := logger.Handler() - filterHandler, ok := logmods.FindHandler[logfilter.FilterHandler](h) - if !ok { - logger.Warn("Cannot apply log-filters to test-scope log-handler", "type", fmt.Sprintf("%T", h)) - return - } - fn(filterHandler) - }) -} diff --git a/op-devstack/presets/minimal.go b/op-devstack/presets/minimal.go index f9116ec0ea4fa..0ad50bbe418c0 100644 --- a/op-devstack/presets/minimal.go +++ b/op-devstack/presets/minimal.go @@ -9,20 +9,18 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/dsl/proofs" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/clock" ) type Minimal struct { - Log log.Logger - T devtest.T - ControlPlane stack.ControlPlane - system stack.ExtensibleSystem + Log log.Logger + T devtest.T + timeTravel *clock.AdvancingClock L1Network *dsl.L1Network L1EL *dsl.L1ELNode + L1CL *dsl.L1CLNode L2Chain *dsl.L2Network L2Batcher *dsl.L2Batcher @@ -55,50 +53,20 @@ func (m *Minimal) DisputeGameFactory() *proofs.DisputeGameFactory { } func (m *Minimal) AdvanceTime(amount time.Duration) { - 
ttSys, ok := m.system.(stack.TimeTravelSystem) - m.T.Require().True(ok, "attempting to advance time on incompatible system") - ttSys.AdvanceTime(amount) + m.T.Require().NotNil(m.timeTravel, "attempting to advance time on incompatible system") + m.timeTravel.AdvanceTime(amount) } -func WithMinimal() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{})) +func (m *Minimal) proofValidationContext() (devtest.T, *dsl.L1ELNode, []*dsl.L2Network) { + return m.T, m.L1EL, m.L2Networks() } -func NewMinimal(t devtest.T) *Minimal { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - - return minimalFromSystem(t, system, orch) -} - -func minimalFromSystem(t devtest.T, system stack.ExtensibleSystem, orch stack.Orchestrator) *Minimal { - l1Net := system.L1Network(match.FirstL1Network) - l2 := system.L2Network(match.Assume(t, match.L2ChainA)) - sequencerCL := l2.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - sequencerEL := l2.L2ELNode(match.Assume(t, match.EngineFor(sequencerCL))) - var challengerCfg *challengerConfig.Config - if len(l2.L2Challengers()) > 0 { - challengerCfg = l2.L2Challengers()[0].Config() - } - - out := &Minimal{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - system: system, - L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), - L2Batcher: dsl.NewL2Batcher(l2.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - L2EL: dsl.NewL2ELNode(sequencerEL, orch.ControlPlane()), - L2CL: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), - Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation - FaucetL2: dsl.NewFaucet(l2.Faucet(match.Assume(t, match.FirstFaucet))), - challengerConfig: challengerCfg, - } - out.FaucetL1 = dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) - 
out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) - out.FunderL2 = dsl.NewFunder(out.Wallet, out.FaucetL2, out.L2EL) +// NewMinimal creates a fresh Minimal target for the current test. +// +// The target is created from the minimal runtime plus any additional preset options. +func NewMinimal(t devtest.T, opts ...Option) *Minimal { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewMinimal", opts, minimalPresetSupportedOptionKinds) + out := minimalFromRuntime(t, sysgo.NewMinimalRuntimeWithConfig(t, presetCfg)) + presetOpts.applyPreset(out) return out } diff --git a/op-devstack/presets/minimal_external_el.go b/op-devstack/presets/minimal_external_el.go deleted file mode 100644 index 797d384fc8959..0000000000000 --- a/op-devstack/presets/minimal_external_el.go +++ /dev/null @@ -1,62 +0,0 @@ -package presets - -import ( - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" -) - -type MinimalExternalEL struct { - Log log.Logger - T devtest.T - ControlPlane stack.ControlPlane - - L1Network *dsl.L1Network - L1EL *dsl.L1ELNode - - L2Chain *dsl.L2Network - L2CL *dsl.L2CLNode - L2EL *dsl.L2ELNode - L2ELReadOnly *dsl.L2ELNode - - SyncTester *dsl.SyncTester -} - -func (m *MinimalExternalEL) L2Networks() []*dsl.L2Network { - return []*dsl.L2Network{ - m.L2Chain, - } -} - -func WithExternalELWithSuperchainRegistry(networkPreset stack.ExtNetworkConfig) stack.CommonOption { - return stack.MakeCommon(sysgo.ExternalELSystemWithEndpointAndSuperchainRegistry(&sysgo.DefaultMinimalExternalELSystemIDs{}, networkPreset)) -} - -func NewMinimalExternalEL(t devtest.T) *MinimalExternalEL { - orch := Orchestrator() - 
system := shim.NewSystem(t) - orch.Hydrate(system) - - l2 := system.L2Network(match.L2ChainA) - verifierCL := l2.L2CLNode(match.FirstL2CL) - syncTester := l2.SyncTester(match.FirstSyncTester) - - sys := &MinimalExternalEL{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), - L1EL: dsl.NewL1ELNode(system.L1Network(match.FirstL1Network).L1ELNode(match.FirstL1EL)), - L2Chain: dsl.NewL2Network(l2, orch.ControlPlane()), - L2CL: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), - L2ELReadOnly: dsl.NewL2ELNode(l2.L2ELNode(match.FirstL2EL), orch.ControlPlane()), - L2EL: dsl.NewL2ELNode(l2.L2ELNode(match.SecondL2EL), orch.ControlPlane()), - SyncTester: dsl.NewSyncTester(syncTester), - } - return sys -} diff --git a/op-devstack/presets/minimal_from_runtime.go b/op-devstack/presets/minimal_from_runtime.go new file mode 100644 index 0000000000000..0b8a3512d1bb1 --- /dev/null +++ b/op-devstack/presets/minimal_from_runtime.go @@ -0,0 +1,76 @@ +package presets + +import ( + challengerConfig "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +func minimalFromRuntime(t devtest.T, runtime *sysgo.SingleChainRuntime) *Minimal { + l1ChainID := runtime.L1Network.ChainID() + l2ChainID := runtime.L2Network.ChainID() + + l1Network := newPresetL1Network(t, "l1", runtime.L1Network.ChainConfig()) + l1EL := newL1ELFrontend(t, "l1", l1ChainID, runtime.L1EL.UserRPC()) + l1CL := newL1CLFrontend(t, "l1", l1ChainID, runtime.L1CL.BeaconHTTPAddr(), runtime.L1CL.FakePoS()) + l1Network.AddL1ELNode(l1EL) + l1Network.AddL1CLNode(l1CL) + + l2Chain := newPresetL2Network( + t, + "l2a", + runtime.L2Network.ChainConfig(), + runtime.L2Network.RollupConfig(), + runtime.L2Network.Deployment(), + newKeyring(runtime.Keys, t.Require()), + 
l1Network, + ) + l2EL := newL2ELFrontend(t, "sequencer", l2ChainID, runtime.L2EL.UserRPC(), runtime.L2EL.EngineRPC(), runtime.L2EL.JWTPath(), runtime.L2Network.RollupConfig()) + l2CL := newL2CLFrontend(t, "sequencer", l2ChainID, runtime.L2CL.UserRPC(), runtime.L2CL) + l2CL.attachEL(l2EL) + l2Batcher := newL2BatcherFrontend(t, "main", l2ChainID, runtime.L2Batcher.UserRPC()) + l2Chain.AddL2ELNode(l2EL) + l2Chain.AddL2CLNode(l2CL) + l2Chain.AddL2Batcher(l2Batcher) + + var challengerCfg *challengerConfig.Config + if runtime.L2Challenger != nil { + challengerCfg = runtime.L2Challenger.Config() + } + if challengerCfg != nil { + l2Chain.AddL2Challenger(newPresetL2Challenger(t, "main", l2ChainID, challengerCfg)) + } + + faucetL1Frontend := newFaucetFrontendForChain(t, runtime.FaucetService, l1ChainID) + faucetL2Frontend := newFaucetFrontendForChain(t, runtime.FaucetService, l2ChainID) + l1Network.AddFaucet(faucetL1Frontend) + l2Chain.AddFaucet(faucetL2Frontend) + faucetL1 := dsl.NewFaucet(faucetL1Frontend) + faucetL2 := dsl.NewFaucet(faucetL2Frontend) + + l1ELDSL := dsl.NewL1ELNode(l1EL) + l1CLDSL := dsl.NewL1CLNode(l1CL) + l2ELDSL := dsl.NewL2ELNode(l2EL) + l2CLDSL := dsl.NewL2CLNode(l2CL) + + out := &Minimal{ + Log: t.Logger(), + T: t, + timeTravel: runtime.TimeTravel, + L1Network: dsl.NewL1Network(l1Network, l1ELDSL, l1CLDSL), + L1EL: l1ELDSL, + L1CL: l1CLDSL, + L2Chain: dsl.NewL2Network(l2Chain, l2ELDSL, l2CLDSL, l1ELDSL, nil, nil), + L2Batcher: dsl.NewL2Batcher(l2Batcher), + L2EL: l2ELDSL, + L2CL: l2CLDSL, + Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation + FaucetL1: faucetL1, + FaucetL2: faucetL2, + challengerConfig: challengerCfg, + } + out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) + out.FunderL2 = dsl.NewFunder(out.Wallet, out.FaucetL2, out.L2EL) + return out +} diff --git a/op-devstack/presets/minimal_with_conductors.go b/op-devstack/presets/minimal_with_conductors.go index 4275b6705dc54..cab721da0795f 100644 --- 
a/op-devstack/presets/minimal_with_conductors.go +++ b/op-devstack/presets/minimal_with_conductors.go @@ -1,48 +1,25 @@ package presets import ( - "github.com/ethereum-optimism/optimism/op-devstack/compat" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/eth" ) -// once kurtosis and sysgo supports conductors, we can merge this with minimal type MinimalWithConductors struct { *Minimal - ConductorSets map[stack.ComponentID]dsl.ConductorSet + ConductorSets map[eth.ChainID]dsl.ConductorSet } -// TODO(#16418): shift this to a different sysgo constructor once the sysgo implementation supports conductors -func WithMinimalWithConductors() stack.CommonOption { - return stack.Combine( - stack.MakeCommon(sysgo.DefaultMinimalSystem(&sysgo.DefaultMinimalSystemIDs{})), - // TODO(#16418) add sysgo support - WithCompatibleTypes( - compat.Persistent, - compat.Kurtosis, - ), - ) -} - -func NewMinimalWithConductors(t devtest.T) *MinimalWithConductors { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - chains := system.L2Networks() - conductorSets := make(map[stack.ComponentID]dsl.ConductorSet) - for _, chain := range chains { - chainMatcher := match.L2ChainById(chain.ID()) - l2 := system.L2Network(match.Assume(t, chainMatcher)) - - conductorSets[chain.ID()] = dsl.NewConductorSet(l2.Conductors()) - } - return &MinimalWithConductors{ - Minimal: NewMinimal(t), - ConductorSets: conductorSets, - } +// NewMinimalWithConductors creates a fresh MinimalWithConductors target for the current +// test. +// +// The target is created from the runtime plus any additional preset options. 
+func NewMinimalWithConductors(t devtest.T, opts ...Option) *MinimalWithConductors { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewMinimalWithConductors", opts, minimalWithConductorsPresetSupportedOptionKinds) + out := minimalWithConductorsFromRuntime(t, sysgo.NewMinimalWithConductorsRuntimeWithConfig(t, presetCfg)) + presetOpts.applyPreset(out) + return out } diff --git a/op-devstack/presets/minimal_with_synctester.go b/op-devstack/presets/minimal_with_synctester.go deleted file mode 100644 index eb8b3e3f99ed6..0000000000000 --- a/op-devstack/presets/minimal_with_synctester.go +++ /dev/null @@ -1,34 +0,0 @@ -package presets - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type MinimalWithSyncTester struct { - Minimal - - SyncTester *dsl.SyncTester -} - -func WithMinimalWithSyncTester(fcu eth.FCUState) stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultMinimalSystemWithSyncTester(&sysgo.DefaultMinimalSystemWithSyncTesterIDs{}, fcu)) -} - -func NewMinimalWithSyncTester(t devtest.T) *MinimalWithSyncTester { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - minimal := minimalFromSystem(t, system, orch) - l2 := system.L2Network(match.Assume(t, match.L2ChainA)) - syncTester := l2.SyncTester(match.Assume(t, match.FirstSyncTester)) - return &MinimalWithSyncTester{ - Minimal: *minimal, - SyncTester: dsl.NewSyncTester(syncTester), - } -} diff --git a/op-devstack/presets/mixed_frontends.go b/op-devstack/presets/mixed_frontends.go new file mode 100644 index 0000000000000..c2a969e6cc7ec --- /dev/null +++ 
b/op-devstack/presets/mixed_frontends.go @@ -0,0 +1,132 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type MixedSingleChainNodeFrontends struct { + Spec sysgo.MixedSingleChainNodeSpec + EL *dsl.L2ELNode + CL *dsl.L2CLNode +} + +type MixedSingleChainFrontends struct { + L1Network *dsl.L1Network + L1EL *dsl.L1ELNode + L1CL *dsl.L1CLNode + L2Network *dsl.L2Network + L2Batcher *dsl.L2Batcher + FaucetL1 *dsl.Faucet + FaucetL2 *dsl.Faucet + TestSequencer *dsl.TestSequencer + Nodes []MixedSingleChainNodeFrontends +} + +func newFaucetFrontendByName(t devtest.T, name string, chainID eth.ChainID, faucetRPC string) *faucetFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), faucetRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + + return newPresetFaucet(t, name, chainID, rpcCl) +} + +func NewMixedSingleChainFrontends(t devtest.T, runtime *sysgo.MixedSingleChainRuntime) *MixedSingleChainFrontends { + l1Backend := runtime.L1Network + l2Backend := runtime.L2Network + l1ChainID := eth.ChainIDFromBig(l1Backend.ChainConfig().ChainID) + l2ChainID := eth.ChainIDFromBig(l2Backend.ChainConfig().ChainID) + + l1Network := newPresetL1Network(t, "l1", l1Backend.ChainConfig()) + l1EL := newL1ELFrontend(t, "l1", l1ChainID, runtime.L1EL.UserRPC()) + l1CL := newL1CLFrontend(t, "l1", l1ChainID, runtime.L1CL.BeaconHTTPAddr(), runtime.L1CL.FakePoS()) + l1Network.AddL1ELNode(l1EL) + l1Network.AddL1CLNode(l1CL) + + l2Network := newPresetL2Network( + t, + "l2a", + l2Backend.ChainConfig(), + l2Backend.RollupConfig(), + l2Backend.Deployment(), + newKeyring(l2Backend.Keys(), t.Require()), + l1Network, + ) + l2BatcherBackend := runtime.L2Batcher + l2Batcher := 
newL2BatcherFrontend(t, "main", l2ChainID, l2BatcherBackend.UserRPC()) + l2Network.AddL2Batcher(l2Batcher) + + l1ELDSL := dsl.NewL1ELNode(l1EL) + l1CLDSL := dsl.NewL1CLNode(l1CL) + + nodes := make([]MixedSingleChainNodeFrontends, 0, len(runtime.Nodes)) + var primaryL2EL *dsl.L2ELNode + var primaryL2CL *dsl.L2CLNode + for _, node := range runtime.Nodes { + l2EL := newL2ELFrontend( + t, + node.Spec.ELKey, + l2ChainID, + node.EL.UserRPC(), + node.EL.EngineRPC(), + node.EL.JWTPath(), + l2Backend.RollupConfig(), + node.EL, + ) + l2CL := newL2CLFrontend(t, node.Spec.CLKey, l2ChainID, node.CL.UserRPC(), node.CL) + l2CL.attachEL(l2EL) + l2Network.AddL2ELNode(l2EL) + l2Network.AddL2CLNode(l2CL) + l2ELDSL := dsl.NewL2ELNode(l2EL) + l2CLDSL := dsl.NewL2CLNode(l2CL) + if primaryL2EL == nil && node.Spec.IsSequencer { + primaryL2EL = l2ELDSL + primaryL2CL = l2CLDSL + } + nodes = append(nodes, MixedSingleChainNodeFrontends{ + Spec: node.Spec, + EL: l2ELDSL, + CL: l2CLDSL, + }) + } + t.Require().NotNil(primaryL2EL, "missing primary mixed L2 EL") + t.Require().NotNil(primaryL2CL, "missing primary mixed L2 CL") + + l1FaucetName, l1FaucetRPC, ok := defaultFaucetForChain(runtime.FaucetService, l1ChainID) + t.Require().Truef(ok, "missing default faucet for chain %s", l1ChainID) + l2FaucetName, l2FaucetRPC, ok := defaultFaucetForChain(runtime.FaucetService, l2ChainID) + t.Require().Truef(ok, "missing default faucet for chain %s", l2ChainID) + faucetL1Frontend := newFaucetFrontendByName(t, l1FaucetName, l1ChainID, l1FaucetRPC) + faucetL2Frontend := newFaucetFrontendByName(t, l2FaucetName, l2ChainID, l2FaucetRPC) + l1Network.AddFaucet(faucetL1Frontend) + l2Network.AddFaucet(faucetL2Frontend) + faucetL1 := dsl.NewFaucet(faucetL1Frontend) + faucetL2 := dsl.NewFaucet(faucetL2Frontend) + + var testSequencer *dsl.TestSequencer + if backend := runtime.TestSequencer; backend != nil { + t.Require().NotEmpty(backend.Name, "expected test sequencer name") + testSequencer = 
dsl.NewTestSequencer(newTestSequencerFrontend( + t, + backend.Name, + backend.AdminRPC, + backend.ControlRPC, + backend.JWTSecret, + )) + } + + return &MixedSingleChainFrontends{ + L1Network: dsl.NewL1Network(l1Network, l1ELDSL, l1CLDSL), + L1EL: l1ELDSL, + L1CL: l1CLDSL, + L2Network: dsl.NewL2Network(l2Network, primaryL2EL, primaryL2CL, l1ELDSL, nil, nil), + L2Batcher: dsl.NewL2Batcher(l2Batcher), + FaucetL1: faucetL1, + FaucetL2: faucetL2, + TestSequencer: testSequencer, + Nodes: nodes, + } +} diff --git a/op-devstack/presets/networks.go b/op-devstack/presets/networks.go new file mode 100644 index 0000000000000..2467fd230bbf7 --- /dev/null +++ b/op-devstack/presets/networks.go @@ -0,0 +1,328 @@ +package presets + +import ( + "slices" + "sort" + + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/locks" + "github.com/ethereum-optimism/optimism/op-service/testreq" +) + +type presetCommon struct { + log log.Logger + t devtest.T + req *testreq.Assertions + labels *locks.RWMap[string, string] + name string +} + +func newPresetCommon(t devtest.T, name string) presetCommon { + return presetCommon{ + log: t.Logger(), + t: t, + req: t.Require(), + labels: new(locks.RWMap[string, string]), + name: name, + } +} + +func (c *presetCommon) T() devtest.T { + return c.t +} + +func (c *presetCommon) Logger() log.Logger { + return c.log +} + +func (c *presetCommon) Name() string { + return c.name +} + +func (c *presetCommon) Label(key string) string { + out, _ := c.labels.Get(key) + return out +} + +func (c *presetCommon) SetLabel(key, value string) { + c.labels.Set(key, value) +} + +func (c *presetCommon) require() *testreq.Assertions { + return c.req +} + +type 
presetNetworkBase struct { + presetCommon + chainCfg *params.ChainConfig + chainID eth.ChainID + + faucets []*faucetFrontend + syncTesters []*syncTesterFrontend +} + +func (n *presetNetworkBase) ChainID() eth.ChainID { + return n.chainID +} + +func (n *presetNetworkBase) ChainConfig() *params.ChainConfig { + return n.chainCfg +} + +func (n *presetNetworkBase) Faucets() []stack.Faucet { + return mapSlice(sortByNameFunc(n.faucets), func(v *faucetFrontend) stack.Faucet { return v }) +} + +func (n *presetNetworkBase) AddFaucet(v *faucetFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "faucet %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.faucets, v.Name()) + n.require().False(exists, "faucet %s must not already exist", v.Name()) + n.faucets = append(n.faucets, v) +} + +func (n *presetNetworkBase) SyncTesters() []stack.SyncTester { + return mapSlice(sortByNameFunc(n.syncTesters), func(v *syncTesterFrontend) stack.SyncTester { return v }) +} + +func (n *presetNetworkBase) AddSyncTester(v *syncTesterFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "sync tester %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.syncTesters, v.Name()) + n.require().False(exists, "sync tester %s must not already exist", v.Name()) + n.syncTesters = append(n.syncTesters, v) +} + +type presetL1Network struct { + presetNetworkBase + + l1ELNodes []*l1ELFrontend + l1CLNodes []*l1CLFrontend +} + +var _ stack.L1Network = (*presetL1Network)(nil) + +func newPresetL1Network(t devtest.T, name string, chainCfg *params.ChainConfig) *presetL1Network { + chainID := eth.ChainIDFromBig(chainCfg.ChainID) + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + t.Require().NotEmpty(name, "l1 network name must not be empty") + return &presetL1Network{ + presetNetworkBase: presetNetworkBase{ + presetCommon: newPresetCommon(t, name), + chainCfg: chainCfg, + chainID: chainID, + }, + } +} + +func (n *presetL1Network) AddL1ELNode(v 
*l1ELFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "l1 EL node %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.l1ELNodes, v.Name()) + n.require().False(exists, "l1 EL node %s must not already exist", v.Name()) + n.l1ELNodes = append(n.l1ELNodes, v) +} + +func (n *presetL1Network) AddL1CLNode(v *l1CLFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "l1 CL node %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.l1CLNodes, v.Name()) + n.require().False(exists, "l1 CL node %s must not already exist", v.Name()) + n.l1CLNodes = append(n.l1CLNodes, v) +} + +func (n *presetL1Network) L1ELNodes() []stack.L1ELNode { + return mapSlice(sortByNameFunc(n.l1ELNodes), func(v *l1ELFrontend) stack.L1ELNode { return v }) +} + +func (n *presetL1Network) L1CLNodes() []stack.L1CLNode { + return mapSlice(sortByNameFunc(n.l1CLNodes), func(v *l1CLFrontend) stack.L1CLNode { return v }) +} + +type presetL2Network struct { + presetNetworkBase + + rollupCfg *rollup.Config + deployment stack.L2Deployment + keys *keyringImpl + + l1 *presetL1Network + + l2Batchers []*l2BatcherFrontend + l2Proposers []*l2ProposerFrontend + l2Challengers []*l2ChallengerFrontend + l2CLNodes []*l2CLFrontend + l2ELNodes []*l2ELFrontend + conductors []*conductorFrontend + rollupBoostNodes []*rollupBoostFrontend + oprBuilderNodes []*oprBuilderFrontend +} + +var _ stack.L2Network = (*presetL2Network)(nil) + +func newPresetL2Network( + t devtest.T, + name string, + chainCfg *params.ChainConfig, + rollupCfg *rollup.Config, + deployment stack.L2Deployment, + keys *keyringImpl, + l1 *presetL1Network, +) *presetL2Network { + chainID := eth.ChainIDFromBig(chainCfg.ChainID) + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + t.Require().NotEmpty(name, "l2 network name must not be empty") + t.Require().Equal(l1.ChainID(), eth.ChainIDFromBig(rollupCfg.L1ChainID), "rollup config must match expected L1 chain") + t.Require().Equal(chainID, 
eth.ChainIDFromBig(rollupCfg.L2ChainID), "rollup config must match expected L2 chain") + return &presetL2Network{ + presetNetworkBase: presetNetworkBase{ + presetCommon: newPresetCommon(t, name), + chainCfg: chainCfg, + chainID: chainID, + }, + rollupCfg: rollupCfg, + deployment: deployment, + keys: keys, + l1: l1, + } +} + +func (n *presetL2Network) RollupConfig() *rollup.Config { + n.require().NotNil(n.rollupCfg, "l2 chain %s must have a rollup config", n.Name()) + return n.rollupCfg +} + +func (n *presetL2Network) Deployment() stack.L2Deployment { + n.require().NotNil(n.deployment, "l2 chain %s must have a deployment", n.Name()) + return n.deployment +} + +func (n *presetL2Network) Keys() stack.Keys { + n.require().NotNil(n.keys, "l2 chain %s must have keys", n.Name()) + return n.keys +} + +func (n *presetL2Network) L1() stack.L1Network { + n.require().NotNil(n.l1, "l2 chain %s must have an L1 chain", n.Name()) + return n.l1 +} + +func (n *presetL2Network) AddL2Batcher(v *l2BatcherFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "l2 batcher %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.l2Batchers, v.Name()) + n.require().False(exists, "l2 batcher %s must not already exist", v.Name()) + n.l2Batchers = append(n.l2Batchers, v) +} + +func (n *presetL2Network) AddL2Proposer(v *l2ProposerFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "l2 proposer %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.l2Proposers, v.Name()) + n.require().False(exists, "l2 proposer %s must not already exist", v.Name()) + n.l2Proposers = append(n.l2Proposers, v) +} + +func (n *presetL2Network) AddL2Challenger(v *l2ChallengerFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "l2 challenger %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.l2Challengers, v.Name()) + n.require().False(exists, "l2 challenger %s must not already exist", v.Name()) + n.l2Challengers = 
append(n.l2Challengers, v) +} + +func (n *presetL2Network) AddL2CLNode(v *l2CLFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "l2 CL node %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.l2CLNodes, v.Name()) + n.require().False(exists, "l2 CL node %s must not already exist", v.Name()) + n.l2CLNodes = append(n.l2CLNodes, v) +} + +func (n *presetL2Network) AddL2ELNode(v *l2ELFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "l2 EL node %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.l2ELNodes, v.Name()) + n.require().False(exists, "l2 EL node %s must not already exist", v.Name()) + n.l2ELNodes = append(n.l2ELNodes, v) +} + +func (n *presetL2Network) AddConductor(v *conductorFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "conductor %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.conductors, v.Name()) + n.require().False(exists, "conductor %s must not already exist", v.Name()) + n.conductors = append(n.conductors, v) +} + +func (n *presetL2Network) AddRollupBoostNode(v *rollupBoostFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "rollup boost node %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.rollupBoostNodes, v.Name()) + n.require().False(exists, "rollup boost node %s must not already exist", v.Name()) + n.rollupBoostNodes = append(n.rollupBoostNodes, v) +} + +func (n *presetL2Network) AddOPRBuilderNode(v *oprBuilderFrontend) { + n.require().Equal(n.chainID, v.ChainID(), "OPR builder node %s must be on chain %s", v.Name(), n.chainID) + _, exists := componentByName(n.oprBuilderNodes, v.Name()) + n.require().False(exists, "OPR builder node %s must not already exist", v.Name()) + n.oprBuilderNodes = append(n.oprBuilderNodes, v) +} + +func (n *presetL2Network) L2Batchers() []stack.L2Batcher { + return mapSlice(sortByNameFunc(n.l2Batchers), func(v *l2BatcherFrontend) stack.L2Batcher { return v }) +} + +func (n 
*presetL2Network) L2Proposers() []stack.L2Proposer { + return mapSlice(sortByNameFunc(n.l2Proposers), func(v *l2ProposerFrontend) stack.L2Proposer { return v }) +} + +func (n *presetL2Network) L2Challengers() []stack.L2Challenger { + return mapSlice(sortByNameFunc(n.l2Challengers), func(v *l2ChallengerFrontend) stack.L2Challenger { return v }) +} + +func (n *presetL2Network) L2CLNodes() []stack.L2CLNode { + return mapSlice(sortByNameFunc(n.l2CLNodes), func(v *l2CLFrontend) stack.L2CLNode { return v }) +} + +func (n *presetL2Network) L2ELNodes() []stack.L2ELNode { + return mapSlice(sortByNameFunc(n.l2ELNodes), func(v *l2ELFrontend) stack.L2ELNode { return v }) +} + +func (n *presetL2Network) Conductors() []stack.Conductor { + return mapSlice(sortByNameFunc(n.conductors), func(v *conductorFrontend) stack.Conductor { return v }) +} + +func (n *presetL2Network) RollupBoostNodes() []stack.RollupBoostNode { + return mapSlice(sortByNameFunc(n.rollupBoostNodes), func(v *rollupBoostFrontend) stack.RollupBoostNode { return v }) +} + +func (n *presetL2Network) OPRBuilderNodes() []stack.OPRBuilderNode { + return mapSlice(sortByNameFunc(n.oprBuilderNodes), func(v *oprBuilderFrontend) stack.OPRBuilderNode { return v }) +} + +type named interface { + Name() string +} + +func componentByName[T named](components []T, name string) (T, bool) { + for _, component := range components { + if component.Name() == name { + return component, true + } + } + var zero T + return zero, false +} + +func sortByNameFunc[T named](components []T) []T { + out := slices.Clone(components) + sort.Slice(out, func(i, j int) bool { + return out[i].Name() < out[j].Name() + }) + return out +} + +func mapSlice[T any, U any](items []T, mapFn func(T) U) []U { + out := make([]U, len(items)) + for i, item := range items { + out[i] = mapFn(item) + } + return out +} diff --git a/op-devstack/presets/op_rbuilder_rules.go b/op-devstack/presets/op_rbuilder_rules.go index 5ee7d12a6697d..fad5ac080da21 100644 --- 
a/op-devstack/presets/op_rbuilder_rules.go +++ b/op-devstack/presets/op_rbuilder_rules.go @@ -6,27 +6,22 @@ import ( "path/filepath" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) -func WithOPRBuilderRules(ruleContent string, refreshInterval uint64) stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalOPRBuilderNodeOption(sysgo.OPRBuilderNodeOptionFn( - func(p devtest.P, id stack.ComponentID, cfg *sysgo.OPRBuilderNodeConfig) { - cfg.RulesEnabled = true - // Create a fixed directory for rules config - rulesDir := filepath.Join(os.TempDir(), "rules") - if err := os.MkdirAll(rulesDir, 0755); err != nil { - p.Errorf("Failed to create rules dir: %v", err) - } - // Write rules - rulesPath := filepath.Join(rulesDir, "ruleset.yaml") - if err := os.WriteFile(rulesPath, []byte(ruleContent), 0644); err != nil { - p.Errorf("Failed to create rules dir: %v", err) - } - // Write rule config pointing to rules file - rulesConfigContent := fmt.Sprintf(` +func WithOPRBuilderRules(ruleContent string, refreshInterval uint64) Option { + return WithOPRBuilderOption(sysgo.OPRBuilderNodeOptionFn( + func(p devtest.CommonT, _ sysgo.ComponentTarget, cfg *sysgo.OPRBuilderNodeConfig) { + cfg.RulesEnabled = true + rulesDir := filepath.Join(os.TempDir(), "rules") + if err := os.MkdirAll(rulesDir, 0o755); err != nil { + p.Errorf("failed to create rules dir: %v", err) + } + rulesPath := filepath.Join(rulesDir, "ruleset.yaml") + if err := os.WriteFile(rulesPath, []byte(ruleContent), 0o644); err != nil { + p.Errorf("failed to write rules file: %v", err) + } + rulesConfigContent := fmt.Sprintf(` file: - path: %s name: "Test Rules" @@ -34,10 +29,10 @@ file: refresh_interval: %d `, rulesPath, refreshInterval) - rulesConfigPath := filepath.Join(rulesDir, "rules_config.yaml") - if err := os.WriteFile(rulesConfigPath, []byte(rulesConfigContent), 0644); err != nil 
{ - p.Errorf("Failed to write registry file: %v", err) - } - cfg.RulesConfigPath = rulesConfigPath - }))) + rulesConfigPath := filepath.Join(rulesDir, "rules_config.yaml") + if err := os.WriteFile(rulesConfigPath, []byte(rulesConfigContent), 0o644); err != nil { + p.Errorf("failed to write rules config file: %v", err) + } + cfg.RulesConfigPath = rulesConfigPath + })) } diff --git a/op-devstack/presets/option_validation.go b/op-devstack/presets/option_validation.go new file mode 100644 index 0000000000000..666649ead317f --- /dev/null +++ b/op-devstack/presets/option_validation.go @@ -0,0 +1,150 @@ +package presets + +import ( + "fmt" + "strings" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +type optionKinds uint64 + +const ( + optionKindDeployer optionKinds = 1 << iota + optionKindBatcher + optionKindProposer + optionKindOPRBuilder + optionKindGlobalL2CL + optionKindGlobalSyncTesterEL + optionKindAddedGameType + optionKindRespectedGameType + optionKindChallengerCannonKona + optionKindTimeTravel + optionKindMaxSequencingWindow + optionKindRequireInteropNotAtGen + optionKindAfterBuild + optionKindProofValidation +) + +const allOptionKinds = optionKindDeployer | + optionKindBatcher | + optionKindProposer | + optionKindOPRBuilder | + optionKindGlobalL2CL | + optionKindGlobalSyncTesterEL | + optionKindAddedGameType | + optionKindRespectedGameType | + optionKindChallengerCannonKona | + optionKindTimeTravel | + optionKindMaxSequencingWindow | + optionKindRequireInteropNotAtGen | + optionKindAfterBuild | + optionKindProofValidation + +var optionKindLabels = []struct { + kind optionKinds + label string +}{ + {kind: optionKindDeployer, label: "deployer options"}, + {kind: optionKindBatcher, label: "batcher options"}, + {kind: optionKindProposer, label: "proposer options"}, + {kind: optionKindOPRBuilder, label: "builder options"}, + {kind: optionKindGlobalL2CL, label: "L2 CL options"}, + {kind: 
optionKindGlobalSyncTesterEL, label: "sync tester EL options"}, + {kind: optionKindAddedGameType, label: "added game types"}, + {kind: optionKindRespectedGameType, label: "respected game types"}, + {kind: optionKindChallengerCannonKona, label: "challenger cannon-kona"}, + {kind: optionKindTimeTravel, label: "time travel"}, + {kind: optionKindMaxSequencingWindow, label: "max sequencing window"}, + {kind: optionKindRequireInteropNotAtGen, label: "interop-not-at-genesis"}, + {kind: optionKindAfterBuild, label: "after-build hooks"}, + {kind: optionKindProofValidation, label: "proof-validation hooks"}, +} + +func (k optionKinds) String() string { + if k == 0 { + return "none" + } + + names := make([]string, 0, len(optionKindLabels)) + for _, label := range optionKindLabels { + if k&label.kind == 0 { + continue + } + names = append(names, label.label) + } + if unknown := k &^ allOptionKinds; unknown != 0 { + names = append(names, fmt.Sprintf("unknown(%#x)", uint64(unknown))) + } + return strings.Join(names, ", ") +} + +func unsupportedPresetOptionKinds(opts Option, supported optionKinds) optionKinds { + if opts == nil { + return 0 + } + return opts.optionKinds() &^ supported +} + +func collectSupportedPresetConfig(t devtest.T, presetName string, opts []Option, supported optionKinds) (sysgo.PresetConfig, CombinedOption) { + cfg, combined := collectPresetConfig(opts) + if unsupported := unsupportedPresetOptionKinds(combined, supported); unsupported != 0 { + t.Require().FailNowf("%s does not support preset options: %s", presetName, unsupported) + } + return cfg, combined +} + +const minimalPresetSupportedOptionKinds = optionKindDeployer | + optionKindBatcher | + optionKindProposer | + optionKindGlobalL2CL | + optionKindAddedGameType | + optionKindRespectedGameType | + optionKindChallengerCannonKona | + optionKindTimeTravel | + optionKindAfterBuild | + optionKindProofValidation + +const minimalWithConductorsPresetSupportedOptionKinds = optionKindDeployer | + 
optionKindBatcher | + optionKindProposer | + optionKindGlobalL2CL | + optionKindAddedGameType | + optionKindRespectedGameType | + optionKindTimeTravel | + optionKindAfterBuild | + optionKindProofValidation + +const simpleWithSyncTesterPresetSupportedOptionKinds = minimalPresetSupportedOptionKinds | + optionKindGlobalSyncTesterEL + +const singleChainInteropPresetSupportedOptionKinds = optionKindDeployer | + optionKindBatcher | + optionKindProposer | + optionKindGlobalL2CL | + optionKindAddedGameType | + optionKindRespectedGameType | + optionKindTimeTravel | + optionKindMaxSequencingWindow | + optionKindRequireInteropNotAtGen | + optionKindAfterBuild | + optionKindProofValidation + +const simpleInteropSuperProofsPresetSupportedOptionKinds = optionKindDeployer | + optionKindBatcher | + optionKindProposer | + optionKindGlobalL2CL | + optionKindChallengerCannonKona | + optionKindTimeTravel | + optionKindMaxSequencingWindow | + optionKindRequireInteropNotAtGen + +const supernodeProofsPresetSupportedOptionKinds = optionKindChallengerCannonKona + +const twoL2SupernodePresetSupportedOptionKinds = optionKindDeployer + +const twoL2SupernodeInteropPresetSupportedOptionKinds = optionKindDeployer | + optionKindTimeTravel + +const singleChainWithFlashblocksPresetSupportedOptionKinds = optionKindOPRBuilder diff --git a/op-devstack/presets/options.go b/op-devstack/presets/options.go new file mode 100644 index 0000000000000..14934d8bc1346 --- /dev/null +++ b/op-devstack/presets/options.go @@ -0,0 +1,246 @@ +package presets + +import ( + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +type Option interface { + applyConfig(cfg *sysgo.PresetConfig) + applyPreset(target any) + optionKinds() optionKinds +} + +type option struct { + applyFn func(cfg *sysgo.PresetConfig) + applyPresetFn func(target any) + kinds optionKinds +} + +func (o option) applyConfig(cfg *sysgo.PresetConfig) { + if o.applyFn 
== nil { + return + } + o.applyFn(cfg) +} + +func (o option) applyPreset(target any) { + if o.applyPresetFn != nil { + o.applyPresetFn(target) + } +} + +func (o option) optionKinds() optionKinds { + return o.kinds +} + +type CombinedOption []Option + +func Combine(opts ...Option) CombinedOption { + return CombinedOption(opts) +} + +func (c CombinedOption) applyConfig(cfg *sysgo.PresetConfig) { + for _, opt := range c { + if opt == nil { + continue + } + opt.applyConfig(cfg) + } +} + +func (c CombinedOption) applyPreset(target any) { + for _, opt := range c { + if opt == nil { + continue + } + opt.applyPreset(target) + } +} + +func (c CombinedOption) optionKinds() optionKinds { + var kinds optionKinds + for _, opt := range c { + if opt == nil { + continue + } + kinds |= opt.optionKinds() + } + return kinds +} + +func AfterBuild(fn func(target any)) Option { + var kinds optionKinds + if fn != nil { + kinds = optionKindAfterBuild + } + return option{applyPresetFn: fn, kinds: kinds} +} + +func collectPresetConfig(opts []Option) (sysgo.PresetConfig, CombinedOption) { + cfg := sysgo.NewPresetConfig() + combined := Combine(opts...) + combined.applyConfig(&cfg) + return cfg, combined +} + +func WithDeployerOptions(opts ...sysgo.DeployerOption) Option { + var kinds optionKinds + for _, opt := range opts { + if opt != nil { + kinds = optionKindDeployer + break + } + } + return option{ + kinds: kinds, + applyFn: func(cfg *sysgo.PresetConfig) { + cfg.DeployerOptions = append(cfg.DeployerOptions, opts...) 
+ }, + } +} + +func WithBatcherOption(opt sysgo.BatcherOption) Option { + var kinds optionKinds + if opt != nil { + kinds = optionKindBatcher + } + return option{ + kinds: kinds, + applyFn: func(cfg *sysgo.PresetConfig) { + if opt == nil { + return + } + cfg.BatcherOptions = append(cfg.BatcherOptions, opt) + }, + } +} + +func WithGlobalL2CLOption(opt sysgo.L2CLOption) Option { + var kinds optionKinds + if opt != nil { + kinds = optionKindGlobalL2CL + } + return option{ + kinds: kinds, + applyFn: func(cfg *sysgo.PresetConfig) { + if opt == nil { + return + } + cfg.GlobalL2CLOptions = append(cfg.GlobalL2CLOptions, opt) + }, + } +} + +func WithGlobalSyncTesterELOption(opt sysgo.SyncTesterELOption) Option { + var kinds optionKinds + if opt != nil { + kinds = optionKindGlobalSyncTesterEL + } + return option{ + kinds: kinds, + applyFn: func(cfg *sysgo.PresetConfig) { + if opt == nil { + return + } + cfg.GlobalSyncTesterELOptions = append(cfg.GlobalSyncTesterELOptions, opt) + }, + } +} + +func WithProposerOption(opt sysgo.ProposerOption) Option { + var kinds optionKinds + if opt != nil { + kinds = optionKindProposer + } + return option{ + kinds: kinds, + applyFn: func(cfg *sysgo.PresetConfig) { + if opt == nil { + return + } + cfg.ProposerOptions = append(cfg.ProposerOptions, opt) + }, + } +} + +func WithOPRBuilderOption(opt sysgo.OPRBuilderNodeOption) Option { + var kinds optionKinds + if opt != nil { + kinds = optionKindOPRBuilder + } + return option{ + kinds: kinds, + applyFn: func(cfg *sysgo.PresetConfig) { + if opt == nil { + return + } + cfg.OPRBuilderOptions = append(cfg.OPRBuilderOptions, opt) + }, + } +} + +func WithGameTypeAdded(gameType gameTypes.GameType) Option { + return option{ + kinds: optionKindAddedGameType, + applyFn: func(cfg *sysgo.PresetConfig) { + cfg.AddedGameTypes = append(cfg.AddedGameTypes, gameType) + }, + } +} + +func WithRespectedGameTypeOverride(gameType gameTypes.GameType) Option { + return option{ + kinds: optionKindRespectedGameType, + 
applyFn: func(cfg *sysgo.PresetConfig) { + cfg.RespectedGameTypes = append(cfg.RespectedGameTypes, gameType) + }, + } +} + +func WithCannonKonaGameTypeAdded() Option { + return option{ + kinds: optionKindAddedGameType | optionKindChallengerCannonKona, + applyFn: func(cfg *sysgo.PresetConfig) { + cfg.EnableCannonKonaForChall = true + cfg.AddedGameTypes = append(cfg.AddedGameTypes, gameTypes.CannonKonaGameType) + }, + } +} + +func WithChallengerCannonKonaEnabled() Option { + return option{ + kinds: optionKindChallengerCannonKona, + applyFn: func(cfg *sysgo.PresetConfig) { + cfg.EnableCannonKonaForChall = true + }, + } +} + +func WithTimeTravelEnabled() Option { + return option{ + kinds: optionKindTimeTravel, + applyFn: func(cfg *sysgo.PresetConfig) { + cfg.EnableTimeTravel = true + }, + } +} + +func WithMaxSequencingWindow(max uint64) Option { + return option{ + kinds: optionKindMaxSequencingWindow, + applyFn: func(cfg *sysgo.PresetConfig) { + v := max + cfg.MaxSequencingWindow = &v + }, + } +} + +func WithRequireInteropNotAtGenesis() Option { + return option{ + kinds: optionKindRequireInteropNotAtGen, + applyFn: func(cfg *sysgo.PresetConfig) { + cfg.RequireInteropNotAtGen = true + }, + } +} diff --git a/op-devstack/presets/options_test.go b/op-devstack/presets/options_test.go new file mode 100644 index 0000000000000..e99fd0e743d8f --- /dev/null +++ b/op-devstack/presets/options_test.go @@ -0,0 +1,121 @@ +package presets + +import ( + "testing" + + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/stretchr/testify/require" +) + +func TestOptionKindsFromCompositeOptions(t *testing.T) { + t.Run("WithSequencingWindow", func(t *testing.T) { + require.Equal(t, + optionKindDeployer|optionKindMaxSequencingWindow, + WithSequencingWindow(12, 24).optionKinds(), + ) + }) + + t.Run("WithCannonKonaGameTypeAdded", func(t 
*testing.T) { + require.Equal(t, + optionKindAddedGameType|optionKindChallengerCannonKona, + WithCannonKonaGameTypeAdded().optionKinds(), + ) + }) + + t.Run("RequireGameTypePresent", func(t *testing.T) { + require.Equal(t, + optionKindAfterBuild|optionKindProofValidation, + RequireGameTypePresent(gameTypes.CannonGameType).optionKinds(), + ) + }) + + t.Run("nil adapters do not claim support kinds", func(t *testing.T) { + require.Zero(t, WithDeployerOptions(nil).optionKinds()) + require.Zero(t, WithBatcherOption(nil).optionKinds()) + require.Zero(t, WithGlobalL2CLOption(nil).optionKinds()) + require.Zero(t, WithGlobalSyncTesterELOption(nil).optionKinds()) + require.Zero(t, WithProposerOption(nil).optionKinds()) + require.Zero(t, WithOPRBuilderOption(nil).optionKinds()) + require.Zero(t, AfterBuild(nil).optionKinds()) + }) +} + +func TestUnsupportedPresetOptionKinds(t *testing.T) { + builderOpt := sysgo.OPRBuilderNodeOptionFn(func(devtest.CommonT, sysgo.ComponentTarget, *sysgo.OPRBuilderNodeConfig) {}) + + tests := []struct { + name string + supported optionKinds + opts Option + want optionKinds + }{ + { + name: "minimal allows proof validation hooks", + supported: minimalPresetSupportedOptionKinds, + opts: Combine( + WithTimeTravelEnabled(), + RequireGameTypePresent(gameTypes.CannonGameType), + ), + want: 0, + }, + { + name: "minimal with conductors rejects challenger toggle", + supported: minimalWithConductorsPresetSupportedOptionKinds, + opts: WithChallengerCannonKonaEnabled(), + want: optionKindChallengerCannonKona, + }, + { + name: "flashblocks only allows builder adapters", + supported: singleChainWithFlashblocksPresetSupportedOptionKinds, + opts: Combine( + WithOPRBuilderOption(builderOpt), + WithTimeTravelEnabled(), + ), + want: optionKindTimeTravel, + }, + { + name: "simple interop super proofs reject builder and proof hooks", + supported: simpleInteropSuperProofsPresetSupportedOptionKinds, + opts: Combine( + WithOPRBuilderOption(builderOpt), + 
RequireGameTypePresent(gameTypes.CannonGameType), + ), + want: optionKindOPRBuilder | optionKindAfterBuild | optionKindProofValidation, + }, + { + name: "supernode proofs only allow challenger toggle", + supported: supernodeProofsPresetSupportedOptionKinds, + opts: Combine( + WithChallengerCannonKonaEnabled(), + WithTimeTravelEnabled(), + ), + want: optionKindTimeTravel, + }, + { + name: "two l2 supernode rejects time travel", + supported: twoL2SupernodePresetSupportedOptionKinds, + opts: WithTimeTravelEnabled(), + want: optionKindTimeTravel, + }, + { + name: "two l2 supernode interop accepts time travel", + supported: twoL2SupernodeInteropPresetSupportedOptionKinds, + opts: WithTimeTravelEnabled(), + want: 0, + }, + { + name: "unsupported proof validation is called out separately from generic after build", + supported: optionKindAfterBuild, + opts: RequireGameTypePresent(gameTypes.CannonGameType), + want: optionKindProofValidation, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require.Equal(t, tt.want, unsupportedPresetOptionKinds(tt.opts, tt.supported)) + }) + } +} diff --git a/op-devstack/presets/orchestrator.go b/op-devstack/presets/orchestrator.go deleted file mode 100644 index 00b264bbfb0ac..0000000000000 --- a/op-devstack/presets/orchestrator.go +++ /dev/null @@ -1,186 +0,0 @@ -package presets - -import ( - "context" - "fmt" - "os" - "runtime/debug" - "slices" - "sync/atomic" - - "github.com/ethereum/go-ethereum/log" - "go.opentelemetry.io/otel" - - "github.com/ethereum-optimism/optimism/devnet-sdk/telemetry" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysext" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-service/flags" - "github.com/ethereum-optimism/optimism/op-service/locks" - 
oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/log/logfilter" -) - -// lockedOrchestrator is the global variable that stores -// the global orchestrator that tests may use. -// Presets are expected to use the global orchestrator, -// unless explicitly told otherwise using a WithOrchestrator option. -var lockedOrchestrator locks.RWValue[stack.Orchestrator] - -type backendKind string - -const ( - backendKindSysGo backendKind = "sysgo" - backendKindSysExt backendKind = "sysext" -) - -type TestingM interface { - Run() int -} - -// DoMain runs M with the pre- and post-processing of tests, -// to setup the default global orchestrator and global logger. -// This will os.Exit(code) and not return. -func DoMain(m TestingM, opts ...stack.CommonOption) { - // nest the function, so we can defer-recover and defer-cleanup, before os.Exit - code := func() (errCode int) { - failed := new(atomic.Bool) - defer func() { - if failed.Load() { - errCode = 1 - } - }() - defer func() { - if x := recover(); x != nil && !failed.Load() { - debug.PrintStack() - _, _ = fmt.Fprintf(os.Stderr, "Panic during test Main: %v\n", x) - - failed.Store(true) - } - }() - - cfg := flags.ReadTestConfig() - logHandler := oplog.NewLogHandler(os.Stdout, cfg.LogConfig) - logHandler = logfilter.WrapFilterHandler(logHandler) - logHandler.(logfilter.FilterHandler).Set(logfilter.DefaultMute(logfilter.Level(log.LevelInfo).Show())) - logHandler = logfilter.WrapContextHandler(logHandler) - // The default can be changed using the WithLogFilters option which replaces this default - logger := log.NewLogger(logHandler) - oplog.SetGlobalLogHandler(logHandler) - - ctx, otelShutdown, err := telemetry.SetupOpenTelemetry(context.Background()) - if err != nil { - logger.Warn("Failed to setup OpenTelemetry", "error", err) - } else { - defer otelShutdown() - } - - ctx, run := otel.Tracer("run").Start(ctx, "test suite") - defer run.End() - - // All tests will 
inherit this package-level context - devtest.RootContext = ctx - - // Make the package-level logger use this context - logger.SetContext(ctx) - - onFail := func(now bool) { - if !failed.Load() { - logger.Error("Main failed") - debug.PrintStack() - failed.Store(true) - } - if now { - panic("critical Main fail") - } - } - - onSkipNow := func() { - logger.Info("Main skipped") - os.Exit(0) - } - p := devtest.NewP(ctx, logger, onFail, onSkipNow) - defer p.Close() - - p.Require().NotEmpty(opts, "Expecting orchestrator options") - - initOrchestrator(ctx, p, stack.Combine(opts...)) - - errCode = m.Run() - return - }() - _, _ = fmt.Fprintf(os.Stderr, "\nExiting, code: %d\n", code) - os.Exit(code) -} - -func initOrchestrator(ctx context.Context, p devtest.P, opt stack.CommonOption) { - ctx, span := p.Tracer().Start(ctx, "initializing orchestrator") - defer span.End() - - lockedOrchestrator.Lock() - defer lockedOrchestrator.Unlock() - if lockedOrchestrator.Value != nil { - return - } - backend := backendKindSysGo - if override, ok := os.LookupEnv("DEVSTACK_ORCHESTRATOR"); ok { - backend = backendKind(override) - } - switch backend { - case backendKindSysGo: - lockedOrchestrator.Value = sysgo.NewOrchestrator(p, stack.SystemHook(opt)) - case backendKindSysExt: - lockedOrchestrator.Value = sysext.NewOrchestrator(p, stack.SystemHook(opt)) - default: - panic(fmt.Sprintf("Unknown backend for initializing orchestrator: %s", backend)) - } - - p.Logger().InfoContext(ctx, "initializing orchestrator", "backend", backend) - stack.ApplyOptionLifecycle(opt, lockedOrchestrator.Value) -} - -// Orchestrator returns the globally configured orchestrator. 
-// -// Add a TestMain to your test package init the orchestrator: -// -// func TestMain(m *testing.M) { -// presets.DoMain(m) -// } -func Orchestrator() stack.Orchestrator { - out := lockedOrchestrator.Get() - if out == nil { - panic(` -Add a TestMain to your test package init the orchestrator: - - func TestMain(m *testing.M) { - presets.DoMain(m) - } -`) - } - return out -} - -// WithCompatibleTypes is a common option that can be used to ensure that the orchestrator is compatible with the preset. -// If the orchestrator is not compatible, the test will either: -// - fail with a non-zero exit code (42) if DEVNET_EXPECT_PRECONDITIONS_MET is non-empty -// - skip the whole test otherwise -// This is useful to ensure that the preset is only used with the correct orchestrator type. -// Do yourself a favor, if you use this option, add a good comment (or a TODO) justifying it! -func WithCompatibleTypes(t ...compat.Type) stack.CommonOption { - return stack.FnOption[stack.Orchestrator]{ - BeforeDeployFn: func(orch stack.Orchestrator) { - if !slices.Contains(t, orch.Type()) { - p := orch.P() - - if os.Getenv(devtest.ExpectPreconditionsMet) != "" { - p.Errorf("Orchestrator type %s is incompatible with this preset", orch.Type()) - os.Exit(compat.CompatErrorCode) - } else { - p.SkipNow() - } - } - }, - } -} diff --git a/op-devstack/presets/proof.go b/op-devstack/presets/proof.go index 90ed0e2e3d680..509c2f0af9c6c 100644 --- a/op-devstack/presets/proof.go +++ b/op-devstack/presets/proof.go @@ -2,124 +2,105 @@ package presets import ( gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/dsl/contract" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ps 
"github.com/ethereum-optimism/optimism/op-proposer/proposer" "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txintent/contractio" ) -func WithRespectedGameType(gameType gameTypes.GameType) stack.CommonOption { - opts := WithProposerGameType(gameType) - opts = stack.Combine(opts, - stack.MakeCommon(sysgo.WithRespectedGameType(gameType)), // Set if sysgo is in use - RequireRespectedGameType(gameType), - ) - return opts +type proofValidationTarget interface { + proofValidationContext() (devtest.T, *dsl.L1ELNode, []*dsl.L2Network) } -func WithAddedGameType(gameType gameTypes.GameType) stack.CommonOption { - opts := stack.Combine( - stack.MakeCommon(sysgo.WithGameTypeAdded(gameType)), // Add if sysgo is in use - RequireGameTypePresent(gameType), // Verify present for other chains - ) - - if gameType == gameTypes.CannonKonaGameType { - opts = stack.Combine( - opts, - stack.MakeCommon(sysgo.WithChallengerCannonKonaEnabled()), - ) +func afterBuildProofValidation(fn func(t devtest.T, elNode *dsl.L1ELNode, l2Networks []*dsl.L2Network)) Option { + var kinds optionKinds + if fn != nil { + kinds = optionKindAfterBuild | optionKindProofValidation } - return opts -} - -func RequireGameTypePresent(gameType gameTypes.GameType) stack.CommonOption { - return stack.FnOption[stack.Orchestrator]{ - PostHydrateFn: func(sys stack.System) { - elNode := sys.L1Network(match.FirstL1Network).L1ELNode(match.FirstL1EL) - for _, l2Network := range sys.L2Networks() { - dgf := bindings.NewBindings[bindings.DisputeGameFactory]( - bindings.WithClient(elNode.EthClient()), - bindings.WithTo(l2Network.Deployment().DisputeGameFactoryProxyAddr()), - bindings.WithTest(sys.T()), - ) - gameImpl := contract.Read(dgf.GameImpls(uint32(gameType))) - sys.T().Gate().NotZerof(gameImpl, "Dispute game factory must have a game implementation for %s", gameType) + return option{ + kinds: kinds, + applyPresetFn: func(target any) { + if fn == nil { + 
return + } + proofTarget, ok := target.(proofValidationTarget) + if !ok { + return } + t, elNode, l2Networks := proofTarget.proofValidationContext() + fn(t, elNode, l2Networks) }, } } -func RequireRespectedGameType(gameType gameTypes.GameType) stack.CommonOption { - return stack.FnOption[stack.Orchestrator]{ - PostHydrateFn: func(sys stack.System) { - - elNode := sys.L1Network(match.FirstL1Network).L1ELNode(match.FirstL1EL) - for _, l2Network := range sys.L2Networks() { - l1PortalAddr := l2Network.RollupConfig().DepositContractAddress - l1Portal := bindings.NewBindings[bindings.OptimismPortal2]( - bindings.WithClient(elNode.EthClient()), - bindings.WithTo(l1PortalAddr), - bindings.WithTest(sys.T())) +func WithRespectedGameType(gameType gameTypes.GameType) Option { + opts := WithProposerGameType(gameType) + opts = Combine(opts, + WithRespectedGameTypeOverride(gameType), + RequireRespectedGameType(gameType), + ) + return opts +} - respectedGameType, err := contractio.Read(l1Portal.RespectedGameType(), sys.T().Ctx()) - sys.T().Require().NoError(err, "Failed to read respected game type") - sys.T().Gate().EqualValuesf(gameType, respectedGameType, "Respected game type must be %s", gameType) - } - }, - } +func RequireGameTypePresent(gameType gameTypes.GameType) Option { + return afterBuildProofValidation(func(t devtest.T, elNode *dsl.L1ELNode, l2Networks []*dsl.L2Network) { + for _, l2Network := range l2Networks { + dgf := bindings.NewBindings[bindings.DisputeGameFactory]( + bindings.WithClient(elNode.EthClient()), + bindings.WithTo(l2Network.Escape().Deployment().DisputeGameFactoryProxyAddr()), + bindings.WithTest(t), + ) + gameImpl := contract.Read(dgf.GameImpls(uint32(gameType))) + t.Gate().NotZerof(gameImpl, "Dispute game factory must have a game implementation for %s", gameType) + } + }) } -func WithProposerGameType(gameType gameTypes.GameType) stack.CommonOption { - return stack.Combine( - stack.MakeCommon( - sysgo.WithProposerOption(func(id stack.ComponentID, cfg 
*ps.CLIConfig) { - cfg.DisputeGameType = uint32(gameType) - }))) +func RequireRespectedGameType(gameType gameTypes.GameType) Option { + return afterBuildProofValidation(func(t devtest.T, elNode *dsl.L1ELNode, l2Networks []*dsl.L2Network) { + for _, l2Network := range l2Networks { + l1PortalAddr := l2Network.Escape().RollupConfig().DepositContractAddress + l1Portal := bindings.NewBindings[bindings.OptimismPortal2]( + bindings.WithClient(elNode.EthClient()), + bindings.WithTo(l1PortalAddr), + bindings.WithTest(t)) + + respectedGameType, err := contractio.Read(l1Portal.RespectedGameType(), t.Ctx()) + t.Require().NoError(err, "Failed to read respected game type") + t.Gate().EqualValuesf(gameType, respectedGameType, "Respected game type must be %s", gameType) + } + }) } -// TODO(infra#401): Implement support in the sysext toolset -func WithDeployerMatchL1PAO() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithDeployerPipelineOption( - sysgo.WithDeployerMatchL1PAO(), - ), - ) +func WithProposerGameType(gameType gameTypes.GameType) Option { + return WithProposerOption(func(id sysgo.ComponentTarget, cfg *ps.CLIConfig) { + cfg.DisputeGameType = uint32(gameType) + }) } -// TODO(infra#401): Implement support in the sysext toolset -func WithGuardianMatchL1PAO() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithDeployerOptions( - sysgo.WithGuardianMatchL1PAO(), - ), +func WithGuardianMatchL1PAO() Option { + return WithDeployerOptions( + sysgo.WithGuardianMatchL1PAO(), ) } -// TODO(infra#401): Implement support in the sysext toolset -func WithFinalizationPeriodSeconds(n uint64) stack.CommonOption { - return stack.MakeCommon(sysgo.WithDeployerOptions( +func WithFinalizationPeriodSeconds(n uint64) Option { + return WithDeployerOptions( sysgo.WithFinalizationPeriodSeconds(n), - )) + ) } -// TODO(infra#401): Implement support in the sysext toolset -func WithProofMaturityDelaySeconds(seconds uint64) stack.CommonOption { - return 
stack.MakeCommon(sysgo.WithDeployerOptions( +func WithProofMaturityDelaySeconds(seconds uint64) Option { + return WithDeployerOptions( sysgo.WithProofMaturityDelaySeconds(seconds), - )) + ) } -// TODO(infra#401): Implement support in the sysext toolset -func WithDisputeGameFinalityDelaySeconds(seconds uint64) stack.CommonOption { - return stack.MakeCommon(sysgo.WithDeployerOptions( +func WithDisputeGameFinalityDelaySeconds(seconds uint64) Option { + return WithDeployerOptions( sysgo.WithDisputeGameFinalityDelaySeconds(seconds), - )) -} - -// WithProofs enables a minimal system with permissionless proofs enabled -func WithProofs() stack.CommonOption { - return stack.MakeCommon(sysgo.ProofSystem(&sysgo.DefaultMinimalSystemIDs{})) + ) } diff --git a/op-devstack/presets/rpc_frontends.go b/op-devstack/presets/rpc_frontends.go new file mode 100644 index 0000000000000..71a0cccf2d5d7 --- /dev/null +++ b/op-devstack/presets/rpc_frontends.go @@ -0,0 +1,592 @@ +package presets + +import ( + "crypto/ecdsa" + "time" + + "github.com/ethereum/go-ethereum/common" + gethrpc "github.com/ethereum/go-ethereum/rpc" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + challengerConfig "github.com/ethereum-optimism/optimism/op-challenger/config" + conductorRpc "github.com/ethereum-optimism/optimism/op-conductor/rpc" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/apis" + opclient "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/locks" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testreq" + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester" +) + +type keyringImpl struct { + keys devkeys.Keys + 
require *testreq.Assertions +} + +var _ stack.Keys = (*keyringImpl)(nil) + +func newKeyring(keys devkeys.Keys, req *testreq.Assertions) *keyringImpl { + return &keyringImpl{ + keys: keys, + require: req, + } +} + +func (k *keyringImpl) Secret(key devkeys.Key) *ecdsa.PrivateKey { + pk, err := k.keys.Secret(key) + k.require.NoError(err) + return pk +} + +func (k *keyringImpl) Address(key devkeys.Key) common.Address { + addr, err := k.keys.Address(key) + k.require.NoError(err) + return addr +} + +type rpcELNode struct { + presetCommon + + client opclient.RPC + ethClient *sources.EthClient + chainID eth.ChainID + txTimeout time.Duration +} + +var _ stack.ELNode = (*rpcELNode)(nil) + +func newRPCELNode(t devtest.T, name string, chainID eth.ChainID, rpcCl opclient.RPC, timeout time.Duration) rpcELNode { + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + ethCl, err := sources.NewEthClient(rpcCl, t.Logger(), nil, sources.DefaultEthClientConfig(10)) + t.Require().NoError(err) + if timeout == 0 { + timeout = 30 * time.Second + } + return rpcELNode{ + presetCommon: newPresetCommon(t, name), + client: rpcCl, + ethClient: ethCl, + chainID: chainID, + txTimeout: timeout, + } +} + +func (r *rpcELNode) ChainID() eth.ChainID { + return r.chainID +} + +func (r *rpcELNode) EthClient() apis.EthClient { + return r.ethClient +} + +func (r *rpcELNode) TransactionTimeout() time.Duration { + return r.txTimeout +} + +type l1ELFrontend struct { + rpcELNode +} + +var _ stack.L1ELNode = (*l1ELFrontend)(nil) + +func newPresetL1ELNode(t devtest.T, name string, chainID eth.ChainID, rpcCl opclient.RPC) *l1ELFrontend { + return &l1ELFrontend{ + rpcELNode: newRPCELNode(t, name, chainID, rpcCl, 0), + } +} + +type l1CLFrontend struct { + presetCommon + chainID eth.ChainID + client apis.BeaconClient + lifecycle stack.Lifecycle +} + +var _ stack.L1CLNode = (*l1CLFrontend)(nil) + +func newPresetL1CLNode(t devtest.T, name string, chainID eth.ChainID, httpCl opclient.HTTP) *l1CLFrontend { + t = 
t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + return &l1CLFrontend{ + presetCommon: newPresetCommon(t, name), + chainID: chainID, + client: sources.NewBeaconHTTPClient(httpCl), + } +} + +func (r *l1CLFrontend) ChainID() eth.ChainID { + return r.chainID +} + +func (r *l1CLFrontend) BeaconClient() apis.BeaconClient { + return r.client +} + +func (r *l1CLFrontend) Start() { + r.require().NotNil(r.lifecycle, "L1CL node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Start() +} + +func (r *l1CLFrontend) Stop() { + r.require().NotNil(r.lifecycle, "L1CL node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Stop() +} + +type l2ELFrontend struct { + rpcELNode + l2Client *sources.L2Client + l2EngineClient *sources.EngineClient + lifecycle stack.Lifecycle +} + +var _ stack.L2ELNode = (*l2ELFrontend)(nil) + +func newPresetL2ELNode(t devtest.T, name string, chainID eth.ChainID, userRPCCl opclient.RPC, engineRPCCl opclient.RPC, rollupCfg *rollup.Config) *l2ELFrontend { + t.Require().NotNil(rollupCfg, "rollup config must be configured") + l2Client, err := sources.NewL2Client(userRPCCl, t.Logger(), nil, sources.L2ClientSimpleConfig(rollupCfg, false, 10, 10)) + t.Require().NoError(err) + engineClientCfg := &sources.EngineClientConfig{ + L2ClientConfig: *sources.L2ClientSimpleConfig(rollupCfg, false, 10, 10), + } + engineClient, err := sources.NewEngineClient(engineRPCCl, t.Logger(), nil, engineClientCfg) + t.Require().NoError(err) + return &l2ELFrontend{ + rpcELNode: newRPCELNode(t, name, chainID, userRPCCl, 0), + l2Client: l2Client, + l2EngineClient: engineClient, + } +} + +func (r *l2ELFrontend) L2EthClient() apis.L2EthClient { + return r.l2Client +} + +func (r *l2ELFrontend) L2EngineClient() apis.EngineClient { + return r.l2EngineClient.EngineAPIClient +} + +func (r *l2ELFrontend) Start() { + r.require().NotNil(r.lifecycle, "L2EL node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Start() +} + +func (r *l2ELFrontend) Stop() { + 
r.require().NotNil(r.lifecycle, "L2EL node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Stop() +} + +type l2CLFrontend struct { + presetCommon + chainID eth.ChainID + client opclient.RPC + rollupClient apis.RollupClient + p2pClient apis.P2PClient + els locks.RWMap[string, *l2ELFrontend] + rollupBoostNodes locks.RWMap[string, *rollupBoostFrontend] + oprBuilderNodes locks.RWMap[string, *oprBuilderFrontend] + userRPC string + interopEndpoint string + interopJWTSecret eth.Bytes32 + lifecycle stack.Lifecycle +} + +var _ stack.L2CLNode = (*l2CLFrontend)(nil) + +func newPresetL2CLNode(t devtest.T, name string, chainID eth.ChainID, rpcCl opclient.RPC, userRPC, interopEndpoint string, interopJWTSecret eth.Bytes32) *l2CLFrontend { + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + return &l2CLFrontend{ + presetCommon: newPresetCommon(t, name), + chainID: chainID, + client: rpcCl, + rollupClient: sources.NewRollupClient(rpcCl), + p2pClient: sources.NewP2PClient(rpcCl), + userRPC: userRPC, + interopEndpoint: interopEndpoint, + interopJWTSecret: interopJWTSecret, + } +} + +func (r *l2CLFrontend) ClientRPC() opclient.RPC { + return r.client +} + +func (r *l2CLFrontend) ChainID() eth.ChainID { + return r.chainID +} + +func (r *l2CLFrontend) RollupAPI() apis.RollupClient { + return r.rollupClient +} + +func (r *l2CLFrontend) P2PAPI() apis.P2PClient { + return r.p2pClient +} + +func (r *l2CLFrontend) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { + return r.interopEndpoint, r.interopJWTSecret +} + +func (r *l2CLFrontend) UserRPC() string { + return r.userRPC +} + +func (r *l2CLFrontend) attachEL(el *l2ELFrontend) { + r.els.Set(el.Name(), el) +} + +func (r *l2CLFrontend) attachRollupBoostNode(node *rollupBoostFrontend) { + r.rollupBoostNodes.Set(node.Name(), node) +} + +func (r *l2CLFrontend) attachOPRBuilderNode(node *oprBuilderFrontend) { + r.oprBuilderNodes.Set(node.Name(), node) +} + +func (r *l2CLFrontend) ELs() []stack.L2ELNode { + return 
mapSlice(sortByNameFunc(r.els.Values()), func(v *l2ELFrontend) stack.L2ELNode { return v }) +} + +func (r *l2CLFrontend) RollupBoostNodes() []stack.RollupBoostNode { + return mapSlice(sortByNameFunc(r.rollupBoostNodes.Values()), func(v *rollupBoostFrontend) stack.RollupBoostNode { return v }) +} + +func (r *l2CLFrontend) OPRBuilderNodes() []stack.OPRBuilderNode { + return mapSlice(sortByNameFunc(r.oprBuilderNodes.Values()), func(v *oprBuilderFrontend) stack.OPRBuilderNode { return v }) +} + +func (r *l2CLFrontend) ELClient() apis.EthClient { + if els := sortByNameFunc(r.els.Values()); len(els) > 0 { + return els[0].EthClient() + } + if nodes := sortByNameFunc(r.rollupBoostNodes.Values()); len(nodes) > 0 { + return nodes[0].EthClient() + } + if nodes := sortByNameFunc(r.oprBuilderNodes.Values()); len(nodes) > 0 { + return nodes[0].EthClient() + } + return nil +} + +func (r *l2CLFrontend) Start() { + r.require().NotNil(r.lifecycle, "L2CL node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Start() +} + +func (r *l2CLFrontend) Stop() { + r.require().NotNil(r.lifecycle, "L2CL node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Stop() +} + +type l2BatcherFrontend struct { + presetCommon + chainID eth.ChainID + client *sources.BatcherAdminClient +} + +var _ stack.L2Batcher = (*l2BatcherFrontend)(nil) + +func newPresetL2Batcher(t devtest.T, name string, chainID eth.ChainID, rpcCl opclient.RPC) *l2BatcherFrontend { + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + return &l2BatcherFrontend{ + presetCommon: newPresetCommon(t, name), + chainID: chainID, + client: sources.NewBatcherAdminClient(rpcCl), + } +} + +func (r *l2BatcherFrontend) ChainID() eth.ChainID { + return r.chainID +} + +func (r *l2BatcherFrontend) ActivityAPI() apis.BatcherActivity { + return r.client +} + +type l2ProposerFrontend struct { + presetCommon + chainID eth.ChainID +} + +var _ stack.L2Proposer = (*l2ProposerFrontend)(nil) + +func (r *l2ProposerFrontend) ChainID() 
eth.ChainID { + return r.chainID +} + +type l2ChallengerFrontend struct { + presetCommon + chainID eth.ChainID + config *challengerConfig.Config +} + +var _ stack.L2Challenger = (*l2ChallengerFrontend)(nil) + +func newPresetL2Challenger(t devtest.T, name string, chainID eth.ChainID, cfg *challengerConfig.Config) *l2ChallengerFrontend { + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + return &l2ChallengerFrontend{ + presetCommon: newPresetCommon(t, name), + chainID: chainID, + config: cfg, + } +} + +func (r *l2ChallengerFrontend) ChainID() eth.ChainID { + return r.chainID +} + +func (r *l2ChallengerFrontend) Config() *challengerConfig.Config { + return r.config +} + +type oprBuilderFrontend struct { + rpcELNode + engineClient *sources.EngineClient + flashblocksClient *opclient.WSClient + lifecycle stack.Lifecycle +} + +var _ stack.OPRBuilderNode = (*oprBuilderFrontend)(nil) + +func newPresetOPRBuilderNode(t devtest.T, name string, chainID eth.ChainID, rpcCl opclient.RPC, rollupCfg *rollup.Config, flashblocksCl *opclient.WSClient) *oprBuilderFrontend { + engineClient, err := sources.NewEngineClient(rpcCl, t.Logger(), nil, sources.EngineClientDefaultConfig(rollupCfg)) + t.Require().NoError(err) + return &oprBuilderFrontend{ + rpcELNode: newRPCELNode(t, name, chainID, rpcCl, 0), + engineClient: engineClient, + flashblocksClient: flashblocksCl, + } +} + +func (r *oprBuilderFrontend) L2EthClient() apis.L2EthClient { + return r.engineClient.L2Client +} + +func (r *oprBuilderFrontend) L2EngineClient() apis.EngineClient { + return r.engineClient.EngineAPIClient +} + +func (r *oprBuilderFrontend) FlashblocksClient() *opclient.WSClient { + return r.flashblocksClient +} + +func (r *oprBuilderFrontend) Start() { + r.require().NotNil(r.lifecycle, "OPR builder node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Start() +} + +func (r *oprBuilderFrontend) Stop() { + r.require().NotNil(r.lifecycle, "OPR builder node %s is not lifecycle-controllable", 
r.Name()) + r.lifecycle.Stop() +} + +type rollupBoostFrontend struct { + rpcELNode + engineClient *sources.EngineClient + flashblocksClient *opclient.WSClient + lifecycle stack.Lifecycle +} + +var _ stack.RollupBoostNode = (*rollupBoostFrontend)(nil) + +func newPresetRollupBoostNode(t devtest.T, name string, chainID eth.ChainID, rpcCl opclient.RPC, rollupCfg *rollup.Config, flashblocksCl *opclient.WSClient) *rollupBoostFrontend { + engineClient, err := sources.NewEngineClient(rpcCl, t.Logger(), nil, sources.EngineClientDefaultConfig(rollupCfg)) + t.Require().NoError(err) + return &rollupBoostFrontend{ + rpcELNode: newRPCELNode(t, name, chainID, rpcCl, 0), + engineClient: engineClient, + flashblocksClient: flashblocksCl, + } +} + +func (r *rollupBoostFrontend) L2EthClient() apis.L2EthClient { + return r.engineClient.L2Client +} + +func (r *rollupBoostFrontend) L2EngineClient() apis.EngineClient { + return r.engineClient.EngineAPIClient +} + +func (r *rollupBoostFrontend) FlashblocksClient() *opclient.WSClient { + return r.flashblocksClient +} + +func (r *rollupBoostFrontend) Start() { + r.require().NotNil(r.lifecycle, "rollup boost node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Start() +} + +func (r *rollupBoostFrontend) Stop() { + r.require().NotNil(r.lifecycle, "rollup boost node %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Stop() +} + +type supervisorFrontend struct { + presetCommon + api apis.SupervisorAPI + lifecycle stack.Lifecycle +} + +var _ stack.Supervisor = (*supervisorFrontend)(nil) + +func newPresetSupervisor(t devtest.T, name string, rpcCl opclient.RPC) *supervisorFrontend { + return &supervisorFrontend{ + presetCommon: newPresetCommon(t, name), + api: sources.NewSupervisorClient(rpcCl), + } +} + +func (r *supervisorFrontend) AdminAPI() apis.SupervisorAdminAPI { + return r.api +} + +func (r *supervisorFrontend) QueryAPI() apis.SupervisorQueryAPI { + return r.api +} + +func (r *supervisorFrontend) Start() { + 
r.require().NotNil(r.lifecycle, "supervisor %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Start() +} + +func (r *supervisorFrontend) Stop() { + r.require().NotNil(r.lifecycle, "supervisor %s is not lifecycle-controllable", r.Name()) + r.lifecycle.Stop() +} + +type supernodeFrontend struct { + presetCommon + api apis.SupernodeQueryAPI +} + +var _ stack.Supernode = (*supernodeFrontend)(nil) + +func newPresetSupernode(t devtest.T, name string, rpcCl opclient.RPC) *supernodeFrontend { + return &supernodeFrontend{ + presetCommon: newPresetCommon(t, name), + api: sources.NewSuperNodeClient(rpcCl), + } +} + +func (r *supernodeFrontend) QueryAPI() apis.SupernodeQueryAPI { + return r.api +} + +type conductorFrontend struct { + presetCommon + chainID eth.ChainID + api conductorRpc.API +} + +var _ stack.Conductor = (*conductorFrontend)(nil) + +func newPresetConductor(t devtest.T, name string, chainID eth.ChainID, rpcCl *gethrpc.Client) *conductorFrontend { + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + return &conductorFrontend{ + presetCommon: newPresetCommon(t, name), + chainID: chainID, + api: conductorRpc.NewAPIClient(rpcCl), + } +} + +func (r *conductorFrontend) ChainID() eth.ChainID { + return r.chainID +} + +func (r *conductorFrontend) RpcAPI() conductorRpc.API { + return r.api +} + +type faucetFrontend struct { + presetCommon + chainID eth.ChainID + client *sources.FaucetClient +} + +var _ stack.Faucet = (*faucetFrontend)(nil) + +func newPresetFaucet(t devtest.T, name string, chainID eth.ChainID, rpcCl opclient.RPC) *faucetFrontend { + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + return &faucetFrontend{ + presetCommon: newPresetCommon(t, name), + chainID: chainID, + client: sources.NewFaucetClient(rpcCl), + } +} + +func (r *faucetFrontend) ChainID() eth.ChainID { + return r.chainID +} + +func (r *faucetFrontend) API() apis.Faucet { + return r.client +} + +type testSequencerFrontend struct { + presetCommon + api 
apis.TestSequencerAPI + controls map[eth.ChainID]apis.TestSequencerControlAPI +} + +var _ stack.TestSequencer = (*testSequencerFrontend)(nil) + +func newPresetTestSequencer(t devtest.T, name string, adminRPCCl opclient.RPC, controlRPCs map[eth.ChainID]opclient.RPC) *testSequencerFrontend { + s := &testSequencerFrontend{ + presetCommon: newPresetCommon(t, name), + api: sources.NewBuilderClient(adminRPCCl), + controls: make(map[eth.ChainID]apis.TestSequencerControlAPI, len(controlRPCs)), + } + for chainID, rpcCl := range controlRPCs { + s.controls[chainID] = sources.NewControlClient(rpcCl) + } + return s +} + +func (r *testSequencerFrontend) AdminAPI() apis.TestSequencerAdminAPI { + return r.api +} + +func (r *testSequencerFrontend) BuildAPI() apis.TestSequencerBuildAPI { + return r.api +} + +func (r *testSequencerFrontend) ControlAPI(chainID eth.ChainID) apis.TestSequencerControlAPI { + return r.controls[chainID] +} + +type syncTesterFrontend struct { + presetCommon + chainID eth.ChainID + addr string + client *sources.SyncTesterClient +} + +var _ stack.SyncTester = (*syncTesterFrontend)(nil) + +func newPresetSyncTester(t devtest.T, name string, chainID eth.ChainID, addr string, rpcCl opclient.RPC) *syncTesterFrontend { + t = t.WithCtx(stack.ContextWithChainID(t.Ctx(), chainID)) + return &syncTesterFrontend{ + presetCommon: newPresetCommon(t, name), + chainID: chainID, + addr: addr, + client: sources.NewSyncTesterClient(rpcCl), + } +} + +func (r *syncTesterFrontend) ChainID() eth.ChainID { + return r.chainID +} + +func (r *syncTesterFrontend) API() apis.SyncTester { + return r.client +} + +func (r *syncTesterFrontend) APIWithSession(sessionID string) apis.SyncTester { + require := r.T().Require() + require.NoError(synctester.IsValidSessionID(sessionID)) + rpcCl, err := opclient.NewRPC(r.T().Ctx(), r.Logger(), r.addr+"/"+sessionID, opclient.WithLazyDial()) + require.NoError(err, "sync tester failed to initialize rpc per session") + return 
sources.NewSyncTesterClient(rpcCl) +} diff --git a/op-devstack/presets/simple_with_synctester.go b/op-devstack/presets/simple_with_synctester.go index b8c7950322b7c..1a46d7cb3bcef 100644 --- a/op-devstack/presets/simple_with_synctester.go +++ b/op-devstack/presets/simple_with_synctester.go @@ -4,9 +4,6 @@ import ( "github.com/ethereum-optimism/optimism/op-core/forks" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) @@ -18,31 +15,17 @@ type SimpleWithSyncTester struct { L2CL2 *dsl.L2CLNode } -func WithSimpleWithSyncTester() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSimpleSystemWithSyncTester(&sysgo.DefaultSimpleSystemWithSyncTesterIDs{})) +// NewSimpleWithSyncTester creates a fresh SimpleWithSyncTester target for the current +// test. +// +// The target is created from the runtime plus any additional preset options. 
+func NewSimpleWithSyncTester(t devtest.T, opts ...Option) *SimpleWithSyncTester { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSimpleWithSyncTester", opts, simpleWithSyncTesterPresetSupportedOptionKinds) + out := simpleWithSyncTesterFromRuntime(t, sysgo.NewSimpleWithSyncTesterRuntimeWithConfig(t, presetCfg)) + presetOpts.applyPreset(out) + return out } -func NewSimpleWithSyncTester(t devtest.T) *SimpleWithSyncTester { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - minimal := minimalFromSystem(t, system, orch) - l2 := system.L2Network(match.L2ChainA) - syncTester := l2.SyncTester(match.FirstSyncTester) - - // L2CL connected to L2EL initialized by sync tester - l2CL2 := l2.L2CLNode(match.SecondL2CL) - // L2EL initialized by sync tester - syncTesterL2EL := l2.L2ELNode(match.SecondL2EL) - - return &SimpleWithSyncTester{ - Minimal: *minimal, - SyncTester: dsl.NewSyncTester(syncTester), - SyncTesterL2EL: dsl.NewL2ELNode(syncTesterL2EL, orch.ControlPlane()), - L2CL2: dsl.NewL2CLNode(l2CL2, orch.ControlPlane()), - } -} - -func WithHardforkSequentialActivation(startFork, endFork forks.Name, delta uint64) stack.CommonOption { - return stack.MakeCommon(sysgo.WithDeployerOptions(sysgo.WithHardforkSequentialActivation(startFork, endFork, &delta))) +func WithHardforkSequentialActivation(startFork, endFork forks.Name, delta uint64) Option { + return WithDeployerOptions(sysgo.WithHardforkSequentialActivation(startFork, endFork, &delta)) } diff --git a/op-devstack/presets/singlechain_from_runtime.go b/op-devstack/presets/singlechain_from_runtime.go new file mode 100644 index 0000000000000..7f19ab67b81a7 --- /dev/null +++ b/op-devstack/presets/singlechain_from_runtime.go @@ -0,0 +1,187 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +func singleChainMultiNodeFromRuntime(t devtest.T, runtime *sysgo.SingleChainRuntime, runSyncChecks bool) *SingleChainMultiNode { + minimal := minimalFromRuntime(t, runtime) + l2ChainID := runtime.L2Network.ChainID() + nodeB := runtime.Nodes["b"] + t.Require().NotNil(nodeB, "missing single-chain node b") + + l2ELB := newL2ELFrontend( + t, + "b", + l2ChainID, + nodeB.EL.UserRPC(), + nodeB.EL.EngineRPC(), + nodeB.EL.JWTPath(), + runtime.L2Network.RollupConfig(), + nodeB.EL, + ) + l2CLB := newL2CLFrontend( + t, + "b", + l2ChainID, + nodeB.CL.UserRPC(), + nodeB.CL, + ) + l2CLB.attachEL(l2ELB) + l2Net, ok := minimal.L2Chain.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network") + l2Net.AddL2ELNode(l2ELB) + l2Net.AddL2CLNode(l2CLB) + + preset := &SingleChainMultiNode{ + Minimal: *minimal, + L2ELB: dsl.NewL2ELNode(l2ELB), + L2CLB: dsl.NewL2CLNode(l2CLB), + } + if runtime.P2PEnabled { + preset.L2CLB.ManagePeer(preset.L2CL) + } + if runSyncChecks { + // Ensure the follower node is in sync with the sequencer before starting tests. 
+ dsl.CheckAll(t, + preset.L2CLB.MatchedFn(preset.L2CL, types.CrossSafe, 30), + preset.L2CLB.MatchedFn(preset.L2CL, types.LocalUnsafe, 30), + ) + } + return preset +} + +func singleChainMultiNodeWithTestSeqFromRuntime(t devtest.T, runtime *sysgo.SingleChainRuntime) *SingleChainMultiNodeWithTestSeq { + preset := singleChainMultiNodeFromRuntime(t, runtime, false) + testSequencer := newTestSequencerFrontend( + t, + runtime.TestSequencer.Name, + runtime.TestSequencer.AdminRPC, + runtime.TestSequencer.ControlRPC, + runtime.TestSequencer.JWTSecret, + ) + return &SingleChainMultiNodeWithTestSeq{ + SingleChainMultiNode: *preset, + TestSequencer: dsl.NewTestSequencer(testSequencer), + } +} + +func singleChainTwoVerifiersFromRuntime(t devtest.T, runtime *sysgo.SingleChainRuntime) *SingleChainTwoVerifiers { + base := singleChainMultiNodeFromRuntime(t, runtime, false) + l2ChainID := runtime.L2Network.ChainID() + nodeC := runtime.Nodes["c"] + t.Require().NotNil(nodeC, "missing single-chain node c") + + l2ELC := newL2ELFrontend( + t, + "c", + l2ChainID, + nodeC.EL.UserRPC(), + nodeC.EL.EngineRPC(), + nodeC.EL.JWTPath(), + runtime.L2Network.RollupConfig(), + nodeC.EL, + ) + l2CLC := newL2CLFrontend( + t, + "c", + l2ChainID, + nodeC.CL.UserRPC(), + nodeC.CL, + ) + l2CLC.attachEL(l2ELC) + l2Net, ok := base.L2Chain.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network") + l2Net.AddL2ELNode(l2ELC) + l2Net.AddL2CLNode(l2CLC) + testSequencer := newTestSequencerFrontend( + t, + runtime.TestSequencer.Name, + runtime.TestSequencer.AdminRPC, + runtime.TestSequencer.ControlRPC, + runtime.TestSequencer.JWTSecret, + ) + preset := &SingleChainTwoVerifiers{ + SingleChainMultiNode: *base, + L2ELC: dsl.NewL2ELNode(l2ELC), + L2CLC: dsl.NewL2CLNode(l2CLC), + TestSequencer: dsl.NewTestSequencer(testSequencer), + } + preset.L2CLC.ManagePeer(preset.L2CL) + preset.L2CLC.ManagePeer(preset.L2CLB) + return preset +} + +func simpleWithSyncTesterFromRuntime(t devtest.T, runtime 
*sysgo.SingleChainRuntime) *SimpleWithSyncTester { + minimal := minimalFromRuntime(t, runtime) + l2ChainID := runtime.L2Network.ChainID() + t.Require().NotNil(runtime.SyncTester, "missing sync tester support") + t.Require().NotNil(runtime.SyncTester.Node, "missing sync tester node") + + syncTesterName, syncTesterRPC, ok := runtime.SyncTester.Service.DefaultEndpoint(runtime.L2Network.ChainID()) + t.Require().Truef(ok, "missing sync tester for chain %s", runtime.L2Network.ChainID()) + syncTester := newSyncTesterFrontend(t, syncTesterName, l2ChainID, syncTesterRPC) + + syncTesterL2EL := newL2ELFrontend( + t, + "sync-tester-el", + l2ChainID, + runtime.SyncTester.Node.EL.UserRPC(), + runtime.SyncTester.Node.EL.EngineRPC(), + runtime.SyncTester.Node.EL.JWTPath(), + runtime.L2Network.RollupConfig(), + ) + l2CL2 := newL2CLFrontend( + t, + "verifier", + l2ChainID, + runtime.SyncTester.Node.CL.UserRPC(), + runtime.SyncTester.Node.CL, + ) + l2CL2.attachEL(syncTesterL2EL) + l2Net, ok := minimal.L2Chain.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network") + l2Net.AddSyncTester(syncTester) + l2Net.AddL2ELNode(syncTesterL2EL) + l2Net.AddL2CLNode(l2CL2) + + preset := &SimpleWithSyncTester{ + Minimal: *minimal, + SyncTester: dsl.NewSyncTester(syncTester), + SyncTesterL2EL: dsl.NewL2ELNode(syncTesterL2EL), + L2CL2: dsl.NewL2CLNode(l2CL2), + } + preset.L2CL2.ManagePeer(preset.L2CL) + return preset +} + +func minimalWithConductorsFromRuntime(t devtest.T, runtime *sysgo.SingleChainRuntime) *MinimalWithConductors { + minimal := minimalFromRuntime(t, runtime) + l2ChainID := runtime.L2Network.ChainID() + t.Require().NotNil(runtime.Conductors, "missing conductor support") + + cAName := "sequencer" + cBName := "b" + cCName := "c" + cA := newConductorFrontend(t, cAName, l2ChainID, runtime.Conductors[cAName].HTTPEndpoint()) + cB := newConductorFrontend(t, cBName, l2ChainID, runtime.Conductors[cBName].HTTPEndpoint()) + cC := newConductorFrontend(t, cCName, 
l2ChainID, runtime.Conductors[cCName].HTTPEndpoint()) + l2Net, ok := minimal.L2Chain.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network") + l2Net.AddConductor(cA) + l2Net.AddConductor(cB) + l2Net.AddConductor(cC) + + conductors := []stack.Conductor{cA, cB, cC} + return &MinimalWithConductors{ + Minimal: minimal, + ConductorSets: map[eth.ChainID]dsl.ConductorSet{ + l2ChainID: dsl.NewConductorSet(conductors), + }, + } +} diff --git a/op-devstack/presets/singlechain_multinode.go b/op-devstack/presets/singlechain_multinode.go index 61210c34bdf7a..79c7ee69c89ad 100644 --- a/op-devstack/presets/singlechain_multinode.go +++ b/op-devstack/presets/singlechain_multinode.go @@ -3,11 +3,7 @@ package presets import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" ) type SingleChainMultiNode struct { @@ -17,45 +13,38 @@ type SingleChainMultiNode struct { L2CLB *dsl.L2CLNode } -func WithSingleChainMultiNode() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainMultiNodeSystem(&sysgo.DefaultSingleChainMultiNodeSystemIDs{})) -} - -func NewSingleChainMultiNode(t devtest.T) *SingleChainMultiNode { - preset := NewSingleChainMultiNodeWithoutCheck(t) - // Ensure the follower node is in sync with the sequencer before starting tests - dsl.CheckAll(t, - preset.L2CLB.MatchedFn(preset.L2CL, types.CrossSafe, 30), - preset.L2CLB.MatchedFn(preset.L2CL, types.LocalUnsafe, 30), - ) - return preset +// NewSingleChainMultiNode creates a fresh SingleChainMultiNode target for the current +// test. 
+// +// The target is created from the runtime plus any additional preset options. +func NewSingleChainMultiNode(t devtest.T, opts ...Option) *SingleChainMultiNode { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSingleChainMultiNode", opts, minimalPresetSupportedOptionKinds) + out := singleChainMultiNodeFromRuntime(t, sysgo.NewSingleChainMultiNodeRuntimeWithConfig(t, true, presetCfg), true) + presetOpts.applyPreset(out) + return out } -func NewSingleChainMultiNodeWithoutCheck(t devtest.T) *SingleChainMultiNode { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - minimal := minimalFromSystem(t, system, orch) - l2 := system.L2Network(match.Assume(t, match.L2ChainA)) - verifierCL := l2.L2CLNode(match.Assume(t, - match.And( - match.Not(match.WithSequencerActive(t.Ctx())), - match.Not(stack.ByID[stack.L2CLNode](minimal.L2CL.ID())), - ))) - verifierEL := l2.L2ELNode(match.Assume(t, - match.And( - match.EngineFor(verifierCL), - match.Not(stack.ByID[stack.L2ELNode](minimal.L2EL.ID()))))) - preset := &SingleChainMultiNode{ - Minimal: *minimal, - L2ELB: dsl.NewL2ELNode(verifierEL, orch.ControlPlane()), - L2CLB: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), - } - return preset +// NewSingleChainMultiNodeWithoutCheck creates a fresh SingleChainMultiNode target for the +// current test, without running the initial verifier sync checks. +// +// The target is created from the runtime plus any additional preset options. 
+func NewSingleChainMultiNodeWithoutCheck(t devtest.T, opts ...Option) *SingleChainMultiNode { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSingleChainMultiNodeWithoutCheck", opts, minimalPresetSupportedOptionKinds) + out := singleChainMultiNodeFromRuntime(t, sysgo.NewSingleChainMultiNodeRuntimeWithConfig(t, true, presetCfg), false) + presetOpts.applyPreset(out) + return out } -func WithSingleChainMultiNodeWithoutP2P() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainMultiNodeSystemWithoutP2P(&sysgo.DefaultSingleChainMultiNodeSystemIDs{})) +// NewSingleChainMultiNodeWithoutP2PWithoutCheck creates a fresh SingleChainMultiNode +// target without preconfigured sequencer/verifier P2P links and without running initial sync +// checks. +// +// The target is created from the runtime plus any additional preset options. +func NewSingleChainMultiNodeWithoutP2PWithoutCheck(t devtest.T, opts ...Option) *SingleChainMultiNode { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSingleChainMultiNodeWithoutP2PWithoutCheck", opts, minimalPresetSupportedOptionKinds) + out := singleChainMultiNodeFromRuntime(t, sysgo.NewSingleChainMultiNodeRuntimeWithConfig(t, false, presetCfg), false) + presetOpts.applyPreset(out) + return out } type SingleChainMultiNodeWithTestSeq struct { @@ -64,33 +53,13 @@ type SingleChainMultiNodeWithTestSeq struct { TestSequencer *dsl.TestSequencer } -func NewSingleChainMultiNodeWithTestSeq(t devtest.T) *SingleChainMultiNodeWithTestSeq { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - minimal := minimalFromSystem(t, system, orch) - l2 := system.L2Network(match.Assume(t, match.L2ChainA)) - verifierCL := l2.L2CLNode(match.Assume(t, - match.And( - match.Not(match.WithSequencerActive(t.Ctx())), - match.Not(stack.ByID[stack.L2CLNode](minimal.L2CL.ID())), - ))) - verifierEL := l2.L2ELNode(match.Assume(t, - match.And( - match.EngineFor(verifierCL), - 
match.Not(stack.ByID[stack.L2ELNode](minimal.L2EL.ID()))))) - preset := &SingleChainMultiNode{ - Minimal: *minimal, - L2ELB: dsl.NewL2ELNode(verifierEL, orch.ControlPlane()), - L2CLB: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), - } - out := &SingleChainMultiNodeWithTestSeq{ - SingleChainMultiNode: *preset, - TestSequencer: dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), - } +// NewSingleChainMultiNodeWithTestSeq creates a fresh +// SingleChainMultiNodeWithTestSeq target for the current test. +// +// The target is created from the runtime plus any additional preset options. +func NewSingleChainMultiNodeWithTestSeq(t devtest.T, opts ...Option) *SingleChainMultiNodeWithTestSeq { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSingleChainMultiNodeWithTestSeq", opts, minimalPresetSupportedOptionKinds) + out := singleChainMultiNodeWithTestSeqFromRuntime(t, sysgo.NewSingleChainMultiNodeRuntimeWithConfig(t, true, presetCfg)) + presetOpts.applyPreset(out) return out } - -func WithNewSingleChainMultiNodeWithTestSeq() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainMultiNodeWithTestSeqSystem(&sysgo.DefaultSingleChainMultiNodeWithTestSeqSystemIDs{})) -} diff --git a/op-devstack/presets/singlechain_twoverifiers.go b/op-devstack/presets/singlechain_twoverifiers.go index 5b3fa9312f6cc..4de57d3c2b963 100644 --- a/op-devstack/presets/singlechain_twoverifiers.go +++ b/op-devstack/presets/singlechain_twoverifiers.go @@ -3,9 +3,6 @@ package presets import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) @@ -18,32 +15,13 @@ type SingleChainTwoVerifiers struct { TestSequencer *dsl.TestSequencer } 
-func NewSingleChainTwoVerifiersWithoutCheck(t devtest.T) *SingleChainTwoVerifiers { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - singleChainMultiNode := NewSingleChainMultiNodeWithoutCheck(t) - l2 := system.L2Network(match.Assume(t, match.L2ChainA)) - verifierCL := l2.L2CLNode(match.Assume(t, - match.And( - match.Not(match.WithSequencerActive(t.Ctx())), - match.Not(stack.ByID[stack.L2CLNode](singleChainMultiNode.L2CL.ID())), - match.Not(stack.ByID[stack.L2CLNode](singleChainMultiNode.L2CLB.ID())), - ))) - verifierEL := l2.L2ELNode(match.Assume(t, - match.And( - match.Not(stack.ByID[stack.L2ELNode](singleChainMultiNode.L2EL.ID())), - match.Not(stack.ByID[stack.L2ELNode](singleChainMultiNode.L2ELB.ID())), - ))) - preset := &SingleChainTwoVerifiers{ - SingleChainMultiNode: *singleChainMultiNode, - L2ELC: dsl.NewL2ELNode(verifierEL, orch.ControlPlane()), - L2CLC: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), - TestSequencer: dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), - } - return preset -} - -func WithSingleChainTwoVerifiersFollowL2() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSingleChainTwoVerifiersFollowL2System(&sysgo.DefaultSingleChainTwoVerifiersSystemIDs{})) +// NewSingleChainTwoVerifiersWithoutCheck creates a fresh +// SingleChainTwoVerifiers target for the current test. +// +// The target is created from the runtime plus any additional preset options. 
+func NewSingleChainTwoVerifiersWithoutCheck(t devtest.T, opts ...Option) *SingleChainTwoVerifiers { + presetCfg, presetOpts := collectSupportedPresetConfig(t, "NewSingleChainTwoVerifiersWithoutCheck", opts, minimalPresetSupportedOptionKinds) + out := singleChainTwoVerifiersFromRuntime(t, sysgo.NewSingleChainTwoVerifiersRuntimeWithConfig(t, presetCfg)) + presetOpts.applyPreset(out) + return out } diff --git a/op-devstack/presets/superproofs_from_runtime.go b/op-devstack/presets/superproofs_from_runtime.go new file mode 100644 index 0000000000000..cf178b13e4a01 --- /dev/null +++ b/op-devstack/presets/superproofs_from_runtime.go @@ -0,0 +1,156 @@ +package presets + +import ( + challengerConfig "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func attachChallenger(t devtest.T, l2Net *dsl.L2Network, name string, chainID eth.ChainID, cfg *challengerConfig.Config) { + if cfg == nil { + return + } + net, ok := l2Net.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network") + net.AddL2Challenger(newPresetL2Challenger(t, name, chainID, cfg)) +} + +func simpleInteropFromSupernodeProofsRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *SimpleInterop { + chainA := runtime.Chains["l2a"] + chainB := runtime.Chains["l2b"] + t.Require().NotNil(chainA, "missing l2a superproofs chain") + t.Require().NotNil(chainB, "missing l2b superproofs chain") + twoL2, components := twoL2FromRuntime(t, runtime) + + supernodeFrontend := newSupernodeFrontend(t, "supernode-two-l2-system", runtime.Supernode.UserRPC()) + testSequencer := newTestSequencerFrontend( + t, + runtime.TestSequencer.Name, + runtime.TestSequencer.AdminRPC, + runtime.TestSequencer.ControlRPC, + runtime.TestSequencer.JWTSecret, + ) + + out := 
&SimpleInterop{ + SingleChainInterop: SingleChainInterop{ + Log: t.Logger(), + T: t, + timeTravel: nil, + Supervisor: nil, + SuperRoots: dsl.NewSupernodeWithTestControl(supernodeFrontend, runtime.Supernode), + TestSequencer: dsl.NewTestSequencer(testSequencer), + L1Network: twoL2.L1Network, + L1EL: twoL2.L1EL, + L1CL: twoL2.L1CL, + L2ChainA: twoL2.L2A, + L2BatcherA: dsl.NewL2Batcher(components.l2ABatcher), + L2ELA: dsl.NewL2ELNode(components.l2AEL), + L2CLA: twoL2.L2ACL, + Wallet: dsl.NewRandomHDWallet(t, 30), + FaucetA: components.faucetA, + FaucetL1: dsl.NewFaucet(newFaucetFrontendForChain(t, runtime.FaucetService, runtime.L1Network.ChainID())), + challengerConfig: runtime.L2ChallengerConfig, + }, + L2ChainB: twoL2.L2B, + L2BatcherB: dsl.NewL2Batcher(components.l2BBatcher), + L2ELB: dsl.NewL2ELNode(components.l2BEL), + L2CLB: twoL2.L2BCL, + FaucetB: components.faucetB, + } + out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) + out.FunderA = dsl.NewFunder(out.Wallet, out.FaucetA, out.L2ELA) + out.FunderB = dsl.NewFunder(out.Wallet, out.FaucetB, out.L2ELB) + l1Net, ok := out.L1Network.Escape().(*presetL1Network) + t.Require().True(ok, "expected preset L1 network") + l1Net.AddFaucet(out.FaucetL1.Escape().(*faucetFrontend)) + + attachChallenger(t, out.L2ChainA, "main", chainA.Network.ChainID(), out.challengerConfig) + attachChallenger(t, out.L2ChainB, "main", chainB.Network.ChainID(), out.challengerConfig) + return out +} + +func singleChainInteropFromSupernodeProofsRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *SingleChainInterop { + chainA := runtime.Chains["l2a"] + t.Require().NotNil(chainA, "missing l2a superproofs chain") + l1ChainID := runtime.L1Network.ChainID() + l2ChainID := chainA.Network.ChainID() + + l1Network := newPresetL1Network(t, "l1", runtime.L1Network.ChainConfig()) + l1EL := newL1ELFrontend(t, "l1", l1ChainID, runtime.L1EL.UserRPC()) + l1CL := newL1CLFrontend(t, "l1", l1ChainID, runtime.L1CL.BeaconHTTPAddr(), 
runtime.L1CL.FakePoS()) + l1Network.AddL1ELNode(l1EL) + l1Network.AddL1CLNode(l1CL) + + l2Chain := newPresetL2Network( + t, + "l2a", + chainA.Network.ChainConfig(), + chainA.Network.RollupConfig(), + chainA.Network.Deployment(), + newKeyring(runtime.Keys, t.Require()), + l1Network, + ) + l2EL := newL2ELFrontend( + t, + "sequencer", + l2ChainID, + chainA.EL.UserRPC(), + chainA.EL.EngineRPC(), + chainA.EL.JWTPath(), + chainA.Network.RollupConfig(), + chainA.EL, + ) + l2CL := newL2CLFrontend(t, "sequencer", l2ChainID, chainA.CL.UserRPC(), chainA.CL) + l2CL.attachEL(l2EL) + l2Batcher := newL2BatcherFrontend(t, "main", l2ChainID, chainA.Batcher.UserRPC()) + l2Chain.AddL2ELNode(l2EL) + l2Chain.AddL2CLNode(l2CL) + l2Chain.AddL2Batcher(l2Batcher) + + challengerCfg := runtime.L2ChallengerConfig + if challengerCfg != nil { + l2Chain.AddL2Challenger(newPresetL2Challenger(t, "main", l2ChainID, challengerCfg)) + } + + supernodeFrontend := newSupernodeFrontend(t, "supernode-single-system-proofs", runtime.Supernode.UserRPC()) + testSequencer := newTestSequencerFrontend( + t, + runtime.TestSequencer.Name, + runtime.TestSequencer.AdminRPC, + runtime.TestSequencer.ControlRPC, + runtime.TestSequencer.JWTSecret, + ) + l1ELDSL := dsl.NewL1ELNode(l1EL) + l1CLDSL := dsl.NewL1CLNode(l1CL) + l2ELDSL := dsl.NewL2ELNode(l2EL) + l2CLDSL := dsl.NewL2CLNode(l2CL) + faucetAFrontend := newFaucetFrontendForChain(t, runtime.FaucetService, l2ChainID) + faucetL1Frontend := newFaucetFrontendForChain(t, runtime.FaucetService, l1ChainID) + + out := &SingleChainInterop{ + Log: t.Logger(), + T: t, + timeTravel: nil, + Supervisor: nil, + SuperRoots: dsl.NewSupernodeWithTestControl(supernodeFrontend, runtime.Supernode), + TestSequencer: dsl.NewTestSequencer(testSequencer), + L1Network: dsl.NewL1Network(l1Network, l1ELDSL, l1CLDSL), + L1EL: l1ELDSL, + L1CL: l1CLDSL, + L2ChainA: dsl.NewL2Network(l2Chain, l2ELDSL, l2CLDSL, l1ELDSL, nil, nil), + L2BatcherA: dsl.NewL2Batcher(l2Batcher), + L2ELA: l2ELDSL, + 
L2CLA: l2CLDSL, + Wallet: dsl.NewRandomHDWallet(t, 30), + FaucetA: dsl.NewFaucet(faucetAFrontend), + FaucetL1: dsl.NewFaucet(faucetL1Frontend), + challengerConfig: challengerCfg, + } + l1Network.AddFaucet(faucetL1Frontend) + l2Chain.AddFaucet(faucetAFrontend) + out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) + out.FunderA = dsl.NewFunder(out.Wallet, out.FaucetA, out.L2ELA) + return out +} diff --git a/op-devstack/presets/sync_tester_config.go b/op-devstack/presets/sync_tester_config.go deleted file mode 100644 index b97a32cbb2337..0000000000000 --- a/op-devstack/presets/sync_tester_config.go +++ /dev/null @@ -1,24 +0,0 @@ -package presets - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -func WithSyncTesterELInitialState(fcu eth.FCUState) stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn( - func(_ devtest.P, id stack.ComponentID, cfg *sysgo.SyncTesterELConfig) { - cfg.FCUState = fcu - }))) -} - -func WithELSyncActive() stack.CommonOption { - return stack.MakeCommon( - sysgo.WithGlobalSyncTesterELOption(sysgo.SyncTesterELOptionFn( - func(_ devtest.P, id stack.ComponentID, cfg *sysgo.SyncTesterELConfig) { - cfg.ELSyncActive = true - }))) -} diff --git a/op-devstack/presets/sysgo_runtime.go b/op-devstack/presets/sysgo_runtime.go new file mode 100644 index 0000000000000..8e1a894bbe95e --- /dev/null +++ b/op-devstack/presets/sysgo_runtime.go @@ -0,0 +1,175 @@ +package presets + +import ( + "os" + "strings" + + "github.com/ethereum/go-ethereum/common/hexutil" + gn "github.com/ethereum/go-ethereum/node" + "github.com/ethereum/go-ethereum/rpc" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + 
"github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func newL1ELFrontend(t devtest.T, name string, chainID eth.ChainID, userRPC string) *l1ELFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), userRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + return newPresetL1ELNode(t, name, chainID, rpcCl) +} + +func newL1CLFrontend(t devtest.T, name string, chainID eth.ChainID, beaconHTTPAddr string, lifecycle ...stack.Lifecycle) *l1CLFrontend { + beaconCl := client.NewBasicHTTPClient(beaconHTTPAddr, t.Logger()) + l1CL := newPresetL1CLNode(t, name, chainID, beaconCl) + if len(lifecycle) > 0 { + l1CL.lifecycle = lifecycle[0] + } + return l1CL +} + +func newL2ELFrontend(t devtest.T, name string, chainID eth.ChainID, userRPC string, engineRPC string, jwtPath string, rollupCfg *rollup.Config, lifecycle ...stack.Lifecycle) *l2ELFrontend { + userRPCCl, err := client.NewRPC(t.Ctx(), t.Logger(), userRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(userRPCCl.Close) + jwtSecret := readJWTSecret(t, jwtPath) + engineRPCCl, err := client.NewRPC( + t.Ctx(), + t.Logger(), + engineRPC, + client.WithLazyDial(), + client.WithGethRPCOptions(rpc.WithHTTPAuth(gn.NewJWTAuth(jwtSecret))), + ) + t.Require().NoError(err) + t.Cleanup(engineRPCCl.Close) + l2EL := newPresetL2ELNode(t, name, chainID, userRPCCl, engineRPCCl, rollupCfg) + if len(lifecycle) > 0 { + l2EL.lifecycle = lifecycle[0] + } + return l2EL +} + +func readJWTSecret(t devtest.T, jwtPath string) [32]byte { + t.Require().NotEmpty(jwtPath, "missing jwt path") + content, err := os.ReadFile(jwtPath) + t.Require().NoError(err, "failed to read jwt path %s", jwtPath) + raw, err := hexutil.Decode(strings.TrimSpace(string(content))) + t.Require().NoError(err, "failed to decode jwt secret from %s", 
jwtPath) + t.Require().Len(raw, 32, "invalid jwt secret length from %s", jwtPath) + var secret [32]byte + copy(secret[:], raw) + return secret +} + +func newL2CLFrontend(t devtest.T, name string, chainID eth.ChainID, userRPC string, node sysgo.L2CLNode) *l2CLFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), userRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + interopEndpoint, interopJWT := node.InteropRPC() + l2CL := newPresetL2CLNode(t, name, chainID, rpcCl, userRPC, interopEndpoint, interopJWT) + if lifecycle, ok := any(node).(stack.Lifecycle); ok { + l2CL.lifecycle = lifecycle + } + return l2CL +} + +func newL2BatcherFrontend(t devtest.T, name string, chainID eth.ChainID, rpcEndpoint string) *l2BatcherFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), rpcEndpoint, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + return newPresetL2Batcher(t, name, chainID, rpcCl) +} + +func newOPRBuilderFrontend(t devtest.T, name string, chainID eth.ChainID, userRPC string, flashblocksWSURL string, rollupCfg *rollup.Config, lifecycle ...stack.Lifecycle) *oprBuilderFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), userRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + + t.Require().NotEmpty(flashblocksWSURL, "missing flashblocks ws url for %s", name) + wsCl, err := client.DialWS(t.Ctx(), client.WSConfig{ + URL: flashblocksWSURL, + Log: t.Logger(), + }) + t.Require().NoError(err) + + oprb := newPresetOPRBuilderNode(t, name, chainID, rpcCl, rollupCfg, wsCl) + if len(lifecycle) > 0 { + oprb.lifecycle = lifecycle[0] + } + return oprb +} + +func newRollupBoostFrontend(t devtest.T, name string, chainID eth.ChainID, userRPC string, flashblocksWSURL string, rollupCfg *rollup.Config, lifecycle ...stack.Lifecycle) *rollupBoostFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), userRPC, client.WithLazyDial()) + t.Require().NoError(err) + 
t.Cleanup(rpcCl.Close) + + t.Require().NotEmpty(flashblocksWSURL, "missing flashblocks ws url for %s", name) + wsCl, err := client.DialWS(t.Ctx(), client.WSConfig{ + URL: flashblocksWSURL, + Log: t.Logger(), + }) + t.Require().NoError(err) + + rollupBoost := newPresetRollupBoostNode(t, name, chainID, rpcCl, rollupCfg, wsCl) + if len(lifecycle) > 0 { + rollupBoost.lifecycle = lifecycle[0] + } + return rollupBoost +} + +func newSupervisorFrontend(t devtest.T, name string, userRPC string, lifecycle ...stack.Lifecycle) *supervisorFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), userRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + supervisor := newPresetSupervisor(t, name, rpcCl) + if len(lifecycle) > 0 { + supervisor.lifecycle = lifecycle[0] + } + return supervisor +} + +func newSupernodeFrontend(t devtest.T, name string, userRPC string) *supernodeFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), userRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + return newPresetSupernode(t, name, rpcCl) +} + +func newConductorFrontend(t devtest.T, name string, chainID eth.ChainID, rpcEndpoint string) *conductorFrontend { + rpcCl, err := rpc.DialContext(t.Ctx(), rpcEndpoint) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + return newPresetConductor(t, name, chainID, rpcCl) +} + +func newTestSequencerFrontend(t devtest.T, name string, adminRPC string, controlRPCs map[eth.ChainID]string, jwtSecret [32]byte) *testSequencerFrontend { + opts := []client.RPCOption{ + client.WithLazyDial(), + client.WithGethRPCOptions(rpc.WithHTTPAuth(gn.NewJWTAuth(jwtSecret))), + } + + adminRPCCl, err := client.NewRPC(t.Ctx(), t.Logger(), adminRPC, opts...) + t.Require().NoError(err) + t.Cleanup(adminRPCCl.Close) + + controlClients := make(map[eth.ChainID]client.RPC, len(controlRPCs)) + for chainID, endpoint := range controlRPCs { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), endpoint, opts...) 
+ t.Require().NoErrorf(err, "failed to create control RPC client for chain %s", chainID) + t.Cleanup(rpcCl.Close) + controlClients[chainID] = rpcCl + } + return newPresetTestSequencer(t, name, adminRPCCl, controlClients) +} + +func newSyncTesterFrontend(t devtest.T, name string, chainID eth.ChainID, syncTesterRPC string) *syncTesterFrontend { + rpcCl, err := client.NewRPC(t.Ctx(), t.Logger(), syncTesterRPC, client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(rpcCl.Close) + return newPresetSyncTester(t, name, chainID, syncTesterRPC, rpcCl) +} diff --git a/op-devstack/presets/timetravel.go b/op-devstack/presets/timetravel.go index 47ccb3ae7b019..5e548019af627 100644 --- a/op-devstack/presets/timetravel.go +++ b/op-devstack/presets/timetravel.go @@ -1,21 +1,5 @@ package presets -import "github.com/ethereum-optimism/optimism/op-devstack/stack" - -func WithTimeTravel() stack.Option[stack.Orchestrator] { - return stack.Combine( - stack.BeforeDeploy[stack.Orchestrator](func(orch stack.Orchestrator) { - ttOrch, ok := orch.(stack.TimeTravelOrchestrator) - if !ok { - return - } - ttOrch.EnableTimeTravel() - }), - stack.PostHydrate[stack.Orchestrator](func(sys stack.System) { - sys.L1Networks() - ttSys, ok := sys.(stack.TimeTravelSystem) - sys.T().Gate().True(ok, "Requires system supporting time travel") - sys.T().Gate().True(ttSys.TimeTravelEnabled(), "Time travel must be enabled") - }), - ) +func WithTimeTravel() Option { + return WithTimeTravelEnabled() } diff --git a/op-devstack/presets/twol2.go b/op-devstack/presets/twol2.go index e405cd8f985fa..2e8d113bc535a 100644 --- a/op-devstack/presets/twol2.go +++ b/op-devstack/presets/twol2.go @@ -10,11 +10,9 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" 
"github.com/ethereum-optimism/optimism/op-devstack/sysgo" "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txplan" ) @@ -22,12 +20,12 @@ import ( // TwoL2 represents a two-L2 setup without interop considerations. // It is useful for testing components which bridge multiple L2s without necessarily using interop. type TwoL2 struct { - Log log.Logger - T devtest.T - ControlPlane stack.ControlPlane + Log log.Logger + T devtest.T L1Network *dsl.L1Network L1EL *dsl.L1ELNode + L1CL *dsl.L1CLNode L2A *dsl.L2Network L2B *dsl.L2Network @@ -35,45 +33,11 @@ type TwoL2 struct { L2BCL *dsl.L2CLNode } -func WithTwoL2() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultTwoL2System(&sysgo.DefaultTwoL2SystemIDs{})) -} - -func WithTwoL2Supernode() stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSupernodeTwoL2System(&sysgo.DefaultTwoL2SystemIDs{})) -} - -// WithTwoL2SupernodeInterop specifies a two-L2 system using a shared supernode with interop enabled. -// Use delaySeconds=0 for interop at genesis, or a positive value to test the transition from -// normal safety to interop-verified safety. 
-func WithTwoL2SupernodeInterop(delaySeconds uint64) stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultSupernodeInteropTwoL2System(&sysgo.DefaultTwoL2SystemIDs{}, delaySeconds)) -} - -func NewTwoL2(t devtest.T) *TwoL2 { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - - l1Net := system.L1Network(match.FirstL1Network) - l2a := system.L2Network(match.Assume(t, match.L2ChainA)) - l2b := system.L2Network(match.Assume(t, match.L2ChainB)) - l2aCL := l2a.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - l2bCL := l2b.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - - require.NotEqual(t, l2a.ChainID(), l2b.ChainID()) - - return &TwoL2{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(l1Net), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2A: dsl.NewL2Network(l2a, orch.ControlPlane()), - L2B: dsl.NewL2Network(l2b, orch.ControlPlane()), - L2ACL: dsl.NewL2CLNode(l2aCL, orch.ControlPlane()), - L2BCL: dsl.NewL2CLNode(l2bCL, orch.ControlPlane()), - } +// NewTwoL2Supernode creates a fresh TwoL2 target backed by a shared supernode for the +// current test. +func NewTwoL2Supernode(t devtest.T, opts ...Option) *TwoL2 { + presetCfg, _ := collectSupportedPresetConfig(t, "NewTwoL2Supernode", opts, twoL2SupernodePresetSupportedOptionKinds) + return twoL2SupernodeFromRuntime(t, sysgo.NewTwoL2SupernodeRuntimeWithConfig(t, presetCfg)) } // TwoL2SupernodeInterop represents a two-L2 setup with a shared supernode that has interop enabled. @@ -118,15 +82,13 @@ type TwoL2SupernodeInterop struct { // DelaySeconds is the delay from genesis to interop activation DelaySeconds uint64 - // system holds the underlying system for advanced operations - system stack.ExtensibleSystem + timeTravel *clock.AdvancingClock } // AdvanceTime advances the time-travel clock if enabled. 
func (s *TwoL2SupernodeInterop) AdvanceTime(amount time.Duration) { - ttSys, ok := s.system.(stack.TimeTravelSystem) - s.T.Require().True(ok, "attempting to advance time on incompatible system") - ttSys.AdvanceTime(amount) + s.T.Require().NotNil(s.timeTravel, "attempting to advance time on incompatible system") + s.timeTravel.AdvanceTime(amount) } // SuperNodeClient returns an API for calling supernode-specific RPC methods @@ -135,68 +97,11 @@ func (s *TwoL2SupernodeInterop) SuperNodeClient() apis.SupernodeQueryAPI { return s.Supernode.QueryAPI() } -// NewTwoL2SupernodeInterop creates a TwoL2SupernodeInterop preset for acceptance tests. -// Use delaySeconds=0 for interop at genesis, or a positive value to test the transition. -// The delaySeconds must match what was passed to WithTwoL2SupernodeInterop in TestMain. -func NewTwoL2SupernodeInterop(t devtest.T, delaySeconds uint64) *TwoL2SupernodeInterop { - system := shim.NewSystem(t) - orch := Orchestrator() - orch.Hydrate(system) - - l1Net := system.L1Network(match.FirstL1Network) - l2a := system.L2Network(match.Assume(t, match.L2ChainA)) - l2b := system.L2Network(match.Assume(t, match.L2ChainB)) - l2aCL := l2a.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - l2bCL := l2b.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - - require.NotEqual(t, l2a.ChainID(), l2b.ChainID()) - - // Get genesis time from the DSL wrapper - l2aNet := dsl.NewL2Network(l2a, orch.ControlPlane()) - genesisTime := l2aNet.Escape().RollupConfig().Genesis.L2Time - - // Get the supernode and its test control - stackSupernode := system.Supernode(match.Assume(t, match.FirstSupernode)) - var testControl stack.InteropTestControl - if sysgoOrch, ok := orch.(*sysgo.Orchestrator); ok { - testControl = sysgoOrch.InteropTestControl(stackSupernode.ID()) - } - - // Get the test sequencer for deterministic block building - var testSequencer *dsl.TestSequencer - if len(system.TestSequencers()) > 0 { - testSequencer = 
dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))) - } - - out := &TwoL2SupernodeInterop{ - TwoL2: TwoL2{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(l1Net), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2A: l2aNet, - L2B: dsl.NewL2Network(l2b, orch.ControlPlane()), - L2ACL: dsl.NewL2CLNode(l2aCL, orch.ControlPlane()), - L2BCL: dsl.NewL2CLNode(l2bCL, orch.ControlPlane()), - }, - Supernode: dsl.NewSupernodeWithTestControl(stackSupernode, testControl), - TestSequencer: testSequencer, - L2ELA: dsl.NewL2ELNode(l2a.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), - L2ELB: dsl.NewL2ELNode(l2b.L2ELNode(match.Assume(t, match.FirstL2EL)), orch.ControlPlane()), - L2BatcherA: dsl.NewL2Batcher(l2a.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - L2BatcherB: dsl.NewL2Batcher(l2b.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - FaucetA: dsl.NewFaucet(l2a.Faucet(match.Assume(t, match.FirstFaucet))), - FaucetB: dsl.NewFaucet(l2b.Faucet(match.Assume(t, match.FirstFaucet))), - Wallet: dsl.NewRandomHDWallet(t, 30), - GenesisTime: genesisTime, - InteropActivationTime: genesisTime + delaySeconds, - DelaySeconds: delaySeconds, - system: system, - } - out.FunderA = dsl.NewFunder(out.Wallet, out.FaucetA, out.L2ELA) - out.FunderB = dsl.NewFunder(out.Wallet, out.FaucetB, out.L2ELB) - return out +// NewTwoL2SupernodeInterop creates a fresh TwoL2SupernodeInterop target for the current +// test. 
+func NewTwoL2SupernodeInterop(t devtest.T, delaySeconds uint64, opts ...Option) *TwoL2SupernodeInterop { + presetCfg, _ := collectSupportedPresetConfig(t, "NewTwoL2SupernodeInterop", opts, twoL2SupernodeInteropPresetSupportedOptionKinds) + return twoL2SupernodeInteropFromRuntime(t, sysgo.NewTwoL2SupernodeInteropRuntimeWithConfig(t, delaySeconds, presetCfg)) } // ============================================================================= diff --git a/op-devstack/presets/twol2_follow_l2.go b/op-devstack/presets/twol2_follow_l2.go index 2ed3a11ed0ae0..39645c22b708a 100644 --- a/op-devstack/presets/twol2_follow_l2.go +++ b/op-devstack/presets/twol2_follow_l2.go @@ -3,13 +3,11 @@ package presets import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" ) // TwoL2SupernodeFollowL2 extends TwoL2SupernodeInterop with one follow-source -// verifier per chain. +// follower per chain. type TwoL2SupernodeFollowL2 struct { TwoL2SupernodeInterop @@ -19,47 +17,9 @@ type TwoL2SupernodeFollowL2 struct { L2BFollowCL *dsl.L2CLNode } -// WithTwoL2SupernodeFollowL2 specifies a two-L2 system using a shared supernode -// with interop enabled and one follow-source verifier per chain. -// Use delaySeconds=0 for interop at genesis, or a positive value to test the transition from -// normal safety to interop-verified safety. -func WithTwoL2SupernodeFollowL2(delaySeconds uint64) stack.CommonOption { - return stack.MakeCommon(sysgo.DefaultTwoL2SupernodeFollowL2System(&sysgo.DefaultTwoL2SupernodeFollowL2SystemIDs{}, delaySeconds)) -} - -// NewTwoL2SupernodeFollowL2 creates a TwoL2SupernodeFollowL2 preset for acceptance tests. -// Use delaySeconds=0 for interop at genesis, or a positive value to test the transition. 
-// The delaySeconds must match what was passed to WithTwoL2SupernodeFollowL2 in TestMain. -func NewTwoL2SupernodeFollowL2(t devtest.T, delaySeconds uint64) *TwoL2SupernodeFollowL2 { - base := NewTwoL2SupernodeInterop(t, delaySeconds) - - l2a := base.system.L2Network(match.L2ChainA) - l2b := base.system.L2Network(match.L2ChainB) - - followerELAID := stack.NewL2ELNodeID("follower", l2a.ID().ChainID()) - followerCLAID := stack.NewL2CLNodeID("follower", l2a.ID().ChainID()) - followerELBID := stack.NewL2ELNodeID("follower", l2b.ID().ChainID()) - followerCLBID := stack.NewL2CLNodeID("follower", l2b.ID().ChainID()) - - followerELA := l2a.L2ELNode(match.MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { - return elem.ID() == followerELAID - })) - followerCLA := l2a.L2CLNode(match.MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { - return elem.ID() == followerCLAID - })) - - followerELB := l2b.L2ELNode(match.MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { - return elem.ID() == followerELBID - })) - followerCLB := l2b.L2CLNode(match.MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { - return elem.ID() == followerCLBID - })) - - return &TwoL2SupernodeFollowL2{ - TwoL2SupernodeInterop: *base, - L2AFollowEL: dsl.NewL2ELNode(followerELA, base.ControlPlane), - L2AFollowCL: dsl.NewL2CLNode(followerCLA, base.ControlPlane), - L2BFollowEL: dsl.NewL2ELNode(followerELB, base.ControlPlane), - L2BFollowCL: dsl.NewL2CLNode(followerCLB, base.ControlPlane), - } +// NewTwoL2SupernodeFollowL2 creates a fresh follow-source variant of the two-L2 +// supernode interop preset for the current test. 
+func NewTwoL2SupernodeFollowL2(t devtest.T, delaySeconds uint64, opts ...Option) *TwoL2SupernodeFollowL2 { + presetCfg, _ := collectSupportedPresetConfig(t, "NewTwoL2SupernodeFollowL2", opts, twoL2SupernodeInteropPresetSupportedOptionKinds) + return twoL2SupernodeFollowL2FromRuntime(t, sysgo.NewTwoL2SupernodeFollowL2RuntimeWithConfig(t, delaySeconds, presetCfg)) } diff --git a/op-devstack/presets/twol2_from_runtime.go b/op-devstack/presets/twol2_from_runtime.go new file mode 100644 index 0000000000000..9770b3b2f0512 --- /dev/null +++ b/op-devstack/presets/twol2_from_runtime.go @@ -0,0 +1,209 @@ +package presets + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" +) + +type twoL2RuntimeComponents struct { + l2AEL *l2ELFrontend + l2BEL *l2ELFrontend + + l2ABatcher *l2BatcherFrontend + l2BBatcher *l2BatcherFrontend + + faucetA *dsl.Faucet + faucetB *dsl.Faucet +} + +func twoL2SupernodeFromRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *TwoL2 { + preset, _ := twoL2FromRuntime(t, runtime) + return preset +} + +func twoL2FromRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) (*TwoL2, *twoL2RuntimeComponents) { + chainA := runtime.Chains["l2a"] + chainB := runtime.Chains["l2b"] + t.Require().NotNil(chainA, "missing l2a runtime chain") + t.Require().NotNil(chainB, "missing l2b runtime chain") + l1ChainID := runtime.L1Network.ChainID() + l2AChainID := chainA.Network.ChainID() + l2BChainID := chainB.Network.ChainID() + + l1Network := newPresetL1Network(t, "l1", runtime.L1Network.ChainConfig()) + l1EL := newL1ELFrontend(t, "l1", l1ChainID, runtime.L1EL.UserRPC()) + l1CL := newL1CLFrontend(t, "l1", l1ChainID, runtime.L1CL.BeaconHTTPAddr(), runtime.L1CL.FakePoS()) + l1Network.AddL1ELNode(l1EL) + l1Network.AddL1CLNode(l1CL) + + l2A := newPresetL2Network( + t, + "l2a", + chainA.Network.ChainConfig(), + 
chainA.Network.RollupConfig(), + chainA.Network.Deployment(), + newKeyring(runtime.Keys, t.Require()), + l1Network, + ) + l2AEL := newL2ELFrontend(t, "sequencer", l2AChainID, chainA.EL.UserRPC(), chainA.EL.EngineRPC(), chainA.EL.JWTPath(), chainA.Network.RollupConfig(), chainA.EL) + l2ACL := newL2CLFrontend(t, "sequencer", l2AChainID, chainA.CL.UserRPC(), chainA.CL) + l2ACL.attachEL(l2AEL) + l2ABatcher := newL2BatcherFrontend(t, "main", l2AChainID, chainA.Batcher.UserRPC()) + l2A.AddL2ELNode(l2AEL) + l2A.AddL2CLNode(l2ACL) + l2A.AddL2Batcher(l2ABatcher) + + l2B := newPresetL2Network( + t, + "l2b", + chainB.Network.ChainConfig(), + chainB.Network.RollupConfig(), + chainB.Network.Deployment(), + newKeyring(runtime.Keys, t.Require()), + l1Network, + ) + l2BEL := newL2ELFrontend(t, "sequencer", l2BChainID, chainB.EL.UserRPC(), chainB.EL.EngineRPC(), chainB.EL.JWTPath(), chainB.Network.RollupConfig(), chainB.EL) + l2BCL := newL2CLFrontend(t, "sequencer", l2BChainID, chainB.CL.UserRPC(), chainB.CL) + l2BCL.attachEL(l2BEL) + l2BBatcher := newL2BatcherFrontend(t, "main", l2BChainID, chainB.Batcher.UserRPC()) + l2B.AddL2ELNode(l2BEL) + l2B.AddL2CLNode(l2BCL) + l2B.AddL2Batcher(l2BBatcher) + + faucetAFrontend := newFaucetFrontendForChain(t, runtime.FaucetService, l2AChainID) + faucetBFrontend := newFaucetFrontendForChain(t, runtime.FaucetService, l2BChainID) + l2A.AddFaucet(faucetAFrontend) + l2B.AddFaucet(faucetBFrontend) + faucetA := dsl.NewFaucet(faucetAFrontend) + faucetB := dsl.NewFaucet(faucetBFrontend) + + l1ELDSL := dsl.NewL1ELNode(l1EL) + l1CLDSL := dsl.NewL1CLNode(l1CL) + l2AELDSL := dsl.NewL2ELNode(l2AEL) + l2ACLDSL := dsl.NewL2CLNode(l2ACL) + l2BELDSL := dsl.NewL2ELNode(l2BEL) + l2BCLDSL := dsl.NewL2CLNode(l2BCL) + + preset := &TwoL2{ + Log: t.Logger(), + T: t, + L1Network: dsl.NewL1Network(l1Network, l1ELDSL, l1CLDSL), + L1EL: l1ELDSL, + L1CL: l1CLDSL, + L2A: dsl.NewL2Network(l2A, l2AELDSL, l2ACLDSL, l1ELDSL, nil, nil), + L2B: dsl.NewL2Network(l2B, l2BELDSL, 
l2BCLDSL, l1ELDSL, nil, nil), + L2ACL: l2ACLDSL, + L2BCL: l2BCLDSL, + } + return preset, &twoL2RuntimeComponents{ + l2AEL: l2AEL, + l2BEL: l2BEL, + l2ABatcher: l2ABatcher, + l2BBatcher: l2BBatcher, + faucetA: faucetA, + faucetB: faucetB, + } +} + +func twoL2SupernodeInteropFromRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *TwoL2SupernodeInterop { + twoL2, components := twoL2FromRuntime(t, runtime) + + supernode := newSupernodeFrontend(t, "supernode-two-l2-system", runtime.Supernode.UserRPC()) + testSequencer := newTestSequencerFrontend( + t, + runtime.TestSequencer.Name, + runtime.TestSequencer.AdminRPC, + runtime.TestSequencer.ControlRPC, + runtime.TestSequencer.JWTSecret, + ) + + genesisTime := twoL2.L2A.Escape().RollupConfig().Genesis.L2Time + preset := &TwoL2SupernodeInterop{ + TwoL2: TwoL2{ + Log: twoL2.Log, + T: twoL2.T, + L1Network: twoL2.L1Network, + L1EL: twoL2.L1EL, + L1CL: twoL2.L1CL, + L2A: twoL2.L2A, + L2B: twoL2.L2B, + L2ACL: twoL2.L2ACL, + L2BCL: twoL2.L2BCL, + }, + Supernode: dsl.NewSupernodeWithTestControl(supernode, runtime.Supernode), + TestSequencer: dsl.NewTestSequencer(testSequencer), + L2ELA: dsl.NewL2ELNode(components.l2AEL), + L2ELB: dsl.NewL2ELNode(components.l2BEL), + L2BatcherA: dsl.NewL2Batcher(components.l2ABatcher), + L2BatcherB: dsl.NewL2Batcher(components.l2BBatcher), + FaucetA: components.faucetA, + FaucetB: components.faucetB, + Wallet: dsl.NewRandomHDWallet(t, 30), + GenesisTime: genesisTime, + InteropActivationTime: genesisTime + runtime.DelaySeconds, + DelaySeconds: runtime.DelaySeconds, + timeTravel: runtime.TimeTravel, + } + preset.FunderA = dsl.NewFunder(preset.Wallet, preset.FaucetA, preset.L2ELA) + preset.FunderB = dsl.NewFunder(preset.Wallet, preset.FaucetB, preset.L2ELB) + return preset +} + +func twoL2SupernodeFollowL2FromRuntime(t devtest.T, runtime *sysgo.MultiChainRuntime) *TwoL2SupernodeFollowL2 { + base := twoL2SupernodeInteropFromRuntime(t, runtime) + chainA := runtime.Chains["l2a"] + chainB := 
runtime.Chains["l2b"] + t.Require().NotNil(chainA, "missing l2a supernode chain") + t.Require().NotNil(chainB, "missing l2b supernode chain") + t.Require().NotNil(chainA.Followers, "missing l2a followers") + t.Require().NotNil(chainB.Followers, "missing l2b followers") + followerA := chainA.Followers["follower"] + followerB := chainB.Followers["follower"] + t.Require().NotNil(followerA, "missing l2a follower") + t.Require().NotNil(followerB, "missing l2b follower") + + l2AFollowEL := newL2ELFrontend( + t, + followerA.Name, + chainA.Network.ChainID(), + followerA.EL.UserRPC(), + followerA.EL.EngineRPC(), + followerA.EL.JWTPath(), + chainA.Network.RollupConfig(), + followerA.EL, + ) + l2AFollowCL := newL2CLFrontend(t, followerA.Name, chainA.Network.ChainID(), followerA.CL.UserRPC(), followerA.CL) + l2AFollowCL.attachEL(l2AFollowEL) + + l2BFollowEL := newL2ELFrontend( + t, + followerB.Name, + chainB.Network.ChainID(), + followerB.EL.UserRPC(), + followerB.EL.EngineRPC(), + followerB.EL.JWTPath(), + chainB.Network.RollupConfig(), + followerB.EL, + ) + l2BFollowCL := newL2CLFrontend(t, followerB.Name, chainB.Network.ChainID(), followerB.CL.UserRPC(), followerB.CL) + l2BFollowCL.attachEL(l2BFollowEL) + + l2ANet, ok := base.L2A.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network A") + l2ANet.AddL2ELNode(l2AFollowEL) + l2ANet.AddL2CLNode(l2AFollowCL) + + l2BNet, ok := base.L2B.Escape().(*presetL2Network) + t.Require().True(ok, "expected preset L2 network B") + l2BNet.AddL2ELNode(l2BFollowEL) + l2BNet.AddL2CLNode(l2BFollowCL) + + return &TwoL2SupernodeFollowL2{ + TwoL2SupernodeInterop: *base, + L2AFollowEL: dsl.NewL2ELNode(l2AFollowEL), + L2AFollowCL: dsl.NewL2CLNode(l2AFollowCL), + L2BFollowEL: dsl.NewL2ELNode(l2BFollowEL), + L2BFollowCL: dsl.NewL2CLNode(l2BFollowCL), + } +} diff --git a/op-devstack/shim/cluster.go b/op-devstack/shim/cluster.go deleted file mode 100644 index 41e2cb3eea8f6..0000000000000 --- a/op-devstack/shim/cluster.go +++ 
/dev/null @@ -1,39 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" -) - -// ClusterConfig is the config to create a default cluster object -type ClusterConfig struct { - CommonConfig - DependencySet depset.DependencySet - ID stack.ComponentID -} - -// presetCluster implements Cluster with preset values -type presetCluster struct { - commonImpl - depSet depset.DependencySet - id stack.ComponentID -} - -var _ stack.Cluster = (*presetCluster)(nil) - -func NewCluster(cfg ClusterConfig) stack.Cluster { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &presetCluster{ - id: cfg.ID, - commonImpl: newCommon(cfg.CommonConfig), - depSet: cfg.DependencySet, - } -} - -func (p *presetCluster) ID() stack.ComponentID { - return p.id -} - -func (p *presetCluster) DependencySet() depset.DependencySet { - return p.depSet -} diff --git a/op-devstack/shim/common.go b/op-devstack/shim/common.go deleted file mode 100644 index 8b51387ffea04..0000000000000 --- a/op-devstack/shim/common.go +++ /dev/null @@ -1,68 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-service/testreq" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/locks" -) - -// CommonConfig provides common inputs for creating a new component -type CommonConfig struct { - // T may be annotated using the T.WithCtx function - T devtest.T -} - -// NewCommonConfig is a convenience method to build the config common between all components. -// Note that component constructors will decorate the test-scope with contextual metadata, -// the caller of the component constructor can generally leave the test-context as-is. 
-func NewCommonConfig(t devtest.T) CommonConfig { - return CommonConfig{ - T: t, - } -} - -type commonImpl struct { - log log.Logger - t devtest.T - req *testreq.Assertions - labels *locks.RWMap[string, string] -} - -var _ interface { - stack.Common - require() *testreq.Assertions -} = (*commonImpl)(nil) - -// newCommon creates an object to hold on to common component data, safe to embed in other structs -func newCommon(cfg CommonConfig) commonImpl { - return commonImpl{ - log: cfg.T.Logger(), - t: cfg.T, - req: cfg.T.Require(), - labels: new(locks.RWMap[string, string]), - } -} - -func (c *commonImpl) T() devtest.T { - return c.t -} - -func (c *commonImpl) Logger() log.Logger { - return c.log -} - -func (c *commonImpl) require() *testreq.Assertions { - return c.req -} - -func (c *commonImpl) Label(key string) string { - out, _ := c.labels.Get(key) - return out -} - -func (c *commonImpl) SetLabel(key, value string) { - c.labels.Set(key, value) -} diff --git a/op-devstack/shim/conductor.go b/op-devstack/shim/conductor.go deleted file mode 100644 index 4151241ab9636..0000000000000 --- a/op-devstack/shim/conductor.go +++ /dev/null @@ -1,41 +0,0 @@ -package shim - -import ( - "github.com/ethereum/go-ethereum/rpc" - - conductorRpc "github.com/ethereum-optimism/optimism/op-conductor/rpc" - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -type ConductorConfig struct { - CommonConfig - ID stack.ComponentID - Client *rpc.Client -} - -type rpcConductor struct { - commonImpl - id stack.ComponentID - - client *rpc.Client - api conductorRpc.API -} - -var _ stack.Conductor = (*rpcConductor)(nil) - -func NewConductor(cfg ConductorConfig) stack.Conductor { - return &rpcConductor{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: cfg.Client, - api: conductorRpc.NewAPIClient(cfg.Client), - } -} - -func (r *rpcConductor) ID() stack.ComponentID { - return r.id -} - -func (r *rpcConductor) RpcAPI() conductorRpc.API { - return r.api -} diff --git 
a/op-devstack/shim/el.go b/op-devstack/shim/el.go deleted file mode 100644 index 5280887cba02a..0000000000000 --- a/op-devstack/shim/el.go +++ /dev/null @@ -1,61 +0,0 @@ -package shim - -import ( - "time" - - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type ELNodeConfig struct { - CommonConfig - Client client.RPC - ChainID eth.ChainID - TransactionTimeout time.Duration -} - -type rpcELNode struct { - commonImpl - - client client.RPC - ethClient *sources.EthClient - chainID eth.ChainID - txTimeout time.Duration -} - -var _ stack.ELNode = (*rpcELNode)(nil) - -// newRpcELNode creates a generic ELNode, safe to embed in other structs -func newRpcELNode(cfg ELNodeConfig) rpcELNode { - ethCl, err := sources.NewEthClient(cfg.Client, cfg.T.Logger(), nil, sources.DefaultEthClientConfig(10)) - require.NoError(cfg.T, err) - - if cfg.TransactionTimeout == 0 { - cfg.TransactionTimeout = 30 * time.Second - } - - return rpcELNode{ - commonImpl: newCommon(cfg.CommonConfig), - client: cfg.Client, - ethClient: ethCl, - chainID: cfg.ChainID, - txTimeout: cfg.TransactionTimeout, - } -} - -func (r *rpcELNode) ChainID() eth.ChainID { - return r.chainID -} - -func (r *rpcELNode) EthClient() apis.EthClient { - return r.ethClient -} - -func (r *rpcELNode) TransactionTimeout() time.Duration { - return r.txTimeout -} diff --git a/op-devstack/shim/faucet.go b/op-devstack/shim/faucet.go deleted file mode 100644 index 80f6d8858c070..0000000000000 --- a/op-devstack/shim/faucet.go +++ /dev/null @@ -1,42 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - 
"github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type FaucetConfig struct { - CommonConfig - ID stack.ComponentID - Client client.RPC -} - -// presetFaucet wraps around a faucet-service, -// and is meant to fund users by making faucet RPC requests. -// This deconflicts funding requests by parallel tests from the same funding account. -type presetFaucet struct { - commonImpl - id stack.ComponentID - faucetClient *sources.FaucetClient -} - -var _ stack.Faucet = (*presetFaucet)(nil) - -func NewFaucet(cfg FaucetConfig) stack.Faucet { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &presetFaucet{ - id: cfg.ID, - commonImpl: newCommon(cfg.CommonConfig), - faucetClient: sources.NewFaucetClient(cfg.Client), - } -} - -func (p *presetFaucet) ID() stack.ComponentID { - return p.id -} - -func (p *presetFaucet) API() apis.Faucet { - return p.faucetClient -} diff --git a/op-devstack/shim/fb_ws_client.go b/op-devstack/shim/fb_ws_client.go deleted file mode 100644 index 5397c49ee9ed6..0000000000000 --- a/op-devstack/shim/fb_ws_client.go +++ /dev/null @@ -1,50 +0,0 @@ -package shim - -import ( - "net/http" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type FlashblocksWSClientConfig struct { - CommonConfig - ID stack.ComponentID - WsUrl string - WsHeaders http.Header -} - -type flashblocksWSClient struct { - commonImpl - id stack.ComponentID - wsUrl string - wsHeaders http.Header -} - -var _ stack.FlashblocksWSClient = (*flashblocksWSClient)(nil) - -func NewFlashblocksWSClient(cfg FlashblocksWSClientConfig) stack.FlashblocksWSClient { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &flashblocksWSClient{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - wsUrl: cfg.WsUrl, - wsHeaders: cfg.WsHeaders, - } -} - -func (r *flashblocksWSClient) ID() stack.ComponentID { - 
return r.id -} - -func (r *flashblocksWSClient) ChainID() eth.ChainID { - return r.id.ChainID() -} - -func (r *flashblocksWSClient) WsUrl() string { - return r.wsUrl -} - -func (r *flashblocksWSClient) WsHeaders() http.Header { - return r.wsHeaders -} diff --git a/op-devstack/shim/keyring.go b/op-devstack/shim/keyring.go deleted file mode 100644 index 90c6d76d0562c..0000000000000 --- a/op-devstack/shim/keyring.go +++ /dev/null @@ -1,37 +0,0 @@ -package shim - -import ( - "crypto/ecdsa" - - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/testreq" -) - -type keyringImpl struct { - keys devkeys.Keys - require *testreq.Assertions -} - -var _ stack.Keys = (*keyringImpl)(nil) - -func NewKeyring(keys devkeys.Keys, req *testreq.Assertions) stack.Keys { - return &keyringImpl{ - keys: keys, - require: req, - } -} - -func (k *keyringImpl) Secret(key devkeys.Key) *ecdsa.PrivateKey { - pk, err := k.keys.Secret(key) - k.require.NoError(err) - return pk -} - -func (k *keyringImpl) Address(key devkeys.Key) common.Address { - addr, err := k.keys.Address(key) - k.require.NoError(err) - return addr -} diff --git a/op-devstack/shim/l1_cl.go b/op-devstack/shim/l1_cl.go deleted file mode 100644 index a518166d94c08..0000000000000 --- a/op-devstack/shim/l1_cl.go +++ /dev/null @@ -1,39 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type L1CLNodeConfig struct { - CommonConfig - ID stack.ComponentID - Client client.HTTP -} - -type rpcL1CLNode struct { - commonImpl - id stack.ComponentID - client apis.BeaconClient -} - -var _ stack.L1CLNode = (*rpcL1CLNode)(nil) - -func NewL1CLNode(cfg 
L1CLNodeConfig) stack.L1CLNode { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcL1CLNode{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: sources.NewBeaconHTTPClient(cfg.Client), - } -} - -func (r *rpcL1CLNode) ID() stack.ComponentID { - return r.id -} - -func (r *rpcL1CLNode) BeaconClient() apis.BeaconClient { - return r.client -} diff --git a/op-devstack/shim/l1_el.go b/op-devstack/shim/l1_el.go deleted file mode 100644 index 005c7e182984e..0000000000000 --- a/op-devstack/shim/l1_el.go +++ /dev/null @@ -1,31 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/stretchr/testify/require" -) - -type L1ELNodeConfig struct { - ELNodeConfig - ID stack.ComponentID -} - -type rpcL1ELNode struct { - rpcELNode - id stack.ComponentID -} - -var _ stack.L1ELNode = (*rpcL1ELNode)(nil) - -func NewL1ELNode(cfg L1ELNodeConfig) stack.L1ELNode { - require.Equal(cfg.T, cfg.ID.ChainID(), cfg.ELNodeConfig.ChainID, "chainID must be configured to match node chainID") - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcL1ELNode{ - rpcELNode: newRpcELNode(cfg.ELNodeConfig), - id: cfg.ID, - } -} - -func (r *rpcL1ELNode) ID() stack.ComponentID { - return r.id -} diff --git a/op-devstack/shim/l1_network.go b/op-devstack/shim/l1_network.go deleted file mode 100644 index 60c45430b1302..0000000000000 --- a/op-devstack/shim/l1_network.go +++ /dev/null @@ -1,105 +0,0 @@ -package shim - -import ( - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type L1NetworkConfig struct { - NetworkConfig - ID stack.ComponentID -} - -type presetL1Network struct { - presetNetwork - id stack.ComponentID -} - -var _ stack.ExtensibleL1Network = (*presetL1Network)(nil) - -func NewL1Network(cfg L1NetworkConfig) stack.ExtensibleL1Network { - require.Equal(cfg.T, 
cfg.ID.ChainID(), eth.ChainIDFromBig(cfg.NetworkConfig.ChainConfig.ChainID), "chain config must match expected chain") - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &presetL1Network{ - id: cfg.ID, - presetNetwork: newNetwork(cfg.NetworkConfig), - } -} - -func (p *presetL1Network) ID() stack.ComponentID { - return p.id -} - -func (p *presetL1Network) L1ELNode(m stack.L1ELMatcher) stack.L1ELNode { - getter := func(id stack.ComponentID) (stack.L1ELNode, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L1ELNode), true - } - v, ok := findMatch(m, getter, p.L1ELNodes) - p.require().True(ok, "must find L1 EL %s", m) - return v -} - -func (p *presetL1Network) AddL1ELNode(v stack.L1ELNode) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "l1 EL node %s must be on chain %s", id, p.chainID) - _, exists := p.registry.Get(id) - p.require().False(exists, "l1 EL node %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL1Network) L1CLNode(m stack.L1CLMatcher) stack.L1CLNode { - getter := func(id stack.ComponentID) (stack.L1CLNode, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L1CLNode), true - } - v, ok := findMatch(m, getter, p.L1CLNodes) - p.require().True(ok, "must find L1 CL %s", m) - return v -} - -func (p *presetL1Network) AddL1CLNode(v stack.L1CLNode) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "l1 CL node %s must be on chain %s", id, p.chainID) - _, exists := p.registry.Get(id) - p.require().False(exists, "l1 CL node %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL1Network) L1ELNodeIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL1ELNode)) -} - -func (p *presetL1Network) L1ELNodes() []stack.L1ELNode { - ids := p.registry.IDsByKind(stack.KindL1ELNode) - result := make([]stack.L1ELNode, 0, len(ids)) - for _, id := range ids { - if v, ok 
:= p.registry.Get(id); ok { - result = append(result, v.(stack.L1ELNode)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL1Network) L1CLNodeIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL1CLNode)) -} - -func (p *presetL1Network) L1CLNodes() []stack.L1CLNode { - ids := p.registry.IDsByKind(stack.KindL1CLNode) - result := make([]stack.L1CLNode, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.L1CLNode)) - } - } - return sortByIDFunc(result) -} diff --git a/op-devstack/shim/l2_batcher.go b/op-devstack/shim/l2_batcher.go deleted file mode 100644 index 4e3e785af4283..0000000000000 --- a/op-devstack/shim/l2_batcher.go +++ /dev/null @@ -1,39 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type L2BatcherConfig struct { - CommonConfig - ID stack.ComponentID - Client client.RPC -} - -type rpcL2Batcher struct { - commonImpl - id stack.ComponentID - client *sources.BatcherAdminClient -} - -var _ stack.L2Batcher = (*rpcL2Batcher)(nil) - -func NewL2Batcher(cfg L2BatcherConfig) stack.L2Batcher { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcL2Batcher{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: sources.NewBatcherAdminClient(cfg.Client), - } -} - -func (r *rpcL2Batcher) ID() stack.ComponentID { - return r.id -} - -func (p *rpcL2Batcher) ActivityAPI() apis.BatcherActivity { - return p.client -} diff --git a/op-devstack/shim/l2_challenger.go b/op-devstack/shim/l2_challenger.go deleted file mode 100644 index 65f360503da0e..0000000000000 --- a/op-devstack/shim/l2_challenger.go +++ /dev/null @@ -1,37 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-challenger/config" - 
"github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -type L2ChallengerConfig struct { - CommonConfig - ID stack.ComponentID - Config *config.Config -} - -type rpcL2Challenger struct { - commonImpl - id stack.ComponentID - config *config.Config -} - -func (r *rpcL2Challenger) Config() *config.Config { - return r.config -} - -var _ stack.L2Challenger = (*rpcL2Challenger)(nil) - -func NewL2Challenger(cfg L2ChallengerConfig) stack.L2Challenger { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcL2Challenger{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - config: cfg.Config, - } -} - -func (r *rpcL2Challenger) ID() stack.ComponentID { - return r.id -} diff --git a/op-devstack/shim/l2_cl.go b/op-devstack/shim/l2_cl.go deleted file mode 100644 index a1a490ed73e6e..0000000000000 --- a/op-devstack/shim/l2_cl.go +++ /dev/null @@ -1,117 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/locks" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type L2CLNodeConfig struct { - CommonConfig - ID stack.ComponentID - Client client.RPC - - UserRPC string - - InteropEndpoint string - InteropJwtSecret eth.Bytes32 -} - -type rpcL2CLNode struct { - commonImpl - id stack.ComponentID - client client.RPC - rollupClient apis.RollupClient - p2pClient apis.P2PClient - els locks.RWMap[stack.ComponentID, stack.L2ELNode] - rollupBoostNodes locks.RWMap[stack.ComponentID, stack.RollupBoostNode] - oprbuilderNodes locks.RWMap[stack.ComponentID, stack.OPRBuilderNode] - - userRPC string - - // Store interop ws endpoints and secrets to provide to the supervisor, - // when reconnection happens using the supervisor's admin_addL2RPC method. 
- // These fields are not intended for manual dial-in or initializing client.RPC - interopEndpoint string - interopJwtSecret eth.Bytes32 -} - -var _ stack.L2CLNode = (*rpcL2CLNode)(nil) -var _ stack.LinkableL2CLNode = (*rpcL2CLNode)(nil) - -func NewL2CLNode(cfg L2CLNodeConfig) stack.L2CLNode { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcL2CLNode{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: cfg.Client, - rollupClient: sources.NewRollupClient(cfg.Client), - p2pClient: sources.NewP2PClient(cfg.Client), - userRPC: cfg.UserRPC, - interopEndpoint: cfg.InteropEndpoint, - interopJwtSecret: cfg.InteropJwtSecret, - } -} - -func (r *rpcL2CLNode) ClientRPC() client.RPC { - return r.client -} - -func (r *rpcL2CLNode) ID() stack.ComponentID { - return r.id -} - -func (r *rpcL2CLNode) RollupAPI() apis.RollupClient { - return r.rollupClient -} - -func (r *rpcL2CLNode) P2PAPI() apis.P2PClient { - return r.p2pClient -} - -func (r *rpcL2CLNode) LinkEL(el stack.L2ELNode) { - r.els.Set(el.ID(), el) -} - -func (r *rpcL2CLNode) LinkRollupBoostNode(rollupBoostNode stack.RollupBoostNode) { - r.rollupBoostNodes.Set(rollupBoostNode.ID(), rollupBoostNode) -} - -func (r *rpcL2CLNode) LinkOPRBuilderNode(oprb stack.OPRBuilderNode) { - r.oprbuilderNodes.Set(oprb.ID(), oprb) -} - -func (r *rpcL2CLNode) ELs() []stack.L2ELNode { - return sortByIDFunc(r.els.Values()) -} - -func (r *rpcL2CLNode) ELClient() apis.EthClient { - var ethclient apis.EthClient - if len(r.els.Values()) > 0 { - ethclient = r.els.Values()[0].EthClient() - } else if len(r.rollupBoostNodes.Values()) > 0 { - ethclient = r.rollupBoostNodes.Values()[0].EthClient() - } else if len(r.oprbuilderNodes.Values()) > 0 { - ethclient = r.oprbuilderNodes.Values()[0].EthClient() - } - return ethclient -} - -func (r *rpcL2CLNode) RollupBoostNodes() []stack.RollupBoostNode { - return sortByIDFunc(r.rollupBoostNodes.Values()) -} - -func (r *rpcL2CLNode) OPRBuilderNodes() 
[]stack.OPRBuilderNode { - return sortByIDFunc(r.oprbuilderNodes.Values()) -} - -func (r *rpcL2CLNode) UserRPC() string { - return r.userRPC -} - -func (r *rpcL2CLNode) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { - return r.interopEndpoint, r.interopJwtSecret -} diff --git a/op-devstack/shim/l2_el.go b/op-devstack/shim/l2_el.go deleted file mode 100644 index 4bd7e41a826b6..0000000000000 --- a/op-devstack/shim/l2_el.go +++ /dev/null @@ -1,64 +0,0 @@ -package shim - -import ( - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type L2ELNodeConfig struct { - ELNodeConfig - EngineClient client.RPC - RollupCfg *rollup.Config - ID stack.ComponentID -} - -type rpcL2ELNode struct { - rpcELNode - l2Client *sources.L2Client - l2EngineClient *sources.EngineClient - - id stack.ComponentID -} - -var _ stack.L2ELNode = (*rpcL2ELNode)(nil) - -func NewL2ELNode(cfg L2ELNodeConfig) stack.L2ELNode { - require.Equal(cfg.T, cfg.ID.ChainID(), cfg.ELNodeConfig.ChainID, "chainID must be configured to match node chainID") - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - require.NotNil(cfg.T, cfg.RollupCfg, "rollup config must be configured") - l2Client, err := sources.NewL2Client(cfg.ELNodeConfig.Client, cfg.T.Logger(), nil, sources.L2ClientSimpleConfig(cfg.RollupCfg, false, 10, 10)) - require.NoError(cfg.T, err) - engineClientConfig := &sources.EngineClientConfig{ - L2ClientConfig: *sources.L2ClientSimpleConfig(cfg.RollupCfg, false, 10, 10), - } - // initialize engine API client using different client - engineClient, err := sources.NewEngineClient(cfg.EngineClient, cfg.T.Logger(), nil, engineClientConfig) - require.NoError(cfg.T, err) - return &rpcL2ELNode{ - rpcELNode: 
newRpcELNode(cfg.ELNodeConfig), - l2Client: l2Client, - l2EngineClient: engineClient, - id: cfg.ID, - } -} - -func (r *rpcL2ELNode) ID() stack.ComponentID { - return r.id -} - -func (r *rpcL2ELNode) L2EthClient() apis.L2EthClient { - return r.l2Client -} - -func (r *rpcL2ELNode) L2EthExtendedClient() apis.L2EthClient { - return r.l2Client -} - -func (r *rpcL2ELNode) L2EngineClient() apis.EngineClient { - return r.l2EngineClient.EngineAPIClient -} diff --git a/op-devstack/shim/l2_network.go b/op-devstack/shim/l2_network.go deleted file mode 100644 index 86128c95434c7..0000000000000 --- a/op-devstack/shim/l2_network.go +++ /dev/null @@ -1,360 +0,0 @@ -package shim - -import ( - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type L2NetworkConfig struct { - NetworkConfig - ID stack.ComponentID - RollupConfig *rollup.Config - Deployment stack.L2Deployment - Keys stack.Keys - - Superchain stack.Superchain - L1 stack.L1Network - Cluster stack.Cluster -} - -type presetL2Network struct { - presetNetwork - id stack.ComponentID - - rollupCfg *rollup.Config - deployment stack.L2Deployment - keys stack.Keys - - superchain stack.Superchain - l1 stack.L1Network - cluster stack.Cluster -} - -var _ stack.L2Network = (*presetL2Network)(nil) - -func NewL2Network(cfg L2NetworkConfig) stack.ExtensibleL2Network { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - // sanity-check the configs match the expected chains - require.Equal(cfg.T, cfg.ID.ChainID(), eth.ChainIDFromBig(cfg.NetworkConfig.ChainConfig.ChainID), "chain config must match expected chain") - require.Equal(cfg.T, cfg.L1.ChainID(), eth.ChainIDFromBig(cfg.RollupConfig.L1ChainID), "rollup config must match expected L1 chain") - require.Equal(cfg.T, cfg.ID.ChainID(), eth.ChainIDFromBig(cfg.RollupConfig.L2ChainID), "rollup config must match 
expected L2 chain") - return &presetL2Network{ - id: cfg.ID, - presetNetwork: newNetwork(cfg.NetworkConfig), - rollupCfg: cfg.RollupConfig, - deployment: cfg.Deployment, - keys: cfg.Keys, - superchain: cfg.Superchain, - l1: cfg.L1, - cluster: cfg.Cluster, - } -} - -func (p *presetL2Network) ID() stack.ComponentID { - return p.id -} - -func (p *presetL2Network) RollupConfig() *rollup.Config { - p.require().NotNil(p.rollupCfg, "l2 chain %s must have a rollup config", p.ID()) - return p.rollupCfg -} - -func (p *presetL2Network) Deployment() stack.L2Deployment { - p.require().NotNil(p.deployment, "l2 chain %s must have a deployment", p.ID()) - return p.deployment -} - -func (p *presetL2Network) Keys() stack.Keys { - p.require().NotNil(p.keys, "l2 chain %s must have keys", p.ID()) - return p.keys -} - -func (p *presetL2Network) Superchain() stack.Superchain { - p.require().NotNil(p.superchain, "l2 chain %s must have a superchain", p.ID()) - return p.superchain -} - -func (p *presetL2Network) L1() stack.L1Network { - p.require().NotNil(p.l1, "l2 chain %s must have an L1 chain", p.ID()) - return p.l1 -} - -func (p *presetL2Network) Cluster() stack.Cluster { - p.require().NotNil(p.cluster, "l2 chain %s must have a cluster", p.ID()) - return p.cluster -} - -func (p *presetL2Network) L2Batcher(m stack.L2BatcherMatcher) stack.L2Batcher { - getter := func(id stack.ComponentID) (stack.L2Batcher, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L2Batcher), true - } - v, ok := findMatch(m, getter, p.L2Batchers) - p.require().True(ok, "must find L2 batcher %s", m) - return v -} - -func (p *presetL2Network) AddL2Batcher(v stack.L2Batcher) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "l2 batcher %s must be on chain %s", id, p.chainID) - _, exists := p.registry.Get(id) - p.require().False(exists, "l2 batcher %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) Conductor(m 
stack.ConductorMatcher) stack.Conductor { - getter := func(id stack.ComponentID) (stack.Conductor, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.Conductor), true - } - v, ok := findMatch(m, getter, p.Conductors) - p.require().True(ok, "must find L2 conductor %s", m) - return v -} - -func (p *presetL2Network) AddConductor(v stack.Conductor) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "conductor %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) L2Proposer(m stack.L2ProposerMatcher) stack.L2Proposer { - getter := func(id stack.ComponentID) (stack.L2Proposer, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L2Proposer), true - } - v, ok := findMatch(m, getter, p.L2Proposers) - p.require().True(ok, "must find L2 proposer %s", m) - return v -} - -func (p *presetL2Network) AddL2Proposer(v stack.L2Proposer) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "l2 proposer %s must be on chain %s", id, p.chainID) - _, exists := p.registry.Get(id) - p.require().False(exists, "l2 proposer %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) L2Challenger(m stack.L2ChallengerMatcher) stack.L2Challenger { - getter := func(id stack.ComponentID) (stack.L2Challenger, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L2Challenger), true - } - v, ok := findMatch(m, getter, p.L2Challengers) - p.require().True(ok, "must find L2 challenger %s", m) - return v -} - -func (p *presetL2Network) AddL2Challenger(v stack.L2Challenger) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "l2 challenger %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) L2CLNode(m stack.L2CLMatcher) stack.L2CLNode { - getter := func(id stack.ComponentID) (stack.L2CLNode, bool) { - v, ok := 
p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L2CLNode), true - } - v, ok := findMatch(m, getter, p.L2CLNodes) - p.require().True(ok, "must find L2 CL %s", m) - return v -} - -func (p *presetL2Network) AddL2CLNode(v stack.L2CLNode) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "l2 CL node %s must be on chain %s", id, p.chainID) - _, exists := p.registry.Get(id) - p.require().False(exists, "l2 CL node %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) L2ELNode(m stack.L2ELMatcher) stack.L2ELNode { - getter := func(id stack.ComponentID) (stack.L2ELNode, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L2ELNode), true - } - v, ok := findMatch(m, getter, p.L2ELNodes) - p.require().True(ok, "must find L2 EL %s", m) - return v -} - -func (p *presetL2Network) AddL2ELNode(v stack.L2ELNode) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "l2 EL node %s must be on chain %s", id, p.chainID) - _, exists := p.registry.Get(id) - p.require().False(exists, "l2 EL node %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) L2BatcherIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL2Batcher)) -} - -func (p *presetL2Network) L2Batchers() []stack.L2Batcher { - ids := p.registry.IDsByKind(stack.KindL2Batcher) - result := make([]stack.L2Batcher, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.L2Batcher)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) L2ProposerIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL2Proposer)) -} - -func (p *presetL2Network) L2Proposers() []stack.L2Proposer { - ids := p.registry.IDsByKind(stack.KindL2Proposer) - result := make([]stack.L2Proposer, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = 
append(result, v.(stack.L2Proposer)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) L2ChallengerIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL2Challenger)) -} - -func (p *presetL2Network) L2Challengers() []stack.L2Challenger { - ids := p.registry.IDsByKind(stack.KindL2Challenger) - result := make([]stack.L2Challenger, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.L2Challenger)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) Conductors() []stack.Conductor { - ids := p.registry.IDsByKind(stack.KindConductor) - result := make([]stack.Conductor, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.Conductor)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) L2CLNodeIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL2CLNode)) -} - -func (p *presetL2Network) L2CLNodes() []stack.L2CLNode { - ids := p.registry.IDsByKind(stack.KindL2CLNode) - result := make([]stack.L2CLNode, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.L2CLNode)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) L2ELNodeIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL2ELNode)) -} - -func (p *presetL2Network) L2ELNodes() []stack.L2ELNode { - ids := p.registry.IDsByKind(stack.KindL2ELNode) - result := make([]stack.L2ELNode, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.L2ELNode)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) RollupBoostNodes() []stack.RollupBoostNode { - ids := p.registry.IDsByKind(stack.KindRollupBoostNode) - result := make([]stack.RollupBoostNode, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result 
= append(result, v.(stack.RollupBoostNode)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) OPRBuilderNodes() []stack.OPRBuilderNode { - ids := p.registry.IDsByKind(stack.KindOPRBuilderNode) - result := make([]stack.OPRBuilderNode, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.OPRBuilderNode)) - } - } - return sortByIDFunc(result) -} - -func (p *presetL2Network) AddRollupBoostNode(v stack.RollupBoostNode) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "rollup boost node %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) AddOPRBuilderNode(v stack.OPRBuilderNode) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "OPR builder node %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetL2Network) OPRBuilderNode(m stack.OPRBuilderNodeMatcher) stack.OPRBuilderNode { - getter := func(id stack.ComponentID) (stack.OPRBuilderNode, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.OPRBuilderNode), true - } - v, ok := findMatch(m, getter, p.OPRBuilderNodes) - p.require().True(ok, "must find OPR builder node %s", m) - return v -} - -func (p *presetL2Network) RollupBoostNode(m stack.RollupBoostNodeMatcher) stack.RollupBoostNode { - getter := func(id stack.ComponentID) (stack.RollupBoostNode, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.RollupBoostNode), true - } - v, ok := findMatch(m, getter, p.RollupBoostNodes) - p.require().True(ok, "must find rollup boost node %s", m) - return v -} diff --git a/op-devstack/shim/l2_proposer.go b/op-devstack/shim/l2_proposer.go deleted file mode 100644 index 190cf3a450e04..0000000000000 --- a/op-devstack/shim/l2_proposer.go +++ /dev/null @@ -1,33 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - 
"github.com/ethereum-optimism/optimism/op-service/client" -) - -type L2ProposerConfig struct { - CommonConfig - ID stack.ComponentID - Client client.RPC -} - -type rpcL2Proposer struct { - commonImpl - id stack.ComponentID - client client.RPC -} - -var _ stack.L2Proposer = (*rpcL2Proposer)(nil) - -func NewL2Proposer(cfg L2ProposerConfig) stack.L2Proposer { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcL2Proposer{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: cfg.Client, - } -} - -func (r *rpcL2Proposer) ID() stack.ComponentID { - return r.id -} diff --git a/op-devstack/shim/matcher.go b/op-devstack/shim/matcher.go deleted file mode 100644 index d8ce976fae55e..0000000000000 --- a/op-devstack/shim/matcher.go +++ /dev/null @@ -1,41 +0,0 @@ -package shim - -import ( - "slices" - "sort" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -// findMatch checks if the matcher is an ID wrapper for direct lookup. If not, then it will search the list of values for a matching element. -// If multiple elements match, the first found is returned. -// The values function is used to lazy-fetch values in sorted order, such that the search is deterministic. -func findMatch[E stack.Identifiable](m stack.Matcher[E], getValue func(stack.ComponentID) (E, bool), values func() []E) (out E, found bool) { - // Check for idMatcher wrapper (created by stack.ByID) - if idm, ok := m.(interface{ ID() stack.ComponentID }); ok { - return getValue(idm.ID()) - } - got := m.Match(values()) - if len(got) == 0 { - return - } - return got[0], true -} - -// sortByID sorts a slice of ComponentIDs. -func sortByID(ids []stack.ComponentID) []stack.ComponentID { - out := slices.Clone(ids) - sort.Slice(out, func(i, j int) bool { - return out[i].Less(out[j]) - }) - return out -} - -// sortByIDFunc sorts a slice of elements by extracting their ID. 
-func sortByIDFunc[T stack.Identifiable](elems []T) []T { - out := slices.Clone(elems) - sort.Slice(out, func(i, j int) bool { - return out[i].ID().Less(out[j].ID()) - }) - return out -} diff --git a/op-devstack/shim/network.go b/op-devstack/shim/network.go deleted file mode 100644 index d6652f4828da7..0000000000000 --- a/op-devstack/shim/network.go +++ /dev/null @@ -1,135 +0,0 @@ -package shim - -import ( - "github.com/ethereum/go-ethereum/params" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type NetworkConfig struct { - CommonConfig - ChainConfig *params.ChainConfig -} - -type presetNetwork struct { - commonImpl - chainCfg *params.ChainConfig - chainID eth.ChainID - - // Unified component registry for generic access - registry *stack.Registry -} - -var _ stack.Network = (*presetNetwork)(nil) - -// newNetwork creates a new network, safe to embed in other structs -func newNetwork(cfg NetworkConfig) presetNetwork { - return presetNetwork{ - commonImpl: newCommon(cfg.CommonConfig), - chainCfg: cfg.ChainConfig, - chainID: eth.ChainIDFromBig(cfg.ChainConfig.ChainID), - registry: stack.NewRegistry(), - } -} - -// --- ComponentRegistry interface implementation --- - -func (p *presetNetwork) Component(id stack.ComponentID) (any, bool) { - return p.registry.Get(id) -} - -func (p *presetNetwork) Components(kind stack.ComponentKind) []any { - ids := p.registry.IDsByKind(kind) - result := make([]any, 0, len(ids)) - for _, id := range ids { - if comp, ok := p.registry.Get(id); ok { - result = append(result, comp) - } - } - return result -} - -func (p *presetNetwork) ComponentIDs(kind stack.ComponentKind) []stack.ComponentID { - return p.registry.IDsByKind(kind) -} - -func (p *presetNetwork) ChainID() eth.ChainID { - return p.chainID -} - -func (p *presetNetwork) ChainConfig() *params.ChainConfig { - return p.chainCfg -} - -func (p *presetNetwork) FaucetIDs() []stack.ComponentID { - return 
sortByID(p.registry.IDsByKind(stack.KindFaucet)) -} - -func (p *presetNetwork) Faucets() []stack.Faucet { - ids := p.registry.IDsByKind(stack.KindFaucet) - result := make([]stack.Faucet, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.Faucet)) - } - } - return sortByIDFunc(result) -} - -func (p *presetNetwork) Faucet(m stack.FaucetMatcher) stack.Faucet { - getter := func(id stack.ComponentID) (stack.Faucet, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.Faucet), true - } - v, ok := findMatch(m, getter, p.Faucets) - p.require().True(ok, "must find faucet %s", m) - return v -} - -func (p *presetNetwork) AddFaucet(v stack.Faucet) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "faucet %s must be on chain %s", id, p.chainID) - _, exists := p.registry.Get(id) - p.require().False(exists, "faucet %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetNetwork) SyncTesterIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindSyncTester)) -} - -func (p *presetNetwork) SyncTesters() []stack.SyncTester { - ids := p.registry.IDsByKind(stack.KindSyncTester) - result := make([]stack.SyncTester, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.SyncTester)) - } - } - return sortByIDFunc(result) -} - -func (p *presetNetwork) SyncTester(m stack.SyncTesterMatcher) stack.SyncTester { - getter := func(id stack.ComponentID) (stack.SyncTester, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.SyncTester), true - } - v, ok := findMatch(m, getter, p.SyncTesters) - p.require().True(ok, "must find sync tester %s", m) - return v -} - -func (p *presetNetwork) AddSyncTester(v stack.SyncTester) { - id := v.ID() - p.require().Equal(p.chainID, id.ChainID(), "sync tester %s must be on chain %s", id, p.chainID) - _, exists := 
p.registry.Get(id) - p.require().False(exists, "sync tester %s must not already exist", id) - p.registry.Register(id, v) -} diff --git a/op-devstack/shim/op_rbuilder.go b/op-devstack/shim/op_rbuilder.go deleted file mode 100644 index 4b5c8216e4fed..0000000000000 --- a/op-devstack/shim/op_rbuilder.go +++ /dev/null @@ -1,58 +0,0 @@ -package shim - -import ( - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/apis" - opclient "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type OPRBuilderNodeConfig struct { - ELNodeConfig - RollupCfg *rollup.Config - ID stack.ComponentID - FlashblocksClient *opclient.WSClient -} - -type OPRBuilderNode struct { - rpcELNode - id stack.ComponentID - engineClient *sources.EngineClient - flashblocksClient *opclient.WSClient -} - -var _ stack.OPRBuilderNode = (*OPRBuilderNode)(nil) - -func NewOPRBuilderNode(cfg OPRBuilderNodeConfig) *OPRBuilderNode { - require.Equal(cfg.T, cfg.ID.ChainID(), cfg.ELNodeConfig.ChainID, "chainID must be configured to match node chainID") - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - l2EngineClient, err := sources.NewEngineClient(cfg.ELNodeConfig.Client, cfg.T.Logger(), nil, sources.EngineClientDefaultConfig(cfg.RollupCfg)) - - require.NoError(cfg.T, err) - - return &OPRBuilderNode{ - rpcELNode: newRpcELNode(cfg.ELNodeConfig), - engineClient: l2EngineClient, - id: cfg.ID, - flashblocksClient: cfg.FlashblocksClient, - } -} - -func (r *OPRBuilderNode) ID() stack.ComponentID { - return r.id -} - -func (r *OPRBuilderNode) L2EthClient() apis.L2EthClient { - return r.engineClient.L2Client -} - -func (r *OPRBuilderNode) FlashblocksClient() *opclient.WSClient { - return r.flashblocksClient -} - -func (r *OPRBuilderNode) L2EngineClient() apis.EngineClient { - return 
r.engineClient.EngineAPIClient -} diff --git a/op-devstack/shim/rollup_boost.go b/op-devstack/shim/rollup_boost.go deleted file mode 100644 index 9ef839579420e..0000000000000 --- a/op-devstack/shim/rollup_boost.go +++ /dev/null @@ -1,64 +0,0 @@ -package shim - -import ( - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/apis" - opclient "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type RollupBoostNodeConfig struct { - ELNodeConfig - RollupCfg *rollup.Config - ID stack.ComponentID - FlashblocksClient *opclient.WSClient -} - -type RollupBoostNode struct { - rpcELNode - engineClient *sources.EngineClient - - id stack.ComponentID - - flashblocksClient *opclient.WSClient -} - -var _ stack.RollupBoostNode = (*RollupBoostNode)(nil) - -func NewRollupBoostNode(cfg RollupBoostNodeConfig) *RollupBoostNode { - require.Equal(cfg.T, cfg.ID.ChainID(), cfg.ELNodeConfig.ChainID, "chainID must be configured to match node chainID") - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - l2EngineClient, err := sources.NewEngineClient(cfg.ELNodeConfig.Client, cfg.T.Logger(), nil, sources.EngineClientDefaultConfig(cfg.RollupCfg)) - - require.NoError(cfg.T, err) - - return &RollupBoostNode{ - rpcELNode: newRpcELNode(cfg.ELNodeConfig), - engineClient: l2EngineClient, - id: cfg.ID, - flashblocksClient: cfg.FlashblocksClient, - } -} - -func (r *RollupBoostNode) ID() stack.ComponentID { - return r.id -} - -func (r *RollupBoostNode) L2EthClient() apis.L2EthClient { - return r.engineClient.L2Client -} - -func (r *RollupBoostNode) FlashblocksClient() *opclient.WSClient { - return r.flashblocksClient -} - -func (r *RollupBoostNode) L2EngineClient() apis.EngineClient { - return r.engineClient.EngineAPIClient -} - -func (r *RollupBoostNode) ELNode() 
stack.ELNode { - return &r.rpcELNode -} diff --git a/op-devstack/shim/superchain.go b/op-devstack/shim/superchain.go deleted file mode 100644 index 8e651c09a6b79..0000000000000 --- a/op-devstack/shim/superchain.go +++ /dev/null @@ -1,36 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -type SuperchainConfig struct { - CommonConfig - ID stack.ComponentID - Deployment stack.SuperchainDeployment -} - -type presetSuperchain struct { - commonImpl - id stack.ComponentID - deployment stack.SuperchainDeployment -} - -var _ stack.Superchain = (*presetSuperchain)(nil) - -func NewSuperchain(cfg SuperchainConfig) stack.Superchain { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &presetSuperchain{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - deployment: cfg.Deployment, - } -} - -func (p *presetSuperchain) ID() stack.ComponentID { - return p.id -} - -func (p presetSuperchain) Deployment() stack.SuperchainDeployment { - return p.deployment -} diff --git a/op-devstack/shim/supernode.go b/op-devstack/shim/supernode.go deleted file mode 100644 index 42e05b91cd7a8..0000000000000 --- a/op-devstack/shim/supernode.go +++ /dev/null @@ -1,42 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type SuperNodeConfig struct { - CommonConfig - ID stack.SupernodeID - Client client.RPC -} - -type rpcSuperNode struct { - commonImpl - id stack.SupernodeID - - client client.RPC - api apis.SupernodeQueryAPI -} - -var _ stack.Supernode = (*rpcSuperNode)(nil) - -func NewSuperNode(cfg SuperNodeConfig) stack.Supernode { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcSuperNode{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: cfg.Client, - 
api: sources.NewSuperNodeClient(cfg.Client), - } -} - -func (r *rpcSuperNode) ID() stack.SupernodeID { - return r.id -} - -func (r *rpcSuperNode) QueryAPI() apis.SupernodeQueryAPI { - return r.api -} diff --git a/op-devstack/shim/supervisor.go b/op-devstack/shim/supervisor.go deleted file mode 100644 index 2a91406ec02aa..0000000000000 --- a/op-devstack/shim/supervisor.go +++ /dev/null @@ -1,46 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type SupervisorConfig struct { - CommonConfig - ID stack.ComponentID - Client client.RPC -} - -type rpcSupervisor struct { - commonImpl - id stack.ComponentID - - client client.RPC - api apis.SupervisorAPI -} - -var _ stack.Supervisor = (*rpcSupervisor)(nil) - -func NewSupervisor(cfg SupervisorConfig) stack.Supervisor { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &rpcSupervisor{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: cfg.Client, - api: sources.NewSupervisorClient(cfg.Client), - } -} - -func (r *rpcSupervisor) ID() stack.ComponentID { - return r.id -} - -func (r *rpcSupervisor) AdminAPI() apis.SupervisorAdminAPI { - return r.api -} - -func (r *rpcSupervisor) QueryAPI() apis.SupervisorQueryAPI { - return r.api -} diff --git a/op-devstack/shim/sync_tester.go b/op-devstack/shim/sync_tester.go deleted file mode 100644 index d86862374f06b..0000000000000 --- a/op-devstack/shim/sync_tester.go +++ /dev/null @@ -1,56 +0,0 @@ -package shim - -import ( - "fmt" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" - 
"github.com/ethereum-optimism/optimism/op-sync-tester/synctester" -) - -type SyncTesterConfig struct { - CommonConfig - ID stack.ComponentID - Addr string - Client client.RPC -} - -// presetSyncTester wraps around a syncTester-service, -type presetSyncTester struct { - commonImpl - id stack.ComponentID - // Endpoint for initializing RPC Client per session - addr string - // RPC Client initialized without session - syncTesterClient *sources.SyncTesterClient -} - -var _ stack.SyncTester = (*presetSyncTester)(nil) - -func NewSyncTester(cfg SyncTesterConfig) stack.SyncTester { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - return &presetSyncTester{ - id: cfg.ID, - commonImpl: newCommon(cfg.CommonConfig), - addr: cfg.Addr, - syncTesterClient: sources.NewSyncTesterClient(cfg.Client), - } -} - -func (p *presetSyncTester) ID() stack.ComponentID { - return p.id -} - -func (p *presetSyncTester) API() apis.SyncTester { - return p.syncTesterClient -} - -func (p *presetSyncTester) APIWithSession(sessionID string) apis.SyncTester { - require := p.T().Require() - require.NoError(synctester.IsValidSessionID(sessionID)) - rpcCl, err := client.NewRPC(p.T().Ctx(), p.Logger(), p.addr+fmt.Sprintf("/%s", sessionID), client.WithLazyDial()) - require.NoError(err, "sync tester failed to initialize rpc per session") - return sources.NewSyncTesterClient(rpcCl) -} diff --git a/op-devstack/shim/system.go b/op-devstack/shim/system.go deleted file mode 100644 index 3523e4e9485a1..0000000000000 --- a/op-devstack/shim/system.go +++ /dev/null @@ -1,327 +0,0 @@ -package shim - -import ( - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/locks" -) - -// SystemConfig sets up a System. 
-// It is intentionally very minimal, the system is expected to be extended after creation, using Option functions -type SystemConfig struct { - CommonConfig -} - -type presetSystem struct { - commonImpl - - // timeTravelClock is the clock used to control time. nil if time travel is not enabled - timeTravelClock stack.TimeTravelClock - - // Unified component registry for generic access - registry *stack.Registry - - supernodes locks.RWMap[stack.ComponentID, stack.Supernode] -} - -var _ stack.ExtensibleSystem = (*presetSystem)(nil) - -// NewSystem creates a new empty System -func NewSystem(t devtest.T) stack.ExtensibleSystem { - return &presetSystem{ - commonImpl: newCommon(NewCommonConfig(t)), - registry: stack.NewRegistry(), - } -} - -// --- ComponentRegistry interface implementation --- - -func (p *presetSystem) Component(id stack.ComponentID) (any, bool) { - return p.registry.Get(id) -} - -func (p *presetSystem) Components(kind stack.ComponentKind) []any { - ids := p.registry.IDsByKind(kind) - result := make([]any, 0, len(ids)) - for _, id := range ids { - if comp, ok := p.registry.Get(id); ok { - result = append(result, comp) - } - } - return result -} - -func (p *presetSystem) ComponentIDs(kind stack.ComponentKind) []stack.ComponentID { - return p.registry.IDsByKind(kind) -} - -func (p *presetSystem) Superchain(m stack.SuperchainMatcher) stack.Superchain { - getter := func(id stack.ComponentID) (stack.Superchain, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.Superchain), true - } - v, ok := findMatch(m, getter, p.Superchains) - p.require().True(ok, "must find superchain %s", m) - return v -} - -func (p *presetSystem) AddSuperchain(v stack.Superchain) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "superchain %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetSystem) Cluster(m stack.ClusterMatcher) stack.Cluster { - getter := func(id stack.ComponentID) 
(stack.Cluster, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.Cluster), true - } - v, ok := findMatch(m, getter, p.Clusters) - p.require().True(ok, "must find cluster %s", m) - return v -} - -func (p *presetSystem) AddCluster(v stack.Cluster) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "cluster %s must not already exist", id) - p.registry.Register(id, v) -} - -// networkExistsByChainID checks if any network (L1 or L2) exists with the given chain ID -func (p *presetSystem) networkExistsByChainID(chainID eth.ChainID) bool { - l1ID := stack.NewL1NetworkID(chainID) - if _, ok := p.registry.Get(l1ID); ok { - return true - } - l2ID := stack.NewL2NetworkID(chainID) - if _, ok := p.registry.Get(l2ID); ok { - return true - } - return false -} - -func (p *presetSystem) Network(id eth.ChainID) stack.Network { - l1ID := stack.NewL1NetworkID(id) - if l1Net, ok := p.registry.Get(l1ID); ok { - return l1Net.(stack.L1Network) - } - l2ID := stack.NewL2NetworkID(id) - if l2Net, ok := p.registry.Get(l2ID); ok { - return l2Net.(stack.L2Network) - } - p.t.FailNow() - return nil -} - -func (p *presetSystem) L1Network(m stack.L1NetworkMatcher) stack.L1Network { - getter := func(id stack.ComponentID) (stack.L1Network, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L1Network), true - } - v, ok := findMatch(m, getter, p.L1Networks) - p.require().True(ok, "must find l1 network %s", m) - return v -} - -func (p *presetSystem) AddL1Network(v stack.L1Network) { - id := v.ID() - p.require().False(p.networkExistsByChainID(id.ChainID()), "chain with id %s must not already exist", id.ChainID()) - _, exists := p.registry.Get(id) - p.require().False(exists, "L1 chain %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetSystem) L2Network(m stack.L2NetworkMatcher) stack.L2Network { - getter := func(id stack.ComponentID) (stack.L2Network, bool) { - v, ok 
:= p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.L2Network), true - } - v, ok := findMatch(m, getter, p.L2Networks) - p.require().True(ok, "must find l2 network %s", m) - return v -} - -func (p *presetSystem) AddL2Network(v stack.L2Network) { - id := v.ID() - p.require().False(p.networkExistsByChainID(id.ChainID()), "chain with id %s must not already exist", id.ChainID()) - _, exists := p.registry.Get(id) - p.require().False(exists, "L2 chain %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetSystem) Supervisor(m stack.SupervisorMatcher) stack.Supervisor { - getter := func(id stack.ComponentID) (stack.Supervisor, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.Supervisor), true - } - v, ok := findMatch(m, getter, p.Supervisors) - p.require().True(ok, "must find supervisor %s", m) - return v -} - -func (p *presetSystem) AddSupervisor(v stack.Supervisor) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "supervisor %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetSystem) Supernode(m stack.SupernodeMatcher) stack.Supernode { - v, ok := findMatch(m, p.supernodes.Get, p.Supernodes) - p.require().True(ok, "must find supernode %s", m) - return v -} - -func (p *presetSystem) AddSupernode(v stack.Supernode) { - p.require().True(p.supernodes.SetIfMissing(v.ID(), v), "supernode %s must not already exist", v.ID()) -} - -func (p *presetSystem) TestSequencer(m stack.TestSequencerMatcher) stack.TestSequencer { - getter := func(id stack.ComponentID) (stack.TestSequencer, bool) { - v, ok := p.registry.Get(id) - if !ok { - return nil, false - } - return v.(stack.TestSequencer), true - } - v, ok := findMatch(m, getter, p.TestSequencers) - p.require().True(ok, "must find sequencer %s", m) - return v -} - -func (p *presetSystem) AddTestSequencer(v stack.TestSequencer) { - id := v.ID() - _, exists := p.registry.Get(id) - 
p.require().False(exists, "sequencer %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetSystem) AddSyncTester(v stack.SyncTester) { - id := v.ID() - _, exists := p.registry.Get(id) - p.require().False(exists, "sync tester %s must not already exist", id) - p.registry.Register(id, v) -} - -func (p *presetSystem) SuperchainIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindSuperchain)) -} - -func (p *presetSystem) Superchains() []stack.Superchain { - ids := p.registry.IDsByKind(stack.KindSuperchain) - result := make([]stack.Superchain, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.Superchain)) - } - } - return sortByIDFunc(result) -} - -func (p *presetSystem) ClusterIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindCluster)) -} - -func (p *presetSystem) Clusters() []stack.Cluster { - ids := p.registry.IDsByKind(stack.KindCluster) - result := make([]stack.Cluster, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.Cluster)) - } - } - return sortByIDFunc(result) -} - -func (p *presetSystem) L1NetworkIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL1Network)) -} - -func (p *presetSystem) L1Networks() []stack.L1Network { - ids := p.registry.IDsByKind(stack.KindL1Network) - result := make([]stack.L1Network, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.L1Network)) - } - } - return sortByIDFunc(result) -} - -func (p *presetSystem) L2NetworkIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindL2Network)) -} - -func (p *presetSystem) L2Networks() []stack.L2Network { - ids := p.registry.IDsByKind(stack.KindL2Network) - result := make([]stack.L2Network, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = 
append(result, v.(stack.L2Network)) - } - } - return sortByIDFunc(result) -} - -func (p *presetSystem) SupervisorIDs() []stack.ComponentID { - return sortByID(p.registry.IDsByKind(stack.KindSupervisor)) -} - -func (p *presetSystem) Supervisors() []stack.Supervisor { - ids := p.registry.IDsByKind(stack.KindSupervisor) - result := make([]stack.Supervisor, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.Supervisor)) - } - } - return sortByIDFunc(result) -} - -func (p *presetSystem) Supernodes() []stack.Supernode { - return stack.SortSupernodes(p.supernodes.Values()) -} - -func (p *presetSystem) TestSequencers() []stack.TestSequencer { - ids := p.registry.IDsByKind(stack.KindTestSequencer) - result := make([]stack.TestSequencer, 0, len(ids)) - for _, id := range ids { - if v, ok := p.registry.Get(id); ok { - result = append(result, v.(stack.TestSequencer)) - } - } - return sortByIDFunc(result) -} - -func (p *presetSystem) SetTimeTravelClock(cl stack.TimeTravelClock) { - p.timeTravelClock = cl -} - -func (p *presetSystem) TimeTravelEnabled() bool { - return p.timeTravelClock != nil -} - -func (p *presetSystem) AdvanceTime(amount time.Duration) { - p.require().True(p.TimeTravelEnabled(), "Attempting to advance time when time travel is not enabled") - p.timeTravelClock.AdvanceTime(amount) -} diff --git a/op-devstack/shim/test_sequencer.go b/op-devstack/shim/test_sequencer.go deleted file mode 100644 index 5edb519c6f864..0000000000000 --- a/op-devstack/shim/test_sequencer.go +++ /dev/null @@ -1,59 +0,0 @@ -package shim - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type TestSequencerConfig struct { - CommonConfig - ID stack.ComponentID - Client 
client.RPC - ControlClients map[eth.ChainID]client.RPC -} - -type rpcTestSequencer struct { - commonImpl - id stack.ComponentID - - client client.RPC - api apis.TestSequencerAPI - controls map[eth.ChainID]apis.TestSequencerControlAPI -} - -var _ stack.TestSequencer = (*rpcTestSequencer)(nil) - -func NewTestSequencer(cfg TestSequencerConfig) stack.TestSequencer { - cfg.T = cfg.T.WithCtx(stack.ContextWithID(cfg.T.Ctx(), cfg.ID)) - s := &rpcTestSequencer{ - commonImpl: newCommon(cfg.CommonConfig), - id: cfg.ID, - client: cfg.Client, - api: sources.NewBuilderClient(cfg.Client), - } - - s.controls = make(map[eth.ChainID]apis.TestSequencerControlAPI) - for k, v := range cfg.ControlClients { - s.controls[k] = sources.NewControlClient(v) - } - return s -} - -func (r *rpcTestSequencer) ID() stack.ComponentID { - return r.id -} - -func (r *rpcTestSequencer) AdminAPI() apis.TestSequencerAdminAPI { - return r.api -} - -func (r *rpcTestSequencer) BuildAPI() apis.TestSequencerBuildAPI { - return r.api -} - -func (r *rpcTestSequencer) ControlAPI(chainID eth.ChainID) apis.TestSequencerControlAPI { - return r.controls[chainID] -} diff --git a/op-devstack/stack/cluster.go b/op-devstack/stack/cluster.go deleted file mode 100644 index d1af1e22248cc..0000000000000 --- a/op-devstack/stack/cluster.go +++ /dev/null @@ -1,14 +0,0 @@ -package stack - -import ( - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" -) - -// Cluster represents a set of chains that interop with each other. -// This may include L1 chains (although potentially not two-way interop due to consensus-layer limitations). 
-type Cluster interface { - Common - ID() ComponentID - - DependencySet() depset.DependencySet -} diff --git a/op-devstack/stack/common.go b/op-devstack/stack/common.go index 88d2a416ea877..a4408fbefeebe 100644 --- a/op-devstack/stack/common.go +++ b/op-devstack/stack/common.go @@ -9,6 +9,7 @@ import ( type Common interface { T() devtest.T Logger() log.Logger + Name() string // Label retrieves a label by key. // If the label does not exist, it returns an empty string. diff --git a/op-devstack/stack/component_id.go b/op-devstack/stack/component_id.go deleted file mode 100644 index ce87817a391ba..0000000000000 --- a/op-devstack/stack/component_id.go +++ /dev/null @@ -1,382 +0,0 @@ -package stack - -import ( - "bytes" - "errors" - "fmt" - "log/slog" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/holiman/uint256" -) - -// ComponentKind identifies the type of component. -// This is used in serialization to make each ID unique and type-safe. -type ComponentKind string - -var _ slog.LogValuer = (*ComponentKind)(nil) - -// ChainIDProvider presents a type that provides a relevant ChainID. -type ChainIDProvider interface { - ChainID() eth.ChainID -} - -// KindProvider presents a type that provides a relevant ComponentKind. E.g. KindL2Batcher. -type KindProvider interface { - Kind() ComponentKind -} - -// Keyed presents a type that provides a relevant string key. E.g. a named superchain. -type Keyed interface { - Key() string -} - -const maxIDLength = 100 - -var errInvalidID = errors.New("invalid ID") - -func (k ComponentKind) LogValue() slog.Value { - return slog.StringValue(string(k)) -} - -func (k ComponentKind) String() string { - return string(k) -} - -func (k ComponentKind) MarshalText() ([]byte, error) { - return []byte(k), nil -} - -func (k *ComponentKind) UnmarshalText(data []byte) error { - *k = ComponentKind(data) - return nil -} - -// All component kinds. These values are used in serialization and must remain stable. 
-const ( - KindL1ELNode ComponentKind = "L1ELNode" - KindL1CLNode ComponentKind = "L1CLNode" - KindL1Network ComponentKind = "L1Network" - KindL2ELNode ComponentKind = "L2ELNode" - KindL2CLNode ComponentKind = "L2CLNode" - KindL2Network ComponentKind = "L2Network" - KindL2Batcher ComponentKind = "L2Batcher" - KindL2Proposer ComponentKind = "L2Proposer" - KindL2Challenger ComponentKind = "L2Challenger" - KindRollupBoostNode ComponentKind = "RollupBoostNode" - KindOPRBuilderNode ComponentKind = "OPRBuilderNode" - KindFaucet ComponentKind = "Faucet" - KindSyncTester ComponentKind = "SyncTester" - KindSupervisor ComponentKind = "Supervisor" - KindConductor ComponentKind = "Conductor" - KindCluster ComponentKind = "Cluster" - KindSuperchain ComponentKind = "Superchain" - KindSupernode ComponentKind = "Supernode" - KindTestSequencer ComponentKind = "TestSequencer" - KindFlashblocksClient ComponentKind = "FlashblocksWSClient" -) - -var hydrationComponentKindOrder = []ComponentKind{ - KindSuperchain, - KindCluster, - KindL1Network, - KindL2Network, - KindL1ELNode, - KindL1CLNode, - KindL2ELNode, - KindOPRBuilderNode, - KindRollupBoostNode, - KindL2CLNode, - KindSupervisor, - KindTestSequencer, - KindL2Batcher, - KindL2Challenger, - KindL2Proposer, -} - -// HydrationComponentKindOrder returns the deterministic kind ordering used by orchestrator hydration. -func HydrationComponentKindOrder() []ComponentKind { - out := make([]ComponentKind, len(hydrationComponentKindOrder)) - copy(out, hydrationComponentKindOrder) - return out -} - -// IDShape defines which fields an ID uses. 
-type IDShape uint8 - -const ( - // IDShapeKeyAndChain indicates the ID has both a key and chainID (e.g., L2Batcher-mynode-420) - IDShapeKeyAndChain IDShape = iota - // IDShapeChainOnly indicates the ID has only a chainID (e.g., L1Network-1) - IDShapeChainOnly - // IDShapeKeyOnly indicates the ID has only a key (e.g., Supervisor-mysupervisor) - IDShapeKeyOnly -) - -// ComponentID is the unified identifier for all components. -// It contains all possible fields; the shape determines which are used. -type ComponentID struct { - kind ComponentKind - shape IDShape - key string - chainID eth.ChainID -} - -// NewComponentID creates a new ComponentID with key and chainID. -func NewComponentID(kind ComponentKind, key string, chainID eth.ChainID) ComponentID { - return ComponentID{ - kind: kind, - shape: IDShapeKeyAndChain, - key: key, - chainID: chainID, - } -} - -// NewComponentIDChainOnly creates a new ComponentID with only a chainID. -func NewComponentIDChainOnly(kind ComponentKind, chainID eth.ChainID) ComponentID { - return ComponentID{ - kind: kind, - shape: IDShapeChainOnly, - chainID: chainID, - } -} - -// NewComponentIDKeyOnly creates a new ComponentID with only a key. -func NewComponentIDKeyOnly(kind ComponentKind, key string) ComponentID { - return ComponentID{ - kind: kind, - shape: IDShapeKeyOnly, - key: key, - } -} - -func (id ComponentID) Kind() ComponentKind { - return id.kind -} - -func (id ComponentID) Shape() IDShape { - return id.shape -} - -// HasChainID returns true if this ID has a chain ID component. -// This is true for IDShapeKeyAndChain and IDShapeChainOnly shapes. 
-func (id ComponentID) HasChainID() bool { - return id.shape == IDShapeKeyAndChain || id.shape == IDShapeChainOnly -} - -func (id ComponentID) Key() string { - return id.key -} - -func (id ComponentID) ChainID() eth.ChainID { - return id.chainID -} - -func (id ComponentID) String() string { - switch id.shape { - case IDShapeKeyAndChain: - return fmt.Sprintf("%s-%s-%s", id.kind, id.key, id.chainID) - case IDShapeChainOnly: - return fmt.Sprintf("%s-%s", id.kind, id.chainID) - case IDShapeKeyOnly: - return fmt.Sprintf("%s-%s", id.kind, id.key) - default: - return fmt.Sprintf("%s-", id.kind) - } -} - -func (id ComponentID) LogValue() slog.Value { - return slog.StringValue(id.String()) -} - -func (id ComponentID) MarshalText() ([]byte, error) { - if id.shape == IDShapeKeyAndChain || id.shape == IDShapeKeyOnly { - if len(id.key) > maxIDLength { - return nil, errInvalidID - } - } - return []byte(id.String()), nil -} - -func (id *ComponentID) UnmarshalText(data []byte) error { - return id.unmarshalTextWithKind(id.kind, data) -} - -// unmarshalTextWithKind unmarshals the ID, validating that the kind matches. 
-func (id *ComponentID) unmarshalTextWithKind(expectedKind ComponentKind, data []byte) error { - kindData, rest, ok := bytes.Cut(data, []byte("-")) - if !ok { - return fmt.Errorf("expected kind-prefix, but id has none: %q", data) - } - actualKind := ComponentKind(kindData) - if actualKind != expectedKind { - return fmt.Errorf("id %q has unexpected kind %q, expected %q", string(data), actualKind, expectedKind) - } - - // Determine shape based on expected kind - shape := kindToShape(expectedKind) - id.kind = expectedKind - id.shape = shape - - switch shape { - case IDShapeKeyAndChain: - keyData, chainData, ok := bytes.Cut(rest, []byte("-")) - if !ok { - return fmt.Errorf("expected chain separator, but found none: %q", string(data)) - } - if len(keyData) > maxIDLength { - return errInvalidID - } - var chainID eth.ChainID - if err := chainID.UnmarshalText(chainData); err != nil { - return fmt.Errorf("failed to unmarshal chain part: %w", err) - } - id.key = string(keyData) - id.chainID = chainID - case IDShapeChainOnly: - var chainID eth.ChainID - if err := chainID.UnmarshalText(rest); err != nil { - return fmt.Errorf("failed to unmarshal chain part: %w", err) - } - id.chainID = chainID - id.key = "" - case IDShapeKeyOnly: - if len(rest) > maxIDLength { - return errInvalidID - } - id.key = string(rest) - id.chainID = eth.ChainID(*uint256.NewInt(0)) - } - return nil -} - -// kindToShape returns the IDShape for a given ComponentKind. -func kindToShape(kind ComponentKind) IDShape { - switch kind { - case KindL1Network, KindL2Network: - return IDShapeChainOnly - case KindSupervisor, KindConductor, KindCluster, KindSuperchain, KindTestSequencer, KindSupernode: - return IDShapeKeyOnly - default: - return IDShapeKeyAndChain - } -} - -// Less compares two ComponentIDs for sorting. 
-func (id ComponentID) Less(other ComponentID) bool { - if id.kind != other.kind { - return id.kind < other.kind - } - if id.key != other.key { - return id.key < other.key - } - return id.chainID.Cmp(other.chainID) < 0 -} - -// idMatcher wraps ComponentID to implement Matcher[E] for any component type. -type idMatcher[E Identifiable] struct { - id ComponentID -} - -func (m idMatcher[E]) Match(elems []E) []E { - for i, elem := range elems { - if elem.ID() == m.id { - return elems[i : i+1] - } - } - return nil -} - -func (m idMatcher[E]) String() string { - return m.id.String() -} - -// ID returns the ComponentID this matcher wraps. -// This is used by shim.findMatch for direct registry lookup. -func (m idMatcher[E]) ID() ComponentID { - return m.id -} - -// ByID creates a matcher for a specific ComponentID. -// This allows using a ComponentID as a matcher for any component type. -func ByID[E Identifiable](id ComponentID) Matcher[E] { - return idMatcher[E]{id: id} -} - -// Convenience constructors for each component kind. 
- -func NewL1ELNodeID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindL1ELNode, key, chainID) -} - -func NewL1CLNodeID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindL1CLNode, key, chainID) -} - -func NewL1NetworkID(chainID eth.ChainID) ComponentID { - return NewComponentIDChainOnly(KindL1Network, chainID) -} - -func NewL2ELNodeID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindL2ELNode, key, chainID) -} - -func NewL2CLNodeID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindL2CLNode, key, chainID) -} - -func NewL2NetworkID(chainID eth.ChainID) ComponentID { - return NewComponentIDChainOnly(KindL2Network, chainID) -} - -func NewL2BatcherID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindL2Batcher, key, chainID) -} - -func NewL2ProposerID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindL2Proposer, key, chainID) -} - -func NewL2ChallengerID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindL2Challenger, key, chainID) -} - -func NewRollupBoostNodeID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindRollupBoostNode, key, chainID) -} - -func NewOPRBuilderNodeID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindOPRBuilderNode, key, chainID) -} - -func NewFaucetID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindFaucet, key, chainID) -} - -func NewSyncTesterID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindSyncTester, key, chainID) -} - -func NewSupervisorID(key string) ComponentID { - return NewComponentIDKeyOnly(KindSupervisor, key) -} - -func NewConductorID(key string) ComponentID { - return NewComponentIDKeyOnly(KindConductor, key) -} - -func NewClusterID(key string) ComponentID { - return NewComponentIDKeyOnly(KindCluster, key) -} - -func NewSuperchainID(key string) ComponentID { - 
return NewComponentIDKeyOnly(KindSuperchain, key) -} - -func NewTestSequencerID(key string) ComponentID { - return NewComponentIDKeyOnly(KindTestSequencer, key) -} - -func NewFlashblocksWSClientID(key string, chainID eth.ChainID) ComponentID { - return NewComponentID(KindFlashblocksClient, key, chainID) -} diff --git a/op-devstack/stack/component_id_test.go b/op-devstack/stack/component_id_test.go deleted file mode 100644 index 97209c9e978c3..0000000000000 --- a/op-devstack/stack/component_id_test.go +++ /dev/null @@ -1,289 +0,0 @@ -package stack - -import ( - "slices" - "testing" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/stretchr/testify/require" -) - -func TestComponentID_KeyAndChain(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "mynode", chainID) - - require.Equal(t, KindL2Batcher, id.Kind()) - require.Equal(t, "mynode", id.Key()) - require.Equal(t, chainID, id.ChainID()) - require.Equal(t, IDShapeKeyAndChain, id.Shape()) - require.Equal(t, "L2Batcher-mynode-420", id.String()) -} - -func TestComponentID_ChainOnly(t *testing.T) { - chainID := eth.ChainIDFromUInt64(1) - id := NewComponentIDChainOnly(KindL1Network, chainID) - - require.Equal(t, KindL1Network, id.Kind()) - require.Equal(t, "", id.Key()) - require.Equal(t, chainID, id.ChainID()) - require.Equal(t, IDShapeChainOnly, id.Shape()) - require.Equal(t, "L1Network-1", id.String()) -} - -func TestComponentID_KeyOnly(t *testing.T) { - id := NewComponentIDKeyOnly(KindSupervisor, "mysupervisor") - - require.Equal(t, KindSupervisor, id.Kind()) - require.Equal(t, "mysupervisor", id.Key()) - require.Equal(t, eth.ChainID{}, id.ChainID()) - require.Equal(t, IDShapeKeyOnly, id.Shape()) - require.Equal(t, "Supervisor-mysupervisor", id.String()) -} - -func TestComponentID_MarshalRoundTrip_KeyAndChain(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - original := NewComponentID(KindL2Batcher, "mynode", chainID) - - data, err := 
original.MarshalText() - require.NoError(t, err) - require.Equal(t, "L2Batcher-mynode-420", string(data)) - - var parsed ComponentID - parsed.kind = KindL2Batcher // Must set kind before unmarshal - err = parsed.UnmarshalText(data) - require.NoError(t, err) - require.Equal(t, original, parsed) -} - -func TestComponentID_MarshalRoundTrip_ChainOnly(t *testing.T) { - chainID := eth.ChainIDFromUInt64(1) - original := NewComponentIDChainOnly(KindL1Network, chainID) - - data, err := original.MarshalText() - require.NoError(t, err) - require.Equal(t, "L1Network-1", string(data)) - - var parsed ComponentID - parsed.kind = KindL1Network - err = parsed.UnmarshalText(data) - require.NoError(t, err) - require.Equal(t, original, parsed) -} - -func TestComponentID_MarshalRoundTrip_KeyOnly(t *testing.T) { - original := NewComponentIDKeyOnly(KindSupervisor, "mysupervisor") - - data, err := original.MarshalText() - require.NoError(t, err) - require.Equal(t, "Supervisor-mysupervisor", string(data)) - - var parsed ComponentID - parsed.kind = KindSupervisor - err = parsed.UnmarshalText(data) - require.NoError(t, err) - require.Equal(t, original, parsed) -} - -func TestComponentID_UnmarshalKindMismatch(t *testing.T) { - var id ComponentID - id.kind = KindL2Batcher - err := id.UnmarshalText([]byte("L2ELNode-mynode-420")) - require.Error(t, err) - require.Contains(t, err.Error(), "unexpected kind") -} - -func TestID_TypeSafety(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - - // Create two different ID types with same key and chainID - batcherID := NewL2BatcherID("mynode", chainID) - elNodeID := NewL2ELNodeID("mynode", chainID) - - // They should have different kinds - require.Equal(t, KindL2Batcher, batcherID.Kind()) - require.Equal(t, KindL2ELNode, elNodeID.Kind()) - - // Their string representations should be different - require.Equal(t, "L2Batcher-mynode-420", batcherID.String()) - require.Equal(t, "L2ELNode-mynode-420", elNodeID.String()) - - // The IDs should be different 
due to kind - require.NotEqual(t, batcherID, elNodeID) -} - -func TestID_MarshalRoundTrip(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - original := NewL2BatcherID("mynode", chainID) - - data, err := original.MarshalText() - require.NoError(t, err) - require.Equal(t, "L2Batcher-mynode-420", string(data)) - - // Unmarshal into a ComponentID with kind preset - var parsed ComponentID - parsed.kind = KindL2Batcher // Must set kind before unmarshal - - err = parsed.UnmarshalText(data) - require.NoError(t, err) - require.Equal(t, original, parsed) -} - -func TestID_UnmarshalKindMismatch(t *testing.T) { - // Try to unmarshal an L2ELNode ID into a ComponentID expecting L2Batcher - var batcherID ComponentID - batcherID.kind = KindL2Batcher - err := batcherID.UnmarshalText([]byte("L2ELNode-mynode-420")) - require.Error(t, err) - require.Contains(t, err.Error(), "unexpected kind") -} - -func TestID_ChainOnlyTypes(t *testing.T) { - chainID := eth.ChainIDFromUInt64(1) - networkID := NewL1NetworkID(chainID) - - require.Equal(t, KindL1Network, networkID.Kind()) - require.Equal(t, chainID, networkID.ChainID()) - require.Equal(t, "L1Network-1", networkID.String()) - - data, err := networkID.MarshalText() - require.NoError(t, err) - - var parsed ComponentID - parsed.kind = KindL1Network // Must set kind before unmarshal - err = parsed.UnmarshalText(data) - require.NoError(t, err) - require.Equal(t, networkID, parsed) -} - -func TestID_KeyOnlyTypes(t *testing.T) { - supervisorID := NewSupervisorID("mysupervisor") - - require.Equal(t, KindSupervisor, supervisorID.Kind()) - require.Equal(t, "mysupervisor", supervisorID.Key()) - require.Equal(t, "Supervisor-mysupervisor", supervisorID.String()) - - data, err := supervisorID.MarshalText() - require.NoError(t, err) - - var parsed ComponentID - parsed.kind = KindSupervisor // Must set kind before unmarshal - err = parsed.UnmarshalText(data) - require.NoError(t, err) - require.Equal(t, supervisorID, parsed) -} - -func 
TestID_Sorting(t *testing.T) { - chainID1 := eth.ChainIDFromUInt64(100) - chainID2 := eth.ChainIDFromUInt64(200) - - ids := []ComponentID{ - NewL2BatcherID("charlie", chainID1), - NewL2BatcherID("alice", chainID1), - NewL2BatcherID("alice", chainID2), - NewL2BatcherID("bob", chainID1), - } - - // Sort using the ID's comparison - sorted := slices.Clone(ids) - slices.SortFunc(sorted, func(a, b ComponentID) int { - if a.Less(b) { - return -1 - } - if b.Less(a) { - return 1 - } - return 0 - }) - - // Should be sorted by key first, then by chainID - require.Equal(t, "alice", sorted[0].Key()) - require.Equal(t, chainID1, sorted[0].ChainID()) - require.Equal(t, "alice", sorted[1].Key()) - require.Equal(t, chainID2, sorted[1].ChainID()) - require.Equal(t, "bob", sorted[2].Key()) - require.Equal(t, "charlie", sorted[3].Key()) -} - -func TestID_MapKey(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - - // IDs should work as map keys - m := make(map[ComponentID]string) - - id1 := NewL2BatcherID("node1", chainID) - id2 := NewL2BatcherID("node2", chainID) - - m[id1] = "value1" - m[id2] = "value2" - - require.Equal(t, "value1", m[id1]) - require.Equal(t, "value2", m[id2]) - - // Same key+chainID should retrieve same value - id1Copy := NewL2BatcherID("node1", chainID) - require.Equal(t, "value1", m[id1Copy]) -} - -func TestAllIDTypes(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - - // Test all ID constructors and their kinds - tests := []struct { - name string - id interface{ Kind() ComponentKind } - expected ComponentKind - }{ - {"L1ELNode", NewL1ELNodeID("node", chainID), KindL1ELNode}, - {"L1CLNode", NewL1CLNodeID("node", chainID), KindL1CLNode}, - {"L1Network", NewL1NetworkID(chainID), KindL1Network}, - {"L2ELNode", NewL2ELNodeID("node", chainID), KindL2ELNode}, - {"L2CLNode", NewL2CLNodeID("node", chainID), KindL2CLNode}, - {"L2Network", NewL2NetworkID(chainID), KindL2Network}, - {"L2Batcher", NewL2BatcherID("node", chainID), KindL2Batcher}, - 
{"L2Proposer", NewL2ProposerID("node", chainID), KindL2Proposer}, - {"L2Challenger", NewL2ChallengerID("node", chainID), KindL2Challenger}, - {"RollupBoostNode", NewRollupBoostNodeID("node", chainID), KindRollupBoostNode}, - {"OPRBuilderNode", NewOPRBuilderNodeID("node", chainID), KindOPRBuilderNode}, - {"Faucet", NewFaucetID("node", chainID), KindFaucet}, - {"SyncTester", NewSyncTesterID("node", chainID), KindSyncTester}, - {"Supervisor", NewSupervisorID("node"), KindSupervisor}, - {"Conductor", NewConductorID("node"), KindConductor}, - {"Cluster", NewClusterID("node"), KindCluster}, - {"Superchain", NewSuperchainID("node"), KindSuperchain}, - {"TestSequencer", NewTestSequencerID("node"), KindTestSequencer}, - {"FlashblocksClient", NewFlashblocksWSClientID("node", chainID), KindFlashblocksClient}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - require.Equal(t, tt.expected, tt.id.Kind()) - }) - } -} - -// TestSerializationCompatibility verifies that the new ID system produces -// the same serialization format as the old system. 
-func TestSerializationCompatibility(t *testing.T) { - chainID := eth.ChainIDFromUInt64(420) - - // These formats must match the old ID system exactly - tests := []struct { - name string - id interface{ MarshalText() ([]byte, error) } - expected string - }{ - {"L2Batcher", NewL2BatcherID("mynode", chainID), "L2Batcher-mynode-420"}, - {"L2ELNode", NewL2ELNodeID("mynode", chainID), "L2ELNode-mynode-420"}, - {"L1Network", NewL1NetworkID(eth.ChainIDFromUInt64(1)), "L1Network-1"}, - {"Supervisor", NewSupervisorID("mysupervisor"), "Supervisor-mysupervisor"}, - {"RollupBoostNode", NewRollupBoostNodeID("boost", chainID), "RollupBoostNode-boost-420"}, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - data, err := tt.id.MarshalText() - require.NoError(t, err) - require.Equal(t, tt.expected, string(data)) - }) - } -} diff --git a/op-devstack/stack/component_registry.go b/op-devstack/stack/component_registry.go deleted file mode 100644 index b1bf913383f21..0000000000000 --- a/op-devstack/stack/component_registry.go +++ /dev/null @@ -1,243 +0,0 @@ -package stack - -// ComponentRegistry provides generic component access for systems and networks. -// This interface enables unified component lookup regardless of component type, -// reducing the need for type-specific getter methods on container interfaces. -// -// Components are stored by ComponentID and can be queried by: -// - Exact ID match (Component) -// - Kind (Components, ComponentIDs) -// -// Implementations should use the Registry type internally for storage. -type ComponentRegistry interface { - // Component returns a component by its ID. - // Returns (component, true) if found, (nil, false) otherwise. - Component(id ComponentID) (any, bool) - - // Components returns all components of a given kind. - // Returns an empty slice if no components of that kind exist. - Components(kind ComponentKind) []any - - // ComponentIDs returns all component IDs of a given kind. 
- // Returns an empty slice if no components of that kind exist. - ComponentIDs(kind ComponentKind) []ComponentID -} - -// --- Free functions for typed component access --- -// These functions provide type-safe access to components without requiring -// type-specific methods on every container interface. - -// GetComponent returns a typed component from a registry by ID. -// Returns (component, true) if found and type matches, (nil/zero, false) otherwise. -func GetComponent[T any](r ComponentRegistry, id ComponentID) (T, bool) { - comp, ok := r.Component(id) - if !ok { - var zero T - return zero, false - } - typed, ok := comp.(T) - return typed, ok -} - -// GetComponentsByKind returns all components of a given kind, typed. -// Only components that match the expected type are returned. -func GetComponentsByKind[T any](r ComponentRegistry, kind ComponentKind) []T { - comps := r.Components(kind) - result := make([]T, 0, len(comps)) - for _, comp := range comps { - if typed, ok := comp.(T); ok { - result = append(result, typed) - } - } - return result -} - -// --- Typed getter free functions for L2Network components --- - -// GetL2BatcherByID returns an L2Batcher from a network by ID. -func GetL2BatcherByID(n L2Network, id ComponentID) (L2Batcher, bool) { - return GetComponent[L2Batcher](n, id) -} - -// GetL2ProposerByID returns an L2Proposer from a network by ID. -func GetL2ProposerByID(n L2Network, id ComponentID) (L2Proposer, bool) { - return GetComponent[L2Proposer](n, id) -} - -// GetL2ChallengerByID returns an L2Challenger from a network by ID. -func GetL2ChallengerByID(n L2Network, id ComponentID) (L2Challenger, bool) { - return GetComponent[L2Challenger](n, id) -} - -// GetL2CLNodeByID returns an L2CLNode from a network by ID. -func GetL2CLNodeByID(n L2Network, id ComponentID) (L2CLNode, bool) { - return GetComponent[L2CLNode](n, id) -} - -// GetL2ELNodeByID returns an L2ELNode from a network by ID. 
-func GetL2ELNodeByID(n L2Network, id ComponentID) (L2ELNode, bool) { - return GetComponent[L2ELNode](n, id) -} - -// GetConductorByID returns a Conductor from a network by ID. -func GetConductorByID(n L2Network, id ComponentID) (Conductor, bool) { - return GetComponent[Conductor](n, id) -} - -// GetRollupBoostNodeByID returns a RollupBoostNode from a network by ID. -func GetRollupBoostNodeByID(n L2Network, id ComponentID) (RollupBoostNode, bool) { - return GetComponent[RollupBoostNode](n, id) -} - -// GetOPRBuilderNodeByID returns an OPRBuilderNode from a network by ID. -func GetOPRBuilderNodeByID(n L2Network, id ComponentID) (OPRBuilderNode, bool) { - return GetComponent[OPRBuilderNode](n, id) -} - -// --- Typed getter free functions for L1Network components --- - -// GetL1ELNodeByID returns an L1ELNode from a network by ID. -func GetL1ELNodeByID(n L1Network, id ComponentID) (L1ELNode, bool) { - return GetComponent[L1ELNode](n, id) -} - -// GetL1CLNodeByID returns an L1CLNode from a network by ID. -func GetL1CLNodeByID(n L1Network, id ComponentID) (L1CLNode, bool) { - return GetComponent[L1CLNode](n, id) -} - -// --- Typed getter free functions for Network components (shared by L1 and L2) --- - -// GetFaucetByID returns a Faucet from a network by ID. -func GetFaucetByID(n Network, id ComponentID) (Faucet, bool) { - return GetComponent[Faucet](n, id) -} - -// GetSyncTesterByID returns a SyncTester from a network by ID. -func GetSyncTesterByID(n Network, id ComponentID) (SyncTester, bool) { - return GetComponent[SyncTester](n, id) -} - -// --- Typed getter free functions for System components --- - -// GetSuperchainByID returns a Superchain from a system by ID. -func GetSuperchainByID(s System, id ComponentID) (Superchain, bool) { - return GetComponent[Superchain](s, id) -} - -// GetClusterByID returns a Cluster from a system by ID. 
-func GetClusterByID(s System, id ComponentID) (Cluster, bool) { - return GetComponent[Cluster](s, id) -} - -// GetL1NetworkByID returns an L1Network from a system by ID. -func GetL1NetworkByID(s System, id ComponentID) (L1Network, bool) { - return GetComponent[L1Network](s, id) -} - -// GetL2NetworkByID returns an L2Network from a system by ID. -func GetL2NetworkByID(s System, id ComponentID) (L2Network, bool) { - return GetComponent[L2Network](s, id) -} - -// GetSupervisorByID returns a Supervisor from a system by ID. -func GetSupervisorByID(s System, id ComponentID) (Supervisor, bool) { - return GetComponent[Supervisor](s, id) -} - -// GetTestSequencerByID returns a TestSequencer from a system by ID. -func GetTestSequencerByID(s System, id ComponentID) (TestSequencer, bool) { - return GetComponent[TestSequencer](s, id) -} - -// --- List getter free functions --- - -// GetL2Batchers returns all L2Batchers from a network. -func GetL2Batchers(n L2Network) []L2Batcher { - return GetComponentsByKind[L2Batcher](n, KindL2Batcher) -} - -// GetL2Proposers returns all L2Proposers from a network. -func GetL2Proposers(n L2Network) []L2Proposer { - return GetComponentsByKind[L2Proposer](n, KindL2Proposer) -} - -// GetL2Challengers returns all L2Challengers from a network. -func GetL2Challengers(n L2Network) []L2Challenger { - return GetComponentsByKind[L2Challenger](n, KindL2Challenger) -} - -// GetL2CLNodes returns all L2CLNodes from a network. -func GetL2CLNodes(n L2Network) []L2CLNode { - return GetComponentsByKind[L2CLNode](n, KindL2CLNode) -} - -// GetL2ELNodes returns all L2ELNodes from a network. -func GetL2ELNodes(n L2Network) []L2ELNode { - return GetComponentsByKind[L2ELNode](n, KindL2ELNode) -} - -// GetConductors returns all Conductors from a network. -func GetConductors(n L2Network) []Conductor { - return GetComponentsByKind[Conductor](n, KindConductor) -} - -// GetRollupBoostNodes returns all RollupBoostNodes from a network. 
-func GetRollupBoostNodes(n L2Network) []RollupBoostNode { - return GetComponentsByKind[RollupBoostNode](n, KindRollupBoostNode) -} - -// GetOPRBuilderNodes returns all OPRBuilderNodes from a network. -func GetOPRBuilderNodes(n L2Network) []OPRBuilderNode { - return GetComponentsByKind[OPRBuilderNode](n, KindOPRBuilderNode) -} - -// GetL1ELNodes returns all L1ELNodes from a network. -func GetL1ELNodes(n L1Network) []L1ELNode { - return GetComponentsByKind[L1ELNode](n, KindL1ELNode) -} - -// GetL1CLNodes returns all L1CLNodes from a network. -func GetL1CLNodes(n L1Network) []L1CLNode { - return GetComponentsByKind[L1CLNode](n, KindL1CLNode) -} - -// GetFaucets returns all Faucets from a network. -func GetFaucets(n Network) []Faucet { - return GetComponentsByKind[Faucet](n, KindFaucet) -} - -// GetSyncTesters returns all SyncTesters from a network. -func GetSyncTesters(n Network) []SyncTester { - return GetComponentsByKind[SyncTester](n, KindSyncTester) -} - -// GetSuperchains returns all Superchains from a system. -func GetSuperchains(s System) []Superchain { - return GetComponentsByKind[Superchain](s, KindSuperchain) -} - -// GetClusters returns all Clusters from a system. -func GetClusters(s System) []Cluster { - return GetComponentsByKind[Cluster](s, KindCluster) -} - -// GetL1Networks returns all L1Networks from a system. -func GetL1Networks(s System) []L1Network { - return GetComponentsByKind[L1Network](s, KindL1Network) -} - -// GetL2Networks returns all L2Networks from a system. -func GetL2Networks(s System) []L2Network { - return GetComponentsByKind[L2Network](s, KindL2Network) -} - -// GetSupervisors returns all Supervisors from a system. -func GetSupervisors(s System) []Supervisor { - return GetComponentsByKind[Supervisor](s, KindSupervisor) -} - -// GetTestSequencers returns all TestSequencers from a system. 
-func GetTestSequencers(s System) []TestSequencer { - return GetComponentsByKind[TestSequencer](s, KindTestSequencer) -} diff --git a/op-devstack/stack/conductor.go b/op-devstack/stack/conductor.go index 7f5720cbfd056..e92aa62840aaa 100644 --- a/op-devstack/stack/conductor.go +++ b/op-devstack/stack/conductor.go @@ -2,11 +2,12 @@ package stack import ( conductorRpc "github.com/ethereum-optimism/optimism/op-conductor/rpc" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type Conductor interface { Common - ID() ComponentID + ChainID() eth.ChainID RpcAPI() conductorRpc.API } diff --git a/op-devstack/stack/context.go b/op-devstack/stack/context.go index 1ff61b0921bcf..ab46b35f90c29 100644 --- a/op-devstack/stack/context.go +++ b/op-devstack/stack/context.go @@ -2,30 +2,11 @@ package stack import ( "context" - "log/slog" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/log/logfilter" ) -// ContextWithKind annotates the context with the given kind of service -func ContextWithKind(ctx context.Context, kind ComponentKind) context.Context { - return logfilter.AddLogAttrToContext(ctx, "kind", kind) -} - -// KindFromContext extracts the kind from the context. -func KindFromContext(ctx context.Context) ComponentKind { - v, _ := logfilter.ValueFromContext[ComponentKind](ctx, "kind") - return v -} - -// KindSelector creates a log-filter that applies the given inner log-filter only if it matches the given kind. -// For logs of the specified kind, it applies the inner filter. -// For logs of other kinds, it returns false (filters them out). 
-func KindSelector(kind ComponentKind) logfilter.Selector { - return logfilter.Select("kind", kind) -} - // ContextWithChainID annotates the context with the given chainID of service func ContextWithChainID(ctx context.Context, chainID eth.ChainID) context.Context { return logfilter.AddLogAttrToContext(ctx, "chainID", chainID) @@ -43,32 +24,3 @@ func ChainIDFromContext(ctx context.Context) eth.ChainID { func ChainIDSelector(chainID eth.ChainID) logfilter.Selector { return logfilter.Select("chainID", chainID) } - -// ContextWithID attaches a component ID to the context. -// This also automatically attaches the chain ID and component kind to the context, if available from the ID. -func ContextWithID(ctx context.Context, id slog.LogValuer) context.Context { - if idWithChainID, ok := id.(ChainIDProvider); ok { - chainID := idWithChainID.ChainID() - // Only set chain ID if it's non-zero (i.e., the ID type actually has a meaningful chain ID) - if chainID != (eth.ChainID{}) { - ctx = ContextWithChainID(ctx, chainID) - } - } - if idWithKind, ok := id.(KindProvider); ok { - ctx = ContextWithKind(ctx, idWithKind.Kind()) - } - ctx = logfilter.AddLogAttrToContext(ctx, "id", id) - return ctx -} - -func IDFromContext[T slog.LogValuer](ctx context.Context) T { - v, _ := logfilter.ValueFromContext[T](ctx, "id") - return v -} - -// IDSelector creates a log-filter that applies the given inner log-filter only if it matches the given ID. -// This can be composed with logfilter package utils like logfilter.MuteAll or logfilter.Level -// to adjust logging for a specific chain ID. 
-func IDSelector(id slog.LogValuer) logfilter.Selector { - return logfilter.Select("id", id) -} diff --git a/op-devstack/stack/context_test.go b/op-devstack/stack/context_test.go index 8e28729bb3862..72f950fc239f5 100644 --- a/op-devstack/stack/context_test.go +++ b/op-devstack/stack/context_test.go @@ -16,57 +16,17 @@ func TestContext(t *testing.T) { ctx := context.Background() chainA := eth.ChainIDFromUInt64(900) chainB := eth.ChainIDFromUInt64(901) - t.Run("chainID", func(t *testing.T) { - require.Equal(t, eth.ChainID{}, ChainIDFromContext(ctx), "none") - require.Equal(t, chainA, ChainIDFromContext(ContextWithChainID(ctx, chainA)), "lookup") - require.Equal(t, chainB, ChainIDFromContext(ContextWithChainID(ContextWithChainID(ctx, chainA), chainB)), "priority") - }) - t.Run("kind", func(t *testing.T) { - require.Equal(t, ComponentKind(""), KindFromContext(ctx), "none") - require.Equal(t, KindL2Batcher, KindFromContext(ContextWithKind(ctx, KindL2Batcher)), "lookup") - require.Equal(t, KindL2Proposer, KindFromContext(ContextWithKind(ContextWithKind(ctx, KindL2Batcher), KindL2Proposer)), "priority") - }) - t.Run("id", func(t *testing.T) { - require.Equal(t, ComponentID{}, IDFromContext[ComponentID](ctx), "none") - id1 := NewL2BatcherID("batcherA", chainA) - ctx1 := ContextWithID(ctx, id1) - require.Equal(t, KindL2Batcher, KindFromContext(ctx1), "lookup kind") - require.Equal(t, chainA, ChainIDFromContext(ctx1), "lookup chainID") - require.Equal(t, id1, IDFromContext[ComponentID](ctx1), "lookup ID") - // now overlay another different kind of ID on top - id2 := NewSuperchainID("foobar") - ctx2 := ContextWithID(ctx1, id2) - require.Equal(t, KindSuperchain, KindFromContext(ctx2), "lookup kind") - require.Equal(t, chainA, ChainIDFromContext(ctx2), "chainID still preserved") - require.Equal(t, id2, IDFromContext[ComponentID](ctx2), "lookup ID - now shows superchain") - // With type aliases, IDFromContext returns the stored ComponentID regardless of "type" - // The Kind() 
method can be used to check the actual kind of ID - require.Equal(t, KindSuperchain, IDFromContext[ComponentID](ctx2).Kind(), "id kind check") - }) + require.Equal(t, eth.ChainID{}, ChainIDFromContext(ctx), "none") + require.Equal(t, chainA, ChainIDFromContext(ContextWithChainID(ctx, chainA)), "lookup") + require.Equal(t, chainB, ChainIDFromContext(ContextWithChainID(ContextWithChainID(ctx, chainA), chainB)), "priority") } func TestLogFilter(t *testing.T) { ctx := context.Background() chainA := eth.ChainIDFromUInt64(900) chainB := eth.ChainIDFromUInt64(901) - t.Run("chainID", func(t *testing.T) { - fn := ChainIDSelector(chainA).Mute() - require.Equal(t, tri.Undefined, fn(ctx, log.LevelDebug), "regular context should be false") - require.Equal(t, tri.False, fn(ContextWithChainID(ctx, chainA), log.LevelDebug), "detected chain should be muted") - require.Equal(t, tri.Undefined, fn(ContextWithChainID(ctx, chainB), log.LevelDebug), "different chain should be shown") - }) - t.Run("kind", func(t *testing.T) { - fn := KindSelector(KindL2Batcher).Mute() - require.Equal(t, tri.Undefined, fn(ctx, log.LevelDebug), "regular context should be false") - require.Equal(t, tri.False, fn(ContextWithKind(ctx, KindL2Batcher), log.LevelDebug), "detected kind should be muted") - require.Equal(t, tri.Undefined, fn(ContextWithKind(ctx, KindL2Proposer), log.LevelDebug), "different kind should be shown") - }) - t.Run("id", func(t *testing.T) { - id1 := NewL2BatcherID("batcherA", chainA) - fn := IDSelector(id1).Mute() - require.Equal(t, tri.Undefined, fn(ctx, log.LevelDebug), "regular context should be false") - require.Equal(t, tri.False, fn(ContextWithID(ctx, id1), log.LevelDebug), "detected id should be muted") - id2 := NewSuperchainID("foobar") - require.Equal(t, tri.Undefined, fn(ContextWithID(ctx, id2), log.LevelDebug), "different id should be shown") - }) + fn := ChainIDSelector(chainA).Mute() + require.Equal(t, tri.Undefined, fn(ctx, log.LevelDebug), "regular context should be false") 
+ require.Equal(t, tri.False, fn(ContextWithChainID(ctx, chainA), log.LevelDebug), "detected chain should be muted") + require.Equal(t, tri.Undefined, fn(ContextWithChainID(ctx, chainB), log.LevelDebug), "different chain should be shown") } diff --git a/op-devstack/stack/ext_network_config.go b/op-devstack/stack/ext_network_config.go deleted file mode 100644 index b5bac43d1bfab..0000000000000 --- a/op-devstack/stack/ext_network_config.go +++ /dev/null @@ -1,11 +0,0 @@ -package stack - -import "github.com/ethereum-optimism/optimism/op-service/eth" - -type ExtNetworkConfig struct { - L2NetworkName string - L1ChainID eth.ChainID - L2ELEndpoint string - L1CLBeaconEndpoint string - L1ELEndpoint string -} diff --git a/op-devstack/stack/faucet.go b/op-devstack/stack/faucet.go index fe590c798c5be..10ebf48e2374a 100644 --- a/op-devstack/stack/faucet.go +++ b/op-devstack/stack/faucet.go @@ -2,10 +2,11 @@ package stack import ( "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type Faucet interface { Common - ID() ComponentID + ChainID() eth.ChainID API() apis.Faucet } diff --git a/op-devstack/stack/fb_ws_client.go b/op-devstack/stack/fb_ws_client.go deleted file mode 100644 index 54e2b95dae2e0..0000000000000 --- a/op-devstack/stack/fb_ws_client.go +++ /dev/null @@ -1,15 +0,0 @@ -package stack - -import ( - "net/http" - - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type FlashblocksWSClient interface { - Common - ChainID() eth.ChainID - ID() ComponentID - WsUrl() string - WsHeaders() http.Header -} diff --git a/op-devstack/stack/l1_cl.go b/op-devstack/stack/l1_cl.go index a1e3ce3734ae7..49face44aa715 100644 --- a/op-devstack/stack/l1_cl.go +++ b/op-devstack/stack/l1_cl.go @@ -2,13 +2,14 @@ package stack import ( "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" ) // L1CLNode is a L1 ethereum consensus-layer node, aka Beacon node. 
// This node may not be a full beacon node, and instead run a mock L1 consensus node. type L1CLNode interface { Common - ID() ComponentID + ChainID() eth.ChainID BeaconClient() apis.BeaconClient } diff --git a/op-devstack/stack/l1_el.go b/op-devstack/stack/l1_el.go index ec65e6241ddc9..da4441bbc1aad 100644 --- a/op-devstack/stack/l1_el.go +++ b/op-devstack/stack/l1_el.go @@ -2,7 +2,5 @@ package stack // L1ELNode is a L1 ethereum execution-layer node type L1ELNode interface { - ID() ComponentID - ELNode } diff --git a/op-devstack/stack/l1_network.go b/op-devstack/stack/l1_network.go index ce124772ef6ba..60b4857f14fc3 100644 --- a/op-devstack/stack/l1_network.go +++ b/op-devstack/stack/l1_network.go @@ -3,21 +3,7 @@ package stack // L1Network represents a L1 chain, a collection of configuration and node resources. type L1Network interface { Network - ID() ComponentID - - L1ELNode(m L1ELMatcher) L1ELNode - L1CLNode(m L1CLMatcher) L1CLNode - - L1ELNodeIDs() []ComponentID - L1CLNodeIDs() []ComponentID L1ELNodes() []L1ELNode L1CLNodes() []L1CLNode } - -type ExtensibleL1Network interface { - ExtensibleNetwork - L1Network - AddL1ELNode(v L1ELNode) - AddL1CLNode(v L1CLNode) -} diff --git a/op-devstack/stack/l2_batcher.go b/op-devstack/stack/l2_batcher.go index 04af98094282f..12ddee248cc6d 100644 --- a/op-devstack/stack/l2_batcher.go +++ b/op-devstack/stack/l2_batcher.go @@ -2,11 +2,12 @@ package stack import ( "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" ) // L2Batcher represents an L2 batch-submission service, posting L2 data of an L2 to L1. 
type L2Batcher interface { Common - ID() ComponentID + ChainID() eth.ChainID ActivityAPI() apis.BatcherActivity } diff --git a/op-devstack/stack/l2_challenger.go b/op-devstack/stack/l2_challenger.go index 7d9f9d2dfc2c4..c4d524c12457e 100644 --- a/op-devstack/stack/l2_challenger.go +++ b/op-devstack/stack/l2_challenger.go @@ -2,10 +2,11 @@ package stack import ( "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type L2Challenger interface { Common - ID() ComponentID + ChainID() eth.ChainID Config() *config.Config } diff --git a/op-devstack/stack/l2_cl.go b/op-devstack/stack/l2_cl.go index 14d6d0695d465..b4c64fdd9f486 100644 --- a/op-devstack/stack/l2_cl.go +++ b/op-devstack/stack/l2_cl.go @@ -9,7 +9,7 @@ import ( // L2CLNode is a L2 ethereum consensus-layer node type L2CLNode interface { Common - ID() ComponentID + ChainID() eth.ChainID ClientRPC() client.RPC RollupAPI() apis.RollupClient @@ -25,10 +25,3 @@ type L2CLNode interface { ELClient() apis.EthClient } - -type LinkableL2CLNode interface { - // Links the nodes. Does not make any backend changes, just registers the EL as connected to this CL. - LinkEL(el L2ELNode) - LinkRollupBoostNode(rollupBoostNode RollupBoostNode) - LinkOPRBuilderNode(oprb OPRBuilderNode) -} diff --git a/op-devstack/stack/l2_el.go b/op-devstack/stack/l2_el.go index 4678cb084ec0c..861f15b808d7b 100644 --- a/op-devstack/stack/l2_el.go +++ b/op-devstack/stack/l2_el.go @@ -6,7 +6,6 @@ import ( // L2ELNode is a L2 ethereum execution-layer node type L2ELNode interface { - ID() ComponentID L2EthClient() apis.L2EthClient L2EngineClient() apis.EngineClient diff --git a/op-devstack/stack/l2_network.go b/op-devstack/stack/l2_network.go index 7fb0f010b9840..025d94bcb84b3 100644 --- a/op-devstack/stack/l2_network.go +++ b/op-devstack/stack/l2_network.go @@ -22,32 +22,13 @@ type Keys interface { } // L2Network represents a L2 chain, a collection of configuration and node resources. 
-// There is an extension-interface ExtensibleL2Network for adding new components to the chain. type L2Network interface { Network - ID() ComponentID RollupConfig() *rollup.Config Deployment() L2Deployment Keys() Keys - Superchain() Superchain L1() L1Network - Cluster() Cluster - - L2Batcher(m L2BatcherMatcher) L2Batcher - L2Proposer(m L2ProposerMatcher) L2Proposer - L2Challenger(m L2ChallengerMatcher) L2Challenger - L2CLNode(m L2CLMatcher) L2CLNode - L2ELNode(m L2ELMatcher) L2ELNode - Conductor(m ConductorMatcher) Conductor - RollupBoostNode(m RollupBoostNodeMatcher) RollupBoostNode - OPRBuilderNode(m OPRBuilderNodeMatcher) OPRBuilderNode - - L2BatcherIDs() []ComponentID - L2ProposerIDs() []ComponentID - L2ChallengerIDs() []ComponentID - L2CLNodeIDs() []ComponentID - L2ELNodeIDs() []ComponentID L2Batchers() []L2Batcher L2Proposers() []L2Proposer @@ -58,18 +39,3 @@ type L2Network interface { RollupBoostNodes() []RollupBoostNode OPRBuilderNodes() []OPRBuilderNode } - -// ExtensibleL2Network is an optional extension interface for L2Network, -// for adding new components to the chain. Used during test-setup, not generally during test execution. -type ExtensibleL2Network interface { - ExtensibleNetwork - L2Network - AddL2Batcher(v L2Batcher) - AddL2Proposer(v L2Proposer) - AddL2Challenger(v L2Challenger) - AddL2CLNode(v L2CLNode) - AddL2ELNode(v L2ELNode) - AddConductor(v Conductor) - AddRollupBoostNode(v RollupBoostNode) - AddOPRBuilderNode(v OPRBuilderNode) -} diff --git a/op-devstack/stack/l2_proposer.go b/op-devstack/stack/l2_proposer.go index 71e8dbe58cc18..27eb7fb4c978e 100644 --- a/op-devstack/stack/l2_proposer.go +++ b/op-devstack/stack/l2_proposer.go @@ -1,7 +1,9 @@ package stack +import "github.com/ethereum-optimism/optimism/op-service/eth" + // L2Proposer is a L2 output proposer, posting claims of L2 state to L1. 
type L2Proposer interface { Common - ID() ComponentID + ChainID() eth.ChainID } diff --git a/op-devstack/stack/lifecycle.go b/op-devstack/stack/lifecycle.go new file mode 100644 index 0000000000000..3aee741c34d5e --- /dev/null +++ b/op-devstack/stack/lifecycle.go @@ -0,0 +1,6 @@ +package stack + +type Lifecycle interface { + Start() + Stop() +} diff --git a/op-devstack/stack/match/archive.go b/op-devstack/stack/match/archive.go deleted file mode 100644 index dac4ca2febc58..0000000000000 --- a/op-devstack/stack/match/archive.go +++ /dev/null @@ -1,30 +0,0 @@ -package match - -import ( - "context" - "math/big" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum/go-ethereum/common" -) - -// WithArchive matches the first archive node. This matcher makes two assumptions: -// -// 1. Non-archive nodes only store the last N blocks worth of historical balances, while archive -// nodes store all of them. -// 2. The chain has at least N+1 blocks. -// -// Either assumption being false could result in false positives or false negatives. Note that -// there is also a race condition where assumption (2) becomes true after the function returns. -func WithArchive(ctx context.Context) stack.Matcher[stack.L2ELNode] { - return MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { - if _, err := elem.L2EthClient().BlockRefByNumber(ctx, 1); err != nil { - // The devnet is fresh. This is almost guaranteed to be a devnet created by sysgo, - // which always uses archive mode. - return true - } - // Use block 1 since EL clients may keep the genesis block when not in archive mode. 
- _, err := elem.L2EthClient().BalanceAt(ctx, common.Address{}, big.NewInt(1)) - return err == nil - }) -} diff --git a/op-devstack/stack/match/core.go b/op-devstack/stack/match/core.go deleted file mode 100644 index 073d607fa5b1a..0000000000000 --- a/op-devstack/stack/match/core.go +++ /dev/null @@ -1,40 +0,0 @@ -package match - -import ( - "fmt" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -// MatchFn implements stack.Matcher, checking all elements at once. -type MatchFn[E stack.Identifiable] func(elems []E) []E - -func (m MatchFn[E]) Match(elems []E) []E { - return m(elems) -} - -func (m MatchFn[E]) String() string { - var x E - return fmt.Sprintf("MatchFn[%T]", x) -} - -var _ stack.Matcher[stack.L2Network] = MatchFn[stack.L2Network](nil) - -// MatchElemFn implements stack.Matcher, checking one element at a time. -type MatchElemFn[E stack.Identifiable] func(elem E) bool - -func (m MatchElemFn[E]) Match(elems []E) (out []E) { - for _, elem := range elems { - if m(elem) { - out = append(out, elem) - } - } - return out -} - -func (m MatchElemFn[E]) String() string { - var x E - return fmt.Sprintf("MatchElemFn[%T]", x) -} - -var _ stack.Matcher[stack.L2Network] = MatchElemFn[stack.L2Network](nil) diff --git a/op-devstack/stack/match/doc.go b/op-devstack/stack/match/doc.go deleted file mode 100644 index ab4472dc73aad..0000000000000 --- a/op-devstack/stack/match/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -/* -Package match provides matching presets and utils for selecting devstack components. - -Matchers can be composed, e.g.: -- `And(OpGeth, WithLabel("name", "alice"))` to select the op-geth node named "alice". -- `Or(OpGeth, OpReth)` to select an op-geth or op-reth node. -- `Not(OpGeth)` to select anything but an op-geth node. 
- -Custom matchers can also be implemented: -- MatchFn can filter a list of elements down to just the matched elements -- MatchElemFn can filter by checking each individual element - -For convenience, aliases for common matchers are also provided, -e.g. for matching "chain A", or matching the first L2 EL node. -*/ -package match diff --git a/op-devstack/stack/match/engine.go b/op-devstack/stack/match/engine.go deleted file mode 100644 index a0eca09d00e8b..0000000000000 --- a/op-devstack/stack/match/engine.go +++ /dev/null @@ -1,55 +0,0 @@ -package match - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -func WithEngine(engine stack.ComponentID) stack.Matcher[stack.L2CLNode] { - return MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { - for _, el := range elem.ELs() { - if el.ID() == engine { - return true - } - } - // Check RollupBoost nodes with matching key/chainID - rbID := stack.NewRollupBoostNodeID(engine.Key(), engine.ChainID()) - for _, rb := range elem.RollupBoostNodes() { - if rb.ID() == rbID { - return true - } - } - // Check OPRBuilder nodes with matching key/chainID - oprbID := stack.NewOPRBuilderNodeID(engine.Key(), engine.ChainID()) - for _, oprb := range elem.OPRBuilderNodes() { - if oprb.ID() == oprbID { - return true - } - } - return false - }) -} - -func EngineFor(cl stack.L2CLNode) stack.Matcher[stack.L2ELNode] { - return MatchElemFn[stack.L2ELNode](func(elem stack.L2ELNode) bool { - for _, el := range cl.ELs() { - if el.ID() == elem.ID() { - return true - } - } - // Check RollupBoost nodes with matching key/chainID - rbID := stack.NewRollupBoostNodeID(elem.ID().Key(), elem.ID().ChainID()) - for _, rb := range cl.RollupBoostNodes() { - if rb.ID() == rbID { - return true - } - } - // Check OPRBuilder nodes with matching key/chainID - oprbID := stack.NewOPRBuilderNodeID(elem.ID().Key(), elem.ID().ChainID()) - for _, oprb := range cl.OPRBuilderNodes() { - if oprb.ID() == oprbID { - return true - } - } - return 
false - }) -} diff --git a/op-devstack/stack/match/first.go b/op-devstack/stack/match/first.go deleted file mode 100644 index 56af94e80abb3..0000000000000 --- a/op-devstack/stack/match/first.go +++ /dev/null @@ -1,27 +0,0 @@ -package match - -import "github.com/ethereum-optimism/optimism/op-devstack/stack" - -var FirstL2EL = First[stack.L2ELNode]() -var FirstL2CL = First[stack.L2CLNode]() -var FirstL2Batcher = First[stack.L2Batcher]() -var FirstL2Proposer = First[stack.L2Proposer]() -var FirstL2Challenger = First[stack.L2Challenger]() - -var FirstTestSequencer = First[stack.TestSequencer]() -var FirstSupervisor = First[stack.Supervisor]() -var FirstSupernode = First[stack.Supernode]() - -var FirstL1EL = First[stack.L1ELNode]() -var FirstL1CL = First[stack.L1CLNode]() - -var FirstL1Network = First[stack.L1Network]() -var FirstL2Network = First[stack.L2Network]() -var FirstSuperchain = First[stack.Superchain]() -var FirstCluster = First[stack.Cluster]() - -var FirstFaucet = First[stack.Faucet]() -var FirstSyncTester = First[stack.SyncTester]() - -var FirstOPRBuilderNode = First[stack.OPRBuilderNode]() -var FirstRollupBoostNode = First[stack.RollupBoostNode]() diff --git a/op-devstack/stack/match/gate.go b/op-devstack/stack/match/gate.go deleted file mode 100644 index 115fbc63dc066..0000000000000 --- a/op-devstack/stack/match/gate.go +++ /dev/null @@ -1,31 +0,0 @@ -package match - -import ( - "fmt" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -type assume[E stack.Identifiable] struct { - t devtest.T - inner stack.Matcher[E] -} - -func (a *assume[E]) Match(elems []E) []E { - elems = a.inner.Match(elems) - a.t.Gate().NotEmpty(elems, "must match something to continue, but matched nothing with %s", a.inner) - return elems -} - -func (a *assume[E]) String() string { - return fmt.Sprintf("Assume(%s)", a.inner) -} - -// Assume skips the test if no elements were matched with the inner matcher 
-func Assume[E stack.Identifiable](t devtest.T, inner stack.Matcher[E]) stack.Matcher[E] { - return &assume[E]{ - t: t, - inner: inner, - } -} diff --git a/op-devstack/stack/match/gate_test.go b/op-devstack/stack/match/gate_test.go deleted file mode 100644 index 61f26077f58ea..0000000000000 --- a/op-devstack/stack/match/gate_test.go +++ /dev/null @@ -1,52 +0,0 @@ -package match - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-service/testreq" -) - -type gateTesting struct { - log func(format string, args ...interface{}) - hit bool -} - -func (g *gateTesting) Errorf(format string, args ...interface{}) { - g.log(format, args...) -} - -func (g *gateTesting) FailNow() { - g.hit = true - panic("gate hit") -} - -func (g *gateTesting) Helper() { -} - -type fakeTesting struct { - devtest.T // embedded but nil, to inherit interface - g *gateTesting -} - -func (f *fakeTesting) Gate() *testreq.Assertions { - return testreq.New(f.g) -} - -func TestAssume(t *testing.T) { - a := newTestObject("a") - b := newTestObject("b") - fT := &fakeTesting{T: nil, g: &gateTesting{log: t.Logf}} - - m := Assume(fT, First[*testObject]()) - require.Equal(t, m.String(), "Assume(ByIndex(0))") - require.Equal(t, []*testObject{a}, m.Match([]*testObject{a})) - require.Equal(t, []*testObject{a}, m.Match([]*testObject{a, b})) - require.False(t, fT.g.hit, "no skipping if we got a match") - require.PanicsWithValue(t, "gate hit", func() { - m.Match([]*testObject{}) - }) - require.True(t, fT.g.hit, "skip if we have no match") -} diff --git a/op-devstack/stack/match/interop.go b/op-devstack/stack/match/interop.go deleted file mode 100644 index 1810debd76436..0000000000000 --- a/op-devstack/stack/match/interop.go +++ /dev/null @@ -1,14 +0,0 @@ -package match - -import "github.com/ethereum-optimism/optimism/op-devstack/stack" - -// L2ChainA is an alias for the first L2 network. 
-var L2ChainA = First[stack.L2Network]() - -// L2ChainB is an alias for the second L2 network. -var L2ChainB = Second[stack.L2Network]() - -// L2ChainById returns a matcher for the L2 network with the given ID. -func L2ChainById(id stack.ComponentID) stack.Matcher[stack.L2Network] { - return byID[stack.L2Network](id) -} diff --git a/op-devstack/stack/match/labels.go b/op-devstack/stack/match/labels.go deleted file mode 100644 index 44ea4e09fc988..0000000000000 --- a/op-devstack/stack/match/labels.go +++ /dev/null @@ -1,38 +0,0 @@ -package match - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -func WithLabel[E interface { - stack.Identifiable - Label(key string) string -}](key, value string) stack.Matcher[E] { - return MatchElemFn[E](func(elem E) bool { - return elem.Label(key) == value - }) -} - -const ( - LabelVendor = "vendor" -) - -type Vendor string - -const ( - Geth Vendor = "geth" - OpReth Vendor = "op-reth" - OpGeth Vendor = "op-geth" - Proxyd Vendor = "proxyd" - FlashblocksWSClient Vendor = "flashblocks-websocket-proxy" - OpNode Vendor = "op-node" - KonaNode Vendor = "kona-node" -) - -func (v Vendor) Match(elems []stack.L2ELNode) []stack.L2ELNode { - return WithLabel[stack.L2ELNode](LabelVendor, string(v)).Match(elems) -} - -func (v Vendor) String() string { - return string(v) -} diff --git a/op-devstack/stack/match/second.go b/op-devstack/stack/match/second.go deleted file mode 100644 index c483f015c397d..0000000000000 --- a/op-devstack/stack/match/second.go +++ /dev/null @@ -1,8 +0,0 @@ -package match - -import "github.com/ethereum-optimism/optimism/op-devstack/stack" - -var SecondL2EL = Second[stack.L2ELNode]() -var SecondL2CL = Second[stack.L2CLNode]() - -var SecondSupervisor = Second[stack.Supervisor]() diff --git a/op-devstack/stack/match/sequencer.go b/op-devstack/stack/match/sequencer.go deleted file mode 100644 index 49e6b3b25e0ee..0000000000000 --- a/op-devstack/stack/match/sequencer.go +++ /dev/null @@ -1,21 +0,0 @@ 
-package match - -import ( - "context" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/retry" -) - -func WithSequencerActive(ctx context.Context) stack.Matcher[stack.L2CLNode] { - return MatchElemFn[stack.L2CLNode](func(elem stack.L2CLNode) bool { - sequencing, err := retry.Do(ctx, 10, retry.Exponential(), func() (bool, error) { - return elem.RollupAPI().SequencerActive(ctx) - }) - if err != nil { - // Not available so can't be used by the test - return false - } - return sequencing - }) -} diff --git a/op-devstack/stack/match/util.go b/op-devstack/stack/match/util.go deleted file mode 100644 index 97a3361dcaa07..0000000000000 --- a/op-devstack/stack/match/util.go +++ /dev/null @@ -1,171 +0,0 @@ -package match - -import ( - "fmt" - "strings" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -func First[E stack.Identifiable]() stack.Matcher[E] { - return ByIndex[E](0) -} - -func Second[E stack.Identifiable]() stack.Matcher[E] { - return ByIndex[E](1) -} - -func byID[E stack.Identifiable](id stack.ComponentID) stack.Matcher[E] { - return MatchElemFn[E](func(elem E) bool { - return elem.ID() == id - }) -} - -type byIndexMatcher[E stack.Identifiable] struct { - index int -} - -func (ma byIndexMatcher[E]) Match(elems []E) []E { - if ma.index < 0 { - return nil - } - if ma.index >= len(elems) { - return nil - } - return elems[ma.index : ma.index+1] -} - -func (ma byIndexMatcher[E]) String() string { - return fmt.Sprintf("ByIndex(%d)", ma.index) -} - -// ByIndex matches element i (zero-indexed). -func ByIndex[E stack.Identifiable](index int) stack.Matcher[E] { - return byIndexMatcher[E]{index: index} -} - -type lastMatcher[E stack.Identifiable] struct{} - -func (ma lastMatcher[E]) Match(elems []E) []E { - if len(elems) == 0 { - return nil - } - return elems[len(elems)-1:] -} - -func (ma lastMatcher[E]) String() string { - return "Last" -} - -// Last matches the last element. 
-func Last[E stack.Identifiable]() stack.Matcher[E] { - return lastMatcher[E]{} -} - -type onlyMatcher[E stack.Identifiable] struct{} - -func (ma onlyMatcher[E]) Match(elems []E) []E { - if len(elems) != 1 { - return nil - } - return elems -} - -func (ma onlyMatcher[E]) String() string { - return "Only" -} - -// Only matches the only value. If there are none, or more than one, then no value is matched. -func Only[E stack.Identifiable]() stack.Matcher[E] { - return onlyMatcher[E]{} -} - -type andMatcher[E stack.Identifiable] struct { - inner []stack.Matcher[E] -} - -func (ma andMatcher[E]) Match(elems []E) []E { - for _, matcher := range ma.inner { - elems = matcher.Match(elems) - } - return elems -} - -func (ma andMatcher[E]) String() string { - return fmt.Sprintf("And(%s)", joinStr(ma.inner)) -} - -// And combines all the matchers, by running them all, narrowing down the set with each application. -// If none are provided, all inputs are matched. -func And[E stack.Identifiable](matchers ...stack.Matcher[E]) stack.Matcher[E] { - return andMatcher[E]{inner: matchers} -} - -type orMatcher[E stack.Identifiable] struct { - inner []stack.Matcher[E] -} - -func (ma orMatcher[E]) Match(elems []E) []E { - seen := make(map[stack.ComponentID]struct{}) - for _, matcher := range ma.inner { - for _, elem := range matcher.Match(elems) { - seen[elem.ID()] = struct{}{} - } - } - // preserve sort order and duplicates by iterating the original list - out := make([]E, 0, len(seen)) - for _, elem := range elems { - if _, ok := seen[elem.ID()]; ok { - out = append(out, elem) - } - } - return out -} - -func (ma orMatcher[E]) String() string { - return fmt.Sprintf("Or(%s)", joinStr(ma.inner)) -} - -func joinStr[V fmt.Stringer](elems []V) string { - var out strings.Builder - for i, e := range elems { - out.WriteString(e.String()) - if i < len(elems)-1 { - out.WriteString(", ") - } - } - return out.String() -} - -// Or returns each of the inputs that have a match with any of the matchers. 
-// All inputs are applied to all matchers, even if matched previously. -func Or[E stack.Identifiable](matchers ...stack.Matcher[E]) stack.Matcher[E] { - return orMatcher[E]{inner: matchers} -} - -type notMatcher[E stack.Identifiable] struct { - inner stack.Matcher[E] -} - -func (ma notMatcher[E]) Match(elems []E) []E { - matched := make(map[stack.ComponentID]struct{}) - for _, elem := range ma.inner.Match(elems) { - matched[elem.ID()] = struct{}{} - } - out := make([]E, 0, len(elems)) - for _, elem := range elems { - if _, ok := matched[elem.ID()]; !ok { - out = append(out, elem) - } - } - return out -} - -func (ma notMatcher[E]) String() string { - return fmt.Sprintf("Not(%s)", ma.inner) -} - -// Not matches the elements that do not match the given matcher. -func Not[E stack.Identifiable](matcher stack.Matcher[E]) stack.Matcher[E] { - return notMatcher[E]{inner: matcher} -} diff --git a/op-devstack/stack/match/util_test.go b/op-devstack/stack/match/util_test.go deleted file mode 100644 index 5080c6f5a99fe..0000000000000 --- a/op-devstack/stack/match/util_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package match - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -type testObject struct { - id stack.ComponentID -} - -func (t *testObject) ID() stack.ComponentID { - return t.id -} - -var _ stack.Identifiable = (*testObject)(nil) - -func newTestObject(key string) *testObject { - return &testObject{id: stack.NewComponentIDKeyOnly(stack.KindL2ELNode, key)} -} - -func TestUtils(t *testing.T) { - a := newTestObject("a") - b := newTestObject("b") - c := newTestObject("c") - d := newTestObject("d") - - t.Run("first", func(t *testing.T) { - m := First[*testObject]() - require.Equal(t, m.String(), "ByIndex(0)") - require.Equal(t, []*testObject{a}, m.Match([]*testObject{a, b, c, d})) - require.Equal(t, []*testObject{b}, m.Match([]*testObject{b, a, c, d})) - require.Equal(t, []*testObject{b}, 
m.Match([]*testObject{b, b, b})) - require.Equal(t, []*testObject(nil), m.Match([]*testObject{})) - }) - t.Run("last", func(t *testing.T) { - m := Last[*testObject]() - require.Equal(t, m.String(), "Last") - require.Equal(t, []*testObject{d}, m.Match([]*testObject{a, b, c, d})) - require.Equal(t, []*testObject{c}, m.Match([]*testObject{b, a, c})) - }) - t.Run("only", func(t *testing.T) { - m := Only[*testObject]() - t.Log(m.String()) - require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b, c, d})) - require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b})) - require.Equal(t, []*testObject{c}, m.Match([]*testObject{c})) - require.Equal(t, []*testObject(nil), m.Match([]*testObject{})) - }) - t.Run("and", func(t *testing.T) { - m := And(First[*testObject](), Second[*testObject]()) - require.Equal(t, m.String(), "And(ByIndex(0), ByIndex(1))") - require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b, c, d})) - // narrowed down to single element with First - require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, a})) - m2 := And(Second[*testObject](), First[*testObject]()) - // Narrowed down to b, then select b as first - require.Equal(t, []*testObject{b}, m2.Match([]*testObject{a, b})) - }) - t.Run("or", func(t *testing.T) { - m := Or(First[*testObject](), Second[*testObject]()) - t.Log(m.String()) - require.Equal(t, []*testObject{a, b}, m.Match([]*testObject{a, b, c, d})) - }) - t.Run("not", func(t *testing.T) { - m := Not(Or(First[*testObject](), Second[*testObject]())) - require.Equal(t, m.String(), "Not(Or(ByIndex(0), ByIndex(1)))") - require.Equal(t, []*testObject{c, d}, m.Match([]*testObject{a, b, c, d})) - require.Equal(t, []*testObject{}, m.Match([]*testObject{})) - m2 := Not(Last[*testObject]()) - t.Log(m.String()) - require.Equal(t, []*testObject{a, b, c}, m2.Match([]*testObject{a, b, c, d})) - }) - t.Run("by-index", func(t *testing.T) { - m := ByIndex[*testObject](2) - require.Equal(t, m.String(), "ByIndex(2)") - 
require.Equal(t, []*testObject{c}, m.Match([]*testObject{a, b, c, d})) - require.Equal(t, []*testObject{c}, m.Match([]*testObject{a, b, c})) - require.Equal(t, []*testObject(nil), m.Match([]*testObject{a, b})) - require.Equal(t, []*testObject(nil), m.Match([]*testObject{a})) - require.Equal(t, []*testObject(nil), m.Match([]*testObject{})) - m2 := ByIndex[*testObject](-1) - require.Equal(t, []*testObject(nil), m2.Match([]*testObject{a, b})) - }) -} diff --git a/op-devstack/stack/matcher.go b/op-devstack/stack/matcher.go deleted file mode 100644 index d73b3f1dabb29..0000000000000 --- a/op-devstack/stack/matcher.go +++ /dev/null @@ -1,60 +0,0 @@ -package stack - -// Identifiable is implemented by all components that have an ID. -type Identifiable interface { - ID() ComponentID -} - -// Matcher abstracts what can be used as getter-method argument. -// All ID types implement this interface, and lookup functions check -// if the argument is an ID before searching for a match. -// This enables lookups such as getting a component by labels, -// by its state, by its relation to other components, etc. -type Matcher[E Identifiable] interface { - // Match finds the elements that pass the matcher. - // If no element passes, it returns an empty slice. - // Callers should guarantee a stable order of ids, to ensure a deterministic match. - Match(elems []E) []E - - // String must describe the matcher for debugging purposes. - // This does not get used for matching. 
- String() string -} - -type ClusterMatcher = Matcher[Cluster] - -type L1CLMatcher = Matcher[L1CLNode] - -type L1ELMatcher = Matcher[L1ELNode] - -type L1NetworkMatcher = Matcher[L1Network] - -type L2NetworkMatcher = Matcher[L2Network] - -type SuperchainMatcher = Matcher[Superchain] - -type L2BatcherMatcher = Matcher[L2Batcher] - -type L2ChallengerMatcher = Matcher[L2Challenger] - -type L2ProposerMatcher = Matcher[L2Proposer] - -type L2CLMatcher = Matcher[L2CLNode] - -type SupervisorMatcher = Matcher[Supervisor] - -type SupernodeMatcher = Matcher[Supernode] - -type TestSequencerMatcher = Matcher[TestSequencer] - -type ConductorMatcher = Matcher[Conductor] - -type L2ELMatcher = Matcher[L2ELNode] - -type FaucetMatcher = Matcher[Faucet] - -type SyncTesterMatcher = Matcher[SyncTester] - -type RollupBoostNodeMatcher = Matcher[RollupBoostNode] - -type OPRBuilderNodeMatcher = Matcher[OPRBuilderNode] diff --git a/op-devstack/stack/network.go b/op-devstack/stack/network.go index 35c30363f2cc3..aa410ac2b0d18 100644 --- a/op-devstack/stack/network.go +++ b/op-devstack/stack/network.go @@ -11,24 +11,12 @@ import ( // A network hosts configuration resources and tracks participating nodes. 
type Network interface { Common - ComponentRegistry ChainID() eth.ChainID ChainConfig() *params.ChainConfig - Faucet(m FaucetMatcher) Faucet Faucets() []Faucet - FaucetIDs() []ComponentID - SyncTester(m SyncTesterMatcher) SyncTester SyncTesters() []SyncTester - SyncTesterIDs() []ComponentID -} - -type ExtensibleNetwork interface { - Network - - AddFaucet(f Faucet) - AddSyncTester(st SyncTester) } diff --git a/op-devstack/stack/op_rbuilder.go b/op-devstack/stack/op_rbuilder.go index 0f7e92396bd83..cb16fc6f77905 100644 --- a/op-devstack/stack/op_rbuilder.go +++ b/op-devstack/stack/op_rbuilder.go @@ -7,7 +7,6 @@ import ( // OPRBuilderNode is a L2 ethereum execution-layer node type OPRBuilderNode interface { - ID() ComponentID L2EthClient() apis.L2EthClient L2EngineClient() apis.EngineClient FlashblocksClient() *client.WSClient diff --git a/op-devstack/stack/orchestrator.go b/op-devstack/stack/orchestrator.go deleted file mode 100644 index 07efe9d27f0c3..0000000000000 --- a/op-devstack/stack/orchestrator.go +++ /dev/null @@ -1,276 +0,0 @@ -package stack - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" -) - -// Lifecycle represents a controllable component by ControlPlane -type Lifecycle interface { - Start() - Stop() -} - -type ControlAction int - -const ( - Start ControlAction = iota - Stop -) - -// ControlPlane is the interface for the orchestrators to control components of the system. -type ControlPlane interface { - SupervisorState(id ComponentID, action ControlAction) - L2CLNodeState(id ComponentID, action ControlAction) - L2ELNodeState(id ComponentID, action ControlAction) - FakePoSState(id ComponentID, action ControlAction) - RollupBoostNodeState(id ComponentID, action ControlAction) - OPRBuilderNodeState(id ComponentID, action ControlAction) -} - -// Orchestrator is the base interface for all system orchestrators. 
-// It imposes some common things across all orchestrators, but may also have optional extensions, that not every type of backend might support. -type Orchestrator interface { - // P is the test-handle of the orchestrator. - // This may not be a Go-test handle. - // Orchestrators may be instantiated by dev-tools or test-package TestMain functions. - P() devtest.P - - // Hydrate adds all services that the orchestrator is aware of to the given system. - // An orchestrator may be asked to hydrate different systems, one for each test. - Hydrate(sys ExtensibleSystem) - - ControlPlane() ControlPlane - - Type() compat.Type -} - -type TimeTravelOrchestrator interface { - EnableTimeTravel() -} - -// GateWithRemediation is an example of a test-gate that checks a system and may use an orchestrator to remediate any shortcomings. -// func GateWithRemediation(sys System, orchestrator Orchestrator) { -// step 1: check if system already does the right thing -// step 2: if not, check if orchestrator can help us -// step 3: maybe try different things, if none work, test-skip -// } - -type SystemHook interface { - // PreHydrate runs before a system is hydrated, - // to prepare settings on the system like logging, or inspect test-scope - PreHydrate(sys System) - - // PostHydrate runs after a system is hydrated, to run any checks. - // This may register validation that runs at the end of the test, using the sys.T().Cleanup function. - PostHydrate(sys System) -} - -// ApplyOptionLifecycle applies all option lifecycle stages to the given orchestrator -func ApplyOptionLifecycle[O Orchestrator](opt Option[O], orch O) { - opt.BeforeDeploy(orch) - opt.Deploy(orch) - opt.AfterDeploy(orch) - opt.Finally(orch) -} - -// Option is used to define a change that inspects and/or changes a System during the lifecycle. 
-type Option[O Orchestrator] interface { - // BeforeDeploy runs before any chain is created/deployed - BeforeDeploy(orch O) - // Deploy runs the deployment - Deploy(orch O) - // AfterDeploy runs after chains are created/deployed - AfterDeploy(orch O) - // Finally runs at the very end of orchestrator setup, - // but before any test-scope is created. - Finally(orch O) - // SystemHook is embedded: Options may expose system hooks, to run in test-scope. - SystemHook -} - -type CommonOption = Option[Orchestrator] - -// CombinedOption is a list of options. -// For each option lifecycle stage, all options are applied, left to right. -type CombinedOption[O Orchestrator] []Option[O] - -var _ CommonOption = (CombinedOption[Orchestrator])(nil) - -// Combine is a method to define a CombinedOption, more readable than a slice definition. -func Combine[O Orchestrator](opts ...Option[O]) CombinedOption[O] { - return CombinedOption[O](opts) -} - -// Add changes the option into a new Option that that first applies the receiver, and then the other options. -// This is a convenience for bundling options together. -func (c *CombinedOption[O]) Add(other ...Option[O]) { - *c = append(*c, other...) -} - -func (c CombinedOption[O]) BeforeDeploy(orch O) { - for _, opt := range c { - opt.BeforeDeploy(orch) - } -} - -func (c CombinedOption[O]) Deploy(orch O) { - for _, opt := range c { - opt.Deploy(orch) - } -} - -func (c CombinedOption[O]) AfterDeploy(orch O) { - for _, opt := range c { - opt.AfterDeploy(orch) - } -} - -func (c CombinedOption[O]) Finally(orch O) { - for _, opt := range c { - opt.Finally(orch) - } -} - -func (c CombinedOption[O]) PreHydrate(sys System) { - for _, opt := range c { - opt.PreHydrate(sys) - } -} - -func (c CombinedOption[O]) PostHydrate(sys System) { - for _, opt := range c { - opt.PostHydrate(sys) - } -} - -// FnOption defines an option with more flexible function instances per option lifecycle stage. -// Each nil attribute is simply a no-op when not set. 
-type FnOption[O Orchestrator] struct { - BeforeDeployFn func(orch O) - DeployFn func(orch O) - AfterDeployFn func(orch O) - FinallyFn func(orch O) - PreHydrateFn func(sys System) - PostHydrateFn func(sys System) -} - -var _ CommonOption = (*FnOption[Orchestrator])(nil) - -func (f FnOption[O]) BeforeDeploy(orch O) { - if f.BeforeDeployFn != nil { - f.BeforeDeployFn(orch) - } -} - -func (f FnOption[O]) Deploy(orch O) { - if f.DeployFn != nil { - f.DeployFn(orch) - } -} - -func (f FnOption[O]) AfterDeploy(orch O) { - if f.AfterDeployFn != nil { - f.AfterDeployFn(orch) - } -} - -func (f FnOption[O]) Finally(orch O) { - if f.FinallyFn != nil { - f.FinallyFn(orch) - } -} - -func (f FnOption[O]) PreHydrate(sys System) { - if f.PreHydrateFn != nil { - f.PreHydrateFn(sys) - } -} - -func (f FnOption[O]) PostHydrate(sys System) { - if f.PostHydrateFn != nil { - f.PostHydrateFn(sys) - } -} - -// BeforeDeploy registers a function to run before the deployment stage of the orchestrator. -// This may be used to customize deployment settings. -func BeforeDeploy[O Orchestrator](fn func(orch O)) Option[O] { - return FnOption[O]{BeforeDeployFn: fn} -} - -// Deploy registers a function to run during the deployment stage of the orchestrator. -// This may be used to perform deployments. -func Deploy[O Orchestrator](fn func(orch O)) Option[O] { - return FnOption[O]{DeployFn: fn} -} - -// AfterDeploy registers a function to run after the deployment stage of the orchestrator. -// This may be used to customize the orchestrator, after having deployment configuration in place. -func AfterDeploy[O Orchestrator](fn func(orch O)) Option[O] { - return FnOption[O]{AfterDeployFn: fn} -} - -// Finally registers a function to run at the end of orchestrator setup. -// This may be used for any orchestrator post-validation, -// or to export any of the now ready orchestrator resources. 
-func Finally[O Orchestrator](fn func(orch O)) Option[O] { - return FnOption[O]{FinallyFn: fn} -} - -// PreHydrate hooks up an option callback to run before a new System has been hydrated by the Orchestrator. -func PreHydrate[O Orchestrator](fn func(sys System)) Option[O] { - return FnOption[O]{PostHydrateFn: fn} -} - -// PostHydrate hooks up an option callback to run when a new System has been hydrated by the Orchestrator. -// This is essentially a test-case preamble, -// to globally configure checks or gates that should run on the test-scope level. -// Test post-checks can be configured with sys.T().Cleanup(...). -func PostHydrate[O Orchestrator](fn func(sys System)) Option[O] { - return FnOption[O]{PostHydrateFn: fn} -} - -// MakeCommon makes the type-specific option a common option. -// If the result runs with a different orchestrator type than expected -// the actual typed option will not run. -// This can be used to mix in customizations. -// Later common options should verify the orchestrator has the properties it needs to have. 
-func MakeCommon[O Orchestrator](opt Option[O]) CommonOption { - return FnOption[Orchestrator]{ - BeforeDeployFn: func(orch Orchestrator) { - if o, ok := orch.(O); ok { - opt.BeforeDeploy(o) - } else { - orch.P().Logger().Debug("BeforeDeploy option does not apply to this orchestrator type") - } - }, - DeployFn: func(orch Orchestrator) { - if o, ok := orch.(O); ok { - opt.Deploy(o) - } else { - orch.P().Logger().Debug("Deploy option does not apply to this orchestrator type") - } - }, - AfterDeployFn: func(orch Orchestrator) { - if o, ok := orch.(O); ok { - opt.AfterDeploy(o) - } else { - orch.P().Logger().Debug("AfterDeploy option does not apply to this orchestrator type") - } - }, - FinallyFn: func(orch Orchestrator) { - if o, ok := orch.(O); ok { - opt.Finally(o) - } else { - orch.P().Logger().Debug("Finally option does not apply to this orchestrator type") - } - }, - PreHydrateFn: func(sys System) { - opt.PreHydrate(sys) - }, - PostHydrateFn: func(sys System) { - opt.PostHydrate(sys) - }, - } -} diff --git a/op-devstack/stack/registry.go b/op-devstack/stack/registry.go deleted file mode 100644 index 791c292c5307e..0000000000000 --- a/op-devstack/stack/registry.go +++ /dev/null @@ -1,364 +0,0 @@ -package stack - -import ( - "sync" - - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// Registrable is the interface that components must implement to be stored in the Registry. -// It provides a way to get the component's ID as a ComponentID. -type Registrable interface { - // RegistryID returns the ComponentID for this component. - // This is used as the key in the unified registry. - RegistryID() ComponentID -} - -// Registry is a unified storage for all components in the system. 
-// It replaces multiple type-specific maps with a single registry that supports: -// - Type-safe access via generic functions -// - Secondary indexes by Kind and ChainID -// - Thread-safe concurrent access -type Registry struct { - mu sync.RWMutex - - // Primary storage: ComponentID -> component value - components map[ComponentID]any - - // Secondary index: ComponentKind -> list of ComponentIDs - byKind map[ComponentKind][]ComponentID - - // Secondary index: ChainID -> list of ComponentIDs - byChainID map[eth.ChainID][]ComponentID -} - -type registryEntry struct { - id ComponentID - component any -} - -// NewRegistry creates a new empty Registry. -func NewRegistry() *Registry { - return &Registry{ - components: make(map[ComponentID]any), - byKind: make(map[ComponentKind][]ComponentID), - byChainID: make(map[eth.ChainID][]ComponentID), - } -} - -// Register adds a component to the registry. -// If a component with the same ID already exists, it is replaced. -func (r *Registry) Register(id ComponentID, component any) { - r.mu.Lock() - defer r.mu.Unlock() - - // Check if this ID already exists (for index cleanup) - _, exists := r.components[id] - if exists { - // Remove from indexes before re-adding - r.removeFromIndexesLocked(id) - } - - // Store in primary map - r.components[id] = component - - // Add to kind index - r.byKind[id.Kind()] = append(r.byKind[id.Kind()], id) - - // Add to chainID index (if applicable) - if id.HasChainID() { - chainID := id.ChainID() - if chainID != (eth.ChainID{}) { - r.byChainID[chainID] = append(r.byChainID[chainID], id) - } - } -} - -// RegisterComponent registers a Registrable component using its RegistryID. -func (r *Registry) RegisterComponent(component Registrable) { - r.Register(component.RegistryID(), component) -} - -// Unregister removes a component from the registry. 
-func (r *Registry) Unregister(id ComponentID) { - r.mu.Lock() - defer r.mu.Unlock() - - if _, exists := r.components[id]; !exists { - return - } - - delete(r.components, id) - r.removeFromIndexesLocked(id) -} - -// removeFromIndexesLocked removes an ID from secondary indexes. -// Caller must hold the write lock. -func (r *Registry) removeFromIndexesLocked(id ComponentID) { - // Remove from kind index - kind := id.Kind() - ids := r.byKind[kind] - for i, existingID := range ids { - if existingID == id { - r.byKind[kind] = append(ids[:i], ids[i+1:]...) - break - } - } - - // Remove from chainID index - if id.HasChainID() { - chainID := id.ChainID() - if chainID != (eth.ChainID{}) { - ids := r.byChainID[chainID] - for i, existingID := range ids { - if existingID == id { - r.byChainID[chainID] = append(ids[:i], ids[i+1:]...) - break - } - } - } - } -} - -// Get retrieves a component by its ID. -// Returns nil and false if the component is not found. -func (r *Registry) Get(id ComponentID) (any, bool) { - r.mu.RLock() - defer r.mu.RUnlock() - - component, ok := r.components[id] - return component, ok -} - -// Has returns true if a component with the given ID exists. -func (r *Registry) Has(id ComponentID) bool { - r.mu.RLock() - defer r.mu.RUnlock() - - _, ok := r.components[id] - return ok -} - -// GetByKind returns all components of a specific kind. -func (r *Registry) GetByKind(kind ComponentKind) []any { - r.mu.RLock() - defer r.mu.RUnlock() - - ids := r.byKind[kind] - result := make([]any, 0, len(ids)) - for _, id := range ids { - if component, ok := r.components[id]; ok { - result = append(result, component) - } - } - return result -} - -// GetByChainID returns all components associated with a specific chain. 
-func (r *Registry) GetByChainID(chainID eth.ChainID) []any { - r.mu.RLock() - defer r.mu.RUnlock() - - ids := r.byChainID[chainID] - result := make([]any, 0, len(ids)) - for _, id := range ids { - if component, ok := r.components[id]; ok { - result = append(result, component) - } - } - return result -} - -// IDsByKind returns all component IDs of a specific kind. -func (r *Registry) IDsByKind(kind ComponentKind) []ComponentID { - r.mu.RLock() - defer r.mu.RUnlock() - - ids := r.byKind[kind] - result := make([]ComponentID, len(ids)) - copy(result, ids) - return result -} - -// IDsByChainID returns all component IDs associated with a specific chain. -func (r *Registry) IDsByChainID(chainID eth.ChainID) []ComponentID { - r.mu.RLock() - defer r.mu.RUnlock() - - ids := r.byChainID[chainID] - result := make([]ComponentID, len(ids)) - copy(result, ids) - return result -} - -// AllIDs returns all component IDs in the registry. -func (r *Registry) AllIDs() []ComponentID { - r.mu.RLock() - defer r.mu.RUnlock() - - result := make([]ComponentID, 0, len(r.components)) - for id := range r.components { - result = append(result, id) - } - return result -} - -// All returns all components in the registry. -func (r *Registry) All() []any { - r.mu.RLock() - defer r.mu.RUnlock() - - result := make([]any, 0, len(r.components)) - for _, component := range r.components { - result = append(result, component) - } - return result -} - -// Len returns the number of components in the registry. -func (r *Registry) Len() int { - r.mu.RLock() - defer r.mu.RUnlock() - - return len(r.components) -} - -// Range calls fn for each component in the registry. -// If fn returns false, iteration stops. 
-func (r *Registry) Range(fn func(id ComponentID, component any) bool) { - r.mu.RLock() - entries := make([]registryEntry, 0, len(r.components)) - for id, component := range r.components { - entries = append(entries, registryEntry{id: id, component: component}) - } - r.mu.RUnlock() - - for _, entry := range entries { - if !fn(entry.id, entry.component) { - break - } - } -} - -// RangeByKind calls fn for each component of a specific kind. -// If fn returns false, iteration stops. -func (r *Registry) RangeByKind(kind ComponentKind, fn func(id ComponentID, component any) bool) { - r.mu.RLock() - ids := r.byKind[kind] - entries := make([]registryEntry, 0, len(ids)) - for _, id := range ids { - if component, ok := r.components[id]; ok { - entries = append(entries, registryEntry{id: id, component: component}) - } - } - r.mu.RUnlock() - - for _, entry := range entries { - if !fn(entry.id, entry.component) { - break - } - } -} - -// RangeByChainID calls fn for each component associated with a specific chain. -// If fn returns false, iteration stops. -func (r *Registry) RangeByChainID(chainID eth.ChainID, fn func(id ComponentID, component any) bool) { - r.mu.RLock() - ids := r.byChainID[chainID] - entries := make([]registryEntry, 0, len(ids)) - for _, id := range ids { - if component, ok := r.components[id]; ok { - entries = append(entries, registryEntry{id: id, component: component}) - } - } - r.mu.RUnlock() - - for _, entry := range entries { - if !fn(entry.id, entry.component) { - break - } - } -} - -// Clear removes all components from the registry. -func (r *Registry) Clear() { - r.mu.Lock() - defer r.mu.Unlock() - - r.components = make(map[ComponentID]any) - r.byKind = make(map[ComponentKind][]ComponentID) - r.byChainID = make(map[eth.ChainID][]ComponentID) -} - -// Type-safe generic accessor functions. -// These provide compile-time type safety when working with the registry. - -// RegistryGet retrieves a component by its ID and returns it as the expected type. 
-// Returns the zero value and false if not found or if the type doesn't match. -func RegistryGet[T any](r *Registry, id ComponentID) (T, bool) { - component, ok := r.Get(id) - if !ok { - var zero T - return zero, false - } - - typed, ok := component.(T) - if !ok { - var zero T - return zero, false - } - - return typed, true -} - -// RegistryGetByKind retrieves all components of a specific kind and casts them to the expected type. -// Components that don't match the expected type are skipped. -func RegistryGetByKind[T any](r *Registry, kind ComponentKind) []T { - components := r.GetByKind(kind) - result := make([]T, 0, len(components)) - for _, component := range components { - if typed, ok := component.(T); ok { - result = append(result, typed) - } - } - return result -} - -// RegistryGetByChainID retrieves all components for a chain and casts them to the expected type. -// Components that don't match the expected type are skipped. -func RegistryGetByChainID[T any](r *Registry, chainID eth.ChainID) []T { - components := r.GetByChainID(chainID) - result := make([]T, 0, len(components)) - for _, component := range components { - if typed, ok := component.(T); ok { - result = append(result, typed) - } - } - return result -} - -// RegistryRange calls fn for each component of the expected type. -// Components that don't match the expected type are skipped. -func RegistryRange[T any](r *Registry, fn func(id ComponentID, component T) bool) { - r.Range(func(id ComponentID, component any) bool { - if typed, ok := component.(T); ok { - return fn(id, typed) - } - return true // skip non-matching types - }) -} - -// RegistryRangeByKind calls fn for each component of a specific kind that matches the expected type. 
-func RegistryRangeByKind[T any](r *Registry, kind ComponentKind, fn func(id ComponentID, component T) bool) { - r.RangeByKind(kind, func(id ComponentID, component any) bool { - if typed, ok := component.(T); ok { - return fn(id, typed) - } - return true - }) -} - -// RegistryRegister is a type-safe way to register a component with an ID. -func RegistryRegister[T any](r *Registry, id ComponentID, component T) { - r.Register(id, component) -} diff --git a/op-devstack/stack/registry_test.go b/op-devstack/stack/registry_test.go deleted file mode 100644 index e4951da3e54da..0000000000000 --- a/op-devstack/stack/registry_test.go +++ /dev/null @@ -1,597 +0,0 @@ -package stack - -import ( - "sync" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/stretchr/testify/require" -) - -// mockComponent is a test component that implements Registrable. -type mockComponent struct { - id ComponentID - name string -} - -func (m *mockComponent) RegistryID() ComponentID { - return m.id -} - -func requireCompletesWithoutDeadlock(t *testing.T, fn func()) { - t.Helper() - - done := make(chan struct{}) - go func() { - fn() - close(done) - }() - - select { - case <-done: - case <-time.After(2 * time.Second): - t.Fatal("operation timed out (likely callback executed under lock)") - } -} - -func TestRegistry_RegisterAndGet(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "batcher1", chainID) - component := &mockComponent{id: id, name: "test-batcher"} - - // Register - r.Register(id, component) - - // Get - got, ok := r.Get(id) - require.True(t, ok) - require.Equal(t, component, got) - - // Check Has - require.True(t, r.Has(id)) - - // Check non-existent - otherId := NewComponentID(KindL2Batcher, "batcher2", chainID) - _, ok = r.Get(otherId) - require.False(t, ok) - require.False(t, r.Has(otherId)) -} - -func TestRegistry_RegisterComponent(t *testing.T) { - r := NewRegistry() - - 
chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "batcher1", chainID) - component := &mockComponent{id: id, name: "test-batcher"} - - // Register using RegisterComponent - r.RegisterComponent(component) - - // Get - got, ok := r.Get(id) - require.True(t, ok) - require.Equal(t, component, got) -} - -func TestRegistry_Unregister(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "batcher1", chainID) - component := &mockComponent{id: id, name: "test-batcher"} - - r.Register(id, component) - require.True(t, r.Has(id)) - - r.Unregister(id) - require.False(t, r.Has(id)) - - // Unregistering again should be a no-op - r.Unregister(id) - require.False(t, r.Has(id)) -} - -func TestRegistry_Replace(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "batcher1", chainID) - component1 := &mockComponent{id: id, name: "original"} - component2 := &mockComponent{id: id, name: "replacement"} - - r.Register(id, component1) - r.Register(id, component2) // Replace - - got, ok := r.Get(id) - require.True(t, ok) - require.Equal(t, component2, got) - - // Should only have one entry - require.Equal(t, 1, r.Len()) - - // Should only be in indexes once - ids := r.IDsByKind(KindL2Batcher) - require.Len(t, ids, 1) -} - -func TestRegistry_GetByKind(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - // Register multiple batchers - batcher1 := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher1", chainID), - name: "batcher1", - } - batcher2 := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher2", chainID), - name: "batcher2", - } - // Register a proposer (different kind) - proposer := &mockComponent{ - id: NewComponentID(KindL2Proposer, "proposer1", chainID), - name: "proposer1", - } - - r.Register(batcher1.id, batcher1) - r.Register(batcher2.id, batcher2) - r.Register(proposer.id, 
proposer) - - // Get batchers - batchers := r.GetByKind(KindL2Batcher) - require.Len(t, batchers, 2) - - // Get proposers - proposers := r.GetByKind(KindL2Proposer) - require.Len(t, proposers, 1) - - // Get non-existent kind - challengers := r.GetByKind(KindL2Challenger) - require.Len(t, challengers, 0) -} - -func TestRegistry_GetByChainID(t *testing.T) { - r := NewRegistry() - - chainID1 := eth.ChainIDFromUInt64(420) - chainID2 := eth.ChainIDFromUInt64(421) - - // Components on chain 420 - batcher1 := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher1", chainID1), - name: "batcher1", - } - proposer1 := &mockComponent{ - id: NewComponentID(KindL2Proposer, "proposer1", chainID1), - name: "proposer1", - } - - // Component on chain 421 - batcher2 := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher2", chainID2), - name: "batcher2", - } - - r.Register(batcher1.id, batcher1) - r.Register(proposer1.id, proposer1) - r.Register(batcher2.id, batcher2) - - // Get all on chain 420 - chain420 := r.GetByChainID(chainID1) - require.Len(t, chain420, 2) - - // Get all on chain 421 - chain421 := r.GetByChainID(chainID2) - require.Len(t, chain421, 1) - - // Non-existent chain - chain999 := r.GetByChainID(eth.ChainIDFromUInt64(999)) - require.Len(t, chain999, 0) -} - -func TestRegistry_KeyOnlyComponents(t *testing.T) { - r := NewRegistry() - - // Key-only components (like Supervisor) don't have a ChainID - supervisor := &mockComponent{ - id: NewComponentIDKeyOnly(KindSupervisor, "supervisor1"), - name: "supervisor1", - } - - r.Register(supervisor.id, supervisor) - - // Should be findable by kind - supervisors := r.GetByKind(KindSupervisor) - require.Len(t, supervisors, 1) - - // Should not appear in any chain index - // (GetByChainID with zero ChainID should not return it) - byChain := r.GetByChainID(eth.ChainID{}) - require.Len(t, byChain, 0) -} - -func TestRegistry_ChainOnlyComponents(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(1) 
- - // Chain-only components (like L1Network) don't have a key - network := &mockComponent{ - id: NewComponentIDChainOnly(KindL1Network, chainID), - name: "mainnet", - } - - r.Register(network.id, network) - - // Should be findable by kind - networks := r.GetByKind(KindL1Network) - require.Len(t, networks, 1) - - // Should be findable by chain - byChain := r.GetByChainID(chainID) - require.Len(t, byChain, 1) -} - -func TestRegistry_IDsByKind(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id1 := NewComponentID(KindL2Batcher, "batcher1", chainID) - id2 := NewComponentID(KindL2Batcher, "batcher2", chainID) - - r.Register(id1, &mockComponent{id: id1}) - r.Register(id2, &mockComponent{id: id2}) - - ids := r.IDsByKind(KindL2Batcher) - require.Len(t, ids, 2) - require.Contains(t, ids, id1) - require.Contains(t, ids, id2) -} - -func TestRegistry_AllAndLen(t *testing.T) { - r := NewRegistry() - - require.Equal(t, 0, r.Len()) - require.Len(t, r.All(), 0) - require.Len(t, r.AllIDs(), 0) - - chainID := eth.ChainIDFromUInt64(420) - id1 := NewComponentID(KindL2Batcher, "batcher1", chainID) - id2 := NewComponentID(KindL2Proposer, "proposer1", chainID) - - r.Register(id1, &mockComponent{id: id1}) - r.Register(id2, &mockComponent{id: id2}) - - require.Equal(t, 2, r.Len()) - require.Len(t, r.All(), 2) - require.Len(t, r.AllIDs(), 2) -} - -func TestRegistry_Range(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id1 := NewComponentID(KindL2Batcher, "batcher1", chainID) - id2 := NewComponentID(KindL2Batcher, "batcher2", chainID) - - r.Register(id1, &mockComponent{id: id1, name: "b1"}) - r.Register(id2, &mockComponent{id: id2, name: "b2"}) - - // Collect all - var collected []ComponentID - r.Range(func(id ComponentID, component any) bool { - collected = append(collected, id) - return true - }) - require.Len(t, collected, 2) - - // Early termination - collected = nil - r.Range(func(id ComponentID, component any) bool { - 
collected = append(collected, id) - return false // stop after first - }) - require.Len(t, collected, 1) -} - -func TestRegistry_RangeByKind(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - batcher := NewComponentID(KindL2Batcher, "batcher1", chainID) - proposer := NewComponentID(KindL2Proposer, "proposer1", chainID) - - r.Register(batcher, &mockComponent{id: batcher}) - r.Register(proposer, &mockComponent{id: proposer}) - - var collected []ComponentID - r.RangeByKind(KindL2Batcher, func(id ComponentID, component any) bool { - collected = append(collected, id) - return true - }) - require.Len(t, collected, 1) - require.Equal(t, batcher, collected[0]) -} - -func TestRegistry_RangeByChainID(t *testing.T) { - r := NewRegistry() - - chainID1 := eth.ChainIDFromUInt64(420) - chainID2 := eth.ChainIDFromUInt64(421) - - batcher1 := NewComponentID(KindL2Batcher, "batcher1", chainID1) - batcher2 := NewComponentID(KindL2Batcher, "batcher2", chainID2) - - r.Register(batcher1, &mockComponent{id: batcher1}) - r.Register(batcher2, &mockComponent{id: batcher2}) - - var collected []ComponentID - r.RangeByChainID(chainID1, func(id ComponentID, component any) bool { - collected = append(collected, id) - return true - }) - require.Len(t, collected, 1) - require.Equal(t, batcher1, collected[0]) - - // Test early termination - collected = nil - r.RangeByChainID(chainID1, func(id ComponentID, component any) bool { - collected = append(collected, id) - return false // stop immediately - }) - require.Len(t, collected, 1) -} - -func TestRegistry_Range_CallbackCanMutateRegistry(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "batcher1", chainID) - r.Register(id, &mockComponent{id: id}) - - requireCompletesWithoutDeadlock(t, func() { - r.Range(func(id ComponentID, component any) bool { - r.Clear() - return false - }) - }) - - require.Equal(t, 0, r.Len()) -} - -func 
TestRegistry_RangeByKind_CallbackCanMutateRegistry(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - oldID := NewComponentID(KindL2Batcher, "batcher1", chainID) - newID := NewComponentID(KindL2Batcher, "batcher2", chainID) - r.Register(oldID, &mockComponent{id: oldID}) - - requireCompletesWithoutDeadlock(t, func() { - r.RangeByKind(KindL2Batcher, func(id ComponentID, component any) bool { - r.Unregister(oldID) - r.Register(newID, &mockComponent{id: newID}) - return false - }) - }) - - require.False(t, r.Has(oldID)) - require.True(t, r.Has(newID)) -} - -func TestRegistry_RangeByChainID_CallbackCanMutateRegistry(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - oldID := NewComponentID(KindL2Batcher, "batcher1", chainID) - newID := NewComponentID(KindL2Batcher, "batcher2", chainID) - r.Register(oldID, &mockComponent{id: oldID}) - - requireCompletesWithoutDeadlock(t, func() { - r.RangeByChainID(chainID, func(id ComponentID, component any) bool { - r.Unregister(oldID) - r.Register(newID, &mockComponent{id: newID}) - return false - }) - }) - - require.False(t, r.Has(oldID)) - require.True(t, r.Has(newID)) -} - -func TestRegistry_Clear(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "batcher1", chainID) - r.Register(id, &mockComponent{id: id}) - - require.Equal(t, 1, r.Len()) - - r.Clear() - - require.Equal(t, 0, r.Len()) - require.False(t, r.Has(id)) - require.Len(t, r.GetByKind(KindL2Batcher), 0) - require.Len(t, r.GetByChainID(chainID), 0) -} - -func TestRegistry_ConcurrentAccess(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - var wg sync.WaitGroup - numGoroutines := 100 - - // Concurrent writes - for i := 0; i < numGoroutines; i++ { - wg.Add(1) - go func(i int) { - defer wg.Done() - id := NewComponentID(KindL2Batcher, string(rune('a'+i%26)), chainID) - r.Register(id, &mockComponent{id: id}) - }(i) - } - - 
// Concurrent reads - for i := 0; i < numGoroutines; i++ { - wg.Add(1) - go func() { - defer wg.Done() - _ = r.GetByKind(KindL2Batcher) - _ = r.GetByChainID(chainID) - _ = r.Len() - }() - } - - wg.Wait() - - // Should have some components (exact count depends on key collisions) - require.Greater(t, r.Len(), 0) -} - -// Tests for type-safe generic accessor functions - -func TestRegistryGet_TypeSafe(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id := NewL2BatcherID("batcher1", chainID) - component := &mockComponent{id: id, name: "test-batcher"} - - // Use the ID for generic registry functions - RegistryRegister(r, id, component) - - // Type-safe get - got, ok := RegistryGet[*mockComponent](r, id) - require.True(t, ok) - require.Equal(t, component, got) - - // Wrong type should fail - gotStr, ok := RegistryGet[string](r, id) - require.False(t, ok) - require.Equal(t, "", gotStr) -} - -func TestRegistryGetByKind_TypeSafe(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - batcher1 := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher1", chainID), - name: "batcher1", - } - batcher2 := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher2", chainID), - name: "batcher2", - } - - r.Register(batcher1.id, batcher1) - r.Register(batcher2.id, batcher2) - - // Type-safe get by kind - batchers := RegistryGetByKind[*mockComponent](r, KindL2Batcher) - require.Len(t, batchers, 2) - - // Wrong type returns empty - wrongType := RegistryGetByKind[string](r, KindL2Batcher) - require.Len(t, wrongType, 0) -} - -func TestRegistryGetByChainID_TypeSafe(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - batcher := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher1", chainID), - name: "batcher1", - } - proposer := &mockComponent{ - id: NewComponentID(KindL2Proposer, "proposer1", chainID), - name: "proposer1", - } - - r.Register(batcher.id, batcher) - 
r.Register(proposer.id, proposer) - - // Get all mockComponents on chain - components := RegistryGetByChainID[*mockComponent](r, chainID) - require.Len(t, components, 2) -} - -func TestRegistryRange_TypeSafe(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - batcher := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher1", chainID), - name: "batcher1", - } - r.Register(batcher.id, batcher) - - // Also register a non-mockComponent - r.Register(NewComponentID(KindL2Proposer, "other", chainID), "not a mockComponent") - - var collected []*mockComponent - RegistryRange(r, func(id ComponentID, component *mockComponent) bool { - collected = append(collected, component) - return true - }) - - // Should only collect mockComponents - require.Len(t, collected, 1) - require.Equal(t, batcher, collected[0]) -} - -func TestRegistryRangeByKind_TypeSafe(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - - batcher := &mockComponent{ - id: NewComponentID(KindL2Batcher, "batcher1", chainID), - name: "batcher1", - } - proposer := &mockComponent{ - id: NewComponentID(KindL2Proposer, "proposer1", chainID), - name: "proposer1", - } - - r.Register(batcher.id, batcher) - r.Register(proposer.id, proposer) - - var collected []*mockComponent - RegistryRangeByKind(r, KindL2Batcher, func(id ComponentID, component *mockComponent) bool { - collected = append(collected, component) - return true - }) - - require.Len(t, collected, 1) - require.Equal(t, batcher, collected[0]) -} - -func TestRegistry_UnregisterUpdatesIndexes(t *testing.T) { - r := NewRegistry() - - chainID := eth.ChainIDFromUInt64(420) - id := NewComponentID(KindL2Batcher, "batcher1", chainID) - r.Register(id, &mockComponent{id: id}) - - // Verify indexes before unregister - require.Len(t, r.IDsByKind(KindL2Batcher), 1) - require.Len(t, r.IDsByChainID(chainID), 1) - - r.Unregister(id) - - // Indexes should be updated - require.Len(t, r.IDsByKind(KindL2Batcher), 0) - 
require.Len(t, r.IDsByChainID(chainID), 0) -} diff --git a/op-devstack/stack/rollup_boost.go b/op-devstack/stack/rollup_boost.go index 7079f722a81df..44fd9165e809d 100644 --- a/op-devstack/stack/rollup_boost.go +++ b/op-devstack/stack/rollup_boost.go @@ -7,7 +7,6 @@ import ( // RollupBoostNode is a shim service between an L2 consensus-layer node and an L2 ethereum execution-layer node type RollupBoostNode interface { - ID() ComponentID L2EthClient() apis.L2EthClient L2EngineClient() apis.EngineClient FlashblocksClient() *client.WSClient diff --git a/op-devstack/stack/superchain.go b/op-devstack/stack/superchain.go index b1680bbeffd6c..7f90d16dd254b 100644 --- a/op-devstack/stack/superchain.go +++ b/op-devstack/stack/superchain.go @@ -8,11 +8,3 @@ type SuperchainDeployment interface { ProtocolVersionsAddr() common.Address SuperchainConfigAddr() common.Address } - -// Superchain is a collection of L2 chains with common rules and shared configuration on L1 -type Superchain interface { - Common - ID() ComponentID - - Deployment() SuperchainDeployment -} diff --git a/op-devstack/stack/supernode.go b/op-devstack/stack/supernode.go index 6b5fb48abf632..d9f2a84c24f1f 100644 --- a/op-devstack/stack/supernode.go +++ b/op-devstack/stack/supernode.go @@ -1,44 +1,9 @@ package stack -import ( - "fmt" - "sort" - - "github.com/ethereum-optimism/optimism/op-service/apis" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// SupernodeID is kept as a semantic alias for ComponentID. -// Supernode IDs are key-only IDs with KindSupernode. -type SupernodeID = ComponentID - -func NewSupernodeID(key string, chains ...eth.ChainID) SupernodeID { - var suffix string - for _, chain := range chains { - suffix += chain.String() - } - return NewComponentIDKeyOnly(KindSupernode, fmt.Sprintf("%s-%s", key, suffix)) -} - -func SortSupernodeIDs(ids []SupernodeID) []SupernodeID { - out := append([]SupernodeID(nil), ids...) 
- sort.Slice(out, func(i, j int) bool { - return out[i].Less(out[j]) - }) - return out -} - -func SortSupernodes(elems []Supernode) []Supernode { - out := append([]Supernode(nil), elems...) - sort.Slice(out, func(i, j int) bool { - return out[i].ID().Less(out[j].ID()) - }) - return out -} +import "github.com/ethereum-optimism/optimism/op-service/apis" type Supernode interface { Common - ID() ComponentID QueryAPI() apis.SupernodeQueryAPI } diff --git a/op-devstack/stack/supervisor.go b/op-devstack/stack/supervisor.go index 2deac9fb53634..8ca5f458439e0 100644 --- a/op-devstack/stack/supervisor.go +++ b/op-devstack/stack/supervisor.go @@ -7,7 +7,6 @@ import ( // Supervisor is an interop service, used to cross-verify messages between chains. type Supervisor interface { Common - ID() ComponentID AdminAPI() apis.SupervisorAdminAPI QueryAPI() apis.SupervisorQueryAPI diff --git a/op-devstack/stack/sync_tester.go b/op-devstack/stack/sync_tester.go index d08ccd9902de8..32239ba6f518f 100644 --- a/op-devstack/stack/sync_tester.go +++ b/op-devstack/stack/sync_tester.go @@ -2,11 +2,12 @@ package stack import ( "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type SyncTester interface { Common - ID() ComponentID + ChainID() eth.ChainID API() apis.SyncTester APIWithSession(sessionID string) apis.SyncTester diff --git a/op-devstack/stack/system.go b/op-devstack/stack/system.go deleted file mode 100644 index 85c5cd8debf9b..0000000000000 --- a/op-devstack/stack/system.go +++ /dev/null @@ -1,65 +0,0 @@ -package stack - -import ( - "time" - - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// System represents a collection of L1 and L2 chains, any superchains or clusters, and any peripherals. 
-type System interface { - Common - ComponentRegistry - - Superchain(m SuperchainMatcher) Superchain - Cluster(m ClusterMatcher) Cluster - L1Network(m L1NetworkMatcher) L1Network - L2Network(m L2NetworkMatcher) L2Network - - Network(id eth.ChainID) Network - - Supervisor(m SupervisorMatcher) Supervisor - Supernode(m SupernodeMatcher) Supernode - TestSequencer(id TestSequencerMatcher) TestSequencer - - SuperchainIDs() []ComponentID - ClusterIDs() []ComponentID - L1NetworkIDs() []ComponentID - L2NetworkIDs() []ComponentID - SupervisorIDs() []ComponentID - - Superchains() []Superchain - Clusters() []Cluster - L1Networks() []L1Network - L2Networks() []L2Network - Supervisors() []Supervisor - Supernodes() []Supernode - TestSequencers() []TestSequencer -} - -// ExtensibleSystem is an extension-interface to add new components to the system. -// Regular tests should not be modifying the system. -// Test gates may use this to remediate any shortcomings of an existing system. -type ExtensibleSystem interface { - System - AddSuperchain(v Superchain) - AddCluster(v Cluster) - AddL1Network(v L1Network) - AddL2Network(v L2Network) - AddSupervisor(v Supervisor) - AddSupernode(v Supernode) - AddTestSequencer(v TestSequencer) - AddSyncTester(v SyncTester) -} - -type TimeTravelClock interface { - AdvanceTime(d time.Duration) -} - -// TimeTravelSystem is an extension-interface to support time travel. 
-type TimeTravelSystem interface { - System - SetTimeTravelClock(cl TimeTravelClock) - TimeTravelEnabled() bool - AdvanceTime(amount time.Duration) -} diff --git a/op-devstack/stack/test_sequencer.go b/op-devstack/stack/test_sequencer.go index 072bbbb0b4ccd..94aae09187fdb 100644 --- a/op-devstack/stack/test_sequencer.go +++ b/op-devstack/stack/test_sequencer.go @@ -8,7 +8,6 @@ import ( // TestSequencer type TestSequencer interface { Common - ID() ComponentID AdminAPI() apis.TestSequencerAdminAPI BuildAPI() apis.TestSequencerBuildAPI diff --git a/op-devstack/sysext/addrbook.go b/op-devstack/sysext/addrbook.go deleted file mode 100644 index e26225c3debb3..0000000000000 --- a/op-devstack/sysext/addrbook.go +++ /dev/null @@ -1,66 +0,0 @@ -package sysext - -import ( - "github.com/ethereum/go-ethereum/common" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -const ( - ProtocolVersionsAddressName = "ProtocolVersionsProxy" - SuperchainConfigAddressName = "SuperchainConfigProxy" - - SystemConfigAddressName = "systemConfigProxy" - DisputeGameFactoryName = "disputeGameFactoryProxy" - L1StandardBridgeProxyName = "l1StandardBridgeProxy" -) - -type l1AddressBook struct { - protocolVersions common.Address - superchainConfig common.Address -} - -func newL1AddressBook(t devtest.T, addresses descriptors.AddressMap) *l1AddressBook { - // TODO(#15817) op-devstack: sysext: fix address book - return &l1AddressBook{} -} - -func (a *l1AddressBook) ProtocolVersionsAddr() common.Address { - return a.protocolVersions -} - -func (a *l1AddressBook) SuperchainConfigAddr() common.Address { - return a.superchainConfig -} - -var _ stack.SuperchainDeployment = (*l1AddressBook)(nil) - -type l2AddressBook struct { - systemConfig common.Address - disputeGameFactory common.Address - l1StandardBridge common.Address -} - -func newL2AddressBook(l1Addresses 
descriptors.AddressMap) *l2AddressBook { - return &l2AddressBook{ - systemConfig: l1Addresses[SystemConfigAddressName], - disputeGameFactory: l1Addresses[DisputeGameFactoryName], - l1StandardBridge: l1Addresses[L1StandardBridgeProxyName], - } -} - -func (a *l2AddressBook) SystemConfigProxyAddr() common.Address { - return a.systemConfig -} - -func (a *l2AddressBook) DisputeGameFactoryProxyAddr() common.Address { - return a.disputeGameFactory -} - -func (a *l2AddressBook) L1StandardBridgeProxyAddr() common.Address { - return a.l1StandardBridge -} - -var _ stack.L2Deployment = (*l2AddressBook)(nil) diff --git a/op-devstack/sysext/control_plane.go b/op-devstack/sysext/control_plane.go deleted file mode 100644 index 157894df5abf3..0000000000000 --- a/op-devstack/sysext/control_plane.go +++ /dev/null @@ -1,53 +0,0 @@ -package sysext - -import ( - "github.com/ethereum-optimism/optimism/devnet-sdk/controller/surface" - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -type ControlPlane struct { - o *Orchestrator -} - -func (c *ControlPlane) setLifecycleState(svcID string, mode stack.ControlAction) { - ctx := c.o.P().Ctx() - require := c.o.P().Require() - - ctl, err := c.o.env.Control() - require.NoError(err, "Error getting control plane") - lc, ok := ctl.(surface.ServiceLifecycleSurface) - require.True(ok, "Control plane does not support service lifecycle management") - - switch mode { - case stack.Start: - require.NoError(lc.StartService(ctx, svcID), "Error starting service") - case stack.Stop: - require.NoError(lc.StopService(ctx, svcID), "Error stopping service") - } -} - -func (c *ControlPlane) SupervisorState(id stack.ComponentID, mode stack.ControlAction) { - c.setLifecycleState(id.Key(), mode) -} - -func (c *ControlPlane) L2CLNodeState(id stack.ComponentID, mode stack.ControlAction) { - c.setLifecycleState(id.Key(), mode) -} - -func (c *ControlPlane) L2ELNodeState(id stack.ComponentID, mode stack.ControlAction) { - c.setLifecycleState(id.Key(), mode) 
-} - -func (c *ControlPlane) FakePoSState(id stack.ComponentID, mode stack.ControlAction) { - panic("not implemented: plug in kurtosis wrapper, or gate for the test that uses this method to not run in kurtosis") -} - -func (c *ControlPlane) RollupBoostNodeState(id stack.ComponentID, mode stack.ControlAction) { - c.setLifecycleState(id.Key(), mode) -} - -func (c *ControlPlane) OPRBuilderNodeState(id stack.ComponentID, mode stack.ControlAction) { - c.setLifecycleState(id.Key(), mode) -} - -var _ stack.ControlPlane = (*ControlPlane)(nil) diff --git a/op-devstack/sysext/helpers.go b/op-devstack/sysext/helpers.go deleted file mode 100644 index c6160a43802d0..0000000000000 --- a/op-devstack/sysext/helpers.go +++ /dev/null @@ -1,139 +0,0 @@ -package sysext - -import ( - "fmt" - "net/http" - "net/url" - "strings" - - "github.com/ethereum/go-ethereum/rpc" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-service/client" -) - -const ( - ELServiceName = "el" - CLServiceName = "cl" - OPRBuilderServiceName = "op-rbuilder" - RollupBoostServiceName = "rollup-boost" - ConductorServiceName = "conductor" - - HTTPProtocol = "http" - RPCProtocol = "rpc" - MetricsProtocol = "metrics" - WssProtocol = "wss" - WebsocketFlashblocksProtocol = "ws-flashblocks" - - FeatureInterop = "interop" -) - -func (orch *Orchestrator) rpcClient(t devtest.T, service *descriptors.Service, protocol string, path string, opts ...client.RPCOption) client.RPC { - t.Helper() - - endpoint, header, err := orch.findProtocolService(service, protocol) - t.Require().NoError(err) - - endpoint, err = url.JoinPath(endpoint, path) - t.Require().NoError(err) - - if !orch.useEagerRPCClients { - opts = append(opts, client.WithLazyDial()) - } - - if orch.env.Env.ReverseProxyURL != "" && len(header) > 0 && !orch.useDirectCnx { - opts = append( - opts, - client.WithGethRPCOptions( - rpc.WithHeaders(header), 
- // we need both Header["Host"] and req.Host to be set - rpc.WithHTTPClient(&http.Client{ - Transport: hostAwareRoundTripper(header), - }), - ), - ) - } - - cl, err := client.NewRPC(t.Ctx(), t.Logger(), endpoint, opts...) - t.Require().NoError(err) - t.Cleanup(cl.Close) - return cl -} - -func (orch *Orchestrator) httpClient(t devtest.T, service *descriptors.Service, protocol string, path string) *client.BasicHTTPClient { - t.Helper() - - endpoint, header, err := orch.findProtocolService(service, protocol) - t.Require().NoError(err) - - endpoint, err = url.JoinPath(endpoint, path) - t.Require().NoError(err) - - opts := []client.BasicHTTPClientOption{} - - if orch.env.Env.ReverseProxyURL != "" && !orch.useDirectCnx { - opts = append( - opts, - client.WithHeader(header), - client.WithTransport(hostAwareRoundTripper(header)), - ) - } - - return client.NewBasicHTTPClient(endpoint, t.Logger(), opts...) -} - -func (orch *Orchestrator) findProtocolService(service *descriptors.Service, protocol string) (string, http.Header, error) { - for proto, endpoint := range service.Endpoints { - if proto == protocol { - // Force direct connect for websocket protocols - if protocol != WebsocketFlashblocksProtocol { - if orch.env.Env.ReverseProxyURL != "" && len(endpoint.ReverseProxyHeader) > 0 && !orch.useDirectCnx { - // For WebSocket protocols, convert HTTP URL to WebSocket URL - if protocol == WebsocketFlashblocksProtocol { - wsURL := strings.NewReplacer("http://", "ws://", "https://", "wss://").Replace(orch.env.Env.ReverseProxyURL) - wsURL += "/ws" - - return wsURL, endpoint.ReverseProxyHeader, nil - } - return orch.env.Env.ReverseProxyURL, endpoint.ReverseProxyHeader, nil - } - } - - port := endpoint.Port - if orch.usePrivatePorts { - port = endpoint.PrivatePort - } - scheme := endpoint.Scheme - if scheme == "" { - scheme = HTTPProtocol - } - host := endpoint.Host - path := "" - if strings.Contains(host, "/") { - parts := strings.SplitN(host, "/", 2) - host = parts[0] - path = 
"/" + parts[1] - } - return fmt.Sprintf("%s://%s:%d%s", scheme, host, port, path), nil, nil - } - } - return "", nil, fmt.Errorf("protocol %s not found", protocol) -} - -type hostSettingRoundTripper struct { - host string - rt http.RoundTripper -} - -func (h *hostSettingRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - req.Host = h.host - return h.rt.RoundTrip(req) -} - -func hostAwareRoundTripper(header http.Header) http.RoundTripper { - return &hostSettingRoundTripper{ - host: header.Get("Host"), - rt: http.DefaultTransport, - } -} diff --git a/op-devstack/sysext/l1.go b/op-devstack/sysext/l1.go deleted file mode 100644 index 0ed5932e7c3a0..0000000000000 --- a/op-devstack/sysext/l1.go +++ /dev/null @@ -1,75 +0,0 @@ -package sysext - -import ( - "fmt" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -func (o *Orchestrator) hydrateL1(system stack.ExtensibleSystem) { - require := o.p.Require() - t := system.T() - - env := o.env - - commonConfig := shim.NewCommonConfig(t) - l1ID := eth.ChainIDFromBig(env.Env.L1.Config.ChainID) - l1 := shim.NewL1Network(shim.L1NetworkConfig{ - NetworkConfig: shim.NetworkConfig{ - CommonConfig: commonConfig, - ChainConfig: env.Env.L1.Config, - }, - ID: stack.NewL1NetworkID(l1ID), - }) - - opts := []client.RPCOption{} - - txTimeout := 30 * time.Second - if o.compatType == compat.Persistent { - // Increase the timeout by default for persistent devnets, but not for kurtosis - txTimeout = 5 * time.Minute - opts = append(opts, client.WithCallTimeout(time.Minute*5), client.WithBatchCallTimeout(time.Minute*10)) - } - - for idx, node := range env.Env.L1.Nodes { - elService, ok := node.Services[ELServiceName] - - require.True(ok, "need L1 EL service %d", idx) - - 
l1.AddL1ELNode(shim.NewL1ELNode(shim.L1ELNodeConfig{ - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: commonConfig, - Client: o.rpcClient(t, elService, RPCProtocol, "/", opts...), - ChainID: l1ID, - TransactionTimeout: txTimeout, - }, - ID: stack.NewL1ELNodeID(elService.Name, l1ID), - })) - - clService, ok := node.Services[CLServiceName] - require.True(ok, "need L1 CL service %d", idx) - - l1.AddL1CLNode(shim.NewL1CLNode(shim.L1CLNodeConfig{ - ID: stack.NewL1CLNodeID(clService.Name, l1ID), - CommonConfig: commonConfig, - Client: o.httpClient(t, clService, HTTPProtocol, "/"), - })) - } - - if faucet, ok := env.Env.L1.Services["faucet"]; ok { - for _, instance := range faucet { - l1.AddFaucet(shim.NewFaucet(shim.FaucetConfig{ - CommonConfig: commonConfig, - Client: o.rpcClient(t, instance, RPCProtocol, fmt.Sprintf("/chain/%s", env.Env.L1.Config.ChainID.String()), opts...), - ID: stack.NewFaucetID(instance.Name, l1ID), - })) - } - } - - system.AddL1Network(l1) -} diff --git a/op-devstack/sysext/l2.go b/op-devstack/sysext/l2.go deleted file mode 100644 index 9d2b9450ba87a..0000000000000 --- a/op-devstack/sysext/l2.go +++ /dev/null @@ -1,553 +0,0 @@ -package sysext - -import ( - "crypto/ecdsa" - "encoding/hex" - "fmt" - "net/http" - "strings" - "time" - - "math/big" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/rpc" -) - -func getL2ID(net 
*descriptors.L2Chain) stack.ComponentID { - return stack.NewL2NetworkID(eth.ChainIDFromBig(net.Config.ChainID)) -} - -func (o *Orchestrator) hydrateL2(net *descriptors.L2Chain, system stack.ExtensibleSystem) { - t := system.T() - commonConfig := shim.NewCommonConfig(t) - - env := o.env - l2ID := getL2ID(net) - - l1 := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(eth.ChainIDFromBig(env.Env.L1.Config.ChainID)))) - - cfg := shim.L2NetworkConfig{ - NetworkConfig: shim.NetworkConfig{ - CommonConfig: commonConfig, - ChainConfig: net.Config, - }, - ID: l2ID, - RollupConfig: net.RollupConfig, - Deployment: newL2AddressBook(net.Addresses), - Keys: o.defineSystemKeys(t, net), - Superchain: system.Superchain(stack.ByID[stack.Superchain](stack.NewSuperchainID(env.Env.Name))), - L1: l1, - } - if o.isInterop() { - cfg.Cluster = system.Cluster(stack.ByID[stack.Cluster](stack.NewClusterID(env.Env.Name))) - } - - opts := []client.RPCOption{} - - if o.compatType == compat.Persistent { - // Increase the timeout by default for persistent devnets, but not for kurtosis - opts = append(opts, client.WithCallTimeout(time.Minute*5), client.WithBatchCallTimeout(time.Minute*10)) - } - - l2 := shim.NewL2Network(cfg) - - for _, node := range net.Nodes { - o.hydrateL2ELCL(&node, l2, opts) - o.hydrateConductors(&node, l2) - o.hydrateRollupBoostNodeMaybe(&node, l2, opts) - o.hydrateOPRBuilderMaybe(&node, l2, opts) - } - o.hydrateBatcherMaybe(net, l2) - o.hydrateProposerMaybe(net, l2) - o.hydrateChallengerMaybe(net, l2) - o.hydrateL2ProxydMaybe(net, l2) - - if faucet, ok := net.Services["faucet"]; ok { - for _, instance := range faucet { - l2.AddFaucet(shim.NewFaucet(shim.FaucetConfig{ - CommonConfig: commonConfig, - Client: o.rpcClient(t, instance, RPCProtocol, fmt.Sprintf("/chain/%s", l2.ChainID().String()), opts...), - ID: stack.NewFaucetID(instance.Name, l2.ChainID()), - })) - } - } - - system.AddL2Network(l2) -} - -func (o *Orchestrator) hydrateL2ELCL(node 
*descriptors.Node, l2Net stack.ExtensibleL2Network, opts []client.RPCOption) { - require := l2Net.T().Require() - l2ID := l2Net.ID() - - txTimeout := 30 * time.Second - if o.compatType == compat.Persistent { - txTimeout = 5 * time.Minute - } - - elService, ok := node.Services[ELServiceName] - require.True(ok, "need L2 EL service for chain", l2ID) - elClient := o.rpcClient(l2Net.T(), elService, RPCProtocol, "/", opts...) - l2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ - RollupCfg: l2Net.RollupConfig(), - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - Client: elClient, - ChainID: l2ID.ChainID(), - TransactionTimeout: txTimeout, - }, - ID: stack.NewL2ELNodeID(elService.Name, l2ID.ChainID()), - }) - if strings.Contains(node.Name, "geth") { - l2EL.SetLabel(match.LabelVendor, string(match.OpGeth)) - } - if strings.Contains(node.Name, "reth") { - l2EL.SetLabel(match.LabelVendor, string(match.OpReth)) - } - l2Net.AddL2ELNode(l2EL) - - clService, ok := node.Services[CLServiceName] - require.True(ok, "need L2 CL service for chain", l2ID) - - var endpointString string - // Parse the endpoint from the service descriptor. - for proto, endpoint := range clService.Endpoints { - if proto == RPCProtocol { - port := endpoint.Port - if o.usePrivatePorts { - port = endpoint.PrivatePort - } - scheme := endpoint.Scheme - if scheme == "" { - scheme = HTTPProtocol - } - host := endpoint.Host - path := "" - if strings.Contains(host, "/") { - parts := strings.SplitN(host, "/", 2) - host = parts[0] - path = "/" + parts[1] - } - endpointString = fmt.Sprintf("%s://%s:%d%s", scheme, host, port, path) - break - } - } - - require.NotEmpty(endpointString, "no endpoint found for CL service", clService.Name) - - l2Net.Logger().Info("Found endpoint for CL service", "endpoint", endpointString) - - clClient := o.rpcClient(l2Net.T(), clService, RPCProtocol, "/", opts...) 
- l2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ - ID: stack.NewL2CLNodeID(clService.Name, l2ID.ChainID()), - CommonConfig: shim.NewCommonConfig(l2Net.T()), - Client: clClient, - UserRPC: endpointString, - }) - l2Net.AddL2CLNode(l2CL) - l2CL.(stack.LinkableL2CLNode).LinkEL(l2EL) -} - -func (o *Orchestrator) hydrateConductors(node *descriptors.Node, l2Net stack.ExtensibleL2Network) { - require := l2Net.T().Require() - l2ID := l2Net.ID() - - conductorService, ok := node.Services[ConductorServiceName] - if !ok { - l2Net.Logger().Debug("L2 net node is missing a conductor service", "node", node.Name, "l2", l2ID) - return - } - - endpoint, header, err := o.findProtocolService(conductorService, RPCProtocol) - require.NoError(err, "failed to find RPC service for conductor") - - opts := make([]rpc.ClientOption, 0) - - if o.env.Env.ReverseProxyURL != "" && len(header) > 0 && !o.useDirectCnx { - opts = append(opts, - rpc.WithHeaders(header), - rpc.WithHTTPClient(&http.Client{ - Transport: hostAwareRoundTripper(header), - })) - } - conductorClient, err := rpc.DialOptions(l2Net.T().Ctx(), endpoint, opts...) 
- require.NoError(err, "failed to dial conductor endpoint") - l2Net.T().Cleanup(func() { conductorClient.Close() }) - - conductor := shim.NewConductor(shim.ConductorConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - Client: conductorClient, - ID: stack.NewConductorID(conductorService.Name), - }) - - l2Net.AddConductor(conductor) -} - -func (o *Orchestrator) hydrateRollupBoostNodeMaybe(node *descriptors.Node, l2Net stack.ExtensibleL2Network, opts []client.RPCOption) { - require := l2Net.T().Require() - l2ID := l2Net.ID() - - rollupBoostService, ok := node.Services[RollupBoostServiceName] - if !ok { - l2Net.Logger().Debug("L2 net node does not have a rollup-boost service", "node", node.Name, "l2", l2ID) - return - } - - flashblocksWsUrl, flashblocksWsHeaders, err := o.findProtocolService(rollupBoostService, WebsocketFlashblocksProtocol) - require.NoError(err, "failed to find websocket service for rollup-boost") - - wsClient, err := client.DialWS(l2Net.T().Ctx(), client.WSConfig{ - URL: flashblocksWsUrl, - Headers: flashblocksWsHeaders, - Log: l2Net.Logger(), - }) - require.NoError(err, "failed to create rollup-boost websocket client") - - rollupBoost := shim.NewRollupBoostNode(shim.RollupBoostNodeConfig{ - ID: stack.NewRollupBoostNodeID(rollupBoostService.Name, l2ID.ChainID()), - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - Client: o.rpcClient(l2Net.T(), rollupBoostService, RPCProtocol, "/", opts...), - ChainID: l2ID.ChainID(), - }, - RollupCfg: l2Net.RollupConfig(), - FlashblocksClient: wsClient, - }) - - l2Net.AddRollupBoostNode(rollupBoost) -} - -func (o *Orchestrator) hydrateL2ProxydMaybe(net *descriptors.L2Chain, l2Net stack.ExtensibleL2Network) { - require := l2Net.T().Require() - l2ID := getL2ID(net) - require.Equal(l2ID, l2Net.ID(), "must match L2 chain descriptor and target L2 net") - - proxydService, ok := net.Services["proxyd"] - if !ok { - l2Net.Logger().Warn("L2 net is missing a proxyd service") - return - } 
- - for _, instance := range proxydService { - l2Proxyd := shim.NewL2ELNode(shim.L2ELNodeConfig{ - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - Client: o.rpcClient(l2Net.T(), instance, HTTPProtocol, "/"), - ChainID: l2ID.ChainID(), - }, - RollupCfg: l2Net.RollupConfig(), - ID: stack.NewL2ELNodeID(instance.Name, l2ID.ChainID()), - }) - l2Proxyd.SetLabel(match.LabelVendor, string(match.Proxyd)) - l2Net.AddL2ELNode(l2Proxyd) - } -} - -func (o *Orchestrator) hydrateOPRBuilderMaybe(node *descriptors.Node, l2Net stack.ExtensibleL2Network, opts []client.RPCOption) { - require := l2Net.T().Require() - l2ID := l2Net.ID() - - rbuilderService, ok := node.Services[OPRBuilderServiceName] - if !ok { - l2Net.Logger().Debug("L2 net node does not have a oprbuilder service", "node", node.Name, "l2", l2ID) - return - } - - flashblocksWsUrl, flashblocksWsHeaders, err := o.findProtocolService(rbuilderService, WebsocketFlashblocksProtocol) - require.NoError(err, "failed to find websocket service for rbuilder") - - wsClient, err := client.DialWS(l2Net.T().Ctx(), client.WSConfig{ - URL: flashblocksWsUrl, - Headers: flashblocksWsHeaders, - Log: l2Net.Logger(), - }) - require.NoError(err, "failed to create rbuilder websocket client") - - flashblocksBuilder := shim.NewOPRBuilderNode(shim.OPRBuilderNodeConfig{ - ID: stack.NewOPRBuilderNodeID(rbuilderService.Name, l2ID.ChainID()), - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - Client: o.rpcClient(l2Net.T(), rbuilderService, RPCProtocol, "/", opts...), - ChainID: l2ID.ChainID(), - }, - FlashblocksClient: wsClient, - }) - - l2Net.AddOPRBuilderNode(flashblocksBuilder) -} - -func (o *Orchestrator) hydrateBatcherMaybe(net *descriptors.L2Chain, l2Net stack.ExtensibleL2Network) { - require := l2Net.T().Require() - l2ID := getL2ID(net) - require.Equal(l2ID, l2Net.ID(), "must match L2 chain descriptor and target L2 net") - - batcherService, ok := net.Services["batcher"] - if 
!ok { - l2Net.Logger().Warn("L2 net is missing a batcher service") - return - } - - for _, instance := range batcherService { - l2Net.AddL2Batcher(shim.NewL2Batcher(shim.L2BatcherConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - ID: stack.NewL2BatcherID(instance.Name, l2ID.ChainID()), - Client: o.rpcClient(l2Net.T(), instance, HTTPProtocol, "/"), - })) - } -} - -func (o *Orchestrator) hydrateProposerMaybe(net *descriptors.L2Chain, l2Net stack.ExtensibleL2Network) { - require := l2Net.T().Require() - l2ID := getL2ID(net) - require.Equal(l2ID, l2Net.ID(), "must match L2 chain descriptor and target L2 net") - - proposerService, ok := net.Services["proposer"] - if !ok { - l2Net.Logger().Warn("L2 net is missing a proposer service") - return - } - - for _, instance := range proposerService { - l2Net.AddL2Proposer(shim.NewL2Proposer(shim.L2ProposerConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - ID: stack.NewL2ProposerID(instance.Name, l2ID.ChainID()), - Client: o.rpcClient(l2Net.T(), instance, HTTPProtocol, "/"), - })) - } -} - -func (o *Orchestrator) hydrateChallengerMaybe(net *descriptors.L2Chain, l2Net stack.ExtensibleL2Network) { - require := l2Net.T().Require() - l2ID := getL2ID(net) - require.Equal(l2ID, l2Net.ID(), "must match L2 chain descriptor and target L2 net") - - challengerService, ok := net.Services["challenger"] - if !ok { - l2Net.Logger().Warn("L2 net is missing a challenger service") - return - } - - for _, instance := range challengerService { - l2Net.AddL2Challenger(shim.NewL2Challenger(shim.L2ChallengerConfig{ - CommonConfig: shim.NewCommonConfig(l2Net.T()), - ID: stack.NewL2ChallengerID(instance.Name, l2ID.ChainID()), - })) - } -} - -func (o *Orchestrator) defineSystemKeys(t devtest.T, net *descriptors.L2Chain) stack.Keys { - devnetKeys := o.getActualSystemKeys(t, net) - t.Require().NotNil(devnetKeys, "sysext backend requires actual system keys from devnet descriptor, but none were found. 
"+ - "Ensure the devnet environment contains the required wallet configurations.") - - return devnetKeys -} - -func (o *Orchestrator) getActualSystemKeys(t devtest.T, net *descriptors.L2Chain) stack.Keys { - env := o.env - if env == nil || env.Env == nil { - return nil - } - - if net == nil { - t.Logf("No L2 chain provided") - return nil - } - - l1Wallets := net.L1Wallets - if l1Wallets == nil { - t.Logf("No L1 wallets found in L2 chain config") - return nil - } - - chainID := net.Config.ChainID - - keyMap := make(map[string]*ecdsa.PrivateKey) - loadedL1Keys := 0 - for walletRole, keySpec := range o.getWalletMappings(l1Wallets) { - if wallet, exists := l1Wallets[walletRole]; exists { - t.Require().NotEmpty(wallet.PrivateKey, "Private key for wallet role '%s' is empty", walletRole) - - privateKey := o.parsePrivateKey(wallet.PrivateKey) - t.Require().NotNil(privateKey, "Failed to parse private key for wallet role '%s'", walletRole) - - var keyPath string - switch keyType := keySpec.(type) { - case devkeys.Role: - keyPath = keyType.Key(chainID).String() - case devkeys.UserKey: - keyPath = keyType.String() - case *FaucetKey: - keyPath = keyType.String() - default: - t.Errorf("Unknown key type for wallet role '%s'", walletRole) - continue - } - - keyMap[keyPath] = privateKey - loadedL1Keys++ - } - } - - // Also check L2 wallets for chain-specific user keys - loadedL2Keys := 0 - if net.Wallets != nil { - l2ChainID := net.Config.ChainID - - for walletRole, wallet := range net.Wallets { - if strings.HasPrefix(walletRole, "dev-account-") { - indexStr := strings.TrimPrefix(walletRole, "dev-account-") - index := 0 - if _, err := fmt.Sscanf(indexStr, "%d", &index); err != nil { - continue - } - - t.Require().NotEmpty(wallet.PrivateKey, "Private key for L2 wallet '%s' is empty", walletRole) - - privateKey := o.parsePrivateKey(wallet.PrivateKey) - t.Require().NotNil(privateKey, "Failed to parse private key for L2 wallet '%s'", walletRole) - - chainUserKey := 
devkeys.ChainUserKey{ChainID: l2ChainID, Index: uint64(index)} - keyPath := chainUserKey.String() - keyMap[keyPath] = privateKey - loadedL2Keys++ - } - } - } else { - t.Logger().Warn("No L2 wallets found in devnet config") - } - - if loadedL1Keys > 0 || loadedL2Keys > 0 { - t.Logf("Loaded devnet keys: %d L1 system keys, %d L2 user keys", loadedL1Keys, loadedL2Keys) - } - - return &devnetKeyring{ - devnetKeys: keyMap, - chainID: chainID, - } -} - -func (o *Orchestrator) getWalletMappings(l1Wallets descriptors.WalletMap) map[string]interface{} { - mappings := make(map[string]interface{}) - - // System role mappings - systemRoles := map[string]devkeys.Role{ - "systemConfigOwner": devkeys.SystemConfigOwner, - "l1ProxyAdmin": devkeys.L1ProxyAdminOwnerRole, - "l2ProxyAdmin": devkeys.L2ProxyAdminOwnerRole, - "batcher": devkeys.BatcherRole, - "proposer": devkeys.ProposerRole, - "challenger": devkeys.ChallengerRole, - "sequencer": devkeys.SequencerP2PRole, - "sequencerFeeVaultRecipient": devkeys.SequencerFeeVaultRecipientRole, - "baseFeeVaultRecipient": devkeys.BaseFeeVaultRecipientRole, - "l1FeeVaultRecipient": devkeys.L1FeeVaultRecipientRole, - "operatorFeeVaultRecipient": devkeys.OperatorFeeVaultRecipientRole, - } - - for walletRole, devkeyRole := range systemRoles { - mappings[walletRole] = devkeyRole - } - - o.addFaucetMappings(mappings, l1Wallets) - - // Dynamically discover user-key-* mappings from available L1 wallets - for walletRole := range l1Wallets { - if strings.HasPrefix(walletRole, "user-key-") { - if _, alreadyMapped := mappings[walletRole]; !alreadyMapped { - indexStr := strings.TrimPrefix(walletRole, "user-key-") - index := 0 - if _, err := fmt.Sscanf(indexStr, "%d", &index); err == nil { - mappings[walletRole] = devkeys.UserKey(index) - } - } - } - } - - return mappings -} - -// addFaucetMappings implements the faucet wallet fallback logic described in op-acceptor's faucet.go -func (o *Orchestrator) addFaucetMappings(mappings map[string]interface{}, 
l1Wallets descriptors.WalletMap) { - // L1 faucet logic following op-acceptor convention: - // - prefer l1Faucet if available (store under its own name) - // - fallback to user-key-20 only if l1Faucet doesn't exist (use devkeys mapping) - if _, hasL1Faucet := l1Wallets["l1Faucet"]; hasL1Faucet { - mappings["l1Faucet"] = &FaucetKey{name: "l1Faucet"} - } else if _, hasUserKey20 := l1Wallets["user-key-20"]; hasUserKey20 { - // Only use user-key-20 as fallback when l1Faucet doesn't exist - mappings["user-key-20"] = devkeys.UserKey(20) - } - - // L2 faucet logic: use l2Faucet if present (store under its own name) - if _, hasL2Faucet := l1Wallets["l2Faucet"]; hasL2Faucet { - mappings["l2Faucet"] = &FaucetKey{name: "l2Faucet"} - } -} - -type FaucetKey struct { - name string -} - -func (f *FaucetKey) String() string { - return f.name -} - -func (o *Orchestrator) parsePrivateKey(keyStr string) *ecdsa.PrivateKey { - keyStr = strings.TrimPrefix(keyStr, "0x") - - keyBytes, err := hex.DecodeString(keyStr) - if err != nil { - return nil - } - - privateKey, err := crypto.ToECDSA(keyBytes) - if err != nil { - return nil - } - - return privateKey -} - -type devnetKeyring struct { - devnetKeys map[string]*ecdsa.PrivateKey - chainID *big.Int -} - -func (d *devnetKeyring) getPrivateKey(key devkeys.Key) *ecdsa.PrivateKey { - keyPath := key.String() - privateKey, exists := d.devnetKeys[keyPath] - - if !exists { - // If it's a UserKey, try to map it to a ChainUserKey for L2 dev-account - if userKey, ok := key.(devkeys.UserKey); ok { - chainUserKey := devkeys.ChainUserKey{ChainID: d.chainID, Index: uint64(userKey)} - chainKeyPath := chainUserKey.String() - if chainPrivateKey, chainExists := d.devnetKeys[chainKeyPath]; chainExists { - return chainPrivateKey - } - } - panic(fmt.Sprintf("devnet key not found for %s - ensure all required keys are present in devnet configuration", keyPath)) - } - - return privateKey -} - -func (d *devnetKeyring) Secret(key devkeys.Key) *ecdsa.PrivateKey { - 
return d.getPrivateKey(key) -} - -func (d *devnetKeyring) Address(key devkeys.Key) common.Address { - privateKey := d.getPrivateKey(key) - return crypto.PubkeyToAddress(privateKey.PublicKey) -} diff --git a/op-devstack/sysext/orchestrator.go b/op-devstack/sysext/orchestrator.go deleted file mode 100644 index cb8a302e00cf3..0000000000000 --- a/op-devstack/sysext/orchestrator.go +++ /dev/null @@ -1,132 +0,0 @@ -package sysext - -import ( - "os" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -const defaultDevnetUrl = "kt://interop-devnet" - -type OrchestratorOption func(*Orchestrator) - -type Orchestrator struct { - p devtest.P - - env *env.DevnetEnv - - usePrivatePorts bool - useEagerRPCClients bool - - controlPlane *ControlPlane - useDirectCnx bool - - // sysHook is called after hydration of a new test-scope system frontend, - // essentially a test-case preamble. 
- sysHook stack.SystemHook - - compatType compat.Type -} - -var _ stack.Orchestrator = (*Orchestrator)(nil) - -func (o *Orchestrator) ControlPlane() stack.ControlPlane { - return o.controlPlane -} - -func (o *Orchestrator) Type() compat.Type { - return o.compatType -} - -func getCompatType(url string) compat.Type { - // if the scheme is overridden, use that - if scheme := os.Getenv(env.EnvURLVar); scheme != "" { - url = scheme - } - if strings.HasPrefix(url, "kt") { // kt:// and ktnative:// are the same for this purpose - return compat.Kurtosis - } - return compat.Persistent -} - -func NewOrchestrator(p devtest.P, sysHook stack.SystemHook) *Orchestrator { - url := os.Getenv(env.EnvURLVar) - if url == "" { - p.Logger().Warn("No devnet URL specified, using default", "default", defaultDevnetUrl) - url = defaultDevnetUrl - } - env, err := env.LoadDevnetFromURL(url) - p.Require().NoError(err, "Error loading devnet environment") - p.Require().NotNil(env, "Error loading devnet environment: DevnetEnv is nil") - p.Require().NotNil(env.Env, "Error loading devnet environment: DevnetEnvironment is nil") - - orch := &Orchestrator{ - env: env, - p: p, - sysHook: sysHook, - compatType: getCompatType(url), - } - orch.controlPlane = &ControlPlane{ - o: orch, - } - return orch -} - -func (o *Orchestrator) P() devtest.P { - return o.p -} - -func (o *Orchestrator) Hydrate(sys stack.ExtensibleSystem) { - if o.env == nil || o.env.Env == nil { - panic("orchestrator not properly initialized: env is nil") - } - - o.sysHook.PreHydrate(sys) - o.hydrateL1(sys) - o.hydrateSuperchain(sys) - o.hydrateClustersMaybe(sys) - o.hydrateSupervisorsMaybe(sys) - o.hydrateTestSequencersMaybe(sys) - for _, l2Net := range o.env.Env.L2 { - o.hydrateL2(l2Net, sys) - } - o.sysHook.PostHydrate(sys) -} - -func isInterop(env *descriptors.DevnetEnvironment) bool { - for _, feature := range env.Features { - if feature == FeatureInterop { - return true - } - } - return false -} - -func (o *Orchestrator) isInterop() 
bool { - // Ugly hack to ensure we can use L2[0] for supervisor - // Ultimately this should be removed. - return isInterop(o.env.Env) && len(o.env.Env.L2) > 0 -} - -func WithPrivatePorts() OrchestratorOption { - return func(orchestrator *Orchestrator) { - orchestrator.usePrivatePorts = true - } -} - -func WithEagerRPCClients() OrchestratorOption { - return func(orchestrator *Orchestrator) { - orchestrator.useEagerRPCClients = true - } -} - -func WithDirectConnections() OrchestratorOption { - return func(orchestrator *Orchestrator) { - orchestrator.useDirectCnx = true - } -} diff --git a/op-devstack/sysext/system.go b/op-devstack/sysext/system.go deleted file mode 100644 index 01a6a186f6073..0000000000000 --- a/op-devstack/sysext/system.go +++ /dev/null @@ -1,125 +0,0 @@ -package sysext - -import ( - "encoding/json" - "fmt" - - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - client "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" - "github.com/ethereum/go-ethereum/common" - gn "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/rpc" -) - -func (o *Orchestrator) hydrateSuperchain(sys stack.ExtensibleSystem) { - env := o.env - sys.AddSuperchain(shim.NewSuperchain(shim.SuperchainConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: stack.NewSuperchainID(env.Env.Name), - Deployment: newL1AddressBook(sys.T(), env.Env.L1.Addresses), - })) -} - -func (o *Orchestrator) hydrateClustersMaybe(sys stack.ExtensibleSystem) { - if !o.isInterop() { - sys.T().Logger().Info("Interop is inactive, skipping clusters") - return - } - - require := sys.T().Require() - env := o.env - - depsets := o.env.Env.DepSets - - for _, d := range depsets { - var depSet depset.StaticConfigDependencySet - require.NoError(json.Unmarshal(d, &depSet)) - - 
sys.AddCluster(shim.NewCluster(shim.ClusterConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: stack.NewClusterID(env.Env.Name), - DependencySet: &depSet, - })) - } -} - -func (o *Orchestrator) hydrateSupervisorsMaybe(sys stack.ExtensibleSystem) { - if !o.isInterop() { - sys.T().Logger().Info("Interop is inactive, skipping supervisors") - return - } - - supervisors := make(map[stack.ComponentID]bool) - for _, l2 := range o.env.Env.L2 { - if supervisorService, ok := l2.Services["supervisor"]; ok { - for _, instance := range supervisorService { - id := stack.NewSupervisorID(instance.Name) - if supervisors[id] { - // each supervisor appears in multiple L2s (covering the dependency set), - // so we need to deduplicate - continue - } - supervisors[id] = true - sys.AddSupervisor(shim.NewSupervisor(shim.SupervisorConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: id, - Client: o.rpcClient(sys.T(), instance, RPCProtocol, "/"), - })) - } - } - } -} - -func (o *Orchestrator) hydrateTestSequencersMaybe(sys stack.ExtensibleSystem) { - sequencers := make(map[string]bool) - - // Collect all L2 chain IDs and the shared JWT secret - var ( - chainIDs []eth.ChainID - jwt string - ) - - for _, l2 := range o.env.Env.L2 { - chainID, _ := eth.ChainIDFromString(l2.Chain.ID) - chainIDs = append(chainIDs, chainID) - jwt = l2.JWT - } - - opts := []client.RPCOption{ - client.WithGethRPCOptions(rpc.WithHTTPAuth(gn.NewJWTAuth(common.HexToHash(jwt)))), - } - - for _, l2 := range o.env.Env.L2 { - if sequencerService, ok := l2.Services["test-sequencer"]; ok { - for _, instance := range sequencerService { - if sequencers[instance.Name] { - // Each test_sequencer appears in multiple L2s - // So we need to deduplicate - continue - } - sequencers[instance.Name] = true - - cc := make(map[eth.ChainID]client.RPC, len(chainIDs)) - for _, chainID := range chainIDs { - cc[chainID] = o.rpcClient( - sys.T(), - instance, - RPCProtocol, - fmt.Sprintf("/sequencers/sequencer-%s", 
chainID.String()), - opts..., - ) - } - - sys.AddTestSequencer(shim.NewTestSequencer(shim.TestSequencerConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: stack.NewTestSequencerID(instance.Name), - Client: o.rpcClient(sys.T(), instance, RPCProtocol, "/", opts...), - ControlClients: cc, - })) - } - } - } -} diff --git a/op-devstack/sysgo/add_game_type.go b/op-devstack/sysgo/add_game_type.go index a1407cbf2599a..9e7fa692abdd1 100644 --- a/op-devstack/sysgo/add_game_type.go +++ b/op-devstack/sysgo/add_game_type.go @@ -12,7 +12,6 @@ import ( "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/manage" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" op_service "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -28,98 +27,26 @@ import ( "github.com/ethereum/go-ethereum/rpc" ) -func WithGameTypeAdded(gameType gameTypes.GameType) stack.Option[*Orchestrator] { - if gameType == gameTypes.PermissionedGameType { - // Permissioned games are added as part of the initial deployment - // so no action required. 
- return stack.Combine[*Orchestrator]() - } - opts := stack.FnOption[*Orchestrator]{ - FinallyFn: func(o *Orchestrator) { - absolutePrestate := PrestateForGameType(o.P(), gameType) - l1ELID, l2NetIDs := requireGameTypeTargetIDs(o) - for _, l2NetID := range l2NetIDs { - addGameType(o, absolutePrestate, gameType, l1ELID, l2NetID.ChainID()) - } - }, - } - return opts -} - -func WithRespectedGameType(gameType gameTypes.GameType) stack.Option[*Orchestrator] { - return stack.FnOption[*Orchestrator]{ - FinallyFn: func(o *Orchestrator) { - l1ELID, l2NetIDs := requireGameTypeTargetIDs(o) - for _, l2NetID := range l2NetIDs { - setRespectedGameType(o, gameType, l1ELID, l2NetID.ChainID()) - } - }, - } -} - -func WithCannonGameTypeAdded(l1ELID stack.ComponentID, l2ChainID eth.ChainID) stack.Option[*Orchestrator] { - return stack.FnOption[*Orchestrator]{ - FinallyFn: func(o *Orchestrator) { - // TODO(#17867): Rebuild the op-program prestate using the newly minted L2 chain configs before using it. - absolutePrestate := getAbsolutePrestate(o.P(), "op-program/bin/prestate-proof-mt64.json") - addGameType(o, absolutePrestate, gameTypes.CannonGameType, l1ELID, l2ChainID) - }, - } -} - -func WithCannonKonaGameTypeAdded() stack.Option[*Orchestrator] { - return stack.FnOption[*Orchestrator]{ - BeforeDeployFn: func(o *Orchestrator) { - o.l2ChallengerOpts.useCannonKonaConfig = true - }, - FinallyFn: func(o *Orchestrator) { - absolutePrestate := getCannonKonaAbsolutePrestate(o.P()) - l1ELID, l2NetIDs := requireGameTypeTargetIDs(o) - for _, l2NetID := range l2NetIDs { - addGameType(o, absolutePrestate, gameTypes.CannonKonaGameType, l1ELID, l2NetID.ChainID()) - } - }, - } -} - -func requireGameTypeTargetIDs(o *Orchestrator) (stack.ComponentID, []stack.ComponentID) { - require := o.P().Require() - l2NetIDs := o.registry.IDsByKind(stack.KindL2Network) - require.NotEmpty(l2NetIDs, "need at least one L2 network to configure game types") - - l1ELIDs := o.registry.IDsByKind(stack.KindL1ELNode) - 
require.NotEmpty(l1ELIDs, "need at least one L1 EL node to configure game types") - - return l1ELIDs[0], l2NetIDs -} - -func WithChallengerCannonKonaEnabled() stack.Option[*Orchestrator] { - return stack.FnOption[*Orchestrator]{ - BeforeDeployFn: func(o *Orchestrator) { - o.l2ChallengerOpts.useCannonKonaConfig = true - }, - } -} - -func setRespectedGameType(o *Orchestrator, gameType gameTypes.GameType, l1ELID stack.ComponentID, l2ChainID eth.ChainID) { - t := o.P() +func setRespectedGameTypeForRuntime( + t devtest.T, + keys devkeys.Keys, + gameType gameTypes.GameType, + l1ChainID eth.ChainID, + l1ELRPC string, + l2Net *L2Network, +) { require := t.Require() - require.NotNil(o.wb, "must have a world builder") - l1ChainID := l1ELID.ChainID() - - l2Network, ok := o.GetL2Network(stack.NewL2NetworkID(l2ChainID)) - require.True(ok, "l2Net must exist") - portalAddr := l2Network.rollupCfg.DepositContractAddress + require.NotNil(l2Net, "l2 network must exist") + require.NotNil(l2Net.rollupCfg, "l2 rollup config must exist") - l1EL, ok := o.GetL1EL(l1ELID) - require.True(ok, "l1El must exist") + portalAddr := l2Net.rollupCfg.DepositContractAddress - rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) + rpcClient, err := rpc.DialContext(t.Ctx(), l1ELRPC) require.NoError(err) defer rpcClient.Close() client := ethclient.NewClient(rpcClient) - guardianKey, err := o.keys.Secret(devkeys.SuperchainOperatorKeys(l1ChainID.ToBig())(devkeys.SuperchainConfigGuardianKey)) + guardianKey, err := keys.Secret(devkeys.SuperchainOperatorKeys(l1ChainID.ToBig())(devkeys.SuperchainConfigGuardianKey)) require.NoError(err, "failed to get guardian key") transactOpts, err := bind.NewKeyedTransactorWithChainID(guardianKey, l1ChainID.ToBig()) @@ -153,34 +80,38 @@ func setRespectedGameType(o *Orchestrator, gameType gameTypes.GameType, l1ELID s require.Equal(rcpt.Status, gethTypes.ReceiptStatusSuccessful, "set respected game type tx did not execute correctly") } -func addGameType(o *Orchestrator, 
absolutePrestate common.Hash, gameType gameTypes.GameType, l1ELID stack.ComponentID, l2ChainID eth.ChainID) { - t := o.P() +func addGameTypeForRuntime( + t devtest.T, + keys devkeys.Keys, + absolutePrestate common.Hash, + gameType gameTypes.GameType, + l1ChainID eth.ChainID, + l1ELRPC string, + l2Net *L2Network, +) { require := t.Require() - require.NotNil(o.wb, "must have a world builder") - l1ChainID := l1ELID.ChainID() - - opcmAddr := o.wb.output.ImplementationsDeployment.OpcmImpl - - l1EL, ok := o.GetL1EL(l1ELID) - require.True(ok, "l1El must exist") + require.NotNil(l2Net, "l2 network must exist") + require.NotNil(l2Net.deployment, "l2 deployment must exist") + require.NotEqual(common.Address{}, l2Net.opcmImpl, "missing OPCM implementation address") + require.NotEqual(common.Address{}, l2Net.mipsImpl, "missing MIPS implementation address") - rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) + rpcClient, err := rpc.DialContext(t.Ctx(), l1ELRPC) require.NoError(err) defer rpcClient.Close() client := ethclient.NewClient(rpcClient) - l1PAO, err := o.keys.Address(devkeys.ChainOperatorKeys(l1ChainID.ToBig())(devkeys.L1ProxyAdminOwnerRole)) + l1PAO, err := keys.Address(devkeys.ChainOperatorKeys(l1ChainID.ToBig())(devkeys.L1ProxyAdminOwnerRole)) require.NoError(err, "failed to get l1 proxy admin owner address") cfg := manage.AddGameTypeConfig{ - L1RPCUrl: l1EL.UserRPC(), + L1RPCUrl: l1ELRPC, Logger: t.Logger(), ArtifactsLocator: LocalArtifacts(t), CacheDir: t.TempDir(), L1ProxyAdminOwner: l1PAO, - OPCMImpl: opcmAddr, - SystemConfigProxy: o.wb.outL2Deployment[l2ChainID].SystemConfigProxyAddr(), - DelayedWETHProxy: o.wb.outL2Deployment[l2ChainID].PermissionlessDelayedWETHProxyAddr(), + OPCMImpl: l2Net.opcmImpl, + SystemConfigProxy: l2Net.deployment.SystemConfigProxyAddr(), + DelayedWETHProxy: l2Net.deployment.PermissionlessDelayedWETHProxyAddr(), DisputeGameType: uint32(gameType), DisputeAbsolutePrestate: absolutePrestate, DisputeMaxGameDepth: big.NewInt(73), 
@@ -188,19 +119,19 @@ func addGameType(o *Orchestrator, absolutePrestate common.Hash, gameType gameTyp DisputeClockExtension: 10800, DisputeMaxClockDuration: 302400, InitialBond: eth.GWei(80_000_000).ToBig(), // 0.08 ETH - VM: o.wb.output.ImplementationsDeployment.MipsImpl, + VM: l2Net.mipsImpl, Permissionless: true, - SaltMixer: fmt.Sprintf("devstack-%s-%s", l2ChainID, absolutePrestate.Hex()), + SaltMixer: fmt.Sprintf("devstack-%s-%s", l2Net.ChainID(), absolutePrestate.Hex()), } - OPChainProxyAdmin := o.wb.outL2Deployment[l2ChainID].ProxyAdminAddr() + opChainProxyAdmin := l2Net.deployment.ProxyAdminAddr() _, addGameTypeCalldata, err := manage.AddGameType(t.Ctx(), cfg) require.NoError(err, "failed to create add game type calldata") require.Len(addGameTypeCalldata, 1, "calldata must contain one entry") chainOps := devkeys.ChainOperatorKeys(l1ChainID.ToBig()) - l1PAOKey, err := o.keys.Secret(chainOps(devkeys.L1ProxyAdminOwnerRole)) + l1PAOKey, err := keys.Secret(chainOps(devkeys.L1ProxyAdminOwnerRole)) require.NoError(err, "failed to get l1 proxy admin owner key") transactOpts, err := bind.NewKeyedTransactorWithChainID(l1PAOKey, l1ChainID.ToBig()) require.NoError(err, "must have transact opts") @@ -209,18 +140,18 @@ func addGameType(o *Orchestrator, absolutePrestate common.Hash, gameType gameTyp t.Log("Deploying delegate call proxy contract") delegateCallProxy, proxyContract := deployDelegateCallProxy(t, transactOpts, client, l1PAO) // transfer ownership to the proxy so that we can delegatecall the opcm - transferOwnership(t, l1PAOKey, client, OPChainProxyAdmin, delegateCallProxy) - dgf := o.wb.outL2Deployment[l2ChainID].DisputeGameFactoryProxyAddr() + transferOwnership(t, l1PAOKey, client, opChainProxyAdmin, delegateCallProxy) + dgf := l2Net.deployment.DisputeGameFactoryProxyAddr() transferOwnership(t, l1PAOKey, client, dgf, delegateCallProxy) t.Log("sending opcm.addGameType transaction") - tx, err := proxyContract.ExecuteDelegateCall(transactOpts, opcmAddr, 
addGameTypeCalldata[0].Data) + tx, err := proxyContract.ExecuteDelegateCall(transactOpts, l2Net.opcmImpl, addGameTypeCalldata[0].Data) require.NoError(err, "failed to send add game type tx") _, err = wait.ForReceiptOK(t.Ctx(), client, tx.Hash()) require.NoError(err, "failed to wait for add game type receipt") // reset ProxyAdmin ownership transfers - transferOwnershipForDelegateCallProxy(t, l1ChainID.ToBig(), l1PAOKey, client, delegateCallProxy, OPChainProxyAdmin, l1PAO) + transferOwnershipForDelegateCallProxy(t, l1ChainID.ToBig(), l1PAOKey, client, delegateCallProxy, opChainProxyAdmin, l1PAO) transferOwnershipForDelegateCallProxy(t, l1ChainID.ToBig(), l1PAOKey, client, delegateCallProxy, dgf, l1PAO) } @@ -236,7 +167,7 @@ func PrestateForGameType(t devtest.CommonT, gameType gameTypes.GameType) common. } } -func LocalArtifacts(t devtest.P) *artifacts.Locator { +func LocalArtifacts(t devtest.T) *artifacts.Locator { require := t.Require() _, testFilename, _, ok := runtime.Caller(0) require.Truef(ok, "failed to get test filename") diff --git a/op-devstack/sysgo/cluster.go b/op-devstack/sysgo/cluster.go deleted file mode 100644 index 63eebac2e76fb..0000000000000 --- a/op-devstack/sysgo/cluster.go +++ /dev/null @@ -1,28 +0,0 @@ -package sysgo - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" -) - -type Cluster struct { - id stack.ComponentID - cfgset depset.FullConfigSetMerged -} - -func (c *Cluster) hydrate(system stack.ExtensibleSystem) { - sysCluster := shim.NewCluster(shim.ClusterConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: c.id, - DependencySet: c.cfgset.DependencySet, - }) - system.AddCluster(sysCluster) -} - -func (c *Cluster) DepSet() *depset.StaticConfigDependencySet { - if c.cfgset.DependencySet == nil { - return nil - } - return 
c.cfgset.DependencySet.(*depset.StaticConfigDependencySet) -} diff --git a/op-devstack/sysgo/component_target.go b/op-devstack/sysgo/component_target.go new file mode 100644 index 0000000000000..4300176f93562 --- /dev/null +++ b/op-devstack/sysgo/component_target.go @@ -0,0 +1,26 @@ +package sysgo + +import ( + "fmt" + + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type ComponentTarget struct { + Name string + ChainID eth.ChainID +} + +func NewComponentTarget(name string, chainID eth.ChainID) ComponentTarget { + return ComponentTarget{ + Name: name, + ChainID: chainID, + } +} + +func (t ComponentTarget) String() string { + if t.ChainID == (eth.ChainID{}) { + return t.Name + } + return fmt.Sprintf("%s-%s", t.Name, t.ChainID) +} diff --git a/op-devstack/sysgo/control_plane.go b/op-devstack/sysgo/control_plane.go deleted file mode 100644 index 1f4f019bdaec9..0000000000000 --- a/op-devstack/sysgo/control_plane.go +++ /dev/null @@ -1,56 +0,0 @@ -package sysgo - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -type ControlPlane struct { - o *Orchestrator -} - -func control(lifecycle stack.Lifecycle, mode stack.ControlAction) { - switch mode { - case stack.Start: - lifecycle.Start() - case stack.Stop: - lifecycle.Stop() - } -} - -func (c *ControlPlane) SupervisorState(id stack.ComponentID, mode stack.ControlAction) { - component, ok := c.o.GetSupervisor(id) - c.o.P().Require().True(ok, "need supervisor to change state") - control(component, mode) -} - -func (c *ControlPlane) L2CLNodeState(id stack.ComponentID, mode stack.ControlAction) { - component, ok := c.o.GetL2CL(id) - c.o.P().Require().True(ok, "need l2cl node to change state") - control(component, mode) -} - -func (c *ControlPlane) L2ELNodeState(id stack.ComponentID, mode stack.ControlAction) { - component, ok := c.o.GetL2EL(id) - c.o.P().Require().True(ok, "need l2el node to change state") - control(component, mode) -} - -func (c *ControlPlane) FakePoSState(id 
stack.ComponentID, mode stack.ControlAction) { - component, ok := c.o.GetL1CL(id) - c.o.P().Require().True(ok, "need l1cl node to change state of fakePoS module") - control(component.fakepos, mode) -} - -func (c *ControlPlane) OPRBuilderNodeState(id stack.ComponentID, mode stack.ControlAction) { - component, ok := c.o.GetOPRBuilder(id) - c.o.P().Require().True(ok, "need oprbuilder node to change state") - control(component, mode) -} - -func (c *ControlPlane) RollupBoostNodeState(id stack.ComponentID, mode stack.ControlAction) { - component, ok := c.o.GetRollupBoost(id) - c.o.P().Require().True(ok, "need rollup boost node to change state") - control(component, mode) -} - -var _ stack.ControlPlane = (*ControlPlane)(nil) diff --git a/op-devstack/sysgo/control_plane_test.go b/op-devstack/sysgo/control_plane_test.go deleted file mode 100644 index aa734be01b7f8..0000000000000 --- a/op-devstack/sysgo/control_plane_test.go +++ /dev/null @@ -1,210 +0,0 @@ -package sysgo - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/retry" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/status" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -// TestControlPlane tests start/stop functionality provided control plane -func TestControlPlane(gt *testing.T) { - var ids DefaultInteropSystemIDs - opt := DefaultInteropSystem(&ids) - - logger := testlog.Logger(gt, log.LevelInfo) - onFail, onSkipNow := exiters(gt) - - p := devtest.NewP(context.Background(), logger, onFail, onSkipNow) - gt.Cleanup(p.Close) - - orch := 
NewOrchestrator(p, stack.Combine[*Orchestrator]()) - stack.ApplyOptionLifecycle(opt, orch) - - control := orch.ControlPlane() - - gt.Run("test-SupervisorRestart", func(gt *testing.T) { - gt.Parallel() - - t := devtest.SerialT(gt) - system := shim.NewSystem(t) - orch.Hydrate(system) - - testSupervisorRestart(ids, system, control) - }) - - gt.Run("test-L2CLRestart", func(gt *testing.T) { - gt.Parallel() - - t := devtest.SerialT(gt) - system := shim.NewSystem(t) - orch.Hydrate(system) - - testL2CLRestart(ids, system, control) - }) -} - -func testSupervisorRestart(ids DefaultInteropSystemIDs, system stack.System, control stack.ControlPlane) { - t := system.T() - logger := t.Logger() - supervisor := system.Supervisor(stack.ByID[stack.Supervisor](ids.Supervisor)) - - // progress supervisor - for range 3 { - time.Sleep(time.Second * 2) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - status, err := supervisor.QueryAPI().SyncStatus(ctx) - require.NoError(t, err) - cancel() - logger.Info("supervisor L1 view", "tip", status.MinSyncedL1) - } - - // stop supervisor - control.SupervisorState(ids.Supervisor, stack.Stop) - - // supervisor API will not work since L2CL stopped - { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - _, err := retry.Do[eth.SupervisorSyncStatus](ctx, 10, retry.Fixed(time.Millisecond*500), func() (eth.SupervisorSyncStatus, error) { - return supervisor.QueryAPI().SyncStatus(ctx) - }) - cancel() - require.Error(t, err) - } - - // restart supervisor - control.SupervisorState(ids.Supervisor, stack.Start) - - // check supervisor API is back - { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - _, err := retry.Do[eth.SupervisorSyncStatus](ctx, 3, retry.Fixed(time.Millisecond*500), func() (eth.SupervisorSyncStatus, error) { - return supervisor.QueryAPI().SyncStatus(ctx) - }) - if err != nil { - // API is still back, although supervisor status tracker not ready - require.Equal(t, 
errors.Unwrap(err).Error(), status.ErrStatusTrackerNotReady.Error()) - } - cancel() - } -} - -func testL2CLRestart(ids DefaultInteropSystemIDs, system stack.System, control stack.ControlPlane) { - t := system.T() - logger := t.Logger() - seqA := system.L2Network(stack.ByID[stack.L2Network](ids.L2A)).L2CLNode(stack.ByID[stack.L2CLNode](ids.L2ACL)) - - // progress chain - for range 3 { - time.Sleep(time.Second * 2) - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - status, err := seqA.RollupAPI().SyncStatus(ctx) - require.NoError(t, err) - cancel() - logger.Info("chain A", "tip", status.UnsafeL2) - } - - // stop L2CL - control.L2CLNodeState(ids.L2ACL, stack.Stop) - - // L2CL API will still kind of work, it is not functioning, - // but since L2CL is behind a proxy, the proxy is still online, and may create a different error. - // The dial will be accepted, and the connection then closed, once the connection behind the proxy fails. - { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - _, err := retry.Do[*eth.SyncStatus](ctx, 10, retry.Fixed(time.Millisecond*500), func() (*eth.SyncStatus, error) { - return seqA.RollupAPI().SyncStatus(ctx) - }) - cancel() - require.Error(t, err, "should not be able to get sync-status when node behind proxy is offline") - } - - // restart L2CL - control.L2CLNodeState(ids.L2ACL, stack.Start) - - // check L2CL API is back - { - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - _, err := retry.Do[*eth.SyncStatus](ctx, 3, retry.Fixed(time.Millisecond*500), func() (*eth.SyncStatus, error) { - return seqA.RollupAPI().SyncStatus(ctx) - }) - require.NoError(t, err) - cancel() - } -} - -// TestControlPlaneFakePoS tests the start/stop functionality provided by the control plane for the fakePoS module -func TestControlPlaneFakePoS(gt *testing.T) { - var ids DefaultInteropSystemIDs - opt := DefaultInteropSystem(&ids) - - logger := testlog.Logger(gt, log.LevelInfo) - onFail, 
onSkipNow := exiters(gt) - p := devtest.NewP(context.Background(), logger, onFail, onSkipNow) - gt.Cleanup(p.Close) - - orch := NewOrchestrator(p, stack.Combine[*Orchestrator]()) - stack.ApplyOptionLifecycle(opt, orch) - - control := orch.ControlPlane() - - t := devtest.SerialT(gt) - system := shim.NewSystem(t) - orch.Hydrate(system) - - ctx := t.Ctx() - - el := system.L1Network(stack.ByID[stack.L1Network](ids.L1)).L1ELNode(match.FirstL1EL) - - // progress chain - blockTime := time.Second * 6 - for range 2 { - time.Sleep(blockTime) - - head, err := el.EthClient().InfoByLabel(ctx, "latest") - require.NoError(t, err) - logger.Info("L1 chain", "number", head.NumberU64(), "hash", head.Hash()) - } - - logger.Info("Stopping fakePoS service") - control.FakePoSState(ids.L1CL, stack.Stop) - - head, err := el.EthClient().InfoByLabel(ctx, "latest") - require.NoError(t, err) - - // L1 chain won't progress since fakePoS is stopped - // Wait and check that L1 chain won't progress - for range 2 { - time.Sleep(blockTime) - - other, err := el.EthClient().InfoByLabel(ctx, "latest") - require.NoError(t, err) - logger.Info("L1 chain", "number", other.NumberU64(), "hash", other.Hash(), "previous", head.Hash()) - - require.Equal(t, other.Hash(), head.Hash()) - } - - // Restart fakePoS - logger.Info("Starting fakePoS service") - control.FakePoSState(ids.L1CL, stack.Start) - - // L1 chain should progress again eventually - require.Eventually(t, func() bool { - other, err := el.EthClient().InfoByLabel(ctx, "latest") - require.NoError(t, err) - logger.Info("L1 chain", "number", other.NumberU64(), "hash", other.Hash(), "previous", head.Hash()) - - return other.Hash() != head.Hash() && other.NumberU64() > head.NumberU64() - }, time.Second*20, time.Second*2) -} diff --git a/op-devstack/sysgo/default_chain_ids.go b/op-devstack/sysgo/default_chain_ids.go new file mode 100644 index 0000000000000..f7f63acbc8cd1 --- /dev/null +++ b/op-devstack/sysgo/default_chain_ids.go @@ -0,0 +1,9 @@ +package 
sysgo + +import "github.com/ethereum-optimism/optimism/op-service/eth" + +var ( + DefaultL1ID = eth.ChainIDFromUInt64(900) + DefaultL2AID = eth.ChainIDFromUInt64(901) + DefaultL2BID = eth.ChainIDFromUInt64(902) +) diff --git a/op-devstack/sysgo/deployer.go b/op-devstack/sysgo/deployer.go index fa12568a2fc2e..7e43b9592722e 100644 --- a/op-devstack/sysgo/deployer.go +++ b/op-devstack/sysgo/deployer.go @@ -32,30 +32,21 @@ import ( const funderMnemonicIndex = 10_000 const devFeatureBitmapKey = "devFeatureBitmap" -type DeployerOption func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) - -func WithDeployerOptions(opts ...DeployerOption) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Require().NotNil(o.wb, "must have a world builder") - for _, opt := range opts { - opt(o.P(), o.keys, o.wb.builder) - } - }) -} +type DeployerOption func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) func WithForkAtL1Genesis(fork forks.Fork) DeployerOption { - return func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + return func(_ devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { builder.L1().WithL1ForkAtGenesis(fork) } } func WithForkAtL1Offset(fork forks.Fork, offset uint64) DeployerOption { - return func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + return func(_ devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { builder.L1().WithL1ForkAtOffset(fork, &offset) } } -func WithDefaultBPOBlobSchedule(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { +func WithDefaultBPOBlobSchedule(_ devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { // Once we get the latest changes from op-geth we can change this to // params.DefaultBlobSchedule. 
builder.L1().WithL1BlobSchedule(¶ms.BlobScheduleConfig{ @@ -69,7 +60,7 @@ func WithDefaultBPOBlobSchedule(_ devtest.P, _ devkeys.Keys, builder intentbuild }) } -func WithJovianAtGenesis(p devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { +func WithJovianAtGenesis(p devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { for _, l2Cfg := range builder.L2s() { l2Cfg.WithForkAtGenesis(opforks.Jovian) } @@ -84,90 +75,17 @@ func WithDeployerCacheDir(dirPath string) DeployerPipelineOption { } // WithDAFootprintGasScalar sets the DA footprint gas scalar with which the networks identified by -// l2IDs will be launched. If there are no l2IDs provided, all L2 networks are set with scalar. -func WithDAFootprintGasScalar(scalar uint16, l2IDs ...stack.ComponentID) DeployerOption { - return func(p devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { +// l2ChainIDs will be launched. If there are no l2ChainIDs provided, all L2 networks are set with scalar. +func WithDAFootprintGasScalar(scalar uint16, l2ChainIDs ...eth.ChainID) DeployerOption { + return func(p devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { for _, l2 := range builder.L2s() { - if len(l2IDs) == 0 || slices.ContainsFunc(l2IDs, func(id stack.ComponentID) bool { - return id.ChainID() == l2.ChainID() - }) { + if len(l2ChainIDs) == 0 || slices.Contains(l2ChainIDs, l2.ChainID()) { l2.WithDAFootprintGasScalar(scalar) } } } } -func WithDeployerPipelineOption(opt DeployerPipelineOption) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(o *Orchestrator) { - o.deployerPipelineOptions = append(o.deployerPipelineOptions, opt) - }) -} - -func WithDeployer() stack.Option[*Orchestrator] { - return stack.FnOption[*Orchestrator]{ - BeforeDeployFn: func(o *Orchestrator) { - o.P().Require().Nil(o.wb, "must not already have a world builder") - o.wb = &worldBuilder{ - p: o.P(), - logger: o.P().Logger(), - require: o.P().Require(), - keys: o.keys, - builder: intentbuilder.New(), - } - }, - 
DeployFn: func(o *Orchestrator) { - o.P().Require().NotNil(o.wb, "must have a world builder") - o.wb.deployerPipelineOptions = o.deployerPipelineOptions - o.wb.Build() - }, - AfterDeployFn: func(o *Orchestrator) { - wb := o.wb - require := o.P().Require() - require.NotNil(o.wb, "must have a world builder") - - l1ID := stack.NewL1NetworkID(eth.ChainIDFromUInt64(wb.output.AppliedIntent.L1ChainID)) - superchainID := stack.NewSuperchainID("main") - clusterID := stack.NewClusterID("main") - - l1Net := &L1Network{ - id: l1ID, - genesis: wb.outL1Genesis, - blockTime: 6, - } - o.registry.Register(l1ID, l1Net) - - o.registry.Register(superchainID, &Superchain{ - id: superchainID, - deployment: wb.outSuperchainDeployment, - }) - o.registry.Register(clusterID, &Cluster{ - id: clusterID, - cfgset: wb.outFullCfgSet, - }) - - for _, chainID := range wb.l2Chains { - l2Genesis, ok := wb.outL2Genesis[chainID] - require.True(ok, "L2 genesis must exist") - l2RollupCfg, ok := wb.outL2RollupCfg[chainID] - require.True(ok, "L2 rollup config must exist") - l2Dep, ok := wb.outL2Deployment[chainID] - require.True(ok, "L2 deployment must exist") - - l2ID := stack.NewL2NetworkID(chainID) - l2Net := &L2Network{ - id: l2ID, - l1ChainID: l1ID.ChainID(), - genesis: l2Genesis, - rollupCfg: l2RollupCfg, - deployment: l2Dep, - keys: o.keys, - } - o.registry.Register(l2ID, l2Net) - } - }, - } -} - type L2Deployment struct { systemConfigProxyAddr common.Address disputeGameFactoryProxy common.Address @@ -198,12 +116,8 @@ func (d *L2Deployment) PermissionlessDelayedWETHProxyAddr() common.Address { return d.permissionlessDelayedWETHProxy } -type InteropMigration struct { - DisputeGameFactory common.Address -} - type worldBuilder struct { - p devtest.P + p devtest.CommonT logger log.Logger require *testreq.Assertions @@ -224,8 +138,6 @@ type worldBuilder struct { outFullCfgSet depset.FullConfigSetMerged outSuperchainDeployment *SuperchainDeployment - - outInteropMigration *InteropMigration } var ( @@ 
-234,14 +146,14 @@ var ( ) func WithEmbeddedContractSources() DeployerOption { - return func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { + return func(_ devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { builder.WithL1ContractsLocator(artifacts.EmbeddedLocator) builder.WithL2ContractsLocator(artifacts.EmbeddedLocator) } } func WithLocalContractSources() DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { paths, err := contractPaths() p.Require().NoError(err) wd, err := os.Getwd() @@ -256,7 +168,7 @@ func WithLocalContractSources() DeployerOption { } func WithCommons(l1ChainID eth.ChainID) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { _, l1Config := builder.WithL1(l1ChainID) l1StartTimestamp := uint64(time.Now().Unix()) + 1 @@ -280,14 +192,14 @@ func WithCommons(l1ChainID eth.ChainID) DeployerOption { } func WithGuardianMatchL1PAO() DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { _, superCfg := builder.WithSuperchain() intentbuilder.WithOverrideGuardianToL1PAO(p, keys, superCfg.L1ChainID(), superCfg) } } func WithPrefundedL2(l1ChainID, l2ChainID eth.ChainID) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { _, l2Config := builder.WithL2(l2ChainID) intentbuilder.WithDevkeyVaults(p, keys, l2Config) intentbuilder.WithDevkeyL2Roles(p, keys, l2Config) @@ -311,7 +223,7 @@ func WithPrefundedL2(l1ChainID, l2ChainID eth.ChainID) DeployerOption { // WithDevFeatureEnabled adds a feature as enabled in the dev feature bitmap func WithDevFeatureEnabled(flag 
common.Hash) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { currentValue := builder.GlobalOverride(devFeatureBitmapKey) var bitmap common.Hash if currentValue != nil { @@ -323,7 +235,7 @@ func WithDevFeatureEnabled(flag common.Hash) DeployerOption { // WithInteropAtGenesis activates interop at genesis for all known L2s func WithInteropAtGenesis() DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { for _, l2Cfg := range builder.L2s() { l2Cfg.WithForkAtGenesis(opforks.Interop) } @@ -335,7 +247,7 @@ func WithInteropAtGenesis() DeployerOption { // until (including) endFork. Each successive fork is scheduled at // an increasing offset. func WithHardforkSequentialActivation(startFork, endFork opforks.Name, delta *uint64) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { for _, l2Cfg := range builder.L2s() { l2Cfg.WithForkAtGenesis(startFork) activateWithOffset := false @@ -363,7 +275,7 @@ func WithHardforkSequentialActivation(startFork, endFork opforks.Name, delta *ui // WithSequencingWindow overrides the number of L1 blocks in a sequencing window, applied to all L2s. func WithSequencingWindow(n uint64) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { builder.WithGlobalOverride("sequencerWindowSize", uint64(n)) } } @@ -379,7 +291,7 @@ func WithDeployerMatchL1PAO() DeployerPipelineOption { // WithFinalizationPeriodSeconds overrides the number of L1 blocks in a sequencing window, applied to all L2s. 
func WithFinalizationPeriodSeconds(n uint64) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { for _, l2Cfg := range builder.L2s() { l2Cfg.WithFinalizationPeriodSeconds(n) } @@ -387,19 +299,19 @@ func WithFinalizationPeriodSeconds(n uint64) DeployerOption { } func WithProofMaturityDelaySeconds(n uint64) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { builder.WithGlobalOverride("proofMaturityDelaySeconds", uint64(n)) } } func WithDisputeGameFinalityDelaySeconds(seconds uint64) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { builder.WithGlobalOverride("disputeGameFinalityDelaySeconds", seconds) } } func WithCustomGasToken(name, symbol string, initialLiquidity *big.Int, liquidityControllerOwner common.Address) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { for _, l2Cfg := range builder.L2s() { l2Cfg.WithCustomGasToken(name, symbol, initialLiquidity, liquidityControllerOwner) } @@ -450,7 +362,7 @@ func (wb *worldBuilder) buildL2DeploymentOutputs() { } func WithRevenueShare(enabled bool, chainFeesRecipient common.Address) DeployerOption { - return func(p devtest.P, keys devkeys.Keys, builder intentbuilder.Builder) { + return func(p devtest.T, keys devkeys.Keys, builder intentbuilder.Builder) { for _, l2Cfg := range builder.L2s() { l2Cfg.WithRevenueShare(enabled, chainFeesRecipient) } diff --git a/op-devstack/sysgo/fakepos.go b/op-devstack/sysgo/fakepos.go index cc1ae23c52e52..be920e3794521 100644 --- a/op-devstack/sysgo/fakepos.go +++ b/op-devstack/sysgo/fakepos.go @@ -6,7 
+6,7 @@ import ( ) type FakePoS struct { - p devtest.P + p devtest.CommonT fakepos *geth.FakePoS } diff --git a/op-devstack/sysgo/faucet.go b/op-devstack/sysgo/faucet.go deleted file mode 100644 index df0261cb6c1f6..0000000000000 --- a/op-devstack/sysgo/faucet.go +++ /dev/null @@ -1,144 +0,0 @@ -package sysgo - -import ( - "context" - "fmt" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-faucet/config" - "github.com/ethereum-optimism/optimism/op-faucet/faucet" - fconf "github.com/ethereum-optimism/optimism/op-faucet/faucet/backend/config" - ftypes "github.com/ethereum-optimism/optimism/op-faucet/faucet/backend/types" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/endpoint" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" -) - -type FaucetService struct { - service *faucet.Service -} - -func (n *FaucetService) hydrate(system stack.ExtensibleSystem) { - if n == nil || n.service == nil { - return - } - - require := system.T().Require() - - for faucetID, chainID := range n.service.Faucets() { - faucetRPC := n.service.FaucetEndpoint(faucetID) - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), faucetRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - id := stack.NewFaucetID(faucetID.String(), chainID) - front := shim.NewFaucet(shim.FaucetConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: id, - Client: rpcCl, - }) - net := system.Network(chainID).(stack.ExtensibleNetwork) - net.AddFaucet(front) - } - - // Label the default faucets, in case we have multiple - for chainID, faucetID := range n.service.Defaults() { - id := stack.NewFaucetID(faucetID.String(), 
chainID) - net := system.Network(chainID).(stack.ExtensibleNetwork) - net.Faucet(stack.ByID[stack.Faucet](id)).SetLabel("default", "true") - } -} - -func isL2ELFaucetKind(kind stack.ComponentKind) bool { - switch kind { - case stack.KindL2ELNode, stack.KindRollupBoostNode, stack.KindOPRBuilderNode: - return true - default: - return false - } -} - -func WithFaucets(l1ELs []stack.ComponentID, l2ELs []stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - - require.NotEmpty(l2ELs, "need at least one L2 EL for faucet service") - for i, l1ELID := range l1ELs { - require.Equalf(stack.KindL1ELNode, l1ELID.Kind(), "l1ELs[%d] must be kind %s", i, stack.KindL1ELNode) - require.Truef(l1ELID.HasChainID(), "l1ELs[%d] must be chain-scoped", i) - } - for i, l2ELID := range l2ELs { - require.Truef(isL2ELFaucetKind(l2ELID.Kind()), - "l2ELs[%d] must be one of %s, %s, %s", - i, stack.KindL2ELNode, stack.KindRollupBoostNode, stack.KindOPRBuilderNode) - require.Truef(l2ELID.HasChainID(), "l2ELs[%d] must be chain-scoped", i) - } - - faucetID := stack.NewFaucetID("dev-faucet", l2ELs[0].ChainID()) - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), faucetID)) - require = p.Require() - - require.Nil(orch.faucet, "can only support a single faucet-service in sysgo") - - funderKey, err := orch.keys.Secret(devkeys.UserKey(funderMnemonicIndex)) - require.NoError(err, "need funder key") - funderKeyStr := hexutil.Encode(crypto.FromECDSA(funderKey)) - - faucets := make(map[ftypes.FaucetID]*fconf.FaucetEntry) - for _, elID := range l1ELs { - id := ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", elID.ChainID())) - require.NotContains(faucets, id, "one faucet per chain only") - - el, ok := orch.GetL1EL(elID) - require.True(ok, "need L1 EL for faucet", elID) - - faucets[id] = &fconf.FaucetEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, - ChainID: elID.ChainID(), - TxCfg: fconf.TxManagerConfig{ - 
PrivateKey: funderKeyStr, - }, - } - } - for _, elID := range l2ELs { - id := ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", elID.ChainID())) - require.NotContains(faucets, id, "one faucet per chain only") - - el, ok := orch.GetL2EL(elID) - require.True(ok, "need L2 EL for faucet", elID) - - faucets[id] = &fconf.FaucetEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, - ChainID: elID.ChainID(), - TxCfg: fconf.TxManagerConfig{ - PrivateKey: funderKeyStr, - }, - } - } - cfg := &config.Config{ - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - }, - Faucets: &fconf.Config{ - Faucets: faucets, - }, - } - logger := p.Logger() - srv, err := faucet.FromConfig(p.Ctx(), cfg, logger) - require.NoError(err, "must setup faucet service") - require.NoError(srv.Start(p.Ctx())) - p.Cleanup(func() { - ctx, cancel := context.WithCancel(context.Background()) - cancel() // force-quit - logger.Info("Closing faucet") - _ = srv.Stop(ctx) - logger.Info("Closed faucet") - }) - - orch.faucet = &FaucetService{service: srv} - }) -} diff --git a/op-devstack/sysgo/keys.go b/op-devstack/sysgo/keys.go deleted file mode 100644 index b1e4f74a8c64c..0000000000000 --- a/op-devstack/sysgo/keys.go +++ /dev/null @@ -1,15 +0,0 @@ -package sysgo - -import ( - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/stack" -) - -func WithMnemonicKeys(mnemonic string) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - hd, err := devkeys.NewMnemonicDevKeys(mnemonic) - require.NoError(err) - orch.keys = hd - }) -} diff --git a/op-devstack/sysgo/l1_network.go b/op-devstack/sysgo/l1_network.go index e2cf19594c2ad..56b3ad202ad7f 100644 --- a/op-devstack/sysgo/l1_network.go +++ b/op-devstack/sysgo/l1_network.go @@ -2,24 +2,26 @@ package sysgo import ( "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/params" - 
"github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type L1Network struct { - id stack.ComponentID + name string + chainID eth.ChainID genesis *core.Genesis blockTime uint64 } -func (n *L1Network) hydrate(system stack.ExtensibleSystem) { - sysL1Net := shim.NewL1Network(shim.L1NetworkConfig{ - NetworkConfig: shim.NetworkConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ChainConfig: n.genesis.Config, - }, - ID: n.id, - }) - system.AddL1Network(sysL1Net) +func (n *L1Network) Name() string { + return n.name +} + +func (n *L1Network) ChainID() eth.ChainID { + return n.chainID +} + +func (n *L1Network) ChainConfig() *params.ChainConfig { + return n.genesis.Config } diff --git a/op-devstack/sysgo/l1_nodes.go b/op-devstack/sysgo/l1_nodes.go index 7fbe95b1e4533..eaf20820c294e 100644 --- a/op-devstack/sysgo/l1_nodes.go +++ b/op-devstack/sysgo/l1_nodes.go @@ -1,27 +1,21 @@ package sysgo import ( - "os" - "path/filepath" - - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type L1ELNode interface { - hydrator l1ELNode() UserRPC() string AuthRPC() string } type L1Geth struct { - id stack.ComponentID + name string + chainID eth.ChainID userRPC string authRPC string l1Geth *geth.GethInstance @@ -38,138 +32,18 @@ func (g *L1Geth) AuthRPC() string { return g.authRPC } -func (n *L1Geth) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), 
system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - - frontend := shim.NewL1ELNode(shim.L1ELNodeConfig{ - ID: n.id, - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: rpcCl, - ChainID: n.id.ChainID(), - }, - }) - l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(n.id.ChainID()))) - l1Net.(stack.ExtensibleL1Network).AddL1ELNode(frontend) -} - type L1CLNode struct { - id stack.ComponentID + name string + chainID eth.ChainID beaconHTTPAddr string beacon *fakebeacon.FakeBeacon fakepos *FakePoS } -func (n *L1CLNode) hydrate(system stack.ExtensibleSystem) { - beaconCl := client.NewBasicHTTPClient(n.beaconHTTPAddr, system.Logger()) - frontend := shim.NewL1CLNode(shim.L1CLNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: n.id, - Client: beaconCl, - }) - l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(n.id.ChainID()))) - l1Net.(stack.ExtensibleL1Network).AddL1CLNode(frontend) -} - -const DevstackL1ELKindEnvVar = "DEVSTACK_L1EL_KIND" - -func WithL1Nodes(l1ELID stack.ComponentID, l1CLID stack.ComponentID) stack.Option[*Orchestrator] { - switch os.Getenv(DevstackL1ELKindEnvVar) { - case "geth": - return WithL1NodesSubprocess(l1ELID, l1CLID) - default: - return WithL1NodesInProcess(l1ELID, l1CLID) - } +func (n *L1CLNode) BeaconHTTPAddr() string { + return n.beaconHTTPAddr } -func WithL1NodesInProcess(l1ELID stack.ComponentID, l1CLID stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - clP := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l1CLID)) - elP := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l1ELID)) - require := orch.P().Require() - - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) - require.True(ok, "L1 network must exist") - - blockTimeL1 := l1Net.blockTime - l1FinalizedDistance := uint64(20) - l1Clock := clock.SystemClock - if 
orch.timeTravelClock != nil { - l1Clock = orch.timeTravelClock - } - - blobPath := clP.TempDir() - - clLogger := clP.Logger() - bcn := fakebeacon.NewBeacon(clLogger, blobstore.New(), l1Net.genesis.Timestamp, blockTimeL1) - clP.Cleanup(func() { - _ = bcn.Close() - }) - require.NoError(bcn.Start("127.0.0.1:0")) - beaconApiAddr := bcn.BeaconAddr() - require.NotEmpty(beaconApiAddr, "beacon API listener must be up") - - orch.writeDefaultJWT() - - elLogger := elP.Logger() - l1Geth, fp, err := geth.InitL1( - blockTimeL1, - l1FinalizedDistance, - l1Net.genesis, - l1Clock, - filepath.Join(blobPath, "l1_el"), - bcn, - geth.WithAuth(orch.jwtPath), - ) - require.NoError(err) - require.NoError(l1Geth.Node.Start()) - elP.Cleanup(func() { - elLogger.Info("Closing L1 geth") - _ = l1Geth.Close() - }) - - l1ELNode := &L1Geth{ - id: l1ELID, - userRPC: l1Geth.Node.HTTPEndpoint(), - authRPC: l1Geth.Node.HTTPAuthEndpoint(), - l1Geth: l1Geth, - blobPath: blobPath, - } - require.False(orch.registry.Has(l1ELID), "must not already exist") - orch.registry.Register(l1ELID, l1ELNode) - - l1CLNode := &L1CLNode{ - id: l1CLID, - beaconHTTPAddr: beaconApiAddr, - beacon: bcn, - fakepos: &FakePoS{fakepos: fp, p: clP}, - } - require.False(orch.registry.Has(l1CLID), "must not already exist") - orch.registry.Register(l1CLID, l1CLNode) - }) -} - -// WithExtL1Nodes initializes L1 EL and CL nodes that connect to external RPC endpoints -func WithExtL1Nodes(l1ELID stack.ComponentID, l1CLID stack.ComponentID, elRPCEndpoint string, clRPCEndpoint string) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - - // Create L1 EL node with external RPC - l1ELNode := &L1Geth{ - id: l1ELID, - userRPC: elRPCEndpoint, - } - require.False(orch.registry.Has(l1ELID), "must not already exist") - orch.registry.Register(l1ELID, l1ELNode) - - // Create L1 CL node with external RPC - l1CLNode := &L1CLNode{ - id: l1CLID, - beaconHTTPAddr: clRPCEndpoint, - } - 
require.False(orch.registry.Has(l1CLID), "must not already exist") - orch.registry.Register(l1CLID, l1CLNode) - }) +func (n *L1CLNode) FakePoS() stack.Lifecycle { + return n.fakepos } diff --git a/op-devstack/sysgo/l1_nodes_subprocess.go b/op-devstack/sysgo/l1_nodes_subprocess.go deleted file mode 100644 index 041fe5c4e015d..0000000000000 --- a/op-devstack/sysgo/l1_nodes_subprocess.go +++ /dev/null @@ -1,246 +0,0 @@ -package sysgo - -import ( - "encoding/json" - "os" - "os/exec" - "path/filepath" - "sync" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/clock" - "github.com/ethereum-optimism/optimism/op-service/logpipe" - "github.com/ethereum-optimism/optimism/op-service/tasks" - "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" - "github.com/ethereum/go-ethereum/ethclient" -) - -type ExternalL1Geth struct { - mu sync.Mutex - - id stack.ComponentID - l1Net *L1Network - // authRPC points to a proxy that forwards to geth's endpoint - authRPC string - // userRPC points to a proxy that forwards to geth's endpoint - userRPC string - - authProxy *tcpproxy.Proxy - userProxy *tcpproxy.Proxy - - execPath string - args []string - // Each entry is of the form "key=value". 
- env []string - - p devtest.P - - sub *SubProcess -} - -func (*ExternalL1Geth) l1ELNode() {} - -func (n *ExternalL1Geth) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(n.id.ChainID()))) - sysL1EL := shim.NewL1ELNode(shim.L1ELNodeConfig{ - ID: n.id, - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: rpcCl, - ChainID: n.id.ChainID(), - }, - }) - sysL1EL.SetLabel(match.LabelVendor, string(match.Geth)) - l1Net.(stack.ExtensibleL1Network).AddL1ELNode(sysL1EL) -} - -func (n *ExternalL1Geth) Start() { - n.mu.Lock() - defer n.mu.Unlock() - if n.sub != nil { - n.p.Logger().Warn("geth already started") - return - } - if n.authProxy == nil { - n.authProxy = tcpproxy.New(n.p.Logger()) - n.p.Require().NoError(n.authProxy.Start()) - n.p.Cleanup(func() { - n.authProxy.Close() - }) - n.authRPC = "ws://" + n.authProxy.Addr() - } - if n.userProxy == nil { - n.userProxy = tcpproxy.New(n.p.Logger()) - n.p.Require().NoError(n.userProxy.Start()) - n.p.Cleanup(func() { - n.userProxy.Close() - }) - n.userRPC = "http://" + n.userProxy.Addr() - } - logOut := logpipe.ToLogger(n.p.Logger().New("src", "stdout")) - logErr := logpipe.ToLogger(n.p.Logger().New("src", "stderr")) - userRPC := make(chan string, 1) - authRPC := make(chan string, 1) - onLogEntry := func(e logpipe.LogEntry) { - switch e.LogMessage() { - case "HTTP server started": - auth, _ := e.FieldValue("auth").(bool) - if auth { - select { - case authRPC <- "http://" + e.FieldValue("endpoint").(string): - default: - } - } else { - select { - case userRPC <- "http://" + e.FieldValue("endpoint").(string): - default: - } - } - } - } - stdOutLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseGoStructuredLogs(line) - 
logOut(e) - onLogEntry(e) - }) - stdErrLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseGoStructuredLogs(line) - logErr(e) - onLogEntry(e) - }) - n.sub = NewSubProcess(n.p, stdOutLogs, stdErrLogs) - - err := n.sub.Start(n.execPath, n.args, n.env) - n.p.Require().NoError(err, "Must start") - - var userRPCAddr, authRPCAddr string - n.p.Require().NoError(tasks.Await(n.p.Ctx(), userRPC, &userRPCAddr), "need user RPC") - n.p.Require().NoError(tasks.Await(n.p.Ctx(), authRPC, &authRPCAddr), "need auth RPC") - - n.userProxy.SetUpstream(ProxyAddr(n.p.Require(), userRPCAddr)) - n.authProxy.SetUpstream(ProxyAddr(n.p.Require(), authRPCAddr)) -} - -func (n *ExternalL1Geth) Stop() { - n.mu.Lock() - defer n.mu.Unlock() - err := n.sub.Stop(true) - n.p.Require().NoError(err, "Must stop") - n.sub = nil -} - -func (n *ExternalL1Geth) UserRPC() string { - return n.userRPC -} - -func (n *ExternalL1Geth) AuthRPC() string { - return n.authRPC -} - -const GethExecPathEnvVar = "SYSGO_GETH_EXEC_PATH" - -func WithL1NodesSubprocess(id stack.ComponentID, clID stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - require := p.Require() - - execPath, ok := os.LookupEnv(GethExecPathEnvVar) - require.True(ok) - _, err := os.Stat(execPath) - p.Require().NotErrorIs(err, os.ErrNotExist, "geth executable must exist") - - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(id.ChainID())) - require.True(ok, "L1 network required") - - jwtPath, jwtSecret := orch.writeDefaultJWT() - - tempDir := p.TempDir() - data, err := json.Marshal(l1Net.genesis) - p.Require().NoError(err, "must json-encode genesis") - chainConfigPath := filepath.Join(tempDir, "genesis.json") - p.Require().NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write genesis file") - - dataDirPath := filepath.Join(tempDir, "data") - p.Require().NoError(os.MkdirAll(dataDirPath, 0o755), "must create 
datadir") - - cmd := exec.Command(execPath, "--datadir", dataDirPath, "init", chainConfigPath) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - require.NoError(cmd.Run(), "initialize geth datadir") - - args := []string{ - "--log.format", "json", - "--datadir", dataDirPath, - "--http", "--http.addr", "127.0.0.1", "--http.port", "0", "--http.api", "admin,debug,eth,net,txpool", - "--authrpc.addr", "127.0.0.1", "--authrpc.port", "0", "--authrpc.jwtsecret", jwtPath, - "--ipcdisable", - "--port", "0", - "--nodiscover", - "--verbosity", "5", - "--miner.recommit", "2s", - "--gcmode", "archive", - } - - l1EL := &ExternalL1Geth{ - id: id, - l1Net: l1Net, - authRPC: "", - userRPC: "", - execPath: execPath, - args: args, - env: []string{}, - p: p, - } - - p.Logger().Info("Starting geth") - l1EL.Start() - p.Cleanup(l1EL.Stop) - p.Logger().Info("geth is ready", "userRPC", l1EL.userRPC, "authRPC", l1EL.authRPC) - elCID := id - require.False(orch.registry.Has(elCID), "must be unique L1 EL node") - orch.registry.Register(elCID, l1EL) - - backend, err := ethclient.DialContext(p.Ctx(), l1EL.userRPC) - require.NoError(err) - - l1Clock := clock.SystemClock - if orch.timeTravelClock != nil { - l1Clock = orch.timeTravelClock - } - - bcn := fakebeacon.NewBeacon(p.Logger(), blobstore.New(), l1Net.genesis.Timestamp, l1Net.blockTime) - p.Cleanup(func() { - _ = bcn.Close() - }) - require.NoError(bcn.Start("127.0.0.1:0")) - beaconApiAddr := bcn.BeaconAddr() - require.NotEmpty(beaconApiAddr, "beacon API listener must be up") - - engineCl, err := dialEngine(p.Ctx(), l1EL.AuthRPC(), jwtSecret) - require.NoError(err) - fp := &FakePoS{ - p: p, - fakepos: geth.NewFakePoS(backend, engineCl, l1Clock, p.Logger(), l1Net.blockTime, 20, bcn, l1Net.genesis.Config), - } - fp.Start() - p.Cleanup(fp.Stop) - orch.registry.Register(clID, &L1CLNode{ - id: clID, - beaconHTTPAddr: bcn.BeaconAddr(), - beacon: bcn, - fakepos: fp, - }) - }) -} diff --git a/op-devstack/sysgo/l1_runtime.go 
b/op-devstack/sysgo/l1_runtime.go new file mode 100644 index 0000000000000..fdc6775a0953d --- /dev/null +++ b/op-devstack/sysgo/l1_runtime.go @@ -0,0 +1,229 @@ +package sysgo + +import ( + "encoding/json" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/blobstore" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/fakebeacon" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/ethclient" +) + +const DevstackL1ELKindEnvVar = "DEVSTACK_L1EL_KIND" + +const GethExecPathEnvVar = "SYSGO_GETH_EXEC_PATH" + +func writeJWTSecret(t devtest.T) (string, [32]byte) { + jwtPath := filepath.Join(t.TempDir(), "jwt_secret") + jwtSecret := [32]byte{123} + err := os.WriteFile(jwtPath, []byte(hexutil.Encode(jwtSecret[:])), 0o600) + t.Require().NoError(err, "failed to write jwt secret") + return jwtPath, jwtSecret +} + +func startInProcessL1(t devtest.T, l1Net *L1Network, jwtPath string) (*L1Geth, *L1CLNode) { + return startInProcessL1WithClock(t, l1Net, jwtPath, clock.SystemClock) +} + +func startInProcessL1WithClock(t devtest.T, l1Net *L1Network, jwtPath string, l1Clock clock.Clock) (*L1Geth, *L1CLNode) { + if os.Getenv(DevstackL1ELKindEnvVar) == "geth" { + return startSubprocessL1WithClock(t, l1Net, jwtPath, l1Clock) + } + + require := t.Require() + l1ChainID := l1Net.ChainID() + + blobPath := t.TempDir() + bcn := fakebeacon.NewBeacon(t.Logger().New("component", "l1cl"), blobstore.New(), l1Net.genesis.Timestamp, l1Net.blockTime) + t.Cleanup(func() { + _ = bcn.Close() + }) + require.NoError(bcn.Start("127.0.0.1:0")) + 
beaconAddr := bcn.BeaconAddr() + require.NotEmpty(beaconAddr, "beacon API listener must be up") + + l1Geth, fp, err := geth.InitL1( + l1Net.blockTime, + 20, + l1Net.genesis, + l1Clock, + filepath.Join(blobPath, "l1_el"), + bcn, + geth.WithAuth(jwtPath), + ) + require.NoError(err) + require.NoError(l1Geth.Node.Start()) + t.Cleanup(func() { + t.Logger().Info("Closing L1 geth") + _ = l1Geth.Close() + }) + + l1EL := &L1Geth{ + name: "l1", + chainID: l1ChainID, + userRPC: l1Geth.Node.HTTPEndpoint(), + authRPC: l1Geth.Node.HTTPAuthEndpoint(), + l1Geth: l1Geth, + blobPath: blobPath, + } + l1CL := &L1CLNode{ + name: "l1", + chainID: l1ChainID, + beaconHTTPAddr: beaconAddr, + beacon: bcn, + fakepos: &FakePoS{fakepos: fp, p: t}, + } + return l1EL, l1CL +} + +func startSubprocessL1WithClock(t devtest.T, l1Net *L1Network, jwtPath string, l1Clock clock.Clock) (*L1Geth, *L1CLNode) { + require := t.Require() + l1ChainID := l1Net.ChainID() + + execPath, ok := os.LookupEnv(GethExecPathEnvVar) + require.True(ok, "%s must be set when %s=geth", GethExecPathEnvVar, DevstackL1ELKindEnvVar) + _, err := os.Stat(execPath) + require.NotErrorIs(err, os.ErrNotExist, "geth executable must exist") + + tempDir := t.TempDir() + data, err := json.Marshal(l1Net.genesis) + require.NoError(err, "must json-encode genesis") + chainConfigPath := filepath.Join(tempDir, "genesis.json") + require.NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write genesis file") + + dataDirPath := filepath.Join(tempDir, "data") + require.NoError(os.MkdirAll(dataDirPath, 0o755), "must create datadir") + + initCmd := exec.Command(execPath, "--datadir", dataDirPath, "init", chainConfigPath) + initCmd.Stdout = os.Stdout + initCmd.Stderr = os.Stderr + require.NoError(initCmd.Run(), "initialize geth datadir") + + userProxy := tcpproxy.New(t.Logger().New("component", "l1el-user-proxy")) + require.NoError(userProxy.Start()) + t.Cleanup(func() { + userProxy.Close() + }) + authProxy := 
tcpproxy.New(t.Logger().New("component", "l1el-auth-proxy")) + require.NoError(authProxy.Start()) + t.Cleanup(func() { + authProxy.Close() + }) + + userRPC := "ws://" + userProxy.Addr() + authRPC := "ws://" + authProxy.Addr() + userRPCUpstream := make(chan string, 1) + authRPCUpstream := make(chan string, 1) + onLogEntry := func(e logpipe.LogEntry) { + switch e.LogMessage() { + case "WebSocket enabled": + select { + case userRPCUpstream <- e.FieldValue("url").(string): + default: + } + case "HTTP server started": + if e.FieldValue("auth").(bool) { + select { + case authRPCUpstream <- "http://" + e.FieldValue("endpoint").(string): + default: + } + } + } + } + logOut := logpipe.ToLogger(t.Logger().New("component", "l1el", "src", "stdout")) + logErr := logpipe.ToLogger(t.Logger().New("component", "l1el", "src", "stderr")) + stdOutLogs := logpipe.LogCallback(func(line []byte) { + e := logpipe.ParseGoStructuredLogs(line) + logOut(e) + onLogEntry(e) + }) + stdErrLogs := logpipe.LogCallback(func(line []byte) { + e := logpipe.ParseGoStructuredLogs(line) + logErr(e) + onLogEntry(e) + }) + sub := NewSubProcess(t, stdOutLogs, stdErrLogs) + args := []string{ + "--log.format", "json", + "--datadir", dataDirPath, + "--ws", "--ws.addr", "127.0.0.1", "--ws.port", "0", "--ws.origins", "*", "--ws.api", "admin,debug,eth,net,txpool", + "--authrpc.addr", "127.0.0.1", "--authrpc.port", "0", "--authrpc.jwtsecret", jwtPath, + "--ipcdisable", + "--port", "0", + "--nodiscover", + "--verbosity", "5", + "--miner.recommit", "2s", + "--gcmode", "archive", + } + require.NoError(sub.Start(execPath, args, nil), "must start geth subprocess") + + var userRPCAddr string + var authRPCAddr string + require.NoError(tasks.Await(t.Ctx(), userRPCUpstream, &userRPCAddr), "need geth user RPC") + require.NoError(tasks.Await(t.Ctx(), authRPCUpstream, &authRPCAddr), "need geth auth RPC") + userProxy.SetUpstream(ProxyAddr(require, userRPCAddr)) + authProxy.SetUpstream(ProxyAddr(require, authRPCAddr)) + + 
backend, err := ethclient.DialContext(t.Ctx(), userRPC) + require.NoError(err, "failed to dial geth user RPC") + t.Cleanup(backend.Close) + + jwtSecret := readJWTSecret(t, jwtPath) + engineCl, err := dialEngine(t.Ctx(), authRPC, jwtSecret) + require.NoError(err, "failed to dial geth engine API") + t.Cleanup(func() { + engineCl.inner.Close() + }) + + bcn := fakebeacon.NewBeacon(t.Logger().New("component", "l1cl"), blobstore.New(), l1Net.genesis.Timestamp, l1Net.blockTime) + t.Cleanup(func() { + _ = bcn.Close() + }) + require.NoError(bcn.Start("127.0.0.1:0")) + beaconAddr := bcn.BeaconAddr() + require.NotEmpty(beaconAddr, "beacon API listener must be up") + + fp := &FakePoS{ + p: t, + fakepos: geth.NewFakePoS(backend, engineCl, l1Clock, t.Logger().New("component", "l1cl"), l1Net.blockTime, 20, bcn, l1Net.genesis.Config), + } + fp.Start() + t.Cleanup(fp.Stop) + + l1EL := &L1Geth{ + name: "l1", + chainID: l1ChainID, + userRPC: userRPC, + authRPC: authRPC, + blobPath: tempDir, + } + l1CL := &L1CLNode{ + name: "l1", + chainID: l1ChainID, + beaconHTTPAddr: beaconAddr, + beacon: bcn, + fakepos: fp, + } + return l1EL, l1CL +} + +func readJWTSecret(t devtest.T, jwtPath string) [32]byte { + data, err := os.ReadFile(jwtPath) + t.Require().NoError(err, "failed to read jwt secret file") + decoded, err := hexutil.Decode(strings.TrimSpace(string(data))) + t.Require().NoError(err, "failed to decode jwt secret file") + var jwtSecret [32]byte + copy(jwtSecret[:], decoded) + t.Require().Len(decoded, len(jwtSecret), "jwt secret must be 32 bytes") + return jwtSecret +} diff --git a/op-devstack/sysgo/l2_batcher.go b/op-devstack/sysgo/l2_batcher.go index 3a2013ded78fb..3bf6fdbb25f57 100644 --- a/op-devstack/sysgo/l2_batcher.go +++ b/op-devstack/sysgo/l2_batcher.go @@ -1,27 +1,13 @@ package sysgo import ( - "context" - "time" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" - batcherFlags 
"github.com/ethereum-optimism/optimism/op-batcher/flags" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/setuputils" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/endpoint" - oplog "github.com/ethereum-optimism/optimism/op-service/log" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type L2Batcher struct { - id stack.ComponentID + name string + chainID eth.ChainID service *bss.BatcherService rpc string l1RPC string @@ -29,119 +15,8 @@ type L2Batcher struct { l2ELRPC string } -func (b *L2Batcher) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), b.rpc, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - bFrontend := shim.NewL2Batcher(shim.L2BatcherConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: b.id, - Client: rpcCl, - }) - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(b.id.ChainID()))) - l2Net.(stack.ExtensibleL2Network).AddL2Batcher(bFrontend) -} - -type BatcherOption func(id stack.ComponentID, cfg *bss.CLIConfig) - -func WithBatcherOption(opt BatcherOption) stack.Option[*Orchestrator] { - return stack.Deploy[*Orchestrator](func(orch *Orchestrator) { - orch.batcherOptions = append(orch.batcherOptions, opt) - }) +func (b *L2Batcher) UserRPC() string { + return b.rpc } -func WithBatcher(batcherID stack.ComponentID, l1ELID stack.ComponentID, l2CLID stack.ComponentID, l2ELID stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := 
orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), batcherID)) - - require := p.Require() - batcherCID := batcherID - require.False(orch.registry.Has(batcherCID), "batcher must not already exist") - - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(l2CLID.ChainID())) - require.True(ok) - - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) - require.True(ok) - - require.Equal(l2Net.l1ChainID, l1Net.id.ChainID(), "expecting L1EL on L1 of L2CL") - - require.Equal(l2CLID.ChainID(), l2ELID.ChainID(), "L2 CL and EL must be on same L2 chain") - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok) - - l2CL, ok := orch.GetL2CL(l2CLID) - require.True(ok) - - l2EL, ok := orch.GetL2EL(l2ELID) - require.True(ok) - - batcherSecret, err := orch.keys.Secret(devkeys.BatcherRole.Key(l2ELID.ChainID().ToBig())) - require.NoError(err) - - logger := p.Logger() - logger.SetContext(p.Ctx()) - logger.Info("Batcher key acquired", "addr", crypto.PubkeyToAddress(batcherSecret.PublicKey)) - - batcherCLIConfig := &bss.CLIConfig{ - L1EthRpc: l1EL.UserRPC(), - L2EthRpc: []string{l2EL.UserRPC()}, - RollupRpc: []string{l2CL.UserRPC()}, - MaxPendingTransactions: 7, - MaxChannelDuration: 1, - MaxL1TxSize: 120_000, - TestUseMaxTxSizeForBlobs: false, - TargetNumFrames: 1, - ApproxComprRatio: 0.4, - SubSafetyMargin: 4, - PollInterval: 500 * time.Millisecond, - TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.UserRPC()), batcherSecret), - LogConfig: oplog.CLIConfig{ - Level: log.LevelInfo, - Format: oplog.FormatText, - }, - Stopped: false, - BatchType: derive.SpanBatchType, - MaxBlocksPerSpanBatch: 10, - DataAvailabilityType: batcherFlags.CalldataType, - CompressionAlgo: derive.Brotli, - RPC: oprpc.CLIConfig{ - EnableAdmin: true, - }, - } - for _, opt := range orch.batcherOptions { - opt(batcherID, batcherCLIConfig) - } - - batcherContext, cancelBatcherCtx := context.WithCancel(p.Ctx()) - var closeAppFn context.CancelCauseFunc = func(cause error) { - 
p.Errorf("closeAppFn called, batcher hit a critical error: %v", cause) - cancelBatcherCtx() - } - batcher, err := bss.BatcherServiceFromCLIConfig( - batcherContext, closeAppFn, "0.0.1", batcherCLIConfig, - logger) - require.NoError(err) - require.NoError(batcher.Start(p.Ctx())) - p.Cleanup(func() { - ctx, cancel := context.WithCancel(p.Ctx()) - cancel() // force-quit - logger.Info("Closing batcher") - _ = batcher.Stop(ctx) - logger.Info("Closed batcher") - }) - - b := &L2Batcher{ - id: batcherID, - service: batcher, - rpc: batcher.HTTPEndpoint(), - l1RPC: l1EL.UserRPC(), - l2CLRPC: l2CL.UserRPC(), - l2ELRPC: l2EL.UserRPC(), - } - orch.registry.Register(batcherID, b) - }) -} +type BatcherOption func(target ComponentTarget, cfg *bss.CLIConfig) diff --git a/op-devstack/sysgo/l2_challenger.go b/op-devstack/sysgo/l2_challenger.go index 3057819b413f9..76dc6d5d57d44 100644 --- a/op-devstack/sysgo/l2_challenger.go +++ b/op-devstack/sysgo/l2_challenger.go @@ -1,245 +1,18 @@ package sysgo import ( - "context" - "runtime" - "time" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - opchallenger "github.com/ethereum-optimism/optimism/op-challenger" "github.com/ethereum-optimism/optimism/op-challenger/config" - "github.com/ethereum-optimism/optimism/op-challenger/metrics" - shared "github.com/ethereum-optimism/optimism/op-devstack/shared/challenger" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/cliapp" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum-optimism/optimism/op-service/eth" ) -type l2ChallengerOpts struct { - useCannonKonaConfig bool -} - type L2Challenger struct { - id stack.ComponentID + name string + chainIDs []eth.ChainID service cliapp.Lifecycle - l2NetIDs 
[]stack.ComponentID config *config.Config } -func (p *L2Challenger) hydrate(system stack.ExtensibleSystem) { - bFrontend := shim.NewL2Challenger(shim.L2ChallengerConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: p.id, - Config: p.config, - }) - - for _, netID := range p.l2NetIDs { - l2Net := system.L2Network(stack.ByID[stack.L2Network](netID)) - l2Net.(stack.ExtensibleL2Network).AddL2Challenger(bFrontend) - } -} - -func WithL2Challenger(challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, - supervisorID *stack.ComponentID, clusterID *stack.ComponentID, l2CLID *stack.ComponentID, l2ELIDs []stack.ComponentID, -) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - WithL2ChallengerPostDeploy(orch, challengerID, l1ELID, l1CLID, supervisorID, clusterID, l2CLID, l2ELIDs, nil) - }) -} - -func WithSuperL2Challenger(challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, - supervisorID *stack.ComponentID, clusterID *stack.ComponentID, l2ELIDs []stack.ComponentID, -) stack.Option[*Orchestrator] { - return stack.Finally(func(orch *Orchestrator) { - WithL2ChallengerPostDeploy(orch, challengerID, l1ELID, l1CLID, supervisorID, clusterID, nil, l2ELIDs, nil) - }) -} - -func WithSupernodeL2Challenger(challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, - supernodeID *stack.SupernodeID, clusterID *stack.ComponentID, l2ELIDs []stack.ComponentID, -) stack.Option[*Orchestrator] { - return stack.Finally(func(orch *Orchestrator) { - WithL2ChallengerPostDeploy(orch, challengerID, l1ELID, l1CLID, nil, clusterID, nil, l2ELIDs, supernodeID) - }) -} - -func WithL2ChallengerPostDeploy(orch *Orchestrator, challengerID stack.ComponentID, l1ELID stack.ComponentID, l1CLID stack.ComponentID, - supervisorID *stack.ComponentID, clusterID *stack.ComponentID, l2CLID *stack.ComponentID, l2ELIDs []stack.ComponentID, - supernodeID *stack.SupernodeID, -) { - ctx := 
orch.P().Ctx() - ctx = stack.ContextWithID(ctx, challengerID) - p := orch.P().WithCtx(ctx) - - require := p.Require() - challengerCID := challengerID - require.False(orch.registry.Has(challengerCID), "challenger must not already exist") - - challengerSecret, err := orch.keys.Secret(devkeys.ChallengerRole.Key(challengerID.ChainID().ToBig())) - require.NoError(err) - - logger := p.Logger() - logger.Info("Challenger key acquired", "addr", crypto.PubkeyToAddress(challengerSecret.PublicKey)) - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok) - l1CL, ok := orch.GetL1CL(l1CLID) - require.True(ok) - - l2Geneses := make([]*core.Genesis, 0, len(l2ELIDs)) - rollupCfgs := make([]*rollup.Config, 0, len(l2ELIDs)) - l2NetIDs := make([]stack.ComponentID, 0, len(l2ELIDs)) - var disputeGameFactoryAddr common.Address - var interopScheduled bool - - useSuperRoots := false - if orch.wb.outInteropMigration != nil { - disputeGameFactoryAddr = orch.wb.outInteropMigration.DisputeGameFactory - require.NotEmpty(disputeGameFactoryAddr, "dispute game factory address is empty") - useSuperRoots = true - } - for _, l2ELID := range l2ELIDs { - chainID := l2ELID.ChainID() - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(chainID)) - require.Truef(ok, "l2Net %s not found", chainID) - factory := l2Net.deployment.DisputeGameFactoryProxyAddr() - if disputeGameFactoryAddr == (common.Address{}) { - disputeGameFactoryAddr = factory - interopScheduled = l2Net.genesis.Config.InteropTime != nil - } else if !useSuperRoots { - require.Equal(l2Net.genesis.Config.InteropTime != nil, interopScheduled, "Cluster not consistently using interop") - } - - l2Geneses = append(l2Geneses, l2Net.genesis) - rollupCfgs = append(rollupCfgs, l2Net.rollupCfg) - l2NetIDs = append(l2NetIDs, l2Net.id) - } - - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) - if !ok { - require.Fail("l1 network not found") - } - l1Genesis := l1Net.genesis - - if orch.l2ChallengerOpts.useCannonKonaConfig { - 
p.Log("Enabling cannon-kona, you may need to build kona-host and prestates with: cd kona && just") - } - - dir := p.TempDir() - var cfg *config.Config - // If interop is scheduled, or if we cannot do the pre-interop connection, then set up with supervisor - if interopScheduled || l2CLID == nil || useSuperRoots { - require.NotNil(clusterID, "need cluster in interop") - require.False(supervisorID != nil && supernodeID != nil, "cannot set both supervisorID and supernodeID") - - superRPC := "" - useSuperNode := false - switch { - case supervisorID != nil: - supervisorNode, ok := orch.GetSupervisor(*supervisorID) - require.True(ok) - superRPC = supervisorNode.UserRPC() - case supernodeID != nil: - supernode, ok := orch.supernodes.Get(*supernodeID) - require.True(ok) - superRPC = supernode.UserRPC() - useSuperNode = true - default: - require.FailNow("need supervisor or supernode to connect to in interop/super-roots") - } - - l2ELRPCs := make([]string, len(l2ELIDs)) - for i, l2ELID := range l2ELIDs { - l2EL, ok := orch.GetL2EL(l2ELID) - require.True(ok) - l2ELRPCs[i] = l2EL.UserRPC() - } - cluster, ok := orch.GetCluster(*clusterID) - require.True(ok) - prestateVariant := shared.InteropVariant - options := []shared.Option{ - shared.WithFactoryAddress(disputeGameFactoryAddr), - shared.WithPrivKey(challengerSecret), - shared.WithDepset(cluster.DepSet()), - shared.WithCannonConfig(rollupCfgs, l1Genesis, l2Geneses, prestateVariant), - shared.WithSuperCannonGameType(), - shared.WithSuperPermissionedGameType(), - } - if orch.l2ChallengerOpts.useCannonKonaConfig { - options = append(options, - shared.WithCannonKonaInteropConfig(rollupCfgs, l1Genesis, l2Geneses), - shared.WithSuperCannonKonaGameType(), - ) - } - cfg, err = shared.NewInteropChallengerConfig(dir, l1EL.UserRPC(), l1CL.beaconHTTPAddr, superRPC, l2ELRPCs, options...) 
- require.NoError(err, "Failed to create interop challenger config") - cfg.UseSuperNode = useSuperNode - } else { - require.NotNil(l2CLID, "need L2 CL to connect to pre-interop") - // In a post-interop infra setup, with unscheduled interop, we may see multiple EL nodes. - var l2ELID stack.ComponentID - for _, id := range l2ELIDs { - if id.ChainID() == l2CLID.ChainID() { - l2ELID = id - break - } - } - require.NotZero(l2ELID, "need single L2 EL to connect to pre-interop") - l2CL, ok := orch.GetL2CL(*l2CLID) - require.True(ok) - l2EL, ok := orch.GetL2EL(l2ELID) - require.True(ok) - prestateVariant := shared.MTCannonVariant - options := []shared.Option{ - shared.WithFactoryAddress(disputeGameFactoryAddr), - shared.WithPrivKey(challengerSecret), - shared.WithCannonConfig(rollupCfgs, l1Genesis, l2Geneses, prestateVariant), - shared.WithCannonGameType(), - shared.WithPermissionedGameType(), - shared.WithFastGames(), - } - if orch.l2ChallengerOpts.useCannonKonaConfig { - options = append(options, - shared.WithCannonKonaConfig(rollupCfgs, l1Genesis, l2Geneses), - shared.WithCannonKonaGameType(), - ) - } - cfg, err = shared.NewPreInteropChallengerConfig(dir, l1EL.UserRPC(), l1CL.beaconHTTPAddr, l2CL.UserRPC(), l2EL.UserRPC(), options...) - require.NoError(err, "Failed to create pre-interop challenger config") - } - - svc, err := opchallenger.Main(ctx, logger, cfg, metrics.NoopMetrics) - require.NoError(err) - - require.NoError(svc.Start(ctx)) - p.Cleanup(func() { - ctx, cancel := context.WithCancel(ctx) - cancel() // force-quit - logger.Info("Closing challenger") - // Start a separate goroutine to print a stack trace if the challenger fails to stop in a timely manner. 
- timer := time.AfterFunc(1*time.Minute, func() { - if svc.Stopped() { - return - } - // Print stack trace of all goroutines - buf := make([]byte, 1<<20) // 1MB buffer - stacklen := runtime.Stack(buf, true) - logger.Error("Challenger failed to stop; printing all goroutine stacks:\n%v", string(buf[:stacklen])) - }) - _ = svc.Stop(ctx) - timer.Stop() - logger.Info("Closed challenger") - }) - - c := &L2Challenger{ - id: challengerID, - service: svc, - l2NetIDs: l2NetIDs, - config: cfg, - } - orch.registry.Register(challengerID, c) +func (p *L2Challenger) Config() *config.Config { + return p.config } diff --git a/op-devstack/sysgo/l2_cl.go b/op-devstack/sysgo/l2_cl.go index 7f4f47e681453..009e55d85c3a0 100644 --- a/op-devstack/sysgo/l2_cl.go +++ b/op-devstack/sysgo/l2_cl.go @@ -1,8 +1,6 @@ package sysgo import ( - "os" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/stack" nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" @@ -10,7 +8,6 @@ import ( ) type L2CLNode interface { - hydrate(system stack.ExtensibleSystem) stack.Lifecycle UserRPC() string InteropRPC() (endpoint string, jwtSecret eth.Bytes32) @@ -41,19 +38,19 @@ type L2CLConfig struct { } func L2CLSequencer() L2CLOption { - return L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { + return L2CLOptionFn(func(p devtest.T, _ ComponentTarget, cfg *L2CLConfig) { cfg.IsSequencer = true }) } func L2CLIndexing() L2CLOption { - return L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { + return L2CLOptionFn(func(p devtest.T, _ ComponentTarget, cfg *L2CLConfig) { cfg.IndexingMode = true }) } func L2CLFollowSource(source string) L2CLOption { - return L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { + return L2CLOptionFn(func(p devtest.T, _ ComponentTarget, cfg *L2CLConfig) { cfg.FollowSource = source }) } @@ -73,22 +70,15 @@ func DefaultL2CLConfig() *L2CLConfig { } type 
L2CLOption interface { - Apply(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) + Apply(p devtest.T, target ComponentTarget, cfg *L2CLConfig) } -// WithGlobalL2CLOption applies the L2CLOption to all L2CLNode instances in this orchestrator -func WithGlobalL2CLOption(opt L2CLOption) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(o *Orchestrator) { - o.l2CLOptions = append(o.l2CLOptions, opt) - }) -} - -type L2CLOptionFn func(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) +type L2CLOptionFn func(p devtest.T, target ComponentTarget, cfg *L2CLConfig) var _ L2CLOption = L2CLOptionFn(nil) -func (fn L2CLOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { - fn(p, id, cfg) +func (fn L2CLOptionFn) Apply(p devtest.T, target ComponentTarget, cfg *L2CLConfig) { + fn(p, target, cfg) } // L2CLOptionBundle a list of multiple L2CLOption, to all be applied in order. @@ -96,35 +86,9 @@ type L2CLOptionBundle []L2CLOption var _ L2CLOption = L2CLOptionBundle(nil) -func (l L2CLOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *L2CLConfig) { +func (l L2CLOptionBundle) Apply(p devtest.T, target ComponentTarget, cfg *L2CLConfig) { for _, opt := range l { p.Require().NotNil(opt, "cannot Apply nil L2CLOption") - opt.Apply(p, id, cfg) - } -} - -// WithL2CLNode adds the default type of L2 CL node. -// The default can be configured with DEVSTACK_L2CL_KIND. -// Tests that depend on specific types can use options like WithKonaNode and WithOpNode directly. -func WithL2CLNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { - switch os.Getenv("DEVSTACK_L2CL_KIND") { - case "kona": - return WithKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...) - case "supernode": - supe := stack.NewSupernodeID("default", l2CLID.ChainID()) - return WithSupernode(supe, l2CLID, l1CLID, l1ELID, l2ELID, opts...) 
- default: - return WithOpNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...) - } -} - -func WithL2CLNodeFollowL2(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, l2FollowSourceID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { - switch os.Getenv("DEVSTACK_L2CL_KIND") { - case "kona": - return WithKonaNodeFollowL2(l2CLID, l1CLID, l1ELID, l2ELID, l2FollowSourceID, opts...) - case "supernode": - panic("supernode does not support following") - default: - return WithOpNodeFollowL2(l2CLID, l1CLID, l1ELID, l2ELID, l2FollowSourceID, opts...) + opt.Apply(p, target, cfg) } } diff --git a/op-devstack/sysgo/l2_cl_kona.go b/op-devstack/sysgo/l2_cl_kona.go index 87823bf3bc33e..80350c2e43123 100644 --- a/op-devstack/sysgo/l2_cl_kona.go +++ b/op-devstack/sysgo/l2_cl_kona.go @@ -1,38 +1,28 @@ package sysgo import ( - "encoding/hex" - "encoding/json" "fmt" "net/url" - "os" - "path/filepath" "strings" "sync" - "github.com/ethereum/go-ethereum/crypto" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/logpipe" "github.com/ethereum-optimism/optimism/op-service/tasks" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" + "github.com/ethereum/go-ethereum/log" ) type KonaNode struct { mu sync.Mutex - id stack.ComponentID + name string + chainID eth.ChainID userRPC string interopEndpoint string // warning: currently not fully supported interopJwtSecret eth.Bytes32 - el stack.ComponentID userProxy *tcpproxy.Proxy @@ -41,33 +31,13 @@ type KonaNode struct { // Each entry is of the form 
"key=value". env []string - p devtest.P + p devtest.T sub *SubProcess l2MetricsRegistrar L2MetricsRegistrar } -func (k *KonaNode) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), k.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - sysL2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: k.id, - Client: rpcCl, - UserRPC: k.userRPC, - InteropEndpoint: k.interopEndpoint, - InteropJwtSecret: k.interopJwtSecret, - }) - sysL2CL.SetLabel(match.LabelVendor, string(match.KonaNode)) - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(k.id.ChainID()))) - l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) - sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(stack.ByID[stack.L2ELNode](k.el))) -} - func (k *KonaNode) Start() { k.mu.Lock() defer k.mu.Unlock() @@ -88,8 +58,8 @@ func (k *KonaNode) Start() { // Create the sub-process. // We pipe sub-process logs to the test-logger. // And inspect them along the way, to get the RPC server address. 
- logOut := logpipe.ToLogger(k.p.Logger().New("component", "kona-node", "src", "stdout")) - logErr := logpipe.ToLogger(k.p.Logger().New("component", "kona-node", "src", "stderr")) + logOut := logpipe.ToLoggerWithMinLevel(k.p.Logger().New("component", "kona-node", "src", "stdout"), log.LevelWarn) + logErr := logpipe.ToLoggerWithMinLevel(k.p.Logger().New("component", "kona-node", "src", "stderr"), log.LevelWarn) userRPCChan := make(chan string, 1) defer close(userRPCChan) @@ -131,7 +101,7 @@ func (k *KonaNode) Start() { if areMetricsEnabled() { var metricsTarget PrometheusMetricsTarget k.p.Require().NoError(tasks.Await(k.p.Ctx(), metricsTargetChan, &metricsTarget), "need metrics endpoint") - k.l2MetricsRegistrar.RegisterL2MetricsTargets(k.id, metricsTarget) + k.l2MetricsRegistrar.RegisterL2MetricsTargets(k.name, metricsTarget) } k.userProxy.SetUpstream(ProxyAddr(k.p.Require(), userRPCAddr)) @@ -160,149 +130,3 @@ func (k *KonaNode) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { } var _ L2CLNode = (*KonaNode)(nil) - -func WithKonaNodeFollowL2(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, l2FollowSourceID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - followSource := func(orch *Orchestrator) string { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - l2CLFollowSource, ok := orch.GetL2CL(l2FollowSourceID) - p.Require().True(ok, "l2 CL Follow Source required") - return l2CLFollowSource.UserRPC() - }(orch) - opts = append(opts, L2CLFollowSource(followSource)) - withKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)(orch) - }) -} - -func WithKonaNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(withKonaNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)) -} - -func withKonaNode(l2CLID 
stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) func(orch *Orchestrator) { - return func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - - require := p.Require() - - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1CLID.ChainID())) - require.True(ok, "l1 network required") - - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(l2CLID.ChainID())) - require.True(ok, "l2 network required") - - l1ChainConfig := l1Net.genesis.Config - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok, "l1 EL node required") - - l1CL, ok := orch.GetL1CL(l1CLID) - require.True(ok, "l1 CL node required") - - l2EL, ok := orch.GetL2EL(l2ELID) - require.True(ok, "l2 EL node required") - - cfg := DefaultL2CLConfig() - orch.l2CLOptions.Apply(orch.P(), l2CLID, cfg) // apply global options - L2CLOptionBundle(opts).Apply(orch.P(), l2CLID, cfg) // apply specific options - - tempKonaDir := p.TempDir() - - tempP2PPath := filepath.Join(tempKonaDir, "p2pkey.txt") - - tempRollupCfgPath := filepath.Join(tempKonaDir, "rollup.json") - rollupCfgData, err := json.Marshal(l2Net.rollupCfg) - p.Require().NoError(err, "must write rollup config") - p.Require().NoError(err, os.WriteFile(tempRollupCfgPath, rollupCfgData, 0o644)) - - tempL1CfgPath := filepath.Join(tempKonaDir, "l1-chain-config.json") - l1CfgData, err := json.Marshal(l1ChainConfig) - p.Require().NoError(err, "must write l1 chain config") - p.Require().NoError(err, os.WriteFile(tempL1CfgPath, l1CfgData, 0o644)) - - envVars := []string{ - "KONA_NODE_L1_ETH_RPC=" + l1EL.UserRPC(), - "KONA_NODE_L1_BEACON=" + l1CL.beaconHTTPAddr, - // TODO: WS RPC addresses do not work and will make the startup panic with a connection error in the - // JWT validation / engine-capabilities setup code-path. 
- "KONA_NODE_L2_ENGINE_RPC=" + strings.ReplaceAll(l2EL.EngineRPC(), "ws://", "http://"), - "KONA_NODE_L2_ENGINE_AUTH=" + l2EL.JWTPath(), - "KONA_NODE_ROLLUP_CONFIG=" + tempRollupCfgPath, - "KONA_NODE_L1_CHAIN_CONFIG=" + tempL1CfgPath, - "KONA_NODE_P2P_PRIV_PATH=" + tempP2PPath, - propagateEnvVarOrDefault("KONA_NODE_P2P_NO_DISCOVERY", "true"), - propagateEnvVarOrDefault("KONA_NODE_RPC_ADDR", "127.0.0.1"), - propagateEnvVarOrDefault("KONA_NODE_RPC_PORT", "0"), - propagateEnvVarOrDefault("KONA_NODE_RPC_WS_ENABLED", "true"), - propagateEnvVarOrDefault("KONA_METRICS_ADDR", ""), - propagateEnvVarOrDefault("KONA_LOG_LEVEL", "3"), // default to info level - propagateEnvVarOrDefault("KONA_LOG_STDOUT_FORMAT", "json"), - // p2p ports - propagateEnvVarOrDefault("KONA_NODE_P2P_LISTEN_IP", "127.0.0.1"), - propagateEnvVarOrDefault("KONA_NODE_P2P_LISTEN_TCP_PORT", "0"), - propagateEnvVarOrDefault("KONA_NODE_P2P_LISTEN_UDP_PORT", "0"), - } - - if areMetricsEnabled() { - // NB: Instead of getAvailableLocalPort, we should pass "0" so the OS picks its - // own port, but that is not currently logged properly so we cannot parse it. 
- // See: https://github.com/op-rs/kona/issues/2987 - metricsPort, err := getAvailableLocalPort() - p.Require().NoError(err, "WithKonaNode: getting metrics port") - - envVars = append(envVars, propagateEnvVarOrDefault("KONA_METRICS_PORT", metricsPort)) - envVars = append(envVars, "KONA_METRICS_ENABLED=true") - } - - if cfg.FollowSource != "" { - envVars = append(envVars, - "KONA_NODE_L2_FOLLOW_SOURCE="+cfg.FollowSource, - ) - } - - if cfg.IsSequencer { - p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) - require.NoError(err, "need p2p key for sequencer") - p2pKeyHex := "0x" + hex.EncodeToString(crypto.FromECDSA(p2pKey)) - // Write sequencer key to file (supported since kona PR #2871) - tempSeqKeyPath := filepath.Join(tempKonaDir, "p2p-sequencer.txt") - p.Require().NoError(os.WriteFile(tempSeqKeyPath, []byte(p2pKeyHex), 0o644)) - envVars = append(envVars, - "KONA_NODE_P2P_SEQUENCER_KEY_PATH="+tempSeqKeyPath, - "KONA_NODE_SEQUENCER_L1_CONFS=2", - "KONA_NODE_MODE=Sequencer", - ) - } else { - envVars = append(envVars, - "KONA_NODE_MODE=Validator", - ) - } - - execPath, err := EnsureRustBinary(p, RustBinarySpec{ - SrcDir: "kona", - Package: "kona-node", - Binary: "kona-node", - }) - p.Require().NoError(err, "prepare kona-node binary") - p.Require().NotEmpty(execPath, "kona-node binary path resolved") - - k := &KonaNode{ - id: l2CLID, - userRPC: "", // retrieved from logs - interopEndpoint: "", // retrieved from logs - interopJwtSecret: eth.Bytes32{}, - el: l2ELID, - execPath: execPath, - args: []string{"node"}, - env: envVars, - p: p, - l2MetricsRegistrar: orch, - } - p.Logger().Info("Starting kona-node") - k.Start() - p.Cleanup(k.Stop) - p.Logger().Info("Kona-node is up", "rpc", k.UserRPC()) - cid := l2CLID - require.False(orch.registry.Has(cid), "must not already exist") - orch.registry.Register(cid, k) - } -} diff --git a/op-devstack/sysgo/l2_cl_opnode.go b/op-devstack/sysgo/l2_cl_opnode.go index 80511ea53ea86..07b8bdbf4666a 
100644 --- a/op-devstack/sysgo/l2_cl_opnode.go +++ b/op-devstack/sysgo/l2_cl_opnode.go @@ -2,48 +2,28 @@ package sysgo import ( "context" - "encoding/hex" - "fmt" "sync" - "time" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - - altda "github.com/ethereum-optimism/optimism/op-alt-da" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/opnode" "github.com/ethereum-optimism/optimism/op-node/config" - "github.com/ethereum-optimism/optimism/op-node/rollup/driver" - "github.com/ethereum-optimism/optimism/op-node/rollup/interop" - nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/clock" "github.com/ethereum-optimism/optimism/op-service/eth" - opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" - "github.com/ethereum-optimism/optimism/op-service/oppprof" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum/go-ethereum/log" ) type OpNode struct { mu sync.Mutex - id stack.ComponentID + name string opNode *opnode.Opnode userRPC string interopEndpoint string interopJwtSecret eth.Bytes32 cfg *config.Config - p devtest.P + p devtest.CommonT logger log.Logger - el *stack.ComponentID // Optional: nil when using SyncTester userProxy *tcpproxy.Proxy interopProxy *tcpproxy.Proxy clock clock.Clock @@ -51,47 +31,6 @@ type OpNode struct { var _ L2CLNode = 
(*OpNode)(nil) -func (n *OpNode) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - sysL2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: n.id, - Client: rpcCl, - UserRPC: n.userRPC, - InteropEndpoint: n.interopEndpoint, - InteropJwtSecret: n.interopJwtSecret, - }) - sysL2CL.SetLabel(match.LabelVendor, string(match.OpNode)) - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) - l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) - if n.el != nil { - for _, el := range l2Net.L2ELNodes() { - if el.ID() == *n.el { - sysL2CL.(stack.LinkableL2CLNode).LinkEL(el) - return - } - } - rbID := stack.NewRollupBoostNodeID(n.el.Key(), n.el.ChainID()) - for _, rb := range l2Net.RollupBoostNodes() { - if rb.ID() == rbID { - sysL2CL.(stack.LinkableL2CLNode).LinkRollupBoostNode(rb) - return - } - } - oprbID := stack.NewOPRBuilderNodeID(n.el.Key(), n.el.ChainID()) - for _, oprb := range l2Net.OPRBuilderNodes() { - if oprb.ID() == oprbID { - sysL2CL.(stack.LinkableL2CLNode).LinkOPRBuilderNode(oprb) - return - } - } - } -} - func (n *OpNode) UserRPC() string { return n.userRPC } @@ -155,181 +94,3 @@ func (n *OpNode) Stop() { n.opNode = nil } - -func WithOpNodeFollowL2(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, l2FollowSourceID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - followSource := func(orch *Orchestrator) string { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - l2CLFollowSource, ok := orch.GetL2CL(l2FollowSourceID) - p.Require().True(ok, "l2 CL Follow Source required") - return l2CLFollowSource.UserRPC() - }(orch) - opts = append(opts, 
L2CLFollowSource(followSource)) - withOpNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)(orch) - }) -} - -func WithOpNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(withOpNode(l2CLID, l1CLID, l1ELID, l2ELID, opts...)) -} - -func withOpNode(l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) func(orch *Orchestrator) { - return func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2CLID)) - - require := p.Require() - - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1CLID.ChainID())) - require.True(ok, "l1 network required") - - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(l2CLID.ChainID())) - require.True(ok, "l2 network required") - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok, "l1 EL node required") - - l1CL, ok := orch.GetL1CL(l1CLID) - require.True(ok, "l1 CL node required") - - // Get the L2EL node (which can be a regular EL node or a SyncTesterEL) - l2EL, ok := orch.GetL2EL(l2ELID) - require.True(ok, "l2 EL node required") - - // Get dependency set from cluster if available - var depSet depset.DependencySet - if cluster, ok := orch.ClusterForL2(l2ELID.ChainID()); ok { - depSet = cluster.DepSet() - } - - cfg := DefaultL2CLConfig() - orch.l2CLOptions.Apply(p, l2CLID, cfg) // apply global options - L2CLOptionBundle(opts).Apply(p, l2CLID, cfg) // apply specific options - - syncMode := cfg.VerifierSyncMode - if cfg.IsSequencer { - syncMode = cfg.SequencerSyncMode - // Sanity check, to navigate legacy sync-mode test assumptions. 
- // Can't enable ELSync on the sequencer or it will never start sequencing because - // ELSync needs to receive gossip from the sequencer to drive the sync - p.Require().NotEqual(nodeSync.ELSync, syncMode, "sequencer cannot use EL sync") - } - - jwtPath, jwtSecret := orch.writeDefaultJWT() - - logger := p.Logger() - - sequencerP2PKeyHex := "" - if cfg.IsSequencer { - p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2CLID.ChainID().ToBig())) - require.NoError(err, "need p2p key for sequencer") - sequencerP2PKeyHex = hex.EncodeToString(crypto.FromECDSA(p2pKey)) - } - p2pConfig, p2pSignerSetup := newDevstackP2PConfig( - p, - logger, - l2Net.rollupCfg.BlockTime, - cfg.NoDiscovery, - cfg.EnableReqRespSync, - sequencerP2PKeyHex, - ) - - // specify interop config, but do not configure anything, to disable indexing mode - interopCfg := &interop.Config{} - - if cfg.IndexingMode { - interopCfg = &interop.Config{ - RPCAddr: "127.0.0.1", - // When L2CL starts, store its RPC port here - // given by the os, to reclaim when restart. 
- RPCPort: 0, - RPCJwtSecretPath: jwtPath, - } - } - - nodeCfg := &config.Config{ - L1: &config.L1EndpointConfig{ - L1NodeAddr: l1EL.UserRPC(), - L1TrustRPC: false, - L1RPCKind: sources.RPCKindDebugGeth, - RateLimit: 0, - BatchSize: 20, - HttpPollInterval: time.Millisecond * 100, - MaxConcurrency: 10, - CacheSize: 0, // auto-adjust to sequence window - }, - L1ChainConfig: l1Net.genesis.Config, - L2: &config.L2EndpointConfig{ - L2EngineAddr: l2EL.EngineRPC(), - L2EngineJWTSecret: jwtSecret, - }, - L2FollowSource: &config.L2FollowSourceConfig{ - L2RPCAddr: cfg.FollowSource, - }, - Beacon: &config.L1BeaconEndpointConfig{ - BeaconAddr: l1CL.beaconHTTPAddr, - }, - Driver: driver.Config{ - SequencerEnabled: cfg.IsSequencer, - SequencerConfDepth: 2, - }, - Rollup: *l2Net.rollupCfg, - DependencySet: depSet, - SupervisorEnabled: cfg.IndexingMode, - P2PSigner: p2pSignerSetup, // nil when not sequencer - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - // When L2CL starts, store its RPC port here - // given by the os, to reclaim when restart. 
- ListenPort: 0, - EnableAdmin: true, - }, - InteropConfig: interopCfg, - P2P: p2pConfig, - L1EpochPollInterval: time.Second * 2, - RuntimeConfigReloadInterval: 0, - Tracer: nil, - Sync: nodeSync.Config{ - SyncMode: syncMode, - SyncModeReqResp: cfg.UseReqRespSync, - SkipSyncStartCheck: false, - SupportsPostFinalizationELSync: false, - L2FollowSourceEndpoint: cfg.FollowSource, - NeedInitialResetEngine: cfg.IsSequencer && cfg.FollowSource != "", - }, - ConfigPersistence: config.DisabledConfigPersistence{}, - Metrics: opmetrics.CLIConfig{}, - Pprof: oppprof.CLIConfig{}, - SafeDBPath: "", - RollupHalt: "", - Cancel: nil, - ConductorEnabled: false, - ConductorRpc: nil, - ConductorRpcTimeout: 0, - AltDA: altda.CLIConfig{}, - IgnoreMissingPectraBlobSchedule: false, - ExperimentalOPStackAPI: true, - } - if cfg.SafeDBPath != "" { - nodeCfg.SafeDBPath = cfg.SafeDBPath - } - - l2CLNode := &OpNode{ - id: l2CLID, - cfg: nodeCfg, - logger: logger, - p: p, - } - - if orch.timeTravelClock != nil { - l2CLNode.clock = orch.timeTravelClock - } - - // Set the EL field to link to the L2EL node - l2CLNode.el = &l2ELID - cid := l2CLID - require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", l2CLID)) - orch.registry.Register(cid, l2CLNode) - l2CLNode.Start() - p.Cleanup(l2CLNode.Stop) - } -} diff --git a/op-devstack/sysgo/l2_cl_p2p_config.go b/op-devstack/sysgo/l2_cl_p2p_config.go index 2644ab05ada73..fe94889d54000 100644 --- a/op-devstack/sysgo/l2_cl_p2p_config.go +++ b/op-devstack/sysgo/l2_cl_p2p_config.go @@ -16,7 +16,7 @@ import ( ) func newDevstackP2PConfig( - p devtest.P, + p devtest.CommonT, logger log.Logger, blockTime uint64, noDiscovery bool, diff --git a/op-devstack/sysgo/l2_cl_p2p_util.go b/op-devstack/sysgo/l2_cl_p2p_util.go index 67d524dffa2b6..b5fda39a510d3 100644 --- a/op-devstack/sysgo/l2_cl_p2p_util.go +++ b/op-devstack/sysgo/l2_cl_p2p_util.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/log" - 
"github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/retry" @@ -81,47 +80,3 @@ func getP2PClientsAndPeers(ctx context.Context, logger log.Logger, peerInfo2: peerInfo2, } } - -// WithL2CLP2PConnection connects P2P between two L2CLs -func WithL2CLP2PConnection(l2CL1ID, l2CL2ID stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - l := orch.P().Logger() - - l2CL1, ok := orch.GetL2CL(l2CL1ID) - require.True(ok, "looking for L2 CL node 1 to connect p2p") - l2CL2, ok := orch.GetL2CL(l2CL2ID) - require.True(ok, "looking for L2 CL node 2 to connect p2p") - require.Equal(l2CL1ID.ChainID(), l2CL2ID.ChainID(), "must be same l2 chain") - - ctx := orch.P().Ctx() - logger := orch.P().Logger() - - p := getP2PClientsAndPeers(ctx, logger, require, l2CL1, l2CL2) - - connectPeer := func(l2CL L2CLNode, p2pClient *sources.P2PClient, multiAddress string) { - err := retry.Do0(ctx, 6, retry.Exponential(), func() error { - return p2pClient.ConnectPeer(ctx, multiAddress) - }) - l.Info("connecting to L2CL peer", "l2CL", l2CL, "rpc", l2CL.UserRPC(), "multiAddress", multiAddress, "error", err) - require.NoError(err, "failed to connect L2CL peer") - } - - connectPeer(l2CL1, p.client1, p.peerInfo2.Addresses[0]) - connectPeer(l2CL2, p.client2, p.peerInfo1.Addresses[0]) - - check := func(peerDump *apis.PeerDump, peerInfo *apis.PeerInfo) { - multiAddress := peerInfo.PeerID.String() - _, ok := peerDump.Peers[multiAddress] - require.True(ok, "peer register invalid") - } - - peerDump1, err := GetPeers(ctx, p.client1) - require.NoError(err) - peerDump2, err := GetPeers(ctx, p.client2) - require.NoError(err) - - check(peerDump1, p.peerInfo2) - check(peerDump2, p.peerInfo1) - }) -} diff --git a/op-devstack/sysgo/l2_cl_supernode.go 
b/op-devstack/sysgo/l2_cl_supernode.go index adecee0333615..650ce7fc1032e 100644 --- a/op-devstack/sysgo/l2_cl_supernode.go +++ b/op-devstack/sysgo/l2_cl_supernode.go @@ -2,49 +2,27 @@ package sysgo import ( "context" - "encoding/hex" - "fmt" - "sort" - "strconv" "sync" "time" - "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" - altda "github.com/ethereum-optimism/optimism/op-alt-da" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-node/config" - "github.com/ethereum-optimism/optimism/op-node/rollup/driver" - "github.com/ethereum-optimism/optimism/op-node/rollup/interop" - nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" - opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" - "github.com/ethereum-optimism/optimism/op-service/oppprof" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - "github.com/ethereum-optimism/optimism/op-service/sources" snconfig "github.com/ethereum-optimism/optimism/op-supernode/config" "github.com/ethereum-optimism/optimism/op-supernode/supernode" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" ) type SuperNode struct { - mu sync.Mutex - - id stack.SupernodeID + mu sync.Mutex sn *supernode.Supernode cancel context.CancelFunc userRPC string interopEndpoint string interopJwtSecret eth.Bytes32 - p devtest.P + p devtest.CommonT logger log.Logger - els []*stack.ComponentID // Optional: nil when using SyncTester chains []eth.ChainID l1UserRPC string l1BeaconAddr string @@ -56,20 +34,6 @@ type SuperNode struct { var _ L2CLNode 
= (*SuperNode)(nil) -func (n *SuperNode) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - // note that the system is also hydrated by the SuperNodeProxy. - // It would be redundant to register nodes here as well. - system.AddSupernode(shim.NewSuperNode(shim.SuperNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: n.id, - Client: rpcCl, - })) -} - func (n *SuperNode) UserRPC() string { return n.userRPC } @@ -142,47 +106,17 @@ func (n *SuperNode) ResumeInteropActivity() { } } -// WithSupernode constructs a Supernode-based L2 CL node -func WithSupernode(supernodeID stack.SupernodeID, l2CLID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID, opts ...L2CLOption) stack.Option[*Orchestrator] { - args := []L2CLs{{CLID: l2CLID, ELID: l2ELID}} - return WithSharedSupernodeCLs(supernodeID, args, l1CLID, l1ELID) -} - // SuperNodeProxy is a thin wrapper that points to a shared supernode instance. 
type SuperNodeProxy struct { - id stack.ComponentID - p devtest.P + p devtest.CommonT logger log.Logger userRPC string interopEndpoint string interopJwtSecret eth.Bytes32 - el *stack.ComponentID } var _ L2CLNode = (*SuperNodeProxy)(nil) -func (n *SuperNodeProxy) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - sysL2CL := shim.NewL2CLNode(shim.L2CLNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: n.id, - Client: rpcCl, - UserRPC: n.userRPC, - InteropEndpoint: n.interopEndpoint, - InteropJwtSecret: n.interopJwtSecret, - }) - sysL2CL.SetLabel(match.LabelVendor, string(match.OpNode)) - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) - l2Net.(stack.ExtensibleL2Network).AddL2CLNode(sysL2CL) - if n.el != nil { - sysL2CL.(stack.LinkableL2CLNode).LinkEL(l2Net.L2ELNode(stack.ByID[stack.L2ELNode](*n.el))) - } -} - func (n *SuperNodeProxy) Start() {} func (n *SuperNodeProxy) Stop() {} func (n *SuperNodeProxy) UserRPC() string { return n.userRPC } @@ -190,11 +124,6 @@ func (n *SuperNodeProxy) InteropRPC() (endpoint string, jwtSecret eth.Bytes32) { return n.interopEndpoint, n.interopJwtSecret } -type L2CLs struct { - CLID stack.ComponentID - ELID stack.ComponentID -} - // SupernodeConfig holds configuration options for the shared supernode. type SupernodeConfig struct { // InteropActivationTimestamp enables the interop activity at the given timestamp. @@ -225,256 +154,3 @@ func WithSupernodeInteropAtGenesis() SupernodeOption { cfg.UseGenesisInterop = true } } - -// WithSharedSupernodeCLsInterop starts one supernode for N L2 chains with interop enabled at genesis. -// The interop activation timestamp is computed from the first chain's genesis time. 
-func WithSharedSupernodeCLsInterop(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { - return WithSharedSupernodeCLs(supernodeID, cls, l1CLID, l1ELID, WithSupernodeInteropAtGenesis()) -} - -// WithSharedSupernodeCLsInteropDelayed starts one supernode for N L2 chains with interop enabled -// at a specified offset from genesis. This allows testing the transition from non-interop to interop mode. -func WithSharedSupernodeCLsInteropDelayed(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID, delaySeconds uint64) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - if len(cls) == 0 { - orch.P().Require().Fail("no chains provided") - return - } - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(cls[0].CLID.ChainID())) - if !ok { - orch.P().Require().Fail("l2 network not found") - return - } - genesisTime := l2Net.rollupCfg.Genesis.L2Time - activationTime := genesisTime + delaySeconds - orch.P().Logger().Info("enabling supernode interop with delay", - "genesis_time", genesisTime, - "activation_timestamp", activationTime, - "delay_seconds", delaySeconds, - ) - withSharedSupernodeCLsImpl(orch, supernodeID, cls, l1CLID, l1ELID, WithSupernodeInterop(activationTime)) - }) -} - -// WithSharedSupernodeCLs starts one supernode for N L2 chains and registers thin L2CL wrappers. -func WithSharedSupernodeCLs(supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID, opts ...SupernodeOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - withSharedSupernodeCLsImpl(orch, supernodeID, cls, l1CLID, l1ELID, opts...) - }) -} - -// withSharedSupernodeCLsImpl is the implementation for starting a shared supernode. 
-func withSharedSupernodeCLsImpl(orch *Orchestrator, supernodeID stack.SupernodeID, cls []L2CLs, l1CLID stack.ComponentID, l1ELID stack.ComponentID, opts ...SupernodeOption) { - p := orch.P() - require := p.Require() - - require.Equal(stack.KindSupernode, supernodeID.Kind(), "supernode ID must be kind Supernode") - require.Equal(stack.KindL1CLNode, l1CLID.Kind(), "l1 CL ID must be kind L1CLNode") - require.Equal(stack.KindL1ELNode, l1ELID.Kind(), "l1 EL ID must be kind L1ELNode") - require.Equal(l1CLID.ChainID(), l1ELID.ChainID(), "l1 CL and EL IDs must be on the same chain") - require.NotEmpty(cls, "at least one L2 CL/EL pair is required") - for i := range cls { - ids := cls[i] - require.Equalf(stack.KindL2CLNode, ids.CLID.Kind(), "cls[%d].CLID must be kind L2CLNode", i) - require.Truef(ids.CLID.HasChainID(), "cls[%d].CLID must be chain-scoped", i) - require.Truef(ids.ELID.HasChainID(), "cls[%d].ELID must be chain-scoped", i) - require.Equalf(ids.CLID.ChainID(), ids.ELID.ChainID(), "cls[%d] CL and EL IDs must be on the same chain", i) - } - - // Apply options - snOpts := &SupernodeConfig{} - for _, opt := range opts { - opt(snOpts) - } - - // Resolve UseGenesisInterop: read the activation timestamp from the first chain's genesis. 
- if snOpts.UseGenesisInterop && snOpts.InteropActivationTimestamp == nil { - p.Require().NotEmpty(cls, "no chains provided for genesis interop resolution") - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(cls[0].CLID.ChainID())) - p.Require().True(ok, "l2 network not found for genesis interop resolution") - genesisTime := l2Net.rollupCfg.Genesis.L2Time - p.Logger().Info("enabling supernode interop at genesis", "activation_timestamp", genesisTime) - snOpts.InteropActivationTimestamp = &genesisTime - } - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok, "l1 EL node required") - l1CL, ok := orch.GetL1CL(l1CLID) - require.True(ok, "l1 CL node required") - - // Get L1 network to access L1 chain config - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) - require.True(ok, "l1 network required") - - _, jwtSecret := orch.writeDefaultJWT() - - logger := p.Logger() - - // Build per-chain op-node configs - makeNodeCfg := func(l2Net *L2Network, l2ChainID eth.ChainID, l2EL L2ELNode, isSequencer bool) *config.Config { - interopCfg := &interop.Config{} - l2EngineAddr := l2EL.EngineRPC() - var depSet depset.DependencySet - if cluster, ok := orch.ClusterForL2(l2ChainID); ok { - depSet = cluster.DepSet() - } - sequencerP2PKeyHex := "" - if isSequencer { - p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2ChainID.ToBig())) - require.NoError(err, "need p2p key for supernode virtual sequencer") - sequencerP2PKeyHex = hex.EncodeToString(crypto.FromECDSA(p2pKey)) - } - p2pConfig, p2pSignerSetup := newDevstackP2PConfig( - p, - logger.New("chain_id", l2ChainID.String(), "component", "supernode-p2p"), - l2Net.rollupCfg.BlockTime, - false, - true, - sequencerP2PKeyHex, - ) - return &config.Config{ - L1: &config.L1EndpointConfig{ - L1NodeAddr: l1EL.UserRPC(), - L1TrustRPC: false, - L1RPCKind: sources.RPCKindDebugGeth, - RateLimit: 0, - BatchSize: 20, - HttpPollInterval: time.Millisecond * 100, - MaxConcurrency: 10, - CacheSize: 0, - }, - 
L1ChainConfig: l1Net.genesis.Config, - L2: &config.L2EndpointConfig{ - L2EngineAddr: l2EngineAddr, - L2EngineJWTSecret: jwtSecret, - }, - DependencySet: depSet, - Beacon: &config.L1BeaconEndpointConfig{BeaconAddr: l1CL.beaconHTTPAddr}, - Driver: driver.Config{SequencerEnabled: isSequencer, SequencerConfDepth: 2}, - Rollup: *l2Net.rollupCfg, - P2PSigner: p2pSignerSetup, - RPC: oprpc.CLIConfig{ListenAddr: "127.0.0.1", ListenPort: 0, EnableAdmin: true}, - InteropConfig: interopCfg, - P2P: p2pConfig, - L1EpochPollInterval: 2 * time.Second, - RuntimeConfigReloadInterval: 0, - Sync: nodeSync.Config{SyncMode: nodeSync.CLSync, SyncModeReqResp: true}, - ConfigPersistence: config.DisabledConfigPersistence{}, - Metrics: opmetrics.CLIConfig{}, - Pprof: oppprof.CLIConfig{}, - AltDA: altda.CLIConfig{}, - IgnoreMissingPectraBlobSchedule: false, - ExperimentalOPStackAPI: true, - } - } - - // Gather VN configs and chain IDs - vnCfgs := make(map[eth.ChainID]*config.Config) - chainIDs := make([]uint64, 0, len(cls)) - els := make([]*stack.ComponentID, 0, len(cls)) - for i := range cls { - a := cls[i] - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(a.CLID.ChainID())) - require.True(ok, "l2 network required") - l2ELNode, ok := orch.GetL2EL(a.ELID) - require.True(ok, "l2 EL node required") - l2ChainID := a.CLID.ChainID() - cfg := makeNodeCfg(l2Net, l2ChainID, l2ELNode, true) - require.NoError(cfg.Check(), "invalid op-node config for chain %s", a.CLID.ChainID()) - id := eth.EvilChainIDToUInt64(a.CLID.ChainID()) - chainIDs = append(chainIDs, id) - vnCfgs[eth.ChainIDFromUInt64(id)] = cfg - els = append(els, &cls[i].ELID) - } - - // Build supernode CLI config - snCfg := &snconfig.CLIConfig{ - Chains: chainIDs, - DataDir: p.TempDir(), - L1NodeAddr: l1EL.UserRPC(), - L1BeaconAddr: l1CL.beaconHTTPAddr, - RPCConfig: oprpc.CLIConfig{ListenAddr: "127.0.0.1", ListenPort: 0, EnableAdmin: true}, - InteropActivationTimestamp: snOpts.InteropActivationTimestamp, - } - if 
snOpts.InteropActivationTimestamp != nil { - logger.Info("supernode interop enabled", "activation_timestamp", *snOpts.InteropActivationTimestamp) - } - - snode := &SuperNode{ - id: supernodeID, - userRPC: "", - interopEndpoint: "", - interopJwtSecret: jwtSecret, - p: p, - logger: logger, - els: els, - chains: idsFromCLs(cls), - l1UserRPC: l1EL.UserRPC(), - l1BeaconAddr: l1CL.beaconHTTPAddr, - snCfg: snCfg, - vnCfgs: vnCfgs, - } - - // Start and register cleanup, following the same pattern as OpNode. - snode.Start() - p.Cleanup(snode.Stop) - - base := snode.UserRPC() - - // Wait for per-chain RPC routes to serve optimism_rollupConfig and register proxies - waitReady := func(u string) { - deadline := time.Now().Add(15 * time.Second) - for { - if time.Now().After(deadline) { - require.FailNow(fmt.Sprintf("timed out waiting for RPC to be ready at %s", u)) - } - rpcCl, err := client.NewRPC(p.Ctx(), logger, u, client.WithLazyDial()) - if err == nil { - var v any - if callErr := rpcCl.CallContext(p.Ctx(), &v, "optimism_rollupConfig"); callErr == nil { - rpcCl.Close() - break - } - rpcCl.Close() - } - time.Sleep(200 * time.Millisecond) - } - } - for i := range cls { - a := cls[i] - // Multi-chain router exposes per-chain namespace paths - rpc := base + "/" + strconv.FormatUint(eth.EvilChainIDToUInt64(a.CLID.ChainID()), 10) - waitReady(rpc) - proxy := &SuperNodeProxy{ - id: a.CLID, - p: p, - logger: logger, - userRPC: rpc, - interopEndpoint: rpc, - interopJwtSecret: jwtSecret, - el: &cls[i].ELID, - } - cid := a.CLID - require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", a.CLID)) - orch.registry.Register(cid, proxy) - } - - orch.supernodes.Set(supernodeID, snode) -} - -func idsFromCLs(cls []L2CLs) []eth.ChainID { - out := make([]eth.ChainID, 0, len(cls)) - seen := make(map[eth.ChainID]struct{}, len(cls)) - for _, c := range cls { - id := c.CLID.ChainID() - if _, ok := seen[id]; ok { - continue - } - seen[id] = struct{}{} - out = append(out, id) - } 
- sort.Slice(out, func(i, j int) bool { - return eth.EvilChainIDToUInt64(out[i]) < eth.EvilChainIDToUInt64(out[j]) - }) - return out -} diff --git a/op-devstack/sysgo/l2_conductor.go b/op-devstack/sysgo/l2_conductor.go new file mode 100644 index 0000000000000..d28fafbbaa64c --- /dev/null +++ b/op-devstack/sysgo/l2_conductor.go @@ -0,0 +1,28 @@ +package sysgo + +import ( + opconductor "github.com/ethereum-optimism/optimism/op-conductor/conductor" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +type Conductor struct { + name string + chainID eth.ChainID + + serverID string + consensusEndpoint string + rpcEndpoint string + service *opconductor.OpConductor +} + +func (c *Conductor) ServerID() string { + return c.serverID +} + +func (c *Conductor) ConsensusEndpoint() string { + return c.consensusEndpoint +} + +func (c *Conductor) HTTPEndpoint() string { + return c.rpcEndpoint +} diff --git a/op-devstack/sysgo/l2_el.go b/op-devstack/sysgo/l2_el.go index 6ebe9e2de07fb..3aa7acf828684 100644 --- a/op-devstack/sysgo/l2_el.go +++ b/op-devstack/sysgo/l2_el.go @@ -1,14 +1,11 @@ package sysgo import ( - "os" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/stack" ) type L2ELNode interface { - hydrate(system stack.ExtensibleSystem) stack.Lifecycle UserRPC() string EngineRPC() string @@ -16,7 +13,6 @@ type L2ELNode interface { } type L2ELConfig struct { - SupervisorID *stack.ComponentID P2PAddr string P2PPort int P2PNodeKeyHex string @@ -25,21 +21,15 @@ type L2ELConfig struct { ProofHistory bool } -func L2ELWithSupervisor(supervisorID stack.ComponentID) L2ELOption { - return L2ELOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { - cfg.SupervisorID = &supervisorID - }) -} - func L2ELWithProofHistory(enable bool) L2ELOption { - return L2ELOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { + return L2ELOptionFn(func(p devtest.T, _ ComponentTarget, cfg *L2ELConfig) { 
cfg.ProofHistory = enable }) } // L2ELWithP2PConfig sets deterministic P2P identity and static peers for the L2 EL. func L2ELWithP2PConfig(addr string, port int, nodeKeyHex string, staticPeers, trustedPeers []string) L2ELOption { - return L2ELOptionFn(func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { + return L2ELOptionFn(func(p devtest.T, _ ComponentTarget, cfg *L2ELConfig) { cfg.P2PAddr = addr cfg.P2PPort = port cfg.P2PNodeKeyHex = nodeKeyHex @@ -50,7 +40,6 @@ func L2ELWithP2PConfig(addr string, port int, nodeKeyHex string, staticPeers, tr func DefaultL2ELConfig() *L2ELConfig { return &L2ELConfig{ - SupervisorID: nil, P2PAddr: "127.0.0.1", P2PPort: 0, P2PNodeKeyHex: "", @@ -61,22 +50,15 @@ func DefaultL2ELConfig() *L2ELConfig { } type L2ELOption interface { - Apply(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) -} - -// WithGlobalL2ELOption applies the L2ELOption to all L2ELNode instances in this orchestrator -func WithGlobalL2ELOption(opt L2ELOption) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(o *Orchestrator) { - o.l2ELOptions = append(o.l2ELOptions, opt) - }) + Apply(p devtest.T, target ComponentTarget, cfg *L2ELConfig) } -type L2ELOptionFn func(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) +type L2ELOptionFn func(p devtest.T, target ComponentTarget, cfg *L2ELConfig) var _ L2ELOption = L2ELOptionFn(nil) -func (fn L2ELOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { - fn(p, id, cfg) +func (fn L2ELOptionFn) Apply(p devtest.T, target ComponentTarget, cfg *L2ELConfig) { + fn(p, target, cfg) } // L2ELOptionBundle a list of multiple L2ELOption, to all be applied in order. 
@@ -84,37 +66,9 @@ type L2ELOptionBundle []L2ELOption var _ L2ELOption = L2ELOptionBundle(nil) -func (l L2ELOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *L2ELConfig) { +func (l L2ELOptionBundle) Apply(p devtest.T, target ComponentTarget, cfg *L2ELConfig) { for _, opt := range l { p.Require().NotNil(opt, "cannot Apply nil L2ELOption") - opt.Apply(p, id, cfg) + opt.Apply(p, target, cfg) } } - -// WithL2ELNode adds the default type of L2 CL node. -// The default can be configured with DEVSTACK_L2EL_KIND. -// Tests that depend on specific types can use options like WithKonaNode and WithOpNode directly. -func WithL2ELNode(id stack.ComponentID, opts ...L2ELOption) stack.Option[*Orchestrator] { - switch os.Getenv("DEVSTACK_L2EL_KIND") { - case "op-reth": - return WithOpReth(id, opts...) - default: - return WithOpGeth(id, opts...) - } -} - -func WithExtL2Node(id stack.ComponentID, elRPCEndpoint string) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - - // Create L2 EL node with external RPC - l2ELNode := &OpGeth{ - id: id, - userRPC: elRPCEndpoint, - readOnly: true, - } - cid := id - require.False(orch.registry.Has(cid), "must not already exist") - orch.registry.Register(cid, l2ELNode) - }) -} diff --git a/op-devstack/sysgo/l2_el_opgeth.go b/op-devstack/sysgo/l2_el_opgeth.go index 57370d4403da8..4470284095055 100644 --- a/op-devstack/sysgo/l2_el_opgeth.go +++ b/op-devstack/sysgo/l2_el_opgeth.go @@ -11,29 +11,23 @@ import ( gn "github.com/ethereum/go-ethereum/node" "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum/go-ethereum/rpc" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" - 
"github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" ) type OpGeth struct { mu sync.Mutex - p devtest.P + p devtest.CommonT logger log.Logger - id stack.ComponentID + name string l2Net *L2Network jwtPath string jwtSecret [32]byte supervisorRPC string l2Geth *geth.GethInstance - readOnly bool cfg *L2ELConfig authRPC string @@ -57,36 +51,6 @@ func (n *OpGeth) JWTPath() string { return n.jwtPath } -func (n *OpGeth) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - // ReadOnly cannot expose auth RPC - var engineCl client.RPC - if !n.readOnly { - auth := rpc.WithHTTPAuth(gn.NewJWTAuth(n.jwtSecret)) - engineCl, err = client.NewRPC(system.T().Ctx(), system.Logger(), n.authRPC, client.WithGethRPCOptions(auth)) - require.NoError(err) - system.T().Cleanup(engineCl.Close) - } - - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) - sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ - RollupCfg: l2Net.RollupConfig(), - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: rpcCl, - ChainID: n.id.ChainID(), - }, - EngineClient: engineCl, - ID: n.id, - }) - sysL2EL.SetLabel(match.LabelVendor, string(match.OpGeth)) - l2Net.(stack.ExtensibleL2Network).AddL2ELNode(sysL2EL) -} - func (n *OpGeth) Start() { n.mu.Lock() defer n.mu.Unlock() @@ -113,7 +77,7 @@ func (n *OpGeth) Start() { } require := n.p.Require() - l2Geth, err := geth.InitL2(n.id.String(), n.l2Net.genesis, n.jwtPath, + l2Geth, err := geth.InitL2(NewComponentTarget(n.name, n.l2Net.ChainID()).String(), n.l2Net.genesis, n.jwtPath, func(ethCfg *ethconfig.Config, nodeCfg *gn.Config) error { ethCfg.InteropMessageRPC = n.supervisorRPC ethCfg.InteropMempoolFiltering = true @@ -173,53 +137,8 @@ 
func (n *OpGeth) Stop() { n.logger.Warn("op-geth already stopped") return } - n.logger.Info("Closing op-geth", "id", n.id) + n.logger.Info("Closing op-geth", "name", n.name, "chain", n.l2Net.ChainID()) closeErr := n.l2Geth.Close() - n.logger.Info("Closed op-geth", "id", n.id, "err", closeErr) + n.logger.Info("Closed op-geth", "name", n.name, "chain", n.l2Net.ChainID(), "err", closeErr) n.l2Geth = nil } - -func WithOpGeth(id stack.ComponentID, opts ...L2ELOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - require := p.Require() - - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(id.ChainID())) - require.True(ok, "L2 network required") - - cfg := DefaultL2ELConfig() - orch.l2ELOptions.Apply(p, id, cfg) // apply global options - L2ELOptionBundle(opts).Apply(p, id, cfg) // apply specific options - - jwtPath, jwtSecret := orch.writeDefaultJWT() - - useInterop := l2Net.genesis.Config.InteropTime != nil - - supervisorRPC := "" - if useInterop && cfg.SupervisorID != nil { - sup, ok := orch.GetSupervisor(*cfg.SupervisorID) - require.True(ok, "supervisor is required for interop") - supervisorRPC = sup.UserRPC() - } - - logger := p.Logger() - - l2EL := &OpGeth{ - id: id, - p: orch.P(), - logger: logger, - l2Net: l2Net, - jwtPath: jwtPath, - jwtSecret: jwtSecret, - supervisorRPC: supervisorRPC, - cfg: cfg, - } - l2EL.Start() - p.Cleanup(func() { - l2EL.Stop() - }) - cid := id - require.False(orch.registry.Has(cid), "must be unique L2 EL node") - orch.registry.Register(cid, l2EL) - }) -} diff --git a/op-devstack/sysgo/l2_el_opreth.go b/op-devstack/sysgo/l2_el_opreth.go index e09ec756f3aa7..e0fbc5603c717 100644 --- a/op-devstack/sysgo/l2_el_opreth.go +++ b/op-devstack/sysgo/l2_el_opreth.go @@ -1,31 +1,24 @@ package sysgo import ( - "encoding/json" "fmt" "net/url" - "os" - "os/exec" - "path/filepath" "strings" "sync" 
"github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/logpipe" "github.com/ethereum-optimism/optimism/op-service/tasks" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" - gn "github.com/ethereum/go-ethereum/node" - "github.com/ethereum/go-ethereum/rpc" + "github.com/ethereum/go-ethereum/log" ) type OpReth struct { mu sync.Mutex - id stack.ComponentID + name string + chainID eth.ChainID jwtPath string jwtSecret [32]byte authRPC string @@ -39,7 +32,7 @@ type OpReth struct { // Each entry is of the form "key=value". env []string - p devtest.P + p devtest.T sub *SubProcess @@ -48,35 +41,6 @@ type OpReth struct { var _ L2ELNode = (*OpReth)(nil) -func (n *OpReth) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - // Do not have to check whether client is readOnly because - // all external L2 Clients will be wrapped with op-geth sysgo devstack, supporting readOnly - var engineCl client.RPC - auth := rpc.WithHTTPAuth(gn.NewJWTAuth(n.jwtSecret)) - engineCl, err = client.NewRPC(system.T().Ctx(), system.Logger(), n.authRPC, client.WithGethRPCOptions(auth)) - require.NoError(err) - system.T().Cleanup(engineCl.Close) - - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) - sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ - RollupCfg: l2Net.RollupConfig(), - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: rpcCl, - ChainID: n.id.ChainID(), - 
}, - EngineClient: engineCl, - ID: n.id, - }) - sysL2EL.SetLabel(match.LabelVendor, string(match.OpReth)) - l2Net.(stack.ExtensibleL2Network).AddL2ELNode(sysL2EL) -} - func (n *OpReth) Start() { n.mu.Lock() defer n.mu.Unlock() @@ -100,8 +64,8 @@ func (n *OpReth) Start() { }) n.userRPC = "ws://" + n.userProxy.Addr() } - logOut := logpipe.ToLogger(n.p.Logger().New("component", "op-reth", "src", "stdout", "id", n.id.String())) - logErr := logpipe.ToLogger(n.p.Logger().New("component", "op-reth", "src", "stderr", "id", n.id.String())) + logOut := logpipe.ToLoggerWithMinLevel(n.p.Logger().New("component", "op-reth", "src", "stdout", "name", n.name, "chain", n.chainID), log.LevelWarn) + logErr := logpipe.ToLoggerWithMinLevel(n.p.Logger().New("component", "op-reth", "src", "stderr", "name", n.name, "chain", n.chainID), log.LevelWarn) authRPCChan := make(chan string, 1) defer close(authRPCChan) @@ -155,7 +119,7 @@ func (n *OpReth) Start() { if areMetricsEnabled() { var metricsTarget PrometheusMetricsTarget n.p.Require().NoError(tasks.Await(n.p.Ctx(), metricsTargetChan, &metricsTarget), "need metrics endpoint") - n.l2MetricsRegistrar.RegisterL2MetricsTargets(n.id, metricsTarget) + n.l2MetricsRegistrar.RegisterL2MetricsTargets(n.name, metricsTarget) } n.userProxy.SetUpstream(ProxyAddr(n.p.Require(), userRPCAddr)) @@ -183,149 +147,3 @@ func (n *OpReth) EngineRPC() string { func (n *OpReth) JWTPath() string { return n.jwtPath } - -func WithOpReth(id stack.ComponentID, opts ...L2ELOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - require := p.Require() - - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(id.ChainID())) - require.True(ok, "L2 network required") - - cfg := DefaultL2ELConfig() - orch.l2ELOptions.Apply(p, id, cfg) // apply global options - L2ELOptionBundle(opts).Apply(p, id, cfg) // apply specific options - - jwtPath, jwtSecret := orch.writeDefaultJWT() - 
- useInterop := l2Net.genesis.Config.InteropTime != nil - - supervisorRPC := "" - if useInterop && cfg.SupervisorID != nil { - sup, ok := orch.GetSupervisor(*cfg.SupervisorID) - require.True(ok, "supervisor is required for interop") - supervisorRPC = sup.UserRPC() - } - - tempDir := p.TempDir() - data, err := json.Marshal(l2Net.genesis) - p.Require().NoError(err, "must json-encode genesis") - chainConfigPath := filepath.Join(tempDir, "genesis.json") - p.Require().NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write genesis file") - - dataDirPath := filepath.Join(tempDir, "data") - p.Require().NoError(os.MkdirAll(dataDirPath, 0o755), "must create datadir") - - // reth writes logs not just to stdout, but also to file, - // and to global user-cache by default, rather than the datadir. - // So we customize this to temp-dir too, to not pollute the user-cache dir. - logDirPath := filepath.Join(tempDir, "logs") - p.Require().NoError(os.MkdirAll(dataDirPath, 0o755), "must create logs dir") - - tempP2PPath := filepath.Join(tempDir, "p2pkey.txt") - - execPath := os.Getenv("OP_RETH_EXEC_PATH") - p.Require().NotEmpty(execPath, "OP_RETH_EXEC_PATH environment variable must be set") - _, err = os.Stat(execPath) - p.Require().NotErrorIs(err, os.ErrNotExist, "executable must exist") - - // reth does not support env-var configuration like the Go services, - // so we use the CLI flags instead. 
- args := []string{ - "node", - "--addr=127.0.0.1", - "--authrpc.addr=127.0.0.1", - "--authrpc.jwtsecret=" + jwtPath, - "--authrpc.port=0", - "--builder.deadline=2", - "--builder.interval=100ms", - "--chain=" + chainConfigPath, - "--color=never", - "--datadir=" + dataDirPath, - "--disable-discovery", - "--http", - "--http.api=admin,debug,eth,net,trace,txpool,web3,rpc,reth,miner", - "--http.addr=127.0.0.1", - "--http.port=0", - "--ipcdisable", - "--log.file.directory=" + logDirPath, - "--log.stdout.format=json", - "--nat=none", - "--p2p-secret-key=" + tempP2PPath, - "--port=0", - "--rpc.eth-proof-window=30", - "--txpool.minimum-priority-fee=1", - "--txpool.nolocals", - "--with-unused-ports", - "--ws", - "--ws.api=admin,debug,eth,net,trace,txpool,web3,rpc,reth,miner", - "--ws.addr=127.0.0.1", - "--ws.port=0", - "-vvvv", - } - - if areMetricsEnabled() { - // Use port 0 to let the OS assign a port atomically at bind time. - // The actual port will be discovered by parsing the process logs. - args = append(args, "--metrics=127.0.0.1:0") - } - - if supervisorRPC != "" { - args = append(args, "--rollup.supervisor-http="+supervisorRPC) - } - - // initialise op-reth - initArgs := []string{ - "init", - "--datadir=" + dataDirPath, - "--chain=" + chainConfigPath, - } - err = exec.Command(execPath, initArgs...).Run() - p.Require().NoError(err, "must init op-reth node") - - if cfg.ProofHistory { - proofHistoryDir := filepath.Join(tempDir, "proof-history") - - // initialise proof history - initProofsArgs := []string{ - "proofs", - "init", - "--datadir=" + dataDirPath, - "--chain=" + chainConfigPath, - "--proofs-history.storage-path=" + proofHistoryDir, - } - err = exec.Command(execPath, initProofsArgs...).Run() - p.Require().NoError(err, "must init op-reth proof history") - - args = append( - args, - "--proofs-history", - // todo: make these configurable via env-vars (ethereum-optimism/optimism#18908) - "--proofs-history.window=200", - "--proofs-history.prune-interval=1m", - 
"--proofs-history.storage-path="+proofHistoryDir, - ) - } - - l2EL := &OpReth{ - id: id, - jwtPath: jwtPath, - jwtSecret: jwtSecret, - authRPC: "", - userRPC: "", - execPath: execPath, - args: args, - env: []string{}, - p: orch.p, - l2MetricsRegistrar: orch, - } - - p.Logger().Info("Starting op-reth") - l2EL.Start() - p.Cleanup(l2EL.Stop) - p.Logger().Info("op-reth is ready", "userRPC", l2EL.userRPC, "authRPC", l2EL.authRPC) - cid := id - require.False(orch.registry.Has(cid), "must be unique L2 EL node") - orch.registry.Register(cid, l2EL) - }) -} diff --git a/op-devstack/sysgo/l2_el_p2p_util.go b/op-devstack/sysgo/l2_el_p2p_util.go index ea4728aa0dc50..fc010e9a6cc50 100644 --- a/op-devstack/sysgo/l2_el_p2p_util.go +++ b/op-devstack/sysgo/l2_el_p2p_util.go @@ -10,36 +10,10 @@ import ( "github.com/ethereum/go-ethereum/p2p" "github.com/ethereum/go-ethereum/p2p/enode" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/testreq" ) -func WithL2ELP2PConnection(l2EL1ID, l2EL2ID stack.ComponentID, trusted bool) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - - l2EL1, ok := orch.GetL2EL(l2EL1ID) - require.True(ok, "looking for L2 EL node 1 to connect p2p") - l2EL2, ok := orch.GetL2EL(l2EL2ID) - require.True(ok, "looking for L2 EL node 2 to connect p2p") - require.Equal(l2EL1ID.ChainID(), l2EL2ID.ChainID(), "must be same l2 chain") - - ctx := orch.P().Ctx() - logger := orch.P().Logger() - - rpc1, err := dial.DialRPCClientWithTimeout(ctx, logger, l2EL1.UserRPC()) - require.NoError(err, "failed to connect to el1 rpc") - defer rpc1.Close() - rpc2, err := dial.DialRPCClientWithTimeout(ctx, logger, l2EL2.UserRPC()) - require.NoError(err, "failed to connect to el2 rpc") - defer rpc2.Close() - - ConnectP2P(orch.P().Ctx(), require, rpc1, 
rpc2, trusted) - }) -} - type RpcCaller interface { CallContext(ctx context.Context, result interface{}, method string, args ...interface{}) error } diff --git a/op-devstack/sysgo/l2_el_synctester.go b/op-devstack/sysgo/l2_el_synctester.go index 28b543428d0b0..0c9d6e106e856 100644 --- a/op-devstack/sysgo/l2_el_synctester.go +++ b/op-devstack/sysgo/l2_el_synctester.go @@ -5,10 +5,6 @@ import ( "sync" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/client" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" ) @@ -18,8 +14,7 @@ import ( type SyncTesterEL struct { mu sync.Mutex - id stack.ComponentID - l2Net *L2Network + target ComponentTarget jwtPath string authRPC string @@ -29,10 +24,9 @@ type SyncTesterEL struct { userProxy *tcpproxy.Proxy config *SyncTesterELConfig - p devtest.P + p devtest.CommonT - // Reference to the orchestrator to find the EL node to connect to - orch *Orchestrator + syncTester *SyncTesterService } type SyncTesterELConfig struct { @@ -56,22 +50,15 @@ func DefaultSyncTesterELConfig() *SyncTesterELConfig { } type SyncTesterELOption interface { - Apply(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) + Apply(p devtest.T, target ComponentTarget, cfg *SyncTesterELConfig) } -// WithGlobalSyncTesterELOption applies the SyncTesterELOption to all SyncTesterEL instances in this orchestrator -func WithGlobalSyncTesterELOption(opt SyncTesterELOption) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(o *Orchestrator) { - o.SyncTesterELOptions = append(o.SyncTesterELOptions, opt) - }) -} - -type SyncTesterELOptionFn func(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) +type SyncTesterELOptionFn func(p 
devtest.T, target ComponentTarget, cfg *SyncTesterELConfig) var _ SyncTesterELOption = SyncTesterELOptionFn(nil) -func (fn SyncTesterELOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) { - fn(p, id, cfg) +func (fn SyncTesterELOptionFn) Apply(p devtest.T, target ComponentTarget, cfg *SyncTesterELConfig) { + fn(p, target, cfg) } // SyncTesterELOptionBundle a list of multiple SyncTesterELOption, to all be applied in order. @@ -79,54 +66,26 @@ type SyncTesterELOptionBundle []SyncTesterELOption var _ SyncTesterELOptionBundle = SyncTesterELOptionBundle(nil) -func (l SyncTesterELOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *SyncTesterELConfig) { +func (l SyncTesterELOptionBundle) Apply(p devtest.T, target ComponentTarget, cfg *SyncTesterELConfig) { for _, opt := range l { p.Require().NotNil(opt, "cannot Apply nil SyncTesterELOption") - opt.Apply(p, id, cfg) + opt.Apply(p, target, cfg) } } var _ L2ELNode = (*SyncTesterEL)(nil) -func (n *SyncTesterEL) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - // Sync Tester EL is always writable, and needs no auth - engineCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), n.authRPC) - require.NoError(err) - system.T().Cleanup(engineCl.Close) - - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(n.id.ChainID()))) - sysL2EL := shim.NewL2ELNode(shim.L2ELNodeConfig{ - RollupCfg: l2Net.RollupConfig(), - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: rpcCl, - ChainID: n.id.ChainID(), - }, - EngineClient: engineCl, - ID: n.id, - }) - sysL2EL.SetLabel(match.LabelVendor, "sync-tester") - l2Net.(stack.ExtensibleL2Network).AddL2ELNode(sysL2EL) -} - func (n *SyncTesterEL) Start() { n.mu.Lock() defer n.mu.Unlock() - // The SyncTesterEL should 
connect to the existing sync tester service - // Get the endpoint from the orchestrator's syncTester service - if n.orch.syncTester == nil || n.orch.syncTester.service == nil { - n.p.Logger().Error("syncTester service not available in orchestrator") + // The SyncTesterEL should connect to the existing sync tester service. + if n.syncTester == nil || n.syncTester.service == nil { + n.p.Logger().Error("syncTester service not available") return } - // Use NewEndpoint to get the correct session-specific endpoint for this chain ID - endpoint := n.orch.syncTester.service.SyncTesterRPCPath(n.id.ChainID(), true) + endpoint := n.syncTester.SyncTesterRPCPath(n.target.ChainID, true) path := endpoint + n.config.Path() @@ -151,7 +110,7 @@ func (n *SyncTesterEL) Start() { n.userRPC = rpc + path } - sessionURL := n.orch.syncTester.service.RPC() + path + sessionURL := n.syncTester.RPC() + path n.authProxy.SetUpstream(ProxyAddr(n.p.Require(), sessionURL)) n.userProxy.SetUpstream(ProxyAddr(n.p.Require(), sessionURL)) @@ -172,38 +131,3 @@ func (n *SyncTesterEL) EngineRPC() string { func (n *SyncTesterEL) JWTPath() string { return n.jwtPath } - -// WithSyncTesterL2ELNode creates a SyncTesterEL that satisfies the L2ELNode interface -// The sync tester acts as an EL node that can be used by CL nodes for testing sync. 
-func WithSyncTesterL2ELNode(id, readonlyEL stack.ComponentID, opts ...SyncTesterELOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - require := p.Require() - - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(readonlyEL.ChainID())) - require.True(ok, "L2 network required") - - cfg := DefaultSyncTesterELConfig() - orch.SyncTesterELOptions.Apply(p, id, cfg) // apply global options - SyncTesterELOptionBundle(opts).Apply(p, id, cfg) // apply specific options - - jwtPath, _ := orch.writeDefaultJWT() - - syncTesterEL := &SyncTesterEL{ - id: id, - l2Net: l2Net, - jwtPath: jwtPath, - config: cfg, - p: p, - orch: orch, - } - - p.Logger().Info("Starting sync tester EL", "id", id) - syncTesterEL.Start() - p.Cleanup(syncTesterEL.Stop) - p.Logger().Info("sync tester EL is ready", "userRPC", syncTesterEL.userRPC, "authRPC", syncTesterEL.authRPC) - cid := id - require.False(orch.registry.Has(cid), "must be unique L2 EL node") - orch.registry.Register(cid, syncTesterEL) - }) -} diff --git a/op-devstack/sysgo/l2_metrics_dashboard.go b/op-devstack/sysgo/l2_metrics_dashboard.go index 92845c08ae712..9acac9ac99e67 100644 --- a/op-devstack/sysgo/l2_metrics_dashboard.go +++ b/op-devstack/sysgo/l2_metrics_dashboard.go @@ -1,37 +1,11 @@ package sysgo -import ( - "fmt" - "os" - "path/filepath" - "sync" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/logpipe" - "gopkg.in/yaml.v3" -) - -const dockerExecutablePathEnvVar = "SYSGO_DOCKER_EXEC_PATH" -const grafanaProvisioningDirEnvVar = "SYSGO_GRAFANA_PROVISIONING_DIR" -const grafanaDataDirEnvVar = "SYSGO_GRAFANA_DATA_DIR" -const grafanaDockerImageTagEnvVar = "SYSGO_GRAFANA_DOCKER_IMAGE_TAG" -const prometheusDockerImageTagEnvVar = "SYSGO_PROMETHEUS_DOCKER_IMAGE_TAG" +import "fmt" const dockerToLocalHost = 
"host.docker.internal" -const prometheusHost = "0.0.0.0" -const prometheusServerPort = "9999" -const prometheusDockerPort = "9090" - -const grafanaHost = "0.0.0.0" -const grafanaServerPort = "3000" -const grafanaDockerPort = "3000" - type L2MetricsRegistrar interface { - // RegisterL2MetricsTargets is called by components when they are started (or earlier) to register - // their metrics endpoints so that a prometheus instance may be spun up to scrape metrics. - RegisterL2MetricsTargets(serviceName stack.Keyed, endpoints ...PrometheusMetricsTarget) + RegisterL2MetricsTargets(serviceName string, endpoints ...PrometheusMetricsTarget) } type PrometheusMetricsTarget string @@ -42,286 +16,3 @@ func NewPrometheusMetricsTarget(host string, port string, isRunningInDocker bool } return PrometheusMetricsTarget(fmt.Sprintf("%s:%s", host, port)) } - -type L2MetricsDashboard struct { - p devtest.P - - grafanaExecPath string - grafanaArgs []string - grafanaEnv []string - grafanaSubprocess *SubProcess - - prometheusExecPath string - prometheusArgs []string - prometheusEnv []string - prometheusSubprocess *SubProcess - - prometheusEndpoint string -} - -func (g *L2MetricsDashboard) Start() { - g.startPrometheus() - g.startGrafana() -} - -func (g *L2MetricsDashboard) Stop() { - var stopWaitGroup sync.WaitGroup - - stopWaitGroup.Add(1) - go func() { - defer stopWaitGroup.Done() - err := g.grafanaSubprocess.Stop(true) - g.p.Require().NoError(err, "Grafana must stop") - g.grafanaSubprocess = nil - }() - - stopWaitGroup.Add(1) - go func() { - defer stopWaitGroup.Done() - err := g.prometheusSubprocess.Stop(true) - g.p.Require().NoError(err, "Prometheus must stop") - g.prometheusSubprocess = nil - }() - - stopWaitGroup.Wait() -} - -func (g *L2MetricsDashboard) startPrometheus() { - // Create the sub-process. - // We pipe sub-process logs to the test-logger. 
- logOut := logpipe.ToLogger(g.p.Logger().New("component", "prometheus", "src", "stdout")) - logErr := logpipe.ToLogger(g.p.Logger().New("component", "prometheus", "src", "stderr")) - - stdOutLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseRustStructuredLogs(line) - logOut(e) - }) - stdErrLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseRustStructuredLogs(line) - logErr(e) - }) - - g.prometheusSubprocess = NewSubProcess(g.p, stdOutLogs, stdErrLogs) - - err := g.prometheusSubprocess.Start(g.prometheusExecPath, g.prometheusArgs, g.prometheusEnv) - g.p.Require().NoError(err, "prometheus must start") - - g.p.Logger().Info("Prometheus started", "endpoint", g.prometheusEndpoint) -} - -func (g *L2MetricsDashboard) startGrafana() { - // Create the sub-process. - // We pipe sub-process logs to the test-logger. - logOut := logpipe.ToLogger(g.p.Logger().New("component", "grafana", "src", "stdout")) - logErr := logpipe.ToLogger(g.p.Logger().New("component", "grafana", "src", "stderr")) - - stdOutLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseRustStructuredLogs(line) - logOut(e) - }) - stdErrLogs := logpipe.LogCallback(func(line []byte) { - e := logpipe.ParseRustStructuredLogs(line) - logErr(e) - }) - - g.grafanaSubprocess = NewSubProcess(g.p, stdOutLogs, stdErrLogs) - - err := g.grafanaSubprocess.Start(g.grafanaExecPath, g.grafanaArgs, g.grafanaEnv) - g.p.Require().NoError(err, "Grafana must start") - - g.p.Logger().Info("Grafana started") -} - -func WithL2MetricsDashboard() stack.Option[*Orchestrator] { - return stack.Finally(func(orch *Orchestrator) { - // don't start prometheus or grafana if metrics are disabled or there is nothing exporting metrics. 
- metricsLen := orch.l2MetricsEndpoints.Len() - if !areMetricsEnabled() || metricsLen == 0 { - return - } - - p := orch.P() - - prometheusImageTag := getEnvVarOrDefault(prometheusDockerImageTagEnvVar, "v3.7.2") - prometheusEndpoint := fmt.Sprintf("http://%s:%s", prometheusHost, prometheusServerPort) - promConfig := getPrometheusConfigFilePath(p, orch) - // these are args to run via docker; see dashboard definition below - prometheusArgs := []string{ - "run", - "-p", fmt.Sprintf("%s:%s", prometheusServerPort, prometheusDockerPort), - "-v", fmt.Sprintf("%s:/etc/prometheus/prometheus.yml:ro", promConfig), - fmt.Sprintf("prom/prometheus:%s", prometheusImageTag), - "--config.file=/etc/prometheus/prometheus.yml", - } - - grafanaImageTag := getEnvVarOrDefault(grafanaDockerImageTagEnvVar, "12.2") - grafanaEndpoint := fmt.Sprintf("http://%s:%s", grafanaHost, grafanaServerPort) - grafanaProvDir := getGrafanaProvisioningDirPath(p) - grafanaDataDir := getGrafanaDataDir(p) - // these are args to run via docker; see dashboard definition below - grafanaArgs := []string{ - "run", - "-p", fmt.Sprintf("%s:%s", grafanaServerPort, grafanaDockerPort), - "-v", fmt.Sprintf("%s:/etc/grafana/provisioning:ro", grafanaProvDir), - "-v", fmt.Sprintf("%s:/var/lib/grafana", grafanaDataDir), - fmt.Sprintf("grafana/grafana:%s", grafanaImageTag), - } - grafanaEnv := []string{ - propagateEnvVarOrDefault("GF_SECURITY_ADMIN_USER", "admin"), - propagateEnvVarOrDefault("GF_SECURITY_ADMIN_PASSWORD", "admin"), - propagateEnvVarOrDefault("GF_USERS_ALLOW_SIGN_UP", "false"), - propagateEnvVarOrDefault("GF_INSTALL_PLUGINS", "grafana-piechart-panel"), - } - dashboard := &L2MetricsDashboard{ - p: p, - - prometheusExecPath: getEnvVarOrDefault(dockerExecutablePathEnvVar, "docker"), - prometheusArgs: prometheusArgs, - prometheusEnv: os.Environ(), - prometheusEndpoint: prometheusEndpoint, - - grafanaExecPath: getEnvVarOrDefault(dockerExecutablePathEnvVar, "docker"), - grafanaArgs: grafanaArgs, - grafanaEnv: 
append(grafanaEnv, os.Environ()...), - } - - p.Logger().Info("Starting metrics dashboard", "dashboard", dashboard) - - dashboard.Start() - p.Cleanup(dashboard.Stop) - p.Logger().Info("Metrics dashboard is up", "url", grafanaEndpoint) - }) -} - -// TODO: If our needs get more complex, use https://pkg.go.dev/github.com/prometheus/prometheus/config instead. -type prometheusConfig struct { - Global prometheusGlobalConfig `yaml:"global"` - ScrapeConfigs []prometheusScrapeConfigEntry `yaml:"scrape_configs"` -} - -type prometheusGlobalConfig struct { - ScrapeInterval string `yaml:"scrape_interval"` - EvaluationInterval string `yaml:"evaluation_interval"` -} - -type prometheusScrapeConfigEntry struct { - Name string `yaml:"job_name"` - Scheme string `yaml:"scheme"` - StaticConfigs []prometheusStaticConfig `yaml:"static_configs"` -} - -type prometheusStaticConfig struct { - Targets []string `yaml:"targets"` -} - -// Returns the path to the dynamically-generated prometheus.yml file for metrics scraping. 
-func getPrometheusConfigFilePath(p devtest.P, orch *Orchestrator) string { - - var scrapeConfigs []prometheusScrapeConfigEntry - - orch.l2MetricsEndpoints.Range(func(name string, endpoints []PrometheusMetricsTarget) bool { - var targets []string - for _, endpoint := range endpoints { - targets = append(targets, string(endpoint)) - } - scrapeConfigs = append(scrapeConfigs, prometheusScrapeConfigEntry{ - Name: name, - Scheme: "http", - StaticConfigs: []prometheusStaticConfig{{Targets: targets}}, - }) - return true - }) - - yamlConfig := prometheusConfig{ - Global: prometheusGlobalConfig{ - ScrapeInterval: "5s", - EvaluationInterval: "5s", - }, - ScrapeConfigs: scrapeConfigs, - } - - b, err := yaml.Marshal(&yamlConfig) - p.Require().NoError(err, "getPrometheusConfigFilePath: error creating yaml from scrape configs", "scrapeConfigs", scrapeConfigs) - - p.Logger().Info(`getPrometheusConfigFilePath: generated prometheus.yml`, "prometheus.yaml", string(b)) - - filePath := filepath.Join(p.TempDir(), "prometheus.yml") - file, err := os.Create(filePath) - p.Require().NoError(err, "getPrometheusConfigFilePath:error creating prometheus file", "filePath", filePath) - defer func() { - p.Require().NoError(file.Close()) - }() - - _, err = file.Write(b) - p.Require().NoError(err, "getPrometheusConfigFilePath:error writing string to prom file", "filePath", filePath, "contents", string(b)) - - return filePath -} - -// getGrafanaProvisioningDirPath returns the path to the grafana provisioning dir for metrics. -// If the provisioning dir env var is set, this function will use that path. If not, a temp dir -// will be created and removed when this process terminates. 
-// Note: from the returned directory, the generated prometheus.yml will be at: -// -// returned_dir_path/provisioning/datasources/prometheus.yml -func getGrafanaProvisioningDirPath(p devtest.P) string { - // If the caller provides a Grafana provisioning directory, use that, otherwise use a temp dir - baseDir := os.Getenv(grafanaProvisioningDirEnvVar) - if baseDir == "" { - baseDir = filepath.Join(p.TempDir(), "grafana") - } - - dirPath := filepath.Join(baseDir, "provisioning/datasources") - err := os.MkdirAll(dirPath, 0777) - p.Require().NoError(err, "getGrafanaProvisioningDirPath: error writing dir path", "dirPath", dirPath) - - p.Logger().Info("Created grafana/provisioning/datasources dir", "dirPath", dirPath) - - filePath := filepath.Join(dirPath, "prometheus.yml") - file, err := os.Create(filePath) - p.Require().NoError(err, "getGrafanaProvisioningDirPath: error creating prometheus file", "filePath", filePath) - defer func() { - p.Require().NoError(file.Close()) - }() - - contents := fmt.Sprintf( - ` -apiVersion: 1 - -datasources: - - name: Prometheus - type: prometheus - access: proxy - url: http://%s:%s - isDefault: true -`, dockerToLocalHost, prometheusServerPort) - - if _, err = file.WriteString(contents); err != nil { - p.Require().NoError(err, "getGrafanaProvisioningDirPath: error writing prom file", "filePath", filePath, "contents", contents) - } - - p.Logger().Info("getGrafanaProvisioningDirPath: wrote prom config to file", "filePath", filePath, "contents", contents) - - return baseDir -} - -// getGrafanaDataDir returns the path to the grafana provisioning dir for metrics. -// If the data dir env var is set, this function will use that path. If not, a temp dir -// will be created and removed when this process terminates. 
-func getGrafanaDataDir(p devtest.P) string { - // If the caller provides a Grafana data directory, use that, otherwise use a temp dir - baseDir := os.Getenv(grafanaDataDirEnvVar) - if baseDir == "" { - baseDir = filepath.Join(p.TempDir(), "grafana-data") - } - - if _, err := os.Stat(baseDir); err != nil && os.IsNotExist(err) { - if err := os.Mkdir(baseDir, 0777); err != nil { - p.Require().NoError(err, "getGrafanaDataDir: error creating grafana data directory", "baseDir", baseDir) - } - } else { - p.Require().NoError(err, "getGrafanaDataDir: checking if grafana data directory exists", "baseDir", baseDir) - } - - return baseDir -} diff --git a/op-devstack/sysgo/l2_metrics_dashboard_test.go b/op-devstack/sysgo/l2_metrics_dashboard_test.go deleted file mode 100644 index bde32d906914d..0000000000000 --- a/op-devstack/sysgo/l2_metrics_dashboard_test.go +++ /dev/null @@ -1,36 +0,0 @@ -package sysgo - -import ( - "os" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/stretchr/testify/require" -) - -func TestWithL2MetricsDashboard_DefaultDisabled(t *testing.T) { - // This should run without error if orch.l2MetricsEndpoints is unset - stack.ApplyOptionLifecycle(WithL2MetricsDashboard(), &Orchestrator{}) -} - -func TestWithL2MetricsDashboard_DisabledIfEndpointsRegisteredButNotExplicitlyEnabled(t *testing.T) { - id := stack.NewL2ELNodeID("test", eth.ChainIDFromUInt64(11111111111)) - metricsTarget := NewPrometheusMetricsTarget("localhost", "9090", false) - - o := &Orchestrator{} - o.RegisterL2MetricsTargets(id, metricsTarget) - - // This should run without error if disabled - stack.ApplyOptionLifecycle(WithL2MetricsDashboard(), o) -} - -func TestWithL2MetricsDashboard_DisabledIfNoEndpointsRegisteredButExplicitlyEnabled(t *testing.T) { - - o := &Orchestrator{} - err := os.Setenv(sysgoMetricsEnabledEnvVar, "true") - require.NoError(t, err, "error setting metrics enabled") - - // This 
should run without error if disabled - stack.ApplyOptionLifecycle(WithL2MetricsDashboard(), o) -} diff --git a/op-devstack/sysgo/l2_network.go b/op-devstack/sysgo/l2_network.go index bdd3b5bb2f9b8..20163f3b8248e 100644 --- a/op-devstack/sysgo/l2_network.go +++ b/op-devstack/sysgo/l2_network.go @@ -1,38 +1,52 @@ package sysgo import ( + "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/params" "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-service/eth" ) type L2Network struct { - id stack.ComponentID + name string + chainID eth.ChainID l1ChainID eth.ChainID genesis *core.Genesis rollupCfg *rollup.Config deployment *L2Deployment + opcmImpl common.Address + mipsImpl common.Address keys devkeys.Keys } -func (c *L2Network) hydrate(system stack.ExtensibleSystem) { - l1Net := system.L1Network(stack.ByID[stack.L1Network](stack.NewL1NetworkID(c.l1ChainID))) - sysL2Net := shim.NewL2Network(shim.L2NetworkConfig{ - NetworkConfig: shim.NetworkConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ChainConfig: c.genesis.Config, - }, - ID: c.id, - RollupConfig: c.rollupCfg, - Deployment: c.deployment, - Keys: shim.NewKeyring(c.keys, system.T().Require()), - Superchain: nil, - L1: l1Net, - Cluster: nil, - }) - system.AddL2Network(sysL2Net) +func (c *L2Network) Name() string { + return c.name +} + +func (c *L2Network) ChainID() eth.ChainID { + return c.chainID +} + +func (c *L2Network) L1ChainID() eth.ChainID { + return c.l1ChainID +} + +func (c *L2Network) ChainConfig() *params.ChainConfig { + return c.genesis.Config +} + +func (c *L2Network) RollupConfig() *rollup.Config { + return c.rollupCfg +} + +func (c *L2Network) Deployment() stack.L2Deployment { + return c.deployment +} + +func (c 
*L2Network) Keys() devkeys.Keys { + return c.keys } diff --git a/op-devstack/sysgo/l2_network_superchain_registry.go b/op-devstack/sysgo/l2_network_superchain_registry.go deleted file mode 100644 index f48a1fa6bdb5f..0000000000000 --- a/op-devstack/sysgo/l2_network_superchain_registry.go +++ /dev/null @@ -1,75 +0,0 @@ -package sysgo - -import ( - "fmt" - - "github.com/ethereum/go-ethereum/core" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-node/chaincfg" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/superutil" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" -) - -// WithL2NetworkFromSuperchainRegistry creates an L2 network using the rollup config from the superchain registry -func WithL2NetworkFromSuperchainRegistry(l2NetworkID stack.ComponentID, networkName string) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2NetworkID)) - require := p.Require() - - // Load the rollup config from the superchain registry - rollupCfg, err := chaincfg.GetRollupConfig(networkName) - require.NoError(err, "failed to load rollup config for network %s", networkName) - - // Get the chain config from the superchain registry - chainCfg := chaincfg.ChainByName(networkName) - require.NotNil(chainCfg, "chain config not found for network %s", networkName) - - // Load the chain config using superutil - paramsChainConfig, err := superutil.LoadOPStackChainConfigFromChainID(chainCfg.ChainID) - require.NoError(err, "failed to load chain config for network %s", networkName) - - // Create a genesis config from the chain config - genesis := &core.Genesis{ - Config: paramsChainConfig, - } - - // Create the L2 network - l2Net := &L2Network{ - id: l2NetworkID, - l1ChainID: eth.ChainIDFromBig(rollupCfg.L1ChainID), - genesis: genesis, - rollupCfg: 
rollupCfg, - keys: orch.keys, - } - - cid := l2NetworkID - require.False(orch.registry.Has(cid), fmt.Sprintf("must not already exist: %s", l2NetworkID)) - orch.registry.Register(cid, l2Net) - }) -} - -// WithEmptyDepSet creates an L2 network using the rollup config from the superchain registry -func WithEmptyDepSet(l2NetworkID stack.ComponentID, networkName string) stack.Option[*Orchestrator] { - return stack.Combine( - WithL2NetworkFromSuperchainRegistry(l2NetworkID, networkName), - stack.BeforeDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), l2NetworkID)) - require := p.Require() - - // Check that chain config is available in registry - chainCfg := chaincfg.ChainByName(networkName) - require.NotNil(chainCfg, "chain config not found for network %s", networkName) - - // Create a minimal cluster with empty dependency set - clusterID := stack.NewClusterID(networkName) - cluster := &Cluster{ - id: clusterID, - cfgset: depset.FullConfigSetMerged{}, - } - - orch.registry.Register(clusterID, cluster) - }), - ) -} diff --git a/op-devstack/sysgo/l2_proposer.go b/op-devstack/sysgo/l2_proposer.go index 25007aef2b6d4..bb6e7c31cb55c 100644 --- a/op-devstack/sysgo/l2_proposer.go +++ b/op-devstack/sysgo/l2_proposer.go @@ -1,163 +1,15 @@ package sysgo import ( - "context" - "time" - - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/setuputils" ps "github.com/ethereum-optimism/optimism/op-proposer/proposer" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/endpoint" - oplog "github.com/ethereum-optimism/optimism/op-service/log" - opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" - 
"github.com/ethereum-optimism/optimism/op-service/oppprof" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum-optimism/optimism/op-service/eth" ) type L2Proposer struct { - id stack.ComponentID + name string + chainID eth.ChainID service *ps.ProposerService userRPC string } -func (p *L2Proposer) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), p.userRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - - bFrontend := shim.NewL2Proposer(shim.L2ProposerConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: p.id, - Client: rpcCl, - }) - l2Net := system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(p.id.ChainID()))) - l2Net.(stack.ExtensibleL2Network).AddL2Proposer(bFrontend) -} - -type ProposerOption func(id stack.ComponentID, cfg *ps.CLIConfig) - -func WithProposerOption(opt ProposerOption) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(o *Orchestrator) { - o.proposerOptions = append(o.proposerOptions, opt) - }) -} - -func WithProposer(proposerID stack.ComponentID, l1ELID stack.ComponentID, - l2CLID *stack.ComponentID, supervisorID *stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - WithProposerPostDeploy(orch, proposerID, l1ELID, l2CLID, supervisorID, nil) - }) -} - -func WithSuperProposer(proposerID stack.ComponentID, l1ELID stack.ComponentID, - supervisorID *stack.ComponentID) stack.Option[*Orchestrator] { - return stack.Finally(func(orch *Orchestrator) { - WithProposerPostDeploy(orch, proposerID, l1ELID, nil, supervisorID, nil) - }) -} - -func WithSupernodeProposer(proposerID stack.ComponentID, l1ELID stack.ComponentID, - supernodeID *stack.SupernodeID) stack.Option[*Orchestrator] { - return stack.Finally(func(orch *Orchestrator) { - WithProposerPostDeploy(orch, proposerID, l1ELID, nil, nil, supernodeID) - }) -} - 
-func WithProposerPostDeploy(orch *Orchestrator, proposerID stack.ComponentID, l1ELID stack.ComponentID, - l2CLID *stack.ComponentID, supervisorID *stack.ComponentID, supernodeID *stack.SupernodeID) { - ctx := orch.P().Ctx() - ctx = stack.ContextWithID(ctx, proposerID) - p := orch.P().WithCtx(ctx) - - require := p.Require() - proposerCID := proposerID - require.False(orch.registry.Has(proposerCID), "proposer must not already exist") - if supervisorID != nil && supernodeID != nil { - require.Fail("cannot have both supervisorID and supernodeID set for proposer") - } - - proposerSecret, err := orch.keys.Secret(devkeys.ProposerRole.Key(proposerID.ChainID().ToBig())) - require.NoError(err) - - logger := p.Logger() - logger.Info("Proposer key acquired", "addr", crypto.PubkeyToAddress(proposerSecret.PublicKey)) - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok) - - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(proposerID.ChainID())) - require.True(ok) - disputeGameFactoryAddr := l2Net.deployment.DisputeGameFactoryProxyAddr() - disputeGameType := 1 // Permissioned game type is the only one currently deployed - if orch.wb.outInteropMigration != nil { - disputeGameFactoryAddr = orch.wb.outInteropMigration.DisputeGameFactory - disputeGameType = 4 // SUPER_CANNON - } - - proposerCLIConfig := &ps.CLIConfig{ - L1EthRpc: l1EL.UserRPC(), - PollInterval: 500 * time.Millisecond, - AllowNonFinalized: true, - TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.UserRPC()), proposerSecret), - RPCConfig: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - }, - LogConfig: oplog.CLIConfig{ - Level: log.LvlInfo, - Format: oplog.FormatText, - }, - MetricsConfig: opmetrics.CLIConfig{}, - PprofConfig: oppprof.CLIConfig{}, - DGFAddress: disputeGameFactoryAddr.Hex(), - ProposalInterval: 6 * time.Second, - DisputeGameType: uint32(disputeGameType), - ActiveSequencerCheckDuration: time.Second * 5, - WaitNodeSync: false, - } - for _, opt := range orch.proposerOptions { - opt(proposerID, 
proposerCLIConfig) - } - - // If supervisor is available, use it. Otherwise, connect to L2 CL. - switch { - case supervisorID != nil: - supervisorNode, ok := orch.GetSupervisor(*supervisorID) - require.True(ok, "supervisor not found") - proposerCLIConfig.SupervisorRpcs = []string{supervisorNode.UserRPC()} - case supernodeID != nil: - supernode, ok := orch.supernodes.Get(*supernodeID) - require.True(ok, "supernode not found") - proposerCLIConfig.SuperNodeRpcs = []string{supernode.UserRPC()} - default: - require.NotNil(l2CLID, "need L2 CL to connect to when no supervisor") - l2CL, ok := orch.GetL2CL(*l2CLID) - require.True(ok) - proposerCLIConfig.RollupRpc = l2CL.UserRPC() - } - - proposer, err := ps.ProposerServiceFromCLIConfig(ctx, "0.0.1", proposerCLIConfig, logger) - require.NoError(err) - - require.NoError(proposer.Start(ctx)) - p.Cleanup(func() { - ctx, cancel := context.WithCancel(ctx) - cancel() // force-quit - logger.Info("Closing proposer") - _ = proposer.Stop(ctx) - logger.Info("Closed proposer") - }) - - prop := &L2Proposer{ - id: proposerID, - service: proposer, - userRPC: proposer.HTTPEndpoint(), - } - orch.registry.Register(proposerID, prop) -} +type ProposerOption func(target ComponentTarget, cfg *ps.CLIConfig) diff --git a/op-devstack/sysgo/mixed_runtime.go b/op-devstack/sysgo/mixed_runtime.go new file mode 100644 index 0000000000000..4d1ac319467ff --- /dev/null +++ b/op-devstack/sysgo/mixed_runtime.go @@ -0,0 +1,623 @@ +package sysgo + +import ( + "context" + "encoding/hex" + "encoding/json" + "os" + "os/exec" + "path/filepath" + "strings" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-faucet/faucet" + 
"github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + sequencerConfig "github.com/ethereum-optimism/optimism/op-test-sequencer/config" + testmetrics "github.com/ethereum-optimism/optimism/op-test-sequencer/metrics" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/fakepos" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/standardbuilder" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/noopcommitter" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/standardcommitter" + workconfig "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/config" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/nooppublisher" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/standardpublisher" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/sequencers/fullseq" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/localkey" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/noopsigner" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" +) + +type MixedL2ELKind string + +const ( + MixedL2ELOpGeth MixedL2ELKind = "op-geth" + MixedL2ELOpReth MixedL2ELKind = "op-reth" +) + +type MixedL2CLKind string + +const ( + 
MixedL2CLOpNode MixedL2CLKind = "op-node" + MixedL2CLKona MixedL2CLKind = "kona-node" +) + +type MixedSingleChainNodeSpec struct { + ELKey string + CLKey string + ELKind MixedL2ELKind + ELProofHistory bool + CLKind MixedL2CLKind + IsSequencer bool +} + +type MixedSingleChainPresetConfig struct { + NodeSpecs []MixedSingleChainNodeSpec + WithTestSequencer bool + TestSequencerName string + DeployerOptions []DeployerOption +} + +type mixedSingleChainNode struct { + spec MixedSingleChainNodeSpec + el L2ELNode + cl L2CLNode +} + +type MixedSingleChainRuntime struct { + L1Network *L1Network + L1EL *L1Geth + L1CL *L1CLNode + L2Network *L2Network + Nodes []MixedSingleChainNodeRefs + L2Batcher *L2Batcher + FaucetService *faucet.Service + TestSequencer *TestSequencerRuntime +} + +type MixedSingleChainNodeRefs struct { + Spec MixedSingleChainNodeSpec + EL L2ELNode + CL L2CLNode +} + +type mixedNoopMetricsRegistrar struct{} + +func (mixedNoopMetricsRegistrar) RegisterL2MetricsTargets(_ string, _ ...PrometheusMetricsTarget) { +} + +func NewMixedSingleChainRuntime(t devtest.T, cfg MixedSingleChainPresetConfig) *MixedSingleChainRuntime { + require := t.Require() + require.NotEmpty(cfg.NodeSpecs, "mixed runtime requires at least one L2 node spec") + + keys, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(err, "failed to derive dev keys from mnemonic") + + l1Net, l2Net := buildSingleChainWorld(t, keys, cfg.DeployerOptions...) 
+ jwtPath, jwtSecret := writeJWTSecret(t) + l1EL, l1CL := startInProcessL1(t, l1Net, jwtPath) + + metricsRegistrar := mixedNoopMetricsRegistrar{} + + nodes := make([]mixedSingleChainNode, 0, len(cfg.NodeSpecs)) + for _, spec := range cfg.NodeSpecs { + identity := NewELNodeIdentity(0) + + var el L2ELNode + switch spec.ELKind { + case MixedL2ELOpGeth: + el = startL2ELNode(t, l2Net, jwtPath, jwtSecret, spec.ELKey, identity) + case MixedL2ELOpReth: + el = startMixedOpRethNode(t, l2Net, spec.ELKey, jwtPath, jwtSecret, spec.ELProofHistory, metricsRegistrar) + default: + require.FailNowf("unsupported EL kind", "unsupported mixed EL kind %q", spec.ELKind) + } + + var cl L2CLNode + switch spec.CLKind { + case MixedL2CLOpNode: + cl = startL2CLNode(t, keys, l1Net, l2Net, l1EL, l1CL, el, jwtSecret, l2CLNodeStartConfig{ + Key: spec.CLKey, + IsSequencer: spec.IsSequencer, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + }) + case MixedL2CLKona: + cl = startMixedKonaNode( + t, + keys, + l1Net, + l2Net, + l1EL, + l1CL, + el, + spec.CLKey, + spec.ELKey, + spec.IsSequencer, + metricsRegistrar, + ) + default: + require.FailNowf("unsupported CL kind", "unsupported mixed CL kind %q", spec.CLKind) + } + + nodes = append(nodes, mixedSingleChainNode{ + spec: spec, + el: el, + cl: cl, + }) + } + + for i := range nodes { + for j := 0; j < i; j++ { + connectL2CLPeers(t, t.Logger(), nodes[i].cl, nodes[j].cl) + connectL2ELPeers(t, t.Logger(), nodes[i].el.UserRPC(), nodes[j].el.UserRPC(), false) + } + } + + var sequencerNode *mixedSingleChainNode + for i := range nodes { + if nodes[i].spec.IsSequencer { + sequencerNode = &nodes[i] + break + } + } + require.NotNil(sequencerNode, "mixed runtime requires at least one sequencer node") + + l2Batcher := startMinimalBatcher(t, keys, l2Net, l1EL, sequencerNode.cl, sequencerNode.el) + faucetService := startFaucets(t, keys, l1Net.ChainID(), l2Net.ChainID(), l1EL.UserRPC(), sequencerNode.el.UserRPC()) + + var testSequencer *testSequencer 
+ if cfg.WithTestSequencer { + testSequencerName := cfg.TestSequencerName + if testSequencerName == "" { + testSequencerName = "test-sequencer" + } + testSequencer = startTestSequencerForRPCs( + t, + keys, + testSequencerName, + jwtPath, + jwtSecret, + l1Net, + l1EL, + l1CL, + l2Net.ChainID(), + sequencerNode.el.UserRPC(), + sequencerNode.cl.UserRPC(), + ) + } + + return &MixedSingleChainRuntime{ + L1Network: l1Net, + L1EL: l1EL, + L1CL: l1CL, + L2Network: l2Net, + Nodes: mixedNodeRefs(nodes), + L2Batcher: l2Batcher, + FaucetService: faucetService, + TestSequencer: newTestSequencerRuntime(testSequencer, cfg.TestSequencerName), + } +} + +func mixedNodeRefs(nodes []mixedSingleChainNode) []MixedSingleChainNodeRefs { + out := make([]MixedSingleChainNodeRefs, 0, len(nodes)) + for _, node := range nodes { + out = append(out, MixedSingleChainNodeRefs{ + Spec: node.spec, + EL: node.el, + CL: node.cl, + }) + } + return out +} + +func startMixedOpRethNode( + t devtest.T, + l2Net *L2Network, + key string, + jwtPath string, + jwtSecret [32]byte, + proofHistory bool, + metricsRegistrar L2MetricsRegistrar, +) *OpReth { + tempDir := t.TempDir() + + data, err := json.Marshal(l2Net.genesis) + t.Require().NoError(err, "must json-encode genesis") + chainConfigPath := filepath.Join(tempDir, "genesis.json") + t.Require().NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write genesis file") + + dataDirPath := filepath.Join(tempDir, "data") + t.Require().NoError(os.MkdirAll(dataDirPath, 0o755), "must create datadir") + + logDirPath := filepath.Join(tempDir, "logs") + t.Require().NoError(os.MkdirAll(logDirPath, 0o755), "must create logs dir") + + tempP2PPath := filepath.Join(tempDir, "p2pkey.txt") + + execPath := os.Getenv("OP_RETH_EXEC_PATH") + t.Require().NotEmpty(execPath, "OP_RETH_EXEC_PATH environment variable must be set") + _, err = os.Stat(execPath) + t.Require().NotErrorIs(err, os.ErrNotExist, "executable must exist") + + args := []string{ + "node", + "--addr=127.0.0.1", 
+ "--authrpc.addr=127.0.0.1", + "--authrpc.jwtsecret=" + jwtPath, + "--authrpc.port=0", + "--builder.deadline=2", + "--builder.interval=100ms", + "--chain=" + chainConfigPath, + "--color=never", + "--datadir=" + dataDirPath, + "--disable-discovery", + "--http", + "--http.api=admin,debug,eth,net,trace,txpool,web3,rpc,reth,miner", + "--http.addr=127.0.0.1", + "--http.port=0", + "--ipcdisable", + "--log.file.directory=" + logDirPath, + "--log.stdout.format=json", + "--nat=none", + "--p2p-secret-key=" + tempP2PPath, + "--port=0", + "--rpc.eth-proof-window=30", + "--txpool.minimum-priority-fee=1", + "--txpool.nolocals", + "--with-unused-ports", + "--ws", + "--ws.api=admin,debug,eth,net,trace,txpool,web3,rpc,reth,miner", + "--ws.addr=127.0.0.1", + "--ws.port=0", + "-vvvv", + } + + if areMetricsEnabled() { + args = append(args, "--metrics=127.0.0.1:0") + } + + initArgs := []string{ + "init", + "--datadir=" + dataDirPath, + "--chain=" + chainConfigPath, + } + err = exec.Command(execPath, initArgs...).Run() + t.Require().NoError(err, "must init op-reth node") + + if proofHistory { + proofHistoryDir := filepath.Join(tempDir, "proof-history") + + initProofsArgs := []string{ + "proofs", + "init", + "--datadir=" + dataDirPath, + "--chain=" + chainConfigPath, + "--proofs-history.storage-path=" + proofHistoryDir, + } + err = exec.Command(execPath, initProofsArgs...).Run() + t.Require().NoError(err, "must init op-reth proof history") + + args = append( + args, + "--proofs-history", + "--proofs-history.window=200", + "--proofs-history.prune-interval=1m", + "--proofs-history.storage-path="+proofHistoryDir, + ) + } + + l2EL := &OpReth{ + name: key, + chainID: l2Net.ChainID(), + jwtPath: jwtPath, + jwtSecret: jwtSecret, + authRPC: "", + userRPC: "", + execPath: execPath, + args: args, + env: []string{}, + p: t, + l2MetricsRegistrar: metricsRegistrar, + } + + t.Logger().Info("Starting op-reth", "name", key, "chain", l2Net.ChainID()) + l2EL.Start() + t.Cleanup(l2EL.Stop) + 
t.Logger().Info("op-reth is ready", "name", key, "chain", l2Net.ChainID(), "userRPC", l2EL.userRPC, "authRPC", l2EL.authRPC) + return l2EL +} + +func startMixedKonaNode( + t devtest.T, + keys devkeys.Keys, + l1Net *L1Network, + l2Net *L2Network, + l1EL L1ELNode, + l1CL *L1CLNode, + l2EL L2ELNode, + clKey string, + elKey string, + isSequencer bool, + metricsRegistrar L2MetricsRegistrar, +) *KonaNode { + tempKonaDir := t.TempDir() + + tempP2PPath := filepath.Join(tempKonaDir, "p2pkey.txt") + + tempRollupCfgPath := filepath.Join(tempKonaDir, "rollup.json") + rollupCfgData, err := json.Marshal(l2Net.rollupCfg) + t.Require().NoError(err, "must write rollup config") + t.Require().NoError(os.WriteFile(tempRollupCfgPath, rollupCfgData, 0o644)) + + tempL1CfgPath := filepath.Join(tempKonaDir, "l1-chain-config.json") + l1CfgData, err := json.Marshal(l1Net.genesis.Config) + t.Require().NoError(err, "must write l1 chain config") + t.Require().NoError(os.WriteFile(tempL1CfgPath, l1CfgData, 0o644)) + + envVars := []string{ + "KONA_NODE_L1_ETH_RPC=" + l1EL.UserRPC(), + "KONA_NODE_L1_BEACON=" + l1CL.beaconHTTPAddr, + "KONA_NODE_L2_ENGINE_RPC=" + strings.ReplaceAll(l2EL.EngineRPC(), "ws://", "http://"), + "KONA_NODE_L2_ENGINE_AUTH=" + l2EL.JWTPath(), + "KONA_NODE_ROLLUP_CONFIG=" + tempRollupCfgPath, + "KONA_NODE_L1_CHAIN_CONFIG=" + tempL1CfgPath, + "KONA_NODE_P2P_PRIV_PATH=" + tempP2PPath, + propagateEnvVarOrDefault("KONA_NODE_P2P_NO_DISCOVERY", "true"), + propagateEnvVarOrDefault("KONA_NODE_RPC_ADDR", "127.0.0.1"), + propagateEnvVarOrDefault("KONA_NODE_RPC_PORT", "0"), + propagateEnvVarOrDefault("KONA_NODE_RPC_WS_ENABLED", "true"), + propagateEnvVarOrDefault("KONA_METRICS_ADDR", ""), + propagateEnvVarOrDefault("KONA_LOG_LEVEL", "3"), + propagateEnvVarOrDefault("KONA_LOG_STDOUT_FORMAT", "json"), + propagateEnvVarOrDefault("KONA_NODE_P2P_LISTEN_IP", "127.0.0.1"), + propagateEnvVarOrDefault("KONA_NODE_P2P_LISTEN_TCP_PORT", "0"), + 
propagateEnvVarOrDefault("KONA_NODE_P2P_LISTEN_UDP_PORT", "0"), + } + + if areMetricsEnabled() { + metricsPort, err := getAvailableLocalPort() + t.Require().NoError(err, "startMixedKonaNode: getting metrics port") + envVars = append(envVars, propagateEnvVarOrDefault("KONA_METRICS_PORT", metricsPort)) + envVars = append(envVars, "KONA_METRICS_ENABLED=true") + } + + if isSequencer { + p2pKey, err := keys.Secret(devkeys.SequencerP2PRole.Key(l2Net.ChainID().ToBig())) + t.Require().NoError(err, "need p2p key for sequencer") + p2pKeyHex := "0x" + hex.EncodeToString(crypto.FromECDSA(p2pKey)) + tempSeqKeyPath := filepath.Join(tempKonaDir, "p2p-sequencer.txt") + t.Require().NoError(os.WriteFile(tempSeqKeyPath, []byte(p2pKeyHex), 0o644)) + envVars = append(envVars, + "KONA_NODE_P2P_SEQUENCER_KEY_PATH="+tempSeqKeyPath, + "KONA_NODE_SEQUENCER_L1_CONFS=2", + "KONA_NODE_MODE=Sequencer", + ) + } else { + envVars = append(envVars, "KONA_NODE_MODE=Validator") + } + + execPath, err := EnsureRustBinary(t, RustBinarySpec{ + SrcDir: "rust/kona", + Package: "kona-node", + Binary: "kona-node", + }) + t.Require().NoError(err, "prepare kona-node binary") + t.Require().NotEmpty(execPath, "kona-node binary path resolved") + + k := &KonaNode{ + name: clKey, + chainID: l2Net.ChainID(), + userRPC: "", + interopEndpoint: "", + interopJwtSecret: eth.Bytes32{}, + execPath: execPath, + args: []string{"node"}, + env: envVars, + p: t, + l2MetricsRegistrar: metricsRegistrar, + } + t.Logger().Info("Starting kona-node", "name", clKey, "chain", l2Net.ChainID(), "el", elKey) + k.Start() + t.Cleanup(k.Stop) + t.Logger().Info("Kona-node is up", "name", clKey, "chain", l2Net.ChainID(), "rpc", k.UserRPC()) + return k +} + +func startTestSequencerForRPCs( + t devtest.T, + keys devkeys.Keys, + testSequencerName string, + jwtPath string, + jwtSecret [32]byte, + l1Net *L1Network, + l1EL *L1Geth, + l1CL *L1CLNode, + l2ChainID eth.ChainID, + l2ELRPC string, + l2CLRPC string, +) *testSequencer { + require := 
t.Require() + logger := t.Logger().New("component", "test-sequencer") + + l1ELClient, err := ethclient.DialContext(t.Ctx(), l1EL.UserRPC()) + require.NoError(err, "failed to dial L1 EL RPC for test-sequencer") + t.Cleanup(l1ELClient.Close) + + engineCl, err := dialEngine(t.Ctx(), l1EL.AuthRPC(), jwtSecret) + require.NoError(err, "failed to dial L1 engine API for test-sequencer") + t.Cleanup(func() { + engineCl.inner.Close() + }) + + l1ChainID := l1Net.ChainID() + + bidL1 := seqtypes.BuilderID("test-l1-builder") + cidL1 := seqtypes.CommitterID("test-noop-committer") + sidL1 := seqtypes.SignerID("test-noop-signer") + pidL1 := seqtypes.PublisherID("test-noop-publisher") + seqIDL1 := seqtypes.SequencerID("test-seq-" + l1ChainID.String()) + + ensemble := &workconfig.Ensemble{ + Builders: map[seqtypes.BuilderID]*workconfig.BuilderEntry{ + bidL1: { + L1: &fakepos.Config{ + ChainConfig: l1Net.genesis.Config, + EngineAPI: engineCl, + Backend: l1ELClient, + Beacon: l1CL.beacon, + FinalizedDistance: 20, + SafeDistance: 10, + BlockTime: 6, + }, + }, + }, + Signers: map[seqtypes.SignerID]*workconfig.SignerEntry{ + sidL1: {Noop: &noopsigner.Config{}}, + }, + Committers: map[seqtypes.CommitterID]*workconfig.CommitterEntry{ + cidL1: {Noop: &noopcommitter.Config{}}, + }, + Publishers: map[seqtypes.PublisherID]*workconfig.PublisherEntry{ + pidL1: {Noop: &nooppublisher.Config{}}, + }, + Sequencers: map[seqtypes.SequencerID]*workconfig.SequencerEntry{ + seqIDL1: { + Full: &fullseq.Config{ + ChainID: l1ChainID, + Builder: bidL1, + Signer: sidL1, + Committer: cidL1, + Publisher: pidL1, + }, + }, + }, + } + + bidL2 := seqtypes.BuilderID("test-standard-builder") + cidL2 := seqtypes.CommitterID("test-standard-committer") + sidL2 := seqtypes.SignerID("test-local-signer") + pidL2 := seqtypes.PublisherID("test-standard-publisher") + seqIDL2 := seqtypes.SequencerID("test-seq-" + l2ChainID.String()) + + p2pKey, err := keys.Secret(devkeys.SequencerP2PRole.Key(l2ChainID.ToBig())) + 
require.NoError(err, "need p2p key for test sequencer") + rawKey := hexutil.Bytes(crypto.FromECDSA(p2pKey)) + + ensemble.Builders[bidL2] = &workconfig.BuilderEntry{ + Standard: &standardbuilder.Config{ + L1ChainConfig: l1Net.genesis.Config, + L1EL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l1EL.UserRPC()), + }, + L2EL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2ELRPC), + }, + L2CL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CLRPC), + }, + }, + } + ensemble.Signers[sidL2] = &workconfig.SignerEntry{ + LocalKey: &localkey.Config{ + RawKey: &rawKey, + ChainID: l2ChainID, + }, + } + ensemble.Committers[cidL2] = &workconfig.CommitterEntry{ + Standard: &standardcommitter.Config{ + RPC: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CLRPC), + }, + }, + } + ensemble.Publishers[pidL2] = &workconfig.PublisherEntry{ + Standard: &standardpublisher.Config{ + RPC: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CLRPC), + }, + }, + } + ensemble.Sequencers[seqIDL2] = &workconfig.SequencerEntry{ + Full: &fullseq.Config{ + ChainID: l2ChainID, + Builder: bidL2, + Signer: sidL2, + Committer: cidL2, + Publisher: pidL2, + SequencerConfDepth: 2, + SequencerEnabled: true, + SequencerStopped: false, + SequencerMaxSafeLag: 0, + }, + } + + jobs := work.NewJobRegistry() + startedEnsemble, err := ensemble.Start(t.Ctx(), &work.StartOpts{ + Log: logger, + Metrics: &testmetrics.NoopMetrics{}, + Jobs: jobs, + }) + require.NoError(err, "failed to start test-sequencer ensemble") + + cfg := &sequencerConfig.Config{ + MetricsConfig: opmetrics.CLIConfig{ + Enabled: false, + }, + PprofConfig: oppprof.CLIConfig{ + ListenEnabled: false, + }, + LogConfig: oplog.CLIConfig{ + Level: log.LevelDebug, + Format: oplog.FormatText, + }, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + ListenPort: 0, + EnableAdmin: true, + }, + Ensemble: startedEnsemble, + JWTSecretPath: jwtPath, + Version: "dev", + MockRun: false, + } + + sq, err := sequencer.FromConfig(t.Ctx(), cfg, logger) + require.NoError(err, "failed 
to initialize test-sequencer service") + require.NoError(sq.Start(t.Ctx()), "failed to start test-sequencer service") + + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing test-sequencer service") + closeErr := sq.Stop(ctx) + logger.Info("Closed test-sequencer service", "err", closeErr) + }) + + adminRPC := sq.RPC() + controlRPCs := map[eth.ChainID]string{ + l1ChainID: adminRPC + "/sequencers/" + seqIDL1.String(), + l2ChainID: adminRPC + "/sequencers/" + seqIDL2.String(), + } + + return &testSequencer{ + name: testSequencerName, + adminRPC: adminRPC, + jwtSecret: jwtSecret, + controlRPC: controlRPCs, + service: sq, + } +} diff --git a/op-devstack/sysgo/multichain_proofs.go b/op-devstack/sysgo/multichain_proofs.go new file mode 100644 index 0000000000000..8aabe222d8ec6 --- /dev/null +++ b/op-devstack/sysgo/multichain_proofs.go @@ -0,0 +1,327 @@ +package sysgo + +import ( + "context" + "runtime" + "sort" + "time" + + "github.com/ethereum/go-ethereum/core" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + opchallenger "github.com/ethereum-optimism/optimism/op-challenger" + challengermetrics "github.com/ethereum-optimism/optimism/op-challenger/metrics" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + sharedchallenger "github.com/ethereum-optimism/optimism/op-devstack/shared/challenger" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/setuputils" + "github.com/ethereum-optimism/optimism/op-node/rollup" + ps "github.com/ethereum-optimism/optimism/op-proposer/proposer" + "github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + 
"github.com/ethereum-optimism/optimism/op-service/oppprof" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" +) + +func withSuperProofsDeployerFeature(cfg PresetConfig) PresetConfig { + cfg.DeployerOptions = append([]DeployerOption{ + WithDevFeatureEnabled(deployer.OptimismPortalInteropDevFlag), + }, cfg.DeployerOptions...) + return cfg +} + +func orderedRuntimeChains(runtime *MultiChainRuntime) []*MultiChainNodeRuntime { + keys := make([]string, 0, len(runtime.Chains)) + for key := range runtime.Chains { + keys = append(keys, key) + } + sort.Strings(keys) + + chains := make([]*MultiChainNodeRuntime, 0, len(keys)) + for _, key := range keys { + chains = append(chains, runtime.Chains[key]) + } + return chains +} + +func attachSupervisorSuperProofs(t devtest.T, runtime *MultiChainRuntime, cfg PresetConfig) *MultiChainRuntime { + chains := orderedRuntimeChains(runtime) + t.Require().NotEmpty(chains, "supervisor superproofs runtime must contain at least one chain") + t.Require().NotNil(runtime.PrimarySupervisor, "supervisor superproofs runtime must provide a supervisor") + + proofChain := chains[0] + cls := make([]L2CLNode, 0, len(chains)) + nets := make([]*L2Network, 0, len(chains)) + els := make([]L2ELNode, 0, len(chains)) + for _, chain := range chains { + t.Require().NotNil(chain, "runtime chain entry must not be nil") + cls = append(cls, chain.CL) + nets = append(nets, chain.Network) + els = append(els, chain.EL) + } + + superrootTime := awaitSuperrootTime(t, cls...) 
+ superRoot := getSupervisorSuperRoot(t, runtime.PrimarySupervisor, superrootTime) + migrateSuperRoots(t, runtime.Keys, runtime.Migration, runtime.L1Network.ChainID(), runtime.L1EL, superRoot, superrootTime, proofChain.Network.ChainID()) + + challenger := startInteropChallenger( + t, + runtime.Keys, + runtime.L1Network, + runtime.L1EL, + runtime.L1CL, + runtime.DependencySet, + runtime.PrimarySupervisor.UserRPC(), + false, + nets, + els, + cfg.EnableCannonKonaForChall, + ) + runtime.L2ChallengerConfig = challenger.Config() + + _ = startSuperProposer( + t, + runtime.Keys, + "main", + proofChain.Network.ChainID(), + runtime.L1EL, + proofChain.Network, + runtime.PrimarySupervisor.UserRPC(), + "", + cfg.ProposerOptions..., + ) + + return runtime +} + +func attachSupernodeSuperProofs(t devtest.T, runtime *MultiChainRuntime, cfg PresetConfig) *MultiChainRuntime { + chains := orderedRuntimeChains(runtime) + t.Require().NotEmpty(chains, "supernode superproofs runtime must contain at least one chain") + t.Require().NotNil(runtime.Supernode, "supernode superproofs runtime must provide a supernode") + + proofChain := chains[0] + cls := make([]L2CLNode, 0, len(chains)) + nets := make([]*L2Network, 0, len(chains)) + els := make([]L2ELNode, 0, len(chains)) + for _, chain := range chains { + t.Require().NotNil(chain, "runtime chain entry must not be nil") + cls = append(cls, chain.CL) + nets = append(nets, chain.Network) + els = append(els, chain.EL) + } + + superrootTime := awaitSuperrootTime(t, cls...) 
+ superRoot := getSupernodeSuperRoot(t, runtime.Supernode, superrootTime) + migrateSuperRoots(t, runtime.Keys, runtime.Migration, runtime.L1Network.ChainID(), runtime.L1EL, superRoot, superrootTime, proofChain.Network.ChainID()) + + challenger := startInteropChallenger( + t, + runtime.Keys, + runtime.L1Network, + runtime.L1EL, + runtime.L1CL, + runtime.DependencySet, + runtime.Supernode.UserRPC(), + true, + nets, + els, + cfg.EnableCannonKonaForChall, + ) + runtime.L2ChallengerConfig = challenger.Config() + + _ = startSuperProposer( + t, + runtime.Keys, + "main", + proofChain.Network.ChainID(), + runtime.L1EL, + proofChain.Network, + "", + runtime.Supernode.UserRPC(), + cfg.ProposerOptions..., + ) + + return runtime +} + +func NewSimpleInteropSuperProofsRuntimeWithConfig(t devtest.T, cfg PresetConfig) *MultiChainRuntime { + cfg = withSuperProofsDeployerFeature(cfg) + return attachSupervisorSuperProofs(t, NewSimpleInteropRuntimeWithConfig(t, cfg), cfg) +} + +func NewTwoL2SupernodeProofsRuntimeWithConfig(t devtest.T, interopAtGenesis bool, cfg PresetConfig) *MultiChainRuntime { + cfg = withSuperProofsDeployerFeature(cfg) + runtime, _ := newTwoL2SupernodeRuntimeWithConfig(t, interopAtGenesis, 0, cfg) + attachTestSequencerToRuntime(t, runtime, "test-sequencer-2l2") + return attachSupernodeSuperProofs(t, runtime, cfg) +} + +func NewSingleChainSupernodeProofsRuntimeWithConfig(t devtest.T, interopAtGenesis bool, cfg PresetConfig) *MultiChainRuntime { + cfg = withSuperProofsDeployerFeature(cfg) + runtime := newSingleChainSupernodeRuntimeWithConfig(t, interopAtGenesis, cfg) + attachTestSequencerToRuntime(t, runtime, "dev") + return attachSupernodeSuperProofs(t, runtime, cfg) +} + +func startSuperProposer( + t devtest.T, + keys devkeys.Keys, + proposerName string, + proposerChainID eth.ChainID, + l1EL L1ELNode, + l2Net *L2Network, + supervisorRPC string, + supernodeRPC string, + proposerOpts ...ProposerOption, +) *L2Proposer { + require := t.Require() + + proposerSecret, err 
:= keys.Secret(devkeys.ProposerRole.Key(proposerChainID.ToBig())) + require.NoError(err) + + logger := t.Logger().New("component", "l2-proposer") + logger.Info("Proposer key acquired", "addr", crypto.PubkeyToAddress(proposerSecret.PublicKey)) + + proposerCLIConfig := &ps.CLIConfig{ + L1EthRpc: l1EL.UserRPC(), + PollInterval: 500 * time.Millisecond, + AllowNonFinalized: true, + TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.UserRPC()), proposerSecret), + RPCConfig: oprpc.CLIConfig{ListenAddr: "127.0.0.1"}, + LogConfig: oplog.CLIConfig{Level: log.LvlInfo, Format: oplog.FormatText}, + MetricsConfig: opmetrics.CLIConfig{}, + PprofConfig: oppprof.CLIConfig{}, + DGFAddress: l2Net.deployment.DisputeGameFactoryProxyAddr().Hex(), + ProposalInterval: 6 * time.Second, + DisputeGameType: superCannonGameType, + ActiveSequencerCheckDuration: 5 * time.Second, + WaitNodeSync: false, + } + for _, opt := range proposerOpts { + if opt == nil { + continue + } + opt(NewComponentTarget(proposerName, proposerChainID), proposerCLIConfig) + } + switch { + case supernodeRPC != "": + proposerCLIConfig.SuperNodeRpcs = []string{supernodeRPC} + case supervisorRPC != "": + proposerCLIConfig.SupervisorRpcs = []string{supervisorRPC} + default: + require.FailNow("need supervisor or supernode RPC for super proposer") + } + + proposer, err := ps.ProposerServiceFromCLIConfig(t.Ctx(), "0.0.1", proposerCLIConfig, logger) + require.NoError(err) + require.NoError(proposer.Start(t.Ctx())) + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing proposer") + _ = proposer.Stop(ctx) + logger.Info("Closed proposer") + }) + + return &L2Proposer{ + name: proposerName, + chainID: proposerChainID, + service: proposer, + userRPC: proposer.HTTPEndpoint(), + } +} + +func startInteropChallenger( + t devtest.T, + keys devkeys.Keys, + l1Net *L1Network, + l1EL L1ELNode, + l1CL *L1CLNode, + depSet depset.DependencySet, + superRPC string, + useSuperNode bool, + l2Nets 
[]*L2Network, + l2ELs []L2ELNode, + enableCannonKona bool, +) *L2Challenger { + require := t.Require() + require.NotEmpty(l2Nets, "at least one L2 network is required") + require.Len(l2ELs, len(l2Nets), "need matching L2 ELs for challenger") + + challengerSecret, err := keys.Secret(devkeys.ChallengerRole.Key(l2Nets[0].ChainID().ToBig())) + require.NoError(err) + + logger := t.Logger().New("component", "l2-challenger") + logger.Info("Challenger key acquired", "addr", crypto.PubkeyToAddress(challengerSecret.PublicKey)) + + l2ELRPCs := make([]string, len(l2ELs)) + rollupCfgs := make([]*rollup.Config, len(l2Nets)) + l2Geneses := make([]*core.Genesis, len(l2Nets)) + l2ChainIDs := make([]eth.ChainID, len(l2Nets)) + for i := range l2Nets { + l2ELRPCs[i] = l2ELs[i].UserRPC() + rollupCfgs[i] = l2Nets[i].rollupCfg + l2Geneses[i] = l2Nets[i].genesis + l2ChainIDs[i] = l2Nets[i].ChainID() + } + staticDepSet, ok := depSet.(*depset.StaticConfigDependencySet) + require.True(ok, "expected static dependency set for super challenger") + + options := []sharedchallenger.Option{ + sharedchallenger.WithFactoryAddress(l2Nets[0].deployment.DisputeGameFactoryProxyAddr()), + sharedchallenger.WithPrivKey(challengerSecret), + sharedchallenger.WithDepset(staticDepSet), + sharedchallenger.WithCannonConfig(rollupCfgs, l1Net.genesis, l2Geneses, sharedchallenger.InteropVariant), + sharedchallenger.WithSuperCannonGameType(), + sharedchallenger.WithSuperPermissionedGameType(), + } + if enableCannonKona { + t.Log("Enabling cannon-kona for super challenger") + options = append(options, + sharedchallenger.WithCannonKonaInteropConfig(rollupCfgs, l1Net.genesis, l2Geneses), + sharedchallenger.WithSuperCannonKonaGameType(), + ) + } + cfg, err := sharedchallenger.NewInteropChallengerConfig( + t.TempDir(), + l1EL.UserRPC(), + l1CL.beaconHTTPAddr, + superRPC, + l2ELRPCs, + options..., + ) + require.NoError(err, "failed to create interop challenger config") + cfg.UseSuperNode = useSuperNode + + svc, err := 
opchallenger.Main(t.Ctx(), logger, cfg, challengermetrics.NoopMetrics) + require.NoError(err) + require.NoError(svc.Start(t.Ctx())) + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing challenger") + timer := time.AfterFunc(time.Minute, func() { + if svc.Stopped() { + return + } + buf := make([]byte, 1<<20) + stackLen := runtime.Stack(buf, true) + logger.Error("Challenger failed to stop; printing all goroutine stacks:\n%v", string(buf[:stackLen])) + }) + _ = svc.Stop(ctx) + timer.Stop() + logger.Info("Closed challenger") + }) + + return &L2Challenger{ + name: "main", + chainIDs: l2ChainIDs, + service: svc, + config: cfg, + } +} diff --git a/op-devstack/sysgo/multichain_supernode_runtime.go b/op-devstack/sysgo/multichain_supernode_runtime.go new file mode 100644 index 0000000000000..c7d010d87b7e3 --- /dev/null +++ b/op-devstack/sysgo/multichain_supernode_runtime.go @@ -0,0 +1,852 @@ +package sysgo + +import ( + "context" + "encoding/hex" + "fmt" + "sort" + "strconv" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + opforks "github.com/ethereum-optimism/optimism/op-core/forks" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + faucetConfig "github.com/ethereum-optimism/optimism/op-faucet/config" + "github.com/ethereum-optimism/optimism/op-faucet/faucet" + fconf "github.com/ethereum-optimism/optimism/op-faucet/faucet/backend/config" + ftypes "github.com/ethereum-optimism/optimism/op-faucet/faucet/backend/types" + opnodeconfig "github.com/ethereum-optimism/optimism/op-node/config" + "github.com/ethereum-optimism/optimism/op-node/rollup/driver" + "github.com/ethereum-optimism/optimism/op-node/rollup/interop" + nodeSync 
"github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum-optimism/optimism/op-service/sources" + snconfig "github.com/ethereum-optimism/optimism/op-supernode/config" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + sequencerConfig "github.com/ethereum-optimism/optimism/op-test-sequencer/config" + testmetrics "github.com/ethereum-optimism/optimism/op-test-sequencer/metrics" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/fakepos" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/standardbuilder" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/noopcommitter" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/standardcommitter" + workconfig "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/config" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/nooppublisher" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/standardpublisher" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/sequencers/fullseq" + 
"github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/localkey" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/noopsigner" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" +) + +func NewTwoL2SupernodeRuntime(t devtest.T) *MultiChainRuntime { + runtime, _ := newTwoL2SupernodeRuntime(t, false, 0) + return runtime +} + +func NewTwoL2SupernodeInteropRuntime(t devtest.T, delaySeconds uint64) *MultiChainRuntime { + return NewTwoL2SupernodeInteropRuntimeWithConfig(t, delaySeconds, PresetConfig{}) +} + +func NewTwoL2SupernodeInteropRuntimeWithConfig(t devtest.T, delaySeconds uint64, cfg PresetConfig) *MultiChainRuntime { + base, activationTime := newTwoL2SupernodeRuntimeWithConfig(t, true, delaySeconds, cfg) + chainA := base.Chains["l2a"] + chainB := base.Chains["l2b"] + t.Require().NotNil(chainA, "missing l2a supernode chain") + t.Require().NotNil(chainB, "missing l2b supernode chain") + attachTestSequencerToRuntime(t, base, "test-sequencer-2l2") + + t.Logger().Info("configured supernode interop runtime", + "genesis_time", chainA.Network.rollupCfg.Genesis.L2Time, + "activation_time", activationTime, + "delay_seconds", delaySeconds, + ) + + base.DelaySeconds = delaySeconds + return base +} + +func NewTwoL2SupernodeFollowL2RuntimeWithConfig(t devtest.T, delaySeconds uint64, cfg PresetConfig) *MultiChainRuntime { + runtime := NewTwoL2SupernodeInteropRuntimeWithConfig(t, delaySeconds, cfg) + addMultiChainFollowL2Node(t, runtime, "l2a", "follower") + addMultiChainFollowL2Node(t, runtime, "l2b", "follower") + return runtime +} + +func newTwoL2SupernodeRuntime(t devtest.T, enableInterop bool, delaySeconds uint64) (*MultiChainRuntime, uint64) { + return newTwoL2SupernodeRuntimeWithConfig(t, enableInterop, delaySeconds, PresetConfig{}) +} + +func NewTwoL2SupernodeRuntimeWithConfig(t devtest.T, cfg PresetConfig) *MultiChainRuntime { + runtime, _ := 
newTwoL2SupernodeRuntimeWithConfig(t, false, 0, cfg) + return runtime +} + +func newSingleChainSupernodeRuntimeWithConfig(t devtest.T, interopAtGenesis bool, cfg PresetConfig) *MultiChainRuntime { + require := t.Require() + + keys, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(err, "failed to derive dev keys from mnemonic") + + migration, l1Net, l2Net, depSet, _ := buildSingleChainWorldWithInteropAndState(t, keys, interopAtGenesis, cfg.DeployerOptions...) + validateSimpleInteropPresetConfig(t, cfg, l2Net) + + jwtPath, jwtSecret := writeJWTSecret(t) + l1Clock := clock.SystemClock + var timeTravelClock *clock.AdvancingClock + if cfg.EnableTimeTravel { + timeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond) + l1Clock = timeTravelClock + } + l1EL, l1CL := startInProcessL1WithClock(t, l1Net, jwtPath, l1Clock) + l2EL := startSequencerEL(t, l2Net, jwtPath, jwtSecret, NewELNodeIdentity(0)) + + var depSetStatic *depset.StaticConfigDependencySet + if depSet != nil { + cast, ok := depSet.(*depset.StaticConfigDependencySet) + require.True(ok, "expected static dependency set") + depSetStatic = cast + } + + supernode, l2CL := startSingleChainSharedSupernode(t, l1Net, l1EL, l1CL, l2Net, l2EL, depSetStatic, jwtSecret, interopAtGenesis) + l2Batcher := startMinimalBatcher(t, keys, l2Net, l1EL, l2CL, l2EL, cfg.BatcherOptions...) + l2Proposer := startMinimalProposer(t, keys, l2Net, l1EL, l2CL, cfg.ProposerOptions...) 
+ faucetService := startFaucets(t, keys, l1Net.ChainID(), l2Net.ChainID(), l1EL.UserRPC(), l2EL.UserRPC()) + + return &MultiChainRuntime{ + Keys: keys, + Migration: migration, + DependencySet: depSet, + L1Network: l1Net, + L1EL: l1EL, + L1CL: l1CL, + Chains: map[string]*MultiChainNodeRuntime{ + "l2a": { + Name: "l2a", + Network: l2Net, + EL: l2EL, + CL: l2CL, + Batcher: l2Batcher, + Proposer: l2Proposer, + }, + }, + Supernode: supernode, + FaucetService: faucetService, + TimeTravel: timeTravelClock, + } +} + +func newTwoL2SupernodeRuntimeWithConfig(t devtest.T, enableInterop bool, delaySeconds uint64, cfg PresetConfig) (*MultiChainRuntime, uint64) { + require := t.Require() + + keys, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(err, "failed to derive dev keys from mnemonic") + + wb, l1Net, l2ANet, l2BNet := buildTwoL2RuntimeWorld(t, keys, enableInterop, cfg.DeployerOptions...) + jwtPath, jwtSecret := writeJWTSecret(t) + l1Clock := clock.SystemClock + var timeTravelClock *clock.AdvancingClock + if cfg.EnableTimeTravel { + timeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond) + l1Clock = timeTravelClock + } + l1EL, l1CL := startInProcessL1WithClock(t, l1Net, jwtPath, l1Clock) + + l2AIdentity := NewELNodeIdentity(0) + l2BIdentity := NewELNodeIdentity(0) + l2AEL := startSequencerEL(t, l2ANet, jwtPath, jwtSecret, l2AIdentity) + l2BEL := startSequencerEL(t, l2BNet, jwtPath, jwtSecret, l2BIdentity) + + var activationTime uint64 + var interopActivationTimestamp *uint64 + if enableInterop { + activationTime = l2ANet.rollupCfg.Genesis.L2Time + delaySeconds + interopActivationTimestamp = &activationTime + } + + var depSet *depset.StaticConfigDependencySet + if wb.outFullCfgSet.DependencySet != nil { + cast, ok := wb.outFullCfgSet.DependencySet.(*depset.StaticConfigDependencySet) + require.True(ok, "expected static dependency set") + depSet = cast + } + + supernode, l2ACL, l2BCL := startTwoL2SharedSupernode( + t, + l1Net, + l1EL, + 
l1CL, + l2ANet, + l2AEL, + l2BNet, + l2BEL, + depSet, + interopActivationTimestamp, + jwtSecret, + ) + + l2ABatcher := startMinimalBatcher(t, keys, l2ANet, l1EL, l2ACL, l2AEL) + l2AProposer := startMinimalProposer(t, keys, l2ANet, l1EL, l2ACL) + l2BBatcher := startMinimalBatcher(t, keys, l2BNet, l1EL, l2BCL, l2BEL) + l2BProposer := startMinimalProposer(t, keys, l2BNet, l1EL, l2BCL) + + faucetService := startFaucetsForRPCs(t, keys, map[eth.ChainID]string{ + l1Net.ChainID(): l1EL.UserRPC(), + l2ANet.ChainID(): l2AEL.UserRPC(), + l2BNet.ChainID(): l2BEL.UserRPC(), + }) + + return &MultiChainRuntime{ + Keys: keys, + Migration: newInteropMigrationState(wb), + DependencySet: wb.outFullCfgSet.DependencySet, + L1Network: l1Net, + L1EL: l1EL, + L1CL: l1CL, + Chains: map[string]*MultiChainNodeRuntime{ + "l2a": { + Name: "l2a", + Network: l2ANet, + EL: l2AEL, + CL: l2ACL, + Batcher: l2ABatcher, + Proposer: l2AProposer, + }, + "l2b": { + Name: "l2b", + Network: l2BNet, + EL: l2BEL, + CL: l2BCL, + Batcher: l2BBatcher, + Proposer: l2BProposer, + }, + }, + Supernode: supernode, + FaucetService: faucetService, + TimeTravel: timeTravelClock, + }, activationTime +} + +func buildTwoL2RuntimeWorld(t devtest.T, keys devkeys.Keys, enableInterop bool, deployerOpts ...DeployerOption) (*worldBuilder, *L1Network, *L2Network, *L2Network) { + wb := &worldBuilder{ + p: t, + logger: t.Logger(), + require: t.Require(), + keys: keys, + builder: intentbuilder.New(), + } + + applyConfigLocalContractSources(t, keys, wb.builder) + applyConfigCommons(t, keys, DefaultL1ID, wb.builder) + applyConfigPrefundedL2(t, keys, DefaultL1ID, DefaultL2AID, wb.builder) + applyConfigPrefundedL2(t, keys, DefaultL1ID, DefaultL2BID, wb.builder) + if enableInterop { + for _, l2Cfg := range wb.builder.L2s() { + l2Cfg.WithForkAtGenesis(opforks.Interop) + } + } + applyConfigDeployerOptions(t, keys, wb.builder, deployerOpts) + wb.Build() + + t.Require().Len(wb.l2Chains, 2, "expected exactly two L2 chains in TwoL2 world") + 
l1ID := eth.ChainIDFromUInt64(wb.output.AppliedIntent.L1ChainID) + + l1Net := &L1Network{ + name: "l1", + chainID: l1ID, + genesis: wb.outL1Genesis, + blockTime: 6, + } + + l2ANet := l2NetworkFromWorldBuilder(t, wb, l1ID, DefaultL2AID, keys) + l2BNet := l2NetworkFromWorldBuilder(t, wb, l1ID, DefaultL2BID, keys) + + return wb, l1Net, l2ANet, l2BNet +} + +func l2NetworkFromWorldBuilder(t devtest.T, wb *worldBuilder, l1ChainID, l2ChainID eth.ChainID, keys devkeys.Keys) *L2Network { + require := t.Require() + + l2Genesis, ok := wb.outL2Genesis[l2ChainID] + require.Truef(ok, "missing L2 genesis for chain %s", l2ChainID) + l2RollupCfg, ok := wb.outL2RollupCfg[l2ChainID] + require.Truef(ok, "missing L2 rollup config for chain %s", l2ChainID) + l2Dep, ok := wb.outL2Deployment[l2ChainID] + require.Truef(ok, "missing L2 deployment for chain %s", l2ChainID) + + return &L2Network{ + name: map[eth.ChainID]string{DefaultL2AID: "l2a", DefaultL2BID: "l2b"}[l2ChainID], + chainID: l2ChainID, + l1ChainID: l1ChainID, + genesis: l2Genesis, + rollupCfg: l2RollupCfg, + deployment: l2Dep, + opcmImpl: wb.output.ImplementationsDeployment.OpcmImpl, + mipsImpl: wb.output.ImplementationsDeployment.MipsImpl, + keys: keys, + } +} + +func addMultiChainFollowL2Node(t devtest.T, runtime *MultiChainRuntime, chainKey string, name string) *SingleChainNodeRuntime { + chain := runtime.Chains[chainKey] + t.Require().NotNil(chain, "missing %s runtime chain", chainKey) + t.Require().NotNil(chain.CL, "%s runtime chain missing CL follow source", chainKey) + + jwtPath := chain.EL.JWTPath() + jwtSecret := readJWTSecretFromPath(t, jwtPath) + l2EL := startL2ELNode(t, chain.Network, jwtPath, jwtSecret, name, NewELNodeIdentity(0)) + l2CL := startL2CLNode(t, runtime.Keys, runtime.L1Network, chain.Network, runtime.L1EL, runtime.L1CL, l2EL, jwtSecret, l2CLNodeStartConfig{ + Key: name, + IsSequencer: false, + NoDiscovery: true, + EnableReqResp: false, + UseReqResp: false, + L2FollowSource: chain.CL.UserRPC(), + 
DependencySet: runtime.DependencySet, + }) + + connectL2ELPeers(t, t.Logger(), chain.EL.UserRPC(), l2EL.UserRPC(), false) + connectL2CLPeers(t, t.Logger(), chain.CL, l2CL) + + node := &SingleChainNodeRuntime{ + Name: name, + IsSequencer: false, + EL: l2EL, + CL: l2CL, + } + if chain.Followers == nil { + chain.Followers = make(map[string]*SingleChainNodeRuntime) + } + chain.Followers[name] = node + return node +} + +func startTwoL2SharedSupernode( + t devtest.T, + l1Net *L1Network, + l1EL *L1Geth, + l1CL *L1CLNode, + l2ANet *L2Network, + l2AEL *OpGeth, + l2BNet *L2Network, + l2BEL *OpGeth, + depSet *depset.StaticConfigDependencySet, + interopActivationTimestamp *uint64, + jwtSecret [32]byte, +) (*SuperNode, *SuperNodeProxy, *SuperNodeProxy) { + require := t.Require() + logger := t.Logger().New("component", "supernode") + makeNodeCfg := func(l2Net *L2Network, l2EL L2ELNode) *opnodeconfig.Config { + p2pKey, err := l2Net.keys.Secret(devkeys.SequencerP2PRole.Key(l2Net.ChainID().ToBig())) + require.NoError(err, "need p2p key for supernode virtual sequencer") + p2pConfig, p2pSignerSetup := newDevstackP2PConfig( + t, + logger.New("chain_id", l2Net.ChainID().String(), "component", "supernode-p2p"), + l2Net.rollupCfg.BlockTime, + false, + true, + hex.EncodeToString(crypto.FromECDSA(p2pKey)), + ) + cfg := &opnodeconfig.Config{ + L1: &opnodeconfig.L1EndpointConfig{ + L1NodeAddr: l1EL.UserRPC(), + L1TrustRPC: false, + L1RPCKind: sources.RPCKindDebugGeth, + RateLimit: 0, + BatchSize: 20, + HttpPollInterval: time.Millisecond * 100, + MaxConcurrency: 10, + CacheSize: 0, + }, + L1ChainConfig: l1Net.genesis.Config, + L2: &opnodeconfig.L2EndpointConfig{ + L2EngineAddr: l2EL.EngineRPC(), + L2EngineJWTSecret: jwtSecret, + }, + DependencySet: depSet, + Beacon: &opnodeconfig.L1BeaconEndpointConfig{BeaconAddr: l1CL.beaconHTTPAddr}, + Driver: driver.Config{SequencerEnabled: true, SequencerConfDepth: 2}, + Rollup: *l2Net.rollupCfg, + P2PSigner: p2pSignerSetup, + RPC: 
oprpc.CLIConfig{ListenAddr: "127.0.0.1", ListenPort: 0, EnableAdmin: true}, + InteropConfig: &interop.Config{}, + P2P: p2pConfig, + L1EpochPollInterval: 2 * time.Second, + RuntimeConfigReloadInterval: 0, + Sync: nodeSync.Config{SyncMode: nodeSync.CLSync, SyncModeReqResp: true}, + ConfigPersistence: opnodeconfig.DisabledConfigPersistence{}, + Metrics: opmetrics.CLIConfig{}, + Pprof: oppprof.CLIConfig{}, + IgnoreMissingPectraBlobSchedule: false, + ExperimentalOPStackAPI: true, + } + require.NoError(cfg.Check(), "invalid supernode op-node config for chain %s", l2Net.ChainID()) + return cfg + } + + vnCfgs := map[eth.ChainID]*opnodeconfig.Config{ + l2ANet.ChainID(): makeNodeCfg(l2ANet, l2AEL), + l2BNet.ChainID(): makeNodeCfg(l2BNet, l2BEL), + } + chainIDs := []uint64{eth.EvilChainIDToUInt64(l2ANet.ChainID()), eth.EvilChainIDToUInt64(l2BNet.ChainID())} + + snCfg := &snconfig.CLIConfig{ + Chains: chainIDs, + DataDir: t.TempDir(), + L1NodeAddr: l1EL.UserRPC(), + L1BeaconAddr: l1CL.beaconHTTPAddr, + RPCConfig: oprpc.CLIConfig{ListenAddr: "127.0.0.1", ListenPort: 0, EnableAdmin: true}, + InteropActivationTimestamp: interopActivationTimestamp, + } + + supernode := &SuperNode{ + userRPC: "", + interopEndpoint: "", + interopJwtSecret: jwtSecret, + p: t, + logger: logger, + chains: []eth.ChainID{l2ANet.ChainID(), l2BNet.ChainID()}, + l1UserRPC: l1EL.UserRPC(), + l1BeaconAddr: l1CL.beaconHTTPAddr, + snCfg: snCfg, + vnCfgs: vnCfgs, + } + supernode.Start() + t.Cleanup(supernode.Stop) + + base := supernode.UserRPC() + l2ARPC := base + "/" + strconv.FormatUint(eth.EvilChainIDToUInt64(l2ANet.ChainID()), 10) + l2BRPC := base + "/" + strconv.FormatUint(eth.EvilChainIDToUInt64(l2BNet.ChainID()), 10) + + waitForSupernodeRoute(t, logger, l2ARPC) + waitForSupernodeRoute(t, logger, l2BRPC) + + l2ACL := &SuperNodeProxy{ + p: t, + logger: logger, + userRPC: l2ARPC, + interopEndpoint: l2ARPC, + interopJwtSecret: jwtSecret, + } + l2BCL := &SuperNodeProxy{ + p: t, + logger: logger, + userRPC: 
l2BRPC, + interopEndpoint: l2BRPC, + interopJwtSecret: jwtSecret, + } + + return supernode, l2ACL, l2BCL +} + +func startSingleChainSharedSupernode( + t devtest.T, + l1Net *L1Network, + l1EL *L1Geth, + l1CL *L1CLNode, + l2Net *L2Network, + l2EL *OpGeth, + depSet *depset.StaticConfigDependencySet, + jwtSecret [32]byte, + interopAtGenesis bool, +) (*SuperNode, *SuperNodeProxy) { + require := t.Require() + logger := t.Logger().New("component", "supernode") + makeNodeCfg := func() *opnodeconfig.Config { + p2pKey, err := l2Net.keys.Secret(devkeys.SequencerP2PRole.Key(l2Net.ChainID().ToBig())) + require.NoError(err, "need p2p key for supernode virtual sequencer") + p2pConfig, p2pSignerSetup := newDevstackP2PConfig( + t, + logger.New("chain_id", l2Net.ChainID().String(), "component", "supernode-p2p"), + l2Net.rollupCfg.BlockTime, + false, + true, + hex.EncodeToString(crypto.FromECDSA(p2pKey)), + ) + cfg := &opnodeconfig.Config{ + L1: &opnodeconfig.L1EndpointConfig{ + L1NodeAddr: l1EL.UserRPC(), + L1TrustRPC: false, + L1RPCKind: sources.RPCKindDebugGeth, + RateLimit: 0, + BatchSize: 20, + HttpPollInterval: 100 * time.Millisecond, + MaxConcurrency: 10, + CacheSize: 0, + }, + L1ChainConfig: l1Net.genesis.Config, + L2: &opnodeconfig.L2EndpointConfig{ + L2EngineAddr: l2EL.EngineRPC(), + L2EngineJWTSecret: jwtSecret, + }, + DependencySet: depSet, + Beacon: &opnodeconfig.L1BeaconEndpointConfig{BeaconAddr: l1CL.beaconHTTPAddr}, + Driver: driver.Config{SequencerEnabled: true, SequencerConfDepth: 2}, + Rollup: *l2Net.rollupCfg, + P2PSigner: p2pSignerSetup, + RPC: oprpc.CLIConfig{ListenAddr: "127.0.0.1", ListenPort: 0, EnableAdmin: true}, + InteropConfig: &interop.Config{}, + P2P: p2pConfig, + L1EpochPollInterval: 2 * time.Second, + Sync: nodeSync.Config{SyncMode: nodeSync.CLSync, SyncModeReqResp: true}, + ConfigPersistence: opnodeconfig.DisabledConfigPersistence{}, + Metrics: opmetrics.CLIConfig{}, + Pprof: oppprof.CLIConfig{}, + ExperimentalOPStackAPI: true, + 
IgnoreMissingPectraBlobSchedule: false, + } + require.NoError(cfg.Check(), "invalid supernode op-node config for chain %s", l2Net.ChainID()) + return cfg + } + + var interopActivationTimestamp *uint64 + if interopAtGenesis { + ts := l2Net.rollupCfg.Genesis.L2Time + interopActivationTimestamp = &ts + } + + snCfg := &snconfig.CLIConfig{ + Chains: []uint64{eth.EvilChainIDToUInt64(l2Net.ChainID())}, + DataDir: t.TempDir(), + L1NodeAddr: l1EL.UserRPC(), + L1BeaconAddr: l1CL.beaconHTTPAddr, + RPCConfig: oprpc.CLIConfig{ListenAddr: "127.0.0.1", ListenPort: 0, EnableAdmin: true}, + InteropActivationTimestamp: interopActivationTimestamp, + } + + supernode := &SuperNode{ + userRPC: "", + interopEndpoint: "", + interopJwtSecret: jwtSecret, + p: t, + logger: logger, + chains: []eth.ChainID{l2Net.ChainID()}, + l1UserRPC: l1EL.UserRPC(), + l1BeaconAddr: l1CL.beaconHTTPAddr, + snCfg: snCfg, + vnCfgs: map[eth.ChainID]*opnodeconfig.Config{ + l2Net.ChainID(): makeNodeCfg(), + }, + } + supernode.Start() + t.Cleanup(supernode.Stop) + + l2RPC := supernode.UserRPC() + "/" + strconv.FormatUint(eth.EvilChainIDToUInt64(l2Net.ChainID()), 10) + waitForSupernodeRoute(t, logger, l2RPC) + + return supernode, &SuperNodeProxy{ + p: t, + logger: logger, + userRPC: l2RPC, + interopEndpoint: l2RPC, + interopJwtSecret: jwtSecret, + } +} + +func waitForSupernodeRoute(t devtest.T, logger log.Logger, rpcEndpoint string) { + deadline := time.Now().Add(15 * time.Second) + for { + if time.Now().After(deadline) { + t.Require().FailNowf("supernode route readiness", "timed out waiting for supernode route %s", rpcEndpoint) + } + + rpcCl, err := client.NewRPC(t.Ctx(), logger, rpcEndpoint, client.WithLazyDial()) + if err == nil { + var out any + callErr := rpcCl.CallContext(t.Ctx(), &out, "optimism_rollupConfig") + rpcCl.Close() + if callErr == nil { + return + } + } + + time.Sleep(200 * time.Millisecond) + } +} + +type l2TestSequencerTarget struct { + chainID eth.ChainID + l2EL *OpGeth + l2CL L2CLNode +} + 
+func attachTestSequencerToRuntime(t devtest.T, runtime *MultiChainRuntime, testSequencerName string) { + t.Require().NotEmpty(runtime.Chains, "runtime must contain at least one chain") + + chainKeys := make([]string, 0, len(runtime.Chains)) + for key := range runtime.Chains { + chainKeys = append(chainKeys, key) + } + sort.Strings(chainKeys) + + firstChain := runtime.Chains[chainKeys[0]] + t.Require().NotNil(firstChain, "missing runtime chain %s", chainKeys[0]) + jwtPath := firstChain.EL.JWTPath() + jwtSecret := readJWTSecretFromPath(t, jwtPath) + + targets := make([]l2TestSequencerTarget, 0, len(chainKeys)) + for _, key := range chainKeys { + chain := runtime.Chains[key] + t.Require().NotNil(chain, "missing runtime chain %s", key) + l2EL, ok := chain.EL.(*OpGeth) + t.Require().True(ok, "runtime chain %s must use op-geth for test sequencer", key) + targets = append(targets, l2TestSequencerTarget{ + chainID: chain.Network.ChainID(), + l2EL: l2EL, + l2CL: chain.CL, + }) + } + + testSequencer := startTestSequencerForL2Chains( + t, + runtime.Keys, + testSequencerName, + jwtPath, + jwtSecret, + runtime.L1Network, + runtime.L1EL, + runtime.L1CL, + targets, + ) + runtime.TestSequencer = newTestSequencerRuntime(testSequencer, "") +} + +func startTestSequencerForL2Chains( + t devtest.T, + keys devkeys.Keys, + testSequencerName string, + jwtPath string, + jwtSecret [32]byte, + l1Net *L1Network, + l1EL *L1Geth, + l1CL *L1CLNode, + targets []l2TestSequencerTarget, +) *testSequencer { + require := t.Require() + logger := t.Logger().New("component", "test-sequencer") + + require.NotEmpty(targets, "at least one L2 target is required") + + l1ELClient, err := ethclient.DialContext(t.Ctx(), l1EL.UserRPC()) + require.NoError(err, "failed to dial L1 EL RPC for test-sequencer") + t.Cleanup(l1ELClient.Close) + + engineCl, err := dialEngine(t.Ctx(), l1EL.AuthRPC(), jwtSecret) + require.NoError(err, "failed to dial L1 engine API for test-sequencer") + t.Cleanup(func() { + 
engineCl.inner.Close() + }) + + l1ChainID := l1Net.ChainID() + bidL1 := seqtypes.BuilderID("test-l1-builder") + cidL1 := seqtypes.CommitterID("test-noop-committer") + sidL1 := seqtypes.SignerID("test-noop-signer") + pidL1 := seqtypes.PublisherID("test-noop-publisher") + seqIDL1 := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l1ChainID)) + + ensemble := &workconfig.Ensemble{ + Builders: map[seqtypes.BuilderID]*workconfig.BuilderEntry{ + bidL1: { + L1: &fakepos.Config{ + ChainConfig: l1Net.genesis.Config, + EngineAPI: engineCl, + Backend: l1ELClient, + Beacon: l1CL.beacon, + FinalizedDistance: 20, + SafeDistance: 10, + BlockTime: 6, + }, + }, + }, + Signers: map[seqtypes.SignerID]*workconfig.SignerEntry{ + sidL1: { + Noop: &noopsigner.Config{}, + }, + }, + Committers: map[seqtypes.CommitterID]*workconfig.CommitterEntry{ + cidL1: { + Noop: &noopcommitter.Config{}, + }, + }, + Publishers: map[seqtypes.PublisherID]*workconfig.PublisherEntry{ + pidL1: { + Noop: &nooppublisher.Config{}, + }, + }, + Sequencers: map[seqtypes.SequencerID]*workconfig.SequencerEntry{ + seqIDL1: { + Full: &fullseq.Config{ + ChainID: l1ChainID, + Builder: bidL1, + Signer: sidL1, + Committer: cidL1, + Publisher: pidL1, + }, + }, + }, + } + + sequencerIDs := map[eth.ChainID]seqtypes.SequencerID{ + l1ChainID: seqIDL1, + } + + for i, target := range targets { + suffix := "" + if len(targets) > 1 { + suffix = fmt.Sprintf("-%c", 'A'+i) + } + + bid := seqtypes.BuilderID(fmt.Sprintf("test-standard-builder%s", suffix)) + cid := seqtypes.CommitterID(fmt.Sprintf("test-standard-committer%s", suffix)) + sid := seqtypes.SignerID(fmt.Sprintf("test-local-signer%s", suffix)) + pid := seqtypes.PublisherID(fmt.Sprintf("test-standard-publisher%s", suffix)) + seqID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", target.chainID)) + + p2pKey, err := keys.Secret(devkeys.SequencerP2PRole.Key(target.chainID.ToBig())) + require.NoError(err, "need p2p key for test sequencer target %d", i) + rawKey := 
hexutil.Bytes(crypto.FromECDSA(p2pKey)) + + ensemble.Builders[bid] = &workconfig.BuilderEntry{ + Standard: &standardbuilder.Config{ + L1ChainConfig: l1Net.genesis.Config, + L1EL: endpoint.MustRPC{Value: endpoint.HttpURL(l1EL.UserRPC())}, + L2EL: endpoint.MustRPC{Value: endpoint.HttpURL(target.l2EL.UserRPC())}, + L2CL: endpoint.MustRPC{Value: endpoint.HttpURL(target.l2CL.UserRPC())}, + }, + } + ensemble.Signers[sid] = &workconfig.SignerEntry{ + LocalKey: &localkey.Config{RawKey: &rawKey, ChainID: target.chainID}, + } + ensemble.Committers[cid] = &workconfig.CommitterEntry{ + Standard: &standardcommitter.Config{RPC: endpoint.MustRPC{Value: endpoint.HttpURL(target.l2CL.UserRPC())}}, + } + ensemble.Publishers[pid] = &workconfig.PublisherEntry{ + Standard: &standardpublisher.Config{RPC: endpoint.MustRPC{Value: endpoint.HttpURL(target.l2CL.UserRPC())}}, + } + ensemble.Sequencers[seqID] = &workconfig.SequencerEntry{ + Full: &fullseq.Config{ + ChainID: target.chainID, + Builder: bid, + Signer: sid, + Committer: cid, + Publisher: pid, + SequencerConfDepth: 2, + SequencerEnabled: true, + SequencerStopped: false, + SequencerMaxSafeLag: 0, + }, + } + + sequencerIDs[target.chainID] = seqID + } + + jobs := work.NewJobRegistry() + startedEnsemble, err := ensemble.Start(t.Ctx(), &work.StartOpts{ + Log: logger, + Metrics: &testmetrics.NoopMetrics{}, + Jobs: jobs, + }) + require.NoError(err, "failed to start test-sequencer ensemble") + + cfg := &sequencerConfig.Config{ + MetricsConfig: opmetrics.CLIConfig{Enabled: false}, + PprofConfig: oppprof.CLIConfig{ListenEnabled: false}, + LogConfig: oplog.CLIConfig{ + Level: log.LevelDebug, + Format: oplog.FormatText, + }, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + ListenPort: 0, + EnableAdmin: true, + }, + Ensemble: startedEnsemble, + JWTSecretPath: jwtPath, + Version: "dev", + MockRun: false, + } + + sq, err := sequencer.FromConfig(t.Ctx(), cfg, logger) + require.NoError(err, "failed to initialize test-sequencer service") + 
require.NoError(sq.Start(t.Ctx()), "failed to start test-sequencer service") + + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing test-sequencer service") + closeErr := sq.Stop(ctx) + logger.Info("Closed test-sequencer service", "err", closeErr) + }) + + adminRPC := sq.RPC() + controlRPCs := make(map[eth.ChainID]string, len(sequencerIDs)) + for chainID, seqID := range sequencerIDs { + controlRPCs[chainID] = adminRPC + "/sequencers/" + seqID.String() + } + + return &testSequencer{ + name: testSequencerName, + adminRPC: adminRPC, + jwtSecret: jwtSecret, + controlRPC: controlRPCs, + service: sq, + } +} + +func startFaucetsForRPCs(t devtest.T, keys devkeys.Keys, chainRPCs map[eth.ChainID]string) *faucet.Service { + require := t.Require() + logger := t.Logger().New("component", "faucet") + + funderKey, err := keys.Secret(devkeys.UserKey(funderMnemonicIndex)) + require.NoError(err, "need faucet funder key") + funderKeyStr := hexutil.Encode(crypto.FromECDSA(funderKey)) + + faucets := make(map[ftypes.FaucetID]*fconf.FaucetEntry, len(chainRPCs)) + for chainID, rpcURL := range chainRPCs { + faucetID := ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", chainID)) + faucets[faucetID] = &fconf.FaucetEntry{ + ELRPC: endpoint.MustRPC{Value: endpoint.URL(rpcURL)}, + ChainID: chainID, + TxCfg: fconf.TxManagerConfig{ + PrivateKey: funderKeyStr, + }, + } + } + + cfg := &faucetConfig.Config{ + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, + Faucets: &fconf.Config{ + Faucets: faucets, + }, + } + + srv, err := faucet.FromConfig(t.Ctx(), cfg, logger) + require.NoError(err, "failed to create faucet service") + require.NoError(srv.Start(t.Ctx()), "failed to start faucet service") + + t.Cleanup(func() { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // force-close + logger.Info("Closing faucet service") + closeErr := srv.Stop(ctx) + logger.Info("Closed faucet service", "err", closeErr) + }) + + return srv +} diff --git 
a/op-devstack/sysgo/multichain_supervisor_runtime.go b/op-devstack/sysgo/multichain_supervisor_runtime.go new file mode 100644 index 0000000000000..7502893604793 --- /dev/null +++ b/op-devstack/sysgo/multichain_supervisor_runtime.go @@ -0,0 +1,472 @@ +package sysgo + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + "time" + + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/retry" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + supervisorConfig "github.com/ethereum-optimism/optimism/op-supervisor/config" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode" +) + +func NewSingleChainInteropRuntime(t devtest.T) *MultiChainRuntime { + return NewSingleChainInteropRuntimeWithConfig(t, PresetConfig{}) +} + +func NewSingleChainInteropRuntimeWithConfig(t devtest.T, cfg PresetConfig) *MultiChainRuntime { + require := t.Require() + + keys, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(err, "failed to derive dev keys from mnemonic") + + migration, l1Net, l2Net, depSet, fullCfgSet := buildSingleChainWorldWithInteropAndState(t, keys, true, cfg.DeployerOptions...) 
+ validateSimpleInteropPresetConfig(t, cfg, l2Net) + + jwtPath, jwtSecret := writeJWTSecret(t) + l1Clock := clock.SystemClock + var timeTravelClock *clock.AdvancingClock + if cfg.EnableTimeTravel { + timeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond) + l1Clock = timeTravelClock + } + l1EL, l1CL := startInProcessL1WithClock(t, l1Net, jwtPath, l1Clock) + supervisor := startSupervisor(t, "1-primary", l1EL, fullCfgSet, map[eth.ChainID]*rollup.Config{ + l2Net.ChainID(): l2Net.rollupCfg, + }) + + l2EL := startL2ELNodeWithSupervisor(t, l2Net, jwtPath, jwtSecret, "sequencer", NewELNodeIdentity(0), supervisor.UserRPC()) + l2CL := startL2CLNode(t, keys, l1Net, l2Net, l1EL, l1CL, l2EL, jwtSecret, l2CLNodeStartConfig{ + Key: "sequencer", + IsSequencer: true, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + IndexingMode: true, + DependencySet: depSet, + L2FollowSource: "", + L2CLOptions: cfg.GlobalL2CLOptions, + }) + connectManagedL2CLToSupervisor(t, supervisor, l2CL) + + l2Batcher := startMinimalBatcher(t, keys, l2Net, l1EL, l2CL, l2EL, cfg.BatcherOptions...) + l2Proposer := startMinimalProposer(t, keys, l2Net, l1EL, l2CL, cfg.ProposerOptions...) 
+ applyMinimalGameTypeOptions(t, keys, l1Net, l2Net, l1EL, cfg.AddedGameTypes, cfg.RespectedGameTypes) + testSequencer := startTestSequencer(t, keys, jwtPath, jwtSecret, l1Net, l1EL, l1CL, l2EL, l2CL) + faucetService := startFaucets(t, keys, l1Net.ChainID(), l2Net.ChainID(), l1EL.UserRPC(), l2EL.UserRPC()) + + chainA := &MultiChainNodeRuntime{ + Name: "l2a", + Network: l2Net, + EL: l2EL, + CL: l2CL, + Batcher: l2Batcher, + Proposer: l2Proposer, + } + + return &MultiChainRuntime{ + Keys: keys, + FullConfigSet: fullCfgSet, + DependencySet: depSet, + Migration: migration, + L1Network: l1Net, + L1EL: l1EL, + L1CL: l1CL, + Chains: map[string]*MultiChainNodeRuntime{"l2a": chainA}, + PrimarySupervisor: supervisor, + FaucetService: faucetService, + TimeTravel: timeTravelClock, + TestSequencer: newTestSequencerRuntime(testSequencer, "dev"), + } +} + +func NewSimpleInteropRuntime(t devtest.T) *MultiChainRuntime { + return NewSimpleInteropRuntimeWithConfig(t, PresetConfig{}) +} + +func NewSimpleInteropRuntimeWithConfig(t devtest.T, cfg PresetConfig) *MultiChainRuntime { + require := t.Require() + + keys, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(err, "failed to derive dev keys from mnemonic") + + migration, l1Net, l2ANet, l2BNet, fullCfgSet := buildTwoL2WorldWithState(t, keys, true, cfg.DeployerOptions...) 
+ validateSimpleInteropPresetConfig(t, cfg, l2ANet, l2BNet) + depSet := fullCfgSet.DependencySet + + jwtPath, jwtSecret := writeJWTSecret(t) + l1Clock := clock.SystemClock + var timeTravelClock *clock.AdvancingClock + if cfg.EnableTimeTravel { + timeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond) + l1Clock = timeTravelClock + } + l1EL, l1CL := startInProcessL1WithClock(t, l1Net, jwtPath, l1Clock) + supervisor := startSupervisor(t, "1-primary", l1EL, fullCfgSet, map[eth.ChainID]*rollup.Config{ + l2ANet.ChainID(): l2ANet.rollupCfg, + l2BNet.ChainID(): l2BNet.rollupCfg, + }) + + l2AEL := startL2ELNodeWithSupervisor(t, l2ANet, jwtPath, jwtSecret, "sequencer", NewELNodeIdentity(0), supervisor.UserRPC()) + l2BEL := startL2ELNodeWithSupervisor(t, l2BNet, jwtPath, jwtSecret, "sequencer", NewELNodeIdentity(0), supervisor.UserRPC()) + l2ACL := startL2CLNode(t, keys, l1Net, l2ANet, l1EL, l1CL, l2AEL, jwtSecret, l2CLNodeStartConfig{ + Key: "sequencer", + IsSequencer: true, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + IndexingMode: true, + DependencySet: depSet, + L2FollowSource: "", + L2CLOptions: cfg.GlobalL2CLOptions, + }) + l2BCL := startL2CLNode(t, keys, l1Net, l2BNet, l1EL, l1CL, l2BEL, jwtSecret, l2CLNodeStartConfig{ + Key: "sequencer", + IsSequencer: true, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + IndexingMode: true, + DependencySet: depSet, + L2FollowSource: "", + L2CLOptions: cfg.GlobalL2CLOptions, + }) + connectManagedL2CLToSupervisor(t, supervisor, l2ACL) + connectManagedL2CLToSupervisor(t, supervisor, l2BCL) + + l2ABatcher := startMinimalBatcher(t, keys, l2ANet, l1EL, l2ACL, l2AEL, cfg.BatcherOptions...) + l2AProposer := startMinimalProposer(t, keys, l2ANet, l1EL, l2ACL, cfg.ProposerOptions...) + l2BBatcher := startMinimalBatcher(t, keys, l2BNet, l1EL, l2BCL, l2BEL, cfg.BatcherOptions...) + l2BProposer := startMinimalProposer(t, keys, l2BNet, l1EL, l2BCL, cfg.ProposerOptions...) 
+ testSequencer := startTestSequencer(t, keys, jwtPath, jwtSecret, l1Net, l1EL, l1CL, l2AEL, l2ACL) + faucetService := startFaucetsForRPCs(t, keys, map[eth.ChainID]string{ + l1Net.ChainID(): l1EL.UserRPC(), + l2ANet.ChainID(): l2AEL.UserRPC(), + l2BNet.ChainID(): l2BEL.UserRPC(), + }) + + return &MultiChainRuntime{ + Keys: keys, + FullConfigSet: fullCfgSet, + DependencySet: depSet, + Migration: migration, + L1Network: l1Net, + L1EL: l1EL, + L1CL: l1CL, + Chains: map[string]*MultiChainNodeRuntime{ + "l2a": { + Name: "l2a", + Network: l2ANet, + EL: l2AEL, + CL: l2ACL, + Batcher: l2ABatcher, + Proposer: l2AProposer, + }, + "l2b": { + Name: "l2b", + Network: l2BNet, + EL: l2BEL, + CL: l2BCL, + Batcher: l2BBatcher, + Proposer: l2BProposer, + }, + }, + PrimarySupervisor: supervisor, + FaucetService: faucetService, + TimeTravel: timeTravelClock, + TestSequencer: newTestSequencerRuntime(testSequencer, "dev"), + } +} + +func validateSimpleInteropPresetConfig(t devtest.T, cfg PresetConfig, l2Nets ...*L2Network) { + require := t.Require() + if cfg.MaxSequencingWindow != nil { + for _, l2Net := range l2Nets { + require.LessOrEqualf( + l2Net.rollupCfg.SeqWindowSize, + *cfg.MaxSequencingWindow, + "sequencing window of chain %s must fit in max sequencing window size", + l2Net.ChainID(), + ) + } + } + if cfg.RequireInteropNotAtGen { + for _, l2Net := range l2Nets { + interopTime := l2Net.genesis.Config.InteropTime + require.NotNilf(interopTime, "chain %s must have interop", l2Net.ChainID()) + require.NotZerof(*interopTime, "chain %s interop must not be at genesis", l2Net.ChainID()) + } + } +} + +func NewMultiSupervisorInteropRuntime(t devtest.T) *MultiChainRuntime { + runtime := NewSimpleInteropRuntime(t) + chainA := runtime.Chains["l2a"] + chainB := runtime.Chains["l2b"] + t.Require().NotNil(chainA, "missing l2a interop chain") + t.Require().NotNil(chainB, "missing l2b interop chain") + + supervisorSecondary := startSupervisor( + t, + "2-secondary", + runtime.L1EL, + 
runtime.FullConfigSet, + map[eth.ChainID]*rollup.Config{ + chainA.Network.ChainID(): chainA.Network.rollupCfg, + chainB.Network.ChainID(): chainB.Network.rollupCfg, + }, + ) + + l2A2EL := startL2ELNodeWithSupervisor( + t, + chainA.Network, + chainA.EL.JWTPath(), + readJWTSecretFromPath(t, chainA.EL.JWTPath()), + "verifier", + NewELNodeIdentity(0), + supervisorSecondary.UserRPC(), + ) + l2A2CL := startL2CLNode(t, runtime.Keys, runtime.L1Network, chainA.Network, runtime.L1EL, runtime.L1CL, l2A2EL, readJWTSecretFromPath(t, chainA.EL.JWTPath()), l2CLNodeStartConfig{ + Key: "verifier", + IsSequencer: false, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + IndexingMode: true, + DependencySet: runtime.DependencySet, + L2FollowSource: "", + }) + + l2B2EL := startL2ELNodeWithSupervisor( + t, + chainB.Network, + chainB.EL.JWTPath(), + readJWTSecretFromPath(t, chainB.EL.JWTPath()), + "verifier", + NewELNodeIdentity(0), + supervisorSecondary.UserRPC(), + ) + l2B2CL := startL2CLNode(t, runtime.Keys, runtime.L1Network, chainB.Network, runtime.L1EL, runtime.L1CL, l2B2EL, readJWTSecretFromPath(t, chainB.EL.JWTPath()), l2CLNodeStartConfig{ + Key: "verifier", + IsSequencer: false, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + IndexingMode: true, + DependencySet: runtime.DependencySet, + L2FollowSource: "", + }) + + connectL2CLPeers(t, t.Logger(), chainA.CL, l2A2CL) + connectL2CLPeers(t, t.Logger(), chainB.CL, l2B2CL) + + connectManagedL2CLToSupervisor(t, supervisorSecondary, l2A2CL) + connectManagedL2CLToSupervisor(t, supervisorSecondary, l2B2CL) + + if chainA.Followers == nil { + chainA.Followers = make(map[string]*SingleChainNodeRuntime) + } + if chainB.Followers == nil { + chainB.Followers = make(map[string]*SingleChainNodeRuntime) + } + chainA.Followers["verifier"] = &SingleChainNodeRuntime{Name: "verifier", EL: l2A2EL, CL: l2A2CL} + chainB.Followers["verifier"] = &SingleChainNodeRuntime{Name: "verifier", EL: l2B2EL, CL: l2B2CL} + 
runtime.SecondarySupervisor = supervisorSecondary + return runtime +} + +func startSupervisor( + t devtest.T, + supervisorName string, + l1EL *L1Geth, + fullCfgSet depset.FullConfigSetMerged, + rollupCfgs map[eth.ChainID]*rollup.Config, +) Supervisor { + switch os.Getenv("DEVSTACK_SUPERVISOR_KIND") { + case "kona": + return startKonaSupervisor(t, supervisorName, l1EL, fullCfgSet, rollupCfgs) + default: + return startOPSupervisor(t, supervisorName, l1EL, fullCfgSet) + } +} + +func startOPSupervisor( + t devtest.T, + supervisorName string, + l1EL *L1Geth, + fullCfgSet depset.FullConfigSetMerged, +) *OpSupervisor { + cfg := &supervisorConfig.Config{ + MetricsConfig: opmetrics.CLIConfig{ + Enabled: false, + }, + PprofConfig: oppprof.CLIConfig{ + ListenEnabled: false, + }, + LogConfig: oplog.CLIConfig{ + Level: log.LevelDebug, + Format: oplog.FormatText, + }, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + ListenPort: 0, + EnableAdmin: true, + }, + SyncSources: &syncnode.CLISyncNodes{}, + L1RPC: l1EL.UserRPC(), + Datadir: t.TempDir(), + Version: "dev", + FullConfigSetSource: fullCfgSet, + MockRun: false, + SynchronousProcessors: false, + DatadirSyncEndpoint: "", + } + supervisorNode := &OpSupervisor{ + name: supervisorName, + userRPC: "", + cfg: cfg, + p: t, + logger: t.Logger().New("component", "supervisor"), + service: nil, + } + supervisorNode.Start() + t.Cleanup(supervisorNode.Stop) + return supervisorNode +} + +func startKonaSupervisor( + t devtest.T, + supervisorName string, + l1EL *L1Geth, + fullCfgSet depset.FullConfigSetMerged, + rollupCfgs map[eth.ChainID]*rollup.Config, +) *KonaSupervisor { + require := t.Require() + + cfgDir := t.TempDir() + depSetJSON, err := json.Marshal(fullCfgSet.DependencySet) + require.NoError(err, "failed to marshal dependency set") + depSetCfgPath := filepath.Join(cfgDir, "depset.json") + require.NoError(os.WriteFile(depSetCfgPath, depSetJSON, 0o644)) + + rollupCfgPath := filepath.Join(cfgDir, "rollup-config-*.json") + for 
chainID, cfg := range rollupCfgs { + rollupData, err := json.Marshal(cfg) + require.NoError(err, "failed to marshal rollup config for chain %s", chainID) + filePath := filepath.Join(cfgDir, "rollup-config-"+chainID.String()+".json") + require.NoError(os.WriteFile(filePath, rollupData, 0o644)) + } + + execPath, err := EnsureRustBinary(t, RustBinarySpec{ + SrcDir: "rust/kona", + Package: "kona-supervisor", + Binary: "kona-supervisor", + }) + require.NoError(err, "prepare kona-supervisor binary") + require.NotEmpty(execPath, "kona-supervisor binary path resolved") + + envVars := []string{ + "RPC_ADDR=127.0.0.1", + "DATADIR=" + t.TempDir(), + "DEPENDENCY_SET=" + depSetCfgPath, + "ROLLUP_CONFIG_PATHS=" + rollupCfgPath, + "L1_RPC=" + l1EL.UserRPC(), + "RPC_ENABLE_ADMIN=true", + "L2_CONSENSUS_NODES=", + "L2_CONSENSUS_JWT_SECRET=", + "KONA_LOG_LEVEL=3", + "KONA_LOG_STDOUT_FORMAT=json", + } + + konaSupervisor := &KonaSupervisor{ + name: supervisorName, + userRPC: "", + execPath: execPath, + args: []string{}, + env: envVars, + p: t, + } + konaSupervisor.Start() + t.Cleanup(konaSupervisor.Stop) + return konaSupervisor +} + +func connectManagedL2CLToSupervisor(t devtest.T, supervisor Supervisor, l2CL L2CLNode) { + interopEndpoint, secret := l2CL.InteropRPC() + supClient, err := dial.DialSupervisorClientWithTimeout(t.Ctx(), t.Logger(), supervisor.UserRPC(), client.WithLazyDial()) + t.Require().NoError(err) + t.Cleanup(supClient.Close) + + err = retry.Do0(t.Ctx(), 10, retry.Exponential(), func() error { + return supClient.AddL2RPC(t.Ctx(), interopEndpoint, secret) + }) + t.Require().NoErrorf(err, "must connect CL node %s to supervisor %s", l2CL, supervisorIDString(supervisor)) +} + +func supervisorIDString(supervisor Supervisor) string { + switch s := supervisor.(type) { + case *OpSupervisor: + return s.name + case *KonaSupervisor: + return s.name + default: + return "" + } +} + +func startL2ELNodeWithSupervisor( + t devtest.T, + l2Net *L2Network, + jwtPath string, + jwtSecret 
[32]byte, + key string, + identity *ELNodeIdentity, + supervisorRPC string, +) *OpGeth { + cfg := DefaultL2ELConfig() + cfg.P2PAddr = "127.0.0.1" + cfg.P2PPort = identity.Port + cfg.P2PNodeKeyHex = identity.KeyHex() + + l2EL := &OpGeth{ + name: key, + p: t, + logger: t.Logger().New("component", "l2el-"+key), + l2Net: l2Net, + jwtPath: jwtPath, + jwtSecret: jwtSecret, + supervisorRPC: supervisorRPC, + cfg: cfg, + } + l2EL.Start() + t.Cleanup(l2EL.Stop) + return l2EL +} + +func readJWTSecretFromPath(t devtest.T, jwtPath string) [32]byte { + content, err := os.ReadFile(jwtPath) + t.Require().NoError(err, "failed to read jwt path %s", jwtPath) + raw, err := hexutil.Decode(strings.TrimSpace(string(content))) + t.Require().NoError(err, "failed to decode jwt secret from %s", jwtPath) + t.Require().Len(raw, 32, "invalid jwt secret length from %s", jwtPath) + var secret [32]byte + copy(secret[:], raw) + return secret +} diff --git a/op-devstack/sysgo/op_rbuilder.go b/op-devstack/sysgo/op_rbuilder.go index b77bfbf0f3fc7..1b18922dbea07 100644 --- a/op-devstack/sysgo/op_rbuilder.go +++ b/op-devstack/sysgo/op_rbuilder.go @@ -3,7 +3,6 @@ package sysgo import ( "encoding/hex" - "encoding/json" "os" "path/filepath" "strconv" @@ -13,10 +12,9 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/logpipe" "github.com/ethereum-optimism/optimism/op-service/tasks" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" @@ -25,7 +23,8 @@ import ( type OPRBuilderNode struct { mu sync.Mutex - id stack.ComponentID + name string + chainID eth.ChainID rollupCfg *rollup.Config wsProxyURL 
string @@ -38,13 +37,12 @@ type OPRBuilderNode struct { authProxy *tcpproxy.Proxy logger log.Logger - p devtest.P + p devtest.CommonT sub *SubProcess cfg *OPRBuilderNodeConfig //nolint:unused,structcheck // configuration retained for restarts and JWT lookups } -var _ hydrator = (*OPRBuilderNode)(nil) var _ stack.Lifecycle = (*OPRBuilderNode)(nil) var _ L2ELNode = (*OPRBuilderNode)(nil) @@ -127,14 +125,14 @@ func DefaultOPRbuilderNodeConfig() *OPRBuilderNodeConfig { WithUnusedPorts: false, DisableDiscovery: true, DataDir: "", - ExtraArgs: nil, - Env: nil, RulesEnabled: false, RulesConfigPath: "", + ExtraArgs: nil, + Env: nil, } } -func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.P) (args []string, env []string) { +func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.CommonT) (args []string, env []string) { p.Require().NotNil(cfg, "nil OPRbuilderNodeConfig") env = append([]string(nil), cfg.Env...) @@ -251,21 +249,15 @@ func (cfg *OPRBuilderNodeConfig) LaunchSpec(p devtest.P) (args []string, env []s } type OPRBuilderNodeOption interface { - Apply(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) + Apply(p devtest.CommonT, target ComponentTarget, cfg *OPRBuilderNodeConfig) } -func WithGlobalOPRBuilderNodeOption(opt OPRBuilderNodeOption) stack.Option[*Orchestrator] { - return stack.BeforeDeploy(func(o *Orchestrator) { - o.oprbuilderNodeOptions = append(o.oprbuilderNodeOptions, opt) - }) -} - -type OPRBuilderNodeOptionFn func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) +type OPRBuilderNodeOptionFn func(p devtest.CommonT, target ComponentTarget, cfg *OPRBuilderNodeConfig) var _ OPRBuilderNodeOption = OPRBuilderNodeOptionFn(nil) -func (fn OPRBuilderNodeOptionFn) Apply(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { - fn(p, id, cfg) +func (fn OPRBuilderNodeOptionFn) Apply(p devtest.CommonT, target ComponentTarget, cfg *OPRBuilderNodeConfig) { + fn(p, target, cfg) } // OPRBuilderNodeOptionBundle applies multiple 
OPRBuilderNodeOptions in order. @@ -273,16 +265,16 @@ type OPRBuilderNodeOptionBundle []OPRBuilderNodeOption var _ OPRBuilderNodeOption = OPRBuilderNodeOptionBundle(nil) -func (b OPRBuilderNodeOptionBundle) Apply(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { +func (b OPRBuilderNodeOptionBundle) Apply(p devtest.CommonT, target ComponentTarget, cfg *OPRBuilderNodeConfig) { for _, opt := range b { p.Require().NotNil(opt, "cannot Apply nil OPRBuilderNodeOption") - opt.Apply(p, id, cfg) + opt.Apply(p, target, cfg) } } // OPRBuilderWithP2PConfig sets deterministic P2P identity and static peers for the builder EL. func OPRBuilderWithP2PConfig(addr string, port int, nodeKeyHex string, staticPeers, trustedPeers []string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.CommonT, _ ComponentTarget, cfg *OPRBuilderNodeConfig) { cfg.P2PAddr = addr cfg.P2PPort = port cfg.P2PNodeKeyHex = nodeKeyHex @@ -293,7 +285,7 @@ func OPRBuilderWithP2PConfig(addr string, port int, nodeKeyHex string, staticPee // OPRBuilderWithNodeIdentity applies an ELNodeIdentity directly to the builder EL. 
func OPRBuilderWithNodeIdentity(identity *ELNodeIdentity, addr string, staticPeers, trustedPeers []string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.CommonT, _ ComponentTarget, cfg *OPRBuilderNodeConfig) { cfg.P2PAddr = addr cfg.P2PPort = identity.Port cfg.P2PNodeKeyHex = identity.KeyHex() @@ -303,42 +295,17 @@ func OPRBuilderWithNodeIdentity(identity *ELNodeIdentity, addr string, staticPee } func OPRBuilderNodeWithExtraArgs(args ...string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.CommonT, _ ComponentTarget, cfg *OPRBuilderNodeConfig) { cfg.ExtraArgs = append(cfg.ExtraArgs, args...) }) } func OPRBuilderNodeWithEnv(env ...string) OPRBuilderNodeOption { - return OPRBuilderNodeOptionFn(func(p devtest.P, id stack.ComponentID, cfg *OPRBuilderNodeConfig) { + return OPRBuilderNodeOptionFn(func(p devtest.CommonT, _ ComponentTarget, cfg *OPRBuilderNodeConfig) { cfg.Env = append(cfg.Env, env...) }) } -func (b *OPRBuilderNode) hydrate(system stack.ExtensibleSystem) { - elRPC, err := client.NewRPC(system.T().Ctx(), system.Logger(), b.rpcProxyURL, client.WithLazyDial()) - system.T().Require().NoError(err) - system.T().Cleanup(elRPC.Close) - - // Create a shared websocket client for flashblocks traffic over the proxy. 
- wsClient, err := client.DialWS(system.T().Ctx(), client.WSConfig{ - URL: b.wsProxyURL, - Log: system.Logger(), - }) - system.T().Require().NoError(err) - - node := shim.NewOPRBuilderNode(shim.OPRBuilderNodeConfig{ - ID: b.id, - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: elRPC, - ChainID: b.id.ChainID(), - }, - RollupCfg: b.rollupCfg, - FlashblocksClient: wsClient, - }) - system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(b.id.ChainID()))).(stack.ExtensibleL2Network).AddOPRBuilderNode(node) -} - func (b *OPRBuilderNode) Start() { b.mu.Lock() defer b.mu.Unlock() @@ -382,8 +349,8 @@ func (b *OPRBuilderNode) Start() { defer close(authRPCChan) // Forward structured logs to Go logger and parse for port discovery - logOut := logpipe.ToLogger(b.logger.New("component", "op-OPRbuilderNode", "src", "stdout")) - logErr := logpipe.ToLogger(b.logger.New("component", "op-OPRbuilderNode", "src", "stderr")) + logOut := logpipe.ToLoggerWithMinLevel(b.logger.New("component", "op-OPRbuilderNode", "src", "stdout"), log.LevelWarn) + logErr := logpipe.ToLoggerWithMinLevel(b.logger.New("component", "op-OPRbuilderNode", "src", "stderr"), log.LevelWarn) // Log parsing callback to extract bound addresses from process output onLogEntry := func(e logpipe.LogEntry) { @@ -479,40 +446,6 @@ func (b *OPRBuilderNode) Stop() { b.sub = nil } -// WithOPRBuilderNode constructs and starts an OPRbuilderNode using the provided options. 
-func WithOPRBuilderNode(id stack.ComponentID, opts ...OPRBuilderNodeOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - l2Net, ok := orch.GetL2Network(stack.NewL2NetworkID(id.ChainID())) - p.Require().True(ok, "l2 network required") - - tempDir := p.TempDir() - data, err := json.Marshal(l2Net.genesis) - p.Require().NoError(err, "must json-encode genesis") - chainConfigPath := filepath.Join(tempDir, "genesis.json") - p.Require().NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write genesis file") - - // Build config from options - cfg := DefaultOPRbuilderNodeConfig() - cfg.AuthRPCJWTPath, _ = orch.writeDefaultJWT() - cfg.Chain = chainConfigPath - orch.oprbuilderNodeOptions.Apply(p, id, cfg) // apply global options - OPRBuilderNodeOptionBundle(opts).Apply(orch.P(), id, cfg) // apply specific options - - rb := &OPRBuilderNode{ - id: id, - logger: p.Logger(), - p: p, - rollupCfg: l2Net.rollupCfg, - cfg: cfg, - } - p.Logger().Info("Starting OPRbuilderNode") - rb.Start() - p.Cleanup(rb.Stop) - orch.registry.Register(id, rb) - }) -} - func (b *OPRBuilderNode) EngineRPC() string { return b.authProxyURL } @@ -524,3 +457,7 @@ func (b *OPRBuilderNode) JWTPath() string { func (b *OPRBuilderNode) UserRPC() string { return b.rpcProxyURL } + +func (b *OPRBuilderNode) FlashblocksWSURL() string { + return b.wsProxyURL +} diff --git a/op-devstack/sysgo/orchestrator.go b/op-devstack/sysgo/orchestrator.go deleted file mode 100644 index 42929cc451384..0000000000000 --- a/op-devstack/sysgo/orchestrator.go +++ /dev/null @@ -1,178 +0,0 @@ -package sysgo - -import ( - "os" - "path/filepath" - "sync" - "time" - - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/compat" - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - 
"github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/clock" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/locks" - "github.com/ethereum/go-ethereum/common/hexutil" -) - -type Orchestrator struct { - p devtest.P - - keys devkeys.Keys - - wb *worldBuilder - - // nil if no time travel is supported - timeTravelClock *clock.AdvancingClock - - // options - batcherOptions []BatcherOption - proposerOptions []ProposerOption - l2CLOptions L2CLOptionBundle - oprbuilderNodeOptions OPRBuilderNodeOptionBundle - l2ELOptions L2ELOptionBundle - l2ChallengerOpts l2ChallengerOpts - SyncTesterELOptions SyncTesterELOptionBundle - deployerPipelineOptions []DeployerPipelineOption - - // Unified component registry - replaces the 15 separate locks.RWMap fields - registry *stack.Registry - - // supernodes are stored separately from the registry and hydrated explicitly. - supernodes locks.RWMap[stack.ComponentID, *SuperNode] - - // service name => prometheus endpoints to scrape - l2MetricsEndpoints locks.RWMap[string, []PrometheusMetricsTarget] - - syncTester *SyncTesterService - faucet *FaucetService - - controlPlane *ControlPlane - - // sysHook is called after hydration of a new test-scope system frontend, - // essentially a test-case preamble. 
- sysHook stack.SystemHook - - jwtPath string - jwtSecret [32]byte - jwtPathOnce sync.Once -} - -func (o *Orchestrator) Type() compat.Type { - return compat.SysGo -} - -func (o *Orchestrator) ClusterForL2(chainID eth.ChainID) (*Cluster, bool) { - clusters := stack.RegistryGetByKind[*Cluster](o.registry, stack.KindCluster) - for _, cluster := range clusters { - if cluster.DepSet() != nil && cluster.DepSet().HasChain(chainID) { - return cluster, true - } - } - return nil, false -} - -func (o *Orchestrator) ControlPlane() stack.ControlPlane { - return o.controlPlane -} - -func (o *Orchestrator) EnableTimeTravel() { - if o.timeTravelClock == nil { - o.timeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond) - } -} - -// GetL2EL returns the component at the exact ID if it implements L2ELNode. -// This supports polymorphism via interface implementation (e.g. OpGeth, OpReth, -// RollupBoostNode, OPRBuilderNode), but does not rewrite IDs across kinds. -func (o *Orchestrator) GetL2EL(id stack.ComponentID) (L2ELNode, bool) { - return stack.RegistryGet[L2ELNode](o.registry, id) -} - -var _ stack.Orchestrator = (*Orchestrator)(nil) - -func NewOrchestrator(p devtest.P, hook stack.SystemHook) *Orchestrator { - o := &Orchestrator{ - p: p, - sysHook: hook, - registry: stack.NewRegistry(), - } - o.controlPlane = &ControlPlane{o: o} - return o -} - -func (o *Orchestrator) P() devtest.P { - return o.p -} - -func (o *Orchestrator) writeDefaultJWT() (jwtPath string, secret [32]byte) { - o.jwtPathOnce.Do(func() { - // Sadly the geth node config cannot load JWT secret from memory, it has to be a file - o.jwtPath = filepath.Join(o.p.TempDir(), "jwt_secret") - o.jwtSecret = [32]byte{123} - err := os.WriteFile(o.jwtPath, []byte(hexutil.Encode(o.jwtSecret[:])), 0o600) - require.NoError(o.p, err, "failed to prepare jwt file") - }) - return o.jwtPath, o.jwtSecret -} - -func (o *Orchestrator) Hydrate(sys stack.ExtensibleSystem) { - o.sysHook.PreHydrate(sys) - if o.timeTravelClock != 
nil { - ttSys, ok := sys.(stack.TimeTravelSystem) - if ok { - ttSys.SetTimeTravelClock(o.timeTravelClock) - } - } - - // Hydrate all components in the unified registry. - for _, kind := range stack.HydrationComponentKindOrder() { - o.registry.RangeByKind(kind, func(id stack.ComponentID, component any) bool { - if h, ok := component.(hydrator); ok { - h.hydrate(sys) - } - return true - }) - } - - o.supernodes.Range(rangeHydrateFn[stack.ComponentID, *SuperNode](sys)) - - if o.syncTester != nil { - o.syncTester.hydrate(sys) - } - o.faucet.hydrate(sys) - o.sysHook.PostHydrate(sys) -} - -func (o *Orchestrator) RegisterL2MetricsTargets(id stack.Keyed, endpoints ...PrometheusMetricsTarget) { - wasSet := o.l2MetricsEndpoints.SetIfMissing(id.Key(), endpoints) - if !wasSet { - existing, _ := o.l2MetricsEndpoints.Get(id.Key()) - o.p.Logger().Warn("multiple endpoints registered with the same key", "key", id.Key(), "existing", existing, "new", endpoints) - } -} - -// InteropTestControl returns the InteropTestControl for a given SupernodeID. -// Returns nil if the supernode doesn't exist or doesn't implement the interface. -// This function is for integration test control only. -func (o *Orchestrator) InteropTestControl(id stack.SupernodeID) stack.InteropTestControl { - sn, ok := o.supernodes.Get(id) - if !ok { - return nil - } - return sn -} - -type hydrator interface { - hydrate(system stack.ExtensibleSystem) -} - -func rangeHydrateFn[I any, H hydrator](sys stack.ExtensibleSystem) func(id I, v H) bool { - return func(id I, v H) bool { - v.hydrate(sys) - return true - } -} diff --git a/op-devstack/sysgo/orchestrator_getters.go b/op-devstack/sysgo/orchestrator_getters.go deleted file mode 100644 index 19dd2e570027a..0000000000000 --- a/op-devstack/sysgo/orchestrator_getters.go +++ /dev/null @@ -1,48 +0,0 @@ -package sysgo - -import "github.com/ethereum-optimism/optimism/op-devstack/stack" - -// GetL1Network returns an L1 network by ID. 
-func (o *Orchestrator) GetL1Network(id stack.ComponentID) (*L1Network, bool) { - return stack.RegistryGet[*L1Network](o.registry, id) -} - -// GetL2Network returns an L2 network by ID. -func (o *Orchestrator) GetL2Network(id stack.ComponentID) (*L2Network, bool) { - return stack.RegistryGet[*L2Network](o.registry, id) -} - -// GetCluster returns a cluster by ID. -func (o *Orchestrator) GetCluster(id stack.ComponentID) (*Cluster, bool) { - return stack.RegistryGet[*Cluster](o.registry, id) -} - -// GetL1EL returns an L1 execution node by ID. -func (o *Orchestrator) GetL1EL(id stack.ComponentID) (L1ELNode, bool) { - return stack.RegistryGet[L1ELNode](o.registry, id) -} - -// GetL1CL returns an L1 consensus node by ID. -func (o *Orchestrator) GetL1CL(id stack.ComponentID) (*L1CLNode, bool) { - return stack.RegistryGet[*L1CLNode](o.registry, id) -} - -// GetL2CL returns an L2 consensus node by ID. -func (o *Orchestrator) GetL2CL(id stack.ComponentID) (L2CLNode, bool) { - return stack.RegistryGet[L2CLNode](o.registry, id) -} - -// GetSupervisor returns a supervisor by ID. -func (o *Orchestrator) GetSupervisor(id stack.ComponentID) (Supervisor, bool) { - return stack.RegistryGet[Supervisor](o.registry, id) -} - -// GetOPRBuilder returns an OPR builder node by ID. -func (o *Orchestrator) GetOPRBuilder(id stack.ComponentID) (*OPRBuilderNode, bool) { - return stack.RegistryGet[*OPRBuilderNode](o.registry, id) -} - -// GetRollupBoost returns a rollup-boost node by ID. 
-func (o *Orchestrator) GetRollupBoost(id stack.ComponentID) (*RollupBoostNode, bool) { - return stack.RegistryGet[*RollupBoostNode](o.registry, id) -} diff --git a/op-devstack/sysgo/preset_config.go b/op-devstack/sysgo/preset_config.go new file mode 100644 index 0000000000000..64d442d7c3a1e --- /dev/null +++ b/op-devstack/sysgo/preset_config.go @@ -0,0 +1,123 @@ +package sysgo + +import gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + +// PresetConfig captures preset constructor mutations. +// It is independent from orchestrator lifecycle hooks. +type PresetConfig struct { + DeployerOptions []DeployerOption + BatcherOptions []BatcherOption + ProposerOptions []ProposerOption + OPRBuilderOptions []OPRBuilderNodeOption + GlobalL2CLOptions []L2CLOption + GlobalSyncTesterELOptions []SyncTesterELOption + AddedGameTypes []gameTypes.GameType + RespectedGameTypes []gameTypes.GameType + EnableCannonKonaForChall bool + EnableTimeTravel bool + MaxSequencingWindow *uint64 + RequireInteropNotAtGen bool +} + +type PresetOption interface { + apply(cfg *PresetConfig) +} + +type presetOptionFn func(cfg *PresetConfig) + +func (fn presetOptionFn) apply(cfg *PresetConfig) { + fn(cfg) +} + +func NewPresetConfig(opts ...PresetOption) PresetConfig { + cfg := PresetConfig{} + for _, opt := range opts { + if opt == nil { + continue + } + opt.apply(&cfg) + } + return cfg +} + +func WithDeployerOptions(opts ...DeployerOption) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + cfg.DeployerOptions = append(cfg.DeployerOptions, opts...) 
+ }) +} + +func WithBatcherOption(opt BatcherOption) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + if opt == nil { + return + } + cfg.BatcherOptions = append(cfg.BatcherOptions, opt) + }) +} + +func WithProposerOption(opt ProposerOption) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + if opt == nil { + return + } + cfg.ProposerOptions = append(cfg.ProposerOptions, opt) + }) +} + +func WithOPRBuilderOption(opt OPRBuilderNodeOption) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + if opt == nil { + return + } + cfg.OPRBuilderOptions = append(cfg.OPRBuilderOptions, opt) + }) +} + +func WithGlobalL2CLOption(opt L2CLOption) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + if opt == nil { + return + } + cfg.GlobalL2CLOptions = append(cfg.GlobalL2CLOptions, opt) + }) +} + +func WithGlobalSyncTesterELOption(opt SyncTesterELOption) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + if opt == nil { + return + } + cfg.GlobalSyncTesterELOptions = append(cfg.GlobalSyncTesterELOptions, opt) + }) +} + +func WithGameTypeAdded(gameType gameTypes.GameType) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + cfg.AddedGameTypes = append(cfg.AddedGameTypes, gameType) + }) +} + +func WithRespectedGameTypeOverride(gameType gameTypes.GameType) PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + cfg.RespectedGameTypes = append(cfg.RespectedGameTypes, gameType) + }) +} + +func WithCannonKonaGameTypeAdded() PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + cfg.EnableCannonKonaForChall = true + cfg.AddedGameTypes = append(cfg.AddedGameTypes, gameTypes.CannonKonaGameType) + }) +} + +func WithChallengerCannonKonaEnabled() PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + cfg.EnableCannonKonaForChall = true + }) +} + +func WithTimeTravelEnabled() PresetOption { + return presetOptionFn(func(cfg *PresetConfig) { + cfg.EnableTimeTravel = true + 
}) +} diff --git a/op-devstack/sysgo/rollup_boost.go b/op-devstack/sysgo/rollup_boost.go index 2585920b6ce35..b59ca46cb655a 100644 --- a/op-devstack/sysgo/rollup_boost.go +++ b/op-devstack/sysgo/rollup_boost.go @@ -10,9 +10,8 @@ import ( "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/logpipe" "github.com/ethereum-optimism/optimism/op-service/tasks" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" @@ -24,7 +23,8 @@ import ( type RollupBoostNode struct { mu sync.Mutex - id stack.ComponentID + name string + chainID eth.ChainID wsProxyURL string wsProxy *tcpproxy.Proxy @@ -34,43 +34,16 @@ type RollupBoostNode struct { header http.Header logger log.Logger - p devtest.P + p devtest.CommonT sub *SubProcess cfg *RollupBoostConfig } -var _ hydrator = (*RollupBoostNode)(nil) var _ stack.Lifecycle = (*RollupBoostNode)(nil) var _ L2ELNode = (*RollupBoostNode)(nil) -func (r *RollupBoostNode) hydrate(system stack.ExtensibleSystem) { - elRPC, err := client.NewRPC(system.T().Ctx(), system.Logger(), r.rpcProxyURL, client.WithLazyDial()) - system.T().Require().NoError(err) - system.T().Cleanup(elRPC.Close) - - // Create a shared websocket client for flashblocks traffic over the proxy. 
- wsClient, err := client.DialWS(system.T().Ctx(), client.WSConfig{ - URL: r.wsProxyURL, - Headers: r.header, - Log: system.Logger(), - }) - system.T().Require().NoError(err) - - node := shim.NewRollupBoostNode(shim.RollupBoostNodeConfig{ - ID: r.id, - ELNodeConfig: shim.ELNodeConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - Client: elRPC, - ChainID: r.id.ChainID(), - }, - RollupCfg: system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(r.id.ChainID()))).RollupConfig(), - FlashblocksClient: wsClient, - }) - system.L2Network(stack.ByID[stack.L2Network](stack.NewL2NetworkID(r.id.ChainID()))).(stack.ExtensibleL2Network).AddRollupBoostNode(node) -} - func (r *RollupBoostNode) Start() { r.mu.Lock() defer r.mu.Unlock() @@ -104,8 +77,8 @@ func (r *RollupBoostNode) Start() { defer close(flashblocksWSChan) // Parse Rust-structured logs and forward into Go logger with attributes - logOut := logpipe.ToLogger(r.logger.New("stream", "stdout")) - logErr := logpipe.ToLogger(r.logger.New("stream", "stderr")) + logOut := logpipe.ToLoggerWithMinLevel(r.logger.New("stream", "stdout"), log.LevelWarn) + logErr := logpipe.ToLoggerWithMinLevel(r.logger.New("stream", "stderr"), log.LevelWarn) // Log parsing callback to extract bound addresses from process output onLogEntry := func(e logpipe.LogEntry) { @@ -174,54 +147,6 @@ func (r *RollupBoostNode) Stop() { r.sub = nil } -// WithRollupBoost starts a rollup-boost process using the provided options -// and registers a WSClient on the target L2 chain. -// l2ELID is required to link the proxy to the L2 EL it serves. 
-func WithRollupBoost(id stack.ComponentID, l2ELID stack.ComponentID, opts ...RollupBoostOption) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), id)) - logger := p.Logger() - - // Build config from options and derive sensible defaults - cfg := DefaultRollupBoostConfig() - RollupBoostOptionBundle(opts).Apply(orch, id, cfg) - // Source L2 engine/JWT from the L2 EL object (mandatory) - if l2EL, ok := orch.GetL2EL(l2ELID); ok { - engineRPC := l2EL.EngineRPC() - switch { - case strings.HasPrefix(engineRPC, "ws://"): - engineRPC = "http://" + strings.TrimPrefix(engineRPC, "ws://") - case strings.HasPrefix(engineRPC, "wss://"): - engineRPC = "https://" + strings.TrimPrefix(engineRPC, "wss://") - } - cfg.L2EngineURL = engineRPC - cfg.L2JWTPath = l2EL.JWTPath() - } - // Normalize builder URL and fallback JWT will be handled after builder link options are applied below. - - r := &RollupBoostNode{ - id: id, - logger: logger, - p: p, - cfg: cfg, - header: cfg.Headers, - } - // Apply any node-level link options - for _, opt := range opts { - if linkOpt, ok := opt.(interface { - applyNode(p devtest.P, id stack.ComponentID, r *RollupBoostNode) - }); ok { - linkOpt.applyNode(p, id, r) - } - } - logger.Info("Starting rollup-boost") - r.Start() - p.Cleanup(r.Stop) - // Register for hydration - orch.registry.Register(id, r) - }) -} - // RollupBoostConfig configures the rollup-boost process CLI and environment. type RollupBoostConfig struct { // RPC endpoint for rollup-boost itself @@ -281,7 +206,7 @@ func DefaultRollupBoostConfig() *RollupBoostConfig { } } -func (cfg *RollupBoostConfig) LaunchSpec(p devtest.P) (args []string, env []string) { +func (cfg *RollupBoostConfig) LaunchSpec(p devtest.CommonT) (args []string, env []string) { p.Require().NotNil(cfg, "nil RollupBoostConfig") env = append([]string(nil), cfg.Env...) 
@@ -354,66 +279,6 @@ func (cfg *RollupBoostConfig) LaunchSpec(p devtest.P) (args []string, env []stri return args, env } -type RollupBoostOption interface { - Apply(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) -} - -type RollupBoostOptionFn func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) - -var _ RollupBoostOption = RollupBoostOptionFn(nil) - -func (fn RollupBoostOptionFn) Apply(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { - fn(orch, id, cfg) -} - -type RollupBoostOptionBundle []RollupBoostOption - -var _ RollupBoostOption = RollupBoostOptionBundle(nil) - -func (b RollupBoostOptionBundle) Apply(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { - for _, opt := range b { - orch.P().Require().NotNil(opt, "cannot Apply nil RollupBoostOption") - opt.Apply(orch, id, cfg) - } -} - -// Convenience options -func RollupBoostWithExecutionMode(mode string) RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { - cfg.ExecutionMode = mode - }) -} - -func RollupBoostWithEnv(env ...string) RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { - cfg.Env = append(cfg.Env, env...) - }) -} - -func RollupBoostWithExtraArgs(args ...string) RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { - cfg.ExtraArgs = append(cfg.ExtraArgs, args...) 
- }) -} - -func RollupBoostWithBuilderNode(id stack.ComponentID) RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, rbID stack.ComponentID, cfg *RollupBoostConfig) { - builderNode, ok := orch.GetOPRBuilder(id) - if !ok { - orch.P().Require().FailNow("builder node not found") - } - cfg.BuilderURL = ensureHTTPURL(builderNode.authProxyURL) - cfg.BuilderJWTPath = builderNode.cfg.AuthRPCJWTPath - cfg.FlashblocksBuilderURL = builderNode.wsProxyURL - }) -} - -func RollupBoostWithFlashblocksDisabled() RollupBoostOption { - return RollupBoostOptionFn(func(orch *Orchestrator, id stack.ComponentID, cfg *RollupBoostConfig) { - cfg.EnableFlashblocks = false - }) -} - func ensureHTTPURL(u string) string { if strings.Contains(u, "://") { return u @@ -432,3 +297,7 @@ func (r *RollupBoostNode) JWTPath() string { func (r *RollupBoostNode) UserRPC() string { return r.rpcProxyURL } + +func (r *RollupBoostNode) FlashblocksWSURL() string { + return r.wsProxyURL +} diff --git a/op-devstack/sysgo/runtime_state.go b/op-devstack/sysgo/runtime_state.go new file mode 100644 index 0000000000000..b5c4e440a6f6e --- /dev/null +++ b/op-devstack/sysgo/runtime_state.go @@ -0,0 +1,120 @@ +package sysgo + +import ( + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + challengerconfig "github.com/ethereum-optimism/optimism/op-challenger/config" + "github.com/ethereum-optimism/optimism/op-faucet/faucet" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer" +) + +type TestSequencerRuntime struct { + Name string + AdminRPC string + JWTSecret [32]byte + ControlRPC map[eth.ChainID]string + Service *sequencer.Service +} + +func newTestSequencerRuntime(ts *testSequencer, name string) *TestSequencerRuntime { + if ts == nil { + return nil + } + if name == "" { + name 
= ts.name + } + return &TestSequencerRuntime{ + Name: name, + AdminRPC: ts.adminRPC, + JWTSecret: ts.jwtSecret, + ControlRPC: copyControlRPCMap(ts.controlRPC), + Service: ts.service, + } +} + +type SingleChainNodeRuntime struct { + Name string + IsSequencer bool + EL L2ELNode + CL L2CLNode +} + +type SyncTesterRuntime struct { + Service *SyncTesterService + Node *SingleChainNodeRuntime +} + +type FlashblocksRuntimeSupport struct { + Builder *OPRBuilderNode + RollupBoost *RollupBoostNode +} + +type SingleChainInteropSupport struct { + Migration *interopMigrationState + FullConfigSet depset.FullConfigSetMerged + DependencySet depset.DependencySet + Supervisor Supervisor +} + +type SingleChainRuntime struct { + Keys devkeys.Keys + + L1Network *L1Network + L2Network *L2Network + + L1EL *L1Geth + L1CL *L1CLNode + + L2EL L2ELNode + L2CL L2CLNode + + L2Batcher *L2Batcher + L2Proposer *L2Proposer + L2Challenger *L2Challenger + + FaucetService *faucet.Service + TimeTravel *clock.AdvancingClock + TestSequencer *TestSequencerRuntime + + Nodes map[string]*SingleChainNodeRuntime + SyncTester *SyncTesterRuntime + Conductors map[string]*Conductor + Flashblocks *FlashblocksRuntimeSupport + Interop *SingleChainInteropSupport + P2PEnabled bool +} + +type MultiChainNodeRuntime struct { + Name string + Network *L2Network + EL L2ELNode + CL L2CLNode + Batcher *L2Batcher + Proposer *L2Proposer + Followers map[string]*SingleChainNodeRuntime +} + +type MultiChainRuntime struct { + Keys devkeys.Keys + Migration *interopMigrationState + FullConfigSet depset.FullConfigSetMerged + DependencySet depset.DependencySet + + L1Network *L1Network + L1EL *L1Geth + L1CL *L1CLNode + + Chains map[string]*MultiChainNodeRuntime + + PrimarySupervisor Supervisor + SecondarySupervisor Supervisor + Supernode *SuperNode + + FaucetService *faucet.Service + TimeTravel *clock.AdvancingClock + TestSequencer *TestSequencerRuntime + L2ChallengerConfig *challengerconfig.Config + DelaySeconds uint64 +} diff --git 
a/op-devstack/sysgo/rust_binary.go b/op-devstack/sysgo/rust_binary.go index cd74a11e897c9..7546ecd1db2c0 100644 --- a/op-devstack/sysgo/rust_binary.go +++ b/op-devstack/sysgo/rust_binary.go @@ -2,10 +2,12 @@ package sysgo import ( "context" + "encoding/json" "fmt" "os" "os/exec" "path/filepath" + "sort" "strings" "github.com/ethereum-optimism/optimism/op-devstack/devtest" @@ -28,7 +30,7 @@ type RustBinarySpec struct { // Build behavior: // - RUST_JIT_BUILD=1: runs cargo build --release (letting cargo handle rebuild detection) // - Otherwise: only checks binary exists, errors if missing -func EnsureRustBinary(p devtest.P, spec RustBinarySpec) (string, error) { +func EnsureRustBinary(p devtest.CommonT, spec RustBinarySpec) (string, error) { envSuffix := toEnvVarSuffix(spec.Binary) // Check for explicit binary path override @@ -46,7 +48,6 @@ func EnsureRustBinary(p devtest.P, spec RustBinarySpec) (string, error) { return "", err } - binaryPath := filepath.Join(srcRoot, "target", "release", spec.Binary) jitBuild := os.Getenv("RUST_JIT_BUILD") != "" if jitBuild { @@ -54,13 +55,12 @@ func EnsureRustBinary(p devtest.P, spec RustBinarySpec) (string, error) { if err := buildRustBinary(p.Ctx(), srcRoot, spec.Package, spec.Binary); err != nil { return "", err } - } else { - if _, err := os.Stat(binaryPath); os.IsNotExist(err) { - return "", fmt.Errorf("%s binary not found at %s; "+ - "run 'just build-rust-debug' before the test or set RUST_JIT_BUILD=1", spec.Binary, binaryPath) - } } + binaryPath, err := resolveBuiltRustBinaryPath(srcRoot, spec.Binary) + if err != nil { + return "", fmt.Errorf("%s binary not found; run 'just build-rust-debug' before the test or set RUST_JIT_BUILD=1: %w", spec.Binary, err) + } return binaryPath, nil } @@ -94,3 +94,62 @@ func buildRustBinary(ctx context.Context, root, pkg, bin string) error { cmd.Stderr = os.Stderr return cmd.Run() } + +type cargoMetadata struct { + TargetDirectory string `json:"target_directory"` +} + +func 
resolveBuiltRustBinaryPath(srcRoot, binary string) (string, error) { + targetDir, err := cargoTargetDirectory(srcRoot) + if err != nil { + return "", err + } + + candidates := []string{ + filepath.Join(targetDir, "release", binary), + } + globMatches, err := filepath.Glob(filepath.Join(targetDir, "*", "release", binary)) + if err == nil { + candidates = append(candidates, globMatches...) + } + + seen := make(map[string]struct{}, len(candidates)) + var existing []string + for _, candidate := range candidates { + if _, dup := seen[candidate]; dup { + continue + } + seen[candidate] = struct{}{} + if _, err := os.Stat(candidate); err == nil { + existing = append(existing, candidate) + } + } + + switch len(existing) { + case 0: + return "", fmt.Errorf("no built binary found under target dir %s", targetDir) + case 1: + return existing[0], nil + default: + sort.Strings(existing) + return existing[0], nil + } +} + +func cargoTargetDirectory(srcRoot string) (string, error) { + cmd := exec.Command("cargo", "metadata", "--no-deps", "--format-version", "1") + cmd.Dir = srcRoot + out, err := cmd.Output() + if err != nil { + return "", fmt.Errorf("cargo metadata: %w", err) + } + + var meta cargoMetadata + if err := json.Unmarshal(out, &meta); err != nil { + return "", fmt.Errorf("parse cargo metadata: %w", err) + } + if meta.TargetDirectory == "" { + return "", fmt.Errorf("cargo metadata returned empty target directory") + } + return meta.TargetDirectory, nil +} diff --git a/op-devstack/sysgo/singlechain_build.go b/op-devstack/sysgo/singlechain_build.go new file mode 100644 index 0000000000000..463b32d9fa5c9 --- /dev/null +++ b/op-devstack/sysgo/singlechain_build.go @@ -0,0 +1,682 @@ +package sysgo + +import ( + "context" + "encoding/hex" + "flag" + "fmt" + "os" + "path/filepath" + "time" + + "github.com/urfave/cli/v2" + + altda "github.com/ethereum-optimism/optimism/op-alt-da" + "github.com/ethereum/go-ethereum/common/hexutil" + "github.com/ethereum/go-ethereum/crypto" + 
"github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/params/forks" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + faucetConfig "github.com/ethereum-optimism/optimism/op-faucet/config" + "github.com/ethereum-optimism/optimism/op-faucet/faucet" + fconf "github.com/ethereum-optimism/optimism/op-faucet/faucet/backend/config" + ftypes "github.com/ethereum-optimism/optimism/op-faucet/faucet/backend/types" + "github.com/ethereum-optimism/optimism/op-node/config" + opNodeFlags "github.com/ethereum-optimism/optimism/op-node/flags" + "github.com/ethereum-optimism/optimism/op-node/p2p" + p2pcli "github.com/ethereum-optimism/optimism/op-node/p2p/cli" + "github.com/ethereum-optimism/optimism/op-node/rollup/driver" + "github.com/ethereum-optimism/optimism/op-node/rollup/interop" + nodeSync "github.com/ethereum-optimism/optimism/op-node/rollup/sync" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + "github.com/ethereum-optimism/optimism/op-service/retry" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" + sequencerConfig "github.com/ethereum-optimism/optimism/op-test-sequencer/config" + testmetrics 
"github.com/ethereum-optimism/optimism/op-test-sequencer/metrics" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/fakepos" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/standardbuilder" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/noopcommitter" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/standardcommitter" + workconfig "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/config" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/nooppublisher" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/standardpublisher" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/sequencers/fullseq" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/localkey" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/noopsigner" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" +) + +type testSequencer struct { + name string + adminRPC string + jwtSecret [32]byte + controlRPC map[eth.ChainID]string + service *sequencer.Service +} + +func buildSingleChainWorld(t devtest.T, keys devkeys.Keys, deployerOpts ...DeployerOption) (*L1Network, *L2Network) { + wb := &worldBuilder{ + p: t, + logger: t.Logger(), + require: t.Require(), + keys: keys, + builder: intentbuilder.New(), + } + + applyConfigLocalContractSources(t, keys, wb.builder) + applyConfigCommons(t, keys, DefaultL1ID, wb.builder) + applyConfigPrefundedL2(t, keys, DefaultL1ID, DefaultL2AID, wb.builder) + applyConfigDeployerOptions(t, keys, wb.builder, 
deployerOpts) + wb.Build() + + t.Require().Len(wb.l2Chains, 1, "expected exactly one L2 chain in flashblocks world") + l2ID := wb.l2Chains[0] + l1ID := eth.ChainIDFromUInt64(wb.output.AppliedIntent.L1ChainID) + + l1Net := &L1Network{ + name: "l1", + chainID: l1ID, + genesis: wb.outL1Genesis, + blockTime: 6, + } + l2Net := &L2Network{ + name: "l2a", + chainID: l2ID, + l1ChainID: l1ID, + genesis: wb.outL2Genesis[l2ID], + rollupCfg: wb.outL2RollupCfg[l2ID], + deployment: wb.outL2Deployment[l2ID], + opcmImpl: wb.output.ImplementationsDeployment.OpcmImpl, + mipsImpl: wb.output.ImplementationsDeployment.MipsImpl, + keys: keys, + } + return l1Net, l2Net +} + +func applyConfigLocalContractSources(t devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { + paths, err := contractPaths() + t.Require().NoError(err) + wd, err := os.Getwd() + t.Require().NoError(err) + artifactsPath := filepath.Join(wd, paths.FoundryArtifacts) + t.Require().NoError(ensureDir(artifactsPath)) + contractArtifacts, err := artifacts.NewFileLocator(artifactsPath) + t.Require().NoError(err) + builder.WithL1ContractsLocator(contractArtifacts) + builder.WithL2ContractsLocator(contractArtifacts) +} + +func applyConfigCommons(t devtest.T, keys devkeys.Keys, l1ChainID eth.ChainID, builder intentbuilder.Builder) { + _, l1Config := builder.WithL1(l1ChainID) + + l1StartTimestamp := uint64(time.Now().Unix()) + 1 + l1Config.WithTimestamp(l1StartTimestamp) + l1Config.WithL1ForkAtGenesis(forks.Prague) + + faucetFunderAddr, err := keys.Address(devkeys.UserKey(funderMnemonicIndex)) + t.Require().NoError(err, "need funder addr") + l1Config.WithPrefundedAccount(faucetFunderAddr, *eth.BillionEther.ToU256()) + + addrFor := intentbuilder.RoleToAddrProvider(t, keys, l1ChainID) + _, superCfg := builder.WithSuperchain() + intentbuilder.WithDevkeySuperRoles(t, keys, l1ChainID, superCfg) + l1Config.WithPrefundedAccount(addrFor(devkeys.SuperchainProxyAdminOwner), *millionEth) + 
l1Config.WithPrefundedAccount(addrFor(devkeys.SuperchainProtocolVersionsOwner), *millionEth) + l1Config.WithPrefundedAccount(addrFor(devkeys.SuperchainConfigGuardianKey), *millionEth) + l1Config.WithPrefundedAccount(addrFor(devkeys.L1ProxyAdminOwnerRole), *millionEth) +} + +func applyConfigPrefundedL2(t devtest.T, keys devkeys.Keys, l1ChainID, l2ChainID eth.ChainID, builder intentbuilder.Builder) { + _, l2Config := builder.WithL2(l2ChainID) + intentbuilder.WithDevkeyVaults(t, keys, l2Config) + intentbuilder.WithDevkeyL2Roles(t, keys, l2Config) + intentbuilder.WithDevkeyL1Roles(t, keys, l2Config, l1ChainID) + + faucetFunderAddr, err := keys.Address(devkeys.UserKey(funderMnemonicIndex)) + t.Require().NoError(err, "need funder addr") + l2Config.WithPrefundedAccount(faucetFunderAddr, *eth.BillionEther.ToU256()) + + addrFor := intentbuilder.RoleToAddrProvider(t, keys, l2ChainID) + l1Config := l2Config.L1Config() + l1Config.WithPrefundedAccount(addrFor(devkeys.BatcherRole), *millionEth) + l1Config.WithPrefundedAccount(addrFor(devkeys.ProposerRole), *millionEth) + l1Config.WithPrefundedAccount(addrFor(devkeys.ChallengerRole), *millionEth) + l1Config.WithPrefundedAccount(addrFor(devkeys.SystemConfigOwner), *millionEth) +} + +func startSequencerEL(t devtest.T, l2Net *L2Network, jwtPath string, jwtSecret [32]byte, identity *ELNodeIdentity) *OpGeth { + return startL2ELNode(t, l2Net, jwtPath, jwtSecret, "sequencer", identity) +} + +func startL2ELNode( + t devtest.T, + l2Net *L2Network, + jwtPath string, + jwtSecret [32]byte, + key string, + identity *ELNodeIdentity, +) *OpGeth { + cfg := DefaultL2ELConfig() + cfg.P2PAddr = "127.0.0.1" + cfg.P2PPort = identity.Port + cfg.P2PNodeKeyHex = identity.KeyHex() + + l2EL := &OpGeth{ + name: key, + p: t, + logger: t.Logger().New("component", "l2el-"+key), + l2Net: l2Net, + jwtPath: jwtPath, + jwtSecret: jwtSecret, + cfg: cfg, + } + l2EL.Start() + t.Cleanup(l2EL.Stop) + return l2EL +} + +func connectL2ELPeers(t devtest.T, logger 
log.Logger, initiatorRPC, acceptorRPC string, trusted bool) { + require := t.Require() + rpc1, err := dial.DialRPCClientWithTimeout(t.Ctx(), logger, initiatorRPC) + require.NoError(err, "failed to connect initiator EL RPC") + defer rpc1.Close() + rpc2, err := dial.DialRPCClientWithTimeout(t.Ctx(), logger, acceptorRPC) + require.NoError(err, "failed to connect acceptor EL RPC") + defer rpc2.Close() + ConnectP2P(t.Ctx(), require, rpc1, rpc2, trusted) +} + +func connectL2CLPeers(t devtest.T, logger log.Logger, l2CL1, l2CL2 L2CLNode) { + require := t.Require() + ctx := t.Ctx() + + p := getP2PClientsAndPeers(ctx, logger, require, l2CL1, l2CL2) + + connectPeer := func(p2pClient *sources.P2PClient, multiAddress string) { + err := retry.Do0(ctx, 6, retry.Exponential(), func() error { + return p2pClient.ConnectPeer(ctx, multiAddress) + }) + require.NoError(err, "failed to connect L2CL peer") + } + + connectPeer(p.client1, p.peerInfo2.Addresses[0]) + connectPeer(p.client2, p.peerInfo1.Addresses[0]) + + peerDump1, err := GetPeers(ctx, p.client1) + require.NoError(err) + peerDump2, err := GetPeers(ctx, p.client2) + require.NoError(err) + + _, ok1 := peerDump1.Peers[p.peerInfo2.PeerID.String()] + require.True(ok1, "peer register invalid (cl1 missing cl2)") + _, ok2 := peerDump2.Peers[p.peerInfo1.PeerID.String()] + require.True(ok2, "peer register invalid (cl2 missing cl1)") +} + +func startSequencerCL( + t devtest.T, + keys devkeys.Keys, + l1Net *L1Network, + l2Net *L2Network, + l1EL L1ELNode, + l1CL *L1CLNode, + l2EL L2ELNode, + jwtSecret [32]byte, + l2CLOpts []L2CLOption, +) *OpNode { + return startL2CLNode(t, keys, l1Net, l2Net, l1EL, l1CL, l2EL, jwtSecret, l2CLNodeStartConfig{ + Key: "sequencer", + IsSequencer: true, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + IndexingMode: false, + L2FollowSource: "", + L2CLOptions: l2CLOpts, + }) +} + +type l2CLNodeStartConfig struct { + Key string + IsSequencer bool + NoDiscovery bool + EnableReqResp bool + 
UseReqResp bool + IndexingMode bool + L2FollowSource string + DependencySet depset.DependencySet + L2CLOptions []L2CLOption +} + +func startL2CLNode( + t devtest.T, + keys devkeys.Keys, + l1Net *L1Network, + l2Net *L2Network, + l1EL L1ELNode, + l1CL *L1CLNode, + l2EL L2ELNode, + jwtSecret [32]byte, + startCfg l2CLNodeStartConfig, +) *OpNode { + require := t.Require() + cfg := DefaultL2CLConfig() + cfg.IsSequencer = startCfg.IsSequencer + cfg.NoDiscovery = startCfg.NoDiscovery + cfg.EnableReqRespSync = startCfg.EnableReqResp + cfg.UseReqRespSync = startCfg.UseReqResp + cfg.IndexingMode = startCfg.IndexingMode + cfg.FollowSource = startCfg.L2FollowSource + if len(startCfg.L2CLOptions) > 0 { + l2CLTarget := NewComponentTarget(startCfg.Key, l2Net.ChainID()) + for _, opt := range startCfg.L2CLOptions { + if opt == nil { + continue + } + opt.Apply(t, l2CLTarget, cfg) + } + } + + syncMode := cfg.VerifierSyncMode + if cfg.IsSequencer { + syncMode = cfg.SequencerSyncMode + } + + logger := t.Logger().New("component", "l2cl-"+startCfg.Key) + + // Build P2P config through the same path as sysgo op-node setup. 
+ fs := flag.NewFlagSet("", flag.ContinueOnError) + for _, f := range opNodeFlags.P2PFlags(opNodeFlags.EnvVarPrefix) { + require.NoError(f.Apply(fs)) + } + require.NoError(fs.Set(opNodeFlags.AdvertiseIPName, "127.0.0.1")) + require.NoError(fs.Set(opNodeFlags.AdvertiseTCPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.AdvertiseUDPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.ListenIPName, "127.0.0.1")) + require.NoError(fs.Set(opNodeFlags.ListenTCPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.ListenUDPPortName, "0")) + require.NoError(fs.Set(opNodeFlags.DiscoveryPathName, "memory")) + require.NoError(fs.Set(opNodeFlags.PeerstorePathName, "memory")) + require.NoError(fs.Set(opNodeFlags.BootnodesName, "")) + + networkPrivKey, err := crypto.GenerateKey() + require.NoError(err) + networkPrivKeyHex := hex.EncodeToString(crypto.FromECDSA(networkPrivKey)) + require.NoError(fs.Set(opNodeFlags.P2PPrivRawName, networkPrivKeyHex)) + + cliCtx := cli.NewContext(&cli.App{}, fs, nil) + var p2pSignerSetup p2p.SignerSetup + if cfg.IsSequencer { + p2pKey, err := keys.Secret(devkeys.SequencerP2PRole.Key(l2Net.ChainID().ToBig())) + require.NoError(err, "need p2p key for sequencer") + p2pKeyHex := hex.EncodeToString(crypto.FromECDSA(p2pKey)) + require.NoError(fs.Set(opNodeFlags.SequencerP2PKeyName, p2pKeyHex)) + p2pSignerSetup, err = p2pcli.LoadSignerSetup(cliCtx, logger) + require.NoError(err, "failed to load p2p signer") + } + p2pConfig, err := p2pcli.NewConfig(cliCtx, l2Net.rollupCfg.BlockTime) + require.NoError(err, "failed to load p2p config") + p2pConfig.NoDiscovery = cfg.NoDiscovery + p2pConfig.EnableReqRespSync = cfg.EnableReqRespSync + + interopCfg := &interop.Config{} + if startCfg.IndexingMode { + interopCfg = &interop.Config{ + RPCAddr: "127.0.0.1", + RPCPort: 0, + RPCJwtSecretPath: l2EL.JWTPath(), + } + } + + nodeCfg := &config.Config{ + L1: &config.L1EndpointConfig{ + L1NodeAddr: l1EL.UserRPC(), + L1TrustRPC: false, + L1RPCKind: sources.RPCKindDebugGeth, 
+ RateLimit: 0, + BatchSize: 20, + HttpPollInterval: 100, + MaxConcurrency: 10, + CacheSize: 0, + }, + L1ChainConfig: l1Net.genesis.Config, + L2: &config.L2EndpointConfig{ + L2EngineAddr: l2EL.EngineRPC(), + L2EngineJWTSecret: jwtSecret, + }, + L2FollowSource: &config.L2FollowSourceConfig{ + L2RPCAddr: cfg.FollowSource, + }, + Beacon: &config.L1BeaconEndpointConfig{ + BeaconAddr: l1CL.beaconHTTPAddr, + }, + Driver: driver.Config{ + SequencerEnabled: cfg.IsSequencer, + SequencerConfDepth: 2, + }, + Rollup: *l2Net.rollupCfg, + DependencySet: startCfg.DependencySet, + SupervisorEnabled: cfg.IndexingMode, + P2PSigner: p2pSignerSetup, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + ListenPort: 0, + EnableAdmin: true, + }, + InteropConfig: interopCfg, + P2P: p2pConfig, + L1EpochPollInterval: time.Second * 2, + RuntimeConfigReloadInterval: 0, + Tracer: nil, + Sync: nodeSync.Config{ + SyncMode: syncMode, + SyncModeReqResp: cfg.UseReqRespSync, + SkipSyncStartCheck: false, + SupportsPostFinalizationELSync: false, + L2FollowSourceEndpoint: cfg.FollowSource, + NeedInitialResetEngine: false, + }, + ConfigPersistence: config.DisabledConfigPersistence{}, + Metrics: opmetrics.CLIConfig{}, + Pprof: oppprof.CLIConfig{}, + SafeDBPath: cfg.SafeDBPath, + RollupHalt: "", + Cancel: nil, + ConductorEnabled: false, + ConductorRpc: nil, + ConductorRpcTimeout: 0, + AltDA: altda.CLIConfig{}, + IgnoreMissingPectraBlobSchedule: false, + ExperimentalOPStackAPI: true, + } + l2CL := &OpNode{ + name: startCfg.Key, + opNode: nil, + cfg: nodeCfg, + p: t, + logger: logger, + clock: clock.SystemClock, + } + l2CL.Start() + t.Cleanup(l2CL.Stop) + return l2CL +} + +func startTestSequencer( + t devtest.T, + keys devkeys.Keys, + jwtPath string, + jwtSecret [32]byte, + l1Net *L1Network, + l1EL *L1Geth, + l1CL *L1CLNode, + l2EL *OpGeth, + l2CL *OpNode, +) *testSequencer { + require := t.Require() + logger := t.Logger().New("component", "test-sequencer") + + l1ELClient, err := 
ethclient.DialContext(t.Ctx(), l1EL.UserRPC()) + require.NoError(err, "failed to dial L1 EL RPC for test-sequencer") + t.Cleanup(l1ELClient.Close) + + engineCl, err := dialEngine(t.Ctx(), l1EL.AuthRPC(), jwtSecret) + require.NoError(err, "failed to dial L1 engine API for test-sequencer") + t.Cleanup(func() { + engineCl.inner.Close() + }) + + l1ChainID := l1Net.ChainID() + l2ChainID := l2EL.l2Net.ChainID() + + // L1 sequencer components: fakepos builder + noop signer/committer/publisher. + bidL1 := seqtypes.BuilderID("test-l1-builder") + cidL1 := seqtypes.CommitterID("test-noop-committer") + sidL1 := seqtypes.SignerID("test-noop-signer") + pidL1 := seqtypes.PublisherID("test-noop-publisher") + seqIDL1 := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l1ChainID)) + + ensemble := &workconfig.Ensemble{ + Builders: map[seqtypes.BuilderID]*workconfig.BuilderEntry{ + bidL1: { + L1: &fakepos.Config{ + ChainConfig: l1Net.genesis.Config, + EngineAPI: engineCl, + Backend: l1ELClient, + Beacon: l1CL.beacon, + FinalizedDistance: 20, + SafeDistance: 10, + BlockTime: 6, + }, + }, + }, + Signers: map[seqtypes.SignerID]*workconfig.SignerEntry{ + sidL1: { + Noop: &noopsigner.Config{}, + }, + }, + Committers: map[seqtypes.CommitterID]*workconfig.CommitterEntry{ + cidL1: { + Noop: &noopcommitter.Config{}, + }, + }, + Publishers: map[seqtypes.PublisherID]*workconfig.PublisherEntry{ + pidL1: { + Noop: &nooppublisher.Config{}, + }, + }, + Sequencers: map[seqtypes.SequencerID]*workconfig.SequencerEntry{ + seqIDL1: { + Full: &fullseq.Config{ + ChainID: l1ChainID, + Builder: bidL1, + Signer: sidL1, + Committer: cidL1, + Publisher: pidL1, + }, + }, + }, + } + + // L2 sequencer components: standard builder/committer/publisher + local signer. 
+ bidL2 := seqtypes.BuilderID("test-standard-builder") + cidL2 := seqtypes.CommitterID("test-standard-committer") + sidL2 := seqtypes.SignerID("test-local-signer") + pidL2 := seqtypes.PublisherID("test-standard-publisher") + seqIDL2 := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l2ChainID)) + + p2pKey, err := keys.Secret(devkeys.SequencerP2PRole.Key(l2ChainID.ToBig())) + require.NoError(err, "need p2p key for test sequencer") + rawKey := hexutil.Bytes(crypto.FromECDSA(p2pKey)) + + ensemble.Builders[bidL2] = &workconfig.BuilderEntry{ + Standard: &standardbuilder.Config{ + L1ChainConfig: l1Net.genesis.Config, + L1EL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l1EL.UserRPC()), + }, + L2EL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2EL.UserRPC()), + }, + L2CL: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CL.UserRPC()), + }, + }, + } + ensemble.Signers[sidL2] = &workconfig.SignerEntry{ + LocalKey: &localkey.Config{ + RawKey: &rawKey, + ChainID: l2ChainID, + }, + } + ensemble.Committers[cidL2] = &workconfig.CommitterEntry{ + Standard: &standardcommitter.Config{ + RPC: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CL.UserRPC()), + }, + }, + } + ensemble.Publishers[pidL2] = &workconfig.PublisherEntry{ + Standard: &standardpublisher.Config{ + RPC: endpoint.MustRPC{ + Value: endpoint.HttpURL(l2CL.UserRPC()), + }, + }, + } + ensemble.Sequencers[seqIDL2] = &workconfig.SequencerEntry{ + Full: &fullseq.Config{ + ChainID: l2ChainID, + Builder: bidL2, + Signer: sidL2, + Committer: cidL2, + Publisher: pidL2, + SequencerConfDepth: 2, + SequencerEnabled: true, + SequencerStopped: false, + SequencerMaxSafeLag: 0, + }, + } + + sequencerIDs := map[eth.ChainID]seqtypes.SequencerID{ + l1ChainID: seqIDL1, + l2ChainID: seqIDL2, + } + + jobs := work.NewJobRegistry() + startedEnsemble, err := ensemble.Start(t.Ctx(), &work.StartOpts{ + Log: logger, + Metrics: &testmetrics.NoopMetrics{}, + Jobs: jobs, + }) + require.NoError(err, "failed to start test-sequencer ensemble") + + cfg := 
&sequencerConfig.Config{ + MetricsConfig: opmetrics.CLIConfig{ + Enabled: false, + }, + PprofConfig: oppprof.CLIConfig{ + ListenEnabled: false, + }, + LogConfig: oplog.CLIConfig{ + Level: log.LevelDebug, + Format: oplog.FormatText, + }, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + ListenPort: 0, + EnableAdmin: true, + }, + Ensemble: startedEnsemble, + JWTSecretPath: jwtPath, + Version: "dev", + MockRun: false, + } + + sq, err := sequencer.FromConfig(t.Ctx(), cfg, logger) + require.NoError(err, "failed to initialize test-sequencer service") + require.NoError(sq.Start(t.Ctx()), "failed to start test-sequencer service") + + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing test-sequencer service") + closeErr := sq.Stop(ctx) + logger.Info("Closed test-sequencer service", "err", closeErr) + }) + + adminRPC := sq.RPC() + controlRPCs := make(map[eth.ChainID]string, len(sequencerIDs)) + for chainID, seqID := range sequencerIDs { + controlRPCs[chainID] = adminRPC + "/sequencers/" + seqID.String() + } + + return &testSequencer{ + name: "test-sequencer", + adminRPC: adminRPC, + jwtSecret: jwtSecret, + controlRPC: controlRPCs, + service: sq, + } +} + +func startFaucets( + t devtest.T, + keys devkeys.Keys, + l1ChainID eth.ChainID, + l2ChainID eth.ChainID, + l1ELRPC string, + l2ELRPC string, +) *faucet.Service { + require := t.Require() + logger := t.Logger().New("component", "faucet") + + funderKey, err := keys.Secret(devkeys.UserKey(funderMnemonicIndex)) + require.NoError(err, "need faucet funder key") + funderKeyStr := hexutil.Encode(crypto.FromECDSA(funderKey)) + + faucets := map[ftypes.FaucetID]*fconf.FaucetEntry{ + ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", l1ChainID)): { + ELRPC: endpoint.MustRPC{Value: endpoint.URL(l1ELRPC)}, + ChainID: l1ChainID, + TxCfg: fconf.TxManagerConfig{ + PrivateKey: funderKeyStr, + }, + }, + ftypes.FaucetID(fmt.Sprintf("dev-faucet-%s", l2ChainID)): { + ELRPC: endpoint.MustRPC{Value: 
endpoint.URL(l2ELRPC)}, + ChainID: l2ChainID, + TxCfg: fconf.TxManagerConfig{ + PrivateKey: funderKeyStr, + }, + }, + } + + cfg := &faucetConfig.Config{ + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, + Faucets: &fconf.Config{ + Faucets: faucets, + }, + } + + srv, err := faucet.FromConfig(t.Ctx(), cfg, logger) + require.NoError(err, "failed to create faucet service") + require.NoError(srv.Start(t.Ctx()), "failed to start faucet service") + + t.Cleanup(func() { + ctx, cancel := context.WithCancel(context.Background()) + cancel() // force-close + logger.Info("Closing faucet service") + closeErr := srv.Stop(ctx) + logger.Info("Closed faucet service", "err", closeErr) + }) + + return srv +} + +func copyControlRPCMap(in map[eth.ChainID]string) map[eth.ChainID]string { + if len(in) == 0 { + return nil + } + out := make(map[eth.ChainID]string, len(in)) + for chainID, endpoint := range in { + out[chainID] = endpoint + } + return out +} diff --git a/op-devstack/sysgo/singlechain_flashblocks.go b/op-devstack/sysgo/singlechain_flashblocks.go new file mode 100644 index 0000000000000..c184d06a5c339 --- /dev/null +++ b/op-devstack/sysgo/singlechain_flashblocks.go @@ -0,0 +1,127 @@ +package sysgo + +import ( + "encoding/json" + "os" + "path/filepath" + "strings" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/eth" +) + +func startFlashblocksSingleChainPrimary( + t devtest.T, + keys devkeys.Keys, + world singleChainRuntimeWorld, + l1EL *L1Geth, + l1CL *L1CLNode, + jwtPath string, + jwtSecret [32]byte, + cfg PresetConfig, +) singleChainPrimaryRuntime { + logger := t.Logger() + + sequencerIdentity := NewELNodeIdentity(0) + builderIdentity := NewELNodeIdentity(0) + + l2EL := startSequencerEL(t, world.L2Network, jwtPath, jwtSecret, sequencerIdentity) + l2Builder := startBuilderEL(t, world.L2Network, jwtPath, builderIdentity, 
cfg.OPRBuilderOptions...) + + connectL2ELPeers(t, logger, l2EL.UserRPC(), l2Builder.UserRPC(), false) + connectL2ELPeers(t, logger, l2Builder.UserRPC(), l2EL.UserRPC(), true) + + rollupBoost := startRollupBoostNode(t, world.L2Network.ChainID(), l2EL, l2Builder) + l2CL := startSequencerCL(t, keys, world.L1Network, world.L2Network, l1EL, l1CL, rollupBoost, jwtSecret, nil) + + return singleChainPrimaryRuntime{ + EL: l2EL, + CL: l2CL, + Flashblocks: &FlashblocksRuntimeSupport{ + Builder: l2Builder, + RollupBoost: rollupBoost, + }, + } +} + +func NewFlashblocksRuntime(t devtest.T) *SingleChainRuntime { + return NewFlashblocksRuntimeWithConfig(t, PresetConfig{}) +} + +func NewFlashblocksRuntimeWithConfig(t devtest.T, cfg PresetConfig) *SingleChainRuntime { + return newSingleChainRuntimeWithConfig(t, cfg, singleChainRuntimeSpec{ + BuildWorld: newDefaultSingleChainWorld, + StartPrimary: startFlashblocksSingleChainPrimary, + StartBatcher: false, + StartProposer: false, + StartChallenger: false, + }) +} + +func startBuilderEL(t devtest.T, l2Net *L2Network, jwtPath string, identity *ELNodeIdentity, opts ...OPRBuilderNodeOption) *OPRBuilderNode { + require := t.Require() + + data, err := json.Marshal(l2Net.genesis) + require.NoError(err, "must json-encode L2 genesis") + chainConfigPath := filepath.Join(t.TempDir(), "op-rbuilder-genesis.json") + require.NoError(os.WriteFile(chainConfigPath, data, 0o644), "must write op-rbuilder genesis file") + + cfg := DefaultOPRbuilderNodeConfig() + cfg.AuthRPCJWTPath = jwtPath + cfg.Chain = chainConfigPath + cfg.P2PAddr = "127.0.0.1" + cfg.P2PPort = identity.Port + cfg.P2PNodeKeyHex = identity.KeyHex() + cfg.StaticPeers = nil + cfg.TrustedPeers = nil + if len(opts) > 0 { + target := NewComponentTarget("sequencer-builder", l2Net.ChainID()) + for _, opt := range opts { + if opt == nil { + continue + } + opt.Apply(t, target, cfg) + } + } + + builder := &OPRBuilderNode{ + name: "sequencer-builder", + chainID: l2Net.ChainID(), + logger: 
t.Logger().New("component", "op-rbuilder"), + p: t, + rollupCfg: l2Net.rollupCfg, + cfg: cfg, + } + builder.Start() + t.Cleanup(builder.Stop) + return builder +} + +func startRollupBoostNode(t devtest.T, chainID eth.ChainID, l2EL L2ELNode, builder *OPRBuilderNode) *RollupBoostNode { + cfg := DefaultRollupBoostConfig() + engineRPC := l2EL.EngineRPC() + switch { + case strings.HasPrefix(engineRPC, "ws://"): + engineRPC = "http://" + strings.TrimPrefix(engineRPC, "ws://") + case strings.HasPrefix(engineRPC, "wss://"): + engineRPC = "https://" + strings.TrimPrefix(engineRPC, "wss://") + } + cfg.L2EngineURL = engineRPC + cfg.L2JWTPath = l2EL.JWTPath() + cfg.BuilderURL = ensureHTTPURL(builder.authProxyURL) + cfg.BuilderJWTPath = builder.cfg.AuthRPCJWTPath + cfg.FlashblocksBuilderURL = builder.wsProxyURL + + rollupBoost := &RollupBoostNode{ + name: "rollup-boost", + chainID: chainID, + logger: t.Logger().New("component", "rollup-boost"), + p: t, + cfg: cfg, + header: cfg.Headers, + } + rollupBoost.Start() + t.Cleanup(rollupBoost.Stop) + return rollupBoost +} diff --git a/op-devstack/sysgo/singlechain_interop.go b/op-devstack/sysgo/singlechain_interop.go new file mode 100644 index 0000000000000..ec94ffdef4730 --- /dev/null +++ b/op-devstack/sysgo/singlechain_interop.go @@ -0,0 +1,60 @@ +package sysgo + +import ( + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" +) + +func newSingleChainInteropWorldNoSupervisor(t devtest.T, keys devkeys.Keys, cfg PresetConfig) singleChainRuntimeWorld { + l1Net, l2Net, depSet, fullCfgSet := buildSingleChainWorldWithInterop(t, keys, true, cfg.DeployerOptions...) 
+ return singleChainRuntimeWorld{ + L1Network: l1Net, + L2Network: l2Net, + Interop: &SingleChainInteropSupport{ + DependencySet: depSet, + FullConfigSet: fullCfgSet, + }, + } +} + +func startSingleChainInteropPrimaryNoSupervisor( + t devtest.T, + keys devkeys.Keys, + world singleChainRuntimeWorld, + l1EL *L1Geth, + l1CL *L1CLNode, + jwtPath string, + jwtSecret [32]byte, + cfg PresetConfig, +) singleChainPrimaryRuntime { + t.Require().NotNil(world.Interop, "single-chain interop runtime requires interop support") + + sequencerIdentity := NewELNodeIdentity(0) + l2EL := startSequencerEL(t, world.L2Network, jwtPath, jwtSecret, sequencerIdentity) + l2CL := startL2CLNode(t, keys, world.L1Network, world.L2Network, l1EL, l1CL, l2EL, jwtSecret, l2CLNodeStartConfig{ + Key: "sequencer", + IsSequencer: true, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + DependencySet: world.Interop.DependencySet, + L2FollowSource: "", + L2CLOptions: cfg.GlobalL2CLOptions, + }) + return singleChainPrimaryRuntime{ + EL: l2EL, + CL: l2CL, + } +} + +// NewMinimalInteropNoSupervisorRuntime constructs the single-chain interop world +// without supervisor wiring. 
+func NewMinimalInteropNoSupervisorRuntime(t devtest.T) *SingleChainRuntime { + return newSingleChainRuntimeWithConfig(t, PresetConfig{}, singleChainRuntimeSpec{ + BuildWorld: newSingleChainInteropWorldNoSupervisor, + StartPrimary: startSingleChainInteropPrimaryNoSupervisor, + StartBatcher: true, + StartProposer: true, + StartChallenger: false, + }) +} diff --git a/op-devstack/sysgo/singlechain_runtime.go b/op-devstack/sysgo/singlechain_runtime.go new file mode 100644 index 0000000000000..b7842602ee510 --- /dev/null +++ b/op-devstack/sysgo/singlechain_runtime.go @@ -0,0 +1,415 @@ +package sysgo + +import ( + "context" + "runtime" + "time" + + bss "github.com/ethereum-optimism/optimism/op-batcher/batcher" + batcherFlags "github.com/ethereum-optimism/optimism/op-batcher/flags" + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + opchallenger "github.com/ethereum-optimism/optimism/op-challenger" + gameTypes "github.com/ethereum-optimism/optimism/op-challenger/game/types" + challengermetrics "github.com/ethereum-optimism/optimism/op-challenger/metrics" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + sharedchallenger "github.com/ethereum-optimism/optimism/op-devstack/shared/challenger" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/setuputils" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" + ps "github.com/ethereum-optimism/optimism/op-proposer/proposer" + "github.com/ethereum-optimism/optimism/op-service/clock" + "github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + opmetrics "github.com/ethereum-optimism/optimism/op-service/metrics" + "github.com/ethereum-optimism/optimism/op-service/oppprof" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + "github.com/ethereum/go-ethereum/core" + 
"github.com/ethereum/go-ethereum/crypto" + "github.com/ethereum/go-ethereum/log" +) + +type singleChainRuntimeWorld struct { + L1Network *L1Network + L2Network *L2Network + Interop *SingleChainInteropSupport +} + +type singleChainPrimaryRuntime struct { + EL L2ELNode + CL L2CLNode + Flashblocks *FlashblocksRuntimeSupport +} + +type singleChainRuntimeSpec struct { + BuildWorld func(t devtest.T, keys devkeys.Keys, cfg PresetConfig) singleChainRuntimeWorld + StartPrimary func(t devtest.T, keys devkeys.Keys, world singleChainRuntimeWorld, l1EL *L1Geth, l1CL *L1CLNode, jwtPath string, jwtSecret [32]byte, cfg PresetConfig) singleChainPrimaryRuntime + StartBatcher bool + StartProposer bool + StartChallenger bool + TestSequencer string +} + +func newSingleChainNodeRuntime(name string, isSequencer bool, el L2ELNode, cl L2CLNode) *SingleChainNodeRuntime { + return &SingleChainNodeRuntime{ + Name: name, + IsSequencer: isSequencer, + EL: el, + CL: cl, + } +} + +func newDefaultSingleChainWorld(t devtest.T, keys devkeys.Keys, cfg PresetConfig) singleChainRuntimeWorld { + l1Net, l2Net := buildSingleChainWorld(t, keys, cfg.DeployerOptions...) 
+ return singleChainRuntimeWorld{ + L1Network: l1Net, + L2Network: l2Net, + } +} + +func startDefaultSingleChainPrimary( + t devtest.T, + keys devkeys.Keys, + world singleChainRuntimeWorld, + l1EL *L1Geth, + l1CL *L1CLNode, + jwtPath string, + jwtSecret [32]byte, + cfg PresetConfig, +) singleChainPrimaryRuntime { + sequencerIdentity := NewELNodeIdentity(0) + l2EL := startSequencerEL(t, world.L2Network, jwtPath, jwtSecret, sequencerIdentity) + l2CL := startSequencerCL(t, keys, world.L1Network, world.L2Network, l1EL, l1CL, l2EL, jwtSecret, cfg.GlobalL2CLOptions) + return singleChainPrimaryRuntime{ + EL: l2EL, + CL: l2CL, + } +} + +func newSingleChainRuntimeWithConfig(t devtest.T, cfg PresetConfig, spec singleChainRuntimeSpec) *SingleChainRuntime { + require := t.Require() + + keys, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) + require.NoError(err, "failed to derive dev keys from mnemonic") + + world := spec.BuildWorld(t, keys, cfg) + jwtPath, jwtSecret := writeJWTSecret(t) + + l1Clock := clock.SystemClock + var timeTravelClock *clock.AdvancingClock + if cfg.EnableTimeTravel { + timeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond) + l1Clock = timeTravelClock + } + l1EL, l1CL := startInProcessL1WithClock(t, world.L1Network, jwtPath, l1Clock) + + primary := spec.StartPrimary(t, keys, world, l1EL, l1CL, jwtPath, jwtSecret, cfg) + primaryNode := newSingleChainNodeRuntime("sequencer", true, primary.EL, primary.CL) + + var l2Batcher *L2Batcher + if spec.StartBatcher { + l2Batcher = startMinimalBatcher(t, keys, world.L2Network, l1EL, primary.CL, primary.EL, cfg.BatcherOptions...) + } + + var l2Proposer *L2Proposer + if spec.StartProposer { + l2Proposer = startMinimalProposer(t, keys, world.L2Network, l1EL, primary.CL, cfg.ProposerOptions...) 
+ } + + var l2Challenger *L2Challenger + if spec.StartChallenger { + l2Challenger = startMinimalChallenger(t, keys, world.L1Network, world.L2Network, l1EL, l1CL, primary.EL, primary.CL, cfg.EnableCannonKonaForChall) + } + + applyMinimalGameTypeOptions(t, keys, world.L1Network, world.L2Network, l1EL, cfg.AddedGameTypes, cfg.RespectedGameTypes) + + sequencerEL, ok := primary.EL.(*OpGeth) + require.True(ok, "single-chain runtime primary EL must be op-geth for test sequencer") + sequencerCL, ok := primary.CL.(*OpNode) + require.True(ok, "single-chain runtime primary CL must be op-node for test sequencer") + testSequencer := startTestSequencer(t, keys, jwtPath, jwtSecret, world.L1Network, l1EL, l1CL, sequencerEL, sequencerCL) + testSequencerRuntime := newTestSequencerRuntime(testSequencer, spec.TestSequencer) + faucetService := startFaucets(t, keys, world.L1Network.ChainID(), world.L2Network.ChainID(), l1EL.UserRPC(), primary.EL.UserRPC()) + + return &SingleChainRuntime{ + Keys: keys, + L1Network: world.L1Network, + L2Network: world.L2Network, + L1EL: l1EL, + L1CL: l1CL, + L2EL: primary.EL, + L2CL: primary.CL, + L2Batcher: l2Batcher, + L2Proposer: l2Proposer, + L2Challenger: l2Challenger, + FaucetService: faucetService, + TimeTravel: timeTravelClock, + TestSequencer: testSequencerRuntime, + Nodes: map[string]*SingleChainNodeRuntime{ + primaryNode.Name: primaryNode, + }, + Flashblocks: primary.Flashblocks, + Interop: world.Interop, + } +} + +// SingleChainRuntime is the shared DAG runtime for single-chain preset topologies. +// It is the root for minimal, flashblocks, follower-node, sync-tester, conductor, +// and no-supervisor interop variants. 
+func NewMinimalRuntime(t devtest.T) *SingleChainRuntime { + return NewMinimalRuntimeWithConfig(t, PresetConfig{}) +} + +func NewMinimalRuntimeWithConfig(t devtest.T, cfg PresetConfig) *SingleChainRuntime { + return newSingleChainRuntimeWithConfig(t, cfg, singleChainRuntimeSpec{ + BuildWorld: newDefaultSingleChainWorld, + StartPrimary: startDefaultSingleChainPrimary, + StartBatcher: true, + StartProposer: true, + StartChallenger: true, + }) +} + +func startMinimalBatcher( + t devtest.T, + keys devkeys.Keys, + l2Net *L2Network, + l1EL L1ELNode, + l2CL L2CLNode, + l2EL L2ELNode, + batcherOpts ...BatcherOption, +) *L2Batcher { + require := t.Require() + batcherSecret, err := keys.Secret(devkeys.BatcherRole.Key(l2Net.ChainID().ToBig())) + require.NoError(err) + batcherTarget := NewComponentTarget("main", l2Net.ChainID()) + + logger := t.Logger().New("component", "l2-batcher") + logger.SetContext(t.Ctx()) + logger.Info("Batcher key acquired", "addr", crypto.PubkeyToAddress(batcherSecret.PublicKey)) + + batcherCLIConfig := &bss.CLIConfig{ + L1EthRpc: l1EL.UserRPC(), + L2EthRpc: []string{l2EL.UserRPC()}, + RollupRpc: []string{l2CL.UserRPC()}, + MaxPendingTransactions: 7, + MaxChannelDuration: 1, + MaxL1TxSize: 120_000, + TestUseMaxTxSizeForBlobs: false, + TargetNumFrames: 1, + ApproxComprRatio: 0.4, + SubSafetyMargin: 4, + PollInterval: 500 * time.Millisecond, + TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.UserRPC()), batcherSecret), + LogConfig: oplog.CLIConfig{ + Level: log.LevelInfo, + Format: oplog.FormatText, + }, + Stopped: false, + BatchType: derive.SpanBatchType, + MaxBlocksPerSpanBatch: 10, + DataAvailabilityType: batcherFlags.CalldataType, + CompressionAlgo: derive.Brotli, + RPC: oprpc.CLIConfig{ + EnableAdmin: true, + }, + } + for _, opt := range batcherOpts { + if opt == nil { + continue + } + opt(batcherTarget, batcherCLIConfig) + } + + batcherCtx, cancelBatcherCtx := context.WithCancel(t.Ctx()) + closeAppFn := func(cause error) { + 
t.Errorf("closeAppFn called, batcher hit a critical error: %v", cause) + cancelBatcherCtx() + } + batcher, err := bss.BatcherServiceFromCLIConfig( + batcherCtx, + closeAppFn, + "0.0.1", + batcherCLIConfig, + logger, + ) + require.NoError(err) + require.NoError(batcher.Start(t.Ctx())) + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing batcher") + _ = batcher.Stop(ctx) + logger.Info("Closed batcher") + }) + + return &L2Batcher{ + name: batcherTarget.Name, + chainID: batcherTarget.ChainID, + service: batcher, + rpc: batcher.HTTPEndpoint(), + l1RPC: l1EL.UserRPC(), + l2CLRPC: l2CL.UserRPC(), + l2ELRPC: l2EL.UserRPC(), + } +} + +func startMinimalProposer( + t devtest.T, + keys devkeys.Keys, + l2Net *L2Network, + l1EL L1ELNode, + l2CL L2CLNode, + proposerOpts ...ProposerOption, +) *L2Proposer { + require := t.Require() + proposerSecret, err := keys.Secret(devkeys.ProposerRole.Key(l2Net.ChainID().ToBig())) + require.NoError(err) + + logger := t.Logger().New("component", "l2-proposer") + logger.Info("Proposer key acquired", "addr", crypto.PubkeyToAddress(proposerSecret.PublicKey)) + + proposerCLIConfig := &ps.CLIConfig{ + L1EthRpc: l1EL.UserRPC(), + PollInterval: 500 * time.Millisecond, + AllowNonFinalized: true, + TxMgrConfig: setuputils.NewTxMgrConfig(endpoint.URL(l1EL.UserRPC()), proposerSecret), + RPCConfig: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, + LogConfig: oplog.CLIConfig{ + Level: log.LvlInfo, + Format: oplog.FormatText, + }, + MetricsConfig: opmetrics.CLIConfig{}, + PprofConfig: oppprof.CLIConfig{}, + DGFAddress: l2Net.deployment.DisputeGameFactoryProxyAddr().Hex(), + ProposalInterval: 6 * time.Second, + DisputeGameType: 1, + ActiveSequencerCheckDuration: 5 * time.Second, + WaitNodeSync: false, + RollupRpc: l2CL.UserRPC(), + } + for _, opt := range proposerOpts { + if opt == nil { + continue + } + opt(NewComponentTarget("main", l2Net.ChainID()), proposerCLIConfig) + } + + proposer, err := 
ps.ProposerServiceFromCLIConfig(t.Ctx(), "0.0.1", proposerCLIConfig, logger) + require.NoError(err) + require.NoError(proposer.Start(t.Ctx())) + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing proposer") + _ = proposer.Stop(ctx) + logger.Info("Closed proposer") + }) + + return &L2Proposer{ + name: "main", + chainID: l2Net.ChainID(), + service: proposer, + userRPC: proposer.HTTPEndpoint(), + } +} + +func startMinimalChallenger( + t devtest.T, + keys devkeys.Keys, + l1Net *L1Network, + l2Net *L2Network, + l1EL L1ELNode, + l1CL *L1CLNode, + l2EL L2ELNode, + l2CL L2CLNode, + enableCannonKona bool, +) *L2Challenger { + require := t.Require() + challengerSecret, err := keys.Secret(devkeys.ChallengerRole.Key(l2Net.ChainID().ToBig())) + require.NoError(err) + + logger := t.Logger().New("component", "l2-challenger") + logger.Info("Challenger key acquired", "addr", crypto.PubkeyToAddress(challengerSecret.PublicKey)) + + rollupCfgs := []*rollup.Config{l2Net.rollupCfg} + l2Geneses := []*core.Genesis{l2Net.genesis} + options := []sharedchallenger.Option{ + sharedchallenger.WithFactoryAddress(l2Net.deployment.DisputeGameFactoryProxyAddr()), + sharedchallenger.WithPrivKey(challengerSecret), + sharedchallenger.WithCannonConfig(rollupCfgs, l1Net.genesis, l2Geneses, sharedchallenger.MTCannonVariant), + sharedchallenger.WithCannonGameType(), + sharedchallenger.WithPermissionedGameType(), + sharedchallenger.WithFastGames(), + } + if enableCannonKona { + t.Log("Enabling cannon-kona for challenger") + options = append(options, + sharedchallenger.WithCannonKonaConfig(rollupCfgs, l1Net.genesis, l2Geneses), + sharedchallenger.WithCannonKonaGameType(), + ) + } + cfg, err := sharedchallenger.NewPreInteropChallengerConfig( + t.TempDir(), + l1EL.UserRPC(), + l1CL.beaconHTTPAddr, + l2CL.UserRPC(), + l2EL.UserRPC(), + options..., + ) + require.NoError(err, "failed to create pre-interop challenger config") + + svc, err := 
opchallenger.Main(t.Ctx(), logger, cfg, challengermetrics.NoopMetrics) + require.NoError(err) + require.NoError(svc.Start(t.Ctx())) + t.Cleanup(func() { + ctx, cancel := context.WithCancel(t.Ctx()) + cancel() + logger.Info("Closing challenger") + timer := time.AfterFunc(1*time.Minute, func() { + if svc.Stopped() { + return + } + buf := make([]byte, 1<<20) + stackLen := runtime.Stack(buf, true) + logger.Error("Challenger failed to stop; printing all goroutine stacks:\n%v", string(buf[:stackLen])) + }) + _ = svc.Stop(ctx) + timer.Stop() + logger.Info("Closed challenger") + }) + + return &L2Challenger{ + name: "main", + chainIDs: []eth.ChainID{l2Net.ChainID()}, + service: svc, + config: cfg, + } +} + +func applyMinimalGameTypeOptions( + t devtest.T, + keys devkeys.Keys, + l1Net *L1Network, + l2Net *L2Network, + l1EL L1ELNode, + addedGameTypes []gameTypes.GameType, + respectedGameTypes []gameTypes.GameType, +) { + if len(addedGameTypes) == 0 && len(respectedGameTypes) == 0 { + return + } + l1ChainID := l1Net.ChainID() + + for _, gameType := range addedGameTypes { + if gameType == gameTypes.PermissionedGameType { + continue + } + addGameTypeForRuntime(t, keys, PrestateForGameType(t, gameType), gameType, l1ChainID, l1EL.UserRPC(), l2Net) + } + for _, gameType := range respectedGameTypes { + setRespectedGameTypeForRuntime(t, keys, gameType, l1ChainID, l1EL.UserRPC(), l2Net) + } +} diff --git a/op-devstack/sysgo/singlechain_variants.go b/op-devstack/sysgo/singlechain_variants.go new file mode 100644 index 0000000000000..bdfbb989b6d57 --- /dev/null +++ b/op-devstack/sysgo/singlechain_variants.go @@ -0,0 +1,386 @@ +package sysgo + +import ( + "context" + "errors" + "fmt" + "path/filepath" + "sync/atomic" + "time" + + opconductor "github.com/ethereum-optimism/optimism/op-conductor/conductor" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-service/endpoint" + "github.com/ethereum-optimism/optimism/op-service/eth" + 
oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/retry" + oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" + synctesterconfig "github.com/ethereum-optimism/optimism/op-sync-tester/config" + "github.com/ethereum-optimism/optimism/op-sync-tester/synctester" + stconf "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/config" + sttypes "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/types" + "github.com/ethereum/go-ethereum/log" +) + +func NewSingleChainMultiNodeRuntime(t devtest.T, withP2P bool) *SingleChainRuntime { + return NewSingleChainMultiNodeRuntimeWithConfig(t, withP2P, PresetConfig{}) +} + +func NewSingleChainMultiNodeRuntimeWithConfig(t devtest.T, withP2P bool, cfg PresetConfig) *SingleChainRuntime { + runtime := NewMinimalRuntimeWithConfig(t, cfg) + nodeB := addSingleChainOpNode(t, runtime, "b", false, "", cfg.GlobalL2CLOptions...) + if withP2P { + connectSingleChainNodes(t, runtime.L2EL, runtime.L2CL, nodeB) + } + runtime.P2PEnabled = withP2P + return runtime +} + +func NewSingleChainTwoVerifiersRuntime(t devtest.T) *SingleChainRuntime { + return NewSingleChainTwoVerifiersRuntimeWithConfig(t, PresetConfig{}) +} + +func NewSingleChainTwoVerifiersRuntimeWithConfig(t devtest.T, cfg PresetConfig) *SingleChainRuntime { + runtime := NewSingleChainMultiNodeRuntimeWithConfig(t, true, cfg) + nodeB := runtime.Nodes["b"] + t.Require().NotNil(nodeB, "missing single-chain node b") + nodeC := addSingleChainOpNode(t, runtime, "c", false, nodeB.CL.UserRPC(), cfg.GlobalL2CLOptions...) + + connectSingleChainNodes(t, runtime.L2EL, runtime.L2CL, nodeC) + connectSingleChainNodes(t, nodeB.EL, nodeB.CL, nodeC) + + // Follow legacy behavior: test-sequencer is wired against node "b". 
+ replaceSingleChainTestSequencer(t, runtime, "dev", nodeB) + return runtime +} + +func NewSimpleWithSyncTesterRuntime(t devtest.T) *SingleChainRuntime { + return NewSimpleWithSyncTesterRuntimeWithConfig(t, PresetConfig{}) +} + +func NewSimpleWithSyncTesterRuntimeWithConfig(t devtest.T, cfg PresetConfig) *SingleChainRuntime { + runtime := NewMinimalRuntimeWithConfig(t, cfg) + syncTester := startSyncTesterService(t, map[eth.ChainID]string{ + runtime.L2Network.ChainID(): runtime.L2EL.UserRPC(), + }) + syncTesterELCfg := DefaultSyncTesterELConfig() + if len(cfg.GlobalSyncTesterELOptions) > 0 { + syncTesterELTarget := NewComponentTarget("sync-tester-el", runtime.L2Network.ChainID()) + for _, opt := range cfg.GlobalSyncTesterELOptions { + if opt == nil { + continue + } + opt.Apply(t, syncTesterELTarget, syncTesterELCfg) + } + } + syncTesterEL := startSyncTesterELNode( + t, + runtime.L2EL.JWTPath(), + syncTester, + NewComponentTarget("sync-tester-el", runtime.L2Network.ChainID()), + syncTesterELCfg, + ) + jwtSecret := readJWTSecretFromPath(t, runtime.L2EL.JWTPath()) + l2CL2 := startL2CLNode(t, runtime.Keys, runtime.L1Network, runtime.L2Network, runtime.L1EL, runtime.L1CL, syncTesterEL, jwtSecret, l2CLNodeStartConfig{ + Key: "verifier", + IsSequencer: false, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + L2CLOptions: cfg.GlobalL2CLOptions, + }) + node := newSingleChainNodeRuntime("verifier", false, syncTesterEL, l2CL2) + runtime.Nodes[node.Name] = node + connectSingleChainCLPeer(t, runtime.L2CL, node.CL) + runtime.SyncTester = &SyncTesterRuntime{ + Service: syncTester, + Node: node, + } + return runtime +} + +func NewMinimalWithConductorsRuntime(t devtest.T) *SingleChainRuntime { + return NewMinimalWithConductorsRuntimeWithConfig(t, PresetConfig{}) +} + +func NewMinimalWithConductorsRuntimeWithConfig(t devtest.T, cfg PresetConfig) *SingleChainRuntime { + // Conductor tests only exercise sequencing leadership. 
They do not need a + // challenger, and rust e2e jobs do not build cannon artifacts. + runtime := newSingleChainRuntimeWithConfig(t, cfg, singleChainRuntimeSpec{ + BuildWorld: newDefaultSingleChainWorld, + StartPrimary: startDefaultSingleChainPrimary, + StartBatcher: true, + StartProposer: true, + StartChallenger: false, + }) + nodeB := addSingleChainOpNode(t, runtime, "b", true, "", cfg.GlobalL2CLOptions...) + nodeC := addSingleChainOpNode(t, runtime, "c", true, "", cfg.GlobalL2CLOptions...) + + conductorA := startConductorNode(t, "sequencer", runtime.L2Network, runtime.L2CL.(*OpNode), runtime.L2EL, true, false) + conductorB := startConductorNode(t, "b", runtime.L2Network, nodeB.CL.(*OpNode), nodeB.EL, false, true) + conductorC := startConductorNode(t, "c", runtime.L2Network, nodeC.CL.(*OpNode), nodeC.EL, false, true) + startConductorCluster(t, conductorA, []*Conductor{conductorB, conductorC}) + + runtime.Conductors = map[string]*Conductor{ + "sequencer": conductorA, + "b": conductorB, + "c": conductorC, + } + return runtime +} + +func connectSingleChainNodes(t devtest.T, sourceEL L2ELNode, sourceCL L2CLNode, target *SingleChainNodeRuntime) { + connectL2ELPeers(t, t.Logger(), sourceEL.UserRPC(), target.EL.UserRPC(), false) + connectSingleChainCLPeer(t, sourceCL, target.CL) +} + +func connectSingleChainCLPeer(t devtest.T, sourceCL, targetCL L2CLNode) { + connectL2CLPeers(t, t.Logger(), sourceCL, targetCL) +} + +func replaceSingleChainTestSequencer(t devtest.T, runtime *SingleChainRuntime, name string, node *SingleChainNodeRuntime) { + l2EL, ok := node.EL.(*OpGeth) + t.Require().True(ok, "single-chain test sequencer requires an op-geth EL node") + l2CL, ok := node.CL.(*OpNode) + t.Require().True(ok, "single-chain test sequencer requires an op-node CL node") + testSequencer := startTestSequencer( + t, + runtime.Keys, + runtime.L2EL.JWTPath(), + readJWTSecretFromPath(t, runtime.L2EL.JWTPath()), + runtime.L1Network, + runtime.L1EL, + runtime.L1CL, + l2EL, + l2CL, + ) + 
runtime.TestSequencer = newTestSequencerRuntime(testSequencer, name) +} + +func addSingleChainOpNode( + t devtest.T, + runtime *SingleChainRuntime, + name string, + isSequencer bool, + followSource string, + l2Opts ...L2CLOption, +) *SingleChainNodeRuntime { + jwtPath := runtime.L2EL.JWTPath() + jwtSecret := readJWTSecretFromPath(t, jwtPath) + identity := NewELNodeIdentity(0) + l2EL := startL2ELNode(t, runtime.L2Network, jwtPath, jwtSecret, name, identity) + l2CL := startL2CLNode(t, runtime.Keys, runtime.L1Network, runtime.L2Network, runtime.L1EL, runtime.L1CL, l2EL, jwtSecret, l2CLNodeStartConfig{ + Key: name, + IsSequencer: isSequencer, + NoDiscovery: true, + EnableReqResp: true, + UseReqResp: true, + L2FollowSource: followSource, + L2CLOptions: l2Opts, + }) + node := newSingleChainNodeRuntime(name, isSequencer, l2EL, l2CL) + runtime.Nodes[name] = node + return node +} + +func startSyncTesterService(t devtest.T, chainRPCs map[eth.ChainID]string) *SyncTesterService { + require := t.Require() + syncTesters := make(map[sttypes.SyncTesterID]*stconf.SyncTesterEntry) + for chainID, elRPC := range chainRPCs { + id := sttypes.SyncTesterID(fmt.Sprintf("dev-sync-tester-%s", chainID)) + syncTesters[id] = &stconf.SyncTesterEntry{ + ELRPC: endpoint.MustRPC{Value: endpoint.URL(elRPC)}, + ChainID: chainID, + } + } + cfg := &synctesterconfig.Config{ + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + }, + SyncTesters: &stconf.Config{ + SyncTesters: syncTesters, + }, + } + logger := t.Logger().New("component", "sync-tester") + srv, err := synctester.FromConfig(t.Ctx(), cfg, logger) + require.NoError(err, "must setup sync tester service") + require.NoError(srv.Start(t.Ctx())) + t.Cleanup(func() { + ctx, cancel := context.WithCancel(context.Background()) + cancel() + logger.Info("Closing sync tester") + _ = srv.Stop(ctx) + logger.Info("Closed sync tester") + }) + return &SyncTesterService{ + service: srv, + } +} + +func startSyncTesterELNode( + t devtest.T, + jwtPath string, + 
syncTester *SyncTesterService, + target ComponentTarget, + cfg *SyncTesterELConfig, +) *SyncTesterEL { + node := &SyncTesterEL{ + target: target, + jwtPath: jwtPath, + config: cfg, + p: t, + syncTester: syncTester, + } + node.Start() + t.Cleanup(node.Stop) + return node +} + +func startConductorNode( + t devtest.T, + conductorName string, + l2Net *L2Network, + opNode *OpNode, + l2EL L2ELNode, + bootstrap bool, + paused bool, +) *Conductor { + require := t.Require() + serverID := conductorName + require.NotEmpty(serverID, "conductor ID key cannot be empty") + + var conductorRPCEndpoint atomic.Value + conductorRPCEndpoint.Store("") + opNode.cfg.ConductorEnabled = true + opNode.cfg.ConductorRpcTimeout = 5 * time.Second + opNode.cfg.ConductorRpc = func(ctx context.Context) (string, error) { + for { + if endpoint, _ := conductorRPCEndpoint.Load().(string); endpoint != "" { + return endpoint, nil + } + select { + case <-ctx.Done(): + return "", ctx.Err() + case <-time.After(100 * time.Millisecond): + } + } + } + opNode.cfg.Driver.SequencerStopped = true + opNode.Stop() + opNode.Start() + + cfg := opconductor.Config{ + ConsensusAddr: "127.0.0.1", + ConsensusPort: 0, + ConsensusAdvertisedAddr: "", + RaftServerID: serverID, + RaftStorageDir: filepath.Join(t.TempDir(), "raft"), + RaftBootstrap: bootstrap, + RaftSnapshotInterval: 120 * time.Second, + RaftSnapshotThreshold: 8192, + RaftTrailingLogs: 10240, + RaftHeartbeatTimeout: 1000 * time.Millisecond, + RaftLeaderLeaseTimeout: 500 * time.Millisecond, + NodeRPC: opNode.UserRPC(), + ExecutionRPC: l2EL.UserRPC(), + Paused: paused, + HealthCheck: opconductor.HealthCheckConfig{ + Interval: 3600, + UnsafeInterval: 3600, + SafeInterval: 3600, + MinPeerCount: 1, + }, + RollupCfg: *l2Net.rollupCfg, + RPCEnableProxy: false, + LogConfig: oplog.CLIConfig{ + Level: log.LevelInfo, + Format: oplog.FormatText, + Color: false, + }, + RPC: oprpc.CLIConfig{ + ListenAddr: "127.0.0.1", + ListenPort: 0, + }, + } + + logger := 
t.Logger().New("component", "conductor", "name", conductorName, "chain", l2Net.ChainID()) + svc, err := opconductor.New(t.Ctx(), &cfg, logger, "0.0.1") + require.NoError(err) + require.NoError(svc.Start(t.Ctx())) + t.Cleanup(func() { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + logger.Info("Closing conductor") + if err := svc.Stop(ctx); err != nil { + logger.Warn("Failed to close conductor cleanly", "err", err) + } + }) + + out := &Conductor{ + name: conductorName, + chainID: l2Net.ChainID(), + serverID: serverID, + consensusEndpoint: svc.ConsensusEndpoint(), + rpcEndpoint: svc.HTTPEndpoint(), + service: svc, + } + conductorRPCEndpoint.Store(svc.HTTPEndpoint()) + return out +} + +func startConductorCluster(t devtest.T, bootstrap *Conductor, members []*Conductor) { + require := t.Require() + ctx, cancel := context.WithTimeout(t.Ctx(), 90*time.Second) + defer cancel() + + err := retry.Do0(ctx, 90, retry.Fixed(500*time.Millisecond), func() error { + if !bootstrap.service.Leader(ctx) { + return errors.New("bootstrap conductor is not leader yet") + } + return nil + }) + require.NoError(err, "bootstrap conductor never became leader") + + for _, member := range members { + err := retry.Do0(ctx, 40, retry.Fixed(250*time.Millisecond), func() error { + return bootstrap.service.AddServerAsNonvoter(ctx, member.ServerID(), member.ConsensusEndpoint(), 0) + }) + require.NoErrorf(err, "failed to add conductor %s as non-voter", member.ServerID()) + + err = retry.Do0(ctx, 40, retry.Fixed(250*time.Millisecond), func() error { + return bootstrap.service.AddServerAsVoter(ctx, member.ServerID(), member.ConsensusEndpoint(), 0) + }) + require.NoErrorf(err, "failed to add conductor %s as voter", member.ServerID()) + } + + expectedServers := 1 + len(members) + err = retry.Do0(ctx, 90, retry.Fixed(500*time.Millisecond), func() error { + membership, err := bootstrap.service.ClusterMembership(ctx) + if err != nil { + return err + } + if 
len(membership.Servers) != expectedServers { + return fmt.Errorf("expected %d conductors in cluster membership, got %d", expectedServers, len(membership.Servers)) + } + return nil + }) + require.NoError(err, "conductor cluster did not converge to expected membership") + + cluster := append([]*Conductor{bootstrap}, members...) + for _, conductor := range cluster { + err := retry.Do0(ctx, 40, retry.Fixed(250*time.Millisecond), func() error { + return conductor.service.Resume(ctx) + }) + require.NoErrorf(err, "failed to resume conductor %s", conductor.ServerID()) + } + + for _, conductor := range cluster { + err := retry.Do0(ctx, 90, retry.Fixed(500*time.Millisecond), func() error { + if !conductor.service.SequencerHealthy(ctx) { + return fmt.Errorf("conductor %s sequencer is not healthy yet", conductor.ServerID()) + } + return nil + }) + require.NoErrorf(err, "conductor %s never became healthy", conductor.ServerID()) + } +} diff --git a/op-devstack/sysgo/subproc.go b/op-devstack/sysgo/subproc.go index cb60e01344421..148fa031fadd3 100644 --- a/op-devstack/sysgo/subproc.go +++ b/op-devstack/sysgo/subproc.go @@ -12,7 +12,7 @@ import ( // SubProcess is a process that can be started and stopped. 
type SubProcess struct { - p devtest.P + p devtest.CommonT cmd *exec.Cmd stdOutCallback logpipe.LogCallback @@ -24,7 +24,7 @@ type SubProcess struct { mu sync.Mutex } -func NewSubProcess(p devtest.P, stdOutCallback, stdErrCallback logpipe.LogCallback) *SubProcess { +func NewSubProcess(p devtest.CommonT, stdOutCallback, stdErrCallback logpipe.LogCallback) *SubProcess { return &SubProcess{ p: p, stdOutCallback: stdOutCallback, diff --git a/op-devstack/sysgo/superchain.go b/op-devstack/sysgo/superchain.go index ff421c88e03e2..85e82f539eaeb 100644 --- a/op-devstack/sysgo/superchain.go +++ b/op-devstack/sysgo/superchain.go @@ -1,7 +1,6 @@ package sysgo import ( - "github.com/ethereum-optimism/optimism/op-devstack/shim" "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum/go-ethereum/common" ) @@ -20,17 +19,3 @@ func (d *SuperchainDeployment) SuperchainConfigAddr() common.Address { func (d *SuperchainDeployment) ProtocolVersionsAddr() common.Address { return d.protocolVersionsAddr } - -type Superchain struct { - id stack.ComponentID - deployment *SuperchainDeployment -} - -func (s *Superchain) hydrate(system stack.ExtensibleSystem) { - sysSuperchain := shim.NewSuperchain(shim.SuperchainConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: s.id, - Deployment: s.deployment, - }) - system.AddSuperchain(sysSuperchain) -} diff --git a/op-devstack/sysgo/superroot.go b/op-devstack/sysgo/superroot.go index 7c05b3f1ea99a..5d6fd83e0a167 100644 --- a/op-devstack/sysgo/superroot.go +++ b/op-devstack/sysgo/superroot.go @@ -13,7 +13,6 @@ import ( "github.com/ethereum-optimism/optimism/op-challenger/game/fault/contracts/gameargs" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-e2e/bindings" 
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/contracts/bindings/delegatecallproxy" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" @@ -48,234 +47,6 @@ type MigrateInputV2 struct { StartingRespectedGameType uint32 } -func WithSuperRoots(l1ChainID eth.ChainID, l1ELID stack.ComponentID, clIDs []stack.ComponentID, supervisorID stack.ComponentID, primaryL2 eth.ChainID) stack.Option[*Orchestrator] { - return withSuperRoots(l1ChainID, l1ELID, clIDs, primaryL2, func(t devtest.CommonT, o *Orchestrator, timestamp uint64) eth.Bytes32 { - return getSuperRoot(t, o, timestamp, supervisorID) - }) -} - -func WithSuperRootsFromSupernode(l1ChainID eth.ChainID, l1ELID stack.ComponentID, clIDs []stack.ComponentID, supernodeID stack.SupernodeID, primaryL2 eth.ChainID) stack.Option[*Orchestrator] { - return withSuperRoots(l1ChainID, l1ELID, clIDs, primaryL2, func(t devtest.CommonT, o *Orchestrator, timestamp uint64) eth.Bytes32 { - return getSuperRootFromSupernode(t, o, timestamp, supernodeID) - }) -} - -func withSuperRoots(l1ChainID eth.ChainID, l1ELID stack.ComponentID, clIDs []stack.ComponentID, primaryL2 eth.ChainID, getSuperRootAtTimestamp func(t devtest.CommonT, o *Orchestrator, timestamp uint64) eth.Bytes32) stack.Option[*Orchestrator] { - return stack.FnOption[*Orchestrator]{ - FinallyFn: func(o *Orchestrator) { - t := o.P() - require := t.Require() - require.NotNil(o.wb, "must have a world builder") - require.NotEmpty(o.wb.output.ImplementationsDeployment.OpcmImpl, "must have an OPCM implementation") - - l1EL, ok := o.GetL1EL(l1ELID) - require.True(ok, "must have L1 EL node") - rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) - require.NoError(err) - client := ethclient.NewClient(rpcClient) - w3Client := w3.NewClient(rpcClient) - - var superrootTime uint64 - // Supernode does not support super roots at genesis. - // So let's wait for safe heads to advance before querying atTimestamp. 
- for _, clID := range clIDs { - l2CL, ok := o.GetL2CL(clID) - require.True(ok, "must have L2 CL node") - // TODO(#18947): Ideally, we should be able to wait on the supernode's SyncStatus directly - // rather than check the sync statuses of all CLs - rollupClient, err := dial.DialRollupClientWithTimeout(t.Ctx(), t.Logger(), l2CL.UserRPC()) - t.Require().NoError(err) - defer rollupClient.Close() - ctx, cancel := context.WithTimeout(t.Ctx(), time.Minute*2) - err = wait.For(ctx, time.Second*1, func() (bool, error) { - status, err := rollupClient.SyncStatus(ctx) - if err != nil { - return false, err - } - if status == nil { - return false, nil - } - superrootTime = status.SafeL2.Time - return status.SafeL2.Number > 0, nil - }) - cancel() - t.Require().NoError(err, "waiting for supernode chain safe head to advance failed") - } - - superRoot := getSuperRootAtTimestamp(t, o, superrootTime) - - l1pao, err := o.keys.Address(devkeys.ChainOperatorKeys(l1ChainID.ToBig())(devkeys.L1ProxyAdminOwnerRole)) - require.NoError(err, "must have L1 proxy admin owner private key") - - superchainConfigAddr := o.wb.outSuperchainDeployment.SuperchainConfigAddr() - superchainProxyAdmin := getProxyAdmin(t, w3Client, superchainConfigAddr) - require.NotEmpty(superchainProxyAdmin, "superchain proxy admin address is empty") - - // Detect OPCM version to determine if we use V1 or V2 migration input - opcmAddr := o.wb.output.ImplementationsDeployment.OpcmImpl - useV2 := isOPCMV2(t, w3Client, opcmAddr) - - absoluteCannonPrestate := getInteropCannonAbsolutePrestate(t) - absoluteCannonKonaPrestate := getInteropCannonKonaAbsolutePrestate(t) - - // Use primaryL2 to determine which challenger / proposer roles to promote to the shared permissioned fdg - permissionedChainOps := devkeys.ChainOperatorKeys(primaryL2.ToBig()) - proposer, err := o.keys.Address(permissionedChainOps(devkeys.ProposerRole)) - o.P().Require().NoError(err, "must have configured proposer") - challenger, err := 
o.keys.Address(permissionedChainOps(devkeys.ChallengerRole)) - o.P().Require().NoError(err, "must have configured challenger") - - // Build chain configs for both V1 and V2 compatibility - var opChainConfigs []bindings.OPContractsManagerOpChainConfig - var l2ChainIDs []eth.ChainID - for l2ChainID, l2Deployment := range o.wb.outL2Deployment { - l2ChainIDs = append(l2ChainIDs, l2ChainID) - opChainConfigs = append(opChainConfigs, bindings.OPContractsManagerOpChainConfig{ - SystemConfigProxy: l2Deployment.SystemConfigProxyAddr(), - CannonPrestate: absoluteCannonPrestate, - CannonKonaPrestate: absoluteCannonKonaPrestate, - }) - } - - opcmABI, err := bindings.OPContractsManagerMetaData.GetAbi() - o.P().Require().NoError(err, "invalid OPCM ABI") - contract := batching.NewBoundContract(opcmABI, opcmAddr) - - var migrateCallData []byte - if useV2 { - // OPCM V2 (>= 7.0.0) uses IOPContractsManagerMigrator.MigrateInput - var chainSystemConfigs []common.Address - for _, cfg := range opChainConfigs { - chainSystemConfigs = append(chainSystemConfigs, cfg.SystemConfigProxy) - } - - migrateInputV2 := MigrateInputV2{ - ChainSystemConfigs: chainSystemConfigs, - DisputeGameConfigs: []DisputeGameConfigV2{ - { - Enabled: true, - InitBond: big.NewInt(0), - GameType: superCannonGameType, - GameArgs: absoluteCannonPrestate[:], - }, - }, - StartingAnchorRoot: bindings.Proposal{ - Root: common.Hash(superRoot), - L2SequenceNumber: big.NewInt(int64(superrootTime)), - }, - StartingRespectedGameType: superCannonGameType, - } - migrateCall := contract.Call("migrate", migrateInputV2) - migrateCallData, err = migrateCall.Pack() - require.NoError(err) - } else { - // OPCM V1 (< 7.0.0) uses IOPContractsManagerInteropMigrator.MigrateInput - migrateInputV1 := bindings.OPContractsManagerInteropMigratorMigrateInput{ - UsePermissionlessGame: true, - StartingAnchorRoot: bindings.Proposal{ - Root: common.Hash(superRoot), - L2SequenceNumber: big.NewInt(int64(superrootTime)), - }, - GameParameters: 
bindings.OPContractsManagerInteropMigratorGameParameters{ - Proposer: proposer, - Challenger: challenger, - MaxGameDepth: big.NewInt(73), - SplitDepth: big.NewInt(30), - InitBond: big.NewInt(0), - ClockExtension: 10800, - MaxClockDuration: 302400, - }, - OpChainConfigs: opChainConfigs, - } - migrateCall := contract.Call("migrate", migrateInputV1) - migrateCallData, err = migrateCall.Pack() - require.NoError(err) - } - - chainOps := devkeys.ChainOperatorKeys(l1ChainID.ToBig()) - l1PAOKey, err := o.keys.Secret(chainOps(devkeys.L1ProxyAdminOwnerRole)) - require.NoError(err, "must have configured L1 proxy admin owner private key") - transactOpts, err := bind.NewKeyedTransactorWithChainID(l1PAOKey, l1ChainID.ToBig()) - require.NoError(err, "must have transact opts") - transactOpts.Context = t.Ctx() - - t.Log("Deploying delegate call proxy contract") - // The DelegateCallProxy is used to simulate a GnosisSafe proxy that satisfies the delegatecall requirement of the OPCM. - delegateCallProxy, proxyContract := deployDelegateCallProxy(t, transactOpts, client, l1pao) - oldSuperchainProxyAdminOwner := getOwner(t, w3Client, superchainProxyAdmin) - transferOwnership(t, l1PAOKey, client, superchainProxyAdmin, delegateCallProxy) - - oldDisputeGameFactories := make(map[eth.ChainID]common.Address) - for i, opChainConfig := range opChainConfigs { - var portal common.Address - require.NoError( - w3Client.Call( - w3eth.CallFunc(opChainConfig.SystemConfigProxy, optimismPortalFn).Returns(&portal), - )) - portalProxyAdmin := getProxyAdmin(t, w3Client, portal) - transferOwnership(t, l1PAOKey, client, portalProxyAdmin, delegateCallProxy) - - dgf := getDisputeGameFactory(t, w3Client, portal) - transferOwnership(t, l1PAOKey, client, dgf, delegateCallProxy) - oldDisputeGameFactories[l2ChainIDs[i]] = dgf - } - - t.Log("Executing delegate call") - migrateTx, err := proxyContract.ExecuteDelegateCall(transactOpts, opcmAddr, migrateCallData) - require.NoErrorf(err, "migrate delegatecall failed: 
%v", errutil.TryAddRevertReason(err)) - _, err = wait.ForReceiptOK(t.Ctx(), client, migrateTx.Hash()) - require.NoError(err) - - var sharedDGF common.Address - { - for _, l2Deployment := range o.wb.outL2Deployment { - portal := getOptimismPortal(t, w3Client, l2Deployment.SystemConfigProxyAddr()) - addr := getDisputeGameFactory(t, w3Client, portal) - if sharedDGF == (common.Address{}) { - sharedDGF = addr - } else { - require.Equal(sharedDGF, addr, "dispute game factory address is not the same for all deployments") - } - } - require.NotEmpty(getSuperGameImpl(t, w3Client, sharedDGF)) - o.wb.outInteropMigration = &InteropMigration{ - DisputeGameFactory: sharedDGF, - } - } - - // reset ownership transfers - resetOwnershipAfterMigration(t, - o, - l1ChainID.ToBig(), - l1PAOKey, - w3Client, - client, - delegateCallProxy, - opChainConfigs, - ) - - resetOldDisputeGameFactories(t, - o, - l1ChainID.ToBig(), - l1PAOKey, - client, - delegateCallProxy, - oldDisputeGameFactories, - ) - - transferOwnershipForDelegateCallProxy(t, l1ChainID.ToBig(), l1PAOKey, client, delegateCallProxy, superchainProxyAdmin, oldSuperchainProxyAdminOwner) - - superchainProxyAdminOwner := getOwner(t, w3Client, superchainProxyAdmin) - t.Require().Equal(oldSuperchainProxyAdminOwner, superchainProxyAdminOwner, "superchain proxy admin owner is not the L1PAO") - - for _, l2Deployment := range o.wb.outL2Deployment { - l2Deployment.disputeGameFactoryProxy = sharedDGF - } - t.Log("Interop migration complete") - }, - } -} - func deployDelegateCallProxy(t devtest.CommonT, transactOpts *bind.TransactOpts, client *ethclient.Client, owner common.Address) (common.Address, *delegatecallproxy.Delegatecallproxy) { deployAddress, tx, proxyContract, err := delegatecallproxy.DeployDelegatecallproxy(transactOpts, client, owner) t.Require().NoError(err, "DelegateCallProxy deployment failed") @@ -285,16 +56,39 @@ func deployDelegateCallProxy(t devtest.CommonT, transactOpts *bind.TransactOpts, return deployAddress, 
proxyContract } -func getSuperRoot(t devtest.CommonT, o *Orchestrator, timestamp uint64, supervisorID stack.ComponentID) eth.Bytes32 { - supervisor, ok := o.GetSupervisor(supervisorID) - t.Require().True(ok, "must have supervisor") +func awaitSuperrootTime(t devtest.T, cls ...L2CLNode) uint64 { + t.Require().NotEmpty(cls, "at least one L2 CL is required") + + var superrootTime uint64 + for _, l2CL := range cls { + rollupClient, err := dial.DialRollupClientWithTimeout(t.Ctx(), t.Logger(), l2CL.UserRPC()) + t.Require().NoError(err) + defer rollupClient.Close() + ctx, cancel := context.WithTimeout(t.Ctx(), 2*time.Minute) + err = wait.For(ctx, time.Second, func() (bool, error) { + status, err := rollupClient.SyncStatus(ctx) + if err != nil { + return false, err + } + if status == nil || status.SafeL2.Number == 0 { + return false, nil + } + superrootTime = status.SafeL2.Time + return true, nil + }) + cancel() + t.Require().NoError(err, "waiting for chain safe head to advance failed") + } + return superrootTime +} + +func getSupervisorSuperRoot(t devtest.T, supervisor Supervisor, timestamp uint64) eth.Bytes32 { client, err := dial.DialSupervisorClientWithTimeout(t.Ctx(), t.Logger(), supervisor.UserRPC()) t.Require().NoError(err) - // wait for the super root to be ready - ctx, cancel := context.WithTimeout(t.Ctx(), time.Minute*2) - err = wait.For(ctx, time.Second*1, func() (bool, error) { + ctx, cancel := context.WithTimeout(t.Ctx(), 2*time.Minute) + err = wait.For(ctx, time.Second, func() (bool, error) { status, err := client.SyncStatus(ctx) if err != nil { return false, err @@ -309,15 +103,12 @@ func getSuperRoot(t devtest.CommonT, o *Orchestrator, timestamp uint64, supervis return super.SuperRoot } -func getSuperRootFromSupernode(t devtest.CommonT, o *Orchestrator, timestamp uint64, supernodeID stack.SupernodeID) eth.Bytes32 { - supernode, ok := o.supernodes.Get(supernodeID) - t.Require().True(ok, "must have supernode") - +func getSupernodeSuperRoot(t devtest.T, 
supernode *SuperNode, timestamp uint64) eth.Bytes32 { client, err := dial.DialSuperNodeClientWithTimeout(t.Ctx(), t.Logger(), supernode.UserRPC()) t.Require().NoError(err) - ctx, cancel := context.WithTimeout(t.Ctx(), time.Minute*2) - err = wait.For(ctx, time.Second*1, func() (bool, error) { + ctx, cancel := context.WithTimeout(t.Ctx(), 2*time.Minute) + err = wait.For(ctx, time.Second, func() (bool, error) { resp, err := client.SuperRootAtTimestamp(ctx, timestamp) if err != nil { t.Logf("DEBUG: Failed to get super root at timestamp %d: err: %v", timestamp, err) @@ -334,6 +125,179 @@ func getSuperRootFromSupernode(t devtest.CommonT, o *Orchestrator, timestamp uin return resp.Data.SuperRoot } +func migrateSuperRoots( + t devtest.T, + keys devkeys.Keys, + migration *interopMigrationState, + l1ChainID eth.ChainID, + l1EL L1ELNode, + superRoot eth.Bytes32, + superrootTime uint64, + primaryL2 eth.ChainID, +) common.Address { + require := t.Require() + require.NotNil(migration, "interop migration state is required") + require.NotEmpty(migration.opcmImpl, "must have an OPCM implementation") + require.NotEmpty(migration.superchainConfigAddr, "must have a superchain deployment") + require.NotEmpty(migration.l2Deployments, "must have L2 deployments for interop migration") + + rpcClient, err := rpc.DialContext(t.Ctx(), l1EL.UserRPC()) + require.NoError(err) + client := ethclient.NewClient(rpcClient) + w3Client := w3.NewClient(rpcClient) + + l1pao, err := keys.Address(devkeys.ChainOperatorKeys(l1ChainID.ToBig())(devkeys.L1ProxyAdminOwnerRole)) + require.NoError(err, "must have L1 proxy admin owner private key") + + superchainProxyAdmin := getProxyAdmin(t, w3Client, migration.superchainConfigAddr) + require.NotEmpty(superchainProxyAdmin, "superchain proxy admin address is empty") + + useV2 := isOPCMV2(t, w3Client, migration.opcmImpl) + absoluteCannonPrestate := getInteropCannonAbsolutePrestate(t) + absoluteCannonKonaPrestate := getInteropCannonKonaAbsolutePrestate(t) + + 
permissionedChainOps := devkeys.ChainOperatorKeys(primaryL2.ToBig()) + proposer, err := keys.Address(permissionedChainOps(devkeys.ProposerRole)) + require.NoError(err, "must have configured proposer") + challenger, err := keys.Address(permissionedChainOps(devkeys.ChallengerRole)) + require.NoError(err, "must have configured challenger") + + var opChainConfigs []bindings.OPContractsManagerOpChainConfig + var l2ChainIDs []eth.ChainID + for l2ChainID, l2Deployment := range migration.l2Deployments { + l2ChainIDs = append(l2ChainIDs, l2ChainID) + opChainConfigs = append(opChainConfigs, bindings.OPContractsManagerOpChainConfig{ + SystemConfigProxy: l2Deployment.SystemConfigProxyAddr(), + CannonPrestate: absoluteCannonPrestate, + CannonKonaPrestate: absoluteCannonKonaPrestate, + }) + } + + opcmABI, err := bindings.OPContractsManagerMetaData.GetAbi() + require.NoError(err, "invalid OPCM ABI") + contract := batching.NewBoundContract(opcmABI, migration.opcmImpl) + + var migrateCallData []byte + if useV2 { + var chainSystemConfigs []common.Address + for _, cfg := range opChainConfigs { + chainSystemConfigs = append(chainSystemConfigs, cfg.SystemConfigProxy) + } + migrateInputV2 := MigrateInputV2{ + ChainSystemConfigs: chainSystemConfigs, + DisputeGameConfigs: []DisputeGameConfigV2{ + { + Enabled: true, + InitBond: big.NewInt(0), + GameType: superCannonGameType, + GameArgs: absoluteCannonPrestate[:], + }, + }, + StartingAnchorRoot: bindings.Proposal{ + Root: common.Hash(superRoot), + L2SequenceNumber: big.NewInt(int64(superrootTime)), + }, + StartingRespectedGameType: superCannonGameType, + } + migrateCall := contract.Call("migrate", migrateInputV2) + migrateCallData, err = migrateCall.Pack() + require.NoError(err) + } else { + migrateInputV1 := bindings.OPContractsManagerInteropMigratorMigrateInput{ + UsePermissionlessGame: true, + StartingAnchorRoot: bindings.Proposal{ + Root: common.Hash(superRoot), + L2SequenceNumber: big.NewInt(int64(superrootTime)), + }, + 
GameParameters: bindings.OPContractsManagerInteropMigratorGameParameters{ + Proposer: proposer, + Challenger: challenger, + MaxGameDepth: big.NewInt(73), + SplitDepth: big.NewInt(30), + InitBond: big.NewInt(0), + ClockExtension: 10800, + MaxClockDuration: 302400, + }, + OpChainConfigs: opChainConfigs, + } + migrateCall := contract.Call("migrate", migrateInputV1) + migrateCallData, err = migrateCall.Pack() + require.NoError(err) + } + + l1PAOKey, err := keys.Secret(devkeys.ChainOperatorKeys(l1ChainID.ToBig())(devkeys.L1ProxyAdminOwnerRole)) + require.NoError(err, "must have configured L1 proxy admin owner") + transactOpts, err := bind.NewKeyedTransactorWithChainID(l1PAOKey, l1ChainID.ToBig()) + require.NoError(err, "must have transact opts") + transactOpts.Context = t.Ctx() + + t.Log("Deploying delegate call proxy contract") + delegateCallProxy, proxyContract := deployDelegateCallProxy(t, transactOpts, client, l1pao) + oldSuperchainProxyAdminOwner := getOwner(t, w3Client, superchainProxyAdmin) + transferOwnership(t, l1PAOKey, client, superchainProxyAdmin, delegateCallProxy) + + oldDisputeGameFactories := make(map[eth.ChainID]common.Address) + for i, opChainConfig := range opChainConfigs { + var portal common.Address + require.NoError(w3Client.Call(w3eth.CallFunc(opChainConfig.SystemConfigProxy, optimismPortalFn).Returns(&portal))) + portalProxyAdmin := getProxyAdmin(t, w3Client, portal) + transferOwnership(t, l1PAOKey, client, portalProxyAdmin, delegateCallProxy) + + dgf := getDisputeGameFactory(t, w3Client, portal) + transferOwnership(t, l1PAOKey, client, dgf, delegateCallProxy) + oldDisputeGameFactories[l2ChainIDs[i]] = dgf + } + + t.Log("Executing delegate call") + migrateTx, err := proxyContract.ExecuteDelegateCall(transactOpts, migration.opcmImpl, migrateCallData) + require.NoErrorf(err, "migrate delegatecall failed: %v", errutil.TryAddRevertReason(err)) + _, err = wait.ForReceiptOK(t.Ctx(), client, migrateTx.Hash()) + require.NoError(err) + + var sharedDGF 
common.Address + for _, l2Deployment := range migration.l2Deployments { + portal := getOptimismPortal(t, w3Client, l2Deployment.SystemConfigProxyAddr()) + addr := getDisputeGameFactory(t, w3Client, portal) + if sharedDGF == (common.Address{}) { + sharedDGF = addr + } else { + require.Equal(sharedDGF, addr, "dispute game factory address is not the same for all deployments") + } + } + require.NotEmpty(getSuperGameImpl(t, w3Client, sharedDGF)) + + resetOwnershipAfterMigration( + t, + keys, + l1ChainID.ToBig(), + l1PAOKey, + w3Client, + client, + delegateCallProxy, + opChainConfigs, + ) + resetOldDisputeGameFactoriesAfterMigration( + t, + keys, + l1ChainID.ToBig(), + l1PAOKey, + client, + delegateCallProxy, + oldDisputeGameFactories, + ) + transferOwnershipForDelegateCallProxy(t, l1ChainID.ToBig(), l1PAOKey, client, delegateCallProxy, superchainProxyAdmin, oldSuperchainProxyAdminOwner) + + superchainProxyAdminOwner := getOwner(t, w3Client, superchainProxyAdmin) + require.Equal(oldSuperchainProxyAdminOwner, superchainProxyAdminOwner, "superchain proxy admin owner is not the L1PAO") + + for chainID, l2Deployment := range migration.l2Deployments { + l2Deployment.disputeGameFactoryProxy = sharedDGF + migration.l2Deployments[chainID] = l2Deployment + } + t.Log("Interop migration complete") + return sharedDGF +} + func getInteropCannonAbsolutePrestate(t devtest.CommonT) common.Hash { return getAbsolutePrestate(t, "op-program/bin/prestate-proof-interop.json") } @@ -486,7 +450,7 @@ func transferOwnershipForDelegateCallProxy( func resetOwnershipAfterMigration( t devtest.CommonT, - o *Orchestrator, + keys devkeys.Keys, l1ChainID *big.Int, ownerPrivateKey *ecdsa.PrivateKey, w3Client *w3.Client, @@ -494,7 +458,7 @@ func resetOwnershipAfterMigration( delegateCallProxy common.Address, opChainConfigs []bindings.OPContractsManagerOpChainConfig, ) { - l1PAO, err := o.keys.Address(devkeys.ChainOperatorKeys(l1ChainID)(devkeys.L1ProxyAdminOwnerRole)) + l1PAO, err := 
keys.Address(devkeys.ChainOperatorKeys(l1ChainID)(devkeys.L1ProxyAdminOwnerRole)) t.Require().NoError(err, "must have L1 proxy admin owner private key") portal0 := getOptimismPortal(t, w3Client, opChainConfigs[0].SystemConfigProxy) @@ -523,13 +487,9 @@ func resetOwnershipAfterMigration( l1PAO, ) - // The migration temporarily transfers ownership of each portal ProxyAdmin to the DelegateCallProxy - // to satisfy the delegatecall requirement of the OPCM. Reset these back to the L1 proxy admin owner - // after the shared admin contracts are restored. for _, cfg := range opChainConfigs { portal := getOptimismPortal(t, w3Client, cfg.SystemConfigProxy) portalProxyAdmin := getProxyAdmin(t, w3Client, portal) - // In some setups the migration may already restore ownership. Only reset when still owned by the proxy. if getOwner(t, w3Client, portalProxyAdmin) == delegateCallProxy { transferOwnershipForDelegateCallProxy( t, @@ -543,7 +503,6 @@ func resetOwnershipAfterMigration( } } - // The Proxy Admin owner is changed. 
Assert that the admin of other proxies are consistent var sharedAnchorStateRegistryProxy common.Address err = w3Client.Call(w3eth.CallFunc(portal0, anchorStateRegistryFn).Returns(&sharedAnchorStateRegistryProxy)) t.Require().NoError(err) @@ -562,9 +521,9 @@ func resetOwnershipAfterMigration( } } -func resetOldDisputeGameFactories( +func resetOldDisputeGameFactoriesAfterMigration( t devtest.CommonT, - o *Orchestrator, + keys devkeys.Keys, l1ChainID *big.Int, ownerPrivateKey *ecdsa.PrivateKey, client *ethclient.Client, @@ -573,9 +532,8 @@ func resetOldDisputeGameFactories( ) { for l2ChainID, oldDGF := range oldDisputeGameFactories { chainOpsForL2 := devkeys.ChainOperatorKeys(l2ChainID.ToBig()) - l1PAOForL2, err := o.keys.Address(chainOpsForL2(devkeys.L1ProxyAdminOwnerRole)) + l1PAOForL2, err := keys.Address(chainOpsForL2(devkeys.L1ProxyAdminOwnerRole)) t.Require().NoError(err, "must have configured L1 proxy admin owner private key") - // Not required since the old DGFs are not used; but done to prevent surprises later transferOwnershipForDelegateCallProxy( t, l1ChainID, diff --git a/op-devstack/sysgo/supervisor.go b/op-devstack/sysgo/supervisor.go index 4a2ab1a00abc3..62f5cc9bca682 100644 --- a/op-devstack/sysgo/supervisor.go +++ b/op-devstack/sysgo/supervisor.go @@ -1,47 +1,8 @@ package sysgo -import ( - "os" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/dial" - "github.com/ethereum-optimism/optimism/op-service/retry" -) +import "github.com/ethereum-optimism/optimism/op-devstack/stack" type Supervisor interface { - hydrate(system stack.ExtensibleSystem) stack.Lifecycle UserRPC() string } - -func WithSupervisor(supervisorID stack.ComponentID, clusterID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { - switch os.Getenv("DEVSTACK_SUPERVISOR_KIND") { - case "kona": - panic("kona-supervisor has been removed") - 
default: - return WithOPSupervisor(supervisorID, clusterID, l1ELID) - } -} - -func WithManagedBySupervisor(l2CLID stack.ComponentID, supervisorID stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - require := orch.P().Require() - - l2CL, ok := orch.GetL2CL(l2CLID) - require.True(ok, "looking for L2 CL node to connect to supervisor") - interopEndpoint, secret := l2CL.InteropRPC() - - s, ok := orch.GetSupervisor(supervisorID) - require.True(ok, "looking for supervisor") - - ctx := orch.P().Ctx() - supClient, err := dial.DialSupervisorClientWithTimeout(ctx, orch.P().Logger(), s.UserRPC(), client.WithLazyDial()) - orch.P().Require().NoError(err) - - err = retry.Do0(ctx, 10, retry.Exponential(), func() error { - return supClient.AddL2RPC(ctx, interopEndpoint, secret) - }) - require.NoError(err, "must connect CL node %s to supervisor %s", l2CLID, supervisorID) - }) -} diff --git a/op-devstack/sysgo/supervisor_kona.go b/op-devstack/sysgo/supervisor_kona.go new file mode 100644 index 0000000000000..463ce4169032d --- /dev/null +++ b/op-devstack/sysgo/supervisor_kona.go @@ -0,0 +1,99 @@ +package sysgo + +import ( + "sync" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/stack" + "github.com/ethereum-optimism/optimism/op-service/logpipe" + "github.com/ethereum-optimism/optimism/op-service/tasks" + "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" + "github.com/ethereum/go-ethereum/log" +) + +type KonaSupervisor struct { + mu sync.Mutex + + name string + userRPC string + + userProxy *tcpproxy.Proxy + + execPath string + args []string + // Each entry is of the form "key=value". 
+ env []string + + p devtest.CommonT + + sub *SubProcess +} + +var _ stack.Lifecycle = (*KonaSupervisor)(nil) + +func (s *KonaSupervisor) UserRPC() string { + return s.userRPC +} + +func (s *KonaSupervisor) Start() { + s.mu.Lock() + defer s.mu.Unlock() + if s.sub != nil { + s.p.Logger().Warn("Kona-supervisor already started") + return + } + + // Create a proxy for the user RPC, + // so other services can connect, and stay connected, across restarts. + if s.userProxy == nil { + s.userProxy = tcpproxy.New(s.p.Logger()) + s.p.Require().NoError(s.userProxy.Start()) + s.p.Cleanup(func() { + s.userProxy.Close() + }) + s.userRPC = "http://" + s.userProxy.Addr() + } + + // Create the sub-process. + // We pipe sub-process logs to the test-logger. + // And inspect them along the way, to get the RPC server address. + logOut := logpipe.ToLoggerWithMinLevel(s.p.Logger().New("src", "stdout"), log.LevelWarn) + logErr := logpipe.ToLoggerWithMinLevel(s.p.Logger().New("src", "stderr"), log.LevelWarn) + userRPC := make(chan string, 1) + onLogEntry := func(e logpipe.LogEntry) { + switch e.LogMessage() { + case "RPC server bound to address": + userRPC <- "http://" + e.FieldValue("addr").(string) + } + } + stdOutLogs := logpipe.LogCallback(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logOut(e) + onLogEntry(e) + }) + stdErrLogs := logpipe.LogCallback(func(line []byte) { + e := logpipe.ParseRustStructuredLogs(line) + logErr(e) + }) + + s.sub = NewSubProcess(s.p, stdOutLogs, stdErrLogs) + err := s.sub.Start(s.execPath, s.args, s.env) + s.p.Require().NoError(err, "Must start") + + var userRPCAddr string + s.p.Require().NoError(tasks.Await(s.p.Ctx(), userRPC, &userRPCAddr), "need user RPC") + + s.userProxy.SetUpstream(ProxyAddr(s.p.Require(), userRPCAddr)) +} + +func (s *KonaSupervisor) Stop() { + s.mu.Lock() + defer s.mu.Unlock() + if s.sub == nil { + s.p.Logger().Warn("kona-supervisor already stopped") + return + } + err := s.sub.Stop(true) + 
s.p.Require().NoError(err, "Must stop") + s.sub = nil +} diff --git a/op-devstack/sysgo/supervisor_op.go b/op-devstack/sysgo/supervisor_op.go index e7ffe9c0eaaa7..dffdd8216b7a8 100644 --- a/op-devstack/sysgo/supervisor_op.go +++ b/op-devstack/sysgo/supervisor_op.go @@ -4,31 +4,24 @@ import ( "context" "sync" + "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/testutils/tcpproxy" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum/go-ethereum/log" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/client" - oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/metrics" - "github.com/ethereum-optimism/optimism/op-service/oppprof" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" supervisorConfig "github.com/ethereum-optimism/optimism/op-supervisor/config" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/syncnode" ) type OpSupervisor struct { mu sync.Mutex - id stack.ComponentID + name string userRPC string cfg *supervisorConfig.Config - p devtest.P + p devtest.CommonT logger log.Logger service *supervisor.SupervisorService @@ -38,19 +31,6 @@ type OpSupervisor struct { var _ stack.Lifecycle = (*OpSupervisor)(nil) -func (s *OpSupervisor) hydrate(sys stack.ExtensibleSystem) { - tlog := sys.Logger().New("id", s.id) - supClient, err := client.NewRPC(sys.T().Ctx(), tlog, s.userRPC, client.WithLazyDial()) - sys.T().Require().NoError(err) - sys.T().Cleanup(supClient.Close) - - sys.AddSupervisor(shim.NewSupervisor(shim.SupervisorConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: s.id, - Client: supClient, - })) -} - func (s *OpSupervisor) UserRPC() string { return s.userRPC } @@ -98,61 +78,3 @@ func (s 
*OpSupervisor) Stop() { s.service = nil } - -func WithOPSupervisor(supervisorID stack.ComponentID, clusterID stack.ComponentID, l1ELID stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), supervisorID)) - require := p.Require() - - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok, "need L1 EL node to connect supervisor to") - - cluster, ok := orch.GetCluster(clusterID) - require.True(ok, "need cluster to determine dependency set") - - require.NotNil(cluster.cfgset, "need a full config set") - require.NoError(cluster.cfgset.CheckChains(), "config set must be valid") - cfg := &supervisorConfig.Config{ - MetricsConfig: metrics.CLIConfig{ - Enabled: false, - }, - PprofConfig: oppprof.CLIConfig{ - ListenEnabled: false, - }, - LogConfig: oplog.CLIConfig{ // ignored, logger overrides this - Level: log.LevelDebug, - Format: oplog.FormatText, - }, - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - // When supervisor starts, store its RPC port here - // given by the os, to reclaim when restart. - ListenPort: 0, - EnableAdmin: true, - }, - SyncSources: &syncnode.CLISyncNodes{}, // no sync-sources - L1RPC: l1EL.UserRPC(), - // Note: datadir is created here, - // persistent across stop/start, for the duration of the package execution. 
- Datadir: orch.p.TempDir(), - Version: "dev", - FullConfigSetSource: cluster.cfgset, - MockRun: false, - SynchronousProcessors: false, - DatadirSyncEndpoint: "", - } - - plog := p.Logger() - supervisorNode := &OpSupervisor{ - id: supervisorID, - userRPC: "", // set on start - cfg: cfg, - p: p, - logger: plog, - service: nil, // set on start - } - orch.registry.Register(supervisorID, supervisorNode) - supervisorNode.Start() - orch.p.Cleanup(supervisorNode.Stop) - }) -} diff --git a/op-devstack/sysgo/sync_tester.go b/op-devstack/sysgo/sync_tester.go index 185f3618bb2b6..b34c6c898827a 100644 --- a/op-devstack/sysgo/sync_tester.go +++ b/op-devstack/sysgo/sync_tester.go @@ -1,131 +1,38 @@ package sysgo import ( - "context" - "fmt" - - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/endpoint" "github.com/ethereum-optimism/optimism/op-service/eth" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - "github.com/ethereum-optimism/optimism/op-sync-tester/config" - "github.com/ethereum-optimism/optimism/op-sync-tester/synctester" - - stconf "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/config" - sttypes "github.com/ethereum-optimism/optimism/op-sync-tester/synctester/backend/types" ) // Caveat: id is binded by a single EL(chainID), but service can support multiple ELs type SyncTesterService struct { - id stack.ComponentID service *synctester.Service } -func (n *SyncTesterService) hydrate(system stack.ExtensibleSystem) { - require := system.T().Require() - - for syncTesterID, chainID := range n.service.SyncTesters() { - syncTesterRPC := n.service.SyncTesterRPC(chainID, false) - rpcCl, err := client.NewRPC(system.T().Ctx(), system.Logger(), syncTesterRPC, client.WithLazyDial()) - require.NoError(err) - system.T().Cleanup(rpcCl.Close) - id := 
stack.NewSyncTesterID(syncTesterID.String(), chainID) - front := shim.NewSyncTester(shim.SyncTesterConfig{ - CommonConfig: shim.NewCommonConfig(system.T()), - ID: id, - Addr: syncTesterRPC, - Client: rpcCl, - }) - net := system.Network(chainID).(stack.ExtensibleNetwork) - net.AddSyncTester(front) +func (n *SyncTesterService) DefaultEndpoint(chainID eth.ChainID) (string, string, bool) { + if n == nil || n.service == nil { + return "", "", false } -} - -func WithSyncTester(syncTesterID stack.ComponentID, l2ELs []stack.ComponentID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), syncTesterID)) - - require := p.Require() - - require.Nil(orch.syncTester, "can only support a single sync-tester-service in sysgo") - - syncTesters := make(map[sttypes.SyncTesterID]*stconf.SyncTesterEntry) - - for _, elID := range l2ELs { - id := sttypes.SyncTesterID(fmt.Sprintf("dev-sync-tester-%s", elID.ChainID())) - require.NotContains(syncTesters, id, "one sync tester per chain only") - - el, ok := orch.GetL2EL(elID) - require.True(ok, "need L2 EL for sync tester", elID) - - syncTesters[id] = &stconf.SyncTesterEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL(el.UserRPC())}, - ChainID: elID.ChainID(), - } + for syncTesterID, mappedChainID := range n.service.SyncTesters() { + if mappedChainID != chainID { + continue } - - cfg := &config.Config{ - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - }, - SyncTesters: &stconf.Config{ - SyncTesters: syncTesters, - }, - } - logger := p.Logger() - srv, err := synctester.FromConfig(p.Ctx(), cfg, logger) - require.NoError(err, "must setup sync tester service") - require.NoError(srv.Start(p.Ctx())) - p.Cleanup(func() { - ctx, cancel := context.WithCancel(context.Background()) - cancel() // force-quit - logger.Info("Closing sync tester") - _ = srv.Stop(ctx) - logger.Info("Closed sync tester") - }) - orch.syncTester = &SyncTesterService{id: syncTesterID, 
service: srv} - }) + return syncTesterID.String(), n.service.SyncTesterRPC(chainID, false), true + } + return "", "", false } -func WithSyncTesterWithExternalEndpoint(syncTesterID stack.ComponentID, endpointRPC string, chainID eth.ChainID) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), syncTesterID)) - - require := p.Require() - - require.Nil(orch.syncTester, "can only support a single sync-tester-service in sysgo") - - syncTesters := make(map[sttypes.SyncTesterID]*stconf.SyncTesterEntry) - - // Create a sync tester entry with the external endpoint - id := sttypes.SyncTesterID(fmt.Sprintf("dev-sync-tester-%s", chainID)) - syncTesters[id] = &stconf.SyncTesterEntry{ - ELRPC: endpoint.MustRPC{Value: endpoint.URL(endpointRPC)}, - ChainID: chainID, - } +func (n *SyncTesterService) RPC() string { + if n == nil || n.service == nil { + return "" + } + return n.service.RPC() +} - cfg := &config.Config{ - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - }, - SyncTesters: &stconf.Config{ - SyncTesters: syncTesters, - }, - } - logger := p.Logger() - srv, err := synctester.FromConfig(p.Ctx(), cfg, logger) - require.NoError(err, "must setup sync tester service") - require.NoError(srv.Start(p.Ctx())) - p.Cleanup(func() { - ctx, cancel := context.WithCancel(context.Background()) - cancel() // force-quit - logger.Info("Closing sync tester") - _ = srv.Stop(ctx) - logger.Info("Closed sync tester") - }) - orch.syncTester = &SyncTesterService{id: syncTesterID, service: srv} - }) +func (n *SyncTesterService) SyncTesterRPCPath(chainID eth.ChainID, withSessionID bool) string { + if n == nil || n.service == nil { + return "" + } + return n.service.SyncTesterRPCPath(chainID, withSessionID) } diff --git a/op-devstack/sysgo/system.go b/op-devstack/sysgo/system.go deleted file mode 100644 index 71cc861e60b68..0000000000000 --- a/op-devstack/sysgo/system.go +++ /dev/null @@ -1,934 +0,0 @@ -package 
sysgo - -import ( - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -var ( - DefaultL1ID = eth.ChainIDFromUInt64(900) - DefaultL2AID = eth.ChainIDFromUInt64(901) - DefaultL2BID = eth.ChainIDFromUInt64(902) -) - -type DefaultMinimalSystemIDs struct { - L1 stack.ComponentID - L1EL stack.ComponentID - L1CL stack.ComponentID - - L2 stack.ComponentID - L2CL stack.ComponentID - L2EL stack.ComponentID - - L2Batcher stack.ComponentID - L2Proposer stack.ComponentID - L2Challenger stack.ComponentID - - TestSequencer stack.ComponentID -} - -func NewDefaultMinimalSystemIDs(l1ID, l2ID eth.ChainID) DefaultMinimalSystemIDs { - ids := DefaultMinimalSystemIDs{ - L1: stack.NewL1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.NewL2NetworkID(l2ID), - L2CL: stack.NewL2CLNodeID("sequencer", l2ID), - L2EL: stack.NewL2ELNodeID("sequencer", l2ID), - L2Batcher: stack.NewL2BatcherID("main", l2ID), - L2Proposer: stack.NewL2ProposerID("main", l2ID), - L2Challenger: stack.NewL2ChallengerID("main", l2ID), - TestSequencer: stack.NewTestSequencerID("test-sequencer"), - } - return ids -} - -func DefaultMinimalSystem(dest *DefaultMinimalSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultMinimalSystemIDs(DefaultL1ID, DefaultL2AID) - return defaultMinimalSystemOpts(&ids, dest) -} - -func defaultMinimalSystemOpts(ids *DefaultMinimalSystemIDs, dest *DefaultMinimalSystemIDs) stack.CombinedOption[*Orchestrator] { - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - 
WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2EL)) - opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, L2CLSequencer())) - - opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) - opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ - ids.L2EL, - })) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = *ids - })) - - return opt -} - -// DefaultTwoL2System defines a minimal system with a single L1 and two L2 chains, -// without interop or supervisor: both L2s get their own ELs, and we attach L2CL nodes -// via the default L2CL selector (which can be set to supernode to share a single process). 
-type DefaultTwoL2SystemIDs struct { - L1 stack.ComponentID - L1EL stack.ComponentID - L1CL stack.ComponentID - - L2A stack.ComponentID - L2ACL stack.ComponentID - L2AEL stack.ComponentID - - L2B stack.ComponentID - L2BCL stack.ComponentID - L2BEL stack.ComponentID - - Supernode stack.SupernodeID - TestSequencer stack.ComponentID - L2ABatcher stack.ComponentID - L2AProposer stack.ComponentID - L2BBatcher stack.ComponentID - L2BProposer stack.ComponentID -} - -func NewDefaultTwoL2SystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultTwoL2SystemIDs { - return DefaultTwoL2SystemIDs{ - L1: stack.NewL1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2A: stack.NewL2NetworkID(l2AID), - L2ACL: stack.NewL2CLNodeID("sequencer", l2AID), - L2AEL: stack.NewL2ELNodeID("sequencer", l2AID), - L2B: stack.NewL2NetworkID(l2BID), - L2BCL: stack.NewL2CLNodeID("sequencer", l2BID), - L2BEL: stack.NewL2ELNodeID("sequencer", l2BID), - Supernode: stack.NewSupernodeID("supernode-two-l2-system", l2AID, l2BID), - TestSequencer: stack.NewTestSequencerID("test-sequencer-2l2"), - L2ABatcher: stack.NewL2BatcherID("main", l2AID), - L2AProposer: stack.NewL2ProposerID("main", l2AID), - L2BBatcher: stack.NewL2BatcherID("main", l2BID), - L2BProposer: stack.NewL2ProposerID("main", l2BID), - } -} - -func DefaultTwoL2System(dest *DefaultTwoL2SystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultTwoL2SystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2AEL)) - 
opt.Add(WithL2CLNode(ids.L2ACL, ids.L1CL, ids.L1EL, ids.L2AEL, L2CLSequencer())) - - opt.Add(WithL2ELNode(ids.L2BEL)) - opt.Add(WithL2CLNode(ids.L2BCL, ids.L1CL, ids.L1EL, ids.L2BEL, L2CLSequencer())) - - opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) - opt.Add(WithProposer(ids.L2AProposer, ids.L1EL, &ids.L2ACL, nil)) - - opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) - opt.Add(WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -// DefaultSupernodeTwoL2System runs two L2 chains that share a single supernode instance for their CL, -// wiring thin L2CL wrappers that route via the supernode RPC router. -func DefaultSupernodeTwoL2System(dest *DefaultTwoL2SystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultTwoL2SystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up (supernode)") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2AEL)) - opt.Add(WithL2ELNode(ids.L2BEL)) - - // Shared supernode for both L2 chains - opt.Add(WithSharedSupernodeCLs(ids.Supernode, []L2CLs{{CLID: ids.L2ACL, ELID: ids.L2AEL}, {CLID: ids.L2BCL, ELID: ids.L2BEL}}, ids.L1CL, ids.L1EL)) - - opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) - opt.Add(WithProposer(ids.L2AProposer, ids.L1EL, &ids.L2ACL, nil)) - - opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, 
ids.L2BCL, ids.L2BEL)) - opt.Add(WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -// DefaultSupernodeInteropTwoL2System runs two L2 chains with a shared supernode that has -// interop verification enabled. Use delaySeconds=0 for interop at genesis, or a positive value -// to test the transition from normal safety to interop-verified safety. -func DefaultSupernodeInteropTwoL2System(dest *DefaultTwoL2SystemIDs, delaySeconds uint64) stack.Option[*Orchestrator] { - ids := NewDefaultTwoL2SystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - if delaySeconds == 0 { - o.P().Logger().Info("Setting up (supernode with interop)") - } else { - o.P().Logger().Info("Setting up (supernode with delayed interop)", "delay_seconds", delaySeconds) - } - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), - WithInteropAtGenesis(), // Enable interop contracts - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2AEL)) - opt.Add(WithL2ELNode(ids.L2BEL)) - - // Shared supernode for both L2 chains with interop enabled - cls := []L2CLs{{CLID: ids.L2ACL, ELID: ids.L2AEL}, {CLID: ids.L2BCL, ELID: ids.L2BEL}} - if delaySeconds == 0 { - opt.Add(WithSharedSupernodeCLsInterop(ids.Supernode, cls, ids.L1CL, ids.L1EL)) - } else { - opt.Add(WithSharedSupernodeCLsInteropDelayed(ids.Supernode, cls, ids.L1CL, ids.L1EL, delaySeconds)) - } - - opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) - opt.Add(WithProposer(ids.L2AProposer, ids.L1EL, 
&ids.L2ACL, nil)) - - opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) - opt.Add(WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - - // Test sequencer for deterministic block building on both L2 chains - opt.Add(WithTestSequencer2L2(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L2BCL, ids.L1EL, ids.L2AEL, ids.L2BEL)) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -type DefaultMinimalSystemWithSyncTesterIDs struct { - DefaultMinimalSystemIDs - - SyncTester stack.ComponentID -} - -func NewDefaultMinimalSystemWithSyncTesterIDs(l1ID, l2ID eth.ChainID) DefaultMinimalSystemWithSyncTesterIDs { - minimal := NewDefaultMinimalSystemIDs(l1ID, l2ID) - return DefaultMinimalSystemWithSyncTesterIDs{ - DefaultMinimalSystemIDs: minimal, - SyncTester: stack.NewSyncTesterID("sync-tester", l2ID), - } -} - -func DefaultMinimalSystemWithSyncTester(dest *DefaultMinimalSystemWithSyncTesterIDs, fcu eth.FCUState) stack.Option[*Orchestrator] { - l1ID := eth.ChainIDFromUInt64(900) - l2ID := eth.ChainIDFromUInt64(901) - ids := NewDefaultMinimalSystemWithSyncTesterIDs(l1ID, l2ID) - - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2EL)) - opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, L2CLSequencer())) - - opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) - opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) - - 
opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ - ids.L2EL, - })) - - opt.Add(WithSyncTester(ids.SyncTester, []stack.ComponentID{ids.L2EL})) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -type DefaultSingleChainInteropSystemIDs struct { - L1 stack.ComponentID - L1EL stack.ComponentID - L1CL stack.ComponentID - - Superchain stack.ComponentID - Cluster stack.ComponentID - - Supervisor stack.ComponentID - TestSequencer stack.ComponentID - - L2A stack.ComponentID - L2ACL stack.ComponentID - L2AEL stack.ComponentID - - L2ABatcher stack.ComponentID - L2AProposer stack.ComponentID - L2ChallengerA stack.ComponentID -} - -func NewDefaultSingleChainInteropSystemIDs(l1ID, l2AID eth.ChainID) DefaultSingleChainInteropSystemIDs { - ids := DefaultSingleChainInteropSystemIDs{ - L1: stack.NewL1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - Superchain: stack.NewSuperchainID("main"), // TODO(#15244): hardcoded to match the deployer default ID - Cluster: stack.NewClusterID("main"), - Supervisor: stack.NewSupervisorID("1-primary"), // prefix with number for ordering of supervisors - TestSequencer: stack.NewTestSequencerID("dev"), - L2A: stack.NewL2NetworkID(l2AID), - L2ACL: stack.NewL2CLNodeID("sequencer", l2AID), - L2AEL: stack.NewL2ELNodeID("sequencer", l2AID), - L2ABatcher: stack.NewL2BatcherID("main", l2AID), - L2AProposer: stack.NewL2ProposerID("main", l2AID), - L2ChallengerA: stack.NewL2ChallengerID("main", l2AID), - } - return ids -} - -func DefaultSingleChainInteropSystem(dest *DefaultSingleChainInteropSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultSingleChainInteropSystemIDs(DefaultL1ID, DefaultL2AID) - opt := stack.Combine[*Orchestrator]() - opt.Add(baseInteropSystem(&ids)) - - 
opt.Add(WithL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2ACL, []stack.ComponentID{ - ids.L2AEL, - })) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL})) - - // Upon evaluation of the option, export the contents we created. - // Ids here are static, but other things may be exported too. - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -// DefaultMinimalInteropSystem creates a minimal system with interop contracts but no supervisor. -// This tests interop contract deployment with local finality (SupervisorEnabled=false in op-node). -func DefaultMinimalInteropSystem(dest *DefaultMinimalSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultMinimalSystemIDs(DefaultL1ID, DefaultL2AID) - opt := stack.Combine[*Orchestrator]() - - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up minimal interop (no supervisor)") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - WithInteropAtGenesis(), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - // No supervisor - interop with local finality only - opt.Add(WithL2ELNode(ids.L2EL)) - opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, L2CLSequencer())) - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) - opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -// baseInteropSystem defines a system that supports interop with a single chain -// Components 
which are shared across multiple chains are not started, allowing them to be added later including -// any additional chains that have been added. -func baseInteropSystem(ids *DefaultSingleChainInteropSystemIDs) stack.Option[*Orchestrator] { - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), - WithInteropAtGenesis(), // this can be overridden by later options - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithSupervisor(ids.Supervisor, ids.Cluster, ids.L1EL)) - - opt.Add(WithL2ELNode(ids.L2AEL, L2ELWithSupervisor(ids.Supervisor))) - opt.Add(WithL2CLNode(ids.L2ACL, ids.L1CL, ids.L1EL, ids.L2AEL, L2CLSequencer(), L2CLIndexing())) - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) - opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) - - opt.Add(WithManagedBySupervisor(ids.L2ACL, ids.Supervisor)) - - // Note: we provide L2 CL nodes still, even though they are not used post-interop. - // Since we may create an interop infra-setup, before interop is even scheduled to run. - opt.Add(WithProposer(ids.L2AProposer, ids.L1EL, &ids.L2ACL, &ids.Supervisor)) - - opt.Add(WithL2MetricsDashboard()) - - return opt -} - -// struct of the services, so we can access them later and do not have to guess their IDs. 
-type DefaultInteropSystemIDs struct { - DefaultSingleChainInteropSystemIDs - - L2B stack.ComponentID - L2BCL stack.ComponentID - L2BEL stack.ComponentID - - L2BBatcher stack.ComponentID - L2BProposer stack.ComponentID - L2ChallengerB stack.ComponentID -} - -func NewDefaultInteropSystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultInteropSystemIDs { - ids := DefaultInteropSystemIDs{ - DefaultSingleChainInteropSystemIDs: NewDefaultSingleChainInteropSystemIDs(l1ID, l2AID), - L2B: stack.NewL2NetworkID(l2BID), - L2BCL: stack.NewL2CLNodeID("sequencer", l2BID), - L2BEL: stack.NewL2ELNodeID("sequencer", l2BID), - L2BBatcher: stack.NewL2BatcherID("main", l2BID), - L2BProposer: stack.NewL2ProposerID("main", l2BID), - L2ChallengerB: stack.NewL2ChallengerID("main", l2BID), - } - return ids -} - -func DefaultInteropSystem(dest *DefaultInteropSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultInteropSystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) - opt := stack.Combine[*Orchestrator]() - - // start with single chain interop system - opt.Add(baseInteropSystem(&ids.DefaultSingleChainInteropSystemIDs)) - - opt.Add(WithDeployerOptions( - WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), - WithInteropAtGenesis(), // this can be overridden by later options - )) - opt.Add(WithL2ELNode(ids.L2BEL, L2ELWithSupervisor(ids.Supervisor))) - opt.Add(WithL2CLNode(ids.L2BCL, ids.L1CL, ids.L1EL, ids.L2BEL, L2CLSequencer(), L2CLIndexing())) - opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) - - opt.Add(WithManagedBySupervisor(ids.L2BCL, ids.Supervisor)) - - // Note: we provide L2 CL nodes still, even though they are not used post-interop. - // Since we may create an interop infra-setup, before interop is even scheduled to run. - opt.Add(WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, &ids.Supervisor)) - - // Deploy separate challengers for each chain. Can be reduced to a single challenger when the DisputeGameFactory - // is actually shared. 
- opt.Add(WithL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2ACL, []stack.ComponentID{ - ids.L2AEL, ids.L2BEL, - })) - opt.Add(WithL2Challenger(ids.L2ChallengerB, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, &ids.L2BCL, []stack.ComponentID{ - ids.L2BEL, ids.L2AEL, - })) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - - opt.Add(WithL2MetricsDashboard()) - - // Upon evaluation of the option, export the contents we created. - // Ids here are static, but other things may be exported too. - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -func DefaultIsthmusSuperProofsSystem(dest *DefaultInteropSystemIDs) stack.Option[*Orchestrator] { - return defaultSuperProofsSystem(dest) -} - -func DefaultInteropProofsSystem(dest *DefaultInteropSystemIDs) stack.Option[*Orchestrator] { - return defaultSuperProofsSystem(dest, WithInteropAtGenesis()) -} - -type DefaultSupernodeInteropProofsSystemIDs struct { - DefaultInteropSystemIDs - Supernode stack.SupernodeID -} - -func NewDefaultSupernodeInteropProofsSystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultSupernodeInteropProofsSystemIDs { - return DefaultSupernodeInteropProofsSystemIDs{ - DefaultInteropSystemIDs: NewDefaultInteropSystemIDs(l1ID, l2AID, l2BID), - Supernode: stack.NewSupernodeID("supernode-two-system-proofs", l2AID, l2BID), - } -} - -func DefaultSupernodeIsthmusSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystemIDs) stack.Option[*Orchestrator] { - return defaultSupernodeSuperProofsSystem(dest, nil) -} - -// DefaultSupernodeInteropProofsSystem creates a super-roots proofs system that sources super-roots via op-supernode -// (instead of op-supervisor). Interop is enabled at genesis. 
-func DefaultSupernodeInteropProofsSystem(dest *DefaultSupernodeInteropProofsSystemIDs) stack.Option[*Orchestrator] { - return defaultSupernodeSuperProofsSystem(dest, - []SupernodeOption{WithSupernodeInteropAtGenesis()}, - WithInteropAtGenesis()) -} - -func defaultSupernodeSuperProofsSystem(dest *DefaultSupernodeInteropProofsSystemIDs, snOpts []SupernodeOption, deployerOpts ...DeployerOption) stack.CombinedOption[*Orchestrator] { - ids := NewDefaultSupernodeInteropProofsSystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) - opt := stack.Combine[*Orchestrator]() - - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up (supernode)") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), WithDeployerOptions( - append([]DeployerOption{ - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), - WithDevFeatureEnabled(deployer.OptimismPortalInteropDevFlag), - }, deployerOpts...)..., - )) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2AEL)) - opt.Add(WithL2ELNode(ids.L2BEL)) - - // Shared supernode for both L2 chains (registers per-chain L2CL proxies) - opt.Add(WithSharedSupernodeCLs(ids.Supernode, - []L2CLs{{CLID: ids.L2ACL, ELID: ids.L2AEL}, {CLID: ids.L2BCL, ELID: ids.L2BEL}}, - ids.L1CL, ids.L1EL, snOpts...)) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) - - opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) - opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) - - // Run super roots migration using supernode as super root source - opt.Add(WithSuperRootsFromSupernode(ids.L1.ChainID(), ids.L1EL, []stack.ComponentID{ids.L2ACL, ids.L2BCL}, ids.Supernode, ids.L2A.ChainID())) - - // Start challenger after migration; use supernode RPCs as super-roots source. 
- opt.Add(WithSupernodeL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supernode, &ids.Cluster, []stack.ComponentID{ - ids.L2BEL, ids.L2AEL, - })) - - // Start proposer after migration; use supernode RPCs as proposal source. - opt.Add(WithSupernodeProposer(ids.L2AProposer, ids.L1EL, &ids.Supernode)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -// DefaultSingleChainSupernodeProofsSystemIDs holds IDs for a single-chain supernode proof system. -type DefaultSingleChainSupernodeProofsSystemIDs struct { - DefaultSingleChainInteropSystemIDs - Supernode stack.SupernodeID -} - -func NewDefaultSingleChainSupernodeProofsSystemIDs(l1ID, l2AID eth.ChainID) DefaultSingleChainSupernodeProofsSystemIDs { - return DefaultSingleChainSupernodeProofsSystemIDs{ - DefaultSingleChainInteropSystemIDs: NewDefaultSingleChainInteropSystemIDs(l1ID, l2AID), - Supernode: stack.NewSupernodeID("supernode-single-system-proofs", l2AID), - } -} - -// DefaultSingleChainSupernodeIsthmusSuperProofsSystem creates a single-chain super-roots proofs -// system using op-supernode without interop at genesis (preinterop). -func DefaultSingleChainSupernodeIsthmusSuperProofsSystem(dest *DefaultSingleChainSupernodeProofsSystemIDs) stack.Option[*Orchestrator] { - return defaultSingleChainSupernodeSuperProofsSystem(dest, nil) -} - -// DefaultSingleChainSupernodeInteropProofsSystem creates a single-chain super-roots proofs -// system using op-supernode with interop enabled at genesis. 
-func DefaultSingleChainSupernodeInteropProofsSystem(dest *DefaultSingleChainSupernodeProofsSystemIDs) stack.Option[*Orchestrator] { - return defaultSingleChainSupernodeSuperProofsSystem(dest, - []SupernodeOption{WithSupernodeInteropAtGenesis()}, - WithInteropAtGenesis()) -} - -func defaultSingleChainSupernodeSuperProofsSystem(dest *DefaultSingleChainSupernodeProofsSystemIDs, snOpts []SupernodeOption, deployerOpts ...DeployerOption) stack.CombinedOption[*Orchestrator] { - ids := NewDefaultSingleChainSupernodeProofsSystemIDs(DefaultL1ID, DefaultL2AID) - opt := stack.Combine[*Orchestrator]() - - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up single-chain (supernode)") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), WithDeployerOptions( - append([]DeployerOption{ - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), - WithDevFeatureEnabled(deployer.OptimismPortalInteropDevFlag), - }, deployerOpts...)..., - )) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2AEL)) - - // Shared supernode for the single L2 chain - opt.Add(WithSharedSupernodeCLs(ids.Supernode, - []L2CLs{{CLID: ids.L2ACL, ELID: ids.L2AEL}}, - ids.L1CL, ids.L1EL, snOpts...)) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) - - opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) - - // Run super roots migration using supernode as super root source - opt.Add(WithSuperRootsFromSupernode(ids.L1.ChainID(), ids.L1EL, []stack.ComponentID{ids.L2ACL}, ids.Supernode, ids.L2A.ChainID())) - - // Start challenger after migration; use supernode RPCs as super-roots source. - opt.Add(WithSupernodeL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supernode, &ids.Cluster, []stack.ComponentID{ - ids.L2AEL, - })) - - // Start proposer after migration; use supernode RPCs as proposal source. 
- opt.Add(WithSupernodeProposer(ids.L2AProposer, ids.L1EL, &ids.Supernode)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL})) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -func defaultSuperProofsSystem(dest *DefaultInteropSystemIDs, deployerOpts ...DeployerOption) stack.CombinedOption[*Orchestrator] { - ids := NewDefaultInteropSystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) - opt := stack.Combine[*Orchestrator]() - - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), WithDeployerOptions( - append([]DeployerOption{ - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), - WithDevFeatureEnabled(deployer.OptimismPortalInteropDevFlag), - }, deployerOpts...)...)) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithSupervisor(ids.Supervisor, ids.Cluster, ids.L1EL)) - - opt.Add(WithL2ELNode(ids.L2AEL, L2ELWithSupervisor(ids.Supervisor))) - opt.Add(WithL2CLNode(ids.L2ACL, ids.L1CL, ids.L1EL, ids.L2AEL, L2CLSequencer(), L2CLIndexing())) - - opt.Add(WithL2ELNode(ids.L2BEL, L2ELWithSupervisor(ids.Supervisor))) - opt.Add(WithL2CLNode(ids.L2BCL, ids.L1CL, ids.L1EL, ids.L2BEL, L2CLSequencer(), L2CLIndexing())) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) - - opt.Add(WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) - opt.Add(WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) - - opt.Add(WithManagedBySupervisor(ids.L2ACL, ids.Supervisor)) - opt.Add(WithManagedBySupervisor(ids.L2BCL, ids.Supervisor)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - - opt.Add(WithSuperRoots(ids.L1.ChainID(), 
ids.L1EL, []stack.ComponentID{ids.L2ACL, ids.L2BCL}, ids.Supervisor, ids.L2A.ChainID())) - - opt.Add(WithSuperProposer(ids.L2AProposer, ids.L1EL, &ids.Supervisor)) - - opt.Add(WithSuperL2Challenger(ids.L2ChallengerA, ids.L1EL, ids.L1CL, &ids.Supervisor, &ids.Cluster, []stack.ComponentID{ - ids.L2BEL, ids.L2AEL, - })) - - opt.Add(WithL2MetricsDashboard()) - - // Upon evaluation of the option, export the contents we created. - // Ids here are static, but other things may be exported too. - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -type MultiSupervisorInteropSystemIDs struct { - DefaultInteropSystemIDs - - // Supervisor does not support multinode so need a additional supervisor for verifier nodes - SupervisorSecondary stack.ComponentID - - L2A2CL stack.ComponentID - L2A2EL stack.ComponentID - L2B2CL stack.ComponentID - L2B2EL stack.ComponentID -} - -func MultiSupervisorInteropSystem(dest *MultiSupervisorInteropSystemIDs) stack.Option[*Orchestrator] { - ids := MultiSupervisorInteropSystemIDs{ - DefaultInteropSystemIDs: NewDefaultInteropSystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID), - SupervisorSecondary: stack.NewSupervisorID("2-secondary"), // prefix with number for ordering of supervisors - L2A2CL: stack.NewL2CLNodeID("verifier", DefaultL2AID), - L2A2EL: stack.NewL2ELNodeID("verifier", DefaultL2AID), - L2B2CL: stack.NewL2CLNodeID("verifier", DefaultL2BID), - L2B2EL: stack.NewL2ELNodeID("verifier", DefaultL2BID), - } - - // start with default interop system - var parentIds DefaultInteropSystemIDs - opt := stack.Combine[*Orchestrator]() - opt.Add(DefaultInteropSystem(&parentIds)) - - // add backup supervisor - opt.Add(WithSupervisor(ids.SupervisorSecondary, ids.Cluster, ids.L1EL)) - - opt.Add(WithL2ELNode(ids.L2A2EL, L2ELWithSupervisor(ids.SupervisorSecondary))) - opt.Add(WithL2CLNode(ids.L2A2CL, ids.L1CL, ids.L1EL, ids.L2A2EL, L2CLIndexing())) - - opt.Add(WithL2ELNode(ids.L2B2EL, 
L2ELWithSupervisor(ids.SupervisorSecondary))) - opt.Add(WithL2CLNode(ids.L2B2CL, ids.L1CL, ids.L1EL, ids.L2B2EL, L2CLIndexing())) - - // verifier must be also managed or it cannot advance - // we attach verifier L2CL with backup supervisor - opt.Add(WithManagedBySupervisor(ids.L2A2CL, ids.SupervisorSecondary)) - opt.Add(WithManagedBySupervisor(ids.L2B2CL, ids.SupervisorSecondary)) - - // P2P connect L2CL nodes - opt.Add(WithL2CLP2PConnection(ids.L2ACL, ids.L2A2CL)) - opt.Add(WithL2CLP2PConnection(ids.L2BCL, ids.L2B2CL)) - - opt.Add(WithL2MetricsDashboard()) - - // Upon evaluation of the option, export the contents we created. - // Ids here are static, but other things may be exported too. - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} - -func ProofSystem(dest *DefaultMinimalSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultMinimalSystemIDs(DefaultL1ID, DefaultL2AID) - opt := defaultMinimalSystemOpts(&ids, dest) - opt.Add(WithCannonGameTypeAdded(ids.L1EL, ids.L2.ChainID())) - opt.Add(WithCannonKonaGameTypeAdded()) - return opt -} - -type SingleChainSystemWithFlashblocksIDs struct { - L1 stack.ComponentID - L1EL stack.ComponentID - L1CL stack.ComponentID - - L2 stack.ComponentID - L2CL stack.ComponentID - L2EL stack.ComponentID - L2Builder stack.ComponentID - L2RollupBoost stack.ComponentID - - L2Batcher stack.ComponentID - L2Proposer stack.ComponentID - L2Challenger stack.ComponentID - - TestSequencer stack.ComponentID -} - -func NewDefaultSingleChainSystemWithFlashblocksIDs(l1ID, l2ID eth.ChainID) SingleChainSystemWithFlashblocksIDs { - ids := SingleChainSystemWithFlashblocksIDs{ - L1: stack.NewL1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.NewL2NetworkID(l2ID), - L2CL: stack.NewL2CLNodeID("sequencer", l2ID), - L2EL: stack.NewL2ELNodeID("sequencer", l2ID), - L2Builder: stack.NewOPRBuilderNodeID("sequencer-builder", l2ID), - L2RollupBoost: 
stack.NewRollupBoostNodeID("rollup-boost", l2ID), - L2Batcher: stack.NewL2BatcherID("main", l2ID), - L2Proposer: stack.NewL2ProposerID("main", l2ID), - L2Challenger: stack.NewL2ChallengerID("main", l2ID), - TestSequencer: stack.NewTestSequencerID("test-sequencer"), - } - return ids -} - -func DefaultSingleChainSystemWithFlashblocks(dest *SingleChainSystemWithFlashblocksIDs) stack.Option[*Orchestrator] { - ids := NewDefaultSingleChainSystemWithFlashblocksIDs(DefaultL1ID, DefaultL2AID) - return singleChainSystemWithFlashblocksOpts(&ids, dest) -} - -func singleChainSystemWithFlashblocksOpts(ids *SingleChainSystemWithFlashblocksIDs, dest *SingleChainSystemWithFlashblocksIDs) stack.CombinedOption[*Orchestrator] { - opt := stack.Combine[*Orchestrator]() - // Precompute deterministic P2P identity and peering between sequencer EL and op-rbuilder EL. - seqID := NewELNodeIdentity(0) - builderID := NewELNodeIdentity(0) // allocate dynamic port for builder - - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2EL, L2ELWithP2PConfig("127.0.0.1", seqID.Port, seqID.KeyHex(), nil, nil))) - opt.Add(WithOPRBuilderNode(ids.L2Builder, OPRBuilderWithNodeIdentity(builderID, "127.0.0.1", nil, nil))) - // Sequencer adds builder as regular static peer (not trusted) - opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2Builder, false)) - // Builder adds sequencer as trusted peer - opt.Add(WithL2ELP2PConnection(ids.L2Builder, ids.L2EL, true)) - opt.Add(WithRollupBoost(ids.L2RollupBoost, ids.L2EL, RollupBoostWithBuilderNode(ids.L2Builder))) - - opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2RollupBoost, L2CLSequencer())) - - 
opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) - opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ - ids.L2EL, - })) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = *ids - })) - - return opt -} diff --git a/op-devstack/sysgo/system_singlechain_multinode.go b/op-devstack/sysgo/system_singlechain_multinode.go deleted file mode 100644 index 2a2e1070d8d19..0000000000000 --- a/op-devstack/sysgo/system_singlechain_multinode.go +++ /dev/null @@ -1,83 +0,0 @@ -package sysgo - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type DefaultSingleChainMultiNodeSystemIDs struct { - DefaultMinimalSystemIDs - - L2CLB stack.ComponentID - L2ELB stack.ComponentID -} - -type DefaultSingleChainMultiNodeWithTestSeqSystemIDs struct { - DefaultSingleChainMultiNodeSystemIDs - - TestSequencer stack.ComponentID -} - -func NewDefaultSingleChainMultiNodeSystemIDs(l1ID, l2ID eth.ChainID) DefaultSingleChainMultiNodeSystemIDs { - minimal := NewDefaultMinimalSystemIDs(l1ID, l2ID) - return DefaultSingleChainMultiNodeSystemIDs{ - DefaultMinimalSystemIDs: minimal, - L2CLB: stack.NewL2CLNodeID("b", l2ID), - L2ELB: stack.NewL2ELNodeID("b", l2ID), - } -} - -func NewDefaultSingleChainMultiNodeWithTestSeqSystemIDs(l1ID, l2ID eth.ChainID) DefaultSingleChainMultiNodeWithTestSeqSystemIDs { - return DefaultSingleChainMultiNodeWithTestSeqSystemIDs{ - DefaultSingleChainMultiNodeSystemIDs: NewDefaultSingleChainMultiNodeSystemIDs(l1ID, l2ID), - TestSequencer: stack.NewTestSequencerID("dev"), - } -} - -func DefaultSingleChainMultiNodeSystem(dest 
*DefaultSingleChainMultiNodeSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultSingleChainMultiNodeSystemIDs(DefaultL1ID, DefaultL2AID) - - opt := stack.Combine[*Orchestrator]() - opt.Add(DefaultMinimalSystem(&dest.DefaultMinimalSystemIDs)) - - opt.Add(WithL2ELNode(ids.L2ELB)) - opt.Add(WithL2CLNode(ids.L2CLB, ids.L1CL, ids.L1EL, ids.L2ELB)) - - // P2P connect L2CL nodes - opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CLB)) - opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELB, false)) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - return opt -} - -func DefaultSingleChainMultiNodeWithTestSeqSystem(dest *DefaultSingleChainMultiNodeWithTestSeqSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultSingleChainMultiNodeWithTestSeqSystemIDs(DefaultL1ID, DefaultL2AID) - opt := stack.Combine[*Orchestrator]() - opt.Add(DefaultSingleChainMultiNodeSystem(&dest.DefaultSingleChainMultiNodeSystemIDs)) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - return opt -} - -func DefaultSingleChainMultiNodeSystemWithoutP2P(dest *DefaultSingleChainMultiNodeSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultSingleChainMultiNodeSystemIDs(DefaultL1ID, DefaultL2AID) - - opt := stack.Combine[*Orchestrator]() - opt.Add(DefaultMinimalSystem(&dest.DefaultMinimalSystemIDs)) - - opt.Add(WithL2ELNode(ids.L2ELB)) - opt.Add(WithL2CLNode(ids.L2CLB, ids.L1CL, ids.L1EL, ids.L2ELB)) - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - return opt -} diff --git a/op-devstack/sysgo/system_singlechain_twoverifiers.go b/op-devstack/sysgo/system_singlechain_twoverifiers.go deleted file mode 100644 index 3a38587fc22f6..0000000000000 --- a/op-devstack/sysgo/system_singlechain_twoverifiers.go +++ /dev/null @@ -1,79 +0,0 @@ -package sysgo - -import ( - 
"github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type DefaultSingleChainTwoVerifiersSystemIDs struct { - DefaultSingleChainMultiNodeSystemIDs - - L2CLC stack.ComponentID - L2ELC stack.ComponentID - - TestSequencer stack.ComponentID -} - -func NewDefaultSingleChainTwoVerifiersSystemIDs(l1ID, l2ID eth.ChainID) DefaultSingleChainTwoVerifiersSystemIDs { - return DefaultSingleChainTwoVerifiersSystemIDs{ - DefaultSingleChainMultiNodeSystemIDs: NewDefaultSingleChainMultiNodeSystemIDs(l1ID, l2ID), - L2CLC: stack.NewL2CLNodeID("c", l2ID), - L2ELC: stack.NewL2ELNodeID("c", l2ID), - TestSequencer: stack.NewTestSequencerID("dev"), - } -} - -func DefaultSingleChainTwoVerifiersFollowL2System(dest *DefaultSingleChainTwoVerifiersSystemIDs) stack.Option[*Orchestrator] { - ids := NewDefaultSingleChainTwoVerifiersSystemIDs(DefaultL1ID, DefaultL2AID) - - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2ELB)) - opt.Add(WithL2CLNode(ids.L2CLB, ids.L1CL, ids.L1EL, ids.L2ELB)) - - opt.Add(WithL2ELNode(ids.L2EL)) - opt.Add(WithL2CLNodeFollowL2(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, ids.L2CLB, L2CLSequencer())) - - opt.Add(WithL2ELNode(ids.L2ELC)) - opt.Add(WithL2CLNodeFollowL2(ids.L2CLC, ids.L1CL, ids.L1EL, ids.L2ELC, ids.L2CLB)) - - opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CLB)) - opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELB, false)) - opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CLC)) - opt.Add(WithL2ELP2PConnection(ids.L2EL, ids.L2ELC, false)) - 
opt.Add(WithL2CLP2PConnection(ids.L2CLB, ids.L2CLC)) - opt.Add(WithL2ELP2PConnection(ids.L2ELB, ids.L2ELC, false)) - - opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) - opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CLB, ids.L1EL, ids.L2ELB)) - - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ - ids.L2EL, - })) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} diff --git a/op-devstack/sysgo/system_synctester.go b/op-devstack/sysgo/system_synctester.go deleted file mode 100644 index 5ba3c13315763..0000000000000 --- a/op-devstack/sysgo/system_synctester.go +++ /dev/null @@ -1,79 +0,0 @@ -package sysgo - -import ( - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type DefaultSimpleSystemWithSyncTesterIDs struct { - DefaultMinimalSystemIDs - - L2CL2 stack.ComponentID - SyncTesterL2EL stack.ComponentID - SyncTester stack.ComponentID -} - -func NewDefaultSimpleSystemWithSyncTesterIDs(l1ID, l2ID eth.ChainID) DefaultSimpleSystemWithSyncTesterIDs { - minimal := NewDefaultMinimalSystemIDs(l1ID, l2ID) - return DefaultSimpleSystemWithSyncTesterIDs{ - DefaultMinimalSystemIDs: minimal, - L2CL2: stack.NewL2CLNodeID("verifier", l2ID), - SyncTesterL2EL: stack.NewL2ELNodeID("sync-tester-el", l2ID), - SyncTester: stack.NewSyncTesterID("sync-tester", l2ID), - } -} - -func DefaultSimpleSystemWithSyncTester(dest *DefaultSimpleSystemWithSyncTesterIDs) stack.Option[*Orchestrator] { - l1ID := eth.ChainIDFromUInt64(900) - l2ID := eth.ChainIDFromUInt64(901) - ids := NewDefaultSimpleSystemWithSyncTesterIDs(l1ID, l2ID) - - opt := stack.Combine[*Orchestrator]() - 
opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(WithDeployer(), - WithDeployerOptions( - WithLocalContractSources(), - WithCommons(ids.L1.ChainID()), - WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - ), - ) - - opt.Add(WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(WithL2ELNode(ids.L2EL)) - opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, L2CLSequencer())) - - opt.Add(WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL)) - opt.Add(WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil)) - - opt.Add(WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL})) - - opt.Add(WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2CL, ids.L1EL, ids.L2EL)) - - opt.Add(WithL2Challenger(ids.L2Challenger, ids.L1EL, ids.L1CL, nil, nil, &ids.L2CL, []stack.ComponentID{ - ids.L2EL, - })) - - opt.Add(WithSyncTester(ids.SyncTester, []stack.ComponentID{ids.L2EL})) - - // Create a SyncTesterEL with the same chain ID as the EL node - opt.Add(WithSyncTesterL2ELNode(ids.SyncTesterL2EL, ids.L2EL)) - opt.Add(WithL2CLNode(ids.L2CL2, ids.L1CL, ids.L1EL, ids.SyncTesterL2EL)) - - // P2P Connect CLs to signal unsafe heads - opt.Add(WithL2CLP2PConnection(ids.L2CL, ids.L2CL2)) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} diff --git a/op-devstack/sysgo/system_synctester_ext.go b/op-devstack/sysgo/system_synctester_ext.go deleted file mode 100644 index a05820b17897c..0000000000000 --- a/op-devstack/sysgo/system_synctester_ext.go +++ /dev/null @@ -1,103 +0,0 @@ -package sysgo - -import ( - "fmt" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-node/chaincfg" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/core" -) - 
-type DefaultMinimalExternalELSystemIDs struct { - L1 stack.ComponentID - L1EL stack.ComponentID - L1CL stack.ComponentID - - L2 stack.ComponentID - L2CL stack.ComponentID - L2EL stack.ComponentID - L2ELReadOnly stack.ComponentID - - SyncTester stack.ComponentID -} - -func NewExternalELSystemIDs(l1ID, l2ID eth.ChainID) DefaultMinimalExternalELSystemIDs { - ids := DefaultMinimalExternalELSystemIDs{ - L1: stack.NewL1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.NewL2NetworkID(l2ID), - L2CL: stack.NewL2CLNodeID("verifier", l2ID), - L2EL: stack.NewL2ELNodeID("sync-tester-el", l2ID), - L2ELReadOnly: stack.NewL2ELNodeID("l2-el-readonly", l2ID), - SyncTester: stack.NewSyncTesterID("sync-tester", l2ID), - } - return ids -} - -// ExternalELSystemWithEndpointAndSuperchainRegistry creates a minimal external EL system -// using a network from the superchain registry instead of the deployer -func ExternalELSystemWithEndpointAndSuperchainRegistry(dest *DefaultMinimalExternalELSystemIDs, networkPreset stack.ExtNetworkConfig) stack.Option[*Orchestrator] { - chainCfg := chaincfg.ChainByName(networkPreset.L2NetworkName) - if chainCfg == nil { - panic(fmt.Sprintf("network %s not found in superchain registry", networkPreset.L2NetworkName)) - } - l2ChainID := eth.ChainIDFromUInt64(chainCfg.ChainID) - - ids := NewExternalELSystemIDs(networkPreset.L1ChainID, l2ChainID) - - opt := stack.Combine[*Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - o.P().Logger().Info("Setting up with superchain registry network", "network", networkPreset.L2NetworkName) - })) - - opt.Add(WithMnemonicKeys(devkeys.TestMnemonic)) - - // We must supply the full L1 Chain Config, so look that up or fail if unknown - chainID := ids.L1.ChainID() - l1ChainConfig := eth.L1ChainConfigByChainID(chainID) - if l1ChainConfig == nil { - panic(fmt.Sprintf("unsupported L1 chain ID: %s", chainID)) - } - - // Skip deployer since we're using 
external L1 and superchain registry for L2 config - // Create L1 network record for external L1 - opt.Add(stack.BeforeDeploy(func(o *Orchestrator) { - l1Net := &L1Network{ - id: ids.L1, - genesis: &core.Genesis{ - Config: l1ChainConfig, - }, - blockTime: 12, - } - o.registry.Register(ids.L1, l1Net) - })) - - opt.Add(WithExtL1Nodes(ids.L1EL, ids.L1CL, networkPreset.L1ELEndpoint, networkPreset.L1CLBeaconEndpoint)) - - // Use empty dependency set and minimal cluster instead of deployer - opt.Add(WithEmptyDepSet( - stack.NewL2NetworkID(l2ChainID), - networkPreset.L2NetworkName, - )) - - // Add SyncTester service with external endpoint - opt.Add(WithSyncTesterWithExternalEndpoint(ids.SyncTester, networkPreset.L2ELEndpoint, l2ChainID)) - - // Add SyncTesterL2ELNode as the L2EL replacement for real-world EL endpoint - opt.Add(WithSyncTesterL2ELNode(ids.L2EL, ids.L2EL)) - - opt.Add(WithExtL2Node(ids.L2ELReadOnly, networkPreset.L2ELEndpoint)) - - opt.Add(WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL)) - - opt.Add(WithL2MetricsDashboard()) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - *dest = ids - })) - - return opt -} diff --git a/op-devstack/sysgo/system_test.go b/op-devstack/sysgo/system_test.go deleted file mode 100644 index feddda2c9b608..0000000000000 --- a/op-devstack/sysgo/system_test.go +++ /dev/null @@ -1,138 +0,0 @@ -package sysgo - -import ( - "context" - "testing" - "time" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-service/testlog" -) - -func exiters(gt *testing.T) (func(bool), func()) { - onFail := func(now bool) { - gt.Helper() - if now { - gt.FailNow() - } else { - gt.Fail() - } - } - onSkipNow := func() { - gt.Helper() - gt.SkipNow() - } - return onFail, 
onSkipNow -} - -func TestSystem(gt *testing.T) { - var ids DefaultInteropSystemIDs - opt := DefaultInteropSystem(&ids) - - logger := testlog.Logger(gt, log.LevelInfo) - - onFail, onSkipNow := exiters(gt) - p := devtest.NewP(context.Background(), logger, onFail, onSkipNow) - gt.Cleanup(p.Close) - - orch := NewOrchestrator(p, stack.Combine[*Orchestrator]()) - stack.ApplyOptionLifecycle(opt, orch) - - // Run two tests in parallel: see if we can share the same orchestrator - // between two test scopes, with two different hydrated system frontends. - gt.Run("testA", func(gt *testing.T) { - gt.Parallel() - - t := devtest.SerialT(gt) - system := shim.NewSystem(t) - orch.Hydrate(system) - - testSystem(ids, system) - }) - - gt.Run("testB", func(gt *testing.T) { - gt.Parallel() - - t := devtest.SerialT(gt) - system := shim.NewSystem(t) - orch.Hydrate(system) - - testSystem(ids, system) - }) -} - -func testSystem(ids DefaultInteropSystemIDs, system stack.System) { - t := system.T() - logger := t.Logger() - - t.Run("test matchers", func(t devtest.T) { - require := t.Require() - require.Equal(ids.L1, system.L1Network(match.FirstL1Network).ID()) - require.Equal(ids.L1EL, system.L1Network(match.FirstL1Network).L1ELNode(match.FirstL1EL).ID()) - require.Equal(ids.L1CL, system.L1Network(match.FirstL1Network).L1CLNode(match.FirstL1CL).ID()) - require.Equal(ids.L2A, system.L2Network(match.L2ChainA).ID()) - require.Equal(ids.L2A, system.L2Network(match.FirstL2Network).ID()) - require.Equal(ids.L2B, system.L2Network(match.L2ChainB).ID()) - require.Equal(ids.Cluster, system.Cluster(match.FirstCluster).ID()) - require.Equal(ids.Superchain, system.Superchain(match.FirstSuperchain).ID()) - require.Equal(ids.Supervisor, system.Supervisor(match.FirstSupervisor).ID()) - l2A := system.L2Network(match.L2ChainA) - require.Equal(ids.L2ACL, l2A.L2CLNode(match.FirstL2CL).ID()) - require.Equal(ids.L2AEL, l2A.L2ELNode(match.FirstL2EL).ID()) - require.Equal(ids.L2ABatcher, 
l2A.L2Batcher(match.FirstL2Batcher).ID()) - require.Equal(ids.L2AProposer, l2A.L2Proposer(match.FirstL2Proposer).ID()) - }) - - t.Run("test labeling", func(t devtest.T) { - require := t.Require() - netB := system.L2Network(match.L2ChainB) - require.Equal("", netB.Label("nickname")) - netB.SetLabel("nickname", "Network B") - require.Equal("Network B", netB.Label("nickname")) - v := system.L2Network(match.WithLabel[stack.L2Network]( - "nickname", "Network B")) - require.Equal(ids.L2B, v.ID()) - }) - - t.Run("op-geth match", func(t devtest.T) { - elNode := system.L2Network(match.L2ChainA).L2ELNode(match.OpGeth) - t.Require().Equal(string(match.OpGeth), elNode.Label(match.LabelVendor)) - }) - - t.Run("find CL", func(t devtest.T) { - elNode := system.L2Network(match.L2ChainA).L2ELNode(match.FirstL2EL) - clNode := system.L2Network(match.L2ChainA).L2CLNode(match.WithEngine(elNode.ID())) - t.Require().Contains(clNode.ELs(), elNode) - }) - - t.Run("sync", func(t devtest.T) { - require := t.Require() - seqA := system.L2Network(stack.ByID[stack.L2Network](ids.L2A)).L2CLNode(stack.ByID[stack.L2CLNode](ids.L2ACL)) - seqB := system.L2Network(stack.ByID[stack.L2Network](ids.L2B)).L2CLNode(stack.ByID[stack.L2CLNode](ids.L2BCL)) - blocks := uint64(5) - // wait for this many blocks, with some margin for delays - for i := uint64(0); i < blocks*2+10; i++ { - time.Sleep(time.Second * 2) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second*10) - statusA, err := seqA.RollupAPI().SyncStatus(ctx) - require.NoError(err) - statusB, err := seqB.RollupAPI().SyncStatus(ctx) - require.NoError(err) - cancel() - logger.Info("chain A", "tip", statusA.UnsafeL2) - logger.Info("chain B", "tip", statusB.UnsafeL2) - - if statusA.UnsafeL2.Number > blocks && statusB.UnsafeL2.Number > blocks { - return - } - } - t.Errorf("Expected to reach block %d on both chains", blocks) - t.FailNow() - }) -} diff --git a/op-devstack/sysgo/system_two_l2_follow_l2.go 
b/op-devstack/sysgo/system_two_l2_follow_l2.go deleted file mode 100644 index 7bf5b9ad11c01..0000000000000 --- a/op-devstack/sysgo/system_two_l2_follow_l2.go +++ /dev/null @@ -1,61 +0,0 @@ -package sysgo - -import ( - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -// DefaultTwoL2SupernodeFollowL2SystemIDs defines a two-L2 interop+supernode setup -// with one additional follow-source verifier per chain. -type DefaultTwoL2SupernodeFollowL2SystemIDs struct { - DefaultTwoL2SystemIDs - - L2AFollowerCL stack.ComponentID - L2AFollowerEL stack.ComponentID - L2BFollowerCL stack.ComponentID - L2BFollowerEL stack.ComponentID -} - -func NewDefaultTwoL2SupernodeFollowL2SystemIDs(l1ID, l2AID, l2BID eth.ChainID) DefaultTwoL2SupernodeFollowL2SystemIDs { - return DefaultTwoL2SupernodeFollowL2SystemIDs{ - DefaultTwoL2SystemIDs: NewDefaultTwoL2SystemIDs(l1ID, l2AID, l2BID), - L2AFollowerCL: stack.NewL2CLNodeID("follower", l2AID), - L2AFollowerEL: stack.NewL2ELNodeID("follower", l2AID), - L2BFollowerCL: stack.NewL2CLNodeID("follower", l2BID), - L2BFollowerEL: stack.NewL2ELNodeID("follower", l2BID), - } -} - -// DefaultTwoL2SupernodeFollowL2System runs two L2 chains with: -// - shared supernode CL (interop enabled with configurable delay), -// - one follow-source verifier per chain in op-node light-CL mode. -// -// The follower for each chain tracks that chain's supernode CL proxy. -func DefaultTwoL2SupernodeFollowL2System(dest *DefaultTwoL2SupernodeFollowL2SystemIDs, delaySeconds uint64) stack.Option[*Orchestrator] { - ids := NewDefaultTwoL2SupernodeFollowL2SystemIDs(DefaultL1ID, DefaultL2AID, DefaultL2BID) - - var baseIDs DefaultTwoL2SystemIDs - opt := stack.Combine[*Orchestrator]() - - // Build on top of the existing interop+supernode two-L2 topology. 
- opt.Add(DefaultSupernodeInteropTwoL2System(&baseIDs, delaySeconds)) - - // Chain A follower - opt.Add(WithL2ELNode(ids.L2AFollowerEL)) - opt.Add(WithOpNodeFollowL2(ids.L2AFollowerCL, ids.L1CL, ids.L1EL, ids.L2AFollowerEL, ids.L2ACL)) - opt.Add(WithL2CLP2PConnection(ids.L2ACL, ids.L2AFollowerCL)) - opt.Add(WithL2ELP2PConnection(ids.L2AEL, ids.L2AFollowerEL, false)) - - // Chain B follower - opt.Add(WithL2ELNode(ids.L2BFollowerEL)) - opt.Add(WithOpNodeFollowL2(ids.L2BFollowerCL, ids.L1CL, ids.L1EL, ids.L2BFollowerEL, ids.L2BCL)) - opt.Add(WithL2CLP2PConnection(ids.L2BCL, ids.L2BFollowerCL)) - opt.Add(WithL2ELP2PConnection(ids.L2BEL, ids.L2BFollowerEL, false)) - - opt.Add(stack.Finally(func(orch *Orchestrator) { - ids.DefaultTwoL2SystemIDs = baseIDs - *dest = ids - })) - - return opt -} diff --git a/op-devstack/sysgo/test_sequencer.go b/op-devstack/sysgo/test_sequencer.go deleted file mode 100644 index 5da43b784e9ac..0000000000000 --- a/op-devstack/sysgo/test_sequencer.go +++ /dev/null @@ -1,321 +0,0 @@ -package sysgo - -import ( - "context" - "fmt" - - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" - - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/endpoint" - "github.com/ethereum-optimism/optimism/op-service/eth" - oplog "github.com/ethereum-optimism/optimism/op-service/log" - "github.com/ethereum-optimism/optimism/op-service/metrics" - "github.com/ethereum-optimism/optimism/op-service/oppprof" - oprpc "github.com/ethereum-optimism/optimism/op-service/rpc" - sequencerConfig "github.com/ethereum-optimism/optimism/op-test-sequencer/config" - testmetrics 
"github.com/ethereum-optimism/optimism/op-test-sequencer/metrics" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/fakepos" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/builders/standardbuilder" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/noopcommitter" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/committers/standardcommitter" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/config" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/nooppublisher" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/publishers/standardpublisher" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/sequencers/fullseq" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/localkey" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/backend/work/signers/noopsigner" - "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" - gn "github.com/ethereum/go-ethereum/node" -) - -type TestSequencer struct { - id stack.ComponentID - userRPC string - jwtSecret [32]byte - sequencers map[eth.ChainID]seqtypes.SequencerID -} - -func (s *TestSequencer) hydrate(sys stack.ExtensibleSystem) { - tlog := sys.Logger().New("id", s.id) - - opts := []client.RPCOption{ - client.WithLazyDial(), - client.WithGethRPCOptions(rpc.WithHTTPAuth(gn.NewJWTAuth(s.jwtSecret))), - } - - sqClient, err := client.NewRPC(sys.T().Ctx(), tlog, s.userRPC, opts...) 
- sys.T().Require().NoError(err) - sys.T().Cleanup(sqClient.Close) - - sequencersRpcs := make(map[eth.ChainID]client.RPC) - for chainID, seqID := range s.sequencers { - seqRpc, err := client.NewRPC(sys.T().Ctx(), tlog, s.userRPC+"/sequencers/"+seqID.String(), opts...) - sys.T().Require().NoError(err) - sys.T().Cleanup(seqRpc.Close) - - sequencersRpcs[chainID] = seqRpc - } - - sys.AddTestSequencer(shim.NewTestSequencer(shim.TestSequencerConfig{ - CommonConfig: shim.NewCommonConfig(sys.T()), - ID: s.id, - Client: sqClient, - ControlClients: sequencersRpcs, - })) -} - -// l2ChainIDs pairs together the CL and EL node IDs for an L2 chain. -type l2ChainIDs struct { - CLID stack.ComponentID - ELID stack.ComponentID -} - -func WithTestSequencer(testSequencerID stack.ComponentID, l1CLID stack.ComponentID, l2CLID stack.ComponentID, l1ELID stack.ComponentID, l2ELID stack.ComponentID) stack.Option[*Orchestrator] { - return withTestSequencerImpl(testSequencerID, l1CLID, l1ELID, l2ChainIDs{CLID: l2CLID, ELID: l2ELID}) -} - -// WithTestSequencer2L2 creates a test sequencer that can build blocks on two L2 chains. -// This is useful for testing same-timestamp interop scenarios where we need deterministic -// block timestamps on both chains. -func WithTestSequencer2L2(testSequencerID stack.ComponentID, l1CLID stack.ComponentID, - l2ACLID stack.ComponentID, l2BCLID stack.ComponentID, - l1ELID stack.ComponentID, l2AELID stack.ComponentID, l2BELID stack.ComponentID) stack.Option[*Orchestrator] { - return withTestSequencerImpl(testSequencerID, l1CLID, l1ELID, - l2ChainIDs{CLID: l2ACLID, ELID: l2AELID}, - l2ChainIDs{CLID: l2BCLID, ELID: l2BELID}, - ) -} - -// withTestSequencerImpl is the shared implementation for creating test sequencers. -// It supports any number of L2 chains. 
-func withTestSequencerImpl(testSequencerID stack.ComponentID, l1CLID stack.ComponentID, l1ELID stack.ComponentID, l2Chains ...l2ChainIDs) stack.Option[*Orchestrator] { - return stack.AfterDeploy(func(orch *Orchestrator) { - p := orch.P().WithCtx(stack.ContextWithID(orch.P().Ctx(), testSequencerID)) - require := p.Require() - logger := p.Logger() - - // Setup L1 components - orch.writeDefaultJWT() - l1EL, ok := orch.GetL1EL(l1ELID) - require.True(ok, "l1 EL node required") - l1ELClient, err := ethclient.DialContext(p.Ctx(), l1EL.UserRPC()) - require.NoError(err) - engineCl, err := dialEngine(p.Ctx(), l1EL.AuthRPC(), orch.jwtSecret) - require.NoError(err) - - l1CL, ok := orch.GetL1CL(l1CLID) - require.True(ok, "l1 CL node required") - - l1Net, ok := orch.GetL1Network(stack.NewL1NetworkID(l1ELID.ChainID())) - require.True(ok, "l1 net required") - - // L1 sequencer IDs - bid_L1 := seqtypes.BuilderID("test-l1-builder") - cid_L1 := seqtypes.CommitterID("test-noop-committer") - sid_L1 := seqtypes.SignerID("test-noop-signer") - pid_L1 := seqtypes.PublisherID("test-noop-publisher") - l1SequencerID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l1ELID.ChainID())) - - // Initialize ensemble config with L1 components - ensemble := &config.Ensemble{ - Builders: map[seqtypes.BuilderID]*config.BuilderEntry{ - bid_L1: { - L1: &fakepos.Config{ - ChainConfig: orch.wb.outL1Genesis.Config, - EngineAPI: engineCl, - Backend: l1ELClient, - Beacon: l1CL.beacon, - FinalizedDistance: 20, - SafeDistance: 10, - BlockTime: 6, - }, - }, - }, - Signers: map[seqtypes.SignerID]*config.SignerEntry{ - sid_L1: { - Noop: &noopsigner.Config{}, - }, - }, - Committers: map[seqtypes.CommitterID]*config.CommitterEntry{ - cid_L1: { - Noop: &noopcommitter.Config{}, - }, - }, - Publishers: map[seqtypes.PublisherID]*config.PublisherEntry{ - pid_L1: { - Noop: &nooppublisher.Config{}, - }, - }, - Sequencers: map[seqtypes.SequencerID]*config.SequencerEntry{ - l1SequencerID: { - Full: &fullseq.Config{ - 
ChainID: l1ELID.ChainID(), - Builder: bid_L1, - Signer: sid_L1, - Committer: cid_L1, - Publisher: pid_L1, - }, - }, - }, - } - - // Track sequencer IDs for the TestSequencer struct - sequencerIDs := map[eth.ChainID]seqtypes.SequencerID{ - l1CLID.ChainID(): l1SequencerID, - } - - // Add L2 chain configurations - logFields := []any{"l1EL", l1EL.UserRPC()} - for i, l2Chain := range l2Chains { - l2EL, ok := orch.GetL2EL(l2Chain.ELID) - require.True(ok, "l2 EL node required for chain %d", i) - - l2CL, ok := orch.GetL2CL(l2Chain.CLID) - require.True(ok, "l2 CL node required for chain %d", i) - - // Generate unique IDs for this L2 chain (use suffix for multi-chain, no suffix for single chain) - suffix := "" - if len(l2Chains) > 1 { - suffix = fmt.Sprintf("-%c", 'A'+i) // -A, -B, -C, etc. - } - bid := seqtypes.BuilderID(fmt.Sprintf("test-standard-builder%s", suffix)) - cid := seqtypes.CommitterID(fmt.Sprintf("test-standard-committer%s", suffix)) - sid := seqtypes.SignerID(fmt.Sprintf("test-local-signer%s", suffix)) - pid := seqtypes.PublisherID(fmt.Sprintf("test-standard-publisher%s", suffix)) - seqID := seqtypes.SequencerID(fmt.Sprintf("test-seq-%s", l2Chain.CLID.ChainID())) - - // Get P2P key for signing - p2pKey, err := orch.keys.Secret(devkeys.SequencerP2PRole.Key(l2Chain.CLID.ChainID().ToBig())) - require.NoError(err, "need p2p key for sequencer %d", i) - rawKey := hexutil.Bytes(crypto.FromECDSA(p2pKey)) - - // Add builder - ensemble.Builders[bid] = &config.BuilderEntry{ - Standard: &standardbuilder.Config{ - L1ChainConfig: l1Net.genesis.Config, - L1EL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l1EL.UserRPC()), - }, - L2EL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2EL.UserRPC()), - }, - L2CL: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.UserRPC()), - }, - }, - } - - // Add signer - ensemble.Signers[sid] = &config.SignerEntry{ - LocalKey: &localkey.Config{ - RawKey: &rawKey, - ChainID: l2Chain.CLID.ChainID(), - }, - } - - // Add committer - 
ensemble.Committers[cid] = &config.CommitterEntry{ - Standard: &standardcommitter.Config{ - RPC: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.UserRPC()), - }, - }, - } - - // Add publisher - ensemble.Publishers[pid] = &config.PublisherEntry{ - Standard: &standardpublisher.Config{ - RPC: endpoint.MustRPC{ - Value: endpoint.HttpURL(l2CL.UserRPC()), - }, - }, - } - - // Add sequencer - ensemble.Sequencers[seqID] = &config.SequencerEntry{ - Full: &fullseq.Config{ - ChainID: l2Chain.CLID.ChainID(), - Builder: bid, - Signer: sid, - Committer: cid, - Publisher: pid, - SequencerConfDepth: 2, - SequencerEnabled: true, - SequencerStopped: false, - SequencerMaxSafeLag: 0, - }, - } - - sequencerIDs[l2Chain.CLID.ChainID()] = seqID - logFields = append(logFields, fmt.Sprintf("l2EL%d", i), l2EL.UserRPC(), fmt.Sprintf("l2CL%d", i), l2CL.UserRPC()) - } - - logger.Info("Configuring test sequencer", logFields...) - - jobs := work.NewJobRegistry() - startedEnsemble, err := ensemble.Start(context.Background(), &work.StartOpts{ - Log: logger, - Metrics: &testmetrics.NoopMetrics{}, - Jobs: jobs, - }) - require.NoError(err) - - jwtPath, jwtSecret := orch.writeDefaultJWT() - - cfg := &sequencerConfig.Config{ - MetricsConfig: metrics.CLIConfig{ - Enabled: false, - }, - PprofConfig: oppprof.CLIConfig{ - ListenEnabled: false, - }, - LogConfig: oplog.CLIConfig{ - Level: log.LevelDebug, - Format: oplog.FormatText, - }, - RPC: oprpc.CLIConfig{ - ListenAddr: "127.0.0.1", - ListenPort: 0, - EnableAdmin: true, - }, - Ensemble: startedEnsemble, - JWTSecretPath: jwtPath, - Version: "dev", - MockRun: false, - } - - sq, err := sequencer.FromConfig(p.Ctx(), cfg, logger) - require.NoError(err) - - err = sq.Start(p.Ctx()) - require.NoError(err) - - p.Cleanup(func() { - ctx, cancel := context.WithCancel(p.Ctx()) - cancel() - logger.Info("Closing sequencer") - closeErr := sq.Stop(ctx) - logger.Info("Closed sequencer", "err", closeErr) - }) - - testSequencerNode := &TestSequencer{ - id: testSequencerID, 
- userRPC: sq.RPC(), - jwtSecret: jwtSecret, - sequencers: sequencerIDs, - } - logger.Info("Sequencer User RPC", "http_endpoint", testSequencerNode.userRPC) - orch.registry.Register(testSequencerID, testSequencerNode) - }) -} diff --git a/op-devstack/sysgo/util.go b/op-devstack/sysgo/util.go index f323543b4e27b..f177364bf8bc6 100644 --- a/op-devstack/sysgo/util.go +++ b/op-devstack/sysgo/util.go @@ -78,7 +78,7 @@ func getAvailableLocalPort() (string, error) { } // waitTCPReady parses a URL and waits for its TCP endpoint to become ready using EventuallyWithT. -func waitTCPReady(p devtest.P, rawURL string, timeout time.Duration) { +func waitTCPReady(p devtest.CommonT, rawURL string, timeout time.Duration) { p.Helper() u, err := url.Parse(rawURL) p.Require().NoError(err, "parse URL: %s", rawURL) diff --git a/op-devstack/sysgo/world.go b/op-devstack/sysgo/world.go new file mode 100644 index 0000000000000..3a0360a3f4876 --- /dev/null +++ b/op-devstack/sysgo/world.go @@ -0,0 +1,155 @@ +package sysgo + +import ( + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" + "github.com/ethereum-optimism/optimism/op-core/forks" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" +) + +func newWorldBuilder(t devtest.T, keys devkeys.Keys) *worldBuilder { + return &worldBuilder{ + p: t, + logger: t.Logger(), + require: t.Require(), + keys: keys, + builder: intentbuilder.New(), + } +} + +func applyConfigInteropAtGenesis(builder intentbuilder.Builder) { + for _, l2Cfg := range builder.L2s() { + l2Cfg.WithForkAtGenesis(forks.Interop) + } +} + +func applyConfigDeployerOptions(t devtest.T, keys devkeys.Keys, builder intentbuilder.Builder, opts []DeployerOption) { + if len(opts) == 0 { + return + } + for _, opt := 
range opts { + if opt == nil { + continue + } + opt(t, keys, builder) + } +} + +func buildSingleChainWorldWithInterop(t devtest.T, keys devkeys.Keys, interopAtGenesis bool, deployerOpts ...DeployerOption) (*L1Network, *L2Network, depset.DependencySet, depset.FullConfigSetMerged) { + _, l1Net, l2Net, depSet, fullCfgSet := buildSingleChainWorldWithInteropAndState(t, keys, interopAtGenesis, deployerOpts...) + return l1Net, l2Net, depSet, fullCfgSet +} + +type interopMigrationState struct { + opcmImpl common.Address + superchainConfigAddr common.Address + l2Deployments map[eth.ChainID]*L2Deployment +} + +func newInteropMigrationState(wb *worldBuilder) *interopMigrationState { + if wb == nil || wb.output == nil || wb.outSuperchainDeployment == nil { + return nil + } + state := &interopMigrationState{ + opcmImpl: wb.output.ImplementationsDeployment.OpcmImpl, + superchainConfigAddr: wb.outSuperchainDeployment.SuperchainConfigAddr(), + l2Deployments: make(map[eth.ChainID]*L2Deployment, len(wb.outL2Deployment)), + } + for chainID, deployment := range wb.outL2Deployment { + state.l2Deployments[chainID] = deployment + } + return state +} + +func buildSingleChainWorldWithInteropAndState(t devtest.T, keys devkeys.Keys, interopAtGenesis bool, deployerOpts ...DeployerOption) (*interopMigrationState, *L1Network, *L2Network, depset.DependencySet, depset.FullConfigSetMerged) { + wb := newWorldBuilder(t, keys) + applyConfigLocalContractSources(t, keys, wb.builder) + applyConfigCommons(t, keys, DefaultL1ID, wb.builder) + applyConfigPrefundedL2(t, keys, DefaultL1ID, DefaultL2AID, wb.builder) + if interopAtGenesis { + applyConfigInteropAtGenesis(wb.builder) + } + applyConfigDeployerOptions(t, keys, wb.builder, deployerOpts) + wb.Build() + + t.Require().Len(wb.l2Chains, 1, "expected exactly one L2 chain") + l2ID := wb.l2Chains[0] + l1ID := eth.ChainIDFromUInt64(wb.output.AppliedIntent.L1ChainID) + + l1Net := &L1Network{ + name: "l1", + chainID: l1ID, + genesis: wb.outL1Genesis, + 
blockTime: 6, + } + l2Net := &L2Network{ + name: "l2a", + chainID: l2ID, + l1ChainID: l1ID, + genesis: wb.outL2Genesis[l2ID], + rollupCfg: wb.outL2RollupCfg[l2ID], + deployment: wb.outL2Deployment[l2ID], + opcmImpl: wb.output.ImplementationsDeployment.OpcmImpl, + mipsImpl: wb.output.ImplementationsDeployment.MipsImpl, + keys: keys, + } + var depSet depset.DependencySet + if wb.outFullCfgSet.DependencySet != nil { + depSet = wb.outFullCfgSet.DependencySet + } + return newInteropMigrationState(wb), l1Net, l2Net, depSet, wb.outFullCfgSet +} + +func buildTwoL2WorldWithState(t devtest.T, keys devkeys.Keys, interopAtGenesis bool, deployerOpts ...DeployerOption) (*interopMigrationState, *L1Network, *L2Network, *L2Network, depset.FullConfigSetMerged) { + wb := newWorldBuilder(t, keys) + applyConfigLocalContractSources(t, keys, wb.builder) + applyConfigCommons(t, keys, DefaultL1ID, wb.builder) + applyConfigPrefundedL2(t, keys, DefaultL1ID, DefaultL2AID, wb.builder) + applyConfigPrefundedL2(t, keys, DefaultL1ID, DefaultL2BID, wb.builder) + if interopAtGenesis { + applyConfigInteropAtGenesis(wb.builder) + } + applyConfigDeployerOptions(t, keys, wb.builder, deployerOpts) + wb.Build() + + l1ID := eth.ChainIDFromUInt64(wb.output.AppliedIntent.L1ChainID) + l1Net := &L1Network{ + name: "l1", + chainID: l1ID, + genesis: wb.outL1Genesis, + blockTime: 6, + } + + l2ANet, ok := wb.outL2Genesis[DefaultL2AID] + t.Require().True(ok, "missing L2A genesis") + l2BNet, ok := wb.outL2Genesis[DefaultL2BID] + t.Require().True(ok, "missing L2B genesis") + + l2A := &L2Network{ + name: "l2a", + chainID: DefaultL2AID, + l1ChainID: l1ID, + genesis: l2ANet, + rollupCfg: wb.outL2RollupCfg[DefaultL2AID], + deployment: wb.outL2Deployment[DefaultL2AID], + opcmImpl: wb.output.ImplementationsDeployment.OpcmImpl, + mipsImpl: wb.output.ImplementationsDeployment.MipsImpl, + keys: keys, + } + l2B := &L2Network{ + name: "l2b", + chainID: DefaultL2BID, + l1ChainID: l1ID, + genesis: l2BNet, + rollupCfg: 
wb.outL2RollupCfg[DefaultL2BID], + deployment: wb.outL2Deployment[DefaultL2BID], + opcmImpl: wb.output.ImplementationsDeployment.OpcmImpl, + mipsImpl: wb.output.ImplementationsDeployment.MipsImpl, + keys: keys, + } + return newInteropMigrationState(wb), l1Net, l2A, l2B, wb.outFullCfgSet +} diff --git a/op-service/logpipe/pipe.go b/op-service/logpipe/pipe.go index abc22c09ec74c..e876908fc781d 100644 --- a/op-service/logpipe/pipe.go +++ b/op-service/logpipe/pipe.go @@ -80,10 +80,17 @@ type LogEntry interface { type LogParser func(line []byte) LogEntry func ToLogger(logger log.Logger) func(e LogEntry) { + return ToLoggerWithMinLevel(logger, log.LevelTrace) +} + +func ToLoggerWithMinLevel(logger log.Logger, minLevel slog.Level) func(e LogEntry) { return func(e LogEntry) { msg := e.LogMessage() attrs := e.LogFields() lvl := e.LogLevel() + if lvl < minLevel { + return + } if lvl >= log.LevelCrit { // If a sub-process has a critical error, this process can handle it diff --git a/op-service/logpipe/pipe_test.go b/op-service/logpipe/pipe_test.go index 475c7971d10bc..6133eaee4a04b 100644 --- a/op-service/logpipe/pipe_test.go +++ b/op-service/logpipe/pipe_test.go @@ -43,3 +43,30 @@ func TestWriteToLogProcessor(t *testing.T) { require.NotNil(t, entry3) require.Equal(t, "world", entry3.Message) } + +func TestWriteToLogProcessorWithMinLevel(t *testing.T) { + logger, capt := testlog.CaptureLogger(t, log.LevelTrace) + + proc := NewLineBuffer(func(line []byte) { + ToLoggerWithMinLevel(logger, log.LevelWarn)(ParseRustStructuredLogs(line)) + }) + _, err := io.Copy(proc, strings.NewReader(`{"level": "DEBUG", "fields": {"message": "hello", "foo": 1}}`+"\n")) + require.NoError(t, err) + _, err = io.Copy(proc, strings.NewReader(`{"fields": {"message": "world", "bar": "sunny"}, "level": "INFO"}`+"\n")) + require.NoError(t, err) + _, err = io.Copy(proc, strings.NewReader(`{"fields": {"message": "warn", "baz": "kept"}, "level": "WARN"}`+"\n")) + require.NoError(t, err) + + require.Nil(t, 
capt.FindLog( + testlog.NewLevelFilter(log.LevelDebug), + testlog.NewAttributesContainsFilter("foo", "1"))) + require.Nil(t, capt.FindLog( + testlog.NewLevelFilter(log.LevelInfo), + testlog.NewAttributesContainsFilter("bar", "sunny"))) + + entry := capt.FindLog( + testlog.NewLevelFilter(log.LevelWarn), + testlog.NewAttributesContainsFilter("baz", "kept")) + require.NotNil(t, entry) + require.Equal(t, "warn", entry.Message) +} diff --git a/op-up/main.go b/op-up/main.go index 4ea3ec0b137e5..a9bbef2a91c3b 100644 --- a/op-up/main.go +++ b/op-up/main.go @@ -4,9 +4,9 @@ import ( "context" _ "embed" "encoding/json" + "errors" "fmt" "io" - "log/slog" "net/http" "os" "os/signal" @@ -19,10 +19,7 @@ import ( "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-devstack/presets" opservice "github.com/ethereum-optimism/optimism/op-service" "github.com/ethereum-optimism/optimism/op-service/cliapp" "github.com/ethereum-optimism/optimism/op-service/client" @@ -34,6 +31,7 @@ import ( "github.com/ethereum/go-ethereum/crypto" "github.com/ethereum/go-ethereum/log" "github.com/urfave/cli/v2" + "go.opentelemetry.io/otel" "go.opentelemetry.io/otel/trace" ) @@ -105,50 +103,27 @@ func runOpUp(ctx context.Context, stderr io.Writer, opUpDir string) error { if err := os.MkdirAll(opUpDir, 0o755); err != nil { return fmt.Errorf("create the op-up dir: %w", err) } - deployerCacheDir := filepath.Join(opUpDir, "deployer", "cache") - if err := os.MkdirAll(deployerCacheDir, 0o755); err != nil { - return fmt.Errorf("create the deployer cache dir: %w", err) + tempRoot := filepath.Join(opUpDir, "tmp") + if err := os.MkdirAll(tempRoot, 0o755); err != nil 
{ + return fmt.Errorf("create the op-up temp dir: %w", err) } devtest.RootContext = ctx + t := newTestingT(ctx, stderr, tempRoot) + defer t.doCleanup() - p := newP(ctx, stderr) - defer p.Close() - - ids := sysgo.NewDefaultMinimalSystemIDs(sysgo.DefaultL1ID, sysgo.DefaultL2AID) - opts := stack.Combine( - sysgo.WithMnemonicKeys(devkeys.TestMnemonic), - - sysgo.WithDeployer(), - sysgo.WithDeployerOptions( - sysgo.WithEmbeddedContractSources(), - sysgo.WithCommons(ids.L1.ChainID()), - sysgo.WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - ), - sysgo.WithDeployerPipelineOption(sysgo.WithDeployerCacheDir(deployerCacheDir)), - - sysgo.WithL1Nodes(ids.L1EL, ids.L1CL), - - sysgo.WithL2ELNode(ids.L2EL), - sysgo.WithL2CLNode(ids.L2CL, ids.L1CL, ids.L1EL, ids.L2EL, sysgo.L2CLSequencer()), - sysgo.WithL2MetricsDashboard(), - - sysgo.WithBatcher(ids.L2Batcher, ids.L1EL, ids.L2CL, ids.L2EL), - sysgo.WithProposer(ids.L2Proposer, ids.L1EL, &ids.L2CL, nil), - - sysgo.WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2EL}), - ) - - orch := sysgo.NewOrchestrator(p, opts) - stack.ApplyOptionLifecycle[*sysgo.Orchestrator](opts, orch) - if err := runSysgo(ctx, stderr, orch); err != nil { + sys, err := newMinimalSystem(t) + if err != nil { + return err + } + if err := runSystem(ctx, stderr, sys); err != nil { return err } fmt.Fprintf(stderr, "\nPlease consider filling out this survey to influence future development: https://www.surveymonkey.com/r/JTGHFK3\n") return nil } -func newP(ctx context.Context, stderr io.Writer) devtest.P { +func newLogger(ctx context.Context, stderr io.Writer) log.Logger { logHandler := oplog.NewLogHandler(stderr, oplog.DefaultCLIConfig()) logHandler = logfilter.WrapFilterHandler(logHandler) logHandler.(logfilter.FilterHandler).Set(logfilter.DefaultMute()) @@ -156,26 +131,30 @@ func newP(ctx context.Context, stderr io.Writer) devtest.P { logger := log.NewLogger(logHandler) oplog.SetGlobalLogHandler(logHandler) logger.SetContext(ctx) - 
onFail := func(now bool) { - logger.Error("Main failed") - debug.PrintStack() - if now { - panic("critical Main fail") + return logger +} + +func newMinimalSystem(t *testingT) (sys *presets.Minimal, err error) { + defer func() { + if recovered := recover(); recovered != nil { + var failure testingFailure + if errors.As(asError(recovered), &failure) { + err = failure.err + return + } + panic(recovered) } - } - p := devtest.NewP(ctx, logger, onFail, func() { - onFail(true) - }) - return p + }() + return presets.NewMinimal(t), nil } -func runSysgo(ctx context.Context, stderr io.Writer, orch *sysgo.Orchestrator) error { +func runSystem(ctx context.Context, stderr io.Writer, sys *presets.Minimal) error { // Print available account. hd, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) if err != nil { return fmt.Errorf("new mnemonic dev keys: %w", err) } - const funderIndex = 10_000 // see sysgo/deployer.go. + const funderIndex = 10_000 funderUserKey := devkeys.UserKey(funderIndex) funderAddress, err := hd.Address(funderUserKey) if err != nil { @@ -190,19 +169,7 @@ func runSysgo(ctx context.Context, stderr io.Writer, orch *sysgo.Orchestrator) e fmt.Fprintf(stderr, "Test Account Private Key: %s\n", "0x"+common.Bytes2Hex(crypto.FromECDSA(funderPrivKey))) fmt.Fprintf(stderr, "EL Node URL: %s\n", "http://localhost:8545") - t := &testingT{ - ctx: ctx, - cleanups: make([]func(), 0), - } - defer t.doCleanup() - sys := shim.NewSystem(t) - orch.Hydrate(sys) - l2Networks := sys.L2Networks() - if len(l2Networks) != 1 { - return fmt.Errorf("need one l2 network, got: %d", len(l2Networks)) - } - l2Net := l2Networks[0] - elNode := l2Net.L2ELNode(match.FirstL2EL) + elNode := sys.L2EL // Log on new blocks. go func() { @@ -227,7 +194,7 @@ func runSysgo(ctx context.Context, stderr io.Writer, orch *sysgo.Orchestrator) e // Proxy L2 EL requests. 
go func() { - if err := proxyEL(stderr, elNode.L2EthClient().RPC()); err != nil { + if err := proxyEL(ctx, stderr, elNode.Escape().L2EthClient().RPC()); err != nil && !errors.Is(err, http.ErrServerClosed) { fmt.Fprintf(stderr, "error: %v", err) } }() @@ -239,9 +206,10 @@ func runSysgo(ctx context.Context, stderr io.Writer, orch *sysgo.Orchestrator) e // proxyEL is a hacky way to intercept EL json rpc requests for logging to get around log filtering // bugs. -func proxyEL(stderr io.Writer, client client.RPC) error { +func proxyEL(ctx context.Context, stderr io.Writer, client client.RPC) error { + mux := http.NewServeMux() // Set up the HTTP handler for all incoming requests. - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { // Ensure the request method is POST, as JSON RPC typically uses POST. if r.Method != http.MethodPost { http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) @@ -348,35 +316,98 @@ func proxyEL(stderr io.Writer, client client.RPC) error { } }) - // Start the HTTP server. 
- if err := http.ListenAndServe("localhost:8545", nil); err != nil { - return fmt.Errorf("listen and server: %w", err) + server := &http.Server{Addr: "localhost:8545", Handler: mux} + errCh := make(chan error, 1) + go func() { + errCh <- server.ListenAndServe() + }() + select { + case <-ctx.Done(): + shutdownCtx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + if err := server.Shutdown(shutdownCtx); err != nil { + return fmt.Errorf("shutdown proxy server: %w", err) + } + return <-errCh + case err := <-errCh: + if err != nil { + return fmt.Errorf("listen and serve: %w", err) + } + return nil } - return nil } type testingT struct { + state *testingState + ctx context.Context + logger log.Logger + tracer trace.Tracer + req *testreq.Assertions + gate *testreq.Assertions +} + +type testingState struct { mu sync.Mutex - ctx context.Context + tempRoot string cleanups []func() } +type testingFailure struct { + err error +} + +func (f testingFailure) Error() string { + return f.err.Error() +} + +func asError(v any) error { + if err, ok := v.(error); ok { + return err + } + return nil +} + +func newTestingT(ctx context.Context, stderr io.Writer, tempRoot string) *testingT { + logger := newLogger(ctx, stderr) + t := &testingT{ + state: &testingState{ + tempRoot: tempRoot, + cleanups: make([]func(), 0), + }, + ctx: ctx, + logger: logger, + tracer: otel.Tracer("op-up"), + } + t.req = testreq.New(t) + t.gate = testreq.New(t) + return t +} + +func (t *testingT) failf(format string, args ...any) { + err := fmt.Errorf(format, args...) + t.logger.Error("op-up runtime failure", "err", err) + debug.PrintStack() + panic(testingFailure{err: err}) +} + var _ devtest.T = (*testingT)(nil) var _ testreq.TestingT = (*testingT)(nil) func (t *testingT) doCleanup() { - t.mu.Lock() - defer t.mu.Unlock() - for _, cleanup := range slices.Backward(t.cleanups) { + t.state.mu.Lock() + cleanups := append([]func(){}, t.state.cleanups...) 
+ t.state.cleanups = nil + t.state.mu.Unlock() + for _, cleanup := range slices.Backward(cleanups) { cleanup() } } // Cleanup implements devtest.T. func (t *testingT) Cleanup(fn func()) { - t.mu.Lock() - defer t.mu.Unlock() - t.cleanups = append(t.cleanups, fn) + t.state.mu.Lock() + defer t.state.mu.Unlock() + t.state.cleanups = append(t.state.cleanups, fn) } // Ctx implements devtest.T. @@ -391,23 +422,27 @@ func (t *testingT) Deadline() (deadline time.Time, ok bool) { // Error implements devtest.T. func (t *testingT) Error(args ...any) { + t.failf("%s", fmt.Sprint(args...)) } // Errorf implements devtest.T. func (t *testingT) Errorf(format string, args ...any) { + t.failf(format, args...) } // Fail implements devtest.T. func (t *testingT) Fail() { + t.failf("test failed") } // FailNow implements devtest.T. func (t *testingT) FailNow() { + t.failf("test failed immediately") } // Gate implements devtest.T. func (t *testingT) Gate() *testreq.Assertions { - return testreq.New(t) + return t.gate } // Helper implements devtest.T. @@ -416,14 +451,16 @@ func (t *testingT) Helper() { // Log implements devtest.T. func (t *testingT) Log(args ...any) { + t.logger.Info(fmt.Sprint(args...)) } // Logf implements devtest.T. 
func (t *testingT) Logf(format string, args ...any) { + t.logger.Info(fmt.Sprintf(format, args...)) } func (t *testingT) Logger() log.Logger { - return log.NewLogger(slog.NewTextHandler(io.Discard, nil)) + return t.logger } func (t *testingT) Name() string { @@ -434,24 +471,25 @@ func (t *testingT) Parallel() { } func (t *testingT) Require() *testreq.Assertions { - return testreq.New(t) + return t.req } func (t *testingT) Run(name string, fn func(devtest.T)) { - panic("unimplemented") + subCtx := devtest.AddTestScope(t.ctx, name) + fn(t.WithCtx(subCtx)) } func (t *testingT) Skip(args ...any) { - panic("unimplemented") + t.failf("unexpected skip: %s", fmt.Sprint(args...)) } func (t *testingT) SkipNow() { - panic("unimplemented") + t.failf("unexpected skip") } // Skipf implements devtest.T. func (t *testingT) Skipf(format string, args ...any) { - panic("unimplemented") + t.failf("unexpected skip: "+format, args...) } // Skipped implements devtest.T. @@ -461,17 +499,36 @@ func (t *testingT) Skipped() bool { // TempDir implements devtest.T. func (t *testingT) TempDir() string { - panic("unimplemented") + dir, err := os.MkdirTemp(t.state.tempRoot, "op-up-*") + if err != nil { + t.failf("failed to create temp dir: %v", err) + } + t.Cleanup(func() { + if err := os.RemoveAll(dir); err != nil { + t.logger.Error("failed to clean up temp dir", "dir", dir, "err", err) + } + }) + return dir } // Tracer implements devtest.T. func (t *testingT) Tracer() trace.Tracer { - panic("unimplemented") + return t.tracer } // WithCtx implements devtest.T. func (t *testingT) WithCtx(ctx context.Context) devtest.T { - return t + logger := t.logger.New() + logger.SetContext(ctx) + out := &testingT{ + state: t.state, + ctx: ctx, + logger: logger, + tracer: t.tracer, + } + out.req = testreq.New(out) + out.gate = testreq.New(out) + return out } // _TestOnly implements devtest.T. 
diff --git a/rust/kona/tests/node/common/conductor_test.go b/rust/kona/tests/node/common/conductor_test.go index 5011e306c0907..e59fe59db5de6 100644 --- a/rust/kona/tests/node/common/conductor_test.go +++ b/rust/kona/tests/node/common/conductor_test.go @@ -3,14 +3,12 @@ package node import ( "context" "fmt" - "strings" "testing" "time" "github.com/ethereum-optimism/optimism/op-conductor/consensus" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/stack" "github.com/ethereum-optimism/optimism/op-service/testlog" node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" "github.com/ethereum/go-ethereum/log" @@ -54,8 +52,7 @@ func TestConductorLeadershipTransfer(gt *testing.T) { idToConductor := make(map[string]conductorWithInfo) for _, conductor := range conductors { - conductorId := strings.TrimPrefix(conductor.String(), stack.KindConductor.String()+"-") - idToConductor[conductorId] = conductorWithInfo{conductor, consensus.ServerInfo{}} + idToConductor[conductor.String()] = conductorWithInfo{conductor, consensus.ServerInfo{}} } for _, memberInfo := range membership.Servers { conductor, ok := idToConductor[memberInfo.ID] diff --git a/rust/kona/tests/node/common/engine_test.go b/rust/kona/tests/node/common/engine_test.go index e0fb99eed37ff..5852b3fc3924c 100644 --- a/rust/kona/tests/node/common/engine_test.go +++ b/rust/kona/tests/node/common/engine_test.go @@ -14,7 +14,7 @@ import ( func TestEngine(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) // Get the nodes from the network. nodes := out.L2CLKonaNodes() @@ -25,23 +25,16 @@ func TestEngine(gt *testing.T) { go func(node *dsl.L2CLNode) { defer wg.Done() - queue := make(chan []uint64) - - // Spawn a task that gets the engine queue length with a ws connection. 
+ done := make(chan struct{}) go func() { - done := make(chan struct{}) - go func() { - // Wait for 40 unsafe blocks to be produced. - node.Advanced(types.LocalUnsafe, 40, 100) - done <- struct{}{} - }() - - queue <- node_utils.GetDevWS(t, node, "engine_queue_size", done) + defer close(done) + // Wait for 40 unsafe blocks to be produced. + node.Advanced(types.LocalUnsafe, 40, 100) }() - q := <-queue - for _, q := range q { - require.LessOrEqual(t, q, uint64(1), "engine queue length should be 1 or less") + queueLens := node_utils.GetDevWS(t, node, "engine_queue_size", done) + for _, queueLen := range queueLens { + require.LessOrEqual(t, queueLen, uint64(1), "engine queue length should be 1 or less") } }(&node) } diff --git a/rust/kona/tests/node/common/init_test.go b/rust/kona/tests/node/common/init_test.go deleted file mode 100644 index 1a15b2416d06c..0000000000000 --- a/rust/kona/tests/node/common/init_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package node - -import ( - "fmt" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - config := node_utils.ParseL2NodeConfigFromEnv() - - fmt.Printf("Running e2e tests with Config: %d\n", config) - presets.DoMain(m, node_utils.WithMixedOpKona(config)) -} diff --git a/rust/kona/tests/node/common/p2p_test.go b/rust/kona/tests/node/common/p2p_test.go index 1d461efdca1bf..562e924906165 100644 --- a/rust/kona/tests/node/common/p2p_test.go +++ b/rust/kona/tests/node/common/p2p_test.go @@ -8,7 +8,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" "github.com/libp2p/go-libp2p/core/peer" 
"github.com/stretchr/testify/require" ) @@ -28,7 +27,7 @@ func checkProtocols(t devtest.T, peer *apis.PeerInfo) { // Check that the node has enough connected peers and peers in the discovery table. func checkPeerStats(t devtest.T, node *dsl.L2CLNode, minConnected uint, minBlocksTopic uint) { peerStats, err := node.Escape().P2PAPI().PeerStats(t.Ctx()) - nodeName := node.Escape().ID() + nodeName := node.Escape().Name() require.NoError(t, err, "failed to get peer stats for %s", nodeName) @@ -47,18 +46,18 @@ func arePeers(t devtest.T, node *dsl.L2CLNode, otherNodeId peer.ID) { for _, peer := range nodePeers.Peers { if peer.PeerID == otherNodeId { // TODO(ethereum-optimism/optimism#18655): this test is flaky, we should fix it. - // require.Equal(t, network.Connected, peer.Connectedness, fmt.Sprintf("%s is not connected to the %s", node.Escape().ID(), otherNodeId)) + // require.Equal(t, network.Connected, peer.Connectedness, fmt.Sprintf("%s is not connected to the %s", node.Escape().Name(), otherNodeId)) checkProtocols(t, peer) found = true } } - require.True(t, found, fmt.Sprintf("%s is not in the %s's peers", otherNodeId, node.Escape().ID())) + require.True(t, found, fmt.Sprintf("%s is not in the %s's peers", otherNodeId, node.Escape().Name())) } func TestP2PMinimal(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLNodes() firstNode := nodes[0] @@ -83,7 +82,7 @@ func TestP2PMinimal(gt *testing.T) { func TestP2PProtocols(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLNodes() @@ -97,7 +96,7 @@ func TestP2PProtocols(gt *testing.T) { func TestP2PChainID(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLKonaNodes() @@ -106,14 +105,14 @@ func TestP2PChainID(gt *testing.T) { chainID := nodes[0].PeerInfo().ChainID for _, node := range nodes { - 
nodeChainID, ok := node.Escape().ID().ChainID().Uint64() + nodeChainID, ok := node.Escape().ChainID().Uint64() require.True(t, ok, "chainID is too large for a uint64") - require.Equal(t, chainID, nodeChainID, fmt.Sprintf("%s has a different chainID", node.Escape().ID())) + require.Equal(t, chainID, nodeChainID, fmt.Sprintf("%s has a different chainID", node.Escape().Name())) for _, peer := range node.Peers().Peers { // Sometimes peers don't have a chainID because they are not part of the discovery table while being connected to gossip. if peer.ChainID != 0 { - require.Equal(t, chainID, peer.ChainID, fmt.Sprintf("%s has a different chainID", node.Escape().ID())) + require.Equal(t, chainID, peer.ChainID, fmt.Sprintf("%s has a different chainID", node.Escape().Name())) } } } @@ -123,7 +122,7 @@ func TestP2PChainID(gt *testing.T) { func TestNetworkConnectivity(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLNodes() numNodes := len(nodes) diff --git a/rust/kona/tests/node/common/rpc_test.go b/rust/kona/tests/node/common/rpc_test.go index 34720e92c8fc9..560067cbdf3ca 100644 --- a/rust/kona/tests/node/common/rpc_test.go +++ b/rust/kona/tests/node/common/rpc_test.go @@ -17,7 +17,7 @@ import ( func TestP2PPeers(gt *testing.T) { t := devtest.SerialT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) p2pPeersAndPeerStats(t, out) @@ -35,7 +35,7 @@ func p2pSelfAndPeers(t devtest.T, out *node_utils.MixedOpKonaPreset) { go func(node *dsl.L2CLNode) { defer wg.Done() clRPC := node_utils.GetNodeRPCEndpoint(node) - clName := node.Escape().ID().Key() + clName := node.Escape().Name() // Gather the peers for the node. peers := &apis.PeerDump{} @@ -49,7 +49,7 @@ func p2pSelfAndPeers(t devtest.T, out *node_utils.MixedOpKonaPreset) { // We get the peer's info. 
otherPeerInfo := &apis.PeerInfo{} otherCLRPC := node_utils.GetNodeRPCEndpoint(&node) - otherCLName := node.Escape().ID().Key() + otherCLName := node.Escape().Name() require.NoError(t, node_utils.SendRPCRequest(otherCLRPC, "opp2p_self", otherPeerInfo), "failed to send RPC request to node %s: %s", clName) // These checks fail for the op-node. It seems that their p2p handler is flaky and doesn't always return the correct peer info. @@ -93,7 +93,7 @@ func p2pPeersAndPeerStats(t devtest.T, out *node_utils.MixedOpKonaPreset) { go func(node *dsl.L2CLNode) { defer wg.Done() clRPC := node_utils.GetNodeRPCEndpoint(node) - clName := node.Escape().ID().Key() + clName := node.Escape().Name() peers := &apis.PeerDump{} require.NoError(t, node_utils.SendRPCRequest(clRPC, "opp2p_peers", peers, true), "failed to send RPC request to node %s: %s", clName) @@ -112,7 +112,7 @@ func p2pBanPeer(t devtest.T, out *node_utils.MixedOpKonaPreset) { nodes := out.L2CLNodes() for _, node := range nodes { clRPC := node_utils.GetNodeRPCEndpoint(&node) - clName := node.Escape().ID().Key() + clName := node.Escape().Name() peers := &apis.PeerDump{} require.NoError(t, node_utils.SendRPCRequest(clRPC, "opp2p_peers", peers, true), "failed to send RPC request to node %s: %s", clName) @@ -162,7 +162,7 @@ func p2pBanPeer(t devtest.T, out *node_utils.MixedOpKonaPreset) { func rollupConfig(t devtest.T, node *dsl.L2CLNode) *rollup.Config { clRPC := node_utils.GetNodeRPCEndpoint(node) - clName := node.Escape().ID().Key() + clName := node.Escape().Name() rollupConfig := &rollup.Config{} require.NoError(t, node_utils.SendRPCRequest(clRPC, "optimism_rollupConfig", rollupConfig), "failed to send RPC request to node %s: %s", clName) @@ -181,7 +181,7 @@ func rollupConfigMatches(t devtest.T, configA *rollup.Config, configB *rollup.Co func TestRollupConfig(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) rollupConfigs := make([]*rollup.Config, 0) diff --git 
a/rust/kona/tests/node/common/setup_test.go b/rust/kona/tests/node/common/setup_test.go new file mode 100644 index 0000000000000..ddc346e2fcdb9 --- /dev/null +++ b/rust/kona/tests/node/common/setup_test.go @@ -0,0 +1,11 @@ +package node + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" +) + +func newCommonPreset(t devtest.T) *node_utils.MixedOpKonaPreset { + t.Helper() + return node_utils.NewMixedOpKona(t) +} diff --git a/rust/kona/tests/node/common/sync_test.go b/rust/kona/tests/node/common/sync_test.go index 5db91b82a45d4..5dd8d53ff53a4 100644 --- a/rust/kona/tests/node/common/sync_test.go +++ b/rust/kona/tests/node/common/sync_test.go @@ -13,7 +13,7 @@ import ( func TestL2SafeSync(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) sequencer := out.L2CLSequencerNodes()[0] nodes := out.L2CLValidatorNodes() @@ -32,7 +32,7 @@ func TestL2SafeSync(gt *testing.T) { func TestL2UnsafeSync(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLNodes() @@ -50,7 +50,7 @@ func TestL2FinalizedSync(gt *testing.T) { t := devtest.ParallelT(gt) t.Skip("Skipping finalized sync test") - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLNodes() diff --git a/rust/kona/tests/node/common/sync_ws_test.go b/rust/kona/tests/node/common/sync_ws_test.go index a0099a8a80b8a..499d10a89667c 100644 --- a/rust/kona/tests/node/common/sync_ws_test.go +++ b/rust/kona/tests/node/common/sync_ws_test.go @@ -24,7 +24,7 @@ func TestSyncUnsafeBecomesSafe(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLKonaNodes() @@ -78,7 +78,7 @@ func TestSyncUnsafeBecomesSafe(gt *testing.T) { func TestSyncUnsafe(gt *testing.T) { t := devtest.ParallelT(gt) - out := 
node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLKonaNodes() @@ -102,7 +102,7 @@ func TestSyncUnsafe(gt *testing.T) { // We shouldn't have safe heads reorgs in this very simple testnet because there is only one DA layer node. for _, block := range output { for _, node := range nodes { - otherCLNode := node.Escape().ID().Key() + otherCLNode := node.Escape().Name() otherCLSyncStatus := node.ChainSyncStatus(out.L2Chain.ChainID(), types.LocalUnsafe) if otherCLSyncStatus.Number < block.Number { @@ -114,7 +114,7 @@ func TestSyncUnsafe(gt *testing.T) { require.NoError(t, err, "impossible to get block from node %s", otherCLNode) // Make sure the blocks match! - require.Equal(t, expectedOutputResponse.BlockRef, block, "block mismatch between %s and %s", otherCLNode, node.Escape().ID().Key()) + require.Equal(t, expectedOutputResponse.BlockRef, block, "block mismatch between %s and %s", otherCLNode, node.Escape().Name()) } } @@ -129,7 +129,7 @@ func TestSyncUnsafe(gt *testing.T) { func TestSyncSafe(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLKonaNodes() @@ -145,7 +145,7 @@ func TestSyncSafe(gt *testing.T) { wg.Add(1) go func(node *dsl.L2CLNode) { defer wg.Done() - clName := node.Escape().ID().Key() + clName := node.Escape().Name() output := node_utils.GetKonaWs(t, node, "safe_head", time.After(2*time.Minute)) @@ -154,7 +154,7 @@ func TestSyncSafe(gt *testing.T) { // We shouldn't have safe heads reorgs in this very simple testnet because there is only one DA layer node. 
for _, block := range output { for _, node := range nodes { - otherCLNode := node.Escape().ID().Key() + otherCLNode := node.Escape().Name() otherCLSyncStatus := node.ChainSyncStatus(out.L2Chain.ChainID(), types.LocalSafe) if otherCLSyncStatus.Number < block.Number { @@ -181,7 +181,7 @@ func TestSyncSafe(gt *testing.T) { func TestSyncFinalized(gt *testing.T) { t := devtest.ParallelT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) nodes := out.L2CLKonaNodes() @@ -190,7 +190,7 @@ func TestSyncFinalized(gt *testing.T) { wg.Add(1) go func(node *dsl.L2CLNode) { defer wg.Done() - clName := node.Escape().ID().Key() + clName := node.Escape().Name() output := node_utils.GetKonaWs(t, node, "finalized_head", time.After(4*time.Minute)) @@ -201,7 +201,7 @@ func TestSyncFinalized(gt *testing.T) { // For each block, we check that the block is actually in the chain of the other nodes. for _, block := range output { for _, node := range nodes { - otherCLNode := node.Escape().ID().Key() + otherCLNode := node.Escape().Name() otherCLSyncStatus := node.ChainSyncStatus(out.L2Chain.ChainID(), types.Finalized) if otherCLSyncStatus.Number < block.Number { diff --git a/rust/kona/tests/node/common/tx_inclusion_test.go b/rust/kona/tests/node/common/tx_inclusion_test.go index 089d2de7ae29c..10a4eca8e4b78 100644 --- a/rust/kona/tests/node/common/tx_inclusion_test.go +++ b/rust/kona/tests/node/common/tx_inclusion_test.go @@ -6,12 +6,11 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-service/eth" - node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" ) func TestL2TransactionInclusion(gt *testing.T) { t := devtest.SerialT(gt) - out := node_utils.NewMixedOpKona(t) + out := newCommonPreset(t) originNode := out.L2ELSequencerNodes()[0] funder := dsl.NewFunder(out.Wallet, out.Faucet, originNode) diff --git 
a/rust/kona/tests/node/long-running/init_test.go b/rust/kona/tests/node/long-running/helpers_test.go similarity index 76% rename from rust/kona/tests/node/long-running/init_test.go rename to rust/kona/tests/node/long-running/helpers_test.go index 610f6bbd8f24b..79620132dde60 100644 --- a/rust/kona/tests/node/long-running/init_test.go +++ b/rust/kona/tests/node/long-running/helpers_test.go @@ -2,9 +2,8 @@ package node import ( "flag" - "testing" - "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" ) @@ -15,11 +14,8 @@ var ( initNumAccounts = flag.Int("init-num-accounts", 10, "initial number of accounts to fund") ) -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - flag.Parse() - - presets.DoMain(m, node_utils.WithMixedOpKona(node_utils.L2NodeConfig{ +func newLongRunningPreset(t devtest.T) *node_utils.MixedOpKonaPreset { + return node_utils.NewMixedOpKonaForConfig(t, node_utils.L2NodeConfig{ OpSequencerNodesWithGeth: 0, OpSequencerNodesWithReth: 0, KonaSequencerNodesWithGeth: 1, @@ -28,5 +24,5 @@ func TestMain(m *testing.M) { OpNodesWithReth: 1, KonaNodesWithGeth: 1, KonaNodesWithReth: 1, - })) + }) } diff --git a/rust/kona/tests/node/long-running/tx_producer_test.go b/rust/kona/tests/node/long-running/tx_producer_test.go index 3b1417c80e573..3d5369c16b5ab 100644 --- a/rust/kona/tests/node/long-running/tx_producer_test.go +++ b/rust/kona/tests/node/long-running/tx_producer_test.go @@ -163,7 +163,7 @@ func (tr *TxReceiver) Start(wg *sync.WaitGroup) { func TestTxProducer(gt *testing.T) { t := devtest.SerialT(gt) - out := node_utils.NewMixedOpKona(t) + out := newLongRunningPreset(t) var wg sync.WaitGroup diff --git a/rust/kona/tests/node/reorgs/init_test.go b/rust/kona/tests/node/reorgs/init_test.go deleted file mode 100644 index ceb5850b8f246..0000000000000 --- 
a/rust/kona/tests/node/reorgs/init_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package reorgs - -import ( - "fmt" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - l2Config := node_utils.ParseL2NodeConfigFromEnv() - - fmt.Printf("Running e2e reorg tests with Config: %d\n", l2Config) - - presets.DoMain(m, node_utils.WithMixedWithTestSequencer(l2Config)) -} diff --git a/rust/kona/tests/node/reorgs/l2_reorg_after_l1_reorgs_test.go b/rust/kona/tests/node/reorgs/l2_reorg_after_l1_reorgs_test.go index 366ed16504ae6..ebb65688cdfb2 100644 --- a/rust/kona/tests/node/reorgs/l2_reorg_after_l1_reorgs_test.go +++ b/rust/kona/tests/node/reorgs/l2_reorg_after_l1_reorgs_test.go @@ -6,8 +6,6 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-service/apis" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" @@ -47,8 +45,8 @@ func TestL2ReorgAfterL1Reorg(gt *testing.T) { } post := func(t devtest.T, sys *node_utils.MinimalWithTestSequencersPreset) { for i, elNode := range sys.L2ELNodes() { - require.False(t, elNode.IsCanonical(unsafeRef[i].ID()), "Previous unsafe block should have been reorged", "elNode", elNode.ID(), "unsafeRef", unsafeRef[i].ID()) - require.False(t, elNode.IsCanonical(localSafeRef[i].ID()), "Previous local-safe block should have been reorged", "elNode", elNode.ID(), "localSafeRef", localSafeRef[i].ID()) + require.False(t, elNode.IsCanonical(unsafeRef[i].ID()), "Previous unsafe block should have been reorged", "elNode", elNode.String(), "unsafeRef", 
unsafeRef[i].ID()) + require.False(t, elNode.IsCanonical(localSafeRef[i].ID()), "Previous local-safe block should have been reorged", "elNode", elNode.String(), "localSafeRef", localSafeRef[i].ID()) } } testL2ReorgAfterL1Reorg(gt, 20, pre, post) @@ -66,11 +64,9 @@ func testL2ReorgAfterL1Reorg(gt *testing.T, n int, preChecks, postChecks checksF sys := node_utils.NewMixedOpKonaWithTestSequencer(t) ts := sys.TestSequencer.Escape().ControlAPI(sys.L1Network.ChainID()) - cl := sys.L1Network.Escape().L1CLNode(match.FirstL1CL) - sys.L1Network.WaitForBlock() - sys.ControlPlane.FakePoSState(cl.ID(), stack.Stop) + sys.L1CL.Stop() // sequence a few L1 and L2 blocks for range n + 1 { @@ -102,7 +98,7 @@ func testL2ReorgAfterL1Reorg(gt *testing.T, n int, preChecks, postChecks checksF sequenceL1Block(t, ts, divergence.ParentHash) // continue building on the alternative L1 chain - sys.ControlPlane.FakePoSState(cl.ID(), stack.Start) + sys.L1CL.Start() // confirm L1 reorged sys.L1EL.ReorgTriggered(divergence, 5) diff --git a/rust/kona/tests/node/reorgs/l2_reorg_test.go b/rust/kona/tests/node/reorgs/l2_reorg_test.go index 5e0a25e5b755d..1b489dc67446e 100644 --- a/rust/kona/tests/node/reorgs/l2_reorg_test.go +++ b/rust/kona/tests/node/reorgs/l2_reorg_test.go @@ -120,7 +120,7 @@ func TestL2Reorg(gt *testing.T) { } } if !found { - return fmt.Errorf("expected node %s to be connected to the sequencer", node.Escape().ID().Key()) + return fmt.Errorf("expected node %s to be connected to the sequencer", node.Escape().Name()) } } diff --git a/rust/kona/tests/node/restart/conn_drop_test.go b/rust/kona/tests/node/restart/conn_drop_test.go index 41f25ff7b9394..3cbf942333aa5 100644 --- a/rust/kona/tests/node/restart/conn_drop_test.go +++ b/rust/kona/tests/node/restart/conn_drop_test.go @@ -14,7 +14,7 @@ import ( func TestConnDropSync(gt *testing.T) { t := devtest.SerialT(gt) - out := node_utils.NewMixedOpKona(t) + out := newRestartPreset(t) nodes := out.L2CLValidatorNodes() sequencerNodes := 
out.L2CLSequencerNodes() @@ -32,20 +32,20 @@ func TestConnDropSync(gt *testing.T) { var postDisconnectCheckFuns []dsl.CheckFunc for _, node := range nodes { - clName := node.Escape().ID().Key() + clName := node.Escape().Name() node.DisconnectPeer(&sequencer) // Ensure that the node is no longer connected to the sequencer - t.Logf("node %s is disconnected from sequencer %s", clName, sequencer.Escape().ID().Key()) + t.Logf("node %s is disconnected from sequencer %s", clName, sequencer.Escape().Name()) seqPeers := sequencer.Peers() for _, peer := range seqPeers.Peers { - t.Require().NotEqual(peer.PeerID, node.PeerInfo().PeerID, "expected node %s to be disconnected from sequencer %s", clName, sequencer.Escape().ID().Key()) + t.Require().NotEqual(peer.PeerID, node.PeerInfo().PeerID, "expected node %s to be disconnected from sequencer %s", clName, sequencer.Escape().Name()) } peers := node.Peers() for _, peer := range peers.Peers { - t.Require().NotEqual(peer.PeerID, sequencer.PeerInfo().PeerID, "expected node %s to be disconnected from sequencer %s", clName, sequencer.Escape().ID().Key()) + t.Require().NotEqual(peer.PeerID, sequencer.PeerInfo().PeerID, "expected node %s to be disconnected from sequencer %s", clName, sequencer.Escape().Name()) } currentUnsafeHead := node.ChainSyncStatus(node.ChainID(), types.LocalUnsafe) @@ -92,7 +92,7 @@ func TestConnDropSync(gt *testing.T) { var postReconnectCheckFuns []dsl.CheckFunc for _, node := range nodes { - clName := node.Escape().ID().Key() + clName := node.Escape().Name() node.ConnectPeer(&sequencer) @@ -104,13 +104,13 @@ func TestConnDropSync(gt *testing.T) { found := false for _, peer := range peers.Peers { if peer.PeerID == sequencer.PeerInfo().PeerID { - t.Logf("node %s is connected to reference node %s", clName, sequencer.Escape().ID().Key()) + t.Logf("node %s is connected to reference node %s", clName, sequencer.Escape().Name()) found = true break } } - t.Require().True(found, "expected node %s to be connected to 
reference node %s", clName, sequencer.Escape().ID().Key()) + t.Require().True(found, "expected node %s to be connected to reference node %s", clName, sequencer.Escape().Name()) // Check that the node is resyncing with the unsafe head network postReconnectCheckFuns = append(postReconnectCheckFuns, node_utils.MatchedWithinRange(t, node, sequencer, 3, types.LocalSafe, 50), node.AdvancedFn(types.LocalUnsafe, 50, 100), node_utils.MatchedWithinRange(t, node, sequencer, 3, types.LocalUnsafe, 100)) diff --git a/rust/kona/tests/node/restart/helpers_test.go b/rust/kona/tests/node/restart/helpers_test.go new file mode 100644 index 0000000000000..7c5ec4d452c91 --- /dev/null +++ b/rust/kona/tests/node/restart/helpers_test.go @@ -0,0 +1,18 @@ +package node_restart + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" +) + +func newRestartPreset(t devtest.T) *node_utils.MixedOpKonaPreset { + if sharedRestartRuntime != nil { + return node_utils.NewMixedOpKonaFromRuntime(t, sharedRestartRuntime) + } + + // Restart tests currently target a minimal kona-only topology. + return node_utils.NewMixedOpKonaForConfig(t, node_utils.L2NodeConfig{ + KonaSequencerNodesWithGeth: 1, + KonaNodesWithGeth: 1, + }) +} diff --git a/rust/kona/tests/node/restart/init_test.go b/rust/kona/tests/node/restart/init_test.go deleted file mode 100644 index 2d9b0e7cae20a..0000000000000 --- a/rust/kona/tests/node/restart/init_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package node_restart - -import ( - "fmt" - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // Currently, the restart tests only support kona nodes. The op node based configs are not supported (because of req-resp sync incompatibility). 
- config := node_utils.L2NodeConfig{ - KonaSequencerNodesWithGeth: 1, - KonaNodesWithGeth: 1, - } - - fmt.Printf("Running restart e2e tests with Config: %d\n", config) - presets.DoMain(m, node_utils.WithMixedOpKona(config)) -} diff --git a/rust/kona/tests/node/restart/restart_test.go b/rust/kona/tests/node/restart/restart_test.go index f57af1ab4d343..239b1a71d06a1 100644 --- a/rust/kona/tests/node/restart/restart_test.go +++ b/rust/kona/tests/node/restart/restart_test.go @@ -18,7 +18,7 @@ import ( func TestRestartSync(gt *testing.T) { t := devtest.SerialT(gt) - out := node_utils.NewMixedOpKona(t) + out := newRestartPreset(t) nodes := out.L2CLValidatorNodes() sequencerNodes := out.L2CLSequencerNodes() @@ -35,8 +35,8 @@ func TestRestartSync(gt *testing.T) { dsl.CheckAll(t, preCheckFuns...) for _, node := range nodes { - t.Logf("testing restarts for node %s", node.Escape().ID().Key()) - clName := node.Escape().ID().Key() + t.Logf("testing restarts for node %s", node.Escape().Name()) + clName := node.Escape().Name() nodePeerId := node.PeerInfo().PeerID t.Logf("stopping node %s", clName) @@ -48,7 +48,7 @@ func TestRestartSync(gt *testing.T) { seqPeers := sequencer.Peers() for _, peer := range seqPeers.Peers { if peer.PeerID == nodePeerId { - return nil, fmt.Errorf("expected node %s to be disconnected from sequencer %s", clName, sequencer.Escape().ID().Key()) + return nil, fmt.Errorf("expected node %s to be disconnected from sequencer %s", clName, sequencer.Escape().Name()) } } return nil, nil @@ -68,7 +68,7 @@ func TestRestartSync(gt *testing.T) { var postStartCheckFuns []dsl.CheckFunc for _, node := range nodes { - clName := node.Escape().ID().Key() + clName := node.Escape().Name() t.Logf("starting node %s", clName) node.Start() @@ -85,13 +85,13 @@ func TestRestartSync(gt *testing.T) { found := false for _, peer := range peers.Peers { if peer.PeerID == sequencer.PeerInfo().PeerID { - t.Logf("node %s is connected to reference node %s", clName, 
sequencer.Escape().ID().Key()) + t.Logf("node %s is connected to reference node %s", clName, sequencer.Escape().Name()) found = true break } } - t.Require().True(found, "expected node %s to be connected to reference node %s", clName, sequencer.Escape().ID().Key()) + t.Require().True(found, "expected node %s to be connected to reference node %s", clName, sequencer.Escape().Name()) } dsl.CheckAll(t, postStartCheckFuns...) diff --git a/rust/kona/tests/node/restart/sequencer_restart_test.go b/rust/kona/tests/node/restart/sequencer_restart_test.go index 9a02bde6ec99b..7ea4a93d8a893 100644 --- a/rust/kona/tests/node/restart/sequencer_restart_test.go +++ b/rust/kona/tests/node/restart/sequencer_restart_test.go @@ -9,13 +9,12 @@ import ( "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-service/retry" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" ) func TestSequencerRestart(gt *testing.T) { t := devtest.SerialT(gt) - out := node_utils.NewMixedOpKona(t) + out := newRestartPreset(t) nodes := out.L2CLValidatorNodes() sequencerNodes := out.L2CLSequencerNodes() @@ -33,7 +32,7 @@ func TestSequencerRestart(gt *testing.T) { dsl.CheckAll(t, preCheckFuns...) // Let's stop the sequencer node. 
- t.Logf("Stopping sequencer %s", sequencer.Escape().ID().Key()) + t.Logf("Stopping sequencer %s", sequencer.Escape().Name()) sequencer.Stop() var stopCheckFuns []dsl.CheckFunc @@ -43,7 +42,7 @@ func TestSequencerRestart(gt *testing.T) { _, err := retry.Do(t.Ctx(), 5, &retry.ExponentialStrategy{Max: 10 * time.Second, Min: 1 * time.Second, MaxJitter: 250 * time.Millisecond}, func() (any, error) { for _, peer := range nodePeers.Peers { if peer.PeerID == seqPeerId { - return nil, fmt.Errorf("expected node %s to be disconnected from sequencer %s", node.Escape().ID().Key(), sequencer.Escape().ID().Key()) + return nil, fmt.Errorf("expected node %s to be disconnected from sequencer %s", node.Escape().Name(), sequencer.Escape().Name()) } } return nil, nil @@ -58,13 +57,13 @@ func TestSequencerRestart(gt *testing.T) { dsl.CheckAll(t, stopCheckFuns...) // Let's restart the sequencer node. - t.Logf("Starting sequencer %s", sequencer.Escape().ID().Key()) + t.Logf("Starting sequencer %s", sequencer.Escape().Name()) sequencer.Start() // Let's reconnect the sequencer to the nodes. 
- t.Logf("Reconnecting sequencer %s to nodes", sequencer.Escape().ID().Key()) + t.Logf("Reconnecting sequencer %s to nodes", sequencer.Escape().Name()) for _, node := range nodes { - t.Logf("Connecting sequencer %s to node %s", sequencer.Escape().ID().Key(), node.Escape().ID().Key()) + t.Logf("Connecting sequencer %s to node %s", sequencer.Escape().Name(), node.Escape().Name()) sequencer.ConnectPeer(&node) } diff --git a/rust/kona/tests/node/restart/setup_test.go b/rust/kona/tests/node/restart/setup_test.go new file mode 100644 index 0000000000000..984062e7b3d71 --- /dev/null +++ b/rust/kona/tests/node/restart/setup_test.go @@ -0,0 +1,51 @@ +package node_restart + +import ( + "context" + "os" + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + oplog "github.com/ethereum-optimism/optimism/op-service/log" + node_utils "github.com/ethereum-optimism/optimism/rust/kona/tests/node/utils" +) + +var sharedRestartRuntime *sysgo.MixedSingleChainRuntime + +type packageInitResult struct { + code int +} + +func TestMain(m *testing.M) { + logger := oplog.NewLogger(os.Stderr, oplog.DefaultCLIConfig()) + pkg := devtest.NewP(context.Background(), logger, func(_ bool) { + panic(packageInitResult{code: 1}) + }, func() { + panic(packageInitResult{code: 0}) + }) + + devtest.RootContext = pkg.Ctx() + + code := 1 + func() { + defer func() { + if recovered := recover(); recovered != nil { + if result, ok := recovered.(packageInitResult); ok { + code = result.code + return + } + panic(recovered) + } + }() + + sharedRestartRuntime = node_utils.NewSharedMixedOpKonaRuntimeForConfig(pkg, node_utils.L2NodeConfig{ + KonaSequencerNodesWithGeth: 1, + KonaNodesWithGeth: 1, + }) + code = m.Run() + }() + + pkg.Close() + os.Exit(code) +} diff --git a/rust/kona/tests/node/utils/mixed_preset.go b/rust/kona/tests/node/utils/mixed_preset.go index f80eb415cc4e4..83064b1a9be9d 100644 --- 
a/rust/kona/tests/node/utils/mixed_preset.go +++ b/rust/kona/tests/node/utils/mixed_preset.go @@ -4,29 +4,14 @@ import ( "fmt" "os" "strconv" - "strings" "github.com/ethereum/go-ethereum/log" "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" + devpresets "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-node/rollup/sync" - "github.com/ethereum-optimism/optimism/op-service/eth" -) - -type L2NodeKind string - -const ( - OpNode L2NodeKind = "op" - KonaNode L2NodeKind = "kona" - Sequencer L2NodeKind = "sequencer" - Validator L2NodeKind = "validator" ) type L2NodeConfig struct { @@ -55,68 +40,33 @@ const ( ) func ParseL2NodeConfigFromEnv() L2NodeConfig { - // Get environment variable: OP_SEQUENCER_NODES. Convert to int. 
- opSequencerGeth := os.Getenv("OP_SEQUENCER_WITH_GETH") - opSequencerGethInt, err := strconv.Atoi(opSequencerGeth) - if err != nil { - opSequencerGethInt = DefaultOpSequencerGeth - } - // Get environment variable: KONA_SEQUENCER_NODES - konaSequencerGeth := os.Getenv("KONA_SEQUENCER_WITH_GETH") - konaSequencerGethInt, err := strconv.Atoi(konaSequencerGeth) - if err != nil { - konaSequencerGethInt = DefaultKonaSequencerGeth - } - // Get environment variable: OP_SEQUENCER_WITH_RETH - opSequencerReth := os.Getenv("OP_SEQUENCER_WITH_RETH") - opSequencerRethInt, err := strconv.Atoi(opSequencerReth) - if err != nil { - opSequencerRethInt = DefaultOpSequencerReth - } - // Get environment variable: KONA_SEQUENCER_WITH_RETH - konaSequencerReth := os.Getenv("KONA_SEQUENCER_WITH_RETH") - konaSequencerRethInt, err := strconv.Atoi(konaSequencerReth) - if err != nil { - konaSequencerRethInt = DefaultKonaSequencerReth - } - // Get environment variable: OP_VALIDATOR_WITH_GETH - opValidatorGeth := os.Getenv("OP_VALIDATOR_WITH_GETH") - opValidatorGethInt, err := strconv.Atoi(opValidatorGeth) - if err != nil { - opValidatorGethInt = DefaultOpValidatorGeth - } - // Get environment variable: OP_VALIDATOR_WITH_RETH - opValidatorReth := os.Getenv("OP_VALIDATOR_WITH_RETH") - opValidatorRethInt, err := strconv.Atoi(opValidatorReth) - if err != nil { - opValidatorRethInt = DefaultOpValidatorReth - } - // Get environment variable: KONA_VALIDATOR_WITH_GETH - konaValidatorGeth := os.Getenv("KONA_VALIDATOR_WITH_GETH") - konaValidatorGethInt, err := strconv.Atoi(konaValidatorGeth) - if err != nil { - konaValidatorGethInt = DefaultKonaValidatorGeth - } - // Get environment variable: KONA_VALIDATOR_WITH_RETH - konaValidatorReth := os.Getenv("KONA_VALIDATOR_WITH_RETH") - konaValidatorRethInt, err := strconv.Atoi(konaValidatorReth) - if err != nil { - konaValidatorRethInt = DefaultKonaValidatorReth - } + opSequencerGethInt := parseEnvInt("OP_SEQUENCER_WITH_GETH", DefaultOpSequencerGeth) + 
konaSequencerGethInt := parseEnvInt("KONA_SEQUENCER_WITH_GETH", DefaultKonaSequencerGeth) + opSequencerRethInt := parseEnvInt("OP_SEQUENCER_WITH_RETH", DefaultOpSequencerReth) + konaSequencerRethInt := parseEnvInt("KONA_SEQUENCER_WITH_RETH", DefaultKonaSequencerReth) + opValidatorGethInt := parseEnvInt("OP_VALIDATOR_WITH_GETH", DefaultOpValidatorGeth) + opValidatorRethInt := parseEnvInt("OP_VALIDATOR_WITH_RETH", DefaultOpValidatorReth) + konaValidatorGethInt := parseEnvInt("KONA_VALIDATOR_WITH_GETH", DefaultKonaValidatorGeth) + konaValidatorRethInt := parseEnvInt("KONA_VALIDATOR_WITH_RETH", DefaultKonaValidatorReth) return L2NodeConfig{ - OpSequencerNodesWithGeth: opSequencerGethInt, - OpSequencerNodesWithReth: opSequencerRethInt, - - OpNodesWithGeth: opValidatorGethInt, - OpNodesWithReth: opValidatorRethInt, - + OpSequencerNodesWithGeth: opSequencerGethInt, + OpSequencerNodesWithReth: opSequencerRethInt, + OpNodesWithGeth: opValidatorGethInt, + OpNodesWithReth: opValidatorRethInt, KonaSequencerNodesWithGeth: konaSequencerGethInt, KonaSequencerNodesWithReth: konaSequencerRethInt, + KonaNodesWithGeth: konaValidatorGethInt, + KonaNodesWithReth: konaValidatorRethInt, + } +} - KonaNodesWithGeth: konaValidatorGethInt, - KonaNodesWithReth: konaValidatorRethInt, +func parseEnvInt(name string, fallback int) int { + value, err := strconv.Atoi(os.Getenv(name)) + if err != nil { + return fallback } + return value } func (l2NodeConfig L2NodeConfig) TotalNodes() int { @@ -140,12 +90,12 @@ func (l2NodeConfig L2NodeConfig) KonaValidatorNodes() int { } type MixedOpKonaPreset struct { - Log log.Logger - T devtest.T - ControlPlane stack.ControlPlane + Log log.Logger + T devtest.T L1Network *dsl.L1Network L1EL *dsl.L1ELNode + L1CL *dsl.L1CLNode L2Chain *dsl.L2Network L2Batcher *dsl.L2Batcher @@ -170,32 +120,26 @@ type MixedOpKonaPreset struct { Funder *dsl.Funder } -// L2ELNodes returns all the L2EL nodes in the network (op-reth, op-geth, etc.), validator and sequencer. 
func (m *MixedOpKonaPreset) L2ELNodes() []dsl.L2ELNode { return append(m.L2ELSequencerNodes(), m.L2ELValidatorNodes()...) } -// L2CLNodes returns all the L2CL nodes in the network (op-nodes and kona-nodes), validator and sequencer. func (m *MixedOpKonaPreset) L2CLNodes() []dsl.L2CLNode { return append(m.L2CLSequencerNodes(), m.L2CLValidatorNodes()...) } -// L2CLValidatorNodes returns all the validator L2CL nodes in the network (op-nodes and kona-nodes). func (m *MixedOpKonaPreset) L2CLValidatorNodes() []dsl.L2CLNode { return append(m.L2CLOpValidatorNodes, m.L2CLKonaValidatorNodes...) } -// L2CLSequencerNodes returns all the sequencer L2CL nodes in the network (op-nodes and kona-nodes). func (m *MixedOpKonaPreset) L2CLSequencerNodes() []dsl.L2CLNode { return append(m.L2CLOpSequencerNodes, m.L2CLKonaSequencerNodes...) } -// L2ELValidatorNodes returns all the validator L2EL nodes in the network (op-reth, op-geth, etc.). func (m *MixedOpKonaPreset) L2ELValidatorNodes() []dsl.L2ELNode { return append(m.L2ELOpValidatorNodes, m.L2ELKonaValidatorNodes...) } -// L2ELSequencerNodes returns all the sequencer L2EL nodes in the network (op-reth, op-geth, etc.). func (m *MixedOpKonaPreset) L2ELSequencerNodes() []dsl.L2ELNode { return append(m.L2ELOpSequencerNodes, m.L2ELKonaSequencerNodes...) } @@ -204,361 +148,87 @@ func (m *MixedOpKonaPreset) L2CLKonaNodes() []dsl.L2CLNode { return append(m.L2CLKonaValidatorNodes, m.L2CLKonaSequencerNodes...) 
} -func L2NodeMatcher[E stack.Identifiable](value ...string) stack.Matcher[E] { - return match.MatchElemFn[E](func(elem E) bool { - for _, v := range value { - if !strings.Contains(elem.ID().Key(), v) { - return false - } - } - return true - }) -} - func (m *MixedOpKonaPreset) L2Networks() []*dsl.L2Network { - return []*dsl.L2Network{ - m.L2Chain, - } -} - -func WithMixedOpKona(l2NodeConfig L2NodeConfig) stack.CommonOption { - return stack.MakeCommon(DefaultMixedOpKonaSystem(&DefaultMixedOpKonaSystemIDs{}, l2NodeConfig)) -} - -func L2CLNodes(nodes []stack.L2CLNode, orch stack.Orchestrator) []dsl.L2CLNode { - out := make([]dsl.L2CLNode, len(nodes)) - for i, node := range nodes { - out[i] = *dsl.NewL2CLNode(node, orch.ControlPlane()) - } - return out -} - -func L2ELNodes(nodes []stack.L2ELNode, orch stack.Orchestrator) []dsl.L2ELNode { - out := make([]dsl.L2ELNode, len(nodes)) - for i, node := range nodes { - out[i] = *dsl.NewL2ELNode(node, orch.ControlPlane()) - } - return out + return []*dsl.L2Network{m.L2Chain} } func NewMixedOpKona(t devtest.T) *MixedOpKonaPreset { - system := shim.NewSystem(t) - orch := presets.Orchestrator() - orch.Hydrate(system) - - t.Gate().Equal(len(system.L2Networks()), 1, "expected exactly one L2 network") - t.Gate().Equal(len(system.L1Networks()), 1, "expected exactly one L1 network") - - l1Net := system.L1Network(match.FirstL1Network) - l2Net := system.L2Network(match.Assume(t, match.L2ChainA)) - - t.Gate().GreaterOrEqual(len(l2Net.L2CLNodes()), 2, "expected at least two L2CL nodes") - - opSequencerCLNodes := L2NodeMatcher[stack.L2CLNode](string(OpNode), string(Sequencer)).Match(l2Net.L2CLNodes()) - konaSequencerCLNodes := L2NodeMatcher[stack.L2CLNode](string(KonaNode), string(Sequencer)).Match(l2Net.L2CLNodes()) - - opCLNodes := L2NodeMatcher[stack.L2CLNode](string(OpNode), string(Validator)).Match(l2Net.L2CLNodes()) - konaCLNodes := L2NodeMatcher[stack.L2CLNode](string(KonaNode), string(Validator)).Match(l2Net.L2CLNodes()) - - 
opSequencerELNodes := L2NodeMatcher[stack.L2ELNode](string(OpNode), string(Sequencer)).Match(l2Net.L2ELNodes()) - konaSequencerELNodes := L2NodeMatcher[stack.L2ELNode](string(KonaNode), string(Sequencer)).Match(l2Net.L2ELNodes()) - opELNodes := L2NodeMatcher[stack.L2ELNode](string(OpNode), string(Validator)).Match(l2Net.L2ELNodes()) - konaELNodes := L2NodeMatcher[stack.L2ELNode](string(KonaNode), string(Validator)).Match(l2Net.L2ELNodes()) - - out := &MixedOpKonaPreset{ - Log: t.Logger(), - T: t, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(system.L1Network(match.FirstL1Network)), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2Chain: dsl.NewL2Network(l2Net, orch.ControlPlane()), - L2Batcher: dsl.NewL2Batcher(l2Net.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - - L2ELOpSequencerNodes: L2ELNodes(opSequencerELNodes, orch), - L2CLOpSequencerNodes: L2CLNodes(opSequencerCLNodes, orch), - - L2ELOpValidatorNodes: L2ELNodes(opELNodes, orch), - L2CLOpValidatorNodes: L2CLNodes(opCLNodes, orch), - - L2ELKonaSequencerNodes: L2ELNodes(konaSequencerELNodes, orch), - L2CLKonaSequencerNodes: L2CLNodes(konaSequencerCLNodes, orch), - - L2ELKonaValidatorNodes: L2ELNodes(konaELNodes, orch), - L2CLKonaValidatorNodes: L2CLNodes(konaCLNodes, orch), - - Wallet: dsl.NewHDWallet(t, devkeys.TestMnemonic, 30), - Faucet: dsl.NewFaucet(l2Net.Faucet(match.Assume(t, match.FirstFaucet))), - } - return out -} - -type DefaultMixedOpKonaSystemIDs struct { - L1 stack.ComponentID - L1EL stack.ComponentID - L1CL stack.ComponentID - - L2 stack.ComponentID - - L2ELOpGethSequencerNodes []stack.ComponentID - L2ELOpRethSequencerNodes []stack.ComponentID - - L2CLOpGethSequencerNodes []stack.ComponentID - L2CLOpRethSequencerNodes []stack.ComponentID - - L2ELKonaGethSequencerNodes []stack.ComponentID - L2ELKonaRethSequencerNodes []stack.ComponentID - - L2CLKonaGethSequencerNodes []stack.ComponentID - L2CLKonaRethSequencerNodes []stack.ComponentID - - 
L2CLOpGethNodes []stack.ComponentID - L2ELOpGethNodes []stack.ComponentID - - L2CLOpRethNodes []stack.ComponentID - L2ELOpRethNodes []stack.ComponentID - - L2CLKonaGethNodes []stack.ComponentID - L2ELKonaGethNodes []stack.ComponentID - - L2CLKonaRethNodes []stack.ComponentID - L2ELKonaRethNodes []stack.ComponentID - - L2Batcher stack.ComponentID - L2Proposer stack.ComponentID -} - -func (ids *DefaultMixedOpKonaSystemIDs) L2CLSequencerNodes() []stack.ComponentID { - list := append(ids.L2CLOpGethSequencerNodes, ids.L2CLOpRethSequencerNodes...) - list = append(list, ids.L2CLKonaGethSequencerNodes...) - list = append(list, ids.L2CLKonaRethSequencerNodes...) - return list -} - -func (ids *DefaultMixedOpKonaSystemIDs) L2ELSequencerNodes() []stack.ComponentID { - list := append(ids.L2ELOpGethSequencerNodes, ids.L2ELOpRethSequencerNodes...) - list = append(list, ids.L2ELKonaGethSequencerNodes...) - list = append(list, ids.L2ELKonaRethSequencerNodes...) - return list + return NewMixedOpKonaForConfig(t, ParseL2NodeConfigFromEnv()) } -func (ids *DefaultMixedOpKonaSystemIDs) L2CLValidatorNodes() []stack.ComponentID { - list := append(ids.L2CLOpGethNodes, ids.L2CLOpRethNodes...) - list = append(list, ids.L2CLKonaGethNodes...) - list = append(list, ids.L2CLKonaRethNodes...) - return list -} -func (ids *DefaultMixedOpKonaSystemIDs) L2ELValidatorNodes() []stack.ComponentID { - list := append(ids.L2ELOpGethNodes, ids.L2ELOpRethNodes...) - list = append(list, ids.L2ELKonaGethNodes...) - list = append(list, ids.L2ELKonaRethNodes...) - return list -} - -func (ids *DefaultMixedOpKonaSystemIDs) L2CLNodes() []stack.ComponentID { - return append(ids.L2CLSequencerNodes(), ids.L2CLValidatorNodes()...) -} - -func (ids *DefaultMixedOpKonaSystemIDs) L2ELNodes() []stack.ComponentID { - return append(ids.L2ELSequencerNodes(), ids.L2ELValidatorNodes()...) 
+func NewMixedOpKonaForConfig(t devtest.T, l2NodeConfig L2NodeConfig) *MixedOpKonaPreset { + runtime := sysgo.NewMixedSingleChainRuntime(t, sysgo.MixedSingleChainPresetConfig{ + NodeSpecs: mixedOpKonaNodeSpecs(l2NodeConfig), + }) + return NewMixedOpKonaFromRuntime(t, runtime) } -func NewDefaultMixedOpKonaSystemIDs(l1ID, l2ID eth.ChainID, l2NodeConfig L2NodeConfig) DefaultMixedOpKonaSystemIDs { - rethOpCLNodes := make([]stack.ComponentID, l2NodeConfig.OpNodesWithReth) - rethOpELNodes := make([]stack.ComponentID, l2NodeConfig.OpNodesWithReth) - rethKonaCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithReth) - rethKonaELNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithReth) - - gethOpCLNodes := make([]stack.ComponentID, l2NodeConfig.OpNodesWithGeth) - gethOpELNodes := make([]stack.ComponentID, l2NodeConfig.OpNodesWithGeth) - gethKonaCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithGeth) - gethKonaELNodes := make([]stack.ComponentID, l2NodeConfig.KonaNodesWithGeth) - - gethOpSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithGeth) - gethOpSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithGeth) - gethKonaSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithGeth) - gethKonaSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithGeth) - - rethOpSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithReth) - rethOpSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.OpSequencerNodesWithReth) - rethKonaSequencerCLNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithReth) - rethKonaSequencerELNodes := make([]stack.ComponentID, l2NodeConfig.KonaSequencerNodesWithReth) - - for i := range l2NodeConfig.OpSequencerNodesWithGeth { - gethOpSequencerCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-geth-op-sequencer-%d", i), l2ID) - gethOpSequencerELNodes[i] = 
stack.NewL2ELNodeID(fmt.Sprintf("el-geth-op-sequencer-%d", i), l2ID) - } - - for i := range l2NodeConfig.KonaSequencerNodesWithGeth { - gethKonaSequencerCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-geth-kona-sequencer-%d", i), l2ID) - gethKonaSequencerELNodes[i] = stack.NewL2ELNodeID(fmt.Sprintf("el-geth-kona-sequencer-%d", i), l2ID) - } - - for i := range l2NodeConfig.OpSequencerNodesWithReth { - rethOpSequencerCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-reth-op-sequencer-%d", i), l2ID) - rethOpSequencerELNodes[i] = stack.NewL2ELNodeID(fmt.Sprintf("el-reth-op-sequencer-%d", i), l2ID) - } - - for i := range l2NodeConfig.KonaSequencerNodesWithReth { - rethKonaSequencerCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-reth-kona-sequencer-%d", i), l2ID) - rethKonaSequencerELNodes[i] = stack.NewL2ELNodeID(fmt.Sprintf("el-reth-kona-sequencer-%d", i), l2ID) - } - - for i := range l2NodeConfig.OpNodesWithGeth { - gethOpCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-geth-op-validator-%d", i), l2ID) - gethOpELNodes[i] = stack.NewL2ELNodeID(fmt.Sprintf("el-geth-op-validator-%d", i), l2ID) - } - - for i := range l2NodeConfig.OpNodesWithReth { - rethOpCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-reth-op-validator-%d", i), l2ID) - rethOpELNodes[i] = stack.NewL2ELNodeID(fmt.Sprintf("el-reth-op-validator-%d", i), l2ID) - } - - for i := range l2NodeConfig.KonaNodesWithGeth { - gethKonaCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-geth-kona-validator-%d", i), l2ID) - gethKonaELNodes[i] = stack.NewL2ELNodeID(fmt.Sprintf("el-geth-kona-validator-%d", i), l2ID) - } - - for i := range l2NodeConfig.KonaNodesWithReth { - rethKonaCLNodes[i] = stack.NewL2CLNodeID(fmt.Sprintf("cl-reth-kona-validator-%d", i), l2ID) - rethKonaELNodes[i] = stack.NewL2ELNodeID(fmt.Sprintf("el-reth-kona-validator-%d", i), l2ID) - } - - ids := DefaultMixedOpKonaSystemIDs{ - L1: stack.NewL1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: 
stack.NewL2NetworkID(l2ID), - - L2CLOpGethSequencerNodes: gethOpSequencerCLNodes, - L2ELOpGethSequencerNodes: gethOpSequencerELNodes, - - L2CLOpRethSequencerNodes: rethOpSequencerCLNodes, - L2ELOpRethSequencerNodes: rethOpSequencerELNodes, - - L2CLOpGethNodes: gethOpCLNodes, - L2ELOpGethNodes: gethOpELNodes, - - L2CLOpRethNodes: rethOpCLNodes, - L2ELOpRethNodes: rethOpELNodes, - - L2CLKonaGethSequencerNodes: gethKonaSequencerCLNodes, - L2ELKonaGethSequencerNodes: gethKonaSequencerELNodes, - - L2CLKonaRethSequencerNodes: rethKonaSequencerCLNodes, - L2ELKonaRethSequencerNodes: rethKonaSequencerELNodes, - - L2CLKonaGethNodes: gethKonaCLNodes, - L2ELKonaGethNodes: gethKonaELNodes, - - L2CLKonaRethNodes: rethKonaCLNodes, - L2ELKonaRethNodes: rethKonaELNodes, - - L2Batcher: stack.NewL2BatcherID("main", l2ID), - L2Proposer: stack.NewL2ProposerID("main", l2ID), - } - return ids +func NewMixedOpKonaFromRuntime(t devtest.T, runtime *sysgo.MixedSingleChainRuntime) *MixedOpKonaPreset { + preset, _ := mixedOpKonaFromRuntime(t, runtime) + return preset } -func DefaultMixedOpKonaSystem(dest *DefaultMixedOpKonaSystemIDs, l2NodeConfig L2NodeConfig) stack.CombinedOption[*sysgo.Orchestrator] { - l1ID := eth.ChainIDFromUInt64(DefaultL1ID) - l2ID := eth.ChainIDFromUInt64(DefaultL2ID) - ids := NewDefaultMixedOpKonaSystemIDs(l1ID, l2ID, l2NodeConfig) - - opt := stack.Combine[*sysgo.Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *sysgo.Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(sysgo.WithMnemonicKeys(devkeys.TestMnemonic)) - - opt.Add(sysgo.WithDeployer(), - sysgo.WithDeployerOptions( - sysgo.WithLocalContractSources(), - sysgo.WithCommons(ids.L1.ChainID()), - sysgo.WithPrefundedL2(ids.L1.ChainID(), ids.L2.ChainID()), - ), - ) - - opt.Add(sysgo.WithL1Nodes(ids.L1EL, ids.L1CL)) - - // Spawn all nodes. 
- for i := range ids.L2CLKonaGethSequencerNodes { - opt.Add(sysgo.WithOpGeth(ids.L2ELKonaGethSequencerNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaGethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaGethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.IsSequencer = true - cfg.SequencerSyncMode = sync.ELSync - cfg.VerifierSyncMode = sync.ELSync - }))) - } - - for i := range ids.L2CLOpGethSequencerNodes { - opt.Add(sysgo.WithOpGeth(ids.L2ELOpGethSequencerNodes[i])) - opt.Add(sysgo.WithOpNode(ids.L2CLOpGethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpGethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.IsSequencer = true - }))) - } - - for i := range ids.L2CLKonaRethSequencerNodes { - opt.Add(sysgo.WithOpReth(ids.L2ELKonaRethSequencerNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaRethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaRethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.IsSequencer = true - cfg.SequencerSyncMode = sync.ELSync - cfg.VerifierSyncMode = sync.ELSync - }))) - } - - for i := range ids.L2CLOpRethSequencerNodes { - opt.Add(sysgo.WithOpReth(ids.L2ELOpRethSequencerNodes[i])) - opt.Add(sysgo.WithOpNode(ids.L2CLOpRethSequencerNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpRethSequencerNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.IsSequencer = true - }))) - } - - for i := range ids.L2CLKonaGethNodes { - opt.Add(sysgo.WithOpGeth(ids.L2ELKonaGethNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaGethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaGethNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.SequencerSyncMode = sync.ELSync - cfg.VerifierSyncMode = sync.ELSync - }))) - } - - for i := range ids.L2ELOpGethNodes { - 
opt.Add(sysgo.WithOpGeth(ids.L2ELOpGethNodes[i])) - opt.Add(sysgo.WithOpNode(ids.L2CLOpGethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpGethNodes[i])) - } - - for i := range ids.L2CLKonaRethNodes { - opt.Add(sysgo.WithOpReth(ids.L2ELKonaRethNodes[i])) - opt.Add(sysgo.WithKonaNode(ids.L2CLKonaRethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELKonaRethNodes[i], sysgo.L2CLOptionFn(func(p devtest.P, id stack.ComponentID, cfg *sysgo.L2CLConfig) { - cfg.SequencerSyncMode = sync.ELSync - cfg.VerifierSyncMode = sync.ELSync - }))) - } - - for i := range ids.L2ELOpRethNodes { - opt.Add(sysgo.WithOpReth(ids.L2ELOpRethNodes[i])) - opt.Add(sysgo.WithOpNode(ids.L2CLOpRethNodes[i], ids.L1CL, ids.L1EL, ids.L2ELOpRethNodes[i])) +func mixedOpKonaFromRuntime(t devtest.T, runtime *sysgo.MixedSingleChainRuntime) (*MixedOpKonaPreset, *devpresets.MixedSingleChainFrontends) { + frontends := devpresets.NewMixedSingleChainFrontends(t, runtime) + t.Gate().GreaterOrEqual(len(frontends.Nodes), 2, "expected at least two mixed L2 nodes") + out := &MixedOpKonaPreset{ + Log: t.Logger(), + T: t, + L1Network: frontends.L1Network, + L1EL: frontends.L1EL, + L1CL: frontends.L1CL, + L2Chain: frontends.L2Network, + L2Batcher: frontends.L2Batcher, + Wallet: dsl.NewHDWallet(t, devkeys.TestMnemonic, 30), + FaucetL1: frontends.FaucetL1, + Faucet: frontends.FaucetL2, + } + for _, node := range frontends.Nodes { + switch { + case node.Spec.CLKind == sysgo.MixedL2CLOpNode && node.Spec.IsSequencer: + out.L2ELOpSequencerNodes = append(out.L2ELOpSequencerNodes, *node.EL) + out.L2CLOpSequencerNodes = append(out.L2CLOpSequencerNodes, *node.CL) + case node.Spec.CLKind == sysgo.MixedL2CLOpNode && !node.Spec.IsSequencer: + out.L2ELOpValidatorNodes = append(out.L2ELOpValidatorNodes, *node.EL) + out.L2CLOpValidatorNodes = append(out.L2CLOpValidatorNodes, *node.CL) + case node.Spec.CLKind == sysgo.MixedL2CLKona && node.Spec.IsSequencer: + out.L2ELKonaSequencerNodes = append(out.L2ELKonaSequencerNodes, *node.EL) + 
out.L2CLKonaSequencerNodes = append(out.L2CLKonaSequencerNodes, *node.CL) + case node.Spec.CLKind == sysgo.MixedL2CLKona && !node.Spec.IsSequencer: + out.L2ELKonaValidatorNodes = append(out.L2ELKonaValidatorNodes, *node.EL) + out.L2CLKonaValidatorNodes = append(out.L2CLKonaValidatorNodes, *node.CL) + } + if out.Funder == nil && node.Spec.IsSequencer { + out.Funder = dsl.NewFunder(out.Wallet, out.Faucet, node.EL) + } } - - // Connect all nodes to each other in the p2p network. - CLNodeIDs := ids.L2CLNodes() - ELNodeIDs := ids.L2ELNodes() - - for i := range CLNodeIDs { - for j := range i { - opt.Add(sysgo.WithL2CLP2PConnection(CLNodeIDs[i], CLNodeIDs[j])) - opt.Add(sysgo.WithL2ELP2PConnection(ELNodeIDs[i], ELNodeIDs[j], false)) + out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) + return out, frontends +} + +func mixedOpKonaNodeSpecs(cfg L2NodeConfig) []sysgo.MixedSingleChainNodeSpec { + var specs []sysgo.MixedSingleChainNodeSpec + appendSpecs := func(count int, elPrefix, clPrefix string, elKind sysgo.MixedL2ELKind, clKind sysgo.MixedL2CLKind, isSequencer bool) { + for i := 0; i < count; i++ { + specs = append(specs, sysgo.MixedSingleChainNodeSpec{ + ELKey: fmt.Sprintf("%s-%d", elPrefix, i), + CLKey: fmt.Sprintf("%s-%d", clPrefix, i), + ELKind: elKind, + CLKind: clKind, + IsSequencer: isSequencer, + }) } } - opt.Add(sysgo.WithBatcher(ids.L2Batcher, ids.L1EL, CLNodeIDs[0], ELNodeIDs[0])) - opt.Add(sysgo.WithProposer(ids.L2Proposer, ids.L1EL, &CLNodeIDs[0], nil)) - - opt.Add(sysgo.WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ELNodeIDs[0]})) + appendSpecs(cfg.OpSequencerNodesWithGeth, "el-geth-op-sequencer", "cl-geth-op-sequencer", sysgo.MixedL2ELOpGeth, sysgo.MixedL2CLOpNode, true) + appendSpecs(cfg.OpSequencerNodesWithReth, "el-reth-op-sequencer", "cl-reth-op-sequencer", sysgo.MixedL2ELOpReth, sysgo.MixedL2CLOpNode, true) + appendSpecs(cfg.KonaSequencerNodesWithGeth, "el-geth-kona-sequencer", "cl-geth-kona-sequencer", 
sysgo.MixedL2ELOpGeth, sysgo.MixedL2CLKona, true) + appendSpecs(cfg.KonaSequencerNodesWithReth, "el-reth-kona-sequencer", "cl-reth-kona-sequencer", sysgo.MixedL2ELOpReth, sysgo.MixedL2CLKona, true) - opt.Add(stack.Finally(func(orch *sysgo.Orchestrator) { - *dest = ids - })) + appendSpecs(cfg.OpNodesWithGeth, "el-geth-op-validator", "cl-geth-op-validator", sysgo.MixedL2ELOpGeth, sysgo.MixedL2CLOpNode, false) + appendSpecs(cfg.OpNodesWithReth, "el-reth-op-validator", "cl-reth-op-validator", sysgo.MixedL2ELOpReth, sysgo.MixedL2CLOpNode, false) + appendSpecs(cfg.KonaNodesWithGeth, "el-geth-kona-validator", "cl-geth-kona-validator", sysgo.MixedL2ELOpGeth, sysgo.MixedL2CLKona, false) + appendSpecs(cfg.KonaNodesWithReth, "el-reth-kona-validator", "cl-reth-kona-validator", sysgo.MixedL2ELOpReth, sysgo.MixedL2CLKona, false) - return opt + return specs } diff --git a/rust/kona/tests/node/utils/mixed_preset_with_conductor.go b/rust/kona/tests/node/utils/mixed_preset_with_conductor.go index 5e8a4f929dbf1..765be0555bcca 100644 --- a/rust/kona/tests/node/utils/mixed_preset_with_conductor.go +++ b/rust/kona/tests/node/utils/mixed_preset_with_conductor.go @@ -2,33 +2,15 @@ package node_utils import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" ) -type MinimalWithConductors struct { - *MixedOpKonaPreset - - ConductorSets map[stack.ComponentID]dsl.ConductorSet -} +type MinimalWithConductors = presets.MinimalWithConductors func NewMixedOpKonaWithConductors(t devtest.T) *MinimalWithConductors { - system := shim.NewSystem(t) - orch := presets.Orchestrator() - orch.Hydrate(system) - chains := system.L2Networks() - conductorSets := 
make(map[stack.ComponentID]dsl.ConductorSet) - for _, chain := range chains { - chainMatcher := match.L2ChainById(chain.ID()) - l2 := system.L2Network(match.Assume(t, chainMatcher)) + return presets.NewMinimalWithConductors(t) +} - conductorSets[chain.ID()] = dsl.NewConductorSet(l2.Conductors()) - } - return &MinimalWithConductors{ - MixedOpKonaPreset: NewMixedOpKona(t), - ConductorSets: conductorSets, - } +func NewMixedOpKonaWithConductorsForConfig(t devtest.T, _ L2NodeConfig, opts ...presets.Option) *MinimalWithConductors { + return presets.NewMinimalWithConductors(t, opts...) } diff --git a/rust/kona/tests/node/utils/package_scope.go b/rust/kona/tests/node/utils/package_scope.go new file mode 100644 index 0000000000000..e08bacbfe8818 --- /dev/null +++ b/rust/kona/tests/node/utils/package_scope.go @@ -0,0 +1,166 @@ +package node_utils + +import ( + "context" + "time" + + "github.com/ethereum/go-ethereum/log" + "go.opentelemetry.io/otel/trace" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/ethereum-optimism/optimism/op-service/testreq" +) + +// packageScopeT adapts a package-scoped devtest.P so sysgo runtimes can be +// initialized once in TestMain and shared across the package. +type packageScopeT struct { + p devtest.P + gate *testreq.Assertions +} + +type packageGateAdapter struct { + inner interface { + Helper() + Skipf(format string, args ...any) + SkipNow() + } +} + +func (g *packageGateAdapter) Errorf(format string, args ...interface{}) { + g.inner.Helper() + g.inner.Skipf(format, args...) +} + +func (g *packageGateAdapter) FailNow() { + g.inner.Helper() + g.inner.SkipNow() +} + +func (g *packageGateAdapter) Helper() { + g.inner.Helper() +} + +func newPackageScopeT(p devtest.P) *packageScopeT { + t := &packageScopeT{p: p} + t.gate = testreq.New(&packageGateAdapter{inner: t}) + return t +} + +func (t *packageScopeT) Error(args ...any) { + t.p.Error(args...) 
+} + +func (t *packageScopeT) Errorf(format string, args ...any) { + t.p.Errorf(format, args...) +} + +func (t *packageScopeT) Fail() { + t.p.Fail() +} + +func (t *packageScopeT) FailNow() { + t.p.FailNow() +} + +func (t *packageScopeT) TempDir() string { + return t.p.TempDir() +} + +func (t *packageScopeT) Cleanup(fn func()) { + t.p.Cleanup(fn) +} + +func (t *packageScopeT) Run(_ string, fn func(devtest.T)) { + fn(newPackageScopeT(t.p)) +} + +func (t *packageScopeT) Ctx() context.Context { + return t.p.Ctx() +} + +func (t *packageScopeT) WithCtx(ctx context.Context) devtest.T { + return newPackageScopeT(t.p.WithCtx(ctx)) +} + +func (t *packageScopeT) Parallel() { +} + +func (t *packageScopeT) Skip(args ...any) { + t.Helper() + t.Log(args...) + t.SkipNow() +} + +func (t *packageScopeT) Skipped() bool { + return false +} + +func (t *packageScopeT) Skipf(format string, args ...any) { + t.Helper() + t.Logf(format, args...) + t.SkipNow() +} + +func (t *packageScopeT) SkipNow() { + t.p.SkipNow() +} + +func (t *packageScopeT) Log(args ...any) { + t.p.Log(args...) +} + +func (t *packageScopeT) Logf(format string, args ...any) { + t.p.Logf(format, args...) 
+} + +func (t *packageScopeT) Helper() { + t.p.Helper() +} + +func (t *packageScopeT) Name() string { + return t.p.Name() +} + +func (t *packageScopeT) Logger() log.Logger { + return t.p.Logger() +} + +func (t *packageScopeT) Tracer() trace.Tracer { + return t.p.Tracer() +} + +func (t *packageScopeT) Require() *testreq.Assertions { + return t.p.Require() +} + +func (t *packageScopeT) Gate() *testreq.Assertions { + return t.gate +} + +func (t *packageScopeT) Deadline() (time.Time, bool) { + return time.Time{}, false +} + +func (t *packageScopeT) TestOnly() { +} + +var _ devtest.T = (*packageScopeT)(nil) + +func NewSharedMixedOpKona(pkg devtest.P) *MixedOpKonaPreset { + return NewSharedMixedOpKonaForConfig(pkg, ParseL2NodeConfigFromEnv()) +} + +func NewSharedMixedOpKonaForConfig(pkg devtest.P, cfg L2NodeConfig) *MixedOpKonaPreset { + return NewMixedOpKonaFromRuntime(newPackageScopeT(pkg), NewSharedMixedOpKonaRuntimeForConfig(pkg, cfg)) +} + +func NewSharedMixedOpKonaRuntime(pkg devtest.P) *sysgo.MixedSingleChainRuntime { + return NewSharedMixedOpKonaRuntimeForConfig(pkg, ParseL2NodeConfigFromEnv()) +} + +func NewSharedMixedOpKonaRuntimeForConfig(pkg devtest.P, cfg L2NodeConfig) *sysgo.MixedSingleChainRuntime { + return sysgo.NewMixedSingleChainRuntime(newPackageScopeT(pkg), sysgo.MixedSingleChainPresetConfig{ + NodeSpecs: mixedOpKonaNodeSpecs(cfg), + }) +} diff --git a/rust/kona/tests/node/utils/test_sequencer_preset.go b/rust/kona/tests/node/utils/test_sequencer_preset.go index 56dcdaa18afbb..239474984817a 100644 --- a/rust/kona/tests/node/utils/test_sequencer_preset.go +++ b/rust/kona/tests/node/utils/test_sequencer_preset.go @@ -3,12 +3,7 @@ package node_utils import ( "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/shim" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - 
"github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" - "github.com/ethereum-optimism/optimism/op-service/eth" ) type MinimalWithTestSequencersPreset struct { @@ -17,71 +12,30 @@ type MinimalWithTestSequencersPreset struct { TestSequencer dsl.TestSequencer } -func WithMixedWithTestSequencer(l2Config L2NodeConfig) stack.CommonOption { - if l2Config.OpSequencerNodesWithGeth == 0 && l2Config.OpSequencerNodesWithReth == 0 { - l2Config.OpSequencerNodesWithGeth = 1 - } - - return stack.MakeCommon(DefaultMixedWithTestSequencer(&DefaultMinimalWithTestSequencerIds{}, l2Config)) -} - func NewMixedOpKonaWithTestSequencer(t devtest.T) *MinimalWithTestSequencersPreset { - system := shim.NewSystem(t) - orch := presets.Orchestrator() - orch.Hydrate(system) + return NewMixedOpKonaWithTestSequencerForConfig(t, ParseL2NodeConfigFromEnv()) +} - t.Gate().Equal(len(system.L2Networks()), 1, "expected exactly one L2 network") - t.Gate().Equal(len(system.L1Networks()), 1, "expected exactly one L1 network") +func NewMixedOpKonaWithTestSequencerForConfig(t devtest.T, l2Config L2NodeConfig) *MinimalWithTestSequencersPreset { + l2Config = withRequiredOpSequencerForTestSequencer(l2Config) - TestSequencer := - dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))) + runtime := sysgo.NewMixedSingleChainRuntime(t, sysgo.MixedSingleChainPresetConfig{ + NodeSpecs: mixedOpKonaNodeSpecs(l2Config), + WithTestSequencer: true, + TestSequencerName: "test-sequencer", + }) + mixedPreset, frontends := mixedOpKonaFromRuntime(t, runtime) + t.Require().NotNil(frontends.TestSequencer, "expected test sequencer frontend") return &MinimalWithTestSequencersPreset{ - MixedOpKonaPreset: NewMixedOpKona(t), - TestSequencer: *TestSequencer, + MixedOpKonaPreset: mixedPreset, + TestSequencer: *frontends.TestSequencer, } } -type DefaultMinimalWithTestSequencerIds struct { - DefaultMixedOpKonaSystemIDs 
DefaultMixedOpKonaSystemIDs - TestSequencerId stack.ComponentID -} - -func NewDefaultMinimalWithTestSequencerIds(l2Config L2NodeConfig) DefaultMinimalWithTestSequencerIds { - return DefaultMinimalWithTestSequencerIds{ - DefaultMixedOpKonaSystemIDs: NewDefaultMixedOpKonaSystemIDs(eth.ChainIDFromUInt64(DefaultL1ID), eth.ChainIDFromUInt64(DefaultL2ID), L2NodeConfig{ - OpSequencerNodesWithGeth: l2Config.OpSequencerNodesWithGeth, - OpSequencerNodesWithReth: l2Config.OpSequencerNodesWithReth, - OpNodesWithGeth: l2Config.OpNodesWithGeth, - OpNodesWithReth: l2Config.OpNodesWithReth, - KonaNodesWithGeth: l2Config.KonaNodesWithGeth, - KonaNodesWithReth: l2Config.KonaNodesWithReth, - }), - TestSequencerId: stack.NewTestSequencerID("test-sequencer"), +func withRequiredOpSequencerForTestSequencer(l2Config L2NodeConfig) L2NodeConfig { + if l2Config.OpSequencerNodes() == 0 { + l2Config.OpSequencerNodesWithGeth = 1 } -} - -func DefaultMixedWithTestSequencer(dest *DefaultMinimalWithTestSequencerIds, l2Config L2NodeConfig) stack.Option[*sysgo.Orchestrator] { - - opt := DefaultMixedOpKonaSystem(&dest.DefaultMixedOpKonaSystemIDs, L2NodeConfig{ - OpSequencerNodesWithGeth: l2Config.OpSequencerNodesWithGeth, - OpSequencerNodesWithReth: l2Config.OpSequencerNodesWithReth, - OpNodesWithGeth: l2Config.OpNodesWithGeth, - OpNodesWithReth: l2Config.OpNodesWithReth, - KonaNodesWithGeth: l2Config.KonaNodesWithGeth, - KonaNodesWithReth: l2Config.KonaNodesWithReth, - }) - - ids := NewDefaultMinimalWithTestSequencerIds(l2Config) - - L2SequencerCLNodes := ids.DefaultMixedOpKonaSystemIDs.L2CLSequencerNodes() - L2SequencerELNodes := ids.DefaultMixedOpKonaSystemIDs.L2ELSequencerNodes() - - opt.Add(sysgo.WithTestSequencer(ids.TestSequencerId, ids.DefaultMixedOpKonaSystemIDs.L1CL, L2SequencerCLNodes[0], ids.DefaultMixedOpKonaSystemIDs.L1EL, L2SequencerELNodes[0])) - - opt.Add(stack.Finally(func(orch *sysgo.Orchestrator) { - *dest = ids - })) - - return opt + return l2Config } diff --git 
a/rust/kona/tests/node/utils/test_sequencer_preset_test.go b/rust/kona/tests/node/utils/test_sequencer_preset_test.go new file mode 100644 index 0000000000000..e13dce283134e --- /dev/null +++ b/rust/kona/tests/node/utils/test_sequencer_preset_test.go @@ -0,0 +1,38 @@ +package node_utils + +import ( + "testing" + + "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + "github.com/stretchr/testify/require" +) + +func TestWithRequiredOpSequencerForTestSequencerAddsDefaultOpGethSequencer(t *testing.T) { + cfg := withRequiredOpSequencerForTestSequencer(L2NodeConfig{ + KonaSequencerNodesWithReth: 1, + }) + + require.Equal(t, 1, cfg.OpSequencerNodesWithGeth) + require.Equal(t, 1, cfg.KonaSequencerNodesWithReth) + + specs := mixedOpKonaNodeSpecs(cfg) + require.True(t, hasNodeSpec(specs, sysgo.MixedL2ELOpGeth, sysgo.MixedL2CLOpNode, true)) +} + +func TestWithRequiredOpSequencerForTestSequencerPreservesExistingOpSequencer(t *testing.T) { + cfg := withRequiredOpSequencerForTestSequencer(L2NodeConfig{ + OpSequencerNodesWithReth: 2, + }) + + require.Equal(t, 0, cfg.OpSequencerNodesWithGeth) + require.Equal(t, 2, cfg.OpSequencerNodesWithReth) +} + +func hasNodeSpec(specs []sysgo.MixedSingleChainNodeSpec, elKind sysgo.MixedL2ELKind, clKind sysgo.MixedL2CLKind, isSequencer bool) bool { + for _, spec := range specs { + if spec.ELKind == elKind && spec.CLKind == clKind && spec.IsSequencer == isSequencer { + return true + } + } + return false +} diff --git a/rust/kona/tests/node/utils/ws.go b/rust/kona/tests/node/utils/ws.go index 6b0bee053f5a3..8419a37b205fc 100644 --- a/rust/kona/tests/node/utils/ws.go +++ b/rust/kona/tests/node/utils/ws.go @@ -93,7 +93,8 @@ func AsyncGetPrefixedWs[T any, Out any](t devtest.T, node *dsl.L2CLNode, prefix return } - msgChan <- msg + // Clone the raw payload before the next ReadJSON call can reuse the buffer. + msgChan <- append(json.RawMessage(nil), msg...) 
} }() @@ -120,8 +121,6 @@ func AsyncGetPrefixedWs[T any, Out any](t devtest.T, node *dsl.L2CLNode, prefix var p push[Out] require.NoError(t, json.Unmarshal(msg, &p), "decode: %v", err) - - t.Log(wsRPC, method, "received websocket message", p.Params.Result) output <- p.Params.Result } } diff --git a/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go b/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go new file mode 100644 index 0000000000000..1aa273c36bd00 --- /dev/null +++ b/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go @@ -0,0 +1,247 @@ +package l2reorg + +import ( + "math/rand" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" + "github.com/ethereum-optimism/optimism/op-service/bigs" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txintent" + "github.com/ethereum-optimism/optimism/op-service/txplan" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/stretchr/testify/require" +) + +// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go#L25 +func TestReorgInitExecMsg(gt *testing.T) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + + sys := presets.NewSimpleInterop(t) + l := sys.Log + + ia := sys.TestSequencer.Escape().ControlAPI(sys.L2ChainA.ChainID()) + + // three EOAs for triggering the init and exec interop txs, as well as a simple 
transfer tx + alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) + bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) + cathrine := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) + + sys.L1Network.WaitForBlock() + sys.L2ChainA.WaitForBlock() + + // stop batchers on chain A and on chain B + sys.L2BatcherA.Stop() + sys.L2BatcherB.Stop() + + // deploy event logger on chain A + var eventLoggerAddress common.Address + { + tx := txplan.NewPlannedTx(txplan.Combine( + alice.Plan(), + txplan.WithData(common.FromHex(bindings.EventloggerBin)), + )) + res, err := tx.Included.Eval(ctx) + require.NoError(t, err) + + eventLoggerAddress = res.ContractAddress + l.Info("deployed EventLogger", "chainID", tx.ChainID.Value(), "address", eventLoggerAddress) + } + + sys.L1Network.WaitForBlock() + + var initTrigger *txintent.InitTrigger + // prepare init trigger (i.e. what logs to emit on chain A) + { + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + nTopics := 3 + lenData := 10 + initTrigger = interop.RandomInitTrigger(rng, eventLoggerAddress, nTopics, lenData) + + l.Info("created init trigger", "address", eventLoggerAddress, "topics", nTopics, "lenData", lenData) + } + + // wait for chain B to catch up to chain A if necessary + sys.L2ChainB.CatchUpTo(sys.L2ChainA) + + var initTx *txintent.IntentTx[*txintent.InitTrigger, *txintent.InteropOutput] + var initReceipt *types.Receipt + // prepare and include initiating message on chain A + { + initTx = txintent.NewIntent[*txintent.InitTrigger, *txintent.InteropOutput](alice.Plan()) + initTx.Content.Set(initTrigger) + var err error + initReceipt, err = initTx.PlannedTx.Included.Eval(ctx) + require.NoError(t, err) + + l.Info("initiating message included", "chain", sys.L2ChainA.ChainID(), "block_number", initReceipt.BlockNumber, "block_hash", initReceipt.BlockHash, "now", time.Now().Unix()) + } + + // stop sequencer on chain A so that we later force a reorg/removal of the init msg + sys.L2CLA.StopSequencer() + + // at least 
one block between the init tx on chain A and the exec tx on chain B + sys.L2ChainB.WaitForBlock() + + var execTx *txintent.IntentTx[*txintent.ExecTrigger, *txintent.InteropOutput] + var execReceipt *types.Receipt + // prepare and include executing message on chain B + { + execTx = txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) + execTx.Content.DependOn(&initTx.Result) + // single event in tx so index is 0. ExecuteIndexed returns a lambda to transform InteropOutput to a new ExecTrigger + execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initTx.Result, 0)) + var err error + execReceipt, err = execTx.PlannedTx.Included.Eval(ctx) + require.NoError(t, err) + require.Equal(t, 1, len(execReceipt.Logs)) + + l.Info("executing message included", "chain", sys.L2ChainB.ChainID(), "block_number", execReceipt.BlockNumber, "block_hash", execReceipt.BlockHash, "now", time.Now().Unix()) + } + + // record divergence block numbers and original refs for future validation checks + var divergenceBlockNumber_A, divergenceBlockNumber_B uint64 + var originalRef_A, originalRef_B eth.L2BlockRef + + // sequence a conflicting block with a simple transfer tx, based on the parent of the parent of the unsafe head + { + var err error + divergenceBlockNumber_B = bigs.Uint64Strict(execReceipt.BlockNumber) + originalRef_B, err = sys.L2ELB.Escape().L2EthClient().L2BlockRefByHash(ctx, execReceipt.BlockHash) + require.NoError(t, err, "Expected to be able to call L2BlockRefByHash API, but got error") + + headToReorgA := initReceipt.BlockHash + headToReorgARef, err := sys.L2ELA.Escape().L2EthClient().L2BlockRefByHash(ctx, headToReorgA) + require.NoError(t, err, "Expected to be able to call L2BlockRefByHash API, but got error") + + divergenceBlockNumber_A = headToReorgARef.Number + originalRef_A = headToReorgARef + + parentOfHeadToReorgA := headToReorgARef.ParentID() + parentsL1Origin, err := sys.L2ELA.Escape().L2EthClient().L2BlockRefByHash(ctx, 
parentOfHeadToReorgA.Hash) + require.NoError(t, err, "Expected to be able to call L2BlockRefByHash API, but got error") + + nextL1Origin := parentsL1Origin.L1Origin.Number + 1 + l1Origin, err := sys.L1EL.EthClient().InfoByNumber(ctx, nextL1Origin) + require.NoError(t, err, "Expected to get block number %v from L1 execution client", nextL1Origin) + l1OriginHash := l1Origin.Hash() + + l.Info("Sequencing a conflicting block", "chain", sys.L2ChainA.ChainID(), "newL1Origin", eth.ToBlockID(l1Origin), "headToReorgA", headToReorgARef, "parent", parentOfHeadToReorgA, "parent_l1_origin", parentsL1Origin.L1Origin) + + err = ia.New(ctx, seqtypes.BuildOpts{ + Parent: parentOfHeadToReorgA.Hash, + L1Origin: &l1OriginHash, + }) + require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, but got error") + + // include simple transfer tx in opened block + { + to := cathrine.PlanTransfer(alice.Address(), eth.OneGWei) + opt := txplan.Combine(to) + ptx := txplan.NewPlannedTx(opt) + signed_tx, err := ptx.Signed.Eval(ctx) + require.NoError(t, err, "Expected to be able to evaluate a planned transaction on op-test-sequencer, but got error") + txdata, err := signed_tx.MarshalBinary() + require.NoError(t, err, "Expected to be able to marshal a signed transaction on op-test-sequencer, but got error") + + err = ia.IncludeTx(ctx, txdata) + require.NoError(t, err, "Expected to be able to include a signed transaction on op-test-sequencer, but got error") + } + + err = ia.Next(ctx) + require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") + } + + // sequence a second block with op-test-sequencer + { + unsafe := sys.L2ELA.BlockRefByLabel(eth.Unsafe) + l.Info("Current unsafe ref", "unsafeHead", unsafe) + err := ia.New(ctx, seqtypes.BuildOpts{ + Parent: unsafe.Hash, + L1Origin: nil, + }) + require.NoError(t, err, "Expected to be able to create a new block job for sequencing on op-test-sequencer, 
but got error") + + err = ia.Next(ctx) + require.NoError(t, err, "Expected to be able to call Next() after New() on op-test-sequencer, but got error") + } + + // continue sequencing with op-node + sys.L2CLA.StartSequencer() + + // start batchers on chain A and on chain B + sys.L2BatcherA.Start() + sys.L2BatcherB.Start() + + // wait and confirm reorgs on chain A and B + dsl.CheckAll(t, + sys.L2ELA.ReorgTriggeredFn(eth.L2BlockRef{ + Number: divergenceBlockNumber_A, + Hash: originalRef_A.Hash, + ParentHash: originalRef_A.ParentID().Hash, + }, 30), + sys.L2ELB.ReorgTriggeredFn(eth.L2BlockRef{ + Number: divergenceBlockNumber_B, + Hash: originalRef_B.Hash, + ParentHash: originalRef_B.ParentID().Hash, + }, 30), + ) + + // executing tx should eventually be no longer confirmed on chain B + require.Eventually(t, func() bool { + receipt, err := sys.L2ELB.Escape().EthClient().TransactionReceipt(ctx, execReceipt.TxHash) + if err == nil || err.Error() != "not found" { // want to get "not found" error + return false + } + if receipt != nil { // want to get nil receipt + return false + } + return true + }, 60*time.Second, 3*time.Second, "Expected for the executing tx to be removed from chain B") + + err := wait.For(ctx, 5*time.Second, func() (bool, error) { + safeL2Head_supervisor_A := sys.Supervisor.SafeBlockID(sys.L2ChainA.ChainID()).Hash + safeL2Head_supervisor_B := sys.Supervisor.SafeBlockID(sys.L2ChainB.ChainID()).Hash + safeL2Head_sequencer_A := sys.L2CLA.SafeL2BlockRef() + safeL2Head_sequencer_B := sys.L2CLB.SafeL2BlockRef() + + if safeL2Head_sequencer_A.Number < divergenceBlockNumber_A { + l.Info("Safe ref number is still behind divergence block A number", "divergence", divergenceBlockNumber_A, "safe", safeL2Head_sequencer_A.Number) + return false, nil + } + + if safeL2Head_sequencer_B.Number < divergenceBlockNumber_B { + l.Info("Safe ref number is still behind divergence block B number", "divergence", divergenceBlockNumber_B, "safe", safeL2Head_sequencer_B.Number) + 
return false, nil + } + + if safeL2Head_sequencer_A.Hash.Cmp(safeL2Head_supervisor_A) != 0 { + l.Info("Safe ref still not the same on supervisor and sequencer A", "supervisor", safeL2Head_supervisor_A, "sequencer", safeL2Head_sequencer_A.Hash) + return false, nil + } + + if safeL2Head_sequencer_B.Hash.Cmp(safeL2Head_supervisor_B) != 0 { + l.Info("Safe ref still not the same on supervisor and sequencer B", "supervisor", safeL2Head_supervisor_B, "sequencer", safeL2Head_sequencer_B.Hash) + return false, nil + } + + l.Info("Safe ref the same across supervisor and sequencers", + "supervisor_A", safeL2Head_supervisor_A, + "sequencer_A", safeL2Head_sequencer_A.Hash, + "supervisor_B", safeL2Head_supervisor_B, + "sequencer_B", safeL2Head_sequencer_B.Hash) + + return true, nil + }) + require.NoError(t, err, "Expected to get same safe ref on both supervisor and sequencer eventually") +} diff --git a/rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go b/rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go new file mode 100644 index 0000000000000..f253f5c81c24c --- /dev/null +++ b/rust/kona/tests/supervisor/l2reorgAfterL1reorg/reorg_test.go @@ -0,0 +1,162 @@ +package sysgo + +import ( + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/apis" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" + spresets "github.com/ethereum-optimism/optimism/rust/kona/tests/supervisor/presets" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +type checksFunc func(t devtest.T, sys *presets.SimpleInterop) + +func TestL2ReorgAfterL1Reorg(gt *testing.T) { + gt.Run("unsafe reorg", func(gt *testing.T) { + var crossSafeRef, localSafeRef, unsafeRef 
eth.BlockID + pre := func(t devtest.T, sys *presets.SimpleInterop) { + ss := sys.Supervisor.FetchSyncStatus() + crossSafeRef = ss.Chains[sys.L2ChainA.ChainID()].CrossSafe + localSafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalSafe + unsafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalUnsafe.ID() + } + post := func(t devtest.T, sys *presets.SimpleInterop) { + require.True(t, sys.L2ELA.IsCanonical(crossSafeRef), "Previous cross-safe block should still be canonical") + require.True(t, sys.L2ELA.IsCanonical(localSafeRef), "Previous local-safe block should still be canonical") + require.False(t, sys.L2ELA.IsCanonical(unsafeRef), "Previous unsafe block should have been reorged") + } + testL2ReorgAfterL1Reorg(gt, 3, pre, post) + }) + + gt.Run("unsafe, local-safe, cross-unsafe, cross-safe reorgs", func(gt *testing.T) { + var crossSafeRef, crossUnsafeRef, localSafeRef, unsafeRef eth.BlockID + pre := func(t devtest.T, sys *presets.SimpleInterop) { + ss := sys.Supervisor.FetchSyncStatus() + crossUnsafeRef = ss.Chains[sys.L2ChainA.ChainID()].CrossUnsafe + crossSafeRef = ss.Chains[sys.L2ChainA.ChainID()].CrossSafe + localSafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalSafe + unsafeRef = ss.Chains[sys.L2ChainA.ChainID()].LocalUnsafe.ID() + } + post := func(t devtest.T, sys *presets.SimpleInterop) { + require.False(t, sys.L2ELA.IsCanonical(crossSafeRef), "Previous cross-safe block should have been reorged") + require.False(t, sys.L2ELA.IsCanonical(crossUnsafeRef), "Previous cross-unsafe block should have been reorged") + require.False(t, sys.L2ELA.IsCanonical(localSafeRef), "Previous local-safe block should have been reorged") + require.False(t, sys.L2ELA.IsCanonical(unsafeRef), "Previous unsafe block should have been reorged") + } + testL2ReorgAfterL1Reorg(gt, 10, pre, post) + }) +} + +// testL2ReorgAfterL1Reorg tests that the L2 chain reorgs after an L1 reorg, and takes n, number of blocks to reorg, as parameter +// for unsafe reorgs - n must be at least >= confDepth, which is 
2 in our test deployments +// for cross-safe reorgs - n must be at least >= safe distance, which is 10 in our test deployments (set in +// op-e2e/e2eutils/geth/geth.go when initialising FakePoS) +// pre- and post-checks are sanity checks to ensure that the blocks we expected to be reorged were indeed reorged or not +func testL2ReorgAfterL1Reorg(gt *testing.T, n int, preChecks, postChecks checksFunc) { + t := devtest.SerialT(gt) + ctx := t.Ctx() + + sys := spresets.NewSimpleInteropMinimal(t) + ts := sys.TestSequencer.Escape().ControlAPI(sys.L1Network.ChainID()) + + sys.L1Network.WaitForBlock() + + sys.L1CL.Stop() + + // sequence a few L1 and L2 blocks + for range n + 1 { + sequenceL1Block(t, ts, common.Hash{}) + + sys.L2ChainA.WaitForBlock() + sys.L2ChainA.WaitForBlock() + } + + // select a divergence block to reorg from + var divergence eth.L1BlockRef + { + tip := sys.L1EL.BlockRefByLabel(eth.Unsafe) + require.Greater(t, tip.Number, uint64(n), "n is larger than L1 tip, cannot reorg out block number `tip-n`") + + divergence = sys.L1EL.BlockRefByNumber(tip.Number - uint64(n)) + } + + // print the chains before sequencing an alternative L1 block + sys.L2ChainA.PrintChain() + sys.L1Network.PrintChain() + + // pre reorg trigger validations and checks + preChecks(t, sys) + + tipL2_preReorg := sys.L2ELA.BlockRefByLabel(eth.Unsafe) + + // reorg the L1 chain -- sequence an alternative L1 block from divergence block parent + sequenceL1Block(t, ts, divergence.ParentHash) + + // continue building on the alternative L1 chain + sys.L1CL.Start() + + // confirm L1 reorged + sys.L1EL.ReorgTriggered(divergence, 5) + + // wait until L2 chain A cross-safe ref caught up to where it was before the reorg + sys.L2CLA.Reached(types.CrossSafe, tipL2_preReorg.Number, 50) + + // test that latest chain A unsafe is not referencing a reorged L1 block (through the L1Origin field) + require.Eventually(t, func() bool { + unsafe := sys.L2ELA.BlockRefByLabel(eth.Unsafe) + + block, err := 
sys.L1EL.Escape().EthClient().InfoByNumber(ctx, unsafe.L1Origin.Number) + if err != nil { + sys.Log.Warn("failed to get L1 block info by number", "number", unsafe.L1Origin.Number, "err", err) + return false + } + + sys.Log.Info("current unsafe ref", "tip", unsafe, "tip_origin", unsafe.L1Origin, "l1blk", eth.InfoToL1BlockRef(block)) + + // print the chains so we have information to debug if the test fails + sys.L2ChainA.PrintChain() + sys.L1Network.PrintChain() + + return block.Hash() == unsafe.L1Origin.Hash + }, 120*time.Second, 7*time.Second, "L1 block origin hash should match hash of block on L1 at that number. If not, it means there was a reorg, and L2 blocks L1Origin field is referencing a reorged block.") + + // confirm all L1Origin fields point to canonical blocks + require.Eventually(t, func() bool { + ref := sys.L2ELA.BlockRefByLabel(eth.Unsafe) + var err error + + // wait until L2 chains' L1Origin points to a L1 block after the one that was reorged + if ref.L1Origin.Number < divergence.Number { + return false + } + + sys.Log.Info("L2 chain progressed, pointing to newer L1 block", "ref", ref, "ref_origin", ref.L1Origin, "divergence", divergence) + + for i := ref.Number; i > 0 && ref.L1Origin.Number >= divergence.Number; i-- { + ref, err = sys.L2ELA.Escape().L2EthClient().L2BlockRefByNumber(ctx, i) + if err != nil { + return false + } + + if !sys.L1EL.IsCanonical(ref.L1Origin) { + return false + } + } + + return true + }, 120*time.Second, 5*time.Second, "all L1Origin fields should point to canonical L1 blocks") + + // post reorg test validations and checks + postChecks(t, sys) +} + +func sequenceL1Block(t devtest.T, ts apis.TestSequencerControlAPI, parent common.Hash) { + require.NoError(t, ts.New(t.Ctx(), seqtypes.BuildOpts{Parent: parent})) + require.NoError(t, ts.Next(t.Ctx())) +} diff --git a/rust/kona/tests/supervisor/pre_interop/helpers_test.go b/rust/kona/tests/supervisor/pre_interop/helpers_test.go new file mode 100644 index 
0000000000000..9b93b79fb13d0 --- /dev/null +++ b/rust/kona/tests/supervisor/pre_interop/helpers_test.go @@ -0,0 +1,14 @@ +package preinterop + +import ( + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + spresets "github.com/ethereum-optimism/optimism/rust/kona/tests/supervisor/presets" +) + +func newMinimalPreInterop(t devtest.T) *presets.SimpleInterop { + return spresets.NewSimpleInteropMinimal(t, + spresets.WithSuggestedInteropActivationOffset(30), + spresets.WithInteropNotAtGenesis(), + ) +} diff --git a/rust/kona/tests/supervisor/pre_interop/post_test.go b/rust/kona/tests/supervisor/pre_interop/post_test.go new file mode 100644 index 0000000000000..04424557756c0 --- /dev/null +++ b/rust/kona/tests/supervisor/pre_interop/post_test.go @@ -0,0 +1,208 @@ +package preinterop + +import ( + "math/rand" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum-optimism/optimism/op-core/forks" + "github.com/ethereum-optimism/optimism/op-core/predeploys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-devstack/presets" + "github.com/ethereum-optimism/optimism/op-service/eth" + stypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" + "github.com/ethereum/go-ethereum/common" +) + +func testSupervisorActivationBlock(t devtest.T, sys *presets.SimpleInterop, net *dsl.L2Network, activationBlock eth.BlockID) { + require := t.Require() + + // wait for some time to ensure the interop activation block is become cross-safe + t.Logger().Info("Waiting for interop activation block to be cross-safe") + sys.Supervisor.WaitForL2HeadToAdvanceTo(net.ChainID(), stypes.CrossSafe, activationBlock) + + interopTime := net.Escape().ChainConfig().InteropTime + 
pre := net.LatestBlockBeforeTimestamp(t, *interopTime) + require.NotNil(pre, "Pre-interop block should be found before interop time") + + // make sure pre-interop block is parent of activation block + require.Equal(pre.Number, activationBlock.Number-1, "Activation block should be one after pre-interop block") + + // fetching the source for the pre-interop block should return the error + // this is to make sure that we only store the blocks after interop + _, err := sys.Supervisor.Escape().QueryAPI().CrossDerivedToSource(t.Ctx(), net.ChainID(), pre.ID()) + require.Error(err, "CrossDerivedToSource should error before interop") + + // fetch the source for the activation block + derivedFrom, err := sys.Supervisor.Escape().QueryAPI().CrossDerivedToSource(t.Ctx(), net.ChainID(), activationBlock) + require.NoError(err, "CrossDerivedToSource should not error after interop") + require.NotNil(derivedFrom, "CrossDerivedToSource should return a valid source block") +} + +// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/upgrade/post_test.go +// test case modified to check the correctness of the supervisor activation block as well +func TestPostInbox(gt *testing.T) { + t := devtest.ParallelT(gt) + sys := newMinimalPreInterop(t) + devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { + require := t.Require() + activationBlock := net.AwaitActivation(t, forks.Interop) + + el := net.PrimaryEL() + implAddrBytes, err := el.EthClient().GetStorageAt(t.Ctx(), predeploys.CrossL2InboxAddr, + genesis.ImplementationSlot, activationBlock.Hash.String()) + require.NoError(err) + implAddr := common.BytesToAddress(implAddrBytes[:]) + require.NotEqual(common.Address{}, implAddr) + code, err := el.EthClient().CodeAtHash(t.Ctx(), implAddr, activationBlock.Hash) + require.NoError(err) + require.NotEmpty(code) + + testSupervisorActivationBlock(t, sys, net, activationBlock) + }) +} + +// Acceptance Test: 
https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/upgrade/post_test.go +func TestPostInteropUpgradeComprehensive(gt *testing.T) { + t := devtest.ParallelT(gt) + sys := newMinimalPreInterop(t) + require := t.Require() + logger := t.Logger() + + // Wait for networks to be online by waiting for blocks + sys.L1Network.WaitForBlock() + sys.L2ChainA.WaitForBlock() + sys.L2ChainB.WaitForBlock() + + // Get interop activation time + interopTime := sys.L2ChainA.Escape().ChainConfig().InteropTime + require.NotNil(interopTime, "InteropTime must be set") + + logger.Info("Starting comprehensive post-interop upgrade tests", "interopTime", *interopTime) + + // 1. Check that anchor block of supervisor matches the activation block + logger.Info("Checking supervisor anchor block matches activation block") + testSupervisorAnchorBlock(t, sys) + + // 2. Check that the supervisor has safety progression for each level + logger.Info("Checking supervisor safety progression") + testSupervisorSafetyProgression(t, sys) + + // 3. 
Confirms that interop message can be included + logger.Info("Testing interop message inclusion") + testInteropMessageInclusion(t, sys) + + logger.Info("All comprehensive post-interop upgrade tests completed successfully") +} + +// testSupervisorAnchorBlock checks that the supervisor's anchor block has been set and matches the upgrade timestamp +func testSupervisorAnchorBlock(t devtest.T, sys *presets.SimpleInterop) { + logger := t.Logger() + + // Use the DSL helper for anchor block validation + logger.Info("Testing supervisor anchor block functionality") + + // Phase 1: Wait for L2 chains to reach interop activation time + logger.Info("Phase 1: Waiting for L2 chains to reach interop activation time") + + devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { + + // Gate test to not time out before upgrade happens + forkTimestamp := net.Escape().ChainConfig().InteropTime + t.Gate().NotNil(forkTimestamp, "Must have fork configured") + t.Gate().Greater(*forkTimestamp, uint64(0), "Must not start fork at genesis") + upgradeTime := time.Unix(int64(*forkTimestamp), 0) + if deadline, hasDeadline := t.Deadline(); hasDeadline { + t.Gate().True(upgradeTime.Before(deadline), "test must not time out before upgrade happens") + } + + activationBlock := net.AwaitActivation(t, forks.Interop) + sys.Supervisor.WaitForL2HeadToAdvanceTo(net.ChainID(), stypes.CrossSafe, activationBlock) + + logger.Info("Validating anchor block timing", + "chainID", net.ChainID(), + "derivedBlockNumber", activationBlock.Number, + "interopTime", *forkTimestamp) + }) + + logger.Info("Supervisor anchor block validation completed successfully") +} + +// testSupervisorSafetyProgression checks that supervisor has safety progression for each level +func testSupervisorSafetyProgression(t devtest.T, sys *presets.SimpleInterop) { + logger := t.Logger() + logger.Info("Testing supervisor safety progression") + + delta := uint64(3) // Minimum blocks of progression expected + dsl.CheckAll(t, 
+ sys.L2CLA.AdvancedFn(stypes.LocalUnsafe, delta, 30), + sys.L2CLB.AdvancedFn(stypes.LocalUnsafe, delta, 30), + + sys.L2CLA.AdvancedFn(stypes.LocalSafe, delta, 30), + sys.L2CLB.AdvancedFn(stypes.LocalSafe, delta, 30), + + sys.L2CLA.AdvancedFn(stypes.CrossUnsafe, delta, 30), + sys.L2CLB.AdvancedFn(stypes.CrossUnsafe, delta, 30), + + sys.L2CLA.AdvancedFn(stypes.CrossSafe, delta, 60), + sys.L2CLB.AdvancedFn(stypes.CrossSafe, delta, 60), + ) + + logger.Info("Supervisor safety progression validation completed successfully") +} + +// testInteropMessageInclusion confirms that interop messages can be included post-upgrade +func testInteropMessageInclusion(t devtest.T, sys *presets.SimpleInterop) { + logger := t.Logger() + logger.Info("Starting interop message inclusion test") + + // Phase 1: Setup test accounts and contracts + alice, bob, eventLoggerAddress := setupInteropTestEnvironment(sys) + + // Phase 2: Send init message on chain A + rng := rand.New(rand.NewSource(1234)) + initMsg := alice.SendInitMessage(interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(5), rng.Intn(30))) + + // Make sure supervisor indexes block which includes init message + sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) + + // Single event in tx so index is 0 + execMsg := bob.SendExecMessage(initMsg) + + // Phase 5: Verify cross-safe progression + verifyInteropMessagesProgression(t, sys, initMsg, execMsg) + + logger.Info("Interop message inclusion test completed successfully") +} + +// setupInteropTestEnvironment creates test accounts and deploys necessary contracts +func setupInteropTestEnvironment(sys *presets.SimpleInterop) (alice, bob *dsl.EOA, eventLoggerAddress common.Address) { + + // Create EOAs for interop messaging + alice = sys.FunderA.NewFundedEOA(eth.OneHundredthEther) + bob = sys.FunderB.NewFundedEOA(eth.OneHundredthEther) + + // Deploy event logger contract on chain A + eventLoggerAddress = alice.DeployEventLogger() + + // Wait for chains to catch up + 
sys.L2ChainB.CatchUpTo(sys.L2ChainA) + + return alice, bob, eventLoggerAddress +} + +// verifyInteropMessagesProgression verifies cross-safe progression for both init and exec messages +func verifyInteropMessagesProgression(t devtest.T, sys *presets.SimpleInterop, initMsg *dsl.InitMessage, execMsg *dsl.ExecMessage) { + logger := t.Logger() + + // Verify cross-safe progression for both messages + dsl.CheckAll(t, + sys.L2CLA.ReachedRefFn(stypes.CrossSafe, initMsg.BlockID(), 60), + sys.L2CLB.ReachedRefFn(stypes.CrossSafe, execMsg.BlockID(), 60), + ) + + logger.Info("Cross-safe progression verified for both init and exec messages") +} diff --git a/rust/kona/tests/supervisor/pre_interop/pre_test.go b/rust/kona/tests/supervisor/pre_interop/pre_test.go new file mode 100644 index 0000000000000..1ae079bf01d6d --- /dev/null +++ b/rust/kona/tests/supervisor/pre_interop/pre_test.go @@ -0,0 +1,110 @@ +package preinterop + +import ( + "math/rand" + "testing" + "time" + + "github.com/ethereum/go-ethereum/common" + + "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" + "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" + "github.com/ethereum-optimism/optimism/op-core/predeploys" + "github.com/ethereum-optimism/optimism/op-devstack/devtest" + "github.com/ethereum-optimism/optimism/op-devstack/dsl" + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txintent" + stypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" +) + +// Acceptance Test: https://github.com/ethereum-optimism/optimism/blob/develop/op-acceptance-tests/tests/interop/upgrade/pre_test.go +func TestPreNoInbox(gt *testing.T) { + t := devtest.ParallelT(gt) + sys := newMinimalPreInterop(t) + require := t.Require() + + t.Logger().Info("Starting") + + devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { + 
interopTime := net.Escape().ChainConfig().InteropTime + t.Require().NotNil(interopTime) + pre := net.LatestBlockBeforeTimestamp(t, *interopTime) + el := net.PrimaryEL() + codeAddr := common.HexToAddress("0xC0D3C0d3C0D3C0d3c0d3c0D3c0D3C0d3C0D30022") + implCode, err := el.EthClient().CodeAtHash(t.Ctx(), codeAddr, pre.Hash) + require.NoError(err) + require.Len(implCode, 0, "needs to be empty") + implAddrBytes, err := el.EthClient().GetStorageAt(t.Ctx(), predeploys.CrossL2InboxAddr, + genesis.ImplementationSlot, pre.Hash.String()) + require.NoError(err) + require.Equal(common.Address{}, common.BytesToAddress(implAddrBytes[:])) + }) + + // try access the sync-status of the supervisor, assert that the sync-status returns the expected error + devtest.RunParallel(t, sys.L2Networks(), func(t devtest.T, net *dsl.L2Network) { + interopTime := net.Escape().ChainConfig().InteropTime + + _, err := sys.Supervisor.Escape().QueryAPI().SyncStatus(t.Ctx()) + require.ErrorContains(err, "supervisor status tracker not ready") + + // confirm we are still pre-interop + require.False(net.IsActivated(*interopTime)) + t.Logger().Info("Timestamps", "interopTime", *interopTime, "now", time.Now().Unix()) + }) + + var initMsg *dsl.InitMessage + // try interop before the upgrade, confirm that messages do not get included + { + // two EOAs for triggering the init and exec interop txs + alice := sys.FunderA.NewFundedEOA(eth.OneHundredthEther) + bob := sys.FunderB.NewFundedEOA(eth.OneHundredthEther) + + interopTimeA := sys.L2ChainA.Escape().ChainConfig().InteropTime + interopTimeB := sys.L2ChainB.Escape().ChainConfig().InteropTime + + eventLoggerAddress := alice.DeployEventLogger() + + // wait for chain B to catch up to chain A if necessary + sys.L2ChainB.CatchUpTo(sys.L2ChainA) + + // send initiating message on chain A + rng := rand.New(rand.NewSource(time.Now().UnixNano())) + initMsg = alice.SendInitMessage(interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(3), rng.Intn(10))) + + // at 
least one block between the init tx on chain A and the exec tx on chain B + sys.L2ChainB.WaitForBlock() + + // send executing message on chain B and confirm we got an error + execTx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) + execTx.Content.DependOn(&initMsg.Tx.Result) + execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)) + execReceipt, err := execTx.PlannedTx.Included.Eval(sys.T.Ctx()) + require.ErrorContains(err, "implementation not initialized", "error did not contain expected string") + require.Nil(execReceipt) + + t.Logger().Info("initReceipt", "blocknum", initMsg.Receipt.BlockNumber, "txhash", initMsg.Receipt.TxHash) + + // confirm we are still pre-interop + require.False(sys.L2ChainA.IsActivated(*interopTimeA)) + require.False(sys.L2ChainB.IsActivated(*interopTimeB)) + t.Logger().Info("Timestamps", "interopTimeA", *interopTimeA, "interopTimeB", *interopTimeB, "now", time.Now().Unix()) + } + + // check that log events from a block before activation, when converted into an access-list, fail the check-access-list RPC check + { + ctx := sys.T.Ctx() + + execTrigger, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)(ctx) + require.NoError(err) + + ed := stypes.ExecutingDescriptor{Timestamp: uint64(time.Now().Unix())} + accessEntries := []stypes.Access{execTrigger.Msg.Access()} + accessList := stypes.EncodeAccessList(accessEntries) + + err = sys.Supervisor.Escape().QueryAPI().CheckAccessList(ctx, accessList, stypes.CrossSafe, ed) + require.ErrorContains(err, "conflicting data") + } + + t.Logger().Info("Done") +} diff --git a/rust/kona/tests/supervisor/presets/interop_minimal.go b/rust/kona/tests/supervisor/presets/interop_minimal.go index e454a3b9c166f..0d5ad7e09fa5e 100644 --- a/rust/kona/tests/supervisor/presets/interop_minimal.go +++ b/rust/kona/tests/supervisor/presets/interop_minimal.go @@ -7,92 +7,33 @@ import ( 
"github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-devstack/devtest" - "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/sysgo" + oppresets "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" ) -// WithSimpleInteropMinimal specifies a system that meets the SimpleInterop criteria removing the Challenger. -func WithSimpleInteropMinimal() stack.CommonOption { - return stack.MakeCommon(DefaultMinimalInteropSystem(&sysgo.DefaultInteropSystemIDs{})) -} - -func DefaultMinimalInteropSystem(dest *sysgo.DefaultInteropSystemIDs) stack.Option[*sysgo.Orchestrator] { - ids := sysgo.NewDefaultInteropSystemIDs(sysgo.DefaultL1ID, sysgo.DefaultL2AID, sysgo.DefaultL2BID) - opt := stack.Combine[*sysgo.Orchestrator]() - - // start with single chain interop system - opt.Add(baseInteropSystem(&ids.DefaultSingleChainInteropSystemIDs)) - - opt.Add(sysgo.WithDeployerOptions( - sysgo.WithPrefundedL2(ids.L1.ChainID(), ids.L2B.ChainID()), - sysgo.WithInteropAtGenesis(), // this can be overridden by later options - )) - opt.Add(sysgo.WithL2ELNode(ids.L2BEL, sysgo.L2ELWithSupervisor(ids.Supervisor))) - opt.Add(sysgo.WithL2CLNode(ids.L2BCL, ids.L1CL, ids.L1EL, ids.L2BEL, sysgo.L2CLSequencer(), sysgo.L2CLIndexing())) - opt.Add(sysgo.WithBatcher(ids.L2BBatcher, ids.L1EL, ids.L2BCL, ids.L2BEL)) - - opt.Add(sysgo.WithManagedBySupervisor(ids.L2BCL, ids.Supervisor)) - - // Note: we provide L2 CL nodes still, even though they are not used post-interop. - // Since we may create an interop infra-setup, before interop is even scheduled to run. 
- opt.Add(sysgo.WithProposer(ids.L2BProposer, ids.L1EL, &ids.L2BCL, &ids.Supervisor)) - - opt.Add(sysgo.WithFaucets([]stack.ComponentID{ids.L1EL}, []stack.ComponentID{ids.L2AEL, ids.L2BEL})) - - // Upon evaluation of the option, export the contents we created. - // Ids here are static, but other things may be exported too. - opt.Add(stack.Finally(func(orch *sysgo.Orchestrator) { - *dest = ids - })) - - return opt -} - -// baseInteropSystem defines a system that supports interop with a single chain -// Components which are shared across multiple chains are not started, allowing them to be added later including -// any additional chains that have been added. -func baseInteropSystem(ids *sysgo.DefaultSingleChainInteropSystemIDs) stack.Option[*sysgo.Orchestrator] { - opt := stack.Combine[*sysgo.Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *sysgo.Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(sysgo.WithMnemonicKeys(devkeys.TestMnemonic)) - - // Get artifacts path +func NewSimpleInteropMinimal(t devtest.T, opts ...oppresets.Option) *oppresets.SimpleInterop { artifactsPath := os.Getenv("OP_DEPLOYER_ARTIFACTS") if artifactsPath == "" { panic("OP_DEPLOYER_ARTIFACTS is not set") } + contractArtifacts := artifacts.MustNewFileLocator(filepath.Join(artifactsPath, "src")) - opt.Add(sysgo.WithDeployer(), - sysgo.WithDeployerPipelineOption( - sysgo.WithDeployerCacheDir(artifactsPath), - ), - sysgo.WithDeployerOptions( - func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { - builder.WithL1ContractsLocator(artifacts.MustNewFileLocator(filepath.Join(artifactsPath, "src"))) - builder.WithL2ContractsLocator(artifacts.MustNewFileLocator(filepath.Join(artifactsPath, "src"))) + baseOpts := []oppresets.Option{ + oppresets.WithDeployerOptions( + func(_ devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { + builder.WithL1ContractsLocator(contractArtifacts) + builder.WithL2ContractsLocator(contractArtifacts) }, - 
sysgo.WithCommons(ids.L1.ChainID()), - sysgo.WithPrefundedL2(ids.L1.ChainID(), ids.L2A.ChainID()), ), - ) - - opt.Add(sysgo.WithL1Nodes(ids.L1EL, ids.L1CL)) - - opt.Add(sysgo.WithSupervisor(ids.Supervisor, ids.Cluster, ids.L1EL)) - - opt.Add(sysgo.WithL2ELNode(ids.L2AEL, sysgo.L2ELWithSupervisor(ids.Supervisor))) - opt.Add(sysgo.WithL2CLNode(ids.L2ACL, ids.L1CL, ids.L1EL, ids.L2AEL, sysgo.L2CLSequencer(), sysgo.L2CLIndexing())) - opt.Add(sysgo.WithTestSequencer(ids.TestSequencer, ids.L1CL, ids.L2ACL, ids.L1EL, ids.L2AEL)) - opt.Add(sysgo.WithBatcher(ids.L2ABatcher, ids.L1EL, ids.L2ACL, ids.L2AEL)) + } + baseOpts = append(baseOpts, opts...) + return oppresets.NewSimpleInterop(t, baseOpts...) +} - opt.Add(sysgo.WithManagedBySupervisor(ids.L2ACL, ids.Supervisor)) +func WithSuggestedInteropActivationOffset(offset uint64) oppresets.Option { + return oppresets.WithSuggestedInteropActivationOffset(offset) +} - // Note: we provide L2 CL nodes still, even though they are not used post-interop. - // Since we may create an interop infra-setup, before interop is even scheduled to run. 
- opt.Add(sysgo.WithProposer(ids.L2AProposer, ids.L1EL, &ids.L2ACL, &ids.Supervisor)) - return opt +func WithInteropNotAtGenesis() oppresets.Option { + return oppresets.WithInteropNotAtGenesis() } diff --git a/rust/op-reth/crates/tests/Makefile b/rust/op-reth/crates/tests/Makefile index 23bd092a1d0af..1eb961cbcd545 100644 --- a/rust/op-reth/crates/tests/Makefile +++ b/rust/op-reth/crates/tests/Makefile @@ -2,17 +2,15 @@ DOCKER_IMAGE_NAME := op-reth DOCKER_TAG := local DOCKERFILE_PATH := ../../../DockerfileOpProof -KURTOSIS_PACKAGE := github.com/ethpandaops/optimism-package@998796c0f3bb478d63d729e65f0b76e24112e00d -DEVNET ?= opgeth-seq-opreth-val GO_PKG_NAME ?= proofs/core SOURCE_DIR := $(shell pwd) OP_DEVSTACK_PROOF_SEQUENCER_EL ?= op-geth OP_DEVSTACK_PROOF_VALIDATOR_EL ?= op-reth-with-proof -.PHONY: all build-docker build-contracts unzip-contract-artifacts update-packages run clean help +.PHONY: all build build-docker build-docker-with-cov build-contracts unzip-contract-artifacts update-packages test-e2e-sysgo # Default target -all: build-docker run +all: test-e2e-sysgo # Build op-reth build: @@ -34,15 +32,6 @@ build-docker-with-cov: -f $(notdir $(DOCKERFILE_PATH)) \ -t $(DOCKER_IMAGE_NAME):$(DOCKER_TAG) . -# Run Kurtosis with the optimism devnet -run: - @echo "Starting Optimism devnet with historical proof configuration..." - @DEVNET_PATH="./devnets/$(DEVNET).yaml"; \ - if [ ! -z "$(DEVNET_CUSTOM_PATH)" ]; then \ - DEVNET_PATH="$(DEVNET_CUSTOM_PATH)"; \ - fi; \ - kurtosis run $(KURTOSIS_PACKAGE) --args-file $$DEVNET_PATH --enclave $(DEVNET) - # Build smart contract artifacts with Foundry build-contracts: @echo "Building contracts with forge..." 
@@ -61,33 +50,14 @@ update-packages: mkdir -p "$(SOURCE_DIR)/artifacts/compressed" cp "$(SOURCE_DIR)/optimism/op-deployer/pkg/deployer/artifacts/forge-artifacts/artifacts.tzst" "$(SOURCE_DIR)/artifacts/compressed/artifacts.tzst" -# Run E2E tests using Kurtosis -test-e2e-kurtosis: build-contracts - @echo "Running E2E tests with Kurtosis for $(DEVNET)" - @DEVNET_PATH="$(SOURCE_DIR)/devnets/$(DEVNET).yaml"; \ - if [ ! -z "$(DEVNET_CUSTOM_PATH)" ]; then \ - DEVNET_PATH="$(DEVNET_CUSTOM_PATH)"; \ - fi; \ - export OP_DEPLOYER_ARTIFACTS="$(SOURCE_DIR)/artifacts/src/forge-artifacts"; \ - export DEVNET_ENV_URL="ktnative://$(DEVNET)$$DEVNET_PATH"; \ - export DISABLE_OP_E2E_LEGACY=true; \ - export DEVSTACK_ORCHESTRATOR=sysext; \ - go test -count=1 -timeout 40m -v ./$(GO_PKG_NAME) - # Run E2E tests using Sysgo -test-e2e-sysgo: unzip-contract-artifacts build-contracts +test-e2e-sysgo: build unzip-contract-artifacts build-contracts @echo "Running E2E tests with Sysgo" export OP_DEPLOYER_ARTIFACTS="$(SOURCE_DIR)/artifacts/src/forge-artifacts"; \ export DISABLE_OP_E2E_LEGACY=true; \ - export DEVSTACK_ORCHESTRATOR=sysgo; \ export OP_RETH_ENABLE_PROOF_HISTORY=true; \ export SKIP_P2P_CONNECTION_CHECK=true; \ export OP_RETH_EXEC_PATH="${SOURCE_DIR}/../../../target/debug/op-reth"; \ export OP_DEVSTACK_PROOF_SEQUENCER_EL=$(OP_DEVSTACK_PROOF_SEQUENCER_EL); \ export OP_DEVSTACK_PROOF_VALIDATOR_EL=$(OP_DEVSTACK_PROOF_VALIDATOR_EL); \ go test -count=1 -timeout 40m -v ./$(GO_PKG_NAME) - -# Stop and clean Kurtosis services -clean: - @echo "Cleaning up Kurtosis services..." - kurtosis clean -a diff --git a/rust/op-reth/crates/tests/README.md b/rust/op-reth/crates/tests/README.md index 7169c09da70b8..a536ccff6df29 100644 --- a/rust/op-reth/crates/tests/README.md +++ b/rust/op-reth/crates/tests/README.md @@ -1,67 +1,54 @@ # E2E tests for op-reth -This folder contains the end-to-end testing resources for op-reth. 
Tests use the Optimism "devstack" (from the Optimism monorepo) and Kurtosis to deploy ephemeral devnets. +This folder contains the end-to-end testing resources for op-reth. Tests run against in-process `op-devstack` systems (sysgo). -This README documents common workflows and Makefile commands used to build the local Docker image, start the devnet with Kurtosis, run e2e tests, and clean up resources. +This README documents common workflows and Makefile commands used to build artifacts and run e2e tests. ## Prerequisites -- Docker (Desktop) running on your machine -- Kurtosis CLI installed and able to reach the Kurtosis engine - Go (to run Go-based e2e tests) +- Rust toolchain (to build `op-reth`) +- Foundry (`forge`) for proof contract artifacts +- Docker (optional, only for `make build-docker`) ## Commands (Makefile targets) -Build the Docker image used by the devnet (tags `op-reth:local`): +Build the local `op-reth` binary: ```sh make build ``` -Start the Optimism devnet (default: `simple-historical-proof`): +Run the e2e test suite in sysgo mode (Go tests): ```sh -# uses the Makefile's DEVNET variable (devnets/.yaml) -# OPTIONAL. 
Default: opgeth-seq-opreth-val -make run DEVNET= - -# or with a custom devnet YAML path -make run DEVNET_CUSTOM_PATH=/absolute/path/to/devnet.yaml -``` - -Run the e2e test suite that exercises the deployed devnet (Go tests): - -```sh -# runs go test with a long timeout; set GO_PKG_NAME to the package to test -make test-e2e-kurtosis +# runs go test with a long timeout; defaults to GO_PKG_NAME=proofs/core +make test-e2e-sysgo # run a specific test or package -make test-e2e-kurtosis GO_PKG_NAME=path/to/pkg +make test-e2e-sysgo GO_PKG_NAME=path/to/pkg ``` -Stop and remove Kurtosis resources (cleanup): +Optional: build a local Docker image (`op-reth:local`): ```sh -make clean +make build-docker ``` ## Implementation notes -- The Makefile in this directory calls the repository root `DockerfileOp` to build an op-reth image tagged `op-reth:local`. -- The default Kurtosis package used is `github.com/ethpandaops/optimism-package@1.4.0`. The Makefile passes the YAML under `devnets/$(DEVNET).yaml` to `kurtosis run`. +- `make test-e2e-sysgo` now builds `op-reth` before running tests. +- The test target sets `OP_RETH_EXEC_PATH` to `../../../target/debug/op-reth`. +- You can override proof EL kinds with: + - `OP_DEVSTACK_PROOF_SEQUENCER_EL` + - `OP_DEVSTACK_PROOF_VALIDATOR_EL` ## Quick workflow example ```sh -# build image +# build op-reth make build -# start devnet -make run - # run tests (set GO_PKG_NAME if needed) -make test-e2e-kurtosis GO_PKG_NAME=proofs - -# cleanup -make clean +make test-e2e-sysgo GO_PKG_NAME=proofs ``` diff --git a/rust/op-reth/crates/tests/devnets/opgeth-seq-opreth-val.yaml b/rust/op-reth/crates/tests/devnets/opgeth-seq-opreth-val.yaml deleted file mode 100644 index 65aaa2f32034d..0000000000000 --- a/rust/op-reth/crates/tests/devnets/opgeth-seq-opreth-val.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# A simple network configuration for kurtosis (https://github.com/ethpandaops/optimism-package) -# Spins up chain with two participating EL/CL pairs. 
-# One with op-geth/op-node (sequencer role) and one with op-reth/op-node (verifier role). - -optimism_package: - observability: - enabled: true - grafana_params: - # Will load the dashboards from default branch. - dashboard_sources: - - github.com/op-rs/op-reth/etc/grafana - image: "grafana/grafana:12.3.0" - faucet: - enabled: true - test-sequencers: - sequencer: - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-test-sequencer:9243bb0452efa3fd255556631688d1255723384a - enabled: true - chains: - chain0: - # Chain with only two nodes - participants: - sequencer: - el: - type: op-geth - log_level: "debug" - cl: - type: op-node - log_level: "debug" - extra_params: [--experimental.sequencer-api=true] - sequencer: true - verifier: - el: - type: op-reth - # Note: we use the local image for now. This allows us to run the tests in CI pipelines without publishing new docker images every time. - image: op-reth:local - extra_params: [ - --proofs-history, - --proofs-history.window=200, - --proofs-history.prune-interval=1m, - --proofs-history.storage-path=/data/proofs-history - ] - cl: - type: op-node - log_level: "debug" - extra_params: [--experimental.sequencer-api=true] - sequencer: false - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - participants: - - el_type: geth - cl_type: teku - cl_image: consensys/teku:25.7.1 - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: ' - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } - ' diff --git a/rust/op-reth/crates/tests/devnets/opreth-seq-opgeth-val.yaml b/rust/op-reth/crates/tests/devnets/opreth-seq-opgeth-val.yaml 
deleted file mode 100644 index 2b43aa72d173e..0000000000000 --- a/rust/op-reth/crates/tests/devnets/opreth-seq-opgeth-val.yaml +++ /dev/null @@ -1,74 +0,0 @@ -# A simple network configuration for kurtosis (https://github.com/ethpandaops/optimism-package) -# Spins up chain with two participating EL/CL pairs. -# One with op-geth/op-node (verifier role) and one with op-reth/op-node (sequencer role). - -optimism_package: - observability: - enabled: true - grafana_params: - # Will load the dashboards from default branch. - dashboard_sources: - - github.com/op-rs/op-reth/etc/grafana - image: "grafana/grafana:12.3.0" - faucet: - enabled: true - test-sequencers: - sequencer: - image: us-docker.pkg.dev/oplabs-tools-artifacts/images/op-test-sequencer:9243bb0452efa3fd255556631688d1255723384a - enabled: true - chains: - chain0: - # Chain with only two nodes - participants: - sequencer: - el: - type: op-reth - # Note: we use the local image for now. This allows us to run the tests in CI pipelines without publishing new docker images every time. 
- image: op-reth:local - extra_params: [ - --proofs-history, - --proofs-history.window=200, - --proofs-history.prune-interval=1m, - --proofs-history.storage-path=/data/proofs-history - ] - cl: - type: op-node - log_level: "debug" - extra_params: [--experimental.sequencer-api=true] - sequencer: true - verifier: - el: - type: op-geth - log_level: "debug" - cl: - type: op-node - log_level: "debug" - extra_params: [--experimental.sequencer-api=true] - sequencer: false - - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - participants: - - el_type: geth - cl_type: teku - cl_image: consensys/teku:25.7.1 - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: ' - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } - ' diff --git a/rust/op-reth/crates/tests/proofs/core/init_test.go b/rust/op-reth/crates/tests/proofs/core/init_test.go deleted file mode 100644 index df6fe1ec27d7a..0000000000000 --- a/rust/op-reth/crates/tests/proofs/core/init_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package core - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/op-rs/op-geth/proofs/utils" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // Other setups may be added here, hydrated from the same orchestrator - presets.DoMain(m, utils.WithMixedOpProofPreset()) -} diff --git a/rust/op-reth/crates/tests/proofs/prune/init_test.go b/rust/op-reth/crates/tests/proofs/prune/init_test.go deleted file mode 100644 index bd9082e259d41..0000000000000 --- a/rust/op-reth/crates/tests/proofs/prune/init_test.go 
+++ /dev/null @@ -1,14 +0,0 @@ -package prune - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/op-rs/op-geth/proofs/utils" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // Other setups may be added here, hydrated from the same orchestrator - presets.DoMain(m, utils.WithMixedOpProofPreset()) -} diff --git a/rust/op-reth/crates/tests/proofs/reorg/init_test.go b/rust/op-reth/crates/tests/proofs/reorg/init_test.go deleted file mode 100644 index cfcf49cbe55a3..0000000000000 --- a/rust/op-reth/crates/tests/proofs/reorg/init_test.go +++ /dev/null @@ -1,14 +0,0 @@ -package reorg - -import ( - "testing" - - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/op-rs/op-geth/proofs/utils" -) - -// TestMain creates the test-setups against the shared backend -func TestMain(m *testing.M) { - // Other setups may be added here, hydrated from the same orchestrator - presets.DoMain(m, utils.WithMixedOpProofPreset()) -} diff --git a/rust/op-reth/crates/tests/proofs/utils/preset.go b/rust/op-reth/crates/tests/proofs/utils/preset.go index 55cbc804103b5..c590ea8580df8 100644 --- a/rust/op-reth/crates/tests/proofs/utils/preset.go +++ b/rust/op-reth/crates/tests/proofs/utils/preset.go @@ -2,7 +2,6 @@ package utils import ( "os" - "strings" "github.com/ethereum/go-ethereum/log" @@ -10,13 +9,10 @@ import ( "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" - "github.com/ethereum-optimism/optimism/op-devstack/presets" - "github.com/ethereum-optimism/optimism/op-devstack/shim" + devpresets "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-devstack/stack" - "github.com/ethereum-optimism/optimism/op-devstack/stack/match" "github.com/ethereum-optimism/optimism/op-devstack/sysgo" 
"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/intentbuilder" - "github.com/ethereum-optimism/optimism/op-service/eth" ) type L2ELClient string @@ -27,23 +23,18 @@ const ( L2ELClientRethWithProofs L2ELClient = "reth-with-proof" ) -type L2ELNodeID struct { - L2ELNodeID stack.ComponentID - Client L2ELClient -} - type L2ELNode struct { *dsl.L2ELNode Client L2ELClient } type MixedOpProofPreset struct { - Log log.Logger - T devtest.T - ControlPlane stack.ControlPlane + Log log.Logger + T devtest.T L1Network *dsl.L1Network L1EL *dsl.L1ELNode + L1CL *dsl.L1CLNode L2Chain *dsl.L2Network L2Batcher *dsl.L2Batcher @@ -76,293 +67,171 @@ func (m *MixedOpProofPreset) L2ELValidatorNode() *dsl.L2ELNode { return m.L2ELValidator.L2ELNode } -// GethL2ELNode returns first L2 EL nodes that are running op-geth func (m *MixedOpProofPreset) GethL2ELNode() *dsl.L2ELNode { if m.L2ELSequencer.Client == L2ELClientGeth { return m.L2ELSequencer.L2ELNode } - if m.L2ELValidator.Client == L2ELClientGeth { return m.L2ELValidator.L2ELNode } - return nil } -// RethL2ELNode returns first L2 EL nodes that are running op-reth func (m *MixedOpProofPreset) RethL2ELNode() *dsl.L2ELNode { if m.L2ELSequencer.Client == L2ELClientReth { return m.L2ELSequencer.L2ELNode } - if m.L2ELValidator.Client == L2ELClientReth { return m.L2ELValidator.L2ELNode } return nil } -// RethWithProofL2ELNode returns first L2 EL nodes that are running op-reth with proof func (m *MixedOpProofPreset) RethWithProofL2ELNode() *dsl.L2ELNode { if m.L2ELSequencer.Client == L2ELClientRethWithProofs { return m.L2ELSequencer.L2ELNode } - if m.L2ELValidator.Client == L2ELClientRethWithProofs { return m.L2ELValidator.L2ELNode } return nil } -func WithMixedOpProofPreset() stack.CommonOption { - return stack.MakeCommon(DefaultMixedOpProofSystem(&DefaultMixedOpProofSystemIDs{})) -} - -func L2NodeMatcher[E stack.Identifiable](value ...string) stack.Matcher[E] { - return match.MatchElemFn[E](func(elem E) bool { - for _, v := range value { 
- if !strings.Contains(elem.ID().Key(), v) { - return false - } - } - return true - }) -} - func NewMixedOpProofPreset(t devtest.T) *MixedOpProofPreset { - system := shim.NewSystem(t) - orch := presets.Orchestrator() - orch.Hydrate(system) - - t.Gate().Equal(len(system.L2Networks()), 1, "expected exactly one L2 network") - t.Gate().Equal(len(system.L1Networks()), 1, "expected exactly one L1 network") + nodeSpecs := mixedOpProofNodeSpecs() + runtime := sysgo.NewMixedSingleChainRuntime(t, sysgo.MixedSingleChainPresetConfig{ + NodeSpecs: nodeSpecs, + WithTestSequencer: true, + TestSequencerName: "test-sequencer", + DeployerOptions: proofDeployerOptions(t), + }) - l1Net := system.L1Network(match.FirstL1Network) - l2Net := system.L2Network(match.Assume(t, match.L2ChainA)) + frontends := devpresets.NewMixedSingleChainFrontends(t, runtime) + return mixedOpProofFromFrontends(t, frontends) +} - t.Gate().GreaterOrEqual(len(l2Net.L2CLNodes()), 2, "expected at least two L2CL nodes") +func mixedOpProofFromFrontends(t devtest.T, frontends *devpresets.MixedSingleChainFrontends) *MixedOpProofPreset { + t.Require().NotNil(frontends.TestSequencer, "expected test sequencer frontend") - sequencerCL := l2Net.L2CLNode(match.Assume(t, match.WithSequencerActive(t.Ctx()))) - sequencerELInner := l2Net.L2ELNode(match.Assume(t, match.EngineFor(sequencerCL))) - var sequencerEL *L2ELNode - if strings.Contains(sequencerELInner.ID().String(), "op-reth-with-proof") { - sequencerEL = &L2ELNode{ - L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), - Client: L2ELClientRethWithProofs, - } - } else if strings.Contains(sequencerELInner.ID().String(), "op-reth") { - sequencerEL = &L2ELNode{ - L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), - Client: L2ELClientReth, - } - } else if strings.Contains(sequencerELInner.ID().String(), "op-geth") { - sequencerEL = &L2ELNode{ - L2ELNode: dsl.NewL2ELNode(sequencerELInner, orch.ControlPlane()), - Client: L2ELClientGeth, + var 
l2ELSequencer *L2ELNode + var l2CLSequencer *dsl.L2CLNode + var l2ELValidator *L2ELNode + var l2CLValidator *dsl.L2CLNode + for _, node := range frontends.Nodes { + client := mixedOpProofClientFromSpec(node.Spec) + if node.Spec.IsSequencer { + l2ELSequencer = &L2ELNode{L2ELNode: node.EL, Client: client} + l2CLSequencer = node.CL + continue } - } else { - t.Error("unexpected L2EL client for sequencer") - t.FailNow() - } - - verifierCL := l2Net.L2CLNode(match.Assume(t, - match.And( - match.Not(match.WithSequencerActive(t.Ctx())), - match.Not(sequencerCL.ID()), - ))) - verifierELInner := l2Net.L2ELNode(match.Assume(t, - match.And( - match.EngineFor(verifierCL), - match.Not(sequencerEL.ID()), - ))) - var verifierEL *L2ELNode - if strings.Contains(verifierELInner.ID().String(), "op-reth-with-proof") { - verifierEL = &L2ELNode{ - L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), - Client: L2ELClientRethWithProofs, - } - } else if strings.Contains(verifierELInner.ID().String(), "op-reth") { - verifierEL = &L2ELNode{ - L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), - Client: L2ELClientReth, - } - } else if strings.Contains(verifierELInner.ID().String(), "op-geth") { - verifierEL = &L2ELNode{ - L2ELNode: dsl.NewL2ELNode(verifierELInner, orch.ControlPlane()), - Client: L2ELClientGeth, - } - } else { - t.Error("unexpected L2EL client for verifier") - t.FailNow() + l2ELValidator = &L2ELNode{L2ELNode: node.EL, Client: client} + l2CLValidator = node.CL } + t.Require().NotNil(l2ELSequencer, "missing sequencer EL frontend") + t.Require().NotNil(l2CLSequencer, "missing sequencer CL frontend") + t.Require().NotNil(l2ELValidator, "missing validator EL frontend") + t.Require().NotNil(l2CLValidator, "missing validator CL frontend") out := &MixedOpProofPreset{ Log: t.Logger(), T: t, - ControlPlane: orch.ControlPlane(), - L1Network: dsl.NewL1Network(l1Net), - L1EL: dsl.NewL1ELNode(l1Net.L1ELNode(match.Assume(t, match.FirstL1EL))), - L2Chain: 
dsl.NewL2Network(l2Net, orch.ControlPlane()), - L2Batcher: dsl.NewL2Batcher(l2Net.L2Batcher(match.Assume(t, match.FirstL2Batcher))), - L2ELSequencer: sequencerEL, - L2CLSequencer: dsl.NewL2CLNode(sequencerCL, orch.ControlPlane()), - L2ELValidator: verifierEL, - L2CLValidator: dsl.NewL2CLNode(verifierCL, orch.ControlPlane()), - Wallet: dsl.NewRandomHDWallet(t, 30), // Random for test isolation - FaucetL2: dsl.NewFaucet(l2Net.Faucet(match.Assume(t, match.FirstFaucet))), - - TestSequencer: dsl.NewTestSequencer(system.TestSequencer(match.Assume(t, match.FirstTestSequencer))), + L1Network: frontends.L1Network, + L1EL: frontends.L1EL, + L1CL: frontends.L1CL, + L2Chain: frontends.L2Network, + L2Batcher: frontends.L2Batcher, + L2ELSequencer: l2ELSequencer, + L2CLSequencer: l2CLSequencer, + L2ELValidator: l2ELValidator, + L2CLValidator: l2CLValidator, + Wallet: dsl.NewRandomHDWallet(t, 30), + FaucetL1: frontends.FaucetL1, + FaucetL2: frontends.FaucetL2, + TestSequencer: frontends.TestSequencer, } - out.FaucetL1 = dsl.NewFaucet(out.L1Network.Escape().Faucet(match.Assume(t, match.FirstFaucet))) out.FunderL1 = dsl.NewFunder(out.Wallet, out.FaucetL1, out.L1EL) out.FunderL2 = dsl.NewFunder(out.Wallet, out.FaucetL2, out.L2ELSequencer) return out } -type DefaultMixedOpProofSystemIDs struct { - L1 stack.ComponentID - L1EL stack.ComponentID - L1CL stack.ComponentID - - L2 stack.ComponentID - - L2CLSequencer stack.ComponentID - L2ELSequencer L2ELNodeID - - L2CLValidator stack.ComponentID - L2ELValidator L2ELNodeID - - L2Batcher stack.ComponentID - L2Proposer stack.ComponentID - L2Challenger stack.ComponentID - - TestSequencer stack.ComponentID +func proofDeployerOptions(t devtest.T) []sysgo.DeployerOption { + artifactsPath := os.Getenv("OP_DEPLOYER_ARTIFACTS") + t.Require().NotEmpty(artifactsPath, "OP_DEPLOYER_ARTIFACTS is not set") + return []sysgo.DeployerOption{ + func(_ devtest.T, _ devkeys.Keys, builder intentbuilder.Builder) { + locator := 
artifacts.MustNewFileLocator(artifactsPath) + builder.WithL1ContractsLocator(locator) + builder.WithL2ContractsLocator(locator) + }, + } } -func NewDefaultMixedOpProofSystemIDs(l1ID, l2ID eth.ChainID) DefaultMixedOpProofSystemIDs { - ids := DefaultMixedOpProofSystemIDs{ - L1: stack.NewL1NetworkID(l1ID), - L1EL: stack.NewL1ELNodeID("l1", l1ID), - L1CL: stack.NewL1CLNodeID("l1", l1ID), - L2: stack.NewL2NetworkID(l2ID), - L2CLSequencer: stack.NewL2CLNodeID("sequencer", l2ID), - L2CLValidator: stack.NewL2CLNodeID("validator", l2ID), - L2Batcher: stack.NewL2BatcherID("main", l2ID), - L2Proposer: stack.NewL2ProposerID("main", l2ID), - L2Challenger: stack.NewL2ChallengerID("main", l2ID), - TestSequencer: stack.NewTestSequencerID("test-sequencer"), +func mixedOpProofNodeSpecs() []sysgo.MixedSingleChainNodeSpec { + sequencerClient := mixedOpProofClientFromEnv("OP_DEVSTACK_PROOF_SEQUENCER_EL", L2ELClientGeth) + validatorClient := mixedOpProofClientFromEnv("OP_DEVSTACK_PROOF_VALIDATOR_EL", L2ELClientRethWithProofs) + return []sysgo.MixedSingleChainNodeSpec{ + { + ELKey: mixedOpProofELKey("sequencer", sequencerClient), + CLKey: "sequencer", + ELKind: mixedOpProofELKind(sequencerClient), + ELProofHistory: sequencerClient == L2ELClientRethWithProofs, + CLKind: sysgo.MixedL2CLOpNode, + IsSequencer: true, + }, + { + ELKey: mixedOpProofELKey("validator", validatorClient), + CLKey: "validator", + ELKind: mixedOpProofELKind(validatorClient), + ELProofHistory: validatorClient == L2ELClientRethWithProofs, + CLKind: sysgo.MixedL2CLOpNode, + IsSequencer: false, + }, } +} - // default to op-geth for sequencer and op-reth-with-proof for validator - switch os.Getenv("OP_DEVSTACK_PROOF_SEQUENCER_EL") { - case "op-reth-with-proof": - ids.L2ELSequencer = L2ELNodeID{ - L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-reth-with-proof", l2ID), - Client: L2ELClientRethWithProofs, - } - case "op-reth": - ids.L2ELSequencer = L2ELNodeID{ - L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-reth", l2ID), - 
Client: L2ELClientReth, - } +func mixedOpProofELKey(role string, client L2ELClient) string { + switch client { + case L2ELClientGeth: + return role + "-op-geth" + case L2ELClientReth: + return role + "-op-reth" + case L2ELClientRethWithProofs: + return role + "-op-reth-with-proof" default: - ids.L2ELSequencer = L2ELNodeID{ - L2ELNodeID: stack.NewL2ELNodeID("sequencer-op-geth", l2ID), - Client: L2ELClientGeth, - } + panic("unknown mixed proof L2 EL client") } +} - switch os.Getenv("OP_DEVSTACK_PROOF_VALIDATOR_EL") { +func mixedOpProofClientFromEnv(name string, fallback L2ELClient) L2ELClient { + switch os.Getenv(name) { case "op-geth": - ids.L2ELValidator = L2ELNodeID{ - L2ELNodeID: stack.NewL2ELNodeID("validator-op-geth", l2ID), - Client: L2ELClientGeth, - } + return L2ELClientGeth case "op-reth": - ids.L2ELValidator = L2ELNodeID{ - L2ELNodeID: stack.NewL2ELNodeID("validator-op-reth", l2ID), - Client: L2ELClientReth, - } + return L2ELClientReth + case "op-reth-with-proof": + return L2ELClientRethWithProofs default: - ids.L2ELValidator = L2ELNodeID{ - L2ELNodeID: stack.NewL2ELNodeID("validator-op-reth-with-proof", l2ID), - Client: L2ELClientRethWithProofs, - } + return fallback } - - return ids } -func DefaultMixedOpProofSystem(dest *DefaultMixedOpProofSystemIDs) stack.Option[*sysgo.Orchestrator] { - ids := NewDefaultMixedOpProofSystemIDs(sysgo.DefaultL1ID, sysgo.DefaultL2AID) - return defaultMixedOpProofSystemOpts(&ids, dest) -} - -func defaultMixedOpProofSystemOpts(src, dest *DefaultMixedOpProofSystemIDs) stack.CombinedOption[*sysgo.Orchestrator] { - opt := stack.Combine[*sysgo.Orchestrator]() - opt.Add(stack.BeforeDeploy(func(o *sysgo.Orchestrator) { - o.P().Logger().Info("Setting up") - })) - - opt.Add(sysgo.WithMnemonicKeys(devkeys.TestMnemonic)) - - // Get artifacts path - artifactsPath := os.Getenv("OP_DEPLOYER_ARTIFACTS") - if artifactsPath == "" { - panic("OP_DEPLOYER_ARTIFACTS is not set") +func mixedOpProofClientFromSpec(spec 
sysgo.MixedSingleChainNodeSpec) L2ELClient { + if spec.ELKind == sysgo.MixedL2ELOpGeth { + return L2ELClientGeth } - - opt.Add(sysgo.WithDeployer(), - sysgo.WithDeployerPipelineOption( - sysgo.WithDeployerCacheDir(artifactsPath), - ), - sysgo.WithDeployerOptions( - func(_ devtest.P, _ devkeys.Keys, builder intentbuilder.Builder) { - builder.WithL1ContractsLocator(artifacts.MustNewFileLocator(artifactsPath)) - builder.WithL2ContractsLocator(artifacts.MustNewFileLocator(artifactsPath)) - }, - sysgo.WithCommons(src.L1.ChainID()), - sysgo.WithPrefundedL2(src.L1.ChainID(), src.L2.ChainID()), - ), - ) - - opt.Add(sysgo.WithL1Nodes(src.L1EL, src.L1CL)) - - // Spawn L2 sequencer nodes - switch src.L2ELSequencer.Client { - case L2ELClientRethWithProofs: - opt.Add(sysgo.WithOpReth(src.L2ELSequencer.L2ELNodeID, sysgo.L2ELWithProofHistory(true))) - case L2ELClientReth: - opt.Add(sysgo.WithOpReth(src.L2ELSequencer.L2ELNodeID)) - case L2ELClientGeth: - opt.Add(sysgo.WithOpGeth(src.L2ELSequencer.L2ELNodeID)) - default: - panic("unknown L2 EL client for sequencer") + if spec.ELProofHistory { + return L2ELClientRethWithProofs } - opt.Add(sysgo.WithL2CLNode(src.L2CLSequencer, src.L1CL, src.L1EL, src.L2ELSequencer.L2ELNodeID, sysgo.L2CLSequencer())) + return L2ELClientReth +} - // Spawn L2 validator nodes - switch src.L2ELValidator.Client { - case L2ELClientRethWithProofs: - opt.Add(sysgo.WithOpReth(src.L2ELValidator.L2ELNodeID, sysgo.L2ELWithProofHistory(true))) - case L2ELClientReth: - opt.Add(sysgo.WithOpReth(src.L2ELValidator.L2ELNodeID)) +func mixedOpProofELKind(client L2ELClient) sysgo.MixedL2ELKind { + switch client { case L2ELClientGeth: - opt.Add(sysgo.WithOpGeth(src.L2ELValidator.L2ELNodeID)) + return sysgo.MixedL2ELOpGeth + case L2ELClientReth, L2ELClientRethWithProofs: + return sysgo.MixedL2ELOpReth default: - panic("unknown L2 EL client for validator") + panic("unknown mixed proof L2 EL client") } - opt.Add(sysgo.WithL2CLNode(src.L2CLValidator, src.L1CL, src.L1EL, 
src.L2ELValidator.L2ELNodeID)) - - opt.Add(sysgo.WithBatcher(src.L2Batcher, src.L1EL, src.L2CLSequencer, src.L2ELSequencer.L2ELNodeID)) - opt.Add(sysgo.WithProposer(src.L2Proposer, src.L1EL, &src.L2CLSequencer, nil)) - - opt.Add(sysgo.WithFaucets([]stack.ComponentID{src.L1EL}, []stack.ComponentID{src.L2ELSequencer.L2ELNodeID})) - - opt.Add(sysgo.WithTestSequencer(src.TestSequencer, src.L1CL, src.L2CLSequencer, src.L1EL, src.L2ELSequencer.L2ELNodeID)) - - opt.Add(stack.Finally(func(orch *sysgo.Orchestrator) { - *dest = *src - })) - - return opt } From 7f9662a6613e83035d85b9fffa6eb2b55e9e0c80 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 12 Mar 2026 10:05:23 +1000 Subject: [PATCH 107/201] fix(kona/derive): add over-fill check in BlobSource::load_blobs (#19364) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix(kona/derive): add over-fill check in BlobSource::load_blobs After the blob-pointer fill loop completes, add a post-loop check: if `blob_index < blobs.len()` the provider returned more blobs than were requested. Return `ResetError::BlobsOverFill` (→ PipelineErrorKind::Reset) rather than silently discarding the extras. This mirrors op-node's `fillBlobPointers` check at blob_data_source.go:162-163 which returns `fmt.Errorf("got too many blobs")` wrapped as `NewResetError`. Over-fill can occur with buggy blob providers (e.g. third-party RPC services that ignore the requested hash list) or in rare L1 reorg scenarios where the blob set shifts between hash collection and fetch. Changes: - Add `ResetError::BlobsOverFill(usize, usize)` variant to `pipeline.rs` (symmetric with the existing `BlobsUnderFill` variant). - Import `ResetError` in `blobs.rs` and add the post-loop guard. - Add `should_return_extra_blob` flag to `TestBlobProvider` for testing. - Add `test_load_blobs_overfill_triggers_reset` regression test. 
Fixes https://github.com/ethereum-optimism/optimism/issues/19363 Co-Authored-By: Claude Sonnet 4.6 * chore: use function names instead of line numbers in Go references Co-Authored-By: Claude Opus 4.6 * fix(kona/derive): use named fields for BlobsOverFill variant Co-Authored-By: Claude Opus 4.6 * refactor(kona/derive): rename blob_index to filled_blobs in BlobSource (#19480) Rename the `blob_index` variable in `load_blobs` to `filled_blobs` for clarity, as the variable tracks the number of blob placeholders that were filled rather than serving as a traditional index. Addresses review feedback from optimism#19364. Co-authored-by: Claude Opus 4.6 * chore: remove references to Go implementation in comments Co-Authored-By: Claude Opus 4.6 * fix(kona/derive): remove useless BlobProviderError conversion in map_err The `From for PipelineErrorKind` impl already exists, so `?` handles the conversion automatically. The explicit `map_err` was redundant. Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Sonnet 4.6 --- .../protocol/derive/src/errors/pipeline.rs | 12 ++++ .../protocol/derive/src/sources/blobs.rs | 64 +++++++++++++++++-- .../derive/src/test_utils/blob_provider.rs | 6 ++ 3 files changed, 78 insertions(+), 4 deletions(-) diff --git a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs index a537dcc97c924..0871be93e1264 100644 --- a/rust/kona/crates/protocol/derive/src/errors/pipeline.rs +++ b/rust/kona/crates/protocol/derive/src/errors/pipeline.rs @@ -356,6 +356,17 @@ pub enum ResetError { /// The blob provider returned fewer blobs than expected (under-fill). #[error("Blob provider under-fill: {0}")] BlobsUnderFill(BlobProviderError), + /// The blob provider returned more blobs than were requested (over-fill). + /// Can occur with buggy blob providers or in rare L1 reorg scenarios. 
+ #[error( + "Blob provider over-fill: filled {filled} blob placeholders but provider returned {returned} blobs" + )] + BlobsOverFill { + /// The number of blob placeholders that were filled. + filled: usize, + /// The total number of blobs returned by the provider. + returned: usize, + }, } impl ResetError { @@ -449,6 +460,7 @@ mod tests { expected: 0, actual: 0, }), + ResetError::BlobsOverFill { filled: 0, returned: 0 }, ]; for error in reset_errors { let expected = PipelineErrorKind::Reset(error.clone()); diff --git a/rust/kona/crates/protocol/derive/src/sources/blobs.rs b/rust/kona/crates/protocol/derive/src/sources/blobs.rs index f8661f660f153..7a022dc5a0235 100644 --- a/rust/kona/crates/protocol/derive/src/sources/blobs.rs +++ b/rust/kona/crates/protocol/derive/src/sources/blobs.rs @@ -2,7 +2,7 @@ use crate::{ BlobData, BlobProvider, ChainProvider, DataAvailabilityProvider, PipelineError, - PipelineErrorKind, PipelineResult, + PipelineErrorKind, PipelineResult, ResetError, }; use alloc::{boxed::Box, vec::Vec}; use alloy_consensus::{ @@ -163,14 +163,23 @@ where })?; // Fill the blob pointers. - let mut blob_index = 0; + let mut filled_blobs = 0; for blob in &mut data { - let should_increment = blob.fill(&blobs, blob_index)?; + let should_increment = blob.fill(&blobs, filled_blobs)?; if should_increment { - blob_index += 1; + filled_blobs += 1; } } + // Post-loop over-fill check: if the provider returned more blobs than were + // requested, the pipeline state is inconsistent. Reset so the pipeline retries + // from a clean state. 
+ if filled_blobs < blobs.len() { + return Err( + ResetError::BlobsOverFill { filled: filled_blobs, returned: blobs.len() }.reset() + ); + } + self.open = true; self.data = data; Ok(()) @@ -485,4 +494,51 @@ pub(crate) mod tests { got {err:?}" ); } + + /// Regression test: when the blob provider returns more blobs than were requested + /// (over-fill), `load_blobs` must return `PipelineErrorKind::Reset` rather than + /// silently discarding the extra blobs. + /// Over-fill can occur with buggy providers or in rare L1 reorg scenarios. + #[tokio::test] + async fn test_load_blobs_overfill_triggers_reset() { + use alloy_consensus::Blob; + + let mut source = default_test_blob_source(); + let block_info = BlockInfo::default(); + let batcher_address = + alloy_primitives::address!("A83C816D4f9b2783761a22BA6FADB0eB0606D7B2"); + source.batcher_address = + alloy_primitives::address!("11E9CA82A3a762b4B5bd264d4173a242e7a77064"); + let txs = valid_blob_txs(); + source.chain_provider.insert_block_with_transactions(1, block_info, txs); + // Insert blobs for all the real hashes so fill does not under-fill first. + let hashes = [ + alloy_primitives::b256!( + "012ec3d6f66766bedb002a190126b3549fce0047de0d4c25cffce0dc1c57921a" + ), + alloy_primitives::b256!( + "0152d8e24762ff22b1cfd9f8c0683786a7ca63ba49973818b3d1e9512cd2cec4" + ), + alloy_primitives::b256!( + "013b98c6c83e066d5b14af2b85199e3d4fc7d1e778dd53130d180f5077e2d1c7" + ), + alloy_primitives::b256!( + "01148b495d6e859114e670ca54fb6e2657f0cbae5b08063605093a4b3dc9f8f1" + ), + alloy_primitives::b256!( + "011ac212f13c5dff2b2c6b600a79635103d6f580a4221079951181b25c7e6549" + ), + ]; + for hash in hashes { + source.blob_fetcher.insert_blob(hash, Blob::with_last_byte(1u8)); + } + // Instruct the mock provider to return one extra blob beyond what was requested. 
+ source.blob_fetcher.should_return_extra_blob = true; + + let err = source.load_blobs(&BlockInfo::default(), batcher_address).await.unwrap_err(); + assert!( + matches!(err, PipelineErrorKind::Reset(_)), + "expected Reset for blob over-fill, got {err:?}" + ); + } } diff --git a/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs b/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs index 42dcea5d1a772..cd2a3838066ac 100644 --- a/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs +++ b/rust/kona/crates/protocol/derive/src/test_utils/blob_provider.rs @@ -17,6 +17,9 @@ pub struct TestBlobProvider { /// When `true`, `get_and_validate_blobs` returns `BlobProviderError::BlobNotFound`, /// simulating a missed/orphaned beacon slot (HTTP 404 from the beacon node). pub should_return_not_found: bool, + /// When `true`, `get_and_validate_blobs` appends one extra blob beyond those + /// requested, simulating a buggy provider that returns too many blobs (over-fill). + pub should_return_extra_blob: bool, } impl TestBlobProvider { @@ -55,6 +58,9 @@ impl BlobProvider for TestBlobProvider { blobs.push(Box::new(*data)); } } + if self.should_return_extra_blob { + blobs.push(Box::new(Blob::default())); + } Ok(blobs) } } From 8cfb89f6d9e7925d440475feb4a2dc7ce28b01db Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Thu, 12 Mar 2026 21:00:48 +1000 Subject: [PATCH 108/201] chore(linter): migrate Makefile to justfile (#19473) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * chore(linter): migrate Makefile to justfile Migrate linter build targets from Make to Just. The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * fix: pass make variables as env vars in deprecated.mk shim The deprecated.mk shim forwards all make variable assignments to just via JUSTFLAGS. 
This breaks when inherited variables (like GO_TEST_FLAGS from parent make processes) are not defined in the target justfile, since just rejects unknown variable overrides. Fix by passing variables as environment variables instead of just CLI args. Since all shared justfile variables (GITCOMMIT, GITDATE, VERSION, GOOS, GOARCH, etc.) use env() for defaults, they still pick up the forwarded values. Unknown variables are silently ignored. Co-Authored-By: Claude Opus 4.6 * fix: correct .PHONY declaration in deprecated Make shim The .PHONY declaration was formatted as a recipe (indented under .PHONY:) rather than as a prerequisite list (.PHONY: targets). This caused Make to treat targets like `cannon` and `op-program` as file/directory targets, and since directories with those names exist in the repo, Make would skip them with "is up to date" instead of running the just delegation. Co-Authored-By: Claude Opus 4.6 * fix(linter): restore file-dependency tracking in justfile build target The original Makefile only rebuilt when files under analyzers/ or .custom-gcl.yml changed. Replicate this with a timestamp check in the just build recipe. Co-Authored-By: Claude Opus 4.6 * refactor(linter): extract uptodate check into reusable justfiles/uptodate.sh Move the file-dependency freshness check from an inline bash script in linter/justfile into a standalone helper at justfiles/uptodate.sh so other justfiles can reuse the same pattern. Co-Authored-By: Claude Opus 4.6 * fix(justfiles): rebuild when source paths don't exist in uptodate.sh If a source path is neither a file nor a directory, force a rebuild instead of silently skipping it. Co-Authored-By: Claude Opus 4.6 * chore(justfiles): warn when source path doesn't exist in uptodate.sh Print a warning to stderr before forcing a rebuild when a source path doesn't exist — this is likely an error and should be visible. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- justfiles/deprecated.mk | 7 +++--- justfiles/uptodate.sh | 50 +++++++++++++++++++++++++++++++++++++++++ linter/Makefile | 16 ++----------- linter/justfile | 21 +++++++++++++++++ 4 files changed, 76 insertions(+), 18 deletions(-) create mode 100755 justfiles/uptodate.sh create mode 100644 linter/justfile diff --git a/justfiles/deprecated.mk b/justfiles/deprecated.mk index 655f901348d64..9ac15aa6c1b17 100644 --- a/justfiles/deprecated.mk +++ b/justfiles/deprecated.mk @@ -16,12 +16,11 @@ define make-deprecated-target $1: @echo @printf %s\\n '$(call banner-style,Deprecated make call: make $1 $(JUSTFLAGS))' - @printf %s\\n '$(call banner-style,Consider using just instead: just $(JUSTFLAGS) $1)' + @printf %s\\n '$(call banner-style,Consider using just instead: just $1)' @echo - just $(JUSTFLAGS) $1 + env $(JUSTFLAGS) just $1 endef $(foreach element,$(DEPRECATED_TARGETS),$(eval $(call make-deprecated-target,$(element)))) -.PHONY: - $(DEPRECATED_TARGETS) +.PHONY: $(DEPRECATED_TARGETS) diff --git a/justfiles/uptodate.sh b/justfiles/uptodate.sh new file mode 100755 index 0000000000000..5404976ca6b4f --- /dev/null +++ b/justfiles/uptodate.sh @@ -0,0 +1,50 @@ +#!/usr/bin/env bash +# Check if a target file is up to date relative to source files/directories. +# Exits 0 (true) if the target exists and is newer than all sources. +# Exits 1 (false) if the target needs rebuilding. +# +# Usage: uptodate.sh TARGET SOURCE [SOURCE...] +# +# SOURCE can be a file or a directory. Directories are searched recursively. +# +# Example in a justfile: +# build: +# #!/usr/bin/env bash +# set -euo pipefail +# if ../justfiles/uptodate.sh bin/mybinary src/ config.yml; then +# echo "bin/mybinary is up to date" +# exit 0 +# fi +# go build -o bin/mybinary . + +set -euo pipefail + +if [ $# -lt 2 ]; then + echo "usage: uptodate.sh TARGET SOURCE [SOURCE...]" >&2 + exit 1 +fi + +target="$1" +shift + +if [ ! 
-f "$target" ]; then + exit 1 +fi + +for src in "$@"; do + if [ ! -e "$src" ]; then + echo "warning: source '$src' does not exist, forcing rebuild" >&2 + exit 1 + elif [ -d "$src" ]; then + newer=$(find "$src" -type f -newer "$target" -print -quit 2>/dev/null) + if [ -n "$newer" ]; then + exit 1 + fi + else + if [ "$src" -nt "$target" ]; then + exit 1 + fi + fi +done + +exit 0 diff --git a/linter/Makefile b/linter/Makefile index 59a26b3df18a9..f17a601259a21 100644 --- a/linter/Makefile +++ b/linter/Makefile @@ -1,15 +1,3 @@ -BIN := bin/op-golangci-lint -SRC := $(shell find analyzers -type f) +DEPRECATED_TARGETS := build test clean -build: $(BIN) - -$(BIN): $(SRC) .custom-gcl.yml | bin - golangci-lint custom -v - -bin: - mkdir -p bin - -test: - go test ./... - -.PHONY: build test +include ../justfiles/deprecated.mk diff --git a/linter/justfile b/linter/justfile new file mode 100644 index 0000000000000..8a6f30fd7d1a4 --- /dev/null +++ b/linter/justfile @@ -0,0 +1,21 @@ +import '../justfiles/go.just' + +BIN := "bin/op-golangci-lint" + +# Build the custom linter (only if sources changed) +build: + #!/usr/bin/env bash + set -euo pipefail + mkdir -p bin + if ../justfiles/uptodate.sh "{{BIN}}" analyzers .custom-gcl.yml; then + echo "{{BIN}} is up to date" + exit 0 + fi + golangci-lint custom -v + +# Run tests +test: (go_test "./...") + +# Clean build artifacts +clean: + rm -rf bin From 7f9c2ae6b3bdf404a0cd20df0f35ad16e2afc1c2 Mon Sep 17 00:00:00 2001 From: "devin-ai-integration[bot]" <158243242+devin-ai-integration[bot]@users.noreply.github.com> Date: Thu, 12 Mar 2026 12:57:24 +0100 Subject: [PATCH 109/201] test(contracts): convert GovernanceToken tests to fuzz tests (#19490) Convert 9 hardcoded tests to fuzz tests for broader coverage: - mint: fuzz recipient address and amount - mint revert: fuzz non-owner caller address - burn/burnFrom: fuzz mint and burn amounts - transfer/transferFrom: fuzz mint and transfer amounts - approve: fuzz approval amount - 
increaseAllowance/decreaseAllowance: fuzz initial and delta amounts Co-authored-by: Devin AI <158243242+devin-ai-integration[bot]@users.noreply.github.com> --- .../test/governance/GovernanceToken.t.sol | 166 +++++++++--------- 1 file changed, 80 insertions(+), 86 deletions(-) diff --git a/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol b/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol index 237f13fb094b7..cffe625d89728 100644 --- a/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol +++ b/packages/contracts-bedrock/test/governance/GovernanceToken.t.sol @@ -34,25 +34,28 @@ contract GovernanceToken_Constructor_Test is GovernanceToken_TestInit { /// @title GovernanceToken_Mint_Test /// @notice Tests the `mint` function of the `GovernanceToken` contract. contract GovernanceToken_Mint_Test is GovernanceToken_TestInit { - /// @notice Tests that the owner can successfully call `mint`. - function test_mint_fromOwner_succeeds() external { - // Mint 100 tokens. + /// @notice Tests that the owner can mint tokens to any + /// valid recipient with any valid amount. + function testFuzz_mint_fromOwner_succeeds(address _to, uint256 _amount) external { + vm.assume(_to != address(0)); + _amount = bound(_amount, 0, type(uint224).max); + vm.prank(owner); - governanceToken.mint(owner, 100); + governanceToken.mint(_to, _amount); - // Balances have updated correctly. - assertEq(governanceToken.balanceOf(owner), 100); - assertEq(governanceToken.totalSupply(), 100); + assertEq(governanceToken.balanceOf(_to), _amount); + assertEq(governanceToken.totalSupply(), _amount); } - /// @notice Tests that `mint` reverts when called by a non-owner. - function test_mint_fromNotOwner_reverts() external { - // Mint 100 tokens as rando. - vm.prank(rando); + /// @notice Tests that `mint` reverts when called by + /// any non-owner address. 
+ function testFuzz_mint_fromNotOwner_reverts(address _caller) external { + vm.assume(_caller != owner); + + vm.prank(_caller); vm.expectRevert("Ownable: caller is not the owner"); governanceToken.mint(owner, 100); - // Balance does not update. assertEq(governanceToken.balanceOf(owner), 0); assertEq(governanceToken.totalSupply(), 0); } @@ -62,123 +65,114 @@ contract GovernanceToken_Mint_Test is GovernanceToken_TestInit { /// @notice General tests that are not testing any function directly of the `GovernanceToken` /// contract or are testing multiple functions at once. contract GovernanceToken_Uncategorized_Test is GovernanceToken_TestInit { - /// @notice Tests that the owner can successfully call `burn`. - function test_burn_succeeds() external { - // Mint 100 tokens to rando. + /// @notice Tests that a user can burn a portion of + /// their tokens. + function testFuzz_burn_succeeds(uint256 _mintAmount, uint256 _burnAmount) external { + _mintAmount = bound(_mintAmount, 1, type(uint224).max); + _burnAmount = bound(_burnAmount, 0, _mintAmount); + vm.prank(owner); - governanceToken.mint(rando, 100); + governanceToken.mint(rando, _mintAmount); - // Rando burns their tokens. vm.prank(rando); - governanceToken.burn(50); + governanceToken.burn(_burnAmount); - // Balances have updated correctly. - assertEq(governanceToken.balanceOf(rando), 50); - assertEq(governanceToken.totalSupply(), 50); + assertEq(governanceToken.balanceOf(rando), _mintAmount - _burnAmount); + assertEq(governanceToken.totalSupply(), _mintAmount - _burnAmount); } - /// @notice Tests that the owner can successfully call `burnFrom`. - function test_burnFrom_succeeds() external { - // Mint 100 tokens to rando. + /// @notice Tests that an approved spender can burn + /// tokens from another account. 
+ function testFuzz_burnFrom_succeeds(uint256 _mintAmount, uint256 _burnAmount) external { + _mintAmount = bound(_mintAmount, 1, type(uint224).max); + _burnAmount = bound(_burnAmount, 0, _mintAmount); + vm.prank(owner); - governanceToken.mint(rando, 100); + governanceToken.mint(rando, _mintAmount); - // Rando approves owner to burn 50 tokens. vm.prank(rando); - governanceToken.approve(owner, 50); + governanceToken.approve(owner, _burnAmount); - // Owner burns 50 tokens from rando. vm.prank(owner); - governanceToken.burnFrom(rando, 50); + governanceToken.burnFrom(rando, _burnAmount); - // Balances have updated correctly. - assertEq(governanceToken.balanceOf(rando), 50); - assertEq(governanceToken.totalSupply(), 50); + assertEq(governanceToken.balanceOf(rando), _mintAmount - _burnAmount); + assertEq(governanceToken.totalSupply(), _mintAmount - _burnAmount); } - /// @notice Tests that `transfer` correctly transfers tokens. - function test_transfer_succeeds() external { - // Mint 100 tokens to rando. + /// @notice Tests that `transfer` correctly moves + /// tokens between accounts. + function testFuzz_transfer_succeeds(uint256 _mintAmount, uint256 _transferAmount) external { + _mintAmount = bound(_mintAmount, 1, type(uint224).max); + _transferAmount = bound(_transferAmount, 0, _mintAmount); + vm.prank(owner); - governanceToken.mint(rando, 100); + governanceToken.mint(rando, _mintAmount); - // Rando transfers 50 tokens to owner. vm.prank(rando); - governanceToken.transfer(owner, 50); + governanceToken.transfer(owner, _transferAmount); - // Balances have updated correctly. 
- assertEq(governanceToken.balanceOf(owner), 50); - assertEq(governanceToken.balanceOf(rando), 50); - assertEq(governanceToken.totalSupply(), 100); + assertEq(governanceToken.balanceOf(owner), _transferAmount); + assertEq(governanceToken.balanceOf(rando), _mintAmount - _transferAmount); + assertEq(governanceToken.totalSupply(), _mintAmount); } - /// @notice Tests that `approve` correctly sets allowances. - function test_approve_succeeds() external { - // Mint 100 tokens to rando. - vm.prank(owner); - governanceToken.mint(rando, 100); - - // Rando approves owner to spend 50 tokens. + /// @notice Tests that `approve` correctly sets + /// allowances for any amount. + function testFuzz_approve_succeeds(uint256 _amount) external { vm.prank(rando); - governanceToken.approve(owner, 50); + governanceToken.approve(owner, _amount); - // Allowances have updated. - assertEq(governanceToken.allowance(rando, owner), 50); + assertEq(governanceToken.allowance(rando, owner), _amount); } - /// @notice Tests that `transferFrom` correctly transfers tokens. - function test_transferFrom_succeeds() external { - // Mint 100 tokens to rando. + /// @notice Tests that `transferFrom` correctly moves + /// tokens using an allowance. + function testFuzz_transferFrom_succeeds(uint256 _mintAmount, uint256 _transferAmount) external { + _mintAmount = bound(_mintAmount, 1, type(uint224).max); + _transferAmount = bound(_transferAmount, 0, _mintAmount); + vm.prank(owner); - governanceToken.mint(rando, 100); + governanceToken.mint(rando, _mintAmount); - // Rando approves owner to spend 50 tokens. vm.prank(rando); - governanceToken.approve(owner, 50); + governanceToken.approve(owner, _transferAmount); - // Owner transfers 50 tokens from rando to owner. vm.prank(owner); - governanceToken.transferFrom(rando, owner, 50); + governanceToken.transferFrom(rando, owner, _transferAmount); - // Balances have updated correctly. 
- assertEq(governanceToken.balanceOf(owner), 50); - assertEq(governanceToken.balanceOf(rando), 50); - assertEq(governanceToken.totalSupply(), 100); + assertEq(governanceToken.balanceOf(owner), _transferAmount); + assertEq(governanceToken.balanceOf(rando), _mintAmount - _transferAmount); + assertEq(governanceToken.totalSupply(), _mintAmount); } - /// @notice Tests that `increaseAllowance` correctly increases allowances. - function test_increaseAllowance_succeeds() external { - // Mint 100 tokens to rando. - vm.prank(owner); - governanceToken.mint(rando, 100); + /// @notice Tests that `increaseAllowance` correctly + /// increases an existing allowance. + function testFuzz_increaseAllowance_succeeds(uint256 _initialApproval, uint256 _increase) external { + _initialApproval = bound(_initialApproval, 0, type(uint256).max / 2); + _increase = bound(_increase, 0, type(uint256).max - _initialApproval); - // Rando approves owner to spend 50 tokens. vm.prank(rando); - governanceToken.approve(owner, 50); + governanceToken.approve(owner, _initialApproval); - // Rando increases allowance by 50 tokens. vm.prank(rando); - governanceToken.increaseAllowance(owner, 50); + governanceToken.increaseAllowance(owner, _increase); - // Allowances have updated. - assertEq(governanceToken.allowance(rando, owner), 100); + assertEq(governanceToken.allowance(rando, owner), _initialApproval + _increase); } - /// @notice Tests that `decreaseAllowance` correctly decreases allowances. - function test_decreaseAllowance_succeeds() external { - // Mint 100 tokens to rando. - vm.prank(owner); - governanceToken.mint(rando, 100); + /// @notice Tests that `decreaseAllowance` correctly + /// decreases an existing allowance. + function testFuzz_decreaseAllowance_succeeds(uint256 _initialApproval, uint256 _decrease) external { + _initialApproval = bound(_initialApproval, 0, type(uint256).max); + _decrease = bound(_decrease, 0, _initialApproval); - // Rando approves owner to spend 100 tokens. 
vm.prank(rando); - governanceToken.approve(owner, 100); + governanceToken.approve(owner, _initialApproval); - // Rando decreases allowance by 50 tokens. vm.prank(rando); - governanceToken.decreaseAllowance(owner, 50); + governanceToken.decreaseAllowance(owner, _decrease); - // Allowances have updated. - assertEq(governanceToken.allowance(rando, owner), 50); + assertEq(governanceToken.allowance(rando, owner), _initialApproval - _decrease); } } From f67fd6c6930180012e33878c6054f008ed7b548f Mon Sep 17 00:00:00 2001 From: Copilot <198982749+Copilot@users.noreply.github.com> Date: Thu, 12 Mar 2026 14:37:12 +0000 Subject: [PATCH 110/201] Remove accidental `PR.md` from repository root (#19497) * Initial plan * chore: remove accidental PR.md Co-authored-by: karlfloersch <706123+karlfloersch@users.noreply.github.com> --------- Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com> Co-authored-by: karlfloersch <706123+karlfloersch@users.noreply.github.com> --- PR.md | 92 ----------------------------------------------------------- 1 file changed, 92 deletions(-) delete mode 100644 PR.md diff --git a/PR.md b/PR.md deleted file mode 100644 index dd51d0e764af4..0000000000000 --- a/PR.md +++ /dev/null @@ -1,92 +0,0 @@ -# PR1: Flashblocks Runtime Constructor (No Orchestrator Path) - -## Summary - -This PR makes `presets.NewSingleChainWithFlashblocks(...)` run through a real constructor DAG instead of the legacy orchestrator/system wiring path. - -It is the first concrete preset migration where: - -- the preset no longer calls `DefaultSingleChainSystemWithFlashblocks()`, -- construction happens via direct hierarchical constructor calls in `sysgo`, and -- acceptance tests still consume the same preset API surface. - -## What Changed - -### 1) New runtime for flashblocks - -Added: - -- `op-devstack/sysgo/flashblocks_runtime.go` - -This runtime now constructs and boots the flashblocks test target directly: - -1. build L1/L2 intent world, -2. 
start L1 (EL + fake beacon CL), -3. start sequencer EL, -4. start builder EL (`op-rbuilder`), -5. wire EL P2P peering, -6. start rollup-boost, -7. start sequencer CL (`op-node`), -8. start faucet service for L1 and L2. - -The runtime exports topology + endpoint data needed by presets (L1/L2 configs, deployment, node RPCs, flashblocks WS URLs, faucet endpoints). - -### 2) `NewSingleChainWithFlashblocks` now uses the runtime - -Updated: - -- `op-devstack/presets/flashblocks.go` - -Changes: - -- `NewSingleChainWithFlashblocks` now instantiates `sysgo.NewFlashblocksRuntime(...)`. -- It assembles DSL/shim frontends directly from runtime references. -- It no longer routes through orchestrator/system constructor chains. -- It rejects orchestrator options for this preset (`opts` must be empty). - -### 3) Removed dead flashblocks orchestrator adapter - -Updated: - -- `op-devstack/presets/sysgo_runtime.go` - -Removed: - -- `singleChainWithFlashblocksRuntime` type -- `singleChainWithFlashblocksRuntimeFromOrchestrator(...)` - -This deletes the now-unused preset-specific flashblocks orchestrator hydration path. - -### 4) Added runtime test-sequencer startup - -The runtime now starts an in-process test-sequencer service directly (no orchestrator path), configures L1 + L2 sequencing backends, and exports: - -- admin RPC endpoint, -- JWT secret, -- per-chain control RPC endpoints. - -The preset wires this into `dsl.TestSequencer` via the existing frontend constructor. - -### 5) Flashblocks tests are back to strict test-sequencer usage - -Updated: - -- `op-acceptance-tests/tests/flashblocks/flashblocks_stream_test.go` - -`driveViaTestSequencer(...)` now requires the test-sequencer to exist again (fallback removed), so the test behavior matches the prior deterministic sequencing model. - -## Validation - -Executed: - -- `go test ./op-devstack/sysgo -run '^$'` -- `go test ./op-devstack/presets -run '^$'` -- `go test ./op-acceptance-tests/tests/flashblocks -count=1` - -All passed. 
- -## PR2 Proposal - -1. Move shared constructor primitives into a dedicated package (e.g. runtime builders for L1/L2/faucet/sequencer services). -2. Migrate next preset(s) to runtime assembly (`minimal`, then `base/conductor` path). -3. Start deleting flashblocks legacy sysgo constructor plumbing in `system.go` once no call-sites remain. From 9aabfc58dbe1d7190d1a5ad4e515c6582435e8de Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 13 Mar 2026 03:47:50 +1000 Subject: [PATCH 111/201] chore: add fix-rust-fmt Claude Code skill (#19488) * chore: add fix-rust-fmt Claude Code skill Adds a skill that fixes Rust formatting CI failures by running `just fmt-fix` with the correct nightly toolchain via mise. Co-Authored-By: Claude Opus 4.6 * Apply suggestion from @ajsutton * Update .claude/skills/fix-rust-fmt/SKILL.md Co-authored-by: Sebastian Stammler * docs: link /fix-rust-fmt skill from kona CLAUDE.md Co-Authored-By: Claude Opus 4.6 * docs: reference fix-rust-fmt skill in rust-dev.md Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 Co-authored-by: Sebastian Stammler --- .claude/skills/fix-rust-fmt/SKILL.md | 55 ++++++++++++++++++++++++++++ docs/ai/rust-dev.md | 4 ++ rust/kona/CLAUDE.md | 2 +- 3 files changed, 60 insertions(+), 1 deletion(-) create mode 100644 .claude/skills/fix-rust-fmt/SKILL.md diff --git a/.claude/skills/fix-rust-fmt/SKILL.md b/.claude/skills/fix-rust-fmt/SKILL.md new file mode 100644 index 0000000000000..d87b23bfa9233 --- /dev/null +++ b/.claude/skills/fix-rust-fmt/SKILL.md @@ -0,0 +1,55 @@ +# fix-rust-fmt + +Fix Rust formatting issues in the optimism monorepo which would cause the `rust-fmt` CI job to fail. + +## When to Use + +Use this skill when the `rust-fmt` CI job fails on a PR that touches Rust code. 
+ +### Trigger Phrases + +- "Fix rust formatting" +- "rust-fmt is failing" +- "Fix the rust-fmt CI job" + +## Prerequisites + +- `mise` must be trusted and installed for this repo (`mise trust && mise install`) + +## Workflow + +### Step 1: Ensure mise tools are installed + +From the repo root (or worktree root): + +```bash +cd && mise install +``` + +This installs `rust`, `just`, and all other tools pinned in `mise.toml`. + +### Step 2: Install the nightly toolchain with rustfmt + +The justfile pins a specific nightly (see `NIGHTLY` variable in `rust/justfile`). +Install it: + +```bash +cd /rust && mise exec -- just install-nightly +``` + +### Step 3: Run the formatter + +```bash +cd /rust && mise exec -- just fmt-fix +``` + +Any files that change are the reason the CI job failed. Stage and commit them. + +## Notes + +- `mise exec --` activates the mise environment so `cargo`, `just`, and + `rustup` resolve to the versions pinned in `mise.toml`. +- The nightly toolchain is required because the workspace uses unstable + `rustfmt` options (see `rust/rustfmt.toml`). +- There is no need to inspect `rust-fmt` CI errors — if the job failed, running + `just fmt-fix` and committing the result is the complete fix. diff --git a/docs/ai/rust-dev.md b/docs/ai/rust-dev.md index 881d73191ea3a..8dc3b3ff289c6 100644 --- a/docs/ai/rust-dev.md +++ b/docs/ai/rust-dev.md @@ -3,3 +3,7 @@ This document provides guidance for AI agents working with Rust components in the OP Stack. + +## Skills + +- **Fix Rust Formatting** ([`.claude/skills/fix-rust-fmt/SKILL.md`](../../.claude/skills/fix-rust-fmt/SKILL.md)): Fixes `rust-fmt` CI failures by installing the pinned nightly toolchain and running `just fmt-fix`. Invoke with `/fix-rust-fmt`. 
diff --git a/rust/kona/CLAUDE.md b/rust/kona/CLAUDE.md index 58ba2b486d2bd..23b433cefd7f2 100644 --- a/rust/kona/CLAUDE.md +++ b/rust/kona/CLAUDE.md @@ -5,7 +5,7 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co ## Build Commands - Build workspace: `just b` or `just build-native` - Lint: `just l` or `just lint-native` -- Format: `just f` or `just fmt-native-fix` +- Format: `just f` or `just fmt-native-fix` (if `rust-fmt` CI fails, use `/fix-rust-fmt`) - Run all tests: `just t` or `just tests` - Run specific test: `cargo nextest run --package [package-name] --test [test-name]` - Run single test: `cargo nextest run --package [package-name] --test [test-name] -- [test_function_name]` From 7149381de9a8a462609d650834ef18e2d69f74df Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Thu, 12 Mar 2026 15:20:12 -0400 Subject: [PATCH 112/201] chore(rust/op-reth): bump reth to v1.11.3 (#19498) Bump all reth dependencies from v1.11.2 to v1.11.3 in the rust workspace, including op-reth crate versions and the Cargo.lock. 
--- rust/Cargo.lock | 472 ++++++++++----------- rust/Cargo.toml | 146 +++---- rust/op-reth/bin/Cargo.toml | 2 +- rust/op-reth/crates/chainspec/Cargo.toml | 2 +- rust/op-reth/crates/cli/Cargo.toml | 2 +- rust/op-reth/crates/consensus/Cargo.toml | 2 +- rust/op-reth/crates/evm/Cargo.toml | 2 +- rust/op-reth/crates/exex/Cargo.toml | 2 +- rust/op-reth/crates/flashblocks/Cargo.toml | 2 +- rust/op-reth/crates/hardforks/Cargo.toml | 2 +- rust/op-reth/crates/node/Cargo.toml | 2 +- rust/op-reth/crates/payload/Cargo.toml | 2 +- rust/op-reth/crates/primitives/Cargo.toml | 2 +- rust/op-reth/crates/reth/Cargo.toml | 2 +- rust/op-reth/crates/rpc/Cargo.toml | 2 +- rust/op-reth/crates/storage/Cargo.toml | 2 +- rust/op-reth/crates/trie/Cargo.toml | 2 +- rust/op-reth/crates/txpool/Cargo.toml | 2 +- 18 files changed, 325 insertions(+), 325 deletions(-) diff --git a/rust/Cargo.lock b/rust/Cargo.lock index 06541dc35f0cc..c8c5f4281019b 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -7838,7 +7838,7 @@ dependencies = [ [[package]] name = "op-reth" -version = "1.11.2" +version = "1.11.3" dependencies = [ "clap", "reth-cli-util", @@ -9273,8 +9273,8 @@ checksum = "1e061d1b48cb8d38042de4ae0a7a6401009d6143dc80d2e2d6f31f0bdd6470c7" [[package]] name = "reth-basic-payload-builder" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9297,8 +9297,8 @@ dependencies = [ [[package]] name = "reth-chain-state" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9329,8 +9329,8 @@ dependencies = [ 
[[package]] name = "reth-chainspec" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9349,8 +9349,8 @@ dependencies = [ [[package]] name = "reth-cli" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-genesis", "clap", @@ -9363,8 +9363,8 @@ dependencies = [ [[package]] name = "reth-cli-commands" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -9449,8 +9449,8 @@ dependencies = [ [[package]] name = "reth-cli-runner" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "reth-tasks", "tokio", @@ -9459,8 +9459,8 @@ dependencies = [ [[package]] name = "reth-cli-util" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9479,8 +9479,8 @@ dependencies = [ [[package]] name = "reth-codecs" -version = "1.11.1" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9499,8 +9499,8 @@ dependencies = [ [[package]] name = "reth-codecs-derive" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "proc-macro2", "quote", @@ -9509,8 +9509,8 @@ dependencies = [ [[package]] name = "reth-config" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "eyre", "humantime-serde", @@ -9525,8 +9525,8 @@ dependencies = [ [[package]] name = "reth-consensus" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9538,8 +9538,8 @@ dependencies = [ [[package]] name = "reth-consensus-common" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9550,8 +9550,8 @@ dependencies = [ [[package]] name = "reth-consensus-debug-client" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = 
"1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9576,8 +9576,8 @@ dependencies = [ [[package]] name = "reth-db" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "derive_more", @@ -9603,8 +9603,8 @@ dependencies = [ [[package]] name = "reth-db-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9632,8 +9632,8 @@ dependencies = [ [[package]] name = "reth-db-common" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -9662,8 +9662,8 @@ dependencies = [ [[package]] name = "reth-db-models" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -9677,8 +9677,8 @@ dependencies = [ [[package]] name = "reth-discv4" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = 
[ "alloy-primitives", "alloy-rlp", @@ -9702,8 +9702,8 @@ dependencies = [ [[package]] name = "reth-discv5" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -9726,8 +9726,8 @@ dependencies = [ [[package]] name = "reth-dns-discovery" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "dashmap", @@ -9750,8 +9750,8 @@ dependencies = [ [[package]] name = "reth-downloaders" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9785,8 +9785,8 @@ dependencies = [ [[package]] name = "reth-e2e-test-utils" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9843,8 +9843,8 @@ dependencies = [ [[package]] name = "reth-ecies" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "aes", "alloy-primitives", @@ -9871,8 +9871,8 @@ dependencies = [ [[package]] name = "reth-engine-local" -version = 
"1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -9895,8 +9895,8 @@ dependencies = [ [[package]] name = "reth-engine-primitives" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -9920,8 +9920,8 @@ dependencies = [ [[package]] name = "reth-engine-service" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "futures", "pin-project", @@ -9942,8 +9942,8 @@ dependencies = [ [[package]] name = "reth-engine-tree" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eip7928", @@ -9999,8 +9999,8 @@ dependencies = [ [[package]] name = "reth-engine-util" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -10027,8 +10027,8 @@ dependencies = [ [[package]] name = "reth-era" -version = "1.11.1" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10042,8 +10042,8 @@ dependencies = [ [[package]] name = "reth-era-downloader" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "bytes", @@ -10058,8 +10058,8 @@ dependencies = [ [[package]] name = "reth-era-utils" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10080,8 +10080,8 @@ dependencies = [ [[package]] name = "reth-errors" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "reth-consensus", "reth-execution-errors", @@ -10091,8 +10091,8 @@ dependencies = [ [[package]] name = "reth-eth-wire" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-chains", "alloy-primitives", @@ -10120,8 +10120,8 @@ dependencies = [ [[package]] name = "reth-eth-wire-types" -version = "1.11.1" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-chains", "alloy-consensus", @@ -10144,8 +10144,8 @@ dependencies = [ [[package]] name = "reth-ethereum" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-rpc-types-engine", "alloy-rpc-types-eth", @@ -10185,8 +10185,8 @@ dependencies = [ [[package]] name = "reth-ethereum-cli" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "clap", "eyre", @@ -10208,8 +10208,8 @@ dependencies = [ [[package]] name = "reth-ethereum-consensus" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10224,8 +10224,8 @@ dependencies = [ [[package]] name = "reth-ethereum-engine-primitives" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10242,8 +10242,8 @@ dependencies = [ [[package]] name = "reth-ethereum-forks" -version = "1.11.1" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eip2124", "alloy-hardforks", @@ -10256,8 +10256,8 @@ dependencies = [ [[package]] name = "reth-ethereum-payload-builder" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10285,8 +10285,8 @@ dependencies = [ [[package]] name = "reth-ethereum-primitives" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10305,8 +10305,8 @@ dependencies = [ [[package]] name = "reth-etl" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "rayon", "reth-db-api", @@ -10315,8 +10315,8 @@ dependencies = [ [[package]] name = "reth-evm" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10339,8 +10339,8 @@ dependencies = [ [[package]] name = "reth-evm-ethereum" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version 
= "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10361,8 +10361,8 @@ dependencies = [ [[package]] name = "reth-execution-errors" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-evm", "alloy-primitives", @@ -10374,8 +10374,8 @@ dependencies = [ [[package]] name = "reth-execution-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10392,8 +10392,8 @@ dependencies = [ [[package]] name = "reth-exex" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10430,8 +10430,8 @@ dependencies = [ [[package]] name = "reth-exex-test-utils" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "eyre", @@ -10462,8 +10462,8 @@ dependencies = [ [[package]] name = "reth-exex-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -10476,8 +10476,8 @@ dependencies = [ [[package]] name = "reth-fs-util" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "serde", "serde_json", @@ -10486,8 +10486,8 @@ dependencies = [ [[package]] name = "reth-invalid-block-hooks" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10514,8 +10514,8 @@ dependencies = [ [[package]] name = "reth-ipc" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "bytes", "futures", @@ -10534,8 +10534,8 @@ dependencies = [ [[package]] name = "reth-libmdbx" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "bitflags 2.11.0", "byteorder", @@ -10550,8 +10550,8 @@ dependencies = [ [[package]] name = "reth-mdbx-sys" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "bindgen", "cc", @@ 
-10559,8 +10559,8 @@ dependencies = [ [[package]] name = "reth-metrics" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "futures", "metrics", @@ -10571,8 +10571,8 @@ dependencies = [ [[package]] name = "reth-net-banlist" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "ipnet", @@ -10580,8 +10580,8 @@ dependencies = [ [[package]] name = "reth-net-nat" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "futures-util", "if-addrs 0.14.0", @@ -10594,8 +10594,8 @@ dependencies = [ [[package]] name = "reth-network" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10651,8 +10651,8 @@ dependencies = [ [[package]] name = "reth-network-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10676,8 +10676,8 @@ dependencies = [ [[package]] name = "reth-network-p2p" -version = "1.11.1" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10699,8 +10699,8 @@ dependencies = [ [[package]] name = "reth-network-peers" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -10714,8 +10714,8 @@ dependencies = [ [[package]] name = "reth-network-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eip2124", "humantime-serde", @@ -10728,8 +10728,8 @@ dependencies = [ [[package]] name = "reth-nippy-jar" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "anyhow", "bincode 1.3.3", @@ -10745,8 +10745,8 @@ dependencies = [ [[package]] name = "reth-node-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-rpc-types-engine", "eyre", @@ -10769,8 +10769,8 @@ dependencies = [ [[package]] name = "reth-node-builder" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = 
"1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10838,8 +10838,8 @@ dependencies = [ [[package]] name = "reth-node-core" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10893,8 +10893,8 @@ dependencies = [ [[package]] name = "reth-node-ethereum" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-network", @@ -10931,8 +10931,8 @@ dependencies = [ [[package]] name = "reth-node-ethstats" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -10955,8 +10955,8 @@ dependencies = [ [[package]] name = "reth-node-events" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -10979,8 +10979,8 @@ dependencies = [ [[package]] name = "reth-node-metrics" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "bytes", "eyre", @@ -11008,8 +11008,8 @@ dependencies = [ [[package]] name = "reth-node-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "reth-chainspec", "reth-db-api", @@ -11020,7 +11020,7 @@ dependencies = [ [[package]] name = "reth-op" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-primitives", "reth-chainspec", @@ -11061,7 +11061,7 @@ dependencies = [ [[package]] name = "reth-optimism-chainspec" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -11089,7 +11089,7 @@ dependencies = [ [[package]] name = "reth-optimism-cli" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11139,7 +11139,7 @@ dependencies = [ [[package]] name = "reth-optimism-consensus" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-chains", "alloy-consensus", @@ -11170,7 +11170,7 @@ dependencies = [ [[package]] name = "reth-optimism-evm" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11199,7 +11199,7 @@ dependencies = [ [[package]] name = "reth-optimism-exex" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11226,7 +11226,7 @@ dependencies = [ [[package]] name = "reth-optimism-flashblocks" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11271,7 +11271,7 @@ dependencies = [ [[package]] name = "reth-optimism-forks" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-op-hardforks", "alloy-primitives", @@ -11281,7 +11281,7 @@ dependencies = [ [[package]] name = "reth-optimism-node" -version = "1.11.2" 
+version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-genesis", @@ -11348,7 +11348,7 @@ dependencies = [ [[package]] name = "reth-optimism-payload-builder" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11387,7 +11387,7 @@ dependencies = [ [[package]] name = "reth-optimism-primitives" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11414,7 +11414,7 @@ dependencies = [ [[package]] name = "reth-optimism-rpc" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11485,7 +11485,7 @@ dependencies = [ [[package]] name = "reth-optimism-storage" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "reth-codecs", @@ -11497,7 +11497,7 @@ dependencies = [ [[package]] name = "reth-optimism-trie" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11542,7 +11542,7 @@ dependencies = [ [[package]] name = "reth-optimism-txpool" -version = "1.11.2" +version = "1.11.3" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11580,8 +11580,8 @@ dependencies = [ [[package]] name = "reth-payload-builder" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11601,8 +11601,8 @@ dependencies = [ [[package]] name = "reth-payload-builder-primitives" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "pin-project", "reth-payload-primitives", @@ -11613,8 +11613,8 @@ dependencies = [ [[package]] name = "reth-payload-primitives" 
-version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11636,8 +11636,8 @@ dependencies = [ [[package]] name = "reth-payload-util" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -11646,8 +11646,8 @@ dependencies = [ [[package]] name = "reth-payload-validator" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-rpc-types-engine", @@ -11656,8 +11656,8 @@ dependencies = [ [[package]] name = "reth-primitives" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "once_cell", @@ -11669,8 +11669,8 @@ dependencies = [ [[package]] name = "reth-primitives-traits" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11703,8 +11703,8 @@ dependencies = [ [[package]] name = "reth-provider" -version = "1.11.1" -source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11748,8 +11748,8 @@ dependencies = [ [[package]] name = "reth-prune" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -11777,8 +11777,8 @@ dependencies = [ [[package]] name = "reth-prune-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "arbitrary", @@ -11793,8 +11793,8 @@ dependencies = [ [[package]] name = "reth-revm" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "reth-primitives-traits", @@ -11806,8 +11806,8 @@ dependencies = [ [[package]] name = "reth-rpc" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -11883,8 +11883,8 @@ dependencies = [ [[package]] name = "reth-rpc-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = 
"1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eip7928", "alloy-eips", @@ -11913,8 +11913,8 @@ dependencies = [ [[package]] name = "reth-rpc-builder" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-network", "alloy-provider", @@ -11954,8 +11954,8 @@ dependencies = [ [[package]] name = "reth-rpc-convert" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-evm", @@ -11978,8 +11978,8 @@ dependencies = [ [[package]] name = "reth-rpc-engine-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12008,8 +12008,8 @@ dependencies = [ [[package]] name = "reth-rpc-eth-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -12052,8 +12052,8 @@ dependencies = [ [[package]] name = "reth-rpc-eth-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12100,8 +12100,8 @@ dependencies = [ [[package]] name = "reth-rpc-layer" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-rpc-types-engine", "http", @@ -12114,8 +12114,8 @@ dependencies = [ [[package]] name = "reth-rpc-server-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12130,8 +12130,8 @@ dependencies = [ [[package]] name = "reth-stages" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12180,8 +12180,8 @@ dependencies = [ [[package]] name = "reth-stages-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12207,8 +12207,8 @@ dependencies = [ [[package]] name = "reth-stages-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" 
dependencies = [ "alloy-primitives", "arbitrary", @@ -12221,8 +12221,8 @@ dependencies = [ [[package]] name = "reth-static-file" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "parking_lot", @@ -12241,8 +12241,8 @@ dependencies = [ [[package]] name = "reth-static-file-types" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "clap", @@ -12256,8 +12256,8 @@ dependencies = [ [[package]] name = "reth-storage-api" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12280,8 +12280,8 @@ dependencies = [ [[package]] name = "reth-storage-errors" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-eips", "alloy-primitives", @@ -12297,8 +12297,8 @@ dependencies = [ [[package]] name = "reth-tasks" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "auto_impl", "dyn-clone", @@ -12315,8 +12315,8 @@ dependencies = [ [[package]] name = 
"reth-testing-utils" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12331,8 +12331,8 @@ dependencies = [ [[package]] name = "reth-tokio-util" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "tokio", "tokio-stream", @@ -12341,8 +12341,8 @@ dependencies = [ [[package]] name = "reth-tracing" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "clap", "eyre", @@ -12360,8 +12360,8 @@ dependencies = [ [[package]] name = "reth-tracing-otlp" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "clap", "eyre", @@ -12378,8 +12378,8 @@ dependencies = [ [[package]] name = "reth-transaction-pool" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12424,8 +12424,8 @@ dependencies = [ [[package]] name = "reth-trie" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" 
+version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-eips", @@ -12450,8 +12450,8 @@ dependencies = [ [[package]] name = "reth-trie-common" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-consensus", "alloy-primitives", @@ -12477,8 +12477,8 @@ dependencies = [ [[package]] name = "reth-trie-db" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "metrics", @@ -12497,8 +12497,8 @@ dependencies = [ [[package]] name = "reth-trie-parallel" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -12522,8 +12522,8 @@ dependencies = [ [[package]] name = "reth-trie-sparse" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "alloy-primitives", "alloy-rlp", @@ -12541,8 +12541,8 @@ dependencies = [ [[package]] name = "reth-zstd-compressors" -version = "1.11.1" -source = "git+https://github.com/paradigmxyz/reth?tag=v1.11.2#bef3d7b4d1da937fcccc9bbd6f8bd93e16380dc7" +version = "1.11.3" +source = 
"git+https://github.com/paradigmxyz/reth?tag=v1.11.3#d6324d63e27ef6b7c49cdc9b1977c1b808234c7b" dependencies = [ "zstd", ] diff --git a/rust/Cargo.toml b/rust/Cargo.toml index b46e8f85db595..694b68dc52504 100644 --- a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -296,79 +296,79 @@ op-alloy-rpc-jsonrpsee = { version = "0.23.1", path = "op-alloy/crates/rpc-jsonr alloy-op-evm = { version = "0.26.3", path = "alloy-op-evm/", default-features = false } alloy-op-hardforks = { version = "0.4.7", path = "alloy-op-hardforks/", default-features = false } -# ==================== RETH CRATES (from git rev 564ffa58 / main) ==================== -reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-basic-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-cli-runner = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-cli-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-consensus-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-downloaders = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-e2e-test-utils 
= { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-engine-local = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-eth-wire = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-ethereum-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-ethereum-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-execution-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-execution-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-fs-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-network = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.11.2" } -reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-node-events = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-payload-builder-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-payload-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-payload-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-payload-validator = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-prune = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-rpc-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-rpc-server-types = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.11.2" } -reth-stages = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-static-file = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-static-file-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-storage-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } -reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2" } -reth-zstd-compressors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.2", default-features = false } +# ==================== RETH CRATES (v1.11.3) ==================== +reth = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-basic-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-chain-state = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-chainspec = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-cli-commands = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-cli-runner = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-cli-util = { git = 
"https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-codecs = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-consensus-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-db-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-db-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-downloaders = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-e2e-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-engine-local = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-engine-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-eth-wire = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-ethereum-cli = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-ethereum-consensus = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-ethereum-forks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-ethereum-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-evm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-evm-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-exex = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-exex-test-utils = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } 
+reth-execution-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-execution-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-fs-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-network = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-network-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-network-peers = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-node-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-node-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-node-core = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-node-ethereum = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-node-events = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-node-metrics = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-payload-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-payload-builder-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-payload-primitives = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-payload-util = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-payload-validator = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-primitives-traits = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-provider = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-prune = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-prune-types = { git = "https://github.com/paradigmxyz/reth", tag = 
"v1.11.3", default-features = false } +reth-revm = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-rpc = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-rpc-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-rpc-builder = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-rpc-engine-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-rpc-eth-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-rpc-eth-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-rpc-server-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-stages = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-stages-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-static-file = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-static-file-types = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-storage-api = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-storage-errors = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-tasks = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-tracing = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-transaction-pool = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-trie = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-trie-common = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3", default-features = false } +reth-trie-db = { git = "https://github.com/paradigmxyz/reth", tag = "v1.11.3" } +reth-zstd-compressors = { git = "https://github.com/paradigmxyz/reth", 
tag = "v1.11.3", default-features = false } # ==================== REVM (latest: op-reth versions) ==================== revm = { version = "34.0.0", default-features = false } diff --git a/rust/op-reth/bin/Cargo.toml b/rust/op-reth/bin/Cargo.toml index b86dc0b9aed47..1dd0f1b08fa80 100644 --- a/rust/op-reth/bin/Cargo.toml +++ b/rust/op-reth/bin/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "op-reth" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/chainspec/Cargo.toml b/rust/op-reth/crates/chainspec/Cargo.toml index cfdb01ca7215d..5706906e44bb2 100644 --- a/rust/op-reth/crates/chainspec/Cargo.toml +++ b/rust/op-reth/crates/chainspec/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-chainspec" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/cli/Cargo.toml b/rust/op-reth/crates/cli/Cargo.toml index 1f5554d8a505c..fcc61b1a9fd63 100644 --- a/rust/op-reth/crates/cli/Cargo.toml +++ b/rust/op-reth/crates/cli/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-cli" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/consensus/Cargo.toml b/rust/op-reth/crates/consensus/Cargo.toml index f386066883f23..b5affa175ba3f 100644 --- a/rust/op-reth/crates/consensus/Cargo.toml +++ b/rust/op-reth/crates/consensus/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-consensus" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/evm/Cargo.toml b/rust/op-reth/crates/evm/Cargo.toml index 9c0efeffc8545..16346078f91ec 100644 --- a/rust/op-reth/crates/evm/Cargo.toml +++ b/rust/op-reth/crates/evm/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-evm" -version = 
"1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/exex/Cargo.toml b/rust/op-reth/crates/exex/Cargo.toml index bc2992733eb94..5a9a9b6d01681 100644 --- a/rust/op-reth/crates/exex/Cargo.toml +++ b/rust/op-reth/crates/exex/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-exex" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/flashblocks/Cargo.toml b/rust/op-reth/crates/flashblocks/Cargo.toml index 0febb66c629c8..62d36d18d8848 100644 --- a/rust/op-reth/crates/flashblocks/Cargo.toml +++ b/rust/op-reth/crates/flashblocks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-flashblocks" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/hardforks/Cargo.toml b/rust/op-reth/crates/hardforks/Cargo.toml index 6a709cdc1182c..2d8a23eeab57d 100644 --- a/rust/op-reth/crates/hardforks/Cargo.toml +++ b/rust/op-reth/crates/hardforks/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-forks" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/node/Cargo.toml b/rust/op-reth/crates/node/Cargo.toml index 0f521c1f58a82..a11cb2afc0057 100644 --- a/rust/op-reth/crates/node/Cargo.toml +++ b/rust/op-reth/crates/node/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-node" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/payload/Cargo.toml b/rust/op-reth/crates/payload/Cargo.toml index 7248c36e80e51..93a2553744bb5 100644 --- a/rust/op-reth/crates/payload/Cargo.toml +++ b/rust/op-reth/crates/payload/Cargo.toml @@ -1,6 +1,6 @@ [package] name = 
"reth-optimism-payload-builder" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/primitives/Cargo.toml b/rust/op-reth/crates/primitives/Cargo.toml index 3c2709b59ac19..70b30300b312a 100644 --- a/rust/op-reth/crates/primitives/Cargo.toml +++ b/rust/op-reth/crates/primitives/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-primitives" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/reth/Cargo.toml b/rust/op-reth/crates/reth/Cargo.toml index 66731edc26f45..19b44f3d67e83 100644 --- a/rust/op-reth/crates/reth/Cargo.toml +++ b/rust/op-reth/crates/reth/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-op" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/rpc/Cargo.toml b/rust/op-reth/crates/rpc/Cargo.toml index b8ef3fb9a3552..5249feb659545 100644 --- a/rust/op-reth/crates/rpc/Cargo.toml +++ b/rust/op-reth/crates/rpc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-rpc" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/storage/Cargo.toml b/rust/op-reth/crates/storage/Cargo.toml index 5d2b58c9d3915..4543fa9d8b9c6 100644 --- a/rust/op-reth/crates/storage/Cargo.toml +++ b/rust/op-reth/crates/storage/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-storage" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/trie/Cargo.toml b/rust/op-reth/crates/trie/Cargo.toml index 987b5ac20500d..9d768751027e6 100644 --- a/rust/op-reth/crates/trie/Cargo.toml +++ b/rust/op-reth/crates/trie/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-trie" 
-version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true diff --git a/rust/op-reth/crates/txpool/Cargo.toml b/rust/op-reth/crates/txpool/Cargo.toml index 7bcb17bd7e710..bbfc34843063d 100644 --- a/rust/op-reth/crates/txpool/Cargo.toml +++ b/rust/op-reth/crates/txpool/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "reth-optimism-txpool" -version = "1.11.2" +version = "1.11.3" edition.workspace = true rust-version.workspace = true license.workspace = true From 54ab84a0f1c174059d954d66d9291ad5704172e4 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 13 Mar 2026 05:59:48 +1000 Subject: [PATCH 113/201] chore(cannon): migrate Makefile to justfile (#19474) * chore(cannon): migrate Makefile to justfile Migrate cannon build targets from Make to Just. The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * fix(cannon): add missing lint target and include justfiles in Docker context - Add `lint` to DEPRECATED_TARGETS and justfile (CI compatibility stub) - Copy justfiles/ into kona cannon-repro.dockerfile for deprecated.mk shim - Install `just` binary in cannon Docker build for the Make shim Co-Authored-By: Claude Opus 4.6 * fix(cannon): remove [default] attribute for just <1.38 compat The Alpine 3.21 just package is v1.37.0 which doesn't support the [default] attribute. Move cannon recipe to first position instead. Co-Authored-By: Claude Opus 4.6 * fix(cannon): include justfiles/ in op-program Docker build context The cannon Makefile deprecated shim requires justfiles/deprecated.mk, which is resolved relative to cannon/ inside the Docker container. The op-program Dockerfile.repro.dockerignore was excluding justfiles/ from the build context, causing the cannon make shim to fail. 
Co-Authored-By: Claude Opus 4.6 * docs(cannon): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(cannon): call just directly in cannon-repro.dockerfile Instead of going through the deprecated Make shim, invoke just cannon directly in the Docker build. Co-Authored-By: Claude Opus 4.6 * fix(cannon): update Dockerfile.diff to use just diff-cannon directly The diff-%-cannon Make pattern target was converted to a parameterized just recipe (just diff-cannon VM). Update the Dockerfile to call just directly instead of through make, which would fail since the deprecated shim doesn't support pattern targets. Co-Authored-By: Claude Opus 4.6 * fix(cannon): add diff-%-cannon pattern target to deprecated Makefile Preserves backwards compatibility for make diff--cannon invocations (used by Dockerfile.diff and potentially other scripts) by translating the pattern to just diff-cannon . Co-Authored-By: Claude Opus 4.6 * fix: install just via system package manager in Dockerfiles - cannon/Dockerfile.diff: use `apk add just` instead of curl install script, drop unnecessary `make` dependency - cannon-repro.dockerfile: switch cannon-build stage from ubuntu:22.04 to golang:1.23.8-alpine3.21, matching the monorepo's Go builder image, so just can be installed via `apk add` instead of curl install script Co-Authored-By: Claude Opus 4.6 * fix(cannon): bump Go to 1.24.10 in cannon-repro.dockerfile The golang Docker image sets GOTOOLCHAIN=local which prevents automatic toolchain downloading. Since go.mod requires go 1.24.0, the 1.23.8 image fails to build. Match Dockerfile.diff which already uses 1.24.10. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- cannon/Dockerfile.diff | 9 +- cannon/Makefile | 123 +----------------- cannon/README.md | 4 +- cannon/justfile | 118 +++++++++++++++++ op-program/Dockerfile.repro.dockerignore | 1 + .../fpvm-prestates/cannon-repro.dockerfile | 19 +-- 6 files changed, 137 insertions(+), 137 deletions(-) create mode 100644 cannon/justfile diff --git a/cannon/Dockerfile.diff b/cannon/Dockerfile.diff index 3cb33a6f5e96f..33a90202a8905 100644 --- a/cannon/Dockerfile.diff +++ b/cannon/Dockerfile.diff @@ -1,6 +1,6 @@ FROM golang:1.24.10-alpine3.21 AS builder -RUN apk add --no-cache make bash +RUN apk add --no-cache bash just COPY ./go.mod /app/go.mod COPY ./go.sum /app/go.sum @@ -30,7 +30,8 @@ COPY --from=cannon-multithreaded64-5 /usr/local/bin/cannon /usr/local/bin/cannon # Check cannon-multithreaded64-5 # verify the latest multithreaded VM behavior against multithreaded64-5 -RUN cd cannon && make diff-multithreaded64-5-cannon -e OTHER_CANNON=/usr/local/bin/cannon-multithreaded64-5 +RUN cd cannon && OTHER_CANNON=/usr/local/bin/cannon-multithreaded64-5 just diff-cannon multithreaded64-5 RUN --mount=type=cache,target=/root/.cache/go-build cd cannon && \ - make diff-multithreaded64-5-cannon -e OTHER_CANNON=/usr/local/bin/cannon-multithreaded64-5 \ - GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE + OTHER_CANNON=/usr/local/bin/cannon-multithreaded64-5 \ + GOOS=$TARGETOS GOARCH=$TARGETARCH GITCOMMIT=$GIT_COMMIT GITDATE=$GIT_DATE \ + just diff-cannon multithreaded64-5 diff --git a/cannon/Makefile b/cannon/Makefile index 54a72ff54783a..fc3fda4061e99 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -1,118 +1,9 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD) -GITDATE ?= $(shell git show -s --format='%ct') -VERSION ?= v0.0.0 +DEPRECATED_TARGETS := cannon cannon64-impl cannon-embeds clean elf elf-go-current sanitize-program contract test diff-cannon cannon-stf-verify fuzz lint -LDFLAGSSTRING 
+=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/cannon/multicannon/version.Version=$(VERSION) -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/cannon/multicannon/version.Meta=$(VERSION_META) -LDFLAGS := -ldflags "$(LDFLAGSSTRING)" +include ../justfiles/deprecated.mk -# Use the old Apple linker to workaround broken xcode - https://github.com/golang/go/issues/65169 -ifeq ($(shell uname),Darwin) - FUZZLDFLAGS := -ldflags=-extldflags=-Wl,-ld_classic -endif - -.DEFAULT_GOAL := cannon - -# The MIPS64 r2 opcodes not supported by cannon. This list does not include coprocess-specific and trap opcodes. -UNSUPPORTED_OPCODES := (dclo|dclz|madd|maddu|seb|seh|wsbh|dsbh|dshd|ins|dins|dinsm|dinsu|ext|dext|dextu|dextm|rotr|drotr|drotr32|rotrv|drotrv|break|sdbbp|pref) - -CANNON64_FUZZTIME := 20s - -cannon64-impl: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon64-impl . - -# Note: This target is used by ./scripts/build-legacy-cannons.sh -# It should build the individual versions of cannons and copy them into place in the multicannon/embeds directory -# Ideally, preserve backwards compatibility with this behaviour but if it needs to change, build-legacy-cannons.sh will -# need to be updated to account for different behaviours in different versions. -# Each embed is suffixed with the latest `StateVersion` number corresponding to the target VM and architecture. -cannon-embeds: cannon64-impl - # 64-bit multithreaded vm - @cp bin/cannon64-impl ./multicannon/embeds/cannon-8 - -cannon: cannon-embeds - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v $(LDFLAGS) -o ./bin/cannon ./multicannon/ - -clean: - rm -rf bin multicannon/embeds/cannon* - -elf: - make -C ./testdata elf - -elf-go-current: - make -C ./testdata/go-1-24 elf - -sanitize-program: - mips-linux-gnu-objdump -d -j .text $$GUEST_PROGRAM > ./bin/dump.txt - @if ! 
{ cat ./bin/dump.txt | awk '{print $$3}' | grep -Ew -m1 "$(UNSUPPORTED_OPCODES)"; }; then \ - echo "guest program is sanitized for unsupported instructions"; \ - else \ - echo "found unsupported instructions in the guest program"; \ - exit 1; \ - fi - -contract: - cd ../packages/contracts-bedrock && forge build - -test: elf contract - go test -v ./... - - -diff-%-cannon: cannon elf-go-current - # Load an elf file to create a prestate, and check that both cannon versions generate the same prestate - @VM=$*; \ - echo "Running diff for VM type $${VM}"; \ - $$OTHER_CANNON load-elf --type $$VM --path ./testdata/go-1-24/bin/hello.64.elf --out ./bin/prestate-other.bin.gz --meta ""; \ - ./bin/cannon load-elf --type $$VM --path ./testdata/go-1-24/bin/hello.64.elf --out ./bin/prestate.bin.gz --meta ""; - @cmp ./bin/prestate-other.bin.gz ./bin/prestate.bin.gz; - @if [ $$? -eq 0 ]; then \ - echo "Generated identical prestates"; \ - else \ - echo "Generated different prestates"; \ - exit 1; \ - fi - - # Run cannon and check that both cannon versions produce identical states - $$OTHER_CANNON run --proof-at '=0' --stop-at '=100000000' --input=./bin/prestate.bin.gz --output ./bin/out-other.bin.gz --meta "" - ./bin/cannon run --proof-at '=0' --stop-at '=100000000' --input=./bin/prestate.bin.gz --output ./bin/out.bin.gz --meta "" - @cmp ./bin/out-other.bin.gz ./bin/out.bin.gz - @if [ $$? 
-eq 0 ]; then \ - echo "Generated identical post-states"; \ - else \ - echo "Generated different post-states"; \ - exit 1; \ - fi - -cannon-stf-verify: - @docker build --progress plain -f Dockerfile.diff ../ - -fuzz: - printf "%s\n" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzMulOp ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzMultOp ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzMultuOp ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallBrk ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallMmap ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallExitGroup ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallFcntl ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateHintRead ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStatePreimageRead ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateHintWrite ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStatePreimageWrite ./mipsevm/tests" \ - "go test $(FUZZLDFLAGS) -run NOTAREALTEST -v -fuzztime $(CANNON64_FUZZTIME) -fuzz=FuzzStateSyscallCloneMT ./mipsevm/tests" \ - | parallel -j 8 {} - -.PHONY: \ - cannon64-impl \ - cannon-embeds \ - cannon \ - clean \ - elf \ - elf-go-current \ - test \ - lint \ - fuzz \ - diff-%-cannon \ - cannon-stf-verify +# Pattern target for backwards compatibility with make diff--cannon invocations. 
+# Translates make diff--cannon to just diff-cannon . +.PHONY: diff-%-cannon +diff-%-cannon: + env $(JUSTFLAGS) just diff-cannon $* diff --git a/cannon/README.md b/cannon/README.md index 6294b38c02f5f..6a7874a575caf 100644 --- a/cannon/README.md +++ b/cannon/README.md @@ -22,11 +22,11 @@ For more information, see [Docs](./docs/README.md). ```shell # Build op-program server-mode and MIPS-client binaries. cd ../op-program -make op-program # build +just op-program # build # Switch back to cannon, and build the CLI cd ../cannon -make cannon +just cannon # Transform MIPS op-program client binary into first VM state. # This outputs state.bin.gz (VM state) and meta.json (for debug symbols). diff --git a/cannon/justfile b/cannon/justfile new file mode 100644 index 0000000000000..82d52c460e961 --- /dev/null +++ b/cannon/justfile @@ -0,0 +1,118 @@ +import '../justfiles/go.just' + +# Build ldflags string +_VERSION_META_STR := if VERSION_META != "" { "+" + VERSION_META } else { "" } +_LDFLAGSSTRING := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/cannon/multicannon/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/cannon/multicannon/version.Meta=" + _VERSION_META_STR + " " + \ + "") + "'" + +# The MIPS64 r2 opcodes not supported by cannon (excludes coprocessor-specific and trap opcodes) +UNSUPPORTED_OPCODES := "(dclo|dclz|madd|maddu|seb|seh|wsbh|dsbh|dshd|ins|dins|dinsm|dinsu|ext|dext|dextu|dextm|rotr|drotr|drotr32|rotrv|drotrv|break|sdbbp|pref)" + +CANNON64_FUZZTIME := "20s" + +# Build cannon binary (default target — must remain first recipe) +cannon: cannon-embeds + env GO111MODULE=on GOOS={{TARGETOS}} GOARCH={{TARGETARCH}} go build -v -ldflags {{_LDFLAGSSTRING}} -o ./bin/cannon ./multicannon/ + +# Build cannon64-impl binary (note: does NOT use CGO_ENABLED=0) +cannon64-impl: + env GO111MODULE=on GOOS={{TARGETOS}} GOARCH={{TARGETARCH}} go build -v 
-ldflags {{_LDFLAGSSTRING}} -o ./bin/cannon64-impl . + +# Build cannon embeds (used by scripts/build-legacy-cannons.sh) +# Each embed is suffixed with the latest StateVersion number for the target VM and architecture +cannon-embeds: cannon64-impl + @cp bin/cannon64-impl ./multicannon/embeds/cannon-8 + +# Clean build artifacts +clean: + rm -rf bin multicannon/embeds/cannon* + +# Build ELF test binaries +elf: + make -C ./testdata elf + +# Build ELF test binaries for current Go version +elf-go-current: + make -C ./testdata/go-1-24 elf + +# Check guest program for unsupported MIPS instructions +sanitize-program: + #!/usr/bin/env bash + set -euo pipefail + mips-linux-gnu-objdump -d -j .text "$GUEST_PROGRAM" > ./bin/dump.txt + if ! { cat ./bin/dump.txt | awk '{print $3}' | grep -Ew -m1 "{{UNSUPPORTED_OPCODES}}"; }; then + echo "guest program is sanitized for unsupported instructions" + else + echo "found unsupported instructions in the guest program" + exit 1 + fi + +# Build contracts +contract: + cd ../packages/contracts-bedrock && forge build + +# Run tests +test: elf contract + @just go_test "./..." + +# Compare cannon output against OTHER_CANNON for a given VM type +diff-cannon VM: cannon elf-go-current + #!/usr/bin/env bash + set -euo pipefail + VM="{{VM}}" + echo "Running diff for VM type ${VM}" + # Load an elf file to create a prestate, and check that both cannon versions generate the same prestate + $OTHER_CANNON load-elf --type "$VM" --path ./testdata/go-1-24/bin/hello.64.elf --out ./bin/prestate-other.bin.gz --meta "" + ./bin/cannon load-elf --type "$VM" --path ./testdata/go-1-24/bin/hello.64.elf --out ./bin/prestate.bin.gz --meta "" + cmp ./bin/prestate-other.bin.gz ./bin/prestate.bin.gz + if [ $? 
-eq 0 ]; then + echo "Generated identical prestates" + else + echo "Generated different prestates" + exit 1 + fi + # Run cannon and check that both cannon versions produce identical states + $OTHER_CANNON run --proof-at '=0' --stop-at '=100000000' --input=./bin/prestate.bin.gz --output ./bin/out-other.bin.gz --meta "" + ./bin/cannon run --proof-at '=0' --stop-at '=100000000' --input=./bin/prestate.bin.gz --output ./bin/out.bin.gz --meta "" + cmp ./bin/out-other.bin.gz ./bin/out.bin.gz + if [ $? -eq 0 ]; then + echo "Generated identical post-states" + else + echo "Generated different post-states" + exit 1 + fi + +# Verify cannon STF via Docker +cannon-stf-verify: + @docker build --progress plain -f Dockerfile.diff ../ + +# Lint (cannon is linted via the root lint-go target; this is a CI compatibility stub) +lint: + @echo "cannon lint is handled by the root lint-go target" + +# Run fuzz tests in parallel +fuzz: + #!/usr/bin/env bash + set -euo pipefail + FUZZLDFLAGS="" + if [ "$(uname)" = "Darwin" ]; then + FUZZLDFLAGS="-ldflags=-extldflags=-Wl,-ld_classic" + fi + printf "%s\n" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzMulOp ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzMultOp ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzMultuOp ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStateSyscallBrk ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStateSyscallMmap ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStateSyscallExitGroup ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStateSyscallFcntl ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime 
{{CANNON64_FUZZTIME}} -fuzz=FuzzStateHintRead ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStatePreimageRead ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStateHintWrite ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStatePreimageWrite ./mipsevm/tests" \ + "go test $FUZZLDFLAGS -run NOTAREALTEST -v -fuzztime {{CANNON64_FUZZTIME}} -fuzz=FuzzStateSyscallCloneMT ./mipsevm/tests" \ + | parallel -j 8 {} diff --git a/op-program/Dockerfile.repro.dockerignore b/op-program/Dockerfile.repro.dockerignore index 1cf8173ca256c..3496b28e41202 100644 --- a/op-program/Dockerfile.repro.dockerignore +++ b/op-program/Dockerfile.repro.dockerignore @@ -12,6 +12,7 @@ !op-service/ !op-supervisor/ !op-test-sequencer +!justfiles/ **/bin **/testdata diff --git a/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile b/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile index 0c69197ee30d4..3540282292a2e 100644 --- a/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile +++ b/rust/kona/docker/fpvm-prestates/cannon-repro.dockerfile @@ -2,31 +2,20 @@ # Build Cannon from local monorepo # ################################################################ -FROM ubuntu:22.04 AS cannon-build -SHELL ["/bin/bash", "-c"] - -ARG TARGETARCH - -# Install deps -RUN apt-get update && apt-get install -y --no-install-recommends curl ca-certificates make - -ENV GO_VERSION=1.23.8 +FROM golang:1.24.10-alpine3.21 AS cannon-build -# Fetch go manually, rather than using a Go base image, so we can copy the installation into the final stage -RUN curl -sL https://go.dev/dl/go$GO_VERSION.linux-$TARGETARCH.tar.gz -o go$GO_VERSION.linux-$TARGETARCH.tar.gz && \ - tar -C /usr/local/ -xzf go$GO_VERSION.linux-$TARGETARCH.tar.gz -ENV GOPATH=/go -ENV PATH=/usr/local/go/bin:$GOPATH/bin:$PATH +RUN apk add --no-cache bash just # Copy monorepo 
source needed for the cannon build COPY --from=monorepo go.mod go.sum /optimism/ COPY --from=monorepo cannon/ /optimism/cannon/ COPY --from=monorepo op-service/ /optimism/op-service/ COPY --from=monorepo op-preimage/ /optimism/op-preimage/ +COPY --from=monorepo justfiles/ /optimism/justfiles/ # Build cannon from local source RUN cd /optimism/cannon && \ - make && \ + just cannon && \ cp bin/cannon /cannon-bin ################################################################ From 32fe33bb381811116f4f58547e3fab59e58c2f15 Mon Sep 17 00:00:00 2001 From: Teddy Knox Date: Thu, 12 Mar 2026 16:17:03 -0400 Subject: [PATCH 114/201] chore: remove devnet-sdk and kurtosis-devnet (#19506) --- .circleci/continue/main.yml | 2 +- AGENTS.md | 2 - Makefile | 2 - README.md | 2 - devnet-sdk/book/.gitignore | 1 - devnet-sdk/book/book.toml | 13 - devnet-sdk/book/custom.css | 5 - devnet-sdk/book/src/README.md | 54 - devnet-sdk/book/src/SUMMARY.md | 15 - devnet-sdk/book/src/descriptors.md | 142 -- devnet-sdk/book/src/dsl/intro.md | 41 - devnet-sdk/book/src/dsl/style_guide.md | 133 -- devnet-sdk/book/src/shell.md | 81 - devnet-sdk/book/src/system.md | 199 -- devnet-sdk/book/src/testing.md | 165 -- devnet-sdk/book/theme/css/footer.css | 71 - devnet-sdk/book/theme/js/footer.js | 41 - devnet-sdk/cmd/mf2kt/main.go | 72 - devnet-sdk/constraints/constraints.go | 26 - devnet-sdk/constraints/constraints_test.go | 124 -- devnet-sdk/contracts/bindings/eventlogger.go | 68 - .../bindings/l2tol2crossdomainmessenger.go | 788 ------- .../contracts/bindings/superchainweth.go | 1879 ----------------- devnet-sdk/contracts/constants/constants.go | 55 - devnet-sdk/contracts/contracts.go | 17 - .../contracts/registry/client/client.go | 50 - .../client/l2tol2crossdomainmessenger.go | 22 - devnet-sdk/contracts/registry/client/weth.go | 38 - devnet-sdk/contracts/registry/empty/empty.go | 25 - devnet-sdk/controller/kt/kt.go | 109 - devnet-sdk/controller/kt/kt_test.go | 153 -- devnet-sdk/controller/kt/mutate_env.go | 
100 - devnet-sdk/controller/kt/mutate_env_test.go | 152 -- devnet-sdk/controller/kt/test_helpers.go | 145 -- devnet-sdk/controller/surface/surface.go | 11 - devnet-sdk/descriptors/deployment.go | 87 - devnet-sdk/images/repository.go | 43 - devnet-sdk/interfaces/registry.go | 33 - devnet-sdk/kt/fs/devnet_fs.go | 171 -- devnet-sdk/kt/fs/devnet_fs_test.go | 293 --- devnet-sdk/kt/fs/fs.go | 272 --- devnet-sdk/kt/fs/fs_test.go | 450 ---- devnet-sdk/kt/params.go | 82 - devnet-sdk/kt/visitor.go | 265 --- devnet-sdk/kt/visitor_test.go | 121 -- devnet-sdk/manifest/acceptor.go | 25 - devnet-sdk/manifest/manifest.go | 104 - devnet-sdk/manifest/visitor.go | 35 - devnet-sdk/proofs/prestate/cmd/main.go | 99 - devnet-sdk/shell/cmd/ctrl/main.go | 78 - devnet-sdk/shell/cmd/enter/main.go | 93 - devnet-sdk/shell/cmd/motd/main.go | 59 - devnet-sdk/shell/env/chain.go | 187 -- devnet-sdk/shell/env/devnet.go | 182 -- devnet-sdk/shell/env/env_test.go | 309 --- devnet-sdk/shell/env/file_fetch.go | 49 - devnet-sdk/shell/env/file_fetch_test.go | 93 - devnet-sdk/shell/env/kt_fetch.go | 69 - devnet-sdk/shell/env/kt_fetch_test.go | 70 - devnet-sdk/shell/env/kt_native_fetch.go | 119 -- devnet-sdk/shell/env/kt_native_fetch_test.go | 228 -- .../env/testdata/kurtosis/args--malformed.txt | 1 - .../env/testdata/kurtosis/args--simple.yaml | 6 - devnet-sdk/system/chain.go | 259 --- devnet-sdk/system/chain_test.go | 246 --- devnet-sdk/system/interfaces.go | 150 -- devnet-sdk/system/node.go | 139 -- .../system/periphery/go-ethereum/fees.go | 134 -- .../system/periphery/go-ethereum/fees_test.go | 276 --- devnet-sdk/system/system.go | 110 - devnet-sdk/system/system_test.go | 273 --- devnet-sdk/system/tx.go | 227 -- devnet-sdk/system/tx_test.go | 193 -- devnet-sdk/system/txbuilder.go | 360 ---- devnet-sdk/system/txbuilder_test.go | 475 ----- devnet-sdk/system/txprocessor.go | 83 - devnet-sdk/system/txprocessor_test.go | 215 -- devnet-sdk/system/wallet.go | 422 ---- devnet-sdk/system/walletV2.go | 93 - 
devnet-sdk/system/wallet_test.go | 237 --- devnet-sdk/telemetry/carrier.go | 55 - devnet-sdk/telemetry/init.go | 58 - devnet-sdk/telemetry/slog.go | 98 - devnet-sdk/types/balance.go | 92 - devnet-sdk/types/balance_test.go | 267 --- devnet-sdk/types/types.go | 30 - go.mod | 39 +- go.sum | 70 - kurtosis-devnet/.gitignore | 3 - kurtosis-devnet/README.md | 203 -- kurtosis-devnet/book/.gitignore | 1 - kurtosis-devnet/book/book.toml | 13 - kurtosis-devnet/book/custom.css | 5 - kurtosis-devnet/book/src/README.md | 26 - kurtosis-devnet/book/src/SUMMARY.md | 12 - kurtosis-devnet/book/src/basic_deployment.md | 233 -- kurtosis-devnet/book/src/local_artifacts.md | 114 - kurtosis-devnet/book/src/std_output.md | 134 -- kurtosis-devnet/book/theme/css/footer.css | 71 - kurtosis-devnet/book/theme/js/footer.js | 41 - kurtosis-devnet/cmd/main.go | 246 --- kurtosis-devnet/cmd/main_test.go | 293 --- kurtosis-devnet/fileserver/kurtosis.yml | 4 - kurtosis-devnet/fileserver/main.star | 53 - .../static_files/nginx/default.conf | 8 - kurtosis-devnet/flash.yaml | 110 - kurtosis-devnet/foo-user.example.json | 16 - kurtosis-devnet/interop.yaml | 174 -- kurtosis-devnet/jovian.yaml | 86 - kurtosis-devnet/justfile | 101 - kurtosis-devnet/op-program-svc/Dockerfile | 38 - kurtosis-devnet/op-program-svc/README.md | 57 - kurtosis-devnet/op-program-svc/build.go | 174 -- kurtosis-devnet/op-program-svc/build_test.go | 211 -- kurtosis-devnet/op-program-svc/defaults.go | 70 - kurtosis-devnet/op-program-svc/fs.go | 330 --- kurtosis-devnet/op-program-svc/fs_test.go | 140 -- kurtosis-devnet/op-program-svc/interfaces.go | 48 - kurtosis-devnet/op-program-svc/justfile | 5 - kurtosis-devnet/op-program-svc/main.go | 47 - kurtosis-devnet/op-program-svc/mocks.go | 285 --- kurtosis-devnet/op-program-svc/server.go | 158 -- kurtosis-devnet/op-program-svc/server_test.go | 251 --- .../optimism-package-trampoline/README.md | 7 - .../optimism-package-trampoline/kurtosis.yml | 8 - .../optimism-package-trampoline/main.star 
| 5 - kurtosis-devnet/pkg/build/contracts.go | 401 ---- kurtosis-devnet/pkg/build/contracts_test.go | 387 ---- kurtosis-devnet/pkg/build/docker.go | 343 --- kurtosis-devnet/pkg/build/docker_test.go | 182 -- kurtosis-devnet/pkg/build/prestate.go | 109 - kurtosis-devnet/pkg/deploy/deploy.go | 306 --- kurtosis-devnet/pkg/deploy/deploy_test.go | 143 -- kurtosis-devnet/pkg/deploy/fileserver.go | 282 --- kurtosis-devnet/pkg/deploy/fileserver_test.go | 149 -- kurtosis-devnet/pkg/deploy/prestate.go | 112 - kurtosis-devnet/pkg/deploy/prestate_test.go | 112 - kurtosis-devnet/pkg/deploy/template.go | 288 --- kurtosis-devnet/pkg/deploy/template_test.go | 417 ---- kurtosis-devnet/pkg/kurtosis/adapters.go | 54 - .../pkg/kurtosis/api/enclave/enclave.go | 170 -- .../pkg/kurtosis/api/enclave/enclave_test.go | 241 --- .../pkg/kurtosis/api/engine/engine.go | 116 - .../pkg/kurtosis/api/engine/engine_test.go | 188 -- kurtosis-devnet/pkg/kurtosis/api/fake/fake.go | 289 --- .../pkg/kurtosis/api/interfaces/iface.go | 96 - .../pkg/kurtosis/api/run/handlers.go | 174 -- .../pkg/kurtosis/api/run/handlers_test.go | 374 ---- .../pkg/kurtosis/api/run/kurtosis_run.go | 154 -- .../pkg/kurtosis/api/run/kurtosis_run_test.go | 112 - .../pkg/kurtosis/api/wrappers/wrappers.go | 269 --- .../kurtosis/api/wrappers/wrappers_local.go | 21 - .../kurtosis/api/wrappers/wrappers_testing.go | 14 - kurtosis-devnet/pkg/kurtosis/endpoints.go | 384 ---- .../pkg/kurtosis/endpoints_test.go | 465 ---- kurtosis-devnet/pkg/kurtosis/kurtosis.go | 297 --- kurtosis-devnet/pkg/kurtosis/kurtosis_test.go | 567 ----- .../pkg/kurtosis/sources/deployer/cmd/main.go | 40 - .../pkg/kurtosis/sources/deployer/deployer.go | 470 ----- .../sources/deployer/deployer_test.go | 220 -- .../pkg/kurtosis/sources/deployer/wallets.go | 89 - .../pkg/kurtosis/sources/depset/cmd/main.go | 43 - .../pkg/kurtosis/sources/depset/depset.go | 79 - .../pkg/kurtosis/sources/inspect/README.md | 351 --- .../pkg/kurtosis/sources/inspect/cmd/main.go | 65 - 
.../pkg/kurtosis/sources/inspect/conductor.go | 155 -- .../sources/inspect/conductor_test.go | 86 - .../pkg/kurtosis/sources/inspect/config.go | 34 - .../kurtosis/sources/inspect/config_test.go | 91 - .../kurtosis/sources/inspect/flags/flags.go | 50 - .../sources/inspect/flags/flags_test.go | 124 -- .../pkg/kurtosis/sources/inspect/inspect.go | 116 - .../kurtosis/sources/inspect/inspect_test.go | 76 - .../pkg/kurtosis/sources/inspect/service.go | 150 -- .../kurtosis/sources/inspect/service_test.go | 63 - .../kurtosis/sources/interfaces/interfaces.go | 32 - .../pkg/kurtosis/sources/jwt/cmd/main.go | 54 - .../pkg/kurtosis/sources/jwt/jwt.go | 87 - .../pkg/kurtosis/sources/spec/spec.go | 172 -- .../pkg/kurtosis/sources/spec/spec_test.go | 113 - kurtosis-devnet/pkg/tmpl/cmd/main.go | 69 - kurtosis-devnet/pkg/tmpl/fake/fake.go | 31 - kurtosis-devnet/pkg/tmpl/tmpl.go | 189 -- kurtosis-devnet/pkg/tmpl/tmpl_test.go | 110 - kurtosis-devnet/pkg/types/autofix.go | 9 - kurtosis-devnet/pkg/util/docker.go | 773 ------- kurtosis-devnet/pkg/util/docker_test.go | 234 -- kurtosis-devnet/pkg/util/retry.go | 35 - kurtosis-devnet/pkg/util/retry_test.go | 255 --- kurtosis-devnet/pkg/util/util.go | 70 - kurtosis-devnet/pkg/util/util_test.go | 101 - kurtosis-devnet/simple.yaml | 89 - kurtosis-devnet/templates/devnet.yaml | 51 - kurtosis-devnet/templates/l2.yaml | 35 - kurtosis-devnet/templates/local-op-node.yaml | 33 - kurtosis-devnet/tests/.gitignore | 2 - kurtosis-devnet/tests/boilerplate.sh | 36 - kurtosis-devnet/tests/interop-smoke-test.sh | 16 - kurtosis-devnet/tests/kurtosis.yml | 4 - kurtosis-devnet/tests/main.star | 23 - kurtosis-devnet/user.yaml | 3 - op-acceptance-tests/justfile | 1 - .../scripts/metrics-collect-authorship.sh | 1 - .../interop/contract/interop_contract_test.go | 10 +- .../interop/loadtest/interop_load_test.go | 4 +- .../tests/interop/loadtest/l2.go | 2 +- .../tests/interop/message/interop_msg_test.go | 25 +- .../interop/reorgs/init_exec_msg_test.go | 6 +- 
.../interop/reorgs/invalid_exec_msgs_test.go | 6 +- .../tests/interop/upgrade/pre_test.go | 5 +- .../tests/isthmus/isthmus_test_helpers.go | 16 +- op-challenger/README.md | 8 +- op-deployer/pkg/deployer/apply.go | 2 +- .../pkg/deployer/pipeline/pre_state.go | 2 +- op-deployer/pkg/deployer/state/state.go | 2 +- .../pkg/deployer/validate/helpers_test.go | 2 +- op-devstack/devtest/testing.go | 3 +- op-devstack/devtest/tracing_handler.go | 90 + op-devstack/dsl/eoa.go | 17 +- op-e2e/actions/interop/interop_msg_test.go | 48 +- .../proofs => op-service}/prestate/client.go | 83 +- .../testutils/devnet/prestatebuilder.go | 2 +- .../txintent/bindings/eventlogger_deploy.go | 5 + op-service/txintent/interop_call.go | 4 +- op-supervisor/README.md | 1 - .../op-stack-go/Dockerfile.dockerignore | 1 - .../supervisor/l2reorg/init_exec_msg_test.go | 6 +- .../tests/supervisor/pre_interop/pre_test.go | 5 +- 228 files changed, 218 insertions(+), 29091 deletions(-) delete mode 100644 devnet-sdk/book/.gitignore delete mode 100644 devnet-sdk/book/book.toml delete mode 100644 devnet-sdk/book/custom.css delete mode 100644 devnet-sdk/book/src/README.md delete mode 100644 devnet-sdk/book/src/SUMMARY.md delete mode 100644 devnet-sdk/book/src/descriptors.md delete mode 100644 devnet-sdk/book/src/dsl/intro.md delete mode 100644 devnet-sdk/book/src/dsl/style_guide.md delete mode 100644 devnet-sdk/book/src/shell.md delete mode 100644 devnet-sdk/book/src/system.md delete mode 100644 devnet-sdk/book/src/testing.md delete mode 100644 devnet-sdk/book/theme/css/footer.css delete mode 100644 devnet-sdk/book/theme/js/footer.js delete mode 100644 devnet-sdk/cmd/mf2kt/main.go delete mode 100644 devnet-sdk/constraints/constraints.go delete mode 100644 devnet-sdk/constraints/constraints_test.go delete mode 100644 devnet-sdk/contracts/bindings/eventlogger.go delete mode 100644 devnet-sdk/contracts/bindings/l2tol2crossdomainmessenger.go delete mode 100644 devnet-sdk/contracts/bindings/superchainweth.go delete 
mode 100644 devnet-sdk/contracts/constants/constants.go delete mode 100644 devnet-sdk/contracts/contracts.go delete mode 100644 devnet-sdk/contracts/registry/client/client.go delete mode 100644 devnet-sdk/contracts/registry/client/l2tol2crossdomainmessenger.go delete mode 100644 devnet-sdk/contracts/registry/client/weth.go delete mode 100644 devnet-sdk/contracts/registry/empty/empty.go delete mode 100644 devnet-sdk/controller/kt/kt.go delete mode 100644 devnet-sdk/controller/kt/kt_test.go delete mode 100644 devnet-sdk/controller/kt/mutate_env.go delete mode 100644 devnet-sdk/controller/kt/mutate_env_test.go delete mode 100644 devnet-sdk/controller/kt/test_helpers.go delete mode 100644 devnet-sdk/controller/surface/surface.go delete mode 100644 devnet-sdk/descriptors/deployment.go delete mode 100644 devnet-sdk/images/repository.go delete mode 100644 devnet-sdk/interfaces/registry.go delete mode 100644 devnet-sdk/kt/fs/devnet_fs.go delete mode 100644 devnet-sdk/kt/fs/devnet_fs_test.go delete mode 100644 devnet-sdk/kt/fs/fs.go delete mode 100644 devnet-sdk/kt/fs/fs_test.go delete mode 100644 devnet-sdk/kt/params.go delete mode 100644 devnet-sdk/kt/visitor.go delete mode 100644 devnet-sdk/kt/visitor_test.go delete mode 100644 devnet-sdk/manifest/acceptor.go delete mode 100644 devnet-sdk/manifest/manifest.go delete mode 100644 devnet-sdk/manifest/visitor.go delete mode 100644 devnet-sdk/proofs/prestate/cmd/main.go delete mode 100644 devnet-sdk/shell/cmd/ctrl/main.go delete mode 100644 devnet-sdk/shell/cmd/enter/main.go delete mode 100644 devnet-sdk/shell/cmd/motd/main.go delete mode 100644 devnet-sdk/shell/env/chain.go delete mode 100644 devnet-sdk/shell/env/devnet.go delete mode 100644 devnet-sdk/shell/env/env_test.go delete mode 100644 devnet-sdk/shell/env/file_fetch.go delete mode 100644 devnet-sdk/shell/env/file_fetch_test.go delete mode 100644 devnet-sdk/shell/env/kt_fetch.go delete mode 100644 devnet-sdk/shell/env/kt_fetch_test.go delete mode 100644 
devnet-sdk/shell/env/kt_native_fetch.go delete mode 100644 devnet-sdk/shell/env/kt_native_fetch_test.go delete mode 100644 devnet-sdk/shell/env/testdata/kurtosis/args--malformed.txt delete mode 100644 devnet-sdk/shell/env/testdata/kurtosis/args--simple.yaml delete mode 100644 devnet-sdk/system/chain.go delete mode 100644 devnet-sdk/system/chain_test.go delete mode 100644 devnet-sdk/system/interfaces.go delete mode 100644 devnet-sdk/system/node.go delete mode 100644 devnet-sdk/system/periphery/go-ethereum/fees.go delete mode 100644 devnet-sdk/system/periphery/go-ethereum/fees_test.go delete mode 100644 devnet-sdk/system/system.go delete mode 100644 devnet-sdk/system/system_test.go delete mode 100644 devnet-sdk/system/tx.go delete mode 100644 devnet-sdk/system/tx_test.go delete mode 100644 devnet-sdk/system/txbuilder.go delete mode 100644 devnet-sdk/system/txbuilder_test.go delete mode 100644 devnet-sdk/system/txprocessor.go delete mode 100644 devnet-sdk/system/txprocessor_test.go delete mode 100644 devnet-sdk/system/wallet.go delete mode 100644 devnet-sdk/system/walletV2.go delete mode 100644 devnet-sdk/system/wallet_test.go delete mode 100644 devnet-sdk/telemetry/carrier.go delete mode 100644 devnet-sdk/telemetry/init.go delete mode 100644 devnet-sdk/telemetry/slog.go delete mode 100644 devnet-sdk/types/balance.go delete mode 100644 devnet-sdk/types/balance_test.go delete mode 100644 devnet-sdk/types/types.go delete mode 100644 kurtosis-devnet/.gitignore delete mode 100644 kurtosis-devnet/README.md delete mode 100644 kurtosis-devnet/book/.gitignore delete mode 100644 kurtosis-devnet/book/book.toml delete mode 100644 kurtosis-devnet/book/custom.css delete mode 100644 kurtosis-devnet/book/src/README.md delete mode 100644 kurtosis-devnet/book/src/SUMMARY.md delete mode 100644 kurtosis-devnet/book/src/basic_deployment.md delete mode 100644 kurtosis-devnet/book/src/local_artifacts.md delete mode 100644 kurtosis-devnet/book/src/std_output.md delete mode 100644 
kurtosis-devnet/book/theme/css/footer.css delete mode 100644 kurtosis-devnet/book/theme/js/footer.js delete mode 100644 kurtosis-devnet/cmd/main.go delete mode 100644 kurtosis-devnet/cmd/main_test.go delete mode 100644 kurtosis-devnet/fileserver/kurtosis.yml delete mode 100644 kurtosis-devnet/fileserver/main.star delete mode 100644 kurtosis-devnet/fileserver/static_files/nginx/default.conf delete mode 100644 kurtosis-devnet/flash.yaml delete mode 100644 kurtosis-devnet/foo-user.example.json delete mode 100644 kurtosis-devnet/interop.yaml delete mode 100644 kurtosis-devnet/jovian.yaml delete mode 100644 kurtosis-devnet/justfile delete mode 100644 kurtosis-devnet/op-program-svc/Dockerfile delete mode 100644 kurtosis-devnet/op-program-svc/README.md delete mode 100644 kurtosis-devnet/op-program-svc/build.go delete mode 100644 kurtosis-devnet/op-program-svc/build_test.go delete mode 100644 kurtosis-devnet/op-program-svc/defaults.go delete mode 100644 kurtosis-devnet/op-program-svc/fs.go delete mode 100644 kurtosis-devnet/op-program-svc/fs_test.go delete mode 100644 kurtosis-devnet/op-program-svc/interfaces.go delete mode 100644 kurtosis-devnet/op-program-svc/justfile delete mode 100644 kurtosis-devnet/op-program-svc/main.go delete mode 100644 kurtosis-devnet/op-program-svc/mocks.go delete mode 100644 kurtosis-devnet/op-program-svc/server.go delete mode 100644 kurtosis-devnet/op-program-svc/server_test.go delete mode 100644 kurtosis-devnet/optimism-package-trampoline/README.md delete mode 100644 kurtosis-devnet/optimism-package-trampoline/kurtosis.yml delete mode 100644 kurtosis-devnet/optimism-package-trampoline/main.star delete mode 100644 kurtosis-devnet/pkg/build/contracts.go delete mode 100644 kurtosis-devnet/pkg/build/contracts_test.go delete mode 100644 kurtosis-devnet/pkg/build/docker.go delete mode 100644 kurtosis-devnet/pkg/build/docker_test.go delete mode 100644 kurtosis-devnet/pkg/build/prestate.go delete mode 100644 kurtosis-devnet/pkg/deploy/deploy.go 
delete mode 100644 kurtosis-devnet/pkg/deploy/deploy_test.go delete mode 100644 kurtosis-devnet/pkg/deploy/fileserver.go delete mode 100644 kurtosis-devnet/pkg/deploy/fileserver_test.go delete mode 100644 kurtosis-devnet/pkg/deploy/prestate.go delete mode 100644 kurtosis-devnet/pkg/deploy/prestate_test.go delete mode 100644 kurtosis-devnet/pkg/deploy/template.go delete mode 100644 kurtosis-devnet/pkg/deploy/template_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/adapters.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/enclave/enclave.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/enclave/enclave_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/engine/engine.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/engine/engine_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/fake/fake.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/interfaces/iface.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/run/handlers.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/run/handlers_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_local.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_testing.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/endpoints.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/endpoints_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/kurtosis.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/kurtosis_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/deployer/cmd/main.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer_test.go delete mode 100644 
kurtosis-devnet/pkg/kurtosis/sources/deployer/wallets.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/depset/cmd/main.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/depset/depset.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/README.md delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/config.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/config_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/service.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/inspect/service_test.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/interfaces/interfaces.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/jwt/cmd/main.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/jwt/jwt.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/spec/spec.go delete mode 100644 kurtosis-devnet/pkg/kurtosis/sources/spec/spec_test.go delete mode 100644 kurtosis-devnet/pkg/tmpl/cmd/main.go delete mode 100644 kurtosis-devnet/pkg/tmpl/fake/fake.go delete mode 100644 kurtosis-devnet/pkg/tmpl/tmpl.go delete mode 100644 kurtosis-devnet/pkg/tmpl/tmpl_test.go delete mode 100644 kurtosis-devnet/pkg/types/autofix.go delete mode 100644 kurtosis-devnet/pkg/util/docker.go delete mode 100644 kurtosis-devnet/pkg/util/docker_test.go delete mode 100644 kurtosis-devnet/pkg/util/retry.go delete mode 100644 
kurtosis-devnet/pkg/util/retry_test.go delete mode 100644 kurtosis-devnet/pkg/util/util.go delete mode 100644 kurtosis-devnet/pkg/util/util_test.go delete mode 100644 kurtosis-devnet/simple.yaml delete mode 100644 kurtosis-devnet/templates/devnet.yaml delete mode 100644 kurtosis-devnet/templates/l2.yaml delete mode 100644 kurtosis-devnet/templates/local-op-node.yaml delete mode 100644 kurtosis-devnet/tests/.gitignore delete mode 100644 kurtosis-devnet/tests/boilerplate.sh delete mode 100644 kurtosis-devnet/tests/interop-smoke-test.sh delete mode 100644 kurtosis-devnet/tests/kurtosis.yml delete mode 100644 kurtosis-devnet/tests/main.star delete mode 100644 kurtosis-devnet/user.yaml rename {devnet-sdk => op-acceptance-tests}/scripts/metrics-collect-authorship.sh (84%) create mode 100644 op-devstack/devtest/tracing_handler.go rename {devnet-sdk/proofs => op-service}/prestate/client.go (73%) create mode 100644 op-service/txintent/bindings/eventlogger_deploy.go diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index 42df66bd9b8e2..aabb4c2a505ee 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -2802,7 +2802,7 @@ jobs: - run: name: Collect devnet metrics for op-acceptance-tests command: | - ./devnet-sdk/scripts/metrics-collect-authorship.sh op-acceptance-tests/tests > .metrics--authorship--op-acceptance-tests + ./op-acceptance-tests/scripts/metrics-collect-authorship.sh op-acceptance-tests/tests > .metrics--authorship--op-acceptance-tests echo "Wrote file .metrics--authorship--op-acceptance-tests" - gcp-cli/install - gcp-oidc-authenticate: diff --git a/AGENTS.md b/AGENTS.md index 2857cddef15bf..7d7055bf0b98b 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -48,8 +48,6 @@ The OP Stack includes significant Rust implementations: ### Development and Testing Infrastructure -- **devnet-sdk**: Toolkit for devnet interactions -- **kurtosis-devnet**: Kurtosis-based devnet environment (DEPRECATED) - **op-e2e**: End-to-end testing framework 
- **op-acceptance-tests**: Acceptance test suite diff --git a/Makefile b/Makefile index bfe1871860484..ea34470a152e5 100644 --- a/Makefile +++ b/Makefile @@ -245,8 +245,6 @@ TEST_PKGS := \ ./op-e2e/actions/upgrades \ ./packages/contracts-bedrock/scripts/checks/... \ ./op-dripper/... \ - ./devnet-sdk/... \ - ./kurtosis-devnet/... \ ./op-devstack/... \ ./op-deployer/pkg/deployer/artifacts/... \ ./op-deployer/pkg/deployer/broadcaster/... \ diff --git a/README.md b/README.md index bdd480d522e01..ee813779d7bd0 100644 --- a/README.md +++ b/README.md @@ -64,9 +64,7 @@ The Optimism Immunefi program offers up to $2,000,042 for in-scope critical vuln

 ├── cannon: Onchain MIPS instruction emulator for fault proofs
-├── devnet-sdk: Comprehensive toolkit for standardized devnet interactions
 ├── docs: A collection of documents including audits and post-mortems
-├── kurtosis-devnet: OP-Stack Kurtosis devnet
 ├── op-acceptance-tests: Acceptance tests and configuration for OP Stack
 ├── op-alt-da: Alternative Data Availability mode (beta)
 ├── op-batcher: L2-Batch Submitter, submits bundles of batches to L1
diff --git a/devnet-sdk/book/.gitignore b/devnet-sdk/book/.gitignore
deleted file mode 100644
index 7585238efedfc..0000000000000
--- a/devnet-sdk/book/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-book
diff --git a/devnet-sdk/book/book.toml b/devnet-sdk/book/book.toml
deleted file mode 100644
index bde35e66b7da0..0000000000000
--- a/devnet-sdk/book/book.toml
+++ /dev/null
@@ -1,13 +0,0 @@
-[book]
-authors = ["Optimism Contributors"]
-language = "en"
-multilingual = false
-src = "src"
-title = "Devnet SDK Book"
-
-[output.html]
-site-url = "/devnet-sdk/"
-git-repository-url = "https://github.com/ethereum-optimism/optimism/tree/develop/devnet-sdk/book"
-edit-url-template = "https://github.com/ethereum-optimism/optimism/tree/develop/devnet-sdk/book/{path}"
-additional-css = ["custom.css", "theme/css/footer.css"]
-additional-js = ["theme/js/footer.js"]
diff --git a/devnet-sdk/book/custom.css b/devnet-sdk/book/custom.css
deleted file mode 100644
index 7c94143752af4..0000000000000
--- a/devnet-sdk/book/custom.css
+++ /dev/null
@@ -1,5 +0,0 @@
-.content main {
-  max-width: 85%;
-  margin-left: auto;
-  margin-right: auto;
-}
diff --git a/devnet-sdk/book/src/README.md b/devnet-sdk/book/src/README.md
deleted file mode 100644
index 9947175682465..0000000000000
--- a/devnet-sdk/book/src/README.md
+++ /dev/null
@@ -1,54 +0,0 @@
-> ⚠️ **UNDER HEAVY DEVELOPMENT** ⚠️
->
-> This documentation is actively being developed and may change frequently.
-
-# Introduction
-
-# Devnet SDK
-
-The Devnet SDK is a comprehensive toolkit designed to standardize and simplify interactions with Optimism devnets. It provides a robust set of tools and interfaces for deploying, managing, and testing Optimism networks in development environments.
-
-## Core Components
-
-### 1. Devnet Descriptors
-
-The descriptors package defines a standard interface for describing and interacting with devnets. It provides:
-
-- Structured representation of devnet environments including L1 and L2 chains
-- Service discovery and endpoint management
-- Wallet and address management
-- Standardized configuration for chain components (nodes, services, endpoints)
-
-### 2. Shell Integration
-
-The shell package provides a preconfigured shell environment for interacting with devnets. For example, you can quickly:
-
-- Launch a shell with all environment variables set and run commands like `cast balance 
` that automatically use the correct RPC endpoints -- Access chain-specific configuration like JWT secrets and contract addresses - -This makes it easy to interact with your devnet without manually configuring tools or managing connection details. - -### 3. System Interface - -The system package provides a devnet-agnostic programmatic interface, constructed for example from the descriptors above, for interacting with Optimism networks. Key features include: - -- Unified interface for L1 and L2 chain interactions -- Transaction management and processing -- Wallet management and operations -- Contract interaction capabilities -- Interoperability features between different L2 chains - -Core interfaces include: -- `System`: Represents a complete Optimism system with L1 and L2 chains -- `Chain`: Provides access to chain-specific operations -- `Wallet`: Manages accounts, transactions, and signing operations -- `Transaction`: Handles transaction creation and processing - -### 4. Testing Framework - -The testing package provides a comprehensive framework for testing devnet deployments: - -- Standardized testing utilities -- System test capabilities -- Integration test helpers -- Test fixtures and utilities diff --git a/devnet-sdk/book/src/SUMMARY.md b/devnet-sdk/book/src/SUMMARY.md deleted file mode 100644 index fb46d91bb1094..0000000000000 --- a/devnet-sdk/book/src/SUMMARY.md +++ /dev/null @@ -1,15 +0,0 @@ -# Summary - -[Introduction](README.md) - -# Usage - -- [Descriptors Documentation](./descriptors.md) -- [Shell Integration Guide](./shell.md) -- [System Interface Reference](./system.md) -- [Testing Framework Guide](./testing.md) - -# DSL - -- [Introduction](dsl/intro.md) -- [Style Guide](dsl/style_guide.md) diff --git a/devnet-sdk/book/src/descriptors.md b/devnet-sdk/book/src/descriptors.md deleted file mode 100644 index 20dc1e21a460c..0000000000000 --- a/devnet-sdk/book/src/descriptors.md +++ /dev/null @@ -1,142 +0,0 @@ -# Devnet Descriptors - -The devnet descriptor 
is a standardized format that describes the complete topology and configuration of an Optimism devnet deployment. This standard serves as a bridge between different devnet implementations and the higher-level tooling provided by the devnet-sdk. - -## Universal Descriptor Format - -Both `kurtosis-devnet` and `netchef` emit the same descriptor format, despite being completely different devnet implementations: - -- **kurtosis-devnet**: Uses Kurtosis to orchestrate containerized devnet deployments -- **netchef**: Provides a lightweight, local devnet deployment - -This standardization enables a powerful ecosystem where tools can be built independently of the underlying devnet implementation. - -## Descriptor Structure - -A devnet descriptor provides a complete view of a running devnet: - -```json -{ - "l1": { - "name": "l1", - "id": "900", - "services": { - "geth": { - "name": "geth", - "endpoints": { - "rpc": { - "host": "localhost", - "port": 8545 - } - } - } - }, - "nodes": [...], - "addresses": { - "deployer": "0x...", - "admin": "0x..." - }, - "wallets": { - "admin": { - "address": "0x...", - "private_key": "0x..." - } - } - }, - "l2": [ - { - "name": "op-sepolia", - "id": "11155420", - "services": {...}, - "nodes": [...], - "addresses": {...}, - "wallets": {...} - } - ], - "features": ["eip1559", "shanghai"] -} -``` - -## Enabling Devnet-Agnostic Tooling - -The power of the descriptor format lies in its ability to make any compliant devnet implementation immediately accessible to the entire devnet-sdk toolset: - -1. **Universal Interface**: Any devnet that can emit this descriptor format can be managed through devnet-sdk's tools -2. 
**Automatic Integration**: New devnet implementations only need to implement the descriptor format to gain access to: - - System interface for chain interaction - - Testing framework - - Shell integration tools - - Wallet management - - Transaction processing - -## Benefits - -This standardization provides several key advantages: - -- **Portability**: Tools built against the devnet-sdk work with any compliant devnet implementation -- **Consistency**: Developers get the same experience regardless of the underlying devnet -- **Extensibility**: New devnet implementations can focus on deployment mechanics while leveraging existing tooling -- **Interoperability**: Tools can be built that work across different devnet implementations - -## Implementation Requirements - -To make a devnet implementation compatible with devnet-sdk, it needs to: - -1. Provide a mechanism to output the descriptor (typically as JSON) -2. Ensure all required services and endpoints are properly described - -Once these requirements are met, the devnet automatically gains access to the full suite of devnet-sdk capabilities. 
- -## Status - -The descriptor format is currently in active development, particularly regarding endpoint specifications: - -### Endpoint Requirements - -- **Current State**: The format does not strictly specify which endpoints must be included in a compliant descriptor -- **Minimum Known Requirements**: - - RPC endpoints are essential for basic chain interaction - - Other endpoints may be optional depending on use case - -### Implementation Notes - -- `kurtosis-devnet` currently outputs all service endpoints by default, including many that may not be necessary for testing -- Other devnet implementations can be more selective about which endpoints they expose -- Different testing scenarios may require different sets of endpoints - -### Future Development - -We plan to develop more specific recommendations for: -- Required vs optional endpoints -- Standard endpoint naming conventions -- Service-specific endpoint requirements -- Best practices for endpoint exposure - -Until these specifications are finalized, devnet implementations should: -1. Always include RPC endpoints -2. Document which additional endpoints they expose -3. Consider their specific use cases when deciding which endpoints to include - -## Example Usage - -Here's how a tool might use the descriptor to interact with any compliant devnet: - -```go -// Load descriptor from any compliant devnet -descriptor, err := descriptors.Load("devnet.json") -if err != nil { - log.Fatal(err) -} - -// Use the descriptor with devnet-sdk tools -system, err := system.FromDescriptor(descriptor) -if err != nil { - log.Fatal(err) -} - -// Now you can use all devnet-sdk features -l1 := system.L1() -l2 := system.L2(descriptor.L2[0].ID) -``` - -This standardization enables a rich ecosystem of tools that work consistently across different devnet implementations, making development and testing more efficient and reliable. 
diff --git a/devnet-sdk/book/src/dsl/intro.md b/devnet-sdk/book/src/dsl/intro.md deleted file mode 100644 index bc6b3b4757f74..0000000000000 --- a/devnet-sdk/book/src/dsl/intro.md +++ /dev/null @@ -1,41 +0,0 @@ -# Introduction - -The devnet-sdk DSL is a high level test library, specifically designed for end to end / acceptance testing of the -OP Stack. It aims to make the development and maintenance of whole system tests faster and easier. - -The high level API helps make the actual test read in a more declarative style and separate the technical details of how -an action is actually performed. The intended result is that tests express the requirements, while the DSL provides the -technical details of how those requirements are met. This ensures that as the technical details change, the DSL can -be updated rather than requiring that each test be updated individual - significantly reducing the maintenance cost for -a large test suite. Similarly, if there is flakiness in tests, it can often be solved by improving the DSL to -properly wait for pre or post conditions or automatically perform required setup steps and that fix is automatically -applied everywhere, including tests added in the future. - -## Guiding Principles - -These guiding principles allow the test suite to evolve and grow over time in a way that ensures the tests are -maintainable and continue to be easy to write. With multiple different teams contributing to tests, over a long time -period, shared principles are required to avoid many divergent approaches and frameworks emerging which increase the -cognitive load for developers writing tests and increase the maintenance costs for existing tests. - -### Keep It Simple - -Avoid attempting to make the DSL read like plain English. This is a domain-specific language and the domain experts are -actually the test developers, not non-technical users. 
Each statement should clearly describe what it is trying to do, -but does not need to read like an English sentence. - -Bias very strongly towards making the tests simpler, even if the DSL implementation then needs to be more complex. -Complexity in tests will be duplicated for each test case whereas complexity in the DSL is more centralised and is -encapsulated so it is much less likely to be a distraction. - -### Consistency - -The "language" of the DSL emerges by being consistent in the structures and naming used for things. Take the time to -refactor things to ensure that the same name is used consistently for a concept right across the DSL. - -Bias towards following established patterns rather than doing something new. While introducing a new pattern might make -things cleaner in a particular test, it introduces additional cognitive load for people to understand when working with -the tests. It is usually (but not always) better to preserve consistency than to have a marginally nicer solution for -one specific scenario. - -The [style guide](./style_guide.md) defines a set of common patterns and guidelines that should be followed. diff --git a/devnet-sdk/book/src/dsl/style_guide.md b/devnet-sdk/book/src/dsl/style_guide.md deleted file mode 100644 index 3095892ddb9fa..0000000000000 --- a/devnet-sdk/book/src/dsl/style_guide.md +++ /dev/null @@ -1,133 +0,0 @@ -# DSL Style Guide - -This style guide outlines common patterns and anti-patterns used by the testing DSL. Following this guide not only -improves consistency, it helps keep the separation of requirements (in test files) from implementation details (in DSL -implementation), which in turn ensures tests are maintainable even as the number of tests keeps increasing over time. - -## Entry Points - -What are the key entry points for the system? Nodes/services, users, contracts?? - -## Action Methods - -Methods that perform actions will typically have three steps: - -1. 
Check (and if needed, wait) for any required preconditions -2. Perform the action, allowing components to fully process the effects of it -3. Assert that the action completed. These are intended to be a sanity check to ensure tests fail fast if something - doesn't work as expected. Options may be provided to perform more detailed or specific assertions - -## Verification Methods - -Verification methods in the DSL provide additional assertions about the state of the system, beyond the minimal -assertions performed by action methods. - -Verification methods should include any required waiting or retrying. - -Verification methods should generally only be used in tests to assert the specific behaviours the test is covering. -Avoid adding additional verification steps in a test to assert that setup actions were performed correctly - such -assertions should be built into the action methods. While sanity checking setup can be useful, adding additional -verification method calls into tests makes it harder to see what the test is actually intending to cover and increases -the number of places that need to be updated if the behaviour being verified changes in the future. - -### Avoid Getter Methods - -The DSL generally avoids exposing methods that return data from the system state. Instead verification methods are -exposed which combine the fetching and assertion of the data. This allows the DSL to handle any waiting or retrying -that may be necessary (or become necessary). This avoids a common source of flakiness where tests assume an asynchronous -operation will have completed instead of explicitly waiting for the expected final state. - - -```go -// Avoid: the balance of an account is data from the system which changes over time -block := node.GetBalance(user) - -// Good: use a verification method -node.VerifyBalance(user, 10 * constants.Eth) - -// Better? 
Select the entry point to be as declarative as possible -user.VerifyBalacne(10 * constants.Eth) // implementation could verify balance on all nodes automatically -``` - - -Note however that this doesn't mean that DSL methods never return anything. While returning raw data is avoided, -returning objects that represent something in the system is ok. e.g. - -```go -claim := game.RootClaim() - -// Waits for op-challenger to counter the root claim and returns a value representing that counter claim -// which can expose further verification or action methods. -counter := claim.VerifyCountered() -counter.VerifyClaimant(honestChallenger) -counter.Attack() -``` - -## Method Arguments - -Required inputs to methods are specified as normal parameters, so type checking enforces their presence. - -Optional inputs to methods are specified by a config struct and accept a vararg of functions that can update that struct. -This is roughly inline with the typical opts pattern in Golang but with significantly reduced boilerplate code since -so many methods will define their own config. With* methods are only provided for the most common optional args and -tests will normally supply a custom function that sets all the optional values they need at once. - -## Logging - -Include logging to indicate what the test is doing within the DSL methods. - -Methods that wait should log what they are waiting for and the current state of the system on each poll cycle. - -## No Sleeps - -Neither tests nor DSL code should use hard coded sleeps. CI systems tend to be under heavy and unpredictable load so -short sleep times lead to flaky tests when the system is slower than expected. Long sleeps waste time, causing test runs -to be too slow. By using a waiter pattern, a long timeout can be applied to avoid flakiness, while allowing the test to -progress quickly once the condition is met. 
- -```go -// Avoid: arbitrary delays -node.DoSomething() -time.Sleep(2 * time.Minute) -node.VerifyResult() - -// Good: build wait/retry loops into the testlib method -node.DoSomething() -node.VerifyResult() // Automatically waits -``` - -## Test Smells - -"Smells" are patterns that indicate there may be a problem. They aren't hard rules, but indicate that something may not -be right and the developer should take a little time to consider if there are better alternatives. - -### Comment and Code Block - -Where possible, test code should be self-explanatory with testlib method calls that are high level enough to not need -comments explaining what they do in the test. When comments are required to explain simple setup, it's an indication -that the testlib method is either poorly named or that a higher level method should be introduced. - -```go -// Smelly: Test code is far too low level and needs to be explained with a comment -// Deploy test contract -storeProgram := program.New().Sstore(0, 0xbeef).Bytes() -walletv2, err := system.NewWalletV2FromWalletAndChain(ctx, wallet, l2Chain) -require.NoError(t, err) -storeAddr, err := DeployProgram(ctx, walletv2, storeProgram) -require.NoError(t, err) -code, err := l2Client.CodeAt(ctx, storeAddr, nil) -require.NoError(t, err) -require.NotEmpty(t, code, "Store contract not deployed") -require.Equal(t, code, storeProgram, "Store contract code incorrect") - -// Good: Introduce a testlib method to encapsulate the detail and keep the test high level -contract := contracts.SStoreContract.Deploy(l2Node, 0xbeef) -``` - -However, not all comments are bad: - -```go -// Good: Explain the calculations behind specific numbers -// operatorFeeCharged = gasUsed * operatorFeeScalar == 1000 * 5 == 5000 -tx.VerifyOperatorFeeCharged(5000) -``` diff --git a/devnet-sdk/book/src/shell.md b/devnet-sdk/book/src/shell.md deleted file mode 100644 index f0ad9cad1dc88..0000000000000 --- a/devnet-sdk/book/src/shell.md +++ /dev/null @@ -1,81 +0,0 @@ -# 
Shell Integration - -The devnet-sdk provides powerful shell integration capabilities that allow developers to "enter" a devnet environment, making interactions with the network more intuitive and streamlined. - -## Devnet Shell Environment - -Using a devnet's descriptor, we can create a shell environment that is automatically configured with all the necessary context to interact with the devnet: - -```bash -# Enter a shell configured for your devnet -devnet-sdk shell --descriptor path/to/devnet.json -``` - -### Automatic Configuration - -When you enter a devnet shell, the environment is automatically configured with: - -- Environment variables for RPC endpoints -- JWT authentication tokens where required -- Named wallet addresses -- Chain IDs -- Other devnet-specific configuration - -### Simplified Tool Usage - -This automatic configuration enables seamless use of Ethereum development tools without explicit endpoint configuration: - -```bash -# Without devnet shell -cast balance 0x123... --rpc-url http://localhost:8545 --jwt-secret /path/to/jwt - -# With devnet shell -cast balance 0x123... # RPC and JWT automatically configured -``` - -## Supported Tools - -The shell environment enhances the experience with various Ethereum development tools: - -- `cast`: For sending transactions and querying state - -## Environment Variables - -The shell automatically sets up standard Ethereum environment variables based on the descriptor: - -```bash -# Chain enpointpoit -export ETH_RPC_URL=... -export ETH_JWT_SECRET=... -``` - -## Usage Examples - -```bash -# Enter devnet shell -go run devnet-sdk/shell/cmd/enter/main.go --descriptor devnet.json --chain ... 
- -# Now you can use tools directly -cast block latest - -# Exit the shell -exit -``` - -## Benefits - -- **Simplified Workflow**: No need to manually configure RPC endpoints or authentication -- **Consistent Environment**: Same configuration across all tools and commands -- **Reduced Error Risk**: Eliminates misconfigurations and copy-paste errors -- **Context Awareness**: Shell knows about all chains and services in your devnet - -## Implementation Details - -The shell integration: - -1. Reads the descriptor file -2. Sets up environment variables based on the descriptor content -3. Creates a new shell session with the configured environment -4. Maintains the environment until you exit the shell - -This feature makes it significantly easier to work with devnets by removing the need to manually manage connection details and authentication tokens. diff --git a/devnet-sdk/book/src/system.md b/devnet-sdk/book/src/system.md deleted file mode 100644 index 7ef3bd10352c5..0000000000000 --- a/devnet-sdk/book/src/system.md +++ /dev/null @@ -1,199 +0,0 @@ -# System Interfaces - -The devnet-sdk provides a set of Go interfaces that abstract away the specifics of devnet deployments, enabling automation solutions to work consistently across different deployment types and implementations. - -## Core Philosophy - -While the Descriptor interfaces provide a common way to describe actual devnet deployments (like production-like or Kurtosis-based deployments), the System interfaces operate at a higher level of abstraction. They are designed to support both real deployments and lightweight testing environments. 
- -The key principles are: - -- **Deployment-Agnostic Automation**: Code written against these interfaces works with any implementation - from full deployments described by Descriptors to in-memory stacks or completely fake environments -- **Flexible Testing Options**: Enables testing against: - - Complete devnet deployments - - Partial mock implementations - - Fully simulated environments -- **One-Way Abstraction**: While Descriptors can be converted into System interfaces, System interfaces can represent additional constructs beyond what Descriptors describe -- **Implementation Freedom**: New deployment types or testing environments can be added without modifying existing automation code - -## Interface Purity - -A critical design principle of these interfaces is their **purity**. This means that interfaces: - -1. **Only Reference Other Pure Interfaces**: Each interface method can only return or accept: - - Other pure interfaces from this package - - Simple data objects that can be fully instantiated - - Standard Go types and primitives - -2. **Avoid Backend-Specific Types**: The interfaces never expose types that would create dependencies on specific implementations: - ```go - // BAD: Creates dependency on specific client implementation - func (c Chain) GetNodeClient() *specific.NodeClient - - // GOOD: Returns pure interface that can be implemented by any backend - func (c Chain) Client() (ChainClient, error) - ``` - -3. 
**Use Generic Data Types**: When complex data structures are needed, they are defined as pure data objects: - ```go - // Pure data type that any implementation can create - type TransactionData interface { - From() common.Address - To() *common.Address - Value() *big.Int - Data() []byte - } - ``` - -### Why Purity Matters - -Interface purity is crucial because it: -- Preserves implementation freedom -- Prevents accidental coupling to specific backends -- Enables creation of new implementations without constraints -- Allows mixing different implementation types (e.g., partial fakes) - -### Example: Maintaining Purity - -```go -// IMPURE: Forces dependency on eth client -type Chain interface { - GetEthClient() *ethclient.Client // 👎 Locks us to specific client -} - -// PURE: Allows any implementation -type Chain interface { - Client() (ChainClient, error) // 👍 Implementation-agnostic -} - -type ChainClient interface { - BlockNumber(ctx context.Context) (uint64, error) - // ... other methods -} -``` - -## Interface Hierarchy - -### System - -The top-level interface representing a complete Optimism deployment: - -```go -type System interface { - // Unique identifier for this system - Identifier() string - - // Access to L1 chain - L1() Chain - - // Access to L2 chain(s) - L2(chainID uint64) Chain -} -``` - -### Chain - -Represents an individual chain (L1 or L2) within the system: - -```go -type Chain interface { - // Chain identification - RPCURL() string - ID() types.ChainID - - // Core functionality - Client() (*ethclient.Client, error) - Wallets(ctx context.Context) ([]Wallet, error) - ContractsRegistry() interfaces.ContractsRegistry - - // Chain capabilities - SupportsEIP(ctx context.Context, eip uint64) bool - - // Transaction management - GasPrice(ctx context.Context) (*big.Int, error) - GasLimit(ctx context.Context, tx TransactionData) (uint64, error) - PendingNonceAt(ctx context.Context, address common.Address) (uint64, error) -} -``` - -### Wallet - -Manages 
accounts and transaction signing: - -```go -type Wallet interface { - // Account management - PrivateKey() types.Key - Address() types.Address - Balance() types.Balance - Nonce() uint64 - - // Transaction operations - Sign(tx Transaction) (Transaction, error) - Send(ctx context.Context, tx Transaction) error - - // Convenience methods - SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] - Transactor() *bind.TransactOpts -} -``` - -## Implementation Types - -The interfaces can be implemented in various ways to suit different needs: - -### 1. Real Deployments -- **Kurtosis-based**: Full containerized deployment -- **Netchef**: Remote devnet deployment -- -### 2. Testing Implementations -- **In-memory**: Fast, lightweight implementation for unit tests -- **Mocks**: Controlled behavior for specific test scenarios -- **Recording**: Record and replay real interactions - -### 3. Specialized Implementations -- **Partial**: Combining pieces from fake and real deployments -- **Filtered**: Limited functionality for specific use cases -- **Instrumented**: Added logging/metrics for debugging - -## Usage Examples - -### Writing Tests - -The System interfaces are primarily used through our testing framework. See the [Testing Framework](./testing.md) documentation for detailed examples and best practices. - -### Creating a Mock Implementation - -```go -type MockSystem struct { - l1 *MockChain - l2Map map[uint64]*MockChain -} - -func NewMockSystem() *MockSystem { - return &MockSystem{ - l1: NewMockChain(), - l2Map: make(map[uint64]*MockChain), - } -} - -// Implement System interface... -``` - -## Benefits - -- **Abstraction**: Automation code is isolated from deployment details -- **Flexibility**: Easy to add new deployment types -- **Testability**: Support for various testing approaches -- **Consistency**: Same interface across all implementations -- **Extensibility**: Can add specialized implementations for specific needs - -## Best Practices - -1. 
**Write Against Interfaces**: Never depend on specific implementations -2. **Use Context**: For proper cancellation and timeouts -3. **Handle Errors**: All operations can fail -4. **Test Multiple Implementations**: Ensure code works across different types -5. **Consider Performance**: Choose appropriate implementation for use case - -The System interfaces provide a powerful abstraction layer that enables writing robust, deployment-agnostic automation code while supporting a wide range of implementation types for different use cases. diff --git a/devnet-sdk/book/src/testing.md b/devnet-sdk/book/src/testing.md deleted file mode 100644 index c88a3cb28b4e6..0000000000000 --- a/devnet-sdk/book/src/testing.md +++ /dev/null @@ -1,165 +0,0 @@ -# Testing Framework - -The devnet-sdk provides a comprehensive testing framework designed to make testing against Optimism devnets both powerful and developer-friendly. - -## Testing Philosophy - -Our testing approach is built on several key principles: - -### 1. Native Go Tests - -Tests are written as standard Go tests, providing: -- Full IDE integration -- Native debugging capabilities -- Familiar testing patterns -- Integration with standard Go tooling - -```go -func TestSystemWrapETH(t *testing.T) { - // Standard Go test function - systest.SystemTest(t, wrapETHScenario(...)) -} -``` - -### 2. Safe Test Execution - -Tests are designed to be safe and self-aware: -- Tests verify their prerequisites before execution -- Tests skip gracefully when prerequisites aren't met -- Clear distinction between precondition failures and test failures - -```go -// Test will skip if the system doesn't support required features -walletGetter, fundsValidator := validators.AcquireL2WalletWithFunds( - chainIdx, - types.NewBalance(big.NewInt(1.0 * constants.ETH)), -) -``` - -### 3. 
Testable Scenarios - -Test scenarios themselves are designed to be testable: -- Scenarios work against any compliant System implementation -- Mocks and fakes can be used for scenario validation -- Clear separation between test logic and system interaction - -### 4. Framework Integration - -The `systest` package provides integration helpers that: -- Handle system acquisition and setup -- Manage test context and cleanup -- Provide precondition validation -- Enable consistent test patterns - -## Example Test - -Here's a complete example showing these principles in action: - -```go -import ( - "math/big" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/systest" - "github.com/ethereum-optimism/optimism/devnet-sdk/testing/testlib/validators" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-service/testlog" - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/require" -) - -// Define test scenario as a function that works with any System implementation -func wrapETHScenario(chainIdx uint64, walletGetter validators.WalletGetter) systest.SystemTestFunc { - return func(t systest.T, sys system.System) { - ctx := t.Context() - - logger := testlog.Logger(t, log.LevelInfo) - logger := logger.With("test", "WrapETH", "devnet", sys.Identifier()) - - // Get the L2 chain we want to test with - chain := sys.L2(chainIdx) - logger = logger.With("chain", chain.ID()) - - // Get a funded wallet for testing - user := walletGetter(ctx) - - // Access contract registry - wethAddr := constants.WETH - weth, err := chain.ContractsRegistry().WETH(wethAddr) - require.NoError(t, err) - - // Test logic using pure interfaces - funds := types.NewBalance(big.NewInt(0.5 * constants.ETH)) - initialBalance, err := weth.BalanceOf(user.Address()).Call(ctx) - require.NoError(t, err) - - 
require.NoError(t, user.SendETH(wethAddr, funds).Send(ctx).Wait()) - - finalBalance, err := weth.BalanceOf(user.Address()).Call(ctx) - require.NoError(t, err) - - require.Equal(t, initialBalance.Add(funds), finalBalance) - } -} - -func TestSystemWrapETH(t *testing.T) { - chainIdx := uint64(0) // First L2 chain - - // Setup wallet with required funds - this acts as a precondition - walletGetter, fundsValidator := validators.AcquireL2WalletWithFunds( - chainIdx, - types.NewBalance(big.NewInt(1.0 * constants.ETH)), - ) - - // Run the test with system management handled by framework - systest.SystemTest(t, - wrapETHScenario(chainIdx, walletGetter), - fundsValidator, - ) -} -``` - -## Framework Components - -### 1. Test Context Management - -The framework provides context management through `systest.T`: -- Proper test timeouts -- Cleanup handling -- Resource management -- Logging context - -### 2. Precondition Validators - -Validators ensure test prerequisites are met: -```go -// Validator ensures required funds are available -fundsValidator := validators.AcquireL2WalletWithFunds(...) -``` - -### 3. System Acquisition - -The framework handles system creation and setup: -```go -systest.SystemTest(t, func(t systest.T, sys system.System) { - // System is ready to use -}) -``` - -### 4. Resource Management - -Resources are properly managed: -- Automatic cleanup -- Proper error handling -- Context cancellation - -## Best Practices - -1. **Use Scenarios**: Write reusable test scenarios that work with any System implementation -2. **Validate Prerequisites**: Always check test prerequisites using validators -3. **Handle Resources**: Use the framework's resource management -4. **Use Pure Interfaces**: Write tests against the interfaces, not specific implementations -5. **Proper Logging**: Use structured logging with test context -6. **Clear Setup**: Keep test setup clear and explicit -7. 
**Error Handling**: Always handle errors and provide clear failure messages diff --git a/devnet-sdk/book/theme/css/footer.css b/devnet-sdk/book/theme/css/footer.css deleted file mode 100644 index cb7be80ab2145..0000000000000 --- a/devnet-sdk/book/theme/css/footer.css +++ /dev/null @@ -1,71 +0,0 @@ -.mdbook-footer { - width: 100%; - padding: 4rem 2.5rem; /* Increased padding */ - background-color: var(--bg); - border-top: 1px solid var(--sidebar-bg); - margin-top: 5rem; /* Increased margin */ -} - -.mdbook-footer .footer-container { - max-width: 1200px; - margin: 0 auto; - display: flex; - flex-direction: column; - gap: 2.5rem; /* Increased gap */ - align-items: center; -} - -.mdbook-footer .policy-links { - display: flex; - gap: 4rem; /* Increased gap between links */ - flex-wrap: wrap; - justify-content: center; -} - -.mdbook-footer .policy-links a { - color: var(--fg); - text-decoration: none; - transition: opacity 0.2s; - font-size: 1.35rem; /* Increased font size */ - opacity: 0.85; - font-weight: 400; - line-height: 1.6; /* Increased line height */ -} - -.mdbook-footer .policy-links a:hover { - opacity: 1; - text-decoration: underline; -} - -.mdbook-footer .copyright { - color: var(--fg); - font-size: 1.35rem; /* Increased font size */ - opacity: 0.85; - text-align: center; - font-weight: 400; - line-height: 1.6; /* Increased line height */ -} - -.mdbook-footer .copyright a { - color: var(--fg); - text-decoration: none; -} - -.mdbook-footer .copyright a:hover { - text-decoration: underline; -} - -@media (max-width: 640px) { - .mdbook-footer .policy-links { - gap: 2.5rem; /* Increased gap for mobile */ - } - - .mdbook-footer { - padding: 3rem 2rem; /* Increased padding for mobile */ - } - - .mdbook-footer .policy-links a, - .mdbook-footer .copyright { - font-size: 1.25rem; /* Increased font size for mobile */ - } -} \ No newline at end of file diff --git a/devnet-sdk/book/theme/js/footer.js b/devnet-sdk/book/theme/js/footer.js deleted file mode 100644 index 
014f44f2d6c54..0000000000000 --- a/devnet-sdk/book/theme/js/footer.js +++ /dev/null @@ -1,41 +0,0 @@ -// Create footer element -function createFooter() { - const footer = document.createElement('footer'); - footer.className = 'mdbook-footer'; - - const container = document.createElement('div'); - container.className = 'footer-container'; - - // Add legal links - const policyLinks = document.createElement('div'); - policyLinks.className = 'policy-links'; - - const links = [ - { href: 'https://optimism.io/community-agreement', text: 'Community Agreement' }, - { href: 'https://optimism.io/terms', text: 'Terms of Service' }, - { href: 'https://optimism.io/data-privacy-policy', text: 'Privacy Policy' } - ]; - - links.forEach(link => { - const a = document.createElement('a'); - a.href = link.href; - a.textContent = link.text; - policyLinks.appendChild(a); - }); - - // Add copyright notice - const copyright = document.createElement('div'); - copyright.className = 'copyright'; - copyright.innerHTML = `© ${new Date().getFullYear()} Optimism Foundation. 
All rights reserved.`; - - // Assemble footer - container.appendChild(policyLinks); - container.appendChild(copyright); - footer.appendChild(container); - - // Add footer to page - document.body.appendChild(footer); -} - -// Run after DOM is loaded -document.addEventListener('DOMContentLoaded', createFooter); \ No newline at end of file diff --git a/devnet-sdk/cmd/mf2kt/main.go b/devnet-sdk/cmd/mf2kt/main.go deleted file mode 100644 index de1ad36b4ec79..0000000000000 --- a/devnet-sdk/cmd/mf2kt/main.go +++ /dev/null @@ -1,72 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/devnet-sdk/kt" - "github.com/ethereum-optimism/optimism/devnet-sdk/manifest" - "github.com/urfave/cli/v2" - "gopkg.in/yaml.v3" -) - -func main() { - app := &cli.App{ - Name: "devnet", - Usage: "Generate Kurtosis parameters from a devnet manifest", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "manifest", - Aliases: []string{"m"}, - Usage: "Path to the manifest YAML file", - Required: true, - }, - &cli.StringFlag{ - Name: "output", - Aliases: []string{"o"}, - Usage: "Path to write the Kurtosis parameters file (default: stdout)", - }, - }, - Action: func(c *cli.Context) error { - // Read manifest file - manifestPath := c.String("manifest") - manifestBytes, err := os.ReadFile(manifestPath) - if err != nil { - return fmt.Errorf("failed to read manifest file: %w", err) - } - - // Parse manifest YAML - var m manifest.Manifest - if err := yaml.Unmarshal(manifestBytes, &m); err != nil { - return fmt.Errorf("failed to parse manifest YAML: %w", err) - } - - // Create visitor and process manifest - visitor := kt.NewKurtosisVisitor() - m.Accept(visitor) - - // Get params and write to file or stdout - params := visitor.GetParams() - paramsBytes, err := yaml.Marshal(params) - if err != nil { - return fmt.Errorf("failed to marshal params: %w", err) - } - - outputPath := c.String("output") - if outputPath != "" { - if err := os.WriteFile(outputPath, paramsBytes, 0644); 
err != nil { - return fmt.Errorf("failed to write params file: %w", err) - } - } else { - fmt.Print(string(paramsBytes)) - } - - return nil - }, - } - - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "error: %v\n", err) - os.Exit(1) - } -} diff --git a/devnet-sdk/constraints/constraints.go b/devnet-sdk/constraints/constraints.go deleted file mode 100644 index c4566bb350408..0000000000000 --- a/devnet-sdk/constraints/constraints.go +++ /dev/null @@ -1,26 +0,0 @@ -package constraints - -import ( - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" -) - -type WalletConstraint interface { - CheckWallet(wallet system.Wallet) bool -} - -type WalletConstraintFunc func(wallet system.Wallet) bool - -func (f WalletConstraintFunc) CheckWallet(wallet system.Wallet) bool { - return f(wallet) -} - -func WithBalance(amount types.Balance) WalletConstraint { - return WalletConstraintFunc(func(wallet system.Wallet) bool { - balance := wallet.Balance() - log.Debug("checking balance", "wallet", wallet.Address(), "balance", balance, "needed", amount) - return balance.GreaterThan(amount) - }) -} diff --git a/devnet-sdk/constraints/constraints_test.go b/devnet-sdk/constraints/constraints_test.go deleted file mode 100644 index b55bd2892d261..0000000000000 --- a/devnet-sdk/constraints/constraints_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package constraints - -import ( - "context" - "math/big" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/stretchr/testify/assert" -) - -type mockWallet struct { - balance types.Balance - address types.Address -} - -func (m 
mockWallet) Balance() types.Balance { - return m.balance -} - -func (m mockWallet) Address() types.Address { - return m.address -} - -func (m mockWallet) PrivateKey() types.Key { - key, _ := crypto.HexToECDSA("123") - return types.Key(key) -} - -func (m mockWallet) SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] { - panic("not implemented") -} - -func (m mockWallet) InitiateMessage(chainID types.ChainID, target common.Address, message []byte) types.WriteInvocation[any] { - panic("not implemented") -} - -func (m mockWallet) ExecuteMessage(identifier bindings.Identifier, sentMessage []byte) types.WriteInvocation[any] { - panic("not implemented") -} - -func (m mockWallet) Nonce() uint64 { - return 0 -} - -func (m mockWallet) Sign(tx system.Transaction) (system.Transaction, error) { - return tx, nil -} - -func (m mockWallet) Send(ctx context.Context, tx system.Transaction) error { - return nil -} - -func (m mockWallet) Transactor() *bind.TransactOpts { - return nil -} - -var _ system.Wallet = (*mockWallet)(nil) - -func newBigInt(x int64) *big.Int { - return big.NewInt(x) -} - -func TestWithBalance(t *testing.T) { - tests := []struct { - name string - walletBalance types.Balance - requiredAmount types.Balance - expectPass bool - }{ - { - name: "balance greater than required", - walletBalance: types.NewBalance(newBigInt(200)), - requiredAmount: types.NewBalance(newBigInt(100)), - expectPass: true, - }, - { - name: "balance equal to required", - walletBalance: types.NewBalance(newBigInt(100)), - requiredAmount: types.NewBalance(newBigInt(100)), - expectPass: false, - }, - { - name: "balance less than required", - walletBalance: types.NewBalance(newBigInt(50)), - requiredAmount: types.NewBalance(newBigInt(100)), - expectPass: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - wallet := mockWallet{ - balance: tt.walletBalance, - address: common.HexToAddress("0x123"), - } - constraint := 
WithBalance(tt.requiredAmount) - result := constraint.CheckWallet(wallet) - assert.Equal(t, tt.expectPass, result, "balance check should match expected result") - }) - } -} - -func TestWalletConstraintFunc(t *testing.T) { - called := false - testFunc := WalletConstraintFunc(func(wallet system.Wallet) bool { - called = true - return true - }) - - wallet := mockWallet{ - balance: types.NewBalance(newBigInt(100)), - address: common.HexToAddress("0x123"), - } - - result := testFunc.CheckWallet(wallet) - assert.True(t, called, "constraint function should have been called") - assert.True(t, result, "constraint function should return true") -} diff --git a/devnet-sdk/contracts/bindings/eventlogger.go b/devnet-sdk/contracts/bindings/eventlogger.go deleted file mode 100644 index c92749341e9cb..0000000000000 --- a/devnet-sdk/contracts/bindings/eventlogger.go +++ /dev/null @@ -1,68 +0,0 @@ -package bindings - -// This file was generated and edited by below sequences: -// cd packages/contracts-bedrock -// solc --optimize --bin --abi -o out src/integration/EventLogger.sol -// abigen --abi out/EventLogger.abi --bin out/EventLogger.bin --pkg bindings --out eventlogger.go -// Resulting eventlogger.go was moved to this file, and only the needed implementation was left here. - -import ( - "errors" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" -) - -// EventloggerMetaData contains all meta data concerning the Eventlogger contract. 
-var EventloggerMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"bytes32[]\",\"name\":\"_topics\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"emitLog\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"origin\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"logIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"chainId\",\"type\":\"uint256\"}],\"internalType\":\"structIdentifier\",\"name\":\"_id\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"_msgHash\",\"type\":\"bytes32\"}],\"name\":\"validateMessage\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", - Bin: "0x6080604052348015600e575f80fd5b506102ac8061001c5f395ff3fe608060405234801561000f575f80fd5b5060043610610034575f3560e01c8063ab4d6f7514610038578063edebc13b1461004d575b5f80fd5b61004b61004636600461013e565b610060565b005b61004b61005b36600461016c565b6100bd565b60405163ab4d6f7560e01b81526022602160991b019063ab4d6f759061008c9085908590600401610226565b5f604051808303815f87803b1580156100a3575f80fd5b505af11580156100b5573d5f803e3d5ffd5b505050505050565b80604051818482378486356020880135604089013560608a0135848015610102576001811461010a5760028114610113576003811461011d5760048114610128575f80fd5b8787a0610130565b848888a1610130565b83858989a2610130565b8284868a8aa3610130565b818385878b8ba45b505050505050505050505050565b5f8082840360c0811215610150575f80fd5b60a081121561015d575f80fd5b50919360a08501359350915050565b5f805f806040858703121561017f575f80fd5b843567ffffffffffffffff80821115610196575f80fd5b818701915087601f8301126101a9575f80fd5b8135818111156101b7575f80fd5b8860208260051b85010111156101cb575f80fd5b6020928301965094509086013590808211156101e5575f80fd5b8
18701915087601f8301126101f8575f80fd5b813581811115610206575f80fd5b886020828501011115610217575f80fd5b95989497505060200194505050565b60c0810183356001600160a01b038116808214610241575f80fd5b8352506020848101359083015260408085013590830152606080850135908301526080938401359382019390935260a001529056fea26469706673582212206da9bc84d514e1a78e2b4160f99f93aa58672040ece82f45ac2a878aeefdfbe164736f6c63430008190033", -} - -// EventloggerABI is the input ABI used to generate the binding from. -// Deprecated: Use EventloggerMetaData.ABI instead. -var EventloggerABI = EventloggerMetaData.ABI - -// EventloggerBin is the compiled bytecode used for deploying new contracts. -// Deprecated: Use EventloggerMetaData.Bin instead. -var EventloggerBin = EventloggerMetaData.Bin - -// DeployEventlogger deploys a new Ethereum contract, binding an instance of Eventlogger to it. -func DeployEventlogger(auth *bind.TransactOpts, backend bind.ContractBackend) (common.Address, *types.Transaction, *Eventlogger, error) { - parsed, err := EventloggerMetaData.GetAbi() - if err != nil { - return common.Address{}, nil, nil, err - } - if parsed == nil { - return common.Address{}, nil, nil, errors.New("GetABI returned nil") - } - - address, tx, contract, err := bind.DeployContract(auth, *parsed, common.FromHex(EventloggerBin), backend) - if err != nil { - return common.Address{}, nil, nil, err - } - return address, tx, &Eventlogger{EventloggerCaller: EventloggerCaller{contract: contract}, EventloggerTransactor: EventloggerTransactor{contract: contract}, EventloggerFilterer: EventloggerFilterer{contract: contract}}, nil -} - -// Eventlogger is an auto generated Go binding around an Ethereum contract. -type Eventlogger struct { - EventloggerCaller // Read-only binding to the contract - EventloggerTransactor // Write-only binding to the contract - EventloggerFilterer // Log filterer for contract events -} - -// EventloggerCaller is an auto generated read-only Go binding around an Ethereum contract. 
-type EventloggerCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// EventloggerTransactor is an auto generated write-only Go binding around an Ethereum contract. -type EventloggerTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// EventloggerFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type EventloggerFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} diff --git a/devnet-sdk/contracts/bindings/l2tol2crossdomainmessenger.go b/devnet-sdk/contracts/bindings/l2tol2crossdomainmessenger.go deleted file mode 100644 index 01d733398b636..0000000000000 --- a/devnet-sdk/contracts/bindings/l2tol2crossdomainmessenger.go +++ /dev/null @@ -1,788 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package bindings - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// Identifier is an auto generated low-level Go binding around an user-defined struct. -type Identifier struct { - Origin common.Address - BlockNumber *big.Int - LogIndex *big.Int - Timestamp *big.Int - ChainId *big.Int -} - -// L2ToL2CrossDomainMessengerMetaData contains all meta data concerning the L2ToL2CrossDomainMessenger contract. 
-var L2ToL2CrossDomainMessengerMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[],\"name\":\"crossDomainMessageContext\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"sender_\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"source_\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"crossDomainMessageSender\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"sender_\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"crossDomainMessageSource\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"source_\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageNonce\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"messageVersion\",\"outputs\":[{\"internalType\":\"uint16\",\"name\":\"\",\"type\":\"uint16\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"components\":[{\"internalType\":\"address\",\"name\":\"origin\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"logIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"timestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"chainId\",\"type\":\"uint256\"}],\"internalType\":\"structIdentifier\",\"name\":\"_id\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"_sentMessage\",\"type\":\"bytes\"}],\"name\":\"relayMessage\",\"outputs\":[{\"internalType\":\"bytes\",\"name\":\"returnData_\",\"type\":\"bytes\"}],\"stateMutability\":\"payable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_destination\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"_target\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_message\",\"t
ype\":\"bytes\"}],\"name\":\"sendMessage\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"name\":\"successfulMessages\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"version\",\"outputs\":[{\"internalType\":\"string\",\"name\":\"\",\"type\":\"string\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"source\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"bytes32\",\"name\":\"messageHash\",\"type\":\"bytes32\"}],\"name\":\"RelayedMessage\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"destination\",\"type\":\"uint256\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"target\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"messageNonce\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"message\",\"type\":\"bytes\"}],\"name\":\"SentMessage\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"EventPayloadNotSentMessage\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IdOriginNotL2ToL2CrossDomainMessenger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidChainId\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MessageAlreadyRelayed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MessageDestinationNotRelayChain\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MessageDestinationSameChain\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"MessageTargetCrossL2Inbox\",\"type\":\"error\
"},{\"inputs\":[],\"name\":\"MessageTargetL2ToL2CrossDomainMessenger\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"NotEntered\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrantCall\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"TargetCallFailed\",\"type\":\"error\"}]", -} - -// L2ToL2CrossDomainMessengerABI is the input ABI used to generate the binding from. -// Deprecated: Use L2ToL2CrossDomainMessengerMetaData.ABI instead. -var L2ToL2CrossDomainMessengerABI = L2ToL2CrossDomainMessengerMetaData.ABI - -// L2ToL2CrossDomainMessenger is an auto generated Go binding around an Ethereum contract. -type L2ToL2CrossDomainMessenger struct { - L2ToL2CrossDomainMessengerCaller // Read-only binding to the contract - L2ToL2CrossDomainMessengerTransactor // Write-only binding to the contract - L2ToL2CrossDomainMessengerFilterer // Log filterer for contract events -} - -// L2ToL2CrossDomainMessengerCaller is an auto generated read-only Go binding around an Ethereum contract. -type L2ToL2CrossDomainMessengerCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// L2ToL2CrossDomainMessengerTransactor is an auto generated write-only Go binding around an Ethereum contract. -type L2ToL2CrossDomainMessengerTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// L2ToL2CrossDomainMessengerFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type L2ToL2CrossDomainMessengerFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// L2ToL2CrossDomainMessengerSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. 
-type L2ToL2CrossDomainMessengerSession struct { - Contract *L2ToL2CrossDomainMessenger // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// L2ToL2CrossDomainMessengerCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type L2ToL2CrossDomainMessengerCallerSession struct { - Contract *L2ToL2CrossDomainMessengerCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// L2ToL2CrossDomainMessengerTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type L2ToL2CrossDomainMessengerTransactorSession struct { - Contract *L2ToL2CrossDomainMessengerTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// L2ToL2CrossDomainMessengerRaw is an auto generated low-level Go binding around an Ethereum contract. -type L2ToL2CrossDomainMessengerRaw struct { - Contract *L2ToL2CrossDomainMessenger // Generic contract binding to access the raw methods on -} - -// L2ToL2CrossDomainMessengerCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type L2ToL2CrossDomainMessengerCallerRaw struct { - Contract *L2ToL2CrossDomainMessengerCaller // Generic read-only contract binding to access the raw methods on -} - -// L2ToL2CrossDomainMessengerTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
-type L2ToL2CrossDomainMessengerTransactorRaw struct { - Contract *L2ToL2CrossDomainMessengerTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewL2ToL2CrossDomainMessenger creates a new instance of L2ToL2CrossDomainMessenger, bound to a specific deployed contract. -func NewL2ToL2CrossDomainMessenger(address common.Address, backend bind.ContractBackend) (*L2ToL2CrossDomainMessenger, error) { - contract, err := bindL2ToL2CrossDomainMessenger(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &L2ToL2CrossDomainMessenger{L2ToL2CrossDomainMessengerCaller: L2ToL2CrossDomainMessengerCaller{contract: contract}, L2ToL2CrossDomainMessengerTransactor: L2ToL2CrossDomainMessengerTransactor{contract: contract}, L2ToL2CrossDomainMessengerFilterer: L2ToL2CrossDomainMessengerFilterer{contract: contract}}, nil -} - -// NewL2ToL2CrossDomainMessengerCaller creates a new read-only instance of L2ToL2CrossDomainMessenger, bound to a specific deployed contract. -func NewL2ToL2CrossDomainMessengerCaller(address common.Address, caller bind.ContractCaller) (*L2ToL2CrossDomainMessengerCaller, error) { - contract, err := bindL2ToL2CrossDomainMessenger(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &L2ToL2CrossDomainMessengerCaller{contract: contract}, nil -} - -// NewL2ToL2CrossDomainMessengerTransactor creates a new write-only instance of L2ToL2CrossDomainMessenger, bound to a specific deployed contract. 
-func NewL2ToL2CrossDomainMessengerTransactor(address common.Address, transactor bind.ContractTransactor) (*L2ToL2CrossDomainMessengerTransactor, error) { - contract, err := bindL2ToL2CrossDomainMessenger(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &L2ToL2CrossDomainMessengerTransactor{contract: contract}, nil -} - -// NewL2ToL2CrossDomainMessengerFilterer creates a new log filterer instance of L2ToL2CrossDomainMessenger, bound to a specific deployed contract. -func NewL2ToL2CrossDomainMessengerFilterer(address common.Address, filterer bind.ContractFilterer) (*L2ToL2CrossDomainMessengerFilterer, error) { - contract, err := bindL2ToL2CrossDomainMessenger(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &L2ToL2CrossDomainMessengerFilterer{contract: contract}, nil -} - -// bindL2ToL2CrossDomainMessenger binds a generic wrapper to an already deployed contract. -func bindL2ToL2CrossDomainMessenger(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(L2ToL2CrossDomainMessengerABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _L2ToL2CrossDomainMessenger.Contract.L2ToL2CrossDomainMessengerCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. 
-func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.L2ToL2CrossDomainMessengerTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.L2ToL2CrossDomainMessengerTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _L2ToL2CrossDomainMessenger.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.contract.Transact(opts, method, params...) -} - -// CrossDomainMessageContext is a free data retrieval call binding the contract method 0x7936cbee. 
-// -// Solidity: function crossDomainMessageContext() view returns(address sender_, uint256 source_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCaller) CrossDomainMessageContext(opts *bind.CallOpts) (struct { - Sender common.Address - Source *big.Int -}, error) { - var out []interface{} - err := _L2ToL2CrossDomainMessenger.contract.Call(opts, &out, "crossDomainMessageContext") - - outstruct := new(struct { - Sender common.Address - Source *big.Int - }) - if err != nil { - return *outstruct, err - } - - outstruct.Sender = *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - outstruct.Source = *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) - - return *outstruct, err - -} - -// CrossDomainMessageContext is a free data retrieval call binding the contract method 0x7936cbee. -// -// Solidity: function crossDomainMessageContext() view returns(address sender_, uint256 source_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) CrossDomainMessageContext() (struct { - Sender common.Address - Source *big.Int -}, error) { - return _L2ToL2CrossDomainMessenger.Contract.CrossDomainMessageContext(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// CrossDomainMessageContext is a free data retrieval call binding the contract method 0x7936cbee. -// -// Solidity: function crossDomainMessageContext() view returns(address sender_, uint256 source_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerSession) CrossDomainMessageContext() (struct { - Sender common.Address - Source *big.Int -}, error) { - return _L2ToL2CrossDomainMessenger.Contract.CrossDomainMessageContext(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// CrossDomainMessageSender is a free data retrieval call binding the contract method 0x38ffde18. 
-// -// Solidity: function crossDomainMessageSender() view returns(address sender_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCaller) CrossDomainMessageSender(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _L2ToL2CrossDomainMessenger.contract.Call(opts, &out, "crossDomainMessageSender") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// CrossDomainMessageSender is a free data retrieval call binding the contract method 0x38ffde18. -// -// Solidity: function crossDomainMessageSender() view returns(address sender_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) CrossDomainMessageSender() (common.Address, error) { - return _L2ToL2CrossDomainMessenger.Contract.CrossDomainMessageSender(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// CrossDomainMessageSender is a free data retrieval call binding the contract method 0x38ffde18. -// -// Solidity: function crossDomainMessageSender() view returns(address sender_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerSession) CrossDomainMessageSender() (common.Address, error) { - return _L2ToL2CrossDomainMessenger.Contract.CrossDomainMessageSender(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// CrossDomainMessageSource is a free data retrieval call binding the contract method 0x24794462. 
-// -// Solidity: function crossDomainMessageSource() view returns(uint256 source_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCaller) CrossDomainMessageSource(opts *bind.CallOpts) (*big.Int, error) { - var out []interface{} - err := _L2ToL2CrossDomainMessenger.contract.Call(opts, &out, "crossDomainMessageSource") - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -// CrossDomainMessageSource is a free data retrieval call binding the contract method 0x24794462. -// -// Solidity: function crossDomainMessageSource() view returns(uint256 source_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) CrossDomainMessageSource() (*big.Int, error) { - return _L2ToL2CrossDomainMessenger.Contract.CrossDomainMessageSource(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// CrossDomainMessageSource is a free data retrieval call binding the contract method 0x24794462. -// -// Solidity: function crossDomainMessageSource() view returns(uint256 source_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerSession) CrossDomainMessageSource() (*big.Int, error) { - return _L2ToL2CrossDomainMessenger.Contract.CrossDomainMessageSource(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// MessageNonce is a free data retrieval call binding the contract method 0xecc70428. -// -// Solidity: function messageNonce() view returns(uint256) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCaller) MessageNonce(opts *bind.CallOpts) (*big.Int, error) { - var out []interface{} - err := _L2ToL2CrossDomainMessenger.contract.Call(opts, &out, "messageNonce") - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -// MessageNonce is a free data retrieval call binding the contract method 0xecc70428. 
-// -// Solidity: function messageNonce() view returns(uint256) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) MessageNonce() (*big.Int, error) { - return _L2ToL2CrossDomainMessenger.Contract.MessageNonce(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// MessageNonce is a free data retrieval call binding the contract method 0xecc70428. -// -// Solidity: function messageNonce() view returns(uint256) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerSession) MessageNonce() (*big.Int, error) { - return _L2ToL2CrossDomainMessenger.Contract.MessageNonce(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// MessageVersion is a free data retrieval call binding the contract method 0x52617f3c. -// -// Solidity: function messageVersion() view returns(uint16) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCaller) MessageVersion(opts *bind.CallOpts) (uint16, error) { - var out []interface{} - err := _L2ToL2CrossDomainMessenger.contract.Call(opts, &out, "messageVersion") - - if err != nil { - return *new(uint16), err - } - - out0 := *abi.ConvertType(out[0], new(uint16)).(*uint16) - - return out0, err - -} - -// MessageVersion is a free data retrieval call binding the contract method 0x52617f3c. -// -// Solidity: function messageVersion() view returns(uint16) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) MessageVersion() (uint16, error) { - return _L2ToL2CrossDomainMessenger.Contract.MessageVersion(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// MessageVersion is a free data retrieval call binding the contract method 0x52617f3c. -// -// Solidity: function messageVersion() view returns(uint16) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerSession) MessageVersion() (uint16, error) { - return _L2ToL2CrossDomainMessenger.Contract.MessageVersion(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// SuccessfulMessages is a free data retrieval call binding the contract method 0xb1b1b209. 
-// -// Solidity: function successfulMessages(bytes32 ) view returns(bool) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCaller) SuccessfulMessages(opts *bind.CallOpts, arg0 [32]byte) (bool, error) { - var out []interface{} - err := _L2ToL2CrossDomainMessenger.contract.Call(opts, &out, "successfulMessages", arg0) - - if err != nil { - return *new(bool), err - } - - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) - - return out0, err - -} - -// SuccessfulMessages is a free data retrieval call binding the contract method 0xb1b1b209. -// -// Solidity: function successfulMessages(bytes32 ) view returns(bool) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) SuccessfulMessages(arg0 [32]byte) (bool, error) { - return _L2ToL2CrossDomainMessenger.Contract.SuccessfulMessages(&_L2ToL2CrossDomainMessenger.CallOpts, arg0) -} - -// SuccessfulMessages is a free data retrieval call binding the contract method 0xb1b1b209. -// -// Solidity: function successfulMessages(bytes32 ) view returns(bool) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerSession) SuccessfulMessages(arg0 [32]byte) (bool, error) { - return _L2ToL2CrossDomainMessenger.Contract.SuccessfulMessages(&_L2ToL2CrossDomainMessenger.CallOpts, arg0) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCaller) Version(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _L2ToL2CrossDomainMessenger.contract.Call(opts, &out, "version") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. 
-// -// Solidity: function version() view returns(string) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) Version() (string, error) { - return _L2ToL2CrossDomainMessenger.Contract.Version(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerCallerSession) Version() (string, error) { - return _L2ToL2CrossDomainMessenger.Contract.Version(&_L2ToL2CrossDomainMessenger.CallOpts) -} - -// RelayMessage is a paid mutator transaction binding the contract method 0x8d1d298f. -// -// Solidity: function relayMessage((address,uint256,uint256,uint256,uint256) _id, bytes _sentMessage) payable returns(bytes returnData_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerTransactor) RelayMessage(opts *bind.TransactOpts, _id Identifier, _sentMessage []byte) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.contract.Transact(opts, "relayMessage", _id, _sentMessage) -} - -// RelayMessage is a paid mutator transaction binding the contract method 0x8d1d298f. -// -// Solidity: function relayMessage((address,uint256,uint256,uint256,uint256) _id, bytes _sentMessage) payable returns(bytes returnData_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) RelayMessage(_id Identifier, _sentMessage []byte) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.RelayMessage(&_L2ToL2CrossDomainMessenger.TransactOpts, _id, _sentMessage) -} - -// RelayMessage is a paid mutator transaction binding the contract method 0x8d1d298f. 
-// -// Solidity: function relayMessage((address,uint256,uint256,uint256,uint256) _id, bytes _sentMessage) payable returns(bytes returnData_) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerTransactorSession) RelayMessage(_id Identifier, _sentMessage []byte) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.RelayMessage(&_L2ToL2CrossDomainMessenger.TransactOpts, _id, _sentMessage) -} - -// SendMessage is a paid mutator transaction binding the contract method 0x7056f41f. -// -// Solidity: function sendMessage(uint256 _destination, address _target, bytes _message) returns(bytes32) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerTransactor) SendMessage(opts *bind.TransactOpts, _destination *big.Int, _target common.Address, _message []byte) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.contract.Transact(opts, "sendMessage", _destination, _target, _message) -} - -// SendMessage is a paid mutator transaction binding the contract method 0x7056f41f. -// -// Solidity: function sendMessage(uint256 _destination, address _target, bytes _message) returns(bytes32) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerSession) SendMessage(_destination *big.Int, _target common.Address, _message []byte) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.SendMessage(&_L2ToL2CrossDomainMessenger.TransactOpts, _destination, _target, _message) -} - -// SendMessage is a paid mutator transaction binding the contract method 0x7056f41f. 
-// -// Solidity: function sendMessage(uint256 _destination, address _target, bytes _message) returns(bytes32) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerTransactorSession) SendMessage(_destination *big.Int, _target common.Address, _message []byte) (*types.Transaction, error) { - return _L2ToL2CrossDomainMessenger.Contract.SendMessage(&_L2ToL2CrossDomainMessenger.TransactOpts, _destination, _target, _message) -} - -// L2ToL2CrossDomainMessengerRelayedMessageIterator is returned from FilterRelayedMessage and is used to iterate over the raw logs and unpacked data for RelayedMessage events raised by the L2ToL2CrossDomainMessenger contract. -type L2ToL2CrossDomainMessengerRelayedMessageIterator struct { - Event *L2ToL2CrossDomainMessengerRelayedMessage // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *L2ToL2CrossDomainMessengerRelayedMessageIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(L2ToL2CrossDomainMessengerRelayedMessage) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(L2ToL2CrossDomainMessengerRelayedMessage) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *L2ToL2CrossDomainMessengerRelayedMessageIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *L2ToL2CrossDomainMessengerRelayedMessageIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// L2ToL2CrossDomainMessengerRelayedMessage represents a RelayedMessage event raised by the L2ToL2CrossDomainMessenger contract. -type L2ToL2CrossDomainMessengerRelayedMessage struct { - Source *big.Int - MessageNonce *big.Int - MessageHash [32]byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterRelayedMessage is a free log retrieval operation binding the contract event 0x5948076590932b9d173029c7df03fe386e755a61c86c7fe2671011a2faa2a379. 
-// -// Solidity: event RelayedMessage(uint256 indexed source, uint256 indexed messageNonce, bytes32 indexed messageHash) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerFilterer) FilterRelayedMessage(opts *bind.FilterOpts, source []*big.Int, messageNonce []*big.Int, messageHash [][32]byte) (*L2ToL2CrossDomainMessengerRelayedMessageIterator, error) { - - var sourceRule []interface{} - for _, sourceItem := range source { - sourceRule = append(sourceRule, sourceItem) - } - var messageNonceRule []interface{} - for _, messageNonceItem := range messageNonce { - messageNonceRule = append(messageNonceRule, messageNonceItem) - } - var messageHashRule []interface{} - for _, messageHashItem := range messageHash { - messageHashRule = append(messageHashRule, messageHashItem) - } - - logs, sub, err := _L2ToL2CrossDomainMessenger.contract.FilterLogs(opts, "RelayedMessage", sourceRule, messageNonceRule, messageHashRule) - if err != nil { - return nil, err - } - return &L2ToL2CrossDomainMessengerRelayedMessageIterator{contract: _L2ToL2CrossDomainMessenger.contract, event: "RelayedMessage", logs: logs, sub: sub}, nil -} - -// WatchRelayedMessage is a free log subscription operation binding the contract event 0x5948076590932b9d173029c7df03fe386e755a61c86c7fe2671011a2faa2a379. 
-// -// Solidity: event RelayedMessage(uint256 indexed source, uint256 indexed messageNonce, bytes32 indexed messageHash) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerFilterer) WatchRelayedMessage(opts *bind.WatchOpts, sink chan<- *L2ToL2CrossDomainMessengerRelayedMessage, source []*big.Int, messageNonce []*big.Int, messageHash [][32]byte) (event.Subscription, error) { - - var sourceRule []interface{} - for _, sourceItem := range source { - sourceRule = append(sourceRule, sourceItem) - } - var messageNonceRule []interface{} - for _, messageNonceItem := range messageNonce { - messageNonceRule = append(messageNonceRule, messageNonceItem) - } - var messageHashRule []interface{} - for _, messageHashItem := range messageHash { - messageHashRule = append(messageHashRule, messageHashItem) - } - - logs, sub, err := _L2ToL2CrossDomainMessenger.contract.WatchLogs(opts, "RelayedMessage", sourceRule, messageNonceRule, messageHashRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(L2ToL2CrossDomainMessengerRelayedMessage) - if err := _L2ToL2CrossDomainMessenger.contract.UnpackLog(event, "RelayedMessage", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseRelayedMessage is a log parse operation binding the contract event 0x5948076590932b9d173029c7df03fe386e755a61c86c7fe2671011a2faa2a379. 
-// -// Solidity: event RelayedMessage(uint256 indexed source, uint256 indexed messageNonce, bytes32 indexed messageHash) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerFilterer) ParseRelayedMessage(log types.Log) (*L2ToL2CrossDomainMessengerRelayedMessage, error) { - event := new(L2ToL2CrossDomainMessengerRelayedMessage) - if err := _L2ToL2CrossDomainMessenger.contract.UnpackLog(event, "RelayedMessage", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// L2ToL2CrossDomainMessengerSentMessageIterator is returned from FilterSentMessage and is used to iterate over the raw logs and unpacked data for SentMessage events raised by the L2ToL2CrossDomainMessenger contract. -type L2ToL2CrossDomainMessengerSentMessageIterator struct { - Event *L2ToL2CrossDomainMessengerSentMessage // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *L2ToL2CrossDomainMessengerSentMessageIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(L2ToL2CrossDomainMessengerSentMessage) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(L2ToL2CrossDomainMessengerSentMessage) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *L2ToL2CrossDomainMessengerSentMessageIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *L2ToL2CrossDomainMessengerSentMessageIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// L2ToL2CrossDomainMessengerSentMessage represents a SentMessage event raised by the L2ToL2CrossDomainMessenger contract. -type L2ToL2CrossDomainMessengerSentMessage struct { - Destination *big.Int - Target common.Address - MessageNonce *big.Int - Sender common.Address - Message []byte - Raw types.Log // Blockchain specific contextual infos -} - -// FilterSentMessage is a free log retrieval operation binding the contract event 0x382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f320. 
-// -// Solidity: event SentMessage(uint256 indexed destination, address indexed target, uint256 indexed messageNonce, address sender, bytes message) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerFilterer) FilterSentMessage(opts *bind.FilterOpts, destination []*big.Int, target []common.Address, messageNonce []*big.Int) (*L2ToL2CrossDomainMessengerSentMessageIterator, error) { - - var destinationRule []interface{} - for _, destinationItem := range destination { - destinationRule = append(destinationRule, destinationItem) - } - var targetRule []interface{} - for _, targetItem := range target { - targetRule = append(targetRule, targetItem) - } - var messageNonceRule []interface{} - for _, messageNonceItem := range messageNonce { - messageNonceRule = append(messageNonceRule, messageNonceItem) - } - - logs, sub, err := _L2ToL2CrossDomainMessenger.contract.FilterLogs(opts, "SentMessage", destinationRule, targetRule, messageNonceRule) - if err != nil { - return nil, err - } - return &L2ToL2CrossDomainMessengerSentMessageIterator{contract: _L2ToL2CrossDomainMessenger.contract, event: "SentMessage", logs: logs, sub: sub}, nil -} - -// WatchSentMessage is a free log subscription operation binding the contract event 0x382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f320. 
-// -// Solidity: event SentMessage(uint256 indexed destination, address indexed target, uint256 indexed messageNonce, address sender, bytes message) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerFilterer) WatchSentMessage(opts *bind.WatchOpts, sink chan<- *L2ToL2CrossDomainMessengerSentMessage, destination []*big.Int, target []common.Address, messageNonce []*big.Int) (event.Subscription, error) { - - var destinationRule []interface{} - for _, destinationItem := range destination { - destinationRule = append(destinationRule, destinationItem) - } - var targetRule []interface{} - for _, targetItem := range target { - targetRule = append(targetRule, targetItem) - } - var messageNonceRule []interface{} - for _, messageNonceItem := range messageNonce { - messageNonceRule = append(messageNonceRule, messageNonceItem) - } - - logs, sub, err := _L2ToL2CrossDomainMessenger.contract.WatchLogs(opts, "SentMessage", destinationRule, targetRule, messageNonceRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(L2ToL2CrossDomainMessengerSentMessage) - if err := _L2ToL2CrossDomainMessenger.contract.UnpackLog(event, "SentMessage", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseSentMessage is a log parse operation binding the contract event 0x382409ac69001e11931a28435afef442cbfd20d9891907e8fa373ba7d351f320. 
-// -// Solidity: event SentMessage(uint256 indexed destination, address indexed target, uint256 indexed messageNonce, address sender, bytes message) -func (_L2ToL2CrossDomainMessenger *L2ToL2CrossDomainMessengerFilterer) ParseSentMessage(log types.Log) (*L2ToL2CrossDomainMessengerSentMessage, error) { - event := new(L2ToL2CrossDomainMessengerSentMessage) - if err := _L2ToL2CrossDomainMessenger.contract.UnpackLog(event, "SentMessage", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/devnet-sdk/contracts/bindings/superchainweth.go b/devnet-sdk/contracts/bindings/superchainweth.go deleted file mode 100644 index e0049ff999669..0000000000000 --- a/devnet-sdk/contracts/bindings/superchainweth.go +++ /dev/null @@ -1,1879 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package bindings - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription -) - -// SuperchainWETHMetaData contains all meta data concerning the SuperchainWETH contract. 
-var SuperchainWETHMetaData = &bind.MetaData{ - ABI: "[{\"type\":\"fallback\",\"stateMutability\":\"payable\"},{\"type\":\"receive\",\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"allowance\",\"inputs\":[{\"name\":\"owner\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"spender\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"approve\",\"inputs\":[{\"name\":\"guy\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"balanceOf\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"internalType\":\"address\"}],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"crosschainBurn\",\"inputs\":[{\"name\":\"_from\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"crosschainMint\",\"inputs\":[{\"name\":\"_to\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"decimals\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint8\",\"internalType\":\"uint8\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"deposit\",\"inputs\":[],\"outputs\":[],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"name\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"nam
e\":\"relayETH\",\"inputs\":[{\"name\":\"_from\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_to\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"sendETH\",\"inputs\":[{\"name\":\"_to\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"_chainId\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"msgHash_\",\"type\":\"bytes32\",\"internalType\":\"bytes32\"}],\"stateMutability\":\"payable\"},{\"type\":\"function\",\"name\":\"supportsInterface\",\"inputs\":[{\"name\":\"_interfaceId\",\"type\":\"bytes4\",\"internalType\":\"bytes4\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"symbol\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internalType\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"totalSupply\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"transfer\",\"inputs\":[{\"name\":\"dst\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"transferFrom\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"dst\",\"type\":\"address\",\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[{\"name\":\"\",\"type\":\"bool\",\"internalType\":\"bool\"}],\"stateMutability\":\"nonpayable\"},{\"type\":\"function\",\"name\":\"version\",\"inputs\":[],\"outputs\":[{\"name\":\"\",\"type\":\"string\",\"internal
Type\":\"string\"}],\"stateMutability\":\"view\"},{\"type\":\"function\",\"name\":\"withdraw\",\"inputs\":[{\"name\":\"_amount\",\"type\":\"uint256\",\"internalType\":\"uint256\"}],\"outputs\":[],\"stateMutability\":\"nonpayable\"},{\"type\":\"event\",\"name\":\"Approval\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"guy\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"CrosschainBurn\",\"inputs\":[{\"name\":\"from\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"sender\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"CrosschainMint\",\"inputs\":[{\"name\":\"to\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"sender\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Deposit\",\"inputs\":[{\"name\":\"dst\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"RelayETH\",\"inputs\":[{\"name\":\"from\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"to\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"source\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"SendETH\",\"inputs\":[{\"name\":
\"from\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"to\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"amount\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"},{\"name\":\"destination\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Transfer\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"dst\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"event\",\"name\":\"Withdrawal\",\"inputs\":[{\"name\":\"src\",\"type\":\"address\",\"indexed\":true,\"internalType\":\"address\"},{\"name\":\"wad\",\"type\":\"uint256\",\"indexed\":false,\"internalType\":\"uint256\"}],\"anonymous\":false},{\"type\":\"error\",\"name\":\"InvalidCrossDomainSender\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"NotCustomGasToken\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"Unauthorized\",\"inputs\":[]},{\"type\":\"error\",\"name\":\"ZeroAddress\",\"inputs\":[]}]", -} - -// SuperchainWETHABI is the input ABI used to generate the binding from. -// Deprecated: Use SuperchainWETHMetaData.ABI instead. -var SuperchainWETHABI = SuperchainWETHMetaData.ABI - -// SuperchainWETH is an auto generated Go binding around an Ethereum contract. -type SuperchainWETH struct { - SuperchainWETHCaller // Read-only binding to the contract - SuperchainWETHTransactor // Write-only binding to the contract - SuperchainWETHFilterer // Log filterer for contract events -} - -// SuperchainWETHCaller is an auto generated read-only Go binding around an Ethereum contract. 
-type SuperchainWETHCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SuperchainWETHTransactor is an auto generated write-only Go binding around an Ethereum contract. -type SuperchainWETHTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SuperchainWETHFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type SuperchainWETHFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// SuperchainWETHSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type SuperchainWETHSession struct { - Contract *SuperchainWETH // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// SuperchainWETHCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type SuperchainWETHCallerSession struct { - Contract *SuperchainWETHCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// SuperchainWETHTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type SuperchainWETHTransactorSession struct { - Contract *SuperchainWETHTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// SuperchainWETHRaw is an auto generated low-level Go binding around an Ethereum contract. 
-type SuperchainWETHRaw struct { - Contract *SuperchainWETH // Generic contract binding to access the raw methods on -} - -// SuperchainWETHCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type SuperchainWETHCallerRaw struct { - Contract *SuperchainWETHCaller // Generic read-only contract binding to access the raw methods on -} - -// SuperchainWETHTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type SuperchainWETHTransactorRaw struct { - Contract *SuperchainWETHTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewSuperchainWETH creates a new instance of SuperchainWETH, bound to a specific deployed contract. -func NewSuperchainWETH(address common.Address, backend bind.ContractBackend) (*SuperchainWETH, error) { - contract, err := bindSuperchainWETH(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &SuperchainWETH{SuperchainWETHCaller: SuperchainWETHCaller{contract: contract}, SuperchainWETHTransactor: SuperchainWETHTransactor{contract: contract}, SuperchainWETHFilterer: SuperchainWETHFilterer{contract: contract}}, nil -} - -// NewSuperchainWETHCaller creates a new read-only instance of SuperchainWETH, bound to a specific deployed contract. -func NewSuperchainWETHCaller(address common.Address, caller bind.ContractCaller) (*SuperchainWETHCaller, error) { - contract, err := bindSuperchainWETH(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &SuperchainWETHCaller{contract: contract}, nil -} - -// NewSuperchainWETHTransactor creates a new write-only instance of SuperchainWETH, bound to a specific deployed contract. 
-func NewSuperchainWETHTransactor(address common.Address, transactor bind.ContractTransactor) (*SuperchainWETHTransactor, error) { - contract, err := bindSuperchainWETH(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &SuperchainWETHTransactor{contract: contract}, nil -} - -// NewSuperchainWETHFilterer creates a new log filterer instance of SuperchainWETH, bound to a specific deployed contract. -func NewSuperchainWETHFilterer(address common.Address, filterer bind.ContractFilterer) (*SuperchainWETHFilterer, error) { - contract, err := bindSuperchainWETH(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &SuperchainWETHFilterer{contract: contract}, nil -} - -// bindSuperchainWETH binds a generic wrapper to an already deployed contract. -func bindSuperchainWETH(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := abi.JSON(strings.NewReader(SuperchainWETHABI)) - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_SuperchainWETH *SuperchainWETHRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _SuperchainWETH.Contract.SuperchainWETHCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. 
-func (_SuperchainWETH *SuperchainWETHRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _SuperchainWETH.Contract.SuperchainWETHTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_SuperchainWETH *SuperchainWETHRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _SuperchainWETH.Contract.SuperchainWETHTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_SuperchainWETH *SuperchainWETHCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _SuperchainWETH.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_SuperchainWETH *SuperchainWETHTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _SuperchainWETH.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_SuperchainWETH *SuperchainWETHTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _SuperchainWETH.Contract.contract.Transact(opts, method, params...) -} - -// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. 
-// -// Solidity: function allowance(address owner, address spender) view returns(uint256) -func (_SuperchainWETH *SuperchainWETHCaller) Allowance(opts *bind.CallOpts, owner common.Address, spender common.Address) (*big.Int, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "allowance", owner, spender) - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. -// -// Solidity: function allowance(address owner, address spender) view returns(uint256) -func (_SuperchainWETH *SuperchainWETHSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { - return _SuperchainWETH.Contract.Allowance(&_SuperchainWETH.CallOpts, owner, spender) -} - -// Allowance is a free data retrieval call binding the contract method 0xdd62ed3e. -// -// Solidity: function allowance(address owner, address spender) view returns(uint256) -func (_SuperchainWETH *SuperchainWETHCallerSession) Allowance(owner common.Address, spender common.Address) (*big.Int, error) { - return _SuperchainWETH.Contract.Allowance(&_SuperchainWETH.CallOpts, owner, spender) -} - -// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. -// -// Solidity: function balanceOf(address src) view returns(uint256) -func (_SuperchainWETH *SuperchainWETHCaller) BalanceOf(opts *bind.CallOpts, src common.Address) (*big.Int, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "balanceOf", src) - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. 
-// -// Solidity: function balanceOf(address src) view returns(uint256) -func (_SuperchainWETH *SuperchainWETHSession) BalanceOf(src common.Address) (*big.Int, error) { - return _SuperchainWETH.Contract.BalanceOf(&_SuperchainWETH.CallOpts, src) -} - -// BalanceOf is a free data retrieval call binding the contract method 0x70a08231. -// -// Solidity: function balanceOf(address src) view returns(uint256) -func (_SuperchainWETH *SuperchainWETHCallerSession) BalanceOf(src common.Address) (*big.Int, error) { - return _SuperchainWETH.Contract.BalanceOf(&_SuperchainWETH.CallOpts, src) -} - -// Decimals is a free data retrieval call binding the contract method 0x313ce567. -// -// Solidity: function decimals() view returns(uint8) -func (_SuperchainWETH *SuperchainWETHCaller) Decimals(opts *bind.CallOpts) (uint8, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "decimals") - - if err != nil { - return *new(uint8), err - } - - out0 := *abi.ConvertType(out[0], new(uint8)).(*uint8) - - return out0, err - -} - -// Decimals is a free data retrieval call binding the contract method 0x313ce567. -// -// Solidity: function decimals() view returns(uint8) -func (_SuperchainWETH *SuperchainWETHSession) Decimals() (uint8, error) { - return _SuperchainWETH.Contract.Decimals(&_SuperchainWETH.CallOpts) -} - -// Decimals is a free data retrieval call binding the contract method 0x313ce567. -// -// Solidity: function decimals() view returns(uint8) -func (_SuperchainWETH *SuperchainWETHCallerSession) Decimals() (uint8, error) { - return _SuperchainWETH.Contract.Decimals(&_SuperchainWETH.CallOpts) -} - -// Name is a free data retrieval call binding the contract method 0x06fdde03. 
-// -// Solidity: function name() view returns(string) -func (_SuperchainWETH *SuperchainWETHCaller) Name(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "name") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -// Name is a free data retrieval call binding the contract method 0x06fdde03. -// -// Solidity: function name() view returns(string) -func (_SuperchainWETH *SuperchainWETHSession) Name() (string, error) { - return _SuperchainWETH.Contract.Name(&_SuperchainWETH.CallOpts) -} - -// Name is a free data retrieval call binding the contract method 0x06fdde03. -// -// Solidity: function name() view returns(string) -func (_SuperchainWETH *SuperchainWETHCallerSession) Name() (string, error) { - return _SuperchainWETH.Contract.Name(&_SuperchainWETH.CallOpts) -} - -// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. -// -// Solidity: function supportsInterface(bytes4 _interfaceId) view returns(bool) -func (_SuperchainWETH *SuperchainWETHCaller) SupportsInterface(opts *bind.CallOpts, _interfaceId [4]byte) (bool, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "supportsInterface", _interfaceId) - - if err != nil { - return *new(bool), err - } - - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) - - return out0, err - -} - -// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. -// -// Solidity: function supportsInterface(bytes4 _interfaceId) view returns(bool) -func (_SuperchainWETH *SuperchainWETHSession) SupportsInterface(_interfaceId [4]byte) (bool, error) { - return _SuperchainWETH.Contract.SupportsInterface(&_SuperchainWETH.CallOpts, _interfaceId) -} - -// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. 
-// -// Solidity: function supportsInterface(bytes4 _interfaceId) view returns(bool) -func (_SuperchainWETH *SuperchainWETHCallerSession) SupportsInterface(_interfaceId [4]byte) (bool, error) { - return _SuperchainWETH.Contract.SupportsInterface(&_SuperchainWETH.CallOpts, _interfaceId) -} - -// Symbol is a free data retrieval call binding the contract method 0x95d89b41. -// -// Solidity: function symbol() view returns(string) -func (_SuperchainWETH *SuperchainWETHCaller) Symbol(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "symbol") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -// Symbol is a free data retrieval call binding the contract method 0x95d89b41. -// -// Solidity: function symbol() view returns(string) -func (_SuperchainWETH *SuperchainWETHSession) Symbol() (string, error) { - return _SuperchainWETH.Contract.Symbol(&_SuperchainWETH.CallOpts) -} - -// Symbol is a free data retrieval call binding the contract method 0x95d89b41. -// -// Solidity: function symbol() view returns(string) -func (_SuperchainWETH *SuperchainWETHCallerSession) Symbol() (string, error) { - return _SuperchainWETH.Contract.Symbol(&_SuperchainWETH.CallOpts) -} - -// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. -// -// Solidity: function totalSupply() view returns(uint256) -func (_SuperchainWETH *SuperchainWETHCaller) TotalSupply(opts *bind.CallOpts) (*big.Int, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "totalSupply") - - if err != nil { - return *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new(*big.Int)).(**big.Int) - - return out0, err - -} - -// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. 
-// -// Solidity: function totalSupply() view returns(uint256) -func (_SuperchainWETH *SuperchainWETHSession) TotalSupply() (*big.Int, error) { - return _SuperchainWETH.Contract.TotalSupply(&_SuperchainWETH.CallOpts) -} - -// TotalSupply is a free data retrieval call binding the contract method 0x18160ddd. -// -// Solidity: function totalSupply() view returns(uint256) -func (_SuperchainWETH *SuperchainWETHCallerSession) TotalSupply() (*big.Int, error) { - return _SuperchainWETH.Contract.TotalSupply(&_SuperchainWETH.CallOpts) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_SuperchainWETH *SuperchainWETHCaller) Version(opts *bind.CallOpts) (string, error) { - var out []interface{} - err := _SuperchainWETH.contract.Call(opts, &out, "version") - - if err != nil { - return *new(string), err - } - - out0 := *abi.ConvertType(out[0], new(string)).(*string) - - return out0, err - -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_SuperchainWETH *SuperchainWETHSession) Version() (string, error) { - return _SuperchainWETH.Contract.Version(&_SuperchainWETH.CallOpts) -} - -// Version is a free data retrieval call binding the contract method 0x54fd4d50. -// -// Solidity: function version() view returns(string) -func (_SuperchainWETH *SuperchainWETHCallerSession) Version() (string, error) { - return _SuperchainWETH.Contract.Version(&_SuperchainWETH.CallOpts) -} - -// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. 
-// -// Solidity: function approve(address guy, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHTransactor) Approve(opts *bind.TransactOpts, guy common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "approve", guy, wad) -} - -// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. -// -// Solidity: function approve(address guy, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHSession) Approve(guy common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Approve(&_SuperchainWETH.TransactOpts, guy, wad) -} - -// Approve is a paid mutator transaction binding the contract method 0x095ea7b3. -// -// Solidity: function approve(address guy, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHTransactorSession) Approve(guy common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Approve(&_SuperchainWETH.TransactOpts, guy, wad) -} - -// CrosschainBurn is a paid mutator transaction binding the contract method 0x2b8c49e3. -// -// Solidity: function crosschainBurn(address _from, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactor) CrosschainBurn(opts *bind.TransactOpts, _from common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "crosschainBurn", _from, _amount) -} - -// CrosschainBurn is a paid mutator transaction binding the contract method 0x2b8c49e3. -// -// Solidity: function crosschainBurn(address _from, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHSession) CrosschainBurn(_from common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.CrosschainBurn(&_SuperchainWETH.TransactOpts, _from, _amount) -} - -// CrosschainBurn is a paid mutator transaction binding the contract method 0x2b8c49e3. 
-// -// Solidity: function crosschainBurn(address _from, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactorSession) CrosschainBurn(_from common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.CrosschainBurn(&_SuperchainWETH.TransactOpts, _from, _amount) -} - -// CrosschainMint is a paid mutator transaction binding the contract method 0x18bf5077. -// -// Solidity: function crosschainMint(address _to, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactor) CrosschainMint(opts *bind.TransactOpts, _to common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "crosschainMint", _to, _amount) -} - -// CrosschainMint is a paid mutator transaction binding the contract method 0x18bf5077. -// -// Solidity: function crosschainMint(address _to, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHSession) CrosschainMint(_to common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.CrosschainMint(&_SuperchainWETH.TransactOpts, _to, _amount) -} - -// CrosschainMint is a paid mutator transaction binding the contract method 0x18bf5077. -// -// Solidity: function crosschainMint(address _to, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactorSession) CrosschainMint(_to common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.CrosschainMint(&_SuperchainWETH.TransactOpts, _to, _amount) -} - -// Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. -// -// Solidity: function deposit() payable returns() -func (_SuperchainWETH *SuperchainWETHTransactor) Deposit(opts *bind.TransactOpts) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "deposit") -} - -// Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. 
-// -// Solidity: function deposit() payable returns() -func (_SuperchainWETH *SuperchainWETHSession) Deposit() (*types.Transaction, error) { - return _SuperchainWETH.Contract.Deposit(&_SuperchainWETH.TransactOpts) -} - -// Deposit is a paid mutator transaction binding the contract method 0xd0e30db0. -// -// Solidity: function deposit() payable returns() -func (_SuperchainWETH *SuperchainWETHTransactorSession) Deposit() (*types.Transaction, error) { - return _SuperchainWETH.Contract.Deposit(&_SuperchainWETH.TransactOpts) -} - -// RelayETH is a paid mutator transaction binding the contract method 0x4f0edcc9. -// -// Solidity: function relayETH(address _from, address _to, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactor) RelayETH(opts *bind.TransactOpts, _from common.Address, _to common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "relayETH", _from, _to, _amount) -} - -// RelayETH is a paid mutator transaction binding the contract method 0x4f0edcc9. -// -// Solidity: function relayETH(address _from, address _to, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHSession) RelayETH(_from common.Address, _to common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.RelayETH(&_SuperchainWETH.TransactOpts, _from, _to, _amount) -} - -// RelayETH is a paid mutator transaction binding the contract method 0x4f0edcc9. -// -// Solidity: function relayETH(address _from, address _to, uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactorSession) RelayETH(_from common.Address, _to common.Address, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.RelayETH(&_SuperchainWETH.TransactOpts, _from, _to, _amount) -} - -// SendETH is a paid mutator transaction binding the contract method 0x64a197f3. 
-// -// Solidity: function sendETH(address _to, uint256 _chainId) payable returns(bytes32 msgHash_) -func (_SuperchainWETH *SuperchainWETHTransactor) SendETH(opts *bind.TransactOpts, _to common.Address, _chainId *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "sendETH", _to, _chainId) -} - -// SendETH is a paid mutator transaction binding the contract method 0x64a197f3. -// -// Solidity: function sendETH(address _to, uint256 _chainId) payable returns(bytes32 msgHash_) -func (_SuperchainWETH *SuperchainWETHSession) SendETH(_to common.Address, _chainId *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.SendETH(&_SuperchainWETH.TransactOpts, _to, _chainId) -} - -// SendETH is a paid mutator transaction binding the contract method 0x64a197f3. -// -// Solidity: function sendETH(address _to, uint256 _chainId) payable returns(bytes32 msgHash_) -func (_SuperchainWETH *SuperchainWETHTransactorSession) SendETH(_to common.Address, _chainId *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.SendETH(&_SuperchainWETH.TransactOpts, _to, _chainId) -} - -// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. -// -// Solidity: function transfer(address dst, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHTransactor) Transfer(opts *bind.TransactOpts, dst common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "transfer", dst, wad) -} - -// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. -// -// Solidity: function transfer(address dst, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHSession) Transfer(dst common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Transfer(&_SuperchainWETH.TransactOpts, dst, wad) -} - -// Transfer is a paid mutator transaction binding the contract method 0xa9059cbb. 
-// -// Solidity: function transfer(address dst, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHTransactorSession) Transfer(dst common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Transfer(&_SuperchainWETH.TransactOpts, dst, wad) -} - -// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. -// -// Solidity: function transferFrom(address src, address dst, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHTransactor) TransferFrom(opts *bind.TransactOpts, src common.Address, dst common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "transferFrom", src, dst, wad) -} - -// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. -// -// Solidity: function transferFrom(address src, address dst, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHSession) TransferFrom(src common.Address, dst common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.TransferFrom(&_SuperchainWETH.TransactOpts, src, dst, wad) -} - -// TransferFrom is a paid mutator transaction binding the contract method 0x23b872dd. -// -// Solidity: function transferFrom(address src, address dst, uint256 wad) returns(bool) -func (_SuperchainWETH *SuperchainWETHTransactorSession) TransferFrom(src common.Address, dst common.Address, wad *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.TransferFrom(&_SuperchainWETH.TransactOpts, src, dst, wad) -} - -// Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. 
-// -// Solidity: function withdraw(uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactor) Withdraw(opts *bind.TransactOpts, _amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.contract.Transact(opts, "withdraw", _amount) -} - -// Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. -// -// Solidity: function withdraw(uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHSession) Withdraw(_amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Withdraw(&_SuperchainWETH.TransactOpts, _amount) -} - -// Withdraw is a paid mutator transaction binding the contract method 0x2e1a7d4d. -// -// Solidity: function withdraw(uint256 _amount) returns() -func (_SuperchainWETH *SuperchainWETHTransactorSession) Withdraw(_amount *big.Int) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Withdraw(&_SuperchainWETH.TransactOpts, _amount) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() payable returns() -func (_SuperchainWETH *SuperchainWETHTransactor) Fallback(opts *bind.TransactOpts, calldata []byte) (*types.Transaction, error) { - return _SuperchainWETH.contract.RawTransact(opts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. -// -// Solidity: fallback() payable returns() -func (_SuperchainWETH *SuperchainWETHSession) Fallback(calldata []byte) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Fallback(&_SuperchainWETH.TransactOpts, calldata) -} - -// Fallback is a paid mutator transaction binding the contract fallback function. 
-// -// Solidity: fallback() payable returns() -func (_SuperchainWETH *SuperchainWETHTransactorSession) Fallback(calldata []byte) (*types.Transaction, error) { - return _SuperchainWETH.Contract.Fallback(&_SuperchainWETH.TransactOpts, calldata) -} - -// Receive is a paid mutator transaction binding the contract receive function. -// -// Solidity: receive() payable returns() -func (_SuperchainWETH *SuperchainWETHTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { - return _SuperchainWETH.contract.RawTransact(opts, nil) // calldata is disallowed for receive function -} - -// Receive is a paid mutator transaction binding the contract receive function. -// -// Solidity: receive() payable returns() -func (_SuperchainWETH *SuperchainWETHSession) Receive() (*types.Transaction, error) { - return _SuperchainWETH.Contract.Receive(&_SuperchainWETH.TransactOpts) -} - -// Receive is a paid mutator transaction binding the contract receive function. -// -// Solidity: receive() payable returns() -func (_SuperchainWETH *SuperchainWETHTransactorSession) Receive() (*types.Transaction, error) { - return _SuperchainWETH.Contract.Receive(&_SuperchainWETH.TransactOpts) -} - -// SuperchainWETHApprovalIterator is returned from FilterApproval and is used to iterate over the raw logs and unpacked data for Approval events raised by the SuperchainWETH contract. 
-type SuperchainWETHApprovalIterator struct { - Event *SuperchainWETHApproval // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *SuperchainWETHApprovalIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHApproval) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHApproval) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHApprovalIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. 
-func (it *SuperchainWETHApprovalIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHApproval represents a Approval event raised by the SuperchainWETH contract. -type SuperchainWETHApproval struct { - Src common.Address - Guy common.Address - Wad *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterApproval is a free log retrieval operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. -// -// Solidity: event Approval(address indexed src, address indexed guy, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterApproval(opts *bind.FilterOpts, src []common.Address, guy []common.Address) (*SuperchainWETHApprovalIterator, error) { - - var srcRule []interface{} - for _, srcItem := range src { - srcRule = append(srcRule, srcItem) - } - var guyRule []interface{} - for _, guyItem := range guy { - guyRule = append(guyRule, guyItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Approval", srcRule, guyRule) - if err != nil { - return nil, err - } - return &SuperchainWETHApprovalIterator{contract: _SuperchainWETH.contract, event: "Approval", logs: logs, sub: sub}, nil -} - -// WatchApproval is a free log subscription operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. 
-// -// Solidity: event Approval(address indexed src, address indexed guy, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchApproval(opts *bind.WatchOpts, sink chan<- *SuperchainWETHApproval, src []common.Address, guy []common.Address) (event.Subscription, error) { - - var srcRule []interface{} - for _, srcItem := range src { - srcRule = append(srcRule, srcItem) - } - var guyRule []interface{} - for _, guyItem := range guy { - guyRule = append(guyRule, guyItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Approval", srcRule, guyRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHApproval) - if err := _SuperchainWETH.contract.UnpackLog(event, "Approval", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseApproval is a log parse operation binding the contract event 0x8c5be1e5ebec7d5bd14f71427d1e84f3dd0314c0f7b2291e5b200ac8c7c3b925. -// -// Solidity: event Approval(address indexed src, address indexed guy, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseApproval(log types.Log) (*SuperchainWETHApproval, error) { - event := new(SuperchainWETHApproval) - if err := _SuperchainWETH.contract.UnpackLog(event, "Approval", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SuperchainWETHCrosschainBurnIterator is returned from FilterCrosschainBurn and is used to iterate over the raw logs and unpacked data for CrosschainBurn events raised by the SuperchainWETH contract. 
-type SuperchainWETHCrosschainBurnIterator struct { - Event *SuperchainWETHCrosschainBurn // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *SuperchainWETHCrosschainBurnIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHCrosschainBurn) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHCrosschainBurn) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHCrosschainBurnIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. 
-func (it *SuperchainWETHCrosschainBurnIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHCrosschainBurn represents a CrosschainBurn event raised by the SuperchainWETH contract. -type SuperchainWETHCrosschainBurn struct { - From common.Address - Amount *big.Int - Sender common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterCrosschainBurn is a free log retrieval operation binding the contract event 0xb90795a66650155983e242cac3e1ac1a4dc26f8ed2987f3ce416a34e00111fd4. -// -// Solidity: event CrosschainBurn(address indexed from, uint256 amount, address indexed sender) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterCrosschainBurn(opts *bind.FilterOpts, from []common.Address, sender []common.Address) (*SuperchainWETHCrosschainBurnIterator, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - - var senderRule []interface{} - for _, senderItem := range sender { - senderRule = append(senderRule, senderItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "CrosschainBurn", fromRule, senderRule) - if err != nil { - return nil, err - } - return &SuperchainWETHCrosschainBurnIterator{contract: _SuperchainWETH.contract, event: "CrosschainBurn", logs: logs, sub: sub}, nil -} - -// WatchCrosschainBurn is a free log subscription operation binding the contract event 0xb90795a66650155983e242cac3e1ac1a4dc26f8ed2987f3ce416a34e00111fd4. 
-// -// Solidity: event CrosschainBurn(address indexed from, uint256 amount, address indexed sender) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchCrosschainBurn(opts *bind.WatchOpts, sink chan<- *SuperchainWETHCrosschainBurn, from []common.Address, sender []common.Address) (event.Subscription, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - - var senderRule []interface{} - for _, senderItem := range sender { - senderRule = append(senderRule, senderItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "CrosschainBurn", fromRule, senderRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHCrosschainBurn) - if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainBurn", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseCrosschainBurn is a log parse operation binding the contract event 0xb90795a66650155983e242cac3e1ac1a4dc26f8ed2987f3ce416a34e00111fd4. 
-// -// Solidity: event CrosschainBurn(address indexed from, uint256 amount, address indexed sender) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseCrosschainBurn(log types.Log) (*SuperchainWETHCrosschainBurn, error) { - event := new(SuperchainWETHCrosschainBurn) - if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainBurn", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SuperchainWETHCrosschainMintIterator is returned from FilterCrosschainMint and is used to iterate over the raw logs and unpacked data for CrosschainMint events raised by the SuperchainWETH contract. -type SuperchainWETHCrosschainMintIterator struct { - Event *SuperchainWETHCrosschainMint // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *SuperchainWETHCrosschainMintIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHCrosschainMint) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHCrosschainMint) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHCrosschainMintIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SuperchainWETHCrosschainMintIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHCrosschainMint represents a CrosschainMint event raised by the SuperchainWETH contract. -type SuperchainWETHCrosschainMint struct { - To common.Address - Amount *big.Int - Sender common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterCrosschainMint is a free log retrieval operation binding the contract event 0xde22baff038e3a3e08407cbdf617deed74e869a7ba517df611e33131c6e6ea04. 
-// -// Solidity: event CrosschainMint(address indexed to, uint256 amount, address indexed sender) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterCrosschainMint(opts *bind.FilterOpts, to []common.Address, sender []common.Address) (*SuperchainWETHCrosschainMintIterator, error) { - - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - var senderRule []interface{} - for _, senderItem := range sender { - senderRule = append(senderRule, senderItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "CrosschainMint", toRule, senderRule) - if err != nil { - return nil, err - } - return &SuperchainWETHCrosschainMintIterator{contract: _SuperchainWETH.contract, event: "CrosschainMint", logs: logs, sub: sub}, nil -} - -// WatchCrosschainMint is a free log subscription operation binding the contract event 0xde22baff038e3a3e08407cbdf617deed74e869a7ba517df611e33131c6e6ea04. -// -// Solidity: event CrosschainMint(address indexed to, uint256 amount, address indexed sender) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchCrosschainMint(opts *bind.WatchOpts, sink chan<- *SuperchainWETHCrosschainMint, to []common.Address, sender []common.Address) (event.Subscription, error) { - - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - var senderRule []interface{} - for _, senderItem := range sender { - senderRule = append(senderRule, senderItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "CrosschainMint", toRule, senderRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHCrosschainMint) - if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainMint", log); err != nil { - return err - } - event.Raw = log - - select { - case sink 
<- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseCrosschainMint is a log parse operation binding the contract event 0xde22baff038e3a3e08407cbdf617deed74e869a7ba517df611e33131c6e6ea04. -// -// Solidity: event CrosschainMint(address indexed to, uint256 amount, address indexed sender) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseCrosschainMint(log types.Log) (*SuperchainWETHCrosschainMint, error) { - event := new(SuperchainWETHCrosschainMint) - if err := _SuperchainWETH.contract.UnpackLog(event, "CrosschainMint", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SuperchainWETHDepositIterator is returned from FilterDeposit and is used to iterate over the raw logs and unpacked data for Deposit events raised by the SuperchainWETH contract. -type SuperchainWETHDepositIterator struct { - Event *SuperchainWETHDeposit // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *SuperchainWETHDepositIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHDeposit) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHDeposit) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHDepositIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SuperchainWETHDepositIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHDeposit represents a Deposit event raised by the SuperchainWETH contract. -type SuperchainWETHDeposit struct { - Dst common.Address - Wad *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterDeposit is a free log retrieval operation binding the contract event 0xe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c. 
-// -// Solidity: event Deposit(address indexed dst, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterDeposit(opts *bind.FilterOpts, dst []common.Address) (*SuperchainWETHDepositIterator, error) { - - var dstRule []interface{} - for _, dstItem := range dst { - dstRule = append(dstRule, dstItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Deposit", dstRule) - if err != nil { - return nil, err - } - return &SuperchainWETHDepositIterator{contract: _SuperchainWETH.contract, event: "Deposit", logs: logs, sub: sub}, nil -} - -// WatchDeposit is a free log subscription operation binding the contract event 0xe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c. -// -// Solidity: event Deposit(address indexed dst, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchDeposit(opts *bind.WatchOpts, sink chan<- *SuperchainWETHDeposit, dst []common.Address) (event.Subscription, error) { - - var dstRule []interface{} - for _, dstItem := range dst { - dstRule = append(dstRule, dstItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Deposit", dstRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHDeposit) - if err := _SuperchainWETH.contract.UnpackLog(event, "Deposit", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseDeposit is a log parse operation binding the contract event 0xe1fffcc4923d04b559f4d29a8bfc6cda04eb5b0d3c460751c2402c5c5cc9109c. 
-// -// Solidity: event Deposit(address indexed dst, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseDeposit(log types.Log) (*SuperchainWETHDeposit, error) { - event := new(SuperchainWETHDeposit) - if err := _SuperchainWETH.contract.UnpackLog(event, "Deposit", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SuperchainWETHRelayETHIterator is returned from FilterRelayETH and is used to iterate over the raw logs and unpacked data for RelayETH events raised by the SuperchainWETH contract. -type SuperchainWETHRelayETHIterator struct { - Event *SuperchainWETHRelayETH // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *SuperchainWETHRelayETHIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHRelayETH) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHRelayETH) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHRelayETHIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SuperchainWETHRelayETHIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHRelayETH represents a RelayETH event raised by the SuperchainWETH contract. -type SuperchainWETHRelayETH struct { - From common.Address - To common.Address - Amount *big.Int - Source *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterRelayETH is a free log retrieval operation binding the contract event 0xe5479bb8ebad3b9ac81f55f424a6289cf0a54ff2641708f41dcb2b26f264d359. 
-// -// Solidity: event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterRelayETH(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*SuperchainWETHRelayETHIterator, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "RelayETH", fromRule, toRule) - if err != nil { - return nil, err - } - return &SuperchainWETHRelayETHIterator{contract: _SuperchainWETH.contract, event: "RelayETH", logs: logs, sub: sub}, nil -} - -// WatchRelayETH is a free log subscription operation binding the contract event 0xe5479bb8ebad3b9ac81f55f424a6289cf0a54ff2641708f41dcb2b26f264d359. -// -// Solidity: event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchRelayETH(opts *bind.WatchOpts, sink chan<- *SuperchainWETHRelayETH, from []common.Address, to []common.Address) (event.Subscription, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "RelayETH", fromRule, toRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHRelayETH) - if err := _SuperchainWETH.contract.UnpackLog(event, "RelayETH", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - 
case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseRelayETH is a log parse operation binding the contract event 0xe5479bb8ebad3b9ac81f55f424a6289cf0a54ff2641708f41dcb2b26f264d359. -// -// Solidity: event RelayETH(address indexed from, address indexed to, uint256 amount, uint256 source) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseRelayETH(log types.Log) (*SuperchainWETHRelayETH, error) { - event := new(SuperchainWETHRelayETH) - if err := _SuperchainWETH.contract.UnpackLog(event, "RelayETH", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SuperchainWETHSendETHIterator is returned from FilterSendETH and is used to iterate over the raw logs and unpacked data for SendETH events raised by the SuperchainWETH contract. -type SuperchainWETHSendETHIterator struct { - Event *SuperchainWETHSendETH // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *SuperchainWETHSendETHIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHSendETH) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHSendETH) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHSendETHIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SuperchainWETHSendETHIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHSendETH represents a SendETH event raised by the SuperchainWETH contract. -type SuperchainWETHSendETH struct { - From common.Address - To common.Address - Amount *big.Int - Destination *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterSendETH is a free log retrieval operation binding the contract event 0xed98a2ff78833375c368471a747cdf0633024dde3f870feb08a934ac5be83402. 
-// -// Solidity: event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterSendETH(opts *bind.FilterOpts, from []common.Address, to []common.Address) (*SuperchainWETHSendETHIterator, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "SendETH", fromRule, toRule) - if err != nil { - return nil, err - } - return &SuperchainWETHSendETHIterator{contract: _SuperchainWETH.contract, event: "SendETH", logs: logs, sub: sub}, nil -} - -// WatchSendETH is a free log subscription operation binding the contract event 0xed98a2ff78833375c368471a747cdf0633024dde3f870feb08a934ac5be83402. -// -// Solidity: event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchSendETH(opts *bind.WatchOpts, sink chan<- *SuperchainWETHSendETH, from []common.Address, to []common.Address) (event.Subscription, error) { - - var fromRule []interface{} - for _, fromItem := range from { - fromRule = append(fromRule, fromItem) - } - var toRule []interface{} - for _, toItem := range to { - toRule = append(toRule, toItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "SendETH", fromRule, toRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHSendETH) - if err := _SuperchainWETH.contract.UnpackLog(event, "SendETH", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case 
err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseSendETH is a log parse operation binding the contract event 0xed98a2ff78833375c368471a747cdf0633024dde3f870feb08a934ac5be83402. -// -// Solidity: event SendETH(address indexed from, address indexed to, uint256 amount, uint256 destination) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseSendETH(log types.Log) (*SuperchainWETHSendETH, error) { - event := new(SuperchainWETHSendETH) - if err := _SuperchainWETH.contract.UnpackLog(event, "SendETH", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SuperchainWETHTransferIterator is returned from FilterTransfer and is used to iterate over the raw logs and unpacked data for Transfer events raised by the SuperchainWETH contract. -type SuperchainWETHTransferIterator struct { - Event *SuperchainWETHTransfer // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *SuperchainWETHTransferIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHTransfer) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHTransfer) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHTransferIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SuperchainWETHTransferIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHTransfer represents a Transfer event raised by the SuperchainWETH contract. -type SuperchainWETHTransfer struct { - Src common.Address - Dst common.Address - Wad *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterTransfer is a free log retrieval operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef. 
-// -// Solidity: event Transfer(address indexed src, address indexed dst, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterTransfer(opts *bind.FilterOpts, src []common.Address, dst []common.Address) (*SuperchainWETHTransferIterator, error) { - - var srcRule []interface{} - for _, srcItem := range src { - srcRule = append(srcRule, srcItem) - } - var dstRule []interface{} - for _, dstItem := range dst { - dstRule = append(dstRule, dstItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Transfer", srcRule, dstRule) - if err != nil { - return nil, err - } - return &SuperchainWETHTransferIterator{contract: _SuperchainWETH.contract, event: "Transfer", logs: logs, sub: sub}, nil -} - -// WatchTransfer is a free log subscription operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef. -// -// Solidity: event Transfer(address indexed src, address indexed dst, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchTransfer(opts *bind.WatchOpts, sink chan<- *SuperchainWETHTransfer, src []common.Address, dst []common.Address) (event.Subscription, error) { - - var srcRule []interface{} - for _, srcItem := range src { - srcRule = append(srcRule, srcItem) - } - var dstRule []interface{} - for _, dstItem := range dst { - dstRule = append(dstRule, dstItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Transfer", srcRule, dstRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHTransfer) - if err := _SuperchainWETH.contract.UnpackLog(event, "Transfer", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - 
case <-quit: - return nil - } - } - }), nil -} - -// ParseTransfer is a log parse operation binding the contract event 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef. -// -// Solidity: event Transfer(address indexed src, address indexed dst, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseTransfer(log types.Log) (*SuperchainWETHTransfer, error) { - event := new(SuperchainWETHTransfer) - if err := _SuperchainWETH.contract.UnpackLog(event, "Transfer", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// SuperchainWETHWithdrawalIterator is returned from FilterWithdrawal and is used to iterate over the raw logs and unpacked data for Withdrawal events raised by the SuperchainWETH contract. -type SuperchainWETHWithdrawalIterator struct { - Event *SuperchainWETHWithdrawal // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *SuperchainWETHWithdrawalIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHWithdrawal) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(SuperchainWETHWithdrawal) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *SuperchainWETHWithdrawalIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *SuperchainWETHWithdrawalIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// SuperchainWETHWithdrawal represents a Withdrawal event raised by the SuperchainWETH contract. -type SuperchainWETHWithdrawal struct { - Src common.Address - Wad *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterWithdrawal is a free log retrieval operation binding the contract event 0x7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65. 
-// -// Solidity: event Withdrawal(address indexed src, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) FilterWithdrawal(opts *bind.FilterOpts, src []common.Address) (*SuperchainWETHWithdrawalIterator, error) { - - var srcRule []interface{} - for _, srcItem := range src { - srcRule = append(srcRule, srcItem) - } - - logs, sub, err := _SuperchainWETH.contract.FilterLogs(opts, "Withdrawal", srcRule) - if err != nil { - return nil, err - } - return &SuperchainWETHWithdrawalIterator{contract: _SuperchainWETH.contract, event: "Withdrawal", logs: logs, sub: sub}, nil -} - -// WatchWithdrawal is a free log subscription operation binding the contract event 0x7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65. -// -// Solidity: event Withdrawal(address indexed src, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) WatchWithdrawal(opts *bind.WatchOpts, sink chan<- *SuperchainWETHWithdrawal, src []common.Address) (event.Subscription, error) { - - var srcRule []interface{} - for _, srcItem := range src { - srcRule = append(srcRule, srcItem) - } - - logs, sub, err := _SuperchainWETH.contract.WatchLogs(opts, "Withdrawal", srcRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(SuperchainWETHWithdrawal) - if err := _SuperchainWETH.contract.UnpackLog(event, "Withdrawal", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseWithdrawal is a log parse operation binding the contract event 0x7fcf532c15f0a6db0bd6d0e038bea71d30d808c7d98cb3bf7268a95bf5081b65. 
-// -// Solidity: event Withdrawal(address indexed src, uint256 wad) -func (_SuperchainWETH *SuperchainWETHFilterer) ParseWithdrawal(log types.Log) (*SuperchainWETHWithdrawal, error) { - event := new(SuperchainWETHWithdrawal) - if err := _SuperchainWETH.contract.UnpackLog(event, "Withdrawal", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/devnet-sdk/contracts/constants/constants.go b/devnet-sdk/contracts/constants/constants.go deleted file mode 100644 index 67801db723dde..0000000000000 --- a/devnet-sdk/contracts/constants/constants.go +++ /dev/null @@ -1,55 +0,0 @@ -package constants - -import ( - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum/go-ethereum/common" -) - -var ( - LegacyMessagePasser types.Address = common.HexToAddress("0x4200000000000000000000000000000000000000") - DeployerWhitelist types.Address = common.HexToAddress("0x4200000000000000000000000000000000000002") - WETH types.Address = common.HexToAddress("0x4200000000000000000000000000000000000006") - L2CrossDomainMessenger types.Address = common.HexToAddress("0x4200000000000000000000000000000000000007") - GasPriceOracle types.Address = common.HexToAddress("0x420000000000000000000000000000000000000F") - L2StandardBridge types.Address = common.HexToAddress("0x4200000000000000000000000000000000000010") - SequencerFeeVault types.Address = common.HexToAddress("0x4200000000000000000000000000000000000011") - OptimismMintableERC20Factory types.Address = common.HexToAddress("0x4200000000000000000000000000000000000012") - L1BlockNumber types.Address = common.HexToAddress("0x4200000000000000000000000000000000000013") - L1Block types.Address = common.HexToAddress("0x4200000000000000000000000000000000000015") - L2ToL1MessagePasser types.Address = common.HexToAddress("0x4200000000000000000000000000000000000016") - L2ERC721Bridge types.Address = common.HexToAddress("0x4200000000000000000000000000000000000014") - 
OptimismMintableERC721Factory types.Address = common.HexToAddress("0x4200000000000000000000000000000000000017") - ProxyAdmin types.Address = common.HexToAddress("0x4200000000000000000000000000000000000018") - BaseFeeVault types.Address = common.HexToAddress("0x4200000000000000000000000000000000000019") - L1FeeVault types.Address = common.HexToAddress("0x420000000000000000000000000000000000001a") - OperatorFeeVault types.Address = common.HexToAddress("0x420000000000000000000000000000000000001B") - SchemaRegistry types.Address = common.HexToAddress("0x4200000000000000000000000000000000000020") - EAS types.Address = common.HexToAddress("0x4200000000000000000000000000000000000021") - CrossL2Inbox types.Address = common.HexToAddress("0x4200000000000000000000000000000000000022") - L2ToL2CrossDomainMessenger types.Address = common.HexToAddress("0x4200000000000000000000000000000000000023") - SuperchainETHBridge types.Address = common.HexToAddress("0x4200000000000000000000000000000000000024") - ETHLiquidity types.Address = common.HexToAddress("0x4200000000000000000000000000000000000025") - SuperchainTokenBridge types.Address = common.HexToAddress("0x4200000000000000000000000000000000000028") - NativeAssetLiquidity types.Address = common.HexToAddress("0x4200000000000000000000000000000000000029") - LiquidityController types.Address = common.HexToAddress("0x420000000000000000000000000000000000002a") - FeeSplitter types.Address = common.HexToAddress("0x420000000000000000000000000000000000002B") - GovernanceToken types.Address = common.HexToAddress("0x4200000000000000000000000000000000000042") - Create2Deployer types.Address = common.HexToAddress("0x13b0D85CcB8bf860b6b79AF3029fCA081AE9beF2") - MultiCall3 types.Address = common.HexToAddress("0xcA11bde05977b3631167028862bE2a173976CA11") - Safe_v130 types.Address = common.HexToAddress("0x69f4D1788e39c87893C980c06EdF4b7f686e2938") - SafeL2_v130 types.Address = common.HexToAddress("0xfb1bffC9d739B8D520DaF37dF666da4C687191EA") - 
MultiSendCallOnly_v130 types.Address = common.HexToAddress("0xA1dabEF33b3B82c7814B6D82A79e50F4AC44102B") - SafeSingletonFactory types.Address = common.HexToAddress("0x914d7Fec6aaC8cd542e72Bca78B30650d45643d7") - DeterministicDeploymentProxy types.Address = common.HexToAddress("0x4e59b44847b379578588920cA78FbF26c0B4956C") - MultiSend_v130 types.Address = common.HexToAddress("0x998739BFdAAdde7C933B942a68053933098f9EDa") - Permit2 types.Address = common.HexToAddress("0x000000000022D473030F116dDEE9F6B43aC78BA3") - SenderCreator_v060 types.Address = common.HexToAddress("0x7fc98430eaedbb6070b35b39d798725049088348") - EntryPoint_v060 types.Address = common.HexToAddress("0x5FF137D4b0FDCD49DcA30c7CF57E578a026d2789") - SenderCreator_v070 types.Address = common.HexToAddress("0xEFC2c1444eBCC4Db75e7613d20C6a62fF67A167C") - EntryPoint_v070 types.Address = common.HexToAddress("0x0000000071727De22E5E9d8BAf0edAc6f37da032") -) - -const ( - ETH = 1e18 - Gwei = 1e9 -) diff --git a/devnet-sdk/contracts/contracts.go b/devnet-sdk/contracts/contracts.go deleted file mode 100644 index ad2e55dc2d471..0000000000000 --- a/devnet-sdk/contracts/contracts.go +++ /dev/null @@ -1,17 +0,0 @@ -package contracts - -import ( - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/registry/client" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/registry/empty" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum/go-ethereum/ethclient" -) - -// NewClientRegistry creates a new Registry that uses the provided client -func NewClientRegistry(c *ethclient.Client) interfaces.ContractsRegistry { - return &client.ClientRegistry{Client: c} -} - -func NewEmptyRegistry() interfaces.ContractsRegistry { - return &empty.EmptyRegistry{} -} diff --git a/devnet-sdk/contracts/registry/client/client.go b/devnet-sdk/contracts/registry/client/client.go deleted file mode 100644 index d60f35639e6b0..0000000000000 --- a/devnet-sdk/contracts/registry/client/client.go +++ 
/dev/null @@ -1,50 +0,0 @@ -package client - -import ( - "fmt" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/ethclient" -) - -// ClientRegistry is a Registry implementation that uses an ethclient.Client -type ClientRegistry struct { - Client *ethclient.Client -} - -var _ interfaces.ContractsRegistry = (*ClientRegistry)(nil) - -func (r *ClientRegistry) WETH(address types.Address) (interfaces.WETH, error) { - // SuperchainWETH was removed and replaced with SuperchainETHBridge - // NewSuperchainWETH can be still used for fetching WETH balance - binding, err := bindings.NewSuperchainWETH(address, r.Client) - if err != nil { - return nil, fmt.Errorf("failed to create WETH binding: %w", err) - } - return &WETHBinding{ - contractAddress: address, - client: r.Client, - binding: binding, - }, nil -} - -func (r *ClientRegistry) L2ToL2CrossDomainMessenger(address types.Address) (interfaces.L2ToL2CrossDomainMessenger, error) { - binding, err := bindings.NewL2ToL2CrossDomainMessenger(address, r.Client) - if err != nil { - return nil, fmt.Errorf("failed to create L2ToL2CrossDomainMessenger binding: %w", err) - } - abi, err := abi.JSON(strings.NewReader(bindings.L2ToL2CrossDomainMessengerMetaData.ABI)) - if err != nil { - return nil, fmt.Errorf("failed to create L2ToL2CrossDomainMessenger binding ABI: %w", err) - } - return &L2ToL2CrossDomainMessengerBinding{ - contractAddress: address, - client: r.Client, - binding: binding, - abi: &abi, - }, nil -} diff --git a/devnet-sdk/contracts/registry/client/l2tol2crossdomainmessenger.go b/devnet-sdk/contracts/registry/client/l2tol2crossdomainmessenger.go deleted file mode 100644 index f2ff3da7ab7e8..0000000000000 --- a/devnet-sdk/contracts/registry/client/l2tol2crossdomainmessenger.go +++ 
/dev/null @@ -1,22 +0,0 @@ -package client - -import ( - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/ethclient" -) - -type L2ToL2CrossDomainMessengerBinding struct { - contractAddress types.Address - client *ethclient.Client - binding *bindings.L2ToL2CrossDomainMessenger - abi *abi.ABI -} - -var _ interfaces.L2ToL2CrossDomainMessenger = (*L2ToL2CrossDomainMessengerBinding)(nil) - -func (b *L2ToL2CrossDomainMessengerBinding) ABI() *abi.ABI { - return b.abi -} diff --git a/devnet-sdk/contracts/registry/client/weth.go b/devnet-sdk/contracts/registry/client/weth.go deleted file mode 100644 index 68d30af479ce5..0000000000000 --- a/devnet-sdk/contracts/registry/client/weth.go +++ /dev/null @@ -1,38 +0,0 @@ -package client - -import ( - "context" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum/go-ethereum/ethclient" -) - -type WETHBinding struct { - contractAddress types.Address - client *ethclient.Client - binding *bindings.SuperchainWETH -} - -var _ interfaces.WETH = (*WETHBinding)(nil) - -func (b *WETHBinding) BalanceOf(addr types.Address) types.ReadInvocation[types.Balance] { - return &WETHBalanceOfImpl{ - contract: b, - addr: addr, - } -} - -type WETHBalanceOfImpl struct { - contract *WETHBinding - addr types.Address -} - -func (i *WETHBalanceOfImpl) Call(ctx context.Context) (types.Balance, error) { - balance, err := i.contract.binding.BalanceOf(nil, i.addr) - if err != nil { - return types.Balance{}, err - } - return types.NewBalance(balance), nil -} diff --git a/devnet-sdk/contracts/registry/empty/empty.go b/devnet-sdk/contracts/registry/empty/empty.go deleted file 
mode 100644 index e5534908fd76c..0000000000000 --- a/devnet-sdk/contracts/registry/empty/empty.go +++ /dev/null @@ -1,25 +0,0 @@ -package empty - -import ( - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" -) - -// EmptyRegistry represents a registry that returns not found errors for all contract accesses -type EmptyRegistry struct{} - -var _ interfaces.ContractsRegistry = (*EmptyRegistry)(nil) - -func (r *EmptyRegistry) WETH(address types.Address) (interfaces.WETH, error) { - return nil, &interfaces.ErrContractNotFound{ - ContractType: "WETH", - Address: address, - } -} - -func (r *EmptyRegistry) L2ToL2CrossDomainMessenger(address types.Address) (interfaces.L2ToL2CrossDomainMessenger, error) { - return nil, &interfaces.ErrContractNotFound{ - ContractType: "L2ToL2CrossDomainMessenger", - Address: address, - } -} diff --git a/devnet-sdk/controller/kt/kt.go b/devnet-sdk/controller/kt/kt.go deleted file mode 100644 index 7e2d338b9bf80..0000000000000 --- a/devnet-sdk/controller/kt/kt.go +++ /dev/null @@ -1,109 +0,0 @@ -package kt - -import ( - "context" - "fmt" - "strings" - "sync" - - "github.com/ethereum-optimism/optimism/devnet-sdk/controller/surface" - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/run" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/wrappers" -) - -type KurtosisControllerSurface struct { - env *descriptors.DevnetEnvironment - kurtosisCtx interfaces.KurtosisContextInterface - runner *run.KurtosisRunner - devnetfs *fs.DevnetFS - - // control operations are disruptive, let's make sure we don't run them - // concurrently so that test logic has a fighting chance of being correct. 
- mtx sync.Mutex -} - -func NewKurtosisControllerSurface(env *descriptors.DevnetEnvironment) (*KurtosisControllerSurface, error) { - enclave := env.Name - - kurtosisCtx, err := wrappers.GetDefaultKurtosisContext() - if err != nil { - return nil, err - } - - runner, err := run.NewKurtosisRunner( - run.WithKurtosisRunnerEnclave(enclave), - run.WithKurtosisRunnerKurtosisContext(kurtosisCtx), - ) - if err != nil { - return nil, err - } - - enclaveFS, err := fs.NewEnclaveFS(context.TODO(), enclave) - if err != nil { - return nil, err - } - // Create a new DevnetFS instance using the enclaveFS - devnetfs := fs.NewDevnetFS(enclaveFS) - - return &KurtosisControllerSurface{ - env: env, - kurtosisCtx: kurtosisCtx, - runner: runner, - devnetfs: devnetfs, - }, nil -} - -func (s *KurtosisControllerSurface) StartService(ctx context.Context, serviceName string) error { - s.mtx.Lock() - defer s.mtx.Unlock() - - script := fmt.Sprintf(` -def run(plan): - plan.start_service(name="%s") -`, serviceName) - // start_service is not idempotent, and doesn't return a typed error, - // so we need to check the error message - if err := s.runner.RunScript(ctx, script); err != nil { - msg := err.Error() - if strings.Contains(strings.ToLower(msg), "is already in use by container") { - // we know we don't need to update the env, as the service was already running - return nil - } - return err - } - return s.updateDevnetEnvironmentForService(ctx, serviceName, true) -} - -func (s *KurtosisControllerSurface) StopService(ctx context.Context, serviceName string) error { - s.mtx.Lock() - defer s.mtx.Unlock() - - script := fmt.Sprintf(` -def run(plan): - plan.stop_service(name="%s") -`, serviceName) - // stop_service is idempotent, so errors here are real - if err := s.runner.RunScript(ctx, script); err != nil { - return err - } - // conversely, we don't know if the service was running or not, so we need to update the env - return s.updateDevnetEnvironmentForService(ctx, serviceName, false) -} - -func (s 
*KurtosisControllerSurface) updateDevnetEnvironmentForService(ctx context.Context, serviceName string, on bool) error { - // - refreshed, err := s.updateDevnetEnvironmentService(ctx, serviceName, on) - if err != nil { - return err - } - if !refreshed { - return nil - } - - return s.devnetfs.UploadDevnetDescriptor(ctx, s.env) -} - -var _ surface.ServiceLifecycleSurface = (*KurtosisControllerSurface)(nil) diff --git a/devnet-sdk/controller/kt/kt_test.go b/devnet-sdk/controller/kt/kt_test.go deleted file mode 100644 index c84452102882c..0000000000000 --- a/devnet-sdk/controller/kt/kt_test.go +++ /dev/null @@ -1,153 +0,0 @@ -package kt - -import ( - "context" - "errors" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/fake" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/run" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestKurtosisControllerSurface(t *testing.T) { - ctx := context.Background() - testErr := errors.New("test error") - - // Create a test environment - env := &descriptors.DevnetEnvironment{ - Name: "test-env", - L1: &descriptors.Chain{ - Services: descriptors.RedundantServiceMap{ - "test-service": []*descriptors.Service{ - &descriptors.Service{ - Name: "test-service", - Endpoints: descriptors.EndpointMap{ - "http": { - Port: 0, - PrivatePort: 0, - }, - }, - }, - }, - }, - }, - } - - // Create a test service context with port data - testSvcCtx := &testServiceContext{ - publicPorts: map[string]interfaces.PortSpec{ - "http": &testPortSpec{number: 8080}, - }, - privatePorts: map[string]interfaces.PortSpec{ - "http": &testPortSpec{number: 8082}, - }, - } - - tests := []struct { - name string - serviceName string - operation string // 
"start" or "stop" - runErr error - wantErr bool - }{ - { - name: "successful service start", - serviceName: "test-service", - operation: "start", - runErr: nil, - wantErr: false, - }, - { - name: "service already running", - serviceName: "test-service", - operation: "start", - runErr: errors.New("is already in use by container"), - wantErr: false, - }, - { - name: "error starting service", - serviceName: "test-service", - operation: "start", - runErr: testErr, - wantErr: true, - }, - { - name: "successful service stop", - serviceName: "test-service", - operation: "stop", - runErr: nil, - wantErr: false, - }, - { - name: "error stopping service", - serviceName: "test-service", - operation: "stop", - runErr: testErr, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a fake enclave context that will return our test service context - fakeEnclaveCtx := &fake.EnclaveContext{ - RunErr: tt.runErr, - Services: map[services.ServiceName]interfaces.ServiceContext{ - "test-service": testSvcCtx, - }, - } - - // Create a fake Kurtosis context that will return our fake enclave context - fakeCtx := &fake.KurtosisContext{ - EnclaveCtx: fakeEnclaveCtx, - } - - // Create a KurtosisRunner with our fake context - runner, err := run.NewKurtosisRunner( - run.WithKurtosisRunnerEnclave("test-enclave"), - run.WithKurtosisRunnerKurtosisContext(fakeCtx), - ) - require.NoError(t, err) - - // Create the controller surface with all required fields - surface := &KurtosisControllerSurface{ - env: env, - kurtosisCtx: fakeCtx, - runner: runner, - } - - // Create the mock DevnetFS - mockDevnetFS, err := newMockDevnetFS(env) - require.NoError(t, err) - surface.devnetfs = mockDevnetFS - - switch tt.operation { - case "start": - err = surface.StartService(ctx, tt.serviceName) - case "stop": - err = surface.StopService(ctx, tt.serviceName) - default: - t.Fatalf("unknown operation: %s", tt.operation) - } - - if tt.wantErr { - assert.Error(t, err) - 
return - } - assert.NoError(t, err) - - // For successful start operations, verify that the service endpoints were updated - if tt.operation == "start" && !tt.wantErr { - svc := findSvcInEnv(env, tt.serviceName) - require.NotNil(t, svc) - require.Equal(t, 8080, svc[0].Endpoints["http"].Port) - require.Equal(t, 8082, svc[0].Endpoints["http"].PrivatePort) - } - }) - } -} diff --git a/devnet-sdk/controller/kt/mutate_env.go b/devnet-sdk/controller/kt/mutate_env.go deleted file mode 100644 index 66c5c8cde27cf..0000000000000 --- a/devnet-sdk/controller/kt/mutate_env.go +++ /dev/null @@ -1,100 +0,0 @@ -package kt - -import ( - "context" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" -) - -// a hack because some L2 services are duplicated across chains -type redundantService []*descriptors.Service - -func (s redundantService) getEndpoints() descriptors.EndpointMap { - if len(s) == 0 { - return nil - } - - return s[0].Endpoints -} - -func (s redundantService) setEndpoints(endpoints descriptors.EndpointMap) { - for _, svc := range s { - svc.Endpoints = endpoints - } -} - -func (s redundantService) refreshEndpoints(serviceCtx interfaces.ServiceContext) { - endpoints := s.getEndpoints() - - publicPorts := serviceCtx.GetPublicPorts() - privatePorts := serviceCtx.GetPrivatePorts() - - for name, info := range publicPorts { - endpoints[name].Port = int(info.GetNumber()) - } - for name, info := range privatePorts { - endpoints[name].PrivatePort = int(info.GetNumber()) - } - - s.setEndpoints(endpoints) -} - -func findSvcInEnv(env *descriptors.DevnetEnvironment, serviceName string) redundantService { - if svc := findSvcInChain(env.L1, serviceName); svc != nil { - return redundantService{svc} - } - - var services redundantService = nil - for _, l2 := range env.L2 { - if svc := findSvcInChain(l2.Chain, serviceName); svc != nil { - services = append(services, svc) - } - } - return 
services -} - -func findSvcInChain(chain *descriptors.Chain, serviceName string) *descriptors.Service { - for _, instances := range chain.Services { - for _, svc := range instances { - if svc.Name == serviceName { - return svc - } - } - } - - for _, node := range chain.Nodes { - for _, svc := range node.Services { - if svc.Name == serviceName { - return svc - } - } - } - - return nil -} - -func (s *KurtosisControllerSurface) updateDevnetEnvironmentService(ctx context.Context, serviceName string, on bool) (bool, error) { - svc := findSvcInEnv(s.env, serviceName) - if svc == nil { - // service is not part of the env, so we don't need to do anything - return false, nil - } - - // get the enclave - enclaveCtx, err := s.kurtosisCtx.GetEnclave(ctx, s.env.Name) - if err != nil { - return false, err - } - - serviceCtx, err := enclaveCtx.GetService(serviceName) - if err != nil { - return false, err - } - - if on { - svc.refreshEndpoints(serviceCtx) - } - // otherwise the service is down anyway, it doesn't matter if it has outdated endpoints - return on, nil -} diff --git a/devnet-sdk/controller/kt/mutate_env_test.go b/devnet-sdk/controller/kt/mutate_env_test.go deleted file mode 100644 index 3a83dc585a948..0000000000000 --- a/devnet-sdk/controller/kt/mutate_env_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package kt - -import ( - "context" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/stretchr/testify/require" -) - -func TestRedundantServiceRefreshEndpoints(t *testing.T) { - // Create a test service with some initial endpoints - svc1 := &descriptors.Service{ - Name: "test-service", - Endpoints: descriptors.EndpointMap{ - "http": { - Port: 0, - PrivatePort: 0, - }, - "ws": { - Port: 0, - PrivatePort: 0, - }, - }, - } - svc2 := &descriptors.Service{ - Name: "test-service", - 
Endpoints: descriptors.EndpointMap{ - "http": { - Port: 0, - PrivatePort: 0, - }, - "ws": { - Port: 0, - PrivatePort: 0, - }, - }, - } - - // Create a redundant service with both services - redundant := redundantService{svc1, svc2} - - // Create a test service context with new port numbers - testCtx := &testServiceContext{ - publicPorts: map[string]interfaces.PortSpec{ - "http": &testPortSpec{number: 8080}, - "ws": &testPortSpec{number: 8081}, - }, - privatePorts: map[string]interfaces.PortSpec{ - "http": &testPortSpec{number: 8082}, - "ws": &testPortSpec{number: 8083}, - }, - } - - // Call refreshEndpoints - redundant.refreshEndpoints(testCtx) - - // Verify that both services have been updated with the new port numbers - for _, svc := range redundant { - require.Equal(t, 8080, svc.Endpoints["http"].Port) - require.Equal(t, 8081, svc.Endpoints["ws"].Port) - require.Equal(t, 8082, svc.Endpoints["http"].PrivatePort) - require.Equal(t, 8083, svc.Endpoints["ws"].PrivatePort) - } -} - -func TestRedundantServiceEmpty(t *testing.T) { - // Test behavior with empty redundant service - redundant := redundantService{} - testCtx := &testServiceContext{ - publicPorts: map[string]interfaces.PortSpec{}, - privatePorts: map[string]interfaces.PortSpec{}, - } - - // This should not panic - redundant.refreshEndpoints(testCtx) -} - -func TestUpdateDevnetEnvironmentService(t *testing.T) { - // Create a test environment with a service - env := &descriptors.DevnetEnvironment{ - Name: "test-env", - L1: &descriptors.Chain{ - Services: descriptors.RedundantServiceMap{ - "test-service": []*descriptors.Service{ - &descriptors.Service{ - Name: "test-service", - Endpoints: descriptors.EndpointMap{ - "http": { - Port: 0, - PrivatePort: 0, - }, - }, - }, - }, - }, - }, - } - - // Create a test service context with new port numbers - testSvcCtx := &testServiceContext{ - publicPorts: map[string]interfaces.PortSpec{ - "http": &testPortSpec{number: 8080}, - }, - privatePorts: 
map[string]interfaces.PortSpec{ - "http": &testPortSpec{number: 8082}, - }, - } - - // Create a mock enclave context with our service - mockEnclave := &mockEnclaveContext{ - services: map[services.ServiceName]interfaces.ServiceContext{ - "test-service": testSvcCtx, - }, - } - - // Create a mock kurtosis context with our enclave - mockKurtosisCtx := &mockKurtosisContext{ - enclaves: map[string]interfaces.EnclaveContext{ - "test-env": mockEnclave, - }, - } - - // Create the controller surface - controller := &KurtosisControllerSurface{ - kurtosisCtx: mockKurtosisCtx, - env: env, - } - - // Create the mock DevnetFS - mockDevnetFS, err := newMockDevnetFS(env) - require.NoError(t, err) - controller.devnetfs = mockDevnetFS - - // Test updating the service (turning it on) - updated, err := controller.updateDevnetEnvironmentService(context.Background(), "test-service", true) - require.NoError(t, err) - require.True(t, updated) - - // Verify that the service's endpoints were updated - svc := findSvcInEnv(env, "test-service") - require.NotNil(t, svc) - require.Equal(t, 8080, svc[0].Endpoints["http"].Port) - require.Equal(t, 8082, svc[0].Endpoints["http"].PrivatePort) - - // Test updating a non-existent service - updated, err = controller.updateDevnetEnvironmentService(context.Background(), "non-existent-service", true) - require.NoError(t, err) - require.False(t, updated) -} diff --git a/devnet-sdk/controller/kt/test_helpers.go b/devnet-sdk/controller/kt/test_helpers.go deleted file mode 100644 index f61df2f1fcff4..0000000000000 --- a/devnet-sdk/controller/kt/test_helpers.go +++ /dev/null @@ -1,145 +0,0 @@ -package kt - -import ( - "context" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - 
"github.com/kurtosis-tech/kurtosis/api/golang/core/lib/enclaves" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config" - "github.com/spf13/afero" -) - -// testPortSpec implements interfaces.PortSpec -type testPortSpec struct { - number uint16 -} - -func (m *testPortSpec) GetNumber() uint16 { - return m.number -} - -// testServiceContext implements interfaces.ServiceContext -type testServiceContext struct { - publicPorts map[string]interfaces.PortSpec - privatePorts map[string]interfaces.PortSpec -} - -func (m *testServiceContext) GetServiceUUID() services.ServiceUUID { - return "mock-service-uuid" -} - -func (m *testServiceContext) GetMaybePublicIPAddress() string { - return "127.0.0.1" -} - -func (m *testServiceContext) GetPublicPorts() map[string]interfaces.PortSpec { - return m.publicPorts -} - -func (m *testServiceContext) GetPrivatePorts() map[string]interfaces.PortSpec { - return m.privatePorts -} - -func (m *testServiceContext) GetLabels() map[string]string { - return make(map[string]string) -} - -// mockEnclaveFS implements fs.EnclaveContextIface for testing -type mockEnclaveFS struct { - env *descriptors.DevnetEnvironment -} - -func (m *mockEnclaveFS) GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) { - return nil, nil -} - -func (m *mockEnclaveFS) DownloadFilesArtifact(ctx context.Context, name string) ([]byte, error) { - return nil, nil -} - -func (m *mockEnclaveFS) UploadFiles(pathToUpload string, artifactName string) (services.FilesArtifactUUID, services.FileArtifactName, error) { - return "", "", nil -} - -// newMockDevnetFS creates a new mock DevnetFS for testing -func newMockDevnetFS(env *descriptors.DevnetEnvironment) (*fs.DevnetFS, error) { - mockEnclaveFS := &mockEnclaveFS{env: env} - enclaveFS, err := fs.NewEnclaveFS(context.Background(), "test-enclave", - 
fs.WithEnclaveCtx(mockEnclaveFS), - fs.WithFs(afero.NewMemMapFs()), - ) - if err != nil { - return nil, err - } - return fs.NewDevnetFS(enclaveFS), nil -} - -type mockEnclaveContext struct { - services map[services.ServiceName]interfaces.ServiceContext -} - -func (m *mockEnclaveContext) GetEnclaveUuid() enclaves.EnclaveUUID { - return "mock-enclave-uuid" -} - -func (m *mockEnclaveContext) GetService(serviceIdentifier string) (interfaces.ServiceContext, error) { - if svc, ok := m.services[services.ServiceName(serviceIdentifier)]; ok { - return svc, nil - } - return nil, nil -} - -func (m *mockEnclaveContext) GetServices() (map[services.ServiceName]services.ServiceUUID, error) { - result := make(map[services.ServiceName]services.ServiceUUID) - for name, svc := range m.services { - result[name] = svc.GetServiceUUID() - } - return result, nil -} - -func (m *mockEnclaveContext) GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) { - return nil, nil -} - -func (m *mockEnclaveContext) RunStarlarkPackage(ctx context.Context, pkg string, serializedParams *starlark_run_config.StarlarkRunConfig) (<-chan interfaces.StarlarkResponse, string, error) { - return nil, "", nil -} - -func (m *mockEnclaveContext) RunStarlarkScript(ctx context.Context, script string, serializedParams *starlark_run_config.StarlarkRunConfig) error { - return nil -} - -// mockKurtosisContext implements interfaces.KurtosisContextInterface -type mockKurtosisContext struct { - enclaves map[string]interfaces.EnclaveContext -} - -func (m *mockKurtosisContext) CreateEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - if enclave, ok := m.enclaves[name]; ok { - return enclave, nil - } - return nil, nil -} - -func (m *mockKurtosisContext) GetEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - if enclave, ok := m.enclaves[name]; ok { - return enclave, nil - } - return nil, nil -} - -func 
(m *mockKurtosisContext) Clean(ctx context.Context, destroyAll bool) ([]interfaces.EnclaveNameAndUuid, error) { - return []interfaces.EnclaveNameAndUuid{}, nil -} - -func (m *mockKurtosisContext) GetEnclaveStatus(ctx context.Context, name string) (interfaces.EnclaveStatus, error) { - return interfaces.EnclaveStatusRunning, nil -} - -func (m *mockKurtosisContext) DestroyEnclave(ctx context.Context, name string) error { - return nil -} diff --git a/devnet-sdk/controller/surface/surface.go b/devnet-sdk/controller/surface/surface.go deleted file mode 100644 index 519d9d1c302b1..0000000000000 --- a/devnet-sdk/controller/surface/surface.go +++ /dev/null @@ -1,11 +0,0 @@ -package surface - -import "context" - -type ControlSurface interface { -} - -type ServiceLifecycleSurface interface { - StartService(context.Context, string) error - StopService(context.Context, string) error -} diff --git a/devnet-sdk/descriptors/deployment.go b/devnet-sdk/descriptors/deployment.go deleted file mode 100644 index 60e67ea2ced6a..0000000000000 --- a/devnet-sdk/descriptors/deployment.go +++ /dev/null @@ -1,87 +0,0 @@ -package descriptors - -import ( - "encoding/json" - "net/http" - - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum/go-ethereum/params" -) - -type PortInfo struct { - Host string `json:"host"` - Scheme string `json:"scheme,omitempty"` - Port int `json:"port,omitempty"` - PrivatePort int `json:"private_port,omitempty"` - - ReverseProxyHeader http.Header `json:"reverse_proxy_header,omitempty"` -} - -// EndpointMap is a map of service names to their endpoints. -type EndpointMap map[string]*PortInfo - -// Service represents a chain service (e.g. batcher, proposer, challenger) -type Service struct { - Name string `json:"name"` - Endpoints EndpointMap `json:"endpoints"` - Labels map[string]string `json:"labels,omitempty"` -} - -// ServiceMap is a map of service names to services. 
-type ServiceMap map[string]*Service - -// RedundantServiceMap is a map of service names to services. -// It is used to represent services that are redundant, i.e. they can have multiple instances. -type RedundantServiceMap map[string][]*Service - -// Node represents a node for a chain -type Node struct { - Name string `json:"name"` - Services ServiceMap `json:"services"` - Labels map[string]string `json:"labels,omitempty"` -} - -// AddressMap is a map of address names to their corresponding addresses -type AddressMap map[string]types.Address - -type Chain struct { - Name string `json:"name"` - ID string `json:"id,omitempty"` - Services RedundantServiceMap `json:"services,omitempty"` - Nodes []Node `json:"nodes"` - Wallets WalletMap `json:"wallets,omitempty"` - JWT string `json:"jwt,omitempty"` - Config *params.ChainConfig `json:"config,omitempty"` - Addresses AddressMap `json:"addresses,omitempty"` -} - -type L2Chain struct { - *Chain - L1Wallets WalletMap `json:"l1_wallets,omitempty"` - RollupConfig *rollup.Config `json:"rollup_config"` -} - -// Wallet represents a wallet with an address and optional private key. -type Wallet struct { - Address types.Address `json:"address"` - PrivateKey string `json:"private_key,omitempty"` -} - -// WalletMap is a map of wallet names to wallets. -type WalletMap map[string]*Wallet - -type DepSet = json.RawMessage - -// DevnetEnvironment exposes the relevant information to interact with a devnet. 
-type DevnetEnvironment struct { - Name string `json:"name"` - - ReverseProxyURL string `json:"reverse_proxy_url,omitempty"` - - L1 *Chain `json:"l1"` - L2 []*L2Chain `json:"l2"` - - Features []string `json:"features,omitempty"` - DepSets map[string]DepSet `json:"dep_sets,omitempty"` -} diff --git a/devnet-sdk/images/repository.go b/devnet-sdk/images/repository.go deleted file mode 100644 index 0766e0243a01f..0000000000000 --- a/devnet-sdk/images/repository.go +++ /dev/null @@ -1,43 +0,0 @@ -package images - -import "fmt" - -// Repository maps component versions to their corresponding Docker image URLs -type Repository struct { - mapping map[string]string -} - -const ( - opLabsToolsRegistry = "us-docker.pkg.dev/oplabs-tools-artifacts/images" -) - -// NewRepository creates a new Repository instance with predefined mappings -func NewRepository() *Repository { - return &Repository{ - mapping: map[string]string{ - // OP Labs images - "op-deployer": opLabsToolsRegistry, - "op-geth": opLabsToolsRegistry, - "op-node": opLabsToolsRegistry, - "op-batcher": opLabsToolsRegistry, - "op-proposer": opLabsToolsRegistry, - "op-challenger": opLabsToolsRegistry, - "op-reth": opLabsToolsRegistry, - }, - } -} - -// GetImage returns the full Docker image URL for a given component and version -func (r *Repository) GetImage(component string, version string) string { - if imageTemplate, ok := r.mapping[component]; ok { - - if version == "" { - version = "latest" - } - return fmt.Sprintf("%s/%s:%s", imageTemplate, component, version) - } - - // TODO: that's our way to convey that the "default" image should be used. - // We should probably have a more explicit way to do this. 
- return "" -} diff --git a/devnet-sdk/interfaces/registry.go b/devnet-sdk/interfaces/registry.go deleted file mode 100644 index 9ab21b04f1991..0000000000000 --- a/devnet-sdk/interfaces/registry.go +++ /dev/null @@ -1,33 +0,0 @@ -package interfaces - -import ( - "fmt" - - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum/go-ethereum/accounts/abi" -) - -// ErrContractNotFound indicates that a contract is not available at the requested address -type ErrContractNotFound struct { - ContractType string - Address types.Address -} - -func (e *ErrContractNotFound) Error() string { - return fmt.Sprintf("%s contract not found at %s", e.ContractType, e.Address) -} - -// ContractsRegistry provides access to all supported contract instances -type ContractsRegistry interface { - WETH(address types.Address) (WETH, error) - L2ToL2CrossDomainMessenger(address types.Address) (L2ToL2CrossDomainMessenger, error) -} - -// WETH represents the interface for interacting with the WETH contract -type WETH interface { - BalanceOf(user types.Address) types.ReadInvocation[types.Balance] -} - -type L2ToL2CrossDomainMessenger interface { - ABI() *abi.ABI -} diff --git a/devnet-sdk/kt/fs/devnet_fs.go b/devnet-sdk/kt/fs/devnet_fs.go deleted file mode 100644 index 9062728e8b452..0000000000000 --- a/devnet-sdk/kt/fs/devnet_fs.go +++ /dev/null @@ -1,171 +0,0 @@ -package fs - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "log" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" -) - -const ( - DevnetEnvArtifactNamePrefix = "devnet-descriptor-" - DevnetEnvArtifactPath = "env.json" -) - -type DevnetFS struct { - *EnclaveFS -} - -type DevnetFSDescriptorOption func(*options) - -type options struct { - artifactName string - artifactPath string -} - -func newOptions() *options { - return &options{ - artifactPath: DevnetEnvArtifactPath, - } -} - -func WithArtifactName(name string) DevnetFSDescriptorOption { - return func(o *options) { - 
o.artifactName = name - } -} - -func WithArtifactPath(path string) DevnetFSDescriptorOption { - return func(o *options) { - o.artifactPath = path - } -} - -func NewDevnetFS(fs *EnclaveFS) *DevnetFS { - return &DevnetFS{EnclaveFS: fs} -} - -func (fs *DevnetFS) GetDevnetDescriptor(ctx context.Context, opts ...DevnetFSDescriptorOption) (*descriptors.DevnetEnvironment, error) { - options := newOptions() - for _, opt := range opts { - opt(options) - } - - if options.artifactName == "" { - if err := fs.loadLatestDevnetDescriptorName(ctx, options); err != nil { - return nil, err - } - } - - artifact, err := fs.GetArtifact(ctx, options.artifactName) - if err != nil { - return nil, fmt.Errorf("error getting artifact: %w", err) - } - - var buf bytes.Buffer - writer := NewArtifactFileWriter(options.artifactPath, &buf) - - if err := artifact.ExtractFiles(writer); err != nil { - return nil, fmt.Errorf("error extracting file from artifact: %w", err) - } - - var env descriptors.DevnetEnvironment - if err := json.Unmarshal(buf.Bytes(), &env); err != nil { - return nil, fmt.Errorf("error unmarshalling environment: %w", err) - } - - return &env, nil -} - -func (fs *DevnetFS) UploadDevnetDescriptor(ctx context.Context, env *descriptors.DevnetEnvironment, opts ...DevnetFSDescriptorOption) error { - envBuf := bytes.NewBuffer(nil) - enc := json.NewEncoder(envBuf) - enc.SetIndent("", " ") - if err := enc.Encode(env); err != nil { - return fmt.Errorf("error encoding environment: %w", err) - } - - options := newOptions() - for _, opt := range opts { - opt(options) - } - - if options.artifactName == "" { - if err := fs.loadNextDevnetDescriptorName(ctx, options); err != nil { - return fmt.Errorf("error getting next devnet descriptor: %w", err) - } - } - - if err := fs.PutArtifact(ctx, options.artifactName, NewArtifactFileReader(options.artifactPath, envBuf)); err != nil { - return fmt.Errorf("error putting environment artifact: %w", err) - } - - return nil -} - -func (fs *DevnetFS) 
loadLatestDevnetDescriptorName(ctx context.Context, options *options) error { - names, err := fs.GetAllArtifactNames(ctx) - if err != nil { - return fmt.Errorf("error getting artifact names: %w", err) - } - - var maxSuffix int = -1 - var maxName string - for _, name := range names { - _, suffix, found := strings.Cut(name, DevnetEnvArtifactNamePrefix) - if !found { - continue - } - - // Parse the suffix as a number - var num int - if _, err := fmt.Sscanf(suffix, "%d", &num); err != nil { - continue // Skip if suffix is not a valid number - } - - // Update maxName if this number is larger - if num > maxSuffix { - maxSuffix = num - maxName = name - } - } - - if maxName == "" { - return fmt.Errorf("no descriptor found with valid numerical suffix") - } - - options.artifactName = maxName - return nil -} - -func (fs *DevnetFS) loadNextDevnetDescriptorName(ctx context.Context, options *options) error { - artifactNames, err := fs.GetAllArtifactNames(ctx) - if err != nil { - return fmt.Errorf("error getting artifact names: %w", err) - } - - maxNum := -1 - for _, artifactName := range artifactNames { - if !strings.HasPrefix(artifactName, DevnetEnvArtifactNamePrefix) { - continue - } - - numStr := strings.TrimPrefix(artifactName, DevnetEnvArtifactNamePrefix) - num := 0 - if _, err := fmt.Sscanf(numStr, "%d", &num); err != nil { - log.Printf("Warning: invalid devnet descriptor format: %s", artifactName) - continue - } - - if num > maxNum { - maxNum = num - } - } - - options.artifactName = fmt.Sprintf("%s%d", DevnetEnvArtifactNamePrefix, maxNum+1) - return nil -} diff --git a/devnet-sdk/kt/fs/devnet_fs_test.go b/devnet-sdk/kt/fs/devnet_fs_test.go deleted file mode 100644 index d8bb6824001f4..0000000000000 --- a/devnet-sdk/kt/fs/devnet_fs_test.go +++ /dev/null @@ -1,293 +0,0 @@ -package fs - -import ( - "context" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" -) - -func TestGetDevnetDescriptor(t *testing.T) { - envContent := `{"name": "test-env", "l1": {"name": "l1", "id": "1", "nodes": []}, "l2": []}` - expectedEnv := &descriptors.DevnetEnvironment{ - Name: "test-env", - L1: &descriptors.Chain{ - Name: "l1", - ID: "1", - Nodes: []descriptors.Node{}, - }, - L2: []*descriptors.L2Chain{}, - } - - tests := []struct { - name string - artifactName string - artifactPath string - envContent string - wantErr bool - expectedEnv *descriptors.DevnetEnvironment - }{ - { - name: "successful retrieval with default path", - artifactName: "devnet-descriptor-1", - artifactPath: DevnetEnvArtifactPath, - envContent: envContent, - expectedEnv: expectedEnv, - }, - { - name: "successful retrieval with custom path", - artifactName: "devnet-descriptor-1", - artifactPath: "custom/path/env.json", - envContent: envContent, - expectedEnv: expectedEnv, - }, - { - name: "invalid json content", - artifactName: "devnet-descriptor-1", - artifactPath: DevnetEnvArtifactPath, - envContent: `invalid json`, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create mock context with artifact - mockCtx := &mockEnclaveContext{ - artifacts: map[string][]byte{ - tt.artifactName: createTarGzArtifact(t, map[string]string{ - tt.artifactPath: tt.envContent, - }), - }, - fs: afero.NewMemMapFs(), - } - - enclaveFS, err := NewEnclaveFS(context.Background(), "test-enclave", WithEnclaveCtx(mockCtx), WithFs(mockCtx.fs)) - require.NoError(t, err) - - devnetFS := NewDevnetFS(enclaveFS) - - // Get descriptor with options - opts := []DevnetFSDescriptorOption{} - if tt.artifactName != "" { - opts = append(opts, WithArtifactName(tt.artifactName)) - } - if tt.artifactPath != DevnetEnvArtifactPath { - opts = append(opts, WithArtifactPath(tt.artifactPath)) - } - - env, err := devnetFS.GetDevnetDescriptor(context.Background(), opts...) 
- if tt.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - assert.Equal(t, tt.expectedEnv, env) - }) - } -} - -func TestUploadDevnetDescriptor(t *testing.T) { - env := &descriptors.DevnetEnvironment{ - Name: "test-env", - L1: &descriptors.Chain{ - Name: "l1", - ID: "1", - Nodes: []descriptors.Node{}, - }, - L2: []*descriptors.L2Chain{}, - } - - tests := []struct { - name string - artifactName string - artifactPath string - env *descriptors.DevnetEnvironment - wantErr bool - }{ - { - name: "successful upload with default path", - artifactName: "devnet-descriptor-1", - artifactPath: DevnetEnvArtifactPath, - env: env, - }, - { - name: "successful upload with custom path", - artifactName: "devnet-descriptor-1", - artifactPath: "custom/path/env.json", - env: env, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create mock context - mockCtx := &mockEnclaveContext{ - artifacts: make(map[string][]byte), - fs: afero.NewMemMapFs(), - } - - enclaveFS, err := NewEnclaveFS(context.Background(), "test-enclave", WithEnclaveCtx(mockCtx), WithFs(mockCtx.fs)) - require.NoError(t, err) - - devnetFS := NewDevnetFS(enclaveFS) - - // Upload descriptor with options - opts := []DevnetFSDescriptorOption{} - if tt.artifactName != "" { - opts = append(opts, WithArtifactName(tt.artifactName)) - } - if tt.artifactPath != DevnetEnvArtifactPath { - opts = append(opts, WithArtifactPath(tt.artifactPath)) - } - - err = devnetFS.UploadDevnetDescriptor(context.Background(), tt.env, opts...) 
- if tt.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - - // Verify the artifact was uploaded - require.NotNil(t, mockCtx.uploaded) - uploaded := mockCtx.uploaded[tt.artifactName] - require.NotNil(t, uploaded) - require.Contains(t, uploaded, tt.artifactPath) - }) - } -} - -func TestLoadLatestDevnetDescriptorName(t *testing.T) { - tests := []struct { - name string - existingNames []string - expectedName string - wantErr bool - }{ - { - name: "single descriptor", - existingNames: []string{ - "devnet-descriptor-1", - }, - expectedName: "devnet-descriptor-1", - }, - { - name: "multiple descriptors", - existingNames: []string{ - "devnet-descriptor-1", - "devnet-descriptor-3", - "devnet-descriptor-2", - }, - expectedName: "devnet-descriptor-3", - }, - { - name: "no descriptors", - existingNames: []string{}, - wantErr: true, - }, - { - name: "invalid descriptor names", - existingNames: []string{ - "invalid-name", - "devnet-descriptor-invalid", - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create mock context with artifacts - mockCtx := &mockEnclaveContext{ - artifacts: make(map[string][]byte), - fs: afero.NewMemMapFs(), - } - - // Add artifacts to the mock context - for _, name := range tt.existingNames { - mockCtx.artifacts[name] = []byte{} - } - - enclaveFS, err := NewEnclaveFS(context.Background(), "test-enclave", WithEnclaveCtx(mockCtx), WithFs(mockCtx.fs)) - require.NoError(t, err) - - devnetFS := NewDevnetFS(enclaveFS) - - options := newOptions() - err = devnetFS.loadLatestDevnetDescriptorName(context.Background(), options) - if tt.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - assert.Equal(t, tt.expectedName, options.artifactName) - }) - } -} - -func TestLoadNextDevnetDescriptorName(t *testing.T) { - tests := []struct { - name string - existingNames []string - expectedName string - }{ - { - name: "no existing descriptors", - existingNames: []string{}, - 
expectedName: "devnet-descriptor-0", - }, - { - name: "single descriptor", - existingNames: []string{ - "devnet-descriptor-1", - }, - expectedName: "devnet-descriptor-2", - }, - { - name: "multiple descriptors", - existingNames: []string{ - "devnet-descriptor-1", - "devnet-descriptor-3", - "devnet-descriptor-2", - }, - expectedName: "devnet-descriptor-4", - }, - { - name: "with invalid descriptor names", - existingNames: []string{ - "invalid-name", - "devnet-descriptor-1", - "devnet-descriptor-invalid", - }, - expectedName: "devnet-descriptor-2", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create mock context with artifacts - mockCtx := &mockEnclaveContext{ - artifacts: make(map[string][]byte), - fs: afero.NewMemMapFs(), - } - - // Add artifacts to the mock context - for _, name := range tt.existingNames { - mockCtx.artifacts[name] = []byte{} - } - - enclaveFS, err := NewEnclaveFS(context.Background(), "test-enclave", WithEnclaveCtx(mockCtx), WithFs(mockCtx.fs)) - require.NoError(t, err) - - devnetFS := NewDevnetFS(enclaveFS) - - options := newOptions() - err = devnetFS.loadNextDevnetDescriptorName(context.Background(), options) - require.NoError(t, err) - assert.Equal(t, tt.expectedName, options.artifactName) - }) - } -} diff --git a/devnet-sdk/kt/fs/fs.go b/devnet-sdk/kt/fs/fs.go deleted file mode 100644 index f1acb7e12f851..0000000000000 --- a/devnet-sdk/kt/fs/fs.go +++ /dev/null @@ -1,272 +0,0 @@ -package fs - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "fmt" - "io" - "os" - "path/filepath" - - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context" - "github.com/spf13/afero" -) - -// EnclaveContextIface abstracts the EnclaveContext for testing -type EnclaveContextIface interface { - GetAllFilesArtifactNamesAndUuids(ctx 
context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) - DownloadFilesArtifact(ctx context.Context, name string) ([]byte, error) - UploadFiles(pathToUpload string, artifactName string) (services.FilesArtifactUUID, services.FileArtifactName, error) -} - -type EnclaveFS struct { - enclaveCtx EnclaveContextIface - fs afero.Fs -} - -type EnclaveFSOption func(*EnclaveFS) - -func WithFs(fs afero.Fs) EnclaveFSOption { - return func(e *EnclaveFS) { - e.fs = fs - } -} - -func WithEnclaveCtx(enclaveCtx EnclaveContextIface) EnclaveFSOption { - return func(e *EnclaveFS) { - e.enclaveCtx = enclaveCtx - } -} - -func NewEnclaveFS(ctx context.Context, enclave string, opts ...EnclaveFSOption) (*EnclaveFS, error) { - enclaveFS := &EnclaveFS{} - - for _, opt := range opts { - opt(enclaveFS) - } - - if enclaveFS.fs == nil { - enclaveFS.fs = afero.NewOsFs() - } - - if enclaveFS.enclaveCtx == nil { - kurtosisCtx, err := kurtosis_context.NewKurtosisContextFromLocalEngine() - if err != nil { - return nil, err - } - - enclaveCtx, err := kurtosisCtx.GetEnclaveContext(ctx, enclave) - if err != nil { - return nil, err - } - - enclaveFS.enclaveCtx = enclaveCtx - } - - return enclaveFS, nil -} - -type Artifact struct { - rawData []byte - reader *tar.Reader - fs afero.Fs -} - -func (fs *EnclaveFS) GetAllArtifactNames(ctx context.Context) ([]string, error) { - artifacts, err := fs.enclaveCtx.GetAllFilesArtifactNamesAndUuids(ctx) - if err != nil { - return nil, err - } - - names := make([]string, len(artifacts)) - for i, artifact := range artifacts { - names[i] = artifact.GetFileName() - } - - return names, nil -} - -func (fs *EnclaveFS) GetArtifact(ctx context.Context, name string) (*Artifact, error) { - artifact, err := fs.enclaveCtx.DownloadFilesArtifact(ctx, name) - if err != nil { - return nil, err - } - - // Store the raw data - buffer := bytes.NewBuffer(artifact) - zipReader, err := gzip.NewReader(buffer) - if err != nil { - return nil, err - } - tarReader := 
tar.NewReader(zipReader) - return &Artifact{ - rawData: artifact, - reader: tarReader, - fs: fs.fs, - }, nil -} - -func (a *Artifact) newReader() (*tar.Reader, error) { - buffer := bytes.NewBuffer(a.rawData) - zipReader, err := gzip.NewReader(buffer) - if err != nil { - return nil, err - } - return tar.NewReader(zipReader), nil -} - -func (a *Artifact) Download(path string) error { - // Create a new reader for this operation - reader, err := a.newReader() - if err != nil { - return fmt.Errorf("failed to create reader: %w", err) - } - - for { - header, err := reader.Next() - if err == io.EOF { - return nil - } - if err != nil { - return fmt.Errorf("failed to read tar header: %w", err) - } - - fpath := filepath.Join(path, filepath.Clean(header.Name)) - - switch header.Typeflag { - case tar.TypeDir: - if err := a.fs.MkdirAll(fpath, os.FileMode(header.Mode)); err != nil { - return fmt.Errorf("failed to create directory %s: %w", fpath, err) - } - case tar.TypeReg: - // Create parent directories if they don't exist - if err := a.fs.MkdirAll(filepath.Dir(fpath), 0755); err != nil { - return fmt.Errorf("failed to create directory for %s: %w", fpath, err) - } - - // Create the file - f, err := a.fs.OpenFile(fpath, os.O_CREATE|os.O_WRONLY, os.FileMode(header.Mode)) - if err != nil { - return fmt.Errorf("failed to create file %s: %w", fpath, err) - } - - // Copy contents from tar reader to file - if _, err := io.Copy(f, reader); err != nil { - f.Close() - return fmt.Errorf("failed to write contents to %s: %w", fpath, err) - } - f.Close() - default: - return fmt.Errorf("unsupported file type %d for %s", header.Typeflag, header.Name) - } - } -} - -func (a *Artifact) ExtractFiles(writers ...*ArtifactFileWriter) error { - // Create a new reader for this operation - reader, err := a.newReader() - if err != nil { - return fmt.Errorf("failed to create reader: %w", err) - } - - paths := make(map[string]io.Writer) - for _, writer := range writers { - canonicalPath := 
filepath.Clean(writer.path) - paths[canonicalPath] = writer.writer - } - - for { - header, err := reader.Next() - if err == io.EOF { - break - } - if err != nil { - return fmt.Errorf("failed to read tar header: %w", err) - } - - headerPath := filepath.Clean(header.Name) - if _, ok := paths[headerPath]; !ok { - continue - } - - writer := paths[headerPath] - _, err = io.Copy(writer, reader) - if err != nil { - return fmt.Errorf("failed to copy content: %w", err) - } - } - - return nil -} - -func (fs *EnclaveFS) PutArtifact(ctx context.Context, name string, readers ...*ArtifactFileReader) (retErr error) { - // Create a temporary directory using afero - tempDir, err := afero.TempDir(fs.fs, "", "artifact-*") - if err != nil { - return err - } - defer func() { - if err := fs.fs.RemoveAll(tempDir); err != nil && retErr == nil { - retErr = fmt.Errorf("failed to cleanup temporary directory: %w", err) - } - }() - - // Process each reader - for _, reader := range readers { - // Create the full path in the temp directory - fullPath := filepath.Join(tempDir, reader.path) - - // Ensure the parent directory exists - if err := fs.fs.MkdirAll(filepath.Dir(fullPath), 0755); err != nil { - return err - } - - // Create the file - file, err := fs.fs.Create(fullPath) - if err != nil { - return err - } - - // Copy the content - _, err = io.Copy(file, reader.reader) - file.Close() // Close file after writing - if err != nil { - return err - } - } - - // Upload the directory to Kurtosis - if _, _, err := fs.enclaveCtx.UploadFiles(tempDir, name); err != nil { - return err - } - - return -} - -type ArtifactFileReader struct { - path string - reader io.Reader -} - -func NewArtifactFileReader(path string, reader io.Reader) *ArtifactFileReader { - return &ArtifactFileReader{ - path: path, - reader: reader, - } -} - -type ArtifactFileWriter struct { - path string - writer io.Writer -} - -func NewArtifactFileWriter(path string, writer io.Writer) *ArtifactFileWriter { - return &ArtifactFileWriter{ 
- path: path, - writer: writer, - } -} diff --git a/devnet-sdk/kt/fs/fs_test.go b/devnet-sdk/kt/fs/fs_test.go deleted file mode 100644 index c347a75c4aa16..0000000000000 --- a/devnet-sdk/kt/fs/fs_test.go +++ /dev/null @@ -1,450 +0,0 @@ -package fs - -import ( - "archive/tar" - "bytes" - "compress/gzip" - "context" - "os" - "path/filepath" - "testing" - - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type mockEnclaveContext struct { - artifacts map[string][]byte - uploaded map[string]map[string][]byte // artifactName -> path -> content - fs afero.Fs // filesystem to use for operations -} - -func (m *mockEnclaveContext) DownloadFilesArtifact(_ context.Context, name string) ([]byte, error) { - return m.artifacts[name], nil -} - -func (m *mockEnclaveContext) UploadFiles(pathToUpload string, artifactName string) (services.FilesArtifactUUID, services.FileArtifactName, error) { - if m.uploaded == nil { - m.uploaded = make(map[string]map[string][]byte) - } - m.uploaded[artifactName] = make(map[string][]byte) - - err := afero.Walk(m.fs, pathToUpload, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - if info.IsDir() { - return nil - } - - relPath, err := filepath.Rel(pathToUpload, path) - if err != nil { - return err - } - - content, err := afero.ReadFile(m.fs, path) - if err != nil { - return err - } - - m.uploaded[artifactName][relPath] = content - return nil - }) - - return "test-uuid", services.FileArtifactName(artifactName), err -} - -func (m *mockEnclaveContext) GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) { - var result []*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid - for name := range m.artifacts { - result = append(result, 
&kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid{ - FileName: name, - FileUuid: "test-uuid", - }) - } - return result, nil -} - -var _ EnclaveContextIface = (*mockEnclaveContext)(nil) - -func createTarGzArtifact(t *testing.T, files map[string]string) []byte { - var buf bytes.Buffer - gzWriter := gzip.NewWriter(&buf) - tarWriter := tar.NewWriter(gzWriter) - - for name, content := range files { - err := tarWriter.WriteHeader(&tar.Header{ - Name: name, - Mode: 0600, - Size: int64(len(content)), - }) - require.NoError(t, err) - - _, err = tarWriter.Write([]byte(content)) - require.NoError(t, err) - } - - require.NoError(t, tarWriter.Close()) - require.NoError(t, gzWriter.Close()) - return buf.Bytes() -} - -func TestArtifactExtraction(t *testing.T) { - tests := []struct { - name string - files map[string]string - requests map[string]string - wantErr bool - }{ - { - name: "simple path", - files: map[string]string{ - "file1.txt": "content1", - }, - requests: map[string]string{ - "file1.txt": "content1", - }, - }, - { - name: "path with dot prefix", - files: map[string]string{ - "./file1.txt": "content1", - }, - requests: map[string]string{ - "file1.txt": "content1", - }, - }, - { - name: "mixed paths", - files: map[string]string{ - "./file1.txt": "content1", - "file2.txt": "content2", - "./dir/f3.txt": "content3", - }, - requests: map[string]string{ - "file1.txt": "content1", - "file2.txt": "content2", - "dir/f3.txt": "content3", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create mock context with artifact - mockCtx := &mockEnclaveContext{ - artifacts: map[string][]byte{ - "test-artifact": createTarGzArtifact(t, tt.files), - }, - fs: afero.NewMemMapFs(), - } - - fs, err := NewEnclaveFS(context.Background(), "test-enclave", WithEnclaveCtx(mockCtx), WithFs(mockCtx.fs)) - require.NoError(t, err) - - artifact, err := fs.GetArtifact(context.Background(), "test-artifact") - require.NoError(t, err) - - // Create writers for all 
requested files - writers := make([]*ArtifactFileWriter, 0, len(tt.requests)) - buffers := make(map[string]*bytes.Buffer, len(tt.requests)) - for reqPath := range tt.requests { - buf := &bytes.Buffer{} - buffers[reqPath] = buf - writers = append(writers, NewArtifactFileWriter(reqPath, buf)) - } - - // Extract all files at once - err = artifact.ExtractFiles(writers...) - if tt.wantErr { - require.Error(t, err) - return - } - require.NoError(t, err) - - // Verify contents - for reqPath, wantContent := range tt.requests { - require.Equal(t, wantContent, buffers[reqPath].String(), "content mismatch for %s", reqPath) - } - }) - } -} - -func TestPutArtifact(t *testing.T) { - tests := []struct { - name string - files map[string]string - wantErr bool - }{ - { - name: "single file", - files: map[string]string{ - "file1.txt": "content1", - }, - }, - { - name: "multiple files", - files: map[string]string{ - "file1.txt": "content1", - "dir/file2.txt": "content2", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - fs := afero.NewMemMapFs() - mockCtx := &mockEnclaveContext{ - artifacts: make(map[string][]byte), - fs: fs, - } - - enclaveFs := &EnclaveFS{ - enclaveCtx: mockCtx, - fs: fs, - } - - // Create readers for all files - var readers []*ArtifactFileReader - for path, content := range tt.files { - readers = append(readers, NewArtifactFileReader( - path, - bytes.NewReader([]byte(content)), - )) - } - - // Put the artifact - err := enclaveFs.PutArtifact(context.Background(), "test-artifact", readers...) 
- if tt.wantErr { - require.Error(t, err) - return - } - require.NoError(t, err) - - // Verify uploaded contents - require.NotNil(t, mockCtx.uploaded) - uploaded := mockCtx.uploaded["test-artifact"] - require.NotNil(t, uploaded) - require.Equal(t, len(tt.files), len(uploaded)) - - for path, wantContent := range tt.files { - content, exists := uploaded[path] - require.True(t, exists, "missing file: %s", path) - require.Equal(t, wantContent, string(content), "content mismatch for %s", path) - } - }) - } -} - -func TestMultipleExtractCalls(t *testing.T) { - // Create a test artifact with multiple files - files := map[string]string{ - "file1.txt": "content1", - "file2.txt": "content2", - "dir/file3.txt": "content3", - "dir/file4.txt": "content4", - } - - // Create mock context with artifact - mockCtx := &mockEnclaveContext{ - artifacts: map[string][]byte{ - "test-artifact": createTarGzArtifact(t, files), - }, - fs: afero.NewMemMapFs(), - } - - fs, err := NewEnclaveFS(context.Background(), "test-enclave", WithEnclaveCtx(mockCtx), WithFs(mockCtx.fs)) - require.NoError(t, err) - - artifact, err := fs.GetArtifact(context.Background(), "test-artifact") - require.NoError(t, err) - - // First extraction - get file1.txt and file3.txt - firstExtractFiles := map[string]string{ - "file1.txt": "content1", - "dir/file3.txt": "content3", - } - - firstWriters := make([]*ArtifactFileWriter, 0, len(firstExtractFiles)) - firstBuffers := make(map[string]*bytes.Buffer, len(firstExtractFiles)) - - for reqPath := range firstExtractFiles { - buf := &bytes.Buffer{} - firstBuffers[reqPath] = buf - firstWriters = append(firstWriters, NewArtifactFileWriter(reqPath, buf)) - } - - // First extraction - err = artifact.ExtractFiles(firstWriters...) 
- require.NoError(t, err) - - // Verify first extraction - for reqPath, wantContent := range firstExtractFiles { - require.Equal(t, wantContent, firstBuffers[reqPath].String(), - "first extraction: content mismatch for %s", reqPath) - } - - // Second extraction - get file2.txt and file4.txt - secondExtractFiles := map[string]string{ - "file2.txt": "content2", - "dir/file4.txt": "content4", - } - - secondWriters := make([]*ArtifactFileWriter, 0, len(secondExtractFiles)) - secondBuffers := make(map[string]*bytes.Buffer, len(secondExtractFiles)) - - for reqPath := range secondExtractFiles { - buf := &bytes.Buffer{} - secondBuffers[reqPath] = buf - secondWriters = append(secondWriters, NewArtifactFileWriter(reqPath, buf)) - } - - // Second extraction using the same artifact - err = artifact.ExtractFiles(secondWriters...) - require.NoError(t, err) - - // Verify second extraction - for reqPath, wantContent := range secondExtractFiles { - require.Equal(t, wantContent, secondBuffers[reqPath].String(), - "second extraction: content mismatch for %s", reqPath) - } - - // Third extraction - extract all files again to prove we can keep extracting - allFiles := map[string]string{ - "file1.txt": "content1", - "file2.txt": "content2", - "dir/file3.txt": "content3", - "dir/file4.txt": "content4", - } - - allWriters := make([]*ArtifactFileWriter, 0, len(allFiles)) - allBuffers := make(map[string]*bytes.Buffer, len(allFiles)) - - for reqPath := range allFiles { - buf := &bytes.Buffer{} - allBuffers[reqPath] = buf - allWriters = append(allWriters, NewArtifactFileWriter(reqPath, buf)) - } - - // Third extraction - err = artifact.ExtractFiles(allWriters...) 
- require.NoError(t, err) - - // Verify third extraction - for reqPath, wantContent := range allFiles { - require.Equal(t, wantContent, allBuffers[reqPath].String(), - "third extraction: content mismatch for %s", reqPath) - } -} - -func TestArtifact_Download(t *testing.T) { - tests := []struct { - name string - files map[string][]byte // map of filepath to content - wantErr bool - validate func(t *testing.T, fs afero.Fs) - }{ - { - name: "single file download", - files: map[string][]byte{ - "test.txt": []byte("hello world"), - }, - validate: func(t *testing.T, fs afero.Fs) { - content, err := afero.ReadFile(fs, "test.txt") - require.NoError(t, err) - assert.Equal(t, []byte("hello world"), content) - }, - }, - { - name: "nested directory structure", - files: map[string][]byte{ - "dir/test.txt": []byte("hello"), - "dir/subdir/test.txt": []byte("world"), - }, - validate: func(t *testing.T, fs afero.Fs) { - content1, err := afero.ReadFile(fs, "dir/test.txt") - require.NoError(t, err) - assert.Equal(t, []byte("hello"), content1) - - content2, err := afero.ReadFile(fs, "dir/subdir/test.txt") - require.NoError(t, err) - assert.Equal(t, []byte("world"), content2) - }, - }, - { - name: "empty directory", - files: map[string][]byte{ - "dir/": nil, - }, - validate: func(t *testing.T, fs afero.Fs) { - exists, err := afero.DirExists(fs, "dir") - require.NoError(t, err) - assert.True(t, exists) - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a tar.gz archive in memory - var buf bytes.Buffer - gw := gzip.NewWriter(&buf) - tw := tar.NewWriter(gw) - - // Add files to the archive - for path, content := range tt.files { - header := &tar.Header{ - Name: path, - } - if content == nil { - header.Typeflag = tar.TypeDir - header.Mode = 0755 - } else { - header.Typeflag = tar.TypeReg - header.Size = int64(len(content)) - header.Mode = 0644 - } - - err := tw.WriteHeader(header) - require.NoError(t, err) - - if content != nil { - _, err = 
tw.Write(content) - require.NoError(t, err) - } - } - - err := tw.Close() - require.NoError(t, err) - err = gw.Close() - require.NoError(t, err) - - // Create in-memory filesystem - memFs := afero.NewMemMapFs() - - // Create an Artifact from the archive - rawData := buf.Bytes() - zipReader, err := gzip.NewReader(bytes.NewReader(rawData)) - require.NoError(t, err) - artifact := &Artifact{ - rawData: rawData, - reader: tar.NewReader(zipReader), - fs: memFs, - } - - // Test Download function - err = artifact.Download("") - if tt.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - - // Run validation - tt.validate(t, memFs) - }) - } -} diff --git a/devnet-sdk/kt/params.go b/devnet-sdk/kt/params.go deleted file mode 100644 index 2125e08e56cfd..0000000000000 --- a/devnet-sdk/kt/params.go +++ /dev/null @@ -1,82 +0,0 @@ -package kt - -// KurtosisParams represents the top-level Kurtosis configuration -type KurtosisParams struct { - OptimismPackage OptimismPackage `yaml:"optimism_package"` - EthereumPackage EthereumPackage `yaml:"ethereum_package"` -} - -// OptimismPackage represents the Optimism-specific configuration -type OptimismPackage struct { - Chains []ChainConfig `yaml:"chains"` - OpContractDeployerParams OpContractDeployerParams `yaml:"op_contract_deployer_params"` - Persistent bool `yaml:"persistent"` -} - -// ChainConfig represents a single chain configuration -type ChainConfig struct { - Participants []ParticipantConfig `yaml:"participants"` - NetworkParams NetworkParams `yaml:"network_params"` - BatcherParams BatcherParams `yaml:"batcher_params"` - ChallengerParams ChallengerParams `yaml:"challenger_params"` - ProposerParams ProposerParams `yaml:"proposer_params"` -} - -// ParticipantConfig represents a participant in the network -type ParticipantConfig struct { - ElType string `yaml:"el_type"` - ElImage string `yaml:"el_image"` - ClType string `yaml:"cl_type"` - ClImage string `yaml:"cl_image"` - Count int `yaml:"count"` -} - -// 
TimeOffsets represents a map of time offset values -type TimeOffsets map[string]int - -// NetworkParams represents network-specific parameters -type NetworkParams struct { - Network string `yaml:"network"` - NetworkID string `yaml:"network_id"` - SecondsPerSlot int `yaml:"seconds_per_slot"` - Name string `yaml:"name"` - FundDevAccounts bool `yaml:"fund_dev_accounts"` - TimeOffsets `yaml:",inline"` -} - -// BatcherParams represents batcher-specific parameters -type BatcherParams struct { - Image string `yaml:"image"` -} - -// ChallengerParams represents challenger-specific parameters -type ChallengerParams struct { - Image string `yaml:"image"` - CannonPrestatesURL string `yaml:"cannon_prestates_url,omitempty"` -} - -// ProposerParams represents proposer-specific parameters -type ProposerParams struct { - Image string `yaml:"image"` - GameType int `yaml:"game_type"` - ProposalInterval string `yaml:"proposal_interval"` -} - -// OpContractDeployerParams represents contract deployer parameters -type OpContractDeployerParams struct { - Image string `yaml:"image"` - L1ArtifactsLocator string `yaml:"l1_artifacts_locator"` - L2ArtifactsLocator string `yaml:"l2_artifacts_locator"` -} - -// EthereumPackage represents Ethereum-specific configuration -type EthereumPackage struct { - NetworkParams EthereumNetworkParams `yaml:"network_params"` -} - -// EthereumNetworkParams represents Ethereum network parameters -type EthereumNetworkParams struct { - Preset string `yaml:"preset"` - GenesisDelay int `yaml:"genesis_delay"` - AdditionalPreloadedContracts string `yaml:"additional_preloaded_contracts"` -} diff --git a/devnet-sdk/kt/visitor.go b/devnet-sdk/kt/visitor.go deleted file mode 100644 index e237a6724f7cf..0000000000000 --- a/devnet-sdk/kt/visitor.go +++ /dev/null @@ -1,265 +0,0 @@ -package kt - -import ( - "strconv" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/images" - "github.com/ethereum-optimism/optimism/devnet-sdk/manifest" -) - -const ( - 
defaultProposalInterval = "10m" - defaultGameType = 1 - defaultPreset = "minimal" - defaultGenesisDelay = 5 - defaultPreloadedContracts = `{ - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - }` -) - -// KurtosisVisitor implements the manifest.ManifestVisitor interface -type KurtosisVisitor struct { - params *KurtosisParams - repository *images.Repository - l2Visitor *l2Visitor -} - -// Component visitor for handling component versions -type componentVisitor struct { - name string - version string -} - -// Chain visitor for handling chain configuration -type chainVisitor struct { - name string - id uint64 -} - -// Contracts visitor for handling contract configuration -type contractsVisitor struct { - locator string -} - -// Overrides represents deployment overrides -type Overrides struct { - SecondsPerSlot int `yaml:"seconds_per_slot"` - TimeOffsets `yaml:",inline"` -} - -// Deployment visitor for handling deployment configuration -type deploymentVisitor struct { - deployer *componentVisitor - l1Contracts *contractsVisitor - l2Contracts *contractsVisitor - overrides *Overrides -} - -// L2 visitor for handling L2 configuration -type l2Visitor struct { - components map[string]*componentVisitor - deployment *deploymentVisitor - chains []*chainVisitor -} - -// NewKurtosisVisitor creates a new KurtosisVisitor -func NewKurtosisVisitor() *KurtosisVisitor { - return &KurtosisVisitor{ - params: &KurtosisParams{ - OptimismPackage: OptimismPackage{ - Chains: make([]ChainConfig, 0), - Persistent: false, - }, - EthereumPackage: EthereumPackage{ - NetworkParams: EthereumNetworkParams{ - Preset: defaultPreset, - GenesisDelay: defaultGenesisDelay, - AdditionalPreloadedContracts: defaultPreloadedContracts, - }, - }, - }, - repository: images.NewRepository(), - } -} - -func 
(v *KurtosisVisitor) VisitName(name string) {} - -func (v *KurtosisVisitor) VisitType(manifestType string) {} - -func (v *KurtosisVisitor) VisitL1() manifest.ChainVisitor { - return &chainVisitor{} -} - -func (v *KurtosisVisitor) VisitL2() manifest.L2Visitor { - v.l2Visitor = &l2Visitor{ - components: make(map[string]*componentVisitor), - deployment: &deploymentVisitor{ - deployer: &componentVisitor{}, - l1Contracts: &contractsVisitor{}, - l2Contracts: &contractsVisitor{}, - overrides: &Overrides{ - TimeOffsets: make(TimeOffsets), - }, - }, - chains: make([]*chainVisitor, 0), - } - return v.l2Visitor -} - -// Component visitor implementation -func (v *componentVisitor) VisitVersion(version string) { - // Strip the component name from the version string - parts := strings.SplitN(version, "/", 2) - if len(parts) == 2 { - v.version = parts[1] - } else { - v.version = version - } -} - -// Chain visitor implementation -func (v *chainVisitor) VisitName(name string) { - v.name = name -} - -func (v *chainVisitor) VisitID(id uint64) { - // TODO: this is horrible but unfortunately the funding script breaks for - // chain IDs larger than 32 bits. 
- v.id = id & 0xFFFFFFFF -} - -// Contracts visitor implementation -func (v *contractsVisitor) VisitVersion(version string) { - if v.locator == "" { - v.locator = "tag://" + version - } -} - -func (v *contractsVisitor) VisitLocator(locator string) { - v.locator = locator -} - -// Deployment visitor implementation -func (v *deploymentVisitor) VisitDeployer() manifest.ComponentVisitor { - return v.deployer -} - -func (v *deploymentVisitor) VisitL1Contracts() manifest.ContractsVisitor { - return v.l1Contracts -} - -func (v *deploymentVisitor) VisitL2Contracts() manifest.ContractsVisitor { - return v.l2Contracts -} - -func (v *deploymentVisitor) VisitOverride(key string, value interface{}) { - if key == "seconds_per_slot" { - if intValue, ok := value.(int); ok { - v.overrides.SecondsPerSlot = intValue - } - } else if strings.HasSuffix(key, "_time_offset") { - if intValue, ok := value.(int); ok { - v.overrides.TimeOffsets[key] = intValue - } - } -} - -// L2 visitor implementation -func (v *l2Visitor) VisitL2Component(name string) manifest.ComponentVisitor { - comp := &componentVisitor{name: name} - v.components[name] = comp - return comp -} - -func (v *l2Visitor) VisitL2Deployment() manifest.DeploymentVisitor { - return v.deployment -} - -func (v *l2Visitor) VisitL2Chain(idx int) manifest.ChainVisitor { - chain := &chainVisitor{} - if idx >= len(v.chains) { - v.chains = append(v.chains, chain) - } else { - v.chains[idx] = chain - } - return chain -} - -// GetParams returns the generated Kurtosis parameters -func (v *KurtosisVisitor) GetParams() *KurtosisParams { - if v.l2Visitor != nil { - v.BuildKurtosisParams(v.l2Visitor) - } - return v.params -} - -// getComponentVersion returns the version for a component, or empty string if not found -func (l2 *l2Visitor) getComponentVersion(name string) string { - if comp, ok := l2.components[name]; ok { - return comp.version - } - return "" -} - -// getComponentImage returns the image for a component, or empty string if component 
doesn't exist -func (v *KurtosisVisitor) getComponentImage(l2 *l2Visitor, name string) string { - if _, ok := l2.components[name]; ok { - return v.repository.GetImage(name, l2.getComponentVersion(name)) - } - return "" -} - -// BuildKurtosisParams builds the final Kurtosis parameters from the collected visitor data -func (v *KurtosisVisitor) BuildKurtosisParams(l2 *l2Visitor) { - // Set deployer params - v.params.OptimismPackage.OpContractDeployerParams = OpContractDeployerParams{ - Image: v.repository.GetImage("op-deployer", l2.deployment.deployer.version), - L1ArtifactsLocator: l2.deployment.l1Contracts.locator, - L2ArtifactsLocator: l2.deployment.l2Contracts.locator, - } - - // Build chain configs - for _, chain := range l2.chains { - // Create network params with embedded map - networkParams := NetworkParams{ - Network: "kurtosis", - NetworkID: strconv.FormatUint(chain.id, 10), - SecondsPerSlot: l2.deployment.overrides.SecondsPerSlot, - Name: chain.name, - FundDevAccounts: true, - TimeOffsets: l2.deployment.overrides.TimeOffsets, - } - - chainConfig := ChainConfig{ - Participants: []ParticipantConfig{ - { - ElType: "op-geth", - ElImage: v.getComponentImage(l2, "op-geth"), - ClType: "op-node", - ClImage: v.getComponentImage(l2, "op-node"), - Count: 1, - }, - }, - NetworkParams: networkParams, - BatcherParams: BatcherParams{ - Image: v.getComponentImage(l2, "op-batcher"), - }, - ChallengerParams: ChallengerParams{ - Image: v.getComponentImage(l2, "op-challenger"), - }, - ProposerParams: ProposerParams{ - Image: v.getComponentImage(l2, "op-proposer"), - GameType: defaultGameType, - ProposalInterval: defaultProposalInterval, - }, - } - - v.params.OptimismPackage.Chains = append(v.params.OptimismPackage.Chains, chainConfig) - } -} diff --git a/devnet-sdk/kt/visitor_test.go b/devnet-sdk/kt/visitor_test.go deleted file mode 100644 index 17ce7bdfb0202..0000000000000 --- a/devnet-sdk/kt/visitor_test.go +++ /dev/null @@ -1,121 +0,0 @@ -package kt - -import ( - "testing" 
- - "github.com/ethereum-optimism/optimism/devnet-sdk/manifest" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" -) - -func TestKurtosisVisitor_TransformsManifest(t *testing.T) { - input := ` -name: alpaca -type: alphanet -l1: - name: sepolia - chain_id: 11155111 -l2: - deployment: - op-deployer: - version: op-deployer/v0.0.11 - l1-contracts: - locator: https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-c3f2e2adbd52a93c2c08cab018cd637a4e203db53034e59c6c139c76b4297953.tar.gz - version: 984bae9146398a2997ec13757bfe2438ca8f92eb - l2-contracts: - version: op-contracts/v1.7.0-beta.1+l2-contracts - overrides: - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - components: - op-node: - version: op-node/v1.10.2 - op-geth: - version: op-geth/v1.101411.4-rc.4 - op-reth: - version: op-reth/v1.1.5 - op-proposer: - version: op-proposer/v1.10.0-rc.2 - op-batcher: - version: op-batcher/v1.10.0 - op-challenger: - version: op-challenger/v1.3.1-rc.4 - chains: - - name: alpaca-0 - chain_id: 11155111100000 -` - - // Then the output should match the expected YAML structure - expected := KurtosisParams{ - OptimismPackage: OptimismPackage{ - Chains: []ChainConfig{ - { - Participants: []ParticipantConfig{ - { - ElType: "op-geth", - ElImage: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101411.4-rc.4", - ClType: "op-node", - ClImage: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-node:v1.10.2", - Count: 1, - }, - }, - NetworkParams: NetworkParams{ - Network: "kurtosis", - NetworkID: "1081032288", - SecondsPerSlot: 2, - Name: "alpaca-0", - FundDevAccounts: true, - TimeOffsets: TimeOffsets{ - "fjord_time_offset": 0, - "granite_time_offset": 0, - "holocene_time_offset": 0, - }, - }, - BatcherParams: BatcherParams{ - Image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:v1.10.0", - }, - ChallengerParams: ChallengerParams{ - Image: 
"us-docker.pkg.dev/oplabs-tools-artifacts/images/op-challenger:v1.3.1-rc.4", - CannonPrestatesURL: "", - }, - ProposerParams: ProposerParams{ - Image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-proposer:v1.10.0-rc.2", - GameType: 1, - ProposalInterval: "10m", - }, - }, - }, - OpContractDeployerParams: OpContractDeployerParams{ - Image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-deployer:v0.0.11", - L1ArtifactsLocator: "https://storage.googleapis.com/oplabs-contract-artifacts/artifacts-v1-c3f2e2adbd52a93c2c08cab018cd637a4e203db53034e59c6c139c76b4297953.tar.gz", - L2ArtifactsLocator: "tag://op-contracts/v1.7.0-beta.1+l2-contracts", - }, - Persistent: false, - }, - EthereumPackage: EthereumPackage{ - NetworkParams: EthereumNetworkParams{ - Preset: "minimal", - GenesisDelay: 5, - AdditionalPreloadedContracts: defaultPreloadedContracts, - }, - }, - } - - // Convert the input to a manifest - var manifest manifest.Manifest - err := yaml.Unmarshal([]byte(input), &manifest) - require.NoError(t, err) - - // Create visitor and have manifest accept it - visitor := NewKurtosisVisitor() - manifest.Accept(visitor) - - // Get the generated params - actual := *visitor.GetParams() - - // Compare the actual and expected params - require.Equal(t, expected, actual, "Generated params should match expected params") - -} diff --git a/devnet-sdk/manifest/acceptor.go b/devnet-sdk/manifest/acceptor.go deleted file mode 100644 index b7873c6f60714..0000000000000 --- a/devnet-sdk/manifest/acceptor.go +++ /dev/null @@ -1,25 +0,0 @@ -package manifest - -type ManifestAcceptor interface { - Accept(visitor ManifestVisitor) -} - -type ChainAcceptor interface { - Accept(visitor ChainVisitor) -} - -type L2Acceptor interface { - Accept(visitor L2Visitor) -} - -type DeploymentAcceptor interface { - Accept(visitor DeploymentVisitor) -} - -type ContractsAcceptor interface { - Accept(visitor ContractsVisitor) -} - -type ComponentAcceptor interface { - Accept(visitor ComponentVisitor) -} 
diff --git a/devnet-sdk/manifest/manifest.go b/devnet-sdk/manifest/manifest.go deleted file mode 100644 index 65d9e5f7da623..0000000000000 --- a/devnet-sdk/manifest/manifest.go +++ /dev/null @@ -1,104 +0,0 @@ -package manifest - -// L1Config represents L1 configuration -type L1Config struct { - Name string `yaml:"name"` - ChainID uint64 `yaml:"chain_id"` -} - -func (c *L1Config) Accept(visitor ChainVisitor) { - visitor.VisitName(c.Name) - visitor.VisitID(c.ChainID) -} - -var _ ChainAcceptor = (*L1Config)(nil) - -type Component struct { - Version string `yaml:"version"` -} - -func (c *Component) Accept(visitor ComponentVisitor) { - visitor.VisitVersion(c.Version) -} - -var _ ComponentAcceptor = (*Component)(nil) - -type Contracts struct { - Version string `yaml:"version"` - Locator string `yaml:"locator"` -} - -func (c *Contracts) Accept(visitor ContractsVisitor) { - visitor.VisitLocator(c.Locator) - visitor.VisitVersion(c.Version) -} - -var _ ContractsAcceptor = (*Contracts)(nil) - -// L2Deployment represents deployment configuration -type L2Deployment struct { - OpDeployer *Component `yaml:"op-deployer"` - L1Contracts *Contracts `yaml:"l1-contracts"` - L2Contracts *Contracts `yaml:"l2-contracts"` - Overrides map[string]interface{} `yaml:"overrides"` -} - -func (d *L2Deployment) Accept(visitor DeploymentVisitor) { - d.OpDeployer.Accept(visitor.VisitDeployer()) - d.L1Contracts.Accept(visitor.VisitL1Contracts()) - d.L2Contracts.Accept(visitor.VisitL2Contracts()) - for key, value := range d.Overrides { - visitor.VisitOverride(key, value) - } -} - -var _ DeploymentAcceptor = (*L2Deployment)(nil) - -// L2Chain represents an L2 chain configuration -type L2Chain struct { - Name string `yaml:"name"` - ChainID uint64 `yaml:"chain_id"` -} - -func (c *L2Chain) Accept(visitor ChainVisitor) { - visitor.VisitName(c.Name) - visitor.VisitID(c.ChainID) -} - -var _ ChainAcceptor = (*L2Chain)(nil) - -// L2Config represents L2 configuration -type L2Config struct { - Deployment 
*L2Deployment `yaml:"deployment"` - Components map[string]*Component `yaml:"components"` - Chains []*L2Chain `yaml:"chains"` -} - -func (c *L2Config) Accept(visitor L2Visitor) { - for name, component := range c.Components { - component.Accept(visitor.VisitL2Component(name)) - } - for i, chain := range c.Chains { - chain.Accept(visitor.VisitL2Chain(i)) - } - c.Deployment.Accept(visitor.VisitL2Deployment()) -} - -var _ L2Acceptor = (*L2Config)(nil) - -// Manifest represents the top-level manifest configuration -type Manifest struct { - Name string `yaml:"name"` - Type string `yaml:"type"` - L1 *L1Config `yaml:"l1"` - L2 *L2Config `yaml:"l2"` -} - -func (m *Manifest) Accept(visitor ManifestVisitor) { - visitor.VisitName(m.Name) - visitor.VisitType(m.Type) - m.L1.Accept(visitor.VisitL1()) - m.L2.Accept(visitor.VisitL2()) -} - -var _ ManifestAcceptor = (*Manifest)(nil) diff --git a/devnet-sdk/manifest/visitor.go b/devnet-sdk/manifest/visitor.go deleted file mode 100644 index c6a3861e33dbc..0000000000000 --- a/devnet-sdk/manifest/visitor.go +++ /dev/null @@ -1,35 +0,0 @@ -package manifest - -type ManifestVisitor interface { - VisitName(name string) - VisitType(manifestType string) - VisitL1() ChainVisitor - VisitL2() L2Visitor -} - -type L2Visitor interface { - VisitL2Component(name string) ComponentVisitor - VisitL2Deployment() DeploymentVisitor - VisitL2Chain(int) ChainVisitor -} - -type ComponentVisitor interface { - VisitVersion(version string) -} - -type DeploymentVisitor interface { - VisitDeployer() ComponentVisitor - VisitL1Contracts() ContractsVisitor - VisitL2Contracts() ContractsVisitor - VisitOverride(string, interface{}) -} - -type ContractsVisitor interface { - VisitVersion(version string) - VisitLocator(locator string) -} - -type ChainVisitor interface { - VisitName(name string) - VisitID(id uint64) -} diff --git a/devnet-sdk/proofs/prestate/cmd/main.go b/devnet-sdk/proofs/prestate/cmd/main.go deleted file mode 100644 index 782ac7a642e83..0000000000000 --- 
a/devnet-sdk/proofs/prestate/cmd/main.go +++ /dev/null @@ -1,99 +0,0 @@ -package main - -import ( - "context" - "flag" - "fmt" - "log" - "os" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/proofs/prestate" -) - -type chainConfig struct { - id string - rollupConfig string - genesisConfig string -} - -func parseChainFlag(s string) (*chainConfig, error) { - parts := strings.Split(s, ",") - if len(parts) != 3 { - return nil, fmt.Errorf("chain flag must contain exactly 1 id and 2 files separated by comma") - } - return &chainConfig{ - id: strings.TrimSpace(parts[0]), - rollupConfig: strings.TrimSpace(parts[1]), - genesisConfig: strings.TrimSpace(parts[2]), - }, nil -} - -func main() { - var ( - clientURL = flag.String("url", "http://localhost:8080", "URL of the prestate builder service") - interop = flag.Bool("interop", false, "Generate interop dependency set") - chains = make(chainConfigList, 0) - ) - - flag.Var(&chains, "chain", "Chain configuration files in format: rollup-config.json,genesis-config.json (can be specified multiple times)") - flag.Parse() - - client := prestate.NewPrestateBuilderClient(*clientURL) - ctx := context.Background() - - // Build options list - opts := make([]prestate.PrestateBuilderOption, 0) - - if *interop { - opts = append(opts, prestate.WithGeneratedInteropDepSet()) - } - - // Add chain configs - for i, chain := range chains { - rollupFile, err := os.Open(chain.rollupConfig) - if err != nil { - log.Fatalf("Failed to open rollup config file for chain %d: %v", i, err) - } - defer rollupFile.Close() - - genesisFile, err := os.Open(chain.genesisConfig) - if err != nil { - log.Fatalf("Failed to open genesis config file for chain %d: %v", i, err) - } - defer genesisFile.Close() - - opts = append(opts, prestate.WithChainConfig( - chain.id, - rollupFile, - genesisFile, - )) - } - - // Build prestate - manifest, err := client.BuildPrestate(ctx, opts...) 
- if err != nil { - log.Fatalf("Failed to build prestate: %v", err) - } - - // Print manifest - for id, hash := range manifest { - fmt.Printf("%s: %s\n", id, hash) - } -} - -// chainConfigList implements flag.Value interface for repeated chain flags -type chainConfigList []*chainConfig - -func (c *chainConfigList) String() string { - return fmt.Sprintf("%v", *c) -} - -func (c *chainConfigList) Set(value string) error { - config, err := parseChainFlag(value) - if err != nil { - return err - } - *c = append(*c, config) - return nil -} diff --git a/devnet-sdk/shell/cmd/ctrl/main.go b/devnet-sdk/shell/cmd/ctrl/main.go deleted file mode 100644 index ed4cdbe30f11c..0000000000000 --- a/devnet-sdk/shell/cmd/ctrl/main.go +++ /dev/null @@ -1,78 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/devnet-sdk/controller/surface" - "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" - "github.com/urfave/cli/v2" -) - -func run(ctx *cli.Context) error { - devnetURL := ctx.String("devnet") - action := ctx.String("action") - service := ctx.String("service") - - devnetEnv, err := env.LoadDevnetFromURL(devnetURL) - if err != nil { - return err - } - - ctrl, err := devnetEnv.Control() - if err != nil { - return err - } - - lc, ok := ctrl.(surface.ServiceLifecycleSurface) - if !ok { - return fmt.Errorf("control surface does not support lifecycle management") - } - - switch action { - case "start": - return lc.StartService(ctx.Context, service) - case "stop": - return lc.StopService(ctx.Context, service) - default: - return fmt.Errorf("invalid action: %s", action) - } -} - -func main() { - app := &cli.App{ - Name: "ctrl", - Usage: "Control devnet services", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "devnet", - Usage: "URL to devnet JSON file", - EnvVars: []string{env.EnvURLVar}, - Required: true, - }, - &cli.StringFlag{ - Name: "action", - Usage: "Action to perform (start or stop)", - Required: true, - Value: "", - Action: func(ctx 
*cli.Context, v string) error { - if v != "start" && v != "stop" { - return fmt.Errorf("action must be either 'start' or 'stop'") - } - return nil - }, - }, - &cli.StringFlag{ - Name: "service", - Usage: "Service to perform action on", - Required: true, - }, - }, - Action: run, - } - - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} diff --git a/devnet-sdk/shell/cmd/enter/main.go b/devnet-sdk/shell/cmd/enter/main.go deleted file mode 100644 index 5fb98280c4016..0000000000000 --- a/devnet-sdk/shell/cmd/enter/main.go +++ /dev/null @@ -1,93 +0,0 @@ -package main - -import ( - "fmt" - "os" - "os/exec" - - "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" - "github.com/urfave/cli/v2" -) - -func run(ctx *cli.Context) error { - devnetURL := ctx.String("devnet") - chainName := ctx.String("chain") - nodeIndex := ctx.Int("node-index") - - devnetEnv, err := env.LoadDevnetFromURL(devnetURL) - if err != nil { - return err - } - - chain, err := devnetEnv.GetChain(chainName) - if err != nil { - return err - } - - chainEnv, err := chain.GetEnv( - env.WithCastIntegration(true, nodeIndex), - ) - if err != nil { - return err - } - - if motd := chainEnv.GetMotd(); motd != "" { - fmt.Println(motd) - } - - // Get current environment and append chain-specific vars - env := chainEnv.ApplyToEnv(os.Environ()) - - // Get current shell - shell := os.Getenv("SHELL") - if shell == "" { - shell = "/bin/sh" - } - - // Execute new shell - cmd := exec.Command(shell) - cmd.Env = env - cmd.Stdin = os.Stdin - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - - if err := cmd.Run(); err != nil { - return fmt.Errorf("error executing shell: %w", err) - } - - return nil -} - -func main() { - app := &cli.App{ - Name: "enter", - Usage: "Enter a shell with devnet environment variables set", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "devnet", - Usage: "URL to devnet JSON file", - EnvVars: []string{env.EnvURLVar}, - Required: true, - }, 
- &cli.StringFlag{ - Name: "chain", - Usage: "Name of the chain to connect to", - EnvVars: []string{env.ChainNameVar}, - Required: true, - }, - &cli.IntFlag{ - Name: "node-index", - Usage: "Index of the node to connect to (default: 0)", - EnvVars: []string{env.NodeIndexVar}, - Required: false, - Value: 0, - }, - }, - Action: run, - } - - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} diff --git a/devnet-sdk/shell/cmd/motd/main.go b/devnet-sdk/shell/cmd/motd/main.go deleted file mode 100644 index c2436ddd6eaa6..0000000000000 --- a/devnet-sdk/shell/cmd/motd/main.go +++ /dev/null @@ -1,59 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" - "github.com/urfave/cli/v2" -) - -func run(ctx *cli.Context) error { - devnetURL := ctx.String("devnet") - chainName := ctx.String("chain") - - devnetEnv, err := env.LoadDevnetFromURL(devnetURL) - if err != nil { - return err - } - - chain, err := devnetEnv.GetChain(chainName) - if err != nil { - return err - } - - chainEnv, err := chain.GetEnv() - if err != nil { - return err - } - - fmt.Println(chainEnv.GetMotd()) - return nil -} - -func main() { - app := &cli.App{ - Name: "motd", - Usage: "Display the Message of the Day for a chain environment", - Flags: []cli.Flag{ - &cli.StringFlag{ - Name: "devnet", - Usage: "URL to devnet JSON file", - EnvVars: []string{env.EnvURLVar}, - Required: true, - }, - &cli.StringFlag{ - Name: "chain", - Usage: "Name of the chain to get MOTD for", - EnvVars: []string{env.ChainNameVar}, - Required: true, - }, - }, - Action: run, - } - - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} diff --git a/devnet-sdk/shell/env/chain.go b/devnet-sdk/shell/env/chain.go deleted file mode 100644 index 446b66fb6581a..0000000000000 --- a/devnet-sdk/shell/env/chain.go +++ /dev/null @@ -1,187 +0,0 @@ -package env - -import ( - "bytes" - "fmt" 
- "html/template" - "net/url" - "path/filepath" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" -) - -const ( - EnvURLVar = "DEVNET_ENV_URL" - ChainNameVar = "DEVNET_CHAIN_NAME" - NodeIndexVar = "DEVNET_NODE_INDEX" - ExpectPreconditionsMet = "DEVNET_EXPECT_PRECONDITIONS_MET" - EnvCtrlVar = "DEVNET_ENV_CTRL" -) - -type ChainConfig struct { - chain *descriptors.Chain - devnetURL string - name string -} - -type ChainEnv struct { - motd string - envVars map[string]string -} - -func (c *ChainConfig) getRpcUrl(nodeIndex int) func() (string, error) { - return func() (string, error) { - if len(c.chain.Nodes) == 0 { - return "", fmt.Errorf("chain '%s' has no nodes", c.chain.Name) - } - - if nodeIndex >= len(c.chain.Nodes) { - return "", fmt.Errorf("node index %d is out of bounds for chain '%s'", nodeIndex, c.chain.Name) - } - - // Get RPC endpoint from the first node's execution layer service - elService, ok := c.chain.Nodes[nodeIndex].Services["el"] - if !ok { - return "", fmt.Errorf("no execution layer service found for chain '%s'", c.chain.Name) - } - - rpcEndpoint, ok := elService.Endpoints["rpc"] - if !ok { - return "", fmt.Errorf("no RPC endpoint found for chain '%s'", c.chain.Name) - } - - scheme := rpcEndpoint.Scheme - if scheme == "" { - scheme = "http" - } - return fmt.Sprintf("%s://%s:%d", scheme, rpcEndpoint.Host, rpcEndpoint.Port), nil - } -} - -func (c *ChainConfig) getJwtSecret() (string, error) { - jwt := c.chain.JWT - if len(jwt) >= 2 && jwt[:2] == "0x" { - jwt = jwt[2:] - } - - return jwt, nil -} - -func (c *ChainConfig) motd() string { - tmpl := `You're in a {{.Name}} chain subshell. 
- - Some addresses of interest: - {{ range $key, $value := .Addresses -}} - {{ printf "%-35s" $key }} = {{ $value }} - {{ end -}} - ` - - t := template.Must(template.New("motd").Parse(tmpl)) - - var buf bytes.Buffer - if err := t.Execute(&buf, c.chain); err != nil { - panic(err) - } - - return buf.String() -} - -type ChainConfigOption func(*ChainConfig, *chainConfigOpts) error - -type chainConfigOpts struct { - extraEnvVars map[string]string -} - -func WithCastIntegration(cast bool, nodeIndex int) ChainConfigOption { - return func(c *ChainConfig, o *chainConfigOpts) error { - mapping := map[string]func() (string, error){ - "ETH_RPC_URL": c.getRpcUrl(nodeIndex), - "ETH_RPC_JWT_SECRET": c.getJwtSecret, - } - - for key, fn := range mapping { - value := "" - var err error - if cast { - if value, err = fn(); err != nil { - return err - } - } - o.extraEnvVars[key] = value - } - return nil - } -} - -func WithExpectedPreconditions(pre bool) ChainConfigOption { - return func(c *ChainConfig, o *chainConfigOpts) error { - if pre { - o.extraEnvVars[ExpectPreconditionsMet] = "true" - } else { - o.extraEnvVars[ExpectPreconditionsMet] = "" - } - return nil - } -} - -func (c *ChainConfig) GetEnv(opts ...ChainConfigOption) (*ChainEnv, error) { - motd := c.motd() - o := &chainConfigOpts{ - extraEnvVars: make(map[string]string), - } - - for _, opt := range opts { - if err := opt(c, o); err != nil { - return nil, err - } - } - - // To allow commands within the shell to know which devnet and chain they are in - absPath := c.devnetURL - if u, err := url.Parse(c.devnetURL); err == nil { - if u.Scheme == "" || u.Scheme == "file" { - // make sure the path is absolute - if abs, err := filepath.Abs(u.Path); err == nil { - absPath = abs - } - } - } - o.extraEnvVars[EnvURLVar] = absPath - o.extraEnvVars[ChainNameVar] = c.name - - return &ChainEnv{ - motd: motd, - envVars: o.extraEnvVars, - }, nil -} - -func (e *ChainEnv) ApplyToEnv(env []string) []string { - // first identify which env vars to 
clear - clearEnv := make(map[string]interface{}) - for key := range e.envVars { - clearEnv[key] = nil - } - - // then actually remove these from the env - cleanEnv := make([]string, 0) - for _, s := range env { - key := strings.SplitN(s, "=", 2)[0] - if _, ok := clearEnv[key]; !ok { - cleanEnv = append(cleanEnv, s) - } - } - - // then add the remaining env vars - for key, value := range e.envVars { - if value == "" { - continue - } - cleanEnv = append(cleanEnv, fmt.Sprintf("%s=%s", key, value)) - } - return cleanEnv -} - -func (e *ChainEnv) GetMotd() string { - return e.motd -} diff --git a/devnet-sdk/shell/env/devnet.go b/devnet-sdk/shell/env/devnet.go deleted file mode 100644 index 2f1d3baf6eb88..0000000000000 --- a/devnet-sdk/shell/env/devnet.go +++ /dev/null @@ -1,182 +0,0 @@ -package env - -import ( - "fmt" - "math/big" - "net/url" - "os" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/controller/kt" - "github.com/ethereum-optimism/optimism/devnet-sdk/controller/surface" - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum/go-ethereum/params" -) - -type surfaceGetter func() (surface.ControlSurface, error) -type controllerFactory func(*descriptors.DevnetEnvironment) surfaceGetter - -type DevnetEnv struct { - Env *descriptors.DevnetEnvironment - URL string - - ctrl surfaceGetter -} - -// DataFetcher is a function type for fetching data from a URL -type DataFetcher func(*url.URL) (*descriptors.DevnetEnvironment, error) - -type schemeBackend struct { - fetcher DataFetcher - ctrlFactory controllerFactory -} - -func getKurtosisController(env *descriptors.DevnetEnvironment) surfaceGetter { - return func() (surface.ControlSurface, error) { - return kt.NewKurtosisControllerSurface(env) - } -} - -var ( - ktFetcher = &kurtosisFetcher{ - devnetFSFactory: newDevnetFS, - } - - // schemeToBackend maps URL 
schemes to their respective data fetcher functions - schemeToBackend = map[string]schemeBackend{ - "": {fetchFileData, nil}, - "file": {fetchFileData, nil}, - "kt": {ktFetcher.fetchKurtosisData, getKurtosisController}, - "ktnative": {fetchKurtosisNativeData, getKurtosisController}, - } -) - -// fetchDevnetData retrieves data from a URL based on its scheme -func fetchDevnetData(parsedURL *url.URL) (*descriptors.DevnetEnvironment, error) { - scheme := strings.ToLower(parsedURL.Scheme) - backend, ok := schemeToBackend[scheme] - if !ok { - return nil, fmt.Errorf("unsupported URL scheme: %s", scheme) - } - - return backend.fetcher(parsedURL) -} - -func LoadDevnetFromURL(devnetURL string) (*DevnetEnv, error) { - parsedURL, err := url.Parse(devnetURL) - if err != nil { - return nil, fmt.Errorf("error parsing URL: %w", err) - } - - env, err := fetchDevnetData(parsedURL) - if err != nil { - return nil, fmt.Errorf("error fetching devnet data: %w", err) - } - - if err := fixupDevnetConfig(env); err != nil { - return nil, fmt.Errorf("error fixing up devnet config: %w", err) - } - - var ctrl surfaceGetter - scheme := parsedURL.Scheme - if val, ok := os.LookupEnv(EnvCtrlVar); ok { - scheme = val - } - backend, ok := schemeToBackend[scheme] - if !ok { - return nil, fmt.Errorf("invalid scheme to lookup control interface: %s", scheme) - } - - if backend.ctrlFactory != nil { - ctrl = backend.ctrlFactory(env) - } - - return &DevnetEnv{ - Env: env, - URL: devnetURL, - ctrl: ctrl, - }, nil -} - -func (d *DevnetEnv) GetChain(chainName string) (*ChainConfig, error) { - var chain *descriptors.Chain - if d.Env.L1.Name == chainName { - chain = d.Env.L1 - } else { - for _, l2Chain := range d.Env.L2 { - if l2Chain.Name == chainName { - chain = l2Chain.Chain - break - } - } - } - - if chain == nil { - return nil, fmt.Errorf("chain '%s' not found in devnet config", chainName) - } - - return &ChainConfig{ - chain: chain, - devnetURL: d.URL, - name: chainName, - }, nil -} - -func (d *DevnetEnv) 
Control() (surface.ControlSurface, error) { - if d.ctrl == nil { - return nil, fmt.Errorf("devnet is not controllable") - } - return d.ctrl() -} - -func fixupDevnetConfig(config *descriptors.DevnetEnvironment) error { - // we should really get this from the kurtosis output, but the data doesn't exist yet, so craft a minimal one. - l1ID := new(big.Int) - l1ID, ok := l1ID.SetString(config.L1.ID, 10) - if !ok { - return fmt.Errorf("invalid L1 ID: %s", config.L1.ID) - } - if config.L1.Config == nil { - if l1Config := eth.L1ChainConfigByChainID(eth.ChainIDFromBig(l1ID)); l1Config != nil { - config.L1.Config = l1Config - } else { - config.L1.Config = ¶ms.ChainConfig{ - ChainID: l1ID, - } - } - } - for _, l2Chain := range config.L2 { - l2ChainId := l2Chain.Chain.ID - - var l2ID *big.Int - base := 10 - if len(l2ChainId) >= 2 && l2ChainId[:2] == "0x" { - base = 16 - l2ChainId = l2ChainId[2:] - } - - l2ID, ok := new(big.Int).SetString(l2ChainId, base) - if !ok { - return fmt.Errorf("invalid L2 ID: %s", l2ChainId) - } - // Convert the L2 chain ID to decimal string format - decimalId := l2ID.String() - l2Chain.Chain.ID = decimalId - - if l2Chain.Config == nil { - l2Chain.Config = ¶ms.ChainConfig{ - ChainID: l2ID, - } - } - - if l2Chain.RollupConfig == nil { - l2Chain.RollupConfig = &rollup.Config{ - L1ChainID: l1ID, - L2ChainID: l2ID, - } - } - } - return nil -} diff --git a/devnet-sdk/shell/env/env_test.go b/devnet-sdk/shell/env/env_test.go deleted file mode 100644 index d2b7aaf44a766..0000000000000 --- a/devnet-sdk/shell/env/env_test.go +++ /dev/null @@ -1,309 +0,0 @@ -package env - -import ( - "os" - "path/filepath" - "strings" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestLoadDevnetEnv(t *testing.T) { - // Create a temporary test file - content := `{ - "l1": { - "name": "l1", - "id": "1", - "nodes": [{ - 
"services": { - "el": { - "endpoints": { - "rpc": { - "host": "localhost", - "port": 8545 - } - } - } - } - }], - "jwt": "0x1234567890abcdef", - "addresses": { - "deployer": "0x1234567890123456789012345678901234567890" - } - }, - "l2": [{ - "name": "op", - "id": "2", - "nodes": [{ - "services": { - "el": { - "endpoints": { - "rpc": { - "host": "localhost", - "port": 9545 - } - } - } - } - }], - "jwt": "0xdeadbeef", - "addresses": { - "deployer": "0x2345678901234567890123456789012345678901" - } - }] - }` - - tmpfile, err := os.CreateTemp("", "devnet-*.json") - require.NoError(t, err) - defer os.Remove(tmpfile.Name()) - - _, err = tmpfile.Write([]byte(content)) - require.NoError(t, err) - err = tmpfile.Close() - require.NoError(t, err) - - // Test successful load - t.Run("successful load", func(t *testing.T) { - env, err := LoadDevnetFromURL(tmpfile.Name()) - require.NoError(t, err) - assert.Equal(t, "l1", env.Env.L1.Name) - assert.Equal(t, "op", env.Env.L2[0].Name) - }) - - // Test loading non-existent file - t.Run("non-existent file", func(t *testing.T) { - _, err := LoadDevnetFromURL("non-existent.json") - assert.Error(t, err) - }) - - // Test loading invalid JSON - t.Run("invalid JSON", func(t *testing.T) { - invalidFile := filepath.Join(t.TempDir(), "invalid.json") - err := os.WriteFile(invalidFile, []byte("{invalid json}"), 0644) - require.NoError(t, err) - - _, err = LoadDevnetFromURL(invalidFile) - assert.Error(t, err) - }) -} - -func TestGetChain(t *testing.T) { - devnet := &DevnetEnv{ - Env: &descriptors.DevnetEnvironment{ - L1: &descriptors.Chain{ - Name: "l1", - Nodes: []descriptors.Node{ - { - Services: descriptors.ServiceMap{ - "el": { - Endpoints: descriptors.EndpointMap{ - "rpc": { - Host: "localhost", - Port: 8545, - }, - }, - }, - }, - }, - }, - JWT: "0x1234", - Addresses: descriptors.AddressMap{ - "deployer": common.HexToAddress("0x1234567890123456789012345678901234567890"), - }, - }, - L2: []*descriptors.L2Chain{ - { - Chain: &descriptors.Chain{ - 
Name: "op", - Nodes: []descriptors.Node{ - { - Services: descriptors.ServiceMap{ - "el": { - Endpoints: descriptors.EndpointMap{ - "rpc": { - Host: "localhost", - Port: 9545, - }, - }, - }, - }, - }, - }, - JWT: "0x5678", - Addresses: descriptors.AddressMap{ - "deployer": common.HexToAddress("0x2345678901234567890123456789012345678901"), - }, - }, - L1Wallets: descriptors.WalletMap{ - "deployer": &descriptors.Wallet{ - Address: common.HexToAddress("0x2345678901234567890123456789012345678901"), - PrivateKey: "0x2345678901234567890123456789012345678901", - }, - }, - }, - }, - }, - URL: "test.json", - } - - // Test getting L1 chain - t.Run("get L1 chain", func(t *testing.T) { - chain, err := devnet.GetChain("l1") - require.NoError(t, err) - assert.Equal(t, "l1", chain.name) - assert.Equal(t, "0x1234", chain.chain.JWT) - }) - - // Test getting L2 chain - t.Run("get L2 chain", func(t *testing.T) { - chain, err := devnet.GetChain("op") - require.NoError(t, err) - assert.Equal(t, "op", chain.name) - assert.Equal(t, "0x5678", chain.chain.JWT) - }) - - // Test getting non-existent chain - t.Run("get non-existent chain", func(t *testing.T) { - _, err := devnet.GetChain("invalid") - assert.Error(t, err) - }) -} - -func TestChainConfig(t *testing.T) { - chain := &ChainConfig{ - chain: &descriptors.Chain{ - Name: "test", - Nodes: []descriptors.Node{ - { - Services: descriptors.ServiceMap{ - "el": { - Endpoints: descriptors.EndpointMap{ - "rpc": { - Host: "localhost", - Port: 8545, - Scheme: "https", - }, - }, - }, - }, - }, - }, - JWT: "0x1234", - Addresses: descriptors.AddressMap{ - "deployer": common.HexToAddress("0x1234567890123456789012345678901234567890"), - }, - }, - devnetURL: "test.json", - name: "test", - } - - // Test getting environment variables - t.Run("get environment variables", func(t *testing.T) { - env, err := chain.GetEnv( - WithCastIntegration(true, 0), - ) - require.NoError(t, err) - - assert.Equal(t, "https://localhost:8545", env.envVars["ETH_RPC_URL"]) - 
assert.Equal(t, "1234", env.envVars["ETH_RPC_JWT_SECRET"]) - assert.Equal(t, "test.json", filepath.Base(env.envVars[EnvURLVar])) - assert.Equal(t, "test", env.envVars[ChainNameVar]) - assert.Contains(t, env.motd, "deployer") - assert.Contains(t, env.motd, "0x1234567890123456789012345678901234567890") - }) - - // Test chain with no nodes - t.Run("chain with no nodes", func(t *testing.T) { - noNodesChain := &ChainConfig{ - chain: &descriptors.Chain{ - Name: "test", - Nodes: []descriptors.Node{}, - }, - } - _, err := noNodesChain.GetEnv( - WithCastIntegration(true, 0), - ) - assert.Error(t, err) - }) - - // Test chain with missing service - t.Run("chain with missing service", func(t *testing.T) { - missingServiceChain := &ChainConfig{ - chain: &descriptors.Chain{ - Name: "test", - Nodes: []descriptors.Node{ - { - Services: descriptors.ServiceMap{}, - }, - }, - }, - } - _, err := missingServiceChain.GetEnv( - WithCastIntegration(true, 0), - ) - assert.Error(t, err) - }) - - // Test chain with missing endpoint - t.Run("chain with missing endpoint", func(t *testing.T) { - missingEndpointChain := &ChainConfig{ - chain: &descriptors.Chain{ - Name: "test", - Nodes: []descriptors.Node{ - { - Services: descriptors.ServiceMap{ - "el": { - Endpoints: descriptors.EndpointMap{}, - }, - }, - }, - }, - }, - } - _, err := missingEndpointChain.GetEnv( - WithCastIntegration(true, 0), - ) - assert.Error(t, err) - }) -} - -func TestChainEnv_ApplyToEnv(t *testing.T) { - originalEnv := []string{ - "KEEP_ME=old_value", - "OVERRIDE_ME=old_value", - "REMOVE_ME=old_value", - } - - env := &ChainEnv{ - envVars: map[string]string{ - "OVERRIDE_ME": "new_value", - "REMOVE_ME": "", - }, - } - - result := env.ApplyToEnv(originalEnv) - - // Convert result to map for easier testing - resultMap := make(map[string]string) - for _, v := range result { - parts := strings.SplitN(v, "=", 2) - resultMap[parts[0]] = parts[1] - } - - // Test that KEEP_ME was overridden with new value - assert.Equal(t, 
"old_value", resultMap["KEEP_ME"]) - - // Test that OVERRIDE_ME was overridden with new value - assert.Equal(t, "new_value", resultMap["OVERRIDE_ME"]) - - // Test that REMOVE_ME was removed (not present in result) - _, exists := resultMap["REMOVE_ME"] - assert.False(t, exists, "REMOVE_ME should have been removed") - - // Test that we have exactly 3 variables in the result - assert.Equal(t, 2, len(result), "Result should have exactly 3 variables") -} diff --git a/devnet-sdk/shell/env/file_fetch.go b/devnet-sdk/shell/env/file_fetch.go deleted file mode 100644 index 929331d26659c..0000000000000 --- a/devnet-sdk/shell/env/file_fetch.go +++ /dev/null @@ -1,49 +0,0 @@ -package env - -import ( - "encoding/json" - "fmt" - "net/url" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/spf13/afero" -) - -type fileFetcher struct { - fs afero.Fs -} - -// fetchFileData reads data from a local file -func (f *fileFetcher) fetchFileData(u *url.URL) (*descriptors.DevnetEnvironment, error) { - body, err := afero.ReadFile(f.fs, u.Path) - if err != nil { - return nil, fmt.Errorf("error reading file: %w", err) - } - - basename := u.Path - if lastSlash := strings.LastIndex(basename, "/"); lastSlash >= 0 { - basename = basename[lastSlash+1:] - } - if lastDot := strings.LastIndex(basename, "."); lastDot >= 0 { - basename = basename[:lastDot] - } - - var config descriptors.DevnetEnvironment - if err := json.Unmarshal(body, &config); err != nil { - return nil, fmt.Errorf("error parsing JSON: %w", err) - } - - // If the name is not set, use the basename of the file - if config.Name == "" { - config.Name = basename - } - return &config, nil -} - -func fetchFileData(u *url.URL) (*descriptors.DevnetEnvironment, error) { - fetcher := &fileFetcher{ - fs: afero.NewOsFs(), - } - return fetcher.fetchFileData(u) -} diff --git a/devnet-sdk/shell/env/file_fetch_test.go b/devnet-sdk/shell/env/file_fetch_test.go deleted file mode 100644 index 
b6e722e0a6684..0000000000000 --- a/devnet-sdk/shell/env/file_fetch_test.go +++ /dev/null @@ -1,93 +0,0 @@ -package env - -import ( - "net/url" - "testing" - - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFetchFileDataFromOS(t *testing.T) { - fs := afero.NewMemMapFs() - - var ( - absoluteContent = []byte(`{"name": "absolute"}`) - relativeContent = []byte(`{"name": "relative"}`) - noNameContent = []byte(`{}`) - ) - - err := afero.WriteFile(fs, "/some/absolute/path", absoluteContent, 0644) - require.NoError(t, err) - err = afero.WriteFile(fs, "some/relative/path", relativeContent, 0644) - require.NoError(t, err) - err = afero.WriteFile(fs, "some/file/devnet-env.json", noNameContent, 0644) - require.NoError(t, err) - err = afero.WriteFile(fs, "some/file/devnet", noNameContent, 0644) - require.NoError(t, err) - - fetcher := &fileFetcher{ - fs: fs, - } - - tests := []struct { - name string - urlStr string - wantName string - wantContent []byte - wantError bool - }{ - { - name: "file URL", - urlStr: "file:///some/absolute/path", - wantName: "absolute", - wantContent: absoluteContent, - }, - { - name: "absolute path", - urlStr: "/some/absolute/path", - wantName: "absolute", - wantContent: absoluteContent, - }, - { - name: "relative path", - urlStr: "some/relative/path", - wantName: "relative", - wantContent: relativeContent, - }, - { - name: "no name - file with extension", - urlStr: "some/file/devnet-env.json", - wantName: "devnet-env", - wantContent: noNameContent, - }, - { - name: "no name - file without extension", - urlStr: "some/file/devnet", - wantName: "devnet", - wantContent: noNameContent, - }, - { - name: "non-existent file", - urlStr: "file:///nonexistent/path", - wantError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - u, err := url.Parse(tt.urlStr) - require.NoError(t, err) - - env, err := fetcher.fetchFileData(u) - if tt.wantError { - 
assert.Error(t, err) - return - } - - require.NoError(t, err) - assert.Equal(t, tt.wantName, env.Name) - }) - } -} diff --git a/devnet-sdk/shell/env/kt_fetch.go b/devnet-sdk/shell/env/kt_fetch.go deleted file mode 100644 index 2ac5553919d42..0000000000000 --- a/devnet-sdk/shell/env/kt_fetch.go +++ /dev/null @@ -1,69 +0,0 @@ -package env - -import ( - "context" - "fmt" - "net/url" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" -) - -// DevnetFS is an interface that both our mock and the real implementation satisfy -type DevnetFS interface { - GetDevnetDescriptor(ctx context.Context, opts ...ktfs.DevnetFSDescriptorOption) (*descriptors.DevnetEnvironment, error) -} - -type devnetFSFactory func(ctx context.Context, enclave string) (DevnetFS, error) - -type kurtosisFetcher struct { - devnetFSFactory devnetFSFactory -} - -func newDevnetFS(ctx context.Context, enclave string) (DevnetFS, error) { - fs, err := ktfs.NewEnclaveFS(ctx, enclave) - if err != nil { - return nil, err - } - return ktfs.NewDevnetFS(fs), nil -} - -// parseKurtosisURL parses a Kurtosis URL of the form kt://enclave/artifact/file -// If artifact is omitted, it defaults to "" -// If file is omitted, it defaults to "env.json" -func (f *kurtosisFetcher) parseKurtosisURL(u *url.URL) (enclave, artifactName, fileName string) { - enclave = u.Host - artifactName = "" - fileName = ktfs.DevnetEnvArtifactPath - - // Trim both prefix and suffix slashes before splitting - trimmedPath := strings.Trim(u.Path, "/") - parts := strings.Split(trimmedPath, "/") - if len(parts) > 0 && parts[0] != "" { - artifactName = parts[0] - } - if len(parts) > 1 && parts[1] != "" { - fileName = parts[1] - } - - return -} - -// fetchKurtosisData reads data from a Kurtosis artifact -func (f *kurtosisFetcher) fetchKurtosisData(u *url.URL) (*descriptors.DevnetEnvironment, error) { - enclave, artifactName, fileName := f.parseKurtosisURL(u) - - 
devnetFS, err := f.devnetFSFactory(context.Background(), enclave) - if err != nil { - return nil, fmt.Errorf("error creating enclave fs: %w", err) - } - - env, err := devnetFS.GetDevnetDescriptor(context.Background(), ktfs.WithArtifactName(artifactName), ktfs.WithArtifactPath(fileName)) - if err != nil { - return nil, fmt.Errorf("error getting devnet descriptor: %w", err) - } - - env.Name = enclave - return env, nil -} diff --git a/devnet-sdk/shell/env/kt_fetch_test.go b/devnet-sdk/shell/env/kt_fetch_test.go deleted file mode 100644 index 46da0e091671d..0000000000000 --- a/devnet-sdk/shell/env/kt_fetch_test.go +++ /dev/null @@ -1,70 +0,0 @@ -package env - -import ( - "net/url" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestParseKurtosisURL(t *testing.T) { - tests := []struct { - name string - urlStr string - wantEnclave string - wantArtifact string - wantFile string - wantParseError bool - }{ - { - name: "basic url", - urlStr: "kt://myenclave", - wantEnclave: "myenclave", - wantArtifact: "", - wantFile: "env.json", - }, - { - name: "with artifact", - urlStr: "kt://myenclave/custom-artifact", - wantEnclave: "myenclave", - wantArtifact: "custom-artifact", - wantFile: "env.json", - }, - { - name: "with artifact and file", - urlStr: "kt://myenclave/custom-artifact/config.json", - wantEnclave: "myenclave", - wantArtifact: "custom-artifact", - wantFile: "config.json", - }, - { - name: "with trailing slash", - urlStr: "kt://enclave/artifact/", - wantEnclave: "enclave", - wantArtifact: "artifact", - wantFile: "env.json", - }, - { - name: "invalid url", - urlStr: "://invalid", - wantParseError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - u, err := url.Parse(tt.urlStr) - if tt.wantParseError { - assert.Error(t, err) - return - } - require.NoError(t, err) - - enclave, artifact, file := ktFetcher.parseKurtosisURL(u) - assert.Equal(t, tt.wantEnclave, enclave) - assert.Equal(t, 
tt.wantArtifact, artifact) - assert.Equal(t, tt.wantFile, file) - }) - } -} diff --git a/devnet-sdk/shell/env/kt_native_fetch.go b/devnet-sdk/shell/env/kt_native_fetch.go deleted file mode 100644 index 3693fa887b569..0000000000000 --- a/devnet-sdk/shell/env/kt_native_fetch.go +++ /dev/null @@ -1,119 +0,0 @@ -package env - -import ( - "context" - "fmt" - "io" - "net/url" - "os" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" -) - -// parseKurtosisNativeURL parses a Kurtosis URL of the form kt://enclave/artifact/file -// If artifact is omitted, it defaults to "devnet" -// If file is omitted, it defaults to "env.json" -func parseKurtosisNativeURL(u *url.URL) (enclave, argsFileName string) { - enclave = u.Host - argsFileName = "/" + strings.Trim(u.Path, "/") - - return -} - -// fetchKurtosisNativeData reads data directly from kurtosis API using default dependency implementations -func fetchKurtosisNativeData(u *url.URL) (*descriptors.DevnetEnvironment, error) { - return fetchKurtosisNativeDataInternal(u, &defaultOSOpenImpl{}, &defaultSpecImpl{}, &defaultKurtosisImpl{}) -} - -// fetchKurtosisNativeDataInternal reads data directly from kurtosis API using provided dependency implementations -func fetchKurtosisNativeDataInternal(u *url.URL, osImpl osOpenInterface, specImpl specInterface, kurtosisImpl kurtosisInterface) (*descriptors.DevnetEnvironment, error) { - // First let's parse the kurtosis URL - enclave, argsFileName := parseKurtosisNativeURL(u) - - // Open the arguments file - argsFile, err := osImpl.Open(argsFileName) - if err != nil { - return nil, fmt.Errorf("error reading arguments file: %w", err) - } - - // Make sure to close the file once we're done reading - defer argsFile.Close() - - // Once we have the arguments file, we can extract the enclave spec - enclaveSpec, err := 
specImpl.ExtractData(argsFile) - if err != nil { - return nil, fmt.Errorf("error extracting enclave spec: %w", err) - } - - // We'll use the deployer to extract the system spec - deployer, err := kurtosisImpl.NewKurtosisDeployer(kurtosis.WithKurtosisEnclave(enclave)) - if err != nil { - return nil, fmt.Errorf("error creating deployer: %w", err) - } - - // We'll read the environment info from kurtosis directly - ctx := context.Background() - env, err := deployer.GetEnvironmentInfo(ctx, enclaveSpec) - if err != nil { - return nil, fmt.Errorf("error getting environment info: %w", err) - } - - return env.DevnetEnvironment, nil -} - -// osOpenInterface describes a struct that can open filesystem files for reading -// -// osOpenInterface is used when loading kurtosis args files from local filesystem -type osOpenInterface interface { - Open(name string) (fileInterface, error) -} - -// fileInterface describes a subset of os.File struct for ease of testing -type fileInterface interface { - io.Reader - Close() error -} - -// defaultOSOpenImpl implements osOpenInterface -type defaultOSOpenImpl struct{} - -func (d *defaultOSOpenImpl) Open(name string) (fileInterface, error) { - return os.Open(name) -} - -// specInterface describes a subset of functionality required from the spec package -// -// The spec package is responsible for turning a kurtosis args file into an EnclaveSpec -type specInterface interface { - ExtractData(r io.Reader) (*spec.EnclaveSpec, error) -} - -// defaultSpecImpl implements specInterface -type defaultSpecImpl struct{} - -func (d *defaultSpecImpl) ExtractData(r io.Reader) (*spec.EnclaveSpec, error) { - return spec.NewSpec().ExtractData(r) -} - -// kurtosisInterface describes a subset of functionality required from the kurtosis package -// -// kurtosisInterface provides access to a KurtosisDeployer object, an intermediate object that provides -// access to the KurtosisEnvironment object -type kurtosisInterface interface { - NewKurtosisDeployer(opts 
...kurtosis.KurtosisDeployerOptions) (kurtosisDeployerInterface, error) -} - -// kurtosisDeployerInterface describes a subset of functionality required from KurtosisDeployer struct -type kurtosisDeployerInterface interface { - GetEnvironmentInfo(ctx context.Context, spec *spec.EnclaveSpec) (*kurtosis.KurtosisEnvironment, error) -} - -// defaultKurtosisImpl implements kurtosisInterface -type defaultKurtosisImpl struct{} - -func (d *defaultKurtosisImpl) NewKurtosisDeployer(opts ...kurtosis.KurtosisDeployerOptions) (kurtosisDeployerInterface, error) { - return kurtosis.NewKurtosisDeployer(opts...) -} diff --git a/devnet-sdk/shell/env/kt_native_fetch_test.go b/devnet-sdk/shell/env/kt_native_fetch_test.go deleted file mode 100644 index 8c23cfb28d4be..0000000000000 --- a/devnet-sdk/shell/env/kt_native_fetch_test.go +++ /dev/null @@ -1,228 +0,0 @@ -package env - -import ( - "context" - "embed" - "encoding/json" - "fmt" - "io" - "net/url" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ( - //go:embed testdata/kurtosis - kurtosisTestData embed.FS -) - -func TestParseKurtosisNativeURL(t *testing.T) { - tests := []struct { - name string - urlStr string - wantEnclave string - wantArtifact string - wantFile string - wantParseError bool - }{ - { - name: "absolute file path", - urlStr: "ktnative://myenclave/path/args.yaml", - wantEnclave: "myenclave", - wantFile: "/path/args.yaml", - }, - { - name: "invalid url", - urlStr: "://invalid", - wantParseError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - u, err := url.Parse(tt.urlStr) - if tt.wantParseError { - assert.Error(t, err) - return - } - require.NoError(t, err) - - enclave, argsFile := parseKurtosisNativeURL(u) - 
assert.Equal(t, tt.wantEnclave, enclave) - assert.Equal(t, tt.wantFile, argsFile) - }) - } -} - -func TestFetchKurtosisNativeDataFailures(t *testing.T) { - url, err := url.Parse("ktnative://enclave/file/path") - require.NoError(t, err) - - t.Run("non-existent args file", func(t *testing.T) { - osImpl := &mockOSImpl{ - err: fmt.Errorf("oh no"), - } - - _, err = fetchKurtosisNativeDataInternal(url, osImpl, &defaultSpecImpl{}, &defaultKurtosisImpl{}) - require.ErrorContains(t, err, "error reading arguments file: oh no") - }) - - t.Run("malformed args file", func(t *testing.T) { - file, err := kurtosisTestData.Open("testdata/kurtosis/args--malformed.txt") - require.NoError(t, err) - - osImpl := &mockOSImpl{ - value: file, - } - - _, err = fetchKurtosisNativeDataInternal(url, osImpl, &defaultSpecImpl{}, &defaultKurtosisImpl{}) - require.ErrorContains(t, err, "error extracting enclave spec: failed to decode YAML: yaml: unmarshal errors:") - }) - - t.Run("spec extraction failure", func(t *testing.T) { - file, err := kurtosisTestData.Open("testdata/kurtosis/args--simple.yaml") - require.NoError(t, err) - - osImpl := &mockOSImpl{ - value: file, - } - - specImpl := &mockSpecImpl{ - err: fmt.Errorf("oh no"), - } - - _, err = fetchKurtosisNativeDataInternal(url, osImpl, specImpl, &defaultKurtosisImpl{}) - require.ErrorContains(t, err, "error extracting enclave spec: oh no") - }) - - t.Run("kurtosis deployer failure", func(t *testing.T) { - file, err := kurtosisTestData.Open("testdata/kurtosis/args--simple.yaml") - require.NoError(t, err) - - osImpl := &mockOSImpl{ - value: file, - } - - kurtosisImpl := &mockKurtosisImpl{ - err: fmt.Errorf("oh no"), - } - - _, err = fetchKurtosisNativeDataInternal(url, osImpl, &defaultSpecImpl{}, kurtosisImpl) - require.ErrorContains(t, err, "error creating deployer: oh no") - }) - - t.Run("kurtosis info extraction failure", func(t *testing.T) { - file, err := kurtosisTestData.Open("testdata/kurtosis/args--simple.yaml") - require.NoError(t, 
err) - - osImpl := &mockOSImpl{ - value: file, - } - - kurtosisDeployer := &mockKurtosisDeployerImpl{ - err: fmt.Errorf("oh no"), - } - - kurtosisImpl := &mockKurtosisImpl{ - value: kurtosisDeployer, - } - - _, err = fetchKurtosisNativeDataInternal(url, osImpl, &defaultSpecImpl{}, kurtosisImpl) - require.ErrorContains(t, err, "error getting environment info: oh no") - }) -} - -func TestFetchKurtosisNativeDataSuccess(t *testing.T) { - url, err := url.Parse("ktnative://enclave/file/path") - require.NoError(t, err) - - t.Run("fetching success", func(t *testing.T) { - file, err := kurtosisTestData.Open("testdata/kurtosis/args--simple.yaml") - require.NoError(t, err) - - // We'll prepare a mock spec to be returned - envSpec := &spec.EnclaveSpec{} - env := &kurtosis.KurtosisEnvironment{ - DevnetEnvironment: &descriptors.DevnetEnvironment{ - Name: "enclave", - L2: make([]*descriptors.L2Chain, 0, 1), - Features: envSpec.Features, - }, - } - - // And serialize it so that we can compare values - _, err = json.MarshalIndent(env, "", " ") - require.NoError(t, err) - - osImpl := &mockOSImpl{ - value: file, - } - - specImpl := &mockSpecImpl{ - value: envSpec, - } - - kurtosisDeployer := &mockKurtosisDeployerImpl{ - value: env, - } - - kurtosisImpl := &mockKurtosisImpl{ - value: kurtosisDeployer, - } - - devnetEnv, err := fetchKurtosisNativeDataInternal(url, osImpl, specImpl, kurtosisImpl) - require.NoError(t, err) - require.Equal(t, "enclave", devnetEnv.Name) - }) -} - -var ( - _ osOpenInterface = (*mockOSImpl)(nil) - - _ specInterface = (*mockSpecImpl)(nil) - - _ kurtosisInterface = (*mockKurtosisImpl)(nil) - - _ kurtosisDeployerInterface = (*mockKurtosisDeployerImpl)(nil) -) - -type mockOSImpl struct { - value fileInterface - err error -} - -func (o *mockOSImpl) Open(name string) (fileInterface, error) { - return o.value, o.err -} - -type mockSpecImpl struct { - value *spec.EnclaveSpec - err error -} - -func (o *mockSpecImpl) ExtractData(r io.Reader) (*spec.EnclaveSpec, error) 
{ - return o.value, o.err -} - -type mockKurtosisImpl struct { - value kurtosisDeployerInterface - err error -} - -func (o *mockKurtosisImpl) NewKurtosisDeployer(opts ...kurtosis.KurtosisDeployerOptions) (kurtosisDeployerInterface, error) { - return o.value, o.err -} - -type mockKurtosisDeployerImpl struct { - value *kurtosis.KurtosisEnvironment - err error -} - -func (o *mockKurtosisDeployerImpl) GetEnvironmentInfo(context.Context, *spec.EnclaveSpec) (*kurtosis.KurtosisEnvironment, error) { - return o.value, o.err -} diff --git a/devnet-sdk/shell/env/testdata/kurtosis/args--malformed.txt b/devnet-sdk/shell/env/testdata/kurtosis/args--malformed.txt deleted file mode 100644 index 7b1f8d92fc758..0000000000000 --- a/devnet-sdk/shell/env/testdata/kurtosis/args--malformed.txt +++ /dev/null @@ -1 +0,0 @@ -what is this \ No newline at end of file diff --git a/devnet-sdk/shell/env/testdata/kurtosis/args--simple.yaml b/devnet-sdk/shell/env/testdata/kurtosis/args--simple.yaml deleted file mode 100644 index 28644893b554c..0000000000000 --- a/devnet-sdk/shell/env/testdata/kurtosis/args--simple.yaml +++ /dev/null @@ -1,6 +0,0 @@ -optimism_package: - chains: - op-kurtosis: - whatakey: - - el_type: op-geth - cl_type: op-node \ No newline at end of file diff --git a/devnet-sdk/system/chain.go b/devnet-sdk/system/chain.go deleted file mode 100644 index 20f82796f95a0..0000000000000 --- a/devnet-sdk/system/chain.go +++ /dev/null @@ -1,259 +0,0 @@ -package system - -import ( - "context" - "fmt" - "math/big" - "sync" - "time" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum/go-ethereum/common" - coreTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - 
"github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" - "github.com/ethereum/go-ethereum/rpc" -) - -// this is to differentiate between op-geth and go-ethereum -type opBlock interface { - WithdrawalsRoot() *common.Hash -} - -var ( - // This will make sure that we implement the Chain interface - _ Chain = (*chain)(nil) - _ L2Chain = (*l2Chain)(nil) - - // Make sure we're using op-geth in place of go-ethereum. - // If you're wondering why this fails at compile time, - // it's most likely because you're not using a "replace" - // directive in your go.mod file. - _ opBlock = (*coreTypes.Block)(nil) -) - -// clientManager handles ethclient connections -type clientManager struct { - mu sync.RWMutex - clients map[string]*sources.EthClient - gethClients map[string]*ethclient.Client -} - -func newClientManager() *clientManager { - return &clientManager{ - clients: make(map[string]*sources.EthClient), - gethClients: make(map[string]*ethclient.Client), - } -} - -func (m *clientManager) Client(rpcURL string) (*sources.EthClient, error) { - m.mu.RLock() - if client, ok := m.clients[rpcURL]; ok { - m.mu.RUnlock() - return client, nil - } - m.mu.RUnlock() - - m.mu.Lock() - defer m.mu.Unlock() - - // Double-check after acquiring write lock - if client, ok := m.clients[rpcURL]; ok { - return client, nil - } - - ethClCfg := sources.EthClientConfig{ - MaxRequestsPerBatch: 10, - MaxConcurrentRequests: 10, - ReceiptsCacheSize: 10, - TransactionsCacheSize: 10, - HeadersCacheSize: 10, - PayloadsCacheSize: 10, - BlockRefsCacheSize: 10, - TrustRPC: false, - MustBePostMerge: true, - RPCProviderKind: sources.RPCKindStandard, - MethodResetDuration: time.Minute, - } - rpcClient, err := rpc.DialContext(context.Background(), rpcURL) - if err != nil { - return nil, err - } - ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(rpcClient), log.Root(), nil, ðClCfg) - if err != nil { - return nil, err - } - m.clients[rpcURL] = ethCl - return ethCl, nil -} - -func (m 
*clientManager) GethClient(rpcURL string) (*ethclient.Client, error) { - m.mu.RLock() - if client, ok := m.gethClients[rpcURL]; ok { - m.mu.RUnlock() - return client, nil - } - m.mu.RUnlock() - - m.mu.Lock() - defer m.mu.Unlock() - - // Double-check after acquiring write lock - if client, ok := m.gethClients[rpcURL]; ok { - return client, nil - } - - client, err := ethclient.Dial(rpcURL) - if err != nil { - return nil, err - } - m.gethClients[rpcURL] = client - return client, nil -} - -type chain struct { - id string - wallets WalletMap - nodes []Node - chainConfig *params.ChainConfig - addresses AddressMap -} - -func (c *chain) Nodes() []Node { - return c.nodes -} - -// Wallet returns the first wallet which meets all provided constraints, or an -// error. -// Typically this will be one of the pre-funded wallets associated with -// the deployed system. -func (c *chain) Wallets() WalletMap { - return c.wallets -} - -func (c *chain) ID() types.ChainID { - if c.id == "" { - return types.ChainID(big.NewInt(0)) - } - base := 10 - if len(c.id) >= 2 && c.id[0:2] == "0x" { - c.id = c.id[2:] - base = 16 - } - id, ok := new(big.Int).SetString(c.id, base) - if !ok { - return types.ChainID(big.NewInt(0)) - } - return types.ChainID(id) -} - -func (c *chain) Config() (*params.ChainConfig, error) { - if c.chainConfig == nil { - return nil, fmt.Errorf("chain config is nil") - } - return c.chainConfig, nil -} - -func (c *chain) Addresses() AddressMap { - return c.addresses -} - -// SupportsEIP checks if the chain's first node supports the given EIP -func (c *chain) SupportsEIP(ctx context.Context, eip uint64) bool { - if len(c.nodes) == 0 { - return false - } - return c.nodes[0].SupportsEIP(ctx, eip) -} - -func checkHeader(ctx context.Context, client *sources.EthClient, check func(eth.BlockInfo) bool) bool { - info, err := client.InfoByLabel(ctx, eth.Unsafe) - if err != nil { - return false - } - return check(info) -} - -func newNodesFromDescriptor(d *descriptors.Chain) []Node { - 
clients := newClientManager() - nodes := make([]Node, len(d.Nodes)) - for i, node := range d.Nodes { - svc := node.Services["el"] - name := svc.Name - rpc := svc.Endpoints["rpc"] - if rpc.Scheme == "" { - rpc.Scheme = "http" - } - nodes[i] = newNode(fmt.Sprintf("%s://%s:%d", rpc.Scheme, rpc.Host, rpc.Port), name, clients) - } - return nodes -} - -func newChainFromDescriptor(d *descriptors.Chain) (*chain, error) { - // TODO: handle incorrect descriptors better. We could panic here. - - nodes := newNodesFromDescriptor(d) - c := newChain(d.ID, nil, d.Config, AddressMap(d.Addresses), nodes) // Create chain first - - wallets, err := newWalletMapFromDescriptorWalletMap(d.Wallets, c) - if err != nil { - return nil, err - } - c.wallets = wallets - - return c, nil -} - -func newChain(chainID string, wallets WalletMap, chainConfig *params.ChainConfig, addresses AddressMap, nodes []Node) *chain { - chain := &chain{ - id: chainID, - wallets: wallets, - nodes: nodes, - chainConfig: chainConfig, - addresses: addresses, - } - return chain -} - -func newL2ChainFromDescriptor(d *descriptors.L2Chain) (*l2Chain, error) { - // TODO: handle incorrect descriptors better. We could panic here. 
- - nodes := newNodesFromDescriptor(d.Chain) - c := newL2Chain(d.ID, nil, nil, d.Config, AddressMap(d.Addresses), nodes) // Create chain first - - l2Wallets, err := newWalletMapFromDescriptorWalletMap(d.Wallets, c) - if err != nil { - return nil, err - } - c.wallets = l2Wallets - - l1Wallets, err := newWalletMapFromDescriptorWalletMap(d.L1Wallets, c) - if err != nil { - return nil, err - } - c.l1Wallets = l1Wallets - - return c, nil -} - -func newL2Chain(chainID string, l1Wallets WalletMap, l2Wallets WalletMap, chainConfig *params.ChainConfig, l2Addresses AddressMap, nodes []Node) *l2Chain { - chain := &l2Chain{ - chain: newChain(chainID, l2Wallets, chainConfig, l2Addresses, nodes), - l1Wallets: l1Wallets, - } - return chain -} - -type l2Chain struct { - *chain - l1Wallets WalletMap -} - -func (c *l2Chain) L1Wallets() WalletMap { - return c.l1Wallets -} diff --git a/devnet-sdk/system/chain_test.go b/devnet-sdk/system/chain_test.go deleted file mode 100644 index 975f90ea6dc5a..0000000000000 --- a/devnet-sdk/system/chain_test.go +++ /dev/null @@ -1,246 +0,0 @@ -package system - -import ( - "context" - "math/big" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/registry/empty" - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" -) - -func TestClientManager(t *testing.T) { - manager := newClientManager() - - t.Run("returns error for invalid URL", func(t *testing.T) { - _, err := manager.Client("invalid://url") - assert.Error(t, err) - }) - - t.Run("caches client for same URL", func(t *testing.T) { - // Use a hostname that's guaranteed to fail DNS resolution - url := "http://this.domain.definitely.does.not.exist:8545" - - // First call should create new client - client1, err1 := manager.Client(url) - // Second call should return cached client - client2, err2 := manager.Client(url) - - // Both calls should succeed in creating a client - 
assert.NoError(t, err1) - assert.NoError(t, err2) - assert.NotNil(t, client1) - assert.NotNil(t, client2) - - // But the client should fail when used - ctx := context.Background() - _, err := client1.ChainID(ctx) - assert.Error(t, err) - - // And both clients should be the same instance - assert.Same(t, client1, client2) - }) -} - -func TestChainFromDescriptor(t *testing.T) { - descriptor := &descriptors.Chain{ - ID: "1", - Nodes: []descriptors.Node{ - { - Services: descriptors.ServiceMap{ - "el": &descriptors.Service{ - Endpoints: descriptors.EndpointMap{ - "rpc": &descriptors.PortInfo{ - Host: "localhost", - Port: 8545, - }, - }, - }, - }, - }, - }, - Wallets: descriptors.WalletMap{ - "user1": &descriptors.Wallet{ - PrivateKey: "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Address: common.HexToAddress("0x1234567890123456789012345678901234567890"), - }, - }, - Addresses: descriptors.AddressMap{ - "user1": common.HexToAddress("0x1234567890123456789012345678901234567890"), - }, - } - - chain, err := newChainFromDescriptor(descriptor) - assert.Nil(t, err) - assert.NotNil(t, chain) - assert.Equal(t, "http://localhost:8545", chain.Nodes()[0].RPCURL()) - - // Compare the underlying big.Int values - chainID := chain.ID() - expectedID := big.NewInt(1) - assert.Equal(t, 0, expectedID.Cmp(chainID)) -} - -func TestL2ChainFromDescriptor(t *testing.T) { - descriptor := &descriptors.L2Chain{ - Chain: &descriptors.Chain{ - ID: "1", - Nodes: []descriptors.Node{ - { - Services: descriptors.ServiceMap{ - "el": &descriptors.Service{ - Endpoints: descriptors.EndpointMap{ - "rpc": &descriptors.PortInfo{ - Host: "localhost", - Port: 8545, - }, - }, - }, - }, - }, - }, - Wallets: descriptors.WalletMap{ - "user1": &descriptors.Wallet{ - PrivateKey: "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Address: common.HexToAddress("0x1234567890123456789012345678901234567890"), - }, - }, - Addresses: descriptors.AddressMap{ - "user2": 
common.HexToAddress("0x1234567890123456789012345678901234567891"), - }, - }, - L1Wallets: descriptors.WalletMap{ - "user1": &descriptors.Wallet{ - PrivateKey: "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - Address: common.HexToAddress("0x1234567890123456789012345678901234567890"), - }, - }, - } - - chain, err := newL2ChainFromDescriptor(descriptor) - assert.Nil(t, err) - assert.NotNil(t, chain) - assert.Equal(t, "http://localhost:8545", chain.Nodes()[0].RPCURL()) - - // Compare the underlying big.Int values - chainID := chain.ID() - expectedID := big.NewInt(1) - assert.Equal(t, 0, expectedID.Cmp(chainID)) -} - -func TestChainWallet(t *testing.T) { - testAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") - - wallet, err := NewWallet("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", testAddr, nil) - assert.Nil(t, err) - - l1Chain := newChain("1", WalletMap{"user1": wallet}, nil, map[string]common.Address{}, []Node{}) - - t.Run("finds wallet meeting constraints", func(t *testing.T) { - constraint := &addressConstraint{addr: testAddr} - wallets := l1Chain.Wallets() - - for _, w := range wallets { - if constraint.CheckWallet(w) { - assert.NotNil(t, w) - assert.Equal(t, testAddr, w.Address()) - return - } - } - t.Fatalf("wallet not found") - }) - - t.Run("returns error when no wallet meets constraints", func(t *testing.T) { - wrongAddr := common.HexToAddress("0x0987654321098765432109876543210987654321") - constraint := &addressConstraint{addr: wrongAddr} - wallets := l1Chain.Wallets() - - for _, w := range wallets { - if constraint.CheckWallet(w) { - t.Fatalf("wallet found") - } - } - }) -} - -// addressConstraint implements constraints.WalletConstraint for testing -type addressConstraint struct { - addr common.Address -} - -func (c *addressConstraint) CheckWallet(w Wallet) bool { - return w.Address() == c.addr -} - -func TestChainID(t *testing.T) { - tests := []struct { - name string - idString string - 
want *big.Int - }{ - { - name: "valid chain ID", - idString: "1", - want: big.NewInt(1), - }, - { - name: "empty chain ID", - idString: "", - want: big.NewInt(0), - }, - { - name: "invalid chain ID", - idString: "not a number", - want: big.NewInt(0), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - chain := newChain(tt.idString, WalletMap{}, nil, AddressMap{}, []Node{}) - got := chain.ID() - // Compare the underlying big.Int values - assert.Equal(t, 0, tt.want.Cmp(got)) - }) - } -} - -func TestSupportsEIP(t *testing.T) { - ctx := context.Background() - chain := newChain("1", WalletMap{}, nil, AddressMap{}, []Node{}) - - // Since we can't reliably test against a live node, we're just testing the error case - t.Run("returns false for connection error", func(t *testing.T) { - assert.False(t, chain.SupportsEIP(ctx, 1559)) - assert.False(t, chain.SupportsEIP(ctx, 4844)) - }) -} - -// mockContractsRegistry extends empty.EmptyRegistry to provide mock contract instances -type mockContractsRegistry struct { - empty.EmptyRegistry -} - -func TestContractsRegistry(t *testing.T) { - node := &mockNode{} - // Create a mock for testing - mockRegistry := &mockContractsRegistry{} - - // Set up the mock to return the registry when ContractsRegistry() is called - node.On("ContractsRegistry").Return(mockRegistry) - - chain := newChain("1", WalletMap{}, nil, AddressMap{}, []Node{node}) - - t.Run("returns empty registry on error", func(t *testing.T) { - registry := chain.Nodes()[0].ContractsRegistry() - assert.NotNil(t, registry) - }) - - t.Run("caches registry", func(t *testing.T) { - registry1 := chain.Nodes()[0].ContractsRegistry() - registry2 := chain.Nodes()[0].ContractsRegistry() - assert.Same(t, registry1, registry2) - }) -} diff --git a/devnet-sdk/system/interfaces.go b/devnet-sdk/system/interfaces.go deleted file mode 100644 index eecb4952fef1a..0000000000000 --- a/devnet-sdk/system/interfaces.go +++ /dev/null @@ -1,150 +0,0 @@ -package system - 
-import ( - "context" - "crypto/ecdsa" - "math/big" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" - supervisorTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - coreTypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/params" -) - -type System interface { - Identifier() string - L1() Chain - L2s() []L2Chain -} - -// Chain represents an Ethereum chain (L1 or L2) -type Chain interface { - ID() types.ChainID - Nodes() []Node // The node at index 0 will always be the sequencer node - Config() (*params.ChainConfig, error) - - // The wallets and addresses below are for use on the chain that the instance represents. - // If the instance also implements L2Chain, then the wallets and addresses below are still for the L2. - Wallets() WalletMap - Addresses() AddressMap -} - -type L2Chain interface { - Chain - - // The wallets below are for use on the L1 chain that this L2Chain instance settles to. 
- L1Wallets() WalletMap -} - -type Node interface { - Name() string - GasPrice(ctx context.Context) (*big.Int, error) - GasLimit(ctx context.Context, tx TransactionData) (uint64, error) - PendingNonceAt(ctx context.Context, address common.Address) (uint64, error) - BlockByNumber(ctx context.Context, number *big.Int) (eth.BlockInfo, error) - ContractsRegistry() interfaces.ContractsRegistry - SupportsEIP(ctx context.Context, eip uint64) bool - RPCURL() string - Client() (*sources.EthClient, error) - GethClient() (*ethclient.Client, error) -} - -type WalletMap map[string]Wallet -type AddressMap descriptors.AddressMap - -// Wallet represents a chain wallet. -// In particular it can process transactions. -type Wallet interface { - PrivateKey() types.Key - Address() types.Address - SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] - InitiateMessage(chainID types.ChainID, target common.Address, message []byte) types.WriteInvocation[any] - ExecuteMessage(identifier bindings.Identifier, sentMessage []byte) types.WriteInvocation[any] - Balance() types.Balance - Nonce() uint64 - - TransactionProcessor -} - -// WalletV2 is a temporary interface for integrating txplan and txintent -type WalletV2 interface { - PrivateKey() *ecdsa.PrivateKey - Address() common.Address - Client() *sources.EthClient - GethClient() *ethclient.Client - Ctx() context.Context -} - -// TransactionProcessor is a helper interface for signing and sending transactions. -type TransactionProcessor interface { - Sign(tx Transaction) (Transaction, error) - Send(ctx context.Context, tx Transaction) error -} - -// Transaction interfaces: - -// TransactionData is the input for a transaction creation. -type TransactionData interface { - From() common.Address - To() *common.Address - Value() *big.Int - Data() []byte - AccessList() coreTypes.AccessList -} - -// Transaction is the instantiated transaction object. 
-type Transaction interface { - Type() uint8 - Hash() common.Hash - TransactionData -} - -type Receipt interface { - BlockNumber() *big.Int - Logs() []*coreTypes.Log - TxHash() common.Hash -} - -// RawTransaction is an optional interface that can be implemented by a Transaction -// to provide access to the raw transaction data. -// It is currently necessary to perform processing operations (signing, sending) -// on the transaction. We would need to do better here. -type RawTransaction interface { - Raw() *coreTypes.Transaction -} - -// Specialized interop interfaces: - -// InteropSystem extends System with interoperability features -type InteropSystem interface { - System - InteropSet() InteropSet - Supervisor(context.Context) (Supervisor, error) -} - -// InteropSet provides access to L2 chains in an interop environment -type InteropSet interface { - L2s() []L2Chain -} - -// Supervisor provides access to the query interface of the supervisor -type Supervisor interface { - LocalUnsafe(context.Context, eth.ChainID) (eth.BlockID, error) - CrossSafe(context.Context, eth.ChainID) (supervisorTypes.DerivedIDPair, error) - Finalized(context.Context, eth.ChainID) (eth.BlockID, error) - FinalizedL1(context.Context) (eth.BlockRef, error) - CrossDerivedToSource(context.Context, eth.ChainID, eth.BlockID) (eth.BlockRef, error) - UpdateLocalUnsafe(context.Context, eth.ChainID, eth.BlockRef) error - UpdateLocalSafe(context.Context, eth.ChainID, eth.L1BlockRef, eth.BlockRef) error - SuperRootAtTimestamp(context.Context, hexutil.Uint64) (eth.SuperRootResponse, error) - AllSafeDerivedAt(context.Context, eth.BlockID) (derived map[eth.ChainID]eth.BlockID, err error) - SyncStatus(context.Context) (eth.SupervisorSyncStatus, error) -} diff --git a/devnet-sdk/system/node.go b/devnet-sdk/system/node.go deleted file mode 100644 index 31149d2e76c50..0000000000000 --- a/devnet-sdk/system/node.go +++ /dev/null @@ -1,139 +0,0 @@ -package system - -import ( - "context" - "fmt" - "math/big" - 
"sync" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/op-service/bigs" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/ethclient" -) - -var ( - // This will make sure that we implement the Node interface - _ Node = (*node)(nil) -) - -type node struct { - rpcUrl string - name string - clients *clientManager - mu sync.Mutex - registry interfaces.ContractsRegistry -} - -func newNode(rpcUrl string, name string, clients *clientManager) *node { - return &node{rpcUrl: rpcUrl, name: name, clients: clients} -} - -func (n *node) GasPrice(ctx context.Context) (*big.Int, error) { - client, err := n.clients.Client(n.rpcUrl) - if err != nil { - return nil, fmt.Errorf("failed to get client: %w", err) - } - return client.SuggestGasPrice(ctx) -} - -func (n *node) GasLimit(ctx context.Context, tx TransactionData) (uint64, error) { - client, err := n.clients.Client(n.rpcUrl) - if err != nil { - return 0, fmt.Errorf("failed to get client: %w", err) - } - - msg := ethereum.CallMsg{ - From: tx.From(), - To: tx.To(), - Value: tx.Value(), - Data: tx.Data(), - AccessList: tx.AccessList(), - } - estimated, err := client.EstimateGas(ctx, msg) - if err != nil { - return 0, fmt.Errorf("failed to estimate gas: %w", err) - } - - return estimated, nil -} - -func (n *node) PendingNonceAt(ctx context.Context, address common.Address) (uint64, error) { - client, err := n.clients.Client(n.rpcUrl) - if err != nil { - return 0, fmt.Errorf("failed to get client: %w", err) - } - return client.PendingNonceAt(ctx, address) -} - -func (n *node) BlockByNumber(ctx context.Context, number *big.Int) (eth.BlockInfo, error) { - client, err := n.clients.Client(n.rpcUrl) - if err != nil { - return nil, 
fmt.Errorf("failed to get client: %w", err) - } - var block eth.BlockInfo - if number != nil { - block, err = client.InfoByNumber(ctx, bigs.Uint64Strict(number)) - } else { - block, err = client.InfoByLabel(ctx, eth.Unsafe) - } - if err != nil { - return nil, err - } - return block, nil -} - -func (n *node) Client() (*sources.EthClient, error) { - return n.clients.Client(n.rpcUrl) -} - -func (n *node) GethClient() (*ethclient.Client, error) { - return n.clients.GethClient(n.rpcUrl) -} - -func (n *node) ContractsRegistry() interfaces.ContractsRegistry { - n.mu.Lock() - defer n.mu.Unlock() - - if n.registry != nil { - return n.registry - } - client, err := n.clients.GethClient(n.rpcUrl) - if err != nil { - return contracts.NewEmptyRegistry() - } - - n.registry = contracts.NewClientRegistry(client) - return n.registry -} - -func (n *node) RPCURL() string { - return n.rpcUrl -} - -func (n *node) SupportsEIP(ctx context.Context, eip uint64) bool { - client, err := n.Client() - if err != nil { - return false - } - - switch eip { - case 1559: - return checkHeader(ctx, client, func(h eth.BlockInfo) bool { - return h.BaseFee() != nil - }) - case 4844: - return checkHeader(ctx, client, func(h eth.BlockInfo) bool { - return h.ExcessBlobGas() != nil - }) - } - return false -} - -func (n *node) Name() string { - return n.name -} diff --git a/devnet-sdk/system/periphery/go-ethereum/fees.go b/devnet-sdk/system/periphery/go-ethereum/fees.go deleted file mode 100644 index 0508d195e7384..0000000000000 --- a/devnet-sdk/system/periphery/go-ethereum/fees.go +++ /dev/null @@ -1,134 +0,0 @@ -package goethereum - -import ( - "context" - "math/big" - - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" -) - -var ( - // Ensure that the feeEstimator implements the FeeEstimator interface - _ FeeEstimator = (*EIP1559FeeEstimator)(nil) - - // Ensure that the EIP1159FeeEthClient implements the 
EIP1159FeeEthClient interface - _ EIP1159FeeEthClient = (*ethclient.Client)(nil) -) - -// FeeEstimator is a generic fee estimation interface (not specific to EIP-1559) -type FeeEstimator interface { - EstimateFees(ctx context.Context, opts *bind.TransactOpts) (*bind.TransactOpts, error) -} - -// EIP1559FeeEstimator is a fee estimator that uses EIP-1559 fee estimation -type EIP1559FeeEstimator struct { - // Access to the Ethereum client is needed to get the fee information from the chain - client EIP1159FeeEthClient - - options eip1559FeeEstimatorOptions -} - -type eip1559FeeEstimatorOptions struct { - // The base multiplier is used to increase the maxFeePerGas (GasFeeCap) by a factor - baseMultiplier float64 - - // The tip multiplier is used to increase the maxPriorityFeePerGas (GasTipCap) by a factor - tipMultiplier float64 -} - -type EIP1559FeeEstimatorOption interface { - apply(*eip1559FeeEstimatorOptions) -} - -type eip1559FeeEstimatorOptionBaseMultiplier float64 - -func (o eip1559FeeEstimatorOptionBaseMultiplier) apply(opts *eip1559FeeEstimatorOptions) { - opts.baseMultiplier = float64(o) -} - -func WithEIP1559BaseMultiplier(multiplier float64) EIP1559FeeEstimatorOption { - return eip1559FeeEstimatorOptionBaseMultiplier(multiplier) -} - -type eip1559FeeEstimatorOptionTipMultiplier float64 - -func (o eip1559FeeEstimatorOptionTipMultiplier) apply(opts *eip1559FeeEstimatorOptions) { - opts.tipMultiplier = float64(o) -} - -func WithEIP1559TipMultiplier(multiplier float64) EIP1559FeeEstimatorOption { - return eip1559FeeEstimatorOptionTipMultiplier(multiplier) -} - -func NewEIP1559FeeEstimator(client EIP1159FeeEthClient, opts ...EIP1559FeeEstimatorOption) *EIP1559FeeEstimator { - options := eip1559FeeEstimatorOptions{ - baseMultiplier: 1.0, - tipMultiplier: 1.0, - } - - for _, o := range opts { - o.apply(&options) - } - - return &EIP1559FeeEstimator{ - client: client, - options: options, - } -} - -func (f *EIP1559FeeEstimator) EstimateFees(ctx context.Context, opts 
*bind.TransactOpts) (*bind.TransactOpts, error) { - newOpts := *opts - - // Add a gas tip cap if needed - if newOpts.GasTipCap == nil { - tipCap, err := f.client.SuggestGasTipCap(ctx) - - if err != nil { - return nil, err - } - - // GasTipCap represents the maxPriorityFeePerGas - newOpts.GasTipCap = multiplyBigInt(tipCap, f.options.tipMultiplier) - } - - // Add a gas fee cap if needed - if newOpts.GasFeeCap == nil { - block, err := f.client.BlockByNumber(ctx, nil) - if err != nil { - return nil, err - } - - baseFee := block.BaseFee() - if baseFee != nil { - // The adjusted base fee takes the multiplier into account - adjustedBaseFee := multiplyBigInt(baseFee, f.options.baseMultiplier) - - // The total fee (maxFeePerGas) is the sum of the base fee and the tip - newOpts.GasFeeCap = big.NewInt(0).Add(adjustedBaseFee, newOpts.GasTipCap) - } - } - - return &newOpts, nil -} - -// EIP1159FeeEthClient is a subset of the ethclient.Client interface required for fee estimation -type EIP1159FeeEthClient interface { - BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) - SuggestGasTipCap(ctx context.Context) (*big.Int, error) -} - -// multiplyBigInt is a little helper for a messy big.Int x float64 multiplication -// -// It rounds the result away from zero since that's the lower risk behavior for fee estimation -func multiplyBigInt(b *big.Int, m float64) *big.Int { - bFloat := big.NewFloat(0).SetInt(b) - mFloat := big.NewFloat(m) - ceiled, accuracy := big.NewFloat(0).Mul(bFloat, mFloat).Int(nil) - if accuracy == big.Below { - ceiled = ceiled.Add(ceiled, big.NewInt(1)) - } - - return ceiled -} diff --git a/devnet-sdk/system/periphery/go-ethereum/fees_test.go b/devnet-sdk/system/periphery/go-ethereum/fees_test.go deleted file mode 100644 index 302d6f556acc6..0000000000000 --- a/devnet-sdk/system/periphery/go-ethereum/fees_test.go +++ /dev/null @@ -1,276 +0,0 @@ -package goethereum - -import ( - "context" - "fmt" - "math/big" - "testing" - - 
"github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestMultiplyBigInt(t *testing.T) { - type TestCase struct { - value *big.Int - multiplier float64 - expected *big.Int - } - - testCases := []TestCase{ - { - value: big.NewInt(10), - multiplier: 1.0, - expected: big.NewInt(10), - }, - { - value: big.NewInt(7), - multiplier: 0.0, - expected: big.NewInt(0), - }, - { - value: big.NewInt(10), - multiplier: 1.01, - expected: big.NewInt(11), - }, - { - value: big.NewInt(10), - multiplier: 1.11, - expected: big.NewInt(12), - }, - { - value: big.NewInt(5), - multiplier: 1.2, - expected: big.NewInt(6), - }, - } - - for _, testCase := range testCases { - t.Run(fmt.Sprintf("should return %d for %d multiplied by %f", testCase.expected.Int64(), testCase.value.Int64(), testCase.multiplier), func(t *testing.T) { - result := multiplyBigInt(testCase.value, testCase.multiplier) - require.Equal(t, testCase.expected, result) - }) - } -} - -func TestEstimateEIP1559Fees(t *testing.T) { - t.Run("if GasFeeCap and GasTipCap are not nil", func(t *testing.T) { - opts := &bind.TransactOpts{ - GasFeeCap: big.NewInt(1), - GasTipCap: big.NewInt(2), - } - - t.Run("should not modify the options", func(t *testing.T) { - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{}) - newOpts, err := feeEstimator.EstimateFees(context.Background(), opts) - require.NoError(t, err) - - require.Equal(t, opts, newOpts) - - // We make sure that we get a copy of the object to prevent mutating the original - assert.NotSame(t, opts, newOpts) - }) - }) - - t.Run("if the GasTipCap is nil", func(t *testing.T) { - defaultOpts := &bind.TransactOpts{ - GasFeeCap: big.NewInt(1), - From: common.Address{}, - Nonce: big.NewInt(64), - } - - t.Run("should return an error if the client returns an error", func(t *testing.T) { - tipCapErr := 
fmt.Errorf("tip cap error") - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{ - tipCapErr: tipCapErr, - }) - _, err := feeEstimator.EstimateFees(context.Background(), defaultOpts) - require.Equal(t, tipCapErr, err) - }) - - t.Run("with default tip multiplier", func(t *testing.T) { - t.Run("should set the GasTipCap to the client's suggested tip cap", func(t *testing.T) { - tipCapValue := big.NewInt(5) - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{ - tipCapValue: tipCapValue, - }) - - newOpts, err := feeEstimator.EstimateFees(context.Background(), defaultOpts) - require.NoError(t, err) - - // We create a new opts with the expected tip cap added - expectedOpts := *defaultOpts - expectedOpts.GasTipCap = tipCapValue - - // We check that the tip has been added - require.Equal(t, &expectedOpts, newOpts) - - // We make sure that we get a copy of the object to prevent mutating the original - assert.NotSame(t, defaultOpts, newOpts) - }) - }) - - t.Run("with custom tip multiplier", func(t *testing.T) { - t.Run("should set the GasTipCap to the client's suggested tip cap multiplied by the tip multiplier", func(t *testing.T) { - tipCapValue := big.NewInt(5) - tipMultiplier := 10.0 - // The expected tip is a product of the tip cap and the tip multiplier - expectedTip := big.NewInt(50) - - // We create a fee estimator with a custom tip multiplier - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{ - tipCapValue: tipCapValue, - }, WithEIP1559TipMultiplier(tipMultiplier)) - - newOpts, err := feeEstimator.EstimateFees(context.Background(), defaultOpts) - require.NoError(t, err) - - // We create a new opts with the expected tip cap added - expectedOpts := *defaultOpts - expectedOpts.GasTipCap = expectedTip - - // We check that the tip has been added - require.Equal(t, &expectedOpts, newOpts) - - // We make sure that we get a copy of the object to prevent mutating the original - assert.NotSame(t, defaultOpts, newOpts) - }) - }) - }) - - 
t.Run("if the GasFeeCap is nil", func(t *testing.T) { - defaultOpts := &bind.TransactOpts{ - GasTipCap: big.NewInt(1), - From: common.Address{}, - Nonce: big.NewInt(64), - } - - t.Run("should return an error if the client returns an error", func(t *testing.T) { - blockErr := fmt.Errorf("tip cap error") - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{ - blockErr: blockErr, - }) - _, err := feeEstimator.EstimateFees(context.Background(), defaultOpts) - require.Equal(t, blockErr, err) - }) - - t.Run("should set the GasFeeCap to the sum of block base fee and tip", func(t *testing.T) { - baseFeeValue := big.NewInt(5) - blockValue := types.NewBlock(&types.Header{ - BaseFee: baseFeeValue, - Time: 0, - }, nil, nil, nil, &mockBlockType{}) - - // We expect the total gas cap to be the base fee plus the tip cap - expectedGas := big.NewInt(0).Add(baseFeeValue, defaultOpts.GasTipCap) - - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{ - blockValue: blockValue, - }) - - newOpts, err := feeEstimator.EstimateFees(context.Background(), defaultOpts) - require.NoError(t, err) - - // We create a new opts with the expected fee cap added - expectedOpts := *defaultOpts - expectedOpts.GasFeeCap = expectedGas - - // We check that the tip has been added - require.Equal(t, &expectedOpts, newOpts) - - // We make sure that we get a copy of the object to prevent mutating the original - assert.NotSame(t, defaultOpts, newOpts) - }) - - t.Run("should set the GasFeeCap to nil if the base fee is nil", func(t *testing.T) { - blockValue := types.NewBlock(&types.Header{ - BaseFee: nil, - Time: 0, - }, nil, nil, nil, &mockBlockType{}) - - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{ - blockValue: blockValue, - }) - - newOpts, err := feeEstimator.EstimateFees(context.Background(), defaultOpts) - require.NoError(t, err) - - // We create a new opts with the expected fee cap added - expectedOpts := *defaultOpts - expectedOpts.GasFeeCap = nil - - // We check that 
the tip has been added - require.Equal(t, &expectedOpts, newOpts) - - // We make sure that we get a copy of the object to prevent mutating the original - assert.NotSame(t, defaultOpts, newOpts) - }) - - t.Run("with custom base multiplier", func(t *testing.T) { - t.Run("should set the GasFeeCap to the block base fee multiplied by the base multiplier", func(t *testing.T) { - baseMultiplier := 1.2 - baseFeeValue := big.NewInt(9) - blockValue := types.NewBlock(&types.Header{ - BaseFee: baseFeeValue, - Time: 0, - }, nil, nil, nil, &mockBlockType{}) - - // We expect the total gas cap to be the base fee (9) multiplied by 1.2 (= 10.8, rounded up to 11) plus the tip cap (1) - expectedGas := big.NewInt(0).Add(big.NewInt(11), defaultOpts.GasTipCap) - - // We create a fee estimator with a custom tip multiplier - feeEstimator := NewEIP1559FeeEstimator(&mockFeeEthClientImpl{ - blockValue: blockValue, - }, WithEIP1559BaseMultiplier(baseMultiplier)) - - newOpts, err := feeEstimator.EstimateFees(context.Background(), defaultOpts) - require.NoError(t, err) - - // We create a new opts with the expected tip cap added - expectedOpts := *defaultOpts - expectedOpts.GasFeeCap = expectedGas - - // We check that the tip has been added - require.Equal(t, &expectedOpts, newOpts) - - // We make sure that we get a copy of the object to prevent mutating the original - assert.NotSame(t, defaultOpts, newOpts) - }) - }) - }) -} - -var ( - _ EIP1159FeeEthClient = (*mockFeeEthClientImpl)(nil) - - _ types.BlockType = (*mockBlockType)(nil) -) - -type mockFeeEthClientImpl struct { - blockValue *types.Block - blockErr error - - tipCapValue *big.Int - tipCapErr error -} - -func (m *mockFeeEthClientImpl) BlockByNumber(ctx context.Context, number *big.Int) (*types.Block, error) { - return m.blockValue, m.blockErr -} - -func (m *mockFeeEthClientImpl) SuggestGasTipCap(ctx context.Context) (*big.Int, error) { - return m.tipCapValue, m.tipCapErr -} - -type mockBlockType struct{} - -func (m *mockBlockType) 
HasOptimismWithdrawalsRoot(blkTime uint64) bool { - return false -} - -func (m *mockBlockType) IsIsthmus(blkTime uint64) bool { - return false -} diff --git a/devnet-sdk/system/system.go b/devnet-sdk/system/system.go deleted file mode 100644 index d282e9ee01d4d..0000000000000 --- a/devnet-sdk/system/system.go +++ /dev/null @@ -1,110 +0,0 @@ -package system - -import ( - "context" - "fmt" - "slices" - "sync" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/shell/env" - "github.com/ethereum-optimism/optimism/op-service/dial" -) - -type system struct { - identifier string - l1 Chain - l2s []L2Chain -} - -// system implements System -var _ System = (*system)(nil) - -func NewSystemFromURL(url string) (System, error) { - devnetEnv, err := env.LoadDevnetFromURL(url) - if err != nil { - return nil, fmt.Errorf("failed to load devnet from URL: %w", err) - } - - sys, err := systemFromDevnet(devnetEnv.Env) - if err != nil { - return nil, fmt.Errorf("failed to create system from devnet: %w", err) - } - return sys, nil -} - -func (s *system) L1() Chain { - return s.l1 -} - -func (s *system) L2s() []L2Chain { - return s.l2s -} - -func (s *system) Identifier() string { - return s.identifier -} - -func systemFromDevnet(dn *descriptors.DevnetEnvironment) (System, error) { - l1, err := newChainFromDescriptor(dn.L1) - if err != nil { - return nil, fmt.Errorf("failed to add L1 chain: %w", err) - } - - l2s := make([]L2Chain, len(dn.L2)) - for i, l2 := range dn.L2 { - l2s[i], err = newL2ChainFromDescriptor(l2) - if err != nil { - return nil, fmt.Errorf("failed to add L2 chain: %w", err) - } - } - - sys := &system{ - identifier: dn.Name, - l1: l1, - l2s: l2s, - } - - if slices.Contains(dn.Features, "interop") { - // TODO(14849): this will break as soon as we have a dependency set that - // doesn't include all L2s. 
- supervisorRPC := dn.L2[0].Services["supervisor"][0].Endpoints["rpc"] - return &interopSystem{ - system: sys, - supervisorRPC: fmt.Sprintf("http://%s:%d", supervisorRPC.Host, supervisorRPC.Port), - }, nil - } - - return sys, nil -} - -type interopSystem struct { - *system - - supervisorRPC string - supervisor Supervisor - mu sync.Mutex -} - -// interopSystem implements InteropSystem -var _ InteropSystem = (*interopSystem)(nil) - -func (i *interopSystem) InteropSet() InteropSet { - return i.system // TODO: the interop set might not contain all L2s -} - -func (i *interopSystem) Supervisor(ctx context.Context) (Supervisor, error) { - i.mu.Lock() - defer i.mu.Unlock() - - if i.supervisor != nil { - return i.supervisor, nil - } - - supervisor, err := dial.DialSupervisorClientWithTimeout(ctx, nil, i.supervisorRPC) - if err != nil { - return nil, fmt.Errorf("failed to dial supervisor RPC: %w", err) - } - i.supervisor = supervisor - return supervisor, nil -} diff --git a/devnet-sdk/system/system_test.go b/devnet-sdk/system/system_test.go deleted file mode 100644 index f97bbdec532b7..0000000000000 --- a/devnet-sdk/system/system_test.go +++ /dev/null @@ -1,273 +0,0 @@ -package system - -import ( - "encoding/json" - "os" - "path/filepath" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewSystemFromEnv(t *testing.T) { - // Create a temporary devnet file - tempDir := t.TempDir() - devnetFile := filepath.Join(tempDir, "devnet.json") - - devnet := &descriptors.DevnetEnvironment{ - L1: &descriptors.Chain{ - ID: "1", - Nodes: []descriptors.Node{{ - Services: map[string]*descriptors.Service{ - "el": { - Name: "geth", - Endpoints: descriptors.EndpointMap{ - "rpc": &descriptors.PortInfo{ - Host: "localhost", - Port: 8545, - }, - }, - }, - }, - }}, - Wallets: 
descriptors.WalletMap{ - "default": &descriptors.Wallet{ - Address: common.HexToAddress("0x123"), - PrivateKey: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - }, - }, - Addresses: descriptors.AddressMap{ - "defaultl1": common.HexToAddress("0x123"), - }, - }, - L2: []*descriptors.L2Chain{ - { - Chain: &descriptors.Chain{ - ID: "2", - Nodes: []descriptors.Node{{ - Services: map[string]*descriptors.Service{ - "el": { - Name: "geth", - Endpoints: descriptors.EndpointMap{ - "rpc": &descriptors.PortInfo{ - Host: "localhost", - Port: 8546, - }, - }, - }, - }, - }}, - Wallets: descriptors.WalletMap{ - "default": &descriptors.Wallet{ - Address: common.HexToAddress("0x123"), - PrivateKey: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - }, - }, - Addresses: descriptors.AddressMap{ - "defaultl2": common.HexToAddress("0x456"), - }, - }, - L1Wallets: descriptors.WalletMap{ - "default": &descriptors.Wallet{ - Address: common.HexToAddress("0x123"), - PrivateKey: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - }, - }, - }, - }, - Features: []string{}, - } - - data, err := json.Marshal(devnet) - require.NoError(t, err) - require.NoError(t, os.WriteFile(devnetFile, data, 0644)) - - sys, err := NewSystemFromURL(devnetFile) - assert.NoError(t, err) - assert.NotNil(t, sys) -} - -func TestSystemFromDevnet(t *testing.T) { - testNode := descriptors.Node{ - Services: map[string]*descriptors.Service{ - "el": { - Name: "geth", - Endpoints: descriptors.EndpointMap{ - "rpc": &descriptors.PortInfo{ - Host: "localhost", - Port: 8545, - }, - }, - }, - }, - } - - testWallet := &descriptors.Wallet{ - Address: common.HexToAddress("0x123"), - PrivateKey: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - } - - tests := []struct { - name string - devnet *descriptors.DevnetEnvironment - wantErr bool - isInterop bool - }{ - { - name: "basic system", - devnet: &descriptors.DevnetEnvironment{ - L1: 
&descriptors.Chain{ - ID: "1", - Nodes: []descriptors.Node{testNode}, - Wallets: descriptors.WalletMap{ - "default": testWallet, - }, - Addresses: descriptors.AddressMap{ - "defaultl1": common.HexToAddress("0x123"), - }, - }, - L2: []*descriptors.L2Chain{ - { - Chain: &descriptors.Chain{ - ID: "2", - Nodes: []descriptors.Node{testNode}, - Wallets: descriptors.WalletMap{ - "default": testWallet, - }, - }, - L1Wallets: descriptors.WalletMap{ - "default": testWallet, - }, - }, - }, - }, - wantErr: false, - isInterop: false, - }, - { - name: "interop system", - devnet: &descriptors.DevnetEnvironment{ - L1: &descriptors.Chain{ - ID: "1", - Nodes: []descriptors.Node{testNode}, - Wallets: descriptors.WalletMap{ - "default": testWallet, - }, - Addresses: descriptors.AddressMap{ - "defaultl1": common.HexToAddress("0x123"), - }, - }, - L2: []*descriptors.L2Chain{ - { - Chain: &descriptors.Chain{ - ID: "2", - Nodes: []descriptors.Node{testNode}, - Wallets: descriptors.WalletMap{ - "default": testWallet, - }, - Services: descriptors.RedundantServiceMap{ - "supervisor": []*descriptors.Service{ - &descriptors.Service{ - Name: "supervisor", - Endpoints: descriptors.EndpointMap{ - "rpc": &descriptors.PortInfo{ - Host: "localhost", - Port: 8545, - }, - }, - }, - }, - }, - }, - L1Wallets: descriptors.WalletMap{ - "default": testWallet, - }, - }}, - Features: []string{"interop"}, - }, - wantErr: false, - isInterop: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - sys, err := systemFromDevnet(tt.devnet) - if tt.wantErr { - assert.Error(t, err) - return - } - assert.NoError(t, err) - assert.NotNil(t, sys) - - _, isInterop := sys.(InteropSystem) - assert.Equal(t, tt.isInterop, isInterop) - }) - } -} - -func TestWallet(t *testing.T) { - chain := newChain("1", WalletMap{}, nil, AddressMap{}, []Node{}) - tests := []struct { - name string - privateKey string - address types.Address - wantAddr types.Address - wantPrivKey types.Key - }{ - { - name: "valid 
wallet", - privateKey: "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", - address: common.HexToAddress("0x123"), - wantAddr: common.HexToAddress("0x123"), - }, - { - name: "empty wallet", - privateKey: "", - address: common.HexToAddress("0x123"), - wantAddr: common.HexToAddress("0x123"), - }, - { - name: "only address", - privateKey: "", - address: common.HexToAddress("0x456"), - wantAddr: common.HexToAddress("0x456"), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - w, err := NewWallet(tt.privateKey, tt.address, chain) - assert.Nil(t, err) - - assert.Equal(t, tt.wantAddr, w.Address()) - }) - } -} - -func TestChainUser(t *testing.T) { - chain := newChain("1", WalletMap{}, nil, AddressMap{}, []Node{}) - - testWallet, err := NewWallet("0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef", common.HexToAddress("0x123"), chain) - assert.Nil(t, err) - - chain.wallets = WalletMap{ - "l2Faucet": testWallet, - } - - wallets := chain.Wallets() - require.NoError(t, err) - - for _, w := range wallets { - if w.Address() == testWallet.Address() { - assert.Equal(t, testWallet.Address(), w.Address()) - assert.Equal(t, testWallet.PrivateKey(), w.PrivateKey()) - return - } - } - assert.Fail(t, "wallet not found") -} diff --git a/devnet-sdk/system/tx.go b/devnet-sdk/system/tx.go deleted file mode 100644 index 41a661bd9539a..0000000000000 --- a/devnet-sdk/system/tx.go +++ /dev/null @@ -1,227 +0,0 @@ -package system - -import ( - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" -) - -// TxOpts is a struct that holds all transaction options -type TxOpts struct { - from common.Address - to *common.Address - value *big.Int - data []byte - gasLimit uint64 // Optional: if 0, will be estimated - accessList types.AccessList - blobHashes []common.Hash - blobs []kzg4844.Blob - commitments []kzg4844.Commitment - proofs 
[]kzg4844.Proof -} - -var _ TransactionData = (*TxOpts)(nil) - -func (opts *TxOpts) From() common.Address { - return opts.from -} - -func (opts *TxOpts) To() *common.Address { - return opts.to -} - -func (opts *TxOpts) Value() *big.Int { - return opts.value -} - -func (opts *TxOpts) Data() []byte { - return opts.data -} - -func (opts *TxOpts) AccessList() types.AccessList { - return opts.accessList -} - -// Validate checks that all required fields are set and consistent -func (opts *TxOpts) Validate() error { - // Check mandatory fields - if opts.from == (common.Address{}) { - return fmt.Errorf("from address is required") - } - if opts.to == nil { - return fmt.Errorf("to address is required") - } - if opts.value == nil || opts.value.Sign() < 0 { - return fmt.Errorf("value must be non-negative") - } - - // Check blob-related fields consistency - hasBlobs := len(opts.blobs) > 0 - hasCommitments := len(opts.commitments) > 0 - hasProofs := len(opts.proofs) > 0 - hasBlobHashes := len(opts.blobHashes) > 0 - - // If any blob-related field is set, all must be set - if hasBlobs || hasCommitments || hasProofs || hasBlobHashes { - if !hasBlobs { - return fmt.Errorf("blobs are required when other blob fields are set") - } - if !hasCommitments { - return fmt.Errorf("commitments are required when other blob fields are set") - } - if !hasProofs { - return fmt.Errorf("proofs are required when other blob fields are set") - } - if !hasBlobHashes { - return fmt.Errorf("blob hashes are required when other blob fields are set") - } - - // Check that all blob-related fields have the same length - blobCount := len(opts.blobs) - if len(opts.commitments) != blobCount { - return fmt.Errorf("number of commitments (%d) does not match number of blobs (%d)", len(opts.commitments), blobCount) - } - if len(opts.proofs) != blobCount { - return fmt.Errorf("number of proofs (%d) does not match number of blobs (%d)", len(opts.proofs), blobCount) - } - if len(opts.blobHashes) != blobCount { - return 
fmt.Errorf("number of blob hashes (%d) does not match number of blobs (%d)", len(opts.blobHashes), blobCount) - } - } - - return nil -} - -// TxOption is a function that configures TxOpts -type TxOption func(*TxOpts) - -// WithFrom sets the sender address -func WithFrom(from common.Address) TxOption { - return func(opts *TxOpts) { - opts.from = from - } -} - -// WithTo sets the recipient address -func WithTo(to common.Address) TxOption { - return func(opts *TxOpts) { - opts.to = &to - } -} - -// WithValue sets the transaction value -func WithValue(value *big.Int) TxOption { - return func(opts *TxOpts) { - opts.value = value - } -} - -// WithData sets the transaction data -func WithData(data []byte) TxOption { - return func(opts *TxOpts) { - opts.data = data - } -} - -// WithGasLimit sets an explicit gas limit -func WithGasLimit(gasLimit uint64) TxOption { - return func(opts *TxOpts) { - opts.gasLimit = gasLimit - } -} - -// WithAccessList sets the access list for EIP-2930 transactions -func WithAccessList(accessList types.AccessList) TxOption { - return func(opts *TxOpts) { - opts.accessList = accessList - } -} - -// WithBlobs sets the blob transaction fields -func WithBlobs(blobs []kzg4844.Blob) TxOption { - return func(opts *TxOpts) { - opts.blobs = blobs - } -} - -// WithBlobCommitments sets the blob commitments -func WithBlobCommitments(commitments []kzg4844.Commitment) TxOption { - return func(opts *TxOpts) { - opts.commitments = commitments - } -} - -// WithBlobProofs sets the blob proofs -func WithBlobProofs(proofs []kzg4844.Proof) TxOption { - return func(opts *TxOpts) { - opts.proofs = proofs - } -} - -// WithBlobHashes sets the blob hashes -func WithBlobHashes(hashes []common.Hash) TxOption { - return func(opts *TxOpts) { - opts.blobHashes = hashes - } -} - -// EthTx is the default implementation of Transaction that wraps types.Transaction -type EthTx struct { - tx *types.Transaction - from common.Address - txType uint8 -} - -func (t *EthTx) Hash() 
common.Hash { - return t.tx.Hash() -} - -func (t *EthTx) From() common.Address { - return t.from -} - -func (t *EthTx) To() *common.Address { - return t.tx.To() -} - -func (t *EthTx) Value() *big.Int { - return t.tx.Value() -} - -func (t *EthTx) Data() []byte { - return t.tx.Data() -} - -func (t *EthTx) AccessList() types.AccessList { - return t.tx.AccessList() -} - -func (t *EthTx) Type() uint8 { - return t.txType -} - -func (t *EthTx) Raw() *types.Transaction { - return t.tx -} - -// EthReceipt is the default implementation of Receipt that wraps types.Receipt -type EthReceipt struct { - blockNumber *big.Int - logs []*types.Log - txHash common.Hash -} - -func (t *EthReceipt) BlockNumber() *big.Int { - return t.blockNumber -} - -func (t *EthReceipt) Logs() []*types.Log { - return t.logs -} - -func (t *EthReceipt) TxHash() common.Hash { - return t.txHash -} diff --git a/devnet-sdk/system/tx_test.go b/devnet-sdk/system/tx_test.go deleted file mode 100644 index e9ec8137b17d2..0000000000000 --- a/devnet-sdk/system/tx_test.go +++ /dev/null @@ -1,193 +0,0 @@ -package system - -import ( - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/stretchr/testify/assert" -) - -func TestTxOpts_Validate(t *testing.T) { - addr := common.HexToAddress("0x1234567890123456789012345678901234567890") - tests := []struct { - name string - opts *TxOpts - wantErr bool - }{ - { - name: "valid basic transaction", - opts: &TxOpts{ - from: addr, - to: &addr, - value: big.NewInt(0), - }, - wantErr: false, - }, - { - name: "missing from address", - opts: &TxOpts{ - to: &addr, - value: big.NewInt(0), - }, - wantErr: true, - }, - { - name: "missing to address", - opts: &TxOpts{ - from: addr, - value: big.NewInt(0), - }, - wantErr: true, - }, - { - name: "negative value", - opts: &TxOpts{ - from: addr, - to: &addr, - value: big.NewInt(-1), - }, - wantErr: true, - }, - { - 
name: "valid with blobs", - opts: &TxOpts{ - from: addr, - to: &addr, - value: big.NewInt(0), - blobs: []kzg4844.Blob{{1}}, - commitments: []kzg4844.Commitment{{2}}, - proofs: []kzg4844.Proof{{3}}, - blobHashes: []common.Hash{{4}}, - }, - wantErr: false, - }, - { - name: "inconsistent blob fields - missing blobs", - opts: &TxOpts{ - from: addr, - to: &addr, - value: big.NewInt(0), - commitments: []kzg4844.Commitment{{2}}, - proofs: []kzg4844.Proof{{3}}, - blobHashes: []common.Hash{{4}}, - }, - wantErr: true, - }, - { - name: "inconsistent blob fields - mismatched lengths", - opts: &TxOpts{ - from: addr, - to: &addr, - value: big.NewInt(0), - blobs: []kzg4844.Blob{{1}}, - commitments: []kzg4844.Commitment{{2}, {3}}, // Extra commitment - proofs: []kzg4844.Proof{{3}}, - blobHashes: []common.Hash{{4}}, - }, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := tt.opts.Validate() - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} - -func TestTxOpts_Getters(t *testing.T) { - addr := common.HexToAddress("0x1234567890123456789012345678901234567890") - value := big.NewInt(123) - data := []byte{1, 2, 3} - - opts := &TxOpts{ - from: addr, - to: &addr, - value: value, - data: data, - } - - assert.Equal(t, addr, opts.From()) - assert.Equal(t, &addr, opts.To()) - assert.Equal(t, value, opts.Value()) - assert.Equal(t, data, opts.Data()) -} - -func TestEthTx_Methods(t *testing.T) { - addr := common.HexToAddress("0x1234567890123456789012345678901234567890") - value := big.NewInt(123) - data := []byte{1, 2, 3} - - // Create a legacy transaction for testing - tx := types.NewTransaction( - 0, // nonce - addr, // to - value, // value - 21000, // gas limit - big.NewInt(1), // gas price - data, // data - ) - - ethTx := &EthTx{ - tx: tx, - from: addr, - txType: uint8(types.LegacyTxType), - } - - assert.Equal(t, tx.Hash(), ethTx.Hash()) - assert.Equal(t, addr, ethTx.From()) - assert.Equal(t, &addr, 
ethTx.To()) - assert.Equal(t, value, ethTx.Value()) - assert.Equal(t, data, ethTx.Data()) - assert.Equal(t, uint8(types.LegacyTxType), ethTx.Type()) - assert.Equal(t, tx, ethTx.Raw()) -} - -func TestTxOptions(t *testing.T) { - addr := common.HexToAddress("0x1234567890123456789012345678901234567890") - value := big.NewInt(123) - data := []byte{1, 2, 3} - gasLimit := uint64(21000) - accessList := types.AccessList{{ - Address: addr, - StorageKeys: []common.Hash{{1}}, - }} - blobs := []kzg4844.Blob{{1}} - commitments := []kzg4844.Commitment{{2}} - proofs := []kzg4844.Proof{{3}} - blobHashes := []common.Hash{{4}} - - opts := &TxOpts{} - - // Apply all options - WithFrom(addr)(opts) - WithTo(addr)(opts) - WithValue(value)(opts) - WithData(data)(opts) - WithGasLimit(gasLimit)(opts) - WithAccessList(accessList)(opts) - WithBlobs(blobs)(opts) - WithBlobCommitments(commitments)(opts) - WithBlobProofs(proofs)(opts) - WithBlobHashes(blobHashes)(opts) - - // Verify all fields were set correctly - assert.Equal(t, addr, opts.from) - assert.Equal(t, &addr, opts.to) - assert.Equal(t, value, opts.value) - assert.Equal(t, data, opts.data) - assert.Equal(t, gasLimit, opts.gasLimit) - assert.Equal(t, accessList, opts.accessList) - assert.Equal(t, blobs, opts.blobs) - assert.Equal(t, commitments, opts.commitments) - assert.Equal(t, proofs, opts.proofs) - assert.Equal(t, blobHashes, opts.blobHashes) -} diff --git a/devnet-sdk/system/txbuilder.go b/devnet-sdk/system/txbuilder.go deleted file mode 100644 index 836210b1fbbb7..0000000000000 --- a/devnet-sdk/system/txbuilder.go +++ /dev/null @@ -1,360 +0,0 @@ -package system - -import ( - "context" - "fmt" - "math/big" - - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/holiman/uint256" -) - -// Default values for gas calculations -const ( - DefaultGasLimitMarginPercent = 20 // 20% margin for gas limit - DefaultFeeCapMultiplier = 2 // 2x gas price 
for fee cap -) - -// TxBuilderOption is a function that configures a TxBuilder -type TxBuilderOption func(*TxBuilder) - -// WithTxType sets the transaction type to use, overriding automatic detection -func WithTxType(txType uint8) TxBuilderOption { - return func(b *TxBuilder) { - b.forcedTxType = &txType - b.supportedTxTypes = []uint8{txType} - } -} - -// WithGasLimitMargin sets the margin percentage to add to estimated gas limit -func WithGasLimitMargin(marginPercent uint64) TxBuilderOption { - return func(b *TxBuilder) { - b.gasLimitMarginPercent = marginPercent - } -} - -// WithFeeCapMultiplier sets the multiplier for calculating fee cap from gas price -func WithFeeCapMultiplier(multiplier uint64) TxBuilderOption { - return func(b *TxBuilder) { - b.feeCapMultiplier = multiplier - } -} - -// TxBuilder helps construct Ethereum transactions -type TxBuilder struct { - ctx context.Context - chain Chain - supportedTxTypes []uint8 - forcedTxType *uint8 // indicates if the tx type was manually set - gasLimitMarginPercent uint64 - feeCapMultiplier uint64 -} - -// NewTxBuilder creates a new transaction builder -func NewTxBuilder(ctx context.Context, chain Chain, opts ...TxBuilderOption) *TxBuilder { - builder := &TxBuilder{ - chain: chain, - ctx: ctx, - supportedTxTypes: []uint8{types.LegacyTxType}, // Legacy is always supported - gasLimitMarginPercent: DefaultGasLimitMarginPercent, - feeCapMultiplier: DefaultFeeCapMultiplier, - } - - // Apply any options provided - for _, opt := range opts { - opt(builder) - } - - // Skip network checks if tx type is forced - if builder.forcedTxType == nil { - if builder.chain.Nodes()[0].SupportsEIP(ctx, 1559) { - builder.supportedTxTypes = append(builder.supportedTxTypes, types.DynamicFeeTxType) - builder.supportedTxTypes = append(builder.supportedTxTypes, types.AccessListTxType) - } - if builder.chain.Nodes()[0].SupportsEIP(ctx, 4844) { - builder.supportedTxTypes = append(builder.supportedTxTypes, types.BlobTxType) - } - } - - 
log.Info("Instantiated TxBuilder", - "supportedTxTypes", builder.supportedTxTypes, - "forcedTxType", builder.forcedTxType, - "gasLimitMargin", builder.gasLimitMarginPercent, - "feeCapMultiplier", builder.feeCapMultiplier, - ) - return builder -} - -// BuildTx creates a new transaction, using the appropriate type for the network -func (b *TxBuilder) BuildTx(options ...TxOption) (Transaction, error) { - // Apply options to create TxOpts - opts := &TxOpts{} - for _, opt := range options { - opt(opts) - } - - // Check for blob transaction requirements if blobs are provided - if len(opts.blobHashes) > 0 { - if b.forcedTxType != nil && *b.forcedTxType != types.BlobTxType { - return nil, fmt.Errorf("blob transactions not supported with forced transaction type %d", *b.forcedTxType) - } - if !b.supportsType(types.BlobTxType) { - return nil, fmt.Errorf("blob transactions not supported by the network") - } - } - - // Validate all fields - if err := opts.Validate(); err != nil { - return nil, fmt.Errorf("invalid transaction options: %w", err) - } - - var tx *types.Transaction - var err error - - // Choose the most advanced supported transaction type - txType := b.chooseTxType(len(opts.accessList) > 0, len(opts.blobHashes) > 0) - switch txType { - case types.BlobTxType: - if len(opts.blobHashes) > 0 { - tx, err = b.buildBlobTx(opts) - } else { - // If blob tx type is forced but no blobs provided, fall back to dynamic fee tx - tx, err = b.buildDynamicFeeTx(opts) - } - case types.AccessListTxType: - tx, err = b.buildAccessListTx(opts) - case types.DynamicFeeTxType: - tx, err = b.buildDynamicFeeTx(opts) - default: - tx, err = b.buildLegacyTx(opts) - } - - if err != nil { - return nil, err - } - - return &EthTx{ - tx: tx, - from: opts.from, - txType: txType, - }, nil -} - -// supportsType checks if a transaction type is supported -func (b *TxBuilder) supportsType(txType uint8) bool { - for _, t := range b.supportedTxTypes { - if t == txType { - return true - } - } - return false -} 
- -// chooseTxType selects the most advanced supported transaction type -func (b *TxBuilder) chooseTxType(hasAccessList bool, hasBlobs bool) uint8 { - if b.forcedTxType != nil { - return *b.forcedTxType - } - - // Blob transactions are the most advanced, but only use them if we have blobs - if hasBlobs && b.supportsType(types.BlobTxType) { - return types.BlobTxType - } - - // If we have an access list and support access list transactions, use that - if hasAccessList && b.supportsType(types.AccessListTxType) { - return types.AccessListTxType - } - - // Try dynamic fee transactions next - if b.supportsType(types.DynamicFeeTxType) { - return types.DynamicFeeTxType - } - - // Fall back to legacy - return types.LegacyTxType -} - -// getNonce gets the next nonce for the given address -func (b *TxBuilder) getNonce(from common.Address) (uint64, error) { - nonce, err := b.chain.Nodes()[0].PendingNonceAt(b.ctx, from) - if err != nil { - return 0, fmt.Errorf("failed to get nonce: %w", err) - } - return nonce, nil -} - -// getGasPrice gets the suggested gas price from the network -func (b *TxBuilder) getGasPrice() (*big.Int, error) { - gasPrice, err := b.chain.Nodes()[0].GasPrice(b.ctx) - if err != nil { - return nil, fmt.Errorf("failed to get gas price: %w", err) - } - return gasPrice, nil -} - -// calculateGasLimit calculates the gas limit for a transaction, with a configurable safety buffer -func (b *TxBuilder) calculateGasLimit(opts *TxOpts) (uint64, error) { - if opts.gasLimit != 0 { - return opts.gasLimit, nil - } - - estimated, err := b.chain.Nodes()[0].GasLimit(b.ctx, opts) - if err != nil { - return 0, fmt.Errorf("failed to estimate gas: %w", err) - } - // Add the configured margin to the estimated gas limit - return estimated * (100 + b.gasLimitMarginPercent) / 100, nil -} - -// buildDynamicFeeTx creates a new EIP-1559 transaction with the given parameters -func (b *TxBuilder) buildDynamicFeeTx(opts *TxOpts) (*types.Transaction, error) { - nonce, err := 
b.getNonce(opts.from) - if err != nil { - return nil, err - } - - gasPrice, err := b.getGasPrice() - if err != nil { - return nil, err - } - - chainID := b.chain.ID() - - gasLimit, err := b.calculateGasLimit(opts) - if err != nil { - return nil, err - } - - return types.NewTx(&types.DynamicFeeTx{ - ChainID: chainID, - Nonce: nonce, - GasTipCap: gasPrice, - GasFeeCap: new(big.Int).Mul(gasPrice, big.NewInt(int64(b.feeCapMultiplier))), - Gas: gasLimit, - To: opts.to, - Value: opts.value, - Data: opts.data, - }), nil -} - -// buildLegacyTx creates a new legacy (pre-EIP-1559) transaction -func (b *TxBuilder) buildLegacyTx(opts *TxOpts) (*types.Transaction, error) { - nonce, err := b.getNonce(opts.from) - if err != nil { - return nil, err - } - - gasPrice, err := b.getGasPrice() - if err != nil { - return nil, err - } - - gasLimit, err := b.calculateGasLimit(opts) - if err != nil { - return nil, err - } - - return types.NewTx(&types.LegacyTx{ - Nonce: nonce, - To: opts.to, - Value: opts.value, - Gas: gasLimit, - GasPrice: gasPrice, - Data: opts.data, - }), nil -} - -// buildAccessListTx creates a new EIP-2930 transaction with access list -func (b *TxBuilder) buildAccessListTx(opts *TxOpts) (*types.Transaction, error) { - nonce, err := b.getNonce(opts.from) - if err != nil { - return nil, err - } - - gasPrice, err := b.getGasPrice() - if err != nil { - return nil, err - } - - chainID := b.chain.ID() - - gasLimit, err := b.calculateGasLimit(opts) - if err != nil { - return nil, err - } - - return types.NewTx(&types.AccessListTx{ - ChainID: chainID, - Nonce: nonce, - GasPrice: gasPrice, - Gas: gasLimit, - To: opts.to, - Value: opts.value, - Data: opts.data, - AccessList: opts.accessList, - }), nil -} - -// buildBlobTx creates a new EIP-4844 blob transaction -func (b *TxBuilder) buildBlobTx(opts *TxOpts) (*types.Transaction, error) { - nonce, err := b.getNonce(opts.from) - if err != nil { - return nil, err - } - - gasPrice, err := b.getGasPrice() - if err != nil { - return 
nil, err - } - - chainID := b.chain.ID() - - gasLimit, err := b.calculateGasLimit(opts) - if err != nil { - return nil, err - } - - // Validate blob transaction requirements - if opts.to == nil { - return nil, fmt.Errorf("blob transactions must have a recipient") - } - - if len(opts.blobHashes) == 0 { - return nil, fmt.Errorf("blob transactions must have at least one blob hash") - } - - if len(opts.blobs) != len(opts.commitments) || len(opts.blobs) != len(opts.proofs) { - return nil, fmt.Errorf("mismatched number of blobs, commitments, and proofs") - } - - // Convert big.Int values to uint256.Int - chainIDU256, _ := uint256.FromBig(chainID) - gasTipCapU256, _ := uint256.FromBig(gasPrice) - gasFeeCapU256, _ := uint256.FromBig(new(big.Int).Mul(gasPrice, big.NewInt(int64(b.feeCapMultiplier)))) - valueU256, _ := uint256.FromBig(opts.value) - // For blob transactions, we'll use the same gas price for blob fee cap - blobFeeCapU256, _ := uint256.FromBig(gasPrice) - - return types.NewTx(&types.BlobTx{ - ChainID: chainIDU256, - Nonce: nonce, - GasTipCap: gasTipCapU256, - GasFeeCap: gasFeeCapU256, - Gas: gasLimit, - To: *opts.to, - Value: valueU256, - Data: opts.data, - AccessList: opts.accessList, - BlobFeeCap: blobFeeCapU256, - BlobHashes: opts.blobHashes, - Sidecar: &types.BlobTxSidecar{ - Blobs: opts.blobs, - Commitments: opts.commitments, - Proofs: opts.proofs, - }, - }), nil -} diff --git a/devnet-sdk/system/txbuilder_test.go b/devnet-sdk/system/txbuilder_test.go deleted file mode 100644 index f82a55537ae5e..0000000000000 --- a/devnet-sdk/system/txbuilder_test.go +++ /dev/null @@ -1,475 +0,0 @@ -package system - -import ( - "context" - "fmt" - "math/big" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/interfaces" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-service/eth" - 
"github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - ethtypes "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/crypto/kzg4844" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/params" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -var ( - _ Chain = (*mockChain)(nil) - _ Node = (*mockNode)(nil) - _ Wallet = (*mockWallet)(nil) -) - -// mockWallet implements types.Wallet for testing -type mockWallet struct { - mock.Mock -} - -func (m *mockWallet) PrivateKey() types.Key { - args := m.Called() - return args.Get(0).(types.Key) -} - -func (m *mockWallet) Address() types.Address { - args := m.Called() - return args.Get(0).(common.Address) -} - -func (m *mockWallet) Send(ctx context.Context, tx Transaction) error { - return nil -} - -func (m *mockWallet) Sign(tx Transaction) (Transaction, error) { - return tx, nil -} - -func (m *mockWallet) SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] { - args := m.Called(to, amount) - return args.Get(0).(types.WriteInvocation[any]) -} - -func (m *mockWallet) InitiateMessage(chainID types.ChainID, target common.Address, message []byte) types.WriteInvocation[any] { - args := m.Called(chainID, target, message) - return args.Get(0).(types.WriteInvocation[any]) -} - -func (m *mockWallet) ExecuteMessage(identifier bindings.Identifier, sentMessage []byte) types.WriteInvocation[any] { - args := m.Called(identifier, sentMessage) - return args.Get(0).(types.WriteInvocation[any]) -} - -func (m *mockWallet) Balance() types.Balance { - args := m.Called() - return args.Get(0).(types.Balance) -} - -func (m *mockWallet) Nonce() uint64 { - args := m.Called() - return args.Get(0).(uint64) -} - -func (m *mockWallet) Transactor() *bind.TransactOpts { - return nil -} - -// mockChain implements the Chain interface for testing -type 
mockChain struct { - mock.Mock - wallet *mockWallet -} - -func newMockChain() *mockChain { - return &mockChain{ - wallet: new(mockWallet), - } -} - -func (m *mockChain) Nodes() []Node { - args := m.Called() - return args.Get(0).([]Node) -} - -func (m *mockChain) ID() types.ChainID { - args := m.Called() - return args.Get(0).(types.ChainID) -} - -func (m *mockChain) SupportsEIP(ctx context.Context, eip uint64) bool { - args := m.Called(ctx, eip) - return args.Bool(0) -} - -func (m *mockChain) ContractsRegistry() interfaces.ContractsRegistry { - args := m.Called() - return args.Get(0).(interfaces.ContractsRegistry) -} - -func (m *mockChain) RPCURL() string { - args := m.Called() - return args.String(0) -} - -func (m *mockChain) Client() (*sources.EthClient, error) { - args := m.Called() - return args.Get(0).(*sources.EthClient), nil -} - -func (m *mockChain) Wallets() WalletMap { - return nil -} - -func (m *mockChain) Config() (*params.ChainConfig, error) { - return nil, fmt.Errorf("not implemented for mock chain") -} - -func (m *mockChain) Addresses() AddressMap { - args := m.Called() - return args.Get(0).(AddressMap) -} - -type mockNode struct { - mock.Mock -} - -func newMockNode() *mockNode { - return &mockNode{} -} - -func (m *mockNode) GasPrice(ctx context.Context) (*big.Int, error) { - args := m.Called(ctx) - return args.Get(0).(*big.Int), args.Error(1) -} - -func (m *mockNode) GasLimit(ctx context.Context, tx TransactionData) (uint64, error) { - args := m.Called(ctx, tx) - return args.Get(0).(uint64), args.Error(1) -} - -func (m *mockNode) PendingNonceAt(ctx context.Context, addr common.Address) (uint64, error) { - args := m.Called(ctx, addr) - return args.Get(0).(uint64), args.Error(1) -} - -func (m *mockNode) BlockByNumber(ctx context.Context, number *big.Int) (eth.BlockInfo, error) { - args := m.Called(ctx, number) - return args.Get(0).(eth.BlockInfo), args.Error(1) -} - -func (m *mockNode) Client() (*sources.EthClient, error) { - args := m.Called() - 
return args.Get(0).(*sources.EthClient), args.Error(1) -} - -func (m *mockNode) ContractsRegistry() interfaces.ContractsRegistry { - args := m.Called() - return args.Get(0).(interfaces.ContractsRegistry) -} - -func (m *mockNode) GethClient() (*ethclient.Client, error) { - args := m.Called() - return args.Get(0).(*ethclient.Client), args.Error(1) -} - -func (m *mockNode) RPCURL() string { - args := m.Called() - return args.Get(0).(string) -} - -func (m *mockNode) SupportsEIP(ctx context.Context, eip uint64) bool { - args := m.Called(ctx, eip) - return args.Bool(0) -} - -func (m *mockNode) Name() string { - args := m.Called() - return args.String(0) -} - -func TestNewTxBuilder(t *testing.T) { - ctx := context.Background() - - var node *mockNode - var chain *mockChain - tests := []struct { - name string - setupMock func() - opts []TxBuilderOption - expectedTypes []uint8 - expectedMargin uint64 - }{ - { - name: "legacy only", - setupMock: func() { - chain = newMockChain() - node = newMockNode() - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(false).Once() - node.On("SupportsEIP", ctx, uint64(4844)).Return(false).Once() - }, - opts: nil, - expectedTypes: []uint8{ethtypes.LegacyTxType}, - expectedMargin: DefaultGasLimitMarginPercent, - }, - { - name: "with EIP-1559", - setupMock: func() { - chain = newMockChain() - node = newMockNode() - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(true).Once() - node.On("SupportsEIP", ctx, uint64(4844)).Return(false).Once() - }, - opts: nil, - expectedTypes: []uint8{ethtypes.LegacyTxType, ethtypes.DynamicFeeTxType, ethtypes.AccessListTxType}, - expectedMargin: DefaultGasLimitMarginPercent, - }, - { - name: "with EIP-4844", - setupMock: func() { - chain = newMockChain() - node = newMockNode() - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(true).Once() - node.On("SupportsEIP", ctx, 
uint64(4844)).Return(true).Once() - }, - opts: nil, - expectedTypes: []uint8{ethtypes.LegacyTxType, ethtypes.DynamicFeeTxType, ethtypes.AccessListTxType, ethtypes.BlobTxType}, - expectedMargin: DefaultGasLimitMarginPercent, - }, - { - name: "forced tx type", - setupMock: func() { - // No EIP checks needed when type is forced - }, - opts: []TxBuilderOption{ - WithTxType(ethtypes.DynamicFeeTxType), - }, - expectedTypes: []uint8{ethtypes.DynamicFeeTxType}, - expectedMargin: DefaultGasLimitMarginPercent, - }, - { - name: "custom margin", - setupMock: func() { - chain = newMockChain() - node = newMockNode() - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(false).Once() - node.On("SupportsEIP", ctx, uint64(4844)).Return(false).Once() - }, - opts: []TxBuilderOption{ - WithGasLimitMargin(50), - }, - expectedTypes: []uint8{ethtypes.LegacyTxType}, - expectedMargin: 50, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.setupMock() - builder := NewTxBuilder(ctx, chain, tt.opts...) 
- - assert.Equal(t, tt.expectedTypes, builder.supportedTxTypes) - assert.Equal(t, tt.expectedMargin, builder.gasLimitMarginPercent) - chain.AssertExpectations(t) - }) - } -} - -func TestBuildTx(t *testing.T) { - ctx := context.Background() - chain := newMockChain() - node := newMockNode() - addr := common.HexToAddress("0x1234567890123456789012345678901234567890") - to := common.HexToAddress("0x0987654321098765432109876543210987654321") - chainID := big.NewInt(1) - gasPrice := big.NewInt(1000000000) // 1 gwei - nonce := uint64(1) - - tests := []struct { - name string - setupMock func() - opts []TxOption - wantType uint8 - wantErr bool - }{ - { - name: "legacy tx", - setupMock: func() { - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(false).Once() - node.On("SupportsEIP", ctx, uint64(4844)).Return(false).Once() - node.On("PendingNonceAt", ctx, addr).Return(nonce, nil).Once() - node.On("GasPrice", ctx).Return(gasPrice, nil).Once() - node.On("GasLimit", ctx, mock.Anything).Return(uint64(21000), nil).Once() - }, - opts: []TxOption{ - WithFrom(addr), - WithTo(to), - WithValue(big.NewInt(100000000000000000)), // 0.1 ETH - }, - wantType: ethtypes.LegacyTxType, - wantErr: false, - }, - { - name: "dynamic fee tx", - setupMock: func() { - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(true).Once() - node.On("SupportsEIP", ctx, uint64(4844)).Return(false).Once() - node.On("PendingNonceAt", ctx, addr).Return(nonce, nil).Once() - node.On("GasPrice", ctx).Return(gasPrice, nil).Once() - chain.On("ID").Return(chainID).Once() - node.On("GasLimit", ctx, mock.Anything).Return(uint64(21000), nil).Once() - }, - opts: []TxOption{ - WithFrom(addr), - WithTo(to), - WithValue(big.NewInt(100000000000000000)), // 0.1 ETH - }, - wantType: ethtypes.DynamicFeeTxType, - wantErr: false, - }, - { - name: "access list tx", - setupMock: func() { - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, 
uint64(1559)).Return(true).Once() - node.On("SupportsEIP", ctx, uint64(4844)).Return(false).Once() - node.On("PendingNonceAt", ctx, addr).Return(nonce, nil).Once() - node.On("GasPrice", ctx).Return(gasPrice, nil).Once() - chain.On("ID").Return(chainID).Once() - node.On("GasLimit", ctx, mock.Anything).Return(uint64(21000), nil).Once() - }, - opts: []TxOption{ - WithFrom(addr), - WithTo(to), - WithValue(big.NewInt(100000000000000000)), // 0.1 ETH - WithAccessList(ethtypes.AccessList{ - { - Address: common.HexToAddress("0x1234567890123456789012345678901234567890"), - StorageKeys: []common.Hash{ - common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001"), - }, - }, - }), - }, - wantType: ethtypes.AccessListTxType, - wantErr: false, - }, - { - name: "blob tx", - setupMock: func() { - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(true).Once() - node.On("SupportsEIP", ctx, uint64(4844)).Return(true).Once() - node.On("PendingNonceAt", ctx, addr).Return(nonce, nil).Once() - node.On("GasPrice", ctx).Return(gasPrice, nil).Once() - chain.On("ID").Return(chainID).Once() - node.On("GasLimit", ctx, mock.Anything).Return(uint64(21000), nil).Once() - }, - opts: []TxOption{ - WithFrom(addr), - WithTo(to), - WithValue(big.NewInt(100000000000000000)), // 0.1 ETH - WithBlobs([]kzg4844.Blob{{}}), - WithBlobCommitments([]kzg4844.Commitment{{}}), - WithBlobProofs([]kzg4844.Proof{{}}), - WithBlobHashes([]common.Hash{{}}), - }, - wantType: ethtypes.BlobTxType, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.setupMock() - builder := NewTxBuilder(ctx, chain) - tx, err := builder.BuildTx(tt.opts...) 
- - if tt.wantErr { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - assert.Equal(t, tt.wantType, tx.Type()) - chain.AssertExpectations(t) - }) - } -} - -func TestCalculateGasLimit(t *testing.T) { - ctx := context.Background() - addr := common.HexToAddress("0x1234567890123456789012345678901234567890") - - tests := []struct { - name string - opts *TxOpts - margin uint64 - estimatedGas uint64 - expectedLimit uint64 - expectEstimate bool - wantErr bool - }{ - { - name: "explicit gas limit", - opts: &TxOpts{ - from: addr, - to: &addr, - value: big.NewInt(0), - gasLimit: 21000, - }, - margin: 20, - estimatedGas: 0, - expectedLimit: 21000, - expectEstimate: false, - wantErr: false, - }, - { - name: "estimated with margin", - opts: &TxOpts{ - from: addr, - to: &addr, - value: big.NewInt(0), - }, - margin: 20, - estimatedGas: 21000, - expectedLimit: 25200, // 21000 * 1.2 - expectEstimate: true, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set up EIP support expectations for NewTxBuilder - chain := newMockChain() - node := newMockNode() - chain.On("Nodes").Return([]Node{node}) - node.On("SupportsEIP", ctx, uint64(1559)).Return(false) - node.On("SupportsEIP", ctx, uint64(4844)).Return(false) - node.On("GasLimit", ctx, tt.opts).Return(tt.estimatedGas, nil).Once() - - builder := NewTxBuilder(ctx, chain, WithGasLimitMargin(tt.margin)) - limit, err := builder.calculateGasLimit(tt.opts) - - if tt.wantErr { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - assert.Equal(t, tt.expectedLimit, limit) - chain.AssertExpectations(t) - }) - } -} diff --git a/devnet-sdk/system/txprocessor.go b/devnet-sdk/system/txprocessor.go deleted file mode 100644 index 1279aadffc1b6..0000000000000 --- a/devnet-sdk/system/txprocessor.go +++ /dev/null @@ -1,83 +0,0 @@ -package system - -import ( - "context" - "fmt" - "math/big" - - sdkTypes "github.com/ethereum-optimism/optimism/devnet-sdk/types" - 
"github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/ethclient" -) - -// EthClient defines the interface for interacting with Ethereum node -type EthClient interface { - SendTransaction(ctx context.Context, tx *types.Transaction) error -} - -// TransactionProcessor handles signing and sending transactions -type transactionProcessor struct { - client EthClient - chainID *big.Int - privateKey sdkTypes.Key -} - -// NewTransactionProcessor creates a new transaction processor -func NewTransactionProcessor(client EthClient, chainID *big.Int) TransactionProcessor { - return &transactionProcessor{ - client: client, - chainID: chainID, - } -} - -// NewEthTransactionProcessor creates a new transaction processor with an ethclient -func NewEthTransactionProcessor(client *ethclient.Client, chainID *big.Int) TransactionProcessor { - return NewTransactionProcessor(client, chainID) -} - -// Sign signs a transaction with the given private key -func (p *transactionProcessor) Sign(tx Transaction) (Transaction, error) { - pk := p.privateKey - if pk == nil { - return nil, fmt.Errorf("private key is nil") - } - - var signer types.Signer - switch tx.Type() { - case types.SetCodeTxType: - signer = types.NewIsthmusSigner(p.chainID) - case types.DynamicFeeTxType: - signer = types.NewLondonSigner(p.chainID) - case types.AccessListTxType: - signer = types.NewEIP2930Signer(p.chainID) - default: - signer = types.NewEIP155Signer(p.chainID) - } - - if rt, ok := tx.(RawTransaction); ok { - signedTx, err := types.SignTx(rt.Raw(), signer, pk) - if err != nil { - return nil, fmt.Errorf("failed to sign transaction: %w", err) - } - - return &EthTx{ - tx: signedTx, - from: tx.From(), - txType: tx.Type(), - }, nil - } - - return nil, fmt.Errorf("transaction does not support signing") -} - -// Send sends a signed transaction to the network -func (p *transactionProcessor) Send(ctx context.Context, tx Transaction) error { - if st, ok := tx.(RawTransaction); ok { - if err := 
p.client.SendTransaction(ctx, st.Raw()); err != nil { - return fmt.Errorf("failed to send transaction: %w", err) - } - return nil - } - - return fmt.Errorf("transaction is not signed") -} diff --git a/devnet-sdk/system/txprocessor_test.go b/devnet-sdk/system/txprocessor_test.go deleted file mode 100644 index c69776d53a7cf..0000000000000 --- a/devnet-sdk/system/txprocessor_test.go +++ /dev/null @@ -1,215 +0,0 @@ -package system - -import ( - "context" - "fmt" - "math/big" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/stretchr/testify/assert" -) - -func (m *mockEthClient) SendTransaction(ctx context.Context, tx *types.Transaction) error { - args := m.Called(ctx, tx) - return args.Error(0) -} - -func TestTransactionProcessor_Sign(t *testing.T) { - // Test private key and corresponding address - // DO NOT use this key for anything other than testing - testKey := "1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" - testAddr := common.HexToAddress("0x96216849c49358B10257cb55b28eA603c874b05E") - - chainID := big.NewInt(1) - client := new(mockEthClient) - - // Create a wallet with the test key - chain := newChain(chainID.String(), WalletMap{}, nil, AddressMap{}, []Node{}) - wallet, err := NewWallet(testKey, testAddr, chain) - assert.NoError(t, err) - - processor := &transactionProcessor{ - client: client, - chainID: chainID, - privateKey: wallet.PrivateKey(), - } - - invalidProcessor := &transactionProcessor{ - client: client, - chainID: chainID, - // No private key set - } - - tests := []struct { - name string - processor *transactionProcessor - tx Transaction - wantType uint8 - wantErr bool - errMessage string - }{ - { - name: "legacy tx", - processor: processor, - tx: &EthTx{ - tx: types.NewTransaction( - 0, - testAddr, - big.NewInt(1), - 21000, - big.NewInt(1), - nil, - ), - from: testAddr, - txType: types.LegacyTxType, - }, - wantType: types.LegacyTxType, - wantErr: false, - }, - { 
- name: "dynamic fee tx", - processor: processor, - tx: &EthTx{ - tx: types.NewTx(&types.DynamicFeeTx{ - ChainID: chainID, - Nonce: 0, - GasTipCap: big.NewInt(1), - GasFeeCap: big.NewInt(1), - Gas: 21000, - To: &testAddr, - Value: big.NewInt(1), - Data: nil, - }), - from: testAddr, - txType: types.DynamicFeeTxType, - }, - wantType: types.DynamicFeeTxType, - wantErr: false, - }, - { - name: "invalid private key", - processor: invalidProcessor, - tx: &EthTx{ - tx: types.NewTransaction( - 0, - testAddr, - big.NewInt(1), - 21000, - big.NewInt(1), - nil, - ), - from: testAddr, - txType: types.LegacyTxType, - }, - wantErr: true, - errMessage: "private key is nil", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - signedTx, err := tt.processor.Sign(tt.tx) - if tt.wantErr { - assert.Error(t, err) - assert.Contains(t, err.Error(), tt.errMessage) - return - } - - assert.NoError(t, err) - assert.NotNil(t, signedTx) - assert.Equal(t, tt.wantType, signedTx.Type()) - assert.Equal(t, tt.tx.From(), signedTx.From()) - }) - } -} - -func TestTransactionProcessor_Send(t *testing.T) { - chainID := big.NewInt(1) - client := new(mockEthClient) - processor := NewTransactionProcessor(client, chainID) - ctx := context.Background() - - testAddr := common.HexToAddress("0x1234567890123456789012345678901234567890") - tx := types.NewTransaction( - 0, - testAddr, - big.NewInt(1), - 21000, - big.NewInt(1), - nil, - ) - - tests := []struct { - name string - tx Transaction - setupMock func() - wantErr bool - errMessage string - }{ - { - name: "successful send", - tx: &EthTx{ - tx: tx, - from: testAddr, - txType: types.LegacyTxType, - }, - setupMock: func() { - client.On("SendTransaction", ctx, tx).Return(nil).Once() - }, - wantErr: false, - }, - { - name: "send error", - tx: &EthTx{ - tx: tx, - from: testAddr, - txType: types.LegacyTxType, - }, - setupMock: func() { - client.On("SendTransaction", ctx, tx).Return(fmt.Errorf("send failed")).Once() - }, - wantErr: true, - 
errMessage: "failed to send transaction", - }, - { - name: "not a raw transaction", - tx: &mockTransaction{ - from: testAddr, - }, - setupMock: func() {}, - wantErr: true, - errMessage: "transaction is not signed", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tt.setupMock() - - err := processor.Send(ctx, tt.tx) - if tt.wantErr { - assert.Error(t, err) - assert.Contains(t, err.Error(), tt.errMessage) - return - } - - assert.NoError(t, err) - client.AssertExpectations(t) - }) - } -} - -// mockTransaction implements Transaction for testing -type mockTransaction struct { - from common.Address -} - -func (m *mockTransaction) Hash() common.Hash { return common.Hash{} } -func (m *mockTransaction) From() common.Address { return m.from } -func (m *mockTransaction) To() *common.Address { return nil } -func (m *mockTransaction) Value() *big.Int { return nil } -func (m *mockTransaction) Data() []byte { return nil } -func (m *mockTransaction) AccessList() types.AccessList { return nil } -func (m *mockTransaction) Type() uint8 { return 0 } diff --git a/devnet-sdk/system/wallet.go b/devnet-sdk/system/wallet.go deleted file mode 100644 index 7d5d14757e249..0000000000000 --- a/devnet-sdk/system/wallet.go +++ /dev/null @@ -1,422 +0,0 @@ -package system - -import ( - "context" - "encoding/hex" - "fmt" - "math/big" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" - "github.com/ethereum-optimism/optimism/op-service/eth" - supervisorTypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - - 
"github.com/ethereum-optimism/optimism/op-service/bigs" - coreTypes "github.com/ethereum/go-ethereum/core/types" -) - -var ( - // This will make sure that we implement the Chain interface - _ Wallet = (*wallet)(nil) -) - -type wallet struct { - privateKey types.Key - address types.Address - chain Chain -} - -func newWalletMapFromDescriptorWalletMap(descriptorWalletMap descriptors.WalletMap, chain Chain) (WalletMap, error) { - result := WalletMap{} - for k, v := range descriptorWalletMap { - wallet, err := NewWallet(v.PrivateKey, v.Address, chain) - if err != nil { - return nil, err - } - result[k] = wallet - } - return result, nil -} - -func NewWallet(pk string, addr types.Address, chain Chain) (*wallet, error) { - privateKey, err := privateKeyFromString(pk) - if err != nil { - return nil, fmt.Errorf("failed to convert private from string: %w", err) - } - - return &wallet{ - privateKey: privateKey, - address: addr, - chain: chain, - }, nil -} - -func privateKeyFromString(pk string) (types.Key, error) { - var privateKey types.Key - if pk != "" { - pk = strings.TrimPrefix(pk, "0x") - if len(pk)%2 == 1 { - pk = "0" + pk - } - pkBytes, err := hex.DecodeString(pk) - if err != nil { - return nil, fmt.Errorf("failed to decode private key: %w", err) - } - key, err := crypto.ToECDSA(pkBytes) - if err != nil { - return nil, fmt.Errorf("failed to convert private key to ECDSA: %w", err) - } - privateKey = key - } - - return privateKey, nil -} - -func (w *wallet) PrivateKey() types.Key { - return w.privateKey -} - -func (w *wallet) Address() types.Address { - return w.address -} - -func (w *wallet) SendETH(to types.Address, amount types.Balance) types.WriteInvocation[any] { - return &sendImpl{ - chain: w.chain, - processor: w, - from: w.address, - to: to, - amount: amount, - } -} - -func (w *wallet) Balance() types.Balance { - client, err := w.chain.Nodes()[0].Client() - if err != nil { - return types.Balance{} - } - - balance, err := client.BalanceAt(context.Background(), 
w.address, nil) - if err != nil { - return types.Balance{} - } - - return types.NewBalance(balance) -} - -func (w *wallet) InitiateMessage(chainID types.ChainID, target common.Address, message []byte) types.WriteInvocation[any] { - return &initiateMessageImpl{ - chain: w.chain, - processor: w, - from: w.address, - target: target, - chainID: chainID, - message: message, - } -} - -func (w *wallet) ExecuteMessage(identifier bindings.Identifier, sentMessage []byte) types.WriteInvocation[any] { - return &executeMessageImpl{ - chain: w.chain, - processor: w, - from: w.address, - identifier: identifier, - sentMessage: sentMessage, - } -} - -type initiateMessageImpl struct { - chain Chain - processor TransactionProcessor - from types.Address - - target types.Address - chainID types.ChainID - message []byte -} - -func (i *initiateMessageImpl) Call(ctx context.Context) (any, error) { - builder := NewTxBuilder(ctx, i.chain) - messenger, err := i.chain.Nodes()[0].ContractsRegistry().L2ToL2CrossDomainMessenger(constants.L2ToL2CrossDomainMessenger) - if err != nil { - return nil, fmt.Errorf("failed to init transaction: %w", err) - } - data, err := messenger.ABI().Pack("sendMessage", i.chainID, i.target, i.message) - if err != nil { - return nil, fmt.Errorf("failed to build calldata: %w", err) - } - tx, err := builder.BuildTx( - WithFrom(i.from), - WithTo(constants.L2ToL2CrossDomainMessenger), - WithValue(big.NewInt(0)), - WithData(data), - ) - if err != nil { - return nil, fmt.Errorf("failed to build transaction: %w", err) - } - - tx, err = i.processor.Sign(tx) - if err != nil { - return nil, fmt.Errorf("failed to sign transaction: %w", err) - } - - return tx, nil -} - -func (i *initiateMessageImpl) Send(ctx context.Context) types.InvocationResult { - result, err := i.Call(ctx) - if err != nil { - return &sendResult{chain: i.chain, tx: nil, err: err} - } - tx, ok := result.(Transaction) - if !ok { - return &sendResult{chain: i.chain, tx: nil, err: fmt.Errorf("unexpected return 
type")} - } - err = i.processor.Send(ctx, tx) - return &sendResult{ - chain: i.chain, - tx: tx, - err: err, - } -} - -type executeMessageImpl struct { - chain Chain - processor TransactionProcessor - from types.Address - - identifier bindings.Identifier - sentMessage []byte -} - -func (i *executeMessageImpl) Call(ctx context.Context) (any, error) { - builder := NewTxBuilder(ctx, i.chain) - messenger, err := i.chain.Nodes()[0].ContractsRegistry().L2ToL2CrossDomainMessenger(constants.L2ToL2CrossDomainMessenger) - if err != nil { - return nil, fmt.Errorf("failed to init transaction: %w", err) - } - data, err := messenger.ABI().Pack("relayMessage", i.identifier, i.sentMessage) - if err != nil { - return nil, fmt.Errorf("failed to build calldata: %w", err) - } - // Wrapper to use Access implementation - msg := supervisorTypes.Message{ - Identifier: supervisorTypes.Identifier{ - Origin: i.identifier.Origin, - BlockNumber: bigs.Uint64Strict(i.identifier.BlockNumber), - LogIndex: uint32(bigs.Uint64Strict(i.identifier.LogIndex)), - Timestamp: bigs.Uint64Strict(i.identifier.Timestamp), - ChainID: eth.ChainIDFromBig(i.identifier.ChainId), - }, - PayloadHash: crypto.Keccak256Hash(i.sentMessage), - } - access := msg.Access() - accessList := coreTypes.AccessList{{ - Address: constants.CrossL2Inbox, - StorageKeys: supervisorTypes.EncodeAccessList([]supervisorTypes.Access{access}), - }} - tx, err := builder.BuildTx( - WithFrom(i.from), - WithTo(constants.L2ToL2CrossDomainMessenger), - WithValue(big.NewInt(0)), - WithData(data), - WithAccessList(accessList), - ) - if err != nil { - return nil, fmt.Errorf("failed to build transaction: %w", err) - } - tx, err = i.processor.Sign(tx) - if err != nil { - return nil, fmt.Errorf("failed to sign transaction: %w", err) - } - return tx, nil -} - -func (i *executeMessageImpl) Send(ctx context.Context) types.InvocationResult { - result, err := i.Call(ctx) - if err != nil { - return &sendResult{chain: i.chain, tx: nil, err: err} - } - tx, ok := 
result.(Transaction) - if !ok { - return &sendResult{chain: i.chain, tx: nil, err: fmt.Errorf("unexpected return type")} - } - err = i.processor.Send(ctx, tx) - return &sendResult{ - chain: i.chain, - tx: tx, - err: err, - } -} - -func (w *wallet) Nonce() uint64 { - client, err := w.chain.Nodes()[0].Client() - if err != nil { - return 0 - } - - nonce, err := client.PendingNonceAt(context.Background(), w.address) - if err != nil { - return 0 - } - - return nonce -} - -func (w *wallet) Transactor() *bind.TransactOpts { - transactor, err := bind.NewKeyedTransactorWithChainID(w.PrivateKey(), w.chain.ID()) - if err != nil { - panic(fmt.Sprintf("could not create transactor for address %s and chainID %v", w.Address(), w.chain.ID())) - } - - return transactor -} - -func (w *wallet) Sign(tx Transaction) (Transaction, error) { - pk := w.privateKey - - var signer coreTypes.Signer - switch tx.Type() { - case coreTypes.SetCodeTxType: - signer = coreTypes.NewIsthmusSigner(w.chain.ID()) - case coreTypes.DynamicFeeTxType: - signer = coreTypes.NewLondonSigner(w.chain.ID()) - case coreTypes.AccessListTxType: - signer = coreTypes.NewEIP2930Signer(w.chain.ID()) - default: - signer = coreTypes.NewEIP155Signer(w.chain.ID()) - } - - if rt, ok := tx.(RawTransaction); ok { - signedTx, err := coreTypes.SignTx(rt.Raw(), signer, pk) - if err != nil { - return nil, fmt.Errorf("failed to sign transaction: %w", err) - } - - return &EthTx{ - tx: signedTx, - from: tx.From(), - txType: tx.Type(), - }, nil - } - - return nil, fmt.Errorf("transaction does not support signing") -} - -func (w *wallet) Send(ctx context.Context, tx Transaction) error { - if st, ok := tx.(RawTransaction); ok { - client, err := w.chain.Nodes()[0].Client() - if err != nil { - return fmt.Errorf("failed to get client: %w", err) - } - if err := client.SendTransaction(ctx, st.Raw()); err != nil { - return fmt.Errorf("failed to send transaction: %w", err) - } - return nil - } - - return fmt.Errorf("transaction is not signed") -} 
- -type sendImpl struct { - chain Chain - processor TransactionProcessor - from types.Address - to types.Address - amount types.Balance -} - -func (i *sendImpl) Call(ctx context.Context) (any, error) { - builder := NewTxBuilder(ctx, i.chain) - tx, err := builder.BuildTx( - WithFrom(i.from), - WithTo(i.to), - WithValue(i.amount.Int), - WithData(nil), - ) - if err != nil { - return nil, fmt.Errorf("failed to build transaction: %w", err) - } - - tx, err = i.processor.Sign(tx) - if err != nil { - return nil, fmt.Errorf("failed to sign transaction: %w", err) - } - - return tx, nil -} - -func (i *sendImpl) Send(ctx context.Context) types.InvocationResult { - builder := NewTxBuilder(ctx, i.chain) - tx, err := builder.BuildTx( - WithFrom(i.from), - WithTo(i.to), - WithValue(i.amount.Int), - WithData(nil), - ) - - // Sign the transaction if it's built okay - if err == nil { - tx, err = i.processor.Sign(tx) - } - - // Send the transaction if it's signed okay - if err == nil { - err = i.processor.Send(ctx, tx) - } - - return &sendResult{ - chain: i.chain, - tx: tx, - err: err, - } -} - -type sendResult struct { - chain Chain - tx Transaction - receipt Receipt - err error -} - -func (r *sendResult) Error() error { - return r.err -} - -func (r *sendResult) Wait() error { - client, err := r.chain.Nodes()[0].GethClient() - if err != nil { - return fmt.Errorf("failed to get client: %w", err) - } - - if r.err != nil { - return r.err - } - if r.tx == nil { - return fmt.Errorf("no transaction to wait for") - } - - if tx, ok := r.tx.(RawTransaction); ok { - receipt, err := wait.ForReceiptOK(context.Background(), client, tx.Raw().Hash()) - if err != nil { - return fmt.Errorf("failed waiting for transaction confirmation: %w", err) - } - r.receipt = &EthReceipt{blockNumber: receipt.BlockNumber, logs: receipt.Logs, txHash: receipt.TxHash} - if receipt.Status == 0 { - return fmt.Errorf("transaction failed") - } - } - - return nil -} - -func (r *sendResult) Info() any { - return r.receipt 
-} diff --git a/devnet-sdk/system/walletV2.go b/devnet-sdk/system/walletV2.go deleted file mode 100644 index 9e2d38c208861..0000000000000 --- a/devnet-sdk/system/walletV2.go +++ /dev/null @@ -1,93 +0,0 @@ -package system - -import ( - "context" - "crypto/ecdsa" - "fmt" - - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" -) - -var ( - _ WalletV2 = (*walletV2)(nil) -) - -type walletV2 struct { - address common.Address - priv *ecdsa.PrivateKey - client *sources.EthClient - gethClient *ethclient.Client - ctx context.Context -} - -func NewWalletV2FromWalletAndChain(ctx context.Context, wallet Wallet, chain Chain) (WalletV2, error) { - if len(chain.Nodes()) == 0 { - return nil, fmt.Errorf("failed to init walletV2: chain has zero nodes") - } - client, err := chain.Nodes()[0].Client() - if err != nil { - return nil, err - } - gethClient, err := chain.Nodes()[0].GethClient() - if err != nil { - return nil, err - } - return &walletV2{ - address: wallet.Address(), - priv: wallet.PrivateKey(), - client: client, - gethClient: gethClient, - ctx: ctx, - }, nil -} - -func NewWalletV2(ctx context.Context, rpcURL string, priv *ecdsa.PrivateKey, clCfg *sources.EthClientConfig, log log.Logger) (*walletV2, error) { - if clCfg == nil { - clCfg = sources.DefaultEthClientConfig(10) - } - rpcClient, err := rpc.DialContext(ctx, rpcURL) - if err != nil { - return nil, err - } - cl, err := sources.NewEthClient(client.NewBaseRPCClient(rpcClient), log, nil, clCfg) - if err != nil { - return nil, err - } - pubkeyECDSA, ok := priv.Public().(*ecdsa.PublicKey) - if !ok { - return nil, fmt.Errorf("Failed to assert type: publicKey is not of type *ecdsa.PublicKey") - } - address := crypto.PubkeyToAddress(*pubkeyECDSA) - 
return &walletV2{ - address: address, - client: cl, - priv: priv, - ctx: ctx, - }, nil -} - -func (w *walletV2) PrivateKey() *ecdsa.PrivateKey { - return w.priv -} - -func (w *walletV2) Client() *sources.EthClient { - return w.client -} - -func (w *walletV2) Ctx() context.Context { - return w.ctx -} - -func (w *walletV2) Address() common.Address { - return w.address -} - -func (w *walletV2) GethClient() *ethclient.Client { - return w.gethClient -} diff --git a/devnet-sdk/system/wallet_test.go b/devnet-sdk/system/wallet_test.go deleted file mode 100644 index 294869af1a6d5..0000000000000 --- a/devnet-sdk/system/wallet_test.go +++ /dev/null @@ -1,237 +0,0 @@ -package system - -import ( - "context" - "math/big" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/sources" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/ethclient" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" -) - -// testWallet is a minimal wallet implementation for testing balance functionality -type testWallet struct { - privateKey types.Key - address types.Address - chain *mockChainForBalance // Use concrete type to access mock client directly -} - -func (w *testWallet) Balance() types.Balance { - // Use the mock client directly instead of going through Client() - balance, err := w.chain.client.BalanceAt(context.Background(), w.address, nil) - if err != nil { - return types.NewBalance(new(big.Int)) - } - - return types.NewBalance(balance) -} - -// mockEthClient implements a mock ethereum client for testing -type mockEthClient struct { - mock.Mock -} - -func (m *mockEthClient) BalanceAt(ctx context.Context, account types.Address, blockNumber *big.Int) (*big.Int, error) { - args := 
m.Called(ctx, account, blockNumber) - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*big.Int), args.Error(1) -} - -func (m *mockEthClient) PendingNonceAt(ctx context.Context, account common.Address) (uint64, error) { - args := m.Called(ctx, account) - return args.Get(0).(uint64), args.Error(1) -} - -// mockChainForBalance implements just enough of the chain interface for balance testing -type mockChainForBalance struct { - mock.Mock - client *mockEthClient -} - -func TestWalletBalance(t *testing.T) { - tests := []struct { - name string - setupMock func(*mockChainForBalance) - expectedValue *big.Int - }{ - { - name: "successful balance fetch", - setupMock: func(m *mockChainForBalance) { - balance := big.NewInt(1000000000000000000) // 1 ETH - m.client.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(balance, nil) - }, - expectedValue: big.NewInt(1000000000000000000), - }, - { - name: "balance fetch error returns zero", - setupMock: func(m *mockChainForBalance) { - m.client.On("BalanceAt", mock.Anything, mock.Anything, mock.Anything).Return(nil, assert.AnError) - }, - expectedValue: new(big.Int), - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockChain := &mockChainForBalance{ - client: new(mockEthClient), - } - tt.setupMock(mockChain) - - w := &testWallet{ - privateKey: crypto.ToECDSAUnsafe(common.FromHex("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")), - address: types.Address{}, - chain: mockChain, - } - - balance := w.Balance() - assert.Equal(t, 0, balance.Int.Cmp(tt.expectedValue)) - - mockChain.AssertExpectations(t) - mockChain.client.AssertExpectations(t) - }) - } -} - -type internalMockChain struct { - *mockChain -} - -func (m *internalMockChain) Client() (*sources.EthClient, error) { - args := m.Called() - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*sources.EthClient), args.Error(1) -} - -func (m *internalMockChain) 
GethClient() (*ethclient.Client, error) { - args := m.Called() - if args.Get(0) == nil { - return nil, args.Error(1) - } - return args.Get(0).(*ethclient.Client), args.Error(1) -} - -func TestNewWallet(t *testing.T) { - pk := "0x1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef" - addr := types.Address(common.HexToAddress("0x5678")) - chain := &chain{} - - w, err := NewWallet(pk, addr, chain) - assert.NoError(t, err) - - // The private key is converted to ECDSA, so we can't compare directly with the input string - assert.NotNil(t, w.privateKey) - assert.Equal(t, addr, w.address) - assert.Equal(t, chain, w.chain) -} - -func TestWallet_Address(t *testing.T) { - addr := types.Address(common.HexToAddress("0x5678")) - w := &wallet{address: addr} - - assert.Equal(t, addr, w.Address()) -} - -func TestWallet_SendETH(t *testing.T) { - ctx := context.Background() - mockChain := newMockChain() - mockNode := newMockNode() - internalChain := &internalMockChain{mockChain} - - // Use a valid 256-bit private key (32 bytes) - testPrivateKey := crypto.ToECDSAUnsafe(common.FromHex("1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef")) - - // Derive the address from the private key - fromAddr := crypto.PubkeyToAddress(testPrivateKey.PublicKey) - - w := &wallet{ - privateKey: testPrivateKey, - address: types.Address(fromAddr), - chain: internalChain, - } - - toAddr := types.Address(common.HexToAddress("0x5678")) - amount := types.NewBalance(big.NewInt(1000000)) - - chainID := big.NewInt(1) - - // Mock chain ID for all calls - mockChain.On("ID").Return(types.ChainID(chainID)).Maybe() - - // Mock EIP support checks - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - mockNode.On("SupportsEIP", ctx, uint64(1559)).Return(false) - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - mockNode.On("SupportsEIP", ctx, uint64(4844)).Return(false) - - // Mock gas price and limit - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - mockNode.On("GasPrice", 
ctx).Return(big.NewInt(1000000000), nil) - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - mockNode.On("GasLimit", ctx, mock.Anything).Return(uint64(21000), nil) - - // Mock nonce retrieval - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - mockNode.On("PendingNonceAt", ctx, fromAddr).Return(uint64(0), nil) - - // Mock client access - rpcClient, err := rpc.DialContext(context.Background(), "http://this.domain.definitely.does.not.exist:8545") - assert.NoError(t, err) - ethClCfg := sources.EthClientConfig{MaxConcurrentRequests: 1, MaxRequestsPerBatch: 1, RPCProviderKind: sources.RPCKindStandard} - ethCl, err := sources.NewEthClient(client.NewBaseRPCClient(rpcClient), log.Root(), nil, ðClCfg) - assert.NoError(t, err) - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - mockNode.On("Client").Return(ethCl, nil) - - // Create the send invocation - invocation := w.SendETH(toAddr, amount) - assert.NotNil(t, invocation) - - // Send the transaction - result := invocation.Send(ctx) - assert.Error(t, result.Error()) // We expect an error since the client can't connect - - mockChain.AssertExpectations(t) -} - -func TestWallet_Balance(t *testing.T) { - mockChain := newMockChain() - mockNode := newMockNode() - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - internalChain := &internalMockChain{mockChain} - w := &wallet{ - chain: internalChain, - } - - // Test error case when client is not available - mockNode.On("Client").Return((*sources.EthClient)(nil), assert.AnError).Once() - balance := w.Balance() - assert.Equal(t, types.Balance{}, balance) -} - -func TestWallet_Nonce(t *testing.T) { - mockChain := newMockChain() - mockNode := newMockNode() - mockChain.On("Nodes").Return([]Node{mockNode}).Once() - internalChain := &internalMockChain{mockChain} - w := &wallet{ - chain: internalChain, - } - - // Test error case when client is not available - mockNode.On("Client").Return((*sources.EthClient)(nil), assert.AnError).Once() - nonce := w.Nonce() - 
assert.Equal(t, uint64(0), nonce) -} diff --git a/devnet-sdk/telemetry/carrier.go b/devnet-sdk/telemetry/carrier.go deleted file mode 100644 index dfbbdcfbee5d6..0000000000000 --- a/devnet-sdk/telemetry/carrier.go +++ /dev/null @@ -1,55 +0,0 @@ -package telemetry - -import ( - "context" - "fmt" - "strings" - - "go.opentelemetry.io/otel/propagation" -) - -const CarrierEnvVarPrefix = "OTEL_DEVSTACK_PROPAGATOR_CARRIER_" - -// keep in sync with textPropagator() below -var defaultPropagators = []string{ - "tracecontext", - "baggage", -} - -func textPropagator() propagation.TextMapPropagator { - return propagation.NewCompositeTextMapPropagator( - // keep in sync with propagators above - propagation.TraceContext{}, - propagation.Baggage{}, - ) -} - -func InstrumentEnvironment(ctx context.Context, env []string) []string { - propagator := textPropagator() - carrier := propagation.MapCarrier{} - propagator.Inject(ctx, carrier) - - for k, v := range carrier { - env = append(env, fmt.Sprintf("%s%s=%s", CarrierEnvVarPrefix, k, v)) - } - - return env -} - -func ExtractEnvironment(ctx context.Context, env []string) (context.Context, error) { - carrier := propagation.MapCarrier{} - // Reconstruct the carrier from the environment variables - for _, e := range env { - if strings.HasPrefix(e, CarrierEnvVarPrefix) { - parts := strings.SplitN(e, "=", 2) - if len(parts) == 2 { - key := strings.TrimPrefix(parts[0], CarrierEnvVarPrefix) - value := parts[1] - carrier.Set(key, value) - } - } - } - - ctx = textPropagator().Extract(ctx, carrier) - return ctx, nil -} diff --git a/devnet-sdk/telemetry/init.go b/devnet-sdk/telemetry/init.go deleted file mode 100644 index 52ffe4bc1fa67..0000000000000 --- a/devnet-sdk/telemetry/init.go +++ /dev/null @@ -1,58 +0,0 @@ -package telemetry - -import ( - "context" - "os" - - "github.com/honeycombio/otel-config-go/otelconfig" -) - -const ( - serviceNameEnvVar = "OTEL_SERVICE_NAME" - serviceVersionEnvVar = "OTEL_SERVICE_VERSION" - tracesEndpointEnvVar = 
"OTEL_EXPORTER_OTLP_TRACES_ENDPOINT" - metricsEndpointEnvVar = "OTEL_EXPORTER_OTLP_METRICS_ENDPOINT" - - defaultServiceName = "devstack" - defaultServiceVersion = "0.0.0" -) - -func envOrDefault(key, def string) string { - if v, ok := os.LookupEnv(key); ok { - return v - } - return def -} - -func SetupOpenTelemetry(ctx context.Context, opts ...otelconfig.Option) (context.Context, func(), error) { - defaultOpts := []otelconfig.Option{ - otelconfig.WithServiceName(envOrDefault(serviceNameEnvVar, defaultServiceName)), - otelconfig.WithServiceVersion(envOrDefault(serviceVersionEnvVar, defaultServiceVersion)), - otelconfig.WithPropagators(defaultPropagators), - } - - // do not use localhost:4317 by default, we want telemetry to be opt-in and - // explicit. - // The caller is still able to override this by passing in their own opts. - if os.Getenv(tracesEndpointEnvVar) == "" { - defaultOpts = append(defaultOpts, otelconfig.WithTracesEnabled(false)) - } - if os.Getenv(metricsEndpointEnvVar) == "" { - defaultOpts = append(defaultOpts, otelconfig.WithMetricsEnabled(false)) - } - - opts = append(defaultOpts, opts...) - otelShutdown, err := otelconfig.ConfigureOpenTelemetry(opts...) - if err != nil { - return ctx, nil, err - } - - // If the environment contains carrier information, extract it. - // This is useful for test runner / test communication for example. 
- ctx, err = ExtractEnvironment(ctx, os.Environ()) - if err != nil { - return ctx, nil, err - } - - return ctx, otelShutdown, nil -} diff --git a/devnet-sdk/telemetry/slog.go b/devnet-sdk/telemetry/slog.go deleted file mode 100644 index ce7429a053016..0000000000000 --- a/devnet-sdk/telemetry/slog.go +++ /dev/null @@ -1,98 +0,0 @@ -package telemetry - -import ( - "context" - "fmt" - "log/slog" - - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - - "github.com/ethereum-optimism/optimism/op-service/logmods" -) - -func WrapHandler(h slog.Handler) slog.Handler { - return &tracingHandler{ - Handler: h, - } -} - -type tracingHandler struct { - slog.Handler -} - -var _ logmods.Handler = (*tracingHandler)(nil) - -func (h *tracingHandler) Unwrap() slog.Handler { - return h.Handler -} - -func (h *tracingHandler) WithAttrs(attrs []slog.Attr) slog.Handler { - return &tracingHandler{Handler: h.Handler.WithAttrs(attrs)} -} - -func (h *tracingHandler) WithGroup(name string) slog.Handler { - return &tracingHandler{Handler: h.Handler.WithGroup(name)} -} - -func (h *tracingHandler) Handle(ctx context.Context, record slog.Record) error { - // Send log entries as events to the tracer - span := trace.SpanFromContext(ctx) - if span.IsRecording() { - attrRecorder := &attrAccumulator{} - record.Attrs(func(a slog.Attr) bool { - attrRecorder.register(a) - return true - }) - span.AddEvent(record.Message, trace.WithAttributes(attrRecorder.kv...)) - } - - // Conversely add tracing data to the local logs - spanCtx := trace.SpanContextFromContext(ctx) - if spanCtx.HasTraceID() { - record.AddAttrs(slog.String("trace_id", spanCtx.TraceID().String())) - } - if spanCtx.HasSpanID() { - record.AddAttrs(slog.String("span_id", spanCtx.SpanID().String())) - } - return h.Handler.Handle(ctx, record) -} - -type attrAccumulator struct { - kv []attribute.KeyValue -} - -func (ac *attrAccumulator) register(a slog.Attr) { - switch a.Value.Kind() { - case slog.KindAny: - ac.kv = 
append(ac.kv, attribute.String(a.Key, fmt.Sprintf("%v", a.Value.Any()))) - case slog.KindBool: - ac.kv = append(ac.kv, attribute.Bool(a.Key, a.Value.Bool())) - case slog.KindDuration: - ac.kv = append(ac.kv, attribute.String(a.Key, a.Value.Duration().String())) - case slog.KindFloat64: - ac.kv = append(ac.kv, attribute.Float64(a.Key, a.Value.Float64())) - case slog.KindInt64: - ac.kv = append(ac.kv, attribute.Int64(a.Key, a.Value.Int64())) - case slog.KindString: - ac.kv = append(ac.kv, attribute.String(a.Key, a.Value.String())) - case slog.KindTime: - ac.kv = append(ac.kv, attribute.String(a.Key, a.Value.Time().String())) - case slog.KindUint64: - val := a.Value.Uint64() - ac.kv = append(ac.kv, attribute.Int64(a.Key, int64(val))) - // detect overflows - if val > uint64(1<<63-1) { - // Value doesn't properly fit in int64 - ac.kv = append(ac.kv, attribute.Bool(a.Key+".overflow", true)) - ac.kv = append(ac.kv, attribute.String(a.Key+".actual", fmt.Sprintf("%d", val))) - } - case slog.KindGroup: - for _, attr := range a.Value.Group() { - ac.register(attr) - } - case slog.KindLogValuer: - value := a.Value.LogValuer().LogValue() - ac.register(slog.Attr{Key: a.Key, Value: value}) - } -} diff --git a/devnet-sdk/types/balance.go b/devnet-sdk/types/balance.go deleted file mode 100644 index bc2e4aa5942ea..0000000000000 --- a/devnet-sdk/types/balance.go +++ /dev/null @@ -1,92 +0,0 @@ -package types - -import ( - "fmt" - "math/big" -) - -type Balance struct { - *big.Int -} - -// NewBalance creates a new Balance from a big.Int -func NewBalance(i *big.Int) Balance { - return Balance{Int: new(big.Int).Set(i)} -} - -// Add returns a new Balance with other added to it -func (b Balance) Add(other Balance) Balance { - return Balance{Int: new(big.Int).Add(b.Int, other.Int)} -} - -// Sub returns a new Balance with other subtracted from it -func (b Balance) Sub(other Balance) Balance { - return Balance{Int: new(big.Int).Sub(b.Int, other.Int)} -} - -// Mul returns a new Balance 
multiplied by a float64 -func (b Balance) Mul(f float64) Balance { - floatResult := new(big.Float).Mul(new(big.Float).SetInt(b.Int), new(big.Float).SetFloat64(f)) - result := new(big.Int) - floatResult.Int(result) - return Balance{Int: result} -} - -// GreaterThan returns true if this balance is greater than other -func (b Balance) GreaterThan(other Balance) bool { - if b.Int == nil { - return false - } - if other.Int == nil { - return true - } - return b.Int.Cmp(other.Int) > 0 -} - -// LessThan returns true if this balance is less than other -func (b Balance) LessThan(other Balance) bool { - if b.Int == nil { - return other.Int != nil - } - if other.Int == nil { - return false - } - return b.Int.Cmp(other.Int) < 0 -} - -// Equal returns true if this balance equals other -func (b Balance) Equal(other Balance) bool { - if b.Int == nil { - return other.Int == nil - } - if other.Int == nil { - return false - } - return b.Int.Cmp(other.Int) == 0 -} - -// String implements fmt.Stringer to format Balance in the most readable unit -func (b Balance) String() string { - if b.Int == nil { - return "0 ETH" - } - - val := new(big.Float).SetInt(b.Int) - eth := new(big.Float).Quo(val, new(big.Float).SetInt64(1e18)) - - // 1 ETH = 1e18 Wei - if eth.Cmp(new(big.Float).SetFloat64(0.001)) >= 0 { - str := eth.Text('f', 0) - return fmt.Sprintf("%s ETH", str) - } - - // 1 Gwei = 1e9 Wei - gwei := new(big.Float).Quo(val, new(big.Float).SetInt64(1e9)) - if gwei.Cmp(new(big.Float).SetFloat64(0.001)) >= 0 { - str := gwei.Text('g', 3) - return fmt.Sprintf("%s Gwei", str) - } - - // Wei - return fmt.Sprintf("%s Wei", b.Text(10)) -} diff --git a/devnet-sdk/types/balance_test.go b/devnet-sdk/types/balance_test.go deleted file mode 100644 index 09b880f60622f..0000000000000 --- a/devnet-sdk/types/balance_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package types - -import ( - "math/big" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestNewBalance(t *testing.T) { - i := 
big.NewInt(100) - b := NewBalance(i) - if b.Int.Cmp(i) != 0 { - t.Errorf("NewBalance failed, got %v, want %v", b.Int, i) - } - - // Verify that modifying the input doesn't affect the Balance - i.SetInt64(200) - if b.Int.Cmp(big.NewInt(100)) != 0 { - t.Error("NewBalance did not create a copy of the input") - } -} - -func TestBalance_Add(t *testing.T) { - tests := []struct { - a, b, want int64 - }{ - {100, 200, 300}, - {0, 100, 100}, - {-100, 100, 0}, - {1000000, 2000000, 3000000}, - } - - for _, tt := range tests { - a := NewBalance(big.NewInt(tt.a)) - b := NewBalance(big.NewInt(tt.b)) - got := a.Add(b) - want := NewBalance(big.NewInt(tt.want)) - if !got.Equal(want) { - t.Errorf("Add(%v, %v) = %v, want %v", tt.a, tt.b, got, want) - } - // Verify original balances weren't modified - if !a.Equal(NewBalance(big.NewInt(tt.a))) { - t.Error("Add modified original balance") - } - } -} - -func TestBalance_Sub(t *testing.T) { - tests := []struct { - a, b, want int64 - }{ - {300, 200, 100}, - {100, 100, 0}, - {0, 100, -100}, - {3000000, 2000000, 1000000}, - } - - for _, tt := range tests { - a := NewBalance(big.NewInt(tt.a)) - b := NewBalance(big.NewInt(tt.b)) - got := a.Sub(b) - want := NewBalance(big.NewInt(tt.want)) - if !got.Equal(want) { - t.Errorf("Sub(%v, %v) = %v, want %v", tt.a, tt.b, got, want) - } - } -} - -func TestBalance_Mul(t *testing.T) { - tests := []struct { - a int64 - mul float64 - want int64 - }{ - {100, 2.0, 200}, - {100, 0.5, 50}, - {100, 0.0, 0}, - {1000, 1.5, 1500}, - } - - for _, tt := range tests { - a := NewBalance(big.NewInt(tt.a)) - got := a.Mul(tt.mul) - want := NewBalance(big.NewInt(tt.want)) - if !got.Equal(want) { - t.Errorf("Mul(%v, %v) = %v, want %v", tt.a, tt.mul, got, want) - } - } -} - -func TestBalanceComparisons(t *testing.T) { - tests := []struct { - name string - balance1 Balance - balance2 Balance - greater bool - less bool - equal bool - }{ - { - name: "both nil", - balance1: Balance{}, - balance2: Balance{}, - greater: false, - 
less: false, - equal: true, - }, - { - name: "first nil", - balance1: Balance{}, - balance2: NewBalance(big.NewInt(100)), - greater: false, - less: true, - equal: false, - }, - { - name: "second nil", - balance1: NewBalance(big.NewInt(100)), - balance2: Balance{}, - greater: true, - less: false, - equal: false, - }, - { - name: "first greater", - balance1: NewBalance(big.NewInt(200)), - balance2: NewBalance(big.NewInt(100)), - greater: true, - less: false, - equal: false, - }, - { - name: "second greater", - balance1: NewBalance(big.NewInt(100)), - balance2: NewBalance(big.NewInt(200)), - greater: false, - less: true, - equal: false, - }, - { - name: "equal values", - balance1: NewBalance(big.NewInt(100)), - balance2: NewBalance(big.NewInt(100)), - greater: false, - less: false, - equal: true, - }, - { - name: "zero values", - balance1: NewBalance(new(big.Int)), - balance2: NewBalance(new(big.Int)), - greater: false, - less: false, - equal: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.greater, tt.balance1.GreaterThan(tt.balance2), "GreaterThan check failed") - assert.Equal(t, tt.less, tt.balance1.LessThan(tt.balance2), "LessThan check failed") - assert.Equal(t, tt.equal, tt.balance1.Equal(tt.balance2), "Equal check failed") - }) - } -} - -func TestBalanceArithmetic(t *testing.T) { - tests := []struct { - name string - balance1 Balance - balance2 Balance - add *big.Int - sub *big.Int - mul float64 - mulRes *big.Int - }{ - { - name: "basic arithmetic", - balance1: NewBalance(big.NewInt(100)), - balance2: NewBalance(big.NewInt(50)), - add: big.NewInt(150), - sub: big.NewInt(50), - mul: 2.5, - mulRes: big.NewInt(250), - }, - { - name: "zero values", - balance1: NewBalance(new(big.Int)), - balance2: NewBalance(new(big.Int)), - add: new(big.Int), - sub: new(big.Int), - mul: 1.0, - mulRes: new(big.Int), - }, - { - name: "large numbers", - balance1: NewBalance(new(big.Int).Mul(big.NewInt(1e18), big.NewInt(100))), 
// 100 ETH - balance2: NewBalance(new(big.Int).Mul(big.NewInt(1e18), big.NewInt(50))), // 50 ETH - add: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(150)), // 150 ETH - sub: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(50)), // 50 ETH - mul: 0.5, - mulRes: new(big.Int).Mul(big.NewInt(1e18), big.NewInt(50)), // 50 ETH - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Test Add - sum := tt.balance1.Add(tt.balance2) - assert.Equal(t, 0, sum.Int.Cmp(tt.add), "Add result mismatch") - - // Test Sub - diff := tt.balance1.Sub(tt.balance2) - assert.Equal(t, 0, diff.Int.Cmp(tt.sub), "Sub result mismatch") - - // Test Mul - product := tt.balance1.Mul(tt.mul) - assert.Equal(t, 0, product.Int.Cmp(tt.mulRes), "Mul result mismatch") - }) - } -} - -func TestBalanceLogValue(t *testing.T) { - tests := []struct { - name string - balance Balance - expected string - }{ - { - name: "nil balance", - balance: Balance{}, - expected: "0 ETH", - }, - { - name: "zero balance", - balance: NewBalance(new(big.Int)), - expected: "0 Wei", - }, - { - name: "small wei amount", - balance: NewBalance(big.NewInt(100)), - expected: "100 Wei", - }, - { - name: "gwei amount", - balance: NewBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(1e9))), - expected: "1 Gwei", - }, - { - name: "eth amount", - balance: NewBalance(new(big.Int).Mul(big.NewInt(1), big.NewInt(1e18))), - expected: "1 ETH", - }, - { - name: "large eth amount", - balance: NewBalance(new(big.Int).Mul(big.NewInt(1000), big.NewInt(1e18))), - expected: "1000 ETH", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - assert.Equal(t, tt.expected, tt.balance.String()) - }) - } -} diff --git a/devnet-sdk/types/types.go b/devnet-sdk/types/types.go deleted file mode 100644 index e251710ae7193..0000000000000 --- a/devnet-sdk/types/types.go +++ /dev/null @@ -1,30 +0,0 @@ -package types - -import ( - "context" - "crypto/ecdsa" - "math/big" - - "github.com/ethereum/go-ethereum/common" -) - 
-type Address = common.Address - -type ChainID = *big.Int - -type ReadInvocation[T any] interface { - Call(ctx context.Context) (T, error) -} - -type WriteInvocation[T any] interface { - ReadInvocation[T] - Send(ctx context.Context) InvocationResult -} - -type InvocationResult interface { - Error() error - Wait() error - Info() any -} - -type Key = *ecdsa.PrivateKey diff --git a/go.mod b/go.mod index 4e4aa1378021a..77c899b2144a5 100644 --- a/go.mod +++ b/go.mod @@ -19,14 +19,10 @@ require ( github.com/crate-crypto/go-kzg-4844 v1.1.0 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 - github.com/docker/docker v27.5.1+incompatible - github.com/docker/go-connections v0.5.0 github.com/ethereum-optimism/go-ethereum-hdwallet v0.1.4-0.20251001155152-4eb15ccedf7e github.com/ethereum-optimism/superchain-registry/validation v0.0.0-20260115192958-fb86a23cd30e github.com/ethereum/go-ethereum v1.16.3 - github.com/fatih/color v1.18.0 github.com/fsnotify/fsnotify v1.9.0 - github.com/go-task/slim-sprig/v3 v3.0.0 github.com/golang/snappy v1.0.0 github.com/google/go-cmp v0.7.0 github.com/google/go-github/v55 v55.0.0 @@ -38,7 +34,6 @@ require ( github.com/hashicorp/raft v1.7.3 github.com/hashicorp/raft-boltdb/v2 v2.3.1 github.com/holiman/uint256 v1.3.2 - github.com/honeycombio/otel-config-go v1.17.0 github.com/ipfs/go-datastore v0.6.0 github.com/ipfs/go-ds-leveldb v0.5.0 github.com/klauspost/compress v1.18.0 @@ -61,7 +56,6 @@ require ( github.com/prometheus/client_model v0.6.2 github.com/protolambda/ctxlock v0.1.0 github.com/schollz/progressbar/v3 v3.18.0 - github.com/spf13/afero v1.12.0 github.com/stretchr/testify v1.10.0 github.com/urfave/cli/v2 v2.27.6 go.etcd.io/bbolt v1.3.5 @@ -79,6 +73,11 @@ require ( gopkg.in/yaml.v3 v3.0.1 ) +require ( + github.com/fatih/color v1.18.0 // indirect + github.com/go-task/slim-sprig/v3 v3.0.0 // indirect +) + require ( codeberg.org/go-fonts/liberation v0.5.0 // indirect 
codeberg.org/go-latex/latex v0.1.0 // indirect @@ -101,7 +100,6 @@ require ( github.com/btcsuite/btcd/btcutil v1.1.5 // indirect github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f // indirect github.com/campoy/embedmd v1.0.0 // indirect - github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/errors v1.11.3 // indirect @@ -110,7 +108,6 @@ require ( github.com/cockroachdb/redact v1.1.5 // indirect github.com/cockroachdb/tokenbucket v0.0.0-20230807174530-cc333fc44b06 // indirect github.com/containerd/cgroups v1.1.0 // indirect - github.com/containerd/log v0.1.0 // indirect github.com/coreos/go-systemd/v22 v22.5.0 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.5 // indirect github.com/crate-crypto/go-eth-kzg v1.4.0 // indirect @@ -121,7 +118,6 @@ require ( github.com/decred/dcrd/crypto/blake256 v1.0.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de // indirect - github.com/distribution/reference v0.6.0 // indirect github.com/dlclark/regexp2 v1.7.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dop251/goja v0.0.0-20230806174421-c933cf95e127 // indirect @@ -133,7 +129,6 @@ require ( github.com/ethereum/go-bigmodexpfix v0.0.0-20250911101455-f9e208c548ab // indirect github.com/ethereum/go-verkle v0.2.2 // indirect github.com/felixge/fgprof v0.9.5 // indirect - github.com/felixge/httpsnoop v1.0.4 // indirect github.com/ferranbt/fastssz v0.1.4 // indirect github.com/flynn/noise v1.1.0 // indirect github.com/francoispqt/gojay v1.2.13 // indirect @@ -157,7 +152,6 @@ require ( github.com/google/gopacket v1.1.19 // indirect github.com/google/pprof v0.0.0-20241009165004-a3522334989c // indirect github.com/graph-gophers/graphql-go v1.3.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 // indirect github.com/hashicorp/errwrap v1.1.0 // 
indirect github.com/hashicorp/go-bexpr v0.1.11 // indirect github.com/hashicorp/go-hclog v1.6.2 // indirect @@ -196,7 +190,6 @@ require ( github.com/libp2p/go-netroute v0.2.1 // indirect github.com/libp2p/go-reuseport v0.4.0 // indirect github.com/libp2p/go-yamux/v4 v4.0.1 // indirect - github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd // indirect github.com/mattn/go-colorable v0.1.14 // indirect github.com/mattn/go-runewidth v0.0.16 // indirect @@ -208,9 +201,6 @@ require ( github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/mitchellh/pointerstructure v1.2.1 // indirect - github.com/moby/docker-image-spec v1.3.1 // indirect - github.com/moby/term v0.5.2 // indirect - github.com/morikuni/aec v1.0.0 // indirect github.com/mr-tron/base58 v1.2.0 // indirect github.com/multiformats/go-base36 v0.2.0 // indirect github.com/multiformats/go-multiaddr-fmt v0.1.0 // indirect @@ -224,8 +214,6 @@ require ( github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416 // indirect github.com/nwaples/rardecode v1.1.3 // indirect github.com/onsi/ginkgo/v2 v2.20.0 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect - github.com/opencontainers/image-spec v1.1.0 // indirect github.com/opencontainers/runtime-spec v1.2.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 // indirect @@ -250,7 +238,6 @@ require ( github.com/pion/turn/v2 v2.1.6 // indirect github.com/pion/webrtc/v3 v3.3.0 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect - github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 // indirect github.com/prometheus/common v0.62.0 // indirect github.com/prometheus/procfs v0.15.1 // indirect github.com/quic-go/qpack v0.4.0 // indirect @@ -262,10 +249,7 @@ 
require ( github.com/rs/cors v1.11.0 // indirect github.com/rs/xid v1.6.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect - github.com/sethvargo/go-envconfig v1.1.0 // indirect github.com/shirou/gopsutil v3.21.11+incompatible // indirect - github.com/shirou/gopsutil/v4 v4.24.6 // indirect - github.com/shoenig/go-m1cpu v0.1.6 // indirect github.com/sirupsen/logrus v1.9.3 // indirect github.com/spaolacci/murmur3 v1.1.0 // indirect github.com/stretchr/objx v0.5.2 // indirect @@ -279,20 +263,8 @@ require ( github.com/xrash/smetrics v0.0.0-20240521201337-686a1a2994c1 // indirect github.com/yusufpapurcu/wmi v1.2.4 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/contrib/instrumentation/host v0.53.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect - go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0 // indirect - go.opentelemetry.io/contrib/propagators/b3 v1.28.0 // indirect - go.opentelemetry.io/contrib/propagators/ot v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 // indirect go.opentelemetry.io/otel/metric v1.34.0 // indirect go.opentelemetry.io/otel/sdk v1.34.0 // indirect - go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect - go.opentelemetry.io/proto/otlp v1.5.0 // indirect go.uber.org/dig v1.18.0 // indirect go.uber.org/fx v1.22.2 // indirect go.uber.org/mock v0.4.0 // indirect @@ -308,7 +280,6 @@ require ( google.golang.org/protobuf v1.36.6 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gotest.tools/v3 v3.5.2 // indirect 
lukechampine.com/blake3 v1.3.0 // indirect ) diff --git a/go.sum b/go.sum index ee74c32b0e798..3a380fe3fd400 100644 --- a/go.sum +++ b/go.sum @@ -23,8 +23,6 @@ git.sr.ht/~sbinet/gg v0.6.0 h1:RIzgkizAk+9r7uPzf/VfbJHBMKUr0F5hRFxTUGMnt38= git.sr.ht/~sbinet/gg v0.6.0/go.mod h1:uucygbfC9wVPQIfrmwM2et0imr8L7KQWywX0xpFMm94= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg= -github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg= github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho= @@ -112,8 +110,6 @@ github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7 github.com/bwesterb/go-ristretto v1.2.0/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= github.com/campoy/embedmd v1.0.0 h1:V4kI2qTJJLf4J29RzI/MAt2c3Bl4dQSYPuflzwFH2hY= github.com/campoy/embedmd v1.0.0/go.mod h1:oxyr9RCiSXg0M3VJ3ks0UGfp98BpSSGr0kpiX3MzVl8= -github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= -github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk= github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= @@ -165,8 +161,6 @@ github.com/consensys/gnark-crypto v0.18.0/go.mod h1:L3mXGFTe1ZN+RSJ+CLjUt9x7PNdx github.com/containerd/cgroups 
v0.0.0-20201119153540-4cbc285b3327/go.mod h1:ZJeTFisyysqgcCdecO57Dj79RfL0LNeGiFUqLYQRYLE= github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM= github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw= -github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= -github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs= @@ -210,15 +204,9 @@ github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de h1:t0UHb5vdo github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= -github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= -github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8= -github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= -github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= -github.com/docker/go-connections v0.5.0/go.mod 
h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= @@ -256,8 +244,6 @@ github.com/fatih/color v1.18.0/go.mod h1:4FelSpRwEGDpQ12mAdzqdOukCy4u8WUtOY6lkT/ github.com/felixge/fgprof v0.9.3/go.mod h1:RdbpDgzqYVh/T9fPELJyV7EYJuHB55UTEULNun8eiPw= github.com/felixge/fgprof v0.9.5 h1:8+vR6yu2vvSKn08urWyEuxx75NWPEvybbkBirEpsbVY= github.com/felixge/fgprof v0.9.5/go.mod h1:yKl+ERSa++RYOs32d8K6WEXCB4uXdLls4ZaZPpayhMM= -github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= -github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/ferranbt/fastssz v0.1.4 h1:OCDB+dYDEQDvAgtAGnTSidK1Pe2tW3nFV40XyMkTeDY= github.com/ferranbt/fastssz v0.1.4/go.mod h1:Ea3+oeoRGGLGm5shYAeDgu6PGUlcvQhE2fILyD9+tGg= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -363,7 +349,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= @@ -401,8 +386,6 @@ github.com/graph-gophers/graphql-go v1.3.0 h1:Eb9x/q6MFpCLz7jBCiP/WTxjSDrYLR1QY4 github.com/graph-gophers/graphql-go 
v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -444,8 +427,6 @@ github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZ github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA= github.com/holiman/uint256 v1.3.2 h1:a9EgMPSC1AAaj1SZL5zIQD3WbwTuHrMGOerLjGmM/TA= github.com/holiman/uint256 v1.3.2/go.mod h1:EOMSn4q6Nyt9P6efbI3bueV4e1b3dGlUCXeiRV4ng7E= -github.com/honeycombio/otel-config-go v1.17.0 h1:3/zig0L3IGnfgiCrEfAwBsM0rF57+TKTyJ/a8yqW2eM= -github.com/honeycombio/otel-config-go v1.17.0/go.mod h1:g2mMdfih4sYKfXBtz2mNGvo3HiQYqX4Up4pdA8JOF2s= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.3.0 h1:UvLUlWDNpoUdYzb2TCn+MuTWtcjXKSza2n6CBdQ0xXc= github.com/huin/goupnp v1.3.0/go.mod h1:gnGPsThkYa7bFi/KWmEysQRf48l2dvR5bxr2OFckNX8= @@ -568,8 +549,6 @@ github.com/libp2p/go-yamux/v4 v4.0.1 h1:FfDR4S1wj6Bw2Pqbc8Uz7pCxeRBPbwsBbEdfwiCy github.com/libp2p/go-yamux/v4 v4.0.1/go.mod h1:NWjl8ZTLOGlozrXSOZ/HlfG++39iKNnM5wwmtQP1YB4= github.com/lmittmann/w3 v0.19.5 h1:WwVRyIwhRLfIahmpB1EglsB3o1XWsgydgrxIUp5upFQ= github.com/lmittmann/w3 v0.19.5/go.mod h1:pN97sGGYGvsbqOYj/ms3Pd+7k/aiK/9OpNcxMmmzSOI= -github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae 
h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= -github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae/go.mod h1:ilwx/Dta8jXAgpFYFvSWEMwxmbWXyiUHkd5FwyKhb5k= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -622,16 +601,10 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= -github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= -github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= -github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= -github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= -github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/mr-tron/base58 v1.1.2/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mr-tron/base58 v1.2.0 
h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= @@ -693,10 +666,6 @@ github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAl github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k= github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY= -github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= -github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= -github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= -github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= @@ -769,8 +738,6 @@ github.com/pkg/profile v1.7.0/go.mod h1:8Uer0jas47ZQMJ7VD+OHknK4YDY07LPUC6dEvqDj github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55 h1:o4JXh1EVt9k/+g42oCprj/FisM4qX9L3sZB3upGN2ZU= -github.com/power-devops/perfstat v0.0.0-20240221224432-82ca36839d55/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= @@ -833,16 +800,8 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/schollz/progressbar/v3 v3.18.0 h1:uXdoHABRFmNIjUfte/Ex7WtuyVslrw2wVPQmCN62HpA= github.com/schollz/progressbar/v3 v3.18.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sethvargo/go-envconfig v1.1.0 h1:cWZiJxeTm7AlCvzGXrEXaSTCNgip5oJepekh/BOQuog= -github.com/sethvargo/go-envconfig v1.1.0/go.mod h1:JLd0KFWQYzyENqnEPWWZ49i4vzZo/6nRidxI8YvGiHw= github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI= github.com/shirou/gopsutil v3.21.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= -github.com/shirou/gopsutil/v4 v4.24.6 h1:9qqCSYF2pgOU+t+NgJtp7Co5+5mHF/HyKBUckySQL64= -github.com/shirou/gopsutil/v4 v4.24.6/go.mod h1:aoebb2vxetJ/yIDZISmduFvVNPHqXQ9SEJwRXxkf0RA= -github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= -github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= -github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= -github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY= github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM= github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0= @@ -877,8 +836,6 @@ github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod 
h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= -github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= @@ -939,30 +896,8 @@ go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0 h1:KG6fOUk3EwSH1dEpsAbsLKFbn3cFwN9xDu8plGu55zI= -go.opentelemetry.io/contrib/detectors/aws/lambda v0.53.0/go.mod h1:bSd579exEkh/P5msRcom8YzVB6NsUxYKyV+D/FYOY7Y= -go.opentelemetry.io/contrib/instrumentation/host v0.53.0 h1:X4r+5n6bSqaQUbPlSO5baoM7tBvipkT0mJFyuPFnPAU= -go.opentelemetry.io/contrib/instrumentation/host v0.53.0/go.mod h1:NTaDj8VCnJxWleEcRQRQaN36+aCZjO9foNIdJunEjUQ= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I= -go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0 h1:nOlJEAJyrcy8hexK65M+dsCHIx7CVVbybcFDNkcTcAc= -go.opentelemetry.io/contrib/instrumentation/runtime v0.53.0/go.mod h1:u79lGGIlkg3Ryw425RbMjEkGYNxSnXRyR286O840+u4= -go.opentelemetry.io/contrib/propagators/b3 v1.28.0 
h1:XR6CFQrQ/ttAYmTBX2loUEFGdk1h17pxYI8828dk/1Y= -go.opentelemetry.io/contrib/propagators/b3 v1.28.0/go.mod h1:DWRkzJONLquRz7OJPh2rRbZ7MugQj62rk7g6HRnEqh0= -go.opentelemetry.io/contrib/propagators/ot v1.28.0 h1:rmlG+2pc5k5M7Y7izDrxAHZUIwDERdGMTD9oMV7llMk= -go.opentelemetry.io/contrib/propagators/ot v1.28.0/go.mod h1:MNgXIn+UrMbNGpd7xyckyo2LCHIgCdmdjEE7YNZGG+w= go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY= go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0 h1:U2guen0GhqH8o/G2un8f/aG/y++OuW6MyCo6hT9prXk= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v1.28.0/go.mod h1:yeGZANgEcpdx/WK0IvvRFC+2oLiMS2u4L/0Rj2M2Qr0= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0 h1:aLmmtjRke7LPDQ3lvpFz+kNEH43faFhzW7v8BFIEydg= -go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp v1.28.0/go.mod h1:TC1pyCt6G9Sjb4bQpShH+P5R53pO6ZuGnHuuln9xMeE= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0 h1:OeNbIYk/2C15ckl7glBlOBp5+WlYsOElzTNmiPW/x60= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.34.0/go.mod h1:7Bept48yIeqxP2OZ9/AqIpYS94h2or0aB4FypJTc8ZM= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0 h1:R3X6ZXmNPRR8ul6i3WgFURCHzaXjHdm0karRG/+dj3s= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.28.0/go.mod h1:QWFXnDavXWwMx2EEcZsf3yxgEKAqsxQ+Syjp+seyInw= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0 h1:BEj3SPM81McUZHYjRS5pEgNgnmzGJ5tRpU5krWnV8Bs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.34.0/go.mod h1:9cKLGBDzI/F3NoHLQGm4ZrYdIHsvGt6ej6hUowxY0J4= go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ= go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE= go.opentelemetry.io/otel/sdk v1.34.0 
h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A= @@ -971,8 +906,6 @@ go.opentelemetry.io/otel/sdk/metric v1.31.0 h1:i9hxxLJF/9kkvfHppyLL55aW7iIJz4Jjx go.opentelemetry.io/otel/sdk/metric v1.31.0/go.mod h1:CRInTMVvNhUKgSAMbKyTMxqOBC0zgyxzW55lZzX43Y8= go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k= go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE= -go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4= -go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/dig v1.18.0 h1:imUL1UiY0Mg4bqbFfsRQO5G4CGRBec/ZujWTvSVp3pw= @@ -1120,7 +1053,6 @@ golang.org/x/sys v0.0.0-20200814200057-3d37ad5750ed/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1268,8 +1200,6 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 
v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= -gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/kurtosis-devnet/.gitignore b/kurtosis-devnet/.gitignore deleted file mode 100644 index 7b6543377da0e..0000000000000 --- a/kurtosis-devnet/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -*-user.json -fileserver/upload-content/* -cmd/__debug_bin* diff --git a/kurtosis-devnet/README.md b/kurtosis-devnet/README.md deleted file mode 100644 index e6bc14a760448..0000000000000 --- a/kurtosis-devnet/README.md +++ /dev/null @@ -1,203 +0,0 @@ -# Getting Started - -Welcome to the Kurtosis Devnet! This tool helps you quickly spin up local development networks for testing and development purposes. Whether you're working on simple test networks or complex interoperability scenarios, this devnet environment has you covered. - -Running a Kurtosis Devnet has the following prerequisites: -- Kurtosis must be installed. This is automatically handled by `mise`, same as with other dev tools in this repository -- Docker Desktop must be installed and running - -Docker Desktop may be substituted by an alternative like Orbstack if you have that installed. - -# Running A Devnet - -To see available devnets, consult the `justfile` to see what `.*-devnet` targets exist, currently -- `simple-devnet` -- `interop-devnet` -- `user-devnet` - -You can read over the referenced `yaml` files located in this directory to see the network definition which would be deployed. Mini and Simple are example network definitions, and User expects a provided network definition. 
- -To run the Interop Devnet, simply: -``` -just interop-devnet -``` - -If all works as expected, you should see a collection of containers appear in Docker. Some of them are Kurtosis infrastructure, while others are the actual hosts for your network. You can observe that the network is running by searching for "supervisor" and watching its logs. - -## Resolving Issues - -Here is a list of potential pitfalls when running Kurtosis and known solutions. - -### `error ensuring kurtosis engine is running` -This error indicates Docker Desktop (or your alternative) is not running. - -### `network with name kt-interop-devnet already exists` -If your kurtosis network is taken down and destroyed through docker, it is possible that the network resources are left around, preventing you from starting up a new network. To resolve, run: -``` -kurtosis engine stop -docker network rm kt-interop-devnet -``` - -You can use `docker network ls` to inspect for networks to remove if the error message specifies some other network. - -# Kurtosis-devnet support - -## devnet specification - -Due to sandboxing issues across repositories, we currently rely on a slight -superset of the native optimism-package specification YAML file, via go -templates. - -So that means in particular that the regular optimism-package input is valid -here. - -Additional custom functions: - -- localDockerImage(PROJECT): builds a docker image for PROJECT based on the - current branch content. - -- localContractArtifacts(LAYER): builds a contracts bundle based on the current - branch content (note: LAYER is currently ignored, we might need to revisit) - -Example: - -```yaml -... - op_contract_deployer_params: - image: {{ localDockerImage "op-deployer" }} - l1_artifacts_locator: {{ localContractArtifacts "l1" }} - l2_artifacts_locator: {{ localContractArtifacts "l2" }} -... -``` - -The list of supported PROJECT values can be found in `justfile` as a -PROJECT-image target. 
Adding a target there will immediately available to the -template engine. - -## devnet deployment tool - -Located in cmd/main.go, this tool handle the creation of an enclave matching the -provided specification. - -The expected entry point for interacting with it is the corresponding -`just devnet SPEC` target. - -This takes an optional 2nd argument, that can be used to provide values for the -template interpretation. - -Note that a SPEC of the form `FOO.yaml` will yield a kurtosis enclave named -`FOO-devnet` - -Convenience targets can be added to `justfile` for specific specifications, for -example: - -```just -interop-devnet: (devnet "interop.yaml") -``` - -## devnet output - -One important aspect of the devnet workflow is that the output should be -*consumable*. Going forward we want to integrate them into larger workflows -(serving as targets for tests for example, or any other form of automation). - -To address this, the deployment tool outputs a document with (hopefully!) useful -information. Here's a short extract: - -```json -{ - "l1": { - "name": "Ethereum", - "nodes": [ - { - "cl": "http://localhost:53689", - "el": "http://localhost:53620" - } - ] - }, - "l2": [ - { - "name": "op-kurtosis-1", - "id": "2151908", - "services": { - "batcher": "http://localhost:57259" - }, - "nodes": [ - { - "cl": "http://localhost:57029", - "el": "http://localhost:56781" - } - ], - "addresses": { - "addressManager": "0x1b89c03f2d8041b2ba16b5128e613d9279195d1a", - ... - } - }, - ... - ], - "wallets": { - "baseFeeVaultRecipient": { - "address": "0xF435e3ba80545679CfC24E5766d7B02F0CCB5938", - "private_key": "0xc661dd5d4b091676d1a5f2b5110f9a13cb8682140587bd756e357286a98d2c26" - }, - ... - } -} -``` - -## further interactions - -Beyond deployment, we can interact with enclaves normally. - -In particular, cleaning up a devnet can be achieved using -`kurtosis rm FOO-devnet` and the likes. 
- -## Troubleshooting - -### Autofix mode - -Autofix mode helps recover from failed devnet deployments by automatically -cleaning up the environment. It has two modes: - -1. **Normal Mode** (`AUTOFIX=true`) - - Sets up the correct shell and updates dependencies - - Cleans up dangling networks and stopped devnets - - Preserves other running enclaves - - Good for fixing minor deployment issues - -2. **Nuke Mode** (`AUTOFIX=nuke`) - - Sets up the correct shell and updates dependencies - - Completely resets the Kurtosis environment - - Removes all networks and containers - - Use when you need a fresh start - -Usage: -```bash -# For normal cleanup -AUTOFIX=true just interop-devnet - -# For complete reset -AUTOFIX=nuke just interop-devnet -``` - -Note: Nuke mode will stop all running enclaves, so use it carefully. - -### Older kurtosis versions - -In some cases, a newer kurtosis client might not be able to handle an -older kurtosis engine. This typically happens if the kurtosis -command-line managed by mise gets updated while some enclaves are -already running. 
- -To help recover, you can either run with `AUTOFIX=nuke` or kill the -old engine with: - -```shell -docker rm -f $(docker ps -aqf "name=kurtosis-*") -``` - -Potentially you'll also need to cleanup dangling docker networks: - -```shell -docker network rm -f $(docker network ls -qf "name=kt-*") -``` diff --git a/kurtosis-devnet/book/.gitignore b/kurtosis-devnet/book/.gitignore deleted file mode 100644 index 7585238efedfc..0000000000000 --- a/kurtosis-devnet/book/.gitignore +++ /dev/null @@ -1 +0,0 @@ -book diff --git a/kurtosis-devnet/book/book.toml b/kurtosis-devnet/book/book.toml deleted file mode 100644 index ac835adb54a1c..0000000000000 --- a/kurtosis-devnet/book/book.toml +++ /dev/null @@ -1,13 +0,0 @@ -[book] -authors = ["Optimism Contributors"] -language = "en" -multilingual = false -src = "src" -title = "Kurtosis Devnet Book" - -[output.html] -site-url = "/kurtosis-devnet/" -git-repository-url = "https://github.com/ethereum-optimism/optimism/tree/develop/kurtosis-devnet/book" -edit-url-template = "https://github.com/ethereum-optimism/optimism/tree/develop/kurtosis-devnet/book/{path}" -additional-css = ["custom.css", "theme/css/footer.css"] -additional-js = ["theme/js/footer.js"] diff --git a/kurtosis-devnet/book/custom.css b/kurtosis-devnet/book/custom.css deleted file mode 100644 index 7c94143752af4..0000000000000 --- a/kurtosis-devnet/book/custom.css +++ /dev/null @@ -1,5 +0,0 @@ -.content main { - max-width: 85%; - margin-left: auto; - margin-right: auto; -} diff --git a/kurtosis-devnet/book/src/README.md b/kurtosis-devnet/book/src/README.md deleted file mode 100644 index dfc864106293a..0000000000000 --- a/kurtosis-devnet/book/src/README.md +++ /dev/null @@ -1,26 +0,0 @@ -> ⚠️ **UNDER HEAVY DEVELOPMENT** ⚠️ -> -> This documentation is actively being developed and may change frequently. 
- -# Introduction - -Kurtosis Devnet is a development and testing environment for Optimism devnets, providing a local development setup for testing and validating L2 functionality. -This environment is built around Kurtosis, adding convenience features for dev-oriented features. - -## Getting Started - -To use Kurtosis Devnet, you'll need: -1. A Docker daemon (Docker Desktop or any drop-in replacement) -2. Kurtosis installed on your system - -Detailed setup instructions and usage examples can be found in the following chapters of this documentation. - -## Use Cases - -The Kurtosis DevNet is particularly useful for: -- Protocol developers working on Optimism -- Smart contract developers testing L2 functionality -- Engineers validating cross-chain interactions - -This documentation will guide you through setting up, using, and extending Kurtosis Devnet for your development needs. - diff --git a/kurtosis-devnet/book/src/SUMMARY.md b/kurtosis-devnet/book/src/SUMMARY.md deleted file mode 100644 index 8d9c720ff5607..0000000000000 --- a/kurtosis-devnet/book/src/SUMMARY.md +++ /dev/null @@ -1,12 +0,0 @@ -# Summary - -[Introduction](README.md) - -# Getting started - -- [Basic deployment](./basic_deployment.md) - -# Architecture - -- [Local Artifacts](./local_artifacts.md) -- [Standardized Output](./std_output.md) \ No newline at end of file diff --git a/kurtosis-devnet/book/src/basic_deployment.md b/kurtosis-devnet/book/src/basic_deployment.md deleted file mode 100644 index 6a3ad2e064f63..0000000000000 --- a/kurtosis-devnet/book/src/basic_deployment.md +++ /dev/null @@ -1,233 +0,0 @@ -# Basic Deployment - -The Kurtosis devnet provides several pre-configured devnet templates and convenient commands to deploy and interact with them. - -## Built-in Devnets - -The following devnet templates are available out of the box: - -1. **Simple Devnet** (`simple.yaml`) - - Basic single-chain setup - - Ideal for local development and testing - - Deploy with: `just simple-devnet` - -2. 
**Interop Devnet** (`interop.yaml`) - - Designed for interop testing - - Includes test suite for cross-chain interactions - - Deploy with: `just interop-devnet` - - Run tests with: `just interop-devnet-test` - -3. **Pectra Devnet** (`pectra.yaml`) - - Specialized configuration for Pectra testing - - Deploy with: `just pectra-devnet` - -## User-Defined Devnets (Experimental) - -> **Note**: User-defined devnets are an experimental feature and not actively supported at this time. Use at your own risk. - -The user devnet template (`user.yaml`) allows for customizable devnet configurations through a JSON input file. This feature is designed to simplify devnet creation for future devnet-as-a-service scenarios. - -### Deployment -```bash -just user-devnet -``` - -### Example Configuration -Here's an example of a user devnet configuration file: - -```json -{ - "interop": true, - "l2s": { - "2151908": { - "nodes": ["op-geth", "op-geth"] - }, - "2151909": { - "nodes": ["op-reth"] - } - }, - "overrides": { - "flags": { - "log_level": "--log.level=debug" - } - } -} -``` - -This configuration: -- Enables interop testing features -- Defines two L2 chains: - - Chain `2151908` with two `op-geth` nodes - - Chain `2151909` with one `op-reth` node -- Sets custom logging level for all nodes - -## Deployment Commands - -Arbitrary devnets can be deployed using the general `devnet` command with the following syntax: -```bash -just devnet [data-file] [name] -``` - -Where: -- `template-file`: The YAML template to use (e.g., `simple.yaml`) -- `data-file`: Optional JSON file with configuration data -- `name`: Optional custom name for the devnet (defaults to template name) - -For example: -```bash -# Deploy simple devnet with default name -just devnet simple.yaml - -# Deploy user devnet with custom data and name -just devnet user.yaml my-config.json my-custom-devnet -``` - -This can be convenient when experimenting with devnet definitions - -## Entering a Devnet Shell - -The devnet provides a 
powerful feature to "enter" a devnet environment, which sets up the necessary environment variables for interacting with the chains. - -### Basic Usage -```bash -just enter-devnet [chain-name] -``` - -Where: -- `devnet-name`: The name of your deployed devnet -- `chain-name`: Optional chain to connect to (defaults to "Ethereum") - -Example: -```bash -# Enter the Ethereum chain environment in the simple devnet -just enter-devnet simple-devnet - -# Enter a specific chain environment -just enter-devnet my-devnet l2-chain - -# Use exec to replace the current shell process (recommended) -exec just enter-devnet my-devnet l2-chain -``` - -Note: The enter feature creates a new shell process. To avoid accumulating shell processes, you can use the `exec` command, which replaces the current shell with the new one. This is especially useful in scripts or when you want to maintain a clean process tree. - -### Features of the Devnet Shell - -When you enter a devnet shell, you get: -1. All necessary environment variables set for the chosen chain -2. Integration with tools like `cast` for blockchain interaction -3. Chain-specific configuration and endpoints -4. A new shell session with the devnet context - -The shell inherits your current environment and adds: -- Chain-specific RPC endpoints -- Network identifiers -- Authentication credentials (if any) -- Tool configurations - -To exit the devnet shell, simply type `exit` or press `Ctrl+D`. 
- -### Environment Variables - -The devnet shell automatically sets up environment variables needed for development and testing: -- `ETH_RPC_URL`: The RPC endpoint for the selected chain -- `ETH_RPC_JWT_SECRET`: JWT secret for authenticated RPC connections (when cast integration is enabled) -- `DEVNET_ENV_URL`: The URL or absolute path to the devnet environment file -- `DEVNET_CHAIN_NAME`: The name of the currently selected chain - -These variables are automatically picked up by tools like `cast`, making it easy to interact with the chain directly from the shell. - -## AUTOFIX Feature - -The devnet includes an AUTOFIX feature that helps recover from failed devnet deployments by automatically cleaning up the environment. It has two modes: - -1. **Normal Mode** (`AUTOFIX=true`) - - Cleans up stopped or empty enclaves - - Removes associated Docker resources (containers, volumes, networks) - - Preserves running enclaves - - Good for fixing minor deployment issues - -2. **Nuke Mode** (`AUTOFIX=nuke`) - - Completely resets the Kurtosis environment - - Removes all enclaves and associated Docker resources - - Use when you need a fresh start - -### How AUTOFIX Works - -AUTOFIX operates by: -1. Checking the status of the enclave (running, stopped, or empty) -2. For stopped or empty enclaves in normal mode: - - Removes the enclave - - Cleans up potential kurtosis Docker resources -3. For nuke mode: - - Removes all enclaves - - Cleans up all potential kurtosis Docker resources - -### Usage - -```bash -# For normal cleanup -AUTOFIX=true just devnet simple.yaml - -# For complete reset -AUTOFIX=nuke just devnet simple.yaml -``` - -Note: Nuke mode will stop all running enclaves, so use it carefully. 
- -### Troubleshooting - -If you encounter issues with older Kurtosis versions, you can use AUTOFIX to recover: - -```bash -# For normal cleanup -AUTOFIX=true just devnet simple.yaml - -# For complete reset -AUTOFIX=nuke just devnet simple.yaml -``` - -Alternatively, you can manually clean up Docker resources: - -```bash -# Remove old Kurtosis containers -docker rm -f $(docker ps -aqf "name=kurtosis-*") - -# Clean up dangling networks -docker network rm -f $(docker network ls -qf "name=kt-*") -``` - -## Frequently Asked Questions (FAQ) - -### Docker Rate Limiting Issues - -#### Q: I'm getting a 443 error when pulling from ghcr.io. What can I do? - -A: This is typically caused by Docker Hub rate limiting. Here are several solutions: - -1. **Authenticate with GitHub Container Registry**: - ```bash - docker login ghcr.io - ``` - This will give you higher rate limits. - -2. **Adjust Docker Engine Configuration**: - Add these settings to your Docker daemon configuration (`/etc/docker/daemon.json`): - ```json - { - "max-concurrent-downloads": 1, - "max-concurrent-uploads": 1, - "max-download-attempts": 100, - "registry-mirrors": [] - } - ``` - -3. **Restart Docker Engine**: - ```bash - # For systemd-based systems - sudo systemctl restart docker - - # For macOS - osascript -e 'quit app "Docker"' - open -a Docker - ``` diff --git a/kurtosis-devnet/book/src/local_artifacts.md b/kurtosis-devnet/book/src/local_artifacts.md deleted file mode 100644 index 49342518336a8..0000000000000 --- a/kurtosis-devnet/book/src/local_artifacts.md +++ /dev/null @@ -1,114 +0,0 @@ -# Local Artifacts Integration - -The Kurtosis devnet provides powerful templating capabilities that allow you to seamlessly integrate locally built artifacts (Docker images, smart contracts, and prestates) into your devnet configuration. This integration is managed through a combination of Go-based builders and YAML templates. - -## Component Eligibility - -Not all components can be built locally. 
Only components that are part of the Optimism monorepo can be built using the local artifact system. Here's a breakdown: - -### Buildable Components -Components that can be built locally include: -- `op-node` -- `op-batcher` -- `op-proposer` -- `op-challenger` -- `op-deployer` - -### External Components -Some components are dependencies living outside the monorepo and cannot be built locally: -- `op-geth` -- `op-reth` - -For example, in your configuration: -```yaml -# This will use an external image - cannot be built locally -el_type: op-geth -el_image: "" # Will use the default op-geth image - -# This can be built locally -cl_type: op-node -cl_image: {{ localDockerImage "op-node" }} # Will build from local source -``` - -## Template Functions - -In the `simple.yaml` configuration, you'll notice several custom template functions that enable local artifact integration: - -```yaml -# Example usage in simple.yaml -image: {{ localDockerImage "op-node" }} -l1_artifacts_locator: {{ localContractArtifacts "l1" }} -faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_mt64 }} -``` - -These template functions map to specific builders in the Go codebase that handle artifact construction. - -## Builder Components - -### 1. Docker Image Builder - -The Docker image builder manages the building and tagging of local Docker images: - -```go -// Usage in YAML: -image: {{ localDockerImage "op-node" }} -``` - -This builder: -- Executes build commands using the `just` task runner -- Caches built images to prevent redundant builds (in particular when we have multiple L2s and/or participants to any L2) - -### 2. Contract Builder - -The contract builder handles the compilation and bundling of smart contracts: - -```yaml -# Usage in YAML: -l1_artifacts_locator: {{ localContractArtifacts "l1" }} -l2_artifacts_locator: {{ localContractArtifacts "l2" }} -``` - -This builder: -- Manages contract compilation through `just` commands -- Caches built contract bundles - -### 3. 
Prestate Builder - -The prestate builder manages the generation of fault proof prestates: - -```yaml -# Usage in YAML: -faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_mt64 }} -``` - -This builder: -- Generates prestate data for fault proofs -- Caches built prestates - -## Using Local Artifacts - -To use local artifacts in your devnet: - -1. Ensure your local environment has the necessary build dependencies -2. Reference local artifacts in your YAML configuration using the appropriate template functions -3. The builders will automatically handle building and caching of artifacts - -Example configuration using all types of local artifacts: - -```yaml -optimism_package: - chains: - - participants: - - el_type: op-geth - el_image: "" # Uses default external op-geth image - cl_type: op-node - cl_image: {{ localDockerImage "op-node" }} - op_contract_deployer_params: - image: {{ localDockerImage "op-deployer" }} - l1_artifacts_locator: {{ localContractArtifacts "l1" }} - l2_artifacts_locator: {{ localContractArtifacts "l2" }} - global_deploy_overrides: - faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_mt64 }} -``` - -This integration system ensures that your devnet can seamlessly use locally built components while maintaining reproducibility and ease of configuration. diff --git a/kurtosis-devnet/book/src/std_output.md b/kurtosis-devnet/book/src/std_output.md deleted file mode 100644 index d62c5acf34abe..0000000000000 --- a/kurtosis-devnet/book/src/std_output.md +++ /dev/null @@ -1,134 +0,0 @@ -# Standard Output Format - -Kurtosis-devnet is tightly integrated with the [Optimism Devnet SDK](/devnet-sdk/). This integration is achieved through a standardized devnet descriptor format that enables powerful testing and automation capabilities. - -## Accessing the Devnet Descriptor - -The devnet descriptor is available in two ways: - -1. 
**Deployment Output** - - When you run any of the deployment commands (`just devnet ...`), the descriptor is printed to stdout - - The output is a JSON file that fully describes your devnet configuration - - You can capture this output for later use or automation - -2. **Kurtosis Enclave Artifact** - - The descriptor is also stored as a file artifact named "devnet" in the Kurtosis enclave - - This allows other tools and services to discover and interact with your devnet - - The descriptor can be accessed through devnet-sdk using the Kurtosis URL format: `kt:///files/devnet` - -Here's a simplified example of a devnet descriptor: - -```json -{ - "l1": { - "name": "Ethereum", - "nodes": [ - { - "services": { - "cl": { - "name": "cl-1-lighthouse-geth", - "endpoints": { - "http": { - "host": "127.0.0.1", - "port": 8545 - } - } - }, - "el": { - "name": "el-1-geth-lighthouse", - "endpoints": { - "rpc": { - "host": "127.0.0.1", - "port": 8551 - } - } - } - } - } - ], - "addresses": { - "l1CrossDomainMessenger": "0x...", - "l1StandardBridge": "0x...", - "optimismPortal": "0x..." - // ... other contract addresses - }, - "wallets": { - "user-key-0": { - "address": "0x...", - "private_key": "0x..." - } - // ... other wallets - }, - "jwt": "0x..." - }, - "l2": [ - { - "name": "op-kurtosis", - "id": "2151908", - "services": { - "batcher": { - "name": "op-batcher-op-kurtosis", - "endpoints": { - "http": { - "host": "127.0.0.1", - "port": 8547 - } - } - }, - "proposer": { - "name": "op-proposer-op-kurtosis", - "endpoints": { - "http": { - "host": "127.0.0.1", - "port": 8548 - } - } - } - }, - "nodes": [ - { - "services": { - "cl": { - "name": "op-node", - "endpoints": { - "http": { - "host": "127.0.0.1", - "port": 8546 - } - } - }, - "el": { - "name": "op-geth", - "endpoints": { - "rpc": { - "host": "127.0.0.1", - "port": 8549 - } - } - } - } - } - ], - "jwt": "0x..." 
- } - ] -} -``` - -This standardized output enables seamless integration with the devnet-sdk and other tools in the ecosystem. - -## Devnet SDK Integration - -By leveraging the devnet-sdk integration, your devnets automatically gain access to: - -1. **Test Framework Integration** - - Use your devnet as a System Under Test (SUT) with tests written in the devnet-sdk framework - - Seamless integration with existing test suites - - Standardized approach to devnet interaction in tests - -2. **Test Runner Support** - - Native support for op-nat as a test runner - - Consistent test execution across different devnet configurations - - Automated test setup and teardown - -These capabilities make kurtosis-devnet an ideal platform for both development and testing environments. diff --git a/kurtosis-devnet/book/theme/css/footer.css b/kurtosis-devnet/book/theme/css/footer.css deleted file mode 100644 index cb7be80ab2145..0000000000000 --- a/kurtosis-devnet/book/theme/css/footer.css +++ /dev/null @@ -1,71 +0,0 @@ -.mdbook-footer { - width: 100%; - padding: 4rem 2.5rem; /* Increased padding */ - background-color: var(--bg); - border-top: 1px solid var(--sidebar-bg); - margin-top: 5rem; /* Increased margin */ -} - -.mdbook-footer .footer-container { - max-width: 1200px; - margin: 0 auto; - display: flex; - flex-direction: column; - gap: 2.5rem; /* Increased gap */ - align-items: center; -} - -.mdbook-footer .policy-links { - display: flex; - gap: 4rem; /* Increased gap between links */ - flex-wrap: wrap; - justify-content: center; -} - -.mdbook-footer .policy-links a { - color: var(--fg); - text-decoration: none; - transition: opacity 0.2s; - font-size: 1.35rem; /* Increased font size */ - opacity: 0.85; - font-weight: 400; - line-height: 1.6; /* Increased line height */ -} - -.mdbook-footer .policy-links a:hover { - opacity: 1; - text-decoration: underline; -} - -.mdbook-footer .copyright { - color: var(--fg); - font-size: 1.35rem; /* Increased font size */ - opacity: 0.85; - 
text-align: center; - font-weight: 400; - line-height: 1.6; /* Increased line height */ -} - -.mdbook-footer .copyright a { - color: var(--fg); - text-decoration: none; -} - -.mdbook-footer .copyright a:hover { - text-decoration: underline; -} - -@media (max-width: 640px) { - .mdbook-footer .policy-links { - gap: 2.5rem; /* Increased gap for mobile */ - } - - .mdbook-footer { - padding: 3rem 2rem; /* Increased padding for mobile */ - } - - .mdbook-footer .policy-links a, - .mdbook-footer .copyright { - font-size: 1.25rem; /* Increased font size for mobile */ - } -} \ No newline at end of file diff --git a/kurtosis-devnet/book/theme/js/footer.js b/kurtosis-devnet/book/theme/js/footer.js deleted file mode 100644 index 014f44f2d6c54..0000000000000 --- a/kurtosis-devnet/book/theme/js/footer.js +++ /dev/null @@ -1,41 +0,0 @@ -// Create footer element -function createFooter() { - const footer = document.createElement('footer'); - footer.className = 'mdbook-footer'; - - const container = document.createElement('div'); - container.className = 'footer-container'; - - // Add legal links - const policyLinks = document.createElement('div'); - policyLinks.className = 'policy-links'; - - const links = [ - { href: 'https://optimism.io/community-agreement', text: 'Community Agreement' }, - { href: 'https://optimism.io/terms', text: 'Terms of Service' }, - { href: 'https://optimism.io/data-privacy-policy', text: 'Privacy Policy' } - ]; - - links.forEach(link => { - const a = document.createElement('a'); - a.href = link.href; - a.textContent = link.text; - policyLinks.appendChild(a); - }); - - // Add copyright notice - const copyright = document.createElement('div'); - copyright.className = 'copyright'; - copyright.innerHTML = `© ${new Date().getFullYear()} Optimism Foundation. 
All rights reserved.`; - - // Assemble footer - container.appendChild(policyLinks); - container.appendChild(copyright); - footer.appendChild(container); - - // Add footer to page - document.body.appendChild(footer); -} - -// Run after DOM is loaded -document.addEventListener('DOMContentLoaded', createFooter); \ No newline at end of file diff --git a/kurtosis-devnet/cmd/main.go b/kurtosis-devnet/cmd/main.go deleted file mode 100644 index e9a651cb94362..0000000000000 --- a/kurtosis-devnet/cmd/main.go +++ /dev/null @@ -1,246 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "fmt" - "log" - "os" - "path/filepath" - - "github.com/BurntSushi/toml" - "github.com/ethereum-optimism/optimism/devnet-sdk/telemetry" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/deploy" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - autofixTypes "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/types" - "github.com/honeycombio/otel-config-go/otelconfig" - "github.com/urfave/cli/v2" -) - -type config struct { - templateFile string - dataFile string - kurtosisPackage string - enclave string - environment string - conductorConfig string - dryRun bool - baseDir string - kurtosisBinary string - autofix string -} - -func newConfig(c *cli.Context) (*config, error) { - cfg := &config{ - templateFile: c.String("template"), - dataFile: c.String("data"), - kurtosisPackage: c.String("kurtosis-package"), - enclave: c.String("enclave"), - environment: c.String("environment"), - conductorConfig: c.String("conductor-config"), - dryRun: c.Bool("dry-run"), - kurtosisBinary: c.String("kurtosis-binary"), - autofix: c.String("autofix"), - } - - // Validate required flags - if cfg.templateFile == "" { - return nil, fmt.Errorf("template file is required") - } - cfg.baseDir = filepath.Dir(cfg.templateFile) - - return cfg, nil -} - -func writeEnvironment(path string, 
env *kurtosis.KurtosisEnvironment) error { - out := os.Stdout - if path != "" { - var err error - out, err = os.Create(path) - if err != nil { - return fmt.Errorf("error creating environment file: %w", err) - } - defer out.Close() - } - - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - if err := enc.Encode(env); err != nil { - return fmt.Errorf("error encoding environment: %w", err) - } - - return nil -} - -func writeConductorConfig(path string, enclaveName string) error { - if path == "" { - return nil - } - - ctx := context.Background() - conductorConfig, err := inspect.ExtractConductorConfig(ctx, enclaveName) - if err != nil { - log.Printf("Warning: Could not extract conductor config: %v", err) - return nil - } - - if conductorConfig == nil { - log.Println("No conductor services found, skipping conductor config generation") - return nil - } - - out, err := os.Create(path) - if err != nil { - return fmt.Errorf("error creating conductor config file: %w", err) - } - defer out.Close() - - encoder := toml.NewEncoder(out) - if err := encoder.Encode(conductorConfig); err != nil { - return fmt.Errorf("error encoding conductor config as TOML: %w", err) - } - - log.Printf("Conductor configuration saved to: %s", path) - return nil -} - -func printAutofixMessage() { - fmt.Println("Trouble with your devnet? 
Try Autofix!") - fmt.Println("Set AUTOFIX=true to automatically fix common configuration issues.") - fmt.Println("If that doesn't work, set AUTOFIX=nuke to start fresh with a clean slate.") - fmt.Println() -} - -func printWelcomeMessage() { - fmt.Println("Welcome to Kurtosis Devnet!") - printAutofixMessage() - fmt.Println("Happy hacking!") -} - -func mainAction(c *cli.Context) error { - ctx := c.Context - - ctx, shutdown, err := telemetry.SetupOpenTelemetry( - ctx, - otelconfig.WithServiceName(c.App.Name), - otelconfig.WithServiceVersion(c.App.Version), - ) - if err != nil { - return fmt.Errorf("error setting up OpenTelemetry: %w", err) - } - defer shutdown() - - // Only show welcome message if not showing help or version - if !c.Bool("help") && !c.Bool("version") && c.NArg() == 0 { - printWelcomeMessage() - } - - cfg, err := newConfig(c) - if err != nil { - return fmt.Errorf("error parsing config: %w", err) - } - - autofixMode := autofixTypes.AutofixModeDisabled - if cfg.autofix == "true" { - autofixMode = autofixTypes.AutofixModeNormal - } else if cfg.autofix == "nuke" { - autofixMode = autofixTypes.AutofixModeNuke - } else if os.Getenv("AUTOFIX") == "true" { - autofixMode = autofixTypes.AutofixModeNormal - } else if os.Getenv("AUTOFIX") == "nuke" { - autofixMode = autofixTypes.AutofixModeNuke - } - - deployer, err := deploy.NewDeployer( - deploy.WithKurtosisPackage(cfg.kurtosisPackage), - deploy.WithEnclave(cfg.enclave), - deploy.WithDryRun(cfg.dryRun), - deploy.WithKurtosisBinary(cfg.kurtosisBinary), - deploy.WithTemplateFile(cfg.templateFile), - deploy.WithDataFile(cfg.dataFile), - deploy.WithBaseDir(cfg.baseDir), - deploy.WithAutofixMode(autofixMode), - ) - if err != nil { - return fmt.Errorf("error creating deployer: %w", err) - } - - env, err := deployer.Deploy(ctx, nil) - if err != nil { - if autofixMode == autofixTypes.AutofixModeDisabled { - printAutofixMessage() - } - return fmt.Errorf("error deploying environment: %w", err) - } - - // Write environment 
JSON file - if err := writeEnvironment(cfg.environment, env); err != nil { - return fmt.Errorf("error writing environment file: %w", err) - } - - // Write conductor configuration TOML file - if err := writeConductorConfig(cfg.conductorConfig, cfg.enclave); err != nil { - return fmt.Errorf("error writing conductor config file: %w", err) - } - - return nil -} - -func getFlags() []cli.Flag { - return []cli.Flag{ - &cli.StringFlag{ - Name: "template", - Usage: "Path to the template file (required)", - Required: true, - }, - &cli.StringFlag{ - Name: "data", - Usage: "Path to JSON data file (optional)", - }, - &cli.StringFlag{ - Name: "kurtosis-package", - Usage: "Kurtosis package to deploy (optional)", - Value: kurtosis.DefaultPackageName, - }, - &cli.StringFlag{ - Name: "enclave", - Usage: "Enclave name (optional)", - Value: kurtosis.DefaultEnclave, - }, - &cli.StringFlag{ - Name: "environment", - Usage: "Path to JSON environment file output (optional)", - }, - &cli.StringFlag{ - Name: "conductor-config", - Usage: "Path to TOML conductor configuration file output (optional)", - }, - &cli.BoolFlag{ - Name: "dry-run", - Usage: "Dry run mode (optional)", - }, - &cli.StringFlag{ - Name: "kurtosis-binary", - Usage: "Path to kurtosis binary (optional)", - Value: "kurtosis", - }, - &cli.StringFlag{ - Name: "autofix", - Usage: "Autofix mode (optional, values: true, nuke)", - }, - } -} - -func main() { - app := &cli.App{ - Name: "kurtosis-devnet", - Usage: "Deploy and manage Optimism devnet using Kurtosis", - Flags: getFlags(), - Action: mainAction, - } - - if err := app.Run(os.Args); err != nil { - log.Fatalf("Error: %v\n", err) - } -} diff --git a/kurtosis-devnet/cmd/main_test.go b/kurtosis-devnet/cmd/main_test.go deleted file mode 100644 index 6ea2d8a0be7c3..0000000000000 --- a/kurtosis-devnet/cmd/main_test.go +++ /dev/null @@ -1,293 +0,0 @@ -package main - -import ( - "os" - "path/filepath" - "testing" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" 
- autofixTypes "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/types" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" -) - -func TestParseFlags(t *testing.T) { - tests := []struct { - name string - args []string - wantCfg *config - wantError bool - }{ - { - name: "valid configuration", - args: []string{ - "--template", "path/to/template.yaml", - "--enclave", "test-enclave", - }, - wantCfg: &config{ - templateFile: "path/to/template.yaml", - enclave: "test-enclave", - kurtosisPackage: kurtosis.DefaultPackageName, - }, - wantError: false, - }, - { - name: "missing required template", - args: []string{"--enclave", "test-enclave"}, - wantCfg: nil, - wantError: true, - }, - { - name: "with data file", - args: []string{ - "--template", "path/to/template.yaml", - "--data", "path/to/data.json", - }, - wantCfg: &config{ - templateFile: "path/to/template.yaml", - dataFile: "path/to/data.json", - enclave: kurtosis.DefaultEnclave, - kurtosisPackage: kurtosis.DefaultPackageName, - }, - wantError: false, - }, - { - name: "with autofix true", - args: []string{ - "--template", "path/to/template.yaml", - "--autofix", "true", - }, - wantCfg: &config{ - templateFile: "path/to/template.yaml", - enclave: kurtosis.DefaultEnclave, - kurtosisPackage: kurtosis.DefaultPackageName, - autofix: "true", - }, - wantError: false, - }, - { - name: "with autofix nuke", - args: []string{ - "--template", "path/to/template.yaml", - "--autofix", "nuke", - }, - wantCfg: &config{ - templateFile: "path/to/template.yaml", - enclave: kurtosis.DefaultEnclave, - kurtosisPackage: kurtosis.DefaultPackageName, - autofix: "nuke", - }, - wantError: false, - }, - { - name: "with invalid autofix value", - args: []string{ - "--template", "path/to/template.yaml", - "--autofix", "invalid", - }, - wantCfg: &config{ - templateFile: "path/to/template.yaml", - enclave: kurtosis.DefaultEnclave, - kurtosisPackage: kurtosis.DefaultPackageName, - autofix: 
"invalid", - }, - wantError: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - var cfg *config - app := &cli.App{ - Flags: getFlags(), - Action: func(c *cli.Context) (err error) { - cfg, err = newConfig(c) - return - }, - } - - // Prepend program name to args as urfave/cli expects - args := append([]string{"prog"}, tt.args...) - - err := app.Run(args) - if tt.wantError { - assert.Error(t, err) - return - } - - require.NoError(t, err) - require.NotNil(t, cfg) - assert.Equal(t, tt.wantCfg.templateFile, cfg.templateFile) - assert.Equal(t, tt.wantCfg.enclave, cfg.enclave) - assert.Equal(t, tt.wantCfg.kurtosisPackage, cfg.kurtosisPackage) - if tt.wantCfg.dataFile != "" { - assert.Equal(t, tt.wantCfg.dataFile, cfg.dataFile) - } - if tt.wantCfg.autofix != "" { - assert.Equal(t, tt.wantCfg.autofix, cfg.autofix) - } - }) - } -} - -func TestMainFuncValidatesConfig(t *testing.T) { - // Create a temporary directory for test files - tmpDir, err := os.MkdirTemp("", "main-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create test template - templatePath := filepath.Join(tmpDir, "template.yaml") - err = os.WriteFile(templatePath, []byte("name: test"), 0644) - require.NoError(t, err) - - // Create environment output path - envPath := filepath.Join(tmpDir, "env.json") - - app := &cli.App{ - Flags: getFlags(), - Action: func(c *cli.Context) error { - cfg, err := newConfig(c) - if err != nil { - return err - } - - // Verify config values - assert.Equal(t, templatePath, cfg.templateFile) - assert.Equal(t, envPath, cfg.environment) - assert.True(t, cfg.dryRun) - - // Create an empty environment file to simulate successful deployment - return os.WriteFile(envPath, []byte("{}"), 0644) - }, - } - - args := []string{ - "prog", - "--template", templatePath, - "--environment", envPath, - "--dry-run", - } - - err = app.Run(args) - require.NoError(t, err) - - // Verify the environment file was created - assert.FileExists(t, envPath) -} 
- -func TestAutofixModes(t *testing.T) { - tests := []struct { - name string - autofixEnv string - autofixFlag string - expectedMode autofixTypes.AutofixMode - }{ - { - name: "autofix disabled", - autofixEnv: "", - autofixFlag: "", - expectedMode: autofixTypes.AutofixModeDisabled, - }, - { - name: "autofix normal mode via env", - autofixEnv: "true", - autofixFlag: "", - expectedMode: autofixTypes.AutofixModeNormal, - }, - { - name: "autofix nuke mode via env", - autofixEnv: "nuke", - autofixFlag: "", - expectedMode: autofixTypes.AutofixModeNuke, - }, - { - name: "autofix normal mode via flag", - autofixEnv: "", - autofixFlag: "true", - expectedMode: autofixTypes.AutofixModeNormal, - }, - { - name: "autofix nuke mode via flag", - autofixEnv: "", - autofixFlag: "nuke", - expectedMode: autofixTypes.AutofixModeNuke, - }, - { - name: "flag takes precedence over env", - autofixEnv: "true", - autofixFlag: "nuke", - expectedMode: autofixTypes.AutofixModeNuke, - }, - { - name: "invalid autofix value", - autofixEnv: "invalid", - autofixFlag: "", - expectedMode: autofixTypes.AutofixModeDisabled, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a temporary directory for test files - tmpDir, err := os.MkdirTemp("", "autofix-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create test template - templatePath := filepath.Join(tmpDir, "template.yaml") - err = os.WriteFile(templatePath, []byte("name: test"), 0644) - require.NoError(t, err) - - // Create environment output path - envPath := filepath.Join(tmpDir, "env.json") - - // Set up test environment - if tt.autofixEnv != "" { - t.Setenv("AUTOFIX", tt.autofixEnv) - } - - app := &cli.App{ - Flags: getFlags(), - Action: func(c *cli.Context) error { - cfg, err := newConfig(c) - if err != nil { - return err - } - - // Verify autofix mode - autofixMode := autofixTypes.AutofixModeDisabled - if cfg.autofix == "true" { - autofixMode = autofixTypes.AutofixModeNormal - } else if 
cfg.autofix == "nuke" { - autofixMode = autofixTypes.AutofixModeNuke - } else if os.Getenv("AUTOFIX") == "true" { - autofixMode = autofixTypes.AutofixModeNormal - } else if os.Getenv("AUTOFIX") == "nuke" { - autofixMode = autofixTypes.AutofixModeNuke - } - assert.Equal(t, tt.expectedMode, autofixMode) - - // Create an empty environment file to simulate successful deployment - return os.WriteFile(envPath, []byte("{}"), 0644) - }, - } - - args := []string{ - "prog", - "--template", templatePath, - "--environment", envPath, - } - if tt.autofixFlag != "" { - args = append(args, "--autofix", tt.autofixFlag) - } - - err = app.Run(args) - require.NoError(t, err) - - // Verify the environment file was created - assert.FileExists(t, envPath) - }) - } -} diff --git a/kurtosis-devnet/fileserver/kurtosis.yml b/kurtosis-devnet/fileserver/kurtosis.yml deleted file mode 100644 index 2f1eb17b25e8d..0000000000000 --- a/kurtosis-devnet/fileserver/kurtosis.yml +++ /dev/null @@ -1,4 +0,0 @@ -name: github.com/ethereum-optimism/optimism/kurtosis-devnet/fileserver -description: |- - Kurtosis package for serving files from the build directory -replace: {} diff --git a/kurtosis-devnet/fileserver/main.star b/kurtosis-devnet/fileserver/main.star deleted file mode 100644 index 94b5dff887dde..0000000000000 --- a/kurtosis-devnet/fileserver/main.star +++ /dev/null @@ -1,53 +0,0 @@ -FILESERVER_HTTP_PORT_ID = "http" -FILESERVER_HTTP_PORT_NUM = 80 -FILESERVER_IMAGE = "nginx:latest" - - -def get_used_ports(): - used_ports = { - FILESERVER_HTTP_PORT_ID: PortSpec( - number=FILESERVER_HTTP_PORT_NUM, - ) - } - return used_ports - - -def run(plan, source_path, server_image=FILESERVER_IMAGE): - service_name = "fileserver" - config = get_fileserver_config( - plan = plan, - service_name = service_name, - source_path = source_path, - server_image = server_image, - ) - plan.add_service( - name = service_name, - config = config, - ) - return service_name - - -def get_fileserver_config(plan, service_name, 
source_path, server_image): - files = {} - - # Upload content to container - content_artifact = plan.upload_files( - src=source_path, - name="{}-content".format(service_name), - ) - files["/content"] = content_artifact - - # Add nginx config file - nginx_conf = plan.upload_files( - src="static_files/nginx", - name="{}-nginx-conf".format(service_name), - ) - files["/etc/nginx/conf.d"] = nginx_conf - - ports = get_used_ports() - return ServiceConfig( - image=server_image, - ports=ports, - cmd=["nginx", "-g", "daemon off;"], - files=files, - ) diff --git a/kurtosis-devnet/fileserver/static_files/nginx/default.conf b/kurtosis-devnet/fileserver/static_files/nginx/default.conf deleted file mode 100644 index 69932eb89d887..0000000000000 --- a/kurtosis-devnet/fileserver/static_files/nginx/default.conf +++ /dev/null @@ -1,8 +0,0 @@ -server { - listen 80; - server_name _; - root /content; - location / { - try_files $uri $uri/ =404; - } -} diff --git a/kurtosis-devnet/flash.yaml b/kurtosis-devnet/flash.yaml deleted file mode 100644 index b34eea0df02e1..0000000000000 --- a/kurtosis-devnet/flash.yaml +++ /dev/null @@ -1,110 +0,0 @@ -optimism_package: - faucet: - enabled: true - image: {{ localDockerImage "op-faucet" }} - chains: - op-kurtosis: - participants: - node0: - sequencer: true - el: - type: op-geth - el_builder: - type: op-rbuilder - cl_builder: - type: op-node - image: {{ localDockerImage "op-node" }} - mev_params: - enabled: true - cl: - type: op-node - image: {{ localDockerImage "op-node" }} - conductor_params: - image: {{ localDockerImage "op-conductor" }} - enabled: true - bootstrap: true - paused: true - admin: true - proxy: true - websocket_enabled: true - - node1: - sequencer: true - el: - type: op-reth - el_builder: - type: op-rbuilder - cl_builder: - type: op-node - image: {{ localDockerImage "op-node" }} - mev_params: - enabled: true - cl: - type: op-node - image: {{ localDockerImage "op-node" }} - conductor_params: - image: {{ localDockerImage 
"op-conductor" }} - enabled: true - paused: true - admin: true - proxy: true - websocket_enabled: true - - proxyd_params: - pprof_enabled: false - extra_params: [] - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - fund_dev_accounts: true - - flashblocks_websocket_proxy_params: - enabled: true - flashblocks_rpc_params: - type: op-reth - batcher_params: - image: {{ localDockerImage "op-batcher" }} - extra_params: [] - proposer_params: - image: {{ localDockerImage "op-proposer" }} - extra_params: [] - game_type: 1 - proposal_interval: 10m - challengers: - challenger: - enabled: true - image: {{ localDockerImage "op-challenger" }} - participants: "*" - cannon_prestates_url: {{ localPrestate.URL }} - cannon_trace_types: ["cannon", "permissioned"] - op_contract_deployer_params: - image: {{ localDockerImage "op-deployer" }} - l1_artifacts_locator: {{ localContractArtifacts "l1" }} - l2_artifacts_locator: {{ localContractArtifacts "l2" }} - overrides: - faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_mt64 }} - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - participants: - - el_type: geth - cl_type: teku - cl_image: consensys/teku:25.7.1 - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: | - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } diff --git a/kurtosis-devnet/foo-user.example.json b/kurtosis-devnet/foo-user.example.json deleted file mode 100644 index 7f7f5336d79ad..0000000000000 --- a/kurtosis-devnet/foo-user.example.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "interop": true, - "l2s": { - "2151908": { - "nodes": ["op-geth", 
"op-geth"] - }, - "2151909": { - "nodes": ["op-reth"] - } - }, - "overrides": { - "flags": { - "log_level": "--log.level=debug" - } - } -} \ No newline at end of file diff --git a/kurtosis-devnet/interop.yaml b/kurtosis-devnet/interop.yaml deleted file mode 100644 index 2295c963c6bb8..0000000000000 --- a/kurtosis-devnet/interop.yaml +++ /dev/null @@ -1,174 +0,0 @@ -{{- $local_images := dict - "op_node" (localDockerImage "op-node") - "op_batcher" (localDockerImage "op-batcher") - "op_challenger" (localDockerImage "op-challenger") - "op_conductor" (localDockerImage "op-conductor") - "op_proposer" (localDockerImage "op-proposer") - "op_deployer" (localDockerImage "op-deployer") - "op_supervisor" (localDockerImage "op-supervisor") - "op_faucet" (localDockerImage "op-faucet") - "op_interop_mon" (localDockerImage "op-interop-mon") --}} -{{- $urls := dict - "prestate" (localPrestate.URL) - "l1_artifacts" (localContractArtifacts "l1") - "l2_artifacts" (localContractArtifacts "l2") --}} -{{- $flags := dict - "log_level" "--log.level=info" - "log_format" "--log.format=logfmtms" - "interop_mempool_filtering" "--rollup.interopmempoolfiltering" - "experimental_sequencer_api" "--experimental.sequencer-api" --}} ---- -optimism_package: - faucet: - enabled: true - image: {{ $local_images.op_faucet }} - interop_mon: - enabled: true - image: {{ $local_images.op_interop_mon }} - superchains: - superchain: - enabled: true - supervisors: - supervisor: - superchain: superchain - image: {{ $local_images.op_supervisor }} - extra_params: - - {{ $flags.log_level }} - - {{ $flags.log_format }} - test-sequencers: - sequencer: - enabled: true - chains: - op-kurtosis1: - participants: - node0: &x-node - el: - type: op-geth - image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101602.1-rc.1" - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: - - {{ $flags.interop_mempool_filtering }} - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - 
max_mem: 0 - cl: - type: op-node - image: {{ $local_images.op_node }} - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: - - {{ $flags.log_format }} - - {{ $flags.experimental_sequencer_api }} - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - mev_params: - image: "" - builder_host: "" - builder_port: "" - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - isthmus_time_offset: 0 - jovian_time_offset: 0 - interop_time_offset: 0 - fund_dev_accounts: true - batcher_params: - image: {{ $local_images.op_batcher }} - extra_params: - - {{ $flags.log_level }} - - {{ $flags.log_format }} - proposer_params: - image: {{ $local_images.op_proposer }} - extra_params: - - {{ $flags.log_level }} - - {{ $flags.log_format }} - game_type: 1 - proposal_interval: 10m - op-kurtosis2: - participants: - node0: *x-node - network_params: - network: "kurtosis" - network_id: "2151909" - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - isthmus_time_offset: 0 - jovian_time_offset: 0 - interop_time_offset: 0 - fund_dev_accounts: true - batcher_params: - image: {{ $local_images.op_batcher }} - extra_params: - - {{ $flags.log_level }} - - {{ $flags.log_format }} - proposer_params: - image: {{ $local_images.op_proposer }} - extra_params: - - {{ $flags.log_level }} - - {{ $flags.log_format }} - game_type: 1 - proposal_interval: 10m - challengers: - challenger: - enabled: true - image: {{ $local_images.op_challenger }} - participants: "*" - cannon_prestates_url: {{ localPrestate.URL }} - cannon_trace_types: ["super-cannon", "super-permissioned"] - extra_params: - - {{ $flags.log_level }} - - {{ $flags.log_format }} - op_contract_deployer_params: - image: {{ $local_images.op_deployer }} - l1_artifacts_locator: {{ $urls.l1_artifacts }} - l2_artifacts_locator: {{ $urls.l2_artifacts }} - 
overrides: - faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_interop }} - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false - observability: - grafana_params: - dashboard_sources: - - github.com/ethereum-optimism/grafana-dashboards-public/resources - - github.com/op-rs/kona/docker/recipes/kona-node/grafana - - github.com/paradigmxyz/reth/etc/grafana - - github.com/geoknee/grafana-dashboards/ - - github.com/nonsense/op-stack-grafana-dashboards/resources -ethereum_package: - participants: - - el_type: geth - cl_type: teku - cl_image: consensys/teku:25.7.1 - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: | - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } diff --git a/kurtosis-devnet/jovian.yaml b/kurtosis-devnet/jovian.yaml deleted file mode 100644 index a0ab9bcb1b9c8..0000000000000 --- a/kurtosis-devnet/jovian.yaml +++ /dev/null @@ -1,86 +0,0 @@ -optimism_package: - faucet: - enabled: true - image: {{ localDockerImage "op-faucet" }} - chains: - op-kurtosis: - participants: - node0: - el: - type: op-geth - image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:v1.101602.1-rc.1" - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - cl: &x-node-cl - type: op-node - image: {{ localDockerImage "op-node" }} - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - 
isthmus_time_offset: 0 - jovian_time_offset: 0 - fund_dev_accounts: true - batcher_params: - image: {{ localDockerImage "op-batcher" }} - extra_params: [] - proposer_params: - image: {{ localDockerImage "op-proposer" }} - extra_params: [] - game_type: 1 - proposal_interval: 10m - challengers: - challenger: - enabled: true - image: {{ localDockerImage "op-challenger" }} - participants: "*" - cannon_prestates_url: {{ localPrestate.URL }} - cannon_trace_types: ["cannon", "permissioned"] - op_contract_deployer_params: - image: {{ localDockerImage "op-deployer" }} - l1_artifacts_locator: {{ localContractArtifacts "l1" }} - l2_artifacts_locator: {{ localContractArtifacts "l2" }} - overrides: - faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_mt64 }} - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - participants: - - el_type: geth - cl_type: teku - cl_image: consensys/teku:25.7.1 - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: | - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } diff --git a/kurtosis-devnet/justfile b/kurtosis-devnet/justfile deleted file mode 100644 index 9ba597891a525..0000000000000 --- a/kurtosis-devnet/justfile +++ /dev/null @@ -1,101 +0,0 @@ -import '../justfiles/prerequisites.just' - -KURTOSIS_PACKAGE := "./optimism-package-trampoline/" - -test: _prerequisites - go test --tags=testonly ./... 
- -_kurtosis-run PACKAGE_NAME ARG_FILE ENCLAVE: - kurtosis run {{PACKAGE_NAME}} --args-file {{ARG_FILE}} --enclave {{ENCLAVE}} --show-enclave-inspect=false --image-download=missing - -_prestate-build PATH='.': - docker buildx build --output {{PATH}} --progress plain -f ../op-program/Dockerfile.repro ../ - -_docker_build TAG TARGET CONTEXT DOCKERFILE *ARGS: _prerequisites - #!/usr/bin/env bash - # --load is needed to ensure the image ends up in the local registry - # --provenance=false is needed to make the build idempotent - docker buildx build \ - --load \ - --provenance=false \ - -t {{TAG}} \ - -f {{CONTEXT}}/{{DOCKERFILE}} \ - {{ if TARGET != '' { "--target " + TARGET } else { "" } }} \ - --build-arg GIT_COMMIT={git_commit} \ - --build-arg GIT_DATE={git_date} \ - {{ ARGS }} \ - {{CONTEXT}} - -_docker_build_stack TAG TARGET *ARGS: (_docker_build TAG TARGET "../" "ops/docker/op-stack-go/Dockerfile" ARGS) - -cannon-image TAG='cannon:devnet': (_docker_build_stack TAG "cannon-target") -da-server-image TAG='da-server:devnet': (_docker_build_stack TAG "da-server-target") -op-batcher-image TAG='op-batcher:devnet': (_docker_build_stack TAG "op-batcher-target") -op-challenger-image TAG='op-challenger:devnet': (_docker_build_stack TAG "op-challenger-target") -op-conductor-image TAG='op-conductor:devnet': (_docker_build_stack TAG "op-conductor-target") -op-deployer-image TAG='op-deployer:devnet': (_docker_build_stack TAG "op-deployer-target") -op-dispute-mon-image TAG='op-dispute-mon:devnet': (_docker_build_stack TAG "op-dispute-mon-target") -op-node-image TAG='op-node:devnet': (_docker_build_stack TAG "op-node-target") -op-program-image TAG='op-program:devnet': (_docker_build_stack TAG "op-program-target") -op-proposer-image TAG='op-proposer:devnet': (_docker_build_stack TAG "op-proposer-target") -op-supervisor-image TAG='op-supervisor:devnet': (_docker_build_stack TAG "op-supervisor-target") -op-wheel-image TAG='op-wheel:devnet': (_docker_build_stack TAG 
"op-wheel-target") -op-faucet-image TAG='op-faucet:devnet': (_docker_build_stack TAG "op-faucet-target") -op-interop-mon-image TAG='op-interop-mon:devnet': (_docker_build_stack TAG "op-interop-mon-target") - -op-program-builder-image TAG='op-program-builder:devnet': _prerequisites - just op-program-svc/op-program-svc {{TAG}} - - -# Devnet template recipe -devnet TEMPLATE_FILE DATA_FILE="" NAME="" PACKAGE=KURTOSIS_PACKAGE: _prerequisites - #!/usr/bin/env bash - export DEVNET_NAME={{NAME}} - if [ -z "{{NAME}}" ]; then - export DEVNET_NAME=`basename {{TEMPLATE_FILE}} .yaml` - if [ -n "{{DATA_FILE}}" ]; then - export DATA_FILE_NAME=`basename {{DATA_FILE}} .json` - export DEVNET_NAME="$DEVNET_NAME-$DATA_FILE_NAME" - fi - fi - export ENCL_NAME="$DEVNET_NAME"-devnet - export CONDUCTOR_CONFIG="tests/op-conductor-ops-$ENCL_NAME.toml" - go run cmd/main.go -kurtosis-package {{PACKAGE}} \ - -environment "tests/$ENCL_NAME.json" \ - -conductor-config "$CONDUCTOR_CONFIG" \ - -template "{{TEMPLATE_FILE}}" \ - -data "{{DATA_FILE}}" \ - -enclave "$ENCL_NAME" \ - && cat "tests/$ENCL_NAME.json" && if [ -f "$CONDUCTOR_CONFIG" ]; then cat "$CONDUCTOR_CONFIG"; fi - -devnet-test DEVNET *TEST: _prerequisites - #!/usr/bin/env bash - export TESTS=({{TEST}}) - # we need a timestamp in there to force kurtosis to not cache the test solely based on its name! - export ARGS=$(printf '%s\n' "${TESTS[@]}" | jq -R . | jq -s . 
| jq -s '{devnet: "{{DEVNET}}", timestamp: "{{datetime("%s")}}", tests: add}') - kurtosis run --enclave {{DEVNET}} \ - --show-enclave-inspect=false \ - ./tests/ "$ARGS" - -# Devnet recipes - -# Simple devnet -simple-devnet: (devnet "simple.yaml") - -# Interop devnet -interop-devnet: (devnet "interop.yaml") -interop-devnet-test: (devnet-test "interop-devnet" "interop-smoke-test.sh") - -# User devnet -user-devnet DATA_FILE: - {{just_executable()}} devnet "user.yaml" {{DATA_FILE}} {{file_stem(DATA_FILE)}} - -# Jovian devnet -jovian-devnet: (devnet "jovian.yaml") - -# Flashblocks devnet -flash-devnet: (devnet "flash.yaml") - -# subshells -enter-devnet DEVNET CHAIN='Ethereum' NODE_INDEX='0': _prerequisites - go run ../devnet-sdk/shell/cmd/enter/main.go --devnet kt://{{DEVNET}} --chain {{CHAIN}} --node-index {{NODE_INDEX}} diff --git a/kurtosis-devnet/op-program-svc/Dockerfile b/kurtosis-devnet/op-program-svc/Dockerfile deleted file mode 100644 index 3f11dbb3c113d..0000000000000 --- a/kurtosis-devnet/op-program-svc/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -ARG BASE_IMAGE=op-program-base:latest - -FROM golang:1.24.10-alpine3.20 AS builder - -COPY ./*.go /app/ -WORKDIR /app - -RUN go mod init op-program-svc -RUN go build -o op-program-svc . - - -FROM ${BASE_IMAGE} AS svc - -ARG GIT_COMMIT -ARG GIT_DATE - -ARG CANNON_VERSION=v0.0.0 -ARG OP_PROGRAM_VERSION=v0.0.0 - -ARG TARGETOS TARGETARCH - -WORKDIR /app - -# build cannon ahead of time -RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build just \ - -d /app/op-program \ - -f /app/op-program/repro.justfile \ - GOOS="$TARGETOS" \ - GOARCH="$TARGETARCH" \ - GIT_COMMIT="$GIT_COMMIT" \ - GIT_DATE="$GIT_DATE" \ - CANNON_VERSION="$CANNON_VERSION" \ - OP_PROGRAM_VERSION="$OP_PROGRAM_VERSION" \ - cannon - -COPY --from=builder /app/op-program-svc . 
-EXPOSE 8080 -CMD ["./op-program-svc"] diff --git a/kurtosis-devnet/op-program-svc/README.md b/kurtosis-devnet/op-program-svc/README.md deleted file mode 100644 index 091654c0d9ac4..0000000000000 --- a/kurtosis-devnet/op-program-svc/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# op-program-svc - -This small service is a temporary measure until we come up with a better way -of generating/serving prestate files based on chain information. - -# API - -The API is intentionally extremely simple: -- `POST /`: generate new prestates from provided inputs -- `GET /HASH.(bin.gz|json)`: get prestate data -- `GET /info.json`: get prestates mapping - -The idea is for this service to be basically a function -(chains_specs, deptsets) -> prestates. - -In the future, we definitely want to replace the implementation of that -function (see implementation notes below) - -## Trigger new build: - -Example using curl - -``` -$ curl -X POST -H "Content-Type: multipart/form-data" \ - -F "files[]=@rollup-2151908.json" \ - -F "files[]=@rollup-2151909.json" \ - -F "files[]=@genesis-2151908.json" \ - -F "files[]=@genesis-2151909.json" \ - -F "files[]=@depsets.json" \ - http://localhost:8080 -``` - -## Retrieve prestates mapping - -``` -$ curl -q http://localhost:8080/info.json -{ - "prestate_interop": "0x034731331d519c93fc0562643e0728c43f8e45a0af1160ad4c57c4e5141d2bbb", - "prestate_mt64": "0x0325bb0ca8521b468bb8234d8ba54b1b74db60e2b5bc75d0077a0fe2098b6b45" -} -``` - -## Implementation notes - -Unfortunately, op-program-client relies on embedded (using `//go:embed`) -configuration files to store unannounced chain configs. - -This means that in the context of devnets, we need to store the configs -(which are available only mid-deployment) into the **source tree** and -trigger a late build step. - -So effectively, we need to package the relevant part of the sources into -a container, deploy that one alongside the devnet, and run that build step -on demand. 
- -This is ugly, unsafe, easy to run a DOS against,... we need to do better. -But for now this is what we have. diff --git a/kurtosis-devnet/op-program-svc/build.go b/kurtosis-devnet/op-program-svc/build.go deleted file mode 100644 index 7da7a4a23d87b..0000000000000 --- a/kurtosis-devnet/op-program-svc/build.go +++ /dev/null @@ -1,174 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io" - "log" - "mime/multipart" - "path/filepath" - "strconv" - "strings" - "sync" - - "bufio" -) - -// MultipartUploadedFile adapts multipart.FileHeader to UploadedFile -type MultipartUploadedFile struct { - header *multipart.FileHeader -} - -func NewMultipartUploadedFile(header *multipart.FileHeader) *MultipartUploadedFile { - return &MultipartUploadedFile{header: header} -} - -func (f *MultipartUploadedFile) Open() (io.ReadCloser, error) { - return f.header.Open() -} - -func (f *MultipartUploadedFile) GetFilename() string { - return f.header.Filename -} - -type Builder struct { - appRoot string - configsDir string - buildDir string - buildCmd string - fs FS - cmdFactory CommandFactory -} - -func NewBuilder(appRoot, configsDir, buildDir, buildCmd string) *Builder { - return &Builder{ - appRoot: appRoot, - configsDir: configsDir, - buildDir: buildDir, - buildCmd: buildCmd, - fs: &DefaultFileSystem{}, - cmdFactory: &DefaultCommandFactory{}, - } -} - -func (b *Builder) SaveUploadedFiles(files []UploadedFile) error { - // Create configs directory if it doesn't exist - fullConfigsDir := b.fs.Join(b.appRoot, b.buildDir, b.configsDir) - if err := b.fs.MkdirAll(fullConfigsDir, 0755); err != nil { - return fmt.Errorf("failed to create config directory: %w", err) - } - - // Save the files - for _, file := range files { - reader, err := file.Open() - if err != nil { - return fmt.Errorf("failed to open file: %w", err) - } - defer reader.Close() - - destPath := b.fs.Join(fullConfigsDir, b.normalizeFilename(file.GetFilename())) - dst, err := b.fs.Create(destPath) - if err != nil { - return 
fmt.Errorf("failed to create destination file: %w", err) - } - defer dst.Close() - - if _, err := io.Copy(dst, reader); err != nil { - return fmt.Errorf("failed to save file: %w", err) - } - log.Printf("Saved file: %s", destPath) - } - - return nil -} - -func (b *Builder) ExecuteBuild() ([]byte, error) { - log.Printf("Starting build...") - cmdParts := strings.Fields(b.buildCmd) - cmd := b.cmdFactory.CreateCommand(cmdParts[0], cmdParts[1:]...) - - // Set working directory - cmd.SetDir(b.fs.Join(b.appRoot, b.buildDir)) - - // Create pipes for stdout and stderr - stdout, err := cmd.StdoutPipe() - if err != nil { - return nil, fmt.Errorf("failed to create stdout pipe: %w", err) - } - stderr, err := cmd.StderrPipe() - if err != nil { - return nil, fmt.Errorf("failed to create stderr pipe: %w", err) - } - - // Buffer to store complete output for error reporting - var output bytes.Buffer - output.WriteString("Build output:\n") - - // Start the command - if err := cmd.Start(); err != nil { - return nil, fmt.Errorf("failed to start build: %w", err) - } - - // Create a WaitGroup to wait for both stdout and stderr to be processed - var wg sync.WaitGroup - wg.Add(2) - - // Stream stdout - go func() { - defer wg.Done() - scanner := bufio.NewScanner(stdout) - for scanner.Scan() { - line := scanner.Text() - log.Printf("[build] %s", line) - output.WriteString(line + "\n") - } - }() - - // Stream stderr - go func() { - defer wg.Done() - scanner := bufio.NewScanner(stderr) - for scanner.Scan() { - line := scanner.Text() - log.Printf("[build][stderr] %s", line) - output.WriteString(line + "\n") - } - }() - - // Wait for both streams to complete - wg.Wait() - - // Wait for the command to complete - if err := cmd.Wait(); err != nil { - return output.Bytes(), fmt.Errorf("build failed: %w", err) - } - - log.Printf("Build completed successfully") - return output.Bytes(), nil -} - -// This is a convenience hack to natively support the file format of op-deployer -func (b *Builder) 
normalizeFilename(filename string) string { - // Get just the filename without directories - filename = filepath.Base(filename) - - // Check if filename matches PREFIX-NUMBER.json pattern - if parts := strings.Split(filename, "-"); len(parts) == 2 { - if numStr := strings.TrimSuffix(parts[1], ".json"); numStr != parts[1] { - // Check if the number part is actually numeric - if _, err := strconv.Atoi(numStr); err == nil { - // Handle specific cases - switch parts[0] { - case "genesis": - return fmt.Sprintf("%s-genesis-l2.json", numStr) - case "rollup": - return fmt.Sprintf("%s-rollup.json", numStr) - - } - // For all other cases, leave the filename unchanged - } - } - } - - return filename -} diff --git a/kurtosis-devnet/op-program-svc/build_test.go b/kurtosis-devnet/op-program-svc/build_test.go deleted file mode 100644 index 70a813f1f3b39..0000000000000 --- a/kurtosis-devnet/op-program-svc/build_test.go +++ /dev/null @@ -1,211 +0,0 @@ -package main - -import ( - "strings" - "testing" -) - -func TestSaveUploadedFiles(t *testing.T) { - tests := []struct { - name string - files []struct { - filename string - content []byte - } - shouldFail bool - }{ - { - name: "successful save", - files: []struct { - filename string - content []byte - }{ - { - filename: "test1.json", - content: []byte("test1 content"), - }, - { - filename: "test2.json", - content: []byte("test2 content"), - }, - }, - shouldFail: false, - }, - { - name: "filesystem error", - files: []struct { - filename string - content []byte - }{ - { - filename: "test1.json", - content: []byte("test1 content"), - }, - }, - shouldFail: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockFS := NewMockFS() - mockFS.ShouldFail = tt.shouldFail - - // Create mock uploaded files - files := make([]UploadedFile, len(tt.files)) - for i, f := range tt.files { - files[i] = NewMockUploadedFile(f.filename, f.content) - } - - b := &Builder{ - appRoot: "app", - configsDir: "configs", - 
buildDir: "build", - fs: mockFS, - } - - err := b.SaveUploadedFiles(files) - - if tt.shouldFail && err == nil { - t.Error("expected error but got none") - } - - if !tt.shouldFail { - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - // Verify correct directory was created - if len(mockFS.MkdirCalls) != 1 { - t.Errorf("expected 1 mkdir call, got %d", len(mockFS.MkdirCalls)) - } - - // Verify files were created - expectedCreateCalls := len(tt.files) - if len(mockFS.CreateCalls) != expectedCreateCalls { - t.Errorf("expected %d create calls, got %d", expectedCreateCalls, len(mockFS.CreateCalls)) - } - } - }) - } -} - -func TestExecuteBuild(t *testing.T) { - tests := []struct { - name string - stdout string - stderr string - shouldFail bool - }{ - { - name: "successful build", - stdout: "build successful\n", - stderr: "", - shouldFail: false, - }, - { - name: "build failure", - stdout: "", - stderr: "build failed\n", - shouldFail: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - mockRunner := &MockCommandRunner{ - ShouldFail: tt.shouldFail, - Stdout: tt.stdout, - Stderr: tt.stderr, - } - - mockFactory := &MockCommandFactory{ - Runner: mockRunner, - } - - mockFS := NewMockFS() - - b := &Builder{ - appRoot: "app", - buildDir: "build", - buildCmd: "make build", - cmdFactory: mockFactory, - fs: mockFS, - } - - output, err := b.ExecuteBuild() - - if tt.shouldFail && err == nil { - t.Error("expected error but got none") - } - - if !tt.shouldFail { - if err != nil { - t.Errorf("unexpected error: %v", err) - } - - if !mockRunner.StartCalled { - t.Error("Start was not called") - } - - if !mockRunner.WaitCalled { - t.Error("Wait was not called") - } - - if !strings.Contains(string(output), tt.stdout) { - t.Errorf("expected output to contain %q, got %q", tt.stdout, string(output)) - } - } - }) - } -} - -func TestNormalizeFilename(t *testing.T) { - tests := []struct { - name string - input string - expected string - }{ - { - name: 
"standard format - unchanged", - input: "prefix-123.json", - expected: "prefix-123.json", - }, - { - name: "genesis format", - input: "genesis-123.json", - expected: "123-genesis-l2.json", - }, - { - name: "rollup format", - input: "rollup-456.json", - expected: "456-rollup.json", - }, - { - name: "no number", - input: "test.json", - expected: "test.json", - }, - { - name: "invalid number", - input: "prefix-abc.json", - expected: "prefix-abc.json", - }, - { - name: "no json extension", - input: "prefix-123.txt", - expected: "prefix-123.txt", - }, - } - - b := &Builder{} - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - result := b.normalizeFilename(tt.input) - if result != tt.expected { - t.Errorf("expected %q, got %q", tt.expected, result) - } - }) - } -} diff --git a/kurtosis-devnet/op-program-svc/defaults.go b/kurtosis-devnet/op-program-svc/defaults.go deleted file mode 100644 index baf756a0d423d..0000000000000 --- a/kurtosis-devnet/op-program-svc/defaults.go +++ /dev/null @@ -1,70 +0,0 @@ -package main - -import ( - "io" - "io/fs" - "os" - "os/exec" - "path/filepath" -) - -// osFile wraps os.File to implement File -type osFile struct { - *os.File -} - -func (f *osFile) Readdir(count int) ([]fs.FileInfo, error) { - return f.File.Readdir(count) -} - -// DefaultFileSystem implements testutils.FS using actual OS calls -type DefaultFileSystem struct{} - -func (fs *DefaultFileSystem) Open(name string) (File, error) { - file, err := os.Open(name) - if err != nil { - return nil, err - } - return &osFile{File: file}, nil -} - -func (fs *DefaultFileSystem) ReadDir(name string) ([]fs.DirEntry, error) { - return os.ReadDir(name) -} - -func (fs *DefaultFileSystem) ReadFile(name string) ([]byte, error) { - return os.ReadFile(name) -} - -func (fs *DefaultFileSystem) MkdirAll(path string, perm os.FileMode) error { - return os.MkdirAll(path, perm) -} - -func (fs *DefaultFileSystem) Create(name string) (io.WriteCloser, error) { - return os.Create(name) -} - 
-func (fs *DefaultFileSystem) Join(elem ...string) string { - return filepath.Join(elem...) -} - -func (fs *DefaultFileSystem) Stat(name string) (fs.FileInfo, error) { - return os.Stat(name) -} - -// commandWrapper wraps exec.Cmd to implement CommandRunner -type commandWrapper struct { - *exec.Cmd -} - -func (c *commandWrapper) SetDir(dir string) { - c.Cmd.Dir = dir -} - -// DefaultCommandFactory implements testutils.CommandFactory using actual OS exec -type DefaultCommandFactory struct{} - -func (f *DefaultCommandFactory) CreateCommand(name string, args ...string) CommandRunner { - cmd := exec.Command(name, args...) - return &commandWrapper{Cmd: cmd} -} diff --git a/kurtosis-devnet/op-program-svc/fs.go b/kurtosis-devnet/op-program-svc/fs.go deleted file mode 100644 index c79ef00773d69..0000000000000 --- a/kurtosis-devnet/op-program-svc/fs.go +++ /dev/null @@ -1,330 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "errors" - "fmt" - "io" - "io/fs" - "log" - "net/http" - "os" - "path/filepath" - "regexp" - "strings" - "sync" - "time" -) - -const ( - depsetsFilename = "depsets.json" - infoFilename = "info.json" -) - -// proofFileSystem implements http.FileSystem, mapping hash-based virtual paths to actual files -type proofFileSystem struct { - root string - fs FS // Use our consolidated FS interface - proofFiles map[string]string // hash -> variable part mapping - proofMutex sync.RWMutex -} - -// proofFile implements http.File, representing a virtual file in our proof filesystem -type proofFile struct { - file File -} - -// infoFile implements http.File for the virtual info.json file -type infoFile struct { - *bytes.Reader - content []byte -} - -func newInfoFile(proofFiles map[string]string) *infoFile { - // Create inverted map - invertedMap := make(map[string]string) - for hash, variablePart := range proofFiles { - // Replace dashes with underscores in the key - key := fmt.Sprintf("prestate%s", variablePart) - key = strings.ReplaceAll(key, "-", "_") - 
invertedMap[key] = hash - } - - // Convert to JSON - content, err := json.MarshalIndent(invertedMap, "", " ") - if err != nil { - // Fallback to empty JSON object if marshaling fails - content = []byte("{}") - } - - return &infoFile{ - Reader: bytes.NewReader(content), - content: content, - } -} - -func (f *infoFile) Close() error { - return nil -} - -func (f *infoFile) Readdir(count int) ([]fs.FileInfo, error) { - return nil, fmt.Errorf("not a directory") -} - -func (f *infoFile) Stat() (fs.FileInfo, error) { - return virtualFileInfo{ - name: infoFilename, - size: int64(len(f.content)), - mode: 0644, - modTime: time.Now(), - isDir: false, - }, nil -} - -func (f *proofFile) Close() error { - return f.file.Close() -} - -func (f *proofFile) Read(p []byte) (n int, err error) { - return f.file.Read(p) -} - -func (f *proofFile) Seek(offset int64, whence int) (int64, error) { - return f.file.(io.Seeker).Seek(offset, whence) -} - -func (f *proofFile) Readdir(count int) ([]fs.FileInfo, error) { - // For actual files, we don't support directory listing - return nil, fmt.Errorf("not a directory") -} - -func (f *proofFile) Stat() (fs.FileInfo, error) { - return f.file.(fs.File).Stat() -} - -// proofDir implements http.File for the root directory -type proofDir struct { - *proofFileSystem - pos int -} - -func (d *proofDir) Close() error { - return nil -} - -func (d *proofDir) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("cannot read a directory") -} - -func (d *proofDir) Seek(offset int64, whence int) (int64, error) { - return 0, fmt.Errorf("cannot seek a directory") -} - -func (d *proofDir) Readdir(count int) ([]fs.FileInfo, error) { - d.proofMutex.RLock() - defer d.proofMutex.RUnlock() - - // Calculate total number of entries - totalEntries := len(d.proofFiles)*2 + 1 // hash.json, hash.bin.gz files + info.json - - // If we've already read all entries - if d.pos >= totalEntries { - if count <= 0 { - return nil, nil - } - return nil, io.EOF - } - - // Convert 
hashes to virtual file entries - var entries []fs.FileInfo - hashes := make([]string, 0, len(d.proofFiles)) - for hash := range d.proofFiles { - hashes = append(hashes, hash) - } - - start := d.pos - end := start + count - if count <= 0 || end > totalEntries { - end = totalEntries - } - - for i := start; i < end; i++ { - // Special case for info.json (second to last entry) - if i == len(d.proofFiles)*2 { - entries = append(entries, virtualFileInfo{ - name: infoFilename, - size: 0, // Size will be determined when actually opening the file - mode: 0644, - modTime: time.Now(), - isDir: false, - }) - continue - } - - hash := hashes[i/2] - isJSON := i%2 == 0 - - var name string - if isJSON { - name = hash + ".json" - } else { - name = hash + ".bin.gz" - } - - // Create a virtual file info - entries = append(entries, virtualFileInfo{ - name: name, - size: 0, // Size will be determined when actually opening the file - mode: 0644, - modTime: time.Now(), - isDir: false, - }) - } - - d.pos = end - return entries, nil -} - -func (d *proofDir) Stat() (fs.FileInfo, error) { - return virtualFileInfo{ - name: ".", - size: 0, - mode: 0755, - modTime: time.Now(), - isDir: true, - }, nil -} - -// virtualFileInfo implements fs.FileInfo for our virtual files -type virtualFileInfo struct { - name string - size int64 - mode fs.FileMode - modTime time.Time - isDir bool -} - -func (v virtualFileInfo) Name() string { return v.name } -func (v virtualFileInfo) Size() int64 { return v.size } -func (v virtualFileInfo) Mode() fs.FileMode { return v.mode } -func (v virtualFileInfo) ModTime() time.Time { return v.modTime } -func (v virtualFileInfo) IsDir() bool { return v.isDir } -func (v virtualFileInfo) Sys() interface{} { return nil } - -func newProofFileSystem(root string) *proofFileSystem { - return &proofFileSystem{ - root: root, - fs: &DefaultFileSystem{}, - proofFiles: make(map[string]string), - } -} - -// SetFS allows replacing the filesystem implementation, primarily for testing -func 
(fs *proofFileSystem) SetFS(newFS FS) { - fs.proofMutex.Lock() - defer fs.proofMutex.Unlock() - fs.fs = newFS -} - -func (fs *proofFileSystem) Open(name string) (http.File, error) { - if name == "/" || name == "" { - return &proofDir{proofFileSystem: fs}, nil - } - - // Clean the path and remove leading slash - name = strings.TrimPrefix(filepath.Clean(name), "/") - - // Special case for info.json - if name == infoFilename { - fs.proofMutex.RLock() - defer fs.proofMutex.RUnlock() - return newInfoFile(fs.proofFiles), nil - } - - fs.proofMutex.RLock() - defer fs.proofMutex.RUnlock() - - var targetFile string - if strings.HasSuffix(name, ".json") { - hash := strings.TrimSuffix(name, ".json") - if variablePart, ok := fs.proofFiles[hash]; ok { - targetFile = fmt.Sprintf("prestate-proof%s.json", variablePart) - } - } else if strings.HasSuffix(name, ".bin.gz") { - hash := strings.TrimSuffix(name, ".bin.gz") - if variablePart, ok := fs.proofFiles[hash]; ok { - targetFile = fmt.Sprintf("prestate%s.bin.gz", variablePart) - } - } - - if targetFile == "" { - return nil, fs.Error("file not found") - } - - file, err := fs.fs.Open(fs.fs.Join(fs.root, targetFile)) - if err != nil { - return nil, err - } - - return &proofFile{file: file}, nil -} - -func (fs *proofFileSystem) scanProofFiles() error { - fs.proofMutex.Lock() - defer fs.proofMutex.Unlock() - - // Clear existing mappings - fs.proofFiles = make(map[string]string) - - // Read directory entries - entries, err := fs.fs.ReadDir(fs.root) - if err != nil { - return fmt.Errorf("failed to read proofs directory: %w", err) - } - - // Regexp for matching prestate-proof files and extracting the variable part - proofRegexp := regexp.MustCompile(`^prestate-proof(.*)\.json$`) - - for _, entry := range entries { - log.Printf("entry: %s", entry.Name()) - if entry.IsDir() { - log.Printf("entry is a directory: %s", entry.Name()) - continue - } - - matches := proofRegexp.FindStringSubmatch(entry.Name()) - if matches == nil { - 
log.Printf("Warning: ignoring non-proof file %s", entry.Name()) - continue - } - - // matches[1] contains the variable part (including the leading hyphen if present) - variablePart := matches[1] - - // Read and parse the JSON file - data, err := fs.fs.ReadFile(fs.fs.Join(fs.root, entry.Name())) - if err != nil { - log.Printf("Warning: failed to read proof file %s: %v", entry.Name(), err) - continue - } - - var proofData struct { - Pre string `json:"pre"` - } - if err := json.Unmarshal(data, &proofData); err != nil { - log.Printf("Warning: failed to parse proof file %s: %v", entry.Name(), err) - continue - } - - // Store the mapping from hash to variable part of filename - fs.proofFiles[proofData.Pre] = variablePart - log.Printf("Mapped hash %s to proof file pattern%s", proofData.Pre, variablePart) - } - - return nil -} - -func (fs *proofFileSystem) Error(msg string) error { - return &os.PathError{Op: "open", Path: "virtual path", Err: errors.New(msg)} -} diff --git a/kurtosis-devnet/op-program-svc/fs_test.go b/kurtosis-devnet/op-program-svc/fs_test.go deleted file mode 100644 index 67a1f204dfb3d..0000000000000 --- a/kurtosis-devnet/op-program-svc/fs_test.go +++ /dev/null @@ -1,140 +0,0 @@ -package main - -import ( - "bytes" - "encoding/json" - "io" - "testing" -) - -func TestProofFileSystem(t *testing.T) { - // Create mock filesystem - mockfs := NewMockFS() - - // Add test files - proofData := map[string]interface{}{ - "pre": "hash123", - } - proofJSON, _ := json.Marshal(proofData) - - mockfs.Files["/proofs/prestate-proof-test.json"] = NewMockFile( - "prestate-proof-test.json", - proofJSON, - ) - mockfs.Files["/proofs/prestate-test.bin.gz"] = NewMockFile( - "prestate-test.bin.gz", - []byte("mock binary data"), - ) - - // Create proof filesystem and set mock fs - pfs := newProofFileSystem("/proofs") - pfs.SetFS(mockfs) - - // Test scanning proof files - t.Run("ScanProofFiles", func(t *testing.T) { - err := pfs.scanProofFiles() - if err != nil { - 
t.Errorf("scanProofFiles failed: %v", err) - } - - // Verify mapping was created - if mapping, ok := pfs.proofFiles["hash123"]; !ok || mapping != "-test" { - t.Errorf("Expected mapping for hash123 to be -test, got %v", mapping) - } - }) - - t.Run("OpenJSONFile", func(t *testing.T) { - file, err := pfs.Open("/hash123.json") - if err != nil { - t.Errorf("Failed to open JSON file: %v", err) - } - defer file.Close() - - // Read contents - contents, err := io.ReadAll(file) - if err != nil { - t.Errorf("Failed to read file contents: %v", err) - } - - if !bytes.Equal(contents, proofJSON) { - t.Errorf("File contents don't match expected") - } - }) - - t.Run("OpenBinaryFile", func(t *testing.T) { - file, err := pfs.Open("/hash123.bin.gz") - if err != nil { - t.Errorf("Failed to open binary file: %v", err) - } - defer file.Close() - - contents, err := io.ReadAll(file) - if err != nil { - t.Errorf("Failed to read file contents: %v", err) - } - - if !bytes.Equal(contents, []byte("mock binary data")) { - t.Errorf("File contents don't match expected") - } - }) - - t.Run("OpenInfoJSONFile", func(t *testing.T) { - file, err := pfs.Open("/info.json") - if err != nil { - t.Errorf("Failed to open info.json file: %v", err) - } - defer file.Close() - - // Read contents - contents, err := io.ReadAll(file) - if err != nil { - t.Errorf("Failed to read file contents: %v", err) - } - - // Verify the contents contain the inverted map - var infoData map[string]string - err = json.Unmarshal(contents, &infoData) - if err != nil { - t.Errorf("Failed to parse info.json contents: %v", err) - } - - // Check that the key has dashes replaced with underscores - expectedKey := "prestate_test" - if hash, ok := infoData[expectedKey]; !ok || hash != "hash123" { - t.Errorf("Expected info.json to contain mapping from %s to hash123, got %v", expectedKey, hash) - } - }) - - t.Run("OpenNonExistentFile", func(t *testing.T) { - _, err := pfs.Open("/nonexistent.json") - if err == nil { - t.Error("Expected error 
opening non-existent file") - } - }) - - t.Run("ListDirectory", func(t *testing.T) { - dir, err := pfs.Open("/") - if err != nil { - t.Errorf("Failed to open root directory: %v", err) - } - defer dir.Close() - - files, err := dir.Readdir(-1) - if err != nil { - t.Errorf("Failed to read directory: %v", err) - } - - // We expect both .json and .bin.gz files for hash123, plus info.json - if len(files) != 3 { - t.Errorf("Expected 3 files, got %d", len(files)) - } - - // Verify info.json is included in the directory listing - for _, file := range files { - if file.Name() == "info.json" { - return - } - } - t.Error("info.json not found in directory listing") - }) -} diff --git a/kurtosis-devnet/op-program-svc/interfaces.go b/kurtosis-devnet/op-program-svc/interfaces.go deleted file mode 100644 index 0d93ecb8349ed..0000000000000 --- a/kurtosis-devnet/op-program-svc/interfaces.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "io" - "io/fs" - "os" -) - -// File interface abstracts file operations -type File interface { - fs.File - io.Seeker - Readdir(count int) ([]fs.FileInfo, error) -} - -// FS defines the interface for all filesystem operations -type FS interface { - // Core FS operations - Open(name string) (File, error) - ReadDir(name string) ([]fs.DirEntry, error) - ReadFile(name string) ([]byte, error) - Join(elem ...string) string - Stat(name string) (fs.FileInfo, error) - - // Additional FileSystem operations - MkdirAll(path string, perm os.FileMode) error - Create(name string) (io.WriteCloser, error) -} - -// UploadedFile represents a file that has been uploaded -type UploadedFile interface { - Open() (io.ReadCloser, error) - GetFilename() string -} - -// CommandRunner abstracts command execution for testing -type CommandRunner interface { - Start() error - Wait() error - StdoutPipe() (io.ReadCloser, error) - StderrPipe() (io.ReadCloser, error) - SetDir(dir string) -} - -// CommandFactory creates commands -type CommandFactory interface { - 
CreateCommand(name string, args ...string) CommandRunner -} diff --git a/kurtosis-devnet/op-program-svc/justfile b/kurtosis-devnet/op-program-svc/justfile deleted file mode 100644 index 3c980fc335feb..0000000000000 --- a/kurtosis-devnet/op-program-svc/justfile +++ /dev/null @@ -1,5 +0,0 @@ -op-program-base TAG='op-program-base:latest': - docker buildx build -f ../../op-program/Dockerfile.repro --target=src -t {{TAG}} ../.. - -op-program-svc TAG='op-program-svc:latest': op-program-base - docker buildx build -f Dockerfile -t {{TAG}} . diff --git a/kurtosis-devnet/op-program-svc/main.go b/kurtosis-devnet/op-program-svc/main.go deleted file mode 100644 index c267d7ec8b598..0000000000000 --- a/kurtosis-devnet/op-program-svc/main.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "flag" - "fmt" - "log" - "net/http" - "path/filepath" -) - -var ( - flagAppRoot = flag.String("app-root", "/app", "Root directory for the application") - flagConfigsDir = flag.String("configs-dir", "chainconfig/configs", "Directory for config files (relative to build-dir)") - flagBuildDir = flag.String("build-dir", "op-program", "Directory where the build command will be executed (relative to app-root)") - flagBuildCmd = flag.String("build-cmd", "just -f repro.justfile build-current", "Build command to execute") - flagPort = flag.Int("port", 8080, "Port to listen on") -) - -func main() { - flag.Parse() - - srv := createServer() - - handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - switch r.Method { - case http.MethodGet: - http.FileServer(srv.proofFS).ServeHTTP(w, r) - case http.MethodPost: - srv.handleUpload(w, r) - default: - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - } - }) - // Set up routes - http.HandleFunc("/", handler) - - log.Printf("Starting server on :%d with:", srv.port) - log.Printf(" app-root: %s", srv.appRoot) - log.Printf(" configs-dir: %s", filepath.Join(srv.appRoot, srv.buildDir, srv.configsDir)) - log.Printf(" 
build-dir: %s", filepath.Join(srv.appRoot, srv.buildDir)) - log.Printf(" build-cmd: %s", srv.buildCmd) - log.Printf(" proofs-dir: %s", filepath.Join(srv.appRoot, srv.buildDir, "bin")) - - if err := http.ListenAndServe(fmt.Sprintf(":%d", srv.port), nil); err != nil { - log.Fatalf("Failed to start server: %v", err) - } -} diff --git a/kurtosis-devnet/op-program-svc/mocks.go b/kurtosis-devnet/op-program-svc/mocks.go deleted file mode 100644 index 8fa108cb0c873..0000000000000 --- a/kurtosis-devnet/op-program-svc/mocks.go +++ /dev/null @@ -1,285 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io" - "io/fs" - "net/http" - "os" - "path/filepath" - "strings" - "time" -) - -// MockFile implements both File and fs.FileInfo interfaces for testing -type MockFile struct { - name string - contents []byte - pos int64 - isDir bool -} - -func NewMockFile(name string, contents []byte) *MockFile { - return &MockFile{ - name: name, - contents: contents, - } -} - -func (m *MockFile) Close() error { return nil } -func (m *MockFile) Read(p []byte) (n int, err error) { - if m.pos >= int64(len(m.contents)) { - return 0, io.EOF - } - n = copy(p, m.contents[m.pos:]) - m.pos += int64(n) - return n, nil -} - -func (m *MockFile) Seek(offset int64, whence int) (int64, error) { - var abs int64 - switch whence { - case io.SeekStart: - abs = offset - case io.SeekCurrent: - abs = m.pos + offset - case io.SeekEnd: - abs = int64(len(m.contents)) + offset - default: - return 0, fmt.Errorf("invalid whence") - } - if abs < 0 { - return 0, fmt.Errorf("negative position") - } - m.pos = abs - return abs, nil -} - -func (m *MockFile) Stat() (fs.FileInfo, error) { return m, nil } -func (m *MockFile) Name() string { return m.name } -func (m *MockFile) Size() int64 { return int64(len(m.contents)) } -func (m *MockFile) Mode() fs.FileMode { return 0644 } -func (m *MockFile) ModTime() time.Time { return time.Now() } -func (m *MockFile) IsDir() bool { return m.isDir } -func (m *MockFile) Sys() interface{} { 
return nil } -func (m *MockFile) Readdir(count int) ([]fs.FileInfo, error) { - return nil, fmt.Errorf("not implemented") -} - -// MockFS implements FS interface for testing -type MockFS struct { - Files map[string]*MockFile - ShouldFail bool - StatFailPaths map[string]bool // Paths that should fail for Stat - JoinCalls [][]string - MkdirCalls []string - CreateCalls []string -} - -func NewMockFS() *MockFS { - return &MockFS{ - Files: make(map[string]*MockFile), - StatFailPaths: make(map[string]bool), - JoinCalls: make([][]string, 0), - MkdirCalls: make([]string, 0), - CreateCalls: make([]string, 0), - } -} - -// FS interface methods -func (m *MockFS) Open(name string) (File, error) { - if m.ShouldFail { - return nil, fmt.Errorf("mock open error") - } - if file, ok := m.Files[name]; ok { - file.pos = 0 // Reset position for new reads - return file, nil - } - return nil, fs.ErrNotExist -} - -func (m *MockFS) ReadDir(name string) ([]fs.DirEntry, error) { - if m.ShouldFail { - return nil, fmt.Errorf("mock readdir error") - } - var entries []fs.DirEntry - for path, file := range m.Files { - if filepath.Dir(path) == name { - entries = append(entries, fs.FileInfoToDirEntry(file)) - } - } - return entries, nil -} - -func (m *MockFS) ReadFile(name string) ([]byte, error) { - if m.ShouldFail { - return nil, fmt.Errorf("mock readfile error") - } - if file, ok := m.Files[name]; ok { - return file.contents, nil - } - return nil, fs.ErrNotExist -} - -// FileSystem interface methods -func (m *MockFS) MkdirAll(path string, perm os.FileMode) error { - if m.ShouldFail { - return fmt.Errorf("mock mkdir error") - } - m.MkdirCalls = append(m.MkdirCalls, path) - return nil -} - -func (m *MockFS) Create(name string) (io.WriteCloser, error) { - if m.ShouldFail { - return nil, fmt.Errorf("mock create error") - } - m.CreateCalls = append(m.CreateCalls, name) - return &MockWriteCloser{}, nil -} - -func (m *MockFS) Join(elem ...string) string { - m.JoinCalls = append(m.JoinCalls, elem) - 
return filepath.Join(elem...) -} - -func (m *MockFS) Stat(name string) (fs.FileInfo, error) { - if m.ShouldFail { - return nil, fmt.Errorf("mock stat error") - } - if m.StatFailPaths[name] { - return nil, fmt.Errorf("file not found: %s", name) - } - return m.Files[name], nil -} - -// MockWriteCloser implements io.WriteCloser for testing -type MockWriteCloser struct { - bytes.Buffer -} - -func (m *MockWriteCloser) Close() error { - return nil -} - -// MockUploadedFile implements UploadedFile for testing -type MockUploadedFile struct { - filename string - content []byte -} - -func NewMockUploadedFile(filename string, content []byte) *MockUploadedFile { - return &MockUploadedFile{ - filename: filename, - content: content, - } -} - -func (m *MockUploadedFile) Open() (io.ReadCloser, error) { - return io.NopCloser(bytes.NewReader(m.content)), nil -} - -func (m *MockUploadedFile) GetFilename() string { - return m.filename -} - -// MockCommandRunner implements CommandRunner for testing -type MockCommandRunner struct { - StartCalled bool - WaitCalled bool - ShouldFail bool - Stdout string - Stderr string - Dir string -} - -func (m *MockCommandRunner) Start() error { - if m.ShouldFail { - return fmt.Errorf("mock start error") - } - m.StartCalled = true - return nil -} - -func (m *MockCommandRunner) Wait() error { - if m.ShouldFail { - return fmt.Errorf("mock wait error") - } - m.WaitCalled = true - return nil -} - -func (m *MockCommandRunner) StdoutPipe() (io.ReadCloser, error) { - return io.NopCloser(strings.NewReader(m.Stdout)), nil -} - -func (m *MockCommandRunner) StderrPipe() (io.ReadCloser, error) { - return io.NopCloser(strings.NewReader(m.Stderr)), nil -} - -func (m *MockCommandRunner) SetDir(dir string) { - m.Dir = dir -} - -// MockCommandFactory implements CommandFactory for testing -type MockCommandFactory struct { - Runner *MockCommandRunner -} - -func (f *MockCommandFactory) CreateCommand(name string, args ...string) CommandRunner { - return f.Runner -} - -// 
MockProofFS is a mock implementation of ProofFS -type MockProofFS struct { - scanProofFilesFn func() error - fs *MockFS -} - -func NewMockProofFS() *MockProofFS { - return &MockProofFS{ - fs: NewMockFS(), - } -} - -func (m *MockProofFS) scanProofFiles() error { - if m.scanProofFilesFn != nil { - return m.scanProofFilesFn() - } - return nil -} - -func (m *MockProofFS) Open(name string) (http.File, error) { - file, err := m.fs.Open(name) - if err != nil { - return nil, err - } - // MockFile implements http.File (including Seek) - return file.(http.File), nil -} - -// AddFile adds a file to the mock filesystem -func (m *MockProofFS) AddFile(name string, contents []byte) { - m.fs.Files[name] = NewMockFile(name, contents) -} - -// MockBuilder is a mock implementation of BuildSystem -type MockBuilder struct { - saveUploadedFilesFn func(files []UploadedFile) error - executeBuildFn func() ([]byte, error) -} - -func (m *MockBuilder) SaveUploadedFiles(files []UploadedFile) error { - if m.saveUploadedFilesFn != nil { - return m.saveUploadedFilesFn(files) - } - return nil -} - -func (m *MockBuilder) ExecuteBuild() ([]byte, error) { - if m.executeBuildFn != nil { - return m.executeBuildFn() - } - return []byte("mock build output"), nil -} diff --git a/kurtosis-devnet/op-program-svc/server.go b/kurtosis-devnet/op-program-svc/server.go deleted file mode 100644 index ad8e16a15b52e..0000000000000 --- a/kurtosis-devnet/op-program-svc/server.go +++ /dev/null @@ -1,158 +0,0 @@ -package main - -import ( - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "log" - "mime/multipart" - "net/http" - "path/filepath" - "sync" -) - -// ProofFS represents the interface for the proof filesystem -type ProofFS interface { - http.FileSystem - scanProofFiles() error -} - -// BuildSystem represents the interface for the build system -type BuildSystem interface { - SaveUploadedFiles(files []UploadedFile) error - ExecuteBuild() ([]byte, error) -} - -type server struct { - appRoot string - configsDir 
string - buildDir string - buildCmd string - port int - lastBuildHash string - buildMutex sync.Mutex - proofFS ProofFS - builder BuildSystem -} - -func createServer() *server { - srv := &server{ - appRoot: *flagAppRoot, - configsDir: *flagConfigsDir, - buildDir: *flagBuildDir, - buildCmd: *flagBuildCmd, - port: *flagPort, - } - - // Initialize the proof filesystem - proofsDir := filepath.Join(srv.appRoot, srv.buildDir, "bin") - proofFS := newProofFileSystem(proofsDir) - srv.proofFS = proofFS - - // Initialize the builder - builder := NewBuilder(srv.appRoot, srv.configsDir, srv.buildDir, srv.buildCmd) - srv.builder = builder - - return srv -} - -func (s *server) handleUpload(w http.ResponseWriter, r *http.Request) { - if r.Method != http.MethodPost { - http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) - return - } - - log.Printf("Received upload request from %s", r.RemoteAddr) - - multipartFiles, currentHash, err := s.processMultipartForm(r) - if err != nil { - http.Error(w, err.Error(), http.StatusBadRequest) - return - } - - s.buildMutex.Lock() - defer s.buildMutex.Unlock() - - // Check if we need to rebuild - if currentHash == s.lastBuildHash { - log.Printf("Hash matches last build, skipping") - w.WriteHeader(http.StatusNotModified) - fmt.Fprintf(w, "Files unchanged, skipping build") - return - } - - log.Printf("Hash differs from last build (%s), proceeding with build", s.lastBuildHash) - - // Convert multipart files to UploadedFile interface - files := make([]UploadedFile, len(multipartFiles)) - for i, f := range multipartFiles { - files[i] = NewMultipartUploadedFile(f) - } - - // Save the files using the builder - if err := s.builder.SaveUploadedFiles(files); err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) - return - } - - // Execute the build - out, err := s.builder.ExecuteBuild() - if err != nil { - http.Error(w, fmt.Sprintf("%v\nOutput: %s", err, out), http.StatusInternalServerError) - return - } - - // After 
successful build, scan for new proof files - if err := s.proofFS.scanProofFiles(); err != nil { - log.Printf("Warning: failed to scan proof files: %v", err) - } - - // Update the last successful build hash - s.lastBuildHash = currentHash - - log.Printf("Build successful, last build hash: %s", currentHash) - w.WriteHeader(http.StatusOK) -} - -func (s *server) processMultipartForm(r *http.Request) ([]*multipart.FileHeader, string, error) { - // Parse the multipart form - if err := r.ParseMultipartForm(1 << 30); err != nil { // 1GB max memory - return nil, "", fmt.Errorf("failed to parse form: %w", err) - } - - // Get uploaded files - files := r.MultipartForm.File["files[]"] - if len(files) == 0 { - return nil, "", fmt.Errorf("no files uploaded") - } - - log.Printf("Processing %d files:", len(files)) - for _, fileHeader := range files { - log.Printf(" - %s (size: %d bytes)", fileHeader.Filename, fileHeader.Size) - } - - // Calculate hash of all files - hash, err := s.calculateFilesHash(files) - if err != nil { - return nil, "", fmt.Errorf("failed to calculate files hash: %w", err) - } - - return files, hash, nil -} - -func (s *server) calculateFilesHash(files []*multipart.FileHeader) (string, error) { - hasher := sha256.New() - for _, fileHeader := range files { - file, err := fileHeader.Open() - if err != nil { - return "", fmt.Errorf("failed to open file: %w", err) - } - if _, err := io.Copy(hasher, file); err != nil { - file.Close() - return "", fmt.Errorf("failed to hash file: %w", err) - } - file.Close() - } - return hex.EncodeToString(hasher.Sum(nil)), nil -} diff --git a/kurtosis-devnet/op-program-svc/server_test.go b/kurtosis-devnet/op-program-svc/server_test.go deleted file mode 100644 index 5831e37cfa096..0000000000000 --- a/kurtosis-devnet/op-program-svc/server_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package main - -import ( - "bytes" - "fmt" - "io" - "mime/multipart" - "net/http" - "net/http/httptest" - "strings" - "testing" -) - -func createTestServer(t 
*testing.T) (*server, *MockProofFS, *MockBuilder) { - t.Helper() - mockProofFS := NewMockProofFS() - mockBuilder := &MockBuilder{} - - srv := &server{ - appRoot: "test-root", - configsDir: "test-configs", - buildDir: "test-build", - buildCmd: "test-cmd", - port: 8080, - proofFS: mockProofFS, - builder: mockBuilder, - } - - return srv, mockProofFS, mockBuilder -} - -func createMultipartRequest(t *testing.T, files map[string][]byte) (*http.Request, error) { - t.Helper() - body := &bytes.Buffer{} - writer := multipart.NewWriter(body) - - for filename, content := range files { - part, err := writer.CreateFormFile("files[]", filename) - if err != nil { - return nil, err - } - if _, err := io.Copy(part, bytes.NewReader(content)); err != nil { - return nil, err - } - } - - if err := writer.Close(); err != nil { - return nil, err - } - - req := httptest.NewRequest("POST", "/upload", body) - req.Header.Set("Content-Type", writer.FormDataContentType()) - return req, nil -} - -func TestHandleUpload_MethodNotAllowed(t *testing.T) { - srv, _, _ := createTestServer(t) - - req := httptest.NewRequest("GET", "/upload", nil) - w := httptest.NewRecorder() - - srv.handleUpload(w, req) - - if w.Code != http.StatusMethodNotAllowed { - t.Errorf("Expected status code %d, got %d", http.StatusMethodNotAllowed, w.Code) - } -} - -func TestHandleUpload_NoFiles(t *testing.T) { - srv, _, _ := createTestServer(t) - - req := httptest.NewRequest("POST", "/upload", nil) - req.Header.Set("Content-Type", "multipart/form-data; boundary=xxx") - w := httptest.NewRecorder() - - srv.handleUpload(w, req) - - if w.Code != http.StatusBadRequest { - t.Errorf("Expected status code %d, got %d", http.StatusBadRequest, w.Code) - } -} - -func TestHandleUpload_Success(t *testing.T) { - srv, mockProofFS, mockBuilder := createTestServer(t) - - // Setup test data - files := map[string][]byte{ - "test.txt": []byte("test content"), - } - - // Setup mocks - mockBuilder.saveUploadedFilesFn = func(files []UploadedFile) 
error { - return nil - } - mockBuilder.executeBuildFn = func() ([]byte, error) { - return []byte("build successful"), nil - } - mockProofFS.scanProofFilesFn = func() error { - return nil - } - - // Create request - req, err := createMultipartRequest(t, files) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - w := httptest.NewRecorder() - srv.handleUpload(w, req) - - // We now expect 200 OK instead of a redirect - if w.Code != http.StatusOK { - t.Errorf("Expected status code %d, got %d", http.StatusOK, w.Code) - } -} - -func TestHandleUpload_SaveError(t *testing.T) { - srv, _, mockBuilder := createTestServer(t) - - // Setup test data - files := map[string][]byte{ - "test.txt": []byte("test content"), - } - - // Setup mock to return error - mockBuilder.saveUploadedFilesFn = func(files []UploadedFile) error { - return fmt.Errorf("failed to save files") - } - - // Create request - req, err := createMultipartRequest(t, files) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - w := httptest.NewRecorder() - srv.handleUpload(w, req) - - if w.Code != http.StatusInternalServerError { - t.Errorf("Expected status code %d, got %d", http.StatusInternalServerError, w.Code) - } -} - -func TestHandleUpload_BuildError(t *testing.T) { - srv, _, mockBuilder := createTestServer(t) - - // Setup test data - files := map[string][]byte{ - "test.txt": []byte("test content"), - } - - // Setup mocks - mockBuilder.saveUploadedFilesFn = func(files []UploadedFile) error { - return nil - } - mockBuilder.executeBuildFn = func() ([]byte, error) { - return []byte("build failed"), fmt.Errorf("build error") - } - - // Create request - req, err := createMultipartRequest(t, files) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - w := httptest.NewRecorder() - srv.handleUpload(w, req) - - if w.Code != http.StatusInternalServerError { - t.Errorf("Expected status code %d, got %d", http.StatusInternalServerError, w.Code) - } - - if 
!strings.Contains(w.Body.String(), "build error") { - t.Errorf("Expected error message to contain 'build error', got %s", w.Body.String()) - } -} - -func TestHandleUpload_ScanError(t *testing.T) { - srv, mockProofFS, mockBuilder := createTestServer(t) - - // Setup test data - files := map[string][]byte{ - "test.txt": []byte("test content"), - } - - // Setup mocks - mockBuilder.saveUploadedFilesFn = func(files []UploadedFile) error { - return nil - } - mockBuilder.executeBuildFn = func() ([]byte, error) { - return []byte("build successful"), nil - } - mockProofFS.scanProofFilesFn = func() error { - return fmt.Errorf("scan error") - } - - // Create request - req, err := createMultipartRequest(t, files) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - w := httptest.NewRecorder() - srv.handleUpload(w, req) - - // Even with scan error, we should still return 200 OK - if w.Code != http.StatusOK { - t.Errorf("Expected status code %d, got %d", http.StatusOK, w.Code) - } -} - -func TestHandleUpload_UnchangedFiles(t *testing.T) { - srv, _, _ := createTestServer(t) - - // Setup test data - files := map[string][]byte{ - "test.txt": []byte("test content"), - } - - // First request - req1, err := createMultipartRequest(t, files) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - w1 := httptest.NewRecorder() - srv.handleUpload(w1, req1) - - // First request should return 200 OK - if w1.Code != http.StatusOK { - t.Errorf("Expected status code %d, got %d", http.StatusOK, w1.Code) - } - - // Second request with same files - req2, err := createMultipartRequest(t, files) - if err != nil { - t.Fatalf("Failed to create request: %v", err) - } - - w2 := httptest.NewRecorder() - srv.handleUpload(w2, req2) - - // Second request with unchanged files should return 304 Not Modified - if w2.Code != http.StatusNotModified { - t.Errorf("Expected status code %d, got %d", http.StatusNotModified, w2.Code) - } - - if !strings.Contains(w2.Body.String(), 
"Files unchanged") { - t.Errorf("Expected response to contain 'Files unchanged', got %s", w2.Body.String()) - } -} diff --git a/kurtosis-devnet/optimism-package-trampoline/README.md b/kurtosis-devnet/optimism-package-trampoline/README.md deleted file mode 100644 index 41853ea712f2b..0000000000000 --- a/kurtosis-devnet/optimism-package-trampoline/README.md +++ /dev/null @@ -1,7 +0,0 @@ -This package contains a mostly empty Kurtosis package, -that trampolines to github.com/ethpandas/optimism-package. - -The goal here is to use the `replace` section of `kurtosis.yml` -as a "lockfile" for our package dependencies. - -This way, we achieve reproducibility for our devnet deployments. \ No newline at end of file diff --git a/kurtosis-devnet/optimism-package-trampoline/kurtosis.yml b/kurtosis-devnet/optimism-package-trampoline/kurtosis.yml deleted file mode 100644 index 06e9db8afd93d..0000000000000 --- a/kurtosis-devnet/optimism-package-trampoline/kurtosis.yml +++ /dev/null @@ -1,8 +0,0 @@ -name: github.com/ethereum-optimism/optimism/kurtosis-devnet/optimism-package-trampoline -description: |- - A trampoline package for optimism-package. This one is reproducible, due to the replace directives below. 
-replace: - github.com/ethpandaops/optimism-package: github.com/ethpandaops/optimism-package@89e0b8cacab9f7e9f74d53b72d4870092825d577 - github.com/ethpandaops/ethereum-package: github.com/ethpandaops/ethereum-package@83830d44823767af65eda7dfe6b26c87c536c4cf - github.com/kurtosis-tech/prometheus-package: github.com/kurtosis-tech/prometheus-package@637c9dea933be18e47f96cadc0d9bb0e3a5aa9d6 # v1.0.0 - github.com/kurtosis-tech/postgres-package: github.com/kurtosis-tech/postgres-package@9cbdde2c55e8d1656deb87821465a2ad244d8b33 # v1.0.0 diff --git a/kurtosis-devnet/optimism-package-trampoline/main.star b/kurtosis-devnet/optimism-package-trampoline/main.star deleted file mode 100644 index d1b2d34b0331f..0000000000000 --- a/kurtosis-devnet/optimism-package-trampoline/main.star +++ /dev/null @@ -1,5 +0,0 @@ -optimism_package = import_module("github.com/ethpandaops/optimism-package/main.star") - -def run(plan, args): - # just delegate to optimism-package - optimism_package.run(plan, args) diff --git a/kurtosis-devnet/pkg/build/contracts.go b/kurtosis-devnet/pkg/build/contracts.go deleted file mode 100644 index b21f523c5f9a0..0000000000000 --- a/kurtosis-devnet/pkg/build/contracts.go +++ /dev/null @@ -1,401 +0,0 @@ -package build - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "fmt" - "io" - "log" - "os" - "path/filepath" - "strings" - "text/template" - "time" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/enclave" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" - "github.com/spf13/afero" - "go.opentelemetry.io/otel" -) - -// ContractBuilder handles building smart contracts using just commands -type ContractBuilder struct { - // Base directory where the build commands should be executed - baseDir string - // Template for the build command - cmdTemplate *template.Template - - // Dry run mode - dryRun bool - - builtContracts map[string]string - - 
enclave string - - // Command factory for testing - cmdFactory cmdFactory - // Enclave manager for testing - enclaveManager *enclave.KurtosisEnclaveManager - // Enclave filesystem for testing - enclaveFS *ktfs.EnclaveFS - // Filesystem for operations - fs afero.Fs -} - -const ( - contractsCmdTemplateStr = "just {{ .ContractsPath }}/build-no-tests" - relativeContractsPath = "../packages/contracts-bedrock" - solidityCachePath = "cache/solidity-files-cache.json" -) - -var defaultContractTemplate *template.Template - -func init() { - defaultContractTemplate = template.Must(template.New("contract_build_cmd").Parse(contractsCmdTemplateStr)) -} - -type ContractBuilderOptions func(*ContractBuilder) - -func WithContractBaseDir(baseDir string) ContractBuilderOptions { - return func(b *ContractBuilder) { - b.baseDir = baseDir - } -} - -func WithContractDryRun(dryRun bool) ContractBuilderOptions { - return func(b *ContractBuilder) { - b.dryRun = dryRun - } -} - -func WithContractEnclave(enclave string) ContractBuilderOptions { - return func(b *ContractBuilder) { - b.enclave = enclave - } -} - -func WithContractEnclaveManager(manager *enclave.KurtosisEnclaveManager) ContractBuilderOptions { - return func(b *ContractBuilder) { - b.enclaveManager = manager - } -} - -func WithContractFS(fs afero.Fs) ContractBuilderOptions { - return func(b *ContractBuilder) { - b.fs = fs - } -} - -// NewContractBuilder creates a new ContractBuilder instance -func NewContractBuilder(opts ...ContractBuilderOptions) *ContractBuilder { - b := &ContractBuilder{ - baseDir: ".", - cmdTemplate: defaultContractTemplate, - dryRun: false, - builtContracts: make(map[string]string), - cmdFactory: defaultCmdFactory, - enclaveManager: nil, - enclaveFS: nil, - fs: afero.NewOsFs(), // Default to OS filesystem - } - - for _, opt := range opts { - opt(b) - } - - return b -} - -// Build executes the contract build command -func (b *ContractBuilder) Build(ctx context.Context, _ string) (string, error) { - _, span := 
otel.Tracer("contract-builder").Start(ctx, "build contracts") - defer span.End() - - // since we ignore layer for now, we can skip the build if the file already - // exists: it'll be the same file! - if url, ok := b.builtContracts[""]; ok { - return url, nil - } - - log.Println("Building contracts bundle") - - // Execute template to get command string - var cmdBuf bytes.Buffer - if err := b.cmdTemplate.Execute(&cmdBuf, map[string]string{ - "ContractsPath": relativeContractsPath, - }); err != nil { - return "", fmt.Errorf("failed to execute command template: %w", err) - } - - // Create command using the factory - cmd := b.cmdFactory("sh", "-c", cmdBuf.String()) - cmd.SetDir(b.baseDir) - - if b.dryRun { - return "artifact://contracts", nil - } - - output, err := cmd.CombinedOutput() - if err != nil { - return "", fmt.Errorf("contract build command failed: %w\nOutput: %s", err, string(output)) - } - - name, err := b.createContractsArtifact() - if err != nil { - return "", fmt.Errorf("failed to create contracts artifact: %w", err) - } - - url := fmt.Sprintf("artifact://%s", name) - b.builtContracts[""] = url - return url, nil -} - -func (b *ContractBuilder) GetContractUrl() string { - if b.dryRun { - return "artifact://contracts" - } - return fmt.Sprintf("artifact://%s", b.getBuiltContractName()) -} - -func (b *ContractBuilder) getBuiltContractName() string { - return fmt.Sprintf("contracts-%s", b.buildHash()) -} - -func (b *ContractBuilder) buildHash() string { - // the solidity cache file contains up-to-date information about the current - // state of the build, so it's suitable to provide a unique hash. 
- cacheFilePath := filepath.Join(b.baseDir, relativeContractsPath, solidityCachePath) - - fileData, err := afero.ReadFile(b.fs, cacheFilePath) - if err != nil { - log.Printf("Error reading solidity cache file: %v", err) - return "error" - } - - hash := sha256.Sum256(fileData) - return hex.EncodeToString(hash[:]) -} - -func (b *ContractBuilder) createContractsArtifact() (name string, retErr error) { - // Create context with 10-minute timeout for artifact upload operations - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute) - defer cancel() - - name = b.getBuiltContractName() - - // Ensure the enclave exists - var err error - if b.enclaveManager == nil { - return "", fmt.Errorf("enclave manager not set") - } - - // TODO: this is not ideal, we should feed the resulting enclave into the - // EnclaveFS constructor. As it is, we're making sure the enclave exists, - // and we recreate an enclave context for NewEnclaveFS.. - _, err = b.enclaveManager.GetEnclave(ctx, b.enclave) - if err != nil { - return "", fmt.Errorf("failed to get or create enclave: %w", err) - } - - // Create a new Kurtosis filesystem for the specified enclave - var enclaveFS *ktfs.EnclaveFS - if b.enclaveFS != nil { - enclaveFS = b.enclaveFS - } else { - enclaveFS, err = ktfs.NewEnclaveFS(ctx, b.enclave) - if err != nil { - return "", fmt.Errorf("failed to create enclave filesystem: %w", err) - } - } - - // Check if artifact already exists with retry logic - artifactNames, getAllErr := util.WithRetry(ctx, "GetAllArtifactNames", func() ([]string, error) { - return enclaveFS.GetAllArtifactNames(ctx) - }) - - if getAllErr != nil { - log.Printf("Warning: Failed to retrieve artifact names: %v", getAllErr) - } else { - for _, existingName := range artifactNames { - if existingName == name { - log.Printf("Artifact '%s' already exists, skipping creation", name) - return name, nil - } - } - } - - // Check the base contracts directory - contractsDir := filepath.Join(b.baseDir, 
relativeContractsPath) - exists, err := afero.DirExists(b.fs, contractsDir) - if err != nil || !exists { - return "", fmt.Errorf("contracts directory not found at %s: %w", contractsDir, err) - } - - // Create temp directory to hold the files we want to include - tempDir, err := afero.TempDir(b.fs, "", "contracts-artifacts-*") - if err != nil { - return "", fmt.Errorf("failed to create temp directory: %w", err) - } - defer func() { - if err := b.fs.RemoveAll(tempDir); err != nil && retErr == nil { - retErr = fmt.Errorf("failed to cleanup temporary directory: %w", err) - } - }() - - // Populate the temp directory with the necessary files - if err := b.populateContractsArtifact(contractsDir, tempDir); err != nil { - return "", fmt.Errorf("failed to populate contracts artifact: %w", err) - } - - // Create file readers for the artifact - readers, err := b.createArtifactReaders(tempDir) - if err != nil { - return "", fmt.Errorf("failed to create artifact readers: %w", err) - } - - // Upload the artifact with retry logic - _, err = util.WithRetry(ctx, fmt.Sprintf("PutArtifact(%s)", name), func() (struct{}, error) { - return struct{}{}, enclaveFS.PutArtifact(ctx, name, readers...) 
- }) - - if err != nil { - return "", fmt.Errorf("failed to upload artifact: %w", err) - } - - return -} - -// createArtifactReaders creates file readers for all files in the directory -func (b *ContractBuilder) createArtifactReaders(dir string) ([]*ktfs.ArtifactFileReader, error) { - var readers []*ktfs.ArtifactFileReader - var openFiles []io.Closer - - err := afero.Walk(b.fs, dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Skip directories themselves - if info.IsDir() { - return nil - } - - // Get relative path from base dir - relPath, err := filepath.Rel(dir, path) - if err != nil { - return fmt.Errorf("failed to get relative path: %w", err) - } - - // Open the file - file, err := b.fs.Open(path) - if err != nil { - return fmt.Errorf("failed to open file %s: %w", path, err) - } - openFiles = append(openFiles, file) - - // Add file reader to the list - readers = append(readers, ktfs.NewArtifactFileReader(relPath, file)) - - return nil - }) - - if err != nil { - // Close any open files - for _, file := range openFiles { - file.Close() - } - return nil, fmt.Errorf("failed to prepare artifact files: %w", err) - } - - return readers, nil -} - -// populateContractsArtifact populates the temporary directory with required contract files -func (b *ContractBuilder) populateContractsArtifact(contractsDir, tempDir string) error { - // Handle forge-artifacts directories - exclude *.t.sol directories as we don't need tests. - // op-deployer will need contracts and scripts. 
- forgeArtifactsPath := filepath.Join(contractsDir, "forge-artifacts") - exists, err := afero.DirExists(b.fs, forgeArtifactsPath) - if err != nil { - return fmt.Errorf("failed to check forge-artifacts directory: %w", err) - } - if !exists { - return nil - } - - entries, err := afero.ReadDir(b.fs, forgeArtifactsPath) - if err != nil { - return fmt.Errorf("failed to read forge-artifacts directory: %w", err) - } - - for _, entry := range entries { - if !entry.IsDir() { - continue - } - - // Skip test directories - if strings.HasSuffix(entry.Name(), ".t.sol") { - continue - } - - srcPath := filepath.Join(forgeArtifactsPath, entry.Name()) - destPath := filepath.Join(tempDir, entry.Name()) - - // Create destination directory - if err := b.fs.MkdirAll(destPath, 0755); err != nil { - return fmt.Errorf("failed to create directory %s: %w", destPath, err) - } - - // Copy directory contents - err = afero.Walk(b.fs, srcPath, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Get path relative to source directory - relPath, err := filepath.Rel(srcPath, path) - if err != nil { - return fmt.Errorf("failed to get relative path: %w", err) - } - - // Skip root directory - if relPath == "." 
{ - return nil - } - - destPath := filepath.Join(destPath, relPath) - - if info.IsDir() { - return b.fs.MkdirAll(destPath, info.Mode()) - } - - // Copy file contents - srcFile, err := b.fs.Open(path) - if err != nil { - return fmt.Errorf("failed to open source file: %w", err) - } - defer srcFile.Close() - - destFile, err := b.fs.Create(destPath) - if err != nil { - return fmt.Errorf("failed to create destination file: %w", err) - } - defer destFile.Close() - - if _, err := io.Copy(destFile, srcFile); err != nil { - return fmt.Errorf("failed to copy file contents: %w", err) - } - - return b.fs.Chmod(destPath, info.Mode()) - }) - - if err != nil { - return fmt.Errorf("failed to copy directory %s: %w", srcPath, err) - } - } - - return nil -} diff --git a/kurtosis-devnet/pkg/build/contracts_test.go b/kurtosis-devnet/pkg/build/contracts_test.go deleted file mode 100644 index a4cf312297c8c..0000000000000 --- a/kurtosis-devnet/pkg/build/contracts_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package build - -import ( - "bytes" - "context" - "fmt" - "io" - "path/filepath" - "testing" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/enclave" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/enclaves" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type testCmd struct { - output []byte - err error - dir string - stdout *bytes.Buffer - stderr *bytes.Buffer -} - -func (c *testCmd) CombinedOutput() ([]byte, error) { - return c.output, c.err -} - -func (c *testCmd) Dir() string { - return c.dir -} - -func (c 
*testCmd) SetDir(dir string) { - c.dir = dir -} - -func (c *testCmd) Run() error { - return c.err -} - -func (c *testCmd) SetOutput(stdout, stderr *bytes.Buffer) { - c.stdout = stdout - c.stderr = stderr -} - -func testCmdFactory(output []byte, err error) cmdFactory { - return func(name string, arg ...string) cmdRunner { - return &testCmd{output: output, err: err} - } -} - -type testEnclaveFS struct { - fs afero.Fs - artifact string -} - -func (e *testEnclaveFS) GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) { - if e.artifact == "" { - return []*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid{}, nil - } - return []*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid{ - { - FileName: e.artifact, - FileUuid: "test-uuid", - }, - }, nil -} - -func (e *testEnclaveFS) DownloadFilesArtifact(ctx context.Context, name string) ([]byte, error) { - return []byte("test content"), nil -} - -func (e *testEnclaveFS) UploadFiles(pathToUpload string, artifactName string) (services.FilesArtifactUUID, services.FileArtifactName, error) { - e.artifact = artifactName - return "test-uuid", services.FileArtifactName(artifactName), nil -} - -func (m *testEnclaveFS) GetAllArtifactNames(ctx context.Context) ([]string, error) { - if m.artifact == "" { - return []string{}, nil - } - return []string{m.artifact}, nil -} - -func (m *testEnclaveFS) PutArtifact(ctx context.Context, artifactName string, content []byte) error { - reader := bytes.NewReader(content) - file, err := m.fs.Create(artifactName) - if err != nil { - return err - } - defer file.Close() - _, err = io.Copy(file, reader) - if err != nil { - return err - } - m.artifact = artifactName - return nil -} - -func (m *testEnclaveFS) GetArtifact(ctx context.Context, name string) (*ktfs.Artifact, error) { - return nil, fmt.Errorf("not implemented") -} - -type testEnclaveContext struct{} - -func (e *testEnclaveContext) RunStarlarkPackage(ctx 
context.Context, pkg string, config *starlark_run_config.StarlarkRunConfig) (<-chan interfaces.StarlarkResponse, string, error) { - return nil, "", nil -} - -func (e *testEnclaveContext) RunStarlarkScript(ctx context.Context, script string, config *starlark_run_config.StarlarkRunConfig) error { - return nil -} - -func (e *testEnclaveContext) GetEnclaveUuid() enclaves.EnclaveUUID { - return enclaves.EnclaveUUID("test-enclave-uuid") -} - -func (e *testEnclaveContext) GetServices() (map[services.ServiceName]services.ServiceUUID, error) { - return nil, nil -} - -func (e *testEnclaveContext) GetService(serviceIdentifier string) (interfaces.ServiceContext, error) { - return nil, nil -} - -func (e *testEnclaveContext) GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) { - return nil, nil -} - -type testKurtosisContext struct{} - -func (c *testKurtosisContext) CreateEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - return &testEnclaveContext{}, nil -} - -func (c *testKurtosisContext) GetEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - return &testEnclaveContext{}, nil -} - -func (c *testKurtosisContext) GetEnclaveStatus(ctx context.Context, name string) (interfaces.EnclaveStatus, error) { - return interfaces.EnclaveStatusRunning, nil -} - -func (c *testKurtosisContext) Clean(ctx context.Context, destroyAll bool) ([]interfaces.EnclaveNameAndUuid, error) { - return []interfaces.EnclaveNameAndUuid{}, nil -} - -func (c *testKurtosisContext) DestroyEnclave(ctx context.Context, name string) error { - return nil -} - -func setupTestFS(t *testing.T) (*testEnclaveFS, afero.Fs) { - fs := afero.NewMemMapFs() - - // Create the contracts directory structure - contractsDir := filepath.Join(".", relativeContractsPath) - require.NoError(t, fs.MkdirAll(contractsDir, 0755)) - - // Create a mock solidity cache file - cacheDir := filepath.Join(contractsDir, 
"cache") - require.NoError(t, fs.MkdirAll(cacheDir, 0755)) - require.NoError(t, afero.WriteFile(fs, filepath.Join(cacheDir, "solidity-files-cache.json"), []byte("test cache"), 0644)) - - // Create forge-artifacts directory with test files - forgeDir := filepath.Join(contractsDir, "forge-artifacts") - require.NoError(t, fs.MkdirAll(forgeDir, 0755)) - - // Create some test contract artifacts - contractDirs := []string{"Contract1.sol", "Contract2.sol"} - for _, dir := range contractDirs { - artifactDir := filepath.Join(forgeDir, dir) - require.NoError(t, fs.MkdirAll(artifactDir, 0755)) - require.NoError(t, afero.WriteFile(fs, filepath.Join(artifactDir, "artifact.json"), []byte("test artifact"), 0644)) - } - - // Create a test contract directory - testContractDir := filepath.Join(forgeDir, "TestContract.t.sol") - require.NoError(t, fs.MkdirAll(testContractDir, 0755)) - require.NoError(t, afero.WriteFile(fs, filepath.Join(testContractDir, "artifact.json"), []byte("test artifact"), 0644)) - - return &testEnclaveFS{fs: fs}, fs -} - -func TestContractBuilder_Build(t *testing.T) { - tests := []struct { - name string - setupCmd func() *testCmd - expectError bool - expectedOutput string - }{ - { - name: "successful build", - setupCmd: func() *testCmd { - return &testCmd{ - output: []byte("build successful"), - err: nil, - dir: ".", - } - }, - expectError: false, - expectedOutput: "artifact://contracts-ce0456a3c5a930d170e08492989cf52b416562106c8040bc384548bfe142eaa2", // hash of "test cache" - }, - { - name: "build command fails", - setupCmd: func() *testCmd { - return &testCmd{ - output: []byte("build failed"), - err: fmt.Errorf("command failed"), - } - }, - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Setup test filesystem - testFS, memFS := setupTestFS(t) - - // Create mock command - mockCmd := tt.setupCmd() - - // Create mock enclave manager - enclaveManager, err := enclave.NewKurtosisEnclaveManager( - 
enclave.WithKurtosisContext(&testKurtosisContext{}), - ) - require.NoError(t, err) - - // Create contract builder with mocks - builder := NewContractBuilder( - WithContractFS(memFS), - WithContractBaseDir("."), - WithContractDryRun(false), - ) - builder.cmdFactory = testCmdFactory(mockCmd.output, mockCmd.err) - builder.enclaveFS, err = ktfs.NewEnclaveFS(context.Background(), "test-enclave", ktfs.WithEnclaveCtx(testFS), ktfs.WithFs(memFS)) - require.NoError(t, err) - builder.enclaveManager = enclaveManager - - // Execute build - output, err := builder.Build(context.Background(), "") - - // Verify results - if tt.expectError { - assert.Error(t, err) - return - } - - require.NoError(t, err) - assert.Equal(t, tt.expectedOutput, output) - assert.Equal(t, mockCmd.dir, ".") - }) - } -} - -func TestContractBuilder_createContractsArtifact(t *testing.T) { - testFS, memFS := setupTestFS(t) - - // Create mock enclave manager - enclaveManager, err := enclave.NewKurtosisEnclaveManager( - enclave.WithKurtosisContext(&testKurtosisContext{}), - ) - require.NoError(t, err) - - builder := NewContractBuilder( - WithContractFS(memFS), - WithContractBaseDir("."), - ) - builder.enclaveFS, err = ktfs.NewEnclaveFS(context.Background(), "test-enclave", ktfs.WithEnclaveCtx(testFS), ktfs.WithFs(memFS)) - require.NoError(t, err) - builder.enclaveManager = enclaveManager - - // Create the artifact - name, err := builder.createContractsArtifact() - require.NoError(t, err) - - // Verify the artifact was created - artifacts, err := builder.enclaveFS.GetAllArtifactNames(context.Background()) - require.NoError(t, err) - assert.Contains(t, artifacts, name) - - // Verify it skips test contracts - for _, artifact := range artifacts { - assert.NotContains(t, artifact, "TestContract.t.sol") - } -} - -func TestContractBuilder_buildHash(t *testing.T) { - _, memFS := setupTestFS(t) - - builder := NewContractBuilder( - WithContractFS(memFS), - WithContractBaseDir("."), - ) - - // Get the hash - hash := 
builder.buildHash() - - // Verify it's not empty or "error" - assert.NotEmpty(t, hash) - assert.NotEqual(t, "error", hash) - - // Verify it's consistent - hash2 := builder.buildHash() - assert.Equal(t, hash, hash2) - - // Modify the cache file and verify the hash changes - cacheFile := filepath.Join(".", relativeContractsPath, solidityCachePath) - err := afero.WriteFile(memFS, cacheFile, []byte("modified cache"), 0644) - require.NoError(t, err) - - hash3 := builder.buildHash() - assert.NotEqual(t, hash, hash3) -} - -func TestContractBuilder_populateContractsArtifact(t *testing.T) { - _, memFS := setupTestFS(t) - - builder := NewContractBuilder( - WithContractFS(memFS), - WithContractBaseDir("."), - ) - - // Create a temporary directory for the test - tempDir, err := afero.TempDir(memFS, "", "test-artifacts-*") - require.NoError(t, err) - defer func() { - _ = memFS.RemoveAll(tempDir) - }() - - // Populate the artifacts - contractsDir := filepath.Join(".", relativeContractsPath) - err = builder.populateContractsArtifact(contractsDir, tempDir) - require.NoError(t, err) - - // Verify the directory structure - exists, err := afero.DirExists(memFS, filepath.Join(tempDir, "Contract1.sol")) - assert.NoError(t, err) - assert.True(t, exists) - - exists, err = afero.DirExists(memFS, filepath.Join(tempDir, "Contract2.sol")) - assert.NoError(t, err) - assert.True(t, exists) - - // Verify test contracts are not copied - exists, err = afero.DirExists(memFS, filepath.Join(tempDir, "TestContract.t.sol")) - assert.NoError(t, err) - assert.False(t, exists) - - // Verify file contents - content, err := afero.ReadFile(memFS, filepath.Join(tempDir, "Contract1.sol", "artifact.json")) - require.NoError(t, err) - assert.Equal(t, "test artifact", string(content)) -} - -func TestContractBuilder_GetContractUrl(t *testing.T) { - _, memFS := setupTestFS(t) - - builder := NewContractBuilder( - WithContractFS(memFS), - WithContractBaseDir("."), - ) - - // Get the contract URL - url := 
builder.GetContractUrl() - - // Verify the format is correct - assert.Regexp(t, `^artifact://contracts-[a-f0-9]{64}$`, url) - - // Verify it's consistent - url2 := builder.GetContractUrl() - assert.Equal(t, url, url2) - - // Verify it changes when the cache file changes - cacheFile := filepath.Join(".", relativeContractsPath, solidityCachePath) - err := afero.WriteFile(memFS, cacheFile, []byte("modified cache"), 0644) - require.NoError(t, err) - - url3 := builder.GetContractUrl() - assert.NotEqual(t, url, url3) -} diff --git a/kurtosis-devnet/pkg/build/docker.go b/kurtosis-devnet/pkg/build/docker.go deleted file mode 100644 index c9142c574481c..0000000000000 --- a/kurtosis-devnet/pkg/build/docker.go +++ /dev/null @@ -1,343 +0,0 @@ -package build - -import ( - "bytes" - "context" - "fmt" - "log" - "net/url" - "os" - "os/exec" - "runtime" - "strings" - "sync" - "text/template" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/client" - "go.opentelemetry.io/otel" - "golang.org/x/sync/semaphore" -) - -// cmdRunner abstracts command execution for testing -type cmdRunner interface { - // CombinedOutput is kept for potential future use or simpler scenarios - CombinedOutput() ([]byte, error) - // Run starts the command and waits for it to complete. - // It's often preferred when you want to manage stdout/stderr separately. - Run() error - // SetOutput sets the writers for stdout and stderr. 
- SetOutput(stdout, stderr *bytes.Buffer) - Dir() string - SetDir(dir string) -} - -// defaultCmdRunner is the default implementation that uses exec.Command -type defaultCmdRunner struct { - *exec.Cmd - stdout *bytes.Buffer - stderr *bytes.Buffer -} - -func (r *defaultCmdRunner) CombinedOutput() ([]byte, error) { - if r.stdout == nil || r.stderr == nil { - var combined bytes.Buffer - r.Cmd.Stdout = &combined - r.Cmd.Stderr = &combined - err := r.Cmd.Run() - return combined.Bytes(), err - } - err := r.Run() - combined := append(r.stdout.Bytes(), r.stderr.Bytes()...) - return combined, err -} - -func (r *defaultCmdRunner) SetOutput(stdout, stderr *bytes.Buffer) { - r.stdout = stdout - r.stderr = stderr - r.Cmd.Stdout = stdout - r.Cmd.Stderr = stderr -} - -func (r *defaultCmdRunner) Run() error { - return r.Cmd.Run() -} - -func (r *defaultCmdRunner) Dir() string { - return r.Cmd.Dir -} - -func (r *defaultCmdRunner) SetDir(dir string) { - r.Cmd.Dir = dir -} - -// cmdFactory creates commands -type cmdFactory func(name string, arg ...string) cmdRunner - -// defaultCmdFactory is the default implementation that uses exec.Command -func defaultCmdFactory(name string, arg ...string) cmdRunner { - return &defaultCmdRunner{Cmd: exec.Command(name, arg...)} -} - -// dockerClient interface defines the Docker client methods we use -type dockerClient interface { - ImageInspectWithRaw(ctx context.Context, imageID string) (types.ImageInspect, []byte, error) - ImageTag(ctx context.Context, source, target string) error -} - -// dockerProvider abstracts the creation of Docker clients -type dockerProvider interface { - newClient() (dockerClient, error) -} - -// defaultDockerProvider is the default implementation of dockerProvider -type defaultDockerProvider struct{} - -func (p *defaultDockerProvider) newClient() (dockerClient, error) { - opts := []client.Opt{client.FromEnv} - - // Check if default docker socket exists - hostURL, err := url.Parse(client.DefaultDockerHost) - if err != nil { 
- return nil, fmt.Errorf("failed to parse default docker host: %w", err) - } - - // For unix sockets, check if the socket file exists - unixOS := runtime.GOOS == "linux" || runtime.GOOS == "darwin" - if hostURL.Scheme == "unix" && unixOS { - if _, err := os.Stat(hostURL.Path); os.IsNotExist(err) { - // Default socket doesn't exist, try to find an alternate location. Docker Desktop uses a socket in the home directory. - homeDir, err := os.UserHomeDir() - if err != nil { - return nil, fmt.Errorf("failed to get user home directory: %w", err) - } - // Try to use the non-privileged socket if available - homeSocketPath := fmt.Sprintf("%s/.docker/run/docker.sock", homeDir) - if runtime.GOOS == "linux" { - homeSocketPath = fmt.Sprintf("%s/.docker/desktop/docker.sock", homeDir) - } - - // If that socket exists, make it the default. Otherwise, leave it alone, and hope some environment variable has been set. - if _, err := os.Stat(homeSocketPath); err == nil { - socketURL := &url.URL{ - Scheme: "unix", - Path: homeSocketPath, - } - // prepend the host, so that it can still be overridden by the environment. - opts = append([]client.Opt{client.WithHost(socketURL.String())}, opts...) - } - } - } - - return client.NewClientWithOpts(opts...) 
-} - -// DockerBuilder handles building docker images using just commands -type DockerBuilder struct { - // Base directory where the build commands should be executed - baseDir string - // Template for the build command - cmdTemplate *template.Template - // Dry run mode - dryRun bool - // Docker provider for creating clients - dockerProvider dockerProvider - // Command factory for testing - cmdFactory cmdFactory - // Concurrency limiting semaphore - sem *semaphore.Weighted - // Mutex to protect shared state (buildStates) - mu sync.Mutex - // Tracks the state of builds (ongoing or completed) - buildStates map[string]*buildState -} - -// buildState stores the result and status of a build -type buildState struct { - result string - err error - done chan struct{} - once sync.Once -} - -const cmdTemplateStr = "just {{.ProjectName}}-image {{.ImageTag}}" - -var defaultCmdTemplate *template.Template - -func init() { - defaultCmdTemplate = template.Must(template.New("docker_build_cmd").Parse(cmdTemplateStr)) -} - -type DockerBuilderOptions func(*DockerBuilder) - -func WithDockerCmdTemplate(cmdTemplate *template.Template) DockerBuilderOptions { - return func(b *DockerBuilder) { - b.cmdTemplate = cmdTemplate - } -} - -func WithDockerBaseDir(baseDir string) DockerBuilderOptions { - return func(b *DockerBuilder) { - b.baseDir = baseDir - } -} - -func WithDockerDryRun(dryRun bool) DockerBuilderOptions { - return func(b *DockerBuilder) { - b.dryRun = dryRun - } -} - -// WithDockerConcurrency sets the maximum number of concurrent builds. 
-func WithDockerConcurrency(limit int) DockerBuilderOptions { - if limit <= 0 { - limit = 1 - } - if limit >= 32 { - limit = 32 - } - return func(b *DockerBuilder) { - b.sem = semaphore.NewWeighted(int64(limit)) - } -} - -// NewDockerBuilder creates a new DockerBuilder instance -func NewDockerBuilder(opts ...DockerBuilderOptions) *DockerBuilder { - b := &DockerBuilder{ - baseDir: ".", - cmdTemplate: defaultCmdTemplate, - dryRun: false, - dockerProvider: &defaultDockerProvider{}, - cmdFactory: defaultCmdFactory, - sem: semaphore.NewWeighted(1), - buildStates: make(map[string]*buildState), - } - - for _, opt := range opts { - opt(b) - } - - return b -} - -// templateData holds the data for the command template -type templateData struct { - ImageTag string - ProjectName string -} - -// Build ensures the docker image for the given project is built, respecting concurrency limits. -// It blocks until the specific requested build is complete. Other builds may run concurrently. -func (b *DockerBuilder) Build(ctx context.Context, projectName, imageTag string) (string, error) { - b.mu.Lock() - state, exists := b.buildStates[projectName] - if !exists { - state = &buildState{ - done: make(chan struct{}), - } - b.buildStates[projectName] = state - } - b.mu.Unlock() - - if !exists { - state.once.Do(func() { - err := b.executeBuild(ctx, projectName, imageTag, state) - if err != nil { - state.err = err - state.result = "" - } - close(state.done) - }) - } else { - <-state.done - } - - return state.result, state.err -} - -func (b *DockerBuilder) executeBuild(ctx context.Context, projectName, initialImageTag string, state *buildState) error { - ctx, span := otel.Tracer("docker-builder").Start(ctx, fmt.Sprintf("build %s", projectName)) - defer span.End() - - log.Printf("Build started for project: %s (tag: %s)", projectName, initialImageTag) - - if b.dryRun { - log.Printf("Dry run: Skipping build for project %s", projectName) - state.result = initialImageTag - return nil - } - - if err 
:= b.sem.Acquire(ctx, 1); err != nil { - log.Printf("Failed to acquire build semaphore for %s: %v", projectName, err) - return fmt.Errorf("failed to acquire semaphore: %w", err) - } - defer b.sem.Release(1) - - data := templateData{ - ImageTag: initialImageTag, - ProjectName: projectName, - } - - var cmdBuf bytes.Buffer - if err := b.cmdTemplate.Execute(&cmdBuf, data); err != nil { - log.Printf("Build failed for %s: Failed to execute command template: %v", projectName, err) - return fmt.Errorf("failed to execute command template: %w", err) - } - cmdStr := cmdBuf.String() - - cmd := b.cmdFactory("sh", "-c", cmdStr) - var stdoutBuf, stderrBuf bytes.Buffer - cmd.SetOutput(&stdoutBuf, &stderrBuf) - - startTime := time.Now() - log.Printf("Executing build command for %s: %s", projectName, cmdStr) - err := cmd.Run() - duration := time.Since(startTime) - - if err != nil { - log.Printf("Build failed for %s after %s: %v", projectName, duration, err) - log.Printf("--- Start Output (stdout) for failed %s ---", projectName) - log.Print(stdoutBuf.String()) - log.Printf("--- End Output (stdout) for failed %s ---", projectName) - log.Printf("--- Start Output (stderr) for failed %s ---", projectName) - log.Print(stderrBuf.String()) - log.Printf("--- End Output (stderr) for failed %s ---", projectName) - return fmt.Errorf("build command failed: %w", err) - } - - dockerClient, err := b.dockerProvider.newClient() - if err != nil { - log.Printf("Build command succeeded for %s, but Docker client creation failed: %v", projectName, err) - return fmt.Errorf("failed to create docker client: %w", err) - } - - inspect, _, err := dockerClient.ImageInspectWithRaw(ctx, initialImageTag) - if err != nil { - log.Printf("Build command succeeded for %s in %s, but failed to inspect image '%s': %v", projectName, duration, initialImageTag, err) - log.Printf("Stdout: %s", stdoutBuf.String()) - log.Printf("Stderr: %s", stderrBuf.String()) - return fmt.Errorf("build command succeeded but failed to inspect 
image %s: %w", initialImageTag, err) - } - - shortID := TruncateID(inspect.ID) - - finalTag := fmt.Sprintf("%s:%s", projectName, shortID) - err = dockerClient.ImageTag(ctx, initialImageTag, finalTag) - if err != nil { - log.Printf("Build succeeded for %s in %s, inspecting image '%s' OK, but failed to tag as '%s': %v", projectName, duration, initialImageTag, finalTag, err) - return fmt.Errorf("failed to tag image %s as %s: %w", initialImageTag, finalTag, err) - } - - state.result = finalTag - log.Printf("Build successful for project: %s. Tagged as: %s (Duration: %s)", projectName, finalTag, duration) - return nil -} - -func TruncateID(id string) string { - shortID := strings.TrimPrefix(id, "sha256:") - if len(shortID) > 12 { - shortID = shortID[:12] - } - return shortID -} diff --git a/kurtosis-devnet/pkg/build/docker_test.go b/kurtosis-devnet/pkg/build/docker_test.go deleted file mode 100644 index ea96e8c2ba30e..0000000000000 --- a/kurtosis-devnet/pkg/build/docker_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package build - -import ( - "bytes" - "context" - "fmt" - "log" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// --- Helper to capture log output --- -func captureLogs(t *testing.T) (*bytes.Buffer, func()) { - var logBuf bytes.Buffer - originalLogger := log.Writer() - log.SetOutput(&logBuf) - t.Cleanup(func() { - log.SetOutput(originalLogger) - }) - return &logBuf, func() { log.SetOutput(originalLogger) } -} - -// --- Tests --- - -func TestDockerBuilder_Build_Success(t *testing.T) { - logBuf, cleanup := captureLogs(t) - defer cleanup() - - projectName := "test-project" - initialTag := "test-project:enclave1" - - // Create a builder in dry run mode - builder := NewDockerBuilder( - WithDockerDryRun(true), - WithDockerConcurrency(1), - ) - - // Execute build - resultTag, err := builder.Build(context.Background(), projectName, initialTag) - - // Verify results - require.NoError(t, err) - 
assert.Equal(t, initialTag, resultTag) - - // Verify log output - logs := logBuf.String() - assert.Contains(t, logs, fmt.Sprintf("Build started for project: %s (tag: %s)", projectName, initialTag)) - assert.Contains(t, logs, fmt.Sprintf("Dry run: Skipping build for project %s", projectName)) -} - -func TestDockerBuilder_Build_CommandFailure(t *testing.T) { - // Create a builder in dry run mode - builder := NewDockerBuilder( - WithDockerDryRun(true), - WithDockerConcurrency(1), - ) - - // Try to build a project - result, err := builder.Build(context.Background(), "test-project", "test-tag") - - // Verify the result - require.NoError(t, err) - assert.Equal(t, "test-tag", result) -} - -func TestDockerBuilder_Build_ConcurrencyLimit(t *testing.T) { - logBuf, cleanup := captureLogs(t) - defer cleanup() - - concurrencyLimit := 2 - numBuilds := 5 - - // Create a builder in dry run mode with concurrency limit - builder := NewDockerBuilder( - WithDockerDryRun(true), - WithDockerConcurrency(concurrencyLimit), - ) - - // Execute builds concurrently - var wg sync.WaitGroup - wg.Add(numBuilds) - startTime := time.Now() - - for i := 0; i < numBuilds; i++ { - go func(idx int) { - defer wg.Done() - projectName := fmt.Sprintf("concurrent-project-%d", idx) - initialTag := fmt.Sprintf("%s:enclave1", projectName) - _, err := builder.Build(context.Background(), projectName, initialTag) - assert.NoError(t, err, "Build %d failed", idx) - }(i) - } - - wg.Wait() - totalDuration := time.Since(startTime) - - // Verify logs show dry run messages - logs := logBuf.String() - for i := 0; i < numBuilds; i++ { - projectName := fmt.Sprintf("concurrent-project-%d", i) - assert.Contains(t, logs, fmt.Sprintf("Dry run: Skipping build for project %s", projectName)) - } - assert.NotContains(t, logs, "Build failed") - - // Basic check: total time should be reasonable - assert.Less(t, totalDuration, 1*time.Second, "Total duration too long, indicates potential blocking") -} - -func 
TestDockerBuilder_Build_DryRun(t *testing.T) { - logBuf, cleanup := captureLogs(t) - defer cleanup() - - projectName := "dry-run-project" - initialTag := "dry-run-project:enclave-dry" - - // Create a builder in dry run mode - builder := NewDockerBuilder( - WithDockerDryRun(true), - WithDockerConcurrency(1), - ) - - // Execute build - resultTag, err := builder.Build(context.Background(), projectName, initialTag) - - // Verify results - require.NoError(t, err) - assert.Equal(t, initialTag, resultTag) - - // Verify log output - logs := logBuf.String() - assert.Contains(t, logs, fmt.Sprintf("Build started for project: %s", projectName)) - assert.Contains(t, logs, fmt.Sprintf("Dry run: Skipping build for project %s", projectName)) - assert.NotContains(t, logs, "Executing build command") - assert.NotContains(t, logs, "Build successful") - assert.NotContains(t, logs, "Build failed") -} - -func TestDockerBuilder_Build_DuplicateCalls(t *testing.T) { - logBuf, cleanup := captureLogs(t) - defer cleanup() - - projectName := "duplicate-project" - initialTag := "duplicate:enclave1" - - // Create a builder in dry run mode - builder := NewDockerBuilder( - WithDockerDryRun(true), - WithDockerConcurrency(2), - ) - - // Execute multiple concurrent builds - var wg sync.WaitGroup - numCalls := 3 - results := make([]string, numCalls) - errors := make([]error, numCalls) - wg.Add(numCalls) - - for i := 0; i < numCalls; i++ { - go func(idx int) { - defer wg.Done() - results[idx], errors[idx] = builder.Build(context.Background(), projectName, initialTag) - }(i) - } - - wg.Wait() - - // Verify all calls returned the same result - for i := 0; i < numCalls; i++ { - require.NoError(t, errors[i], "Call %d returned an error", i) - assert.Equal(t, initialTag, results[i], "Call %d returned wrong tag", i) - } - - // Verify logs show dry run messages - logs := logBuf.String() - assert.Contains(t, logs, fmt.Sprintf("Build started for project: %s", projectName)) - assert.Contains(t, logs, 
fmt.Sprintf("Dry run: Skipping build for project %s", projectName)) - assert.NotContains(t, logs, "Build failed") -} diff --git a/kurtosis-devnet/pkg/build/prestate.go b/kurtosis-devnet/pkg/build/prestate.go deleted file mode 100644 index 38472d5cc35f9..0000000000000 --- a/kurtosis-devnet/pkg/build/prestate.go +++ /dev/null @@ -1,109 +0,0 @@ -package build - -import ( - "bytes" - "context" - "fmt" - "log" - "os/exec" - "text/template" - - "go.opentelemetry.io/otel" -) - -// PrestateBuilder handles building prestates using just commands -type PrestateBuilder struct { - baseDir string - cmdTemplate *template.Template - dryRun bool - - builtPrestates map[string]interface{} -} - -const ( - prestateCmdTemplateStr = "just _prestate-build {{.Path}}" -) - -var defaultPrestateTemplate *template.Template - -func init() { - defaultPrestateTemplate = template.Must(template.New("prestate_build_cmd").Parse(prestateCmdTemplateStr)) -} - -type PrestateBuilderOptions func(*PrestateBuilder) - -func WithPrestateBaseDir(baseDir string) PrestateBuilderOptions { - return func(b *PrestateBuilder) { - b.baseDir = baseDir - } -} - -func WithPrestateTemplate(cmdTemplate *template.Template) PrestateBuilderOptions { - return func(b *PrestateBuilder) { - b.cmdTemplate = cmdTemplate - } -} - -func WithPrestateDryRun(dryRun bool) PrestateBuilderOptions { - return func(b *PrestateBuilder) { - b.dryRun = dryRun - } -} - -// NewPrestateBuilder creates a new PrestateBuilder instance -func NewPrestateBuilder(opts ...PrestateBuilderOptions) *PrestateBuilder { - b := &PrestateBuilder{ - baseDir: ".", - cmdTemplate: defaultPrestateTemplate, - dryRun: false, - builtPrestates: make(map[string]interface{}), - } - - for _, opt := range opts { - opt(b) - } - - return b -} - -// templateData holds the data for the command template -type prestateTemplateData struct { - Path string -} - -// Build executes the prestate build command -func (b *PrestateBuilder) Build(ctx context.Context, path string) error { - _, 
span := otel.Tracer("prestate-builder").Start(ctx, "build prestate") - defer span.End() - - if _, ok := b.builtPrestates[path]; ok { - return nil - } - - log.Printf("Building prestate: %s", path) - - // Prepare template data - data := prestateTemplateData{ - Path: path, - } - - // Execute template to get command string - var cmdBuf bytes.Buffer - if err := b.cmdTemplate.Execute(&cmdBuf, data); err != nil { - return fmt.Errorf("failed to execute command template: %w", err) - } - - // Create command - cmd := exec.Command("sh", "-c", cmdBuf.String()) - cmd.Dir = b.baseDir - - if !b.dryRun { - output, err := cmd.CombinedOutput() - if err != nil { - return fmt.Errorf("prestate build command failed: %w\nOutput: %s", err, string(output)) - } - } - - b.builtPrestates[path] = struct{}{} - return nil -} diff --git a/kurtosis-devnet/pkg/deploy/deploy.go b/kurtosis-devnet/pkg/deploy/deploy.go deleted file mode 100644 index e938638496572..0000000000000 --- a/kurtosis-devnet/pkg/deploy/deploy.go +++ /dev/null @@ -1,306 +0,0 @@ -package deploy - -import ( - "bytes" - "context" - "fmt" - "io" - "log" - "os" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/enclave" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/engine" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - autofixTypes "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/types" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -type EngineManager interface { - EnsureRunning() error - GetEngineType() (string, error) - RestartEngine() error -} - -type deployer interface { - Deploy(ctx context.Context, input io.Reader) (*spec.EnclaveSpec, error) - GetEnvironmentInfo(ctx context.Context, spec *spec.EnclaveSpec) 
(*kurtosis.KurtosisEnvironment, error) -} - -type DeployerFunc func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error) - -type DeployerOption func(*Deployer) - -type Deployer struct { - baseDir string - dryRun bool - kurtosisPkg string - enclave string - kurtosisBinary string - ktDeployer DeployerFunc - engineManager EngineManager - templateFile string - dataFile string - newEnclaveFS func(ctx context.Context, enclave string, opts ...ktfs.EnclaveFSOption) (*ktfs.EnclaveFS, error) - enclaveManager *enclave.KurtosisEnclaveManager - autofixMode autofixTypes.AutofixMode - tracer trace.Tracer -} - -func WithKurtosisDeployer(ktDeployer DeployerFunc) DeployerOption { - return func(d *Deployer) { - d.ktDeployer = ktDeployer - } -} - -func WithEngineManager(engineManager EngineManager) DeployerOption { - return func(d *Deployer) { - d.engineManager = engineManager - } -} - -func WithKurtosisBinary(kurtosisBinary string) DeployerOption { - return func(d *Deployer) { - d.kurtosisBinary = kurtosisBinary - } -} - -func WithKurtosisPackage(kurtosisPkg string) DeployerOption { - return func(d *Deployer) { - d.kurtosisPkg = kurtosisPkg - } -} - -func WithTemplateFile(templateFile string) DeployerOption { - return func(d *Deployer) { - d.templateFile = templateFile - } -} - -func WithDataFile(dataFile string) DeployerOption { - return func(d *Deployer) { - d.dataFile = dataFile - } -} - -func WithBaseDir(baseDir string) DeployerOption { - return func(d *Deployer) { - d.baseDir = baseDir - } -} - -func WithDryRun(dryRun bool) DeployerOption { - return func(d *Deployer) { - d.dryRun = dryRun - } -} - -func WithEnclave(enclave string) DeployerOption { - return func(d *Deployer) { - d.enclave = enclave - } -} - -func WithAutofixMode(autofixMode autofixTypes.AutofixMode) DeployerOption { - return func(d *Deployer) { - d.autofixMode = autofixMode - } -} - -func WithNewEnclaveFSFunc(newEnclaveFS func(ctx context.Context, enclave string, opts ...ktfs.EnclaveFSOption) 
(*ktfs.EnclaveFS, error)) DeployerOption { - return func(d *Deployer) { - d.newEnclaveFS = newEnclaveFS - } -} - -func NewDeployer(opts ...DeployerOption) (*Deployer, error) { - d := &Deployer{ - kurtosisBinary: "kurtosis", - ktDeployer: func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error) { - return kurtosis.NewKurtosisDeployer(opts...) - }, - newEnclaveFS: ktfs.NewEnclaveFS, - tracer: otel.Tracer("deployer"), - } - for _, opt := range opts { - opt(d) - } - - if d.engineManager == nil { - d.engineManager = engine.NewEngineManager(engine.WithKurtosisBinary(d.kurtosisBinary)) - } - - if !d.dryRun { - if err := d.engineManager.EnsureRunning(); err != nil { - return nil, fmt.Errorf("error ensuring kurtosis engine is running: %w", err) - } - - // Get and log engine info - engineType, err := d.engineManager.GetEngineType() - if err != nil { - log.Printf("Warning: failed to get engine type: %v", err) - } else { - log.Printf("Kurtosis engine type: %s", engineType) - } - var enclaveManager *enclave.KurtosisEnclaveManager - if engineType == "docker" { - enclaveManager, err = enclave.NewKurtosisEnclaveManager( - enclave.WithDockerManager(&enclave.DefaultDockerManager{}), - ) - } else { - enclaveManager, err = enclave.NewKurtosisEnclaveManager() - } - if err != nil { - return nil, fmt.Errorf("failed to create enclave manager: %w", err) - } - d.enclaveManager = enclaveManager - } else { - // This allows the deployer to work in dry run mode without a running Kurtosis engine - log.Printf("No Kurtosis engine running, skipping enclave manager creation") - } - - return d, nil -} - -func (d *Deployer) deployEnvironment(ctx context.Context, r io.Reader) (*kurtosis.KurtosisEnvironment, error) { - ctx, span := d.tracer.Start(ctx, "deploy environment") - defer span.End() - - // Create a multi reader to output deployment input to stdout - buf := bytes.NewBuffer(nil) - tee := io.TeeReader(r, buf) - - // Log the deployment input - log.Println("Deployment input:") - if _, err := 
io.Copy(os.Stdout, tee); err != nil { - return nil, fmt.Errorf("error copying deployment input: %w", err) - } - - opts := []kurtosis.KurtosisDeployerOptions{ - kurtosis.WithKurtosisBaseDir(d.baseDir), - kurtosis.WithKurtosisDryRun(d.dryRun), - kurtosis.WithKurtosisPackageName(d.kurtosisPkg), - kurtosis.WithKurtosisEnclave(d.enclave), - kurtosis.WithKurtosisAutofixMode(d.autofixMode), - } - - ktd, err := d.ktDeployer(opts...) - if err != nil { - return nil, fmt.Errorf("error creating kurtosis deployer: %w", err) - } - - spec, err := ktd.Deploy(ctx, buf) - if err != nil { - return nil, fmt.Errorf("error deploying kurtosis package: %w", err) - } - - info, err := ktd.GetEnvironmentInfo(ctx, spec) - if err != nil { - return nil, fmt.Errorf("error getting environment info: %w", err) - } - - // Upload the environment info to the enclave. - fs, err := d.newEnclaveFS(ctx, d.enclave) - if err != nil { - return nil, fmt.Errorf("error getting enclave fs: %w", err) - } - devnetFS := ktfs.NewDevnetFS(fs) - if err := devnetFS.UploadDevnetDescriptor(ctx, info.DevnetEnvironment); err != nil { - return nil, fmt.Errorf("error uploading devnet descriptor: %w", err) - } - - // Only configure Traefik in non-dry-run mode when Docker is available - if !d.dryRun { - if err := util.SetReverseProxyConfig(ctx); err != nil { - return nil, fmt.Errorf("failed to set Traefik network configuration: %w", err) - } - } - - fmt.Printf("Environment running successfully\n") - - return info, nil -} - -func (d *Deployer) renderTemplate(ctx context.Context, buildDir string, urlBuilder func(path ...string) string) (*bytes.Buffer, error) { - ctx, span := d.tracer.Start(ctx, "render template") - defer span.End() - - t := &Templater{ - baseDir: d.baseDir, - dryRun: d.dryRun, - enclave: d.enclave, - templateFile: d.templateFile, - dataFile: d.dataFile, - enclaveManager: d.enclaveManager, - buildDir: buildDir, - urlBuilder: urlBuilder, - } - - return t.Render(ctx) -} - -func (d *Deployer) Deploy(ctx 
context.Context, r io.Reader) (*kurtosis.KurtosisEnvironment, error) { - ctx, span := d.tracer.Start(ctx, "deploy devnet") - defer span.End() - - // Clean up the enclave before deploying - if d.autofixMode == autofixTypes.AutofixModeNuke { - // Recreate the engine - log.Println("Restarting engine") - if err := d.engineManager.RestartEngine(); err != nil { - return nil, fmt.Errorf("error restarting engine: %w", err) - } - log.Println("Nuking enclave") - if d.enclaveManager != nil { - // Remove all the enclaves and destroy all the docker resources related to kurtosis - err := d.enclaveManager.Nuke(ctx) - if err != nil { - return nil, fmt.Errorf("error nuking enclave: %w", err) - } - } - } else if d.autofixMode == autofixTypes.AutofixModeNormal { - log.Println("Autofixing enclave") - if d.enclaveManager != nil { - if err := d.enclaveManager.Autofix(ctx, d.enclave); err != nil { - return nil, fmt.Errorf("error autofixing enclave: %w", err) - } - } - } - - // Pre-create the enclave if it doesn't exist - if d.enclaveManager != nil { - _, err := d.enclaveManager.GetEnclave(ctx, d.enclave) - if err != nil { - return nil, fmt.Errorf("error getting enclave: %w", err) - } - } - - tmpDir, err := os.MkdirTemp("", d.enclave) - if err != nil { - return nil, fmt.Errorf("error creating temporary directory: %w", err) - } - defer os.RemoveAll(tmpDir) - - srv := &FileServer{ - baseDir: d.baseDir, - dryRun: d.dryRun, - enclave: d.enclave, - deployer: d.ktDeployer, - } - - ch := srv.getState(ctx) - - buf, err := d.renderTemplate(ctx, tmpDir, srv.URL) - if err != nil { - return nil, fmt.Errorf("error rendering template: %w", err) - } - - if err := srv.Deploy(ctx, tmpDir, ch); err != nil { - return nil, fmt.Errorf("error deploying fileserver: %w", err) - } - - return d.deployEnvironment(ctx, buf) -} diff --git a/kurtosis-devnet/pkg/deploy/deploy_test.go b/kurtosis-devnet/pkg/deploy/deploy_test.go deleted file mode 100644 index 0836e870d2cfe..0000000000000 --- 
a/kurtosis-devnet/pkg/deploy/deploy_test.go +++ /dev/null @@ -1,143 +0,0 @@ -package deploy - -import ( - "bytes" - "context" - "encoding/json" - "io" - "os" - "path/filepath" - "testing" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// mockDeployerForTest implements the deployer interface for testing -type mockDeployerForTest struct { - baseDir string -} - -func (m *mockDeployerForTest) Deploy(ctx context.Context, input io.Reader) (*spec.EnclaveSpec, error) { - // Create a mock env.json file - envPath := filepath.Join(m.baseDir, "env.json") - mockEnv := map[string]interface{}{ - "test": "value", - } - data, err := json.Marshal(mockEnv) - if err != nil { - return nil, err - } - if err := os.WriteFile(envPath, data, 0644); err != nil { - return nil, err - } - return &spec.EnclaveSpec{}, nil -} - -func (m *mockDeployerForTest) GetEnvironmentInfo(ctx context.Context, spec *spec.EnclaveSpec) (*kurtosis.KurtosisEnvironment, error) { - return &kurtosis.KurtosisEnvironment{}, nil -} - -// mockEnclaveContext implements EnclaveContextIface for testing -type mockEnclaveContext struct { - artifacts []string -} - -func (m *mockEnclaveContext) GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) { - result := make([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, len(m.artifacts)) - for i, name := range m.artifacts { - result[i] = &kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid{ - FileName: name, - FileUuid: "test-uuid", - } - } - return result, nil -} - -func (m *mockEnclaveContext) 
DownloadFilesArtifact(ctx context.Context, name string) ([]byte, error) { - return nil, nil -} - -func (m *mockEnclaveContext) UploadFiles(pathToUpload string, artifactName string) (services.FilesArtifactUUID, services.FileArtifactName, error) { - return "", "", nil -} - -func TestDeploy(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - // Create a temporary directory for the environment output - tmpDir, err := os.MkdirTemp("", "deploy-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create a simple template file - templatePath := filepath.Join(tmpDir, "template.yaml") - err = os.WriteFile(templatePath, []byte("test: {{ .Config }}"), 0644) - require.NoError(t, err) - - // Create a simple data file - dataPath := filepath.Join(tmpDir, "data.json") - err = os.WriteFile(dataPath, []byte(`{"Config": "value"}`), 0644) - require.NoError(t, err) - - envPath := filepath.Join(tmpDir, "env.json") - // Create a simple deployment configuration - deployConfig := bytes.NewBufferString(`{"test": "config"}`) - - // Create a mock deployer function - mockDeployerFunc := func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error) { - return &mockDeployerForTest{baseDir: tmpDir}, nil - } - - // Create a mock EnclaveFS function - mockEnclaveFSFunc := func(ctx context.Context, enclave string, opts ...ktfs.EnclaveFSOption) (*ktfs.EnclaveFS, error) { - mockCtx := &mockEnclaveContext{ - artifacts: []string{ - "devnet-descriptor-1", - "devnet-descriptor-2", - }, - } - return ktfs.NewEnclaveFS(ctx, enclave, ktfs.WithEnclaveCtx(mockCtx)) - } - - d, err := NewDeployer( - WithBaseDir(tmpDir), - WithKurtosisDeployer(mockDeployerFunc), - WithDryRun(true), - WithTemplateFile(templatePath), - WithDataFile(dataPath), - WithNewEnclaveFSFunc(mockEnclaveFSFunc), - ) - require.NoError(t, err) - - env, err := d.Deploy(ctx, deployConfig) - require.NoError(t, err) - require.NotNil(t, env) - - // Verify the environment file was created - 
assert.FileExists(t, envPath) - - // Read and verify the content - content, err := os.ReadFile(envPath) - require.NoError(t, err) - - var envData map[string]interface{} - err = json.Unmarshal(content, &envData) - require.NoError(t, err) - assert.Equal(t, "value", envData["test"]) -} - -func TestNewDeployer_DryRun(t *testing.T) { - // In dry run mode, we should not create an enclave manager - deployer, err := NewDeployer( - WithDryRun(true), - ) - require.NoError(t, err) - assert.Nil(t, deployer.enclaveManager) -} diff --git a/kurtosis-devnet/pkg/deploy/fileserver.go b/kurtosis-devnet/pkg/deploy/fileserver.go deleted file mode 100644 index 53b0802442cf3..0000000000000 --- a/kurtosis-devnet/pkg/deploy/fileserver.go +++ /dev/null @@ -1,282 +0,0 @@ -package deploy - -import ( - "bytes" - "context" - "crypto/sha256" - "encoding/hex" - "encoding/json" - "fmt" - "log" - "os" - "path/filepath" - "strings" - "sync" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" - "github.com/spf13/afero" - "go.opentelemetry.io/otel" -) - -const FILESERVER_PACKAGE = "fileserver" - -type FileServer struct { - baseDir string - enclave string - dryRun bool - deployer DeployerFunc - fs afero.Fs -} - -func (f *FileServer) URL(path ...string) string { - return fmt.Sprintf("http://%s/%s", FILESERVER_PACKAGE, strings.Join(path, "/")) -} - -func (f *FileServer) Deploy(ctx context.Context, sourceDir string, stateCh <-chan *fileserverState) (retErr error) { - ctx, span := otel.Tracer("fileserver").Start(ctx, "deploy fileserver") - defer span.End() - - if f.fs == nil { - f.fs = afero.NewOsFs() - } - - // Check if source directory is empty. If it is, then ie means we don't have - // anything to serve, so we might as well not deploy the fileserver. 
- entries, err := afero.ReadDir(f.fs, sourceDir) - if err != nil { - return fmt.Errorf("error reading source directory: %w", err) - } - if len(entries) == 0 { - return nil - } - - srcHash, err := calculateDirHashWithFs(sourceDir, f.fs) - if err != nil { - return fmt.Errorf("error calculating source directory hash: %w", err) - } - - // Create a temp dir in the fileserver package - baseDir := filepath.Join(f.baseDir, FILESERVER_PACKAGE) - if err := f.fs.MkdirAll(baseDir, 0755); err != nil { - return fmt.Errorf("error creating base directory: %w", err) - } - - // Create the nginx directory structure - nginxDir := filepath.Join(baseDir, "static_files", "nginx") - if err := f.fs.MkdirAll(nginxDir, 0755); err != nil { - return fmt.Errorf("error creating nginx directory: %w", err) - } - - configHash, err := calculateDirHashWithFs(nginxDir, f.fs) - if err != nil { - return fmt.Errorf("error calculating base directory hash: %w", err) - } - - refState := <-stateCh - if refState.contentHash == srcHash && refState.configHash == configHash { - log.Println("No changes to fileserver, skipping deployment") - return nil - } - - // Can't use MkdirTemp here because the directory name needs to always be the same - // in order for kurtosis file artifact upload to be idempotent. - // (i.e. 
the file upload and all its downstream dependencies can be SKIPPED on re-runs) - tempDir := filepath.Join(baseDir, "upload-content") - - // Clean up any existing content - if err := f.fs.RemoveAll(tempDir); err != nil { - return fmt.Errorf("error cleaning up existing directory: %w", err) - } - - // Create the directory - if err := f.fs.MkdirAll(tempDir, 0755); err != nil { - return fmt.Errorf("error creating temporary directory: %w", err) - } - defer func() { - if err := f.fs.RemoveAll(tempDir); err != nil && retErr == nil { - retErr = fmt.Errorf("error cleaning up temporary directory: %w", err) - } - }() - - // Copy build dir contents to tempDir - if err := util.CopyDir(sourceDir, tempDir, f.fs); err != nil { - return fmt.Errorf("error copying directory: %w", err) - } - - buf := bytes.NewBuffer(nil) - buf.WriteString(fmt.Sprintf("source_path: %s\n", filepath.Base(tempDir))) - - opts := []kurtosis.KurtosisDeployerOptions{ - kurtosis.WithKurtosisBaseDir(f.baseDir), - kurtosis.WithKurtosisDryRun(f.dryRun), - kurtosis.WithKurtosisPackageName(FILESERVER_PACKAGE), - kurtosis.WithKurtosisEnclave(f.enclave), - } - - d, err := f.deployer(opts...) 
- if err != nil { - return fmt.Errorf("error creating kurtosis deployer: %w", err) - } - - _, err = d.Deploy(ctx, buf) - if err != nil { - return fmt.Errorf("error deploying kurtosis package: %w", err) - } - - return -} - -type fileserverState struct { - contentHash string - configHash string -} - -// downloadAndHashArtifact downloads an artifact and calculates its hash -func downloadAndHashArtifact(ctx context.Context, enclave, artifactName string) (hash string, retErr error) { - fs, err := ktfs.NewEnclaveFS(ctx, enclave) - if err != nil { - return "", fmt.Errorf("failed to create enclave fs: %w", err) - } - - // Create temp dir - osFs := afero.NewOsFs() - tempDir, err := afero.TempDir(osFs, "", artifactName+"-*") - if err != nil { - return "", fmt.Errorf("failed to create temp dir: %w", err) - } - defer func() { - if err := osFs.RemoveAll(tempDir); err != nil && retErr == nil { - retErr = fmt.Errorf("error cleaning up temporary directory: %w", err) - } - }() - - // Download artifact - artifact, err := fs.GetArtifact(ctx, artifactName) - if err != nil { - return "", fmt.Errorf("failed to get artifact: %w", err) - } - - // Ensure parent directories exist before extracting - if err := osFs.MkdirAll(tempDir, 0755); err != nil { - return "", fmt.Errorf("failed to create temp dir structure: %w", err) - } - - // Extract to temp dir - if err := artifact.Download(tempDir); err != nil { - return "", fmt.Errorf("failed to download artifact: %w", err) - } - - // Calculate hash - hash, err = calculateDirHash(tempDir) - if err != nil { - return "", fmt.Errorf("failed to calculate hash: %w", err) - } - - return -} - -func (f *FileServer) getState(ctx context.Context) <-chan *fileserverState { - stateCh := make(chan *fileserverState) - - go func(ctx context.Context) { - st := &fileserverState{} - var wg sync.WaitGroup - - type artifactInfo struct { - name string - dest *string - } - - artifacts := []artifactInfo{ - {"fileserver-content", &st.contentHash}, - 
{"fileserver-nginx-conf", &st.configHash}, - } - - for _, art := range artifacts { - wg.Add(1) - go func(art artifactInfo) { - defer wg.Done() - hash, err := downloadAndHashArtifact(ctx, f.enclave, art.name) - if err == nil { - *art.dest = hash - } - }(art) - } - - wg.Wait() - stateCh <- st - }(ctx) - - return stateCh -} - -type entry struct { - RelPath string `json:"rel_path"` - Size int64 `json:"size"` - Mode string `json:"mode"` - Content []byte `json:"content"` -} - -// calculateDirHash returns a SHA256 hash of the directory contents -// It walks through the directory, hashing file names and contents -func calculateDirHash(dir string) (string, error) { - return calculateDirHashWithFs(dir, afero.NewOsFs()) -} - -// calculateDirHashWithFs is like calculateDirHash but accepts a custom filesystem -func calculateDirHashWithFs(dir string, fs afero.Fs) (string, error) { - hash := sha256.New() - - err := afero.Walk(fs, dir, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Get path relative to root dir - relPath, err := filepath.Rel(dir, path) - if err != nil { - return err - } - - // Skip the root directory - if relPath == "." 
{ - return nil - } - - // Add the relative path and file info to hash - entry := entry{ - RelPath: relPath, - Size: info.Size(), - Mode: info.Mode().String(), - } - - // If it's a regular file, add its contents to hash - if !info.IsDir() { - content, err := afero.ReadFile(fs, path) - if err != nil { - return err - } - entry.Content = content - } - - jsonBytes, err := json.Marshal(entry) - if err != nil { - return err - } - _, err = hash.Write(jsonBytes) - if err != nil { - return err - } - - return nil - }) - - if err != nil { - return "", fmt.Errorf("error walking directory: %w", err) - } - - hashStr := hex.EncodeToString(hash.Sum(nil)) - return hashStr, nil -} diff --git a/kurtosis-devnet/pkg/deploy/fileserver_test.go b/kurtosis-devnet/pkg/deploy/fileserver_test.go deleted file mode 100644 index 75afe3a9637a5..0000000000000 --- a/kurtosis-devnet/pkg/deploy/fileserver_test.go +++ /dev/null @@ -1,149 +0,0 @@ -package deploy - -import ( - "context" - "io" - "path/filepath" - "testing" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestDeployFileserver(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - fs := afero.NewMemMapFs() - - // Create test directories - sourceDir := "/source" - require.NoError(t, fs.MkdirAll(sourceDir, 0755)) - - // Create required directory structure - nginxDir := filepath.Join(sourceDir, "static_files", "nginx") - require.NoError(t, fs.MkdirAll(nginxDir, 0755)) - - // Create a mock deployer function - mockDeployerFunc := func(opts ...kurtosis.KurtosisDeployerOptions) (deployer, error) { - return &mockDeployer{}, nil - } - - testCases := []struct { - name string - setup func(t *testing.T, fs afero.Fs, sourceDir, nginxDir string, state *fileserverState) - state *fileserverState 
- shouldError bool - shouldDeploy bool - }{ - { - name: "empty source directory - no deployment needed", - setup: func(t *testing.T, fs afero.Fs, sourceDir, nginxDir string, state *fileserverState) { - // No files to create - }, - state: &fileserverState{}, - shouldError: false, - shouldDeploy: false, - }, - { - name: "new files to deploy", - setup: func(t *testing.T, fs afero.Fs, sourceDir, nginxDir string, state *fileserverState) { - require.NoError(t, afero.WriteFile( - fs, - filepath.Join(sourceDir, "test.txt"), - []byte("test content"), - 0644, - )) - }, - state: &fileserverState{}, - shouldError: false, - shouldDeploy: true, - }, - { - name: "no changes - deployment skipped", - setup: func(t *testing.T, fs afero.Fs, sourceDir, nginxDir string, state *fileserverState) { - require.NoError(t, afero.WriteFile( - fs, - filepath.Join(sourceDir, "test.txt"), - []byte("test content"), - 0644, - )) - - // Calculate actual hash for the test file - hash, err := calculateDirHashWithFs(sourceDir, fs) - require.NoError(t, err) - - // Calculate nginx config hash - configHash, err := calculateDirHashWithFs(nginxDir, fs) - require.NoError(t, err) - - // Update state with actual hashes - state.contentHash = hash - state.configHash = configHash - }, - state: &fileserverState{}, - shouldError: false, - shouldDeploy: false, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - // Clean up and recreate source directory for each test - require.NoError(t, fs.RemoveAll(sourceDir)) - require.NoError(t, fs.MkdirAll(sourceDir, 0755)) - - // Recreate nginx directory - require.NoError(t, fs.MkdirAll(nginxDir, 0755)) - - // Setup test files - tc.setup(t, fs, sourceDir, nginxDir, tc.state) - - // Create a separate directory for the fileserver deployment - deployBaseDir := "/deploy" - require.NoError(t, fs.MkdirAll(deployBaseDir, 0755)) - - fileServer := &FileServer{ - baseDir: deployBaseDir, - enclave: "test-enclave", - dryRun: true, - deployer: 
mockDeployerFunc, - fs: fs, - } - - // Create state channel and send test state - ch := make(chan *fileserverState, 1) - ch <- tc.state - close(ch) - - err := fileServer.Deploy(ctx, sourceDir, ch) - if tc.shouldError { - require.Error(t, err) - } else { - require.NoError(t, err) - } - - // Verify deployment directory was created only if deployment was needed - deployDir := filepath.Join(deployBaseDir, FILESERVER_PACKAGE) - exists, err := afero.Exists(fs, deployDir) - require.NoError(t, err) - if tc.shouldDeploy { - assert.True(t, exists) - } - }) - } -} - -// mockDeployer implements the deployer interface for testing -type mockDeployer struct{} - -func (m *mockDeployer) Deploy(ctx context.Context, input io.Reader) (*spec.EnclaveSpec, error) { - return &spec.EnclaveSpec{}, nil -} - -func (m *mockDeployer) GetEnvironmentInfo(ctx context.Context, spec *spec.EnclaveSpec) (*kurtosis.KurtosisEnvironment, error) { - return &kurtosis.KurtosisEnvironment{}, nil -} diff --git a/kurtosis-devnet/pkg/deploy/prestate.go b/kurtosis-devnet/pkg/deploy/prestate.go deleted file mode 100644 index d6c7a823a6c98..0000000000000 --- a/kurtosis-devnet/pkg/deploy/prestate.go +++ /dev/null @@ -1,112 +0,0 @@ -package deploy - -import ( - "context" - "encoding/json" - "fmt" - "log" - "os" - "path/filepath" - "strings" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/build" -) - -type PrestateInfo struct { - URL string `json:"url"` - Hashes map[string]string `json:"hashes"` -} - -type localPrestateHolder struct { - info *PrestateInfo - baseDir string - buildDir string - dryRun bool - builder *build.PrestateBuilder - urlBuilder func(path ...string) string -} - -func (h *localPrestateHolder) GetPrestateInfo(ctx context.Context) (*PrestateInfo, error) { - if h.info != nil { - return h.info, nil - } - - prestatePath := []string{"proofs", "op-program", "cannon"} - prestateURL := h.urlBuilder(prestatePath...) 
- - // Create build directory with the final path structure - buildDir := filepath.Join(append([]string{h.buildDir}, prestatePath...)...) - if err := os.MkdirAll(buildDir, 0755); err != nil { - return nil, fmt.Errorf("failed to create prestate build directory: %w", err) - } - - info := &PrestateInfo{ - URL: prestateURL, - Hashes: make(map[string]string), - } - - if h.dryRun { - // In dry run, populate with placeholder keys to avoid template errors during first pass - info.Hashes["prestate_mt64"] = "dry_run_placeholder" - info.Hashes["prestate_interop"] = "dry_run_placeholder" - h.info = info - return info, nil - } - - // Map of known file prefixes to their keys - fileToKey := map[string]string{ - "prestate-proof-mt64.json": "prestate_mt64", - "prestate-proof-interop.json": "prestate_interop", - } - - // Build all prestate files directly in the target directory - if err := h.builder.Build(ctx, buildDir); err != nil { - return nil, fmt.Errorf("failed to build prestates: %w", err) - } - - // Find and process all prestate files - matches, err := filepath.Glob(filepath.Join(buildDir, "prestate-proof*.json")) - if err != nil { - return nil, fmt.Errorf("failed to find prestate files: %w", err) - } - - // Process each file to rename it to its hash - for _, filePath := range matches { - content, err := os.ReadFile(filePath) - if err != nil { - return nil, fmt.Errorf("failed to read prestate %s: %w", filepath.Base(filePath), err) - } - - var data struct { - Pre string `json:"pre"` - } - if err := json.Unmarshal(content, &data); err != nil { - return nil, fmt.Errorf("failed to parse prestate %s: %w", filepath.Base(filePath), err) - } - - // Store hash with its corresponding key - if key, exists := fileToKey[filepath.Base(filePath)]; exists { - info.Hashes[key] = data.Pre - } - - // Rename files to hash-based names - newFileName := data.Pre + ".json" - hashedPath := filepath.Join(buildDir, newFileName) - if err := os.Rename(filePath, hashedPath); err != nil { - return nil, 
fmt.Errorf("failed to rename prestate %s: %w", filepath.Base(filePath), err) - } - log.Printf("%s available at: %s/%s\n", filepath.Base(filePath), prestateURL, newFileName) - - // Rename the corresponding binary file - binFilePath := strings.Replace(strings.TrimSuffix(filePath, ".json"), "-proof", "", 1) + ".bin.gz" - newBinFileName := data.Pre + ".bin.gz" - binHashedPath := filepath.Join(buildDir, newBinFileName) - if err := os.Rename(binFilePath, binHashedPath); err != nil { - return nil, fmt.Errorf("failed to rename prestate %s: %w", filepath.Base(binFilePath), err) - } - log.Printf("%s available at: %s/%s\n", filepath.Base(binFilePath), prestateURL, newBinFileName) - } - - h.info = info - return info, nil -} diff --git a/kurtosis-devnet/pkg/deploy/prestate_test.go b/kurtosis-devnet/pkg/deploy/prestate_test.go deleted file mode 100644 index 3525531547aa4..0000000000000 --- a/kurtosis-devnet/pkg/deploy/prestate_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package deploy - -import ( - "bytes" - "context" - "os" - "path/filepath" - "strings" - "sync" - "testing" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/tmpl" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "gopkg.in/yaml.v3" -) - -func TestLocalPrestate(t *testing.T) { - tests := []struct { - name string - dryRun bool - wantErr bool - }{ - { - name: "dry run mode", - dryRun: true, - wantErr: false, - }, - { - name: "normal mode", - dryRun: false, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "prestate-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create a mock justfile for each test case - err = os.WriteFile(filepath.Join(tmpDir, "justfile"), []byte(` -_prestate-build target: - @echo "Mock prestate build" -`), 0644) - require.NoError(t, err) - - templater := &Templater{ - baseDir: tmpDir, - dryRun: tt.dryRun, - buildDir: tmpDir, - urlBuilder: func(path 
...string) string { - return "http://fileserver/" + strings.Join(path, "/") - }, - } - - buildWg := &sync.WaitGroup{} - // Create template context with just the prestate function - tmplCtx := tmpl.NewTemplateContext(templater.localPrestateOption(context.Background(), buildWg)) - - // Test template with multiple calls to localPrestate - template := `first: - url: {{(localPrestate).URL}} - hashes: - game: {{index (localPrestate).Hashes "game"}} - proof: {{index (localPrestate).Hashes "proof"}} -second: - url: {{(localPrestate).URL}} - hashes: - game: {{index (localPrestate).Hashes "game"}} - proof: {{index (localPrestate).Hashes "proof"}}` - buf := bytes.NewBuffer(nil) - err = tmplCtx.InstantiateTemplate(bytes.NewBufferString(template), buf) - - if tt.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - - // Wait for the async goroutine to complete - buildWg.Wait() - - // Verify the output is valid YAML and contains the static path - output := buf.String() - assert.Contains(t, output, "url: http://fileserver/proofs/op-program/cannon") - - // Verify both calls return the same values - var result struct { - First struct { - URL string `yaml:"url"` - Hashes map[string]string `yaml:"hashes"` - } `yaml:"first"` - Second struct { - URL string `yaml:"url"` - Hashes map[string]string `yaml:"hashes"` - } `yaml:"second"` - } - err = yaml.Unmarshal(buf.Bytes(), &result) - require.NoError(t, err) - - // Check that both calls returned identical results - assert.Equal(t, result.First.URL, result.Second.URL, "URLs should match") - assert.Equal(t, result.First.Hashes, result.Second.Hashes, "Hashes should match") - - // In dry run mode, we don't create the directory - prestateDir := filepath.Join(tmpDir, "proofs", "op-program", "cannon") - assert.DirExists(t, prestateDir) - }) - } -} diff --git a/kurtosis-devnet/pkg/deploy/template.go b/kurtosis-devnet/pkg/deploy/template.go deleted file mode 100644 index d1c509a535c35..0000000000000 --- 
a/kurtosis-devnet/pkg/deploy/template.go +++ /dev/null @@ -1,288 +0,0 @@ -package deploy - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "log" - "os" - "strings" - "sync" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/build" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/enclave" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/tmpl" -) - -var ( - dockerBuildConcurrency = 8 -) - -type Templater struct { - enclave string - dryRun bool - baseDir string - templateFile string - dataFile string - buildDir string - urlBuilder func(path ...string) string - - // Common state across template functions - buildJobsMux sync.Mutex - buildJobs map[string]*dockerBuildJob - - contracts contractStateBuildJob - prestate prestateStateBuildJob - - enclaveManager *enclave.KurtosisEnclaveManager -} - -// prestateStateBuildJob helps track the state of the prestate build -type prestateStateBuildJob struct { - info *PrestateInfo - err error - started bool -} - -// contractStateBuildJob helps track the state of the contract build -type contractStateBuildJob struct { - url string - err error - started bool -} - -// dockerBuildJob helps collect and group build jobs -type dockerBuildJob struct { - projectName string - imageTag string - result string - err error - done chan struct{} -} - -func (f *Templater) localDockerImageOption(_ context.Context) tmpl.TemplateContextOptions { - // Initialize the build jobs map if it's nil - if f.buildJobs == nil { - f.buildJobs = make(map[string]*dockerBuildJob) - } - - imageTag := func(projectName string) string { - return fmt.Sprintf("%s:%s", projectName, f.enclave) - } - - // Function that gets called during template rendering - return tmpl.WithFunction("localDockerImage", func(projectName string) (string, error) { - tag := imageTag(projectName) - - // First, check if we already have this build job - f.buildJobsMux.Lock() - job, exists := f.buildJobs[projectName] - if !exists { - // If 
not, create a new job but don't start it yet - job = &dockerBuildJob{ - projectName: projectName, - imageTag: tag, - done: make(chan struct{}), - } - f.buildJobs[projectName] = job - } - f.buildJobsMux.Unlock() - - // If the job is already done, return its result - select { - case <-job.done: - return job.result, job.err - default: - // Just collect the build request for now and return a placeholder - // The actual build will happen in Render() before final template evaluation - return fmt.Sprintf("__PLACEHOLDER_DOCKER_IMAGE_%s__", projectName), nil - } - }) -} - -func (f *Templater) localContractArtifactsOption(ctx context.Context, buildWg *sync.WaitGroup) tmpl.TemplateContextOptions { - contractBuilder := build.NewContractBuilder( - build.WithContractBaseDir(f.baseDir), - build.WithContractDryRun(f.dryRun), - build.WithContractEnclave(f.enclave), - build.WithContractEnclaveManager(f.enclaveManager), - ) - - return tmpl.WithFunction("localContractArtifacts", func(layer string) (string, error) { - if f.dryRun { - return "artifact://contracts", nil - } - if !f.contracts.started { - f.contracts.started = true - buildWg.Add(1) - go func() { - url, err := contractBuilder.Build(ctx, "") - f.contracts.url = url - f.contracts.err = err - buildWg.Done() - }() - return contractBuilder.GetContractUrl(), nil - } - return f.contracts.url, f.contracts.err - }) -} - -func (f *Templater) localPrestateOption(ctx context.Context, buildWg *sync.WaitGroup) tmpl.TemplateContextOptions { - holder := &localPrestateHolder{ - baseDir: f.baseDir, - buildDir: f.buildDir, - dryRun: f.dryRun, - builder: build.NewPrestateBuilder( - build.WithPrestateBaseDir(f.baseDir), - build.WithPrestateDryRun(f.dryRun), - ), - urlBuilder: f.urlBuilder, - } - - return tmpl.WithFunction("localPrestate", func() (*PrestateInfo, error) { - if !f.prestate.started { - f.prestate.started = true - buildWg.Add(1) - go func() { - info, err := holder.GetPrestateInfo(ctx) - f.prestate.info = info - f.prestate.err = err 
- buildWg.Done() - }() - } - if f.prestate.info == nil { - prestatePath := []string{"proofs", "op-program", "cannon"} - return &PrestateInfo{ - URL: f.urlBuilder(prestatePath...), - Hashes: map[string]string{ - "prestate_mt64": "dry_run_placeholder", - "prestate_interop": "dry_run_placeholder", - }, - }, nil - } - return f.prestate.info, f.prestate.err - }) -} - -func (f *Templater) Render(ctx context.Context) (*bytes.Buffer, error) { - // Initialize the build jobs map if it's nil - if f.buildJobs == nil { - f.buildJobs = make(map[string]*dockerBuildJob) - } - - // Check if template file exists - if _, err := os.Stat(f.templateFile); os.IsNotExist(err) { - return nil, fmt.Errorf("template file does not exist: %s", f.templateFile) - } - - // Check if the template file contains template syntax - content, err := os.ReadFile(f.templateFile) - if err != nil { - return nil, fmt.Errorf("error reading template file: %w", err) - } - - if len(content) == 0 { - return nil, fmt.Errorf("template file is empty: %s", f.templateFile) - } - - contentStr := string(content) - if !strings.Contains(contentStr, "{{") && !strings.Contains(contentStr, "}}") { - // This is a plain YAML file, return it as-is - return bytes.NewBuffer(content), nil - } - - buildWg := &sync.WaitGroup{} - - opts := []tmpl.TemplateContextOptions{ - f.localDockerImageOption(ctx), - f.localContractArtifactsOption(ctx, buildWg), - f.localPrestateOption(ctx, buildWg), - tmpl.WithBaseDir(f.baseDir), - } - - // Read and parse the data file if provided - if f.dataFile != "" { - data, err := os.ReadFile(f.dataFile) - if err != nil { - return nil, fmt.Errorf("error reading data file: %w", err) - } - - var templateData map[string]interface{} - if err := json.Unmarshal(data, &templateData); err != nil { - return nil, fmt.Errorf("error parsing JSON data: %w", err) - } - - opts = append(opts, tmpl.WithData(templateData)) - } - - // Open template file - tmplFile, err := os.Open(f.templateFile) - if err != nil { - return nil, 
fmt.Errorf("error opening template file: %w", err) - } - defer tmplFile.Close() - - // Create template context - tmplCtx := tmpl.NewTemplateContext(opts...) - - // First pass: Collect all build jobs without executing them - prelimBuf := bytes.NewBuffer(nil) - if err := tmplCtx.InstantiateTemplate(tmplFile, prelimBuf); err != nil { - return nil, fmt.Errorf("error in first-pass template processing: %w", err) - } - - // Find all docker build jobs and execute them concurrently - var dockerJobs []*dockerBuildJob - f.buildJobsMux.Lock() - for _, job := range f.buildJobs { - dockerJobs = append(dockerJobs, job) - } - f.buildJobsMux.Unlock() - - if len(dockerJobs) > 0 { - // Create a single Docker builder for all builds using the factory - dockerBuilder := build.NewDockerBuilder( - build.WithDockerBaseDir(f.baseDir), - build.WithDockerDryRun(f.dryRun), - build.WithDockerConcurrency(dockerBuildConcurrency), // Set concurrency - ) - - // Start all the builds - buildWg.Add(len(dockerJobs)) - for _, job := range dockerJobs { - go func(j *dockerBuildJob) { - defer buildWg.Done() - log.Printf("Starting build for %s (tag: %s)", j.projectName, j.imageTag) - j.result, j.err = dockerBuilder.Build(ctx, j.projectName, j.imageTag) - close(j.done) // Mark this job as done - }(job) - } - buildWg.Wait() // Wait for all builds to complete - - // Check for any build errors - for _, job := range dockerJobs { - if job.err != nil { - return nil, fmt.Errorf("error building docker image for %s: %w", job.projectName, job.err) - } - } - - // Now reopen the template file for the second pass - tmplFile.Close() - tmplFile, err = os.Open(f.templateFile) - if err != nil { - return nil, fmt.Errorf("error reopening template file: %w", err) - } - defer tmplFile.Close() - } else { - buildWg.Wait() - } - - // Second pass: Render with actual build results - buf := bytes.NewBuffer(nil) - if err := tmplCtx.InstantiateTemplate(tmplFile, buf); err != nil { - return nil, fmt.Errorf("error processing template: 
%w", err) - } - - return buf, nil -} diff --git a/kurtosis-devnet/pkg/deploy/template_test.go b/kurtosis-devnet/pkg/deploy/template_test.go deleted file mode 100644 index 7e7b9e2d9bc75..0000000000000 --- a/kurtosis-devnet/pkg/deploy/template_test.go +++ /dev/null @@ -1,417 +0,0 @@ -package deploy - -import ( - "context" - "os" - "path/filepath" - "strings" - "sync" - "testing" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/enclave" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/tmpl" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRenderTemplate(t *testing.T) { - // Create a temporary directory for test files - tmpDir, err := os.MkdirTemp("", "template-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create a test template file - templateContent := ` -name: {{.name}} -image: {{localDockerImage "test-project"}} -artifacts: {{localContractArtifacts "l1"}}` - - templatePath := filepath.Join(tmpDir, "template.yaml") - err = os.WriteFile(templatePath, []byte(templateContent), 0644) - require.NoError(t, err) - - // Create a test data file - dataContent := `{"name": "test-deployment"}` - dataPath := filepath.Join(tmpDir, "data.json") - err = os.WriteFile(dataPath, []byte(dataContent), 0644) - require.NoError(t, err) - - // Create a Templater instance - templater := &Templater{ - enclave: "test-enclave", - dryRun: true, - baseDir: tmpDir, - templateFile: templatePath, - dataFile: dataPath, - buildDir: tmpDir, - urlBuilder: func(path ...string) string { - return "http://localhost:8080/" + strings.Join(path, "/") - }, - } - - buf, err := templater.Render(context.Background()) - require.NoError(t, err) - - // Verify template rendering - assert.Contains(t, buf.String(), "test-deployment") - assert.Contains(t, buf.String(), "test-project:test-enclave") -} - -func TestRenderTemplate_DryRun(t *testing.T) { - // Create a temporary directory for test files - tmpDir, err := 
os.MkdirTemp("", "template-test-dryrun") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create a test template file with multiple docker image requests, including duplicates - templateContent := ` -name: {{.name}} -imageA1: {{ localDockerImage "project-a" }} -imageB: {{ localDockerImage "project-b" }} -imageA2: {{ localDockerImage "project-a" }} -contracts: {{ localContractArtifacts "l1" }} -prestateHash: {{ (localPrestate).Hashes.prestate_mt64 }}` - - templatePath := filepath.Join(tmpDir, "template.yaml") - err = os.WriteFile(templatePath, []byte(templateContent), 0644) - require.NoError(t, err) - - // Create a test data file - dataContent := `{"name": "test-deployment"}` - dataPath := filepath.Join(tmpDir, "data.json") - err = os.WriteFile(dataPath, []byte(dataContent), 0644) - require.NoError(t, err) - - // Create dummy prestate and contract files for dry run build simulation - prestateDir := filepath.Join(tmpDir, "prestate_build") - contractsDir := filepath.Join(tmpDir, "contracts_build") - require.NoError(t, os.MkdirAll(prestateDir, 0755)) - require.NoError(t, os.MkdirAll(contractsDir, 0755)) - // Note: The actual content doesn't matter for dry run, just existence might - // depending on how the builders are implemented, but our current focus is docker build flow. - - // Create a Templater instance in dryRun mode - enclaveName := "test-enclave-dryrun" - templater := &Templater{ - enclave: enclaveName, - dryRun: true, - baseDir: tmpDir, // Needs a valid base directory - templateFile: templatePath, - dataFile: dataPath, - buildDir: tmpDir, // Used by contract/prestate builders - urlBuilder: func(path ...string) string { - return "http://fileserver.test/" + strings.Join(path, "/") - }, - } - - buf, err := templater.Render(context.Background()) - require.NoError(t, err) - - // --- Assertions --- - output := buf.String() - t.Logf("Rendered output (dry run):\n%s", output) - - // 1. 
Verify template data is rendered - assert.Contains(t, output, "name: test-deployment") - - // 2. Verify Docker images are replaced with their *initial* tags (due to dryRun) - // and NOT the placeholder values. - expectedTagA := "project-a:" + enclaveName - expectedTagB := "project-b:" + enclaveName - assert.Contains(t, output, "imageA1: "+expectedTagA) - assert.Contains(t, output, "imageB: "+expectedTagB) - assert.Contains(t, output, "imageA2: "+expectedTagA) // Duplicate uses the same tag - assert.NotContains(t, output, "__PLACEHOLDER_DOCKER_IMAGE_") - - // 3. Verify contract artifacts URL is present (uses dry run logic of that builder) - assert.Contains(t, output, "contracts: artifact://contracts") - - // 4. Verify prestate hash placeholder is present (dry run for prestate needs specific setup) - // In dry run, the prestate builder might return zero values or specific placeholders. - // Based on `localPrestateHolder` implementation, it might error if files don't exist, - // or return default values. Let's assume it returns empty/default for dry run. - // Adjust this assertion based on the actual dry-run behavior of PrestateBuilder. - // For now, let's check if the key exists, assuming the dry run might produce an empty hash. - assert.Contains(t, output, "prestateHash:") // Check if the key is rendered - - // 5. 
Check that buildJobs map was populated (indirectly verifying first pass) - templater.buildJobsMux.Lock() - assert.Contains(t, templater.buildJobs, "project-a") - assert.Contains(t, templater.buildJobs, "project-b") - assert.Len(t, templater.buildJobs, 2, "Should only have jobs for unique project names") - templater.buildJobsMux.Unlock() -} - -func TestLocalPrestateOption(t *testing.T) { - // Create a temporary directory for test files - tmpDir, err := os.MkdirTemp("", "prestate-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create a test build directory - buildDir := filepath.Join(tmpDir, "build") - require.NoError(t, os.MkdirAll(buildDir, 0755)) - - // Create a Templater instance - templater := &Templater{ - enclave: "test-enclave", - dryRun: true, - baseDir: tmpDir, - buildDir: buildDir, - urlBuilder: func(path ...string) string { - return "http://localhost:8080/" + strings.Join(path, "/") - }, - } - buildWg := &sync.WaitGroup{} - - // Get the localPrestate option - option := templater.localPrestateOption(context.Background(), buildWg) - - // Create a template context with the option - ctx := tmpl.NewTemplateContext(option) - - // Test the localPrestate function - localPrestateFn, ok := ctx.Functions["localPrestate"].(func() (*PrestateInfo, error)) - require.True(t, ok) - - prestate, err := localPrestateFn() - require.NoError(t, err) - - // Wait for the async goroutine to complete - buildWg.Wait() - - // In dry run mode, we should get a placeholder prestate with the correct URL - expectedURL := "http://localhost:8080/proofs/op-program/cannon" - assert.Equal(t, expectedURL, prestate.URL) - assert.Equal(t, "dry_run_placeholder", prestate.Hashes["prestate_mt64"]) - assert.Equal(t, "dry_run_placeholder", prestate.Hashes["prestate_interop"]) - - // Call it again to test caching - prestate2, err := localPrestateFn() - require.NoError(t, err) - assert.Equal(t, prestate, prestate2) -} - -func TestLocalContractArtifactsOption(t *testing.T) { - // 
Create a temporary directory for test files - tmpDir, err := os.MkdirTemp("", "contract-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create the contracts directory structure - contractsDir := filepath.Join(tmpDir, "packages", "contracts-bedrock") - require.NoError(t, os.MkdirAll(contractsDir, 0755)) - - // Create a mock solidity cache file - cacheDir := filepath.Join(contractsDir, "cache") - require.NoError(t, os.MkdirAll(cacheDir, 0755)) - require.NoError(t, os.WriteFile(filepath.Join(cacheDir, "solidity-files-cache.json"), []byte("test cache"), 0644)) - - // Create a mock enclave manager - mockEnclaveManager := &enclave.KurtosisEnclaveManager{} - - // Create a Templater instance - templater := &Templater{ - enclave: "test-enclave", - dryRun: true, - baseDir: tmpDir, - enclaveManager: mockEnclaveManager, - urlBuilder: func(path ...string) string { - return "http://localhost:8080/" + strings.Join(path, "/") - }, - } - buildWg := &sync.WaitGroup{} - // Get the localContractArtifacts option - option := templater.localContractArtifactsOption(context.Background(), buildWg) - - // Create a template context with the option - ctx := tmpl.NewTemplateContext(option) - - // Test the localContractArtifacts function - localContractArtifactsFn, ok := ctx.Functions["localContractArtifacts"].(func(string) (string, error)) - require.True(t, ok) - - // Test with L1 layer - artifacts, err := localContractArtifactsFn("l1") - require.NoError(t, err) - assert.Equal(t, "artifact://contracts", artifacts) - - // Test with L2 layer - artifacts, err = localContractArtifactsFn("l2") - require.NoError(t, err) - assert.Equal(t, "artifact://contracts", artifacts) -} - -func TestRenderTemplate_PlainYamlFile(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "template-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - plainYamlContent := `optimism_package: - faucet: - enabled: true - chains: - chain1: - participants: - node1: - el: - type: op-geth - cl: - 
type: op-node - network_params: - network: "kurtosis" - network_id: "2151908" - interop_time_offset: 100 - chain2: - participants: - node1: - el: - type: op-geth - cl: - type: op-node - network_params: - network: "kurtosis" - network_id: "2151909" - interop_time_offset: 5000 -` - - templatePath := filepath.Join(tmpDir, "plain.yaml") - err = os.WriteFile(templatePath, []byte(plainYamlContent), 0644) - require.NoError(t, err) - - templater := &Templater{ - enclave: "test-enclave", - dryRun: true, - baseDir: tmpDir, - templateFile: templatePath, - buildDir: tmpDir, - urlBuilder: func(path ...string) string { - return "http://localhost:8080/" + strings.Join(path, "/") - }, - } - - buf, err := templater.Render(context.Background()) - require.NoError(t, err) - - // The output should be exactly the same as the input (no template processing) - assert.Equal(t, plainYamlContent, buf.String()) -} - -func TestRenderTemplate_PlainYamlWithDataFile(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "template-test-with-data") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - plainYamlContent := `optimism_package: - chains: - test-chain: - network_params: - network_id: "123456" -` - - templatePath := filepath.Join(tmpDir, "plain.yaml") - err = os.WriteFile(templatePath, []byte(plainYamlContent), 0644) - require.NoError(t, err) - - // Create a data file (even though the template doesn't use it) - dataContent := `{"someData": "value"}` - dataPath := filepath.Join(tmpDir, "data.json") - err = os.WriteFile(dataPath, []byte(dataContent), 0644) - require.NoError(t, err) - - templater := &Templater{ - enclave: "test-enclave", - dryRun: true, - baseDir: tmpDir, - templateFile: templatePath, - dataFile: dataPath, // Data file is irrelevant for plain YAML - buildDir: tmpDir, - urlBuilder: func(path ...string) string { - return "http://localhost:8080/" + strings.Join(path, "/") - }, - } - - buf, err := templater.Render(context.Background()) - require.NoError(t, err) - assert.Equal(t, 
plainYamlContent, buf.String()) -} - -func TestRenderTemplate_TemplateWithoutDataFile(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "template-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create a file that DOES contain template syntax - templateContent := `optimism_package: - chains: - {{.chainName}}: - network_params: - network_id: "{{.networkId}}" -` - - templatePath := filepath.Join(tmpDir, "template.yaml") - err = os.WriteFile(templatePath, []byte(templateContent), 0644) - require.NoError(t, err) - - templater := &Templater{ - enclave: "test-enclave", - dryRun: true, - baseDir: tmpDir, - templateFile: templatePath, - dataFile: "", - buildDir: tmpDir, - urlBuilder: func(path ...string) string { - return "http://localhost:8080/" + strings.Join(path, "/") - }, - } - - // This should fail because the template has syntax but no data - _, err = templater.Render(context.Background()) - assert.Error(t, err, "Should fail when template has syntax but no data is provided") - assert.Contains(t, err.Error(), "failed to execute template") -} - -func TestRenderTemplate_EmptyFile(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "template-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - // Create an empty file - templatePath := filepath.Join(tmpDir, "empty.yaml") - err = os.WriteFile(templatePath, []byte(""), 0644) - require.NoError(t, err) - - templater := &Templater{ - enclave: "test", - dryRun: true, - baseDir: tmpDir, - templateFile: templatePath, - buildDir: tmpDir, - urlBuilder: func(...string) string { return "http://localhost" }, - } - - _, err = templater.Render(context.Background()) - assert.Error(t, err) - assert.Contains(t, err.Error(), "template file is empty") -} - -func TestRenderTemplate_FileDoesNotExist(t *testing.T) { - tmpDir, err := os.MkdirTemp("", "template-test") - require.NoError(t, err) - defer os.RemoveAll(tmpDir) - - nonExistentPath := filepath.Join(tmpDir, "nonexistent.yaml") - - templater := &Templater{ 
- enclave: "test", - dryRun: true, - baseDir: tmpDir, - templateFile: nonExistentPath, - buildDir: tmpDir, - urlBuilder: func(...string) string { return "http://localhost" }, - } - - _, err = templater.Render(context.Background()) - assert.Error(t, err) - assert.Contains(t, err.Error(), "template file does not exist") -} diff --git a/kurtosis-devnet/pkg/kurtosis/adapters.go b/kurtosis-devnet/pkg/kurtosis/adapters.go deleted file mode 100644 index 7bb3254b20e28..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/adapters.go +++ /dev/null @@ -1,54 +0,0 @@ -package kurtosis - -import ( - "context" - "io" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/deployer" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/depset" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/interfaces" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/jwt" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" -) - -type enclaveSpecAdapter struct{} - -func (a *enclaveSpecAdapter) EnclaveSpec(r io.Reader) (*spec.EnclaveSpec, error) { - return spec.NewSpec().ExtractData(r) -} - -var _ interfaces.EnclaveSpecifier = (*enclaveSpecAdapter)(nil) - -type enclaveInspectAdapter struct{} - -func (a *enclaveInspectAdapter) EnclaveInspect(ctx context.Context, enclave string) (*inspect.InspectData, error) { - return inspect.NewInspector(enclave).ExtractData(ctx) -} - -var _ interfaces.EnclaveInspecter = (*enclaveInspectAdapter)(nil) - -type enclaveDeployerAdapter struct{} - -func (a *enclaveDeployerAdapter) EnclaveObserve(ctx context.Context, enclave string) (*deployer.DeployerData, error) { - return deployer.NewDeployer(enclave).ExtractData(ctx) -} - -var _ interfaces.EnclaveObserver = 
(*enclaveDeployerAdapter)(nil) - -type enclaveJWTAdapter struct{} - -func (a *enclaveJWTAdapter) ExtractData(ctx context.Context, enclave string) (*jwt.Data, error) { - return jwt.NewExtractor(enclave).ExtractData(ctx) -} - -var _ interfaces.JWTExtractor = (*enclaveJWTAdapter)(nil) - -type enclaveDepsetAdapter struct{} - -func (a *enclaveDepsetAdapter) ExtractData(ctx context.Context, enclave string) (map[string]descriptors.DepSet, error) { - return depset.NewExtractor(enclave).ExtractData(ctx) -} - -var _ interfaces.DepsetExtractor = (*enclaveDepsetAdapter)(nil) diff --git a/kurtosis-devnet/pkg/kurtosis/api/enclave/enclave.go b/kurtosis-devnet/pkg/kurtosis/api/enclave/enclave.go deleted file mode 100644 index 92df86a43b3c8..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/enclave/enclave.go +++ /dev/null @@ -1,170 +0,0 @@ -package enclave - -import ( - "context" - "fmt" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/wrappers" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -// DockerManager defines the interface for Docker operations -type DockerManager interface { - DestroyDockerResources(ctx context.Context, enclave ...string) error -} - -// DefaultDockerManager implements DockerManager using the util package -type DefaultDockerManager struct{} - -func (d *DefaultDockerManager) DestroyDockerResources(ctx context.Context, enclave ...string) error { - return util.DestroyDockerResources(ctx, enclave...) 
-} - -type KurtosisEnclaveManager struct { - kurtosisCtx interfaces.KurtosisContextInterface - dockerMgr DockerManager - tracer trace.Tracer -} - -type KurtosisEnclaveManagerOptions func(*KurtosisEnclaveManager) - -func WithKurtosisContext(kurtosisCtx interfaces.KurtosisContextInterface) KurtosisEnclaveManagerOptions { - return func(manager *KurtosisEnclaveManager) { - manager.kurtosisCtx = kurtosisCtx - } -} - -func WithDockerManager(dockerMgr DockerManager) KurtosisEnclaveManagerOptions { - return func(manager *KurtosisEnclaveManager) { - manager.dockerMgr = dockerMgr - } -} - -func NewKurtosisEnclaveManager(opts ...KurtosisEnclaveManagerOptions) (*KurtosisEnclaveManager, error) { - manager := &KurtosisEnclaveManager{ - tracer: otel.Tracer("enclave-manager"), - } - - for _, opt := range opts { - opt(manager) - } - - if manager.kurtosisCtx == nil { - var err error - manager.kurtosisCtx, err = wrappers.GetDefaultKurtosisContext() - if err != nil { - return nil, fmt.Errorf("failed to create Kurtosis context: %w", err) - } - } - return manager, nil -} - -func (mgr *KurtosisEnclaveManager) GetEnclave(ctx context.Context, enclave string) (interfaces.EnclaveContext, error) { - ctx, span := mgr.tracer.Start(ctx, "get enclave") - defer span.End() - - // Try to get existing enclave first - enclaveCtx, err := mgr.kurtosisCtx.GetEnclave(ctx, enclave) - if err != nil { - // If enclave doesn't exist, create a new one - fmt.Printf("Creating a new enclave for Starlark to run inside...\n") - enclaveCtx, err = mgr.kurtosisCtx.CreateEnclave(ctx, enclave) - if err != nil { - return nil, fmt.Errorf("failed to create enclave: %w", err) - } - fmt.Printf("Enclave '%s' created successfully\n\n", enclave) - } else { - fmt.Printf("Using existing enclave '%s'\n\n", enclave) - } - - return enclaveCtx, nil -} - -// cleanupEnclave handles the common cleanup logic for both stopped and empty enclaves -func (mgr *KurtosisEnclaveManager) cleanupEnclave(ctx context.Context, enclave string) error { 
- ctx, span := mgr.tracer.Start(ctx, "cleanup enclave") - defer span.End() - - // Remove the enclave - err := mgr.kurtosisCtx.DestroyEnclave(ctx, enclave) - if err != nil { - fmt.Printf("failed to destroy enclave: %v", err) - } else { - fmt.Printf("Destroyed enclave: %s\n", enclave) - } - var errDocker error - if mgr.dockerMgr != nil { - errDocker = mgr.dockerMgr.DestroyDockerResources(ctx, enclave) - if errDocker != nil { - fmt.Printf("failed to destroy docker resources: %v", errDocker) - } else { - fmt.Printf("Destroyed docker resources for enclave: %s\n", enclave) - } - } - if err != nil { - return err - } - if errDocker != nil { - return errDocker - } - return nil -} - -func (mgr *KurtosisEnclaveManager) Autofix(ctx context.Context, enclave string) error { - ctx, span := mgr.tracer.Start(ctx, "autofix enclave") - defer span.End() - - fmt.Printf("Autofixing enclave '%s'\n", enclave) - status, err := mgr.kurtosisCtx.GetEnclaveStatus(ctx, enclave) - if err != nil { - // Means the enclave doesn't exist, so we're good - fmt.Printf("Enclave '%s' does not exist, skipping autofix\n", enclave) - return nil - } - switch status { - case interfaces.EnclaveStatusRunning: - fmt.Printf("Enclave '%s' is running, skipping autofix\n", enclave) - return nil - case interfaces.EnclaveStatusStopped: - fmt.Printf("Enclave '%s' is stopped, removing\n", enclave) - return mgr.cleanupEnclave(ctx, enclave) - case interfaces.EnclaveStatusEmpty: - fmt.Printf("Enclave '%s' is empty, removing\n", enclave) - return mgr.cleanupEnclave(ctx, enclave) - } - return fmt.Errorf("unknown enclave status: %s", status) -} - -func (mgr *KurtosisEnclaveManager) Nuke(ctx context.Context) error { - ctx, span := mgr.tracer.Start(ctx, "nuke enclaves") - defer span.End() - - enclaves, err := mgr.kurtosisCtx.Clean(ctx, true) - if err != nil { - fmt.Printf("failed to clean enclaves: %v", err) - } else { - fmt.Printf("Cleaned enclaves\n") - } - for _, enclave := range enclaves { - fmt.Printf("Nuked enclave: %s\n", 
enclave.GetName()) - } - var errDocker error - if mgr.dockerMgr != nil { - errDocker = mgr.dockerMgr.DestroyDockerResources(ctx) - if errDocker != nil { - fmt.Printf("failed to destroy docker resources: %v", errDocker) - } else { - fmt.Printf("Destroyed docker resources\n") - } - } - if err != nil { - return err - } - if errDocker != nil { - return errDocker - } - return nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/enclave/enclave_test.go b/kurtosis-devnet/pkg/kurtosis/api/enclave/enclave_test.go deleted file mode 100644 index fe9f57e3c602a..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/enclave/enclave_test.go +++ /dev/null @@ -1,241 +0,0 @@ -package enclave - -import ( - "context" - "errors" - "testing" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/fake" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -// MockDockerManager implements DockerManager for testing -type MockDockerManager struct{} - -func (m *MockDockerManager) DestroyDockerResources(ctx context.Context, enclave ...string) error { - return nil -} - -func TestNewKurtosisEnclaveManager(t *testing.T) { - tests := []struct { - name string - opts []KurtosisEnclaveManagerOptions - wantErr bool - }{ - { - name: "create with fake context", - opts: []KurtosisEnclaveManagerOptions{ - WithKurtosisContext(&fake.KurtosisContext{}), - }, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - manager, err := NewKurtosisEnclaveManager(tt.opts...) 
- if tt.wantErr { - assert.Error(t, err) - return - } - assert.NoError(t, err) - assert.NotNil(t, manager) - }) - } -} - -func TestGetEnclave(t *testing.T) { - tests := []struct { - name string - enclave string - fakeCtx *fake.KurtosisContext - wantErr bool - wantCalls []string - }{ - { - name: "get existing enclave", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{ - EnclaveCtx: &fake.EnclaveContext{}, - }, - wantErr: false, - wantCalls: []string{"get"}, - }, - { - name: "create new enclave when not exists", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{ - GetErr: errors.New("enclave not found"), - EnclaveCtx: &fake.EnclaveContext{}, - }, - wantErr: false, - wantCalls: []string{"get", "create"}, - }, - { - name: "error on get and create", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{ - GetErr: errors.New("get error"), - CreateErr: errors.New("create error"), - }, - wantErr: true, - wantCalls: []string{"get", "create"}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - manager, err := NewKurtosisEnclaveManager( - WithKurtosisContext(tt.fakeCtx), - WithDockerManager(&MockDockerManager{}), - ) - require.NoError(t, err) - - ctx := context.Background() - enclaveCtx, err := manager.GetEnclave(ctx, tt.enclave) - - if tt.wantErr { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - assert.NotNil(t, enclaveCtx) - }) - } -} - -func TestAutofix(t *testing.T) { - tests := []struct { - name string - enclave string - fakeCtx *fake.KurtosisContext - status interfaces.EnclaveStatus - statusErr error - destroyErr error - wantErr bool - wantDestroyed bool - }{ - { - name: "running enclave", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{}, - status: interfaces.EnclaveStatusRunning, - wantErr: false, - }, - { - name: "stopped enclave", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{}, - status: interfaces.EnclaveStatusStopped, - wantErr: false, - wantDestroyed: true, - }, - 
{ - name: "empty enclave", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{}, - status: interfaces.EnclaveStatusEmpty, - wantErr: false, - wantDestroyed: true, - }, - { - name: "enclave not found", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{}, - statusErr: errors.New("enclave not found"), - wantErr: false, - }, - { - name: "destroy error", - enclave: "test-enclave", - fakeCtx: &fake.KurtosisContext{}, - status: interfaces.EnclaveStatusStopped, - destroyErr: errors.New("destroy error"), - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Setup mock context - tt.fakeCtx.Status = tt.status - tt.fakeCtx.StatusErr = tt.statusErr - tt.fakeCtx.DestroyErr = tt.destroyErr - - manager, err := NewKurtosisEnclaveManager( - WithKurtosisContext(tt.fakeCtx), - WithDockerManager(&MockDockerManager{}), - ) - require.NoError(t, err) - - ctx := context.Background() - err = manager.Autofix(ctx, tt.enclave) - - if tt.wantErr { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - if tt.wantDestroyed { - assert.True(t, tt.fakeCtx.DestroyCalled, "Destroy should be called") - } else { - assert.False(t, tt.fakeCtx.DestroyCalled, "Destroy should not be called") - } - }) - } -} - -func TestNuke(t *testing.T) { - tests := []struct { - name string - fakeCtx *fake.KurtosisContext - cleanErr error - wantErr bool - wantClean bool - }{ - { - name: "successful nuke", - fakeCtx: &fake.KurtosisContext{}, - wantErr: false, - wantClean: true, - }, - { - name: "clean error", - fakeCtx: &fake.KurtosisContext{}, - cleanErr: errors.New("clean error"), - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Setup mock context - tt.fakeCtx.CleanErr = tt.cleanErr - - manager, err := NewKurtosisEnclaveManager( - WithKurtosisContext(tt.fakeCtx), - WithDockerManager(&MockDockerManager{}), - ) - require.NoError(t, err) - - ctx := context.Background() - err = manager.Nuke(ctx) - - if 
tt.wantErr { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - if tt.wantClean { - assert.True(t, tt.fakeCtx.CleanCalled, "Clean should be called") - } else { - assert.False(t, tt.fakeCtx.CleanCalled, "Clean should not be called") - } - }) - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/engine/engine.go b/kurtosis-devnet/pkg/kurtosis/api/engine/engine.go deleted file mode 100644 index 158c14647bc4e..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/engine/engine.go +++ /dev/null @@ -1,116 +0,0 @@ -package engine - -import ( - "fmt" - "os" - "os/exec" - "strings" - - "github.com/kurtosis-tech/kurtosis/api/golang/kurtosis_version" - "gopkg.in/yaml.v3" -) - -// EngineManager handles running the Kurtosis engine -type EngineManager struct { - kurtosisBinary string - version string -} - -// Option configures an EngineManager -type Option func(*EngineManager) - -// WithKurtosisBinary sets the path to the kurtosis binary -func WithKurtosisBinary(binary string) Option { - return func(e *EngineManager) { - e.kurtosisBinary = binary - } -} - -// WithVersion sets the engine version -func WithVersion(version string) Option { - return func(e *EngineManager) { - e.version = version - } -} - -// NewEngineManager creates a new EngineManager with the given options -func NewEngineManager(opts ...Option) *EngineManager { - e := &EngineManager{ - kurtosisBinary: "kurtosis", // Default to expecting kurtosis in PATH - version: kurtosis_version.KurtosisVersion, // Default to library version - } - for _, opt := range opts { - opt(e) - } - return e -} - -// EnsureRunning starts the Kurtosis engine with the configured version -func (e *EngineManager) EnsureRunning() error { - cmd := exec.Command(e.kurtosisBinary, "engine", "start", "--version", e.version) - fmt.Println("Starting Kurtosis engine with version:", e.version) - - // Capture stdout and stderr for more verbose output - var stdout, stderr strings.Builder - cmd.Stdout = &stdout - cmd.Stderr = &stderr - - if err 
:= cmd.Run(); err != nil { - return fmt.Errorf("failed to start kurtosis engine: %w\nstdout: %s\nstderr: %s", - err, stdout.String(), stderr.String()) - } - return nil -} - -// GetEngineType gets the type of the running engine (docker, kubernetes, etc) -func (e *EngineManager) GetEngineType() (string, error) { - // First try to get the cluster name - cmd := exec.Command(e.kurtosisBinary, "cluster", "get") - output, err := cmd.Output() - if err != nil { - // Means there's no cluster set, so we're using the default cluster - // Which is the first entry in kurtosis cluster ls - cmd = exec.Command(e.kurtosisBinary, "cluster", "ls") - output, err = cmd.Output() - if err != nil { - return "", fmt.Errorf("failed to get cluster info: %w", err) - } - clusterName := strings.TrimSpace(string(output)) - return clusterName, nil - } - clusterName := strings.TrimSpace(string(output)) - - cmd = exec.Command(e.kurtosisBinary, "config", "path") - output, err = cmd.Output() - if err != nil { - return "", fmt.Errorf("failed to get config path: %w", err) - } - configPath := strings.TrimSpace(string(output)) - - configData, err := os.ReadFile(configPath) - if err != nil { - return "", fmt.Errorf("failed to read config file: %w", err) - } - - var config struct { - KurtosisClusters map[string]struct { - Type string `yaml:"type"` - } `yaml:"kurtosis-clusters"` - } - if err := yaml.Unmarshal(configData, &config); err != nil { - return "", fmt.Errorf("failed to parse config file: %w", err) - } - - cluster, exists := config.KurtosisClusters[clusterName] - if !exists { - // Means we're using the cluster definitions from the default config - return clusterName, nil - } - - return cluster.Type, nil -} - -func (e *EngineManager) RestartEngine() error { - cmd := exec.Command(e.kurtosisBinary, "engine", "restart") - return cmd.Run() -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/engine/engine_test.go b/kurtosis-devnet/pkg/kurtosis/api/engine/engine_test.go deleted file mode 100644 index 
93f5d367c6de0..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/engine/engine_test.go +++ /dev/null @@ -1,188 +0,0 @@ -package engine - -import ( - "os" - "path/filepath" - "testing" - - "github.com/kurtosis-tech/kurtosis/api/golang/kurtosis_version" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewEngineManager(t *testing.T) { - tests := []struct { - name string - opts []Option - expectedBinary string - expectedVer string - }{ - { - name: "default options", - opts: []Option{}, - expectedBinary: "kurtosis", - expectedVer: kurtosis_version.KurtosisVersion, - }, - { - name: "custom binary path", - opts: []Option{WithKurtosisBinary("/custom/path/kurtosis")}, - expectedBinary: "/custom/path/kurtosis", - expectedVer: kurtosis_version.KurtosisVersion, - }, - { - name: "custom version", - opts: []Option{WithVersion("1.0.0")}, - expectedBinary: "kurtosis", - expectedVer: "1.0.0", - }, - { - name: "custom binary and version", - opts: []Option{WithKurtosisBinary("/custom/path/kurtosis"), WithVersion("1.0.0")}, - expectedBinary: "/custom/path/kurtosis", - expectedVer: "1.0.0", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - manager := NewEngineManager(tt.opts...) 
- assert.Equal(t, tt.expectedBinary, manager.kurtosisBinary) - assert.Equal(t, tt.expectedVer, manager.version) - }) - } -} - -func TestEnsureRunning(t *testing.T) { - // Create a temporary directory for testing - tempDir, err := os.MkdirTemp("", "kurtosis-test-*") - require.NoError(t, err) - defer os.RemoveAll(tempDir) - - // Create a mock kurtosis binary that captures and verifies the arguments - mockBinary := filepath.Join(tempDir, "kurtosis") - mockScript := `#!/bin/sh -if [ "$1" = "engine" ] && [ "$2" = "start" ] && [ "$3" = "--version" ] && [ "$4" = "test-version" ]; then - echo "Engine started with version test-version" - exit 0 -else - echo "Invalid arguments: $@" - exit 1 -fi` - err = os.WriteFile(mockBinary, []byte(mockScript), 0755) - require.NoError(t, err) - - manager := NewEngineManager( - WithKurtosisBinary(mockBinary), - WithVersion("test-version"), - ) - err = manager.EnsureRunning() - assert.NoError(t, err) -} - -func TestGetEngineType(t *testing.T) { - // Create a temporary directory for testing - tempDir, err := os.MkdirTemp("", "kurtosis-test-*") - require.NoError(t, err) - defer os.RemoveAll(tempDir) - - // Create a mock kurtosis binary that simulates cluster commands - mockBinary := filepath.Join(tempDir, "kurtosis") - mockScript := `#!/bin/sh -if [ "$1" = "cluster" ] && [ "$2" = "get" ]; then - echo "test-cluster" -elif [ "$1" = "config" ] && [ "$2" = "path" ]; then - echo "` + tempDir + `/config.yaml" -else - exit 1 -fi` - err = os.WriteFile(mockBinary, []byte(mockScript), 0755) - require.NoError(t, err) - - // Create a mock config file - configPath := filepath.Join(tempDir, "config.yaml") - configContent := ` -kurtosis-clusters: - test-cluster: - type: docker -` - err = os.WriteFile(configPath, []byte(configContent), 0644) - require.NoError(t, err) - - manager := NewEngineManager(WithKurtosisBinary(mockBinary)) - engineType, err := manager.GetEngineType() - assert.NoError(t, err) - assert.Equal(t, "docker", engineType) -} - -func 
TestGetEngineType_NoCluster(t *testing.T) { - // Create a temporary directory for testing - tempDir, err := os.MkdirTemp("", "kurtosis-test-*") - require.NoError(t, err) - defer os.RemoveAll(tempDir) - - // Create a mock kurtosis binary that simulates no cluster set - mockBinary := filepath.Join(tempDir, "kurtosis") - mockScript := `#!/bin/sh -if [ "$1" = "cluster" ] && [ "$2" = "get" ]; then - exit 1 -elif [ "$1" = "cluster" ] && [ "$2" = "ls" ]; then - echo "default-cluster" -else - exit 1 -fi` - err = os.WriteFile(mockBinary, []byte(mockScript), 0755) - require.NoError(t, err) - - manager := NewEngineManager(WithKurtosisBinary(mockBinary)) - engineType, err := manager.GetEngineType() - assert.NoError(t, err) - assert.Equal(t, "default-cluster", engineType) -} - -func TestRestartEngine(t *testing.T) { - // Create a temporary directory for testing - tempDir, err := os.MkdirTemp("", "kurtosis-test-*") - require.NoError(t, err) - defer os.RemoveAll(tempDir) - - // Create a mock kurtosis binary that captures and verifies the arguments - mockBinary := filepath.Join(tempDir, "kurtosis") - mockScript := `#!/bin/sh -if [ "$1" = "engine" ] && [ "$2" = "restart" ]; then - echo "Engine restarted successfully" - exit 0 -else - echo "Invalid arguments: $@" - exit 1 -fi` - err = os.WriteFile(mockBinary, []byte(mockScript), 0755) - require.NoError(t, err) - - manager := NewEngineManager(WithKurtosisBinary(mockBinary)) - err = manager.RestartEngine() - assert.NoError(t, err) -} - -func TestRestartEngine_Failure(t *testing.T) { - // Create a temporary directory for testing - tempDir, err := os.MkdirTemp("", "kurtosis-test-*") - require.NoError(t, err) - defer os.RemoveAll(tempDir) - - // Create a mock kurtosis binary that always fails - mockBinary := filepath.Join(tempDir, "kurtosis") - mockScript := `#!/bin/sh -if [ "$1" = "engine" ] && [ "$2" = "restart" ]; then - echo "Failed to restart engine" - exit 1 -else - echo "Invalid arguments: $@" - exit 1 -fi` - err = 
os.WriteFile(mockBinary, []byte(mockScript), 0755) - require.NoError(t, err) - - manager := NewEngineManager(WithKurtosisBinary(mockBinary)) - err = manager.RestartEngine() - assert.Error(t, err) -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/fake/fake.go b/kurtosis-devnet/pkg/kurtosis/api/fake/fake.go deleted file mode 100644 index a9800f806d9bd..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/fake/fake.go +++ /dev/null @@ -1,289 +0,0 @@ -package fake - -import ( - "context" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/enclaves" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config" -) - -// KurtosisContext implements interfaces.KurtosisContextInterface for testing -type KurtosisContext struct { - EnclaveCtx *EnclaveContext - GetErr error - CreateErr error - CleanErr error - DestroyErr error - Status interfaces.EnclaveStatus - StatusErr error - DestroyCalled bool - CleanCalled bool -} - -func (f *KurtosisContext) CreateEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - if f.CreateErr != nil { - return nil, f.CreateErr - } - return f.EnclaveCtx, nil -} - -func (f *KurtosisContext) GetEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - if f.GetErr != nil { - return nil, f.GetErr - } - return f.EnclaveCtx, nil -} - -func (f *KurtosisContext) GetEnclaveStatus(ctx context.Context, name string) (interfaces.EnclaveStatus, error) { - if f.StatusErr != nil { - return "", f.StatusErr - } - return f.Status, nil -} - -func (f *KurtosisContext) Clean(ctx context.Context, destroyAll bool) ([]interfaces.EnclaveNameAndUuid, error) { - f.CleanCalled = true - if f.CleanErr != nil { - return nil, f.CleanErr - } - return 
[]interfaces.EnclaveNameAndUuid{}, nil -} - -func (f *KurtosisContext) DestroyEnclave(ctx context.Context, name string) error { - f.DestroyCalled = true - if f.DestroyErr != nil { - return f.DestroyErr - } - return nil -} - -// EnclaveContext implements interfaces.EnclaveContext for testing -type EnclaveContext struct { - RunErr error - Responses []interfaces.StarlarkResponse - ArtifactNames []*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid - ArtifactData []byte - UploadErr error - - Services map[services.ServiceName]interfaces.ServiceContext - Files map[services.FileArtifactName]string -} - -func (f *EnclaveContext) GetEnclaveUuid() enclaves.EnclaveUUID { - return enclaves.EnclaveUUID("test-enclave-uuid") -} - -func (f *EnclaveContext) GetServices() (map[services.ServiceName]services.ServiceUUID, error) { - if f.Services == nil { - return nil, nil - } - services := make(map[services.ServiceName]services.ServiceUUID) - for name, svc := range f.Services { - services[name] = svc.GetServiceUUID() - } - return services, nil -} - -func (f *EnclaveContext) GetService(serviceIdentifier string) (interfaces.ServiceContext, error) { - if f.Services == nil { - return nil, nil - } - return f.Services[services.ServiceName(serviceIdentifier)], nil -} - -func (f *EnclaveContext) GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) { - artifacts := make([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, 0, len(f.Files)) - for name, uuid := range f.Files { - artifacts = append(artifacts, &kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid{ - FileName: string(name), - FileUuid: string(uuid), - }) - } - return artifacts, nil -} - -func (f *EnclaveContext) RunStarlarkPackage(ctx context.Context, pkg string, params *starlark_run_config.StarlarkRunConfig) (<-chan interfaces.StarlarkResponse, string, error) { - if f.RunErr != nil { - return nil, "", f.RunErr - } - - // Create a channel and send 
all responses - ch := make(chan interfaces.StarlarkResponse) - go func() { - defer close(ch) - for _, resp := range f.Responses { - ch <- resp - } - }() - return ch, "", nil -} - -func (f *EnclaveContext) RunStarlarkScript(ctx context.Context, script string, params *starlark_run_config.StarlarkRunConfig) error { - return f.RunErr -} - -func (f *EnclaveContext) DownloadFilesArtifact(ctx context.Context, name string) ([]byte, error) { - return f.ArtifactData, nil -} - -func (f *EnclaveContext) UploadFiles(pathToUpload string, artifactName string) (services.FilesArtifactUUID, services.FileArtifactName, error) { - if f.UploadErr != nil { - return "", "", f.UploadErr - } - return "test-uuid", services.FileArtifactName(artifactName), nil -} - -type ServiceContext struct { - ServiceUUID services.ServiceUUID - PublicIP string - PrivateIP string - PublicPorts map[string]*services.PortSpec - PrivatePorts map[string]*services.PortSpec -} - -func (f *ServiceContext) GetServiceUUID() services.ServiceUUID { - return f.ServiceUUID -} - -func (f *ServiceContext) GetMaybePublicIPAddress() string { - return f.PublicIP -} - -func (f *ServiceContext) GetPublicPorts() map[string]*services.PortSpec { - return f.PublicPorts -} - -func (f *ServiceContext) GetPrivatePorts() map[string]*services.PortSpec { - return f.PrivatePorts -} - -// StarlarkResponse implements interfaces.StarlarkResponse for testing -type StarlarkResponse struct { - Err interfaces.StarlarkError - ProgressMsg []string - Instruction string - IsSuccessful bool - Warning string - Info string - Result string - HasResult bool // tracks whether result was explicitly set -} - -func (f *StarlarkResponse) GetError() interfaces.StarlarkError { - return f.Err -} - -func (f *StarlarkResponse) GetProgressInfo() interfaces.ProgressInfo { - if f.ProgressMsg != nil { - return &ProgressInfo{Info: f.ProgressMsg} - } - return nil -} - -func (f *StarlarkResponse) GetInstruction() interfaces.Instruction { - if f.Instruction != "" { - 
return &Instruction{Desc: f.Instruction} - } - return nil -} - -func (f *StarlarkResponse) GetRunFinishedEvent() interfaces.RunFinishedEvent { - return &RunFinishedEvent{IsSuccessful: f.IsSuccessful} -} - -func (f *StarlarkResponse) GetWarning() interfaces.Warning { - if f.Warning != "" { - return &Warning{Msg: f.Warning} - } - return nil -} - -func (f *StarlarkResponse) GetInfo() interfaces.Info { - if f.Info != "" { - return &Info{Msg: f.Info} - } - return nil -} - -func (f *StarlarkResponse) GetInstructionResult() interfaces.InstructionResult { - if !f.HasResult { - return nil - } - return &InstructionResult{Result: f.Result} -} - -// ProgressInfo implements ProgressInfo for testing -type ProgressInfo struct { - Info []string -} - -func (f *ProgressInfo) GetCurrentStepInfo() []string { - return f.Info -} - -// Instruction implements Instruction for testing -type Instruction struct { - Desc string -} - -func (f *Instruction) GetDescription() string { - return f.Desc -} - -// StarlarkError implements StarlarkError for testing -type StarlarkError struct { - InterpretationErr error - ValidationErr error - ExecutionErr error -} - -func (f *StarlarkError) GetInterpretationError() error { - return f.InterpretationErr -} - -func (f *StarlarkError) GetValidationError() error { - return f.ValidationErr -} - -func (f *StarlarkError) GetExecutionError() error { - return f.ExecutionErr -} - -// RunFinishedEvent implements RunFinishedEvent for testing -type RunFinishedEvent struct { - IsSuccessful bool -} - -func (f *RunFinishedEvent) GetIsRunSuccessful() bool { - return f.IsSuccessful -} - -// Warning implements Warning for testing -type Warning struct { - Msg string -} - -func (f *Warning) GetMessage() string { - return f.Msg -} - -// Info implements Info for testing -type Info struct { - Msg string -} - -func (f *Info) GetMessage() string { - return f.Msg -} - -// InstructionResult implements InstructionResult for testing -type InstructionResult struct { - Result string -} 
- -func (f *InstructionResult) GetSerializedInstructionResult() string { - return f.Result -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/interfaces/iface.go b/kurtosis-devnet/pkg/kurtosis/api/interfaces/iface.go deleted file mode 100644 index 0b3b532bda2fa..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/interfaces/iface.go +++ /dev/null @@ -1,96 +0,0 @@ -package interfaces - -import ( - "context" - - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/enclaves" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config" -) - -// Interfaces for Kurtosis SDK types to make testing easier -type StarlarkError interface { - GetInterpretationError() error - GetValidationError() error - GetExecutionError() error -} - -type ProgressInfo interface { - GetCurrentStepInfo() []string -} - -type Instruction interface { - GetDescription() string -} - -type RunFinishedEvent interface { - GetIsRunSuccessful() bool -} - -type Warning interface { - GetMessage() string -} - -type Info interface { - GetMessage() string -} - -type InstructionResult interface { - GetSerializedInstructionResult() string -} - -type StarlarkResponse interface { - GetError() StarlarkError - GetProgressInfo() ProgressInfo - GetInstruction() Instruction - GetRunFinishedEvent() RunFinishedEvent - GetWarning() Warning - GetInfo() Info - GetInstructionResult() InstructionResult -} - -type PortSpec interface { - GetNumber() uint16 -} - -type ServiceContext interface { - GetServiceUUID() services.ServiceUUID - GetMaybePublicIPAddress() string - GetPublicPorts() map[string]PortSpec - GetPrivatePorts() map[string]PortSpec - GetLabels() map[string]string -} - -type EnclaveContext interface { - GetEnclaveUuid() enclaves.EnclaveUUID - - GetService(serviceIdentifier string) (ServiceContext, error) - GetServices() 
(map[services.ServiceName]services.ServiceUUID, error) - - GetAllFilesArtifactNamesAndUuids(ctx context.Context) ([]*kurtosis_core_rpc_api_bindings.FilesArtifactNameAndUuid, error) - - RunStarlarkPackage(context.Context, string, *starlark_run_config.StarlarkRunConfig) (<-chan StarlarkResponse, string, error) - RunStarlarkScript(context.Context, string, *starlark_run_config.StarlarkRunConfig) error -} - -type EnclaveNameAndUuid interface { - GetName() string - GetUuid() string -} - -type EnclaveStatus string - -const ( - EnclaveStatusEmpty EnclaveStatus = "empty" - EnclaveStatusRunning EnclaveStatus = "running" - EnclaveStatusStopped EnclaveStatus = "stopped" -) - -type KurtosisContextInterface interface { - CreateEnclave(context.Context, string) (EnclaveContext, error) - GetEnclave(context.Context, string) (EnclaveContext, error) - GetEnclaveStatus(context.Context, string) (EnclaveStatus, error) - DestroyEnclave(context.Context, string) error - Clean(context.Context, bool) ([]EnclaveNameAndUuid, error) -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/run/handlers.go b/kurtosis-devnet/pkg/kurtosis/api/run/handlers.go deleted file mode 100644 index 3c4c719063e57..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/run/handlers.go +++ /dev/null @@ -1,174 +0,0 @@ -package run - -import ( - "context" - "fmt" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/fatih/color" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -// Color printers -var ( - printCyan = color.New(color.FgCyan).SprintFunc() - printYellow = color.New(color.FgYellow).SprintFunc() - printRed = color.New(color.FgRed).SprintFunc() - printBlue = color.New(color.FgBlue).SprintFunc() -) - -// MessageHandler defines the interface for handling different types of messages -type MessageHandler interface { - // Handle processes the message if applicable and returns: - // - bool: whether the message was handled - // - error: any error that 
occurred during handling - Handle(context.Context, interfaces.StarlarkResponse) (bool, error) -} - -// MessageHandlerFunc is a function type that implements MessageHandler -type MessageHandlerFunc func(context.Context, interfaces.StarlarkResponse) (bool, error) - -func (f MessageHandlerFunc) Handle(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - return f(ctx, resp) -} - -// FirstMatchHandler returns a handler that applies the first matching handler from the given handlers -func FirstMatchHandler(handlers ...MessageHandler) MessageHandler { - return MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - for _, h := range handlers { - handled, err := h.Handle(ctx, resp) - if err != nil { - return true, err - } - if handled { - return true, nil - } - } - return false, nil - }) -} - -// AllHandlers returns a handler that applies all the given handlers in order -func AllHandlers(handlers ...MessageHandler) MessageHandler { - return MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - anyHandled := false - for _, h := range handlers { - handled, err := h.Handle(ctx, resp) - if err != nil { - return true, err - } - anyHandled = anyHandled || handled - } - return anyHandled, nil - }) -} - -// defaultHandler is the default message handler that provides standard Kurtosis output -type defaultHandler struct { - tracer trace.Tracer - span trace.Span -} - -func newDefaultHandler() *defaultHandler { - return &defaultHandler{ - tracer: otel.Tracer("kurtosis-run"), - } -} - -var _ MessageHandler = (*defaultHandler)(nil) - -func (h *defaultHandler) Handle(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - hdlr := FirstMatchHandler( - MessageHandlerFunc(h.handleProgress), - MessageHandlerFunc(h.handleInstruction), - MessageHandlerFunc(h.handleWarning), - MessageHandlerFunc(h.handleInfo), - MessageHandlerFunc(h.handleResult), - 
MessageHandlerFunc(h.handleError), - ) - - return hdlr.Handle(ctx, resp) -} - -// handleProgress handles progress info messages -func (h *defaultHandler) handleProgress(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - if progressInfo := resp.GetProgressInfo(); progressInfo != nil { - // ignore progress messages, same as kurtosis run does - return true, nil - } - return false, nil -} - -// handleInstruction handles instruction messages -func (h *defaultHandler) handleInstruction(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - if instruction := resp.GetInstruction(); instruction != nil { - desc := instruction.GetDescription() - _, span := h.tracer.Start(ctx, desc) - h.span = span - - fmt.Println(printCyan(desc)) - return true, nil - } - return false, nil -} - -// handleWarning handles warning messages -func (h *defaultHandler) handleWarning(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - if warning := resp.GetWarning(); warning != nil { - fmt.Println(printYellow(warning.GetMessage())) - return true, nil - } - return false, nil -} - -// handleInfo handles info messages -func (h *defaultHandler) handleInfo(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - if info := resp.GetInfo(); info != nil { - fmt.Println(printBlue(info.GetMessage())) - return true, nil - } - return false, nil -} - -// handleResult handles instruction result messages -func (h *defaultHandler) handleResult(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - if result := resp.GetInstructionResult(); result != nil { - if result.GetSerializedInstructionResult() != "" { - fmt.Printf("%s\n\n", result.GetSerializedInstructionResult()) - } - if h.span != nil { - h.span.End() - } - return true, nil - } - return false, nil -} - -// handleError handles error messages -func (h *defaultHandler) handleError(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - if err := 
resp.GetError(); err != nil { - if interpretErr := err.GetInterpretationError(); interpretErr != nil { - return true, fmt.Errorf(printRed("interpretation error: %v"), interpretErr) - } - if validationErr := err.GetValidationError(); validationErr != nil { - return true, fmt.Errorf(printRed("validation error: %v"), validationErr) - } - if executionErr := err.GetExecutionError(); executionErr != nil { - return true, fmt.Errorf(printRed("execution error: %v"), executionErr) - } - return true, nil - } - return false, nil -} - -// makeRunFinishedHandler creates a handler for run finished events -func makeRunFinishedHandler(isSuccessful *bool) MessageHandlerFunc { - return func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - if event := resp.GetRunFinishedEvent(); event != nil { - *isSuccessful = event.GetIsRunSuccessful() - return true, nil - } - return false, nil - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/run/handlers_test.go b/kurtosis-devnet/pkg/kurtosis/api/run/handlers_test.go deleted file mode 100644 index 580642d921061..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/run/handlers_test.go +++ /dev/null @@ -1,374 +0,0 @@ -package run - -import ( - "context" - "fmt" - "testing" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/fake" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/stretchr/testify/assert" -) - -func TestHandleProgress(t *testing.T) { - ctx := context.Background() - d := newDefaultHandler() - tests := []struct { - name string - response interfaces.StarlarkResponse - want bool - }{ - { - name: "handles progress message", - response: &fake.StarlarkResponse{ - ProgressMsg: []string{"Step 1", "Step 2"}, - }, - want: true, - }, - { - name: "ignores non-progress message", - response: &fake.StarlarkResponse{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handled, err := d.handleProgress(ctx, 
tt.response) - assert.NoError(t, err) - assert.Equal(t, tt.want, handled) - }) - } -} - -func TestHandleInstruction(t *testing.T) { - ctx := context.Background() - d := newDefaultHandler() - tests := []struct { - name string - response interfaces.StarlarkResponse - want bool - }{ - { - name: "handles instruction message", - response: &fake.StarlarkResponse{ - Instruction: "Execute command", - }, - want: true, - }, - { - name: "ignores non-instruction message", - response: &fake.StarlarkResponse{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handled, err := d.handleInstruction(ctx, tt.response) - assert.NoError(t, err) - assert.Equal(t, tt.want, handled) - }) - } -} - -func TestHandleWarning(t *testing.T) { - ctx := context.Background() - d := newDefaultHandler() - tests := []struct { - name string - response interfaces.StarlarkResponse - want bool - }{ - { - name: "handles warning message", - response: &fake.StarlarkResponse{ - Warning: "Warning: deprecated feature", - }, - want: true, - }, - { - name: "ignores non-warning message", - response: &fake.StarlarkResponse{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handled, err := d.handleWarning(ctx, tt.response) - assert.NoError(t, err) - assert.Equal(t, tt.want, handled) - }) - } -} - -func TestHandleInfo(t *testing.T) { - ctx := context.Background() - d := newDefaultHandler() - tests := []struct { - name string - response interfaces.StarlarkResponse - want bool - }{ - { - name: "handles info message", - response: &fake.StarlarkResponse{ - Info: "System info", - }, - want: true, - }, - { - name: "ignores non-info message", - response: &fake.StarlarkResponse{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handled, err := d.handleInfo(ctx, tt.response) - assert.NoError(t, err) - assert.Equal(t, tt.want, handled) - }) - } -} - -func TestHandleResult(t *testing.T) { - 
ctx := context.Background() - d := newDefaultHandler() - tests := []struct { - name string - response interfaces.StarlarkResponse - want bool - }{ - { - name: "handles result message", - response: &fake.StarlarkResponse{ - Result: "Operation completed", - HasResult: true, - }, - want: true, - }, - { - name: "handles empty result message", - response: &fake.StarlarkResponse{ - Result: "", - HasResult: true, - }, - want: true, - }, - { - name: "ignores non-result message", - response: &fake.StarlarkResponse{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handled, err := d.handleResult(ctx, tt.response) - assert.NoError(t, err) - assert.Equal(t, tt.want, handled) - }) - } -} - -func TestHandleError(t *testing.T) { - ctx := context.Background() - d := newDefaultHandler() - testErr := fmt.Errorf("test error") - tests := []struct { - name string - response interfaces.StarlarkResponse - want bool - wantError bool - }{ - { - name: "handles interpretation error", - response: &fake.StarlarkResponse{ - Err: &fake.StarlarkError{InterpretationErr: testErr}, - }, - want: true, - wantError: true, - }, - { - name: "handles validation error", - response: &fake.StarlarkResponse{ - Err: &fake.StarlarkError{ValidationErr: testErr}, - }, - want: true, - wantError: true, - }, - { - name: "handles execution error", - response: &fake.StarlarkResponse{ - Err: &fake.StarlarkError{ExecutionErr: testErr}, - }, - want: true, - wantError: true, - }, - { - name: "ignores non-error message", - response: &fake.StarlarkResponse{}, - want: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handled, err := d.handleError(ctx, tt.response) - if tt.wantError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, handled) - }) - } -} - -func TestFirstMatchHandler(t *testing.T) { - ctx := context.Background() - d := newDefaultHandler() - testErr := fmt.Errorf("test error") - tests := 
[]struct { - name string - handlers []MessageHandler - response interfaces.StarlarkResponse - want bool - wantError bool - }{ - { - name: "first handler matches", - handlers: []MessageHandler{ - MessageHandlerFunc(d.handleInfo), - MessageHandlerFunc(d.handleWarning), - }, - response: &fake.StarlarkResponse{ - Info: "test info", - }, - want: true, - }, - { - name: "second handler matches", - handlers: []MessageHandler{ - MessageHandlerFunc(d.handleInfo), - MessageHandlerFunc(d.handleWarning), - }, - response: &fake.StarlarkResponse{ - Warning: "test warning", - }, - want: true, - }, - { - name: "no handlers match", - handlers: []MessageHandler{ - MessageHandlerFunc(d.handleInfo), - MessageHandlerFunc(d.handleWarning), - }, - response: &fake.StarlarkResponse{ - Result: "test result", HasResult: true, - }, - want: false, - }, - { - name: "handler returns error", - handlers: []MessageHandler{ - MessageHandlerFunc(d.handleError), - }, - response: &fake.StarlarkResponse{ - Err: &fake.StarlarkError{InterpretationErr: testErr}, - }, - want: true, - wantError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := FirstMatchHandler(tt.handlers...) 
- handled, err := handler.Handle(ctx, tt.response) - if tt.wantError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, handled) - }) - } -} - -func TestAllHandlers(t *testing.T) { - ctx := context.Background() - d := newDefaultHandler() - testErr := fmt.Errorf("test error") - tests := []struct { - name string - handlers []MessageHandler - response interfaces.StarlarkResponse - want bool - wantError bool - }{ - { - name: "multiple handlers match", - handlers: []MessageHandler{ - MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - return true, nil - }), - MessageHandlerFunc(func(ctx context.Context, resp interfaces.StarlarkResponse) (bool, error) { - return true, nil - }), - }, - response: &fake.StarlarkResponse{}, - want: true, - }, - { - name: "some handlers match", - handlers: []MessageHandler{ - MessageHandlerFunc(d.handleInfo), - MessageHandlerFunc(d.handleWarning), - }, - response: &fake.StarlarkResponse{ - Info: "test info", - }, - want: true, - }, - { - name: "no handlers match", - handlers: []MessageHandler{ - MessageHandlerFunc(d.handleInfo), - MessageHandlerFunc(d.handleWarning), - }, - response: &fake.StarlarkResponse{ - Result: "test result", HasResult: true, - }, - want: false, - }, - { - name: "handler returns error", - handlers: []MessageHandler{ - MessageHandlerFunc(d.handleInfo), - MessageHandlerFunc(d.handleError), - }, - response: &fake.StarlarkResponse{ - Err: &fake.StarlarkError{InterpretationErr: testErr}, - }, - want: true, - wantError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - handler := AllHandlers(tt.handlers...) 
- handled, err := handler.Handle(ctx, tt.response) - if tt.wantError { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - assert.Equal(t, tt.want, handled) - }) - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run.go b/kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run.go deleted file mode 100644 index bd317e9b6f7ad..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run.go +++ /dev/null @@ -1,154 +0,0 @@ -package run - -import ( - "context" - "errors" - "fmt" - "io" - "os" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/enclave" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/wrappers" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config" - "go.opentelemetry.io/otel" - "go.opentelemetry.io/otel/trace" -) - -type KurtosisRunner struct { - dryRun bool - enclave string - kurtosisCtx interfaces.KurtosisContextInterface - runHandlers []MessageHandler - tracer trace.Tracer -} - -type KurtosisRunnerOptions func(*KurtosisRunner) - -func WithKurtosisRunnerDryRun(dryRun bool) KurtosisRunnerOptions { - return func(r *KurtosisRunner) { - r.dryRun = dryRun - } -} - -func WithKurtosisRunnerEnclave(enclave string) KurtosisRunnerOptions { - return func(r *KurtosisRunner) { - r.enclave = enclave - } -} - -func WithKurtosisRunnerKurtosisContext(kurtosisCtx interfaces.KurtosisContextInterface) KurtosisRunnerOptions { - return func(r *KurtosisRunner) { - r.kurtosisCtx = kurtosisCtx - } -} - -func WithKurtosisRunnerRunHandlers(runHandlers ...MessageHandler) KurtosisRunnerOptions { - return func(r *KurtosisRunner) { - r.runHandlers = runHandlers - } -} - -func NewKurtosisRunner(opts ...KurtosisRunnerOptions) (*KurtosisRunner, error) { - r := &KurtosisRunner{ - tracer: otel.Tracer("kurtosis-run"), - } - for _, opt := range opts { - opt(r) - } - - if r.kurtosisCtx == nil { - var err 
error - r.kurtosisCtx, err = wrappers.GetDefaultKurtosisContext() - if err != nil { - return nil, fmt.Errorf("failed to create Kurtosis context: %w", err) - } - } - return r, nil -} - -func (r *KurtosisRunner) Run(ctx context.Context, packageName string, args io.Reader) error { - ctx, span := r.tracer.Start(ctx, fmt.Sprintf("run package %s", packageName)) - defer span.End() - - if r.dryRun { - fmt.Printf("Dry run mode enabled, would run kurtosis package %s in enclave %s\n", - packageName, r.enclave) - if args != nil { - fmt.Println("\nWith arguments:") - if _, err := io.Copy(os.Stdout, args); err != nil { - return fmt.Errorf("failed to dump args: %w", err) - } - fmt.Println() - } - return nil - } - - mgr, err := enclave.NewKurtosisEnclaveManager( - enclave.WithKurtosisContext(r.kurtosisCtx), - ) - if err != nil { - return fmt.Errorf("failed to create Kurtosis enclave manager: %w", err) - } - // Try to get existing enclave first - enclaveCtx, err := mgr.GetEnclave(ctx, r.enclave) - if err != nil { - return fmt.Errorf("failed to get enclave: %w", err) - } - - // Set up run config with args if provided - serializedParams := "{}" - if args != nil { - argsBytes, err := io.ReadAll(args) - if err != nil { - return fmt.Errorf("failed to read args: %w", err) - } - serializedParams = string(argsBytes) - } - - runConfig := &starlark_run_config.StarlarkRunConfig{ - SerializedParams: serializedParams, - } - - stream, _, err := enclaveCtx.RunStarlarkPackage(ctx, packageName, runConfig) - if err != nil { - return fmt.Errorf("failed to run Kurtosis package: %w", err) - } - - // Set up message handlers - var isRunSuccessful bool - runFinishedHandler := makeRunFinishedHandler(&isRunSuccessful) - - // Combine custom handlers with default handler and run finished handler - handler := AllHandlers(append(r.runHandlers, newDefaultHandler(), runFinishedHandler)...) 
- - // Process the output stream - for responseLine := range stream { - if _, err := handler.Handle(ctx, responseLine); err != nil { - return err - } - } - - if !isRunSuccessful { - return errors.New(printRed("kurtosis package execution failed")) - } - - return nil -} - -func (r *KurtosisRunner) RunScript(ctx context.Context, script string) error { - if r.dryRun { - fmt.Printf("Dry run mode enabled, would run following script in enclave %s\n%s\n", - r.enclave, script) - return nil - } - - enclaveCtx, err := r.kurtosisCtx.GetEnclave(ctx, r.enclave) - if err != nil { - return fmt.Errorf("failed to get enclave: %w", err) - } - - return enclaveCtx.RunStarlarkScript(ctx, script, &starlark_run_config.StarlarkRunConfig{ - SerializedParams: "{}", - }) -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run_test.go b/kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run_test.go deleted file mode 100644 index dd50d929941cd..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/run/kurtosis_run_test.go +++ /dev/null @@ -1,112 +0,0 @@ -package run - -import ( - "context" - "fmt" - "testing" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/fake" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRunKurtosis(t *testing.T) { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - testErr := fmt.Errorf("test error") - tests := []struct { - name string - responses []fake.StarlarkResponse - kurtosisErr error - getErr error - wantErr bool - }{ - { - name: "successful run with all message types", - responses: []fake.StarlarkResponse{ - {ProgressMsg: []string{"Starting deployment..."}}, - {Info: "Preparing environment"}, - {Instruction: "Executing package"}, - {Warning: "Using default config"}, - {Result: "Service started", HasResult: true}, - {ProgressMsg: []string{"Deployment complete"}}, - 
{IsSuccessful: true}, - }, - wantErr: false, - }, - { - name: "run with error", - responses: []fake.StarlarkResponse{ - {ProgressMsg: []string{"Starting deployment..."}}, - {Err: &fake.StarlarkError{ExecutionErr: testErr}}, - }, - wantErr: true, - }, - { - name: "run with unsuccessful completion", - responses: []fake.StarlarkResponse{ - {ProgressMsg: []string{"Starting deployment..."}}, - {IsSuccessful: false}, - }, - wantErr: true, - }, - { - name: "kurtosis error", - kurtosisErr: fmt.Errorf("kurtosis failed"), - wantErr: true, - }, - { - name: "uses existing enclave", - responses: []fake.StarlarkResponse{ - {ProgressMsg: []string{"Using existing enclave"}}, - {IsSuccessful: true}, - }, - getErr: nil, - wantErr: false, - }, - { - name: "creates new enclave when get fails", - responses: []fake.StarlarkResponse{ - {ProgressMsg: []string{"Creating new enclave"}}, - {IsSuccessful: true}, - }, - getErr: fmt.Errorf("enclave not found"), - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Convert test responses to interface slice - interfaceResponses := make([]interfaces.StarlarkResponse, len(tt.responses)) - for i := range tt.responses { - interfaceResponses[i] = &tt.responses[i] - } - - // Create a fake enclave context that will return our test responses - fakeCtx := &fake.KurtosisContext{ - EnclaveCtx: &fake.EnclaveContext{ - RunErr: tt.kurtosisErr, - Responses: interfaceResponses, - }, - GetErr: tt.getErr, - } - - kurtosisRunner, err := NewKurtosisRunner( - WithKurtosisRunnerDryRun(false), - WithKurtosisRunnerEnclave("test-enclave"), - WithKurtosisRunnerKurtosisContext(fakeCtx), - ) - require.NoError(t, err) - - err = kurtosisRunner.Run(ctx, "test-package", nil) - if tt.wantErr { - assert.Error(t, err) - } else { - assert.NoError(t, err) - } - }) - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers.go b/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers.go deleted file mode 100644 index 
db3172fac6bbe..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers.go +++ /dev/null @@ -1,269 +0,0 @@ -package wrappers - -import ( - "context" - "errors" - "fmt" - "strings" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/kurtosis-tech/kurtosis/api/golang/core/kurtosis_core_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/enclaves" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" - "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/starlark_run_config" - "github.com/kurtosis-tech/kurtosis/api/golang/engine/kurtosis_engine_rpc_api_bindings" - "github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context" -) - -// Wrapper types to implement our interfaces -type KurtosisContextWrapper struct { - *kurtosis_context.KurtosisContext -} - -type EnclaveContextWrapper struct { - *enclaves.EnclaveContext -} - -type ServiceContextWrapper struct { - *services.ServiceContext -} - -type EnclaveInfoWrapper struct { - *kurtosis_engine_rpc_api_bindings.EnclaveInfo -} - -// mostly a no-op, to force the values to be typed as interfaces -func convertPortSpecMap(ports map[string]*services.PortSpec) map[string]interfaces.PortSpec { - wrappedPorts := make(map[string]interfaces.PortSpec) - for name, port := range ports { - wrappedPorts[name] = port - } - return wrappedPorts -} - -func (w *ServiceContextWrapper) GetPublicPorts() map[string]interfaces.PortSpec { - return convertPortSpecMap(w.ServiceContext.GetPublicPorts()) -} - -func (w *ServiceContextWrapper) GetPrivatePorts() map[string]interfaces.PortSpec { - return convertPortSpecMap(w.ServiceContext.GetPrivatePorts()) -} - -type starlarkRunResponseLineWrapper struct { - *kurtosis_core_rpc_api_bindings.StarlarkRunResponseLine -} - -type starlarkErrorWrapper struct { - *kurtosis_core_rpc_api_bindings.StarlarkError -} - -type starlarkRunProgressWrapper struct { - 
*kurtosis_core_rpc_api_bindings.StarlarkRunProgress -} - -type starlarkInstructionWrapper struct { - *kurtosis_core_rpc_api_bindings.StarlarkInstruction -} - -type starlarkRunFinishedEventWrapper struct { - *kurtosis_core_rpc_api_bindings.StarlarkRunFinishedEvent -} - -type starlarkWarningWrapper struct { - *kurtosis_core_rpc_api_bindings.StarlarkWarning -} - -type starlarkInfoWrapper struct { - *kurtosis_core_rpc_api_bindings.StarlarkInfo -} - -type starlarkInstructionResultWrapper struct { - *kurtosis_core_rpc_api_bindings.StarlarkInstructionResult -} - -type EnclaveNameAndUuidWrapper struct { - *kurtosis_engine_rpc_api_bindings.EnclaveNameAndUuid -} - -func (w KurtosisContextWrapper) CreateEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - enclaveCtx, err := w.KurtosisContext.CreateEnclave(ctx, name) - if err != nil { - return nil, err - } - return &EnclaveContextWrapper{enclaveCtx}, nil -} - -func (w KurtosisContextWrapper) GetEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - enclaveCtx, err := w.KurtosisContext.GetEnclaveContext(ctx, name) - if err != nil { - return nil, err - } - return &EnclaveContextWrapper{enclaveCtx}, nil -} - -func (w *EnclaveContextWrapper) GetService(serviceIdentifier string) (interfaces.ServiceContext, error) { - svcCtx, err := w.EnclaveContext.GetServiceContext(serviceIdentifier) - if err != nil { - return nil, err - } - return &ServiceContextWrapper{svcCtx}, nil -} - -func (w KurtosisContextWrapper) GetEnclaveStatus(ctx context.Context, enclave string) (interfaces.EnclaveStatus, error) { - enclaveInfo, err := w.KurtosisContext.GetEnclave(ctx, enclave) - if err != nil { - return "", err - } - status := enclaveInfo.GetContainersStatus() - switch status { - case kurtosis_engine_rpc_api_bindings.EnclaveContainersStatus_EnclaveContainersStatus_EMPTY: - return interfaces.EnclaveStatusEmpty, nil - case 
kurtosis_engine_rpc_api_bindings.EnclaveContainersStatus_EnclaveContainersStatus_RUNNING: - return interfaces.EnclaveStatusRunning, nil - case kurtosis_engine_rpc_api_bindings.EnclaveContainersStatus_EnclaveContainersStatus_STOPPED: - return interfaces.EnclaveStatusStopped, nil - default: - return "", fmt.Errorf("unknown enclave status: %v", status) - } -} - -func (w KurtosisContextWrapper) DestroyEnclave(ctx context.Context, name string) error { - return w.KurtosisContext.DestroyEnclave(ctx, name) -} - -func (w KurtosisContextWrapper) Clean(ctx context.Context, destroyAll bool) ([]interfaces.EnclaveNameAndUuid, error) { - deleted, err := w.KurtosisContext.Clean(ctx, destroyAll) - if err != nil { - return nil, err - } - - result := make([]interfaces.EnclaveNameAndUuid, len(deleted)) - for i, nameAndUuid := range deleted { - result[i] = &EnclaveNameAndUuidWrapper{nameAndUuid} - } - return result, nil -} - -func (w *EnclaveContextWrapper) RunStarlarkPackage(ctx context.Context, pkg string, serializedParams *starlark_run_config.StarlarkRunConfig) (<-chan interfaces.StarlarkResponse, string, error) { - runner := w.EnclaveContext.RunStarlarkPackage - if strings.HasPrefix(pkg, "github.com/") { - runner = w.EnclaveContext.RunStarlarkRemotePackage - } - - stream, cancel, err := runner(ctx, pkg, serializedParams) - if err != nil { - return nil, "", err - } - - // Convert the stream - wrappedStream := make(chan interfaces.StarlarkResponse) - go func() { - defer close(wrappedStream) - defer cancel() - for line := range stream { - wrappedStream <- &starlarkRunResponseLineWrapper{line} - } - }() - - return wrappedStream, "", nil -} - -func (w *EnclaveContextWrapper) RunStarlarkScript(ctx context.Context, script string, serializedParams *starlark_run_config.StarlarkRunConfig) error { - // TODO: we should probably collect some data from the result and extend the error. 
- _, err := w.EnclaveContext.RunStarlarkScriptBlocking(ctx, script, serializedParams) - return err -} - -func (w *starlarkRunResponseLineWrapper) GetError() interfaces.StarlarkError { - if err := w.StarlarkRunResponseLine.GetError(); err != nil { - return &starlarkErrorWrapper{err} - } - return nil -} - -func (w *starlarkRunResponseLineWrapper) GetProgressInfo() interfaces.ProgressInfo { - if progress := w.StarlarkRunResponseLine.GetProgressInfo(); progress != nil { - return &starlarkRunProgressWrapper{progress} - } - return nil -} - -func (w *starlarkRunResponseLineWrapper) GetInstruction() interfaces.Instruction { - if instruction := w.StarlarkRunResponseLine.GetInstruction(); instruction != nil { - return &starlarkInstructionWrapper{instruction} - } - return nil -} - -func (w *starlarkRunResponseLineWrapper) GetRunFinishedEvent() interfaces.RunFinishedEvent { - if event := w.StarlarkRunResponseLine.GetRunFinishedEvent(); event != nil { - return &starlarkRunFinishedEventWrapper{event} - } - return nil -} - -func (w *starlarkRunResponseLineWrapper) GetWarning() interfaces.Warning { - if warning := w.StarlarkRunResponseLine.GetWarning(); warning != nil { - return &starlarkWarningWrapper{warning} - } - return nil -} - -func (w *starlarkRunResponseLineWrapper) GetInfo() interfaces.Info { - if info := w.StarlarkRunResponseLine.GetInfo(); info != nil { - return &starlarkInfoWrapper{info} - } - return nil -} - -func (w *starlarkRunResponseLineWrapper) GetInstructionResult() interfaces.InstructionResult { - if result := w.StarlarkRunResponseLine.GetInstructionResult(); result != nil { - return &starlarkInstructionResultWrapper{result} - } - return nil -} - -func (w *starlarkRunProgressWrapper) GetCurrentStepInfo() []string { - return w.StarlarkRunProgress.CurrentStepInfo -} - -func (w *starlarkInstructionWrapper) GetDescription() string { - return w.StarlarkInstruction.Description -} - -func (w *starlarkRunFinishedEventWrapper) GetIsRunSuccessful() bool { - return 
w.StarlarkRunFinishedEvent.IsRunSuccessful -} - -func (w *starlarkErrorWrapper) GetInterpretationError() error { - if err := w.StarlarkError.GetInterpretationError(); err != nil { - return errors.New(err.GetErrorMessage()) - } - return nil -} - -func (w *starlarkErrorWrapper) GetValidationError() error { - if err := w.StarlarkError.GetValidationError(); err != nil { - return errors.New(err.GetErrorMessage()) - } - return nil -} - -func (w *starlarkErrorWrapper) GetExecutionError() error { - if err := w.StarlarkError.GetExecutionError(); err != nil { - return errors.New(err.GetErrorMessage()) - } - return nil -} - -func (w *starlarkWarningWrapper) GetMessage() string { - return w.StarlarkWarning.WarningMessage -} - -func (w *starlarkInfoWrapper) GetMessage() string { - return w.StarlarkInfo.InfoMessage -} - -func (w *starlarkInstructionResultWrapper) GetSerializedInstructionResult() string { - return w.StarlarkInstructionResult.SerializedInstructionResult -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_local.go b/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_local.go deleted file mode 100644 index 3e984df8d22f5..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_local.go +++ /dev/null @@ -1,21 +0,0 @@ -//go:build !testonly -// +build !testonly - -package wrappers - -import ( - "fmt" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context" -) - -func GetDefaultKurtosisContext() (interfaces.KurtosisContextInterface, error) { - kCtx, err := kurtosis_context.NewKurtosisContextFromLocalEngine() - if err != nil { - return nil, fmt.Errorf("failed to create Kurtosis context: %w", err) - } - return KurtosisContextWrapper{ - KurtosisContext: kCtx, - }, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_testing.go b/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_testing.go deleted file mode 100644 index 
af2c9a7510294..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/api/wrappers/wrappers_testing.go +++ /dev/null @@ -1,14 +0,0 @@ -//go:build testonly -// +build testonly - -package wrappers - -import ( - "errors" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" -) - -func GetDefaultKurtosisContext() (interfaces.KurtosisContextInterface, error) { - return nil, errors.New("attempting to use local Kurtosis context in testonly mode") -} diff --git a/kurtosis-devnet/pkg/kurtosis/endpoints.go b/kurtosis-devnet/pkg/kurtosis/endpoints.go deleted file mode 100644 index ad786e42f800d..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/endpoints.go +++ /dev/null @@ -1,384 +0,0 @@ -package kurtosis - -import ( - "encoding/json" - "strconv" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" -) - -// ServiceFinder is the main entry point for finding services and their endpoints -type ServiceFinder struct { - services inspect.ServiceMap - - l1Chain *spec.ChainSpec - l2Chains []*spec.ChainSpec - depsets map[string]descriptors.DepSet - - triagedServices []*triagedService -} - -// ServiceFinderOption configures a ServiceFinder -type ServiceFinderOption func(*ServiceFinder) - -// WithL1Chain sets the L1 chain -func WithL1Chain(chain *spec.ChainSpec) ServiceFinderOption { - return func(f *ServiceFinder) { - f.l1Chain = chain - } -} - -// WithL2Chains sets the L2 networks -func WithL2Chains(networks []*spec.ChainSpec) ServiceFinderOption { - return func(f *ServiceFinder) { - f.l2Chains = networks - } -} - -// WithDepSets sets the dependency sets -func WithDepSets(depsets map[string]descriptors.DepSet) ServiceFinderOption { - return func(f *ServiceFinder) { - f.depsets = 
depsets - } -} - -// NewServiceFinder creates a new ServiceFinder with the given options -func NewServiceFinder(services inspect.ServiceMap, opts ...ServiceFinderOption) *ServiceFinder { - f := &ServiceFinder{ - services: services, - } - for _, opt := range opts { - opt(f) - } - - f.triage() - return f -} - -type chainAcceptor func(*spec.ChainSpec) bool - -type serviceParser func(string) (int, chainAcceptor, bool) - -type triagedService struct { - tag string // service tag - idx int // service index (for nodes) - name string // service name (for nodes) - svc *descriptors.Service - accept chainAcceptor -} - -func acceptAll(c *spec.ChainSpec) bool { - return true -} - -func acceptID(s string) chainAcceptor { - return func(c *spec.ChainSpec) bool { - return c.NetworkID == s - } -} - -func acceptIDs(ids ...string) chainAcceptor { - acceptors := make([]chainAcceptor, 0) - for _, id := range ids { - acceptors = append(acceptors, acceptID(id)) - } - return combineAcceptors(acceptors...) -} - -func combineAcceptors(acceptors ...chainAcceptor) chainAcceptor { - return func(c *spec.ChainSpec) bool { - for _, acceptor := range acceptors { - if acceptor(c) { - return true - } - } - return false - } -} - -// This is now for L1 only. L2 is handled through labels. 
-func (f *ServiceFinder) triageNode(prefix string) serviceParser { - return func(serviceName string) (int, chainAcceptor, bool) { - extractIndex := func(s string) int { - // Extract numeric index from service name - parts := strings.Split(s, "-") - if idx, err := strconv.ParseUint(parts[0], 10, 32); err == nil { - return int(idx) - 1 - } - return 0 - } - - if strings.HasPrefix(serviceName, prefix) { // L1 - idx := extractIndex(strings.TrimPrefix(serviceName, prefix)) - return idx, acceptID(f.l1Chain.NetworkID), true - } - - return 0, nil, false - } -} - -type serviceParserRules map[string]serviceParser - -func (spr serviceParserRules) apply(serviceName string, endpoints descriptors.EndpointMap) *triagedService { - for tag, rule := range spr { - if idx, accept, ok := rule(serviceName); ok { - return &triagedService{ - tag: tag, - idx: idx, - accept: accept, - svc: &descriptors.Service{ - Name: serviceName, - Endpoints: endpoints, - }, - } - } - } - return nil -} - -// TODO: this might need some adjustments as we stabilize labels in optimism-package -const ( - kindLabel = "op.kind" - networkIDLabel = "op.network.id" - nodeNameLabel = "op.network.participant.name" - nodeIndexLabel = "op.network.participant.index" - supervisorSuperchainLabel = "op.network.supervisor.superchain" -) - -func (f *ServiceFinder) getNetworkIDs(svc *inspect.Service) []string { - var network_ids []string - id, ok := svc.Labels[networkIDLabel] - if !ok { - // network IDs might be specified through a superchain - superchain, ok := svc.Labels[supervisorSuperchainLabel] - if !ok { - return nil - } - ds, ok := f.depsets[superchain] - if !ok { - return nil - } - var depSet depset.StaticConfigDependencySet - err := json.Unmarshal(ds, &depSet) - if err != nil { - return nil - } - for _, chain := range depSet.Chains() { - network_ids = append(network_ids, chain.String()) - } - } else { - network_ids = strings.Split(id, "-") - } - - return network_ids -} - -func (f *ServiceFinder) triageByLabels(svc 
*inspect.Service, name string, endpoints descriptors.EndpointMap) *triagedService { - tag, ok := svc.Labels[kindLabel] - if !ok { - return nil - } - - // So that we can have the same behaviour as netchef - if (tag == "flashblocks-websocket-proxy") && endpoints != nil { - if _, has := endpoints["ws-flashblocks"]; !has { - if ws, ok := endpoints["ws"]; ok { - endpoints["ws-flashblocks"] = ws - } - } - } - network_ids := f.getNetworkIDs(svc) - idx := -1 - if val, ok := svc.Labels[nodeIndexLabel]; ok { - i, err := strconv.Atoi(val) - if err != nil { - return nil - } - idx = i - } - - accept := acceptIDs(network_ids...) - if len(network_ids) == 0 { // TODO: this is only for faucet right now, we can remove this once we have a proper label for all services - accept = acceptAll - } - return &triagedService{ - tag: tag, - idx: idx, - name: svc.Labels[nodeNameLabel], - accept: accept, - svc: &descriptors.Service{ - Name: name, - Endpoints: endpoints, - }, - } -} - -func (f *ServiceFinder) triage() { - rules := serviceParserRules{ - "el": f.triageNode("el-"), - "cl": f.triageNode("cl-"), - } - - triagedServices := []*triagedService{} - for serviceName, svc := range f.services { - endpoints := make(descriptors.EndpointMap) - for portName, portInfo := range svc.Ports { - endpoints[portName] = portInfo - } - - // Ultimately we'll rely only on labels, and most of the code in this file will disappear as a result. 
- // - // For now though the L1 services are still not tagged properly so we rely on the name resolution as a fallback - triaged := f.triageByLabels(svc, serviceName, endpoints) - if triaged == nil { - triaged = rules.apply(serviceName, endpoints) - } - - if triaged != nil { - triagedServices = append(triagedServices, triaged) - } - } - - f.triagedServices = triagedServices -} - -func (f *ServiceFinder) findChainServices(chain *spec.ChainSpec) ([]descriptors.Node, descriptors.RedundantServiceMap) { - var nodes []descriptors.Node - services := make(descriptors.RedundantServiceMap) - - var selected []*triagedService - for _, svc := range f.triagedServices { - if svc.accept(chain) { - if svc.idx >= len(nodes) { - // just resize the slice, that'll create "0" items for the new indices. - // We don't expect more than a few nodes per chain, so this is fine. - nodes = make([]descriptors.Node, svc.idx+1) - } - if svc.idx < 0 { // not a node service - // create a dummy entry for the service - services[svc.tag] = nil - } - selected = append(selected, svc) - } - } - - // Now our slice is the right size, and our map has the right keys, we can just fill in the data - for _, svc := range selected { - if svc.idx >= 0 { - node := nodes[svc.idx] - if node.Services == nil { - node.Services = make(descriptors.ServiceMap) - } - node.Services[svc.tag] = svc.svc - node.Name = svc.name - - if cfg, ok := chain.Nodes[node.Name]; ok { - node.Labels = make(map[string]string) - if cfg.IsSequencer { - node.Labels["sequencer"] = "true" - } - node.Labels["elType"] = cfg.ELType - node.Labels["clType"] = cfg.CLType - } - - nodes[svc.idx] = node - } else { - services[svc.tag] = append(services[svc.tag], svc.svc) - } - } - - return reorderNodes(nodes), services -} - -// FindL1Services finds L1 nodes. 
-func (f *ServiceFinder) FindL1Services() ([]descriptors.Node, descriptors.RedundantServiceMap) { - return f.findChainServices(f.l1Chain) -} - -// FindL2Services finds L2 nodes and services for a specific network -func (f *ServiceFinder) FindL2Services(s *spec.ChainSpec) ([]descriptors.Node, descriptors.RedundantServiceMap) { - return f.findChainServices(s) -} - -// TODO: remove this once we remove the devnet-sdk/system test framework. -// At that point the order of the nodes will not be important anymore. -func reorderNodes(nodes []descriptors.Node) []descriptors.Node { - // This is a hack to preserve some compatibililty with prior expectations, - // that were embedded in the devnet-sdk/system test framework. - // - // We need to rearrange the order of the nodes so that: - // - either there are nodes in the list that contain a label "sequencer", - // and then one of them must be the first node - // - or there are no nodes with the label "sequencer", and there are some - // with el type "op-geth" and cl type "op-node". 
Then one of them must be - // the first node - // - or none of the above, and then we keep the order as is - - if len(nodes) == 0 { - return nodes - } - - // First, check if any node has the "sequencer" label - var sequencerIndex int = -1 - for i, node := range nodes { - if node.Labels != nil && node.Labels["sequencer"] == "true" { - sequencerIndex = i - break - } - } - - // If we found a sequencer, move it to the front - if sequencerIndex >= 0 { - return moveNodeToFront(nodes, sequencerIndex) - } - - // If no sequencer found, look for nodes with el type "op-geth" and cl type "op-node" - var opGethOpNodeIndex int = -1 - for i, node := range nodes { - if node.Services != nil { - hasOpGeth := false - hasOpNode := false - - // Check for op-geth service - if node.Labels != nil && node.Labels["elType"] == "op-geth" { - hasOpGeth = true - } - - // Check for op-node service - if node.Labels != nil && node.Labels["clType"] == "op-node" { - hasOpNode = true - } - - if hasOpGeth && hasOpNode { - opGethOpNodeIndex = i - break - } - } - } - - // If we found a node with both op-geth and op-node, move it to the front - if opGethOpNodeIndex >= 0 { - return moveNodeToFront(nodes, opGethOpNodeIndex) - } - - // If none of the above conditions are met, return the nodes in their original order - return nodes -} - -func moveNodeToFront(nodes []descriptors.Node, index int) []descriptors.Node { - if index < 0 || index >= len(nodes) { - return nodes - } - - result := make([]descriptors.Node, len(nodes)) - copy(result, nodes) - // Move the node at the specified index to the front - nodeToMove := result[index] - copy(result[1:index+1], result[:index]) - result[0] = nodeToMove - return result -} diff --git a/kurtosis-devnet/pkg/kurtosis/endpoints_test.go b/kurtosis-devnet/pkg/kurtosis/endpoints_test.go deleted file mode 100644 index d46444cc5586f..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/endpoints_test.go +++ /dev/null @@ -1,465 +0,0 @@ -package kurtosis - -import ( - "encoding/json" 
- "fmt" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestFindChainServices(t *testing.T) { - // Create test chains based on the scenario - chain1 := &spec.ChainSpec{ - Name: "op-kurtosis-1", - NetworkID: "2151908", - } - chain2 := &spec.ChainSpec{ - Name: "op-kurtosis-2", - NetworkID: "2151909", - } - chains := []*spec.ChainSpec{chain1, chain2} - - // Create mock dependency set - depSets := createTestDepSets(t) - - // Create mock service map based on inspect data from the scenario - services := createTestServiceMap() - - // Create service finder with the test data - finder := NewServiceFinder( - services, - WithL1Chain(&spec.ChainSpec{NetworkID: "0"}), - WithL2Chains(chains), - WithDepSets(depSets), - ) - - // Test triage directly to ensure services are correctly triaged - t.Run("triage services", func(t *testing.T) { - assert.NotNil(t, finder.triagedServices, "Triaged services should not be nil") - assert.NotEmpty(t, finder.triagedServices, "Triaged services should not be empty") - - // Count service types - tagCount := make(map[string]int) - for _, svc := range finder.triagedServices { - tagCount[svc.tag]++ - } - - // Verify expected service counts - assert.Equal(t, 3, tagCount["cl"], "Should have 3 CL services") - assert.Equal(t, 3, tagCount["el"], "Should have 3 EL service") - assert.Equal(t, 2, tagCount["batcher"], "Should have 2 batcher services") - assert.Equal(t, 2, tagCount["proposer"], "Should have 2 proposer services") - assert.Equal(t, 2, tagCount["proxyd"], "Should have 2 proxyd services") - assert.Equal(t, 1, 
tagCount["challenger"], "Should have 1 challenger service") - assert.Equal(t, 1, tagCount["supervisor"], "Should have 1 supervisor service") - assert.Equal(t, 1, tagCount["faucet"], "Should have 1 faucet service") - }) - - // Test L1 service discovery - t.Run("L1 services", func(t *testing.T) { - nodes, services := finder.FindL1Services() - - // Verify L1 nodes - assert.Equal(t, 1, len(nodes), "Should have exactly 1 node") - - // Verify L1 services - assert.Equal(t, 1, len(services), "Should have exactly 1 service") - assert.Contains(t, services, "faucet", "Should have faucet service") - }) - - // Test L2 services for both chains - for _, chain := range chains { - t.Run(fmt.Sprintf("L2 %s services", chain.Name), func(t *testing.T) { - nodes, services := finder.FindL2Services(chain) - - assert.Equal(t, 1, len(nodes), "Should have exactly 1 node") - assert.Equal(t, 6, len(services), "Should have exactly 6 services") - - assert.Contains(t, services, "batcher", "Should have batcher service") - assert.Contains(t, services, "proposer", "Should have proposer service") - assert.Contains(t, services, "proxyd", "Should have proxyd service") - assert.Contains(t, services, "challenger", "Should have challenger service") - assert.Contains(t, services, "supervisor", "Should have supervisor service") - assert.Contains(t, services, "faucet", "Should have faucet service") - }) - } -} - -// createTestServiceMap creates a service map based on the provided scenario output -func createTestServiceMap() inspect.ServiceMap { - services := inspect.ServiceMap{ - // L1 Services - must match pattern expected by triageNode function - "cl-1-teku-geth": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32777}, - "metrics": &descriptors.PortInfo{Port: 32778}, - "tcp-discovery": &descriptors.PortInfo{Port: 32779}, - "udp-discovery": &descriptors.PortInfo{Port: 32769}, - }, - }, - "el-1-geth-teku": &inspect.Service{ - Ports: inspect.PortMap{ - "engine-rpc": 
&descriptors.PortInfo{Port: 32774}, - "metrics": &descriptors.PortInfo{Port: 32775}, - "rpc": &descriptors.PortInfo{Port: 32772}, - "tcp-discovery": &descriptors.PortInfo{Port: 32776}, - "udp-discovery": &descriptors.PortInfo{Port: 32768}, - "ws": &descriptors.PortInfo{Port: 32773}, - }, - }, - "fileserver": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32771}, - }, - }, - "grafana": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32815}, - }, - }, - "prometheus": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32814}, - }, - }, - - // L2 Chain1 Services - "op-batcher-op-kurtosis-1": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32791}, - "metrics": &descriptors.PortInfo{Port: 32792}, - }, - Labels: map[string]string{ - kindLabel: "batcher", - networkIDLabel: "2151908", - }, - }, - "op-proposer-op-kurtosis-1": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32793}, - "metrics": &descriptors.PortInfo{Port: 32794}, - }, - Labels: map[string]string{ - kindLabel: "proposer", - networkIDLabel: "2151908", - }, - }, - "op-cl-2151908-1": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32785}, - "metrics": &descriptors.PortInfo{Port: 32786}, - "rpc-interop": &descriptors.PortInfo{Port: 32788}, - "tcp-discovery": &descriptors.PortInfo{Port: 32787}, - "udp-discovery": &descriptors.PortInfo{Port: 32771}, - }, - Labels: map[string]string{ - kindLabel: "cl", - networkIDLabel: "2151908", - nodeIndexLabel: "0", - }, - }, - "op-el-2151908-1": &inspect.Service{ - Ports: inspect.PortMap{ - "engine-rpc": &descriptors.PortInfo{Port: 32782}, - "metrics": &descriptors.PortInfo{Port: 32783}, - "rpc": &descriptors.PortInfo{Port: 32780}, - "tcp-discovery": &descriptors.PortInfo{Port: 32784}, - "udp-discovery": &descriptors.PortInfo{Port: 32770}, - "ws": &descriptors.PortInfo{Port: 
32781}, - }, - Labels: map[string]string{ - kindLabel: "el", - networkIDLabel: "2151908", - nodeIndexLabel: "0", - }, - }, - "proxyd-2151908": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32790}, - "metrics": &descriptors.PortInfo{Port: 32789}, - }, - Labels: map[string]string{ - kindLabel: "proxyd", - networkIDLabel: "2151908", - }, - }, - - // L2 Chain2 Services - "op-batcher-op-kurtosis-2": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32806}, - "metrics": &descriptors.PortInfo{Port: 32807}, - }, - Labels: map[string]string{ - kindLabel: "batcher", - networkIDLabel: "2151909", - }, - }, - "op-proposer-op-kurtosis-2": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32808}, - "metrics": &descriptors.PortInfo{Port: 32809}, - }, - Labels: map[string]string{ - kindLabel: "proposer", - networkIDLabel: "2151909", - }, - }, - "op-cl-2151909-1": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32800}, - "metrics": &descriptors.PortInfo{Port: 32801}, - "rpc-interop": &descriptors.PortInfo{Port: 32803}, - "tcp-discovery": &descriptors.PortInfo{Port: 32802}, - "udp-discovery": &descriptors.PortInfo{Port: 32773}, - }, - Labels: map[string]string{ - kindLabel: "cl", - networkIDLabel: "2151909", - nodeIndexLabel: "0", - }, - }, - "op-el-2151909-1": &inspect.Service{ - Ports: inspect.PortMap{ - "engine-rpc": &descriptors.PortInfo{Port: 32797}, - "metrics": &descriptors.PortInfo{Port: 32798}, - "rpc": &descriptors.PortInfo{Port: 32795}, - "tcp-discovery": &descriptors.PortInfo{Port: 32799}, - "udp-discovery": &descriptors.PortInfo{Port: 32772}, - "ws": &descriptors.PortInfo{Port: 32796}, - }, - Labels: map[string]string{ - kindLabel: "el", - networkIDLabel: "2151909", - nodeIndexLabel: "0", - }, - }, - "proxyd-2151909": &inspect.Service{ - Ports: inspect.PortMap{ - "http": &descriptors.PortInfo{Port: 32805}, - "metrics": 
&descriptors.PortInfo{Port: 32804}, - }, - Labels: map[string]string{ - kindLabel: "proxyd", - networkIDLabel: "2151909", - }, - }, - - // Shared L2 Services - "op-faucet": &inspect.Service{ - Ports: inspect.PortMap{ - "rpc": &descriptors.PortInfo{Port: 32813}, - }, - Labels: map[string]string{ - kindLabel: "faucet", - }, - }, - "challenger-service": &inspect.Service{ // intentionally not following conventions, to force use of labels. - Ports: inspect.PortMap{ - "metrics": &descriptors.PortInfo{Port: 32812}, - }, - Labels: map[string]string{ - kindLabel: "challenger", - networkIDLabel: "2151908-2151909", - }, - }, - "op-supervisor-service-superchain": &inspect.Service{ - Ports: inspect.PortMap{ - "metrics": &descriptors.PortInfo{Port: 32811}, - "rpc": &descriptors.PortInfo{Port: 32810}, - }, - Labels: map[string]string{ - kindLabel: "supervisor", - supervisorSuperchainLabel: "superchain", - }, - }, - "validator-key-generation-cl-validator-keystore": {}, - } - - return services -} - -// createTestDepSets creates test dependency sets for the test -func createTestDepSets(t *testing.T) map[string]descriptors.DepSet { - // Create the dependency set for the superchain - depSetData := map[eth.ChainID]*depset.StaticConfigDependency{ - eth.ChainIDFromUInt64(2151908): {}, - eth.ChainIDFromUInt64(2151909): {}, - } - - depSet, err := depset.NewStaticConfigDependencySet(depSetData) - require.NoError(t, err) - - jsonData, err := json.Marshal(depSet) - require.NoError(t, err) - - return map[string]descriptors.DepSet{ - "superchain": descriptors.DepSet(jsonData), - } -} - -// TestTriageFunctions tests the actual implementation of triage functions -func TestTriageFunctions(t *testing.T) { - l1Spec := &spec.ChainSpec{NetworkID: "123456"} - // Create a minimal finder with default values - finder := &ServiceFinder{ - services: make(inspect.ServiceMap), - l1Chain: l1Spec, - } - - // Test the triageNode function for recognizing services - t.Run("triageNode", func(t *testing.T) { - // 
Test CL node parser - parser := finder.triageNode("cl-") - - // Test L1 node format - idx, accept, ok := parser("cl-1-teku-geth") - assert.True(t, ok, "Should recognize L1 CL node") - assert.Equal(t, 0, idx, "Should extract index 0 from L1 CL node") - assert.True(t, accept(l1Spec), "Should accept L1") - - // Test with various suffixes to see what is recognized - _, _, ok = parser("cl-1-teku-geth-with-extra-parts") - assert.True(t, ok, "Should recognize L1 CL node regardless of suffix") - - // This is considered invalid - _, _, ok = parser("cl") - assert.False(t, ok, "Should not recognize simple 'cl'") - }) -} - -// TestReorderNodes tests the reorderNodes function with various scenarios -func TestReorderNodes(t *testing.T) { - // Define common nodes to reduce repetition - regularNode1 := descriptors.Node{Name: "node1", Services: descriptors.ServiceMap{}} - regularNode2 := descriptors.Node{Name: "node2", Services: descriptors.ServiceMap{}} - regularNode3 := descriptors.Node{Name: "node3", Services: descriptors.ServiceMap{}} - - sequencerNode := descriptors.Node{ - Name: "sequencer", - Labels: map[string]string{"sequencer": "true"}, - } - sequencerNode1 := descriptors.Node{ - Name: "sequencer1", - Labels: map[string]string{"sequencer": "true"}, - } - sequencerNode2 := descriptors.Node{ - Name: "sequencer2", - Labels: map[string]string{"sequencer": "true"}, - } - - opGethOpNode := descriptors.Node{ - Name: "op-node", - Services: descriptors.ServiceMap{ - "el": &descriptors.Service{Name: "op-geth-1"}, - "cl": &descriptors.Service{Name: "op-node-1"}, - }, - Labels: map[string]string{ - "elType": "op-geth", - "clType": "op-node", - }, - } - - elOnlyNode := descriptors.Node{ - Name: "el-only", - Services: descriptors.ServiceMap{ - "el": &descriptors.Service{Name: "op-geth"}, - }, - } - - clOnlyNode := descriptors.Node{ - Name: "cl-only", - Services: descriptors.ServiceMap{ - "cl": &descriptors.Service{Name: "op-node"}, - }, - } - - t.Run("empty slice", func(t *testing.T) { 
- nodes := []descriptors.Node{} - result := reorderNodes(nodes) - assert.Equal(t, nodes, result, "Empty slice should be returned unchanged") - }) - - t.Run("single node", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1} - result := reorderNodes(nodes) - assert.Equal(t, nodes, result, "Single node should be returned unchanged") - }) - - t.Run("sequencer node first", func(t *testing.T) { - nodes := []descriptors.Node{sequencerNode, regularNode2, regularNode3} - result := reorderNodes(nodes) - assert.Equal(t, nodes, result, "Sequencer already first should remain unchanged") - }) - - t.Run("sequencer node moved to front", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1, sequencerNode, regularNode3} - result := reorderNodes(nodes) - - expected := []descriptors.Node{sequencerNode, regularNode1, regularNode3} - assert.Equal(t, expected, result, "Sequencer should be moved to front") - }) - - t.Run("sequencer node at end", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1, regularNode2, sequencerNode} - result := reorderNodes(nodes) - - expected := []descriptors.Node{sequencerNode, regularNode1, regularNode2} - assert.Equal(t, expected, result, "Sequencer at end should be moved to front") - }) - - t.Run("multiple sequencer nodes - first one moved", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1, sequencerNode1, regularNode3, sequencerNode2} - result := reorderNodes(nodes) - - expected := []descriptors.Node{sequencerNode1, regularNode1, regularNode3, sequencerNode2} - assert.Equal(t, expected, result, "First sequencer should be moved to front") - }) - - t.Run("op-geth and op-node services moved to front", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1, opGethOpNode, regularNode3} - result := reorderNodes(nodes) - - expected := []descriptors.Node{opGethOpNode, regularNode1, regularNode3} - assert.Equal(t, expected, result, "Node with op-geth and op-node should be moved to front") - }) - - 
t.Run("op-geth and op-node services already first", func(t *testing.T) { - nodes := []descriptors.Node{opGethOpNode, regularNode2, regularNode3} - result := reorderNodes(nodes) - assert.Equal(t, nodes, result, "Node with op-geth and op-node already first should remain unchanged") - }) - - t.Run("only el service - no reordering", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1, elOnlyNode, regularNode3} - result := reorderNodes(nodes) - assert.Equal(t, nodes, result, "Node with only el service should not be reordered") - }) - - t.Run("only cl service - no reordering", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1, clOnlyNode, regularNode3} - result := reorderNodes(nodes) - assert.Equal(t, nodes, result, "Node with only cl service should not be reordered") - }) - - t.Run("no special nodes - original order preserved", func(t *testing.T) { - nodes := []descriptors.Node{regularNode1, regularNode2, regularNode3} - result := reorderNodes(nodes) - assert.Equal(t, nodes, result, "Nodes without special properties should maintain original order") - }) - - t.Run("sequencer takes precedence over op-geth/op-node", func(t *testing.T) { - nodes := []descriptors.Node{opGethOpNode, sequencerNode, regularNode3} - result := reorderNodes(nodes) - - expected := []descriptors.Node{sequencerNode, opGethOpNode, regularNode3} - assert.Equal(t, expected, result, "Sequencer should take precedence over op-geth/op-node") - }) -} diff --git a/kurtosis-devnet/pkg/kurtosis/kurtosis.go b/kurtosis-devnet/pkg/kurtosis/kurtosis.go deleted file mode 100644 index ddf836a992078..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/kurtosis.go +++ /dev/null @@ -1,297 +0,0 @@ -package kurtosis - -import ( - "bytes" - "context" - "fmt" - "io" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - devnetTypes "github.com/ethereum-optimism/optimism/devnet-sdk/types" - apiInterfaces "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - 
"github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/run" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/wrappers" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/deployer" - srcInterfaces "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/interfaces" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - autofixTypes "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/types" -) - -const ( - DefaultPackageName = "github.com/ethpandaops/optimism-package" - DefaultEnclave = "devnet" - - // static URL for kurtosis reverse proxy - defaultKurtosisReverseProxyURL = "http://127.0.0.1:9730" -) - -// KurtosisEnvironment represents the output of a Kurtosis deployment -type KurtosisEnvironment struct { - *descriptors.DevnetEnvironment -} - -// KurtosisDeployer handles deploying packages using Kurtosis -type KurtosisDeployer struct { - // Base directory where the deployment commands should be executed - baseDir string - // Package name to deploy - packageName string - // Dry run mode - dryRun bool - // Enclave name - enclave string - - // interfaces for kurtosis sources - enclaveSpec srcInterfaces.EnclaveSpecifier - enclaveInspecter srcInterfaces.EnclaveInspecter - enclaveObserver srcInterfaces.EnclaveObserver - jwtExtractor srcInterfaces.JWTExtractor - depsetExtractor srcInterfaces.DepsetExtractor - - // interface for kurtosis interactions - kurtosisCtx apiInterfaces.KurtosisContextInterface - - // autofix mode - autofixMode autofixTypes.AutofixMode -} - -type KurtosisDeployerOptions func(*KurtosisDeployer) - -func WithKurtosisBaseDir(baseDir string) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.baseDir = baseDir - } -} - -func WithKurtosisPackageName(packageName string) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.packageName = packageName - } -} - -func WithKurtosisDryRun(dryRun bool) 
KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.dryRun = dryRun - } -} - -func WithKurtosisEnclave(enclave string) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.enclave = enclave - } -} - -func WithKurtosisEnclaveSpec(enclaveSpec srcInterfaces.EnclaveSpecifier) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.enclaveSpec = enclaveSpec - } -} - -func WithKurtosisEnclaveInspecter(enclaveInspecter srcInterfaces.EnclaveInspecter) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.enclaveInspecter = enclaveInspecter - } -} - -func WithKurtosisEnclaveObserver(enclaveObserver srcInterfaces.EnclaveObserver) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.enclaveObserver = enclaveObserver - } -} - -func WithKurtosisJWTExtractor(extractor srcInterfaces.JWTExtractor) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.jwtExtractor = extractor - } -} - -func WithKurtosisDepsetExtractor(extractor srcInterfaces.DepsetExtractor) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.depsetExtractor = extractor - } -} - -func WithKurtosisKurtosisContext(kurtosisCtx apiInterfaces.KurtosisContextInterface) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.kurtosisCtx = kurtosisCtx - } -} - -func WithKurtosisAutofixMode(autofixMode autofixTypes.AutofixMode) KurtosisDeployerOptions { - return func(d *KurtosisDeployer) { - d.autofixMode = autofixMode - } -} - -// NewKurtosisDeployer creates a new KurtosisDeployer instance -func NewKurtosisDeployer(opts ...KurtosisDeployerOptions) (*KurtosisDeployer, error) { - d := &KurtosisDeployer{ - baseDir: ".", - packageName: DefaultPackageName, - dryRun: false, - enclave: DefaultEnclave, - - enclaveSpec: &enclaveSpecAdapter{}, - enclaveInspecter: &enclaveInspectAdapter{}, - enclaveObserver: &enclaveDeployerAdapter{}, - jwtExtractor: &enclaveJWTAdapter{}, - depsetExtractor: &enclaveDepsetAdapter{}, - } - - for _, 
opt := range opts { - opt(d) - } - - if d.kurtosisCtx == nil { - var err error - d.kurtosisCtx, err = wrappers.GetDefaultKurtosisContext() - if err != nil { - return nil, fmt.Errorf("failed to create Kurtosis context: %w", err) - } - } - - return d, nil -} - -func (d *KurtosisDeployer) getWallets(wallets deployer.WalletList) descriptors.WalletMap { - walletMap := make(descriptors.WalletMap) - for _, wallet := range wallets { - walletMap[wallet.Name] = &descriptors.Wallet{ - Address: devnetTypes.Address(wallet.Address), - PrivateKey: wallet.PrivateKey, - } - } - return walletMap -} - -// GetEnvironmentInfo parses the input spec and inspect output to create KurtosisEnvironment -func (d *KurtosisDeployer) GetEnvironmentInfo(ctx context.Context, s *spec.EnclaveSpec) (*KurtosisEnvironment, error) { - inspectResult, err := d.enclaveInspecter.EnclaveInspect(ctx, d.enclave) - if err != nil { - return nil, fmt.Errorf("failed to parse inspect output: %w", err) - } - - // Get contract addresses - deployerData, err := d.enclaveObserver.EnclaveObserve(ctx, d.enclave) - if err != nil { - return nil, fmt.Errorf("failed to parse deployer state: %w", err) - } - - // Get JWT data - jwtData, err := d.jwtExtractor.ExtractData(ctx, d.enclave) - if err != nil { - return nil, fmt.Errorf("failed to extract JWT data: %w", err) - } - - // Get dependency set - var depsets map[string]descriptors.DepSet - if s.Features.Contains(spec.FeatureInterop) { - depsets, err = d.depsetExtractor.ExtractData(ctx, d.enclave) - if err != nil { - return nil, fmt.Errorf("failed to extract dependency set: %w", err) - } - } - - env := &KurtosisEnvironment{ - DevnetEnvironment: &descriptors.DevnetEnvironment{ - Name: d.enclave, - ReverseProxyURL: defaultKurtosisReverseProxyURL, - - L2: make([]*descriptors.L2Chain, 0, len(s.Chains)), - Features: s.Features, - DepSets: depsets, - }, - } - - // Find L1 endpoint - finder := NewServiceFinder( - inspectResult.UserServices, - WithL1Chain(&spec.ChainSpec{ - NetworkID: 
deployerData.L1ChainID, - Name: "Ethereum", - }), - WithL2Chains(s.Chains), - WithDepSets(depsets), - ) - if nodes, services := finder.FindL1Services(); len(nodes) > 0 { - chain := &descriptors.Chain{ - ID: deployerData.L1ChainID, - Name: "Ethereum", - Services: services, - Nodes: nodes, - JWT: jwtData.L1JWT, - Addresses: descriptors.AddressMap(deployerData.State.Addresses), - Wallets: d.getWallets(deployerData.L1ValidatorWallets), - Config: deployerData.L1ChainConfig, - } - if deployerData.State != nil { - chain.Addresses = descriptors.AddressMap(deployerData.State.Addresses) - chain.Wallets = d.getWallets(deployerData.L1ValidatorWallets) - } - env.L1 = chain - } - - // Find L2 endpoints - for _, chainSpec := range s.Chains { - nodes, services := finder.FindL2Services(chainSpec) - - chain := &descriptors.L2Chain{ - Chain: &descriptors.Chain{ - Name: chainSpec.Name, - ID: chainSpec.NetworkID, - Services: services, - Nodes: nodes, - JWT: jwtData.L2JWT, - }, - } - - // Add contract addresses if available - if deployerData.State != nil && deployerData.State.Deployments != nil { - if deployment, ok := deployerData.State.Deployments[chainSpec.NetworkID]; ok { - chain.Addresses = descriptors.AddressMap(deployment.L2Addresses) - chain.Config = deployment.Config - chain.RollupConfig = deployment.RollupConfig - chain.Wallets = d.getWallets(deployment.L2Wallets) - chain.L1Wallets = d.getWallets(deployment.L1Wallets) - } - } - - env.L2 = append(env.L2, chain) - } - - return env, nil -} - -// Deploy executes the Kurtosis deployment command with the provided input -func (d *KurtosisDeployer) Deploy(ctx context.Context, input io.Reader) (*spec.EnclaveSpec, error) { - // Parse the input spec first - inputCopy := new(bytes.Buffer) - tee := io.TeeReader(input, inputCopy) - - spec, err := d.enclaveSpec.EnclaveSpec(tee) - if err != nil { - return nil, fmt.Errorf("failed to parse input spec: %w", err) - } - - // Run kurtosis command - kurtosisRunner, err := run.NewKurtosisRunner( - 
run.WithKurtosisRunnerDryRun(d.dryRun), - run.WithKurtosisRunnerEnclave(d.enclave), - run.WithKurtosisRunnerKurtosisContext(d.kurtosisCtx), - ) - if err != nil { - return nil, fmt.Errorf("failed to create Kurtosis runner: %w", err) - } - - if err := kurtosisRunner.Run(ctx, d.packageName, inputCopy); err != nil { - return nil, err - } - - // If dry run, return empty environment - if d.dryRun { - return spec, nil - } - - // Get environment information - return spec, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/kurtosis_test.go b/kurtosis-devnet/pkg/kurtosis/kurtosis_test.go deleted file mode 100644 index 3e7d1ddafdc80..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/kurtosis_test.go +++ /dev/null @@ -1,567 +0,0 @@ -package kurtosis - -import ( - "context" - "fmt" - "io" - "strings" - "testing" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/fake" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/interfaces" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/deployer" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/jwt" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestKurtosisDeployer(t *testing.T) { - tests := []struct { - name string - opts []KurtosisDeployerOptions - wantBaseDir string - wantPkg string - wantDryRun bool - wantEnclave string - }{ - { - name: "default values", - opts: nil, - wantBaseDir: ".", - wantPkg: DefaultPackageName, - wantDryRun: false, - wantEnclave: DefaultEnclave, - }, - { - name: "with options", - opts: []KurtosisDeployerOptions{ - WithKurtosisBaseDir("/custom/dir"), - 
WithKurtosisPackageName("custom-package"), - WithKurtosisDryRun(true), - WithKurtosisEnclave("custom-enclave"), - }, - wantBaseDir: "/custom/dir", - wantPkg: "custom-package", - wantDryRun: true, - wantEnclave: "custom-enclave", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a fake Kurtosis context - fakeCtx := &fake.KurtosisContext{ - EnclaveCtx: &fake.EnclaveContext{ - Responses: []interfaces.StarlarkResponse{ - &fake.StarlarkResponse{ - IsSuccessful: true, - }, - }, - }, - } - - // Add the fake context to the options - opts := append(tt.opts, WithKurtosisKurtosisContext(fakeCtx)) - - d, err := NewKurtosisDeployer(opts...) - require.NoError(t, err) - assert.Equal(t, tt.wantBaseDir, d.baseDir) - assert.Equal(t, tt.wantPkg, d.packageName) - assert.Equal(t, tt.wantDryRun, d.dryRun) - assert.Equal(t, tt.wantEnclave, d.enclave) - }) - } -} - -// fakeEnclaveInspecter implements EnclaveInspecter for testing -type fakeEnclaveInspecter struct { - result *inspect.InspectData - err error -} - -func (f *fakeEnclaveInspecter) EnclaveInspect(ctx context.Context, enclave string) (*inspect.InspectData, error) { - return f.result, f.err -} - -// fakeEnclaveObserver implements EnclaveObserver for testing -type fakeEnclaveObserver struct { - state *deployer.DeployerData - err error -} - -func (f *fakeEnclaveObserver) EnclaveObserve(ctx context.Context, enclave string) (*deployer.DeployerData, error) { - return f.state, f.err -} - -// fakeEnclaveSpecifier implements EnclaveSpecifier for testing -type fakeEnclaveSpecifier struct { - spec *spec.EnclaveSpec - err error -} - -func (f *fakeEnclaveSpecifier) EnclaveSpec(r io.Reader) (*spec.EnclaveSpec, error) { - return f.spec, f.err -} - -// fakeJWTExtractor implements interfaces.JWTExtractor for testing -type fakeJWTExtractor struct { - data *jwt.Data - err error -} - -func (f *fakeJWTExtractor) ExtractData(ctx context.Context, enclave string) (*jwt.Data, error) { - return f.data, f.err -} - 
-type fakeDepsetExtractor struct { - data map[string]descriptors.DepSet - err error -} - -func (f *fakeDepsetExtractor) ExtractData(ctx context.Context, enclave string) (map[string]descriptors.DepSet, error) { - return f.data, f.err -} - -// mockKurtosisContext implements interfaces.KurtosisContextInterface for testing -type mockKurtosisContext struct { - enclaveCtx interfaces.EnclaveContext - getErr error - createErr error - cleanErr error - destroyErr error -} - -func (m *mockKurtosisContext) GetEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - if m.getErr != nil { - return nil, m.getErr - } - return m.enclaveCtx, nil -} - -func (m *mockKurtosisContext) GetEnclaveStatus(ctx context.Context, name string) (interfaces.EnclaveStatus, error) { - if m.getErr != nil { - return "", m.getErr - } - return interfaces.EnclaveStatusRunning, nil -} - -func (m *mockKurtosisContext) CreateEnclave(ctx context.Context, name string) (interfaces.EnclaveContext, error) { - if m.createErr != nil { - return nil, m.createErr - } - return m.enclaveCtx, nil -} - -func (m *mockKurtosisContext) Clean(ctx context.Context, destroyAll bool) ([]interfaces.EnclaveNameAndUuid, error) { - if m.cleanErr != nil { - return nil, m.cleanErr - } - return []interfaces.EnclaveNameAndUuid{}, nil -} - -func (m *mockKurtosisContext) DestroyEnclave(ctx context.Context, name string) error { - if m.destroyErr != nil { - return m.destroyErr - } - return nil -} - -func TestDeploy(t *testing.T) { - testSpec := &spec.EnclaveSpec{ - Chains: []*spec.ChainSpec{ - { - Name: "op-kurtosis", - NetworkID: "1234", - }, - }, - } - - testServices := make(inspect.ServiceMap) - testServices["el-1-geth-lighthouse"] = &inspect.Service{ - Ports: inspect.PortMap{ - "rpc": {Port: 52645}, - }, - } - - testWallets := deployer.WalletList{ - { - Name: "test-wallet", - Address: common.HexToAddress("0x123"), - PrivateKey: "0xabc", - }, - } - - tests := []struct { - name string - specErr error - inspectErr 
error - deployerErr error - kurtosisErr error - wantErr bool - }{ - { - name: "successful deployment", - }, - { - name: "spec error", - specErr: fmt.Errorf("spec failed"), - wantErr: true, - }, - { - name: "inspect error", - inspectErr: fmt.Errorf("inspect failed"), - wantErr: true, - }, - { - name: "kurtosis error", - kurtosisErr: fmt.Errorf("kurtosis failed"), - wantErr: true, - }, - { - name: "deployer error", - deployerErr: fmt.Errorf("deployer failed"), - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a fake Kurtosis context that will return the test error - fakeCtx := &fake.KurtosisContext{ - EnclaveCtx: &fake.EnclaveContext{ - RunErr: tt.kurtosisErr, - // Send a successful run finished event for successful cases - Responses: []interfaces.StarlarkResponse{ - &fake.StarlarkResponse{ - IsSuccessful: !tt.wantErr, - }, - }, - }, - } - - d, err := NewKurtosisDeployer( - WithKurtosisEnclaveSpec(&fakeEnclaveSpecifier{ - spec: testSpec, - err: tt.specErr, - }), - WithKurtosisEnclaveInspecter(&fakeEnclaveInspecter{ - result: &inspect.InspectData{ - UserServices: testServices, - }, - err: tt.inspectErr, - }), - WithKurtosisEnclaveObserver(&fakeEnclaveObserver{ - state: &deployer.DeployerData{ - L1ValidatorWallets: testWallets, - }, - err: tt.deployerErr, - }), - WithKurtosisKurtosisContext(fakeCtx), - ) - require.NoError(t, err) - - _, err = d.Deploy(context.Background(), strings.NewReader("test input")) - if tt.wantErr { - assert.Error(t, err) - return - } - - require.NoError(t, err) - }) - } -} - -func TestGetEnvironmentInfo(t *testing.T) { - testSpec := &spec.EnclaveSpec{ - Chains: []*spec.ChainSpec{ - { - Name: "op-kurtosis", - NetworkID: "1234", - }, - }, - } - - // Create test services map with the expected structure - testServices := make(inspect.ServiceMap) - testServices["el-1-geth-lighthouse"] = &inspect.Service{ - Ports: inspect.PortMap{ - "rpc": &descriptors.PortInfo{Port: 52645}, - }, - } - - 
testWallet := &deployer.Wallet{ - Name: "test-wallet", - Address: common.HexToAddress("0x123"), - PrivateKey: "0xabc", - } - testWallets := deployer.WalletList{testWallet} - - testJWTs := &jwt.Data{ - L1JWT: "test-l1-jwt", - L2JWT: "test-l2-jwt", - } - - // Create expected L1 services - l1Services := make(descriptors.ServiceMap) - l1Services["el"] = &descriptors.Service{ - Name: "el-1-geth-lighthouse", - Endpoints: descriptors.EndpointMap{ - "rpc": &descriptors.PortInfo{Port: 52645}, - }, - } - - tests := []struct { - name string - spec *spec.EnclaveSpec - inspect *inspect.InspectData - deploy *deployer.DeployerData - jwt *jwt.Data - want *KurtosisEnvironment - wantErr bool - err error - }{ - { - name: "successful environment info with JWT", - spec: testSpec, - inspect: &inspect.InspectData{UserServices: testServices}, - deploy: &deployer.DeployerData{ - L1ValidatorWallets: testWallets, - State: &deployer.DeployerState{ - Addresses: deployer.DeploymentAddresses{ - "0x123": common.HexToAddress("0x123"), - }, - }, - L1ChainID: "0", - }, - jwt: testJWTs, - want: &KurtosisEnvironment{ - DevnetEnvironment: &descriptors.DevnetEnvironment{ - Name: DefaultEnclave, - ReverseProxyURL: defaultKurtosisReverseProxyURL, - L1: &descriptors.Chain{ - ID: "0", - Name: "Ethereum", - Services: make(descriptors.RedundantServiceMap), - Nodes: []descriptors.Node{ - { - Services: l1Services, - }, - }, - JWT: testJWTs.L1JWT, - Addresses: descriptors.AddressMap{ - "0x123": common.HexToAddress("0x123"), - }, - Wallets: descriptors.WalletMap{ - testWallet.Name: { - Address: testWallet.Address, - PrivateKey: testWallet.PrivateKey, - }, - }, - }, - L2: []*descriptors.L2Chain{ - { - Chain: &descriptors.Chain{ - Name: "op-kurtosis", - ID: "1234", - Services: make(descriptors.RedundantServiceMap), - JWT: testJWTs.L2JWT, - }, - }, - }, - DepSets: nil, - }, - }, - }, - { - name: "inspect error", - spec: testSpec, - err: fmt.Errorf("inspect failed"), - wantErr: true, - }, - { - name: "deploy error", 
- spec: testSpec, - inspect: &inspect.InspectData{UserServices: testServices}, - err: fmt.Errorf("deploy failed"), - wantErr: true, - }, - { - name: "jwt error", - spec: testSpec, - inspect: &inspect.InspectData{UserServices: testServices}, - deploy: &deployer.DeployerData{}, - err: fmt.Errorf("jwt failed"), - wantErr: true, - }, - { - name: "with interop feature - depset fetched", - spec: &spec.EnclaveSpec{ - Chains: []*spec.ChainSpec{ - { - Name: "op-kurtosis", - NetworkID: "1234", - }, - }, - Features: spec.FeatureList{spec.FeatureInterop}, - }, - inspect: &inspect.InspectData{UserServices: testServices}, - deploy: &deployer.DeployerData{ - L1ValidatorWallets: testWallets, - State: &deployer.DeployerState{ - Addresses: deployer.DeploymentAddresses{ - "0x123": common.HexToAddress("0x123"), - }, - }, - L1ChainID: "0", - }, - jwt: testJWTs, - want: &KurtosisEnvironment{ - DevnetEnvironment: &descriptors.DevnetEnvironment{ - Name: DefaultEnclave, - ReverseProxyURL: defaultKurtosisReverseProxyURL, - L1: &descriptors.Chain{ - ID: "0", - Name: "Ethereum", - Services: make(descriptors.RedundantServiceMap), - Nodes: []descriptors.Node{ - { - Services: l1Services, - }, - }, - JWT: testJWTs.L1JWT, - Addresses: descriptors.AddressMap{ - "0x123": common.HexToAddress("0x123"), - }, - Wallets: descriptors.WalletMap{ - testWallet.Name: { - Address: testWallet.Address, - PrivateKey: testWallet.PrivateKey, - }, - }, - }, - L2: []*descriptors.L2Chain{ - { - Chain: &descriptors.Chain{ - Name: "op-kurtosis", - ID: "1234", - Services: make(descriptors.RedundantServiceMap), - JWT: testJWTs.L2JWT, - }, - }, - }, - Features: spec.FeatureList{spec.FeatureInterop}, - DepSets: map[string]descriptors.DepSet{"test-dep-set": descriptors.DepSet(`{}`)}, - }, - }, - }, - { - name: "without interop feature - depset not fetched", - spec: &spec.EnclaveSpec{ - Chains: []*spec.ChainSpec{ - { - Name: "op-kurtosis", - NetworkID: "1234", - }, - }, - Features: spec.FeatureList{}, - }, - inspect: 
&inspect.InspectData{UserServices: testServices}, - deploy: &deployer.DeployerData{ - L1ValidatorWallets: testWallets, - State: &deployer.DeployerState{ - Addresses: deployer.DeploymentAddresses{ - "0x123": common.HexToAddress("0x123"), - }, - }, - L1ChainID: "0", - }, - jwt: testJWTs, - want: &KurtosisEnvironment{ - DevnetEnvironment: &descriptors.DevnetEnvironment{ - Name: DefaultEnclave, - ReverseProxyURL: defaultKurtosisReverseProxyURL, - L1: &descriptors.Chain{ - ID: "0", - Name: "Ethereum", - Services: make(descriptors.RedundantServiceMap), - Nodes: []descriptors.Node{ - { - Services: l1Services, - }, - }, - JWT: testJWTs.L1JWT, - Addresses: descriptors.AddressMap{ - "0x123": common.HexToAddress("0x123"), - }, - Wallets: descriptors.WalletMap{ - testWallet.Name: { - Address: testWallet.Address, - PrivateKey: testWallet.PrivateKey, - }, - }, - }, - L2: []*descriptors.L2Chain{ - { - Chain: &descriptors.Chain{ - Name: "op-kurtosis", - ID: "1234", - Services: make(descriptors.RedundantServiceMap), - JWT: testJWTs.L2JWT, - }, - }, - }, - Features: spec.FeatureList{}, - DepSets: nil, - }, - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a mock Kurtosis context that won't try to connect to a real engine - mockCtx := &mockKurtosisContext{ - enclaveCtx: &fake.EnclaveContext{}, - } - - // Create depset data based on whether interop is enabled - var depsets map[string]descriptors.DepSet - if tt.spec != nil && tt.spec.Features.Contains(spec.FeatureInterop) { - depsets = map[string]descriptors.DepSet{"test-dep-set": descriptors.DepSet(`{}`)} - } - - deployer, err := NewKurtosisDeployer( - WithKurtosisKurtosisContext(mockCtx), - WithKurtosisEnclaveInspecter(&fakeEnclaveInspecter{ - result: tt.inspect, - err: tt.err, - }), - WithKurtosisEnclaveObserver(&fakeEnclaveObserver{ - state: tt.deploy, - err: tt.err, - }), - WithKurtosisJWTExtractor(&fakeJWTExtractor{ - data: tt.jwt, - err: tt.err, - }), - 
WithKurtosisDepsetExtractor(&fakeDepsetExtractor{ - data: depsets, - err: tt.err, - }), - ) - require.NoError(t, err) - - got, err := deployer.GetEnvironmentInfo(context.Background(), tt.spec) - if tt.wantErr { - require.Error(t, err) - return - } - require.NoError(t, err) - require.Equal(t, tt.want, got) - }) - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/deployer/cmd/main.go b/kurtosis-devnet/pkg/kurtosis/sources/deployer/cmd/main.go deleted file mode 100644 index 3d721a8795aaa..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/deployer/cmd/main.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/deployer" -) - -func main() { - // Parse command line flags - enclave := flag.String("enclave", "", "Name of the Kurtosis enclave") - flag.Parse() - - if *enclave == "" { - fmt.Fprintln(os.Stderr, "Error: --enclave flag is required") - flag.Usage() - os.Exit(1) - } - - // Get deployer data - d := deployer.NewDeployer(*enclave) - ctx := context.Background() - data, err := d.ExtractData(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Error parsing deployer data: %v\n", err) - os.Exit(1) - } - - // Encode as JSON and write to stdout - encoder := json.NewEncoder(os.Stdout) - encoder.SetIndent("", " ") - if err := encoder.Encode(data); err != nil { - fmt.Fprintf(os.Stderr, "Error encoding JSON: %v\n", err) - os.Exit(1) - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer.go b/kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer.go deleted file mode 100644 index 429e5af31082c..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer.go +++ /dev/null @@ -1,470 +0,0 @@ -package deployer - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "io" - "math/big" - "strings" - "text/template" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - 
"github.com/ethereum-optimism/optimism/devnet-sdk/types" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum-optimism/optimism/op-node/rollup" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/params" -) - -const ( - defaultDeployerArtifactName = "op-deployer-configs" - defaultWalletsName = "wallets.json" - defaultStateName = "state.json" - defaultGenesisArtifactName = "el_cl_genesis_data" - defaultMnemonicName = "mnemonics.yaml" - defaultGenesisNameTemplate = "genesis-{{.ChainID}}.json" - defaultRollupNameTemplate = "rollup-{{.ChainID}}.json" - defaultL1GenesisName = "genesis.json" -) - -// DeploymentAddresses maps contract names to their addresses -type DeploymentAddresses map[string]types.Address - -// DeploymentStateAddresses maps chain IDs to their contract addresses -type DeploymentStateAddresses map[string]DeploymentAddresses - -type DeploymentState struct { - L1Addresses DeploymentAddresses `json:"l1_addresses"` - L2Addresses DeploymentAddresses `json:"l2_addresses"` - L1Wallets WalletList `json:"l1_wallets"` - L2Wallets WalletList `json:"l2_wallets"` - Config *params.ChainConfig `json:"chain_config"` - RollupConfig *rollup.Config `json:"rollup_config"` -} - -type DeployerState struct { - Deployments map[string]DeploymentState `json:"l2s"` - Addresses DeploymentAddresses `json:"superchain"` -} - -// StateFile represents the structure of the state.json file -type StateFile struct { - OpChainDeployments []map[string]interface{} `json:"opChainDeployments"` - SuperChainContracts map[string]interface{} `json:"superchainContracts"` - ImplementationsDeployment map[string]interface{} `json:"implementationsDeployment"` -} - -// Wallet represents a wallet with optional private key and name -type Wallet struct { - Address types.Address 
`json:"address"` - PrivateKey string `json:"private_key"` - Name string `json:"name"` -} - -// WalletList holds a list of wallets -type WalletList []*Wallet -type WalletMap map[string]*Wallet - -type DeployerData struct { - L1ValidatorWallets WalletList `json:"wallets"` - State *DeployerState `json:"state"` - L1ChainID string `json:"l1_chain_id"` - L1ChainConfig *params.ChainConfig `json:"l1_chain_config"` -} - -type Deployer struct { - enclave string - deployerArtifactName string - walletsName string - stateName string - genesisArtifactName string - l1ValidatorMnemonicName string - l2GenesisNameTemplate string - l2RollupNameTemplate string - l1GenesisName string -} - -type DeployerOption func(*Deployer) - -func WithArtifactName(name string) DeployerOption { - return func(d *Deployer) { - d.deployerArtifactName = name - } -} - -func WithWalletsName(name string) DeployerOption { - return func(d *Deployer) { - d.walletsName = name - } -} - -func WithStateName(name string) DeployerOption { - return func(d *Deployer) { - d.stateName = name - } -} - -func WithGenesisArtifactName(name string) DeployerOption { - return func(d *Deployer) { - d.genesisArtifactName = name - } -} - -func WithMnemonicsName(name string) DeployerOption { - return func(d *Deployer) { - d.l1ValidatorMnemonicName = name - } -} - -func WithGenesisNameTemplate(name string) DeployerOption { - return func(d *Deployer) { - d.l2GenesisNameTemplate = name - } -} - -func WithRollupNameTemplate(name string) DeployerOption { - return func(d *Deployer) { - d.l2RollupNameTemplate = name - } -} - -func NewDeployer(enclave string, opts ...DeployerOption) *Deployer { - d := &Deployer{ - enclave: enclave, - deployerArtifactName: defaultDeployerArtifactName, - walletsName: defaultWalletsName, - stateName: defaultStateName, - genesisArtifactName: defaultGenesisArtifactName, - l1ValidatorMnemonicName: defaultMnemonicName, - l2GenesisNameTemplate: defaultGenesisNameTemplate, - l2RollupNameTemplate: 
defaultRollupNameTemplate, - l1GenesisName: defaultL1GenesisName, - } - - for _, opt := range opts { - opt(d) - } - - return d -} - -// parseWalletsFile parses a JSON file containing wallet information -func parseWalletsFile(r io.Reader) (map[string]WalletList, error) { - result := make(map[string]WalletList) - - // Read all data from reader - data, err := io.ReadAll(r) - if err != nil { - return nil, fmt.Errorf("failed to read wallet file: %w", err) - } - - // Unmarshal into a map first - var rawData map[string]map[string]string - if err := json.Unmarshal(data, &rawData); err != nil { - return nil, fmt.Errorf("failed to decode wallet file: %w", err) - } - - for id, chain := range rawData { - // Create a map to store wallets by name - walletMap := make(WalletMap) - hasAddress := make(map[string]bool) - - // First pass: collect addresses - for key, value := range chain { - if strings.HasSuffix(key, "Address") { - name := strings.TrimSuffix(key, "Address") - wallet, ok := walletMap[name] - if !ok || wallet == nil { - wallet = &Wallet{ - Name: name, - Address: common.HexToAddress(value), - } - } else { - log.Warn("duplicate wallet name key in wallets file", "name", name) - } - walletMap[name] = wallet - hasAddress[name] = true - } - } - - // Second pass: collect private keys only for wallets with addresses - for key, value := range chain { - if strings.HasSuffix(key, "PrivateKey") { - name := strings.TrimSuffix(key, "PrivateKey") - if hasAddress[name] { - wallet := walletMap[name] - wallet.PrivateKey = value - walletMap[name] = wallet - } - } - } - - // Convert map to list, only including wallets with addresses - wl := make(WalletList, 0, len(walletMap)) - for name, wallet := range walletMap { - if hasAddress[name] { - wl = append(wl, wallet) - } - } - - result[id] = wl - } - - return result, nil -} - -// hexToDecimal converts a hex string (with or without 0x prefix) to a decimal string -func hexToDecimal(hex string) (string, error) { - // Remove 0x prefix if present 
- hex = strings.TrimPrefix(hex, "0x") - - // Parse hex string to big.Int - n := new(big.Int) - if _, ok := n.SetString(hex, 16); !ok { - return "", fmt.Errorf("invalid hex string: %s", hex) - } - - // Convert to decimal string - return n.String(), nil -} - -// parseStateFile parses the state.json file and extracts addresses -func parseStateFile(r io.Reader) (*DeployerState, error) { - var state StateFile - if err := json.NewDecoder(r).Decode(&state); err != nil { - return nil, fmt.Errorf("failed to decode state file: %w", err) - } - - result := &DeployerState{ - Deployments: make(map[string]DeploymentState), - Addresses: make(DeploymentAddresses), - } - - mapDeployment := func(deployment map[string]interface{}) DeploymentAddresses { - addresses := make(DeploymentAddresses) - for key, value := range deployment { - if strings.HasSuffix(key, "Proxy") || strings.HasSuffix(key, "Impl") { - addresses[key] = common.HexToAddress(value.(string)) - } - } - return addresses - } - - for _, deployment := range state.OpChainDeployments { - // Get the chain ID - idValue, ok := deployment["id"] - if !ok { - continue - } - hexID, ok := idValue.(string) - if !ok { - continue - } - - // Convert hex ID to decimal - id, err := hexToDecimal(hexID) - if err != nil { - continue - } - - l1Addresses := mapDeployment(deployment) - - // op-deployer currently does not categorize L2 addresses - // so we need to map them manually. 
- // TODO: Update op-deployer to sort rollup contracts by category - l2Addresses := make(DeploymentAddresses) - for _, addressName := range []string{"OptimismMintableErc20FactoryProxy"} { - if addr, ok := l1Addresses[addressName]; ok { - l2Addresses[addressName] = addr - delete(l1Addresses, addressName) - } - } - - result.Deployments[id] = DeploymentState{ - L1Addresses: l1Addresses, - L2Addresses: l2Addresses, - } - } - - result.Addresses = mapDeployment(state.ImplementationsDeployment) - // merge the superchain and implementations addresses - for key, value := range mapDeployment(state.SuperChainContracts) { - result.Addresses[key] = value - } - - return result, nil -} - -// ExtractData downloads and parses the op-deployer state -func (d *Deployer) ExtractData(ctx context.Context) (*DeployerData, error) { - fs, err := ktfs.NewEnclaveFS(ctx, d.enclave) - if err != nil { - return nil, err - } - - deployerArtifact, err := fs.GetArtifact(ctx, d.deployerArtifactName) - if err != nil { - return nil, err - } - - stateBuffer := bytes.NewBuffer(nil) - walletsBuffer := bytes.NewBuffer(nil) - if err := deployerArtifact.ExtractFiles( - ktfs.NewArtifactFileWriter(d.stateName, stateBuffer), - ktfs.NewArtifactFileWriter(d.walletsName, walletsBuffer), - ); err != nil { - return nil, err - } - - state, err := parseStateFile(stateBuffer) - if err != nil { - return nil, err - } - - l1WalletsForL2Admin, err := parseWalletsFile(walletsBuffer) - if err != nil { - return nil, err - } - - // Generate test wallets from the standard "test test test..." 
mnemonic - // These are the same wallets funded in L2Genesis.s.sol's devAccounts array - devWallets, err := d.getDevWallets() - if err != nil { - return nil, err - } - - for id, deployment := range state.Deployments { - if l1Wallets, exists := l1WalletsForL2Admin[id]; exists { - deployment.L1Wallets = l1Wallets - } - deployment.L2Wallets = devWallets - - genesisBuffer := bytes.NewBuffer(nil) - genesisName, err := d.renderGenesisNameTemplate(id) - if err != nil { - return nil, err - } - - if err := deployerArtifact.ExtractFiles( - ktfs.NewArtifactFileWriter(genesisName, genesisBuffer), - ); err != nil { - return nil, err - } - - // Parse the genesis file JSON into a core.Genesis struct - var genesis core.Genesis - if err := json.NewDecoder(genesisBuffer).Decode(&genesis); err != nil { - return nil, fmt.Errorf("failed to parse genesis file %s in artifact %s for chain ID %s: %w", genesisName, d.deployerArtifactName, id, err) - } - - // Store the genesis data in the deployment state - deployment.Config = genesis.Config - - rollupBuffer := bytes.NewBuffer(nil) - rollupName, err := d.renderRollupNameTemplate(id) - if err != nil { - return nil, err - } - - if err := deployerArtifact.ExtractFiles( - ktfs.NewArtifactFileWriter(rollupName, rollupBuffer), - ); err != nil { - return nil, err - } - - // Parse the genesis file JSON into a core.Genesis struct - var rollupCfg rollup.Config - if err := json.NewDecoder(rollupBuffer).Decode(&rollupCfg); err != nil { - return nil, fmt.Errorf("failed to parse rollup file %s in artifact %s for chain ID %s: %w", rollupName, d.deployerArtifactName, id, err) - } - - // Store the data in the deployment state - deployment.Config = genesis.Config - deployment.RollupConfig = &rollupCfg - - state.Deployments[id] = deployment - } - - l1GenesisArtifact, err := fs.GetArtifact(ctx, d.genesisArtifactName) - if err != nil { - return nil, err - } - - l1ValidatorWallets, err := d.getL1ValidatorWallets(l1GenesisArtifact) - if err != nil { - return nil, 
err - } - - l1ChainConfig, err := d.getConfig(l1GenesisArtifact) - if err != nil { - return nil, err - } - - return &DeployerData{ - L1ChainID: l1ChainConfig.ChainID.String(), - State: state, - L1ValidatorWallets: l1ValidatorWallets, - L1ChainConfig: l1ChainConfig, - }, nil -} - -func (d *Deployer) renderGenesisNameTemplate(chainID string) (string, error) { - return d.renderNameTemplate(d.l2GenesisNameTemplate, chainID) -} - -func (d *Deployer) renderRollupNameTemplate(chainID string) (string, error) { - return d.renderNameTemplate(d.l2RollupNameTemplate, chainID) -} - -func (d *Deployer) renderNameTemplate(t, chainID string) (string, error) { - tmpl, err := template.New("").Parse(t) - if err != nil { - return "", fmt.Errorf("failed to compile name template %s: %w", t, err) - } - - var buf bytes.Buffer - err = tmpl.Execute(&buf, map[string]string{"ChainID": chainID}) - if err != nil { - return "", fmt.Errorf("failed to execute name template %s: %w", t, err) - } - - return buf.String(), nil -} - -// getDevWallets generates the set of test wallets used in L2Genesis.s.sol -// These wallets are derived from the standard test mnemonic -func (d *Deployer) getDevWallets() ([]*Wallet, error) { - m, err := devkeys.NewMnemonicDevKeys(devkeys.TestMnemonic) - if err != nil { - return nil, fmt.Errorf("failed to create mnemonic dev keys: %w", err) - } - - // Generate 30 wallets to match L2Genesis.s.sol's devAccounts array - testWallets := make([]*Wallet, 0, 30) - for i := 0; i < 30; i++ { - key := devkeys.UserKey(uint64(i)) - addr, err := m.Address(key) - if err != nil { - return nil, fmt.Errorf("failed to get address for test wallet %d: %w", i, err) - } - - sec, err := m.Secret(key) - if err != nil { - return nil, fmt.Errorf("failed to get secret key for test wallet %d: %w", i, err) - } - - testWallets = append(testWallets, &Wallet{ - Name: fmt.Sprintf("dev-account-%d", i), - Address: addr, - PrivateKey: hexutil.Bytes(crypto.FromECDSA(sec)).String(), - }) - } - - return 
testWallets, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer_test.go b/kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer_test.go deleted file mode 100644 index bb5aeb15a8466..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/deployer/deployer_test.go +++ /dev/null @@ -1,220 +0,0 @@ -package deployer - -import ( - "sort" - "strings" - "testing" - - "github.com/ethereum/go-ethereum/common" - "github.com/stretchr/testify/require" -) - -func TestParseStateFile(t *testing.T) { - stateJSON := `{ - "opChainDeployments": [ - { - "id": "0x000000000000000000000000000000000000000000000000000000000020d5e4", - "FooProxy": "0x123", - "FooImpl": "0x456", - "FooBar": "0x789" - }, - { - "id": "0x000000000000000000000000000000000000000000000000000000000020d5e5", - "FooProxy": "0xabc", - "FooImpl": "0xdef" - } - ] - }` - - result, err := parseStateFile(strings.NewReader(stateJSON)) - require.NoError(t, err, "Failed to parse state file") - - // Test chain deployments - tests := []struct { - chainID string - expected DeploymentAddresses - }{ - { - chainID: "2151908", - expected: DeploymentAddresses{ - "FooProxy": common.HexToAddress("0x123"), - "FooImpl": common.HexToAddress("0x456"), - }, - }, - { - chainID: "2151909", - expected: DeploymentAddresses{ - "FooProxy": common.HexToAddress("0xabc"), - "FooImpl": common.HexToAddress("0xdef"), - }, - }, - } - - for _, tt := range tests { - chain, ok := result.Deployments[tt.chainID] - require.True(t, ok, "Chain %s not found in result", tt.chainID) - - for key, expected := range tt.expected { - actual := chain.L1Addresses[key] - // TODO: add L2 addresses - require.Equal(t, expected, actual, "Chain %s, %s: expected %s, got %s", tt.chainID, key, expected, actual) - } - } -} - -func TestParseStateFileErrors(t *testing.T) { - tests := []struct { - name string - json string - wantErr bool - }{ - { - name: "empty json", - json: "", - wantErr: true, - }, - { - name: "invalid json", - json: "{invalid", - 
wantErr: true, - }, - { - name: "missing deployments", - json: `{ - "otherField": [] - }`, - wantErr: false, - }, - { - name: "invalid address type", - json: `{ - "opChainDeployments": [ - { - "id": "3151909", - "data": { - "L1CrossDomainMessengerAddress": 123 - } - } - ] - }`, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := parseStateFile(strings.NewReader(tt.json)) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} - -func TestParseWalletsFile(t *testing.T) { - tests := []struct { - name string - input string - want map[string]WalletList - wantErr bool - }{ - { - name: "successful parse", - input: `{ - "chain1": { - "proposerPrivateKey": "0xe1ec816e9ad0372e458c474a06e1e6d9e7f7985cbf642a5e5fa44be639789531", - "proposerAddress": "0xDFfA3C478Be83a91286c04721d2e5DF9A133b93F", - "batcherPrivateKey": "0x557313b816b8fb354340883edf86627b3de680a9f3e15aa1f522cbe6f9c7b967", - "batcherAddress": "0x6bd90c2a1AE00384AD9F4BcD76310F54A9CcdA11" - } - }`, - want: map[string]WalletList{ - "chain1": { - { - Name: "proposer", - Address: common.HexToAddress("0xDFfA3C478Be83a91286c04721d2e5DF9A133b93F"), - PrivateKey: "0xe1ec816e9ad0372e458c474a06e1e6d9e7f7985cbf642a5e5fa44be639789531", - }, - { - Name: "batcher", - Address: common.HexToAddress("0x6bd90c2a1AE00384AD9F4BcD76310F54A9CcdA11"), - PrivateKey: "0x557313b816b8fb354340883edf86627b3de680a9f3e15aa1f522cbe6f9c7b967", - }, - }, - }, - wantErr: false, - }, - { - name: "address only", - input: `{ - "chain1": { - "proposerAddress": "0xDFfA3C478Be83a91286c04721d2e5DF9A133b93F" - } - }`, - want: map[string]WalletList{ - "chain1": { - { - Name: "proposer", - Address: common.HexToAddress("0xDFfA3C478Be83a91286c04721d2e5DF9A133b93F"), - }, - }, - }, - wantErr: false, - }, - { - name: "private key only - should be ignored", - input: `{ - "chain1": { - "proposerPrivateKey": 
"0xe1ec816e9ad0372e458c474a06e1e6d9e7f7985cbf642a5e5fa44be639789531" - } - }`, - want: map[string]WalletList{ - "chain1": {}, - }, - wantErr: false, - }, - { - name: "invalid JSON", - input: `{invalid json}`, - want: nil, - wantErr: true, - }, - { - name: "empty input", - input: `{}`, - want: map[string]WalletList{}, - wantErr: false, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - reader := strings.NewReader(tt.input) - got, err := parseWalletsFile(reader) - - if tt.wantErr { - require.Error(t, err) - return - } - - require.NoError(t, err) - require.NotNil(t, got) - - // Sort wallets by name for consistent comparison - sortWallets := func(wallets WalletList) { - sort.Slice(wallets, func(i, j int) bool { - return wallets[i].Name < wallets[j].Name - }) - } - - for chainID, wallets := range got { - sortWallets(wallets) - wantWallets := tt.want[chainID] - sortWallets(wantWallets) - require.Equal(t, wantWallets, wallets) - } - }) - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/deployer/wallets.go b/kurtosis-devnet/pkg/kurtosis/sources/deployer/wallets.go deleted file mode 100644 index 2cc1873618387..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/deployer/wallets.go +++ /dev/null @@ -1,89 +0,0 @@ -package deployer - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/op-chain-ops/devkeys" - "github.com/ethereum/go-ethereum/common/hexutil" - "github.com/ethereum/go-ethereum/core" - "github.com/ethereum/go-ethereum/crypto" - "github.com/ethereum/go-ethereum/params" - "gopkg.in/yaml.v3" -) - -const ( - // TODO: can we figure out how many were actually funded? - numWallets = 21 -) - -func getMnemonics(r io.Reader) (string, error) { - type mnemonicConfig struct { - Mnemonic string `yaml:"mnemonic"` - Count int `yaml:"count"` // TODO: what does this mean? 
it seems much larger than the number of wallets - } - - var config []mnemonicConfig - decoder := yaml.NewDecoder(r) - if err := decoder.Decode(&config); err != nil { - return "", fmt.Errorf("failed to decode mnemonic config: %w", err) - } - - // TODO: what does this mean if there are multiple mnemonics in this file? - return config[0].Mnemonic, nil -} - -func (d *Deployer) getL1ValidatorWallets(deployerArtifact *ktfs.Artifact) ([]*Wallet, error) { - mnemonicsBuffer := bytes.NewBuffer(nil) - if err := deployerArtifact.ExtractFiles( - ktfs.NewArtifactFileWriter(d.l1ValidatorMnemonicName, mnemonicsBuffer), - ); err != nil { - return nil, err - } - - mnemonic, err := getMnemonics(mnemonicsBuffer) - if err != nil { - return nil, err - } - - m, _ := devkeys.NewMnemonicDevKeys(mnemonic) - knownWallets := make([]*Wallet, 0) - - var keys []devkeys.Key - for i := 0; i < numWallets; i++ { - keys = append(keys, devkeys.UserKey(i)) - } - - for _, key := range keys { - addr, _ := m.Address(key) - sec, _ := m.Secret(key) - - knownWallets = append(knownWallets, &Wallet{ - Name: key.String(), - Address: addr, - PrivateKey: hexutil.Bytes(crypto.FromECDSA(sec)).String(), - }) - } - - return knownWallets, nil -} - -func (d *Deployer) getConfig(genesisArtifact *ktfs.Artifact) (*params.ChainConfig, error) { - genesisBuffer := bytes.NewBuffer(nil) - if err := genesisArtifact.ExtractFiles( - ktfs.NewArtifactFileWriter(d.l1GenesisName, genesisBuffer), - ); err != nil { - return nil, err - } - - // Parse the genesis file JSON into a core.Genesis struct - var genesis core.Genesis - if err := json.NewDecoder(genesisBuffer).Decode(&genesis); err != nil { - return nil, fmt.Errorf("failed to parse genesis file %s in artifact %s: %w", d.l1GenesisName, d.genesisArtifactName, err) - } - - return genesis.Config, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/depset/cmd/main.go b/kurtosis-devnet/pkg/kurtosis/sources/depset/cmd/main.go deleted file mode 100644 index 
d14651ed6a895..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/depset/cmd/main.go +++ /dev/null @@ -1,43 +0,0 @@ -package main - -import ( - "context" - "encoding/json" - "flag" - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/depset" -) - -func main() { - // Parse command line flags - enclave := flag.String("enclave", "", "Name of the Kurtosis enclave") - flag.Parse() - - if *enclave == "" { - fmt.Fprintln(os.Stderr, "Error: --enclave flag is required") - flag.Usage() - os.Exit(1) - } - - // Get depset data - e := depset.NewExtractor(*enclave) - ctx := context.Background() - data, err := e.ExtractData(ctx) - if err != nil { - fmt.Fprintf(os.Stderr, "Error parsing deployer data: %v\n", err) - os.Exit(1) - } - - for name, depset := range data { - fmt.Println(name) - // Encode as JSON and write to stdout - encoder := json.NewEncoder(os.Stdout) - encoder.SetIndent("", " ") - if err := encoder.Encode(depset); err != nil { - fmt.Fprintf(os.Stderr, "Error encoding JSON: %v\n", err) - os.Exit(1) - } - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/depset/depset.go b/kurtosis-devnet/pkg/kurtosis/sources/depset/depset.go deleted file mode 100644 index ca10e0cdc48a2..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/depset/depset.go +++ /dev/null @@ -1,79 +0,0 @@ -package depset - -import ( - "bytes" - "context" - "fmt" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" -) - -const ( - depsetFileNamePrefix = "superchain-depset-" -) - -// extractor implements the interfaces.DepsetExtractor interface -type extractor struct { - enclave string -} - -// NewExtractor creates a new dependency set extractor -func NewExtractor(enclave string) *extractor { - return &extractor{ - enclave: enclave, - } -} - -// ExtractData extracts dependency set from its 
respective artifact -func (e *extractor) ExtractData(ctx context.Context) (map[string]descriptors.DepSet, error) { - fs, err := ktfs.NewEnclaveFS(ctx, e.enclave) - if err != nil { - return nil, err - } - - return extractDepsetsFromArtifacts(ctx, fs) -} - -func extractDepsetsFromArtifacts(ctx context.Context, fs *ktfs.EnclaveFS) (map[string]descriptors.DepSet, error) { - // Get all artifact names with retry logic - allArtifacts, err := util.WithRetry(ctx, "GetAllArtifactNames", func() ([]string, error) { - return fs.GetAllArtifactNames(ctx) - }) - - if err != nil { - return nil, fmt.Errorf("failed to get all artifact names: %w", err) - } - - depsetArtifacts := make([]string, 0) - for _, artifactName := range allArtifacts { - if strings.HasPrefix(artifactName, depsetFileNamePrefix) { - depsetArtifacts = append(depsetArtifacts, artifactName) - } - } - - depsets := make(map[string]descriptors.DepSet) - for _, artifactName := range depsetArtifacts { - // Get artifact with retry logic - a, err := util.WithRetry(ctx, fmt.Sprintf("GetArtifact(%s)", artifactName), func() (*ktfs.Artifact, error) { - return fs.GetArtifact(ctx, artifactName) - }) - - if err != nil { - return nil, fmt.Errorf("failed to get artifact '%s': %w", artifactName, err) - } - - fname := artifactName + ".json" - buffer := &bytes.Buffer{} - if err := a.ExtractFiles(ktfs.NewArtifactFileWriter(fname, buffer)); err != nil { - return nil, fmt.Errorf("failed to extract dependency set: %w", err) - } - - depsetName := strings.TrimPrefix(artifactName, depsetFileNamePrefix) - depsets[depsetName] = descriptors.DepSet(buffer.Bytes()) - } - - return depsets, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/README.md b/kurtosis-devnet/pkg/kurtosis/sources/inspect/README.md deleted file mode 100644 index 95ce2688d2eab..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/README.md +++ /dev/null @@ -1,351 +0,0 @@ -# Kurtosis Inspect Tool - -A command-line tool for inspecting Kurtosis enclaves 
and extracting conductor configurations and environment data from running Optimism devnets. - -## Overview - -The Kurtosis Inspect Tool provides a clean interface to: - -- 🔍 **Inspect running Kurtosis enclaves** - Extract service information and file artifacts -- 🎛️ **Generate conductor configurations** - Create TOML configs for `op-conductor-ops` -- 📊 **Export environment data** - Save complete devnet information as JSON -- 🔧 **Fix Traefik issues** - Repair missing network labels on containers - -## Installation - -### Build from Source - -```bash -cd optimism/kurtosis-devnet -go build -o kurtosis-inspect pkg/kurtosis/sources/inspect/cmd/main.go -``` - -### Run Directly - -```bash -go run pkg/kurtosis/sources/inspect/cmd/main.go [options] -``` - -## Usage - -### Basic Inspection - -Inspect a running enclave and display results: - -```bash -./kurtosis-inspect my-devnet-enclave -``` - -### Extract Conductor Configuration - -Generate a conductor configuration file for use with `op-conductor-ops`: - -```bash -./kurtosis-inspect --conductor-config conductor.toml my-devnet-enclave -``` - -### Export Complete Environment - -Save the complete environment data as JSON: - -```bash -./kurtosis-inspect --environment environment.json my-devnet-enclave -``` - -### Combined Export - -Extract both conductor config and environment data: - -```bash -./kurtosis-inspect \ - --conductor-config conductor.toml \ - --environment environment.json \ - my-devnet-enclave -``` - -### Fix Traefik Network Issues - -Repair missing Traefik labels on containers: - -```bash -./kurtosis-inspect --fix-traefik my-devnet-enclave -``` - -## Configuration Options - -### CLI Flags - -| Flag | Environment Variable | Description | -|------|---------------------|-------------| -| `--conductor-config` | `KURTOSIS_INSPECT_CONDUCTOR_CONFIG` | Path to write conductor configuration TOML file | -| `--environment` | `KURTOSIS_INSPECT_ENVIRONMENT` | Path to write environment JSON file | -| `--fix-traefik` | 
`KURTOSIS_INSPECT_FIX_TRAEFIK` | Fix missing Traefik labels on containers | -| `--log.level` | `KURTOSIS_INSPECT_LOG_LEVEL` | Logging level (DEBUG, INFO, WARN, ERROR) | -| `--log.format` | `KURTOSIS_INSPECT_LOG_FORMAT` | Log format (text, json, logfmt) | - -### Environment Variables - -All flags can be set via environment variables with the `KURTOSIS_INSPECT_` prefix: - -```bash -export KURTOSIS_INSPECT_CONDUCTOR_CONFIG="/tmp/conductor.toml" -export KURTOSIS_INSPECT_ENVIRONMENT="/tmp/environment.json" -export KURTOSIS_INSPECT_LOG_LEVEL="DEBUG" - -./kurtosis-inspect my-devnet-enclave -``` - -## Output Formats - -### Conductor Configuration (TOML) - -The conductor configuration file is compatible with `op-conductor-ops`: - -```toml -[networks] - [networks.2151908-chain0-kona] - sequencers = ["op-conductor-2151908-chain0-kona-sequencer"] - [networks.2151908-chain0-optimism] - sequencers = ["op-conductor-2151908-chain0-optimism-sequencer"] - -[sequencers] - [sequencers.op-conductor-2151908-chain0-kona-sequencer] - raft_addr = "127.0.0.1:60135" - conductor_rpc_url = "http://127.0.0.1:60134" - node_rpc_url = "http://127.0.0.1:60048" - voting = true - [sequencers.op-conductor-2151908-chain0-optimism-sequencer] - raft_addr = "127.0.0.1:60176" - conductor_rpc_url = "http://127.0.0.1:60177" - node_rpc_url = "http://127.0.0.1:60062" - voting = true -``` - -### Environment Data (JSON) - -Complete environment data including services and file artifacts: - -```json -{ - "FileArtifacts": [ - "genesis-l1.json", - "genesis-l2-chain0.json", - "jwt.txt", - "rollup-l2-chain0.json" - ], - "UserServices": { - "op-node-chain0-sequencer": { - "Labels": { - "app": "op-node", - "chain": "chain0", - "role": "sequencer" - }, - "Ports": { - "rpc": { - "Host": "127.0.0.1", - "Port": 9545 - }, - "p2p": { - "Host": "127.0.0.1", - "Port": 9222 - } - } - } - } -} -``` - -## Integration with op-conductor-ops - -### 1. 
Generate Conductor Configuration - -```bash -# Extract conductor config from running devnet -./kurtosis-inspect --conductor-config conductor.toml my-devnet - -# Use with op-conductor-ops -cd infra/op-conductor-ops -python op-conductor-ops.py --config ../../kurtosis-devnet/conductor.toml status -``` - -### 2. Leadership Transfer Example - -```bash -# Generate config and perform leadership transfer -./kurtosis-inspect --conductor-config conductor.toml my-devnet -cd infra/op-conductor-ops -python op-conductor-ops.py --config ../../kurtosis-devnet/conductor.toml \ - transfer-leadership \ - --target-sequencer "op-conductor-2151908-chain0-optimism-sequencer" -``` - -## Examples - -### Simple Devnet - -```bash -# Deploy simple devnet -cd kurtosis-devnet -just devnet simple - -# Inspect and extract configs -./kurtosis-inspect --conductor-config tests/simple-conductor.toml simple-devnet - -# Check conductor status -cd ../infra/op-conductor-ops -python op-conductor-ops.py --config ../../kurtosis-devnet/tests/simple-conductor.toml status -``` - -### Multi-Chain Interop - -```bash -# Deploy interop devnet -just devnet interop - -# Extract complex conductor configuration -./kurtosis-inspect \ - --conductor-config tests/interop-conductor.toml \ - --environment tests/interop-environment.json \ - interop-devnet - -# View conductor cluster status -cd ../infra/op-conductor-ops -python op-conductor-ops.py --config ../../kurtosis-devnet/tests/interop-conductor.toml status -``` - -### Debugging Network Issues - -```bash -# Fix Traefik network issues -./kurtosis-inspect --fix-traefik my-devnet - -# Inspect with debug logging -./kurtosis-inspect --log.level DEBUG --log.format json my-devnet -``` - -## Architecture - -The tool follows a clean architecture pattern with clear separation of concerns: - -``` -pkg/kurtosis/sources/inspect/ -├── cmd/main.go # CLI setup and entry point -├── config.go # Configuration parsing and validation -├── service.go # Business logic and service layer -├── 
conductor.go # Conductor configuration extraction -├── inspect.go # Core inspection functionality -├── flags/ -│ ├── flags.go # CLI flag definitions -│ └── flags_test.go # Flag testing -└── *_test.go # Comprehensive test suite -``` - -### Key Components - -- **Config**: Handles CLI argument parsing and validation -- **InspectService**: Main business logic for inspection operations -- **ConductorConfig**: Data structures for conductor configuration -- **Inspector**: Core enclave inspection functionality - -## Testing - -### Run All Tests - -```bash -go test ./pkg/kurtosis/sources/inspect/... -v -``` - -### Test Coverage - -```bash -go test ./pkg/kurtosis/sources/inspect/... -cover -``` - -### Test Categories - -- **Unit Tests**: Individual component functionality -- **Integration Tests**: File I/O and configuration parsing -- **Real-World Tests**: Based on actual devnet configurations -- **Error Tests**: Permission and validation error handling - -## Troubleshooting - -### Common Issues - -#### Kurtosis Engine Not Running - -``` -Error: failed to create Kurtosis context: The Kurtosis Engine Server is unavailable -``` - -**Solution:** -```bash -kurtosis engine start -``` - -#### Enclave Not Found - -``` -Error: failed to get enclave: enclave with identifier 'my-devnet' not found -``` - -**Solution:** -```bash -# List available enclaves -kurtosis enclave ls - -# Use correct enclave name -./kurtosis-inspect -``` - -#### Permission Denied - -``` -Error: error creating conductor config file: permission denied -``` - -**Solution:** -```bash -# Ensure write permissions to output directory -chmod 755 /output/directory -``` - -### Debug Mode - -Enable debug logging for detailed troubleshooting: - -```bash -./kurtosis-inspect --log.level DEBUG --log.format json my-devnet -``` - -## Contributing - -### Development Setup - -```bash -# Install dependencies -go mod download - -# Run tests -go test ./pkg/kurtosis/sources/inspect/... 
-v - -# Build -go build -o kurtosis-inspect pkg/kurtosis/sources/inspect/cmd/main.go -``` - -### Adding New Features - -1. Add functionality to appropriate service layer -2. Create comprehensive tests with real data -3. Update CLI flags if needed -4. Update this README with examples - -## Related Tools - -- **[op-conductor-ops](../../infra/op-conductor-ops/)**: Python CLI for managing conductor clusters -- **[Kurtosis](https://kurtosis.com/)**: Orchestration platform for development environments -- **[Optimism Devnet](../)**: Kurtosis package for Optimism development networks - -## License - -This tool is part of the Optimism monorepo and follows the same licensing terms. \ No newline at end of file diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go deleted file mode 100644 index 8b4fb2ddab18c..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/cmd/main.go +++ /dev/null @@ -1,65 +0,0 @@ -// Package main reproduces a lightweight version of the "kurtosis enclave inspect" command -// It can be used to sanity check the results, as writing tests against a fake -// enclave is not practical right now. 
-package main - -import ( - "context" - "fmt" - "os" - - "github.com/urfave/cli/v2" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags" - opservice "github.com/ethereum-optimism/optimism/op-service" - "github.com/ethereum-optimism/optimism/op-service/cliapp" - oplog "github.com/ethereum-optimism/optimism/op-service/log" -) - -var ( - Version = "v0.1.0" - GitCommit = "" - GitDate = "" -) - -func main() { - app := cli.NewApp() - app.Version = opservice.FormatVersion(Version, GitCommit, GitDate, "") - app.Name = "kurtosis-inspect" - app.Usage = "Inspect Kurtosis enclaves and extract configurations" - app.Description = "Tool to inspect running Kurtosis enclaves and extract conductor configurations and environment data" - app.Flags = cliapp.ProtectFlags(flags.Flags) - app.Action = cliapp.LifecycleCmd(run) - app.ArgsUsage = "" - - if err := app.Run(os.Args); err != nil { - fmt.Fprintf(os.Stderr, "Error: %v\n", err) - os.Exit(1) - } -} - -func run(cliCtx *cli.Context, closeApp context.CancelCauseFunc) (cliapp.Lifecycle, error) { - // Parse configuration - cfg, err := inspect.NewConfig(cliCtx) - if err != nil { - return nil, err - } - - // Setup logging - log := oplog.NewLogger(oplog.AppOut(cliCtx), oplog.ReadCLIConfig(cliCtx)) - oplog.SetGlobalLogHandler(log.Handler()) - - // Create service - service := inspect.NewInspectService(cfg, log) - - // Create background context for operations - ctx := context.Background() - - // Run the service - if err := service.Run(ctx); err != nil { - return nil, err - } - - return nil, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor.go deleted file mode 100644 index 2c0538c917424..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor.go +++ /dev/null @@ -1,155 +0,0 @@ -package inspect - -import ( - 
"context" - "fmt" - "strings" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/wrappers" -) - -type ConductorSequencer struct { - RaftAddr string `json:"raft_addr" toml:"raft_addr"` - ConductorRPCURL string `json:"conductor_rpc_url" toml:"conductor_rpc_url"` - NodeRPCURL string `json:"node_rpc_url" toml:"node_rpc_url"` - Voting bool `json:"voting" toml:"voting"` -} - -type ConductorNetwork struct { - Sequencers []string `json:"sequencers" toml:"sequencers"` -} - -type ConductorConfig struct { - Networks map[string]*ConductorNetwork `json:"networks" toml:"networks"` - Sequencers map[string]*ConductorSequencer `json:"sequencers" toml:"sequencers"` -} - -func ExtractConductorConfig(ctx context.Context, enclaveID string) (*ConductorConfig, error) { - kurtosisCtx, err := wrappers.GetDefaultKurtosisContext() - if err != nil { - return nil, fmt.Errorf("failed to get Kurtosis context: %w", err) - } - - enclaveCtx, err := kurtosisCtx.GetEnclave(ctx, enclaveID) - if err != nil { - return nil, fmt.Errorf("failed to get enclave: %w", err) - } - - services, err := enclaveCtx.GetServices() - if err != nil { - return nil, fmt.Errorf("failed to get services: %w", err) - } - - conductorServices := make(map[string]map[string]interface{}) - opNodeServices := make(map[string]map[string]interface{}) - - for svcName := range services { - svcNameStr := string(svcName) - - svcCtx, err := enclaveCtx.GetService(svcNameStr) - if err != nil { - continue - } - - labels := svcCtx.GetLabels() - ports := make(map[string]*descriptors.PortInfo) - - for portName, portSpec := range svcCtx.GetPublicPorts() { - ports[portName] = &descriptors.PortInfo{ - Host: svcCtx.GetMaybePublicIPAddress(), - Port: int(portSpec.GetNumber()), - } - } - - if labels["op.kind"] == "conductor" { - conductorServices[svcNameStr] = map[string]interface{}{ - "labels": labels, - "ports": ports, - } - } - - if labels["op.kind"] == "cl" 
&& labels["op.cl.type"] == "op-node" { - opNodeServices[svcNameStr] = map[string]interface{}{ - "labels": labels, - "ports": ports, - } - } - } - - if len(conductorServices) == 0 { - return nil, nil - } - - networks := make(map[string]*ConductorNetwork) - sequencers := make(map[string]*ConductorSequencer) - - networkSequencers := make(map[string][]string) - - for conductorSvcName, conductorData := range conductorServices { - labels := conductorData["labels"].(map[string]string) - ports := conductorData["ports"].(map[string]*descriptors.PortInfo) - - networkID := labels["op.network.id"] - if networkID == "" { - continue - } - - // Find the network name from service name (e.g., "op-conductor-2151908-op-kurtosis-node0") - parts := strings.Split(conductorSvcName, "-") - var networkName string - if len(parts) >= 4 { - networkName = strings.Join(parts[2:len(parts)-1], "-") - } - if networkName == "" { - networkName = "unknown" - } - - networkSequencers[networkName] = append(networkSequencers[networkName], conductorSvcName) - - participantName := labels["op.network.participant.name"] - var nodeRPCURL string - - // Look for matching op-node service - for _, nodeData := range opNodeServices { - nodeLabels := nodeData["labels"].(map[string]string) - nodePorts := nodeData["ports"].(map[string]*descriptors.PortInfo) - - if nodeLabels["op.network.participant.name"] == participantName && - nodeLabels["op.network.id"] == networkID { - if rpcPort, ok := nodePorts["rpc"]; ok { - nodeRPCURL = fmt.Sprintf("http://127.0.0.1:%d", rpcPort.Port) - } - break - } - } - - var raftAddr, conductorRPCURL string - - if consensusPort, ok := ports["consensus"]; ok { - raftAddr = fmt.Sprintf("127.0.0.1:%d", consensusPort.Port) - } - - if rpcPort, ok := ports["rpc"]; ok { - conductorRPCURL = fmt.Sprintf("http://127.0.0.1:%d", rpcPort.Port) - } - - sequencers[conductorSvcName] = &ConductorSequencer{ - RaftAddr: raftAddr, - ConductorRPCURL: conductorRPCURL, - NodeRPCURL: nodeRPCURL, - Voting: true, - 
} - } - - for networkName, sequencerNames := range networkSequencers { - networks[networkName] = &ConductorNetwork{ - Sequencers: sequencerNames, - } - } - - return &ConductorConfig{ - Networks: networks, - Sequencers: sequencers, - }, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor_test.go deleted file mode 100644 index 1c65207427949..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/conductor_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package inspect - -import ( - "strings" - "testing" - - "github.com/BurntSushi/toml" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestConductorConfig(t *testing.T) { - config := &ConductorConfig{ - Networks: map[string]*ConductorNetwork{ - "chain0": {Sequencers: []string{"seq0"}}, - "chain1": {Sequencers: []string{"seq1"}}, - }, - Sequencers: map[string]*ConductorSequencer{ - "seq0": { - RaftAddr: "127.0.0.1:8001", - ConductorRPCURL: "http://127.0.0.1:8002", - NodeRPCURL: "http://127.0.0.1:8003", - Voting: true, - }, - "seq1": { - RaftAddr: "127.0.0.1:8011", - ConductorRPCURL: "http://127.0.0.1:8012", - NodeRPCURL: "http://127.0.0.1:8013", - Voting: false, - }, - }, - } - - var buf strings.Builder - err := toml.NewEncoder(&buf).Encode(config) - require.NoError(t, err) - - output := buf.String() - assert.Contains(t, output, "[networks]") - assert.Contains(t, output, "[sequencers]") - assert.Contains(t, output, "voting = true") - assert.Contains(t, output, "voting = false") - - var decoded ConductorConfig - err = toml.Unmarshal([]byte(output), &decoded) - require.NoError(t, err) - assert.Equal(t, config.Networks, decoded.Networks) - assert.Equal(t, config.Sequencers, decoded.Sequencers) -} - -func TestConductorSequencer(t *testing.T) { - seq := &ConductorSequencer{ - RaftAddr: "localhost:8080", - ConductorRPCURL: "http://localhost:9090", - NodeRPCURL: "http://localhost:7070", - Voting: 
true, - } - - assert.Equal(t, "localhost:8080", seq.RaftAddr) - assert.Equal(t, "http://localhost:9090", seq.ConductorRPCURL) - assert.True(t, seq.Voting) -} - -func TestMultiChainConfig(t *testing.T) { - config := &ConductorConfig{ - Networks: map[string]*ConductorNetwork{ - "chain0": {Sequencers: []string{"seq0", "backup0"}}, - "chain1": {Sequencers: []string{"seq1", "observer1"}}, - }, - Sequencers: map[string]*ConductorSequencer{ - "seq0": {RaftAddr: "127.0.0.1:8001", ConductorRPCURL: "http://127.0.0.1:8002", NodeRPCURL: "http://127.0.0.1:8003", Voting: true}, - "backup0": {RaftAddr: "127.0.0.1:8011", ConductorRPCURL: "http://127.0.0.1:8012", NodeRPCURL: "http://127.0.0.1:8013", Voting: true}, - "seq1": {RaftAddr: "127.0.0.1:8021", ConductorRPCURL: "http://127.0.0.1:8022", NodeRPCURL: "http://127.0.0.1:8023", Voting: true}, - "observer1": {RaftAddr: "127.0.0.1:8031", ConductorRPCURL: "http://127.0.0.1:8032", NodeRPCURL: "http://127.0.0.1:8033", Voting: false}, - }, - } - - assert.Len(t, config.Networks, 2) - assert.Len(t, config.Sequencers, 4) - assert.False(t, config.Sequencers["observer1"].Voting) - - var buf strings.Builder - err := toml.NewEncoder(&buf).Encode(config) - require.NoError(t, err) - assert.Contains(t, buf.String(), "voting = false") -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/config.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/config.go deleted file mode 100644 index 935c00e3a9cb9..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/config.go +++ /dev/null @@ -1,34 +0,0 @@ -package inspect - -import ( - "fmt" - - "github.com/urfave/cli/v2" -) - -// Config holds the configuration for the inspect service -type Config struct { - EnclaveID string - FixTraefik bool - ConductorConfigPath string - EnvironmentPath string -} - -func NewConfig(ctx *cli.Context) (*Config, error) { - if ctx.NArg() != 1 { - return nil, fmt.Errorf("expected exactly one argument (enclave-id), got %d", ctx.NArg()) - } - - cfg := &Config{ - 
EnclaveID: ctx.Args().Get(0), - FixTraefik: ctx.Bool("fix-traefik"), - ConductorConfigPath: ctx.String("conductor-config-path"), - EnvironmentPath: ctx.String("environment-path"), - } - - if cfg.EnclaveID == "" { - return nil, fmt.Errorf("enclave-id is required") - } - - return cfg, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/config_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/config_test.go deleted file mode 100644 index 25dae574ff07e..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/config_test.go +++ /dev/null @@ -1,91 +0,0 @@ -package inspect - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" -) - -func TestNewConfig(t *testing.T) { - tests := []struct { - name string - args []string - expected *Config - wantErr bool - }{ - { - name: "valid config", - args: []string{"inspect", "test-enclave"}, - expected: &Config{ - EnclaveID: "test-enclave", - FixTraefik: false, - ConductorConfigPath: "", - EnvironmentPath: "", - }, - wantErr: false, - }, - { - name: "config with flags", - args: []string{ - "inspect", - "--fix-traefik", - "--conductor-config-path", "/tmp/conductor.toml", - "--environment-path", "/tmp/env.json", - "my-enclave", - }, - expected: &Config{ - EnclaveID: "my-enclave", - FixTraefik: true, - ConductorConfigPath: "/tmp/conductor.toml", - EnvironmentPath: "/tmp/env.json", - }, - wantErr: false, - }, - { - name: "no arguments", - args: []string{"inspect"}, - expected: nil, - wantErr: true, - }, - { - name: "too many arguments", - args: []string{"inspect", "enclave1", "enclave2"}, - expected: nil, - wantErr: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - app := &cli.App{ - Name: "inspect", - Flags: []cli.Flag{ - &cli.BoolFlag{Name: "fix-traefik"}, - &cli.StringFlag{Name: "conductor-config-path"}, - &cli.StringFlag{Name: "environment-path"}, - }, - Action: func(ctx *cli.Context) error { - 
cfg, err := NewConfig(ctx) - - if tt.wantErr { - assert.Error(t, err) - assert.Nil(t, cfg) - } else { - require.NoError(t, err) - require.NotNil(t, cfg) - assert.Equal(t, tt.expected.EnclaveID, cfg.EnclaveID) - assert.Equal(t, tt.expected.FixTraefik, cfg.FixTraefik) - assert.Equal(t, tt.expected.ConductorConfigPath, cfg.ConductorConfigPath) - assert.Equal(t, tt.expected.EnvironmentPath, cfg.EnvironmentPath) - } - return nil - }, - } - - err := app.Run(tt.args) - require.NoError(t, err) - }) - } -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags.go deleted file mode 100644 index 22c6b364984f7..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags.go +++ /dev/null @@ -1,50 +0,0 @@ -package flags - -import ( - "github.com/urfave/cli/v2" - - opservice "github.com/ethereum-optimism/optimism/op-service" - oplog "github.com/ethereum-optimism/optimism/op-service/log" -) - -const EnvVarPrefix = "KURTOSIS_INSPECT" - -var ( - FixTraefik = &cli.BoolFlag{ - Name: "fix-traefik", - Value: false, - EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "FIX_TRAEFIK"), - Usage: "Fix missing Traefik labels on containers", - } - ConductorConfig = &cli.StringFlag{ - Name: "conductor-config-path", - Value: "", - EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "CONDUCTOR_CONFIG"), - Usage: "Path where conductor configuration TOML file will be written (overwrites existing file)", - } - Environment = &cli.StringFlag{ - Name: "environment-path", - Value: "", - EnvVars: opservice.PrefixEnvVar(EnvVarPrefix, "ENVIRONMENT"), - Usage: "Path where environment JSON file will be written (overwrites existing file)", - } -) - -var requiredFlags = []cli.Flag{ - // No required flags -} - -var optionalFlags = []cli.Flag{ - FixTraefik, - ConductorConfig, - Environment, -} - -var Flags []cli.Flag - -func init() { - // Add common op-service flags - optionalFlags = append(optionalFlags, 
oplog.CLIFlags(EnvVarPrefix)...) - - Flags = append(requiredFlags, optionalFlags...) -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags_test.go deleted file mode 100644 index 8c9d85d29b1ca..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/flags/flags_test.go +++ /dev/null @@ -1,124 +0,0 @@ -package flags - -import ( - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/urfave/cli/v2" -) - -func TestFlags(t *testing.T) { - tests := []struct { - name string - args []string - envVars map[string]string - expected struct { - fixTraefik bool - conductorConfig string - environment string - } - }{ - { - name: "default values", - args: []string{"inspect", "test-enclave"}, - expected: struct { - fixTraefik bool - conductorConfig string - environment string - }{ - fixTraefik: false, - conductorConfig: "", - environment: "", - }, - }, - { - name: "cli flags set", - args: []string{ - "inspect", - "--fix-traefik", - "--conductor-config-path", "/tmp/conductor.toml", - "--environment-path", "/tmp/env.json", - "test-enclave", - }, - expected: struct { - fixTraefik bool - conductorConfig string - environment string - }{ - fixTraefik: true, - conductorConfig: "/tmp/conductor.toml", - environment: "/tmp/env.json", - }, - }, - { - name: "environment variables", - args: []string{"inspect", "test-enclave"}, - envVars: map[string]string{ - "KURTOSIS_INSPECT_FIX_TRAEFIK": "true", - "KURTOSIS_INSPECT_CONDUCTOR_CONFIG": "/env/conductor.toml", - "KURTOSIS_INSPECT_ENVIRONMENT": "/env/env.json", - }, - expected: struct { - fixTraefik bool - conductorConfig string - environment string - }{ - fixTraefik: true, - conductorConfig: "/env/conductor.toml", - environment: "/env/env.json", - }, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Set environment variables - for key, value := range tt.envVars { - 
os.Setenv(key, value) - defer os.Unsetenv(key) - } - - app := &cli.App{ - Name: "inspect", - Flags: Flags, - Action: func(ctx *cli.Context) error { - assert.Equal(t, tt.expected.fixTraefik, ctx.Bool("fix-traefik")) - assert.Equal(t, tt.expected.conductorConfig, ctx.String("conductor-config-path")) - assert.Equal(t, tt.expected.environment, ctx.String("environment-path")) - return nil - }, - } - - err := app.Run(tt.args) - require.NoError(t, err) - }) - } -} - -func TestFlagDefinitions(t *testing.T) { - flagNames := make(map[string]bool) - for _, flag := range Flags { - for _, name := range flag.Names() { - flagNames[name] = true - } - } - - assert.True(t, flagNames["fix-traefik"]) - assert.True(t, flagNames["conductor-config-path"]) - assert.True(t, flagNames["environment-path"]) - assert.True(t, flagNames["log.level"]) -} - -func TestEnvVarPrefix(t *testing.T) { - assert.Equal(t, "KURTOSIS_INSPECT", EnvVarPrefix) -} - -func TestFlagStructure(t *testing.T) { - assert.NotEmpty(t, Flags) - assert.Contains(t, optionalFlags, FixTraefik) - assert.Contains(t, optionalFlags, ConductorConfig) - assert.Contains(t, optionalFlags, Environment) - assert.Empty(t, requiredFlags) -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect.go deleted file mode 100644 index 286fd9cfc6031..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect.go +++ /dev/null @@ -1,116 +0,0 @@ -package inspect - -import ( - "context" - "fmt" - "net/http" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/api/wrappers" -) - -type PortMap map[string]*descriptors.PortInfo - -type Service struct { - Labels map[string]string - Ports PortMap -} - -type ServiceMap map[string]*Service - -// InspectData represents a summary of the output of "kurtosis enclave inspect" -type InspectData struct { - FileArtifacts []string - UserServices 
ServiceMap -} - -type Inspector struct { - enclaveID string -} - -func NewInspector(enclaveID string) *Inspector { - return &Inspector{enclaveID: enclaveID} -} - -func ShortenedUUIDString(fullUUID string) string { - lengthToTrim := 12 - if lengthToTrim > len(fullUUID) { - lengthToTrim = len(fullUUID) - } - return fullUUID[:lengthToTrim] -} - -func (e *Inspector) ExtractData(ctx context.Context) (*InspectData, error) { - kurtosisCtx, err := wrappers.GetDefaultKurtosisContext() - if err != nil { - return nil, err - } - - enclaveCtx, err := kurtosisCtx.GetEnclave(ctx, e.enclaveID) - if err != nil { - return nil, err - } - - services, err := enclaveCtx.GetServices() - if err != nil { - return nil, err - } - - artifacts, err := enclaveCtx.GetAllFilesArtifactNamesAndUuids(ctx) - if err != nil { - return nil, err - } - - enclaveUUID := string(enclaveCtx.GetEnclaveUuid()) - - data := &InspectData{ - UserServices: make(ServiceMap), - FileArtifacts: make([]string, len(artifacts)), - } - - for i, artifact := range artifacts { - data.FileArtifacts[i] = artifact.GetFileName() - } - - for svc := range services { - svc := string(svc) - svcCtx, err := enclaveCtx.GetService(svc) - if err != nil { - return nil, err - } - svcUUID := string(svcCtx.GetServiceUUID()) - - portMap := make(PortMap) - - for port, portSpec := range svcCtx.GetPublicPorts() { - portMap[port] = &descriptors.PortInfo{ - Host: svcCtx.GetMaybePublicIPAddress(), - Port: int(portSpec.GetNumber()), - } - } - shortEnclaveUuid := ShortenedUUIDString(enclaveUUID) - shortServiceUuid := ShortenedUUIDString(svcUUID) - for port, portSpec := range svcCtx.GetPrivatePorts() { - // avoid non-mapped ports, we shouldn't have to use them. 
- if p, ok := portMap[port]; ok { - p.PrivatePort = int(portSpec.GetNumber()) - p.ReverseProxyHeader = http.Header{ - // This allows going through the kurtosis reverse proxy for each port - "Host": []string{fmt.Sprintf("%d-%s-%s", p.PrivatePort, shortServiceUuid, shortEnclaveUuid)}, - } - - portMap[port] = p - } - } - - if len(portMap) != 0 { - data.UserServices[svc] = &Service{ - Ports: portMap, - Labels: svcCtx.GetLabels(), - } - } - - } - - return data, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect_test.go deleted file mode 100644 index 645f952edce1e..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/inspect_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package inspect - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" -) - -func TestNewInspector(t *testing.T) { - inspector := NewInspector("test-enclave") - assert.NotNil(t, inspector) - assert.Equal(t, "test-enclave", inspector.enclaveID) -} - -func TestShortenedUUIDString(t *testing.T) { - assert.Equal(t, "f47ac10b-58c", ShortenedUUIDString("f47ac10b-58cc-4372-a567-0e02b2c3d479")) - assert.Equal(t, "abc", ShortenedUUIDString("abc")) - assert.Equal(t, "", ShortenedUUIDString("")) - assert.Equal(t, "123456789012", ShortenedUUIDString("123456789012")) - assert.Equal(t, "test2-devnet", ShortenedUUIDString("test2-devnet-2151908")) -} - -func TestInspectData(t *testing.T) { - data := &InspectData{ - FileArtifacts: []string{"genesis.json", "jwt.txt"}, - UserServices: ServiceMap{ - "op-node": &Service{ - Labels: map[string]string{"app": "op-node", "role": "sequencer"}, - Ports: PortMap{ - "rpc": &descriptors.PortInfo{Host: "127.0.0.1", Port: 8545}, - "p2p": &descriptors.PortInfo{Host: "127.0.0.1", Port: 9222}, - }, - }, - }, - } - - assert.Len(t, data.FileArtifacts, 2) - assert.Len(t, data.UserServices, 
1) - assert.Contains(t, data.FileArtifacts, "genesis.json") - - service := data.UserServices["op-node"] - assert.Equal(t, "op-node", service.Labels["app"]) - assert.Equal(t, "sequencer", service.Labels["role"]) - - rpcPort, exists := service.Ports["rpc"] - require.True(t, exists) - assert.Equal(t, 8545, rpcPort.Port) - - _, exists = service.Ports["nonexistent"] - assert.False(t, exists) -} - -func TestServiceMap(t *testing.T) { - services := ServiceMap{ - "seq0": &Service{Labels: map[string]string{"role": "sequencer"}, Ports: PortMap{"rpc": &descriptors.PortInfo{Port: 8545}}}, - "seq1": &Service{Labels: map[string]string{"role": "sequencer"}, Ports: PortMap{"rpc": &descriptors.PortInfo{Port: 8645}}}, - "conductor": &Service{Labels: map[string]string{"app": "conductor"}, Ports: PortMap{"rpc": &descriptors.PortInfo{Port: 8547}}}, - } - - assert.Len(t, services, 3) - - seq0, exists := services["seq0"] - require.True(t, exists) - assert.Equal(t, "sequencer", seq0.Labels["role"]) - - sequencerCount := 0 - for _, svc := range services { - if svc.Labels["role"] == "sequencer" { - sequencerCount++ - } - } - assert.Equal(t, 2, sequencerCount) -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/service.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/service.go deleted file mode 100644 index 099e7c62ccf49..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/service.go +++ /dev/null @@ -1,150 +0,0 @@ -package inspect - -import ( - "context" - "encoding/json" - "fmt" - "os" - - "github.com/BurntSushi/toml" - "github.com/ethereum/go-ethereum/log" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" -) - -// InspectService handles the core inspection functionality -type InspectService struct { - cfg *Config - log log.Logger -} - -func NewInspectService(cfg *Config, log log.Logger) *InspectService { - return &InspectService{ - cfg: cfg, - log: log, - } -} - -func (s *InspectService) Run(ctx context.Context) error { - if s.cfg.FixTraefik { - 
return s.fixTraefik(ctx) - } - - return s.inspect(ctx) -} - -func (s *InspectService) fixTraefik(ctx context.Context) error { - s.log.Info("Fixing Traefik network configuration...") - fmt.Println("🔧 Fixing Traefik network configuration...") - - if err := util.SetReverseProxyConfig(ctx); err != nil { - return fmt.Errorf("error setting reverse proxy config: %w", err) - } - - s.log.Info("Traefik network configuration fixed") - fmt.Println("✅ Traefik network configuration fixed!") - return nil -} - -func (s *InspectService) inspect(ctx context.Context) error { - inspector := NewInspector(s.cfg.EnclaveID) - - data, err := inspector.ExtractData(ctx) - if err != nil { - return fmt.Errorf("error inspecting enclave: %w", err) - } - - conductorConfig, err := ExtractConductorConfig(ctx, s.cfg.EnclaveID) - if err != nil { - s.log.Warn("Error extracting conductor configuration", "error", err) - } - - s.displayResults(data, conductorConfig) - - if err := s.writeFiles(data, conductorConfig); err != nil { - return fmt.Errorf("error writing output files: %w", err) - } - - return nil -} - -func (s *InspectService) displayResults(data *InspectData, conductorConfig *ConductorConfig) { - fmt.Println("File Artifacts:") - for _, artifact := range data.FileArtifacts { - fmt.Printf(" %s\n", artifact) - } - - fmt.Println("\nServices:") - for name, svc := range data.UserServices { - fmt.Printf(" %s:\n", name) - for portName, portInfo := range svc.Ports { - host := portInfo.Host - if host == "" { - host = "localhost" - } - fmt.Printf(" %s: %s:%d\n", portName, host, portInfo.Port) - } - } - - if conductorConfig != nil { - fmt.Println("\nConductor Configuration:") - fmt.Println("========================") - - if err := toml.NewEncoder(os.Stdout).Encode(conductorConfig); err != nil { - s.log.Error("Error marshaling conductor config to TOML", "error", err) - } - } -} - -func (s *InspectService) writeFiles(data *InspectData, conductorConfig *ConductorConfig) error { - if s.cfg.ConductorConfigPath 
!= "" { - if conductorConfig == nil { - s.log.Info("No conductor services found, skipping conductor config generation") - } else { - if err := s.writeConductorConfig(s.cfg.ConductorConfigPath, conductorConfig); err != nil { - return fmt.Errorf("error writing conductor config file: %w", err) - } - fmt.Printf("Conductor configuration saved to: %s\n", s.cfg.ConductorConfigPath) - } - } - - if s.cfg.EnvironmentPath != "" { - if err := s.writeEnvironment(s.cfg.EnvironmentPath, data); err != nil { - return fmt.Errorf("error writing environment file: %w", err) - } - fmt.Printf("Environment data saved to: %s\n", s.cfg.EnvironmentPath) - } - - return nil -} - -func (s *InspectService) writeConductorConfig(path string, config *ConductorConfig) error { - out, err := os.Create(path) - if err != nil { - return fmt.Errorf("error creating conductor config file: %w", err) - } - defer out.Close() - - encoder := toml.NewEncoder(out) - if err := encoder.Encode(config); err != nil { - return fmt.Errorf("error encoding conductor config as TOML: %w", err) - } - - return nil -} - -func (s *InspectService) writeEnvironment(path string, data *InspectData) error { - out, err := os.Create(path) - if err != nil { - return fmt.Errorf("error creating environment file: %w", err) - } - defer out.Close() - - enc := json.NewEncoder(out) - enc.SetIndent("", " ") - if err := enc.Encode(data); err != nil { - return fmt.Errorf("error encoding environment: %w", err) - } - - return nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/inspect/service_test.go b/kurtosis-devnet/pkg/kurtosis/sources/inspect/service_test.go deleted file mode 100644 index afa4590388f2c..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/inspect/service_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package inspect - -import ( - "os" - "path/filepath" - "testing" - - "github.com/ethereum/go-ethereum/log" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - 
"github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" -) - -func TestInspectService(t *testing.T) { - cfg := &Config{EnclaveID: "test-enclave"} - service := NewInspectService(cfg, log.New()) - - assert.NotNil(t, service) - assert.Equal(t, cfg, service.cfg) -} - -func TestFileWriting(t *testing.T) { - tempDir := t.TempDir() - - cfg := &Config{ - EnclaveID: "test-enclave", - ConductorConfigPath: filepath.Join(tempDir, "conductor.toml"), - EnvironmentPath: filepath.Join(tempDir, "environment.json"), - } - service := NewInspectService(cfg, log.New()) - - conductorConfig := &ConductorConfig{ - Networks: map[string]*ConductorNetwork{"chain": {Sequencers: []string{"seq"}}}, - Sequencers: map[string]*ConductorSequencer{"seq": {RaftAddr: "127.0.0.1:8001", ConductorRPCURL: "http://127.0.0.1:8002", NodeRPCURL: "http://127.0.0.1:8003", Voting: true}}, - } - - inspectData := &InspectData{ - FileArtifacts: []string{"genesis.json", "jwt.txt"}, - UserServices: ServiceMap{ - "op-node": &Service{ - Labels: map[string]string{"app": "op-node"}, - Ports: PortMap{"rpc": &descriptors.PortInfo{Host: "127.0.0.1", Port: 8545}}, - }, - }, - } - - err := service.writeFiles(inspectData, conductorConfig) - require.NoError(t, err) - - assert.FileExists(t, cfg.ConductorConfigPath) - assert.FileExists(t, cfg.EnvironmentPath) - - content, err := os.ReadFile(cfg.ConductorConfigPath) - require.NoError(t, err) - assert.Contains(t, string(content), "[networks]") - assert.Contains(t, string(content), "[sequencers]") - - envContent, err := os.ReadFile(cfg.EnvironmentPath) - require.NoError(t, err) - assert.Contains(t, string(envContent), "genesis.json") - assert.Contains(t, string(envContent), "op-node") -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/interfaces/interfaces.go b/kurtosis-devnet/pkg/kurtosis/sources/interfaces/interfaces.go deleted file mode 100644 index 269226ed9f7a0..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/interfaces/interfaces.go +++ /dev/null @@ -1,32 
+0,0 @@ -package interfaces - -import ( - "context" - "io" - - "github.com/ethereum-optimism/optimism/devnet-sdk/descriptors" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/deployer" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/inspect" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/jwt" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/spec" -) - -type EnclaveSpecifier interface { - EnclaveSpec(io.Reader) (*spec.EnclaveSpec, error) -} - -type EnclaveInspecter interface { - EnclaveInspect(context.Context, string) (*inspect.InspectData, error) -} - -type EnclaveObserver interface { - EnclaveObserve(context.Context, string) (*deployer.DeployerData, error) -} - -type JWTExtractor interface { - ExtractData(context.Context, string) (*jwt.Data, error) -} - -type DepsetExtractor interface { - ExtractData(context.Context, string) (map[string]descriptors.DepSet, error) -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/jwt/cmd/main.go b/kurtosis-devnet/pkg/kurtosis/sources/jwt/cmd/main.go deleted file mode 100644 index fd71974554bd0..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/jwt/cmd/main.go +++ /dev/null @@ -1,54 +0,0 @@ -package main - -import ( - "fmt" - "os" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/kurtosis/sources/jwt" - "github.com/ethereum-optimism/optimism/op-service/cliapp" - "github.com/urfave/cli/v2" -) - -var ( - GitCommit = "" - GitDate = "" -) - -func main() { - app := cli.NewApp() - app.Version = fmt.Sprintf("%s-%s", GitCommit, GitDate) - app.Name = "jwt" - app.Usage = "Tool to extract JWT secrets from Kurtosis enclaves" - app.Flags = cliapp.ProtectFlags([]cli.Flag{ - &cli.StringFlag{ - Name: "enclave", - Usage: "Name of the Kurtosis enclave", - Required: true, - }, - }) - app.Action = runJWT - app.Writer = os.Stdout - app.ErrWriter = os.Stderr - - err := app.Run(os.Args) - if err != nil { - _, _ = 
fmt.Fprintf(os.Stderr, "Application failed: %v\n", err) - os.Exit(1) - } -} - -func runJWT(ctx *cli.Context) error { - enclave := ctx.String("enclave") - - extractor := jwt.NewExtractor(enclave) - data, err := extractor.ExtractData(ctx.Context) - if err != nil { - return fmt.Errorf("failed to extract JWT data: %w", err) - } - - // Print the JWT secrets - fmt.Printf("L1 JWT Secret: %s\n", data.L1JWT) - fmt.Printf("L2 JWT Secret: %s\n", data.L2JWT) - - return nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/jwt/jwt.go b/kurtosis-devnet/pkg/kurtosis/sources/jwt/jwt.go deleted file mode 100644 index 20e950258fd27..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/jwt/jwt.go +++ /dev/null @@ -1,87 +0,0 @@ -package jwt - -import ( - "bytes" - "context" - "fmt" - "io" - - ktfs "github.com/ethereum-optimism/optimism/devnet-sdk/kt/fs" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/util" -) - -const ( - jwtSecretFileName = "jwtsecret" -) - -// Data holds the JWT secrets for L1 and L2 -type Data struct { - L1JWT string - L2JWT string -} - -// extractor implements the interfaces.JWTExtractor interface -type extractor struct { - enclave string -} - -// NewExtractor creates a new JWT extractor -func NewExtractor(enclave string) *extractor { - return &extractor{ - enclave: enclave, - } -} - -// ExtractData extracts JWT secrets from their respective artifacts -func (e *extractor) ExtractData(ctx context.Context) (*Data, error) { - fs, err := ktfs.NewEnclaveFS(ctx, e.enclave) - if err != nil { - return nil, err - } - - // Get L1 JWT with retry logic - l1JWT, err := util.WithRetry(ctx, "ExtractL1JWT", func() (string, error) { - return extractJWTFromArtifact(ctx, fs, "jwt_file") - }) - if err != nil { - return nil, fmt.Errorf("failed to get L1 JWT: %w", err) - } - - // Get L2 JWT with retry logic - l2JWT, err := util.WithRetry(ctx, "ExtractL2JWT", func() (string, error) { - return extractJWTFromArtifact(ctx, fs, "op_jwt_file") - }) - if err != nil { - 
return nil, fmt.Errorf("failed to get L2 JWT: %w", err) - } - - return &Data{ - L1JWT: l1JWT, - L2JWT: l2JWT, - }, nil -} - -func extractJWTFromArtifact(ctx context.Context, fs *ktfs.EnclaveFS, artifactName string) (string, error) { - // Get artifact with retry logic - a, err := util.WithRetry(ctx, fmt.Sprintf("GetArtifact(%s)", artifactName), func() (*ktfs.Artifact, error) { - return fs.GetArtifact(ctx, artifactName) - }) - if err != nil { - return "", fmt.Errorf("failed to get artifact: %w", err) - } - - buffer := &bytes.Buffer{} - if err := a.ExtractFiles(ktfs.NewArtifactFileWriter(jwtSecretFileName, buffer)); err != nil { - return "", fmt.Errorf("failed to extract JWT: %w", err) - } - - return parseJWT(buffer) -} - -func parseJWT(r io.Reader) (string, error) { - data, err := io.ReadAll(r) - if err != nil { - return "", fmt.Errorf("failed to read JWT file: %w", err) - } - return string(data), nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/spec/spec.go b/kurtosis-devnet/pkg/kurtosis/sources/spec/spec.go deleted file mode 100644 index f3f94058205c3..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/spec/spec.go +++ /dev/null @@ -1,172 +0,0 @@ -package spec - -import ( - "fmt" - "io" - - "gopkg.in/yaml.v3" -) - -const ( - FeatureInterop = "interop" - FeatureFaucet = "faucet" -) - -// ChainSpec represents the network parameters for a chain -type ChainSpec struct { - Name string - NetworkID string - Nodes map[string]NodeConfig -} - -// NodeConfig represents the configuration for a chain node -type NodeConfig struct { - IsSequencer bool - ELType string - CLType string -} - -type FeatureList []string - -func (fl FeatureList) Contains(feature string) bool { - for _, f := range fl { - if f == feature { - return true - } - } - return false -} - -// EnclaveSpec represents the parsed chain specifications from the YAML -type EnclaveSpec struct { - Chains []*ChainSpec - Features FeatureList -} - -// NetworkParams represents the network parameters section in 
the YAML -type NetworkParams struct { - Name string `yaml:"name"` - NetworkID string `yaml:"network_id"` -} - -// ChainConfig represents a chain configuration in the YAML -type ChainConfig struct { - NetworkParams NetworkParams `yaml:"network_params"` - Participants map[string]ParticipantConfig `yaml:"participants"` -} - -// NodeConfig represents a node configuration in the YAML -type ParticipantConfig struct { - Sequencer bool `yaml:"sequencer"` - EL ComponentType `yaml:"el"` - CL ComponentType `yaml:"cl"` -} - -// ComponentType represents a component type in the YAML -type ComponentType struct { - Type string `yaml:"type"` -} - -// InteropConfig represents the interop section in the YAML -type SuperchainConfig struct { - Enabled bool `yaml:"enabled"` -} - -// FaucetConfig represents the faucet section in the YAML -type FaucetConfig struct { - Enabled bool `yaml:"enabled"` -} - -// OptimismPackage represents the optimism_package section in the YAML -type OptimismPackage struct { - Faucet FaucetConfig `yaml:"faucet"` - Superchains map[string]SuperchainConfig `yaml:"superchains"` - Chains map[string]ChainConfig `yaml:"chains"` -} - -// YAMLSpec represents the root of the YAML document -type YAMLSpec struct { - OptimismPackage OptimismPackage `yaml:"optimism_package"` -} - -type Spec struct{} - -type SpecOption func(*Spec) - -func NewSpec(opts ...SpecOption) *Spec { - s := &Spec{} - for _, opt := range opts { - opt(s) - } - return s -} - -type featureExtractor func(YAMLSpec, string) bool - -var featuresMap = map[string]featureExtractor{ - FeatureInterop: interopExtractor, - FeatureFaucet: faucetExtractor, -} - -func interopExtractor(yamlSpec YAMLSpec, _feature string) bool { - for _, superchain := range yamlSpec.OptimismPackage.Superchains { - if superchain.Enabled { - return true - } - } - return false -} - -func faucetExtractor(yamlSpec YAMLSpec, _feature string) bool { - return yamlSpec.OptimismPackage.Faucet.Enabled -} - -// ExtractData parses a YAML document and 
returns the chain specifications -func (s *Spec) ExtractData(r io.Reader) (*EnclaveSpec, error) { - var yamlSpec YAMLSpec - decoder := yaml.NewDecoder(r) - if err := decoder.Decode(&yamlSpec); err != nil { - return nil, fmt.Errorf("failed to decode YAML: %w", err) - } - - var features []string - for feature, extractor := range featuresMap { - if extractor(yamlSpec, feature) { - features = append(features, feature) - } - } - - result := &EnclaveSpec{ - Chains: make([]*ChainSpec, 0, len(yamlSpec.OptimismPackage.Chains)), - Features: features, - } - - // Extract chain specifications - for name, chain := range yamlSpec.OptimismPackage.Chains { - - nodes := make(map[string]NodeConfig, len(chain.Participants)) - for name, participant := range chain.Participants { - elType := participant.EL.Type - clType := participant.CL.Type - if elType == "" { - elType = "op-geth" - } - if clType == "" { - clType = "op-node" - } - nodes[name] = NodeConfig{ - IsSequencer: participant.Sequencer, - ELType: elType, - CLType: clType, - } - } - - result.Chains = append(result.Chains, &ChainSpec{ - Name: name, - NetworkID: chain.NetworkParams.NetworkID, - Nodes: nodes, - }) - } - - return result, nil -} diff --git a/kurtosis-devnet/pkg/kurtosis/sources/spec/spec_test.go b/kurtosis-devnet/pkg/kurtosis/sources/spec/spec_test.go deleted file mode 100644 index 6988034d96dcc..0000000000000 --- a/kurtosis-devnet/pkg/kurtosis/sources/spec/spec_test.go +++ /dev/null @@ -1,113 +0,0 @@ -package spec - -import ( - "sort" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestParseSpec(t *testing.T) { - yamlContent := ` -optimism_package: - chains: - op-rollup-one: - participants: - node0: - el: - type: op-geth - network_params: - network_id: "3151909" - blockscout_params: - enabled: true - op-rollup-two: - participants: - node0: - el: - type: op-geth - network_params: - network_id: "3151910" -ethereum_package: - participants: - - el_type: geth - - el_type: reth - 
network_params: - preset: minimal - genesis_delay: 5 -` - - result, err := NewSpec().ExtractData(strings.NewReader(yamlContent)) - require.NoError(t, err) - - expectedChains := []ChainSpec{ - { - Name: "op-rollup-one", - NetworkID: "3151909", - }, - { - Name: "op-rollup-two", - NetworkID: "3151910", - }, - } - - require.Len(t, result.Chains, len(expectedChains)) - sort.Slice(result.Chains, func(i, j int) bool { - return result.Chains[i].Name < result.Chains[j].Name - }) - - for i, expected := range expectedChains { - actual := result.Chains[i] - require.Equal(t, expected.Name, actual.Name, "Chain %d: name mismatch", i) - require.Equal(t, expected.NetworkID, actual.NetworkID, "Chain %d: network ID mismatch", i) - } -} - -func TestParseSpecErrors(t *testing.T) { - tests := []struct { - name string - yaml string - wantErr bool - }{ - { - name: "empty yaml", - yaml: "", - wantErr: true, - }, - { - name: "invalid yaml", - yaml: "invalid: [yaml: content", - wantErr: true, - }, - { - name: "missing network params", - yaml: ` -optimism_package: - chains: - op-kurtosis: - participants: - node0: - el: - type: op-geth - blockscout_params: - enabled: true`, - }, - { - name: "missing chains", - yaml: ` -optimism_package: - other_field: value`, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - _, err := NewSpec().ExtractData(strings.NewReader(tt.yaml)) - if tt.wantErr { - require.Error(t, err) - } else { - require.NoError(t, err) - } - }) - } -} diff --git a/kurtosis-devnet/pkg/tmpl/cmd/main.go b/kurtosis-devnet/pkg/tmpl/cmd/main.go deleted file mode 100644 index a64d08bc7f773..0000000000000 --- a/kurtosis-devnet/pkg/tmpl/cmd/main.go +++ /dev/null @@ -1,69 +0,0 @@ -package main - -import ( - "encoding/json" - "flag" - "fmt" - "os" - "strings" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/tmpl" - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/tmpl/fake" -) - -func main() { - // Parse command line flags - templateFile := 
flag.String("template", "", "Path to template file") - dataFile := flag.String("data", "", "Optional JSON data file") - flag.Parse() - - if *templateFile == "" { - fmt.Fprintln(os.Stderr, "Error: --template flag is required") - flag.Usage() - os.Exit(1) - } - - // Open template file - f, err := os.Open(*templateFile) - if err != nil { - fmt.Fprintf(os.Stderr, "Error opening template file: %v\n", err) - os.Exit(1) - } - defer f.Close() - - // Get basename of template file without extension - base := *templateFile - if lastSlash := strings.LastIndex(base, "/"); lastSlash >= 0 { - base = base[lastSlash+1:] - } - if lastDot := strings.LastIndex(base, "."); lastDot >= 0 { - base = base[:lastDot] - } - enclave := base + "-devnet" - - // Create template context - ctx := fake.NewFakeTemplateContext(enclave) - - // Load data file if provided - if *dataFile != "" { - dataBytes, err := os.ReadFile(*dataFile) - if err != nil { - fmt.Fprintf(os.Stderr, "Error reading data file: %v\n", err) - os.Exit(1) - } - - var data interface{} - if err := json.Unmarshal(dataBytes, &data); err != nil { - fmt.Fprintf(os.Stderr, "Error parsing data file as JSON: %v\n", err) - os.Exit(1) - } - - tmpl.WithData(data)(ctx) - } - - // Process template and write to stdout - if err := ctx.InstantiateTemplate(f, os.Stdout); err != nil { - fmt.Fprintf(os.Stderr, "Error processing template: %v\n", err) - os.Exit(1) - } -} diff --git a/kurtosis-devnet/pkg/tmpl/fake/fake.go b/kurtosis-devnet/pkg/tmpl/fake/fake.go deleted file mode 100644 index 3ad9c36ebfe8c..0000000000000 --- a/kurtosis-devnet/pkg/tmpl/fake/fake.go +++ /dev/null @@ -1,31 +0,0 @@ -package fake - -import ( - "fmt" - - "github.com/ethereum-optimism/optimism/kurtosis-devnet/pkg/tmpl" -) - -type PrestateInfo struct { - URL string `json:"url"` - Hashes map[string]string `json:"hashes"` -} - -func NewFakeTemplateContext(enclave string) *tmpl.TemplateContext { - return tmpl.NewTemplateContext( - tmpl.WithFunction("localDockerImage", func(image 
string) (string, error) { - return fmt.Sprintf("%s:%s", image, enclave), nil - }), - tmpl.WithFunction("localContractArtifacts", func(layer string) (string, error) { - return fmt.Sprintf("http://host.docker.internal:0/contracts-bundle-%s.tar.gz", enclave), nil - }), - tmpl.WithFunction("localPrestate", func() (*PrestateInfo, error) { - return &PrestateInfo{ - URL: "http://fileserver/proofs/op-program/cannon", - Hashes: map[string]string{ - "prestate": "0x1234567890", - }, - }, nil - }), - ) -} diff --git a/kurtosis-devnet/pkg/tmpl/tmpl.go b/kurtosis-devnet/pkg/tmpl/tmpl.go deleted file mode 100644 index ab931063f9796..0000000000000 --- a/kurtosis-devnet/pkg/tmpl/tmpl.go +++ /dev/null @@ -1,189 +0,0 @@ -package tmpl - -import ( - "bytes" - "encoding/json" - "fmt" - "io" - "os" - "path/filepath" - "text/template" - - sprig "github.com/go-task/slim-sprig/v3" - "gopkg.in/yaml.v3" -) - -// TemplateFunc represents a function that can be used in templates -type TemplateFunc any - -// TemplateContext contains data and functions to be passed to templates -type TemplateContext struct { - baseDir string - Data interface{} - Functions map[string]TemplateFunc - includeStack []string // Track stack of included files to detect circular includes -} - -type TemplateContextOptions func(*TemplateContext) - -func WithBaseDir(basedir string) TemplateContextOptions { - return func(ctx *TemplateContext) { - ctx.baseDir = basedir - } -} - -func WithFunction(name string, fn TemplateFunc) TemplateContextOptions { - return func(ctx *TemplateContext) { - ctx.Functions[name] = fn - } -} - -func WithData(data interface{}) TemplateContextOptions { - return func(ctx *TemplateContext) { - ctx.Data = data - } -} - -// NewTemplateContext creates a new TemplateContext with default functions -func NewTemplateContext(opts ...TemplateContextOptions) *TemplateContext { - ctx := &TemplateContext{ - baseDir: ".", - Functions: make(map[string]TemplateFunc), - includeStack: make([]string, 0), - } - - for _, 
opt := range opts { - opt(ctx) - } - - return ctx -} - -// includeFile reads and processes a template file relative to the given context's baseDir, -// parses the content as YAML, and returns its JSON representation. -// We use JSON because it can be inlined without worrying about indentation, while remaining valid YAML. -// Note: to protect against infinite recursion, we check for circular includes. -func (ctx *TemplateContext) includeFile(fname string, data ...interface{}) (string, error) { - // Resolve the file path relative to baseDir - path := filepath.Join(ctx.baseDir, fname) - - // Check for circular includes - absPath, err := filepath.Abs(path) - if err != nil { - return "", fmt.Errorf("error resolving absolute path: %w", err) - } - for _, includedFile := range ctx.includeStack { - if includedFile == absPath { - return "", fmt.Errorf("circular include detected for file %s", fname) - } - } - - // Read the included file - file, err := os.Open(path) - if err != nil { - return "", fmt.Errorf("error opening include file: %w", err) - } - defer file.Close() - - // Create buffer for output - var buf bytes.Buffer - - var tplData interface{} - switch len(data) { - case 0: - tplData = nil - case 1: - tplData = data[0] - default: - return "", fmt.Errorf("invalid number of arguments for includeFile: %d", len(data)) - } - - // Create new context with updated baseDir and include stack - includeCtx := &TemplateContext{ - baseDir: filepath.Dir(path), - Data: tplData, - Functions: ctx.Functions, - includeStack: append(append([]string{}, ctx.includeStack...), absPath), - } - - // Process the included template - if err := includeCtx.InstantiateTemplate(file, &buf); err != nil { - return "", fmt.Errorf("error processing include file: %w", err) - } - - // Parse the buffer content as YAML - var yamlData interface{} - if err := yaml.Unmarshal(buf.Bytes(), &yamlData); err != nil { - return "", fmt.Errorf("error parsing YAML: %w", err) - } - - // Convert to JSON - jsonBytes, err := 
json.Marshal(yamlData) - if err != nil { - return "", fmt.Errorf("error converting to JSON: %w", err) - } - - return string(jsonBytes), nil -} - -// InstantiateTemplate reads a template from the reader, executes it with the context, -// and writes the result to the writer -func (ctx *TemplateContext) InstantiateTemplate(reader io.Reader, writer io.Writer) error { - // Read template content - templateBytes, err := io.ReadAll(reader) - if err != nil { - return fmt.Errorf("failed to read template: %w", err) - } - - // Convert TemplateFunc map to FuncMap - funcMap := template.FuncMap{ - "include": ctx.includeFile, - } - for name, fn := range ctx.Functions { - funcMap[name] = fn - } - - // Create template with helper functions and option to error on missing fields - tmpl := template.New("template"). - Funcs(sprig.TxtFuncMap()). - Funcs(funcMap). - Option("missingkey=error") - - // Parse template - tmpl, err = tmpl.Parse(string(templateBytes)) - if err != nil { - return fmt.Errorf("failed to parse template: %w", err) - } - - // Execute template into a buffer - var buf bytes.Buffer - if err := tmpl.Execute(&buf, ctx.Data); err != nil { - return fmt.Errorf("failed to execute template: %w", err) - } - - // If this is the top-level rendering, we want to write the output as "pretty" YAML - if len(ctx.includeStack) == 0 { - var yamlData interface{} - // Parse the buffer content as YAML - if err := yaml.Unmarshal(buf.Bytes(), &yamlData); err != nil { - return fmt.Errorf("error parsing template output as YAML: %w. 
Template output: %s", err, buf.String()) - } - - // Create YAML encoder with default indentation - encoder := yaml.NewEncoder(writer) - encoder.SetIndent(2) - - // Write the YAML document - if err := encoder.Encode(yamlData); err != nil { - return fmt.Errorf("error writing YAML output: %w", err) - } - - } else { - // Otherwise, just write the buffer content to the writer - if _, err := buf.WriteTo(writer); err != nil { - return fmt.Errorf("failed to write template output: %w", err) - } - } - - return nil -} diff --git a/kurtosis-devnet/pkg/tmpl/tmpl_test.go b/kurtosis-devnet/pkg/tmpl/tmpl_test.go deleted file mode 100644 index 71c8a6985726d..0000000000000 --- a/kurtosis-devnet/pkg/tmpl/tmpl_test.go +++ /dev/null @@ -1,110 +0,0 @@ -package tmpl - -import ( - "bytes" - "strings" - "testing" - - "github.com/stretchr/testify/require" -) - -func TestNewTemplateContext(t *testing.T) { - t.Run("creates empty context", func(t *testing.T) { - ctx := NewTemplateContext() - require.Nil(t, ctx.Data, "expected nil Data in new context") - require.Empty(t, ctx.Functions, "expected empty Functions map in new context") - }) - - t.Run("adds data with WithData option", func(t *testing.T) { - data := map[string]string{"key": "value"} - ctx := NewTemplateContext(WithData(data)) - require.NotNil(t, ctx.Data, "expected non-nil Data in context") - d, ok := ctx.Data.(map[string]string) - require.True(t, ok) - require.Equal(t, "value", d["key"]) - }) - - t.Run("adds function with WithFunction option", func(t *testing.T) { - fn := func(s string) (string, error) { return s + "test", nil } - ctx := NewTemplateContext(WithFunction("testfn", fn)) - require.Len(t, ctx.Functions, 1, "expected one function in context") - _, ok := ctx.Functions["testfn"] - require.True(t, ok, "function not added with correct name") - }) -} - -func TestInstantiateTemplate(t *testing.T) { - t.Run("simple template substitution", func(t *testing.T) { - data := map[string]string{"name": "world"} - ctx := 
NewTemplateContext(WithData(data)) - - input := strings.NewReader("Hello {{.name}}!") - var output bytes.Buffer - - err := ctx.InstantiateTemplate(input, &output) - require.NoError(t, err) - - expected := "Hello world!\n" - require.Equal(t, expected, output.String()) - }) - - t.Run("template with custom function", func(t *testing.T) { - upper := func(s string) (string, error) { return strings.ToUpper(s), nil } - ctx := NewTemplateContext( - WithData(map[string]string{"name": "world"}), - WithFunction("upper", upper), - ) - - input := strings.NewReader("Hello {{upper .name}}!") - var output bytes.Buffer - - err := ctx.InstantiateTemplate(input, &output) - require.NoError(t, err) - - expected := "Hello WORLD!\n" - require.Equal(t, expected, output.String()) - }) - - t.Run("invalid template syntax", func(t *testing.T) { - ctx := NewTemplateContext() - input := strings.NewReader("Hello {{.name") - var output bytes.Buffer - - err := ctx.InstantiateTemplate(input, &output) - require.Error(t, err, "expected error for invalid template syntax") - }) - - t.Run("missing data field", func(t *testing.T) { - ctx := NewTemplateContext() - input := strings.NewReader("Hello {{.name}}!") - var output bytes.Buffer - - err := ctx.InstantiateTemplate(input, &output) - require.Error(t, err, "expected error for missing data field") - }) - - t.Run("multiple functions and data fields", func(t *testing.T) { - upper := func(s string) (string, error) { return strings.ToUpper(s), nil } - lower := func(s string) (string, error) { return strings.ToLower(s), nil } - - data := map[string]string{ - "greeting": "Hello", - "name": "World", - } - - ctx := NewTemplateContext( - WithData(data), - WithFunction("upper", upper), - WithFunction("lower", lower), - ) - - input := strings.NewReader("{{upper .greeting}} {{lower .name}}!") - var output bytes.Buffer - - err := ctx.InstantiateTemplate(input, &output) - require.NoError(t, err) - - expected := "HELLO world!\n" - require.Equal(t, expected, 
output.String()) - }) -} diff --git a/kurtosis-devnet/pkg/types/autofix.go b/kurtosis-devnet/pkg/types/autofix.go deleted file mode 100644 index 80c01b52df97a..0000000000000 --- a/kurtosis-devnet/pkg/types/autofix.go +++ /dev/null @@ -1,9 +0,0 @@ -package types - -type AutofixMode string - -const ( - AutofixModeDisabled AutofixMode = "disabled" - AutofixModeNormal AutofixMode = "normal" - AutofixModeNuke AutofixMode = "nuke" -) diff --git a/kurtosis-devnet/pkg/util/docker.go b/kurtosis-devnet/pkg/util/docker.go deleted file mode 100644 index b7a322a043814..0000000000000 --- a/kurtosis-devnet/pkg/util/docker.go +++ /dev/null @@ -1,773 +0,0 @@ -package util - -import ( - "context" - "fmt" - "net/http" - "os" - "path/filepath" - "strings" - "time" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/filters" - "github.com/docker/docker/api/types/network" - "github.com/docker/docker/api/types/volume" - "github.com/docker/docker/client" - "github.com/docker/go-connections/nat" - "github.com/ethereum/go-ethereum/log" - "github.com/ethereum/go-ethereum/rpc" - - opClient "github.com/ethereum-optimism/optimism/op-service/client" - "github.com/ethereum-optimism/optimism/op-service/eth" - "github.com/ethereum-optimism/optimism/op-service/sources" -) - -type TraefikHostTransport struct { - base http.RoundTripper - host string -} - -func (t *TraefikHostTransport) RoundTrip(req *http.Request) (*http.Response, error) { - newReq := req.Clone(req.Context()) - newReq.Host = t.host - newReq.Header.Set("Host", t.host) - return t.base.RoundTrip(newReq) -} - -type ServiceWithoutLabels struct { - Name string - ServiceUUID string - EnclaveUUID string - Ports []ServicePort -} - -type ServicePort struct { - Name string - Port int -} - -type RPCEndpoint struct { - Name string - Port int - UUID string - EnclaveUUID string -} - -// NewDockerClient creates a new Docker client and checks if Docker is available -func 
NewDockerClient() (*client.Client, error) { - apiClient, err := client.NewClientWithOpts(client.FromEnv) - if err != nil { - return nil, fmt.Errorf("failed to create docker client: %w", err) - } - - _, err = apiClient.Ping(context.Background()) - if err != nil { - return nil, fmt.Errorf("failed to connect to Docker: %w", err) - } - - return apiClient, nil -} - -// createKurtosisFilter creates a filter for kurtosis resources -func createKurtosisFilter(enclave ...string) filters.Args { - kurtosisFilter := filters.NewArgs() - if len(enclave) > 0 { - kurtosisFilter.Add("label", fmt.Sprintf("kurtosis.devnet.enclave=%s", enclave[0])) - } else { - kurtosisFilter.Add("label", "kurtosis.devnet.enclave") - } - return kurtosisFilter -} - -// destroyContainers stops and removes containers matching the filter -func destroyContainers(ctx context.Context, apiClient *client.Client, filter filters.Args) error { - containers, err := apiClient.ContainerList(ctx, container.ListOptions{ - All: true, - Filters: filter, - }) - if err != nil { - return fmt.Errorf("failed to list containers: %w", err) - } - - for _, cont := range containers { - if cont.State == "running" { - timeoutSecs := int(10) - if err := apiClient.ContainerStop(ctx, cont.ID, container.StopOptions{ - Timeout: &timeoutSecs, - }); err != nil { - return fmt.Errorf("failed to stop container %s: %w", cont.ID, err) - } - } - - if err := apiClient.ContainerRemove(ctx, cont.ID, container.RemoveOptions{ - RemoveVolumes: true, - Force: true, - }); err != nil { - return fmt.Errorf("failed to remove container %s: %w", cont.ID, err) - } - } - return nil -} - -// destroyVolumes removes volumes matching the filter -func destroyVolumes(ctx context.Context, apiClient *client.Client, filter filters.Args) error { - volumes, err := apiClient.VolumeList(ctx, volume.ListOptions{ - Filters: filter, - }) - if err != nil { - return fmt.Errorf("failed to list volumes: %w", err) - } - - for _, volume := range volumes.Volumes { - if err := 
apiClient.VolumeRemove(ctx, volume.Name, true); err != nil { - return fmt.Errorf("failed to remove volume %s: %w", volume.Name, err) - } - } - return nil -} - -// destroyNetworks removes networks matching the filter -func destroyNetworks(ctx context.Context, apiClient *client.Client, enclaveName string) error { - networks, err := apiClient.NetworkList(ctx, network.ListOptions{}) - if err != nil { - return fmt.Errorf("failed to list networks: %w", err) - } - - for _, network := range networks { - if (enclaveName != "" && strings.HasPrefix(network.Name, fmt.Sprintf("kt-%s-devnet", enclaveName))) || - (enclaveName == "" && strings.Contains(network.Name, "kt-")) { - if err := apiClient.NetworkRemove(ctx, network.ID); err != nil { - return fmt.Errorf("failed to remove network: %w", err) - } - } - } - return nil -} - -// DestroyDockerResources removes all Docker resources associated with the given enclave -func DestroyDockerResources(ctx context.Context, enclave ...string) error { - apiClient, err := NewDockerClient() - if err != nil { - return err - } - - enclaveName := "" - if len(enclave) > 0 { - enclaveName = enclave[0] - } - fmt.Printf("Destroying docker resources for enclave: %s\n", enclaveName) - - filter := createKurtosisFilter(enclave...) 
- - if err := destroyContainers(ctx, apiClient, filter); err != nil { - fmt.Printf("failed to destroy containers: %v", err) - } - - if err := destroyVolumes(ctx, apiClient, filter); err != nil { - fmt.Printf("failed to destroy volumes: %v", err) - } - - if err := destroyNetworks(ctx, apiClient, enclaveName); err != nil { - fmt.Printf("failed to destroy networks: %v", err) - } - - return nil -} - -func findRPCEndpoints(ctx context.Context, apiClient *client.Client) ([]RPCEndpoint, error) { - userFilters := filters.NewArgs() - userFilters.Add("label", "com.kurtosistech.container-type=user-service") - - containers, err := apiClient.ContainerList(ctx, container.ListOptions{ - All: false, - Filters: userFilters, - }) - if err != nil { - return nil, fmt.Errorf("failed to list containers: %w", err) - } - - var endpoints []RPCEndpoint - seen := make(map[string]bool) - - for _, c := range containers { - serviceName := strings.TrimPrefix(c.Names[0], "/") - serviceUUID := c.Labels["com.kurtosistech.guid"] - enclaveUUID := c.Labels["com.kurtosistech.enclave-id"] - - for _, port := range c.Ports { - if port.PrivatePort == 8545 && port.PublicPort != 0 { - key := fmt.Sprintf("%s-%s", serviceName, serviceUUID) - if !seen[key] { - seen[key] = true - endpoints = append(endpoints, RPCEndpoint{ - Name: serviceName, - Port: 8545, - UUID: serviceUUID, - EnclaveUUID: enclaveUUID, - }) - } - } - } - } - - return endpoints, nil -} - -func testRPCEndpoint(endpoint RPCEndpoint) error { - shortUUID := shortenedUUIDString(endpoint.UUID) - shortEnclaveUUID := shortenedUUIDString(endpoint.EnclaveUUID) - - hostHeader := fmt.Sprintf("%d-%s-%s", endpoint.Port, shortUUID, shortEnclaveUUID) - httpClient := &http.Client{ - Timeout: 10 * time.Second, - Transport: &TraefikHostTransport{ - base: http.DefaultTransport, - host: hostHeader, - }, - } - - rpcClient, err := rpc.DialOptions(context.Background(), "http://127.0.0.1:9730", rpc.WithHTTPClient(httpClient)) - if err != nil { - return 
fmt.Errorf("failed to create RPC client: %w", err) - } - defer rpcClient.Close() - - ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) - defer cancel() - - if strings.Contains(endpoint.Name, "supervisor") { - return testSupervisor(ctx, rpcClient) - } - if strings.Contains(endpoint.Name, "test-sequencer") { - // TODO: No public or unauthenticated health/status API exists for test-sequencer yet. - // Admin API is still in progress — skip readiness check until it's available. - return nil - } - return testEthNode(ctx, rpcClient) -} - -func testEthNode(ctx context.Context, rpcClient *rpc.Client) error { - baseClient := opClient.NewBaseRPCClient(rpcClient) - - ethConfig := &sources.EthClientConfig{ - MaxRequestsPerBatch: 1, - MaxConcurrentRequests: 1, - ReceiptsCacheSize: 1, - TransactionsCacheSize: 1, - HeadersCacheSize: 1, - PayloadsCacheSize: 1, - BlockRefsCacheSize: 1, - TrustRPC: true, - MustBePostMerge: false, - RPCProviderKind: sources.RPCKindStandard, - MethodResetDuration: time.Minute, - } - - ethClient, err := sources.NewEthClient(baseClient, log.Root(), nil, ethConfig) - if err != nil { - return fmt.Errorf("failed to create EthClient: %w", err) - } - - blockRef, err := ethClient.BlockRefByLabel(ctx, eth.Unsafe) - if err != nil { - return fmt.Errorf("failed to get latest block: %w", err) - } - - if blockRef.Number == 0 && blockRef.Hash.String() == "0x0000000000000000000000000000000000000000000000000000000000000000" { - return fmt.Errorf("received invalid block reference") - } - - blockInfo, err := ethClient.InfoByNumber(ctx, blockRef.Number) - if err != nil { - return fmt.Errorf("failed to get block info by number: %w", err) - } - - if blockInfo.Hash() != blockRef.Hash { - return fmt.Errorf("block hash mismatch: expected %s, got %s", blockRef.Hash, blockInfo.Hash()) - } - - return nil -} - -func testSupervisor(ctx context.Context, rpcClient *rpc.Client) error { - var syncStatus interface{} - err := rpcClient.CallContext(ctx, &syncStatus, 
"supervisor_syncStatus") - if err != nil { - return fmt.Errorf("failed to call supervisor_syncStatus: %w", err) - } - - if syncStatus == nil { - return fmt.Errorf("supervisor_syncStatus returned nil") - } - - return nil -} - -// SetReverseProxyConfig recreates the Traefik container with correct configuration for service routing -func SetReverseProxyConfig(ctx context.Context) error { - apiClient, err := NewDockerClient() - if err != nil { - return fmt.Errorf("failed to create Docker client: %w", err) - } - - fmt.Printf("Fixing Traefik routing by recreating container\n") - - traefikFilters := filters.NewArgs() - traefikFilters.Add("name", "kurtosis-reverse-proxy") - - traefikContainers, err := apiClient.ContainerList(ctx, container.ListOptions{ - All: false, - Filters: traefikFilters, - }) - if err != nil { - return fmt.Errorf("failed to list containers: %w", err) - } - - var traefikContainer *types.Container - for _, c := range traefikContainers { - for _, name := range c.Names { - if strings.Contains(name, "kurtosis-reverse-proxy") { - traefikContainer = &c - break - } - } - if traefikContainer != nil { - break - } - } - - if traefikContainer == nil { - return fmt.Errorf("traefik container (kurtosis-reverse-proxy) not found, recreate it by restarting kurtosis (kurtosis engine restart)") - } - - fmt.Printf("Found Traefik container: %s\n", traefikContainer.ID[:12]) - - containerInfo, err := apiClient.ContainerInspect(ctx, traefikContainer.ID) - if err != nil { - return fmt.Errorf("failed to inspect container: %w", err) - } - containerName := strings.TrimPrefix(containerInfo.Name, "/") - containerImage := containerInfo.Config.Image - var portBindings []string - for containerPort, hostBindings := range containerInfo.HostConfig.PortBindings { - for _, binding := range hostBindings { - portBindings = append(portBindings, fmt.Sprintf("%s:%s", binding.HostPort, containerPort)) - } - } - var networks []string - for networkName := range 
containerInfo.NetworkSettings.Networks { - if networkName != "bridge" { - networks = append(networks, networkName) - } - } - var correctNetworkID string - for networkName, network := range containerInfo.NetworkSettings.Networks { - if networkName != "bridge" && strings.Contains(networkName, "kt-") { - correctNetworkID = network.NetworkID - break - } - } - - if correctNetworkID == "" { - return fmt.Errorf("traefik container is not connected to any kurtosis networks") - } - - tempDir, err := createTempConfigDir(ctx) - if err != nil { - return fmt.Errorf("failed to create temp config directory: %w", err) - } - defer func() { - if err := removeTempDir(tempDir); err != nil { - fmt.Printf("Warning: Failed to clean up temp directory %s: %v\n", tempDir, err) - } - }() - - fmt.Printf("Stopping current Traefik container\n") - timeout := int(10) - if err := apiClient.ContainerStop(ctx, traefikContainer.ID, container.StopOptions{ - Timeout: &timeout, - }); err != nil { - return fmt.Errorf("failed to stop container: %w", err) - } - - if err := apiClient.ContainerRemove(ctx, traefikContainer.ID, container.RemoveOptions{ - RemoveVolumes: true, - Force: true, - }); err != nil { - return fmt.Errorf("failed to remove container: %w", err) - } - - newContainer, err := recreateTraefikContainer(ctx, apiClient, containerName, containerImage, portBindings, tempDir, containerInfo.Config.Labels) - if err != nil { - return fmt.Errorf("failed to recreate container: %w", err) - } - - fmt.Printf("Created new Traefik container: %s\n", newContainer.ID[:12]) - for _, networkName := range networks { - if err := apiClient.NetworkConnect(ctx, networkName, newContainer.ID, nil); err != nil { - return fmt.Errorf("failed to connect to network %s: %w", networkName, err) - } - } - - if err := waitForContainerRunning(ctx, apiClient, newContainer.ID, 30*time.Second); err != nil { - return fmt.Errorf("container failed to start within timeout: %w", err) - } - - if err := waitForTraefikReady(ctx, 
30*time.Second); err != nil { - fmt.Printf("Warning: Traefik API not ready within timeout: %v\n", err) - } - - if err := TestRPCEndpoints(ctx, apiClient); err != nil { - fmt.Printf("RPC access test failed: %v\n", err) - } - - fmt.Printf("Traefik routing fix completed successfully\n") - - return nil -} - -// waitForContainerRunning polls the container status until it's running or timeout -func waitForContainerRunning(ctx context.Context, apiClient *client.Client, containerID string, timeout time.Duration) error { - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout waiting for container to start") - case <-ticker.C: - containerInfo, err := apiClient.ContainerInspect(ctx, containerID) - if err != nil { - return fmt.Errorf("failed to inspect container: %w", err) - } - if containerInfo.State.Running { - return nil - } - if containerInfo.State.Status == "exited" { - return fmt.Errorf("container exited unexpectedly: %s", containerInfo.State.Error) - } - } - } -} - -// waitForTraefikReady waits for Traefik API to be accessible -func waitForTraefikReady(ctx context.Context, timeout time.Duration) error { - ctx, cancel := context.WithTimeout(ctx, timeout) - defer cancel() - - ticker := time.NewTicker(500 * time.Millisecond) - defer ticker.Stop() - - client := &http.Client{ - Timeout: 2 * time.Second, - } - - for { - select { - case <-ctx.Done(): - return fmt.Errorf("timeout waiting for Traefik API to be ready") - case <-ticker.C: - resp, err := client.Get("http://127.0.0.1:9731/api/rawdata") - if err == nil && resp.StatusCode == http.StatusOK { - resp.Body.Close() - return nil - } - if resp != nil { - resp.Body.Close() - } - } - } -} - -func createTempConfigDir(ctx context.Context) (string, error) { - tempDir, err := os.MkdirTemp("", "traefik-fix-*") - if err != nil { - return "", fmt.Errorf("failed to create temp 
directory: %w", err) - } - - apiClient, err := NewDockerClient() - if err != nil { - os.RemoveAll(tempDir) - return "", fmt.Errorf("failed to create Docker client: %w", err) - } - defer apiClient.Close() - - networks, err := apiClient.NetworkList(ctx, network.ListOptions{}) - if err != nil { - os.RemoveAll(tempDir) - return "", fmt.Errorf("failed to list networks: %w", err) - } - - var traefikConfig strings.Builder - traefikConfig.WriteString(`# Traefik configuration with corrected network ID -api: - dashboard: true - insecure: true - -entryPoints: - web: - address: ":9730" - traefik: - address: ":9731" - -providers: -`) - - for _, net := range networks { - // Only include Kurtosis networks (skip bridge, host, none) - switch net.Name { - case "bridge", "host", "none": - continue - } - traefikConfig.WriteString(fmt.Sprintf(` docker: - endpoint: "unix:///var/run/docker.sock" - network: "%s" - exposedByDefault: true -`, net.ID)) - break - } - - traefikConfig.WriteString(` file: - directory: /etc/traefik/dynamic - watch: true - -log: - level: INFO -`) - configPath := filepath.Join(tempDir, "traefik.yml") - if err := os.WriteFile(configPath, []byte(traefikConfig.String()), 0644); err != nil { - os.RemoveAll(tempDir) - return "", fmt.Errorf("failed to write traefik config: %w", err) - } - - dynamicDir := filepath.Join(tempDir, "dynamic") - if err := os.MkdirAll(dynamicDir, 0755); err != nil { - os.RemoveAll(tempDir) - return "", fmt.Errorf("failed to create dynamic directory: %w", err) - } - - servicesWithoutLabels, err := discoverServicesWithoutTraefikLabels(ctx, apiClient) - if err != nil { - fmt.Printf("⚠️ Warning: Failed to discover services without Traefik labels: %v\n", err) - servicesWithoutLabels = []ServiceWithoutLabels{} - } - var dynamicConfig strings.Builder - dynamicConfig.WriteString("# Dynamic Traefik configuration for services without Traefik labels\n") - dynamicConfig.WriteString("# Generated by SetReverseProxyConfig - do not edit manually\n") - 
dynamicConfig.WriteString("http:\n") - dynamicConfig.WriteString(" routers:\n") - - for _, service := range servicesWithoutLabels { - addServiceRouters(&dynamicConfig, service) - } - dynamicConfig.WriteString(" services:\n") - for _, service := range servicesWithoutLabels { - addServiceServices(&dynamicConfig, service) - } - dynamicPath := filepath.Join(dynamicDir, "l1-routing.yml") - if err := os.WriteFile(dynamicPath, []byte(dynamicConfig.String()), 0644); err != nil { - os.RemoveAll(tempDir) - return "", fmt.Errorf("failed to write dynamic config: %w", err) - } - fmt.Printf("Created dynamic routing rules for %d services\n", len(servicesWithoutLabels)) - return tempDir, nil -} - -func removeTempDir(tempDir string) error { - return os.RemoveAll(tempDir) -} - -func recreateTraefikContainer(ctx context.Context, apiClient *client.Client, containerName, containerImage string, portBindings []string, tempDir string, labels map[string]string) (*container.CreateResponse, error) { - exposedPorts := make(nat.PortSet) - portBindingsMap := make(nat.PortMap) - - for _, binding := range portBindings { - parts := strings.Split(binding, ":") - if len(parts) == 2 { - hostPort := parts[0] - containerPortStr := parts[1] - - if strings.Contains(containerPortStr, "/") { - containerPortStr = strings.Split(containerPortStr, "/")[0] - } - - containerPort, err := nat.NewPort("tcp", containerPortStr) - if err != nil { - return nil, fmt.Errorf("invalid container port %s: %w", containerPortStr, err) - } - exposedPorts[containerPort] = struct{}{} - var portBindings []nat.PortBinding - portBindings = append(portBindings, nat.PortBinding{ - HostIP: "0.0.0.0", - HostPort: hostPort, - }) - portBindingsMap[containerPort] = portBindings - } - } - - mounts := []string{ - fmt.Sprintf("%s/traefik.yml:/etc/traefik/traefik.yml:ro", tempDir), - fmt.Sprintf("%s/dynamic:/etc/traefik/dynamic:ro", tempDir), - "/var/run/docker.sock:/var/run/docker.sock:ro", - } - - config := &container.Config{ - Image: 
containerImage, - ExposedPorts: exposedPorts, - Labels: labels, - } - hostConfig := &container.HostConfig{ - Binds: mounts, - PortBindings: portBindingsMap, - } - resp, err := apiClient.ContainerCreate(ctx, config, hostConfig, nil, nil, containerName) - if err != nil { - return nil, fmt.Errorf("failed to create container: %w", err) - } - if err := apiClient.ContainerStart(ctx, resp.ID, container.StartOptions{}); err != nil { - return nil, fmt.Errorf("failed to start container: %w", err) - } - - return &resp, nil -} - -func TestRPCEndpoints(ctx context.Context, apiClient *client.Client) error { - endpoints, err := findRPCEndpoints(ctx, apiClient) - if err != nil { - return fmt.Errorf("failed to find RPC endpoints: %w", err) - } - - if len(endpoints) == 0 { - fmt.Printf("No RPC endpoints found\n") - return nil - } - - fmt.Printf("Found %d RPC endpoint(s)\n", len(endpoints)) - - var lastError error - successCount := 0 - for _, endpoint := range endpoints { - fmt.Printf("Testing %s", endpoint.Name) - if err := testRPCEndpoint(endpoint); err != nil { - fmt.Printf(" - Failed: %v\n", err) - lastError = err - } else { - fmt.Printf(" - Success\n") - successCount++ - } - } - - if successCount == 0 { - return fmt.Errorf("all RPC endpoints failed, last error: %w", lastError) - } - - fmt.Printf("RPC access test passed (%d/%d endpoints working)\n", successCount, len(endpoints)) - return nil -} - -// shortenedUUIDString returns the first 12 characters of a UUID -func shortenedUUIDString(fullUUID string) string { - lengthToTrim := 12 - if lengthToTrim > len(fullUUID) { - lengthToTrim = len(fullUUID) - } - return fullUUID[:lengthToTrim] -} - -// discoverServicesWithoutTraefikLabels discovers services that need Traefik routing rules -func discoverServicesWithoutTraefikLabels(ctx context.Context, apiClient *client.Client) ([]ServiceWithoutLabels, error) { - userFilters := filters.NewArgs() - userFilters.Add("label", "com.kurtosistech.container-type=user-service") - - containers, err 
:= apiClient.ContainerList(ctx, container.ListOptions{ - All: false, - Filters: userFilters, - }) - if err != nil { - return nil, fmt.Errorf("failed to list containers: %w", err) - } - - var servicesWithoutLabels []ServiceWithoutLabels - - for _, c := range containers { - serviceName := strings.TrimPrefix(c.Names[0], "/") - serviceUUID := c.Labels["com.kurtosistech.guid"] - enclaveUUID := c.Labels["com.kurtosistech.enclave-id"] - - containerDetails, err := apiClient.ContainerInspect(ctx, c.ID) - if err != nil { - fmt.Printf("failed to inspect container %s: %v\n", serviceName, err) - continue - } - - var portsWithoutLabels []ServicePort - processedPorts := make(map[int]bool) - - for portSpec := range containerDetails.Config.ExposedPorts { - port := portSpec.Port() - portNum := 0 - if _, err := fmt.Sscanf(port, "%d", &portNum); err == nil { - if processedPorts[portNum] { - continue - } - processedPorts[portNum] = true - hasTraefikLabelForPort := false - for labelKey := range c.Labels { - if strings.Contains(labelKey, "traefik.http.routers.") && strings.Contains(labelKey, ".rule") { - if strings.Contains(labelKey, fmt.Sprintf("-%d", portNum)) { - hasTraefikLabelForPort = true - break - } - } - } - if !hasTraefikLabelForPort { - portsWithoutLabels = append(portsWithoutLabels, ServicePort{ - Name: port, - Port: portNum, - }) - } - } - } - - if len(portsWithoutLabels) > 0 { - servicesWithoutLabels = append(servicesWithoutLabels, ServiceWithoutLabels{ - Name: serviceName, - ServiceUUID: serviceUUID, - EnclaveUUID: enclaveUUID, - Ports: portsWithoutLabels, - }) - } - } - - fmt.Printf("Discovered %d services with ports needing Traefik labels\n", len(servicesWithoutLabels)) - return servicesWithoutLabels, nil -} - -// addServiceRouters adds Traefik router rules for a service and its ports -func addServiceRouters(dynamicConfig *strings.Builder, service ServiceWithoutLabels) { - shortServiceUUID := shortenedUUIDString(service.ServiceUUID) - shortEnclaveUUID := 
shortenedUUIDString(service.EnclaveUUID) - for _, port := range service.Ports { - routerName := fmt.Sprintf("%s-%s", service.Name, port.Name) - serviceName := fmt.Sprintf("%s-%s", service.Name, port.Name) - dynamicConfig.WriteString(fmt.Sprintf(" %s:\n", routerName)) - dynamicConfig.WriteString(fmt.Sprintf(" rule: \"HostRegexp(`{^name:%d-%s-%s-?.*$}`)\"\n", port.Port, shortServiceUUID, shortEnclaveUUID)) - dynamicConfig.WriteString(fmt.Sprintf(" service: \"%s\"\n", serviceName)) - } -} - -// addServiceServices adds Traefik service definitions for a service and its ports -func addServiceServices(dynamicConfig *strings.Builder, service ServiceWithoutLabels) { - for _, port := range service.Ports { - serviceName := fmt.Sprintf("%s-%s", service.Name, port.Name) - dynamicConfig.WriteString(fmt.Sprintf(" %s:\n", serviceName)) - dynamicConfig.WriteString(" loadBalancer:\n") - dynamicConfig.WriteString(" servers:\n") - dynamicConfig.WriteString(fmt.Sprintf(" - url: \"http://%s:%d\"\n", service.Name, port.Port)) - } -} diff --git a/kurtosis-devnet/pkg/util/docker_test.go b/kurtosis-devnet/pkg/util/docker_test.go deleted file mode 100644 index ec6161e9a833f..0000000000000 --- a/kurtosis-devnet/pkg/util/docker_test.go +++ /dev/null @@ -1,234 +0,0 @@ -package util - -import ( - "fmt" - "strings" - "testing" - - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/network" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCreateKurtosisFilter(t *testing.T) { - tests := []struct { - name string - enclave []string - expectedFilter string - }{ - { - name: "no enclave specified", - enclave: []string{}, - expectedFilter: "kurtosis.devnet.enclave", - }, - { - name: "enclave specified", - enclave: []string{"test-enclave"}, - expectedFilter: "kurtosis.devnet.enclave=test-enclave", - }, - { - name: "multiple enclaves (only first used)", - enclave: []string{"enclave1", "enclave2"}, - expectedFilter: 
"kurtosis.devnet.enclave=enclave1", - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - filter := createKurtosisFilter(tt.enclave...) - - // Check that the filter has the expected label - labels := filter.Get("label") - require.Len(t, labels, 1, "Expected exactly one label filter") - assert.Equal(t, tt.expectedFilter, labels[0]) - }) - } -} - -// Helper function to create test containers for scenarios -func createTestContainer(id, name string, networks map[string]*network.EndpointSettings) types.Container { - return types.Container{ - ID: id, - Names: []string{name}, - NetworkSettings: &types.SummaryNetworkSettings{ - Networks: networks, - }, - } -} - -// Helper function to create test network endpoint -func createTestNetworkEndpoint(networkID string) *network.EndpointSettings { - return &network.EndpointSettings{ - NetworkID: networkID, - } -} - -func TestSetReverseProxyConfigLogic(t *testing.T) { - // Test the logic patterns that the function should follow - // The function should ALWAYS configure Traefik for ALL networks it has access to - - t.Run("network ID extraction logic", func(t *testing.T) { - // Test the logic for extracting ALL network IDs from Traefik's own networks - networks := map[string]*network.EndpointSettings{ - "bridge": createTestNetworkEndpoint("bridge-network-id"), - "custom1": createTestNetworkEndpoint("custom-network-id-1"), - "custom2": createTestNetworkEndpoint("custom-network-id-2"), - } - - traefikContainer := createTestContainer("traefik-id", "kurtosis-reverse-proxy-test", networks) - - // The function should collect all non-bridge networks - networkIDs := make(map[string]bool) - for networkName, network := range traefikContainer.NetworkSettings.Networks { - if networkName != "bridge" { - networkIDs[network.NetworkID] = true - } - } - - assert.Len(t, networkIDs, 2) - assert.Contains(t, networkIDs, "custom-network-id-1") - assert.Contains(t, networkIDs, "custom-network-id-2") - }) - - t.Run("traefik 
container identification", func(t *testing.T) { - // Test the logic for identifying Traefik containers - containers := []types.Container{ - createTestContainer("container1", "/some-other-container", nil), - createTestContainer("container2", "/kurtosis-reverse-proxy-12345", nil), - createTestContainer("container3", "/another-container", nil), - } - - // The function should find the container with "kurtosis-reverse-proxy" in the name - var traefikContainer *types.Container - for _, c := range containers { - if strings.Contains(c.Names[0], "kurtosis-reverse-proxy") { - traefikContainer = &c - break - } - } - - require.NotNil(t, traefikContainer, "Should find Traefik container") - assert.Equal(t, "container2", traefikContainer.ID) - }) - - t.Run("dynamic config generation", func(t *testing.T) { - // Test the dynamic configuration template for multiple networks - networkIDs := []string{"test-network-id-1", "test-network-id-2"} - - expectedConfig := `# Dynamic Traefik configuration for correct networks -providers: - dockerDynamic0: - endpoint: "unix:///var/run/docker.sock" - exposedByDefault: false - network: "test-network-id-1" - watch: true - dockerDynamic1: - endpoint: "unix:///var/run/docker.sock" - exposedByDefault: false - network: "test-network-id-2" - watch: true -` - - var actualConfig strings.Builder - actualConfig.WriteString("# Dynamic Traefik configuration for correct networks\n") - actualConfig.WriteString("providers:\n") - - for i, networkID := range networkIDs { - actualConfig.WriteString(fmt.Sprintf(` dockerDynamic%d: - endpoint: "unix:///var/run/docker.sock" - exposedByDefault: false - network: "%s" - watch: true -`, i, networkID)) - } - - assert.Equal(t, expectedConfig, actualConfig.String()) - }) -} - -func TestCheckUserServiceNetworks(t *testing.T) { - // Test the network accessibility checking logic - - t.Run("network accessibility logic", func(t *testing.T) { - // Test the logic for checking if user services have networks that Traefik doesn't have 
access to - - // Traefik has access to these networks - traefikNetworkIDs := map[string]bool{ - "network-1": true, - "network-2": true, - } - - // User service containers and their networks - userServiceContainers := []struct { - name string - networks map[string]*network.EndpointSettings - }{ - { - name: "service-1", - networks: map[string]*network.EndpointSettings{ - "bridge": createTestNetworkEndpoint("bridge-network-id"), - "network-1": createTestNetworkEndpoint("network-1"), // accessible - }, - }, - { - name: "service-2", - networks: map[string]*network.EndpointSettings{ - "bridge": createTestNetworkEndpoint("bridge-network-id"), - "network-2": createTestNetworkEndpoint("network-2"), // accessible - "network-3": createTestNetworkEndpoint("network-3"), // NOT accessible - }, - }, - } - - // Test the logic for finding unreachable networks - userServiceNetworks := make(map[string]bool) - for _, container := range userServiceContainers { - for networkName, network := range container.networks { - if networkName != "bridge" { - userServiceNetworks[network.NetworkID] = true - } - } - } - - // Find networks that user services are on but Traefik is not - unreachableNetworks := make(map[string]bool) - for networkID := range userServiceNetworks { - if !traefikNetworkIDs[networkID] { - unreachableNetworks[networkID] = true - } - } - - // Should find network-3 as unreachable - assert.Len(t, unreachableNetworks, 1) - assert.Contains(t, unreachableNetworks, "network-3") - assert.NotContains(t, unreachableNetworks, "network-1") - assert.NotContains(t, unreachableNetworks, "network-2") - }) - - t.Run("all networks accessible", func(t *testing.T) { - // Test case where all user service networks are accessible by Traefik - - traefikNetworkIDs := map[string]bool{ - "network-1": true, - "network-2": true, - "network-3": true, - } - - userServiceNetworks := map[string]bool{ - "network-1": true, - "network-2": true, - } - - // Find unreachable networks - unreachableNetworks := 
make(map[string]bool) - for networkID := range userServiceNetworks { - if !traefikNetworkIDs[networkID] { - unreachableNetworks[networkID] = true - } - } - - // Should find no unreachable networks - assert.Len(t, unreachableNetworks, 0) - }) -} diff --git a/kurtosis-devnet/pkg/util/retry.go b/kurtosis-devnet/pkg/util/retry.go deleted file mode 100644 index 05b7a9c38865f..0000000000000 --- a/kurtosis-devnet/pkg/util/retry.go +++ /dev/null @@ -1,35 +0,0 @@ -package util - -import ( - "context" - "fmt" - "log" - "time" -) - -// WithRetry executes a function with exponential backoff retry logic -// This is specifically designed for handling gRPC connection timeouts with Kurtosis -func WithRetry[T any](ctx context.Context, operation string, fn func() (T, error)) (T, error) { - var result T - var err error - - for attempt := 1; attempt <= 3; attempt++ { - result, err = fn() - if err == nil { - if attempt > 1 { - log.Printf("✅ Successfully completed %s on attempt %d", operation, attempt) - } - return result, nil - } - - log.Printf("❌ Attempt %d failed for %s: %v", attempt, operation, err) - - if attempt < 3 { - sleepDuration := time.Duration(attempt*2) * time.Second - log.Printf("⏳ Retrying %s in %v...", operation, sleepDuration) - time.Sleep(sleepDuration) - } - } - - return result, fmt.Errorf("%s failed after 3 attempts: %w", operation, err) -} diff --git a/kurtosis-devnet/pkg/util/retry_test.go b/kurtosis-devnet/pkg/util/retry_test.go deleted file mode 100644 index 7e4685dc7a532..0000000000000 --- a/kurtosis-devnet/pkg/util/retry_test.go +++ /dev/null @@ -1,255 +0,0 @@ -package util - -import ( - "context" - "errors" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestWithRetry(t *testing.T) { - tests := []struct { - name string - operation string - setupFunc func() func() (string, error) - expectedResult string - expectError bool - expectedErrorString string - expectedAttempts int - timeout time.Duration - }{ - { - name: "success on first 
attempt", - operation: "test-operation", - setupFunc: func() func() (string, error) { - return func() (string, error) { - return "success", nil - } - }, - expectedResult: "success", - expectError: false, - expectedAttempts: 1, - timeout: 5 * time.Second, - }, - { - name: "success on second attempt", - operation: "test-operation", - setupFunc: func() func() (string, error) { - attempts := 0 - return func() (string, error) { - attempts++ - if attempts < 2 { - return "", errors.New("temporary failure") - } - return "success", nil - } - }, - expectedResult: "success", - expectError: false, - expectedAttempts: 2, - timeout: 10 * time.Second, - }, - { - name: "success on third attempt", - operation: "test-operation", - setupFunc: func() func() (string, error) { - attempts := 0 - return func() (string, error) { - attempts++ - if attempts < 3 { - return "", errors.New("temporary failure") - } - return "success", nil - } - }, - expectedResult: "success", - expectError: false, - expectedAttempts: 3, - timeout: 15 * time.Second, - }, - { - name: "failure after all attempts", - operation: "test-operation", - setupFunc: func() func() (string, error) { - return func() (string, error) { - return "", errors.New("persistent failure") - } - }, - expectedResult: "", - expectError: true, - expectedErrorString: "test-operation failed after 3 attempts: persistent failure", - expectedAttempts: 3, - timeout: 15 * time.Second, - }, - { - name: "different return type - int", - operation: "integer-operation", - setupFunc: func() func() (string, error) { - // Note: This test uses string but we'll test int in a separate function - return func() (string, error) { - return "42", nil - } - }, - expectedResult: "42", - expectError: false, - expectedAttempts: 1, - timeout: 5 * time.Second, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - ctx, cancel := context.WithTimeout(context.Background(), tt.timeout) - defer cancel() - - fn := tt.setupFunc() - start := time.Now() 
- - result, err := WithRetry(ctx, tt.operation, fn) - - duration := time.Since(start) - - if tt.expectError { - assert.Error(t, err) - assert.Contains(t, err.Error(), tt.expectedErrorString) - assert.Equal(t, tt.expectedResult, result) - } else { - assert.NoError(t, err) - assert.Equal(t, tt.expectedResult, result) - } - - // Verify timing for multi-attempt scenarios - if tt.expectedAttempts > 1 { - // Expected minimum time: (attempt-1) * 2 + (attempt-2) * 4 seconds - // For 2 attempts: 2 seconds minimum - // For 3 attempts: 2 + 4 = 6 seconds minimum - expectedMinDuration := time.Duration(0) - for i := 1; i < tt.expectedAttempts; i++ { - expectedMinDuration += time.Duration(i*2) * time.Second - } - - if tt.expectError && tt.expectedAttempts == 3 { - // Should take at least 6 seconds for 3 failed attempts - assert.True(t, duration >= expectedMinDuration, - "Expected at least %v but took %v", expectedMinDuration, duration) - } else if !tt.expectError && tt.expectedAttempts > 1 { - // Should take at least the retry delay time - assert.True(t, duration >= expectedMinDuration, - "Expected at least %v but took %v", expectedMinDuration, duration) - } - } - }) - } -} - -func TestWithRetryContextCancellation(t *testing.T) { - // Note: The current implementation doesn't actually respect context cancellation - // during sleep operations, so this test verifies the current behavior - ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) - defer cancel() - - callCount := 0 - fn := func() (string, error) { - callCount++ - // Check if context is cancelled at the start of each call - select { - case <-ctx.Done(): - return "", ctx.Err() - default: - } - - // Always fail to test retry behavior - return "", errors.New("test failure") - } - - start := time.Now() - result, err := WithRetry(ctx, "context-cancel-test", fn) - duration := time.Since(start) - - assert.Error(t, err) - assert.Contains(t, err.Error(), "context-cancel-test failed after 3 attempts") - 
assert.Equal(t, "", result) - // The function should complete all 3 attempts even with context timeout - // because the current implementation doesn't respect context cancellation during sleep - assert.True(t, duration >= 6*time.Second, "Should complete all retry attempts") -} - -func TestWithRetryDifferentTypes(t *testing.T) { - t.Run("integer return type", func(t *testing.T) { - ctx := context.Background() - - fn := func() (int, error) { - return 42, nil - } - - result, err := WithRetry(ctx, "integer-test", fn) - - assert.NoError(t, err) - assert.Equal(t, 42, result) - }) - - t.Run("struct return type", func(t *testing.T) { - ctx := context.Background() - - type TestStruct struct { - Name string - Value int - } - - expected := TestStruct{Name: "test", Value: 123} - fn := func() (TestStruct, error) { - return expected, nil - } - - result, err := WithRetry(ctx, "struct-test", fn) - - assert.NoError(t, err) - assert.Equal(t, expected, result) - }) - - t.Run("pointer return type", func(t *testing.T) { - ctx := context.Background() - - expected := &struct{ Value string }{Value: "test"} - fn := func() (*struct{ Value string }, error) { - return expected, nil - } - - result, err := WithRetry(ctx, "pointer-test", fn) - - assert.NoError(t, err) - assert.Equal(t, expected, result) - }) -} - -func TestWithRetryErrorPropagation(t *testing.T) { - ctx := context.Background() - - originalErr := errors.New("original error") - fn := func() (string, error) { - return "", originalErr - } - - result, err := WithRetry(ctx, "error-propagation", fn) - - assert.Error(t, err) - assert.Equal(t, "", result) - assert.Contains(t, err.Error(), "error-propagation failed after 3 attempts") - assert.True(t, errors.Is(err, originalErr), "Should wrap the original error") -} - -func TestWithRetryOperationName(t *testing.T) { - ctx := context.Background() - - operationName := "custom-operation-name" - fn := func() (string, error) { - return "", errors.New("test error") - } - - _, err := 
WithRetry(ctx, operationName, fn) - - assert.Error(t, err) - assert.Contains(t, err.Error(), operationName) -} diff --git a/kurtosis-devnet/pkg/util/util.go b/kurtosis-devnet/pkg/util/util.go deleted file mode 100644 index ab871c2840007..0000000000000 --- a/kurtosis-devnet/pkg/util/util.go +++ /dev/null @@ -1,70 +0,0 @@ -package util - -import ( - "fmt" - "io" - "os" - "path/filepath" - - "github.com/spf13/afero" -) - -// CopyDir copies a directory from src to dst using the provided filesystem. -// If no filesystem is provided, it uses the OS filesystem. -func CopyDir(src string, dst string, fs afero.Fs) error { - if fs == nil { - fs = afero.NewOsFs() - } - - // First ensure the source exists - srcInfo, err := fs.Stat(src) - if err != nil { - return err - } - if !srcInfo.IsDir() { - return fmt.Errorf("source path %s is not a directory", src) - } - - // Create the destination directory - err = fs.MkdirAll(dst, srcInfo.Mode()) - if err != nil { - return err - } - - // Walk through the source directory - return afero.Walk(fs, src, func(path string, info os.FileInfo, err error) error { - if err != nil { - return err - } - - // Get relative path - relPath, err := filepath.Rel(src, path) - if err != nil { - return err - } - - // Construct destination path - dstPath := filepath.Join(dst, relPath) - - if info.IsDir() { - // Create directories with same permissions - return fs.MkdirAll(dstPath, info.Mode()) - } - - // Copy files - srcFile, err := fs.Open(path) - if err != nil { - return err - } - defer srcFile.Close() - - dstFile, err := fs.OpenFile(dstPath, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, info.Mode()) - if err != nil { - return err - } - defer dstFile.Close() - - _, err = io.Copy(dstFile, srcFile) - return err - }) -} diff --git a/kurtosis-devnet/pkg/util/util_test.go b/kurtosis-devnet/pkg/util/util_test.go deleted file mode 100644 index 1b875b971555c..0000000000000 --- a/kurtosis-devnet/pkg/util/util_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package util - -import ( - 
"path/filepath" - "testing" - - "github.com/spf13/afero" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestCopyDir(t *testing.T) { - tests := []struct { - name string - setupFiles map[string]string - src string - dst string - expectError bool - }{ - { - name: "successful copy of directory with files", - setupFiles: map[string]string{ - "/src/file1.txt": "content1", - "/src/file2.txt": "content2", - "/src/subdir/file3.txt": "content3", - }, - src: "/src", - dst: "/dst", - expectError: false, - }, - { - name: "source directory does not exist", - setupFiles: map[string]string{}, - src: "/nonexistent", - dst: "/dst", - expectError: true, - }, - { - name: "source is not a directory", - setupFiles: map[string]string{ - "/src": "file content", - }, - src: "/src", - dst: "/dst", - expectError: true, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - // Create a new memory filesystem for each test - fs := afero.NewMemMapFs() - - // Set up test files - for path, content := range tt.setupFiles { - dir := filepath.Dir(path) - err := fs.MkdirAll(dir, 0755) - require.NoError(t, err, "Failed to create directory") - - err = afero.WriteFile(fs, path, []byte(content), 0644) - require.NoError(t, err, "Failed to write test file") - } - - // Execute the copy - err := CopyDir(tt.src, tt.dst, fs) - - if tt.expectError { - assert.Error(t, err) - return - } - - assert.NoError(t, err) - - // Verify the copied files - for srcPath, expectedContent := range tt.setupFiles { - // Skip if the source path is not a file (e.g., when testing error cases) - info, err := fs.Stat(srcPath) - if err != nil || !info.Mode().IsRegular() { - continue - } - - // Calculate destination path - relPath, err := filepath.Rel(tt.src, srcPath) - require.NoError(t, err) - dstPath := filepath.Join(tt.dst, relPath) - - // Verify file exists and content matches - content, err := afero.ReadFile(fs, dstPath) - assert.NoError(t, err, "Failed to read 
copied file: %s", dstPath) - assert.Equal(t, expectedContent, string(content), "Content mismatch for file: %s", dstPath) - - // Verify permissions - srcInfo, err := fs.Stat(srcPath) - require.NoError(t, err) - dstInfo, err := fs.Stat(dstPath) - require.NoError(t, err) - assert.Equal(t, srcInfo.Mode(), dstInfo.Mode(), "Mode mismatch for file: %s", dstPath) - } - }) - } -} diff --git a/kurtosis-devnet/simple.yaml b/kurtosis-devnet/simple.yaml deleted file mode 100644 index 351ac0230ac5c..0000000000000 --- a/kurtosis-devnet/simple.yaml +++ /dev/null @@ -1,89 +0,0 @@ -optimism_package: - faucet: - enabled: true - image: {{ localDockerImage "op-faucet" }} - chains: - op-kurtosis: - participants: - node0: - el: - type: op-geth - image: "" - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - cl: - type: op-node - image: {{ localDockerImage "op-node" }} - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 - mev_params: - image: "" - builder_host: "" - builder_port: "" - network_params: - network: "kurtosis" - network_id: "2151908" - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - isthmus_time_offset: 0 - fund_dev_accounts: true - batcher_params: - image: {{ localDockerImage "op-batcher" }} - extra_params: [] - proposer_params: - image: {{ localDockerImage "op-proposer" }} - extra_params: [] - game_type: 1 - proposal_interval: 10m - challengers: - challenger: - enabled: true - image: {{ localDockerImage "op-challenger" }} - participants: "*" - cannon_prestates_url: {{ localPrestate.URL }} - cannon_trace_types: ["cannon", "permissioned"] - op_contract_deployer_params: - image: {{ localDockerImage "op-deployer" }} - l1_artifacts_locator: {{ localContractArtifacts "l1" }} - l2_artifacts_locator: {{ 
localContractArtifacts "l2" }} - overrides: - faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate_mt64 }} - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - participants: - - el_type: geth - cl_type: teku - cl_image: consensys/teku:25.7.1 - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: | - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } diff --git a/kurtosis-devnet/templates/devnet.yaml b/kurtosis-devnet/templates/devnet.yaml deleted file mode 100644 index 032b2ef98f924..0000000000000 --- a/kurtosis-devnet/templates/devnet.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{{- $context := or . (dict)}} -{{- $default_l2s := dict - "2151908" (dict "nodes" (list "op-geth")) - "2151909" (dict "nodes" (list "op-geth")) -}} -{{- $l2s := dig "l2s" $default_l2s $context }} -{{- $overrides := dig "overrides" (dict) $context }} -{{- $interop := dig "interop" false $context }} ---- -optimism_package: -{{ if $interop }} - interop: - enabled: true - supervisor_params: - image: {{ dig "overrides" "images" "op_supervisor" (localDockerImage "op-supervisor") $context }} - extra_params: - - {{ dig "overrides" "flags" "log_level" "!!str" $context }} -{{ end }} - chains: - {{ range $l2_id, $l2 := $l2s }} - op-kurtosis-{{ $l2_id }}: - {{ include "l2.yaml" (dict "chain_id" $l2_id "overrides" $overrides "nodes" $l2.nodes) }} - {{ end }} - op_contract_deployer_params: - image: {{ dig "overrides" "images" "op_deployer" (localDockerImage "op-deployer") $context }} - l1_artifacts_locator: {{ dig "overrides" "urls" "l1_artifacts" (localContractArtifacts "l1") $context }} - l2_artifacts_locator: {{ dig "overrides" "urls" "l2_artifacts" (localContractArtifacts "l2") $context 
}} -{{ if $interop }} - global_deploy_overrides: - faultGameAbsolutePrestate: {{ dig "overrides" "deployer" "prestate" (localPrestate.Hashes.prestate_mt64) $context }} -{{ end }} - global_log_level: "info" - global_node_selectors: {} - global_tolerations: [] - persistent: false -ethereum_package: - participants: - - el_type: geth - cl_type: teku - network_params: - preset: minimal - genesis_delay: 5 - additional_preloaded_contracts: | - { - "0x4e59b44847b379578588920cA78FbF26c0B4956C": { - "balance": "0ETH", - "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", - "storage": {}, - "nonce": "1" - } - } diff --git a/kurtosis-devnet/templates/l2.yaml b/kurtosis-devnet/templates/l2.yaml deleted file mode 100644 index 268cfb8a75fb5..0000000000000 --- a/kurtosis-devnet/templates/l2.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- $context := or . (dict)}} -{{- $nodes := dig "nodes" (list "op-geth") $context -}} ---- -participants: -{{- range $node_id, $node := $nodes }} - {{ $node_id }}: - {{ include "local-op-node.yaml" (dict "overrides" $context.overrides "el_type" $node) }} -{{- end }} -network_params: - network: "kurtosis" - network_id: "{{ .chain_id }}" - seconds_per_slot: 2 - fjord_time_offset: 0 - granite_time_offset: 0 - holocene_time_offset: 0 - isthmus_time_offset: 0 - jovian_time_offset: 0 - interop_time_offset: 0 - fund_dev_accounts: true -batcher_params: - image: {{ dig "overrides" "images" "op_batcher" (localDockerImage "op-batcher") $context }} - extra_params: - - {{ dig "overrides" "flags" "log_level" "!!str" $context }} -challenger_params: - image: {{ dig "overrides" "images" "op_challenger" (localDockerImage "op-challenger") $context }} - cannon_prestate_path: "" - cannon_prestates_url: {{ dig "overrides" "urls" "prestate" (localPrestate.URL) $context }} - extra_params: - - {{ dig "overrides" "flags" "log_level" "!!str" $context }} -proposer_params: - image: {{ 
dig "overrides" "images" "op_proposer" (localDockerImage "op-proposer") $context }} - extra_params: - - {{ dig "overrides" "flags" "log_level" "!!str" $context }} - game_type: 1 - proposal_interval: 10m diff --git a/kurtosis-devnet/templates/local-op-node.yaml b/kurtosis-devnet/templates/local-op-node.yaml deleted file mode 100644 index 3b810bc9825ed..0000000000000 --- a/kurtosis-devnet/templates/local-op-node.yaml +++ /dev/null @@ -1,33 +0,0 @@ -{{- $context := or . (dict)}} -{{- $el_type := dig "el_type" "op-geth" $context -}} ---- -el: - type: {{ $el_type }} - image: {{ dig "overrides" "images" $el_type "!!str" $context }} - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 -cl: - type: op-node - image: {{ dig "overrides" "images" "op-node" (localDockerImage "op-node") $context }} - log_level: "" - extra_env_vars: {} - extra_labels: {} - extra_params: [] - tolerations: [] - volume_size: 0 - min_cpu: 0 - max_cpu: 0 - min_mem: 0 - max_mem: 0 -mev_params: - image: "" - builder_host: "" - builder_port: "" \ No newline at end of file diff --git a/kurtosis-devnet/tests/.gitignore b/kurtosis-devnet/tests/.gitignore deleted file mode 100644 index e3339ba9266cb..0000000000000 --- a/kurtosis-devnet/tests/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.json -*.toml diff --git a/kurtosis-devnet/tests/boilerplate.sh b/kurtosis-devnet/tests/boilerplate.sh deleted file mode 100644 index 70e088ab7f3d5..0000000000000 --- a/kurtosis-devnet/tests/boilerplate.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env bash - -set -euo pipefail - -# Default values -DEVNET="" -ENVIRONMENT="" - -# Parse command line arguments -while [[ $# -gt 0 ]]; do - case "$1" in - --devnet) - DEVNET="$2" - shift 2 - ;; - --environment) - ENVIRONMENT="$2" - shift 2 - ;; - *) - echo "Invalid option: $1" >&2 - exit 1 - ;; - esac -done - -# Validate required arguments -if [ -z "$DEVNET" ]; then - echo 
"Error: --devnet argument is required" >&2 - exit 1 -fi - -if [ -z "$ENVIRONMENT" ]; then - echo "Error: --environment argument is required" >&2 - exit 1 -fi diff --git a/kurtosis-devnet/tests/interop-smoke-test.sh b/kurtosis-devnet/tests/interop-smoke-test.sh deleted file mode 100644 index 4fbc6ce0f3dd3..0000000000000 --- a/kurtosis-devnet/tests/interop-smoke-test.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/usr/bin/env bash - -# TODO: actually test something. Right now it just gives an idea of what's -# possible. - -# shellcheck disable=SC1091 -source "$(dirname "$0")/boilerplate.sh" - -echo "DEVNET: $DEVNET" -echo "ENVIRONMENT:" -cat "$ENVIRONMENT" - -l1_name=$(cat "$ENVIRONMENT" | jq -r '.l1.name') -echo "L1 NAME: $l1_name" - -cast --version diff --git a/kurtosis-devnet/tests/kurtosis.yml b/kurtosis-devnet/tests/kurtosis.yml deleted file mode 100644 index 0fa3a586c742c..0000000000000 --- a/kurtosis-devnet/tests/kurtosis.yml +++ /dev/null @@ -1,4 +0,0 @@ -name: github.com/ethereum-optimism/optimism/kurtosis-devnet/tests -description: |- - Kurtosis package for running tests within the enclave -replace: {} diff --git a/kurtosis-devnet/tests/main.star b/kurtosis-devnet/tests/main.star deleted file mode 100644 index a962584f3c690..0000000000000 --- a/kurtosis-devnet/tests/main.star +++ /dev/null @@ -1,23 +0,0 @@ -""" -This is the main script for the kurtosis test runner. 
-""" - -def run(plan, devnet, timestamp, tests): - - tests_artifact = plan.upload_files( - src = "./", - name = "tests", - description = "uploading tests" - ) - - for test in tests: - plan.run_sh( - run = "/bin/bash /tests/{} --devnet {} --environment /tests/{}.json".format(test, devnet, devnet), - name = "{}-{}".format(test, timestamp), - image = "mslipper/deployment-utils:latest", - wait="180s", - - files = { - "/tests": tests_artifact, - }, - ) diff --git a/kurtosis-devnet/user.yaml b/kurtosis-devnet/user.yaml deleted file mode 100644 index 89b4fd84285a3..0000000000000 --- a/kurtosis-devnet/user.yaml +++ /dev/null @@ -1,3 +0,0 @@ -{{- $context := or . (dict)}} ---- -{{ include "templates/devnet.yaml" $context }} diff --git a/op-acceptance-tests/justfile b/op-acceptance-tests/justfile index 5547fb3c13d3a..27bdb5f8a1676 100644 --- a/op-acceptance-tests/justfile +++ b/op-acceptance-tests/justfile @@ -1,5 +1,4 @@ REPO_ROOT := `realpath ..` # path to the root of the optimism monorepo -KURTOSIS_DIR := REPO_ROOT + "/kurtosis-devnet" ACCEPTOR_VERSION := env_var_or_default("ACCEPTOR_VERSION", "v3.10.2") DOCKER_REGISTRY := env_var_or_default("DOCKER_REGISTRY", "us-docker.pkg.dev/oplabs-tools-artifacts/images") ACCEPTOR_IMAGE := env_var_or_default("ACCEPTOR_IMAGE", DOCKER_REGISTRY + "/op-acceptor:" + ACCEPTOR_VERSION) diff --git a/devnet-sdk/scripts/metrics-collect-authorship.sh b/op-acceptance-tests/scripts/metrics-collect-authorship.sh similarity index 84% rename from devnet-sdk/scripts/metrics-collect-authorship.sh rename to op-acceptance-tests/scripts/metrics-collect-authorship.sh index b498c7b566e61..cff633c087e01 100755 --- a/devnet-sdk/scripts/metrics-collect-authorship.sh +++ b/op-acceptance-tests/scripts/metrics-collect-authorship.sh @@ -8,7 +8,6 @@ if [ -z "$DIRECTORY" ]; then exit 1 fi -# Extract authorship data of target directory from git history echo -n "dt,author,commit" cd "$DIRECTORY" git ls-files | while read -r file; do diff --git 
a/op-acceptance-tests/tests/interop/contract/interop_contract_test.go b/op-acceptance-tests/tests/interop/contract/interop_contract_test.go index ae93f8ea2b118..ca5b6193b0b43 100644 --- a/op-acceptance-tests/tests/interop/contract/interop_contract_test.go +++ b/op-acceptance-tests/tests/interop/contract/interop_contract_test.go @@ -4,7 +4,7 @@ import ( "math/rand" "testing" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" + "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -43,7 +43,7 @@ func TestRegularMessage(gt *testing.T) { logger.Info("Send message", "address", eventLoggerAddress, "topicCnt", len(topics), "dataLen", len(data)) trigger := &txintent.SendTrigger{ - Emitter: constants.L2ToL2CrossDomainMessenger, + Emitter: predeploys.L2toL2CrossDomainMessengerAddr, DestChainID: bob.ChainID(), Target: eventLoggerAddress, RelayedCalldata: calldata, @@ -55,7 +55,7 @@ func TestRegularMessage(gt *testing.T) { sendMsgReceipt, err := txA.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err, "send msg receipt not found") require.Equal(1, len(sendMsgReceipt.Logs)) // SentMessage event - require.Equal(constants.L2ToL2CrossDomainMessenger, sendMsgReceipt.Logs[0].Address) + require.Equal(predeploys.L2toL2CrossDomainMessengerAddr, sendMsgReceipt.Logs[0].Address) // Make sure supervisor syncs the chain A events sys.Supervisor.WaitForUnsafeHeadToAdvance(alice.ChainID(), 2) @@ -64,14 +64,14 @@ func TestRegularMessage(gt *testing.T) { txB := txintent.NewIntent[*txintent.RelayTrigger, *txintent.InteropOutput](bob.Plan()) txB.Content.DependOn(&txA.Result) idx := 0 - txB.Content.Fn(txintent.RelayIndexed(constants.L2ToL2CrossDomainMessenger, &txA.Result, &txA.PlannedTx.Included, idx)) + txB.Content.Fn(txintent.RelayIndexed(predeploys.L2toL2CrossDomainMessengerAddr, &txA.Result, 
&txA.PlannedTx.Included, idx)) relayMsgReceipt, err := txB.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err, "relay msg receipt not found") // ExecutingMessage, EventLogger, RelayedMessage Events require.Equal(3, len(relayMsgReceipt.Logs)) - for logIdx, addr := range []common.Address{constants.CrossL2Inbox, eventLoggerAddress, constants.L2ToL2CrossDomainMessenger} { + for logIdx, addr := range []common.Address{predeploys.CrossL2InboxAddr, eventLoggerAddress, predeploys.L2toL2CrossDomainMessengerAddr} { require.Equal(addr, relayMsgReceipt.Logs[logIdx].Address) } // EventLogger topics and data diff --git a/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go b/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go index e1fd27cb5e812..e7de2a5fb9edb 100644 --- a/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go +++ b/op-acceptance-tests/tests/interop/loadtest/interop_load_test.go @@ -11,7 +11,7 @@ import ( "testing" "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" + "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" @@ -58,7 +58,7 @@ type BlockRefByLabel interface { func planExecMsg(t devtest.T, initMsg *suptypes.Message, blockTime time.Duration, el BlockRefByLabel) txplan.Option { t.Require().NotNil(initMsg) return txplan.Combine(planCall(t, &txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, + Executor: predeploys.CrossL2InboxAddr, Msg: *initMsg, }), func(tx *txplan.PlannedTx) { tx.AgainstBlock.Wrap(func(fn plan.Fn[eth.BlockInfo]) plan.Fn[eth.BlockInfo] { diff --git a/op-acceptance-tests/tests/interop/loadtest/l2.go b/op-acceptance-tests/tests/interop/loadtest/l2.go index d76ab77275478..39a624b7fa4bf 100644 --- a/op-acceptance-tests/tests/interop/loadtest/l2.go +++ 
b/op-acceptance-tests/tests/interop/loadtest/l2.go @@ -4,10 +4,10 @@ import ( "sync/atomic" "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-service/txinclude" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum/go-ethereum/common" ethtypes "github.com/ethereum/go-ethereum/core/types" diff --git a/op-acceptance-tests/tests/interop/message/interop_msg_test.go b/op-acceptance-tests/tests/interop/message/interop_msg_test.go index 2851792db1269..d8a4afe9a3a3a 100644 --- a/op-acceptance-tests/tests/interop/message/interop_msg_test.go +++ b/op-acceptance-tests/tests/interop/message/interop_msg_test.go @@ -7,7 +7,6 @@ import ( "testing" "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" @@ -265,7 +264,7 @@ func TestInitExecMultipleMsg(gt *testing.T) { interop.RandomInitTrigger(rng, eventLoggerAddress, 2, 13), } txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) + txA.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: initCalls}) // Trigger two events receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) @@ -282,7 +281,7 @@ func TestInitExecMultipleMsg(gt *testing.T) { // Two events in tx so use every index indexes := []int{0, 1} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) + txB.Content.Fn(txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, 
predeploys.CrossL2InboxAddr, &txA.Result, indexes)) receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err) @@ -325,7 +324,7 @@ func TestExecSameMsgTwice(gt *testing.T) { // Single event in tx so indexes are 0, 0 indexes := []int{0, 0} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) + txB.Content.Fn(txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &txA.Result, indexes)) receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err) @@ -357,7 +356,7 @@ func TestExecDifferentTopicCount(gt *testing.T) { initCalls[topicCnt] = interop.RandomInitTrigger(rng, eventLoggerAddress, topicCnt, 10) } txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) + txA.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: initCalls}) // Trigger five events, each have {0, 1, 2, 3, 4} topics in it receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) @@ -378,7 +377,7 @@ func TestExecDifferentTopicCount(gt *testing.T) { // Five events in tx so use every index indexes := []int{0, 1, 2, 3, 4} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) + txB.Content.Fn(txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &txA.Result, indexes)) receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err) @@ -410,7 +409,7 @@ func TestExecMsgOpaqueData(gt *testing.T) { initCalls[1] = largeInitTrigger txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) + txA.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: initCalls}) // Trigger two events receiptA, err := 
txA.PlannedTx.Included.Eval(t.Ctx()) @@ -429,7 +428,7 @@ func TestExecMsgOpaqueData(gt *testing.T) { // Two events in tx so use every index indexes := []int{0, 1} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) + txB.Content.Fn(txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &txA.Result, indexes)) receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err) @@ -461,7 +460,7 @@ func TestExecMsgDifferEventIndexInSingleTx(gt *testing.T) { } txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) + txA.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: initCalls}) // Trigger multiple events receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) @@ -478,7 +477,7 @@ func TestExecMsgDifferEventIndexInSingleTx(gt *testing.T) { // first, random or last event of a tx. 
indexes := []int{0, 1 + rng.Intn(eventCnt-1), eventCnt - 1} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) + txB.Content.Fn(txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &txA.Result, indexes)) receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err) @@ -584,7 +583,7 @@ func TestExecMessageInvalidAttributes(gt *testing.T) { interop.RandomInitTrigger(rng, eventLoggerAddress, 1, 50), } txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](alice.Plan()) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) + txA.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: initCalls}) // Trigger multiple events receiptA, err := txA.PlannedTx.Included.Eval(t.Ctx()) @@ -611,7 +610,7 @@ func TestExecMessageInvalidAttributes(gt *testing.T) { // Random select event index in tx for injecting faults eventIdx := rng.Intn(len(initCalls)) - txC.Content.Fn(executeIndexedFault(constants.CrossL2Inbox, &txA.Result, eventIdx, rng, faults, chuck.ChainID())) + txC.Content.Fn(executeIndexedFault(predeploys.CrossL2InboxAddr, &txA.Result, eventIdx, rng, faults, chuck.ChainID())) // make sure that the transaction is not reverted by CrossL2Inbox... 
gas, err := txC.PlannedTx.Gas.Eval(t.Ctx()) @@ -633,7 +632,7 @@ func TestExecMessageInvalidAttributes(gt *testing.T) { // Three events in tx so use every index indexes := []int{0, 1, 2} - txB.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, indexes)) + txB.Content.Fn(txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &txA.Result, indexes)) receiptB, err := txB.PlannedTx.Included.Eval(t.Ctx()) require.NoError(err) diff --git a/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go b/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go index d35b58ede0987..559caf9e4c087 100644 --- a/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/init_exec_msg_test.go @@ -5,9 +5,8 @@ import ( "testing" "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" @@ -15,6 +14,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txintent" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" "github.com/ethereum/go-ethereum/common" @@ -100,7 +100,7 @@ func TestReorgInitExecMsg(gt *testing.T) { execTx = txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) execTx.Content.DependOn(&initTx.Result) // single event in tx so index is 0. 
ExecuteIndexed returns a lambda to transform InteropOutput to a new ExecTrigger - execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initTx.Result, 0)) + execTx.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initTx.Result, 0)) var err error execReceipt, err = execTx.PlannedTx.Included.Eval(ctx) require.NoError(t, err) diff --git a/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go b/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go index d05dba3e091fd..0d6444e6fbf7f 100644 --- a/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go +++ b/op-acceptance-tests/tests/interop/reorgs/invalid_exec_msgs_test.go @@ -7,14 +7,14 @@ import ( "testing" "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/presets" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/wait" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txintent" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txplan" suptypes "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/types" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" @@ -134,7 +134,7 @@ func testReorgInvalidExecMsg(gt *testing.T, txModifierFn func(msg *suptypes.Mess // modify the message in order to make it invalid txModifierFn(&msg) return &txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, + Executor: predeploys.CrossL2InboxAddr, Msg: msg, }, nil }) diff --git a/op-acceptance-tests/tests/interop/upgrade/pre_test.go 
b/op-acceptance-tests/tests/interop/upgrade/pre_test.go index e89ba2e0d0be3..ee4a3d566dd7f 100644 --- a/op-acceptance-tests/tests/interop/upgrade/pre_test.go +++ b/op-acceptance-tests/tests/interop/upgrade/pre_test.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-core/predeploys" @@ -82,7 +81,7 @@ func TestPreNoInbox(gt *testing.T) { // send executing message on chain B and confirm we got an error execTx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) execTx.Content.DependOn(&initMsg.Tx.Result) - execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)) + execTx.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initMsg.Tx.Result, 0)) execReceipt, err := execTx.PlannedTx.Included.Eval(sys.T.Ctx()) require.ErrorContains(err, "implementation not initialized", "error did not contain expected string") require.Nil(execReceipt) @@ -99,7 +98,7 @@ func TestPreNoInbox(gt *testing.T) { { ctx := sys.T.Ctx() - execTrigger, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)(ctx) + execTrigger, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initMsg.Tx.Result, 0)(ctx) require.NoError(err) ed := stypes.ExecutingDescriptor{Timestamp: uint64(time.Now().Unix())} diff --git a/op-acceptance-tests/tests/isthmus/isthmus_test_helpers.go b/op-acceptance-tests/tests/isthmus/isthmus_test_helpers.go index 62330c92026e6..03b02df651ed9 100644 --- a/op-acceptance-tests/tests/isthmus/isthmus_test_helpers.go +++ b/op-acceptance-tests/tests/isthmus/isthmus_test_helpers.go @@ -2,15 +2,21 @@ package isthmus import ( "context" + "crypto/ecdsa" - "github.com/ethereum-optimism/optimism/devnet-sdk/system" 
"github.com/ethereum-optimism/optimism/op-service/retry" + "github.com/ethereum-optimism/optimism/op-service/sources" "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/vm/program" ) -func DefaultTxSubmitOptions(w system.WalletV2) txplan.Option { +type walletV2 interface { + PrivateKey() *ecdsa.PrivateKey + Client() *sources.EthClient +} + +func DefaultTxSubmitOptions(w walletV2) txplan.Option { return txplan.Combine( txplan.WithPrivateKey(w.PrivateKey()), txplan.WithChainID(w.Client()), @@ -21,21 +27,21 @@ func DefaultTxSubmitOptions(w system.WalletV2) txplan.Option { ) } -func DefaultTxInclusionOptions(w system.WalletV2) txplan.Option { +func DefaultTxInclusionOptions(w walletV2) txplan.Option { return txplan.Combine( txplan.WithRetryInclusion(w.Client(), 10, retry.Exponential()), txplan.WithBlockInclusionInfo(w.Client()), ) } -func DefaultTxOpts(w system.WalletV2) txplan.Option { +func DefaultTxOpts(w walletV2) txplan.Option { return txplan.Combine( DefaultTxSubmitOptions(w), DefaultTxInclusionOptions(w), ) } -func DeployProgram(ctx context.Context, wallet system.WalletV2, code []byte) (common.Address, error) { +func DeployProgram(ctx context.Context, wallet walletV2, code []byte) (common.Address, error) { deployProgram := program.New().ReturnViaCodeCopy(code) opts := DefaultTxOpts(wallet) diff --git a/op-challenger/README.md b/op-challenger/README.md index cc6af5a908e57..4961daff30b6d 100644 --- a/op-challenger/README.md +++ b/op-challenger/README.md @@ -21,12 +21,8 @@ accessed by running `./op-challenger --help`. ### Running with Cannon on Local Devnet -To run `op-challenger` against the local devnet, first clean and run -the devnet. From the root of the repository run: - -```shell -cd kurtosis-devnet && just simple-devnet -``` +To run `op-challenger` against a local devnet, first start a local devnet +that exposes the `simple-devnet` enclave. 
Then build the `op-challenger` with `make op-challenger`. diff --git a/op-deployer/pkg/deployer/apply.go b/op-deployer/pkg/deployer/apply.go index b4df32a3b4062..7a81788180bfc 100644 --- a/op-deployer/pkg/deployer/apply.go +++ b/op-deployer/pkg/deployer/apply.go @@ -10,7 +10,6 @@ import ( "github.com/ethereum-optimism/optimism/op-service/ioutil" - "github.com/ethereum-optimism/optimism/devnet-sdk/proofs/prestate" "github.com/ethereum-optimism/optimism/op-chain-ops/foundry" "github.com/ethereum-optimism/optimism/op-chain-ops/script" "github.com/ethereum-optimism/optimism/op-chain-ops/script/forking" @@ -27,6 +26,7 @@ import ( opcrypto "github.com/ethereum-optimism/optimism/op-service/crypto" "github.com/ethereum-optimism/optimism/op-service/ctxinterrupt" oplog "github.com/ethereum-optimism/optimism/op-service/log" + "github.com/ethereum-optimism/optimism/op-service/prestate" "github.com/ethereum-optimism/optimism/op-validator/pkg/service" "github.com/ethereum-optimism/optimism/op-validator/pkg/validations" "github.com/ethereum/go-ethereum/common" diff --git a/op-deployer/pkg/deployer/pipeline/pre_state.go b/op-deployer/pkg/deployer/pipeline/pre_state.go index cb41f5041a2f0..ed8769f1d638e 100644 --- a/op-deployer/pkg/deployer/pipeline/pre_state.go +++ b/op-deployer/pkg/deployer/pipeline/pre_state.go @@ -6,8 +6,8 @@ import ( "encoding/json" "fmt" - "github.com/ethereum-optimism/optimism/devnet-sdk/proofs/prestate" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/ethereum-optimism/optimism/op-service/prestate" ) type PreStateBuilder interface { diff --git a/op-deployer/pkg/deployer/state/state.go b/op-deployer/pkg/deployer/state/state.go index cf86c0fa85b8b..40f31c5f3f1b8 100644 --- a/op-deployer/pkg/deployer/state/state.go +++ b/op-deployer/pkg/deployer/state/state.go @@ -5,8 +5,8 @@ import ( "github.com/ethereum/go-ethereum/core" - "github.com/ethereum-optimism/optimism/devnet-sdk/proofs/prestate" 
"github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/broadcaster" + "github.com/ethereum-optimism/optimism/op-service/prestate" "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" diff --git a/op-deployer/pkg/deployer/validate/helpers_test.go b/op-deployer/pkg/deployer/validate/helpers_test.go index a9c893f94ea6a..c0133298681cf 100644 --- a/op-deployer/pkg/deployer/validate/helpers_test.go +++ b/op-deployer/pkg/deployer/validate/helpers_test.go @@ -4,11 +4,11 @@ import ( "context" "testing" - "github.com/ethereum-optimism/optimism/devnet-sdk/proofs/prestate" "github.com/ethereum-optimism/optimism/op-chain-ops/addresses" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/artifacts" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/standard" "github.com/ethereum-optimism/optimism/op-deployer/pkg/deployer/state" + "github.com/ethereum-optimism/optimism/op-service/prestate" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" diff --git a/op-devstack/devtest/testing.go b/op-devstack/devtest/testing.go index f368c0ef16f7d..6f4c0e0dcc186 100644 --- a/op-devstack/devtest/testing.go +++ b/op-devstack/devtest/testing.go @@ -15,7 +15,6 @@ import ( "github.com/ethereum/go-ethereum/log" - "github.com/ethereum-optimism/optimism/devnet-sdk/telemetry" "github.com/ethereum-optimism/optimism/op-service/log/logfilter" "github.com/ethereum-optimism/optimism/op-service/logmods" "github.com/ethereum-optimism/optimism/op-service/testlog" @@ -313,7 +312,7 @@ func SerialT(t *testing.T) T { // Set the lowest default log-level, so the log-filters on top can apply correctly logger := testlog.LoggerWithHandlerMod(t, log.LevelTrace, - telemetry.WrapHandler, logfilter.WrapFilterHandler, logfilter.WrapContextHandler) + wrapTracingHandler, logfilter.WrapFilterHandler, logfilter.WrapContextHandler) h, ok := 
logmods.FindHandler[logfilter.FilterHandler](logger.Handler()) if ok { // Apply default log level. This may be overridden later. diff --git a/op-devstack/devtest/tracing_handler.go b/op-devstack/devtest/tracing_handler.go new file mode 100644 index 0000000000000..3918d7505d0b8 --- /dev/null +++ b/op-devstack/devtest/tracing_handler.go @@ -0,0 +1,90 @@ +package devtest + +import ( + "context" + "fmt" + "log/slog" + + "github.com/ethereum-optimism/optimism/op-service/logmods" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +func wrapTracingHandler(h slog.Handler) slog.Handler { + return &tracingHandler{Handler: h} +} + +type tracingHandler struct { + slog.Handler +} + +var _ logmods.Handler = (*tracingHandler)(nil) + +func (h *tracingHandler) Unwrap() slog.Handler { + return h.Handler +} + +func (h *tracingHandler) WithAttrs(attrs []slog.Attr) slog.Handler { + return &tracingHandler{Handler: h.Handler.WithAttrs(attrs)} +} + +func (h *tracingHandler) WithGroup(name string) slog.Handler { + return &tracingHandler{Handler: h.Handler.WithGroup(name)} +} + +func (h *tracingHandler) Handle(ctx context.Context, record slog.Record) error { + span := trace.SpanFromContext(ctx) + if span.IsRecording() { + recorder := &traceAttrAccumulator{} + record.Attrs(func(attr slog.Attr) bool { + recorder.register(attr) + return true + }) + span.AddEvent(record.Message, trace.WithAttributes(recorder.kv...)) + } + + spanCtx := trace.SpanContextFromContext(ctx) + if spanCtx.HasTraceID() { + record.AddAttrs(slog.String("trace_id", spanCtx.TraceID().String())) + } + if spanCtx.HasSpanID() { + record.AddAttrs(slog.String("span_id", spanCtx.SpanID().String())) + } + return h.Handler.Handle(ctx, record) +} + +type traceAttrAccumulator struct { + kv []attribute.KeyValue +} + +func (ac *traceAttrAccumulator) register(attr slog.Attr) { + switch attr.Value.Kind() { + case slog.KindAny: + ac.kv = append(ac.kv, attribute.String(attr.Key, fmt.Sprintf("%v", 
attr.Value.Any()))) + case slog.KindBool: + ac.kv = append(ac.kv, attribute.Bool(attr.Key, attr.Value.Bool())) + case slog.KindDuration: + ac.kv = append(ac.kv, attribute.String(attr.Key, attr.Value.Duration().String())) + case slog.KindFloat64: + ac.kv = append(ac.kv, attribute.Float64(attr.Key, attr.Value.Float64())) + case slog.KindInt64: + ac.kv = append(ac.kv, attribute.Int64(attr.Key, attr.Value.Int64())) + case slog.KindString: + ac.kv = append(ac.kv, attribute.String(attr.Key, attr.Value.String())) + case slog.KindTime: + ac.kv = append(ac.kv, attribute.String(attr.Key, attr.Value.Time().String())) + case slog.KindUint64: + val := attr.Value.Uint64() + ac.kv = append(ac.kv, attribute.Int64(attr.Key, int64(val))) + if val > uint64(1<<63-1) { + ac.kv = append(ac.kv, attribute.Bool(attr.Key+".overflow", true)) + ac.kv = append(ac.kv, attribute.String(attr.Key+".actual", fmt.Sprintf("%d", val))) + } + case slog.KindGroup: + for _, groupAttr := range attr.Value.Group() { + ac.register(groupAttr) + } + case slog.KindLogValuer: + ac.register(slog.Attr{Key: attr.Key, Value: attr.Value.LogValuer().LogValue()}) + } +} diff --git a/op-devstack/dsl/eoa.go b/op-devstack/dsl/eoa.go index 99a35df815dcd..d23499e89fedb 100644 --- a/op-devstack/dsl/eoa.go +++ b/op-devstack/dsl/eoa.go @@ -7,9 +7,8 @@ import ( "math/rand" "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-core/predeploys" e2eBindings "github.com/ethereum-optimism/optimism/op-e2e/bindings" "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" @@ -233,7 +232,7 @@ func (u *EOA) WaitForBalance(v eth.ETH) { } func (u *EOA) DeployEventLogger() common.Address { - tx := txplan.NewPlannedTx(u.Plan(), 
txplan.WithData(common.FromHex(bindings.EventloggerBin))) + tx := txplan.NewPlannedTx(u.Plan(), txplan.WithData(common.FromHex(txIntentBindings.EventloggerBin))) res, err := tx.Included.Eval(u.ctx) u.t.Require().NoError(err, "failed to deploy EventLogger") eventLoggerAddress := res.ContractAddress @@ -292,7 +291,7 @@ func (u *EOA) SendRandomInitMessage(rng *rand.Rand, eventLoggerAddress common.Ad func (u *EOA) SendExecMessage(initMsg *InitMessage) *ExecMessage { tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](u.Plan()) tx.Content.DependOn(&initMsg.Tx.Result) - tx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)) + tx.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initMsg.Tx.Result, 0)) receipt, err := tx.PlannedTx.Included.Eval(u.ctx) u.t.Require().NoError(err, "exec msg receipt not found") u.log.Info("exec message included", "chain", u.ChainID(), "block", receipt.BlockNumber) @@ -318,7 +317,7 @@ func (u *EOA) SendInvalidExecMessage(initMsg *InitMessage) *ExecMessage { // Create the exec trigger with the invalid message execTrigger := &txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, + Executor: predeploys.CrossL2InboxAddr, Msg: msg, } @@ -348,7 +347,7 @@ func (u *EOA) SendPackedRandomInitMessages(rng *rand.Rand, eventLoggerAddress co initCalls[index] = interop.RandomInitTrigger(rng, eventLoggerAddress, rng.Intn(5), rng.Intn(100)) } tx := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](u.Plan()) - tx.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: initCalls}) + tx.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: initCalls}) receipt, err := tx.PlannedTx.Included.Eval(u.ctx) if err != nil { return nil, nil, err @@ -369,7 +368,7 @@ func (u *EOA) SendPackedExecMessages(dependOn *txintent.IntentTx[*txintent.Multi for idx := range len(result.Entries) { indexes = append(indexes, idx) } - 
tx.Content.Fn(txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &dependOn.Result, indexes)) + tx.Content.Fn(txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &dependOn.Result, indexes)) receipt, err := tx.PlannedTx.Included.Eval(u.ctx) if err != nil { return nil, nil, err @@ -513,7 +512,7 @@ func (p *SameTimestampPair) SubmitInit() *txplan.PlannedTx { func (p *SameTimestampPair) SubmitExecTo(executor *EOA) *txplan.PlannedTx { tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](executor.Plan()) tx.Content.Set(&txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, + Executor: predeploys.CrossL2InboxAddr, Msg: p.Message, }) _, err := tx.PlannedTx.Submitted.Eval(executor.ctx) @@ -529,7 +528,7 @@ func (p *SameTimestampPair) SubmitInvalidExecTo(executor *EOA) *txplan.PlannedTx tx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](executor.Plan()) tx.Content.Set(&txintent.ExecTrigger{ - Executor: constants.CrossL2Inbox, + Executor: predeploys.CrossL2InboxAddr, Msg: invalidMsg, }) _, err := tx.PlannedTx.Submitted.Eval(executor.ctx) diff --git a/op-e2e/actions/interop/interop_msg_test.go b/op-e2e/actions/interop/interop_msg_test.go index 4faf502c8fb1d..abf9ed81d65bf 100644 --- a/op-e2e/actions/interop/interop_msg_test.go +++ b/op-e2e/actions/interop/interop_msg_test.go @@ -6,15 +6,15 @@ import ( "math/rand" "testing" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-e2e/actions/helpers" "github.com/ethereum-optimism/optimism/op-e2e/actions/interop/dsl" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/sources" 
"github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/txintent" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -449,7 +449,7 @@ func TestInitAndExecMsgSameTimestamp(gt *testing.T) { txB.Content.DependOn(&txA.Result) // Single event in tx so index is 0 - txB.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &txA.Result, 0)) + txB.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &txA.Result, 0)) actors.ChainB.Sequencer.ActL2StartBlock(t) _, err = txB.PlannedTx.Included.Eval(t.Ctx()) @@ -509,7 +509,7 @@ func TestBreakTimestampInvariant(gt *testing.T) { txB.Content.DependOn(&txA.Result) // Single event in tx so index is 0 - txB.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &txA.Result, 0)) + txB.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &txA.Result, 0)) actors.ChainB.Sequencer.ActL2StartBlock(t) _, err = txB.PlannedTx.Included.Eval(t.Ctx()) @@ -636,7 +636,7 @@ func TestExecMsgDifferTxIndex(gt *testing.T) { execTx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](optsB) // Single event in every tx so index is always 0 - execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initTx.Result, 0)) + execTx.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initTx.Result, 0)) execTx.Content.DependOn(&initTx.Result) includedBlock, err := execTx.PlannedTx.IncludedBlock.Eval(t.Ctx()) @@ -710,7 +710,7 @@ func TestExpiredMessage(gt *testing.T) { txB.Content.DependOn(&txA.Result) // Single event in tx so index is 0 - txB.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &txA.Result, 0)) + txB.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &txA.Result, 0)) actors.ChainB.Sequencer.ActL2StartBlock(t) _, err = 
txB.PlannedTx.Included.Eval(t.Ctx()) @@ -783,7 +783,7 @@ func TestCrossPatternSameTimestamp(gt *testing.T) { // Intent to execute message X on chain B tx1 := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](optsB) - tx1.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx0.Result, 0)) + tx1.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx0.Result, 0)) tx1.Content.DependOn(&tx0.Result) _, err = tx1.PlannedTx.Submitted.Eval(t.Ctx()) @@ -805,7 +805,7 @@ func TestCrossPatternSameTimestamp(gt *testing.T) { // override nonce optsA = txplan.Combine(optsA, txplan.WithStaticNonce(nonce+1)) tx3 := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](optsA) - tx3.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx2.Result, 0)) + tx3.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx2.Result, 0)) tx3.Content.DependOn(&tx2.Result) _, err = tx3.PlannedTx.Submitted.Eval(t.Ctx()) @@ -887,7 +887,7 @@ func ExecTriggerFromInitTrigger(init *txintent.InitTrigger, logIndex uint, targe if x := len(output.Entries); x <= int(logIndex) { return nil, fmt.Errorf("invalid index: %d, only have %d events", logIndex, x) } - return &txintent.ExecTrigger{Executor: constants.CrossL2Inbox, Msg: output.Entries[logIndex]}, nil + return &txintent.ExecTrigger{Executor: predeploys.CrossL2InboxAddr, Msg: output.Entries[logIndex]}, nil } // TestCrossPatternSameTx tests below scenario: @@ -940,10 +940,10 @@ func TestCrossPatternSameTx(gt *testing.T) { // Intent to initiate message X and execute message Y at chain A txA := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](optsA) - txA.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: callsA}) + txA.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: callsA}) // Intent to initiate message Y and execute message X at chain B txB := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](optsB) - 
txB.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: callsB}) + txB.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: callsB}) includedA, err := txA.PlannedTx.IncludedBlock.Eval(t.Ctx()) require.NoError(t, err) @@ -962,14 +962,14 @@ func TestCrossPatternSameTx(gt *testing.T) { // confirm speculatively built exec message X by rebuilding after txA inclusion _, err = txA.Result.Eval(t.Ctx()) require.NoError(t, err) - multiTriggerA, err := txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txA.Result, []int{int(logIndexX)})(t.Ctx()) + multiTriggerA, err := txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &txA.Result, []int{int(logIndexX)})(t.Ctx()) require.NoError(t, err) require.Equal(t, multiTriggerA.Calls[logIndexX], execX) // confirm speculatively built exec message Y by rebuilding after txB inclusion _, err = txB.Result.Eval(t.Ctx()) require.NoError(t, err) - multiTriggerB, err := txintent.ExecuteIndexeds(constants.MultiCall3, constants.CrossL2Inbox, &txB.Result, []int{int(logIndexY)})(t.Ctx()) + multiTriggerB, err := txintent.ExecuteIndexeds(predeploys.MultiCall3Addr, predeploys.CrossL2InboxAddr, &txB.Result, []int{int(logIndexY)})(t.Ctx()) require.NoError(t, err) require.Equal(t, multiTriggerB.Calls[logIndexY], execY) @@ -1021,7 +1021,7 @@ func TestCycleInTx(gt *testing.T) { // Intent to execute message X and initiate message X at chain A tx := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](optsA) - tx.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: calls}) + tx.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: calls}) included, err := tx.PlannedTx.IncludedBlock.Eval(t.Ctx()) require.NoError(t, err) @@ -1033,7 +1033,7 @@ func TestCycleInTx(gt *testing.T) { // confirm speculatively built exec message by rebuilding after tx inclusion _, err = tx.Result.Eval(t.Ctx()) require.NoError(t, 
err) - exec2, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx.Result, int(logIndexX))(t.Ctx()) + exec2, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx.Result, int(logIndexX))(t.Ctx()) require.NoError(t, err) require.Equal(t, exec2, exec) @@ -1129,7 +1129,7 @@ func TestCycleInBlock(gt *testing.T) { _, err = tx.Result.Eval(t.Ctx()) require.NoError(t, err) // log index is 0 because tx emitted a single log - exec2, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx.Result, 0)(t.Ctx()) + exec2, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx.Result, 0)(t.Ctx()) require.NoError(t, err) require.Equal(t, exec2, exec) @@ -1224,14 +1224,14 @@ func TestCycleAcrossChainsSameTimestamp(gt *testing.T) { _, err = tx2.Result.Eval(t.Ctx()) require.NoError(t, err) // log index is 0 because tx emitted a single log - execX2, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx2.Result, 0)(t.Ctx()) + execX2, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx2.Result, 0)(t.Ctx()) require.NoError(t, err) require.Equal(t, execX2, execX) tx3 := intents[3] _, err = tx3.Result.Eval(t.Ctx()) require.NoError(t, err) // log index is 0 because tx emitted a single log - execY2, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx3.Result, 0)(t.Ctx()) + execY2, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx3.Result, 0)(t.Ctx()) require.NoError(t, err) require.Equal(t, execY2, execY) @@ -1288,10 +1288,10 @@ func TestCycleAcrossChainsSameTx(gt *testing.T) { // tx0 executes message X, then initiates message Y tx0 := txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](optsA) - tx0.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: []txintent.Call{execX, initY}}) + tx0.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: []txintent.Call{execX, initY}}) // tx1 executes message Y, then initiates message X tx1 := 
txintent.NewIntent[*txintent.MultiTrigger, *txintent.InteropOutput](optsB) - tx1.Content.Set(&txintent.MultiTrigger{Emitter: constants.MultiCall3, Calls: []txintent.Call{execY, initX}}) + tx1.Content.Set(&txintent.MultiTrigger{Emitter: predeploys.MultiCall3Addr, Calls: []txintent.Call{execY, initX}}) included0, err := tx0.PlannedTx.IncludedBlock.Eval(t.Ctx()) require.NoError(t, err) @@ -1307,12 +1307,12 @@ func TestCycleAcrossChainsSameTx(gt *testing.T) { // confirm speculatively built exec message by rebuilding after tx inclusion _, err = tx0.Result.Eval(t.Ctx()) require.NoError(t, err) - execY2, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx0.Result, int(logIndexY))(t.Ctx()) + execY2, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx0.Result, int(logIndexY))(t.Ctx()) require.NoError(t, err) require.Equal(t, execY2, execY) _, err = tx1.Result.Eval(t.Ctx()) require.NoError(t, err) - execX2, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &tx1.Result, int(logIndexX))(t.Ctx()) + execX2, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &tx1.Result, int(logIndexX))(t.Ctx()) require.NoError(t, err) require.Equal(t, execX2, execX) @@ -1343,7 +1343,7 @@ func TestExecMsgPointToSelf(gt *testing.T) { // manually construct identifier which makes exec message point to itself identifier := suptypes.Identifier{ - Origin: constants.CrossL2Inbox, + Origin: predeploys.CrossL2InboxAddr, BlockNumber: targetNum, LogIndex: uint32(0), // tx will emit single ExecutingMessage event to set log index as 0 Timestamp: targetTime, @@ -1354,7 +1354,7 @@ func TestExecMsgPointToSelf(gt *testing.T) { payloadHash := testutils.RandomHash(rng) message := suptypes.Message{Identifier: identifier, PayloadHash: payloadHash} - exec := &txintent.ExecTrigger{Executor: constants.CrossL2Inbox, Msg: message} + exec := &txintent.ExecTrigger{Executor: predeploys.CrossL2InboxAddr, Msg: message} // txintent for executing message pointing to itself tx := 
txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](optsA) tx.Content.Set(exec) diff --git a/devnet-sdk/proofs/prestate/client.go b/op-service/prestate/client.go similarity index 73% rename from devnet-sdk/proofs/prestate/client.go rename to op-service/prestate/client.go index a5a6e7b4c6aba..42708dfbd190a 100644 --- a/devnet-sdk/proofs/prestate/client.go +++ b/op-service/prestate/client.go @@ -14,23 +14,23 @@ import ( "github.com/ethereum-optimism/optimism/op-supervisor/supervisor/backend/depset" ) -// These constants should be in sync with op-program/chainconfig/chaincfg.go +// These constants should be in sync with op-program/chainconfig/chaincfg.go. const ( InteropDepSetName = "depsets.json" rollupConfigSuffix = "-rollup.json" genensisConfigSuffix = "-genesis-l2.json" ) -// PrestateManifest maps prestate identifiers to their hashes +// PrestateManifest maps prestate identifiers to their hashes. type PrestateManifest map[string]string -// PrestateBuilderClient is a client for the prestate builder service +// PrestateBuilderClient is a client for the prestate builder service. type PrestateBuilderClient struct { url string client *http.Client } -// NewPrestateBuilderClient creates a new client for the prestate builder service +// NewPrestateBuilderClient creates a new client for the prestate builder service. func NewPrestateBuilderClient(url string) *PrestateBuilderClient { return &PrestateBuilderClient{ url: url, @@ -38,24 +38,23 @@ func NewPrestateBuilderClient(url string) *PrestateBuilderClient { } } -// FileInput represents a file to be used in the build process +// FileInput represents a file to be used in the build process. 
type FileInput struct { - Name string // Name of the file (used for identification) - Content io.Reader // Content of the file - Type string // Type information (e.g., "rollup-config", "genesis-config", "interop") + Name string + Content io.Reader + Type string } -// buildContext holds all the inputs for a build operation type buildContext struct { chains []string files []FileInput generatedInteropDepSet bool } -// PrestateBuilderOption is a functional option for configuring a build +// PrestateBuilderOption is a functional option for configuring a build. type PrestateBuilderOption func(*buildContext) -// WithInteropDepSet adds an interop dependency set file to the build +// WithInteropDepSet adds an interop dependency set file to the build. func WithInteropDepSet(content io.Reader) PrestateBuilderOption { return func(c *buildContext) { c.files = append(c.files, FileInput{ @@ -81,31 +80,32 @@ func generateInteropDepSet(chains []string) ([]byte, error) { return nil, fmt.Errorf("failed to create interop dependency set: %w", err) } - json, err := json.Marshal(interopDepSet) + jsonBytes, err := json.Marshal(interopDepSet) if err != nil { return nil, err } - return json, nil + return jsonBytes, nil } +// WithGeneratedInteropDepSet requests generation of the interop dependency set from the chain list. func WithGeneratedInteropDepSet() PrestateBuilderOption { return func(c *buildContext) { c.generatedInteropDepSet = true } } -// WithChainConfig adds a pair of rollup and genesis config files to the build -func WithChainConfig(chainId string, rollupContent io.Reader, genesisContent io.Reader) PrestateBuilderOption { +// WithChainConfig adds a pair of rollup and genesis config files to the build. 
+func WithChainConfig(chainID string, rollupContent io.Reader, genesisContent io.Reader) PrestateBuilderOption { return func(c *buildContext) { - c.chains = append(c.chains, chainId) + c.chains = append(c.chains, chainID) c.files = append(c.files, FileInput{ - Name: chainId + rollupConfigSuffix, + Name: chainID + rollupConfigSuffix, Content: rollupContent, Type: "rollup-config", }, FileInput{ - Name: chainId + genensisConfigSuffix, + Name: chainID + genensisConfigSuffix, Content: genesisContent, Type: "genesis-config", }, @@ -113,15 +113,13 @@ func WithChainConfig(chainId string, rollupContent io.Reader, genesisContent io. } } -// BuildPrestate sends the files to the prestate builder service and returns a manifest of the built prestates +// BuildPrestate sends the files to the prestate builder service and returns a manifest of the built prestates. func (c *PrestateBuilderClient) BuildPrestate(ctx context.Context, opts ...PrestateBuilderOption) (PrestateManifest, error) { fmt.Println("Starting prestate build...") - // Apply options to build context bc := &buildContext{ files: []FileInput{}, } - for _, opt := range opts { opt(bc) } @@ -140,57 +138,42 @@ func (c *PrestateBuilderClient) BuildPrestate(ctx context.Context, opts ...Prest fmt.Printf("Preparing to upload %d files\n", len(bc.files)) - // Create a multipart form - var b bytes.Buffer - w := multipart.NewWriter(&b) - - // Add each file to the form + var body bytes.Buffer + writer := multipart.NewWriter(&body) for _, file := range bc.files { fmt.Printf("Adding file to form: %s (type: %s)\n", file.Name, file.Type) - // Create a form file with the file's name - fw, err := w.CreateFormFile("files[]", filepath.Base(file.Name)) + formFile, err := writer.CreateFormFile("files[]", filepath.Base(file.Name)) if err != nil { return nil, fmt.Errorf("failed to create form file: %w", err) } - - // Copy the file content to the form - if _, err := io.Copy(fw, file.Content); err != nil { + if _, err := io.Copy(formFile, 
file.Content); err != nil { return nil, fmt.Errorf("failed to copy file content: %w", err) } } - - // Close the multipart writer - if err := w.Close(); err != nil { + if err := writer.Close(); err != nil { return nil, fmt.Errorf("failed to close multipart writer: %w", err) } fmt.Printf("Sending build request to %s\n", c.url) - - // Create the HTTP request - req, err := http.NewRequestWithContext(ctx, "POST", c.url, &b) + req, err := http.NewRequestWithContext(ctx, http.MethodPost, c.url, &body) if err != nil { return nil, fmt.Errorf("failed to create request: %w", err) } + req.Header.Set("Content-Type", writer.FormDataContentType()) - // Set the content type - req.Header.Set("Content-Type", w.FormDataContentType()) - - // Send the request resp, err := c.client.Do(req) if err != nil { return nil, fmt.Errorf("failed to send request: %w", err) } defer resp.Body.Close() - // Check the response status if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusNotModified { - body, _ := io.ReadAll(resp.Body) - return nil, fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(body)) + respBody, _ := io.ReadAll(resp.Body) + return nil, fmt.Errorf("unexpected status code: %d, body: %s", resp.StatusCode, string(respBody)) } fmt.Println("Build request successful, fetching build manifest...") - // If the build was successful, get the info.json file infoURL := c.url if infoURL[len(infoURL)-1] != '/' { infoURL += "/" @@ -198,8 +181,7 @@ func (c *PrestateBuilderClient) BuildPrestate(ctx context.Context, opts ...Prest infoURL += "info.json" fmt.Printf("Requesting manifest from %s\n", infoURL) - - infoReq, err := http.NewRequestWithContext(ctx, "GET", infoURL, nil) + infoReq, err := http.NewRequestWithContext(ctx, http.MethodGet, infoURL, nil) if err != nil { return nil, fmt.Errorf("failed to create info request: %w", err) } @@ -211,18 +193,15 @@ func (c *PrestateBuilderClient) BuildPrestate(ctx context.Context, opts ...Prest defer 
infoResp.Body.Close() if infoResp.StatusCode != http.StatusOK { - body, _ := io.ReadAll(infoResp.Body) - return nil, fmt.Errorf("unexpected info.json status code: %d, body: %s", infoResp.StatusCode, string(body)) + respBody, _ := io.ReadAll(infoResp.Body) + return nil, fmt.Errorf("unexpected info.json status code: %d, body: %s", infoResp.StatusCode, string(respBody)) } - // Parse the JSON response var manifest PrestateManifest if err := json.NewDecoder(infoResp.Body).Decode(&manifest); err != nil { return nil, fmt.Errorf("failed to decode info.json response: %w", err) } fmt.Printf("Build complete. Generated %d prestate entries\n", len(manifest)) - return manifest, nil - } diff --git a/op-service/testutils/devnet/prestatebuilder.go b/op-service/testutils/devnet/prestatebuilder.go index a9127e12aaaf2..fc1fda07b9e04 100644 --- a/op-service/testutils/devnet/prestatebuilder.go +++ b/op-service/testutils/devnet/prestatebuilder.go @@ -3,7 +3,7 @@ package devnet import ( "context" - "github.com/ethereum-optimism/optimism/devnet-sdk/proofs/prestate" + "github.com/ethereum-optimism/optimism/op-service/prestate" ) type mockPreStateBuilder struct { diff --git a/op-service/txintent/bindings/eventlogger_deploy.go b/op-service/txintent/bindings/eventlogger_deploy.go new file mode 100644 index 0000000000000..1f6720111a6bc --- /dev/null +++ b/op-service/txintent/bindings/eventlogger_deploy.go @@ -0,0 +1,5 @@ +package bindings + +// EventloggerBin is the compiled EventLogger bytecode used by interop tests and devstack helpers. +// It was originally generated from packages/contracts-bedrock/src/integration/EventLogger.sol. 
+var EventloggerBin = "0x6080604052348015600e575f80fd5b506102ac8061001c5f395ff3fe608060405234801561000f575f80fd5b5060043610610034575f3560e01c8063ab4d6f7514610038578063edebc13b1461004d575b5f80fd5b61004b61004636600461013e565b610060565b005b61004b61005b36600461016c565b6100bd565b60405163ab4d6f7560e01b81526022602160991b019063ab4d6f759061008c9085908590600401610226565b5f604051808303815f87803b1580156100a3575f80fd5b505af11580156100b5573d5f803e3d5ffd5b505050505050565b80604051818482378486356020880135604089013560608a0135848015610102576001811461010a5760028114610113576003811461011d5760048114610128575f80fd5b8787a0610130565b848888a1610130565b83858989a2610130565b8284868a8aa3610130565b818385878b8ba45b505050505050505050505050565b5f8082840360c0811215610150575f80fd5b60a081121561015d575f80fd5b50919360a08501359350915050565b5f805f806040858703121561017f575f80fd5b843567ffffffffffffffff80821115610196575f80fd5b818701915087601f8301126101a9575f80fd5b8135818111156101b7575f80fd5b8860208260051b85010111156101cb575f80fd5b6020928301965094509086013590808211156101e5575f80fd5b818701915087601f8301126101f8575f80fd5b813581811115610206575f80fd5b886020828501011115610217575f80fd5b95989497505060200194505050565b60c0810183356001600160a01b038116808214610241575f80fd5b8352506020848101359083015260408085013590830152606080850135908301526080938401359382019390935260a001529056fea26469706673582212206da9bc84d514e1a78e2b4160f99f93aa58672040ece82f45ac2a878aeefdfbe164736f6c63430008190033" diff --git a/op-service/txintent/interop_call.go b/op-service/txintent/interop_call.go index 1fb70ccd82483..75fdcd1e796bf 100644 --- a/op-service/txintent/interop_call.go +++ b/op-service/txintent/interop_call.go @@ -4,7 +4,7 @@ import ( "fmt" "math/big" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" + "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" @@ -108,7 +108,7 
@@ func (v *ExecTrigger) EncodeInput() ([]byte, error) { func (v *ExecTrigger) AccessList() (types.AccessList, error) { access := v.Msg.Access() accessList := types.AccessList{{ - Address: constants.CrossL2Inbox, + Address: predeploys.CrossL2InboxAddr, StorageKeys: suptypes.EncodeAccessList([]suptypes.Access{access}), }} return accessList, nil diff --git a/op-supervisor/README.md b/op-supervisor/README.md index f8fde2a1ca9e2..4080443dbecd6 100644 --- a/op-supervisor/README.md +++ b/op-supervisor/README.md @@ -350,4 +350,3 @@ sequenceDiagram - `op-e2e/interop`: Go interop system-tests, focused on offchain aspects of services to run end to end. - `op-e2e/actions/interop`: Go interop action-tests, focused on onchain aspects such as safety and state-transition. -- `kurtosis-devnet/interop.yaml`: Kurtosis configuration to run interoperable chains locally. diff --git a/ops/docker/op-stack-go/Dockerfile.dockerignore b/ops/docker/op-stack-go/Dockerfile.dockerignore index 6067e181296a6..000dd0ed53571 100644 --- a/ops/docker/op-stack-go/Dockerfile.dockerignore +++ b/ops/docker/op-stack-go/Dockerfile.dockerignore @@ -3,7 +3,6 @@ * !/cannon -!/devnet-sdk !/op-core !/op-batcher !/op-chain-ops diff --git a/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go b/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go index 1aa273c36bd00..61ccf76cc4249 100644 --- a/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go +++ b/rust/kona/tests/supervisor/l2reorg/init_exec_msg_test.go @@ -5,9 +5,8 @@ import ( "testing" "time" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/bindings" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" + "github.com/ethereum-optimism/optimism/op-core/predeploys" "github.com/ethereum-optimism/optimism/op-devstack/devtest" "github.com/ethereum-optimism/optimism/op-devstack/dsl" "github.com/ethereum-optimism/optimism/op-devstack/presets" @@ 
-15,6 +14,7 @@ import ( "github.com/ethereum-optimism/optimism/op-service/bigs" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/txintent" + "github.com/ethereum-optimism/optimism/op-service/txintent/bindings" "github.com/ethereum-optimism/optimism/op-service/txplan" "github.com/ethereum-optimism/optimism/op-test-sequencer/sequencer/seqtypes" "github.com/ethereum/go-ethereum/common" @@ -100,7 +100,7 @@ func TestReorgInitExecMsg(gt *testing.T) { execTx = txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) execTx.Content.DependOn(&initTx.Result) // single event in tx so index is 0. ExecuteIndexed returns a lambda to transform InteropOutput to a new ExecTrigger - execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initTx.Result, 0)) + execTx.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initTx.Result, 0)) var err error execReceipt, err = execTx.PlannedTx.Included.Eval(ctx) require.NoError(t, err) diff --git a/rust/kona/tests/supervisor/pre_interop/pre_test.go b/rust/kona/tests/supervisor/pre_interop/pre_test.go index 1ae079bf01d6d..869c62e7279e5 100644 --- a/rust/kona/tests/supervisor/pre_interop/pre_test.go +++ b/rust/kona/tests/supervisor/pre_interop/pre_test.go @@ -7,7 +7,6 @@ import ( "github.com/ethereum/go-ethereum/common" - "github.com/ethereum-optimism/optimism/devnet-sdk/contracts/constants" "github.com/ethereum-optimism/optimism/op-acceptance-tests/tests/interop" "github.com/ethereum-optimism/optimism/op-chain-ops/genesis" "github.com/ethereum-optimism/optimism/op-core/predeploys" @@ -78,7 +77,7 @@ func TestPreNoInbox(gt *testing.T) { // send executing message on chain B and confirm we got an error execTx := txintent.NewIntent[*txintent.ExecTrigger, *txintent.InteropOutput](bob.Plan()) execTx.Content.DependOn(&initMsg.Tx.Result) - execTx.Content.Fn(txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)) + 
execTx.Content.Fn(txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initMsg.Tx.Result, 0)) execReceipt, err := execTx.PlannedTx.Included.Eval(sys.T.Ctx()) require.ErrorContains(err, "implementation not initialized", "error did not contain expected string") require.Nil(execReceipt) @@ -95,7 +94,7 @@ func TestPreNoInbox(gt *testing.T) { { ctx := sys.T.Ctx() - execTrigger, err := txintent.ExecuteIndexed(constants.CrossL2Inbox, &initMsg.Tx.Result, 0)(ctx) + execTrigger, err := txintent.ExecuteIndexed(predeploys.CrossL2InboxAddr, &initMsg.Tx.Result, 0)(ctx) require.NoError(err) ed := stypes.ExecutingDescriptor{Timestamp: uint64(time.Now().Unix())} From 48b31f0b1b1e9afcf4d07f75c28771f602cc6a1b Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Thu, 12 Mar 2026 21:47:30 +0100 Subject: [PATCH 115/201] docs: add default branch and build system info to AGENTS.md (#19508) Co-authored-by: Claude Opus 4.6 --- AGENTS.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/AGENTS.md b/AGENTS.md index 7d7055bf0b98b..467ca88ed809d 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -15,6 +15,9 @@ When this happens, offer to submit the improvement to the relevant file in `docs ## Repository Overview +- **Default branch**: `develop` (not `main`) +- **Build system**: migrating from Make to [Just](https://github.com/casey/just) — shared justfile infra lives in `justfiles/` + This repository contains multiple components spanning different technologies: ### Go Services From c6ff4f9237fa332e239b8a5d233252a22b8d870b Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Thu, 12 Mar 2026 22:05:38 +0100 Subject: [PATCH 116/201] op-node/derive: drop pre-Delta span batch as NotEnoughData (#19500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Returns NotEnoughData instead of NewTemporaryError when a span batch is encountered before Delta activation. This drains the channel immediately rather than taking O(N×backoff) cycles. Fixes #19493. 
--- op-node/rollup/derive/channel_in_reader.go | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index ed4c12361a33d..5c8f97664a14e 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -110,10 +110,8 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { return batch, nil case SpanBatchType: if origin := cr.Origin(); !cr.cfg.IsDelta(origin.Time) { - // Check hard fork activation with the L1 inclusion block time instead of the L1 origin block time. - // Therefore, even if the batch passed this rule, it can be dropped in the batch queue. - // This is just for early dropping invalid batches as soon as possible. - return nil, NewTemporaryError(fmt.Errorf("cannot accept span batch in L1 block %s at time %d", origin, origin.Time)) + cr.log.Warn("dropping span batch before Delta activation", "origin", origin) + return nil, NotEnoughData } batch.Batch, err = DeriveSpanBatch(batchData, cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) if err != nil { From 218bff0408cf42b647ad6f4cf20ef14ec4120c4b Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Thu, 12 Mar 2026 22:13:56 +0100 Subject: [PATCH 117/201] op-node/derive: drop malformed span batch tx data as NotEnoughData (#19502) Classifies non-critical errors from DeriveSpanBatch (malformed tx type, invalid encoding, etc.) as NotEnoughData so the pipeline drops the batch immediately without backoff. Critical errors (logic errors) still propagate as-is. Fixes #19494. 
--- op-node/rollup/derive/channel_in_reader.go | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index 5c8f97664a14e..b6634c5eaa21f 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -3,6 +3,7 @@ package derive import ( "bytes" "context" + "errors" "fmt" "io" @@ -115,7 +116,11 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { } batch.Batch, err = DeriveSpanBatch(batchData, cr.cfg.BlockTime, cr.cfg.Genesis.L2Time, cr.cfg.L2ChainID) if err != nil { - return nil, err + if errors.Is(err, ErrCritical) { + return nil, err + } + cr.log.Warn("dropping malformed span batch", "err", err) + return nil, NotEnoughData } batch.LogContext(cr.log).Info("decoded span batch from channel", "stage_origin", cr.Origin()) cr.metrics.RecordDerivedBatches("span") From b80de52a41d4cdc109f0bf80a13e3e0d1522a5ad Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Thu, 12 Mar 2026 22:27:33 +0100 Subject: [PATCH 118/201] op-node/derive: return CriticalError when beacon endpoint not configured after Ecotone (#19501) Wraps the "beacon endpoint not configured" error with NewCriticalError so the node stops with a clear fatal error instead of retrying indefinitely on an unrecoverable operator misconfiguration. Fixes #19495. 
--- op-node/rollup/derive/data_source.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/op-node/rollup/derive/data_source.go b/op-node/rollup/derive/data_source.go index 964dea20aaf97..d90cfb42ff53d 100644 --- a/op-node/rollup/derive/data_source.go +++ b/op-node/rollup/derive/data_source.go @@ -70,7 +70,7 @@ func (ds *DataSourceFactory) OpenData(ctx context.Context, ref eth.L1BlockRef, b var src DataIter if ds.ecotoneTime != nil && ref.Time >= *ds.ecotoneTime { if ds.blobsFetcher == nil { - return nil, fmt.Errorf("ecotone upgrade active but beacon endpoint not configured") + return nil, NewCriticalError(fmt.Errorf("ecotone upgrade active but beacon endpoint not configured")) } src = NewBlobDataSource(ctx, ds.log, ds.dsCfg, ds.fetcher, ds.blobsFetcher, ref, batcherAddr) } else { From 286f5ad3ec5be314a5e77b5ec55829616719bbca Mon Sep 17 00:00:00 2001 From: Sebastian Stammler Date: Thu, 12 Mar 2026 22:52:04 +0100 Subject: [PATCH 119/201] op-node/derive: drop bad channel decompression as NotEnoughData (#19499) * op-node/derive: drop bad channel decompression as NotEnoughData Fixes WriteChannel error classification in ChannelInReader.NextBatch: returns NotEnoughData (immediate continue, no backoff) instead of NewTemporaryError when channel data fails to decompress. Also downgrades the log.Error in WriteChannel to log.Warn since this is bad batcher data, not an infrastructure failure. Fixes #19492. 
* op-node/derive: address review comments --- op-node/rollup/derive/channel_in_reader.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/op-node/rollup/derive/channel_in_reader.go b/op-node/rollup/derive/channel_in_reader.go index b6634c5eaa21f..93c45fe3322c6 100644 --- a/op-node/rollup/derive/channel_in_reader.go +++ b/op-node/rollup/derive/channel_in_reader.go @@ -60,7 +60,6 @@ func (cr *ChannelInReader) WriteChannel(data []byte) error { cr.metrics.RecordChannelInputBytes(len(data)) return nil } else { - cr.log.Error("Error creating batch reader from channel data", "err", err) return err } } @@ -82,7 +81,8 @@ func (cr *ChannelInReader) NextBatch(ctx context.Context) (Batch, error) { return nil, err } else { if err := cr.WriteChannel(data); err != nil { - return nil, NewTemporaryError(err) + cr.log.Warn("failed to create batch reader from channel data, dropping channel", "err", err, "origin", cr.Origin()) + return nil, NotEnoughData } } } From 60a811cdc96fb8a9dacbbd009929866f5f39ca98 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 13 Mar 2026 08:07:15 +1000 Subject: [PATCH 120/201] chore(op-e2e): migrate Makefile to justfile (#19475) * chore(cannon): migrate Makefile to justfile Migrate cannon build targets from Make to Just. The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * fix(cannon): add missing lint target and include justfiles in Docker context - Add `lint` to DEPRECATED_TARGETS and justfile (CI compatibility stub) - Copy justfiles/ into kona cannon-repro.dockerfile for deprecated.mk shim - Install `just` binary in cannon Docker build for the Make shim Co-Authored-By: Claude Opus 4.6 * fix(cannon): remove [default] attribute for just <1.38 compat The Alpine 3.21 just package is v1.37.0 which doesn't support the [default] attribute. Move cannon recipe to first position instead. 
Co-Authored-By: Claude Opus 4.6 * fix(cannon): include justfiles/ in op-program Docker build context The cannon Makefile deprecated shim requires justfiles/deprecated.mk, which is resolved relative to cannon/ inside the Docker container. The op-program Dockerfile.repro.dockerignore was excluding justfiles/ from the build context, causing the cannon make shim to fail. Co-Authored-By: Claude Opus 4.6 * docs(cannon): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(cannon): call just directly in cannon-repro.dockerfile Instead of going through the deprecated Make shim, invoke just cannon directly in the Docker build. Co-Authored-By: Claude Opus 4.6 * fix(cannon): update Dockerfile.diff to use just diff-cannon directly The diff-%-cannon Make pattern target was converted to a parameterized just recipe (just diff-cannon VM). Update the Dockerfile to call just directly instead of through make, which would fail since the deprecated shim doesn't support pattern targets. Co-Authored-By: Claude Opus 4.6 * fix(cannon): add diff-%-cannon pattern target to deprecated Makefile Preserves backwards compatibility for make diff--cannon invocations (used by Dockerfile.diff and potentially other scripts) by translating the pattern to just diff-cannon . Co-Authored-By: Claude Opus 4.6 * fix: install just via system package manager in Dockerfiles - cannon/Dockerfile.diff: use `apk add just` instead of curl install script, drop unnecessary `make` dependency - cannon-repro.dockerfile: switch cannon-build stage from ubuntu:22.04 to golang:1.23.8-alpine3.21, matching the monorepo's Go builder image, so just can be installed via `apk add` instead of curl install script Co-Authored-By: Claude Opus 4.6 * fix(cannon): bump Go to 1.24.10 in cannon-repro.dockerfile The golang Docker image sets GOTOOLCHAIN=local which prevents automatic toolchain downloading. Since go.mod requires go 1.24.0, the 1.23.8 image fails to build. 
Match Dockerfile.diff which already uses 1.24.10. Co-Authored-By: Claude Opus 4.6 * chore(op-e2e): migrate Makefile to justfile Migrate op-e2e build targets from Make to Just. The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * docs(op-e2e): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(justfiles): pass Make variable overrides as env vars in deprecated shim The deprecated.mk shim was changed to pass JUSTFLAGS as just CLI variable overrides (`just VAR=val target`), but just rejects overrides for variables not declared in the justfile. This broke CI jobs where Make variable assignments propagate through sub-makes (e.g. GO_TEST_FLAGS, GUEST_PROGRAM). Revert to passing them as environment variables via `env`, which is how the shim originally worked in the cannon migration PR. Fixes: go-tests-short, sanitize-op-program CI failures Co-Authored-By: Claude Opus 4.6 * fix(op-e2e): manual shim for gen-binding positional arg The generic deprecated.mk shim converts make variables to env vars, but gen-binding CONTRACT: is a positional parameter in just. Write a manual shim that passes CONTRACT correctly as a positional arg. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- op-e2e/Makefile | 82 ++++++++---------------------------------------- op-e2e/README.md | 6 ++-- op-e2e/justfile | 77 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+), 72 deletions(-) create mode 100644 op-e2e/justfile diff --git a/op-e2e/Makefile b/op-e2e/Makefile index 447dbfaa60549..8680c6fd16fa8 100644 --- a/op-e2e/Makefile +++ b/op-e2e/Makefile @@ -1,73 +1,17 @@ -# nproc is for linux, sysctl for Mac and then fallback to 4 if neither is available -num_cores := $(shell nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) - -# Generally, JUNIT_FILE is set in CI but may be specified to an arbitrary file location to emulate CI locally -# If JUNIT_FILE is set, JSON_LOG_FILE should also be set -ifdef JUNIT_FILE - go_test = OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false gotestsum --format=testname --junitfile=$(JUNIT_FILE) --jsonfile=$(JSON_LOG_FILE) -- -failfast - # Note: -parallel must be set to match the number of cores in the resource class - go_test_flags = -timeout=60m -parallel=$(num_cores) -else - go_test = go test - go_test_flags = -v -endif - -test: pre-test test-ws -.PHONY: test - -test-external-%: pre-test - make -C ./external_$*/ - $(go_test) $(go_test_flags) --externalL2 ./external_$*/ - -test-ws: pre-test - $(go_test) $(go_test_flags) ./system/... ./e2eutils/... ./opgeth/... ./interop/... -.PHONY: test-ws - -test-actions: pre-test - $(go_test) $(go_test_flags) ./actions/... -.PHONY: test-actions - -test-http: pre-test - OP_E2E_USE_HTTP=true $(go_test) $(go_test_flags) ./system/... ./e2eutils/... ./opgeth/... ./interop/... -.PHONY: test-http - -test-cannon: pre-test - OP_E2E_CANNON_ENABLED=true $(go_test) $(go_test_flags) ./faultproofs -.PHONY: test-cannon - -test-fault-proofs: pre-test - $(go_test) $(go_test_flags) ./faultproofs -.PHONY: test-faultproofs - -cannon-prestates: - make -C .. 
cannon-prestates -.PHONY: cannon-prestates +DEPRECATED_TARGETS := test test-ws test-actions test-http test-cannon test-fault-proofs cannon-prestates pre-test clean fuzz +include ../justfiles/deprecated.mk -pre-test: pre-test-cannon -.PHONY: pre-test - -pre-test-cannon: - @if [ ! -e ../op-program/bin ]; then \ - make cannon-prestates; \ - fi -.PHONY: pre-test-cannon - - -clean: - rm -r ../.devnet - rm -r ../op-program/bin -.PHONY: clean - -fuzz: - parallel --tag -N1 "go test -run NOTAREALTEST -tags cgo_test -v -fuzztime 10s -fuzz {} ./opgeth" ::: FuzzFjordCostFunction FuzzFastLzGethSolidity FuzzFastLzCgo - -ifndef CONTRACT -gen-binding: - $(error CONTRACT is required, usage: make gen-binding CONTRACT=OPContractsManager) -else +# gen-binding needs a manual shim because CONTRACT is a positional arg in just, +# not an env var. The generic shim would produce `just CONTRACT=X gen-binding` +# but just needs `just gen-binding X`. +.PHONY: gen-binding gen-binding: - cd ../packages/contracts-bedrock && just build - ./scripts/gen-binding.sh $(CONTRACT) +ifndef CONTRACT + $(error CONTRACT is required: make gen-binding CONTRACT=OPContractsManager) endif -.PHONY: gen-binding + @echo + @printf '%s\n' 'Deprecated make call: make gen-binding CONTRACT=$(CONTRACT)' + @printf '%s\n' 'Consider using just instead: just gen-binding $(CONTRACT)' + @echo + just gen-binding $(CONTRACT) diff --git a/op-e2e/README.md b/op-e2e/README.md index 6c039bc6d82d4..130b4afc13304 100644 --- a/op-e2e/README.md +++ b/op-e2e/README.md @@ -17,8 +17,8 @@ for those tests where we integration-test the full system, rather than only spec ## Quickstart ```bash -make test-actions -make test-ws +just test-actions +just test-ws ``` ## Overview @@ -44,7 +44,7 @@ make test-ws Bindings for a contract can be generated (or updated) using ``` -make gen-binding CONTRACT=OPContractsManager +just gen-binding OPContractsManager ``` diff --git a/op-e2e/justfile b/op-e2e/justfile new file mode 100644 index 
0000000000000..72002fe4b54b6 --- /dev/null +++ b/op-e2e/justfile @@ -0,0 +1,77 @@ +import '../justfiles/default.just' + +num_cores := num_cpus() + +JUNIT_FILE := env('JUNIT_FILE', '') +JSON_LOG_FILE := env('JSON_LOG_FILE', '') + +_go_test := if JUNIT_FILE != "" { + "OP_TESTLOG_DISABLE_COLOR=true OP_E2E_DISABLE_PARALLEL=false gotestsum --format=testname --junitfile=" + JUNIT_FILE + " --jsonfile=" + JSON_LOG_FILE + " -- -failfast" +} else { + "go test" +} + +_go_test_flags := if JUNIT_FILE != "" { + "-timeout=60m -parallel=" + num_cores +} else { + "-v" +} + +# Run all tests +test: pre-test test-ws + +# Run tests for an external L2 +test-external NAME: pre-test + #!/usr/bin/env bash + set -euo pipefail + make -C ./external_{{NAME}}/ + {{_go_test}} {{_go_test_flags}} --externalL2 ./external_{{NAME}}/ + +# Run workspace tests +test-ws: pre-test + {{_go_test}} {{_go_test_flags}} ./system/... ./e2eutils/... ./opgeth/... ./interop/... + +# Run action tests +test-actions: pre-test + {{_go_test}} {{_go_test_flags}} ./actions/... + +# Run tests over HTTP +test-http: pre-test + OP_E2E_USE_HTTP=true {{_go_test}} {{_go_test_flags}} ./system/... ./e2eutils/... ./opgeth/... ./interop/... + +# Run cannon tests +test-cannon: pre-test + OP_E2E_CANNON_ENABLED=true {{_go_test}} {{_go_test_flags}} ./faultproofs + +# Run fault proof tests +test-fault-proofs: pre-test + {{_go_test}} {{_go_test_flags}} ./faultproofs + +# Build cannon prestates +cannon-prestates: + make -C .. cannon-prestates + +# Pre-test setup +pre-test: pre-test-cannon + +[private] +pre-test-cannon: + #!/usr/bin/env bash + set -euo pipefail + if [ ! 
-e ../op-program/bin ]; then + just cannon-prestates + fi + +# Clean build artifacts +clean: + rm -rf ../.devnet + rm -rf ../op-program/bin + +# Run fuzz tests +fuzz: + parallel --tag -N1 "go test -run NOTAREALTEST -tags cgo_test -v -fuzztime 10s -fuzz {} ./opgeth" ::: FuzzFjordCostFunction FuzzFastLzGethSolidity FuzzFastLzCgo + +# Generate contract bindings +gen-binding CONTRACT: + cd ../packages/contracts-bedrock && just build + ./scripts/gen-binding.sh {{CONTRACT}} From 9a50c60201c24f3ace880614548e8d2f6b0d049f Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 13 Mar 2026 09:27:35 +1000 Subject: [PATCH 121/201] chore(op-program): migrate Makefile to justfile (#19476) * docs(op-e2e): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(justfiles): pass Make variable overrides as env vars in deprecated shim The deprecated.mk shim was changed to pass JUSTFLAGS as just CLI variable overrides (`just VAR=val target`), but just rejects overrides for variables not declared in the justfile. This broke CI jobs where Make variable assignments propagate through sub-makes (e.g. GO_TEST_FLAGS, GUEST_PROGRAM). Revert to passing them as environment variables via `env`, which is how the shim originally worked in the cannon migration PR. Fixes: go-tests-short, sanitize-op-program CI failures Co-Authored-By: Claude Opus 4.6 * chore(op-program): migrate Makefile to justfile Migrate op-program build targets from Make to Just. The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * fix(op-program): include justfiles/ in Docker contexts for Make shim The Makefile shim includes ../justfiles/deprecated.mk which delegates to just. Docker builds for vmcompat and repro excluded justfiles/ from the build context, causing "No such file or directory" errors in CI. 
- Add !justfiles/ to both .dockerignore files - Install just in Dockerfile.vmcompat (Dockerfile.repro already has it) Co-Authored-By: Claude Opus 4.6 * ci: re-trigger cannon-prestate build Co-Authored-By: Claude Opus 4.6 * fix(op-program): remove unnecessary MIPS variable overrides from repro.justfile The op-program-client-mips target hardcodes GOOS/GOARCH/GOMIPS64 in its build commands, so passing GOOS=linux GOARCH=mips GOMIPS=softfloat from repro.justfile was always redundant. With the Make-to-just migration, the deprecated.mk shim forwards these as just variables, and just rejects GOMIPS since it's not defined in the justfile. Co-Authored-By: Claude Opus 4.6 * docs(op-program): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(op-program): use just in Dockerfile.vmcompat, restore TODO - Update Dockerfile.vmcompat to call just directly for the analyze target instead of going through the deprecated make shim. - Restore TODO(#18334) comment for go1.25 vm-compat support that was dropped during migration. Co-Authored-By: Claude Opus 4.6 * fix(op-program): use just instead of make in repro.justfile Convert repro.justfile to call just directly instead of going through make's deprecated shim. GOOS/GOARCH are passed as env vars for cannon (read by go.just), while VERSION/GITCOMMIT/GITDATE are passed as just variable overrides. The op-program mips targets hardcode their own GOOS/GOARCH/GOMIPS so they don't need to be passed. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- op-program/Dockerfile.vmcompat | 4 +- op-program/Dockerfile.vmcompat.dockerignore | 1 + op-program/Makefile | 191 +------------------- op-program/README.md | 8 +- op-program/justfile | 183 +++++++++++++++++++ op-program/repro.justfile | 18 +- 6 files changed, 197 insertions(+), 208 deletions(-) create mode 100644 op-program/justfile diff --git a/op-program/Dockerfile.vmcompat b/op-program/Dockerfile.vmcompat index 96c4caaad4f01..cdbd0136af81a 100644 --- a/op-program/Dockerfile.vmcompat +++ b/op-program/Dockerfile.vmcompat @@ -6,7 +6,7 @@ FROM golang:${GO_VERSION} AS builder ARG VM_TARGET # Install dependencies -RUN apk add --no-cache make git bash jq llvm +RUN apk add --no-cache make git bash jq llvm just # Build vm-compat binary RUN mkdir -p /tmp/vm-compat && \ @@ -31,7 +31,7 @@ WORKDIR /app/op-program ENV FINDINGS_OUTPUT_PATH=/app/op-program/vm-compat-findings.json RUN --mount=type=cache,target=/go/pkg/mod --mount=type=cache,target=/root/.cache/go-build \ touch $FINDINGS_OUTPUT_PATH && \ - make "analyze-op-program-client-${VM_TARGET}"; echo $? > /app/op-program/vm-compat-exit-code + just "analyze-op-program-client-${VM_TARGET}"; echo $? > /app/op-program/vm-compat-exit-code FROM scratch AS export COPY --from=builder /app/op-program/vm-compat-findings.json . 
diff --git a/op-program/Dockerfile.vmcompat.dockerignore b/op-program/Dockerfile.vmcompat.dockerignore index 1cf8173ca256c..3496b28e41202 100644 --- a/op-program/Dockerfile.vmcompat.dockerignore +++ b/op-program/Dockerfile.vmcompat.dockerignore @@ -12,6 +12,7 @@ !op-service/ !op-supervisor/ !op-test-sequencer +!justfiles/ **/bin **/testdata diff --git a/op-program/Makefile b/op-program/Makefile index 0da54691a4a0f..3fd777e5451a2 100644 --- a/op-program/Makefile +++ b/op-program/Makefile @@ -1,190 +1,3 @@ -GITCOMMIT ?= $(shell git rev-parse HEAD 2>/dev/null || echo "unknown") -GITDATE ?= $(shell git show -s --format='%ct' 2>/dev/null || echo "0") +DEPRECATED_TARGETS := op-program op-program-host op-program-client op-program-client-mips op-program-client-mips64 op-program-client-mips64-interop op-program-client-riscv check-custom-chains build-reproducible-prestate output-prestate-hash reproducible-prestate verify-reproducibility clean test capture-mainnet-genesis capture-sepolia-delta capture-sepolia-ecotone capture-sepolia-fjord capture-sepolia-granite capture-sepolia-holocene capture-sepolia-isthmus capture-chain-test-data verify-mainnet-genesis verify-sepolia-delta verify-sepolia-ecotone verify-sepolia-fjord verify-sepolia-granite verify-sepolia-holocene verify-sepolia-isthmus verify-compat analyze-op-program-client-current analyze-op-program-client-next run-vm-compat -VERSION ?= v0.0.0 - -LDFLAGSSTRING +=-X main.GitCommit=$(GITCOMMIT) -LDFLAGSSTRING +=-X main.GitDate=$(GITDATE) - -# op-program-client version must ALWAYS be set to the same value (v0.0.0) to ensure exact build is reproducible -PC_LDFLAGSSTRING := $(LDFLAGSSTRING) -PC_LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Version=v0.0.0 -PC_LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Meta= - -LDFLAGSSTRING +=-X github.com/ethereum-optimism/optimism/op-program/version.Version=$(VERSION) -LDFLAGSSTRING +=-X 
github.com/ethereum-optimism/optimism/op-program/version.Meta=$(VERSION_META) - -COMPAT_DIR := temp/compat - -op-program: \ - op-program-host \ - op-program-client \ - op-program-client-mips - -op-program-host: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) CGO_ENABLED=0 go build -v -ldflags "$(LDFLAGSSTRING)" -o ./bin/op-program ./host/cmd/main.go - -op-program-client: - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v -ldflags "$(PC_LDFLAGSSTRING)" -o ./bin/op-program-client ./client/cmd/main.go - env GO111MODULE=on GOOS=$(TARGETOS) GOARCH=$(TARGETARCH) go build -v -ldflags "$(PC_LDFLAGSSTRING)" -o ./bin/op-program-client-interop ./client/interopcmd/main.go - -op-program-client-mips: op-program-client-mips64 op-program-client-mips64-interop - -op-program-client-mips64: - env GO111MODULE=on GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -v -ldflags "$(PC_LDFLAGSSTRING)" -o ./bin/op-program-client64.elf ./client/cmd/main.go - # verify output with: readelf -h bin/op-program-client64.elf - # result is mips64, big endian, R3000 - -op-program-client-mips64-interop: - env GO111MODULE=on GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -v -ldflags "$(PC_LDFLAGSSTRING)" -o ./bin/op-program-client-interop.elf ./client/interopcmd/main.go - # verify output with: readelf -h bin/op-program-client-interop.elf - # result is mips64, big endian, R3000 - -op-program-client-riscv: - env GO111MODULE=on GOOS=linux GOARCH=riscv64 go build -v -gcflags="all=-d=softfloat" -ldflags "$(PC_LDFLAGSSTRING)" -o ./bin/op-program-client-riscv.elf ./client/cmd/main.go - -check-custom-chains: op-program-host - ./bin/op-program configs check-custom-chains - -build-reproducible-prestate: check-custom-chains - @docker build --build-arg GO_VERSION=1.24.2-alpine3.21 --build-arg EXPORT_TARGET=next --output ./bin/ --progress plain -f Dockerfile.repro ../ - @docker build --build-arg GO_VERSION=1.24.2-alpine3.21 --build-arg EXPORT_TARGET=current --output ./bin/ 
--progress plain -f Dockerfile.repro ../ -.PHONY: build-reproducible-prestate - -output-prestate-hash: - @echo "-------------------- op-program Production Prestates --------------------" - @echo "\nCannon64 Absolute prestate hash: " - @cat ./bin/prestate-proof-mt64.json | jq -r .pre - @echo "\n-------------------- op-program Experimental Prestates --------------------" - @echo "\nCannon64Next Absolute prestate hash: " - @cat ./bin/prestate-proof-mt64Next.json | jq -r .pre - @echo "\nCannonInterop Absolute prestate hash: " - @cat ./bin/prestate-proof-interop.json | jq -r .pre - @echo "\nCannonInteropNext Absolute prestate hash: " - @cat ./bin/prestate-proof-interopNext.json | jq -r .pre - @echo -.PHONY: output-prestate-hash - -reproducible-prestate: build-reproducible-prestate output-prestate-hash -.PHONY: reproducible-prestate - -verify-reproducibility: - rm -rf temp/states - ./scripts/build-prestates.sh - env GO111MODULE=on go run ./prestates/verify/verify.go --input temp/states/versions.json -.PHONY: verify-reproducibility - -clean: - rm -rf bin "$(COMPAT_DIR)" - -test: - go test -v ./... 
- -capture-mainnet-genesis: op-program-host op-program-client - rm -rf "$(COMPAT_DIR)/mainnet-genesis" "$(COMPAT_DIR)/mainnet-genesis.tar.bz" - env GO111MODULE=on go run ./verify/mainnet/cmd/mainnet.go --l1 $$MAINNET_L1URL --l1.beacon $$MAINNET_BEACON_URL --l2 $$MAINNET_L2URL --datadir "$(COMPAT_DIR)/mainnet-genesis" --l1.head "0x4903424f6cc2cfba7c2bf8c8f48ca46721c963fa64b411cfee3697b781e3e5f1" --l2.start "105235063" --l2.end "105235064" - tar jcf "$(COMPAT_DIR)/mainnet-genesis.tar.bz" -C "$(COMPAT_DIR)" mainnet-genesis - -capture-sepolia-delta: op-program-host op-program-client - rm -rf "$(COMPAT_DIR)/sepolia-delta" "$(COMPAT_DIR)/sepolia-delta.tar.bz" - env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $$SEPOLIA_L1URL --l1.beacon $$SEPOLIA_BEACON_URL --l2 $$SEPOLIA_L2URL --datadir "$(COMPAT_DIR)/sepolia-delta" --l1.head "0x935428728bcfcfeb2e5ba9175fd2890e52831dae221aa4d5dcffed8320edc001" --l2.start "8728200" --l2.end "8728320" - tar jcf "$(COMPAT_DIR)/sepolia-delta.tar.bz" -C "$(COMPAT_DIR)" sepolia-delta - -capture-sepolia-ecotone: op-program-host op-program-client - rm -rf "$(COMPAT_DIR)/sepolia-ecotone" "$(COMPAT_DIR)/sepolia-ecotone.tar.bz" - env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $$SEPOLIA_L1URL --l1.beacon $$SEPOLIA_BEACON_URL --l2 $$SEPOLIA_L2URL --datadir "$(COMPAT_DIR)/sepolia-ecotone" --l1.head "0x5d491a8c1e728a4e70720c09fefdaa083681a9421cd365af85220cf8bd4448a3" --l2.start "9205715" --l2.end "9205815" - tar jcf "$(COMPAT_DIR)/sepolia-ecotone.tar.bz" -C "$(COMPAT_DIR)" sepolia-ecotone - -capture-sepolia-fjord: op-program-host op-program-client - rm -rf "$(COMPAT_DIR)/sepolia-fjord" "$(COMPAT_DIR)/sepolia-fjord.tar.bz" - env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $$SEPOLIA_L1URL --l1.beacon $$SEPOLIA_BEACON_URL --l2 $$SEPOLIA_L2URL --datadir "$(COMPAT_DIR)/sepolia-fjord" --l1.head "0x93ba31bf89e54237af6e6564e69d328b2b5202adf643de4cb097431f74f4a6c1" --l2.start "15378256" --l2.end "15378356" - tar jcf 
"$(COMPAT_DIR)/sepolia-fjord.tar.bz" -C "$(COMPAT_DIR)" sepolia-fjord - -capture-sepolia-granite: op-program-host op-program-client - rm -rf "$(COMPAT_DIR)/sepolia-granite" "$(COMPAT_DIR)/sepolia-granite.tar.bz" - env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $$SEPOLIA_L1URL --l1.beacon $$SEPOLIA_BEACON_URL --l2 $$SEPOLIA_L2URL --datadir "$(COMPAT_DIR)/sepolia-granite" --l1.head "0x4a4e2b07b1cb468f20b37de54308ec70d2a96453e5186b0bf0929bd5b63ca492" --l2.start "15837930" --l2.end "15838030" - tar jcf "$(COMPAT_DIR)/sepolia-granite.tar.bz" -C "$(COMPAT_DIR)" sepolia-granite - -capture-sepolia-holocene: op-program-host op-program-client - rm -rf "$(COMPAT_DIR)/sepolia-holocene" "$(COMPAT_DIR)/sepolia-holocene.tar.bz" - env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $$SEPOLIA_L1URL --l1.beacon $$SEPOLIA_BEACON_URL --l2 $$SEPOLIA_L2URL --datadir "$(COMPAT_DIR)/sepolia-holocene" --l1.head "0x0fd505af2e97a0cf59232a8615340689ee4dc14d022103f6342ba4fd6b89f066" --l2.start "20415330" --l2.end "20415430" - tar jcf "$(COMPAT_DIR)/sepolia-holocene.tar.bz" -C "$(COMPAT_DIR)" sepolia-holocene - -capture-sepolia-isthmus: op-program-host op-program-client - rm -rf "$(COMPAT_DIR)/sepolia-isthmus" "$(COMPAT_DIR)/sepolia-isthmus.tar.bz" - env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $$SEPOLIA_L1URL --l1.beacon $$SEPOLIA_BEACON_URL --l2 $$SEPOLIA_L2URL --datadir "$(COMPAT_DIR)/sepolia-isthmus" --l1.head "0x6ae443d11c3896a458f80905601920f16e6ae6cb34f070db7307195f6e23e58c" --l2.start "26551530" --l2.end "26551630" - tar jcf "$(COMPAT_DIR)/sepolia-isthmus.tar.bz" -C "$(COMPAT_DIR)" sepolia-isthmus - -capture-chain-test-data: capture-mainnet-genesis capture-sepolia-delta capture-sepolia-ecotone capture-sepolia-fjord capture-sepolia-granite capture-sepolia-holocene capture-sepolia-isthmus - -verify-mainnet-genesis: op-program-host op-program-client - ./scripts/run-compat.sh "mainnet-genesis" - -verify-sepolia-delta: op-program-host 
op-program-client - ./scripts/run-compat.sh "sepolia-delta" - -verify-sepolia-ecotone: op-program-host op-program-client - ./scripts/run-compat.sh "sepolia-ecotone" - -verify-sepolia-fjord: op-program-host op-program-client - ./scripts/run-compat.sh "sepolia-fjord" - -verify-sepolia-granite: op-program-host op-program-client - ./scripts/run-compat.sh "sepolia-granite" - -verify-sepolia-holocene: op-program-host op-program-client - ./scripts/run-compat.sh "sepolia-holocene" - -verify-sepolia-isthmus: op-program-host op-program-client - ./scripts/run-compat.sh "sepolia-isthmus" - -verify-compat: verify-mainnet-genesis verify-sepolia-delta verify-sepolia-ecotone verify-sepolia-fjord verify-sepolia-granite verify-sepolia-holocene verify-sepolia-isthmus - -analyze-op-program-client-current: - ./scripts/run-static-analysis.sh ./vm-profiles/cannon-multithreaded-64.yaml ./compatibility-test/baseline-cannon-multithreaded-64.json - -analyze-op-program-client-next: - ./scripts/run-static-analysis.sh ./vm-profiles/cannon-multithreaded-64-next.yaml ./compatibility-test/baseline-cannon-multithreaded-64-next.json - -VM_COMPAT_OUTPUT_DIR := bin/vm-compat-output - -run-vm-compat: - @rm -rf "$(VM_COMPAT_OUTPUT_DIR)" && mkdir -p "$(VM_COMPAT_OUTPUT_DIR)" - @docker build \ - --build-arg GO_VERSION=1.24.10-alpine3.21 \ - --build-arg VM_TARGET=current \ - --progress plain \ - --output "$(VM_COMPAT_OUTPUT_DIR)" \ - -f Dockerfile.vmcompat ../ - @exit $$(cat "$(VM_COMPAT_OUTPUT_DIR)/vm-compat-exit-code") - # TODO(#18334): Uncomment once vm-compat supports go1.25 - #@docker build --build-arg GO_VERSION=1.25.4-alpine3.21 --build-arg VM_TARGET=next --progress plain --output "$(VM_COMPAT_OUTPUT_DIR)" -f Dockerfile.vmcompat ../ - #@exit $$(cat "$(VM_COMPAT_OUTPUT_DIR)/vm-compat-exit-code") - -.PHONY: \ - op-program \ - op-program-host \ - op-program-client \ - op-program-client-mips \ - op-program-client-mips64 \ - op-program-client-riscv \ - clean \ - test \ - capture-mainnet-genesis \ - 
capture-sepolia-delta \ - capture-sepolia-ecotone \ - capture-sepolia-fjord \ - capture-sepolia-granite \ - capture-sepolia-holocene \ - capture-sepolia-isthmus \ - capture-chain-test-data \ - verify-mainnet-genesis \ - verify-sepolia-delta \ - verify-sepolia-ecotone \ - verify-sepolia-fjord \ - verify-sepolia-granite \ - verify-sepolia-holocene \ - verify-sepolia-isthmus \ - verify-compat \ - analyze-op-program-client \ - analyze-op-program-client-cannon-multithreaded-64 +include ../justfiles/deprecated.mk diff --git a/op-program/README.md b/op-program/README.md index 2e6fd0ed55aa2..97e9369f03a9c 100644 --- a/op-program/README.md +++ b/op-program/README.md @@ -12,7 +12,7 @@ on-chain VM as part of the dispute resolution process. To build op-program, from within the `op-program` directory run: ```shell -make op-program +just op-program ``` This resulting executable will be in `./bin/op-program` @@ -22,7 +22,7 @@ This resulting executable will be in `./bin/op-program` To run op-program unit tests, from within the `op-program` directory run: ```shell -make test +just test ``` ## Running @@ -35,11 +35,11 @@ From within the `op-program` directory, options can be reviewed with: ## Generating the Absolute Prestate -The absolute pre-state of the op-program can be generated by executing the makefile +The absolute pre-state of the op-program can be generated by executing the `reproducible-prestate` target. Effectively, this builds a docker image specified by [Dockerfile.repro](./Dockerfile.repro) with pinned dependency versions to ensure the build is reproducible. -After running `make reproducible-prestate`, the generate prestates files can be found in [./bin/](./bin/). +After running `just reproducible-prestate`, the generated prestates files can be found in [./bin/](./bin/). The `prestate-proof-*.json` files contain the absolute pre-state hash under the `.pre` key that is also used by the [contracts][ctb] deploy script. 
The `prestate-*.bin.gz` files contain the actual prestate. diff --git a/op-program/justfile b/op-program/justfile new file mode 100644 index 0000000000000..fcf443e4859e2 --- /dev/null +++ b/op-program/justfile @@ -0,0 +1,183 @@ +import '../justfiles/go.just' + +# Host ldflags: uses VERSION which can vary +_HOST_LDFLAGS := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-program/version.Version=" + VERSION + " " + \ + "-X github.com/ethereum-optimism/optimism/op-program/version.Meta=" + VERSION_META + " " + \ + "") + "'" + +# Client ldflags: ALWAYS v0.0.0 for reproducibility +_CLIENT_LDFLAGS := "'" + trim( + "-X main.GitCommit=" + GITCOMMIT + " " + \ + "-X main.GitDate=" + GITDATE + " " + \ + "-X github.com/ethereum-optimism/optimism/op-program/version.Version=v0.0.0" + " " + \ + "-X github.com/ethereum-optimism/optimism/op-program/version.Meta=" + \ + "") + "'" + +COMPAT_DIR := "temp/compat" +VM_COMPAT_OUTPUT_DIR := "bin/vm-compat-output" + +# Build all op-program binaries +op-program: op-program-host op-program-client op-program-client-mips + +# Build op-program host binary +op-program-host: (go_build "./bin/op-program" "./host/cmd/main.go" "-ldflags" _HOST_LDFLAGS) + +# Build op-program client binaries (client + interop) +op-program-client: + env GO111MODULE=on GOOS={{TARGETOS}} GOARCH={{TARGETARCH}} go build -v -ldflags {{_CLIENT_LDFLAGS}} -o ./bin/op-program-client ./client/cmd/main.go + env GO111MODULE=on GOOS={{TARGETOS}} GOARCH={{TARGETARCH}} go build -v -ldflags {{_CLIENT_LDFLAGS}} -o ./bin/op-program-client-interop ./client/interopcmd/main.go + +# Build MIPS client binaries +op-program-client-mips: op-program-client-mips64 op-program-client-mips64-interop + +# Build MIPS64 client binary +op-program-client-mips64: + env GO111MODULE=on GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -v -ldflags {{_CLIENT_LDFLAGS}} -o ./bin/op-program-client64.elf 
./client/cmd/main.go + +# Build MIPS64 interop client binary +op-program-client-mips64-interop: + env GO111MODULE=on GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -v -ldflags {{_CLIENT_LDFLAGS}} -o ./bin/op-program-client-interop.elf ./client/interopcmd/main.go + +# Build RISC-V client binary +op-program-client-riscv: + env GO111MODULE=on GOOS=linux GOARCH=riscv64 go build -v -gcflags="all=-d=softfloat" -ldflags {{_CLIENT_LDFLAGS}} -o ./bin/op-program-client-riscv.elf ./client/cmd/main.go + +# Check custom chain configurations +check-custom-chains: op-program-host + ./bin/op-program configs check-custom-chains + +# Build reproducible prestates via Docker +build-reproducible-prestate: check-custom-chains + docker build --build-arg GO_VERSION=1.24.2-alpine3.21 --build-arg EXPORT_TARGET=next --output ./bin/ --progress plain -f Dockerfile.repro ../ + docker build --build-arg GO_VERSION=1.24.2-alpine3.21 --build-arg EXPORT_TARGET=current --output ./bin/ --progress plain -f Dockerfile.repro ../ + +# Print prestate hashes +output-prestate-hash: + @echo "-------------------- op-program Production Prestates --------------------" + @printf "\nCannon64 Absolute prestate hash: \n" + @cat ./bin/prestate-proof-mt64.json | jq -r .pre + @echo "-------------------- op-program Experimental Prestates --------------------" + @printf "\nCannon64Next Absolute prestate hash: \n" + @cat ./bin/prestate-proof-mt64Next.json | jq -r .pre + @printf "\nCannonInterop Absolute prestate hash: \n" + @cat ./bin/prestate-proof-interop.json | jq -r .pre + @printf "\nCannonInteropNext Absolute prestate hash: \n" + @cat ./bin/prestate-proof-interopNext.json | jq -r .pre + @echo + +# Build reproducible prestates and print hashes +reproducible-prestate: build-reproducible-prestate output-prestate-hash + +# Verify reproducibility of prestates +verify-reproducibility: + rm -rf temp/states + ./scripts/build-prestates.sh + env GO111MODULE=on go run ./prestates/verify/verify.go --input 
temp/states/versions.json + +# Clean build artifacts +clean: + rm -rf bin "{{COMPAT_DIR}}" + +# Run tests +test: (go_test "./...") + +# Capture mainnet genesis test data +capture-mainnet-genesis: op-program-host op-program-client + rm -rf "{{COMPAT_DIR}}/mainnet-genesis" "{{COMPAT_DIR}}/mainnet-genesis.tar.bz" + env GO111MODULE=on go run ./verify/mainnet/cmd/mainnet.go --l1 $MAINNET_L1URL --l1.beacon $MAINNET_BEACON_URL --l2 $MAINNET_L2URL --datadir "{{COMPAT_DIR}}/mainnet-genesis" --l1.head "0x4903424f6cc2cfba7c2bf8c8f48ca46721c963fa64b411cfee3697b781e3e5f1" --l2.start "105235063" --l2.end "105235064" + tar jcf "{{COMPAT_DIR}}/mainnet-genesis.tar.bz" -C "{{COMPAT_DIR}}" mainnet-genesis + +# Capture sepolia delta test data +capture-sepolia-delta: op-program-host op-program-client + rm -rf "{{COMPAT_DIR}}/sepolia-delta" "{{COMPAT_DIR}}/sepolia-delta.tar.bz" + env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $SEPOLIA_L1URL --l1.beacon $SEPOLIA_BEACON_URL --l2 $SEPOLIA_L2URL --datadir "{{COMPAT_DIR}}/sepolia-delta" --l1.head "0x935428728bcfcfeb2e5ba9175fd2890e52831dae221aa4d5dcffed8320edc001" --l2.start "8728200" --l2.end "8728320" + tar jcf "{{COMPAT_DIR}}/sepolia-delta.tar.bz" -C "{{COMPAT_DIR}}" sepolia-delta + +# Capture sepolia ecotone test data +capture-sepolia-ecotone: op-program-host op-program-client + rm -rf "{{COMPAT_DIR}}/sepolia-ecotone" "{{COMPAT_DIR}}/sepolia-ecotone.tar.bz" + env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $SEPOLIA_L1URL --l1.beacon $SEPOLIA_BEACON_URL --l2 $SEPOLIA_L2URL --datadir "{{COMPAT_DIR}}/sepolia-ecotone" --l1.head "0x5d491a8c1e728a4e70720c09fefdaa083681a9421cd365af85220cf8bd4448a3" --l2.start "9205715" --l2.end "9205815" + tar jcf "{{COMPAT_DIR}}/sepolia-ecotone.tar.bz" -C "{{COMPAT_DIR}}" sepolia-ecotone + +# Capture sepolia fjord test data +capture-sepolia-fjord: op-program-host op-program-client + rm -rf "{{COMPAT_DIR}}/sepolia-fjord" "{{COMPAT_DIR}}/sepolia-fjord.tar.bz" + env GO111MODULE=on 
go run ./verify/sepolia/cmd/sepolia.go --l1 $SEPOLIA_L1URL --l1.beacon $SEPOLIA_BEACON_URL --l2 $SEPOLIA_L2URL --datadir "{{COMPAT_DIR}}/sepolia-fjord" --l1.head "0x93ba31bf89e54237af6e6564e69d328b2b5202adf643de4cb097431f74f4a6c1" --l2.start "15378256" --l2.end "15378356" + tar jcf "{{COMPAT_DIR}}/sepolia-fjord.tar.bz" -C "{{COMPAT_DIR}}" sepolia-fjord + +# Capture sepolia granite test data +capture-sepolia-granite: op-program-host op-program-client + rm -rf "{{COMPAT_DIR}}/sepolia-granite" "{{COMPAT_DIR}}/sepolia-granite.tar.bz" + env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $SEPOLIA_L1URL --l1.beacon $SEPOLIA_BEACON_URL --l2 $SEPOLIA_L2URL --datadir "{{COMPAT_DIR}}/sepolia-granite" --l1.head "0x4a4e2b07b1cb468f20b37de54308ec70d2a96453e5186b0bf0929bd5b63ca492" --l2.start "15837930" --l2.end "15838030" + tar jcf "{{COMPAT_DIR}}/sepolia-granite.tar.bz" -C "{{COMPAT_DIR}}" sepolia-granite + +# Capture sepolia holocene test data +capture-sepolia-holocene: op-program-host op-program-client + rm -rf "{{COMPAT_DIR}}/sepolia-holocene" "{{COMPAT_DIR}}/sepolia-holocene.tar.bz" + env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $SEPOLIA_L1URL --l1.beacon $SEPOLIA_BEACON_URL --l2 $SEPOLIA_L2URL --datadir "{{COMPAT_DIR}}/sepolia-holocene" --l1.head "0x0fd505af2e97a0cf59232a8615340689ee4dc14d022103f6342ba4fd6b89f066" --l2.start "20415330" --l2.end "20415430" + tar jcf "{{COMPAT_DIR}}/sepolia-holocene.tar.bz" -C "{{COMPAT_DIR}}" sepolia-holocene + +# Capture sepolia isthmus test data +capture-sepolia-isthmus: op-program-host op-program-client + rm -rf "{{COMPAT_DIR}}/sepolia-isthmus" "{{COMPAT_DIR}}/sepolia-isthmus.tar.bz" + env GO111MODULE=on go run ./verify/sepolia/cmd/sepolia.go --l1 $SEPOLIA_L1URL --l1.beacon $SEPOLIA_BEACON_URL --l2 $SEPOLIA_L2URL --datadir "{{COMPAT_DIR}}/sepolia-isthmus" --l1.head "0x6ae443d11c3896a458f80905601920f16e6ae6cb34f070db7307195f6e23e58c" --l2.start "26551530" --l2.end "26551630" + tar jcf 
"{{COMPAT_DIR}}/sepolia-isthmus.tar.bz" -C "{{COMPAT_DIR}}" sepolia-isthmus + +# Capture all chain test data +capture-chain-test-data: capture-mainnet-genesis capture-sepolia-delta capture-sepolia-ecotone capture-sepolia-fjord capture-sepolia-granite capture-sepolia-holocene capture-sepolia-isthmus + +# Verify mainnet genesis compatibility +verify-mainnet-genesis: op-program-host op-program-client + ./scripts/run-compat.sh "mainnet-genesis" + +# Verify sepolia delta compatibility +verify-sepolia-delta: op-program-host op-program-client + ./scripts/run-compat.sh "sepolia-delta" + +# Verify sepolia ecotone compatibility +verify-sepolia-ecotone: op-program-host op-program-client + ./scripts/run-compat.sh "sepolia-ecotone" + +# Verify sepolia fjord compatibility +verify-sepolia-fjord: op-program-host op-program-client + ./scripts/run-compat.sh "sepolia-fjord" + +# Verify sepolia granite compatibility +verify-sepolia-granite: op-program-host op-program-client + ./scripts/run-compat.sh "sepolia-granite" + +# Verify sepolia holocene compatibility +verify-sepolia-holocene: op-program-host op-program-client + ./scripts/run-compat.sh "sepolia-holocene" + +# Verify sepolia isthmus compatibility +verify-sepolia-isthmus: op-program-host op-program-client + ./scripts/run-compat.sh "sepolia-isthmus" + +# Verify all compatibility tests +verify-compat: verify-mainnet-genesis verify-sepolia-delta verify-sepolia-ecotone verify-sepolia-fjord verify-sepolia-granite verify-sepolia-holocene verify-sepolia-isthmus + +# Analyze current op-program client for VM compatibility +analyze-op-program-client-current: + ./scripts/run-static-analysis.sh ./vm-profiles/cannon-multithreaded-64.yaml ./compatibility-test/baseline-cannon-multithreaded-64.json + +# Analyze next op-program client for VM compatibility +analyze-op-program-client-next: + ./scripts/run-static-analysis.sh ./vm-profiles/cannon-multithreaded-64-next.yaml ./compatibility-test/baseline-cannon-multithreaded-64-next.json + +# Run VM 
compatibility tests +run-vm-compat: + @rm -rf "{{VM_COMPAT_OUTPUT_DIR}}" && mkdir -p "{{VM_COMPAT_OUTPUT_DIR}}" + @docker build \ + --build-arg GO_VERSION=1.24.10-alpine3.21 \ + --build-arg VM_TARGET=current \ + --progress plain \ + --output "{{VM_COMPAT_OUTPUT_DIR}}" \ + -f Dockerfile.vmcompat ../ + @exit $(cat "{{VM_COMPAT_OUTPUT_DIR}}/vm-compat-exit-code") + # TODO(#18334): Uncomment once vm-compat supports go1.25 + #@docker build --build-arg GO_VERSION=1.25.4-alpine3.21 --build-arg VM_TARGET=next --progress plain --output "{{VM_COMPAT_OUTPUT_DIR}}" -f Dockerfile.vmcompat ../ + #@exit $(cat "{{VM_COMPAT_OUTPUT_DIR}}/vm-compat-exit-code") diff --git a/op-program/repro.justfile b/op-program/repro.justfile index 2af6173fa469b..dbe2b08dd97fe 100644 --- a/op-program/repro.justfile +++ b/op-program/repro.justfile @@ -13,24 +13,16 @@ cannon: # in devnet scenario, the cannon binary is already built. [ -x /app/cannon/bin/cannon ] && exit 0 cd ../cannon - make cannon \ - GOOS={{GOOS}} \ - GOARCH={{GOARCH}} \ - GITCOMMIT={{GIT_COMMIT}} \ - GITDATE={{GIT_DATE}} \ - VERSION={{CANNON_VERSION}} + env GOOS={{GOOS}} GOARCH={{GOARCH}} \ + just GITCOMMIT={{GIT_COMMIT}} GITDATE={{GIT_DATE}} VERSION={{CANNON_VERSION}} cannon # Build the op-program-client elf binaries +# Note: GOOS/GOARCH/GOMIPS are hardcoded in the justfile targets, +# so they don't need to be passed here. op-program-client-mips: #!/bin/bash cd ../op-program - make op-program-client-mips \ - GOOS=linux \ - GOARCH=mips \ - GOMIPS=softfloat \ - GITCOMMIT={{GIT_COMMIT}} \ - GITDATE={{GIT_DATE}} \ - VERSION={{OP_PROGRAM_VERSION}} + just GITCOMMIT={{GIT_COMMIT}} GITDATE={{GIT_DATE}} VERSION={{OP_PROGRAM_VERSION}} op-program-client-mips # Generate the prestate proof containing the absolute pre-state hash. 
prestate TYPE CLIENT_SUFFIX PRESTATE_SUFFIX: cannon op-program-client-mips From 74c6df4cb9412a2dc648697acb2b576bdb625efe Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 13 Mar 2026 10:59:37 +1000 Subject: [PATCH 122/201] chore: migrate root Makefile to justfile (#19477) * docs(op-e2e): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(justfiles): pass Make variable overrides as env vars in deprecated shim The deprecated.mk shim was changed to pass JUSTFLAGS as just CLI variable overrides (`just VAR=val target`), but just rejects overrides for variables not declared in the justfile. This broke CI jobs where Make variable assignments propagate through sub-makes (e.g. GO_TEST_FLAGS, GUEST_PROGRAM). Revert to passing them as environment variables via `env`, which is how the shim originally worked in the cannon migration PR. Fixes: go-tests-short, sanitize-op-program CI failures Co-Authored-By: Claude Opus 4.6 * ci: re-trigger cannon-prestate build Co-Authored-By: Claude Opus 4.6 * chore: migrate root Makefile to justfile Migrate root build targets from Make to Just. The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * docs: update CONTRIBUTING.md to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix: convert make -C calls to just and restore GOPRIVATE comment All subdirectories now have justfiles with deprecated Make shims, so convert remaining make -C calls to cd && just. Also restores the explanatory comment on mod-tidy's GOPRIVATE usage. Co-Authored-By: Claude Opus 4.6 * fix: run reproducible-prestate builds in parallel just doesn't parallelize dependencies like make -j does. Use background processes with wait to run op-program and kona prestate builds concurrently. 
Co-Authored-By: Claude Opus 4.6 * fix: use subshell for cd in _go-tests-ci-internal The cd into cannon/ was changing the CWD for the rest of the script, causing gotestsum to run from cannon/ instead of the repo root. The original Makefile used $(MAKE) -C which spawns a subprocess. Use a subshell to match that behavior. Co-Authored-By: Claude Opus 4.6 * fix: use subshells for sequential cd in reproducible-prestate The bare `cd op-program` on line 180 changed cwd persistently, so the following `cd rust` tried to resolve `op-program/rust/` which doesn't exist. Wrap both in subshells to preserve the original working directory. Co-Authored-By: Claude Opus 4.6 * fix: remove devnet-sdk and kurtosis-devnet from TEST_PKGS These directories were removed in #19506 but the justfile still referenced them, causing go-tests-short-ci to fail with "lstat ./devnet-sdk/: no such file or directory". Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- CONTRIBUTING.md | 2 +- Makefile | 378 +----------------------------------------------- justfile | 356 ++++++++++++++++++++++++++++++++++++++++++++- op-e2e/justfile | 2 +- 4 files changed, 352 insertions(+), 386 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index e851085a56e44..5ecd50abecf22 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -108,7 +108,7 @@ You must install all of the required [Software Dependencies](#software-dependenc Optimism Monorepo. Once you've done so, run the following command to build: ```bash -make build +just build ``` Packages built on one branch may not be compatible with packages on a different branch. 
diff --git a/Makefile b/Makefile index ea34470a152e5..e71824901670b 100644 --- a/Makefile +++ b/Makefile @@ -1,377 +1,3 @@ -# provide JUSTFLAGS for just-backed targets -include ./justfiles/flags.mk +DEPRECATED_TARGETS := help build build-go build-contracts build-customlint lint-go lint-go-fix check-op-geth-version golang-docker docker-builder-clean docker-builder compute-git-versions cross-op-node contracts-bedrock-docker submodules op-node generate-mocks-op-node generate-mocks-op-service op-batcher op-proposer op-challenger op-dispute-mon op-supernode op-interop-filter op-program cannon reproducible-prestate-op-program reproducible-prestate-kona reproducible-prestate cannon-prestates mod-tidy clean nuke test-unit semgrep-ci op-program-client op-program-host make-pre-test go-tests go-tests-short go-tests-short-ci go-tests-ci go-tests-ci-kona-action go-tests-fraud-proofs-ci test update-op-geth -BEDROCK_TAGS_REMOTE?=origin -OP_STACK_GO_BUILDER?=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest - -# Requires at least Python v3.9; specify a minor version below if needed -PYTHON?=python3 - -help: ## Prints this help message - @grep -h -E '^[a-zA-Z0-9_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}' - -build: build-go build-contracts ## Builds Go components and contracts-bedrock -.PHONY: build - -build-go: submodules op-node op-proposer op-batcher op-challenger op-dispute-mon op-program cannon ## Builds main Go components -.PHONY: build-go - -build-contracts: - (cd packages/contracts-bedrock && just build) -.PHONY: build-contracts - -build-customlint: - make -C linter build -.PHONY: build-customlint - -lint-go: build-customlint ## Lints Go code with specific linters - ./linter/bin/op-golangci-lint run ./... - go mod tidy -diff -.PHONY: lint-go - -lint-go-fix: build-customlint ## Lints Go code with specific linters and fixes reported issues - ./linter/bin/op-golangci-lint run ./... 
--fix -.PHONY: lint-go-fix - -check-op-geth-version: ## Checks that op-geth version in go.mod is valid - go run ./ops/scripts/check-op-geth-version -.PHONY: check-op-geth-version - -golang-docker: ## Builds Docker images for Go components using buildx - # We don't use a buildx builder here, and just load directly into regular docker, for convenience. - GIT_COMMIT=$$(git rev-parse HEAD) \ - GIT_DATE=$$(git show -s --format='%ct') \ - IMAGE_TAGS=$$(git rev-parse HEAD),latest \ - docker buildx bake \ - --progress plain \ - --load \ - -f docker-bake.hcl \ - op-node op-batcher op-proposer op-challenger op-dispute-mon op-supervisor -.PHONY: golang-docker - -docker-builder-clean: ## Removes the Docker buildx builder - docker buildx rm buildx-build -.PHONY: docker-builder-clean - -docker-builder: ## Creates a Docker buildx builder - docker buildx create \ - --driver=docker-container --name=buildx-build --bootstrap --use -.PHONY: docker-builder - -compute-git-versions: ## Computes GIT_VERSION for all images and outputs JSON - @GIT_COMMIT=$$(git rev-parse HEAD) ./ops/scripts/compute-git-versions.sh -.PHONY: compute-git-versions - -# add --print to dry-run -cross-op-node: ## Builds cross-platform Docker image for op-node - # We don't use a buildx builder here, and just load directly into regular docker, for convenience. 
- GIT_COMMIT=$$(git rev-parse HEAD) \ - GIT_DATE=$$(git show -s --format='%ct') \ - IMAGE_TAGS=$$(git rev-parse HEAD),latest \ - PLATFORMS="linux/arm64" \ - GIT_VERSION=$(shell tags=$$(git tag --points-at $(GITCOMMIT) | grep '^op-node/' | sed 's/op-node\///' | sort -V); \ - preferred_tag=$$(echo "$$tags" | grep -v -- '-rc' | tail -n 1); \ - if [ -z "$$preferred_tag" ]; then \ - if [ -z "$$tags" ]; then \ - echo "untagged"; \ - else \ - echo "$$tags" | tail -n 1; \ - fi \ - else \ - echo $$preferred_tag; \ - fi) \ - docker buildx bake \ - --progress plain \ - --builder=buildx-build \ - --load \ - --no-cache \ - -f docker-bake.hcl \ - op-node -.PHONY: cross-op-node - -contracts-bedrock-docker: ## Builds Docker image for Bedrock contracts - IMAGE_TAGS=$$(git rev-parse HEAD),latest \ - docker buildx bake \ - --progress plain \ - --load \ - -f docker-bake.hcl \ - contracts-bedrock -.PHONY: contracts-bedrock-docker - -submodules: ## Updates git submodules - git submodule update --init --recursive -.PHONY: submodules - - -op-node: ## Builds op-node binary - just $(JUSTFLAGS) ./op-node/op-node -.PHONY: op-node - -generate-mocks-op-node: ## Generates mocks for op-node - make -C ./op-node generate-mocks -.PHONY: generate-mocks-op-node - -generate-mocks-op-service: ## Generates mocks for op-service - make -C ./op-service generate-mocks -.PHONY: generate-mocks-op-service - -op-batcher: ## Builds op-batcher binary - just $(JUSTFLAGS) ./op-batcher/op-batcher -.PHONY: op-batcher - -op-proposer: ## Builds op-proposer binary - just $(JUSTFLAGS) ./op-proposer/op-proposer -.PHONY: op-proposer - -op-challenger: ## Builds op-challenger binary - make -C ./op-challenger op-challenger -.PHONY: op-challenger - -op-dispute-mon: ## Builds op-dispute-mon binary - make -C ./op-dispute-mon op-dispute-mon -.PHONY: op-dispute-mon - -op-supernode: ## Builds op-supernode binary - just $(JUSTFLAGS) ./op-supernode/op-supernode -.PHONY: op-supernode - -op-interop-filter: ## Builds op-interop-filter 
binary - just $(JUSTFLAGS) ./op-interop-filter/op-interop-filter -.PHONY: op-interop-filter - -op-program: ## Builds op-program binary - make -C ./op-program op-program -.PHONY: op-program - -cannon: ## Builds cannon binary - make -C ./cannon cannon -.PHONY: cannon - -reproducible-prestate-op-program: - make -C ./op-program build-reproducible-prestate -.PHONY: reproducible-prestate-op-program - -reproducible-prestate-kona: - cd rust && just build-kona-reproducible-prestate -.PHONY: reproducible-prestate-kona - -reproducible-prestate: reproducible-prestate-op-program reproducible-prestate-kona ## Builds reproducible prestates for op-program and kona - # Output the prestate hashes after all the builds complete so they are easy to find at the end of the build logs. - make -C ./op-program output-prestate-hash - cd rust && just output-kona-prestate-hash -.PHONY: reproducible-prestate - -cannon-prestates: cannon op-program - go run ./op-program/builder/main.go build-all-prestates -.PHONY: cannon-prestates - -mod-tidy: ## Cleans up unused dependencies in Go modules - # Below GOPRIVATE line allows mod-tidy to be run immediately after - # releasing new versions. This bypasses the Go modules proxy, which - # can take a while to index new versions. - # - # See https://proxy.golang.org/ for more info. 
- export GOPRIVATE="github.com/ethereum-optimism" && go mod tidy -.PHONY: mod-tidy - -clean: ## Removes all generated files under bin/ - rm -rf ./bin - cd packages/contracts-bedrock/ && forge clean -.PHONY: clean - -nuke: clean ## Completely clean the project directory - git clean -Xdf -.PHONY: nuke - -test-unit: ## Runs unit tests for individual components - make -C ./op-node test - make -C ./op-proposer test - make -C ./op-batcher test - make -C ./op-e2e test - (cd packages/contracts-bedrock && just test) -.PHONY: test-unit - -# Remove the baseline-commit to generate a base reading & show all issues -semgrep: ## Runs Semgrep checks - $(eval DEV_REF := $(shell git rev-parse develop)) - SEMGREP_REPO_NAME=ethereum-optimism/optimism semgrep ci --baseline-commit=$(DEV_REF) -.PHONY: semgrep - -op-program-client: ## Builds op-program-client binary - make -C ./op-program op-program-client -.PHONY: op-program-client - -op-program-host: ## Builds op-program-host binary - make -C ./op-program op-program-host -.PHONY: op-program-host - -make-pre-test: ## Makes pre-test setup - make -C ./op-e2e pre-test -.PHONY: make-pre-test - -# Common prerequisites and package list for Go tests -TEST_DEPS := op-program-client op-program-host cannon build-contracts cannon-prestates make-pre-test - -# Excludes: op-validator, op-deployer/pkg/{validation,deployer/{bootstrap,manage,opcm,pipeline,upgrade}} (need RPC) -TEST_PKGS := \ - ./op-alt-da/... \ - ./op-batcher/... \ - ./op-chain-ops/... \ - ./op-node/... \ - ./op-proposer/... \ - ./op-challenger/... \ - ./op-faucet/... \ - ./op-dispute-mon/... \ - ./op-conductor/... \ - ./op-program/... \ - ./op-service/... \ - ./op-supervisor/... \ - ./op-test-sequencer/... \ - ./op-fetcher/... \ - ./op-e2e/system/... \ - ./op-e2e/e2eutils/... \ - ./op-e2e/opgeth/... \ - ./op-e2e/interop/... 
\ - ./op-e2e/actions/altda \ - ./op-e2e/actions/batcher \ - ./op-e2e/actions/derivation \ - ./op-e2e/actions/helpers \ - ./op-e2e/actions/interop \ - ./op-e2e/actions/proofs \ - ./op-e2e/actions/proposer \ - ./op-e2e/actions/safedb \ - ./op-e2e/actions/sequencer \ - ./op-e2e/actions/sync \ - ./op-e2e/actions/upgrades \ - ./packages/contracts-bedrock/scripts/checks/... \ - ./op-dripper/... \ - ./op-devstack/... \ - ./op-deployer/pkg/deployer/artifacts/... \ - ./op-deployer/pkg/deployer/broadcaster/... \ - ./op-deployer/pkg/deployer/clean/... \ - ./op-deployer/pkg/deployer/integration_test/ \ - ./op-deployer/pkg/deployer/integration_test/cli/... \ - ./op-deployer/pkg/deployer/standard/... \ - ./op-deployer/pkg/deployer/state/... \ - ./op-deployer/pkg/deployer/verify/... \ - ./op-sync-tester/... \ - ./op-supernode/... - -FRAUD_PROOF_TEST_PKGS := \ - ./op-e2e/faultproofs/... - -# Includes: op-validator, op-deployer/pkg/{bootstrap,manage,opcm,pipeline,upgrade} (need RPC) -RPC_TEST_PKGS := \ - ./op-validator/pkg/validations/... \ - ./op-deployer/pkg/deployer/bootstrap/... \ - ./op-deployer/pkg/deployer/manage/... \ - ./op-deployer/pkg/deployer/opcm/... \ - ./op-deployer/pkg/deployer/pipeline/... \ - ./op-deployer/pkg/deployer/upgrade/... 
- -# All test packages used by CI (combination of all package groups) -ALL_TEST_PACKAGES := $(TEST_PKGS) $(RPC_TEST_PKGS) $(FRAUD_PROOF_TEST_PKGS) - -# Common test environment variables -# For setting PARALLEL, nproc is for linux, sysctl for Mac and then fallback to 4 if neither is available -define DEFAULT_TEST_ENV_VARS -export ENABLE_KURTOSIS=true && \ -export OP_E2E_CANNON_ENABLED="false" && \ -export OP_E2E_USE_HTTP=true && \ -export ENABLE_ANVIL=true && \ -export PARALLEL=$$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) -endef - -# Additional CI-specific environment variables -define CI_ENV_VARS -export OP_TESTLOG_FILE_LOGGER_OUTDIR=$$(realpath ./tmp/testlogs) && \ -export SEPOLIA_RPC_URL="https://ci-sepolia-l1-archive.optimism.io" && \ -export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" && \ -export NAT_INTEROP_LOADTEST_TARGET=10 && \ -export NAT_INTEROP_LOADTEST_TIMEOUT=30s -endef - -# Test timeout (can be overridden via environment) -TEST_TIMEOUT ?= 10m - -go-tests: $(TEST_DEPS) ## Runs comprehensive Go tests across all packages (cached for fast repeated runs) - $(DEFAULT_TEST_ENV_VARS) && \ - go test -parallel=$$PARALLEL -timeout=$(TEST_TIMEOUT) $(TEST_PKGS) -.PHONY: go-tests - -go-tests-short: $(TEST_DEPS) ## Runs comprehensive Go tests with -short flag - $(DEFAULT_TEST_ENV_VARS) && \ - go test -short -parallel=$$PARALLEL -timeout=$(TEST_TIMEOUT) $(TEST_PKGS) -.PHONY: go-tests-short - -# Internal target for running Go tests with gotestsum for CI -# Usage: make _go-tests-ci-internal GO_TEST_FLAGS="-short" -_go-tests-ci-internal: - $(MAKE) -C cannon cannon elf # Required for cannon/provider_test TestLastStepCacheAccuracy - @echo "Setting up test directories..." - mkdir -p ./tmp/test-results ./tmp/testlogs - @echo "Running Go tests with gotestsum..." 
- $(DEFAULT_TEST_ENV_VARS) && \ - $(CI_ENV_VARS) && \ - if [ -n "$$CIRCLE_NODE_TOTAL" ] && [ "$$CIRCLE_NODE_TOTAL" -gt 1 ]; then \ - export NODE_INDEX=$${CIRCLE_NODE_INDEX:-0} && \ - export NODE_TOTAL=$${CIRCLE_NODE_TOTAL:-1} && \ - export PARALLEL_PACKAGES=$$(echo "$(ALL_TEST_PACKAGES)" | tr ' ' '\n' | awk -v idx=$$NODE_INDEX -v total=$$NODE_TOTAL 'NR % total == idx' | tr '\n' ' ') && \ - if [ -n "$$PARALLEL_PACKAGES" ]; then \ - echo "Node $$NODE_INDEX/$$NODE_TOTAL running packages: $$PARALLEL_PACKAGES"; \ - gotestsum --format=testname \ - --junitfile=./tmp/test-results/results-$$NODE_INDEX.xml \ - --jsonfile=./tmp/testlogs/log-$$NODE_INDEX.json \ - --rerun-fails=3 \ - --rerun-fails-max-failures=50 \ - --packages="$$PARALLEL_PACKAGES" \ - -- -parallel=$$PARALLEL -coverprofile=coverage-$$NODE_INDEX.out $(GO_TEST_FLAGS) -timeout=$(TEST_TIMEOUT) -tags="ci"; \ - else \ - echo "ERROR: Node $$NODE_INDEX/$$NODE_TOTAL has no packages to run! Perhaps parallelism is set too high? (ALL_TEST_PACKAGES has $$(echo '$(ALL_TEST_PACKAGES)' | wc -w) packages)"; \ - exit 1; \ - fi; \ - else \ - gotestsum --format=testname \ - --junitfile=./tmp/test-results/results.xml \ - --jsonfile=./tmp/testlogs/log.json \ - --rerun-fails=3 \ - --rerun-fails-max-failures=50 \ - --packages="$(ALL_TEST_PACKAGES)" \ - -- -parallel=$$PARALLEL -coverprofile=coverage.out $(GO_TEST_FLAGS) -timeout=$(TEST_TIMEOUT) -tags="ci"; \ - fi -.PHONY: _go-tests-ci-internal - -go-tests-short-ci: ## Runs short Go tests with gotestsum for CI (assumes deps built by CI) - $(MAKE) _go-tests-ci-internal GO_TEST_FLAGS="-short" -.PHONY: go-tests-short-ci - -go-tests-ci: ## Runs comprehensive Go tests with gotestsum for CI (assumes deps built by CI) - $(MAKE) _go-tests-ci-internal GO_TEST_FLAGS="" -.PHONY: go-tests-ci - -go-tests-ci-kona-action: ## Runs action tests for kona with gotestsum for CI (assumes deps built by CI) - $(MAKE) _go-tests-ci-internal GO_TEST_FLAGS="-count=1 -timeout 60m -run Test_ProgramAction" -.PHONY: 
go-tests-ci-kona-action - -go-tests-fraud-proofs-ci: ## Runs fraud proofs Go tests with gotestsum for CI (assumes deps built by CI) - @echo "Setting up test directories..." - mkdir -p ./tmp/test-results ./tmp/testlogs - @echo "Running Go tests with gotestsum..." - $(DEFAULT_TEST_ENV_VARS) && \ - $(CI_ENV_VARS) && \ - export OP_E2E_CANNON_ENABLED="true" && \ - gotestsum --format=testname \ - --junitfile=./tmp/test-results/results.xml \ - --jsonfile=./tmp/testlogs/log.json \ - --rerun-fails=3 \ - --rerun-fails-max-failures=50 \ - --packages="$(FRAUD_PROOF_TEST_PKGS)" \ - -- -parallel=$$PARALLEL -coverprofile=coverage.out -timeout=$(TEST_TIMEOUT) -.PHONY: go-tests-fraud-proofs-ci - -test: go-tests ## Runs comprehensive Go tests (alias for go-tests) -.PHONY: test - -update-op-geth: ## Updates the Geth version used in the project - ./ops/scripts/update-op-geth.py -.PHONY: update-op-geth +include ./justfiles/deprecated.mk diff --git a/justfile b/justfile index d5ed8c55affa0..8dab027f3226c 100644 --- a/justfile +++ b/justfile @@ -1,3 +1,347 @@ +import 'justfiles/git.just' + +BEDROCK_TAGS_REMOTE := env('BEDROCK_TAGS_REMOTE', 'origin') +OP_STACK_GO_BUILDER := env('OP_STACK_GO_BUILDER', 'us-docker.pkg.dev/oplabs-tools-artifacts/images/op-stack-go:latest') +PYTHON := env('PYTHON', 'python3') + +TEST_TIMEOUT := env('TEST_TIMEOUT', '10m') + +TEST_PKGS := "./op-alt-da/... ./op-batcher/... ./op-chain-ops/... ./op-node/... ./op-proposer/... ./op-challenger/... ./op-faucet/... ./op-dispute-mon/... ./op-conductor/... ./op-program/... ./op-service/... ./op-supervisor/... ./op-test-sequencer/... ./op-fetcher/... ./op-e2e/system/... ./op-e2e/e2eutils/... ./op-e2e/opgeth/... ./op-e2e/interop/... 
./op-e2e/actions/altda ./op-e2e/actions/batcher ./op-e2e/actions/derivation ./op-e2e/actions/helpers ./op-e2e/actions/interop ./op-e2e/actions/proofs ./op-e2e/actions/proposer ./op-e2e/actions/safedb ./op-e2e/actions/sequencer ./op-e2e/actions/sync ./op-e2e/actions/upgrades ./packages/contracts-bedrock/scripts/checks/... ./op-dripper/... ./op-devstack/... ./op-deployer/pkg/deployer/artifacts/... ./op-deployer/pkg/deployer/broadcaster/... ./op-deployer/pkg/deployer/clean/... ./op-deployer/pkg/deployer/integration_test/ ./op-deployer/pkg/deployer/integration_test/cli/... ./op-deployer/pkg/deployer/standard/... ./op-deployer/pkg/deployer/state/... ./op-deployer/pkg/deployer/verify/... ./op-sync-tester/... ./op-supernode/..." + +FRAUD_PROOF_TEST_PKGS := "./op-e2e/faultproofs/..." + +RPC_TEST_PKGS := "./op-validator/pkg/validations/... ./op-deployer/pkg/deployer/bootstrap/... ./op-deployer/pkg/deployer/manage/... ./op-deployer/pkg/deployer/opcm/... ./op-deployer/pkg/deployer/pipeline/... ./op-deployer/pkg/deployer/upgrade/..." + +ALL_TEST_PACKAGES := TEST_PKGS + " " + RPC_TEST_PKGS + " " + FRAUD_PROOF_TEST_PKGS + +# Lists all available targets. +help: + @just --list + +# Builds Go components and contracts-bedrock. +build: build-go build-contracts + +# Builds main Go components. +build-go: submodules op-node op-proposer op-batcher op-challenger op-dispute-mon op-program cannon + +# Builds contracts-bedrock. +build-contracts: + cd packages/contracts-bedrock && just build + +# Builds the custom linter. +build-customlint: + cd linter && just build + +# Lints Go code with specific linters. +lint-go: build-customlint + ./linter/bin/op-golangci-lint run ./... + go mod tidy -diff + +# Lints Go code with specific linters and fixes reported issues. +lint-go-fix: build-customlint + ./linter/bin/op-golangci-lint run ./... --fix + +# Checks that op-geth version in go.mod is valid. 
+check-op-geth-version: + go run ./ops/scripts/check-op-geth-version + +# Builds Docker images for Go components using buildx. +[script('bash')] +golang-docker: + set -euo pipefail + GIT_COMMIT=$(git rev-parse HEAD) \ + GIT_DATE=$(git show -s --format='%ct') \ + IMAGE_TAGS=$(git rev-parse HEAD),latest \ + docker buildx bake \ + --progress plain \ + --load \ + -f docker-bake.hcl \ + op-node op-batcher op-proposer op-challenger op-dispute-mon op-supervisor + +# Removes the Docker buildx builder. +docker-builder-clean: + docker buildx rm buildx-build + +# Creates a Docker buildx builder. +docker-builder: + docker buildx create \ + --driver=docker-container --name=buildx-build --bootstrap --use + +# Computes GIT_VERSION for all images and outputs JSON. +[script('bash')] +compute-git-versions: + GIT_COMMIT=$(git rev-parse HEAD) ./ops/scripts/compute-git-versions.sh + +# Builds cross-platform Docker image for op-node. +[script('bash')] +cross-op-node: + set -euo pipefail + GITCOMMIT=$(git rev-parse HEAD) + tags=$(git tag --points-at "$GITCOMMIT" | grep '^op-node/' | sed 's/op-node\///' | sort -V) + preferred_tag=$(echo "$tags" | grep -v -- '-rc' | tail -n 1) + if [ -z "$preferred_tag" ]; then + if [ -z "$tags" ]; then + GIT_VERSION="untagged" + else + GIT_VERSION=$(echo "$tags" | tail -n 1) + fi + else + GIT_VERSION="$preferred_tag" + fi + GIT_COMMIT="$GITCOMMIT" \ + GIT_DATE=$(git show -s --format='%ct') \ + IMAGE_TAGS=$(git rev-parse HEAD),latest \ + PLATFORMS="linux/arm64" \ + GIT_VERSION="$GIT_VERSION" \ + docker buildx bake \ + --progress plain \ + --builder=buildx-build \ + --load \ + --no-cache \ + -f docker-bake.hcl \ + op-node + +# Builds Docker image for Bedrock contracts. +[script('bash')] +contracts-bedrock-docker: + set -euo pipefail + IMAGE_TAGS=$(git rev-parse HEAD),latest \ + docker buildx bake \ + --progress plain \ + --load \ + -f docker-bake.hcl \ + contracts-bedrock + +# Updates git submodules. 
+submodules: + git submodule update --init --recursive + +# Builds op-node binary. +op-node: + just ./op-node/op-node + +# Generates mocks for op-node. +generate-mocks-op-node: + cd op-node && just generate-mocks + +# Generates mocks for op-service. +generate-mocks-op-service: + cd op-service && just generate-mocks + +# Builds op-batcher binary. +op-batcher: + just ./op-batcher/op-batcher + +# Builds op-proposer binary. +op-proposer: + just ./op-proposer/op-proposer + +# Builds op-challenger binary. +op-challenger: + cd op-challenger && just op-challenger + +# Builds op-dispute-mon binary. +op-dispute-mon: + cd op-dispute-mon && just op-dispute-mon + +# Builds op-supernode binary. +op-supernode: + just ./op-supernode/op-supernode + +# Builds op-interop-filter binary. +op-interop-filter: + just ./op-interop-filter/op-interop-filter + +# Builds op-program binary. +op-program: + cd op-program && just op-program + +# Builds cannon binary. +cannon: + cd cannon && just cannon + +# Builds reproducible prestate for op-program. +reproducible-prestate-op-program: + cd op-program && just build-reproducible-prestate + +# Builds reproducible prestate for kona. +reproducible-prestate-kona: + cd rust && just build-kona-reproducible-prestate + +# Builds reproducible prestates for op-program and kona in parallel. +[script('bash')] +reproducible-prestate: + set -euo pipefail + (cd op-program && just build-reproducible-prestate) & + pid1=$! + (cd rust && just build-kona-reproducible-prestate) & + pid2=$! + wait "$pid1" "$pid2" + (cd op-program && just output-prestate-hash) + (cd rust && just output-kona-prestate-hash) + +# Builds cannon prestates. +cannon-prestates: cannon op-program + go run ./op-program/builder/main.go build-all-prestates + +# Cleans up unused dependencies in Go modules. +# Bypasses the Go module proxy for freshly released versions. +# See https://proxy.golang.org/ for more info. 
+mod-tidy: + GOPRIVATE="github.com/ethereum-optimism" go mod tidy + +# Removes all generated files under bin/. +clean: + rm -rf ./bin + cd packages/contracts-bedrock/ && forge clean + +# Completely clean the project directory. +nuke: clean + git clean -Xdf + +# Runs unit tests for individual components. +test-unit: + cd op-node && just test + cd op-proposer && just test + cd op-batcher && just test + cd op-e2e && just test + cd packages/contracts-bedrock && just test + +# Runs semgrep on the entire monorepo. +semgrep: + semgrep scan --config .semgrep/rules/ --error . + +# Runs semgrep CI checks against develop baseline. +[script('bash')] +semgrep-ci: + set -euo pipefail + DEV_REF=$(git rev-parse develop) + SEMGREP_REPO_NAME=ethereum-optimism/optimism semgrep ci --baseline-commit="$DEV_REF" + +# Builds op-program-client binary. +op-program-client: + cd op-program && just op-program-client + +# Builds op-program-host binary. +op-program-host: + cd op-program && just op-program-host + +# Makes pre-test setup. +make-pre-test: + cd op-e2e && just pre-test + +# Runs comprehensive Go tests across all packages. +[script('bash')] +go-tests: op-program-client op-program-host cannon build-contracts cannon-prestates make-pre-test + set -euo pipefail + export ENABLE_KURTOSIS=true + export OP_E2E_CANNON_ENABLED="false" + export OP_E2E_USE_HTTP=true + export ENABLE_ANVIL=true + export PARALLEL=$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + go test -parallel="$PARALLEL" -timeout={{TEST_TIMEOUT}} {{TEST_PKGS}} + +# Runs comprehensive Go tests with -short flag. 
+[script('bash')] +go-tests-short: op-program-client op-program-host cannon build-contracts cannon-prestates make-pre-test + set -euo pipefail + export ENABLE_KURTOSIS=true + export OP_E2E_CANNON_ENABLED="false" + export OP_E2E_USE_HTTP=true + export ENABLE_ANVIL=true + export PARALLEL=$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + go test -short -parallel="$PARALLEL" -timeout={{TEST_TIMEOUT}} {{TEST_PKGS}} + +# Internal: runs Go tests with gotestsum for CI. +[script('bash')] +_go-tests-ci-internal go_test_flags="": + set -euo pipefail + (cd cannon && just cannon elf) + echo "Setting up test directories..." + mkdir -p ./tmp/test-results ./tmp/testlogs + echo "Running Go tests with gotestsum..." + export ENABLE_KURTOSIS=true + export OP_E2E_CANNON_ENABLED="false" + export OP_E2E_USE_HTTP=true + export ENABLE_ANVIL=true + export PARALLEL=$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + export OP_TESTLOG_FILE_LOGGER_OUTDIR=$(realpath ./tmp/testlogs) + export SEPOLIA_RPC_URL="https://ci-sepolia-l1-archive.optimism.io" + export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" + export NAT_INTEROP_LOADTEST_TARGET=10 + export NAT_INTEROP_LOADTEST_TIMEOUT=30s + ALL_PACKAGES="{{ALL_TEST_PACKAGES}}" + if [ -n "${CIRCLE_NODE_TOTAL:-}" ] && [ "$CIRCLE_NODE_TOTAL" -gt 1 ]; then + NODE_INDEX=${CIRCLE_NODE_INDEX:-0} + NODE_TOTAL=${CIRCLE_NODE_TOTAL:-1} + PARALLEL_PACKAGES=$(echo "$ALL_PACKAGES" | tr ' ' '\n' | awk -v idx="$NODE_INDEX" -v total="$NODE_TOTAL" 'NR % total == idx' | tr '\n' ' ') + if [ -n "$PARALLEL_PACKAGES" ]; then + echo "Node $NODE_INDEX/$NODE_TOTAL running packages: $PARALLEL_PACKAGES" + gotestsum --format=testname \ + --junitfile=./tmp/test-results/results-"$NODE_INDEX".xml \ + --jsonfile=./tmp/testlogs/log-"$NODE_INDEX".json \ + --rerun-fails=3 \ + --rerun-fails-max-failures=50 \ + --packages="$PARALLEL_PACKAGES" \ + -- -parallel="$PARALLEL" -coverprofile=coverage-"$NODE_INDEX".out {{go_test_flags}} 
-timeout={{TEST_TIMEOUT}} -tags="ci" + else + echo "ERROR: Node $NODE_INDEX/$NODE_TOTAL has no packages to run! Perhaps parallelism is set too high? (ALL_TEST_PACKAGES has $(echo "$ALL_PACKAGES" | wc -w) packages)" + exit 1 + fi + else + gotestsum --format=testname \ + --junitfile=./tmp/test-results/results.xml \ + --jsonfile=./tmp/testlogs/log.json \ + --rerun-fails=3 \ + --rerun-fails-max-failures=50 \ + --packages="$ALL_PACKAGES" \ + -- -parallel="$PARALLEL" -coverprofile=coverage.out {{go_test_flags}} -timeout={{TEST_TIMEOUT}} -tags="ci" + fi + +# Runs short Go tests with gotestsum for CI. +go-tests-short-ci: + just _go-tests-ci-internal "-short" + +# Runs comprehensive Go tests with gotestsum for CI. +go-tests-ci: + just _go-tests-ci-internal "" + +# Runs action tests for kona with gotestsum for CI. +go-tests-ci-kona-action: + just _go-tests-ci-internal "-count=1 -timeout 60m -run Test_ProgramAction" + +# Runs fraud proofs Go tests with gotestsum for CI. +[script('bash')] +go-tests-fraud-proofs-ci: + set -euo pipefail + echo "Setting up test directories..." + mkdir -p ./tmp/test-results ./tmp/testlogs + echo "Running Go tests with gotestsum..." 
+ export ENABLE_KURTOSIS=true + export OP_E2E_CANNON_ENABLED="true" + export OP_E2E_USE_HTTP=true + export ENABLE_ANVIL=true + export PARALLEL=$(nproc 2>/dev/null || sysctl -n hw.ncpu 2>/dev/null || echo 4) + export OP_TESTLOG_FILE_LOGGER_OUTDIR=$(realpath ./tmp/testlogs) + export SEPOLIA_RPC_URL="https://ci-sepolia-l1-archive.optimism.io" + export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" + export NAT_INTEROP_LOADTEST_TARGET=10 + export NAT_INTEROP_LOADTEST_TIMEOUT=30s + gotestsum --format=testname \ + --junitfile=./tmp/test-results/results.xml \ + --jsonfile=./tmp/testlogs/log.json \ + --rerun-fails=3 \ + --rerun-fails-max-failures=50 \ + --packages="{{FRAUD_PROOF_TEST_PKGS}}" \ + -- -parallel="$PARALLEL" -coverprofile=coverage.out -timeout={{TEST_TIMEOUT}} + +# Runs comprehensive Go tests (alias for go-tests). +test: go-tests + +# Updates the Geth version used in the project. +update-op-geth: + ./ops/scripts/update-op-geth.py + # Build all Rust binaries (release) for sysgo tests. build-rust-release: cd rust && cargo build --release --bin kona-node @@ -12,10 +356,6 @@ check-nut-locks: todo-checker: ./ops/scripts/todo-checker.sh -# Runs semgrep on the entire monorepo. -semgrep: - semgrep scan --config .semgrep/rules/ --error . - # Runs semgrep tests. 
semgrep-test: semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ @@ -37,11 +377,11 @@ latest-versions: ./ops/scripts/latest-versions.sh # Usage: -# just update-op-geth 2f0528b -# just update-op-geth v1.101602.4 -# just update-op-geth optimism +# just update-op-geth-ref 2f0528b +# just update-op-geth-ref v1.101602.4 +# just update-op-geth-ref optimism [script('bash')] -update-op-geth ref: +update-op-geth-ref ref: set -euo pipefail ref="{{ref}}" if [ -z "$ref" ]; then echo "error: provide a hash/tag/branch"; exit 1; fi diff --git a/op-e2e/justfile b/op-e2e/justfile index 72002fe4b54b6..2fe97ed1098e4 100644 --- a/op-e2e/justfile +++ b/op-e2e/justfile @@ -49,7 +49,7 @@ test-fault-proofs: pre-test # Build cannon prestates cannon-prestates: - make -C .. cannon-prestates + cd .. && just cannon-prestates # Pre-test setup pre-test: pre-test-cannon From e78a35e39c98e89011848b2a79065a7b59bdb4d1 Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 13 Mar 2026 11:28:42 +1000 Subject: [PATCH 123/201] ci: update CircleCI config to use just instead of make (#19482) * docs(op-e2e): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(justfiles): pass Make variable overrides as env vars in deprecated shim The deprecated.mk shim was changed to pass JUSTFLAGS as just CLI variable overrides (`just VAR=val target`), but just rejects overrides for variables not declared in the justfile. This broke CI jobs where Make variable assignments propagate through sub-makes (e.g. GO_TEST_FLAGS, GUEST_PROGRAM). Revert to passing them as environment variables via `env`, which is how the shim originally worked in the cannon migration PR. Fixes: go-tests-short, sanitize-op-program CI failures Co-Authored-By: Claude Opus 4.6 * ci: re-trigger cannon-prestate build Co-Authored-By: Claude Opus 4.6 * chore: migrate root Makefile to justfile Migrate root build targets from Make to Just. 
The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * docs: update CONTRIBUTING.md to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix: convert make -C calls to just and restore GOPRIVATE comment All subdirectories now have justfiles with deprecated Make shims, so convert remaining make -C calls to cd && just. Also restores the explanatory comment on mod-tidy's GOPRIVATE usage. Co-Authored-By: Claude Opus 4.6 * fix: run reproducible-prestate builds in parallel just doesn't parallelize dependencies like make -j does. Use background processes with wait to run op-program and kona prestate builds concurrently. Co-Authored-By: Claude Opus 4.6 * fix: use subshell for cd in _go-tests-ci-internal The cd into cannon/ was changing the CWD for the rest of the script, causing gotestsum to run from cannon/ instead of the repo root. The original Makefile used $(MAKE) -C which spawns a subprocess. Use a subshell to match that behavior. Co-Authored-By: Claude Opus 4.6 * fix: use subshells for sequential cd in reproducible-prestate The bare `cd op-program` on line 180 changed cwd persistently, so the following `cd rust` tried to resolve `op-program/rust/` which doesn't exist. Wrap both in subshells to preserve the original working directory. Co-Authored-By: Claude Opus 4.6 * fix: remove devnet-sdk and kurtosis-devnet from TEST_PKGS These directories were removed in #19506 but the justfile still referenced them, causing go-tests-short-ci to fail with "lstat ./devnet-sdk/: no such file or directory". Co-Authored-By: Claude Opus 4.6 * ci: update CircleCI config to use just instead of make Migrates all CircleCI make invocations to just for targets that have been migrated to justfiles. Remaining make calls are for packages not yet migrated (op-challenger, op-node, op-service, op-chain-ops fuzz targets) and cannon/testdata which has its own Makefile. 
Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .circleci/continue/main.yml | 44 ++++++++++++++++++---------------- .circleci/continue/rust-ci.yml | 2 +- 2 files changed, 24 insertions(+), 22 deletions(-) diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index aabb4c2a505ee..ba57aed438e3c 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -941,7 +941,7 @@ jobs: - run: name: Cannon Go lint command: | - make lint + just lint working_directory: cannon - run: name: Cannon Go 64-bit tests @@ -1804,7 +1804,7 @@ jobs: - run: name: run Go linter command: | - make lint-go + just lint-go - save_cache: key: golangci-v1-{{ checksum ".golangci.yaml" }} paths: @@ -1822,7 +1822,7 @@ jobs: namespace: sysgo-go-binaries - run: name: Build Go binaries for sysgo - command: make cannon op-program + command: just cannon op-program - go-save-cache: namespace: sysgo-go-binaries - persist_to_workspace: @@ -1843,7 +1843,7 @@ jobs: - run: name: check op-geth version command: | - make check-op-geth-version + just check-op-geth-version check-nut-locks: docker: @@ -1902,7 +1902,7 @@ jobs: - restore_cache: key: go-tests-v2-{{ checksum "go.mod" }} - run: - name: Run Go tests via Makefile + name: Run Go tests no_output_timeout: <> command: | <> @@ -1910,7 +1910,7 @@ jobs: # set to less than number CPUs (xlarge Docker is 16 CPU) so there's some buffer for things # like Geth export PARALLEL=12 - make <> + just <> - save_cache: key: go-tests-v2-{{ checksum "go.mod" }} paths: @@ -1977,22 +1977,22 @@ jobs: at: . 
- run: name: build op-program-client - command: make op-program-client + command: just op-program-client working_directory: op-program - run: name: build op-program-host - command: make op-program-host + command: just op-program-host working_directory: op-program - run: name: build cannon - command: make cannon + command: just cannon - run: name: run tests no_output_timeout: <> command: | <> export TEST_TIMEOUT=<> - make go-tests-fraud-proofs-ci + just go-tests-fraud-proofs-ci - codecov/upload: disable_search: true files: ./coverage.out @@ -2365,13 +2365,13 @@ jobs: sudo apt-get install -y binutils-mips-linux-gnu - run: name: Build cannon - command: make cannon + command: just cannon - run: name: Build op-program - command: make op-program + command: just op-program - run: name: Sanitize op-program client - command: make sanitize-program GUEST_PROGRAM=../op-program/bin/op-program-client64.elf + command: GUEST_PROGRAM=../op-program/bin/op-program-client64.elf just sanitize-program working_directory: cannon cannon-prestate: @@ -2383,7 +2383,7 @@ jobs: enable-mise-cache: true - run: name: Build prestates - command: make -j reproducible-prestate + command: just reproducible-prestate - persist_to_workspace: root: . 
paths: @@ -2485,7 +2485,8 @@ jobs: enable-mise-cache: true - run: name: Verify reproducibility - command: make -C op-program verify-reproducibility + command: just verify-reproducibility + working_directory: op-program - store_artifacts: path: ./op-program/temp/logs when: always @@ -2502,10 +2503,11 @@ jobs: - setup_remote_docker - run: name: Build cannon - command: make cannon + command: just cannon - run: name: Verify the Cannon STF - command: make -C ./cannon cannon-stf-verify + command: just cannon-stf-verify + working_directory: cannon - notify-failures-on-develop: mentions: "@proofs-team" @@ -2572,7 +2574,7 @@ jobs: - run: name: Run Analyzer command: | - make run-vm-compat + just run-vm-compat working_directory: op-program - store_artifacts: path: op-program/bin/vm-compat-output/vm-compat-findings.json @@ -2590,7 +2592,7 @@ jobs: - run: name: Verify Compatibility command: | - make verify-compat + just verify-compat working_directory: op-program check-generated-mocks-op-node: @@ -2605,7 +2607,7 @@ jobs: patterns: op-node - run: name: check-generated-mocks - command: make generate-mocks-op-node && git diff --exit-code + command: just generate-mocks-op-node && git diff --exit-code check-generated-mocks-op-service: docker: @@ -2619,7 +2621,7 @@ jobs: patterns: op-service - run: name: check-generated-mocks - command: make generate-mocks-op-service && git diff --exit-code + command: just generate-mocks-op-service && git diff --exit-code op-deployer-forge-version: docker: diff --git a/.circleci/continue/rust-ci.yml b/.circleci/continue/rust-ci.yml index 2db59a0c8b7d7..6e75f363d7425 100644 --- a/.circleci/continue/rust-ci.yml +++ b/.circleci/continue/rust-ci.yml @@ -858,7 +858,7 @@ jobs: - run: name: Build cannon command: | - cd cannon && make + cd cannon && just cannon sudo mv ./bin/cannon /usr/local/bin/ - run: name: Set run environment From ccb6aa1adfcb81122b0be60ef35d07a325da0c8a Mon Sep 17 00:00:00 2001 From: Adrian Sutton Date: Fri, 13 Mar 2026 12:02:18 +1000 
Subject: [PATCH 124/201] chore(cannon): migrate testdata Makefiles to justfiles (#19483) * docs(op-e2e): update README to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix(justfiles): pass Make variable overrides as env vars in deprecated shim The deprecated.mk shim was changed to pass JUSTFLAGS as just CLI variable overrides (`just VAR=val target`), but just rejects overrides for variables not declared in the justfile. This broke CI jobs where Make variable assignments propagate through sub-makes (e.g. GO_TEST_FLAGS, GUEST_PROGRAM). Revert to passing them as environment variables via `env`, which is how the shim originally worked in the cannon migration PR. Fixes: go-tests-short, sanitize-op-program CI failures Co-Authored-By: Claude Opus 4.6 * ci: re-trigger cannon-prestate build Co-Authored-By: Claude Opus 4.6 * chore: migrate root Makefile to justfile Migrate root build targets from Make to Just. The Makefile now delegates to just with a deprecation warning, preserving backwards compatibility for existing make invocations. Co-Authored-By: Claude Opus 4.6 * docs: update CONTRIBUTING.md to use just instead of make Co-Authored-By: Claude Opus 4.6 * fix: convert make -C calls to just and restore GOPRIVATE comment All subdirectories now have justfiles with deprecated Make shims, so convert remaining make -C calls to cd && just. Also restores the explanatory comment on mod-tidy's GOPRIVATE usage. Co-Authored-By: Claude Opus 4.6 * fix: run reproducible-prestate builds in parallel just doesn't parallelize dependencies like make -j does. Use background processes with wait to run op-program and kona prestate builds concurrently. Co-Authored-By: Claude Opus 4.6 * fix: use subshell for cd in _go-tests-ci-internal The cd into cannon/ was changing the CWD for the rest of the script, causing gotestsum to run from cannon/ instead of the repo root. The original Makefile used $(MAKE) -C which spawns a subprocess. Use a subshell to match that behavior. 
Co-Authored-By: Claude Opus 4.6 * fix: use subshells for sequential cd in reproducible-prestate The bare `cd op-program` on line 180 changed cwd persistently, so the following `cd rust` tried to resolve `op-program/rust/` which doesn't exist. Wrap both in subshells to preserve the original working directory. Co-Authored-By: Claude Opus 4.6 * fix: remove devnet-sdk and kurtosis-devnet from TEST_PKGS These directories were removed in #19506 but the justfile still referenced them, causing go-tests-short-ci to fail with "lstat ./devnet-sdk/: no such file or directory". Co-Authored-By: Claude Opus 4.6 * ci: update CircleCI config to use just instead of make Migrates all CircleCI make invocations to just for targets that have been migrated to justfiles. Remaining make calls are for packages not yet migrated (op-challenger, op-node, op-service, op-chain-ops fuzz targets) and cannon/testdata which has its own Makefile. Co-Authored-By: Claude Opus 4.6 * chore(cannon): migrate testdata Makefiles to justfiles Migrates cannon/testdata/, cannon/testdata/go-1-24/, and cannon/testdata/go-1-25/ Makefiles to justfiles. The Make pattern rules for building ELF binaries from go.mod directories are replaced with shell loops that discover and build all directories dynamically. Also updates cannon/justfile to call just instead of make -C for testdata targets, and updates the CI config accordingly. Co-Authored-By: Claude Opus 4.6 * fix(cannon): manual shim for diff-cannon positional arg The generic deprecated.mk shim converts make variables to env vars, but diff-cannon VM: is a positional parameter in just. Write a manual shim that passes VM correctly. Also add deprecation warning to the diff-%-cannon pattern target. Co-Authored-By: Claude Opus 4.6 * fix: restore dump discovery from go.mod and TODO comment Match the original Makefile behavior by iterating */go.mod directories instead of bin/*.64.elf to avoid dumping stale ELFs. 
Restore the TODO about the little-endian vs big-endian toolchain. Co-Authored-By: Claude Opus 4.6 --------- Co-authored-by: Claude Opus 4.6 --- .circleci/continue/main.yml | 2 +- cannon/Makefile | 22 ++++++++++++++++++++-- cannon/justfile | 4 ++-- cannon/testdata/Makefile | 18 ++---------------- cannon/testdata/go-1-24/Makefile | 29 ++--------------------------- cannon/testdata/go-1-24/justfile | 31 +++++++++++++++++++++++++++++++ cannon/testdata/go-1-25/Makefile | 29 ++--------------------------- cannon/testdata/go-1-25/justfile | 31 +++++++++++++++++++++++++++++++ cannon/testdata/justfile | 15 +++++++++++++++ 9 files changed, 106 insertions(+), 75 deletions(-) create mode 100644 cannon/testdata/go-1-24/justfile create mode 100644 cannon/testdata/go-1-25/justfile create mode 100644 cannon/testdata/justfile diff --git a/.circleci/continue/main.yml b/.circleci/continue/main.yml index ba57aed438e3c..c017c74032784 100644 --- a/.circleci/continue/main.yml +++ b/.circleci/continue/main.yml @@ -936,7 +936,7 @@ jobs: mkdir -p ./tmp/testlogs - run: name: build Cannon example binaries - command: make elf # only compile ELF binaries with Go, we do not have MIPS GCC for creating the debug-dumps. + command: just elf # only compile ELF binaries with Go, we do not have MIPS GCC for creating the debug-dumps. working_directory: cannon/testdata - run: name: Cannon Go lint diff --git a/cannon/Makefile b/cannon/Makefile index fc3fda4061e99..b944ef3a69e0d 100644 --- a/cannon/Makefile +++ b/cannon/Makefile @@ -1,9 +1,27 @@ -DEPRECATED_TARGETS := cannon cannon64-impl cannon-embeds clean elf elf-go-current sanitize-program contract test diff-cannon cannon-stf-verify fuzz lint +DEPRECATED_TARGETS := cannon cannon64-impl cannon-embeds clean elf elf-go-current sanitize-program contract test cannon-stf-verify fuzz lint include ../justfiles/deprecated.mk +# diff-cannon needs a manual shim because VM is a positional arg in just, +# not an env var. 
The generic shim would produce `just VM=X diff-cannon` +# but just needs `just diff-cannon X`. +.PHONY: diff-cannon +diff-cannon: +ifndef VM + $(error VM is required: make diff-cannon VM=multithreaded64-5) +endif + @echo + @printf '%s\n' 'Deprecated make call: make diff-cannon VM=$(VM)' + @printf '%s\n' 'Consider using just instead: just diff-cannon $(VM)' + @echo + just diff-cannon $(VM) + # Pattern target for backwards compatibility with make diff--cannon invocations. # Translates make diff--cannon to just diff-cannon . .PHONY: diff-%-cannon diff-%-cannon: - env $(JUSTFLAGS) just diff-cannon $* + @echo + @printf '%s\n' 'Deprecated make call: make diff-$*-cannon' + @printf '%s\n' 'Consider using just instead: just diff-cannon $*' + @echo + just diff-cannon $* diff --git a/cannon/justfile b/cannon/justfile index 82d52c460e961..bf3345601b789 100644 --- a/cannon/justfile +++ b/cannon/justfile @@ -33,11 +33,11 @@ clean: # Build ELF test binaries elf: - make -C ./testdata elf + just ./testdata/elf # Build ELF test binaries for current Go version elf-go-current: - make -C ./testdata/go-1-24 elf + just ./testdata/go-1-24/elf # Check guest program for unsupported MIPS instructions sanitize-program: diff --git a/cannon/testdata/Makefile b/cannon/testdata/Makefile index a74e1aeae46e6..6b18e6cc787c1 100644 --- a/cannon/testdata/Makefile +++ b/cannon/testdata/Makefile @@ -1,17 +1,3 @@ -all: elf +DEPRECATED_TARGETS := elf go1-24 go1-25 clean -go1-24: - make -C ./go-1-24 elf -.PHONY: go1-24 - -go1-25: - make -C ./go-1-25 elf -.PHONY: go1-25 - -.PHONY: elf -elf: go1-24 go1-25 - -.PHONY: clean -clean: - make -C ./go-1-24 clean - make -C ./go-1-25 clean +include ../../justfiles/deprecated.mk diff --git a/cannon/testdata/go-1-24/Makefile b/cannon/testdata/go-1-24/Makefile index f4a7a51bf81bd..cd4c47589bd43 100644 --- a/cannon/testdata/go-1-24/Makefile +++ b/cannon/testdata/go-1-24/Makefile @@ -1,28 +1,3 @@ -all: elf +DEPRECATED_TARGETS := elf elf64 dump clean -.PHONY: elf64 -elf64: 
$(patsubst %/go.mod,bin/%.64.elf,$(wildcard */go.mod)) - -.PHONY: elf -elf: elf64 - -.PHONY: dump -dump: $(patsubst %/go.mod,bin/%.dump,$(wildcard */go.mod)) - -.PHONY: clean -clean: - @[ -d bin ] && find bin -maxdepth 1 -type f -delete - -bin: - mkdir bin - -# take any directory with a go mod, and build an ELF -# verify output with: readelf -h bin/.elf -# result is mips64, big endian, R3000 -bin/%.64.elf: bin - cd $(@:bin/%.64.elf=%) && GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -o ../$@ . - -# take any ELF and dump it -# TODO: currently have the little-endian toolchain, but should use the big-endian one. The -EB compat flag works though. -bin/%.dump: bin - mipsel-linux-gnu-objdump -D --disassembler-options=no-aliases --wide --source -m mips:3000 -EB $(@:%.dump=%.elf) > $@ +include ../../../justfiles/deprecated.mk diff --git a/cannon/testdata/go-1-24/justfile b/cannon/testdata/go-1-24/justfile new file mode 100644 index 0000000000000..d606bdc635364 --- /dev/null +++ b/cannon/testdata/go-1-24/justfile @@ -0,0 +1,31 @@ +# Build all 64-bit ELF test binaries +elf64: elf + +# Build all 64-bit ELF test binaries +[script('bash')] +elf: + set -euo pipefail + mkdir -p bin + for mod in */go.mod; do + name=$(dirname "$mod") + echo "Building bin/${name}.64.elf" + (cd "$name" && GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -o "../bin/${name}.64.elf" .) + done + +# Dump all ELF binaries +# TODO: currently have the little-endian toolchain, but should use the big-endian one. The -EB compat flag works though. 
+[script('bash')] +dump: + set -euo pipefail + mkdir -p bin + for mod in */go.mod; do + name=$(dirname "$mod") + elf="bin/${name}.64.elf" + dump="bin/${name}.64.dump" + echo "Dumping $elf -> $dump" + mipsel-linux-gnu-objdump -D --disassembler-options=no-aliases --wide --source -m mips:3000 -EB "$elf" > "$dump" + done + +# Clean build artifacts +clean: + @[ -d bin ] && find bin -maxdepth 1 -type f -delete || true diff --git a/cannon/testdata/go-1-25/Makefile b/cannon/testdata/go-1-25/Makefile index f4a7a51bf81bd..cd4c47589bd43 100644 --- a/cannon/testdata/go-1-25/Makefile +++ b/cannon/testdata/go-1-25/Makefile @@ -1,28 +1,3 @@ -all: elf +DEPRECATED_TARGETS := elf elf64 dump clean -.PHONY: elf64 -elf64: $(patsubst %/go.mod,bin/%.64.elf,$(wildcard */go.mod)) - -.PHONY: elf -elf: elf64 - -.PHONY: dump -dump: $(patsubst %/go.mod,bin/%.dump,$(wildcard */go.mod)) - -.PHONY: clean -clean: - @[ -d bin ] && find bin -maxdepth 1 -type f -delete - -bin: - mkdir bin - -# take any directory with a go mod, and build an ELF -# verify output with: readelf -h bin/.elf -# result is mips64, big endian, R3000 -bin/%.64.elf: bin - cd $(@:bin/%.64.elf=%) && GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -o ../$@ . - -# take any ELF and dump it -# TODO: currently have the little-endian toolchain, but should use the big-endian one. The -EB compat flag works though. 
-bin/%.dump: bin - mipsel-linux-gnu-objdump -D --disassembler-options=no-aliases --wide --source -m mips:3000 -EB $(@:%.dump=%.elf) > $@ +include ../../../justfiles/deprecated.mk diff --git a/cannon/testdata/go-1-25/justfile b/cannon/testdata/go-1-25/justfile new file mode 100644 index 0000000000000..d606bdc635364 --- /dev/null +++ b/cannon/testdata/go-1-25/justfile @@ -0,0 +1,31 @@ +# Build all 64-bit ELF test binaries +elf64: elf + +# Build all 64-bit ELF test binaries +[script('bash')] +elf: + set -euo pipefail + mkdir -p bin + for mod in */go.mod; do + name=$(dirname "$mod") + echo "Building bin/${name}.64.elf" + (cd "$name" && GOOS=linux GOARCH=mips64 GOMIPS64=softfloat go build -o "../bin/${name}.64.elf" .) + done + +# Dump all ELF binaries +# TODO: currently have the little-endian toolchain, but should use the big-endian one. The -EB compat flag works though. +[script('bash')] +dump: + set -euo pipefail + mkdir -p bin + for mod in */go.mod; do + name=$(dirname "$mod") + elf="bin/${name}.64.elf" + dump="bin/${name}.64.dump" + echo "Dumping $elf -> $dump" + mipsel-linux-gnu-objdump -D --disassembler-options=no-aliases --wide --source -m mips:3000 -EB "$elf" > "$dump" + done + +# Clean build artifacts +clean: + @[ -d bin ] && find bin -maxdepth 1 -type f -delete || true diff --git a/cannon/testdata/justfile b/cannon/testdata/justfile new file mode 100644 index 0000000000000..8eda529505286 --- /dev/null +++ b/cannon/testdata/justfile @@ -0,0 +1,15 @@ +# Build all ELF test binaries +elf: go1-24 go1-25 + +# Build go-1-24 ELF binaries +go1-24: + just ./go-1-24/elf + +# Build go-1-25 ELF binaries +go1-25: + just ./go-1-25/elf + +# Clean all build artifacts +clean: + just ./go-1-24/clean + just ./go-1-25/clean From 681867a2ed9707a976da302c488440ed76851242 Mon Sep 17 00:00:00 2001 From: theo <80177219+theochap@users.noreply.github.com> Date: Thu, 12 Mar 2026 22:32:46 -0400 Subject: [PATCH 125/201] chore(rust/op-reth): port historical proofs binary change (#19252) * 
feat: add binary entry point for external proofs in OP (op-rs/op-reth#222) Closes op-rs/op-reth#164 --------- Co-authored-by: Arun Dhyani * feat: add support for `eth_getProof` (op-rs/op-reth#257) Adds support for `eth_getProof` RPC method. This required reworking the launch command to still work with an alternative DB provider. Closes op-rs/op-reth#173 --------- Co-authored-by: Emilia Hane * perf(trie): feature gate `reth-optimism-trie` metrics (op-rs/op-reth#282) Closes https://github.com/op-rs/bin/issues/281 - Feature gates `reth-optimism-trie` metrics - Moves cursor impls out of proofs module into new module `cursor` - Moves cursor factory impls into new module `cursor_factory` - Updates cursor factory impls to return cursor types with metrics wrapper if metrics feature is enabled * fix(test): Enable live collector tests with metrics feature (op-rs/op-reth#291) Closes https://github.com/op-rs/bin/issues/283 Enable live collector tests when metrics feature is enabled * feat: implement `debug_executePayload` (op-rs/op-reth#276) Closes https://github.com/op-rs/bin/issues/189 --------- Co-authored-by: Emilia Hane * feat: live collector integration (op-rs/op-reth#306) Closes op-rs/op-reth#296 --------- Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Emilia Hane * fix: rebase conflicts (op-rs/op-reth#367) Fix conflicts rebasing onto latest upstream main * feat: implemented `OpProofStorage` Database metrics (op-rs/op-reth#407) Closes op-rs/op-reth#224 Closes op-rs/op-reth#387 --------- Co-authored-by: Arun Dhyani * feat: Implemented `OpProofStoragePrunerTask` (op-rs/op-reth#375) Closes op-rs/op-reth#361 Closes op-rs/op-reth#395 --------- Co-authored-by: Emilia Hane * chore: inmem proof storage removed (op-rs/op-reth#465) This PR removes the CLI support for in-memory proof storage (proofs_history_storage_in_mem). 
The in-memory storage implementation was primarily intended for unit testing and is not feasible for running a node due to the large memory requirements of storing historical trie nodes. Closes op-rs/op-reth#466 * chore(exex): Add metrics feature in `reth-optimism-exex` (op-rs/op-reth#438) Closes https://github.com/op-rs/bin/issues/427 --------- Co-authored-by: itschaindev * feat: prune cli added (op-rs/op-reth#507) Closes op-rs/op-reth#452 * refactor(trie): return `OpProofsStorageError` from `execute_and_store_block_updates` (op-rs/op-reth#535) Closes op-rs/op-reth#523 --------- Co-authored-by: Emilia Hane * fix: reduce default proofs pruning interval (op-rs/op-reth#560) Closes op-rs/op-reth#559 * feat: add verification interval for integrity check (op-rs/op-reth#577) Closes op-rs/op-reth#449 The approach used is to perform full block verification after every N blocks to ensure the state is still correct. --------- Co-authored-by: Arun Dhyani * chore: getProof benchmark utility added (op-rs/op-reth#550) Utility for op-rs/op-reth#446 * chore: mv proof args to rollup node (op-rs/op-reth#625) Closes op-rs/op-reth#613 * chore: ExEx config builder (op-rs/op-reth#642) Closes op-rs/op-reth#641 --------- Co-authored-by: Himess * chore: moved proof initialization to `reth-optimism-node` (op-rs/op-reth#640) Closes op-rs/op-reth#612 --------- Co-authored-by: Arun Dhyani * chore(rust): fix compilation errors * docs(op-reth): add historical proofs README to exex crate Co-Authored-By: Claude Sonnet 4.6 * chore(op-reth): move proof-bench to rust/op-reth/bin/proof-bench Co-Authored-By: Claude Sonnet 4.6 * fix(op-reth): fix go-lint and rust-clippy CI failures - Replace big.Int.Uint64() calls with bigs.Uint64Strict() in proof tests - Fix goimports ordering in proof test files (move ethereum-optimism/* before ethereum/go-ethereum/*) - Fix const alignment in preset.go - Fix uninlined_format_args clippy lint in proof-bench report.rs Co-Authored-By: Claude Sonnet 4.6 fix(op-reth): 
update proofs preset to use stack.ComponentID Replace removed type aliases (L1NetworkID, L1ELNodeID, L2CLNodeID, etc.) with unified stack.ComponentID type. Also fix L2NodeMatcher generics and match.Not calls to use stack.ByID wrapper. fix(op-reth): remove duplicate crates/tests/proofs preset This was a partial copy of rust/op-reth/tests/proofs/utils with no go.mod, no test files, and no other sources. It caused go-lint CI failures due to stale type references. --------- Co-authored-by: jagroot <4516586+itschaindev@users.noreply.github.com> Co-authored-by: Arun Dhyani Co-authored-by: Julian Meyer Co-authored-by: Emilia Hane Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> Co-authored-by: Sadiqur Rahman Co-authored-by: itschaindev Co-authored-by: Himess <95512809+Himess@users.noreply.github.com> Co-authored-by: Himess Co-authored-by: Claude Sonnet 4.6 --- .circleci/continue/rust-e2e.yml | 31 + rust/Cargo.lock | 25 + rust/Cargo.toml | 1 + rust/op-reth/bin/Cargo.toml | 4 + rust/op-reth/bin/proof-bench/Cargo.toml | 24 + rust/op-reth/bin/proof-bench/README.md | 82 ++ rust/op-reth/bin/proof-bench/src/args.rs | 23 + rust/op-reth/bin/proof-bench/src/main.rs | 92 ++ rust/op-reth/bin/proof-bench/src/report.rs | 165 +++ rust/op-reth/bin/proof-bench/src/rpc.rs | 59 + rust/op-reth/bin/proof-bench/src/utils.rs | 38 + rust/op-reth/bin/src/main.rs | 24 +- rust/op-reth/crates/exex/README.md | 177 +++ rust/op-reth/crates/node/src/proof_history.rs | 2 +- .../op-reth/crates/tests/artifacts/.gitignore | 2 - .../tests/artifacts/compressed/README.md | 2 - .../tests/artifacts/compressed/artifacts.tzst | Bin 21157854 -> 0 bytes rust/op-reth/crates/tests/go.mod | 286 ---- rust/op-reth/crates/tests/go.sum | 1192 ----------------- .../crates/tests/proofs/utils/preset.go | 237 ---- .../tests/scripts/op-reth-entrypoint.sh | 69 - rust/op-reth/tests/Makefile | 37 + rust/op-reth/tests/README.md | 67 + .../tests/proofs/contracts/foundry.toml | 0 
.../proofs/contracts/src/MultiStorage.sol | 0 .../proofs/contracts/src/SimpleStorage.sol | 0 .../tests/proofs/contracts/src/TokenVault.sol | 0 .../tests/proofs/core/account_proofs_test.go | 37 +- .../tests/proofs/core/execute_payload_test.go | 2 +- .../proofs/core/execution_witness_test.go | 11 +- .../tests/proofs/core/resyncing_test.go | 5 +- .../tests/proofs/core/simple_storage_test.go | 35 +- .../tests/proofs/prune/prune_test.go | 9 +- .../tests/proofs/reorg/reorg_test.go | 15 +- .../tests/proofs/utils/contract.go | 0 .../tests/proofs/utils/multistorage.go | 0 rust/op-reth/tests/proofs/utils/preset.go | 143 ++ .../{crates => }/tests/proofs/utils/proof.go | 0 .../tests/proofs/utils/simplestorage.go | 0 .../tests/proofs/utils/tokenvault.go | 0 .../{crates => }/tests/proofs/utils/utils.go | 0 41 files changed, 1048 insertions(+), 1848 deletions(-) create mode 100644 rust/op-reth/bin/proof-bench/Cargo.toml create mode 100644 rust/op-reth/bin/proof-bench/README.md create mode 100644 rust/op-reth/bin/proof-bench/src/args.rs create mode 100644 rust/op-reth/bin/proof-bench/src/main.rs create mode 100644 rust/op-reth/bin/proof-bench/src/report.rs create mode 100644 rust/op-reth/bin/proof-bench/src/rpc.rs create mode 100644 rust/op-reth/bin/proof-bench/src/utils.rs create mode 100644 rust/op-reth/crates/exex/README.md delete mode 100644 rust/op-reth/crates/tests/artifacts/.gitignore delete mode 100644 rust/op-reth/crates/tests/artifacts/compressed/README.md delete mode 100644 rust/op-reth/crates/tests/artifacts/compressed/artifacts.tzst delete mode 100644 rust/op-reth/crates/tests/go.mod delete mode 100644 rust/op-reth/crates/tests/go.sum delete mode 100644 rust/op-reth/crates/tests/proofs/utils/preset.go delete mode 100644 rust/op-reth/crates/tests/scripts/op-reth-entrypoint.sh create mode 100644 rust/op-reth/tests/Makefile create mode 100644 rust/op-reth/tests/README.md rename rust/op-reth/{crates => }/tests/proofs/contracts/foundry.toml (100%) rename 
rust/op-reth/{crates => }/tests/proofs/contracts/src/MultiStorage.sol (100%) rename rust/op-reth/{crates => }/tests/proofs/contracts/src/SimpleStorage.sol (100%) rename rust/op-reth/{crates => }/tests/proofs/contracts/src/TokenVault.sol (100%) rename rust/op-reth/{crates => }/tests/proofs/core/account_proofs_test.go (76%) rename rust/op-reth/{crates => }/tests/proofs/core/execute_payload_test.go (97%) rename rust/op-reth/{crates => }/tests/proofs/core/execution_witness_test.go (91%) rename rust/op-reth/{crates => }/tests/proofs/core/resyncing_test.go (90%) rename rust/op-reth/{crates => }/tests/proofs/core/simple_storage_test.go (82%) rename rust/op-reth/{crates => }/tests/proofs/prune/prune_test.go (94%) rename rust/op-reth/{crates => }/tests/proofs/reorg/reorg_test.go (93%) rename rust/op-reth/{crates => }/tests/proofs/utils/contract.go (100%) rename rust/op-reth/{crates => }/tests/proofs/utils/multistorage.go (100%) create mode 100644 rust/op-reth/tests/proofs/utils/preset.go rename rust/op-reth/{crates => }/tests/proofs/utils/proof.go (100%) rename rust/op-reth/{crates => }/tests/proofs/utils/simplestorage.go (100%) rename rust/op-reth/{crates => }/tests/proofs/utils/tokenvault.go (100%) rename rust/op-reth/{crates => }/tests/proofs/utils/utils.go (100%) diff --git a/.circleci/continue/rust-e2e.yml b/.circleci/continue/rust-e2e.yml index 7887cc9c985a0..0f5e0b900f9e5 100644 --- a/.circleci/continue/rust-e2e.yml +++ b/.circleci/continue/rust-e2e.yml @@ -212,6 +212,32 @@ jobs: - go-save-cache: namespace: kona-ci + # op-reth E2E Sysgo Tests + op-reth-e2e-sysgo-tests: + docker: + - image: <> + resource_class: xlarge + steps: + - utils/checkout-with-mise: + checkout-method: blobless + - attach_workspace: + at: . 
+ - go-restore-cache: + namespace: op-reth-e2e + - rust-build: + directory: rust + profile: release + binary: "op-reth" + - run: + name: Run op-reth E2E tests with sysgo orchestrator + working_directory: rust/op-reth/tests + no_output_timeout: 60m + command: | + export OP_RETH_EXEC_PATH="$(pwd)/../../target/release/op-reth" + make test-e2e-sysgo + - go-save-cache: + namespace: op-reth-e2e + # Kona Proof Action Tests (from proof.yaml) kona-proof-action-tests: parameters: @@ -305,6 +331,11 @@ workflows: - cannon-kona-host - kona-build-release - op-reth-build + - op-reth-e2e-sysgo-tests: + <<: *rust-e2e-job-base + requires: + - contracts-bedrock-build + - op-reth-build - rust-restart-sysgo-tests: name: rust-e2e-restart <<: *rust-e2e-job-base diff --git a/rust/Cargo.lock b/rust/Cargo.lock index c8c5f4281019b..49286e3d6f8ea 100644 --- a/rust/Cargo.lock +++ b/rust/Cargo.lock @@ -4454,7 +4454,11 @@ version = "7.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "765c9198f173dd59ce26ff9f95ef0aafd0a0fe01fb9d72841bc5066a4c06511d" dependencies = [ + "base64 0.21.7", "byteorder", + "crossbeam-channel", + "flate2", + "nom", "num-traits", ] @@ -7841,7 +7845,10 @@ name = "op-reth" version = "1.11.3" dependencies = [ "clap", + "eyre", "reth-cli-util", + "reth-db", + "reth-node-builder", "reth-optimism-chainspec", "reth-optimism-cli", "reth-optimism-consensus", @@ -7854,6 +7861,22 @@ dependencies = [ "tracing", ] +[[package]] +name = "op-reth-proof-bench" +version = "1.11.0" +dependencies = [ + "alloy-primitives", + "anyhow", + "clap", + "futures", + "hdrhistogram", + "reqwest 0.13.2", + "reth-cli-runner", + "serde", + "serde_json", + "tokio", +] + [[package]] name = "op-revm" version = "15.0.0" @@ -9254,6 +9277,8 @@ dependencies = [ "log", "percent-encoding", "pin-project-lite", + "serde", + "serde_json", "sync_wrapper", "tokio", "tower 0.5.3", diff --git a/rust/Cargo.toml b/rust/Cargo.toml index 694b68dc52504..8344feb368ea7 100644 --- 
a/rust/Cargo.toml +++ b/rust/Cargo.toml @@ -26,6 +26,7 @@ members = [ # Op-Reth "op-reth/bin/", + "op-reth/bin/proof-bench/", "op-reth/crates/chainspec/", "op-reth/crates/cli/", "op-reth/crates/consensus/", diff --git a/rust/op-reth/bin/Cargo.toml b/rust/op-reth/bin/Cargo.toml index 1dd0f1b08fa80..a1e1473c8c5a4 100644 --- a/rust/op-reth/bin/Cargo.toml +++ b/rust/op-reth/bin/Cargo.toml @@ -19,9 +19,12 @@ reth-optimism-evm.workspace = true reth-optimism-payload-builder.workspace = true reth-optimism-primitives.workspace = true reth-optimism-forks.workspace = true +reth-node-builder.workspace = true +reth-db.workspace = true clap = { workspace = true, features = ["derive", "env"] } tracing.workspace = true +eyre.workspace = true [lints] workspace = true @@ -33,6 +36,7 @@ otlp = ["reth-optimism-cli/otlp"] js-tracer = [ "reth-optimism-node/js-tracer", + "reth-node-builder/js-tracer", ] jemalloc = ["reth-cli-util/jemalloc", "reth-optimism-cli/jemalloc"] diff --git a/rust/op-reth/bin/proof-bench/Cargo.toml b/rust/op-reth/bin/proof-bench/Cargo.toml new file mode 100644 index 0000000000000..79fb2cb0d9d92 --- /dev/null +++ b/rust/op-reth/bin/proof-bench/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "op-reth-proof-bench" +version = "1.11.0" +edition.workspace = true +rust-version.workspace = true +license.workspace = true +homepage.workspace = true +repository.workspace = true + +[dependencies] +# reth +reth-cli-runner.workspace = true + +# alloy +alloy-primitives = { workspace = true, features = ["serde"] } + +tokio = { workspace = true, features = ["macros"] } +reqwest = { workspace = true, features = ["json"] } +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +clap = { workspace = true, features = ["derive"] } +futures.workspace = true +anyhow.workspace = true +hdrhistogram = "7.5" diff --git a/rust/op-reth/bin/proof-bench/README.md b/rust/op-reth/bin/proof-bench/README.md new file mode 100644 index 0000000000000..62844ecf22e6c --- 
/dev/null +++ b/rust/op-reth/bin/proof-bench/README.md @@ -0,0 +1,82 @@ +# Reth Proof Benchmark Tool + +`op-reth-proof-bench` is a specialized CLI tool designed to benchmark the performance of the `eth_getProof` RPC method on Optimism/Ethereum nodes. It iterates through a range of blocks, sending concurrent proof requests to valid addresses, and reports detailed latency and throughput metrics. + +## Features + +- **Concurrent Execution:** Sends multiple requests in parallel to stress test the RPC. +- **Detailed Reporting:** + - Real-time per-block stats (Req/s, P95 Latency, Min/Max). + - Final summary with histogram-based percentiles (P50, P95, P99). +- **Customizable Workload:** Configure worker count, request count per block, and block step. +- **Robustness:** Handles network errors gracefully and reports error counts. + +## Installation + +This tool is part of the `op-reth` workspace. You can run it directly using Cargo. + +```bash +# Build and run directly +cargo run -p reth-proof-bench -- --help +``` + +## Usage + +### Basic Example + +Benchmark 100 blocks from block `10,000,000` to `10,000,100` against a local node: + +```bash +cargo run --release -p reth-proof-bench -- \ + --rpc http://localhost:8545 \ + --from 10000000 \ + --to 10000100 +``` + +### Advanced Usage + +Stress test a remote node with higher concurrency: + +```bash +cargo run --release -p reth-proof-bench -- \ + --rpc http://remote-node:8545 \ + --from 4000000 \ + --to 4100000 \ + --step 10000 \ + --reqs 50 \ + --workers 10 +``` + +### Arguments + +| Flag | Default | Description | +|------|---------|-------------| +| `--rpc` | `http://localhost:8545` | The HTTP RPC endpoint of the node. | +| `--from` | **Required** | Start block number. | +| `--to` | **Required** | End block number. | +| `--step` | `10000` | Number of blocks to skip between benchmark iterations. | +| `--reqs` | `10` | Number of `eth_getProof` requests to send *per block*. 
| +| `--workers` | `2` | Number of concurrent async workers to run. | + +## Output Example + +```text +Block | Req/s | Min(ms) | P95(ms) | Max(ms) | Errors +--------------------------------------------------------------------------- +36441154 | 245.50 | 25.12 | 45.20 | 55.10 | 0 +36451154 | 230.10 | 26.05 | 48.10 | 60.15 | 0 + +--------------------------------------------------------------------------- +Summary: +Total Requests: 100 +Total Time: 0.85s +Throughput (Req/s): 117.65 +Total Errors: 0 +----------------------------------- +Min Latency: 25.12 ms +Median Latency: 32.00 ms +P95 Latency: 48.10 ms +P99 Latency: 60.15 ms +Max Latency: 60.15 ms +--------------------------------------------------------------------------- +``` diff --git a/rust/op-reth/bin/proof-bench/src/args.rs b/rust/op-reth/bin/proof-bench/src/args.rs new file mode 100644 index 0000000000000..ad6ae48e46c40 --- /dev/null +++ b/rust/op-reth/bin/proof-bench/src/args.rs @@ -0,0 +1,23 @@ +use clap::Parser; + +#[derive(Parser, Debug)] +#[command(author, version, about = "Benchmark eth_getProof performance", long_about = None)] +pub struct Args { + #[arg(long, default_value = "http://localhost:8545")] + pub rpc: String, + + #[arg(long)] + pub from: u64, + + #[arg(long)] + pub to: u64, + + #[arg(long, default_value_t = 10000)] + pub step: u64, + + #[arg(long, default_value_t = 10)] + pub reqs: usize, + + #[arg(long, default_value_t = 2)] + pub workers: usize, +} diff --git a/rust/op-reth/bin/proof-bench/src/main.rs b/rust/op-reth/bin/proof-bench/src/main.rs new file mode 100644 index 0000000000000..ff5a18ad88f5c --- /dev/null +++ b/rust/op-reth/bin/proof-bench/src/main.rs @@ -0,0 +1,92 @@ +//! # reth-proof-bench +//! +//! A benchmarking tool for measuring the performance of historical state proofs +//! retrieval using the `eth_getProof` RPC method. 
+ +#![doc( + html_logo_url = "https://raw.githubusercontent.com/paradigmxyz/reth/main/assets/reth-docs.png", + html_favicon_url = "https://avatars0.githubusercontent.com/u/97369466?s=256", + issue_tracker_base_url = "https://github.com/paradigmxyz/reth/issues/" +)] + +mod args; +mod report; +mod rpc; +mod utils; + +use anyhow::Result; +use clap::Parser; +use futures::stream::{self, StreamExt}; +use std::time::Instant; + +use crate::{ + args::Args, + report::{BenchMetrics, BenchSummary, Reporter}, + rpc::run_proof, + utils::get_addresses, +}; +use reth_cli_runner::CliRunner; + +#[allow(missing_docs)] +fn main() -> Result<()> { + let args = Args::parse(); + + if args.from > args.to { + anyhow::bail!("--from must be less than or equal to --to"); + } + + let runner = CliRunner::try_default_runtime()?; + runner.run_command_until_exit(|_| run(args)) +} + +async fn run(args: Args) -> Result<()> { + let client = reqwest::Client::new(); + let addresses = get_addresses(); + + // Use the reporter for output + Reporter::print_header(); + + let start_time = Instant::now(); + let mut current_block = args.from; + + // Initialize Summary + let mut summary = BenchSummary::new(); + + while current_block <= args.to { + let block_start = Instant::now(); + + let target_block = current_block; + + let work_items = (0..args.reqs).map(|i| { + let addr = addresses[i % addresses.len()]; + let client = client.clone(); + let rpc_url = args.rpc.clone(); + (i, addr, client, rpc_url, target_block) + }); + + let mut stream = stream::iter(work_items) + .map(|(attempt, addr, client, url, block)| async move { + run_proof(client, url, block, attempt, addr).await + }) + .buffer_unordered(args.workers); + + let mut samples = Vec::with_capacity(args.reqs); + while let Some(sample) = stream.next().await { + summary.add(&sample); + samples.push(sample); + } + + let block_duration = block_start.elapsed().as_secs_f64(); + + // Clean logic: Create metrics -> Report metrics + let metrics = 
BenchMetrics::new(current_block, &samples, block_duration); + Reporter::print_metrics(&metrics); + + current_block += args.step; + } + + let total_duration = start_time.elapsed().as_secs_f64(); + Reporter::print_summary(&summary, total_duration); + + Ok(()) +} diff --git a/rust/op-reth/bin/proof-bench/src/report.rs b/rust/op-reth/bin/proof-bench/src/report.rs new file mode 100644 index 0000000000000..d4b3c51178d0b --- /dev/null +++ b/rust/op-reth/bin/proof-bench/src/report.rs @@ -0,0 +1,165 @@ +use crate::rpc::Sample; +use hdrhistogram::Histogram; +use std::io::{self, Write}; + +// --- Per-Block Metrics --- + +pub struct BenchMetrics { + pub block: u64, + pub p95_ms: f64, + pub min_ms: f64, + pub max_ms: f64, + pub errors: usize, + pub throughput: f64, +} + +impl BenchMetrics { + pub fn new(block: u64, samples: &[Sample], duration_secs: f64) -> Self { + if samples.is_empty() { + return Self::empty(block); + } + + // 1. Prepare data + let mut latencies: Vec = samples.iter().map(|s| s.latency_ms).collect(); + // Sorting is efficient enough for small N (batch size) and gives exact precision + latencies.sort_by(|a, b| a.partial_cmp(b).unwrap()); + + let errors = samples.iter().filter(|s| !s.success).count(); + + // 2. 
Calculate Stats + let min_ms = *latencies.first().unwrap_or(&0.0); + let max_ms = *latencies.last().unwrap_or(&0.0); + let p95_ms = calculate_percentile(&latencies, 0.95); + + let throughput = + if duration_secs > 0.0 { samples.len() as f64 / duration_secs } else { 0.0 }; + + Self { block, p95_ms, min_ms, max_ms, errors, throughput } + } + + fn empty(block: u64) -> Self { + Self { block, p95_ms: 0.0, min_ms: 0.0, max_ms: 0.0, errors: 0, throughput: 0.0 } + } +} + +// --- Global Accumulator --- + +pub struct BenchSummary { + pub hist: Histogram, + pub total_errors: usize, + pub total_requests: usize, + pub min_ms: f64, + pub max_ms: f64, +} + +impl BenchSummary { + pub fn new() -> Self { + Self { + hist: Histogram::::new_with_bounds(1, 3_600_000, 3).unwrap(), + total_errors: 0, + total_requests: 0, + min_ms: f64::MAX, + max_ms: 0.0, + } + } + + pub fn add(&mut self, sample: &Sample) { + self.total_requests += 1; + + if !sample.success { + self.total_errors += 1; + } + + let lat = sample.latency_ms; + + if lat < self.min_ms { + self.min_ms = lat; + } + if lat > self.max_ms { + self.max_ms = lat; + } + + // Update Histogram (saturating cast to avoid crashes on bad data) + let val = (lat as u64).max(1); + self.hist.record(val).ok(); + } +} + +// --- Output Handling --- + +pub struct Reporter; + +impl Reporter { + const SEP: &'static str = + "---------------------------------------------------------------------------"; + + pub fn print_header() { + let header = format!( + "{:<10} | {:<10} | {:<10} | {:<10} | {:<10} | {:<10}", + "Block", "Req/s", "Min(ms)", "P95(ms)", "Max(ms)", "Errors" + ); + + let stdout = io::stdout(); + let mut handle = stdout.lock(); + writeln!(handle, "{header}").unwrap(); + writeln!(handle, "{}", Self::SEP).unwrap(); + } + + pub fn print_metrics(metrics: &BenchMetrics) { + let line = format!( + "{:<10} | {:<10.2} | {:<10.2} | {:<10.2} | {:<10.2} | {:<10}", + metrics.block, + metrics.throughput, + metrics.min_ms, + metrics.p95_ms, + 
metrics.max_ms, + metrics.errors + ); + + let stdout = io::stdout(); + let mut handle = stdout.lock(); + writeln!(handle, "{line}").unwrap(); + } + + pub fn print_summary(summary: &BenchSummary, total_duration: f64) { + if summary.total_requests == 0 { + println!("\nNo requests processed."); + return; + } + + let throughput = summary.total_requests as f64 / total_duration; + + // Histogram percentiles + let p50 = summary.hist.value_at_quantile(0.50); + let p95 = summary.hist.value_at_quantile(0.95); + let p99 = summary.hist.value_at_quantile(0.99); + + // Sanity check min in case it stayed at MAX + let min_print = if summary.min_ms == f64::MAX { 0.0 } else { summary.min_ms }; + + println!("\n{:-<75}", ""); + println!("Summary:"); + println!("{:<20} {}", "Total Requests:", summary.total_requests); + println!("{:<20} {:.2}s", "Total Time:", total_duration); + println!("{:<20} {:.2}", "Throughput (Req/s):", throughput); + println!("{:<20} {}", "Total Errors:", summary.total_errors); + println!("{:-<35}", ""); + println!("{:<20} {:.2} ms", "Min Latency:", min_print); + println!("{:<20} {} ms", "Median Latency:", p50); + println!("{:<20} {} ms", "P95 Latency:", p95); + println!("{:<20} {} ms", "P99 Latency:", p99); + println!("{:<20} {:.2} ms", "Max Latency:", summary.max_ms); + println!("{:-<75}", ""); + } +} + +// --- Helpers --- + +// Helper to extract clean math logic from struct initialization +fn calculate_percentile(sorted_data: &[f64], percentile: f64) -> f64 { + if sorted_data.is_empty() { + return 0.0; + } + let idx = ((sorted_data.len() as f64 * percentile).ceil() as usize).saturating_sub(1); + sorted_data.get(idx).copied().unwrap_or(0.0) +} diff --git a/rust/op-reth/bin/proof-bench/src/rpc.rs b/rust/op-reth/bin/proof-bench/src/rpc.rs new file mode 100644 index 0000000000000..7fa7b7119f0fb --- /dev/null +++ b/rust/op-reth/bin/proof-bench/src/rpc.rs @@ -0,0 +1,59 @@ +use crate::utils::{CONTRACT, balance_of_slot}; +use alloy_primitives::Address; +use 
serde::{Deserialize, Serialize}; +use serde_json::json; +use std::time::Instant; + +#[derive(Debug, Serialize, Deserialize)] +struct RpcResponse { + jsonrpc: String, + id: usize, + result: Option, + error: Option, +} + +#[derive(Debug, Serialize, Deserialize)] +struct RpcError { + code: i32, + message: String, +} + +pub struct Sample { + pub latency_ms: f64, + pub success: bool, +} + +pub async fn run_proof( + client: reqwest::Client, + url: String, + block: u64, + id: usize, + addr: Address, +) -> Sample { + let start = Instant::now(); + let slot = balance_of_slot(addr); + + // Format hash with 0x used by alloy B256 debug/display + let params = json!([CONTRACT, [format!("{}", slot)], format!("0x{:x}", block)]); + + let body = json!({ + "jsonrpc": "2.0", + "id": id, + "method": "eth_getProof", + "params": params + }); + + let resp = client.post(&url).json(&body).send().await; + + let latency = start.elapsed().as_secs_f64() * 1000.0; + + let success = match resp { + Ok(res) => match res.json::().await { + Ok(rpc_resp) => rpc_resp.error.is_none() && rpc_resp.result.is_some(), + Err(_) => false, + }, + Err(_) => false, + }; + + Sample { latency_ms: latency, success } +} diff --git a/rust/op-reth/bin/proof-bench/src/utils.rs b/rust/op-reth/bin/proof-bench/src/utils.rs new file mode 100644 index 0000000000000..c76a355860a68 --- /dev/null +++ b/rust/op-reth/bin/proof-bench/src/utils.rs @@ -0,0 +1,38 @@ +use alloy_primitives::{Address, B256, U256, keccak256}; + +pub const CONTRACT: &str = "0x4200000000000000000000000000000000000006"; + +/// Calculate the storage slot for `balanceOf[addr]` +/// Solidity mappings: keccak256(abi.encode(key, slot_position)) +pub fn balance_of_slot(addr: Address) -> B256 { + // Left-pad address (20 bytes) to 32 bytes + let mut data = Vec::with_capacity(64); + data.extend_from_slice(&[0u8; 12]); + data.extend_from_slice(addr.as_slice()); + + // Pad slot position (3) to 32 bytes + let slot_bytes: [u8; 32] = U256::from(3).to_be_bytes(); + 
data.extend_from_slice(&slot_bytes); + + keccak256(data) +} + +pub fn get_addresses() -> Vec
{ + vec![ + "0x48107537B9e358B1894c7a491C17E4bF035AFC74".parse().unwrap(), + "0x917AbB78953902213F63e16268E78feBAC362846".parse().unwrap(), + "0xA32Ce4EB5802809EB89032E6cc0FB06EB51bde38".parse().unwrap(), + "0x8AE9Ed8aB2abF45376cDFb671c05170353dd1F0E".parse().unwrap(), + "0x2195DbA1ab41966E91C22e4C601Be6517a40f2aB".parse().unwrap(), + "0x04bF3799798077629cb627DfF76E48a015f0B3CB".parse().unwrap(), + "0x5aaFa65D234e962121C6f44fd570EE353Ac52Bf5".parse().unwrap(), + "0x2a58adA546c2e9cd3134c163FBfC0E335Ff91AfA".parse().unwrap(), + "0x8AE9Ed8aB2abF45376cDFb671c05170353dd1F0E".parse().unwrap(), + "0x8524771B4c5a8122E8959cFDeB641E3f498188AF".parse().unwrap(), + "0xf530AD425154CC9635CAaD538e8bf3C638191a4E".parse().unwrap(), + "0x73a5bB60b0B0fc35710DDc0ea9c407031E31Bdbb".parse().unwrap(), + "0xfE978E4Dc6f3d716121c603311b0c37a9acd7234".parse().unwrap(), + "0xcAAd4EB9ABfc93Ab9eA86FB5733B8F85c952200b".parse().unwrap(), + "0xd15b5531050AC78Aa78AeF8A6DE4256Fa4536107".parse().unwrap(), + ] +} diff --git a/rust/op-reth/bin/src/main.rs b/rust/op-reth/bin/src/main.rs index bfd63af539e39..1a72b045747e6 100644 --- a/rust/op-reth/bin/src/main.rs +++ b/rust/op-reth/bin/src/main.rs @@ -1,8 +1,12 @@ #![allow(missing_docs, rustdoc::missing_crate_level_docs)] use clap::Parser; +use eyre::ErrReport; +use reth_db::DatabaseEnv; +use reth_node_builder::{NodeBuilder, WithLaunchContext}; +use reth_optimism_chainspec::OpChainSpec; use reth_optimism_cli::{Cli, chainspec::OpChainSpecParser}; -use reth_optimism_node::{OpNode, args::RollupArgs}; +use reth_optimism_node::{args::RollupArgs, proof_history}; use tracing::info; #[global_allocator] @@ -12,6 +16,17 @@ static ALLOC: reth_cli_util::allocator::Allocator = reth_cli_util::allocator::ne #[unsafe(export_name = "_rjem_malloc_conf")] static MALLOC_CONF: &[u8] = b"prof:true,prof_active:true,lg_prof_sample:19\0"; +/// Single entry that handles: +/// - no proofs history (plain node), +/// - in-mem proofs storage, +/// - MDBX proofs storage. 
+async fn launch_node( + builder: WithLaunchContext>, + args: RollupArgs, +) -> eyre::Result<(), ErrReport> { + proof_history::launch_node_with_proof_history(builder, args).await +} + fn main() { reth_cli_util::sigsegv_handler::install(); @@ -23,11 +38,10 @@ fn main() { } if let Err(err) = - Cli::::parse().run(async move |builder, rollup_args| { + Cli::::parse().run(async move |builder, args| { info!(target: "reth::cli", "Launching node"); - let handle = - builder.node(OpNode::new(rollup_args)).launch_with_debug_capabilities().await?; - handle.node_exit_future.await + launch_node(builder, args.clone()).await?; + Ok(()) }) { eprintln!("Error: {err:?}"); diff --git a/rust/op-reth/crates/exex/README.md b/rust/op-reth/crates/exex/README.md new file mode 100644 index 0000000000000..c9b401d856eca --- /dev/null +++ b/rust/op-reth/crates/exex/README.md @@ -0,0 +1,177 @@ +# op-reth historical proofs +[![codecov](https://codecov.io/gh/op-rs/op-reth/branch/main/graph/badge.svg)](https://app.codecov.io/gh/op-rs/op-reth/tree/unstable/crates%2Foptimism?components%5B0%5D=op%20historical%20proof) + +![Description](assets/op-rs-logo.png) +> **⚠️ Under Construction** +> +> This is a work in progress. Stay tuned! + +## Motivation + +Reliable access to recent historical state via `eth_getProof` is a critical requirement for rollups and L2 infrastructure built on Ethereum. + +As described in Reth issue [#18070](https://github.com/paradigmxyz/reth/issues/18070), many applications on Optimism and other rollups (e.g. Base infrastructure, ENS, fault-proof systems) depend on fast and reliable `eth_getProof` queries within a bounded challenge window (typically 7 days). At present, the lack of reliable recent-state proof support is a blocker for broader Reth adoption in these environments. + +The core issue lies in Reth's architecture for historical state calculation. 
To serve `eth_getProof` for a historical block, Reth must perform an **in-memory revert**, applying state diffs backwards from the chain tip. While efficient for recent blocks, reverting state for a block 7 days ago requires loading thousands of changesets into Memory. This operation is computationally expensive and often causes the node to crash due to **Out-Of-Memory (OOM)** errors, effectively making deep historical proofs impossible on a standard node. + +While solutions like Erigon’s compressed archive format demonstrate that full historical proofs can be stored efficiently (~5 TB), most real-world use cases do not require access to *all* historical state. Instead, the overwhelming majority of applications only require proofs over a **recent, bounded time window** (e.g. the last 7 days for challenge games). + +This fork introduces a **Bounded History Sidecar** architecture for historical state proofs. The goal is to provide: +- **Crash-Free Proof Generation:** Serve `eth_getProof` for deep historical blocks without the OOM risks associated with in-memory reverts. +- **Constant Storage Footprint:** Maintain a fixed storage size (linear to the configured window) rather than the unbounded growth. +- **Zero-Overhead Sync:** Utilize Reth's Execution Extensions (ExEx) to process and index history asynchronously, ensuring the main node's sync speed and tip latency are unaffected. + +## Architecture: Bounded History Sidecar + +This module implements a **Sidecar Storage Pattern**. Instead of burdening the main node's database with historical data, we maintain a dedicated, secondary MDBX environment optimized specifically for serving proofs. + +### Core Mechanism: Versioned State +Unlike standard Reth (which stores the *current* state and calculates history by reverting diffs), this module implements a **Versioned State Store**. + +1. **`AccountTrieHistory` & `StorageTrieHistory`**: Stores the intermediate branch nodes of the Merkle Patricia Trie. 
Each node is versioned by block number, allowing us to traverse the exact trie structure as it existed at any past block. +2. **`HashedAccountHistory` & `HashedStorageHistory`**: Stores the actual account data (nonce, balance) and storage slot values at the leaves of the trie, also versioned by block number. + +### Initialization: State Snapshot +To ensure the service is easy to set up on existing nodes with millions of blocks, we do not require a full chain re-sync. Instead, the module requires an **Initial State Snapshot** via the CLI: + +1. **Capture:** The CLI command captures the *current* state of the blockchain (Account and Storage Tries) from the main database. +2. **Seed:** It populates the sidecar with this baseline state. +3. **Track:** Once initialized, the node begins tracking new blocks and maintaining history from that point forward. + +This ensures that the proof window has a valid starting point immediately. + +### Data Flow + +1. **Initialization:** The operator runs the initialization CLI command to snapshot the current main DB state and seed the sidecar. +2. **Ingestion (Write):** As the node syncs, the Execution Extension (`ExEx`) captures the TrieUpdates (branch nodes) and HashedPostState (leaf values) in each block and writes them to the sidecar DB tagged with the block number. +3. **Retrieval (Read):** When `eth_getProof` is called for a historical block, we simply look up the trie nodes valid at that specific block version. +4. **Maintenance (Prune):** A background process monitors the chain tip. Once a block falls outside the configured window (e.g., > 7 days old), its specific history versions are deleted to reclaim space. + +## New Components + +### 1. `reth-optimism-exex` +This crate implements the Execution Extension (ExEx) that acts as the bridge between the main node and the sidecar storage. + +- Ingestion Pipeline: Subscribes to the node's canonical state notifications to capture ExecutionOutcomes in real-time. 
+- Diff Extraction: Isolates the specific TrieUpdates (branch nodes) and HashedPostState (leaf values) changed in each block. +- Persistence: Writes these versioned updates to the sidecar MDBX database without blocking the main datastore. +- Lifecycle Management: Orchestrates the pruning process, ensuring the sidecar storage remains bounded by the configured window. + +### 2. `reth-optimism-trie` +This crate provides the Storage Engine and Proof Logic that powers the sidecar. + +- Versioned Storage: Implements MdbxProofsStorage, a specialized database schema optimized for time-series trie node retrieval. +- Proof Generation: Replaces the standard "revert-based" proof logic with a direct "lookup-based" approach. +- Pruning Logic: Implements the smart retention algorithm that safely deletes old history + +### 3. RPC Overrides +The module injects custom handlers to intercept specific RPC calls: +* **`eth_getProof`**: Checks if the requested block is historical. If so, it fetches the account and storage proofs from the secondary Proofs DB. +* **`debug_executionWitness`**: Allows debugging and tracing against historical states. +* **`debug_executePayload`**: Executes a payload against the historical state to generate an execution witness. + +## Hardware Requirements + +Recommended specifications: + +- **CPU**: 8-Core processor with good single-core performance +- **RAM**: Minimum 16 GB (32 GB recommended) +- **Storage**: NVMe SSD with adequate capacity for chain data plus snapshots + - Calculate: `(2 × current_chain_size) + snapshot_size + 20% buffer` + - *Note*: Storing 4 weeks of full proof history on a network like Base Testnet consumes approximately **~1 TB** of additional storage. +- **Network**: Stable internet connection with good bandwidth + +## Usage + +### 1. Initialization +Before starting the node with the sidecar enabled, you must initialize the proof storage. This command snapshots the current state of the main database to seed the sidecar. 
+ +```bash +op-reth proofs init \ + --datadir=path/to/reth-datadir \ + --proofs-history.storage-path=/path/to/proof-db +``` + +### 2. Running the Node (Syncing) + +Once initialized, start the node with the --proofs-history flags to enable the sidecar service. + +```bash +op-reth node \ + --chain base-sepolia \ + --datadir=/path/to/reth-datadir \ + --proofs-history \ + --proofs-history.storage-path=/path/to/proofs-db \ + --proofs-history.window=600000 \ + --proofs-history.prune-interval=15s +``` + +Configuration Flags + +| Flag | Description | Default | Required | +| :--- | :--- | :--- | :--- | +| `--proofs-history` | Enables the historical proofs module. | `false` | No | +| `--proofs-history.storage-path` | Path to the separate MDBX database for storing proofs. | `None` | **Yes** (if enabled) | +| `--proofs-history.window` | Retention period in **blocks**. Data older than `Tip - Window` is pruned. | `1,296,000` (~30 days) | No | +| `--proofs-history.prune-interval` | How frequently the pruner runs to delete old data. | `1h` | No | + +### 3. Management + +We provide custom CLI commands to manage the proof history manually. + +`op-reth proofs prune` +Manually triggers the pruning process. Useful for reclaiming space immediately. + +```bash +op-reth proofs prune \ + --datadir=/path/to/reth-datadir \ + --proofs-history.storage-path=/path/to/proof-db \ + --proofs-history.window=600000 \ + --proofs-history.prune-batch-size=10000 +``` + +`op-reth proofs unwind` +Manually unwinds the proof history to a specific block. Useful for recovering from corrupted states. + +```bash +op-reth proofs unwind \ + --datadir=/path/to/reth-datadir \ + --proofs-history.storage-path=/path/to/proofs-db \ + --target=90 +``` + +### 4. Metrics +A comprehensive Grafana dashboard is available at `etc/grafana/dashboards/op-proof-history.json` to monitor: +- Syncing speed +- Sidecar storage size. +- Pruning performance. +- Proof generation latency. 
+ +Sample metric snapshot available at: https://snapshots.raintank.io/dashboard/snapshot/bzYXscOCugsxO6C2bzFB1XbskxG0KFdo + +## Performance + +We benchmarked the sidecar on Base Sepolia to validate latency and throughput under load. + +Metric | Result +-- | -- +Avg Latency | 15 ms +Throughput | ~5,000 req/sec + +Benchmark Configuration +- Network: Base Sepolia (Local Node) +- Target: WETH Contract (0x420...0006) +- Range: ~700k blocks (34,011,476 to 34,704,213) +- Load: 10 concurrent workers, 100 requests per block iteration. + +The test script iterates through the block range, spawning 10 concurrent workers. Each worker selects an address round-robin from a pre-defined set, dynamically calculates the storage slot for balanceOf[address], and sends an eth_getProof request. + +Visual Proof: +- [Grafana Snapshot: Proof Metrics](https://snapshots.raintank.io/dashboard/snapshot/l74zCP4SXr1qcOR2RWFEiscZnDxGla8Z) +- [Grafana Snapshot: Reth Metrics](https://snapshots.raintank.io/dashboard/snapshot/DRoQMVF0m13d4tMRjhoAzHdfbjBA0eql) + +## Limitations + +- **High Storage Footprint**: The versioned state model trades storage space for instant computation. Storing versioned Merkle Trie nodes (hashes and branch paths) for every block modification is significantly more storage-intensive than the flat state diffs used by the main node. +- **Forward-Only Availability**: The sidecar implements a "record-forward" strategy. It cannot generate proofs for blocks prior to the sidecar's initialization; it does not backfill history. +- **Pruning & IOPS**: Pruning old history is a random-write intensive operation. High-performance NVMe storage is required to ensure the pruner can keep pace with the chain's growth on high-throughput networks. 
\ No newline at end of file diff --git a/rust/op-reth/crates/node/src/proof_history.rs b/rust/op-reth/crates/node/src/proof_history.rs index 03f5f1ac4082f..4d6e65da251ef 100644 --- a/rust/op-reth/crates/node/src/proof_history.rs +++ b/rust/op-reth/crates/node/src/proof_history.rs @@ -22,7 +22,7 @@ use tracing::info; /// - in-mem proofs storage, /// - MDBX proofs storage. pub async fn launch_node_with_proof_history( - builder: WithLaunchContext, OpChainSpec>>, + builder: WithLaunchContext>, args: RollupArgs, ) -> eyre::Result<(), ErrReport> { let RollupArgs { diff --git a/rust/op-reth/crates/tests/artifacts/.gitignore b/rust/op-reth/crates/tests/artifacts/.gitignore deleted file mode 100644 index e1b09822c7963..0000000000000 --- a/rust/op-reth/crates/tests/artifacts/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -forge-artifacts -src \ No newline at end of file diff --git a/rust/op-reth/crates/tests/artifacts/compressed/README.md b/rust/op-reth/crates/tests/artifacts/compressed/README.md deleted file mode 100644 index cc09a72587329..0000000000000 --- a/rust/op-reth/crates/tests/artifacts/compressed/README.md +++ /dev/null @@ -1,2 +0,0 @@ -Artifacts in this directory will be embedded inside the `op-deployer` binary. The directory can be populated by running -`make unzip-contract-artifacts`. 
\ No newline at end of file diff --git a/rust/op-reth/crates/tests/artifacts/compressed/artifacts.tzst b/rust/op-reth/crates/tests/artifacts/compressed/artifacts.tzst deleted file mode 100644 index a3ebbe3866d73170d9caa4d9c71b7e170796008e..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 21157854 zcmV)HK)t^xwJ-eyXxw`Nx>bodF|bU=#tnojlJRZ9KBd=c;kp$&XQ2`6X--TIbHj!x z5@jZ3W-u}5pyPkgf3UwX#{vxma05gG6JOCX0$Q-tTX}&NJ&UPPz=8w~2Qos{H4%g8 zr>Suy5RFgQ>9n$t84hBh?^ z-o_bGn)42v63I`HispdI zh$gA2c#5AO3->TUIDVQ)q~@jLTBP7BTBP~NPf|uUJWaE3(Mu+fl%QZzJMjKlK*7^Q z9z#ZhB|T6hMbE&295g}Egsao02%2bUYOiq`18z@~m{6cz)ZWu3$bQ9Zq64YkK=Gi_ zS3ECCFYz=j0y#($6g6Z-Dh2n`8S)FP0C3pb%4yH}y&!NH3xJV-b&JWw1cR5Z*3 zxrBs&&7>j$l8^u)C5*y*DSC{jcp&Kk3kvJIx!@4M`$=K?im6$=s97W;b36no19Bwq z@Qx(X8vFtP5L|@u`~rgX&eEEwF-Vx6k33M+U_1}1ALx0SQ)n1Q0jh9F2+|~JA_s9I z4Hl*dl6<7VV?dEa0faWSw}9|dn;;1lPcuos07G&M6;BiW^bv)3(FBq{T0od4(85== z=rJINAh{-*ULqj?2nbRIF~!bMiPfD4GBkci0GV$PZ|Q*4>CWXRZB z%U3U6YCV))4@Gk=cW-A`6Xd8*5o7^LM~7Gk91rB$FW%kW$_*3`jGb9~fT~$p*Wx3L`lhtZwVzTAU zY8AO1HS;mYmc>lzy6%GmwW^wmx-yef<)&sPCR@#FQ8!gr6gwV$h480|L>hr(dN*kdPof1%KXMP*1)vD0 z9k_6>5b5CfMj~mVhpMhIjON`_Ox{Eb-nocg!tqTsO--Z#PF1NB5fl!I6Q!nRDyBNl zLUjwNCD4reAonN+1rLjgny9GJgt2@m%Qe$SET$%6V9D}g0j`<2$q5LAVvdUlEK`=r z5?Mk~^uX3yzIx@#_yeewyHZ!?N{24C+`XMWmFIe@XSyRJSWjd`Zy{--mvlPAG!ES@ zikbsKxKnj*r*>vzXC`yL_7qUVlEy+po>0Jad3#3^foN9C;4rBKgXx!MA#oT|p_&+& z0tg4knI7Ve^v@^Ih|Qgz2AX)^ph2KEcT>`}m?rI~7D-970Ni9t6R|Y?ft-fcWKE?F zA2l?Zs_L{S(rg%5=JZa*v+_ai9sGR+nh3I(CJcQ%R!#y8GlZMA= zriK6&nLLpQ+efF?;sTAO?UVCTiD&wwafB&;o_9bma0l);c0$zInSk> zt~13N4g4gU9!m61lX2krmy|{W4K&bz!u(ShP#9<^xUd$`0m?&(nh6A+0z+vufB_LC zi8M;%p?stvz;Px|BzcO4vQXbsELqFd_EjFr^t{r*ZePl%R(n2_pPGYfnC3welhhoA zfrrv)paBX49*WXX8j93VTqG%=G!*5bNJ>Ld9*P3JsCa^)L@ltEoTDTqx^X2aC?8?{GA4)p#likBY>ui9PRhmwn{s;o*^RZ~$}uIyyBn#h(nGdE^7<(%nG=IU=5XU&)?woEmd zB4cYUU%hy#wM!q0YA#&Le7So&yYhAQ^hZJT47#$`)#|O?%B`w(TebC}U@K!?tj^l3 z4~1D3>#{0qvLY)}`cRfqSE{U3sa2{=WP*TH+BiQo779?-tGxP9)atJ4>O;v@T&7Fa 
zNg1rFR9971RUZmV^~?koq^i_SO-;o_Rj7-qsEMeIF~%5UnGBgDBbKjTywuvID;F;2 z%iY`Al|Phy^>lRBtG&CqRc~u&V>4dt+{~b-%X50ZS?=zd zskJA2D$jLwch>n(mUnh1cdFjbhoYUGj4Pms>3NI?2Rk?CLt#$Exttt%3FLern1zcN zmP%Kxl2xjA&rcxm5S6J=y+pyLXGEvkRn1L}(1={Bs;a7)nV#91oT)AxB+($zFrh#p z;v~>Q%mZQJrbnQenftJS;en!oVGgPJXM0U8R(DAFm|VIr%keI`Fsz<=RrbF8;;uHP z_EY<*{em>L|K?7&J-429ai{jIim6(SVT`I;nAOICn5S*seU6<-Y!9t}Xx6FKe649^>k$D;NK-_N^B#&t)wd7P}-3wx%$$CRT>_PI=MLs8kjo6h+W8(XQX@hki0TsDWUX*k;AWY}`a zc3IY!wS4bmZY_#^Kf@Z#oSCQI)Uw9d7^C>UE1lcB|AnyyRby;fkF4nHp)S*MNw=>U z#I=IZ@T3 zoR?I+)-iPIIc8awWze5e=oT}E%QCAo{dHwjKG(Ub+x=r)Uvo}eWuOd{fihSgliKdw zV!X6c2D#8xpJ9(-{*-ep)yCY?ZU1H4>q`^8`D4r^ZbRY4ckxrr$r@WW6xXPj`s<}F z_g#{Chus>(7Nd;0DtqTzx>ANuZL-Cuti4{I9lF$|c&W7#oily!P!%eBH*;sJ+ zprhzXMW)OM004j(00;mhLjVj2m zwG&-AsypROakEMS)GxSaX(bQ>A3dT^GEm)#mktCx>7obkpr`l+hpMS*M`qISVF5{z zhAXa8n;UJb^PE~Q4CRHG>eNMC5|t|v7)2< znkfTP3FeOc4$7q%;(>YQ?6@~MQJ$eW&_)v3!UyHx3=poO80twl`#N0CVTaxzxERZ# z*+GXV?Qj?SqH+oF%kA(^kfu=yR(Alu>kbzlyhHb_chEWY9qt&U25-*$+M#2EmW&(! zSaN7AZO~*4M5m81(^HOTUNe-&o8M^0pHn;ViD%LswTGK?G3i}6CgB=Q4?fBh?HRL3 zKWYiOple3fB~%-VZOh6V>~}MbH&B9-*o4#YP-E;sE{}lL>$!KJo<`)-1<3VuRGvr2 z;%1j~tc{&XPKz(6XqG|k`N#n>OXJ~!L`MYSPnV@gC}P+INZ<|j2!mvx;yKHpktyY= zp(KI&gP>$1C0C!ntj%e1XXSK|u9kD&>ohIIYD&H(WUlCLK<%EOh&+SQd`=yKlCv)5 zjpWTaK(JyiT4bYqoapM#(73IZ+YR zXJ*PQQY3R!yiFXYXl9VAmqtzN47)G{zZlp6QZ7^;)<4W zjyeud_Fz+^TQ!BjOFPvJw5r&_7<0He8F>>zEV>Rj*$fQ^!z>B%8_Ez1HpNG(l|=XkdvpqwMxiy0CI!%C%SC!0<*J18_=06b9z;gJ5&rzPp# zU@9%4>FBAizka7gwHc&Tpu*Bst%-laiQ35c%4q7A>ldSZ&}BLD8OJ}l!CH|pf-coh zIXBY`(WHDuCwerI3}c)$LK775jJqK;bvf%b3~k~XgFsG@@zr#gA<59B7CTW3pXomU zNp1|CI4BcYXXErsSU1lLcmzJ7l6WkMya*_= zX|}#kF|6v-=uBsoC$)DBqxHuL!)O$}z}a5b$7tlAAC*GGZBLKXT^ktdFi}!E_J4w> zi*5BVBh!y`D4|!ju?|O*k)zPWXA^Utvz(OD!N5+Mxzu){CKLqehY1{%mZHWG0Fj$l z3COquwh92L783}E^1GzZ0U+o+(sRjZKk_ByuT_(wlyYFV@k6(*e+N>b6hkn?JZmW~ zev)hyu9-t5g_dgxZ{<+9W>&HY`iAoncwjlES59n1jBqtR-M529P4+aVd@@}_7!^@a zoVPVTISl{Y|_0*bnjpmo57iV^6zN(ShYji>1Idjzi2kU+{9 
z!UWyKkqLBYGr<`M#96T%oYZc7H@HZXfr@luJ#fOuj(8TGCRF$g%y2xWwA8ngZjeR= zj@NiNL&gT`tsNss{vdK!iGx(Hv&*=jGMAp(k4)Ez+CY-ZFX;L$BBU{NKYu4A)wyLD zhL*|*`QQ(0W!Ss0Q2@PBFLWZCgcg7$g=JCr-@bjrM1C|nYkptSs}4yT?Y(3u`Decf zG1!e$Y(iq(qTGLCxqavJOkn}4*+BT##m$GWAbWCZRMlC#*NH$MAtL49l$fxZsvmT8a0BP8` zsXZm|;+$6q9X+??Z7N!VB%ze2(VHSq8_6lXxO9vKHBju>&V8O6kaUxzQM34Tbkru9 zL^X`eUuG_pHYL@lvC5~T^F|-c%_Ej%eJ&Vx6Z$65c!YC7%^s1tqoMpPnwjKkyQl#<%rF1715)80 zI7$7ccPszRNggCf_vM~Wc?Z#ysqi|5mydRkC{ih?@H&^0N6|~gw$*{u@hO$SssOB7 zvBRLgC({EYPAvgW>35MlTV3O6Kk+y~*n-Gq0t1{V&wec|Id7>r!xXW!655OhXj z89pm^M)aL6R(~oY29T$#h!QRE$sTzo2;S_talnB>Vs<)P5Z_v3V5M(tU zbK0#rYXbT>r$G0z4KzclqGPnLD>TJWEX6+h_TMMR#z9$w$#Fz?b|ZzC+XQYZCRx!=01*APr1Cm9CQb%)`v?}RFixZv7 zO_H2UR$~;z@kMk(jo(JHnNaJehy&(LJAi zPPAnCLA^g5JqEgrF%M>ZMgG02CzUgjq-?|JBH^^*e|AmE-8Z0!^rILG)|FYziH`?H zYzdraj3UvCx?OR0ze8?CGvc&6iW=JN^oct6mqlmcmhN1u2k5ErN~yyorH*caxy5y@3i%JR{X5e(GyjN1Rf7^Cl8k^ zzNos12M;t-y-6F_Xa82e4`1ePv0pRgOjo_A2wmkF=yE2HIO{BD$hZ074)%vro*mx0SB z@2oo&@1C7&ft;5R#Qo_=hYu3t2RJVmXAnp{mVaJ4cZz%%+2o7{60%pUUh9`9os)%( zh{%m6LK$Uzfeq)AlEp|VR%VpXE6*ndF?HD+57jCO<#C`K*5!{(?D4}KA{Q|&x3(A* zjXc>%pydy5SkRhXW!CU#cpq2*LeEQhI#vTkH&qWm!V1|7alJ5ID*c|1%8?i&`cNT6 zlR6|`%EHSE#hL(M(_xXOF4&GhH4H~*_6o=uN0l#rrgBG5f_Ppkah@2M{j(%T)$}(g zT>2@4m`R)qQ0eS|7xaN#>f9u&YAYjG1_%c|($>@LgAl|@3Lr*DFrgNY&)ydtoxDxK zq$b|ECqV#O9UHw1vF43`HufeAyhW}*r)dvCpFZ5|Tq_d3^BE+sVN$J!+%{UTGk+6Z zjfWiSA$m4FbqJ~XdYAbXsA%W)X;%IWy-koK68Z#9uNfVMY)49>8cPsL=V&5i z6TGv_2BG4NAPD1gQ~mAmHc^H_p6-Je3W*tc(B@@G)ObW_P|y{rkn#R$q>~E=my;sd_w;wMgeGKjn)$eN~1|a=n-x-8KMQbXBQXLbWh_SPRh%l zqH89{$a9jqT%VlCB#C3;^RLvd#ep9r#*dB(Xr_LSRCQV`GiJ1l5HmK@Rsl|FvC{XH zvr0k*+iZO%8ozqu?JboYv)kDl{Z-#lf2loIm@-Aph*4)EplH8NE1}N!zXTm4r#^ZV zqQ#&DgVPcUp>zLZHa-B`^UY^&2uVSuJ$=itX3)YI6lrf<|CQBh@a}gN1vhqj;3bJL))a`qy{4hbu|r4=i~C;A4T_J0E|j8z{|m zO}K~Tw+c`FBd*i1 z#8U9~ zg6yn-L(DS4JjBFUH(4*q17^N+;6M-lU*=Pd(+i_1LhNonZM`EvB70AF;jhCPSjl=q zQ|Be=eYY#UXb^r>$$BqSHYrXamx-}T{oXn(nmD7F?2K8GFE-{wH|9wknOq%zjw|g9w$EBz71VB+oKqiyKp&i1FX(k@wC;QlQX)_uNobQvF5wqT0pF|Wnvl8H*3h) 
zaXql>Hfz-eTXSUBHj4YUv6?7=jyV9e(0{;)V}FD)Zkf|!Guq65x()vzN=rWK*o#(r zDI*CD2k(@goL;*HZeXl_lhsq#AYl^_JZ>V&)936t zShdH#Avmm&KP--e*xZO%W|r6*H?iPo z7)ObO2s%eE=zR38<+FSMq$z3!S5c&^wTl zI)^D~K(Lj?gT>S|kq6+M57{nB&0@Kk3_Q9p4Ym1rXU9aO0M()fJ}b_t**6=}3e`}f ztn@Z{fFlw)_=->gJ9ilHqHH{A&Ye^HQtHQUHd(C) zl$wcjlL4NnbmHo=DH7LFB8#Xdc>TS(o{j@{{w&&vbZ)f9d59A5kLZ|#r8}N|uicG# zP2_AmTeD-lgs40OAWH?131OkLb_H@6qT`G>pf5{u5_=|}Xy-^8-ZXx|n#DA@T`dpk;bNmSGaqU5_sY~VFwY}au@+^ zUGn2jz;)rGxkMCRAYAC6`X3k%4VYu2F=$p#Z%@vJ0(M97nsKZugNe`!3W5~?;1++# zu>oH>G?g5sVkD3fqX?*CZn^hXdV!J6=y_ik*_+}qD!A5T;*N$F@?mj`rI9Yee8Z@& z!vFJjkMO&i0|GVjbJO~t+pu4@ng0(q{nYHH|FtnnA&dfMVSvqeBE7K}g=9cR9&c%r zMq{lD76NZb2@;l4)eFj|{6>rk0Bc(9ttssxaLYZ{*h3zm5?Df?!@)lLSx8XxmrIF# zHaU<$Ifs-0;mO7$IEy9M$p*~oh9zel9B<~`IVs~tIvw!g2^vk^3;LR7CQ0aIH62nv`Dil;>hU1qgU={AC9$#H}L{>i#|zN&d>&(HYD8ioMbAzB~ zE|(;zQ`($5g-+z?Ge;(J^gD-6$fwF2MbZ%jEofcBqRAXjVnO+yr^0jW`xYFeFznxW%BRzC2ndbqYEA>RYHsG#CB9>w@V0=;0Oxp+Skho-Gh`3Lj zvYQkDv;fPOuqsQAWO8K)x?bFr;*opb>EM+fCPAn|YTe3%f-9#M7Dxzozi4AZSjid? 
z)7h>oVMX1}pR3Z64&pR02$t(dN==mtVyPwr=)aspXj2CqOsAmQ7kLHBUwu|o1rEtEX&SkDWfIeg|~a-2X8cpN?v zk~U+E)H)xP-oQF_7+O4;z7)T(>oR((T|S=C=8{gG-)s(BTAonW`AvI{JrFE-q|y+@fU;>6igbvNGW2@d zv={U27^e3C;5_aqke?hHLP@etMjDvA2RSw4iIQD^t z$8z$rFmBp!==KFezK)9kvx>q}F?9r)7jNcG(dvW_0(V(n3m%vS!xB0i_3^o){xK^5 z1Nb;h1F`}YdQgeP)&(#?A$a2QFSHKQt$&8-cFZv?_F2?6pPH20!Zz)X=6dC(1?I)% za9HeU;<;u8sPS0}i+Kowa-dImz21@rAgRpJwB+*238nqSUX23KaZ#9OlX4#|f&^8f z60hX|j7JcKhUh=1Mp)E2CM+$13QIoU!a~x*up9|*Sf(^OEQaofB}6F1Lg$WH&bdk~ z^8_XqlMcla&WeTN>l()qfiln*X)e_zpwb%R=#}P}C!aZEYGd(?5!*i6HY$Q=n4cwD z)Mj~k=8QQZZz5Jp1Ty(OrH#SMJ}9LaigNNX37mqZel+pN5Hx!mQgl8r^GTxUNm*9H zB1bA3Kx%HVw#!3SP#}jKT01??QxLByOk^A4G)kmLhTkV-AfyP8=HrONCSAupqrQURTBFWa5!Mu>lno7lOfkS38QenT&oH>D z`Vm!|>UucsscZl-y?1(1LBaC_r%D%l7Kalo6wqD6nz$({gP+h7dct?qL^zQo!UHF~ z$(v9-I6<6o0z#7$BbgI6l1|9&Ixz-2;UjY=#>ED~VMp|45f>s4R5lrXF)^u4aA^xO zBYB;PjYz6-#H(7X&g;fPo&$?9j7rkj*;8|?F&1EV*d8emNNa`1WYk*1K+YPu*7Q-1 zs2L3Bpi-0$L%6Rj5Z#9`QN*d(D=)-d3$Zf5l2a+c8i^sISK_v?&py)!G4gN(B{8I# zW{-hR1PP+%hCv7}j2qD+49+Fun{Ur{49N4GLTOh9^)OR?AzmB6s{xTY%}K@pKvh7zuDWJ2Z~IuW#Hv z10jnh9cXTPp%U&9F>4FWgrjFZ(Yj2ia0_kcA;7HHkxfljp#ee}qA(eN{Yc^}R~mmz(uI_#l>J%&LRzKrrJD<6@wK zyK-^X?-7-oZogjI75+&;*aArm(nu%AmLwdP$$%Mc zoV*F&5`e3tVxnD5@EUx*5R02_027!>6sm>N@OU4z?yy!XbU7%yI;{HI4#j319_=*1 zfKj4_t(t=Zge0C_`QSo$&8OjD0~;x-B5dUFlMP)6R9kHhTeBWyt&O8z@vJ=uvh;3p zwGqh$m*rJrLznz?S;uBGL^?>T9T*wM%ubrubPnSK7kYqh;Bl|sDcxT{m zG77<*zdqHzco9yVxhzxkJ>8Uq11Q2-!iTZKyux2T1%p6+QIw;WNumKqKM6#rF)2NL*+PI)p~b z8#G%|T}BijYndh6QqIlc8!od4q@wBWB=H%kOlb?$NJ<&OIZ0Ntna`&CM%~o7ILbaZRz@7Wh{LZOvYQRyT`Z+ZYftX4nnL&-dM zL6&PF$;dnx%+%AcDI;@?+qq~C<^lzm#FDaSOoLVH#4+BE-Hpl3WlLUDO?+BiCVD4( zlOU|r&N=)Cab146wdgZ#3M@L=aFbHz7njUula;r17AO^GSt4utXHK8?z z0DLUpxD)VIUqsP=oFeb}6{a+{(tfc5{C?4070f)p(Q@&j@bHVS#4Z%*vqk5Tm*yuI z4R3xNQ76ZO%y&lH>Xzh!l}LCuE^37vM%bf=!^08MR%bbgI4?$AGE^nkhr}8wPl9|% zNOy@r#~;Zdm@lkn=T7KvXwjVoRO8F;d)8Q<6+n$SnLc=Mc`M{i|iX%#?Ayk)+d+w zQ9hq?C>fs*1|?7J2I{KJneodp)B)pK8C;qKj@q#EMnq`2j}UStw}_B0xjz88l6wg} 
zt7qc$4!8{bPgGw=Q)#`p9dg3Yd78nR*avVf5Y)3j`m#04(PRYKeMdf6{If0{2Y@N3I@p+aL>rN$BR9R4fRzHF*;MY538;=F z2xtMQp$%$|aMu+ArZf5ou?on_AS>vCUW~^++K@g1Ksv_uv_?qn0&o;`6$_r0ktAc& zf-K6i3)GPNHTRON&J!CT!os1OR@2Iv>PIaD*#M^gLo9Za0kHitI`dQyY$l&;^g;IP zAz5VxWPU&c8W~hP)d?PfFUhY|9}_Q%p#WN+s}uPG+u8 zetCrwXN*(th;}?WCv}D{Xq3l%SU!}-`ksf8vsc&y$&VrhiZNC6A?N47+0XL@9$)jI z*xWrl5v$>%jp6L$#U&O@o2YCcZc2M(?PbwyA@6d~gwM!=@5r|$PB(1QJH}RM2F;dm zybA=HU@iZx-kVnxh(RC_etV)2hD-g%QUrBryGa&DgIP%uKxUy!n?5NFCH{rFK`)*R zngg}1T4AQz1X*4p2Qc^B1-u4KuNjdVtB4==VNENCi3r+&q90)oz7*09u_17r-)=>u z6~VC>a^o(Yqt>@n3tT7_$&Cw(l?gKl!k&+a!XUa-J*`cqS4 z%9PF6m-uxLn(-H%@LD?^@Acl*a0r2<&LJB}z6eP{eCST0z5BF(jM$P@tD?2m`YB1Q zm9L$!n`7I&;0FZsxP9AP5sj}c71qxCk7-q4>klkRV;WRFgWc}}rJPEg(JNeYb?>L< zrE(Tn4QDz+OcI;?kChYYp(#<8f#PevjnTkV-hXgh3_m&7_=B+?`9Ze2(P`&@Vk~FTFvWxDgekg;9 zQdRvj`hNbGfz}}BtT7i$)@1DMF(_E{3f?6C)b<7RmXmBV`lK&kz&~{gc;htrID{f& z`tJ|a;sDx^L=_}3Z2}xmY?4eI0XG3l z0b{)h&i*yepyw<{C3v)ZWsD_6WRaK_7axa?M^fX^YLci|%}1e76sELOHi*@aCt4JP z>PAj4jcjv6L^mGyb!>#L3WcIb!mCf-)r;VLp-|kA9ZPf+f}Wnl7j(2KOVr6877B&Y zAY@|cq)=Fb$UAOVM>|TcO0tb9Q{SL|qs2mByS-dR7q_Op*M9vKMZGD>8`L`#_OcC+ z?CQy2JJR4mUwo>v`nk^=QfR}c)h;b>ZFKQtI>Q{>q+Xv{?qY%&!Gc{XC3|CvZ;UTg z_OM{LeHop%E;zE4`>VWFrS{oB4b>}LYsOBS$!EkcsWNGH=!usZ&&aB>$(!ph#zlFr z>Tt-XvhCq;CisBCfq1~+0fR>#O>UG@GkU;aunHIAG1xcbYQZ_Ez|*vLaNje@olqdy zH42fJx!`npn}(`rbWVy(Uk5_LCGY?QnU;^=QaYw&6&kB0cAIfy?u;CcvqrBcgkUM? 
zf&n0aah}%&9p}jHbe;@0vd!4VYQNDZcQo;Qp~7-pc$E4<+@!@L zvj?H^)#a^#S5#9w$U~O}CHO2Qy_C!DGgpONhpxCY+_dq^fVsnkE%DEwiL*+&Ds*=>>;Gc zU(!*9M(6Qzp`_I`#&)I-O@R=9cSn6Swj(NB`!GejvAr)u5CZcemeC zdvPUtiva@^4ebmJ3k3M-53^i0e@fG`gb*9pi_^+T01yOmAPy4{2jZ|(OPbu6e|{MZ zUNlTlpkYD;2nEr=0dXJ>4eZmms_)JG_t;nh9Evz(lS)Mz1iiUD6{1~FH?$L<{$Tbv z^b0>8+UT=2D;3g_IjRYJQyfZ09$K-|^TCXVW%n9sU;z-oCf4N^_vk;TL6|%uN#-I- z+{f4m4IGHjzyTsWV}FA7ewGm$IMA>_QRmMpc4VHBi%S{bnk*`n>SYl9r99{J@24-% z&4jMKMLu`xgoXGJts%N34w$>%NSvA|6q0WI%NU8e40GqYjBP1NN|E5FK|lVLeA3XA zTJi^=~_Q5U}&!n1c8m0FY({h2q@7TeV*QF7Od|`k2US^hkuNEv7{TBRhy!u(p`0bd zSjbkvtiB-0Yk2Jyn|h~$TR#FAwxu2*3~ z9!cex{csSHQ?!t7LQ=6ciIfUZKm#{!e`o0=Rp_h?ezl?`M;1v2Ck``vIfjw>Qfjf5 z;MSe&O6t2w(*p*jl9~PS*hl(cOCS#Pik+rq*++i`6Bxeu;tK~Zfg=!MfdPgEhJ^_X zUwrWe1H~I8cmgiL0Uz+i7cRkp0TC7!5O4<#AZTErfC?BGcm*Kn1||SPg9;cJpum8I z1d0Qp;D8}Om~g-Y5Ed93XqXU))Tm?K&;S4c1OR|}Gf@Zt5F|R9OUt4?N-6defWUwN z!C-(uBo2qe0)ZeH3WkD#Ko|@N13`j7FenP*NUBC-$~pkl3`xGT8*3 z$HDUT6q3tmK_w9ar~lckk91w$bX5njIM>Mykp!OyjmQW*{oPrWk;LpZvRv8LsFr4q z2$_bXmC_(oKSbQ3I^-v$nZTVya&Ap!|LTk~=#gG`n<}ULTksm9YE+;MxacBV@Tgfn zOCeeuP<`rxT)~L)BlCB46}E~9%qiQ=9%I+-3JiFmt1EDTai*3E?HeLL;I#zvLS()! 
z-53P}tCJrE)q(R7QRuDJ;>3ydKA^KB zsB(;hqvZO*NAb*;(t;tg1&i@&*j7g6?ev-qDeK0hLfDzOl(Z6VCcX}`KzsQ%#!(Ip zfYLidMyf6YM_*7K7+vE0V(>8VruGCZY;iYX&%{DT+agkQmWq&hb>h4^U-c4z1}7R` zJ0qeH`l%7=>Ulp%R5%YQnL|Pl%Rlt(9jl0310yFT0>ro~!n+u#f6o{pa_?B9pd(oZ z2C@iIRnP$=D-J^m*$4f^y0{yV!eF;IjGXbtqswQNH6Rv-;@ZhBi*kx?G*)Udmny%T zz5~ODG|OSRa0ok?(P{`Ch{5qf)Ovb4doU(GV87y`WeenSz1L^LU;8G|Z&7lULRNe9 z&_SJ!?*0`bcS0SrmQODKVh3Z;HT7rERWnO*Vc^4VOj!CkzIK|@U7?gfJd+*95b>5K z>QLW_0Tv2ZTkL{^eMLTN&roS7sStHXSGRe;(=w(gq^l^b>WDi0w7OM=i4m%MfcpjO zIo;Hef$AkcS)ufhL}07qDE^BTly+2+8`R!4=I85ByUORy0ixsxlg-P>!6QP7#L@$G zb^D~nLE3}>OrVt>^O?Sj=^Au;RE_<&Ed?XCBnF7p!D>?f*{U560iaF}W)d_{oX#1O zcg1vq%?gZ8sfy@qJ2#p2Hm|4bjolW%F~_T=bIiTQA}|Ep_+64%hUp-`q+rm9rQDzH zz_a+3I+^k(Qz@L_^T$tk0edB3^!|9VY`3$fdsL6gYeil`s`DNkLC-~7=Va^QciA?& zSm2Am-7Am$GI2271W}~L94x{volJCzYLENOfHJyfS@&-xL9Qi-x4e2+J6A$hv4FdeP351)oo zg-W}S5|tGZZBG$XbQsZ8@ezo+R;Tp^Ah16Yv2U(Rxx>L$X(^X|z~!Ek_{nlR!&795 zg)-^jdelEzS61OV9;U)dplF53C$Yn%cRYEm{%Jo;(PO`804G!gtih0?jL9@q!b>~@ zG+I7q%;$h+&Ab_aS(r(9iGWcXM^C)_<$4fL9ly+lrwLm@#SUq#xaLZ6JPK+}E^w1? zv6+8uY~7^y(DE?~Tva?$$HKRw{=x)iPIYnQEF;z8P@+JTgGr3s|40-7`kFV3?3Tmb zVb=~((Lw0;XaMEOnCzB-e(bs3S!oBb%{m=>MN7MG5pu=0a)dZaVlD#W*+)aSNN1=Y zX4x;AyEU);8X`mPgH>gX0uQlC9!mM6vLB!{Qid3KLQLt__s%rIx-2NtoQu8~vx$k4 zNykv{@#0Dp*C-eVrb4&uzZ-0YElk}M$-$soO9A;*F=*z_W@xft#Ng5;b}i7z`f?_W zby?o3Y*BR|4q38P)OxI|thdZU4ok@lh4e?GI7J4xpbEJ|S9ZkWB7NtA7<7EeE`poj z6}Ap_}pt8V~#erySRhvo`u1u zKnnO;RuEOtd)zVvs`Kl^3uV>Oo3|VdjxwuR7d?`KPcMPRmq0}VV2MhV%$!PLl#LeE``JB zDtw|9K9S4rBn&O$rIJ2kvN$^zed$T3#F71jAdj~RWboZ~a>P^2$!1wG;&Dkw9bIj` zl+frNj%+F%PV!NdY(lPO#}(*T17ARd+&}SIQgBxu@LRK&E*I=SC%s5E68KKZf z0f)@)E0zggzaFNtnp!-_YR0uE7v4lpZn? 
zvKEJ6(WBABTnAO0--P~jr;*8{v3Vm7$V?N`VMECF=4@|=0F?<9x+qRtvid^`%j?fV zsl$iG_Ems0Xac^ek0n8D@R2D3GKwt%4mT<-2q=T_un zCalL*Vy4-{5!Q?m40d;H3xo)ITSb{WGv_}Q{)aQ(D#{`em=Aw#8O4jw!^UXtB?xe4 z!zxw}+S(LUK=~&@7a1w&NvhH!gjL}7w1O6dJ5H^Fp;g@2%^Ra}A_?N`UqGj%ATW;x zF*kZgM8pZVhk){<*6{U49>p7+j^adWhb5h!LS&%%#(bNQ`W|6BkRLh;?aX(LhJHc^2c5xuv?|I4Tq2ftSJ_20m2MLbCeWvF-YR)6L8-!?4dHNr`KLkWOirG)j4>;`TdFSwWn{x#Oac zMRUm*#e53mX)HTnRrq*hvPO-_iQdWa%V|@Ei&Abrt;w8XXG6ILsJMFgPJo8QzZqt6 z5DXpya+wQ2z|TLJm)GbLotCB(#!~S=LHZP#DM{Ir%`$$^XoYQtzqs-pfo|YvfCTYl z@|PL2;3|f)C$gD_XVpN_bT{L?sQ-BTTdXjRj;TcG0v^Z($D^0gDlYx7cLqO~!-e}* zlr}q4+@J(RzcMu|^!=Ph9u|$sgSNwBYQ=Xs5l{ucfgfat7Kug_9WMO*9|YZg(;|i@~&bcrDG#{l(qU+QE)s_ZGHB>Ol4q&iIo-dPx; zcEjkZ$GSR1vY={*@lj4%4L%oVORh{Cm^Cv9U6j`;rR!kjAh(LvO>{-f*RoRB_4eJ-zcH!lvIE; zmb+mE1|;2**_RTAMUZuM$tY~Fs}RtkBHIXgAZ?pFyv(CXT}3Jg*$hA!xPYu=jAQYC zoo(T4mjzfWT$Y%{PcFkL8AXz4_UgLGuRTpfQ9-UT2gND#5h6qAdSXbl!yGr>QGQ?kl13P?DwBCa5P2-79Ve9 zkP!KrO5W}CHe&(;0t1ENU43F)vbatH=uG^C?&GhzrC^$vA@m)}HF?BELv&plbSzgX zdWA$Z%@Q>EH(E;}%V&tP6d>T3Tgbsf0^Z%T>qpt08b@O5r^afte$>y#jbl zA0mu6DOT*@$Z$~w&#UdXaI6kQ80VkOK23?c?uY7vC>UeumF@8zqLPU8xC9o0X+X)x ztctM8$p48P4ZP8r#O@XXlCkQE0NMh4RZj{!6#yxmzGkj|S%92cBg689U z9&p)wQUDen9RQS2ONA1j$XLr0=EU%brQJ9d;+gm75K4iflsY4aI2kxi>nR1 zqI2M9KM#CHUmTb=+L)&@cHVBxG{)cGUer=F_};8iTQnaSg`-J}(y~r@PkF*iRNh?; zucd;n6zxjm$vF=pDaT#q5k?RoK?{^Ys_cQL7|lUs@q*H)w&61V9dU;MBOw|q;ETY6 zQ@EL32d?jPy3cy4a*-sE|9>!8+irEj(xffM$Q`g6TZ4%ti){uGtv7H@duhRWw_ z53pBsv#*KdIS1Dmij~7&X_?AD zpz<6{n^Ts}tJm{n^r9?HI*RJ}9KCB_B&G(jAOTZ8{I}rJ@P(`oU93n!^u_sSgOug6 z+0_b7y`Z1Uv(P;uC0Q|cd*tl-2Dd;!-$g+@2}}_C%n5R~;^84o#_CE>h=3bI_9dHe zU{hXAq~Dg=K7#C2`R`BVOWmFM<{pDjUm_lPaHe-OhD?DWR1oMMBH#3h=9g;kumPuu zNtwZ7Hke-l{bY>sbZR!HG31DN(!p3{%)JH`q-2`|vIktlojX~#{WsPRo=3`!;j^D+ zEhOyt2w)N7tY?sfm6fUN|GChUN`Oc+s&i;zT#SDQJGX%#D+v*|kO-Zyc>ahT)ZbqVDb+S;zQVa<}{ z#f8xtRW#*=%;X+KET){?6dFp(@+?cgh>Q-Vu8MUoC@0WrVeOwE%YC>3}l_g57)38TjQmnuGay=3Axe+n^r8K-Wr;%TrI4a7lBDJpC<&$r5^e~CCW{%3s`Z&>akKU2VE`5>qu^#== 
zq)F+KL`g?`NTG>V;Tjx!#`>;ID{VQEcofaIEMFh?f7vJjDJF{ZBuepIrhZLKMw1cz zs~?06>VAwU%Jc87^lIK@(nRfFv$BsA-xEUka=Zsw-X3;8j1rd~U;}v17-n%D6nvPb z%q~I*Gt(-!L72oVQ30+jUmxZ#ZIry6EVe$+$WV9dvKbPhiG*jx;!FgMUkys7ltqMX zV)4say$K7Bgk)sCCHvwak~kqMELv}p>xRM;B1$+yl8rNBBltBV&7PD3WUI(ybQ@g` z40>{MvQl!j&_6?i1@9y95k1bHmne(Sxoi0uNRD{Rv6OZ|YV4jD0W^yxzr%_Oz6Wdh z;vay4V|uzFwvc)5Tk|QE>@fvr?btBO*i*u4wy$xcUi0JJMMK#`k}$uJ0ScF((eVXL zT~quU39{B?=m*mgxGdh0^1c9Nx0p=R1*l-4 zSU4PV)b&dx79LuhGvF(5##~qe)j>W(h$Z^F4(UEQ-2da9Ki;xA$DwF^n;6pDq+Ws6 zM$Afw(dH@Gj;8ZzIHjaYzCgt3a+msGq9Oc(-gHrk0^owM{1Q(5#U>nyjmt7QKqBJ6 z1j7jpcZ7+N(C^$9c}2L$H!0zsL$ikLL@DABf~&?vvMpe!WfXG?^i6!0lJ>2 z!OQN6b!h#fG($2rwziP(j526;uTvi~-i*p`CIVqMSvG?}h+ohT^$5{FfEhh1|2v*O zOv8P2lBVCwX0ja8SW==fkIx6gA;(of<~dj%38Wr3r*)Lbj_3Rm@FjM8p&;%BMgfY6 z>U12;n!x{p%nY;_pqFD1?5QTNg)MjNT*`*{CQHj|J$5s(8u z>Z};RoKg}kOzM7XIQ)x}+W8Zs4$f55A~h~VXa+LLOjtsA#ne6}J}A%?LVA>-WsX~M zA2STJp2K+I&sSgGZe~ehR{y~4hY$INhL3LOVND@wekmreqDaab#45KSh%Md$jq z1Fx~5H9CXeH@!(bs3$gm3Pvi}qZ~uzB$8F<gmLT*Z_E$ z=nN2Z%(Wl@&>8F?YN{JntF8<*og=BJ9^wJgeZP`bJlLI@d}{zC+F)eVPO-1S*j%3( z4OcLga3n{CS;F9v+6{I$ZrLghLiFG9j@n1Z5Ud+;1q;iR}=pg5igLKQpDztQH#ZV)G$eaR;?W(nyA{7GAIb%^Is#5n1hGPG;RRVKP?T zZ1(03iebl2qj#idYB*pATd&nX{7XEGiFRsf7w@L#F~9yC@^B1H4IXgsqPz6e z2r>Ix`(U={!Gp}<=h-MBh^SrPi0C&+iws#vUHLi#THrDzVdboT@9e$hjmBozOWiP; ze^qy`9a0NoNC!4D-18xqIdaTYe}>JX76}X5^2uRX(^`_{p@h7(JVXevU0SE z?z#?%2a>??VUUd)^BEGwYz8)@A!Y4l2h8q^j*)hV;M>8%ThU&Z_uXO7NOFgvTz1|*NFU^brEzIB9HjZ%C z#xK+})w~-?*!B27Y>2XGf0(8lI&$b1oMaE= zX=V=E!k8Oq9K0g()f+Yy#lVA8OD7K1T8bs~qm6jTL`-aCmE|G>{>GfrK%->Iq?*Up zIX?`_@;%HnNBU-3HD#KO1P(*BMDw$`>tO#wO$!w0z%nGUjLIQfw(XRlJ}1ZOd5g^) z%0O@Qz=h$D+n=(mtZ*Fvn&LmM3C|TQ5F4_t2~C~&P4PF5sEgy~`_Y$<-+KPZ>TA7W zlxEc^HKeJrH%UUZW;74orP}3^Ei|yei^N&{cqeEFQ;C?#TC!#~BBB_g==J?ZXKR`t zApxDPDL#n|Jxo>b3cg^1YO*$_p}(%lAE46Y=&py5QfVk?YqlDc-tmZDAfrIEEA64P*78T^>%liVAxW zG~KEf@o{`fj@Hq!_klAs1~MmYQ;NCg?hN^1#NCI(JTL!|#*RJxd5AEruKX5zQbPx} zCm5h$0fQ&ct~8}KZDR`;4uC*G!a`udB`~1C;9x6U5FkQNh|m)v^n?gKA%F!8BrvcJ 
z1W?o4+KalmVDevERqV}XfdVWY>}ZC`wGSESASM#GM%y{19WI$e@(nWSsVlYFT9=d~ z?cg=EFOSP84Vp)Tkkm(x)dZCeJToM-%454J>9q{cEI26H630FkdoHzs_IJUt9cO!X zjWcz=GHx!XIjmu~3ssYh;~*uwnoUCQ0h7DFxxW`F5%?97lq72DfPsTC+!&sv*OM6s z*d*%H)Wc$sg7HCyn%Wpe#rkwT3kga29z^V=tLIaz4w=-D=+IO4qGN0Ab2x_H&dG$q z0@B(Qj-jS43X#<+sVZG}poptFPolaq@6McyQG#~i@k5WsdoF`Cy~&oYNI`nJ=zBYpki3m<2(-f4IT_|V*5j3`QYNt^Yql0lE)bViQ25=eTo1800000 zIYTiB1Ta`A7g&mF)-KXTw>PN7f>~ILI`PkP^^yRRkkBkCnlBA=(%+J*Y^wFIryjA0LF9|;E~^8FG}D8{u5LLHR1!^ zZ|6G#o=8-#pobq+wTaZ6dJj*}?VB&vv7897#U4(8I5prN3kIvT(aH34IGb< zQ3_fGV$$iVmS$&=g0ku~^Rgl5V3PEpKZe^o#t9hP>7F(371v!9?dG?;7~cnGr%k5# z-E7C$+#{)iVp>L2NZBZv-GvakE zzAd+{I)m6CNHuECyb!vbxUK>c8ONrh7WF#TB}B(?NJ23%1x;M!{5WtDtiEk8TMTIT z$>0i=q~8}PJT-U0_FN7hIE61V&{S{%27g~52U2}nf~h3bsg@`)F^?GV165{r6IfNK zK6NeZQxQo`go)mfupGDe;*6oDd`b?YMHqADKtMSv;oy;`+!Hlr7!&-Ly{R@eLQSW0 zF+nGn7OhSatoTKMN!=Bq$xTk;4;tcou6vNA9S`%kGhGIrI=9;Yfsf)%APgV)$d9<10K(`fqX$N)|otsrWBmI@pdp<>xYS-hb*w-KpB+8TUXiWq~_ z*U4nVA3ZoB*{i}YsjQY!d;U{I&K{f2V}kb!5-(uP`_+~ECSMtSs$T@V3>85BY1ntE zrX%%_8A0GQH+P6MYpO_UK*uVSp=0{*bQV{zAt*&oaOXI@RB#@69XNT10EM2}PmtaO zKvIdhVx{~5ZfQC{Sf>K|`t$vwC*1^sV#mYSO4>H46Zz zM04M2J^n+R8RYMaLNgv1!ujz6-0&HwNRy5=kiK}i2Q)KNDViIPi&7{4Xvvnm5*4Wt zffDx|IoHICM{T?YE)S<(7dgoB#ee{vzpU@9dglILB$e6Ljsp{ajAg>`;=d8-FY4MPWx;L{y`b(UlpgY~Cq!2qZ zQVQqp=2ZpRx)>C|$bP#7X$OFyJ-z<^;FpI0GV^#~m>`tf3eX|Q|3>C)eMF*W%^Z~o zY~VL!n7~DPe-q_W{4i0Ld-#75ijAU`$$lB-tZu%_@E=a%-laa2IHyj&;ND2bo$a19rK&l{GtsDv@oxGlhlk;>9t8-jz-Uun1 zRlOS2Bra+lBfnQvG6g=W{?F3e3?wO^l-L~bJj6exzD#v$T*vRRd-jj7mOe5{0z{%f zg(d=Kw0qO~f#m5)!~UyS!CuErR5hwrdT;oH#mj=7+GdU;Fl|jR`Eo(`4wH}SMAM0_ zLnDw76}2g(*cVV0nKcOSy8r?xgNPHcDlimip_RaZeF%mJ^f5;AovBI=lp#dbuX|6P zl9L|Uml2g z`Fz4L(Vnvrt;cASRZmaCRWaZR{;Hh1o3_BR+byuD=MU1igPxtH!g0D5T7o<)3fj*Itr|Ea2@wbLMBD*vD92!DFyP} zQ}&K(iu5r`hzw=OxWER8A$aL@>Sf(s6vYsv{N8DKk0?o>NoE0jdw(Wvp%qTBxaG|` z^GHicm3e%J^-{$#kt&Q(`nk}jdEhOnlQ=YSoM~UjET1%m`GUD%CtTzdJGd3I5by&k zeQO-n0bOLI1A9@NbrcwF81{Fsc7|Oi;bP#5=fu)7)$S^pL@aAmAQ zPjf9$W3ABu36Zk1SRaGWYHMST8Q>R9b1q2O_EWxzXMNA3ENK 
zHp3%hE&hqA8#=)(j|WEQ8;D{(Nx5viYWq||4!C}z?k=1kM3^*}_Yf0BF7pA0dP#Y- zw8B`&>o-8;nEdC5#GbTkBuhtfb|K4Wc|tOdf;~85aD;rOU{5gb zHG@xn1tZaz#DcZUf2Gy_E>H<>TMTEdR+CtkwHJyCW3aU|Hr%dysG?Vbc(HUUiF*s? z+t#Nf%wZGRy<`x6=h1ab^G@Aw+t6{2cs_$Xa>xeW44p8+t*h+4()z@#^SM#TE7Y+~ za$zw?djWr|G`IQr0!pG>81Qg~(ji%fPus>aQbR+Ht&-R#uLuaVcSBHHQei74{9TvJ zWQ`-?b*`5C2y>;hUO<8gEs6GE9!Owxgi7Ai z>fVs@M(0;AbILiFB`czX1(&c1c?XhPKkPK45mv%Z-Wo9LYvJy!gdZP%?P7USE}1=Z z$hwnp+3~g;72Su9iq5V_MQ0C4MMoJ@(G4DJ+JX`$$8P_y_n0Nn(;^%u|4N9~K#Jkl zjdzMm#6TpTP};-l&x?iOh4$GsqK`5*SJ0$zdDe4Y8jyV_AjXdB)`Z z3|W?wyVu(0RE;d@hD$aYLV&qYJ?x)F;*49o^`;fZ==pGsN6BJ%CF)6-o)|YamLTRE`W6*c z->7I4*ncphRn|kx*lyDVE20}BDscZDJ_T%9(`9*lS;=Id)C^?~1aDmFC{YL#n) z2$4tx4#cMoPAH;XACW$VM|X{u8AoF}LI|?f;g1!lijfX9mu4N`pN=xLzTIy!aFd}QHh=_RsRvnOk^@sCY znW7Bl9#e+5#$?v6bif~Z8$r*GW=QD76i>z@86_5dZ%Bf#gWLu={;j8wJ|BCA*{3&C zmr`hivQE0Rw9lChJ1-sx<6L1Jq4{52>8*|;RR`7wtSHa!)%)!&T?D>LtU*Qk6c((l zn}$TkYB_+c+Cg!ymi&{Bl0JVCHf!+A0t99ehITNd%@=ejA4d3B4Un9O#U(#ehHy-5 z)2%nN@G1-Qx+XMQ<^-1pR@W;*<9L$=Ix*=gkgI-gK{0xa7ESb?4$D!==foF6h5<$+ zUeRmtfhicl=kocJ ztFZ&n}epT8R(3tW)~+U z;R6(Qb1ISN!M&1<55f9M;6$qFI(%cn$?A}a`cPp=`Q?NnXO(9Mq=IcI-q z*(9;hbG&|BR(~?4c)ra%J5-VKpE+XS7FmoUj4SJ_zA$9 zV!!rTi@H`y9!e`s_eXGP_`5v~b>YT>+0r4L}g)8XS~ zVI;I8y5rp#q|jQaN}7Vb3Pv73mTXq(3u{@!?w>sjOi<$Y|P zjLVb(XAx`50j^lw$tSvB%acFwm863}^Kg$i>F*Et^&bs~wBifL!%cvO1cxAR>E5xZ zi4IOm&VwqU^R$>j{Gy6SMp4o?<%irS0TCE_AHf?umEK}wm#}F~sAoe?2$kq2Pfr1T zx!g1=>3op|W^*$ifj!YTTgSV0P$}~xHsZmb&ZI?Wvqe*-j~@6(C=HQ7g95ZFu%<4_ zM(!+a1u4Tmpb;?5rW^hku2X~u%fe zIe>&hO;x~5oX6Rcwla6_lQ@`Vt0{8OEBE z53@FfJ%7(dE!$_X@i!I2qyf-4hd8Z*!d?=5)SRRu5_?+^TNeQ^{77P8K;7fW(=$N*FD6B4}&_Fyj*eq!VI_ATLqpJwkK%*+Fht@0-C@9wq(PS1-ILax7XGZjiga7~9@OZAf zcgl`F$Mv7ks17TS%6w=(Z`VK~S>9)hWx-SgIm6;vTh6rjpr)w@e60FP z!Eqo@;w%3L_|@v7Z)os8VLpPMf|b%y``LvdnWL`>&GRC;uuY&Y#-jl4 znMJ86YLo(OVbzVw@)_|VFzN-4 zI$>ITikDNF0)z?^G7Lb}eP+3g0}}{=NA;7^l`OiR-PotB@%T1MKI=TP%DG5@yN6rL z2t<;*?<-G-LMW8sz%xMl|pAB zk=;J`%CkhpMDd&%@GO|q@b zdZt{WQ?Bt_m%`v$$P(G+d-kzU;V&r(^$(%Tg>qYymJq#9s%xWWLN^I5T$(8DDdm}m 
zvEMpZe1(jUoVc^fK<`D6-3!*yZGlY5o8po%!C-U;OnWmp$?<1oi!W*f;ZY5|hu{sD zM)XgDHxriHp$ovM&UJ#QEd0>e3)X3iPUirg7>XvCnK}u$?l(ZwMgip;!TF$W8tH5a zV3bk>jfocelmmBl|3lPG4|o)&$C^wANOsy-Btw5^t&r-@iD4L4qJVAl1Lg>*oOo%* zY!MkCt4`;w6|vEEv33$Zvp7I9BGxYdl5r<>0~auVVSxw~DR3ur&{l%fNDF0_E_cAx zI-K5Fotcx=p%zL?CW)WP7sA`~04iV?2*bn2 z#!d5(xhZ#~n_7t7G}~cK`N6xX+2l?0wrI*%C7c=;hSNMUrZhQ0RIJ0Hb(R_r_@X-2 zuPB5mgDzV|8}CKh3$G)Uf)(N;n}e-N6T_JuSSa-ib2%+u;!uLVZU5o_8$Wpr>0Psb*5!xCQ zAOyYS_+uzC(zogpDXI>^9=#D>YYpyV5Sv?}hK4{+>xd8-#(p9qT{6({91rJw5BO;g zWzOOP)mt{Fyp8-+Kq^#96r_#6Mm9wz;+8|p!B^H&Px0U1(dcF1L^i~sKxc8ZOVJ4) zu&*2B$WiSmgv<--7=rSKW{@v5Fy)Y3z?%}3kco|uEiKc zC2qKP%>v|mE?JFIJaRaaELV)~%<@<&p~8-(rm^_k2A-S*d1vLy&LegJ%HPO!pX0;k z9)jxwarA^~0R~W^#y))Rly}WTAQeWyUkpwZE(Vh|6bFq|d^vjzI+O9vx#0_p9!lji zJfIJe3%pa$#N|;yNUb{E!p*-4j5cMzp@@o0@%AL@ZBf^-iQ(+ldC}6A`znqgNzX`Q zDT+!(c^?K<1rK4e3x9NDHR3R)?Ws%gt9*np#S`pAOGgDI>b^WiqS8p3U3Ix!%)1zke-H*rPL**dwjR4;V>3QQw%o zyQfwEo|=y2;$(lDH5IQ^l9i5#WTjrOlf&U~BzvlUYDx#$s#B4%uIP?I?*4tw=`Q`W z4X(A+a_JId|6ebcHS=@wOb{Y~fC~nU%+*cK-?XBYRizT*Sb{h_aYPbH&j~lLx1MJcDBnB)3}=Hy!xx*GIwy_89H^)pHoe-WVcvcD&xPH)~ZwyMwenI=&dlm zHiybB)sT}GSL$ckkI2JSLf(B}M&+3vAECC&$@i;nYBq!_3}SG>paL->CL+uw1A&*! 
z1;Po;K(GQ|ykuTV-+-Y^h*9L}TVW7o z0tHVbl1IcLi9}I_w8>58OIS4&FrtA?NQkjKKuo3*T^d851zuvLAc3J&q6k zpORNO_L*qHQDGCA2?|7+;3NyeDwD5*q=!5lPPN8Vm?A4zDz%N@1ELX2%eB4T=Ek^c zRkLcbQ)=1X-*PNlW0CQ%+WDq!lg)>}E znGT3!J=g;ad~k5{%!!)p`*!HWeT%SSNjK*P97(|JTreTm2cRK+DxMveU009uJfFkEm0iT0I1$@n=0!A7qIcS;* zkOvBk0}fyuaDamY4sgI|C@2&-I5f}#$^}BMnP@Bxn}}c*3I~j%STKzxiVhE6GEnBy zG!hQOCYnnVBjrUA4IEJ5qKZQU2Z&)72*C&k5R8a085WVjL=qPY23;H`XdEbNnip9p z8i>YWhX*4dCgGq=(@accE)tgp4h|YzE)5(i(Z#`qRX8|YU<9MUF$xC_8jFJlje|o2 zF&BUUG!7axaL}MZ^PB}XP^JO}v_wq6i-QBjgAYQesZb)Qad3c^hsIe32RJ~3nTSmU zM8PzT2Fj$-K#2|*h*D{qhQWow1i94Ml|m#KgPH4vi) zbD>CRoW`+02@I6D04-63?Lll}B67{6F`&YrsDQ>Xz#@{k6wpu{IKiOCp=lZbFmNuf z0x7X+@F27_(83yK(Oe+KK@dg^Xq?LhR-jA)O$Ame%)%%dh(ggwI0l;r01O&TL>Nre znh4WSM3|k=UjHS823zK+g0KfnM3>piBpiG6P0YK_hot`Qy6Kq3+2Pxo1X7xSU#^DzVqBqPFLpCTCHldj%BuwUL*(xG61sRsg~w zSq=7_ueAt0?Xt7Sy`Cmb>n+Hi`X5y6x%lEbU7VZy7IaO;b?5S%W`4W%Y`y2V>6@~+ zsn|F520?G^FlN4E?~_Bdl4^(l7RP?&ch^f)> zS0#6?wNwgbv-OmN3HI);ciXfWr}vUQ->L{xafl`}&L3nf}^- zNwZrseQvt;zPDA*e(r2$P<8U=(O*+z7#VxacGZe%rKRaNCYCQH)}*W5Qkk%QRJ!p!nl{M_1`S+2O-x%4&l7xU+HKDX6UUAy&* z`KnVh)m!ruZ)ZwRn64oi4nqRVB@<0-fFg)#fbT&JdlU^+Knn4cBC1pz7YgRWBw%1fC5qa`#X`6&W%lONI=5alr($ zFp3NrRx2`4L`{V`3Tq+^L}?bF00F_mD4GqH2W6tzSVR)=l3@;d&?2h9)?6Sa(Ro9rU9Z`} z7xDGnAP?u*NlW_6-N{ZxT;)HrE@mZhhU%6Z7txVC6^rCVS*!T;A&|Es$>qB02aFJ< zAU|O&Lk4XypbQx@8BKFR0T>skfvt%!O^lig%#fiakGM`G`zh7hkNC>uk=7}ZJXY;{ zG_Q$2PH)w!m*%id009&tF~$f%5CsN}27{U6jO=1gZzW#+kV!L3B0KR{G3G1y?ny3C zL}`!>l2Kt03o#5@T(aV%$)7nMoUBiWO1^DsI$Wt7dHWtWHrMEIc29{?vA`(8VbiLm z%Uw0s=Juv0Pfx42QehV&7-3-*p15p;-z5`KDk>-v&cZGf6ecXDYO36kCoO5^su}e; zUCIGHC79eMO2LJq4#nP~P&ryf(lv>9dk1tV+{Z*I$5I+w*9J+iwOlP$5QeN?5s0_) zwe4#kOZ`#~X-Ip~1Y|Y01fQ;iC(H3NWm+ktLpM|`@L8e<#6jX(tR61G^$PKJt|{{9 zM{|!;M>M+PW|qhTX7S;y8AIYuR8kWb6~w{3kT;S%aX=b#34J@aP1Sb%M)a>0#ByoWj2@B4dsW1oBfo|TD?v2F9`;YAe!k!-= zKlwfWq*@zGjbNa2M?>ew#`xA_IkrtcCQlQ`BSm;SMb<0HDl1jnyt&DNbFKpd2ykHh zd$r$G^}C~bu%&+OG1r-?mF`-nB4@MQY^xG`P8<6!^_5o2Oj^;(a^!MPS2Ig4HLKOv 
zg_2xNn+|7*M5$e=@!q^rE@!x35>F2{VV!7#NE|E@nfz;*>B>EMemr<5&)`V%a*Rw} zU8V(j4c9eW^2^5q;((}bFq$gv^vY0mD_ziC+m?+NY^c#OgUg;PL5I%#E^%batauiBHRTZJhu z=g(Zzl}Vi&XG7@VCv%TEUNQM{mEyO=I2UxyF-(j>)jsV7Ltz)9ifTnG3{!y-_+WQ; z;e&+_*1{edfG80}17*SnOeNzeF;XJXLD2$QVeDhDL8i1B!uQ_d1$n*KOfJ_wO9nsn zcXw&hxcFmC%`R#7%9QDor)n^g#;4pDvwg0)V8WI0|2wXit*lZxx3nWu>+w?T-P&I{ z!MGYT+bhNvL;lu_=_b`~RYu15&enZ1ZmsV`+9z7&`p4RFW)R ziL!@+3DN=3(rjqZ(x^lg8b@M*!UD~P);!oi4AL}!7S>!E18k^3kpY@Yg+&w%Yz-!Y zno321CKE|uC>F4SK?x1U#i#-ULxsXH@I^qB=0d?RFgT3}(_j*^WH8WTKmm=53y5JA z6pUjHe9=Th(LhkbqN%V3b77Un0x^*_mjpo9em2B!kP2T>@<(10wW zhYS}61`9~3kl><$0Rx7H1Q!epuW^ByU=Rijl*TYL7#J`#E*Kb+!Uh=1#6;uPpQh6S@Q8W)TPqrs>|QEN094Mu~}U^EyFMuX8{6wqKa7!3x)VHi-r z1&&Iss=j(trPcMNm{&sr{ZH^viukwAO z)qPU;b64)lP2ES=bARa*Ehnk9ym-^nluK9ArO&^^`Kpw|`ua*+>64snrAnzkp4?gK z`Yd&Q+V$DiZ(ptLEN*Rml=ac1{H*@0&*HS0)9H2A$JI4C>sQ-x)!O}NKg(Heb#kqg zo=p!w%JFVb&pPT+X`Ob?#E(kM%W+3lQv0NK^!InyKF{OL&iitv>-KMd>ix8SJ8^#E zq{;e3{i`2s-h9^T_5C=~0nlPtUWm)5E@$y;CQ3l&*i4 z*IAC@T+fp_y`#@*H#Mh6wVl*ypA)BLPD<00s+`2C&hyTzOwXhJEa&3oiyv-w{+S=O z_)*UC^W)&86FJZ8uHz&-ys?zjR-7&jseV%9A$<}At;ns)OQLT@1)b&|TDt*@3*JrIh?&#{{ z;QU%=DRz=RQCf9;rF}9vtkluuPH*qy>ZpI7XXj~kn0+@n-@T4Hu*2H>nyOB!vs69n zon^;)Jq{GdaUSJ$(x!c$CmjbT@j9L9&Qh=mnaPRWd309EmBHvNu}zDa^3|W)lr_If z=AXVBPC=%UiVoemb9L^sQzG1dNk)pGAw`&zC&@r(TavOSf}4`V90)U}F{+2!k}Q!l-zXD9dE=uG9dzbBQ; zYI*Le_FSdfq35!7b?8dP6xF7CCR>W#r)t%d#ss0;YS|wCEA8Ly z*Vw7Nl3Tr>Rx4Ni-~7xwwO@5_?9{sLi#?a?TKg_4W8YfjhpP2c>UwL%l(P4iMuq%X zb=%a+B5QyC)Y$b?Yqj(xW|{20+wW&DTddkG&ric{HH+dO)?C7akgeo<)pmMCb zb*ZS*TIHJxHr5PPgwR=3t6D|XwpQw^OYD*+r(E$S)6Iknc0YAZotdgJQ`g8!U$M$$ zNnhO5$KElfr#g@( ze`VY;?r&24&e?0Ws?Emyo>RqObWYV%kgbj}sR*CD=D0n>7dNh5_vCFoxS(qH`MT|5 z&v)9)V9HO2kyY`pRk`e?RMysYo$9UAb{C9`3x>nE&<4Y~U|cX9#s!Y!Fw$u-oC_EZ z;{u0qk%94$p}~-V(P&&S8WLPIFwbE)493MWq~h{mT`WXLLp&h4P(edQ1PC4;SR_~g zfdmN%3_L(+6%~;Q5`Ydwh!=Xn)z_<(YVPH#rv4%8X18_BcyU{XnbtYodI%5E zKS{vCdQ4+-hmG}93~GZcC49cr$EWnYF3iTvt{IETTrKTX*DtD61s7w_{$w!Ws_Of< z-qdbX>YXEtofb?^Zz*)ozL4K*jP50v;suC?hzAxbtPPTP77hoD(12tD^$-Dqhzd$a 
z@X&w;iv|ZMCLDOkYK0tfRaAc|Veg75LZ{$L=^6e~wN&f2)Oty|1-qXn3y}sQe1ruFc!bCSj8hzbDlJ^=iD=~bgVrx<=Q!gZsGHcspH3`v7KUy2i}2)c8H1y9v~nE zj5Xx}0}=>~p@>Ta1rZ)#RD_2+2pWQoP+_smPivp0y0)%AGV5fUHp0wpzu6B?%>TqNE8^1bnBalL zu?UF-7~>F*C=vlQ5t+DTl9CYdkck8Xkjq6w#D%8Jy~j_ht(VQ->pcZCz1qahl!L5k z#r@S>bI~;?cTCsWlksAVpEu`!TFMnGwN_jH8hcZMtnRhssSI+jWz9JTCKeN{OofUS z!9)cMNKiyj8zzy82qG*nf;8bk1STy(3VgYetaTx|6-PEZms zHYbok1!QTEKqlOU2@(l22uvApT(CTd1txf)>Jp0s3p{vu-gYf~?A12+PyMpRgsZJ` zpV~I2)WyxghDz6=f3v7!OkI(`cinmQ<6_FW#Mm>aSWHdV#M-j9{>E4AT8kdr>vxxD z@ky(&s{xF&kN{m+3zP>;$~=&%Km!DXf~e3FNFHGH5pnze4Dvshy}H+bRLI(sdW$va z;x9|fLALMS^55>|k(oYJily48UhC6b2OWc|&DiVndJLv)?)I8L=}Ky%#fTy;l(BHx zcR|vC1_&hmfFR+4G(n;~Ca{RC!eZiq2PFcG0E&P;=-##G*b;+^nR-t>w_1Cp_8O{f zi#?xPP1SB^Tl!$OZ;|C&WyOy9dOL;wU~DcKO!~3&S2={SrR>HLw$xA=u<(Tm4<60+DfZaET5cJyvSjHA{&P_#EoV(OKUn@__(^I(;E3;J} z8}pY=qtb`5!L-y<&3BiByUMx8tu?pr-u{G~o$Rg_dw;Fzd9~QDUP5Kp+^gjL@_qcL zEI`m6$XI9~TA2q7Kv*JFh$>JC;vk6v3e*V%!iC*}SV3Iiu_O=R1OpB(k%HpDY0%Y3mOAM0W28> zE)aB-ieuHBrRe`$i=T`^|Mkz>B6Mz1TT*WQy#-YvuN0Xd)Nqji0Sto@4gm)UTzow_ z3%oo85U4N=g@QQ;1RQh)0t9dnAb`WbP=JgDVrV=ls#t*7<!F1%*bz&qiUdQ+%eZjb(NAbTbM&d<1l1lRfvKDWD|Y zv%5kozwC&kaKtt!{$8u1!M$pjdf_W~*rIsO$%sH2td!zn@zDhM=~;(t<>aRfE;WyA zF_WE?o=yDFCPhKyd2&g)&l{NA5w6(o-o;n}%~EjMI#r8$QYoy_@-ggk}bRf#=gq zXkr7{x#5|!g~S=gEu{ONa2GNEJQ& zc$zzFTV7(Fjh(fT&~i^?!~#touM+tjvBWysov%Cp#CLni5+xq;%>?cMr`j&|L+VEtunagSJtg9GE_ z_^b49gIp%NR#pF4Y0|xxga|U2tGZ`fcV(TrtZ0^_S&=n0ofa@=LoA`)mhCvmDik4i zQAe%hB|}*b3am`hjimmF01QqDy9mn7uXml3~=0ZV>-R*X$!rr<*(Dz75woQSiZ9RbQFxo zqs?a5+t;rCcc~42QejcCUqr(c@9L!~22RCh$h@w>old-Ep)j-p&24~JvZnGiOjdAu z)|~K#7k!?iQ2^TNiiMHyXXVYy_)_x~&C@Y(zUKmXZyqpam!mibA23EyDSgnE!27)P z(C!Ef=}CC@w-$Cf0ou(w2@?+H<5t67vs5IAbbc%X8CwTU2bG1#Zd*D0Z!OAM^5se|b5m zvC+e!lo#>3f32()M)RAo2N~NNx8|h$qEWAi5HDArzT+|ui1R?!kTG$^B9kq?`NJgu z%K~&mPpIN>RZK2!8t1gvklfnygQc?+ZWpFSk>y5#xOU+ikKb^I;hG$-b#&`f*J(^* z{xepi#oWG706X z{eBt-IXvSef@PZc@R|^o7#B8XA36gmXkPX8>PH5LqUl5qe&rOu#<-y_HaYf!X z9$lPAti6#AL68}3z9RCU;bMqFZetznH-ztqp31MOny=l|!1n%daeN?dVICU 
zsTrAbLLnFrd#Uq}iMc;yt69#`GP7>KZbhVd7EuqO5WPpxf(^uzx!{KfFj)i_>B+w| z)k9yjonorKsg2_$16Pf{D=ozhBw0%D`!cXS-Hr8f`6&x*lS7Pq)CQ7E3hB!NrcLZ= z;iAvfmaaxkGkYxN4r97b-vHmtygm_$7a3rLSpuVVAsl#s2h!mXP1x#`3NAN0EOrn}!^DH|bk?pK zSX90{#9;yUY!FD(CY-UO@24_Xs_AsMQxXirSt{mqoTd1W7VQfg@mC2Lq6JI~BcylS!E;apvDZ=N7xT%0;DIT|tT);MQT}Ueo`$g5@#+U# z%Nd5bJc$=)hwOjc0P^rV2-k3AxC9E_OU^A`ek-!=u~SjSdrgYyE1G#oYxF6>9t9N= zM%Uv}rsQoDAg54i2m6%SD(KssLlE*r0~uxv_`+&3iEfDq0|V3dB{0di+7TLt+mLg1 zW{()Tt~#@NDR#*07tLq!?2Kq}qPR5i5e^zM>5rPoIj*n9^VyLS)tvWr6hDT5O!fh3 z!XpqPVV`&j6MBU_@8>8(zH@p6?^Q>+#SKVHcz~SKfVdJnNjI|DcP+<;it%Aj5G&(q zvpfph$Y~$;)djC8M`)1&a6)B@#26ml1dVWUFEcZtsrP)LM~(cP!y%%)jrp=keRm@D z?WIRv5%h_%JFJyc072^Rz{ z=JX;`Mh5UT*uqK8dx?pX7RX$F28|9;do5ZsVlwV-9K&GbdgXH^q5(74kjfW1m6Z?$ zMe%{XlwcsZn5;Mv(1FNl5m?(wkM%DUsdW^x9!MND1`?Km3u5Y~6=bbMN&ehWvsSAxAA=?CbLf%;t}XAe$OX)KQ0OP7Adeln(R7&XiiG1e$z*04@!j@p zj;}<>nu;6KYzyku34-$w&G_@-jS9#L+gQWC&m5@zF<>piGY*e;VQ~&NKy^3+O4MW> z5e@;8TJ$ws4@|}bheKox*;eKlB02Xu8)l5z4#dsNQP+}b0p?4|5nM4v@1nVP+5B+_&nS)zS1xF@2 z&YWk93vVoTudig?47&6;J zn9Bt8ac$aykW4xR3N^sMUQfa_@!P9EixuJs!0S;T+EjKUE=;Fc7sbcz2x%Q9CpHbM zh+uPS;zG>jyfx6sn!RhfU>bUDMIL5tY<7GXXxl%AkmQ^TB`*8mM31dz&iex;N(gE% zE+kW&?5v=g0~cxW!;}_1Ba`%7$yZ~vkUi23*klq%-N8U2C>Ml?nHDx{;x?D;%_@hj zk9mMz2>+sI@g?O7jl*3t8L7s@Jf^alXTFjJt};n;*F3%D)Vdu$T!I%ZYo#x}n3&|y zx!74p3Ug%#i{z0=^MErLmVcK6ll;iN6eFV}_nCtGJs5jkrx+JYq`u}zf)hV+!<$zm2O{%3dvxZ z$BpnK6cy98vys)QX}UvS8X0zH|NGqPyte|`z?cZr!u4}4<0m(#OK|ERxhf;LO(PYRzLk80VHITgq-~Yok)Vd_nDZ7Q z!1PySHOW$+0st?9Kv_0+d*PWfF@eK;rhwmR`h!;ac9=bMYBYpHA;1PH)>LZcyQ+*l z`hrkqqBM}m1u+86P93B)lG-{(w@|MpRA{^PwlJOC%`+AnOA94hju^bC3X2#t0cHM+ zwhM$Fn86_&*~CAoNyOzB!u$wr0m~nT2+@ReX^0ch#f~a{#F&DG$qp*(oj6}K=lG!! zlbf9*JMyBE#IcNjP1Zj>hlhP_CwK#{$9tNnSH6$MnTjI7=n@_v$sD_p%@E^fw!j-M zK7X_ZY=6D?S!h3O7ZU2VcfBUWcs(<-CV2(#G{&O$x4o-d`tT21EYL4X+^ZExuy)7+ zZG1Cg76treMql;Wo zz_VOO(neg-7z=h?t*)-kY<(fJJld`E{;9Tt3)2E;5~+_FRw1JdjggPdiV4W@jV+|iC0SkaEg(QpsSE=>lLhOLp|TAh+3KIAt? 
zH`i?l9#s=`I-15Nhgrl?1Yr294Q1vhXu|?Soux3(YoaYIhv{I)i0K`kyN!k~TxudK zVNO5YY;B3-2DSWv37k0xZHKm@Sd$;WlF}9~7ScxVG&pql%%(+zanjDBxb7PxIhKw3Nf%$c#%NDmVT~@U{em zQC4ZfH(ToSP_=YlnCkU}%^~Gl!g%i4qI-Rm#sgoATBDTxUV}=g@SZ7Q2=a!kINvK6 znVN)$0NO@i1X7XM#=x0Sfu;^-=Aa?!H-IujAtBpf(EG0Gw3i0HZ`GkeN2?8+D_^S5pBSB_s8WYacag8jvgH z(Wp>5?f;Rvr>1u9QB#<_(hx7zZp82}{>fs-`!}M7LPRNQp(#)-rxRJ4JLYs5it#F~ zzHF+|BAQ+u<4MlBaLe_fv;lI}fk^j}95hZItOW;TC9cs;Z+1JQgCkRtqrFBuibO%Y z8!O3}Fd5h~y)t6`6vhg2r_O0<3MIIRr>F1go0P+DYO95-8o|>xQ(l z)TMP`u!-#t>Me<=n5!?V#nbIw(nsh)+(xi501MqgEMx}Et5P;VZk0(V6>SbJDZ+qd zGzh~Wks(VL1XSzhV2*#12LV)~vYP-oa0(B?s}$EVJA@ibl?89Ys^mzReFDzV_!-`V zs_i^7%O7Zp2jSEz)tkM>pJKs-fGU;e%?_c{Z^#lY&J-j!jAZ7ZlTyssi(F#>KlJ6w zIM?rtL*ckKWQGz5HivG@62Jhc!hWRSR^TmZHAzd%2+>^!nx5K04P+x9SA83{Nd7bI z|9!UlnP0;%*Vj_~!0CfBhTkUXpM$AvdjseljPE_stp~$#Pz1jH$O&f|05uf;w+_A5 zyQz5SnlVM?Doxmix;(7VMjDW8tgSAiDI`=`(2r{x*`!DL=3N^KGGPq&tHp_Mkrruo zlTRIzp&Cbl6poVuiL`pbmqjM}J$tt#mb zphZ0aH*+kArQuP*XgL6f?yKrLATEUlY<57SRm>4p+DhcMEvx9#?@S6ZWUWCx{2f5b z!N~xK7w4R}Oi6;ZgHOYQGm}gM%Z~%82?uu*@GNs8(d<}cT;G23n0C>O4(;vvKIPfn zbFe1DQWkmVihFNgHo$VL0y@|f3fIsa7$Dmx*?`Ec20W*OED3}h`1_soH$}!0Bu)?! zUj_p*Rna}b*KZ$9f_^ecxWMzRHo#W(IiO5!K=Pq%fK^iEPH2EqGME99KBUY%57ri7 zKGz1gt(rwjAd~lUSRc=-fI=mZ%kY8wr`muhD%@P)Qv(W~dUTS5kd|~qe&relOfQk3 zBI$ zC|h)ry1kcVH^wR6N}Q;$}O7STc%-Y{1x7Q<%2G280c6z*-e8`nr0+!V?^) ztsMXzYKmc1%|kLtJrg&_R3R6qo!ryq1S3|CXz0_F%b^#|aB9Xqv}qEI51a}b2VD!B zOJ7S&m70D+X72+*p9Tz2-r{Hm<6!IwLI@mt*3c{fD+Nj9PqFlDdUvc(3LVU4e3tQBbBq3WZkj-r&eO17V8d4Vp9? 
z9WYw_TxMRPf5orW{GeI`M0weOfpoym_nyE^y?G@Ks75i&Jb_u11{6O~^j#z0`LHgz z-Vj2P0tqejgzP3%=%j^_@UYj~$>hS1r7kg9qk)&~3~XT$XwaGvG7{ZwfxG(*(kVghBr6svtGp^o^pd+ak^ zeJ-ZIH1=n*zF{|4xwDS2VaQL`JPAIiyF)tKRG8z;iFCCYgagA*2q19yNdvP3a0&gU zlgqe*P{Q9rD5{iQoryA3qCHA^cP9((tZ0tKfQ@`+{jIb|3L!KYr}xr zR0P&Rus8AF8npjYmjA!&kc@xz1_k}E7x=9BaWhAdQ(Mdi_#_pdd;qjrEr4(iX;&0u zK|llW)G+tXUuT+2WNQ^R**a~GmzD$$((1OKw6&QKd$Z@6n!$gww;~usN-*2UId^|J z-(fWJgLi(jxP@sb(P(088pc58Tk1e(DeZ%Diw6gh1JO zy^;K=19o3;VTl3Wl$wZzFv4h?$h)$KcOR7qD;Qz-%U~^zHW%;o=PV&YaXlf0skaBe-cvG5 zQ|z`QL5rzM_X<%W^1FA7^@%eiLKu3fW=oI+bJOw$wl!{nGlJ>YM z-Ijn!CCZxw<>B(+U_jQNjRwTfV&5Btt1YtgV>>}L-TsEdm3C=10F*-DX&To__b9<- zYm1n1C6qeZF=k)U!>F~Kt060`C3GBhsGo?O^cavuYKr-EBCa(hsdD=#w4*Q=S`|A{ zdvbs&LBRwI5gec$t-o6!lXZ@O0*ah0e4=OO@4RzbDDTT|;HqI>1Vm_cS;i?@O)F+# z#4_su8n)EqLqrA+$UlpijEy1nh`{6Q*|H3=L_qsn7}aGkCrWl`w2`J6%K5l$<$i6h zx@vVRI+z0=$3N%wgvX6jCdQGwXCoB6&Oc31vEC=I5&a&~c82GLmi#fHV4CI;XF4{7 z8~AKZIFiZVWP7o%eGJV86r=hERHp;PIOm@jjg;E?_keYfF{LsfX9q_fQ0%DyNWLv_a0WrE$DmBRrbQBSP}Zg})q*Zi=z=^y@k!Bz_F5J|FHP1mP(?=Q zIq)XHkx?rUrt;a)kXf(SNnqXsF%eNWXgoLz6U0WV5hnIPNtS(+W#XpaggS=>JVKEm z1eQ_$SI5%|24Jd98^t?9M+s%(q(Inm{5Je+mEYoyH-Yltt0TsIGTiM*{7{jA=pt(x zKX=A3(~w;NEAPdWpB#=xB#{{&*eNf}stZ#rbMM2b<7v-^vPdDh`_s2B%n{Q#M^1~I zmoM04LsC(kQw(Ce7}Nu3M-=Ulo;OO@4gdXx{h}Qbt-X98N2TsU2_UVPT$|F#F<1Cg zxZ-hXfzScImMt^4nehVpLWtN+RaFATo=$?P3DPntx?fI!r5OnQ&4Xk(o765`X)_l? 
z%yw8qYc54`_*Xz>Da|4<#qFMEiFQ;6w~*x!0En);bMF1NWrou1U29=xo2wwC8AmCt z;Axgmrl-51rYm#MqCE4~$aIRe;ctmzCD4~nDcZr=qzIsq+1=M?ZYAcbGAT?kaA*4B z=55`|TdIa%L%$(=y;GE=`=(m#%Ft45E{a~7ZHaG+9p>G$;-&z1(ee3eC2g0PXuwEY zb{0|P)gd)%kg_aeRp%1$U65RPYipv!I||0BjdSfVG6H`vdZyxX&FREL5bX#eJk)kp zJMK>WvIdBZteIpKy=Jr4<#3=0D=)49^m2{`Lp&HBIj~CJHU?zSUEHEqh1+TV#R|YA z^gt}VlpQ_=*#@{Z@P^zxb7Dm%#$t3bTQiWdtYf}i(&=DWGNwh7r@{3y7U9mfgAJx) z$dd6~k0y(kYOh=35)j81Ffad1*p^6D*LXxLb`!J8-TJ>g4X_O-hA_r=!m%YR;K_ zEKXXp?m&Wz-N421PXSI{K0%}PmDjv^ShCFK*Op#T7*?;Yd&6P9@9`cak@R}nAdhFI z^!kxFi(R-Y3p(v15hSWzi_b(c`1Fs@=*-gwM;soKq=}ZB}5i9%5U=r|2_YUL`qKnCC26OiJ^4V5D7>1d;7-4+WSKj7FD})Un3?;H0@b$t8s;BZQ-V zo&bhqCejnhw_`lev2KD+Uz;0-VVLcJ;LjNZ?bkYb_^#;BbB%Lc?%aAPtDj+FVzuDX zu$!tCU56k6GMLe}WGWZ=OX7_EPn>Gb+mLhE!7M=#K*yu#%)~G!**X#(D)K(am>C{8 zDdS{rC(EcGQj{1=*n;S;=OnxrKGv`-84$e~a+dG=v7To_=P9PYh()*XoQX%m#;}+e zC$p$Z>E?Xy%7h6Y9itU07j7>Kg z#@4@`Afb)zAvhX?B|4y5K4Fu8^K7 zBUV`EfvIzZoyEa$k>2MOjH6-6u&_;}8g4;G1?}9CV(;T`AcjR9sOGyJKw^;f>b`Th z@m-DX^>2Bz8y^F;(3BmilPL5j;wMjzsEObOn><2Kp+sJ4u1JgnN`{*m?cBdv4}Oou z*GvzvvY||>DYg3s)iloZbM{M5!C)80#E;O-?{v76_MG|?ny;j$B}b1K$<7>ltS|E! zNB>|lauuO<%t9ln;RhRkUj99w4Lc%$AicOWb++k?Gg=5u4)sgCW>|P@3HmDDx0}Xy zdzy1%B5*470NQi}`Qz=5uNQx!wuO#17Jte@_`#|B`S8r0?9)cC@*Yl!WSZw(#_R;< zd8zho=hQTbC)yI-gcv7ef3i-f7UQ3+7Cq!nG?EvcWMi&&6m9>O>0r)sXeg*vGN>7R zu8Q#TXt3L^MwxTTa-{)AM{)%fe>#aua*(p`RJ{%$8?~VPtfj~SIywCV>$`u6QgOro z;UgGL?Ay>v6M{9d6{x2Xk!M!oY<36~&*UjZ@}MAC9#U*y!LiAd0vMTCv|wLeUs~ke7?t z$k}cB-I`IQ?U|#ydR(qp&Wk?J>UK#SC57B&`D3c>;|zJXR?=mXlSEz=S+y?GG## zQ-M71?p9Qk*2tQ-yPa5?qx*TsqW)TD_zHYfW3y zc<(2VVkbr==a2u21$=k{dIU>-EwBnk#Rj?-z>n(!Tw0nGxjiWhhUOb}7Fjl+JtZY2 zrJ;+--0xWXLH|MjF%Fmqa0l-PdX0KxZu8Fg%BxNOIm)`n$ro(SGm|lq&mv#Ek&%xP zeYeqnFNzn<2J_c%uen|9-EEqEFYb%mG-|_{{fd{o=FVf|tB9@bdco%1&+ENj!}z+I z+}yAFuU-%vMb?w7tfz>m&G~NfE}w^&%jMGf>KE1IY|i;VRBdS0+j8c~^L`GWIFB_34NmCs)>r_|Vnc@%`NSHe+hN`C(ON z#bcf4&#@xH=J;%(pH|-XzHO&xPjx-4?^OET$!@8-u{`#d()nX!ZSB50S8dy(QhPR~ zl-j0qI-{OEt-am2A>)I}%i77lefnE>dGT?pvhqDMud}514(nUV5|RHm-b6-75BKe! 
z&0GrknH@Ze{q^jvHdD#Ye0Tg1`|(Y^?6Bf7h5USazOtY9uFu2cr{TGFtSL`Ei~7BI z@wRG9e0+{yt6o;E*I_!yn6792cuMDxI?b}Lp?$uTr!6Nsx4!V!y0opiIj{H6))@IX zx<^g1@9g8Pcxh_Qcjwacu-2L_^=%jLpr6#|{5jvh>LlXdxywgKzdeplCudFG+B{D> zY9T|-)AqCNLO)#R+V-|PyU2Wht)0^vvhHzp4!eOiLXv<44oE=cB=azW021LSSsfk+ z$|vXnGht0|s1J{g4rD2i!9WW-NJ1G0@4s$w}nv5yI+I?zZagk^LffLECnz)ddldB)fCdmfRoIfoD5 zdEN8reTv_w_kDW5sQCDOzk8pY+~@Ni?@##>|IhpRbI<4B8@bQ-d2E)E7oU;e`#rti z8$7SR?oA)}T;Fs4*!SGgJ$<>aN#*a`&2#_!Ch^_RVdp#i@BZuW-j8|>-v6KTi(9cc zt~b4}e%24}Ib1!5+xW47)hm`(?*gaKs`}w^b0`d0M+Sio1Y|F7#d2+cPk(1`KHcH* z!Qr7+Q(4_tGr0i`ICp%z*SLEa+0U*s>%aQ8FYCJI>fU;%E&}U&I-6$PZufTVPVL49 z8$a%y1rAW4V*&Y1H=lNX_3hW=JZE_uTl;_n(lryw05(~hKfYK;MQ^i>Sjc%cy^WcP z4eR!!zjnpq=qLTMLF7eOY#uv25E4LCR^CKq<-9xWWIkT894a?@n9+9|{j=TKy}MU2 z3kbktWWnkn@kjtzfEhyavEWZlP)PFTaXiO7=N1bpGXNGW0KkF;09epqvLEl`*DR}M z{_ZLE^RMxvr))_@Prc)K+;gwFA2Fy{@;CwrkaSqslE?kEX-@T2#JjuH?7snECcppx zHIAv7{ryvk^SDj^QTzi&(h!iBz$q{y(vi6ZC~3i2fB?GYe8s&_R!&$(2j5ZGpg;iuEC7HB%aklg?Pm4QiY4800H1TW2nR_AI(jB5U=&a^Tr5C90L26X z9uPo*Nx*jjlmM^~l=NKy0LD*mZ*BlASO5SElJn~K7~S3I_^vs1R8Mu?<>hSr_2Awf zTyGW^_g`QCnty%nv+8WclKbJRx~i+Y+XvJ24>vQPbH$SE%`Pu0D_?436Uo5sF||EQk(z|If#lMl`3<@&0nk|iAo$SiQ| zjJM4>IFjK|2mnLigJc9HgjEYH0KkF;cn4T42PozxA_+zay7UJ9q+2TN*qz_btH4B1 z`qWF-Rs;~qNJysSoWnFXn~Hq8KO5w~23balhU9MQREeR-WVxGnfj7$MAG9n@dyU%x>nOGf& zVWcq<5GH{@2nohPB!eJwA14g=nmmgr%Ofpzp1;$M-}pDKeNN-)1eOOx#t8aYEQE1J>GIVzSz3K$W|IAD}@rN99y#}gI{A(?m*<*_Oj?B=z* z!=tf;#!vs$RNZZ~w{c8g5wv*YcZ~Vha6Y^9dDD1@M=~8G3EePj2tq)ygvA!}Kp=#b zG)9IoK%`>XMudHMs7h1uaA+#eO)mnxvPXc5NF|YlWD+pE%z(;KZd8a!rlX>=4*1WC z};iVrw_YT>d{im zQ`0{vpqGQ4%Bf)^Sy*;Zu?J$4O)ok_F5}jVvsF14l zlvL@Z#9mp-Wz-N#tvjF9^U;M==B`cJOj(Ga7@+9KfrkQQk+KoWVG#0>VPFdha{+;I za+ya7iw6YJVR0xIEjhPDtr@E(YG_k$o>q0)dlJf(m+i1(WsX*DYp$59)B9v+VN!OM zYki_q#!hEtc2WlyH>L=WGNk?-etcQfbpx8iR ztI?4%BrGaI5_Z#Whc%^&SVJmTHI=x|Sw2IF&q=26T1th!xvpknpLg)&w$#ocu4Qz% z?3S*Bj8H?_>0q3oWIhZSL7`PFV1`^M$k0eZL4o+dJe*vFU^IlFphaQP{D5R7DjM_6 z)$8ns+hK?PmZx)RPiiVD#Xa0qTWOBq$(R08#7y(JSV92obkuj`Fa& 
zq}sKF+AY17*fB)fe4$+KbS-?zmJZPjOAW2aWVbLR)1m+-dK;jrq*hL0KuY84iE_u3gZPzHJY)Q3e z=efF7*DZt^dUIUso?An{*w!W|)@xf!%PZCMYF^&3;U%WpKJM~{_@HP1$4D|5A{;oP zfw7=~6(J)gj3TLl7AM|vA^}EZfdf;*E)q&6qr@dpdUvx`cCw{}s}*wIw9Zntf8Ed0 zk?&fX3X`!#%8woYW2o2RH#_VsW{Z(ZPiIXrCp$k?_vVR9Bm;xQBP3&GN<2b7u%y64 zh{wkV$wUDHoJ9eW0&AfHBAE<|4w8@z23o*TqKvr!2HYc5#|Z^sQRYQU04f3S#?$x9 z=5xG!?s$2%xx3GEn@zL6HoW&oPlNran26`}pt{~Uy_3g(`kZw<_x~5Y@Bhoc{$Ke2 z{@ed@+~55(AOG^ruKQp9!hab+!++ub?)~R$y_|i=)@;4}g`fYi{V4ur@Bi|L2l@VT z{_=kM{c?BL7yos?sB3q3`$hed?Cck3_e-j~^JSkIkuT@`qJH7@%jI&->FMRtdZ>qw zn2CNtKYo63|2@C>CHMH_xl5}bKganOKff=2n%i&A`@ApT7xunf9=R`hzPsJK8M!Yf zJGqx}nYl0C+uxVIY-9G>Zf(uJ^u_y7`_h|zIk(uVw|sn2#^qejWmeza| z#Jrrx{FC{4S;fqzb&pD1#rncx2B_}uqN{Qp0xo_dn?@q*Zom&5=4+Ne6V z-a20Ra@re}xwShRFR7(vhwdCqo?Ak>Z{^K@p6wx@AOX*6@3#gc)7&H z%uKu_VmI%I7u-JlK6F23WW@8Bhyyyo!V<@`2%kNZH{_bBc{|%#G*c(s3FgLw^QNLWiIo}t|Y!;jO_c!_X!p(cpW^185FEyPI=I&s`_$$qRorczJo3d8e*;chK(; z@9wTSe|O!mdS3pn!S?J%*Yk4GbD3v5z4`c@&o!x>m&Q}=qj}wzdATq)mwe^C^geng z4;eA<@uJsUw7GWVCeQJ5>DOih%=!FVO4k@$%zV}KJz@Kz^i)jf9)W`7;~|5vq65JP z280M|5(gqQOwfWGU?T=F7CcNGDv*SMR-l-*WeRPg%ipw~r|v9t3;n03R-D{Ur@!y? z?Grjk|JvH~RE_OgY_;cR?97z^@jWrbkfZY}9U>GKPEs_Og@}%TAdrs)1};8A8YmnT zrd{*~!{BI`_>X0gL>MZxKl|VxCLV2Poi#f`O(p-=rm0hE>JL@6TAc|sPV6jVo(Qqt zeU&Gr^Ua3eR%;D)JE~c+gfQ=@Byu1m@eL4yOk40cz@!#tI3g16#KI!Mh${#&AcAlw z4-6j%A{ZJYp;nPOcBf)WZ=Hjh3md(+wQi46jL_y;$Nn1S`j*KlmnrkTWt13WwyWv- zJnXcCh_!ZnRZ%gWgjO~nY$FyI3l|d!BYuK1auXX07}$#3m7qL;R3KogkiZ_Q8xA@` z)hpjF-^QmqIbx^p9lfDbo)Vr?&dr_JsYeQ#`cm3yvF~$g{&(TeE)i+UCqX~sF=u>q;IPeRi8souB){B5n5CUZPV3s8z%R5l%GB3p60ulvqXf*>Q2!*4I8)2 zyi3gLvSGise&lm45mAETkWnQT8x*UgVd6lQBz(zX7>ukC4~Yc>8VRJK8Ek++VETYU zm5z&)GD=sfnizejh3yPmT0g1soJwz)jnR)Lq>eVC;>J`mvU2ak&iQnH`L0`i7%q1I zZH&he@@k?q0y45d5-bo>Njgb%y&E1T%R~d? 
z<6vVkCL$!5fFTwh6b>qaSu_a{kBSq7zyXEga?cMU#Y`c!ja6IihZQ?|(qaGf{*LJM zs$z#B%H5Oh%)8p}RvqO`p6q&kaazq(OMVA6WWJPA>Q*{EjnhTDMFWT*NN^woWTC6z;G2C2r?v0E);;c0>&VLfly?i%*F97Jcz%q>wJ(p^r^B}rG!k6dX|Z)FS=|^ zxgkx&EUjHe+_uzW$?4Od)a^VgVUadL&ya|mc$|a>0+1033Wfyh7gCAE2}j1_P>m7; z2kU?VWrF%7_A>r#24Wr-Q2S zm-*2Xr{ifMW60=HVoqj?Qd7Jb>qCBPwUipRmG;$H=g(2EQmq!R)z?F|XRSVF-bdF% zrYIhB;Y*@0m#`*4v@Z^PBPAF)@!$w)P$>C_K)@^nh)YDI>U2;xz16lMHpbMDt4e#> z)Z$UdQl`XsP)gkpt5n|>Lxfsl-3ygM)ST0;l+PZ|NloqAtW^6?*pyqJ#kZVgWdRrr zh_Wg(DsUvhfWJfs27-w=+$T63@JM(B2MLs-LOMJwC>|1$BDIJRyQ`z_&Ze*0Pb&{P zSG*QlND~`*b4GuB+~`t6+xk8~p=07dcJ+)8ohN?H)wsH{fI|Zy8XkL5q6rdAJZwZH zpgsoD*v!-1csP$pX933dAT_DIbOk`Mm!v;zQ z_+$Y&WkNv-1I9(ha$p6CLj{Bf$uo;(+0}9fkpmtGB7X9~A0`VQ z6c(RykXZ#)7|C!rS(uQ}O#p;Id%x`{l~!dYMhP+1)KWU^I?*55>iJENye;meXZ>>H zEkCa~-KN81$(SpKZY?f7mYBLlu#27uA_puLOJxKxOT|*LR4f%s#Zs|UEEP+| zQn6Gll>;i4iUWRIO*QQjy%~R{8)-G2*V3NQ)G7%VWfWTlH9^PX$G7kx1;Dl9VlprWY zYXET*6ba;TFtU(4Q>@d^>D1bxQa$M#ms->mJ$JsNKaAR1%=ee7q!8oP7Vms1Lg|-_ zt$SKx(oN?l5+Y%u;lV;d3P?(dMKa*b_LjCLBbn|(cwzTy0onLxp+Lbc#R%>hY5tI#7SSJQHG!_sl zDhw>N5em$K5Qp?&(ZRt84oERrRIn5a3kZ>f4h014yh>Lmg|4-3-~6r^(JsZBZ$o*E zr88P=5x+|1+Rc7?Txp_8mJFRkqM(Esm;}ZFV_@*W;ZgE%y(r&NandFVAtK{|0~0Ap zIiL?33P{*-YeOv^Wx6X5dE%|pc0MgBxBRC1+M3i!jdDG;EA^?ld&=dyvLSW0T+dO; z-VRb*Fz~PtBPAZe&`}X`fzlEf%3)3%9997Z^A-$IT%-kZ!U~Mo>7?4(_}a>QPp7$& z{=Ig~Q`uat`o@qQKIuE{Z|!9l~u z#=>e56%J~GIYS~xN){qk!^4FGg<99e?5Z_4_F}6%n$puRVs0w`ANif+>>IC>nn#(n zGt>tY`Eh3Kx7V?d1O*8wL>MSA4iv@#29Sh|qz4xn7z#45bBeK7F)|xJJ(ZIgi7&SbD7amjgPNq+o#k~<#d(~qJ@f6+xkL?8xEP!Pdb*iejxRoK8Z zh+wou!^L}w2}lXax)0|Xuj2nrPqnTL*&5Ce@h zRgE#FtQ2aC8se1fs#)n?r(FGcCrDqKu0{ypbxJtH?& zqzzX#|0UM!RwFk)j8rQ+YqfHv)N_g@+{6%_`%!w*F;8l(laf&Kq3|$)Kq2!XA)yT) z8!gd*5iw~Y4ip)Lh1in`hlo81Ir&9&zGhrs%)6=ebLY@%w~3v_*rTSJro@Mwu317w zZ>SAjI(|lWo6zgAhBPfkEGee_LD2|uA|V(L1Ro3H;Sftua7dn^fJu=!NKnE+awG|} zVL-u9G2!T%I@fiMT9_DX{9c&;Z@amAm5`_2W4;(V#h&3wi;1M z^nsFb!4gy;2E>$vh7O91HQ*5w4ow~t&4NMXAi;tHL4$??#46C`E>{WNQ7ze%YS+nB zYPa7Wb2%T!gsY*Hj9J;Q-IH4TJMFMHn=4nZ%6Z!tVhd?f?EQ@;bU489hQpB{$ViBY 
z1q27o%0!@G4x|Z#K?y^Ihz2r2fbnpQHza^iXSGI{jomQOVrQ(HLdO+T!~f({iTP2j zIwiKh&Pv8RPU=}7YWQjU;5DZF^fvbG7UH9SJ$sK(Up(k75DF8d2^uLQ2Tl}7^5PLN znvgIa7Z;k4qB>42LP9pe!vaB`gP4^gqQjW7v#M0KqsyH!Qv~ zvDTK7Qm79E3nB&#Ml2L0K}ti%CtwgqCZ@z)RL|BtI|@m_s#7N0dN2;L!l3Y0ul)*pc3;SJjdzFadqV{bpGU;I-eyg;!5q0 zw)@3l!QBZC5-bpj1PCXX1_w4^2O}383kpg!4xDt1Y%r1pZGaphs$wESmC4I2 zv8$hL%vxD4J-*}5vQJa$^krKoUCP!%jITv79vc&i5Ds-Dk%J3^16{x{ z;LuDNIcO?zNQ7ubLlu!mGAz`#CyWIx?RIeunsYMK=V zwhgqj>6nKRLx?NscUEDVXVGTZNQ)JFXguBzEe76WCnElyK4PQYs_X^ zI>+2tYoUvZLGs*5RP@ZBS~fJ*BJp;Ets^!)9pbrcX!BdklIB`eeA{U2wgACXa?yTl z6|l(`AvX=Tc)J0QTMhi7!D4QG*_FSzL~6#B?d`@~ zA;#>7nrk>Ajq;Pro~B&HecPbxwusY`fXjY!ncSR<+gpvgj+k0v-#_AbdiC=BGnx9}?G6iGH zJcBS^fv6%n?GY4h*42--_9INpCSf^$Y8aNV$F45J=4q?^D@^2OVcEXjI4nfRe(JC( z!#4RdOqix&X&-JNmTJs)aCFlZ&$9du6RCMvHg7i)3jwkmR_m%<%?~jyn~1SxyP+5p zJmA5yNmexUOYE6uVr~~~EM^tQWeL098^Qh*Tac+(!YDKt(;3OoWND`V$JzKi|1Cy< zxtPKsHX7SAec#GtB~!;9Y$mcK3zv=xZB>V^Z2t}^Y$hI>qnpL*b)o7f;>Xr0rW$od z>lH-;*YR{KGe(TY0JcmwunE&l)%My5$~BY!fRFXs01X4|ul-t%62YZGxl-M0oGPA5 z1~~-vz1F*fwRsh8<`||fx!1rLl*Ww1{E@w8>S%4^2nlN*&~wE)KH zW*6zKCedGm*nUtNtdsHqTatf~WB)5=4?lmSFF}TdIiek;E{MS-4Wt|)NWRKLi z4Qh-jiejL8Gi(etS%WTQOSZr&_2#{kiY!{vcP?Op_JwJa8p!ds3Uj!;z<+YfrqGA? 
zj*z*6rWL<(cvs_(@}%d_&QfK>L%Br#ilgbZ0a!Mzqv6^9*o4mhQPBn`;D2SWPCkXS z2zMlc3AAO1c%!8~!USd@xmLu&o4U{V%6^R zb1KuMY_Ib!uqExy?wQX|_->6Z^a6=N5z9mFCzJx4>^-ZG`nzu5$w*UllgKn#glx{L z-V=ALvHRPS^ZV0liASMuqn|y1^QGxGw|95maWbOjGiqOW+>9@9YP4M@ZvEzRDJ-DK zr=9bTu{${JE0*l81zzya)3f4rg6>%SzL{K~3asKhgP$J3{?(16xLKeKM&GOCZvAQ+ zf!&YcwL3aly6c;x4ZsOAR2`u`WD=OczEcaof?HQE2!Q4zyg zDtRZcP1VWq$t6R>f*j@GM2nm9tJKtwC~6%Ba|8ajh|Hh>nYa(pLEZmJpBGmj1FjTB zr{*lISN&Fcnh^LrL}_Aw#}kOYlJ(CV5A3dzx%kBQD?K#A;N0Oa-O{PDS5Jhe5vB;!sUrFc&SasY z@~7Zmvz33ztXeptMftg#Y&ou`VAR~5G~)gF1=M@6_hSJRkJ9tz#R&4(NTW?27MP&# zErc&vrs^6w?*i6n@X1Nlt93j?x91JNaxJgfTemeuj*~4vZ-Q<4xlOD^B1`KRT-aj) zZx zCkfGuGf~f~EP#N$N(d^01B-fuC|1)&p+YnG=IC*E|7Fk+o2%#9IMLYjyI(7>iWc77 zVK(y5N3fH0#&py=3c>*z<9-ir6@^j?PV@NN8-vJ^o|jXv|0_1dtw+u1K7oy1QG$SFft$MxYmEKGyQmTlC8Lr-ip zm(rTAZMwC+p-M4#i6j6%NL_Bbr(fs;i--#O4+bOn+zIdk zPE(2G0uC=$tPj$b@V5r9%1c^15B;Nnc&9+j+nHfz4Hl zYzGGs*yLj?J-E@-$I4W3R=D)2b#yw=~`JcFx-|l8V$k}R@CSwE^aoR~Gw>hu)uLp!=a~^M{<190Bx*B!cozrJ_fo@>vE=2$%6T~c z6|hoUJM$No&nGSCUtPfncZ2=H;C)zRgWmetgzqyTCxKUI&aLxbTfgfxLrOS{sJ>?v zr-`h3s?+e2_Jb#WD9W92kG&7R6P;c>4orGzTL+m|`CZTv?@;~Q~ zIK%HU=SsWgZxds;&}mK62efH`wqm0z_51l<3ybfNGnqCI%HMusBL8%?@=(Nk-;@K-(s z)_ZP|b-T^tOESm{(a)})AF?V$(psOZW`%$5Zw>zeQHEQMczyTeyT1kxQvSu>6!J4> z7?;e80{o!mMb!JJ6JQ^Lv?t?2<$7dH8^Wlgrc@$}>w3hv>0b$htT|Qqr1SOvJ1ImU zT?QHypFtJL;_ibwOYf$AToCT=ys7phOpEx3sz&oY)Q@#1XT#wnus9ASx)^XVZQf6A_9?}qyPnC2 z-!G}!=KxLW$8w=U%+SMH7`k3rGgR+XOqu!Pl)NiPA*T`*);eXBF3r5D&e5KpuFJ}T z#yMgsoV4Qx-QJ3Z6418JzQBq;QgS+@@FCV}v`y%mk;4-w+UFpQzn#}z%$0ybZBsNw zWUJ2QxOc&hIiYQEOum+(3bM2HzqUhwid#098lW9F2$Ai3NDK1K9v0g=E(Sg=W?H!t zw&H?)>>ch~`?dMtVGpEs2lA|Dx4vr&j(A12Knax#7VP}NmNQpGth~UlLO9-P?^+GX zYsnQd%S>=<#mt>cs4Ya1O=~TPFOdVa8gruJjOVFtt#*K+r@Ds7I)VwR+Uk4Ck;bP4 z+cYhRQruBf{kmhTjn`Uyq4>1`Y!$0kX^{>l3n|UEZJwlXv&$0e#rLmuwxJJJYgW+# zQ!7)8Ty4ha7q{tl53XY4?`j=e>#%PcH{X#8ybB-uiabhI_A>&2|AtpaPUeD}^aCTz_wh&Ju28W<)-QnHD^Gb(bTY?_htQwPqZIN6+>vddIBmMT8)BsmCW z03U7$SB1wIMf{Z_ilz@PGeN)Pun|OK2(tJ=YUjpkEGWfujxoYb8~JSaA#XdjFxDq7 
z7LHSuAug;9R#(N8@NibZ&;&2(2m-b}j{{JIW@A9Jfvq?3@t2Ml%i&xB+Jq!`2ss{J zg1DhA8&g|cO*8^<-gM8JvIGfGW`Wxt`X2ycu^DE(!$0tKmn7`-L&owUw}`Pnnq${G zkZaKWcs&8KK}X`EFaL_Dnz!H{Gn(Z$^@)*y|4%A-y9T=V;v%7!ArGU@MFNRD#x)(5 zcTwnCyguQagqSXFf6>Lf(AA|o@wB-TqQwjPPJknC&cy%V+j*DAKtfowewAg_xfY=@3gW82BNhai@wMt00?gV_H=2tluxv&>A*>eWmA?f z&*l*&a$4E$2o)-H5<&pKJ2)a&Xrs5=RJuji%1=1Y8dU8N?C_o-#F%em|5gsc&$KJK z3g^)B&l4zId4R^8C8IAcrXjE3o>|W9FZdHEh1BIRB_d9Tq(k;4X>5QOTa)3O?P58Y zXNNVL9nYQ}T`5IY^z_{VRK~F*FsXnzex8c>p8$C{E76evypdTNS!9c)J#B`aJ#1EZ zDUQOF=4JkA$<~)P>9s}57uTkUz~1LGDpL~5Y>Hsfy=UP~ zBODcBF8=T-9aSwbVS%gzq5xpEBG-XRd3x29?1fO%RqX6$?13Joy}`mnhT5hh86 z0*C5&=!Zfb%{UmXt+Xte1m-OKda`uCdSn{_u3ibiW*liV6bawW#DIoJIhx zaG2sK_C(SPS~cZ_7hg>oEBIByo^il!vhw06)s~_`+kPA$kw$n_Rr^?!^ft-WiIwiiP>U7r}EFmV+w< z2D(b-`{@P7FoSjbg{)>=M^ackz?MQfoEI)C;2wmj7#NCv4Ni|S+*!2EEG85i(Ps}< zNi8iMjRFAy2#>Ck(nS_bF%ghL3LT9$VSh;jQzs}^yXNx`!=0Ooqhb+f%{B;Vt4>W` zX^iNoi^os!!^Dh~Q_+G0L{(Tmy5v7FR*LKv>t=6vs8*P#WzM048Ek_lx{oxxZG?K0 zQ>+3`L*=@R+Iwem0tWm_>zX!NH%3zP<^E;`5Npa}HY4pSjn=9rut0dFY{9o~DlYF$ zmjZ|>!RFZ>IF&1i8>-^}-HOJ;V;o1vY<}-FK#@W{RlQIT3~r}j6MLy{*-3^7VL>(t(GV1BRkWnIqk=H71qa=HPoPp) zL1>Rg`kqcfZHfuOJ;4%*42IVeQYA`lj#!})<}O;=o$|gLowP$ z*3CCC=7RI1Z~sfYcmag$4d&~#pE%Aa*@_(Z1sgB`s$oTJNPpyW>x(#3wCiJg3)HW{ z9xeu{>=NWF)y6qnBtkdT{bFn-yd+0>YAG^9A+o1}FuaOS7(Q6M1NQqWb`0DNs!&7F z=S+0^G@_UPK=96HU{qK1U8x2!+ZNjjp5kxO5i7NV&Yw=%u`Fy!euaGuVI8GuTE#Emh?OrQEgJv8%MzwPv6+ zJ}8rRYwfe7OnIqkKE89qdiVbm`TTSdnUKY6#LrWC}9D zf4s~*jGBgK(LX749^|Jnv*?=7JWv8+_>s# zd8g$mA3=SzqJuhMt6HXD&6E@UoZyX)`VRUA^`Kvohf<2im~Z(B$wpob$*sJ6_znF5 zZ=~1@V0cWwgvny8f?iaTar~f%-V$f}f3$xl6pFSgck!?H_zC5Cg>98KgvvZ*h7B2# z3S$@??lt8*7|McO$oUCY{75>>29pS7{0B-5BrWW4kmXTOiTJ!%Gs@Jj51kUy>J0Rd zOrp)8mFFKBdw#Myfa06TRu7TR3g%wvedvU?^<|~Vl(sQ5Ade#w-N`1wzv1AQF5nzn zIL-8c$ox4hT3PyQ_?&rOm(yeg`p_cUS9DbM7wLYx^rRXgtN|gJR&mk$$!hol?WP~% zMJ!y#sl8?*nBMdTd@b@s1jyI6azHTf>MuqP*^&mW$GZNnH^yo>rtS7gwr1s!KQ}ZC zdx_`gau|xbA|p%MWowvyq*fU4;W*CZI=zd#?i}FRG5F$ofvOopERhBvJATm^mB$=v 
zjVR>9Mx1pSsUE0Cu09Qj2#$^*W==72$_ggEzFu@Y-EX&EYH4hu;~_^qttV<{tb zfaK=z$g~}_hy18ze4y*2T1ogdwe4$Z^tKo+dSBhk=#az2kqQJ5#`Pz-cOvLbZJZ5H z7so&p=yuNa=E2}Rhwfh|;>hqa7Io$&8pE#C0Ib8m*t+`I7V-ghBU{5-&S;E~@9SXR zvej8pZX6!{)N>R(htuL#bcQGVGT!40qrrFXg71U|PpRj*t znQO;YMC*tmVxd=enhB;DR-zBNq2Hlur>CqxADht|^SdOfpfe(~z#G3InoT>!qN^BLueH@He+$6?Pj3AYXV=(Nb%|^;iswB@S#w1d#B^;0D zV!IwOpO*pc#av9aCObq_ZIPu4oCQgE8)7wfk;}vZAC1+Tp2AMcSS29IPf+1N-W@if z#!A8FYFr(Mgag|?KbU822;6I{^ZuM-aF zgUF(Rt*31C&&Rer!UfzEw%fR&4G zi%yLr%)GZI0R$m9_)Um|{Mr6L15!F_Dv6i}KBm1XdLRJ7G9jF@JNp`Az1V^wjjUDFD*4*cbuL|8Wwm-Y>DNQ-DQ zm?s9m(Qz6R4bLm1J=*dIlY_6xP!lU~80@xq-s%yd%45()!>w5?)g`x%l5 zPEoTnUP(TUH7!DZ7E%DrCfb_Hu>oDl%rNICA*Hi2U)5i!D~@(W=dh|}-QT(|U5B=5 zfW~Zll9m>Z#V$Nup_b0bDkZc<>rP8amUfYFS=4-H>V>ofut!<&*fVC}4hBwDkNGG$ z5bIeyFf!Q1QK7w9XD6{!43>e47R#I?;tdb<8pdal3r0;Y>M`m@YR{nJw!=AtD0Uf7 zsLeAo0Mz5ONy5+MCq*RPjM6|zaG+rC+9O_W zR4}P){CXbTFrMU3m(yFgDK1vIMoHK5XZ$to%~LsrgcMjBd5<)7`G|e{g2qtMMY~=E zvF1&Cit*Ubm#|D4{3jznJnHokqQ@*G6R8Ch`%hW82<@0V%?tf6eK0s;rZ2wQqxxdG zWS`^}gtw@#O{F7*_RH~uoc`=bz>f+6#z2b(9ivh!;GkXg`oZ{8O~j9FGouT2nu!yf z>t;N%eUfi*yFscy*P?_f3~fXPbg;r6mn8_K4@+N`6xWFVd~%V6;TqG#@;v{~FPmdR z6Jdd%+7336)U%@Xwj0 z)}^Fk)n~(^f#yV@W4awry9IFC1-yg+JA&E!JkOCxSZ9s=GM^c_jL2<%0gHBY4G~F#E5|IoY zAW`g=R^F~*Hu-{M2VM7MSv34`v?a$I@fIvYaluj~YWfNG8w@sEMbIl8)HqThtZ0b# zku)z=2g>uFm#bNDH(tK3Q_jU zKiDNh8_hZ1V41fZyUur>22#-+f*!D!0L>ND1yL-c4Pvm$b>0UB<11+^ziUUG<%Oaj z&kNQG5iCLUC;|q-_%t&m72f25sRb_9l|1kPPT!k@?b_m{vfNr+OU4BJ%gF#{K@?y@ zW$jYXgJSb`ynIewzwdqRMTmDNb{J#fa#Purb`XKu9>|;(Lrn8)p=l#nE7cBJxX3K8 zOehNg5Kr64Ih|8?&$;>lON(cz$)b-#AoZtLnj%>$7Uo@no8_MGu$4$^y2*5<@u)(X zVNJ9iyDDI^i?%=tSka{P}+qd)CNa&8N+}XvabSEY;LpkT{Z*`<_ zXl@4U#{-hs2F7{)goSC%Oi<}3aF)CV;$O>toja0{U&zIdejz$h1pECg=^0^b(F3%z z)d{&=zfmV9l5^#nO~w(C?5Yrk3IUm(iyEE6`3P51aBy^l1ro;I`sMJ^ZLU@9O`BSH z#cj09&|VG5Nq|>v%QbmCj$fBg))*`y_eXX{wy$Flh9f?%d0T@Z|-D{4X6nNp9S{|;JRN%;(jxM2fQ zA6F+@ha=fs^OjvSy~2QA|{-xn0D zuVz8|FDido2_RHauBhqRM5MlN#_zlpj0=IpEl0soI)w~h@ovTf3(on#f<9mUIESf7 
z)22qjf?O^oX^6os1lt|ut*HBSQ&G{jqUOfe7iKwWSKw(fkkDlFp+1f&KX4_Kk6Xo7 zn0&KR9uYMLkp3zBU>DILygeJhohFn70k{XsT^>!Ew_USmQXH@Vtm-s^rV-J0nAQdT z^<(<6T75!;lMz(9b5kldBG%Jw-%PflLL@-Dj$Kflkt8J-wY$GyM%yy(p~xwgVcw)? z9A;*w$l7=;y*RoS-&SSyatnB+WVy$C{Iyi%5H2|ne5h+8E@)~I?k&*rS@EOV*k2#X zVbDQ7$*mb@T|qy#Y_6~pBVse=hLbVKCU?M(OYYfR_VBPMc&DbZ{1I}jl?3K7`oD1* zjA}jvsfdp&Jjqm_LDBBODYvXXF@<&~x<*`PV>$+IJg)-}h)YD#OCw~Qe;~MN*`1&~ z-O`Gp?o%8%BiJVa(xM|DM#(~)U3d71Nc{5EQ z#3458=EeETL4{~f9 zlg1%COZ4|1uJqQ}X6PSMx1tfMmU`9&=1OQ5LJFQ1InhQ5afYWJbLL08t|;k_Mxe>x z+f_Ry=DVO~1g78vsb9k?Cbx5nK*>nfNWWe>u{#}G2FaE@s?o=|vxK8GsB^VPWRSlr z&U{fMIZaI_92yR0gp$1Fd`$CfvaCqj34B&u#wsCU;==G#ip*BDTQz2p;pjkIpFF8K zo)DJ2`jIWC>=Is30Z%*b(|;p=h>0BIm5M@24FwZPk(}1BLgYBjh^Db3raHKO6%JU0 zqGBWr=-Z`CbHIlYMW@yej z?W<5=tKOhRkfuP5BG^npqm;#pi_;pt1k=v6H;MF#rqhs!%qD-{AW4Y#6+5y>sX&(2 z5J<&3dj>5Q^o(D}aDL@~j*DcVmkYot!y&+i2}&pMp%S{JJ@e3vigbpo_JFr3)bNZC z*g1OO5T%1)4{4I5^pDfL^gV*JIMd8vQFf+!gNEo4ES)w75&GC(=k@4neQYolHpqGGWwz>~p9CGF3%uF!(4(lP0nwyc9Y%mHhj z?lO(5CyqC0Sh5d(5O5xQfe$+Mtbo|#K;P>gBtPI7J>Q$X3-}r_C-5NlvXnsLbeKU#%&v`x+S69t z6_^`nI$PlZL5?k^(W0#Y#eUx|zk@*X&f4nAH7Ml1aV0Ny~c-5&^~07F##q`SO2 z!{}~rv3G7CK0GME$0&^&0|T>k6 zSjndiNWew^gi-vIn98HU4X7VtrE07*|4)>VG4z7LGzKjAdVzl3q17qFGg~n5JLP1) znpr6$Ot3krva3WEv~UNMyUJEQH29aAol4W*(HGQBl^Jb9LE;#>kMxhVVz0!!nmD z%f6XiW#)to9-P@2I#UEH+XFL8E6k!yQ*GuO6g<4yh=Q>W%^a*Wd$^|R%(owSy0g&* zg%8d=tvI`KZS|RN9C-M%@dt4po_SJvc4ci9nzxtCfw}<;oC7o^6=+m&TaA_tWD>W_ zfen@C5Y190T9j$4(tJ~b=h8+m$mk%=rHZs~XRFhk;DJY2v5n)=mq8q^ zqCY!|p;E-D;<89MGc1a$#7C}j@nZ{I9LS#TfU_v++b7A4jkecYFUqw z#fZ`L)cBdA0~FM!E>VRns%V{FlHFu?ScQRNq+<~TK@bF057YwU0xSdCudU76?F<+c zGOa#X5HR&VyGuI+-R%9hM&%>LFeoHI6i)9qARIn09*E!mz1@%5_gGy~78a6UepRWO z&N8g)F?2>TsutpYW2zrUh4_j=Y?ST7=i#))rX95*S&`e*(J}pT|2A17iMKmFJv|jOrSosa z8{fTUdgDVp&-KORxvcx0mbEc0Yj#&`;#ZbbUw=&ei3v@&!p-!|G!M z@)n`tG9BMGSKuQwI3{#YXqWw$H0}OV*DiHh>PwC4Z`#R^catyj`z@Y$%@5Z)wXQ$6 zdABX6^T$hwiplJl%_&cNQ&Cw_Il1N{UZ-nJGqm=;zut70Ha(RS<9*^r#zSUJ&C1dj z(B;nyQ16fxlA@@P5Kkc~4E&}F2^R)#ONF$5Yu~*vApa{1gRkEzq=i8hk}769jfQF# 
zg&UmZpivPGx6x}GI;#(iC?ksHpy3Q72Vy)gMj##-6L92N4nT&4*rFcRmUDpQ6Cd{KeZGF;rFK32XS`_&FknFg8U{olEx-yWumA~M zpHcq!^EgkV=E+XOC|=Ay<}UWy z?MLw##MLxT!~Cx%_t;qx)mV_{k*r4Jfq()O;LVf%=~=EPpL6uw^K47A=T_yV+T`F6Soflyc28hd%Rd5FUr@8F8=a%d(Z+a3;QR2`=>$-J3rHkzV=JnhVFfS z5y-|+VYr?A4gc`}6F>12pYd|jVY0Y?D$C}E*5)cOS&+JXCyzr*RWNbfE>+4@Cj9>e` zKbw<(IeAf#kM$OQi^XrTD9Q6kELf04k{HJ9mA-E(efTl7b&qq;HkRsfrP6%V)idiK zcbH^m7k*=F+V(u&^qanc29UjtPxbauy;IxBxVD|Pw=m@HDobZ7Kd&C^DL+DkVY{B| zq2B2|pL!mr1Fa|-bDFLP6-A54kZpTAR?#tc8h}VmXi;4J;`G<=A zmrl=+^L~?KV)FLxO@^jt$$FN%>F}EsIq~xz^Zu?v`NZvZckOR(zIObrs@acr0S+`c zEEWt74hqqN!6*SZ2@FOx5P^7L7+_-2P|Tz8uy7!?h$0L~LN%a;M+Hej;w%MW5J6N! z;$cAu5FwE`tHCf53?o^fN1_r3hvae8ibN0tT1b+DG$Imc!GR>f!68wJN+Jj3@j#XY zmz^w|b9vKtRoD0x8ykCHm!-i_PtsTutFd_SXf)25;wF_XMsr-fG0So8QO=m=tH$>K z9>u&vPiGm`jv}UO!_S5dFSCQ)4>$SS>znFnck?#;Qdw{B92;+ zgrwqv1X_SBhmjC124q1EiHQV~L>>^d9H>YFEi7h1R0ByOM$&jBh6Q;z5(uJ}0~8uO zctDFqv{*zuB2k0427iVPqDb_JXb@RYLPQZkE8&6BU^tA53z7tAK`|yIo&@4p5RoN% zL^v1>2cuz@m^>aFC`UrEG!c&kVyFmYH4BmG(L`!N8iMC z)w;84?x=@pCL7UJle?@*>!xv#ae6g=lX=F9&Ni4Bnb8dwFL^KeqC|AK|F`eApRf3) zBoP2mK(D`OF&f76NJJ1LN&hs9NjsJN;Wo2AmV_s!f&xlLRt8AV$yZ1JmH-h3+pY{2b z0%gH%81^`7@2KNeN;SI~Rj<@z&g!Y2W~naqFpVnCRgA6_db8|5lXmYev-YuewzsmV zaj$arC&l*dzof|fi+7vGd8fu}{A$L?+-Ge2pZkwzt7*~p|NkG2dmmcp#KhL`tGvdW z-rsW@649FAVL;Zzg6he8-E6wd$=T^2_cD_g88|KZlJ8DaS)8*UlV6jOFC`|u#@tq9 zvs88{%OWQlBRZn8w4>}9ex83GZA|rJvyf?+#mB*G=G|FOgZe9#HO<(%=hB5u!;5>! zoqV0_;NpiDyCJ`Fp84TEAD{1W=PfIZGRl0d?tEp|z1KgFAETb9QHQJRy79ilH*@^f z=FxnX*EDtCDvK)E&n3%#JicQ)ZsXNkH_p9%<2;T;jkn7Gnp|I5d)2qy%@en3u6J%) z-HC3y)s(o}&93_Ts=cZX4$ex5L~1pX2m>m3Ac53^h&-Z(f)+MgKmG4}YIipFrFj?! 
znWnFue0cA1^jEW%aUQkb#5HR)b+*-%VVe5(uVqZBa!*aKtnRG++`S3)Cz_Mn9l8DP zSylg!pOZhKvoogD-sgK>zSs2lz4)$+?``(G#@4PJ91>^&BEn#Ba8RgNEEbE!vmh2+ zkVGn34n=`-=Kgka!sSzC<<|uw!k~b1g`)fq@MBZnT~p@P-DTEBOvm1)dy0yePE}b{ zS6NQ8pq4eF~spGr%W`zMOc zIna3UWA3Iq;}af3tvs#vrwx}g!qUj<1xjNcbbV!4EcJ_yIwetHt{XWricT$1?%Oaa zw1(iW{z~(w3uWjmtiEWg5SDjipB-ON7fF4QI=YB@3g-Qr*2*Fz{ zvW8XQA)qz5%&X3P9*q$I0vRr>@N|v4gBU*I?!24)@QqRgzwSdU7(pMfZ#0C%K(JgWumMOCaXI##w z5zvvbD?;^_8^3Dj%i~r1%14mbbjumUdos2)UEZG;+tGnW!|x-9m3RpV*RSajpy^pT z3x_qJ06LE2@i)z37(sWHUtf#W2x`22p9z0T7X8DpmD8WPAE0+A)t?gTyftke_{Q`a zejiB3+>z=IN%a@&P;F}|FYAFX={Of5p%!Z}s;g)PTD6F*TubPvP8E0{$GiGqxO}l? zXjO5gin=1$sqr^@nG$*?YDg=*kxQ5#%$&O3fRaMiSt;^IYFJS6jCwvvKQU@(GSm)N zD43+n&VO>H<7g?HT+{^L0yGmriT8XxN8mG#x>h_;d=emcZ~6w1HNb6nBVnN9<+7WN zkujR6><&V(K5ZSVfWAzg<4c4jVU-uj1VTpdR$#2<^6>L)@FF`1mkKR_ToE$fcZko| z5haiGniVyfnZ%_wY0(TW_XPOk+Weq54W?L-+RysOLV|6DcX?}AavM4-oF^6r57@@ zN!0F*nlmN26Fo_yQT^~bONOIRiF$TaKqFU1v zKKb(0tKzM!QW8Dk)G2WY34fgr8@#@>NrdcU3Wq%lAD27^YxD}Li*|MoBh8~dO(bVxo2q~pGc2e#4R&pJ8#^nR(F^4l z;j3##-SpzZga6b;?Jj=PP_K42Lq+;s^mb4)=o@?07`Evm0rxXQsiHoL>gln0Pv}Ni zgPIZW__`%5oWB%tVXVL6q+&zOh< z&wQ=-L3=D5iOu0kp7jt_6~6!_u72aHk=IAbAVSg_$Q!-@6Z$vUOFeXfOIH`Y2z!;6 zuA|jdyFhZuR=b)9V5T`Ls=N4Hu7HGXWXdg4Qs*PGD%}HBxvqV&kE-V~ZgA-`|AdwpQ$k5k}P<5trAzGF7cV$tBf$AJp zgr)+SuJ?xXiZc~aV3D5OTxXNw2$W}ojVu+f;Ag4V$4;Li4@=F@+SqNx?@BUMXu4nX zRXprQ`m|*igO2`NS3s0XmJV?Yl!vXh8;WmfJCaj(6CC03CNOuZ4=clvM}l8{Kbzpe@9`Yf5KD-G(kq=QrJJ3v??5o;oy6v zk{qI^pjs~yM!`x?Chjq#QCCwlUTqjoCaCEAvqBj^(&NUx3`uVaLI~DnO`d68CpW1{ z(K?>Qf~FB4)+PP6O)&zuNQeZUY+bP!5Y<4^#bxU|yX#Jc(ktMv@S{mxIrJg-AUE6t zEE*uhL4FuVs{u?v25(peH9ejxdVwCG7))yp9NNJHvRBB3%>MFhXBps5^ud~!Q=R1c z>h_nyfkT0l5>7Hz)J5?E74iTG*dKShCxR{eehF;iG&3A;-<31oE>{Dv!q#q4YA0LE zd&4&-wwtl46D#_EqDMEb$9Z=g^EAHa&G&*4Y9d{u6n7d{rg>X@L^>WY1r*`+dDIZ1 z9pI+J*gfZlhDGJ;@ZZQ}ZkH|xlLyUC@ii1jo0q$uS=PcS_`gj%r6x1Bz(B?sx#}#go2jmltUSPuiqCSx-2J*ZkGVOjn!!U1qUMG&+z<#wvbpuh4@q`Z<^~-} zWKOFF$4F41s}h%0gR#wAZrNbKVB zhw1?)Oo;GaZIsZ}@twqMD$KMq{jB?JZi=h7$Vy41ll6C8fivdt^coeLQ~DfqlYa)` 
zkOI{hR|162mbe_an}ChfS|0|~v^Wr`k8TF}speJ<0}a3Q#|z%U6(BIgWK+R3rxaSN zsN`&tiUmrv;Lv8u#tcb<&)^|MLjLiuB_j;z2QkgIo>`B=g5@_3e5F}&ThVPk8co-G z9I=%MiD}_#g}`1%5au9V#PqJegzvhp7`=YS)yIgK7SD8EhiHlKJ&+E(|2GM>Q#?cj z-rZ3_FRCWrA2}K)Es>{l2pUP1$Uwt@y$rC}&KBeFHN}^tRKNfYNg^`vD!8T5!P*re zdJnN?a6o(skQ_>E#E>{LJIX0o=+(j~5RHy5K*#;dBx# z^qkp#eHDfH)T^h^PgqB=kG#WNyDf-$eHufaLtcR?x)Krzl)VASjzc26gDd1+q(4?# zTpWQds)q=xLqG+_7nBOindgIZyRMUGdkrp2@N$jUL6CQ@mRcL(-kHiBpr$+4Y}O?p zqW-|G1aZ)!{^5kDP$=BPn?jWWC9R;a)ER_rL<#wTh4DMi1d}3)@&WYNzB!r1zp<7M z;=GjEdZwF^O7(4D+5844ZDO9s;I>wy4;~hDdykoK%T_S4*I*zU)wTh6ynFdt?&xbo@I8t4USaJzR2o5{Jt@CJ&`-yF50 z)EREa1Uy_I)1TZL?SdOX=%P}N8k(~RzJ>!OTVmuY%nFOgK3?E7vi)5+?(?JG9wm@8 zY2ij#@uDDv<V)U60K^u^QKQQ1qpqXk~*EAE{ ztIlfL)?tp_UJ|plgdFxP1=P;eTMP+JOgUNRK@g^W6FOSvuB@3`c>Ob`wXrgnv=gCzRhaN!nOA2 z2CdzwN}z_S6GpD}lL@pM)#_88cr6AiqFD{w@})zDX4!SI3hH#G+vwaLrvQTG%a&we zoTtKpy%dG$a#G*jGCCGSa%OT&2)bWU!Zzp%p~iihKl($~3Ra~gyzP}c+EkKiXG2kfSOvL&(eKJ+Oil{Ce_t2 zSh`OLPCFD1pdYraphW5q6GQ^N0bPJ~nG9a^QDuX|$ve0Koq_AAvsX9RvdcCg68x1> zj~T_&r;KMVcm+LoHK_reK%j1D%q)JQWmf8u?jm9H&%YqW)F5GtqQv)K`2b_HGD_wx zvjM^lgI40Kx=UQ(60QEUQJ{}o3Mc@VlW1{C!5OzJO5DnS{^3Zuj|1Cbp-+h4p$)*g zy&`Var)qWGX$rVvuO4~^|6W>cVaVTxRg5DdE+W|@rZbns6lAx1T|rl>le~dYEbf^& zA{dp!4bO*5*opPIvqxC&rmrJJV?;uj4cK?_%K={5+@Y5FPO{QS^Eae1KN~-ye%)dwXAKPzBL5Z17*oNQ|8lvHu4vYVwv;}1pnEq zygU0cM9okXW#B}*1V+;$`rni8UUf!~D9dzx-HFzihIQ-SIAx|R&DL~`fvg8+8-Fom zA{zig*U*B5udbGOfby;5aWuUKO-<8I$|ktdAtYBa1pkyX#Y|4A89;57Wfpu3$dk7W zPbge!BXzcW*y;ebGtP+xdr3<77f@XVQtV|fF#rcX6WI$7$oyC4FqPyVynxVf62$^2 zkCa7sd{PVJS}Tnw@j&GkU#v9ARgFQ3c&sJkSTOH8i8!Oj)FZ`!N&;C#*d_OEo>1;jP4Q1g@mEI2DhG-};uAt`MblM2WF{|)f-WJW$1MJNjFGAay0jcJhP zTZs!sKIJ>iUg%vpKb*F}c{O56KzEJWRmXa&bE8O)e(`x}R=-urF$R8E4d0G}MchHl z@Go)4n-n)M5rQsMfsbjr-Nh$#o@dJs7A!ICwk|Pum#0AtKGrj#MHZpPQGd6U9;8vv z6aWzE=+2_{F%!@=ze%?WGFd|;c$ByW%jd$QaLZII&uR%E*7fV3*SS+`lNhhi%tS&D ziXrXNxekxf!=E|}`Z5jGScPA>XlGvFRCB^dk;1%Cgt-PctdWjVkkUj)qIpoR|}Bb9uJod;}RnWfQ>n$2Ww 
zl$>IO&T=Tr28swgY_eRc`&MeTQf5|ArKx+PrZeN&sRrl|9HDSa%ME%}EL$FjNddra)T$`8`)F!Ho?o%p**J)VrWd%ZMy9dwCtBs% zd)aeA4E{@<@0@96H(ogeU);QoIGW9Wv z1!gpk+Oc|eLPjsGPL#$q`TA#o}u?+pWjMpoJ3#b*4L(-J;iJILyodObaR+nMiEbG1iPaD*4 zf;NW(@@&KoDCi@cG<|}rAbfUppt2()ioVY@Tz%3_nN%$})z%6W+eE}$Md;+Xe9&!= z%UsL*JB$nmkK2nw<)WRKsv6@kVW#$wyVVRf+ENECRm6g+nGF6sHOB3CdE+2e%iK^}sv+tpCN&F_l!!9z-E#WS09gmEW2bgL7+(eZr`oR~Uccmfs=Nd7O$h$00 zlX$)@(c5;oD}Y@8Bt>;=#glbT zYv_~<^_$!ZFO2?18sTz;wo%JMfX~rxg|*%R>ZNfcG9o(iVYuWOT3F~}m1|Pze~H%w zIG;E4^JUkN()PulDE3Q1!wAAL+q;oA)|m_N`PN!(ba2>-AwW;Ex-ukv4vN;No zpuz}IMlH~1*4%>OT^`=JprWiJ$CZNq#HywD8REsc|2JCyQO-5oE;tYGP#=!WVl}Ce zUm`o1_;^LxQ8Quboz0 zw2V=YjqVqhn*h;OwO-78H+MAI6ZxJ>-vSy2d>$lyOiT0Y(^-aIG2!cT(m4tb-3(kN;XMoeWcU45Ogh={^G2=ueB7HOTRoX`u4|(r0>sT;q{9; zpgksz=q49h{N%D@5TxWs6&g*udoEL$D48$MQ%KcecHwTQ$z6IqG9)|3ec`fni@g>Q4wdlB|LVt?tTu%X5*sEID}(DK+E7yCljg0 z`1EChS@u_^Fo?~97NsK4w%H<}3R@oH=@T)APQ&$1Qym^NBRF=@3dzjq;YO0ZLc3)JBjl0LumCCPDE(wrKM-Vc9c&hXuO&8mkceyv49>YXkK}hr zI=kc<*x!pOsc{|#n$Ztc}l5CIS7p8vgt`st5lIn&6X6{ zWp_3<_*&~S*<^A#BBFRaU^k*Nfk_PoPoKrLQ&d$7o>+mp!S=H070p%lT^40oPumK) z$!=AI2%az1?NZEhJC^k#>~B2$D};{S>8yA-4V`2BxC51xk= zJ}i<*e?(x-mI;^m=v!?>n2O`oh?Y)9-3#MkcNR>OJm|{p5zor>@%yZzwr_Gr{G&mPwPwvjLl_)Jdi&cCpMxUO* zsolSpT4O%GfQ9=WXbysW$#=Rxkt7C$e1%@xf6Zl+C}W0E#tqtOMn&)eo5X2@3q^@S zQ=b}AekYG1E^|TvcBD*0(ahwB@}1krfE5ybuei}dAQ4Uh*dO|HRdRMW?Aiso@EKzF z5=5_1V=wQ0ZC%*cbXmH_F$t+NMuW|j_PLs&7+#htSas$OyeDi*tubS>oVn;mi9&M& zvk;rYn|q1vrXW=WwBS|dqkSFzT1?RO(Pl0+quUflLDY<6m%+gzGeOe`tnVn%OW-9& zlvQ+1GyGTvVnN-^m01r*3X0g&<(81Us~NQ^AqQ^uy&}cspB$Z=O?`MfU(x3W3uo zo;yz5LbpJaC^R*+x7sryPsNeWIUb#JIz=TCs7aUc@{QF8@J48`ky{_4-FMlT&4J^ z^x&!vRY)-(c0Y1SZ;~`<2+%Ap_npNiUJK0*trr`u1nGxS(ubxb6~kH-dQPg}8^og3 zLj!PoWr#Q$rkVO0=IbeQan@v+lw3uw8O#-&@UM?Gm(o6kvIo6Go*dO-{=5Hp*Bqj8YKxXFOR1fEJl8T7yaFs$<3 z9&NQl3>jed$#Fn7E6%{ItgYo9{q+!%$ZR)xFa!%}X~9DzJ#NQ>q6Ss}QC#sK6aFo_Hb99JmnZ zijkIhxezv&rTAJ1IuFBxina@Z+CdIJxoodu$4xXT6J#CnaPmpXgAg1uj~(7h;lHPp znoKNx5dvMv(qd))BQi%3nu;1zKn%P&@2je)EsfKZ}J_0NM3N 
zRL0c2=!s`lv&IUeiC}xRkDla>2>8sk7G+_%x7dbV{Ln6uD3T*VbfjsDUC0Z}G@HYn;JxXtwr(0q+v`|WLIU{8RH$8~-iu*mr`4e>8r)VUM2FBWRmoI!cC%A;9%@o(S; zV9j1Hc#YWBUHBpE9?p(54aOHlObG zhj0WFd1GwgybXE|K8o5S+wpQoHdGDKP@ zN@rTJ6`NyPt?)-;3*3gk*Dg>VP~!^fy1WBfKp{7xw;=HG__`8`rOE|ce6vio&4`RZ z^fti%#KPo-Hl)CzPo|KrOFP3Y0d@PS~EXR!?A8_ZuvJcgl zwFaCJGC7O@ttvY<2<|^!4A9hV4Uih&SgpGEr_?-6rG!V@U}{^p`hX#o-%=|#m&yk1 zMpGhO49WiILCwNd|CkD?$&@#@8%`lnE=K>v$*-w@l}$O01g?#z6o(Rexj6Q7>Yt`l z#;a{WWm}7TET}Y;9PI>9e@$h9)!09&W@t(!BJ2iLBYsrL)o!mN7yX-xv(2e!Gu)_( zD-38*PoJKm&iqj|Lz5~IVK=NA@uQLepznsWHovOGXjYZe&yA}RVx_JUCE=W(RW_Pd zk@$84t0QpSqp(uU#kTajYK-PpI>&5ewGg*P##w`=+teRcA~&(h_N|6iA#5yIADe5Q z>X#Lln_023)!2%O5>PU;dwQu(|Fjyjsg=$#H@I5ZQB}p2BY|T6w%Vb&RS2{jU5QvS z9#&Tq-p2g6lA*~}2yZvM3c;~W8r|;puycN0$!K;J;@gd{LO9uv`f7GqOFyr~XnK|N z&jwft$=I56vpZfkcJ}wxj?J$^M5_^&2qxntrFla-|7uwz)%5w*$lb5B4XYv62u@T^ z+{P~~ZZ^Z>`fV-a2**~!?ZHh(Zw+*sx`>c!8hW`A<+P67Y@YpiSk_|@w z2>W)hikv^PK53F=rP_vB-g*U!(b|4B=U3Jw&9bDx7;SOZ=fpxxWdN#5lnNs-ZG!>4 zG}Bv(nRp^1=na7kKYpl~OW7@@go4x8-uN~{zS6}w&+mn6zkNqy0^m-y7>H0zvGu=J*_3G>G>(kd?r@rm8 zp2m6-9Z8Bz9pa;o(i~N;!d(9`($`=h5Cml{ka@>=p5in}T?*lv|{$P>h?#Y|XdTd^9 zmYud_H6Zzk=<($=Yl)VK#X@YM;>*$1YeYn9I7~#6PvR6w>}>3@c5E4ur0p16tqW$c z<#^Mv$I=xKOGkGR4iFR%#u{!ko(v2OZOLu^7#J9SzdMI=>Xr~Z|NQQA-|qo~jqnq! zH1B|mgAE5a?Mf8A1IOPL^$sKcAvm#jlF@y7b55PU@bu9e!MTWjm_m$xhc*He#dqj! ziPptri$bZ^?P-#=F6r|fW}cr@eJ9+bVHIZ_cmPi{Ty(9hK}QVWYa>-Md0>OVc`eY% z;CBL9$!yH;l$&j-&so2;3$_-x`<4&J^c==T?#nE z?RU;p6K9C}#j$gK5H5Ozflz03s>LJWCVn)Uo+#}vQ(zh3zy;0zo#N&?Oql1|XGUrG zx6yM1@RS5@_)fqROk=|zb)+w;ZH`3XAzWQ1qmw0qW`UjzCDX*3eRVkxqy{?M-88lBPG)AQ%!Uo-hnBL%dbBevcCEC!9 z_zy0-Fj3hjpdrAsLLN2xEAKSx^CY&KtdL?Uvp1HYKwPJaEKj)r7p6Lzw!nNC@QAf? zI&i{0lQ9cx>Jb7DWsm7(tAi7Q-=_^cP%?^|8*ATSLhztlR^ERa5j+*uLSw?g?H;fg z3NfHt7$_>;+d(&UPLj%m2_CHw$ohjqZpH(-GxtCA zcZ&DZSkW_#{4iH1foer;nd&=m#Y zGM|KxP+}&5Hx1QK%)nC!DkL(g4?HJ%|H#Sfzc>?tPL(Hkx_VXc)LF!Xuoyfg6oQB? 
zdxo(*Iv7D8mavGT96Wu2?j|Q5JmI#yiJ??BMOp zAn>G|;u?frmuvgdX-&#!s9L;HK%5YI;%j41a#BVcC(JZ`KCxqYclyo3T*H(1>ok9xg!n$IA2PLe3M-5zqJCURq0KkK$03PKIuh|N` z2=H)?o!P>Q|Bipgf3wn&|4x3{&8q)q0G^s)9#dEXc!VMWo|T0G?orAHuv`s3xeegi zNdr6_nOEW8A#xQIYQj|hI|^@>;IXL!z;l(O1@Itc1bDRkAK>AlmJNW>YCl^U;rOuQ z4e-!hpgYb)9Z2rSno1on~)dcSqS#2GY?JrjG!E65`U7HqsX~Kg+izKnahfS&V1NSfBE@|X5eIi5mq+m)KV0=)G(TxPIDmt zjcxP{?L1ifBe~^(zsRV}zZZj8p{4_E6PDw7n%4=ig_v2JdZSuV^9RW7bCpX)7XuVd z^uin=wwaGagtyq+-gRQPLbG9jgES)pubhi?TR5}IQty>%d0b6~WFG_r#ssjH723SS zqUzwV@Gu^Ty|4z07NQ1nbHj=tO#24sC)@6v zDh(+{mafmB08|7?Jwc8s6%{sQQ=az}NSnB((ImiXx&jO3=8GFG(|#IBTKPeQsXMZ4 zjNNggr zIv5U8F9jkzu|}vN{E>D(0VIWAPp2SFRcxI6)OG?4MQkjJft1^m<3c*Y)g<;zSx6*f z6f8<1UdrQ|{t&9G6fujYcTTSNK}%ow{2hq4Z>N$qU8VW7LRx{U2N~HgRgOCTJ!D*z zwQvq``jOZ^v44o7MwhN!L6B^;j%F1V(YqIuA+oBI8|yk@;u|%Rm#mX;_4I#UD}DCF z5v7_3mXo9Oed!OK2!S+}jBORm%9H(0f}3X`3q${qOAvY^X;Fk+m%bE;$MnTq5m=ko z&gw#Y@xV#u)-jm+C^UxH%%GfFHsJw#i;QBBQaEu;ya^67)2ZQ}(LE_`um~Vg`ayh> zI9wINIAfrzGGSAC6x3aDr*)e6l@^lHNsTgbe2kFFZ3Zq8QiFAAl)g#0`N1sQT(l+g zyd5Hyuq=z9Y&~z03~5{)@)}fddRgLKKuRZSkYZ$<5vK`)!dhJ)q*Z2VG(FKls=PU_ zoEnwcl5{YR=b%)`&fjoK8vhcX$Fh%}BZ1;%Kspr%vMWPrY*XQIt|0kiNT)s&tH zqKj1wY6zw69w9y4_UIAK0ZTpF*(T!~r7Ra&ZUJ7Z-NJaan22CG!~64(Y~w@KFc%Xh ze9xJoh$L&w1p#`^SR5(97)dZh(}J2Ug+|5Ro4lRY#D3SEXME06l62{k45Q6`2G>;- zMNuFEk^$cVlL5&+kX!PVBP6+LTe?q=BsQh<^i;Z<+Dwszrkz^O zXL`h$z2p*om`H*xl4N`Htf!FkW-dGom& zJR~t9NntCiR4(TjdhSG$95b7pYpi_sv3tr$V$YG}=9-s{AxU~}%R1TIHm`lmoP;EA zZRh9xxfznw`#GKH^ZMsVGM{J7tL9j9#L26W#BA<(;B20A(>u|8dWorM-rEbBb&gi0 zXf*GfZ121@cVeB7i{?qKv$OeWju>fpfIdjA|^OFYiYvx#P8A)oM%g_8Wk>oO~bIh4}f@wMSM54(`bT-K- z5hF<=VxNcj&}E!;%zU559L!f=Ou{rw#7T9Yyz~G5JcD>m|4MFk#EF&NY^?S|MMz?{ zAIa)TS#on`WtJ;%^PQ}jWX&o`Ii^HPteHfz5hICF;xZTFcb?p`kmOuOt$&;W2cQTO zB`^?}EF~io5*`x12D8cy)>fQ{Bz5Hq=HXles7+4NKzBZJwaVCKnO7-3P#L@)IdUOM zlCQqbk>|)Yg$zTBN}6@DENe+NXY&;-B)MsAY-MWjWCcoN>$z4@LB3+OB1vf|Mb@8O zW3654mkQPD@(Dtpu;D@_8ejl~hX^EKU;`B{ zbV%F4f)9@m79dFIhY2)j06j?12(iHMqT3O1Mz2nt1_Q6&A_l`=w_H4uk|G+%M`Q6| 
z+#Y9-B61Wp7{@5x9K%sK$7qAG$V@Yz7syD4tFidP8VfJ-cibK?$NYGUUoMayi;4pn zAYekmaXX#Z#^pFq2Bhmw@avu)jNG=+jdQe zJ=Ob0lB%AnZ`r6vk|RcZ?ho;7heb#Ch<_i^5hFNysw+iRj8`YC!bHMEW8vM( zqgT1L2Jg(Bs%qgC-`2RLmv)m>g_xIAl`Xs^yDW?Rq&n58CV#5LA2ywbNG&SFW0k71 zRECI#_7)FS`BZFL<+8lA`|@tQ@#p_l(5{}oK7GHdqyIx^^xP7Ccu&%K679-*=(%yG z5t=$>Hgxf~Gc(rASx82Nc_Q=?j**Skv99;SBj=IxYJ14_QYR488y{7fBIEeumYgNq zxOHM}q#BbI&wW+(Z_`@?4ig?Cprl^E@**Qio$vCMp>jRtlLoA_(QZpxk=d&tj*%qo zIb)S;m&!IOlU)hfw~>7N$hV1%wvo{`8SNsceX{K&S9406}oDf0A zf`+Y9gVdwja=AjvB)3X7QYKlcJfiViK1UwArqCG8{jTn;9LsylV)o+e9SRya8WR%r z`H?>cVAJMO&n0S0fS{GdSt`L;(%NU1e2XM@)2qH}lLn-13a>MDYP^b~yDK+$GG$Al z4G0A(YJyZC1;NmP#07USfe8~58F;`z1s+7yga<0z0uMxZ0K)|b2ogdh2IruI4j4FG z@B#w{j);Zb8jdJEuF*v!A|_&S>x_vOb|tb|3Mo30001>V(y+hs>u$D1&9qE zI!wVtJC*#6osFIE?5xA@7g#ueLc)QeV`Wq=R%3ouRahB(w!-Dy#^QA}JK_(`lCD+-^9HTO$|u${ zYwr5%YOHSMvg_zNl2p}IS?eAq5?fq)+5fBG|6jNNyVWkApVycn3I1bM)ti3$6(TF5 za#Obzw6mL*wM8u~W7eCpUeZmaO=a5DoOx=_j-s-hLE^)rfC-8R0Ub{8FkwL=!v+f< zCMZJuRBkSs%3fOxp<0r#TGguJyh4)HP;vcHYp~|BhKg!$S5K~%Qa>8dZCSCLS9X3^ z^_y_{i4)NkMJqI%S=ZYca_^ck!p=E&}yx~>+q_FALvJi3uYWHa3 za3G&{uyD|_AE~sQG7yjUp>Y+}i5sIy&$f<>t0W7iJmtHv9_wX&&2#b+L-tdY>aUV| z{5-4%jiiQJ()_a?#nI_#q{X_*NU8NvDsg}5App(~Jlu1q(1wN*Oj<%tkW6}B988H~ zmS|j2ZqsKOC31LE^+Z0EeK2S!Imlliq@AX^sW?%reAVorv2?mU*5U5ydGdZ^>5TC@ zmklI#jX@Sux-uW7I5WTp^vIIyJCRqWNcEs>V%no7CKv&3g2!lY>WCMkCct&n`@33d zZ2QNNbb?b}Mkd%{1XgTFHfC_Q@v5^jrF4zGt>Z2r!;|u=tHIrJZQbol+|3F^{9WAN zZFagVq4{T~m{Nn`oA3(!wh+Lf>^9ir6Qe|x1xrsoek6o2c<}vXC;-uylOjeP=kaEK z0%D_)=Xh3P{<9WU5Zvvf{`meJqke278T$r$oe7Okc0iYP-qWhf(Lz0BaWQ76erUyC z3;C=$x*fCuv`DqC>sgL9hnv?;JhB`Ab!N~9Qa9?}KhMMQ_LBE`&k>*28{E3i^L%0N z`)#L8DatVZeZcRB05h;l$<&BtxBC;d=}Z*ppctyeZha8A2kg3_AKQQ(tdCb60N}(k z?9xUx(*ebGMK-_(zfujzaxt})jNeQL{DY;Zv*<*?FlDa*Kr!tE`)B&)oYZ|mfi{f) zz91W(HopFT&XKSWO&)WhZ44)P{e2CUrQA?shh(_GTQf4(Kr2>vg`|c`o)wBDOvrSK z!y)4+Z$e?(XNw-RR%Rph{hP!3?E3~!?Y^Uc&W8ljtImZTKfv737KXD7E=fto;1$|B ztPJ*lP*_cZK~@kBgkpf;#y(|`CH4C1@Vv9=jZ~*f$s&0r4njAe^_oz=b-Q?i&QVvp 
zkO%~dKflLb@b*L^uH+;J!f&DlqyrGpQGHlm_W5T4DmP$Wt3DNi<_HTjEZF*xYAGzR z>+xrbXvJVFnBUwPcUh4#0~4u3^^t1YnQb?xz@F%MebU~Bm5*Oi9|e}7-3d~G%hfYm z=jpuJ95c%hSb!`dMnzL^h+R`>b%K4w4K>k6^-pnX{ty*>Pk%~^kq=6of*5B*uiL^A z3WRhKOy_8Y3_G55ZgoUzKQSwXZ-pfyjZiBU&jAJFDp2}#yN@blPg4pYNzV7%>ZIu*IoEP_sfX02xBh9Mmww>8uW0$mf2Y4Ci z21Cy$iE$uQ0$^GL$Y2q$AN-PJ3QcsmpnN1&AH89ORmV}I4mKR!hBY~P+{O6u@p_`K zGx|oV)??wD7-%3WmBgz31Qh##UiG|C_tM#oYlrCGZ$h!g)TUVnN@g;n9$>JC!Ul|u z26Z|im==W*Z_q)JP>v4;kMpeU$T**`O9bFKxmYfUKAuJpeMDF;0RzlQ0mm-dMkF2( z8Fi$697%2b4O4~IVRL>91r^AUGV!_R!kFBBK9)`-@Bx~^&r-e>WMMh! zzqnI}04rK2(x@eIZ&wBCo_U(MDw4lOd#m|yPv`WaX{n-!t?>3Hqhh!%b5*}UxEBZ5 zETL<&-X69cv#4fO|E%NRKW!O4Y72Xr@4qbI@nWRN^HBn=?gRq>J3z$0WuyY0f*Ljl zgGvS#Ii^R0*Mnj2F@ZBtEIsJHPb-G5>b076Pe4Mp9|~4od}`r#;oTyO9(`__!#qWF zm1u)PEo1iq+=kPOu^KDR_>8O-C}Ocuk8mZH)+NY4AOmnbSgLIoO4=pyZKiD5qFfZ5 zvQSv_v?-buz|M8-hFH+cO7{%{XjrgV z&u7X6TNYQ#hcyn+#La9SiU`G5bxl>lG2G1@TTz8z=|gDHh!vF|;EX%TC@D2+OT^+ zl*AsN*m(FzCC=@Yg>k@UO4uN_*TtlkwTEoau?EMorO2+o&lOI;H)b0I{FTyWlh8*a zSLy7A`{L^*K^rd1aG3J+8kBdJ_H^TKhLMj^vg^XJG zXsDnoB%ld{ynAxV<~HWOJq@2x`InH6pk7x6WoyNT{oX(N3UZ=k`hZbB;7sM_sbXM? 
z&EKzHmTlEN^n&nHDwF1f>wu zieQ+C<&Z^u|c(&wvklVQ-|N>Td$>p-(Wzu-o>!l6a;Qh$y1V3apSjMvkUzG~>CN3Odv|U$Mj2>d<%eC7 z2eWLTeIF(+_JEiTPHhVKRHD>@xkIi2nZNF%a^n3{C$N@ZFY}Dph*%wm8I%BmjgXRCyY|2mXckd;324L7w0Cjem5XhyMkC2z+T{M}MuK zP~;N^Mn)*t5m8t%l`@2l zixc`Nzu?u)=+XS&3iYgUWwEbs>T6CaZV6z^SZNnvlXk>Xj zNT*my=k%ho;xI(g>`)LPFkchrn}bo_98`BNu-wS@f2=vRkOi0lYppg`0Z>vUIVt_# z5{3#z1&Mv{>{RDj@pq-4xG9qlCSd5Q*hsi0JD3_f*DwN9KTON;bdvfL?H2u1n-Y~s ze<$gfLu#yG78@z?UGPjv@W9=h*lZn;ZdEU?z19mRINwjRkt3r6CrVos?jGV8fm#_f z2>d6^F`sa@_gw?_5k^^ysKH=^*p?1fb@0p}4oJ$fH;77fC{| zR?2XS*K%$=iC;4ZA;!KCBmf6OjVCtcWboQ&MFCiWxhwg_D>{1?h8E(y=+b-GXc_Wa(P?`p4ppy)sMbF$;AM^XS76+vv(2U~&`4G4g?LigUKkx{2nTpP z!#bauVHCjf9kn`_apdt@s&8LJlg3^+3qzdC8x`nnht_Z65Vic*GDP1YDa0h0qJ{j| z@>gi}?GDzd4vP%rg|x8#&QkH3@(9GQZRnUC$kwi2WG-wSi9x=Wck`?oUVJToS64lU zU_ys&huQ`pCSwe>4Aw80%8v^Ny*8zXCl+pVk%56QHc=ED17YqCa_oH>hj?YxGrj{# zj?6}S2i9T(Q?*gbuKc#yka#sh1U$N1o#}}C6;A|l5eV$&iN~|Bmj45(w_+U&7*R+f z3lyvh8&2zzZ(@CPY8p7!cS$hVYs3 z>k1)Pwd*pmexD8+8;%Jut`h=8hP6%tm6HZ)zURkKM(;=jbVhYhS+@b;l8IRS+0k!- zgyoBVAqcWaW~itCkcf@0|P3FM|tuK$XcsKPydRBAIxd-S#(P2JJ9+USU|gq%r0?Um#N0i z<3JNXZin;pt^}QMeU-Y8`h+=8eMnNW3Y@M5#OY~WV2Hv`W!P(Rp^lni@Ix)S z({=6{_yHdt3Qq(gpaU`~Et|UxE(zq)+oa8Ay5wlu+jNd$E<&iNNsijxOu>y ze)s6jN?b%}Em)?!t^2oouB-J05#TlYu&{d=diUrzu-Ep>mtFKM21?3uW_2ZECL|%I)4XG|P}%`S0LA zn2#y(;ikE^#bo?vDC~HU#PI6%sdBE~!l?xIjv>5j*Ih{C>o9kZk0ihR4n_E}bym?! 
zmP4-Y36w3J{b;$qmSgAmF_+fv*eNWdEgU;`hdvYLJO@YT?{w^h7VcAa#ltD<&WcBq z?T=WpL)RxeZT%``-Cg^{X{ld-letH5iq1p;<{EW~^`E-!&zO?e3+OfeipUAJ_VN*T zokoaR@+WZLM8mTNNp5?|U8lAU*iq+XYfkzcjya%13?T^VbT>pgC3$-jqGg1s*A?)S z$2{S^LDDHWm1-&JbVaQMR$J0Rj(_DM)827WH;)=ZCev;9{DR-vQVDC&cWA7O?Rj^2J~Jofx+wuXxiJ00qH zI#pVv-Rb4&#M;vtau?Ufq9u$D71%zYC{kMF9(*(U2}%GOaEr7-onoNn%Z55+Ik zLmj9!QAfDwx5>O?p3u7cTYYcb@-jhAn=hot<){hIRA;4OJ{B*0i~BZ5TZ;S zac%01U}qZFsRLK`sU!Lpsx!c&I?FQBfM%${>5iv5bx{zBL;6ko_27st@Hed_T-7P* zCa*AHYt^a$Ri|4i;#*|XNKaO0E{cAh7-n_WEveNZ10kn&j_+3S+J6sEN}jj(;h6mVj778R52@H7w%6y<^1Fa%|`GsfxNyoOd2`O{+#>%cexn1%wbx^+r{Mdl&T zWSumfKbBd?+Q{rV{V`r*r=QZtva59#SN@<^JYT?q!jfCIb?}BlkGGDD2WqbvqQ?UD ziBQEE2;Z*IbBd%2}cFE>2st9sxf>L2* zo;iAWvQBRL@`hUsdx>ie{-?g@^n}=OKYQQfT)f8qSqB|x9qk@rW(6h+NlfjMwQG)=mVO>#?7ib86(bH;+VKJPZzGEWLG6ZciuJh`;=gC z*Xel{jl)&WUB;zF3cL<`vF3Gvc+2~RsdF1hRL3Xgi3-O?fNOs@_O(8re(lB7leFRfI|FSvUJIoI6#?ut;4cjt1kQN%J zAIJoB(l?XD3>F7I(Y8M1=U8j7``H0ypZsorw9$mVlabtsmQjYK*MT@bml<) ziya`gu#>PA8HG|;VWf=$WT$kzcuC@*GV$5S4F)AUanPSp2Z=oI60A-RcC^KNPr4a9 ztb8CS<{LYAb8+mT>anBbQPY(%8*UCeyiaI1MP*tU-X=Qnk=XgNwHuzsz6>(|1I%KF z8$&3}*jYuQ!@S-_0O~5`-l#i*Ig8`ouqc7ohZfQu{OJR!D4v(~#9~~;yp4<5EW5Vd zb4FjT*^!`}ZDeP=5u-qZQs&u7-;)~375@`)K-hBm>`*yb(9Z9*(BDA0q8&Is=K_vk zb(4&GV#E0SFfut=wam~O%ULoi?!G|f;c}M%;#gYnfFOWn?jvU>|2sRHSLI5?@a(ii z&qz;dx5#gkNW1JYKPwr7phGNe5WZG~&>i_Ia4u9=1yJU1nre-9ULxl5H%U9*&UMp{ zg34Q40~5>HKgLG)xFQ+{WOABvu6F*Hao$~(j5TQOwv&L4UvtCXLxnS4b^Vj2&iJG6 zwPcNldxy5uTd_zz_)E<$&sK6gKe_E#biSXsosA$2f}As~5-FGiwO-Nk%7M2FUlNPt z*WkBmcLkQL>W7eEPFwL!s!38U znZYh1EBcaElnv0SWaYn|f({2&8aSk-mZrywoJ+d+4VQ7u%r;~grGJMz?!G zqjCM4y~DyDv(?VUPL-Zg8_%c~z#Y^M)+nPdt!kksQcdy6$dv(O#)@5zS5%%kSYpnJ zVlyJ5AB3A-%QmxsZ#{cYBbNxuWZizoZ{x!=3T7T#2@3Yi(b?_P`M{%;>y?iD?bCA( zI)g*R!nbo<@qaseV*wQD09R1Pn*{}iV^~=p!Cht6Pwc4nIYbEjDc%mZM`0eWw^R4p zpoyjyRXi|vitiJfw)6%anB3X5?bxV$WsqcCc}gC9_imc+$hfM5N}zcOIZDZ5ClNd? 
zvZ$=F=kGeAoQ}F3q5;aS1BR!fg}2iCR3mZr)A?yT)z@|op<_rku!G`JXfDZUqOaMz^yE85~_lU!=h2TRZ7MM1Prvot@ap$;;PvrgnH7p!S}6 z)W@0VY9}ubON%Qy=~eU*KC8UuI9K;S^`+~ z$AfB_9)4iB9e)FdaSt#g-%e4ZkBQUs>GO7QrF-Ykb-l+qbp7AX+iMcsnZr1v4Iaq+ znl#*@W+ComqU+)gko^i+Xz1gP;vg>yu?=uJxv-a+xlM+jYo^b^*2cL>01o;1oT za9QeV@uuM%W9oN{M(C~jCTgBiiITs zISc4R+9pOK#qj79ssw?An6Lr?G#~*=$!Hxb(2}MxC>1a)V^-55n;{~UfZqSVnHv;A z)=&DB$z4PgsKDTjS?XbwdLRe90ptPv0h!o~%^Ip5nisMgROeL#Rjq19&C&0u%y?M1 zWJR=ETAM7VGz_UsrHo=$Y%xSt#*$j)i_%+g!txxpLW1tmc>mPYvA0yWBR% zOU{+NaLq4#r4pIx(V9xhqVla}T}w+T=4vr2vwD|O^s;oSq(Q}+RNgG4D5WF~Vbjm-1O}z(|M#DH zeCF@Wr>#GweE{q0dbRcITl(GqdBL##!sqL*xA%KRSagJ?XmvJ0m(uo}N}?=+BZ7+1 z0fdA^@<=Q~LjzNRp+Mnbg2EPt016Ka861s>hYS=AN#nw!B0xM)NRW1F^Ajo*f5&8K z3PyTKHkbw_3X`=)OBP~IS(;5&E1^b%zSM$CRkEm(G^oK^-XO0Plg_3Zx#-d?Wi|Jt zK};%G+jzMN1|peYIA9(WiG+n^BEg`6k~BJ)NFEPHVJIFQWN=JKJQ|V)433BfhJuDi z(qN&XVS#WwE;J;{1I0ulQ9uI0@_0DF!I2~nisSJ}$YH|KFdQ%s6BG?NU{F!f0Yt+i z!LUF$A{Y)HC`>SohKBQSU^q}P5*7%@;xHCxA;21f+}?*ADML1IW=1!}W(XRBtbIq1 zYJ0n8PfIg1#LkXpHnJmhyEQ}75G%41nUP)YR-MG9wY6PUvpZXgAiAkpNbRdA3)#9X zq%14M`N|h!_d@RKbr(Wa<}#muE@bUOkX*>`i0}xp{DX*AvqBIu(~%-IlDkP&NL3+a zRd>n`8kHTDaThU!Zu4v^ay+J{MM~H76hZP7If5L5drDI?50BVMd_9dIXd}oSIwL-r zh-l`)Q$hs6qlKt@2%?47Cu-T@C3R=hZ^!An72gL&RjfL$A?6d`izzVxi&I9_H3T%tw93yUHv(YK28F&7v*j z>^#j2llY3RxYUwdt10S|6=!4W`E4^v)5uty1w{f8)MX;!p(HY}1qlkm;{gFY0K)?m zr~(1=Z~*}dJlIBg$iM;&5i&@?21AWzQED5?vx>oW(lmBD=heT!1;h0sORAHl;bqmWzLXRon0e`W=QOL# zM7nh%&AQK(R$YtiwD~&vT&CGs1|~Nxb5l=h;^3rssI44w>m=Hqu1FIH+d@2JValrs z3|-m<168jS7Yrl1qo(3L(a;Rx!PH|ywOcRVn74{)dbG5Rirc+6K*3OO@KOIs*MoN! z5?wH|;^%THi^>)VN2OP_j+dyG@j94zRwHu3u(R(1;b`#fq~3exy+q#*PjzRnSUi{t zkE8+vKY0_?ZkqCIf3SA!2v1e9U@Q(}kw93uPzwg$REM#sSTGjHVX4 zJitMQrC^0=pnwPtRB(ag(Sb4mj?{#k1bE`s%yh5u){6 z9Yvqp9GsOI51#QX7ZFWHyU}nD)$vj>>vtdG?Q=Ft{N3K|st(aHjfhBuST_;6{`%@! 
zZ)v;qG(3b$^`o__Rr;0scl!FQ5!&l!Uub@WyLjV*k@bp&)5?<2{u6#nO{n$f*&;Nv zW2uH_o4;V>+6#s)7-c+aK6~+K?ZuO?7PT%dqOOs*Mp3700+>AfBUB$_tEvw@OA6?W{Y z8VEWvlp1A>sGU&Zq#&nF^J&lGlprfGAxii~DUm|dXz;Y@J8e>TO{ht)5du_>=W<17 zRO;q@!by{f9&gV;4bmZ7`&DU#?)Vb6U>D~Za^ue5i04V72Q6;8YUguh-c~p zwOyJc;Yk{8objVwJ3P3YpiVMZ;pA?60a8m#feY6VWeqkObWa75ZZ;JPQ)eJeFDV4b zek-xTVGA)~aTUt4l;EbbJgX5D8R2A3RR;YAy|We%1P@L!J>F6Y5dn)Kg&!ZpO6yjP zmUh2s>Ku~&;kq9oo^;o$={yajgybiUJ$;a5)9ODdWjYlNJ!PGQh5|`Yz^*Gi5r}Sg zsA)TOB%EsyWy?1Qji~^^Met4#)>~{yiNvTq;GLhMWSh3D4npB}fm*Gi1xS-@+LUTG zMnDYsYIzi%XvkXtBHfv8JJ1;9-2pCq8V=n6jkK=2r7fNR*{&~!nY3-A(ufQU|^AfwyDt1G+b z{{pF39u)3~lQD0IKlvl^$_^Y>IsulqJv}M~~!i1PL z+xQh%h)=i5AZfcQTVhGC`Ne$m%n1;dFtHWB?Sue#M2IASyhLI~%SA&Jy-_T{u4rT( z7A}Vow)AYE2FC2;lyYkMH^1(@8YO1N?wg!Z(vLX}hI+pU>#GA(K+l=Kq#bp@B%rwP z9HG|q6&FSgEba+HkV9RhL*CISRUA(@jc*`*`^=mqH%)I02Aqe|;ITiR*ieAqA=r;T zTf@^gQ33b?SUb^%THh8AMRf)>pipYtD2kcrxT?*Sq2f-+mPm4Rnzk=@CP}8nk<&0* z%MfJNCSYXmrYJ4##$@B6@Dr529yZXF9pJ(QF4;qlx>)en-#Q9h>Hv|M| zdEr5*PJ%Sh@bL6Y`R2S45>G%S{-kIAwd1?KuzzOHb2Borb0DFs0XMkwskKsze*-CZ zqa9Q!MRNw>+3{sU)#l*Tg5fI?|BM4H&@a5^y4o8q)v*C&M6KFgRtNqYhfZ)cJ@ zW^2PJaX}*gq7}x%^1p0qhL1?tTY8#gerBHhn4a3 zt@0&swMiGdEbHqyq@Up%`SYcLHJ*kj&g9U|QS9L?-X*LgJBOYe2(&?AC0k zB=@%+2fb&oGXA6Ox|w?-Is>zmu5f^`@M03H00eZ#G+B_^qJX?eq-%CQArMRl4r7Np zMM+~f>4q)j)QAC}rk*Xijd2Q72`!$>-w=ey=;*gjZDW z7@x{ba+VYLz3-T>CZla0m1#K`7Q~`G$8|B8tK+B>(mpcJE!oohwC9yXv5H6o&+O+C zt@lB)s`!WmrEW&;_*Yt#QruDEo2_g21!%oc56I@En-i2uC=?v@vCQ-Lk4fgtK`rYt zmf4#6_yg2cXe4BgW_qS%03IdR2XGWAUp62htKcIakg>NJEOv-KJ>XsGS?vPSr!Mom zY8;Ralw{!{NC#p&WLVjDEP5hakK;seWpFouL4D2*gaTszgKWC?L3|Q>5XB_(A^5*g z7GM1}g6-sZ;k0)sI=iF6%+AD;{6Z4z(5OT=lvrAXiur>2k9}nl5=tk|L-3Oy2OG4A zxD1_Ikd&5>wSC1Y%1;m9s(C~(qmpbfq$5KgmPZb9X-}QqK^ksm9Q{;#7Ej9j1kt&e z@6I7nc-z_SPD>E@F-YlhOun+(j5<&>eq$i#6?(+8#ER%Lf=ah$j=<~7bukKohb7vG zT#Rb|TQo!gh0^=DP>!YR;|!xDA?TQ>h;*Ydu{GdHI_P99zCV?QF?BYKin)qGLIKAg z85BxsGjfxO2wFqCotqNAvkYdsn|jfC^uY#m;kJAsQiVZnty?~e@D0Q*Z411{^6{f7 z@iLU!VrFIa@`FJJY&ARBVWP>0!Tfd#)%Daarbb@0w7M;f+kLpHf8snVfS1Ten9Z%{ 
zg38ry7UpZl?xF3huN)(hgLwuqgp}OQhN%(-dXU{K(U}W7n~ZM}<kbfBmMMtehhtV{?Echn+T+Fv<%pdBpv&Nqg0KF7!PH_qc;#B-9Kxe*12qR zHR1J!QwTw2WvNCxX68hbz6)YZPkv?nAwde* zrqYBKVJms;Z$^&s!rqE;|3(@_@b5Yhdv+PnD!dcQ?vw5hsOd+maP)@pa8o8J^txDp z)c6pBwuvbGp7)4NoJQFiplQ>@dXbi??S7=X1s~# zCW)dvOMLzUsQs)IuEa=DbijLR%mZ6GTyP55xsBMl-7N)V;c#KBw<@p|lL9fUR-333 zA6dsn#+1;nzL1YbA_-y29YmeaW?t=2iF?!{jOT@-t(aFBaK#($guB>Mge;sW0HJ&a zjHdE2HZcm|6nFgKG3Mda5epdfd~>%)xea7~9t*$QQ$%dbcm@@+Rwe8oU*59<lD8?j5d!FZJk8?tao5`$UJOSqd8D&TlH_y_F%;>M|@m>8Q9ilxTG091`FHF7(Xf z5+YaQV9SEa#)_8g3PCMq-nl%uowhnT^<;8t7-=h#;S?CA>8?;&N>ZNlg6F}jycLZi zEe{ZDy+ikzGX^{2)wqNO8>NuQxlI7y4nh%;@d>>M@DcULB#pQ>NnB9V?^|%DXmlh9 z(UTZ6(t-*SI|U;Ln$%xRf)u`;v2ZGUIZQfF(6|)ctoBG9mf<2YD>8w}kzVAxM_E+sUmEk>ERBe z{z8dXq-5JfV&M@&&|Erx?*B2AYtK zxu^GDXL#lYG!NJ+Ip-HafEJ^9S9eN>uOZc+Q+90{EZIt1vEg2gB6B=)S)hO_J`}0@ zF!E&W<%de-9<8}3lGmqVK`J1$pS0#|r)c8r65jna{AHO&?ix{6nGRcc6v~DU5~$|H z_}Z5bc#kUA@X2tGy8tEXjZA3P^X8%xBp**K8-QyVpFfcllY+!W5(?cqyLM0kszoAwysc>9JZ zgee@S39k?w+ZUmT<_cEWP~!MSm_pwKBc4BAi;w8L z94YIDhJ)`-ASWHD-|$~8oy_AxCXdc%xTo1UES(IE?fr(&4;TiXy@oifCbxu?-^Fl? z%t_wg8t#N41XYfX72rP6by0*z)8ON~ z8@3GZvO=M^>i6)cOiBvGFqzOIUT#0NQ&^ZSh66M*;(DksiP>bB9r>mq{1v-=_^G4} z{{I=4z6l5DSB>Y%};sQeKMl)B~I(hbaxFO~Cg9$TO{NX4-1upD> z^?%5uDgg7t$H@TiQ5g8={neR3xM5R;-Lm>~rkQ;Tnkk*kyvjtbHc(LCbPuf&Ge@6X zig_euApR9!UwDRDLf&-y`p2^STkJFcMKu}X<*ln3&ao(O(MwkqL{g1rtys;vWNG~f z@L9q?yR*gEkd;iCm;jAp3v@e;6_`5FWUGxvT`Sqc?#jMsH7*JxUn{vauVB{@e-h5! zBS%IA@$>1#CRdUcz??>okq&Bxv~bJ?88C*0t)$h*zLXviw4aNaL z%!N#F+(VoUR5H`w*yKbHl;LPp__=YkZ%m4DN%n`hiLW~kuJwm_Lfl3(jTsJ6^^cw1 zeeeBktu?e2lIW5{-RKDsuf|nkNLZ^FV)lU%5pi1h=1v~It2*#syHBnCUMF#ha@!|< ziY>Yi7iiMy6SwNwKRvPgW$D2Ob%UUXi z8_J0PWHuOad%Aq+`)0zGF3K;!vl2#>ulVr3h`$5~_VsZd7J}VH1NlY#QJ{y7IIY$! 
za3>lU;CW>aNrZ1KW`O$~$;q?9>w=oqawDY0Q53(#yFd3y zu`O6~uud&7-6ynW?)in~q?kgNgf7@rxWX^G zdl;+^;mU{@;OO-|GxtTCV8VoNM)#katF&P-3q#6o+OG}kh=2DNggwPbuYMM^s3^|0 zd6v?pv|Oug_=>yrt+okT;<5>h-CRvOHsEv<^iKr4h|qOf#>^RM`3Q{yg7}CFS{R2- zv{E)~A1s1o6|x&!FMBhum}MmGYu}u>zWtnCDOgdv_OfPh6WV9raCc9TvVU>cOp8{H ze9x`TmkZOB=fFLMxOc@D=hc#S-|}-V1zOu`HQsxixTcJEC#RuNG?-D2p50dq_O?4o zo3>s&o*e+%iIc`+^VX*+i?ZK}4?}AZwGzU2j3aEUV!7~4v7W16i3Bmrd-Khnwp@Vj z4nNhl%T@r}`#{a=7MZZDxj$oxe5Y};5p4NDz$e7wrn%vm7DBjyeO*M5BmOABMyy>wguDP7LWgv7320hQUXx4LJO>z#h1`gvqSkw@Z9(1( z?iWdNMd=)*e2ZYj;w-O3mvH2hSi}zv5Q~D2=3^ghWcIaZbT*o#9gF#H?TfoTBYr=? z#`Hv-R3`!MnG9|xHcHO&^;)=XTt)kmappdJNPI%ym$)yAhJEg@c^B&);o3p8cjDL< z^2L0*tb(D{*-S>*{3D)k%9g9Q))g*l#J{`Ih)0Ah1CQZKI}mBacN4X}iS1V)Cc?Q= z(YmF#fi=^vXF(8+c(JgIYy6IQ#9Q`m01{6oK-BoQui9%6?j=HYcOaQ!>k9l2mO$cM zp0b9At(U<&UnA6l#Glyz5&u>G5kJAK?F;FnBmS}YhyiIT{#Yz7*_X*ObSv@H>41}U zC4N}#^tH)ai*-{X)H-I)!f*+3)5^Q)E9~g;)jyG?P0~H>rWv^ZLk5)RX+Nl3YX{a7 z-3plPu5+^RD-l|EC5F3?BJ9p}i;p3CPhrt^yO4uB>R6$YjAR73yXH4i#6KLF83lU# zOz`8c*k~E6hz;C&I<>J95O8TBzQSXGc^gIiJ3|xk9xh4_5^ml0J79rs_Oi!!mHWiK z#Tc>mULt<&5N{vrxEAl_Ar5b=*?*bd$rTFO1D++f`;tiEu1?M7HlVY$Wv{1#Fxna% zA9|*6>*(HWJr%r*qLM<1xD+qfV2(TGX(Emh1I9xrJY^I_&2BCjsN-js zp|87TAH4-A)OcTvgU}#F*t#u~CB?&ts~TVEA9+t73FPH3aNb@Cv1 z`@+=^i}>RyiFo$XZp-h*AXg1aIS|H8Q#@u#cGh=&cBx7<=9?$X9uNhQU;G?tw; zR#T=i)!;+p5=xko`jSTWA{u9GM-KT(F-@(KzYJ=;dK@$8wMSmJUlun*lq((e}l_Q z5e!7_k!l?LkOv@lK)Hj;uyo-&dn!W#?81w|WnX+j_?far)e0!yc^-<8`s0iUwHLyOD^H~WD0PHhm~E|Q z8p2P(oo?DL>944b z1}!QRL26(dRYzkD3bgoLnSj*J1bvX30wZBU;u{ETs{rz2qZycek%Hcm)-cFaU*}s& ziAuH5JyO1WG8vDa#OkjU=0T8ZQ>=I3s$ zLJ0R>-uFgE*26T|=zVaLI#YTLm3WvG3zD{kPdIm%% zNsJ;cO%YU+fWQN?kd4=OQ}VHplMd1Zf)L-9XP>=QX>AjC0}t0ZW;?0DPk7E$V~Z*L z2+{7dq1r1{(MSi~^Y5=SD@RFBjnPNc)Pkot33dt!QCW(dC)3aZq!THrGm(_eaIg?D(q>0fVeMh@$4uea}dU5YP*#9g6++7>~{HHKN(4)TAMh5 zVO67&r>j*;yFE~*7%QTCFzF>CZS8~53-fCrVn%powJ4AtUl?-BuVuF{zCjm=@`_@RR5tryFMTV5*Iua zbb+-B7cA7>4@?+hFq`7HV3g`)vjM4C}0zv*?jQ8~0 z0abr1^(?s$;weIy3QAS*VZqyRRSKe*Xt<(tHurv~fuBFP`8K#u 
z(1<7oJVmhJE@6mLGM@+nD)$tYq2|U|lI>_T88lqU%S52$JO2VIi{6~i{VB0%^!9xT zOyOM##Ke-7)<@}cl7x$djEN-{_XM!$lYxK4G8}>suw*8Q$^7=xq%S+rWhX#S(E*hT z=|9CJoo?E&Q-4gKn*NKN0O}k-WsQgGK5ZfeEmFLU8d(6&Hp;oEj3oW==%Khm34y06 zk~T(f&j%9)6Pvfy(_ReFmC#pO_))}*(&B@h@^ikW6!uaG+at@ELrIDN*2P-kg)KGL znK$;OODWUvb2c>!y^0TqLZ_@6v$h#-`!Z?a5;5J9b*+-SvhJaN$aEzrNM2yNoNEZX zGPH(bneHm@GoW6;EVKR;YTXHBbWDQuj-8l%MLgq@cIkj{E=`705%;Tn<|N1h>xpl3 zQiX_Xch91v<6cdV#tNQuY#xWb{N+CGk<{=j>-2q>=27Fw5>$Abr^L}q-%I1FwfUC7 zz<~mQw`x1gQF+Q#Y%Ps}Mo4m-lSv_S@Rs8pq{AE&?m(vO3A<^%%qBW_S2%trIY|Bsq~ zdM3{=`QY!`OX4Xpn9Te1dzdli&%Gu5*kxZ_cE%f@Jwe@7Z?JN_XwT&?iPvLGP?&30Rz>nVq#W05myUd{$1=86f4espZr(w+MnFJW$G($KCXT|3+)p{ zJwoDBh&7m?*`A*^k(Mxs?X)fE_K{x({!LI`V-PM+1F|d-8erhDPe9=Z2pHcuCz#qv zf$UQr|MgksIW+&c?%Ds%bO~z2vWK^GZ@3{Qc|%CV0-?f|{|%zTmkC!sVTVT^n}vxq zl%MqU+#4oybeRh?dgC}c#!c4IB{2e_%F!-IQ#qP@Ihvydf|Ogii=!MZ%0tteg5(G;yH z9q2{%AbQR@@L%YIoWlOoAs$Xa#L+dKMkmD4J~zbCC9Ws!qd*jjD$I##jvn3N=+QGY zbl}{an4?FBq9U#{M~^xB!_^!;!;JxhTd1OIwD(W|f^&jkC{QWol%iV$5@28$3PLc5 zK>&sU002M`0001hVHkix5Qsq-2trBbu(#0w;Ghl|AqgMbD;ZJgEWDBVL0{g8l)&wF zcE_7S*Cae89riF!?!(fv*NBSL!n%2z|a@M7w4|HLVga714!U$y^F}lij1(%K5Z-7lSZIKNiM)kpIv{ z+c|oA1Hotw1oQZuh}aRaTS|F^AxR$Lsl4P7Moes`!}D167YtHqcwTt$JdX=f^DHcJ z)78w)?iT@|vasM3l@->U$lOy17Q5I(vlszDfH|B|pbW-@9mE4*9i^P8jXqBu9c5wv|Ar20m9_y0)%ebIB0kBKVMYh$GkSQ_RZ<+J{b2K)b!OIvdqAd zbn>RzSeLt_WYCe-sf<))I(HJHb$5*+V->g+0zQl!HJMq&19QFn~grq}S5#H?syK?kq z4ILEF$c|q0{^OBE2o6AZ+s71)A#h1r)HTl>bkKPyh+n#l5T+djUx~5Ev3kV@e08Nn z_@ktwV8d6p{^76IMq5(qioAX8wE_`pu(6LQd>C4kWC?*4sc+7BduI?BhrWW2P&B>zF{8}|Ip83b)bF8RULPhx@aqWnej-y~OXen@F#cE`h@8w0iq8oKOa z+}*x0hjl3WGmK6ui(*tE7d`(VajII}B2^%xN`rxhbAK>slyVfPfx;XiLypE(e~>^Y z$&6tuzw7=W5$EbEkKc$GLFBOT76TBdegMG^6WRX{#w@DN^*HFpgWqjU7=V!Ikg6&u zMfZ*p{L;Qt-M&}A4c=t{h+7(Z4hUNK0R#))f%A@&{Dg;MTOse3B67cBq~{NcM6&-0 z6d=e+xmxX~>wW|$qYVKRt%_my{YhfHzzywWx=W5RLkkb7H)TzxoW;{F;fS#bX7=PZ zl=F@TUrFVxrM>sqzLibXaELS~k?VMuIRyt+TL2kjhsm|6%uq1~YO9VFtyjKck2M4WnZB4vqrQ*rh9zj{0S55rycYHsR3KY1% 
zF-)eDZr?PJ6}Lbf=h{uveTr2WK1bWz*kh=Pb9RqVD|b`8rWddydFkV`dv=QbH+pP6 z=-*zZ`dj5$-)I#+KCc#M+q6+qd@o`>AEr09> z7UGAiDv*BQjUJ%(;5uox;y0nkyh!OG3G+c2WC6N0o(JEMOoG1=^0DB(wKE_qiXEg`PKSU zg(62yk8nUQRcd#%9rzXh4*8KuauD{G6`jE&qI6pY@`7Nds)X*&xKXPf8!whQWJ(6D z^!di3^{**@(N|Eq*ANI+1k|DLL}>@!{UCYM)2JNh0bdhfYW(Sou>`o$-;FhRW^M07Ap)n%y7ePH$~A!emeJ7q=E6Iu|V z5qE4OM-|>ugj1t4T@5tlc!iv(B~>6K2Z$(Y2a}cs2sRlnQCk$4r<@nUjZS88EPg$l zK_Nam5&*CfnyJLDw#WCTrIFNBK6@>XsoBB@NCDnX!N{lcD`7lB1fNvfjIxt_f6#TJ zT9Ftj>pup)`WZejiA#?RiKd=Na5GW{3o6Yy z`WD5jpI)NoR7gX#J?%xN5ZIp8#bb^dT0rDp`oce!ni#_0m&7p!|BEY5+Ci1$y|4gd zAl{8jY%pE7)q4i}sQS*6eA{>Y`PSV0 zEpPlK`Mcho9G>ndwpZOV?H{dXH#`5#KiQKedBjHy9oYn%qy0^jNOm-rgmpOZJu^E-tr{yYX6rHn4UHDO<8j2r9vmtf;3-%#SbS=reWeD zDICgJjUb#`Wi+_*H7BbkR{nbmcjD!EZh>LYhg2>!;q zO`DO&nV$9ghNEh)@kX?6gz)Wj%i3!H@l+dkdN(lHZiLv_P9q;?gpXUtT%dY{hAf6Rq)!GPjj2yqSA$&#fj+ zx@ij4w8f)%l;2edl_r-xtCfv8rN(0NbitdKa%<N=|;8mKvIsR=xVi=*+R(t2(RA=?@di-Bcj1My9UNgy&2ywl$eU zXIQa{o>QvWF=3iXXJrpB*7dMVDm`7LXe$~jJ*&J^XFgso%E}PEIL~ZtlXt~-$(u$0 z*kslISh+IgQ@*dM?t8~)@t@W8^;>o|cAZi#ejX#&Szae&SEFiID>I){74{jkEOJYi za!N{NdT%!MS?@WWn+~3X>{%R2Y)tF9b8EPiDpr@3y^gKVY@S)_lsa`iMS8ZImo)X9 zNs-~LVwz%1foseuhJW~=(NdDJaoJOB<*{Lr_7D*$6X@`eA|f7Jgp*l4HBXs6a_rw@r{wBAOm=bxtuRozwAQ|(pN#Z^r`O_U~ZS#Qx!|Fl01 zThUH!(N0m(zG$blXs5Xe-P1DcyrJ3Y9&b61m3l`!{@7{9KGrd@X_jcGhG~{)r+lb}}<*n-A-(EheOQgH^WF*zcI$7g=aplx5~mF76lT)Kg*T;3=AL?P!V* zfeJWKa2OO8P@v%f4lFneTr9l-$Lo+`+XX z;R1)!rcC|k=yoZrS4s$-QCWmuxTM9z#gqH|c+o-q(pAU$&P5?sfs%2%OF;)a4;9;E z$^xy9L95DDESVPTlg3yB8ID;f6)eCLNGK6x@WC^fzyZUcBn~L>fd>;X5V&KX&M=eUX)sAmgtD3rA1A_hmCeQdyLtCd7h3XtyEi8&~aBHgH@b@ z3Mx#OgO2G!tyodc`i4vAQ-MI@0Rk2pve zRcujMF{vOeRZKZW8^qK1V$>}0xiDq6P&PMMvbG=;v_Yu=LIF~V;7~3=89a!P=pcXx z2ow+=Ake@8AY{OSg9RWkK$Hs+1q2HiAT&g1P%cz}Ap$~55fCs~fY?wnC?G0CU`TXG zK%mfp!T{_9ka$QuJdB3{2msG4m4E%SR$qOerg=_KxLd=D=b99^jG=JJVuuS)+gzUJsywYdhC~F(~wml}5$wrb5cdePP z*vRf0u~yTuY2$2nW_M`T->iM-D()SPlbm@Ik0hx_47E@@1Lpl+4qZwNC?Rt#f)s2n0>ip+mdjPuFK7_ zt4ykFpd#81eg32GkVFhC8;5Yo#5sJ!E%j`CL(7X52eJ 
zZ#AVwJLc1V(yD@%t1sBa?OMvrC7->aqgR((I{TB3QYFGFZUz3z*x@Ufi)GS+6t5uK zW>E@bXp2L~FO;i}^NZ+?>~I!XB+S*HHrlFr%&hF;8MG^qIq_%vQlG1IbzC4+MQ2KG zpS-hJ$#_!hDHs_oj>U!3);3Y0hzXAtrx0QmC68J{$Av*;M^~~)%$81Z#uQw+SA@n= zBH~n(1%3@G`dpT3uZRfQ}X%IOukfeqZKalnS@R$-a5M!sd%~y z_iD1_yC-8UYAhXTr*v%8(x%n-Q~gz#^C?{|+l{o>JW*+J%oR_`z+Q0#A;GlpA z8#pK#AUJ418qfz9AW8%bV4=bW2nib=m@nV}fdPU7ivR~?fkQ-u1P;hT0}O0{h`)jfBBkntgbikxazA8&oK>Nsx( zTim|g$lNm9Yvb)LSs|Q=0^$ar=8XbxKpTo#E6WOYR}mCkumK_f6)gK9!FGE{;Mv^} zFDM*J14RZKN(L2fXnc0YN8>u4G2=tx5)=}N(e<(d!p2=AFf=ybs&_8{6>@K#{E)~x z>@`Jf8=TD8SbQCM3YRZ49;fc`V~iP~h=4k<&RjY0or`g^w^_%1L(8hig*S^Qs?3-MzcPr1D%zD4Y-6& zfZ>d=2Z9SCXkdWD1{oOWV1onN0SID(0}C7kM?xpXJku9LjfwEiVo_@hrmv~YGI|ts zqtkLqvFB1>DW^JJ>ePjzRnx{A5v#7SB-7&4MJc^YR?l<0jCr?Z01U~424Djqtb+@? z0RxQS0SgJ(fP(}PB!D2o1Q<-<)8IhFejwo{NMM162nq^h1@-oDi#mgfa(*sn&9Muf zgKQN4OnRNmcw4naRcP%{VTnybbvI?@QtcKm)Rfq|r@6*DQ$jAJ2e80Uzyc!yWrhr8 z0|YRzp+I242NiUHVS))9cmT1%1`{GESX&J+&|m@_QXwIgQ`||XksXtrSdmp$7uE5;jE*D(XZg_WVSbLn?qzU z836?tCO`@@Sg_y&2qG|G)Ifs_8t6cShV;Nx5Wxl&P$0tu7a-t(0{KBfn3?9#rc+ef zKJK&SN-~-LJ%zrol9)xa4E_{_IrajpK*g3!Q^nb}`njWgEbI_sJ6H8Nc8W=N2&KZz zNyVwGSw&btrUhVfKm`w^2opeXVZw$54h0&B&;So6U?@D0AORUNKoqzT!GhG_0fZ1j z2p+j*Yu+V0ba|II4M%?QhH7YtYB)*{RY(0We~14?j5NHCEyEmXh9_v>%}he_|4G;J z>^z7L!sE)`b*BUGre#)r*;Z};>S$iOlhd!Alp>NEIi93ncu5p@u~3n(gCz1HDa13h zCfY=6-=wa`^Nn5IJ(dqip&4RfAKu~ZK)$@^)!PQ&@fD9)BaW{fA=yUyY!LFgWMo2E z_bxk`iSF3{Sn+oe+dXPvp=}x3uq3a+%_og!W`qEP@Qp6cK|aW41mibvqcyLU_y2wQ z>+zEDu-yr1+OFnXt-g}=T#%R6Uz#OHwk(k$bHokWpo%~Yed-rkJ zi)~f;_Hr}zMzM9WW!=F1?&W4?XIc&|UgR9fg;-bO;Mkq|OoruiQE%-<*+sb+WF z99VUty?w4S<22om(fc!dCT8Z>j8ClYzs=J=4>mq~5*%E+^|X&dt*aX2F>~(g19kD1 zH=cMkJZ&6kc>k3|T=#`1Rb2M3q49CIG)W!b$19UqCaKK+IHx5cY&tZN_eh8t@)1KD zHnNZq9ch?^941K~y*5-Y2_a?2M`}ms9w9jt_iUB*ZuI;`9?NrOOJuB6mi$wg-xPD@ zvw5j8uh-91n?VKTF;+@tGqIL`|Drfmnyu*5=0tX-TV!9akFcL@tzDGa`cKuyYOMYX z6-3Q>%~#5&hkxaq=w9q!nP-2>?GhRO%=cy2#S*L3N@d|W#pKCcJ>DIfqUl`|I_C@L zRr8gnzbW|c#KYeebL!6h{_`(aXSq|;zBZ@wl?u%Z?^Pw$SxLynDGRi>*~|Rd|Ev3b z`tD;g%_Ve91A`DyU>y 
zE637Vs8zEaYdtEc*pyPb!Ay0jKvqw3yuMuTZo1=+S1Ysq4Ei1zu@6SM*x+ihgnK5Mkc5 z*4~+SZGouJq=>h-No@~*r+o7i)mWsd9USRczwji=z7A8UqLOPpC)MmnmZ z6=l&1Ws~KwvT9*kQKj7#v0$~9Cd!{gDWU%9u_ZHBEH$Xt#a?-~Ona*9*0bt`mD8eG z+osbA5v`ze*vFVym{)H;3Xx~1vu6@v%@7R$000mG0Ar#U7!r(#gc6BF5^^l-l?M|* z3Td)tTV>r+}&Ma!xb z0;TjpwA&-?BPJBJG|lYAeMP1;B!H6Zm5F*^D_P2;nd`vkv)eiyA7KAIZWMD8q?^m_Ps z|M?(@A@1FLF>^4MTg!x`*}pep$zhOQQ9wq;AQH-7MDZ0XAhybD4D`nJ%8LnQmlqzG z)59;8Q3=L~^hoNZ^pT5sDLH2p;z&$uiPSoXv`Z9XY)yd0VB!2rk_A|^0)8;;%){l_ z!U56~QaXV2l4ereO~Xj4(Bb;3CsV6oAq71`+|5aU#Av2`;>Bs-%Lbm!Q30T--@~@f z*lnZjTgt7}x>3q?@0y9$obMh8)u!ldw=ersc5ElaOAKVh2c~zm>a({VXCJU zzme{z?Z?t|NkM3x?)D-P zAzBiwZjW8OhV3h9IZ2P&gfyOM6r(G2IfsZVW+L8c3ANmrneEZi6~wVv9C{(LdP?oH<4@5ccONSuma@Mcq!Xpbr=P36O1kw{aNq=tCFt zMK(wRZEw2O_@vI{nVPs%pD^@y;=YXJ_TOs__%M`bQp_OdrklJKypibFd6epP>z$!Llw+)JLe#8;2 zp(+Wm4a< zN-k}M1D7XCGEb?zjSBY?pV(K}m;1@f*ij;5l@HU&85cL(05@AxE=6%vuRIwj+KWre zRnJ<)_@crE=n%R2bY8y=6hoZVf8daA97oH0&FLZ|n@JR{Rwa8zGo6Bp%7Akb`+;^h zH#MOz!@jdiL!S)eLZnxS_xP~}-`A4PZx9>a6+v&cFgy{Gxd;+OXXTwK;tEhX=GP8X z;#iyhsadh;H1KVT4ZHXu47q}LMCc9}!r8mH9<#XjrXoPNv%Mwfh3c*5Bvr@*^J(+k zYj8QngQWC6Qd%C9qxG@D9A}QXJ*{BE*KAO8I@C>)!9B>1;=>GUNUW0daS||_tz_G zfs0zdD7Hf^#Qoa5kT+=lp2hvrtaQ2}iK6UpT8y8!Qagv=``5Rvx9qSE8NRP>GkcT& zDhAN^D)9()_y9=l5Mnq0FmNY{8}M2g1*uesrlZ%bpqSm2hhXZn7cIs5_6D@3vLC4e ze{H{@eg_wg=fZ70!YLV93aeDANYRBH`)${Qwe1!X(;@c_WBJj)>Js6oO_t@$RIPYU zaz(7(*Zv`7?i^^hfJDHaPh^fmm}BFR-mPX_1BFn&>Vr+LZBl6&i+6L(WbbGtP}dn0 zCky~6;TAegzCzk&lN;@3Vb}d^?LE+l4gr6~*k`ss{z2d2t9t;K6YYu4;I~UrPZA@5 zYEJ_k^;$)IGe1VY6nL~@E0#&(i8*aMDPuwU0EE6#K)U!Pqw`Ubo%MbKx~R_&@jMjJ z02da2(Fl${NHM_c{|9mL%FiNLWG0jp?Am&W0}!EBfB;Y2A`BZ9<%)k|So?*lW$2?q zALy!L@Hr%2JtxgD+Gp*A&U4~uCf9_r%c{}?+-3bKH?v~YonRDY&P+xta5?vLjP+lB z`A6FV*FKHgDm+iL%u0RydvAMy9K!3#NoUQdHau09zF=ZE$(Kkb`?Xd++qH~Do?4!ZbkN6OBsM?%B^0GfIr4b$Xmb?AQ9 zY&3P7%(&<$?TZ5iS+h5EA5Japi>o+r`GNaJZwSk{%d(J6?+1T@8|Nsan2(tjvKqg> z9u1ZH7UH=K;$bsQLZJ}V6R;Ku1jL_$(C#@t#;hrLN2=^&bFH&8;L3p^Y9R%y zzOHo}s){@yZy+aa$h1_cbNxnhUu*58PdTwCEOBXc7J*8WV%Fuqi4;x0R|8|&-=abF 
zQ@LXnyPQ==FI((TKG!Isag!HPKS%E;C-3qOFj^zW7!S2yqTh5YX4e*h*xST{Ufkyc zewpWv>j8v|MD zW8hc}EurL-1w`@sH{$Gc)mzduLz4F}8;>ELoPAMFLlKk3v#&hi7?8J*8DbplCM*iY z`<<$}vt=(N(UOgHVE+tfs#c!v@@WV)fwBe=ieJ$Ujpv%~#Um}9Y9XMn$Xp#@#dsfv z(vqFS%I#OfU4$D4!8yWb6}a1$ULSDn1#Nzyj#+sh^c<{n@6L?OFEH@tl`s}|af|@! zN2JOCXLhf#zP+I$a|UjXC9g?b6Jx~0E+7XhdDFci+Bog=BE{?k9QU;RbtI%dipp|1 z`exnCcy7u^%5?gbX^kKmh>S}a^7jqygE}VboN^l|6Dl+njY87Vwl$d3gtfPnz1hAY z5?{)m`M&H9bm(N=^0P#IXKhXuh4iUdqCl_7FEW6)mu7Rrl8jUpfd#avP0Ns$9nW*{ zep3P!adju7Q6Cik`RXguxNBRO;%`Fn^1=X-GVKR)PLY{50StU2tJyTp5__j_HPhXD zl6l(Mof}C))M1x!)zhAVlSid7)-1Tn?ypeiMMXPQw^xqjCed6nT*LMkAG#3jZ>OEd z%ot;|vX^)b(?h2mQOgp!F^N->2>!BigKWR2q_Kh~FTqX?XoX4c4Z6h`hvG5*%!6(j z_^+BDd--jLy|Xs7Te)-UPxsjCa;?sIo!;&gdT$w7>j?!JquF!`4mit!?Z(R69!n>{ z(#-h>Xtp%WYY)s?b)^<=;5;T%ZxfOj;Fzl`>X#Nz452rKuK7^g?wRSN?N~5Z({yt2i@MBZOs?ls_$xfh zJvj>g>?Fr$j^`)tJ-#IYlk%%m%6e?n^UME^Dmv-UG#o52YQ&ix#c#e4zXh>-Qd?_z zzwbmR2{Qk|*k1>oN<;^AtnI^Cp-2H$&8!>No%F#QK|8|;@O5a}509k3u+K!DS`xaF z4V>Zd0&(+8mqGg;OVQ!6sm<%?@K|7p^7mHdeH=D5kdJ$bIa+)_~O8;3Lq*yjz;q26^ZFa+@d{T$Q(X z%LB)n2$_M@G!VeWkkwwVZ9xvl6V1{JOJ;;}Bt?n=kv8cb2He9D;RLj_A(HGg)K@wZ zZAJ!G%0j2=@1Nv=CD4EEqngQwXh$HRz?=?9--Iq&XQJpfL4DiAA6%e`uduV7j6oY*JFT^}8Z8fstAM@Rc;w@uJf;)C?; zp=;G`jUB6H@U+Iv&_cxDLJ&aL5m2k^cPo+6pwR;zFQ)idHF&(5S;*lS3haxI z<*+;g0kRMs$%oRsKbGc{e2RE{`F(^5Q8|i_g^(zsL#|MpePCDZ;SJb; z;pY>$Q|l6+abPA0Kd&+`{z7wTu;IR^w!nZW#h-*+a{TDUSr#FcwP=UQDrEqkN1kPprKqkSwB_GM z{&O1g*Y76Xu4Lysnwcu4qZ+))>Zu#l4%q*YSfmYMYF6hjMg7E|K~!t_nk$#J?Q!qJ zAu|FlG$3F;$We)PWtZpOxQn!7p~MgGU>tA z*8U-`inzzs*0ov4$hPbX+>)_Bt<`HnA89{%lZ!LMorrh|@0e2OdxZCWDV0%oj(jq+ zg{oQ=*Bwx;cvfe5oXiA@zd_0NH4_`HFJr!?iTC#DUdZ&c){!h5TyvQG$|#zJcGx_F z)^Oy~=}(-tYpblYVW2c|X2shq7toDYwIat(m(`KH(qBPeSK zbGQ8fGmXlZBkqB;+Qy{16}rAr#m)QV9>gDcEFznDdD@xdUR$k*>`}2))vF)>#AV1p zD}`jX-jVA=#ZzPThk}J{bKD91RIMz4{)t+#8F!3h+Lm^sp$RLf5FLLUXVwKHKzI)?k{cACEB$FP9EZRC&%v_4 zp#(j4;O{_kc%#rpyE%;HYsekca`b!4PFq~nb>g1pB%aemZvpnL9dzH2!{+%1up!c+ zY|QDW4pH<41FrOrUp_3Z(pA1uVf-cxH+^9NN{D51Zf?$^r_0>Qg%)k@F+5X@NfRK? 
z^$oe-gXz5tZL*0EX6S-P0FyhyU5u*|Sl|}p#H(0G1dG|gK<#S*&afQIKs;;Vbg*U> z=>DwT0k7xMg@v2ysb37EPGP!qT;~Ld#$hzW_TOb=nZPQhCb>)2tdie#n2>mfoQKA z9u1r`*~K*R^!Ja2*KSN*32Atgu9KCz58 z^KUCKX0uV;R*jRY@^`$Jk?`GB1MmOF;v**69Vn7YtefQeUx|fDJVg zixyX54JYts0ks6#lgVqe@z`#OoqFcZCx`GL3cUk%&X@%>@cxzB(d5`R%(IiVt7REB zPgqePp2hNF$AA>?boFuP@e>Re|LX8NV{a+GHEZPYcLwQ9OByx0OQBK$5?nWa`C*8L zf0emAy;>iBkC2QH$@{O;LfVXfo1}I{1h8ew(ntUkYQg-bq&5Kb>*uTM)gFge7=mo3JJ3N~~%n=^ko$?9*_e!B6e!;bH zE-3okH9&XIBI?T)BYpv9&o%wW76)tNearm<-nN()*H)bmB7u8kYC<4yVyLwCEo)~T z@vd*<0uYFAb_-w-!IT)g1fIvN%N8JAG%u!Ws6{EiuhJF*z>T>LEfW~{o7jxh2MhKRnNC#QWx_L;Ds-J2f`|om z8gQC|4VOx6{|^34ihg)1b4!Kd^w)B3wmcJPn;b}y+}<2)lJ7-Elg>@@kV_Q7y)Z{? z6C^X`sGTG?8WN%UT5lD^tsz?2LomInA7_vm7)=stpj*`#Ih26nE0KIL!5iopD6~YG zLjSP=7&l!{OxECzC1_7=X{ih~7xx8P*ewoq(H5qR{Tc?HAnAez5o7vt0N#Vnpu-1h zu3d7?8y4y6*)OnXCA^RIj#a;UV3W@a;^JG#ICpAXd;GKsCLmCa_%XYHjWITi1P&pbY-D`jur zE-D~+md%(0n2VS(N~5>w^Cfg!RDzWy*Ql8o8M&RWHpy(r=vv%ArW2f3bJU%9Nk8rK zu=iOFYqlu|K~$EkWo|iibup z$?(SDBzQJ`!HEWvLdQR`fr2tNPJ&I8evbevu-qkqRfUB3MKGz20sy6ivVC}dwXF>T zTNWjZR)~Muki_Aw!^Q3--$n>79sW2xN;Ol7yn{rWjNwmiLV}c(!XCGT3;!>DlkKV$ z&`@}&V=XqkXig3d9H!OYUytz2=E(P5A&2n08suoyFEW9RhLf^KuQIrJuAGWO(-MMBUTX zbZek^t|&-BhbO_;f+Oco8tb3L@wgWH@*&MMG;h_loI%T62lW={HPKg(*rKAeR30R9 z4mGwO`cTERQIV@5F&$lCJ)X0uk9ASwW?UnMJf>z)N#}v_m!0+2Qg7J?{g87eEv8QD z5QuB0C&#R-88y{`&OQ9u2Z^fK8N>WF#Ied2kAEV@|{w4#{qq60ph{zKM8<$*x*;;^zi zJz97t8#FiJI771V<%1p#coi!)Xe&otY%Iz`T3lC_1e(jbJ@b)RkLP7ie$o z=t5reX9K?zGAI=gpc1Vf^5OI)0?>!L@;h_&NMKNq^3|+apEPgIrmv?{1z9<;Q~xd(Tat6usz_z&VN*fBKu~;4b&Q)Sawg-;wCFO1yK+^nMS@CozG_7hOmAR(B-u*iVu9{!^_fxhf92 z5C@~J7Fa0gF9;-4+2sH7LCB-OtOWIg++-#BhN?z9T zv<-xN;2|@YysL@fH!;7C^K+OZ7B~+;vCwfjHMoez#C>AeRB~>6s-1s^ulMt8A0 zlT#kTH0!VvbEQC4Y{|b6m2k%~@K%W=Yic5;%A>WYf^S z`7{iNp-bn61jlGK$a7?dqU6CBY6(h3lwe*N3MES3mrS4|bl-cXyvM;)oO*PAgUy5u zy)NR1S@NZFlj@Ys-AdP>NL5aI*Hu508~pMs9U=+@TuSKh9kkyP6n4t9*2?>18Q%0~ zL`SsPhq?5J-=PsGZlxNjp#4MNUL!X@=P^0R(6n}$2`VIxA4ps6peP1Cd=Wc~;Tq9r|voKy6{yx};D?{cnH0tiY)g6sz?IyH0+SVL>_-S*Le$4lm< 
zD+5@W910Uhdwc=*&tJ_8)Gk`GI&ZceCc8qf6)^eLyj6n|+ZO6?iLYxDbw}ewqy}N5 ztAAGEF&%X@Li{js4wIb_zbMBL*%yb!j3^EzBB*e`s_WtX_c~|d8{2X;ac{J(Re#r? z6Am%kkJLn=O_c?49}KxHH7ScK5fQb(ZUZk5HI{O<1kxJlEWgz{k+g>;P&>@-O!}JZ zVjkT1A-!Qw6xhdqhfhZVY?`Giz+p`tghNZ>7QqNUpEnr*x>z}CTR9t2=*J3i4WgJY zy^#d9>Hv)rC}do~d}K;EC~6`}I_WNQohpEx&6d^D8#<4q(?F$x1#`pYz**p}>J6PS zp3R~Z3mw`E&>r`M^|Y-W^gM1SzE!y3#q|5fy|d#k3!jGilJ5$1=)COo%Z}11 zmXa_NGAzG(7G)1_4TuGf2|UY(!FcRppfLtY?0J131uaqBM#@s})8Dvhmgl!uPJ@tqNG@~&PjxL@ zFWw<~JYLHHJFqebzd~HhPJ%$#XHi)Tvl%uh#-%jZTVW=({tx#7(>{dXIMcC}#zCjDGaxOqOF3adqD@Qu0V-?!PsLPJHg6?VfNLB8V zRWbAeQ4_Tx031UW1Xvz;RDeb2s5u{>pzJmlW)p=p@S-BlHKC8Myo=}-uA7z=Fv~aw zQsj;} z?PAp~-sAnVH%Oh0^MKL4o#dw_|60uEMGW$F!dwDd>IUZ>d% z?3xlO=$sIg?B70Ls1_uOBgH=*Vyc6rhGrXp1Y$y=UjEL%zE@kL)T5yo+WRd*$LRRo z16yqR`AZWm_evBELa?IGV?0XAfe)6Nk2cYjx3=|X9{dTO1c|q-KI23Y4$I5GB*Sv1 zORw|6BUr~9aaEO8z)8Ip#vKE#k`S=4wkZ$r5o(L?6j#M6X0>lnZP+QR?K$tdXPcfPwn!-_Qopk~e!s01t zlk;}mM=NYFewc+!(8$m#H5wyUB?eN|;9a@4;(hY3*`WLqJ`Y6?I1nlztrhQ=4Urd! z;UQrF)^+n75%_PhNvO|ck!-ijN!NWGZ_w!?rB2Ew`2aGN8(o>GSW*dfn~*xbIxUD2 z#+Oei9*7Adid*qkl<2Y^qXJ?jBu~EX$RwxLaXwG=;r0@V4W4$(FB(X{Mafwk57p|D z4`r7;uPWoulj<0+^sfdo;5{A+{y!rBHj~ymm0|zeEJa({D5)7#>i>Ck21*PmH;-aC z&GJfj6%2W4c!3q!9`UFbxP4a7i-oRxF>hia6?hseDL< zT_oP`Zys-kY6g`-byeFMJ1#<50v1@B6cH zrftRd-Ch>xeM>4`F)bvFVR#`Sd#9zWHdn)V<^XMwS915NP5Y&}?VAs=%&SF|pT5SH+w6@i zpLW#*^bt0#N#=|H=xEcLjentb=E@F#c|HX1LwdIQwV4dz+}65+2%^oUV5ZRC$(U(UVNStY~tvj;~5>!n+$#)GT%FLAlu+wIis z-Xg_^lKa6l?3tF57F$00G#_bg(qf`-makRP@GE)LCO*a4O5_y|-fQoz9KzcH-yo)0 zXtSpF&>9TpoDzKU^r|0gZ`lEe;5jm4O*_$}_N$7hT7p)Ud%SsDmCo7wjjf9BB=5PT zimPMkM}&)BPD7*8y1R;MW5x^g8sg?&Cz`2Bk-30-%Bp@z4SAVySSV%U%200+uJBbH zja;z%W0n7Cb28UKZfMjX?Kj(?C-?4EK8k;zay+B_rv|Auz@Vl{Emz7(1Wy~TkJ;tV zE$FOgdi_&N_LQ2s&(d7Q5+Czdi(|QM@)>uBXqfM`c1Oa$LG9N3`gP8m#V!6+2bX^3 z6|WVpi(SMa0~#B)Y6xTNpcC8|Ek`tRLO{juBK6a<)yd^rq`{Lk z4W$S6<9*eN;LD%OIXNpr7iEr<{;xnR9~ET z{MOq?ZwaYI<-xt;`^7o=Pg;FBZ8>{eUmPU%sjnufBf0&feondRJ2v=<3x3x9Oh$Zg z1I0}7%%pvZy1hZS`gVo{xHj7|$g=J^W-seLSzDXv$PWMN{zsyn5xlwm^{?4X=Sx+m 
zZ&R0DBW@hE+}l$`u}z;rCD%cl@i_x^*M~T>!=_ALxINv^mrYN?*y;RM_XSqw>|%p* zb*JqJhd0Ag@Q&MsI5cRa>ywgw`^{k8%-%__zK8stTO}IG5{9FLLPxudUL;s#*L>@e zlY|M!5@LRLBCkP{BR+q(XmBaq!ORq0kQwAL&kE1&wV`{^`yCe4e~WwAG(=8t<%1BB zKpmVE@R!8a#x7ft8x0W?Lw3t3sk7UNe3fzfjs$yNNdi@HTNU}$W4n;A5d+(3W>qpe z4K$?)yFR{35*+%7b zl(nI(XW{RUUlKR&>W$8G?6@kRq4bs=5Pb8LWNA=dB+ zee<>pE68($LsfI7!V~Y2c z8GdF^&zye4KLWNYqRvrFr zgfMvpi*=IRgL-CN+lo$956Q<2NMCH<#u%yWME)5%Xkob*A~GYnPMdrgsU(Lh&F)-r{KcN9wDOOcZ> ziN6{PW<<#pmlSd*QD76eUr;&mmz3*1FJRn7?K=b&B5}OAJS18}9EB))Qy~-=N8#o{ z!qyyOPnERVXOLMEjgIOQcvitTG4p=$FYvXz$9H!sG~}hqBEBZPiieh5Y&eY$ zl!VfdVCW*fxPTjMQ4DPragBXgjU;>(OoL5@>x88(B=|a>1PUT(Wx(Ez&a=D$i8KApLf{Bx2#9J;{*FaJRnaf%>_V{#2kptv zjX@pAiA{*%KA%)Nb-0XY0{5w|puau*n~|XkI#-{MMnpJH^lu+$CEv1HeT@!94&HeNdFBoAlaiVb`F`~Fr4Z~7a4=4XE7&u{vbzxjq6fB&0({pS4UyYs(^ zH2Ru*{<*d{?9I8JpRX^Po16dt|Ns9tdf#2^bvNC7w{m-LH{CS0ou1ph+YPt2wzjsm zwswQ9-QA5ZOU3s7>%G=yYwNZ?>wj*Zo8+eXof}*YBjdttW@hGOX57TRW!+xAx6N;Z zyx9C~uT3i|YlCWttNCB=zN(EX=cjGM+9a#DHVq!5Hkp2!dTPI#jLK}AKH`}2MezkPAn^J90vHoeqc8+%o~{gt?vw|+IU3|s#j{O{GAHjVf0vdnLG@#Z&w zzOTNT#-4Av`?@#l-n4t8E4dBVVUwEPykpatT}08FJomjdZ}>dY{ByIq&#ZpgG>xy! 
zr=8t20gi*aYfhsCwZdS);3f$)fb3x70t7;V$OZ#8GzA6;qXaYzfIz{ZfDSwHK*NUQ z7a*YV9Du$P-metdgS+kc`#8}eJEzgzNBaIGM^4^Ly}8o^wL1@_?jZWJiF%f+d?u+) zwvHsN6HU{!>mc-?!r%oHEG}$#Aa#H;CN>;Um=Iv#!Xt`j!95;0V1NQgqX38l5O{qs zP)txDg7VbYP`4B@=TGolozuE`EL z`kY65kYVJ@jz759=_rf|5gc&9z(5D+0{eyz2S~Xg;w-R$G=>HyEU-XENKgWtmLgC% z1_*u5)cVr%AWsKLkU>wB{+J$45Bi_kI=B9G&-0Tz99eX{%8o(rQ9Jz*TjZ$;Z8S|8 zS>lA--i~MX)1>ztIv81?jYyI}l0@TiAc(_EIuNitM3F~y6Or^hmW$;AFaa^) zK?8^h1`;nAYh?@Q1D0; zh$C?}GM0-9ip8SAGDRLH5NBi3A%g${JR~qNAz^_;1PBEX7Zef?Bq*qeSSTtQTqvNB zuwb!hP{73!0U;4V00Eqc$wVSiC=vsPsZ1hi61l*^c|;_V1JklhoJkT{BoqxA5{2R{ z5@`d%AtJeOVGsR~6AY#jK^$n|AW=Lh4~irKB(gw~$OMrzEY1==o(v8g76u|o&ys)= zfk+|4_i?BuO+#5hX&gJX}yD zizkZ6SR@UHK?Z_IdTg+8C`%wbD3ga{;&3n%$Ac1yNFv7rj6HuqO{0in=YgEb{>Kvd!NuHg#08)bCZ#s z=e4~T^4=c%-S;xU0>RjodavG%YTAgswp*_+G51n=*|m0ZnqDHG;+kU{FXR1N_u9NrS1H6aVYgb_@UT&|j z6sGOvXJdt>^u7XPK`$#T#mZQ~3Xl0+VRhfDidw3oG9(Zp^xR!pIawo*n#yW_pZR=k zoM6*6KLLX5^yUPZt(TnO9``ZF={c&Srt`LOFgi`!l5jwfbgJ9G8~M9nK+zLk#bn5Oe|Cw z1`&*^aG*eu5HPfZ1}LP*J0QUza)1EkS;NM}VFQDMNlTcR!i0kZfx;6}fTY&Z;L9j!5>wvp;S_c)T2>pYG{(B!r^Nm5_?<%AwVQaX|( zNz5KS&-3fF^O6A+C}^P200I;S3nWZnU>P{j$T(n{a3Mm9ARzLfAsT{413Fj0 z8&838vVNk+(TO0_YY5xaxd)n>qX|3han`{;XWj@Mb`bfQ9#%t$IUA>cX^PaD9|xc3 zZ|aFDY1Y@w^Yg=V#P@UkGt~R`yLEpOxVf^;Th&#cdoh|;SQX>1%i2`CqQBqYdk(>X zU@VYa%i8&Dnx?A^>xGF^fQAnCuDaH}P2Ci+yVI|`w?FDmzFzhk`*huDGxm`b@u}{o z_qR09v+p}lIQyb|#jKluV|#z%J$~nID+V*07cSqbuI_)nM_lON+-ug%iJZQv`h1+m zshD17bzWBAWp!Qa?oLZijv|`QrWuaR?Ci@lX2tj;-z)x7-^>h6&HMb!2flK*Gvcdl zV`5$+>T%++E@xvzL_@~=%Ut=t|4aW5`TCq+JAaKm?~K3i+3>KtV!4*d+i1CWmUhXO z`3XK70dSz=#4Y)}EfX(y7eDov<*s7x*~0Dm)$Lu^!v>^JHs1X=%lln_)JJt7wBIu> zvwYn}<~v3G!_UTLeA=dpu{Cw{O=Z+Wx1!wBu~RQ%!NsNRiTA6CSwUsxJaY=Z>)yBH z{jFPxR>la}ety|(ZR)J(lO*A8#q_gjrlMl>mLGq#=(ul+jt-Q4#(Q7Yd(Y+e4jCX7 z4rFu3`(|}MOuoOz&U?m1yfyVpfM7{S3uleF`hV~3Z`tp99dmsyqs*79_9^CXed?lV zrXGK$Vpt#ZF%1tNFPS?$#*A1F)00;+=A8vK#JlWI%CaTir=eT6>7V}T`~T~sFWb*B zypSib<7v{^nE?O*0RRvH01>?~D%H{z0UlYll8X?FeT*&{Yy__xDa@AOr7yC?kOAH{ 
z6R~|K#!P!KF-UJ_a_MkmQ3c&piXGWsqEA6Ej`ee0z)i=dS1|1IF4}{Y)g$q|1LA!jQ5eb zYTvk99YD^^o)+|uu*|zM4SvHvfW#XZ68KITdeQ?w0ObW&L9zt9k4aXEZQ#DHEL_8w zXX(q?QARRI<)xFe_Nm;=ox=pt`3aF`37-Ot<+JxUQ>r4cWga>qDhm#i&Aox#zE410 zHN8R7^im|oYt#~x|^}4q>J}FC3qEJnH4J4 z%_yL*2$jmp(b9&ZQ_r5UrPT?|(9%Kuma1GI!2{ZeI)@;lV8+Y18)5Dtzp28c6B zTOf6qn@O{ekpj(9YV64^IFVO1-;7CB##L-FW2y*X7LXerx*W|R-7#%scd+vS`Uh4i zG+oBK&U`DKzzLHbk`eDxu~`)#Cjnj3B;GhTG^o5x>}=H`70Pg%ed93HAq~pzUgD*J zDI)a+YqMOPZ)qKerBm<6Gd-PO?lb4)!=0x3&z*i3l!wXyd{w5JCI23+68#w@xf^f|kN~yzG_=Iwkw; zhki%})j}(!JoYsqB*qMFZdNoeJl;G7ddgrN^-Mr;c(;=6zHt~X8K=(+iCID;wq_>L z%Lb<8S`D;Z>B47i8p|h~Ds+CLT#d^GDc+!f3FwJ*rZs%hFZV(iMmIS*oTrjT4=BCD z;LJCX&z%Dj#ngs=Q0l~yubo74)anJQ?2}#N#YEzH)kxVxL-X6zs?$URdCX+o8Q}|= zHc3^0*3;G6GJQ>|y2oU6K3>`y zr;?6A>i(y!n&z*Yxx9Z3^d8rHn&Td^1xmtPra|`-4VRdOAA+1-NK6r^wWF7qQ)xiu zOdiqT-~{mhOsSUe{%A&!nzXoEtU$JPLaH`7vOF(BwIx@>>M8M-fsT-zKiRlCp2Q_% z7(5#TlfyW_;-qX1afOdkyx!E;GjAk>^u~nepOkn}oKa7Pvg4PTUvlO$4CVQRw9&n{ z)$?FC3NP+{bWli7G>^)8PkUpB2Oc?1x@s&9(jr+&V>dy|K`3bxR>u02FdGqffK8+q zx9!B_3x`lV8{-OTe_JOD^BBNVA);@Rw%`XjLtcAkGYPNuE?QJ-1G6le*Y2Ibn$A}s zZ(Mjav@j+0y~w*ZhX|DwGd~WR<#hzrvB;!G9{GPU5`b11JmV>X#3|e7X&yq;g}`j1 z%`BxD#qk{a^|)XdTF8;|#`sV4%z61ef%9}ONx2FYm|Ybmsb(h8CHSx`tgwaN&Pz$A zP8if9B!76mqu$1je~F0yHaCCYqgOKU3=9i(5R%w8rMKANl<2#DR3OtJSEI$ZWu5L+#KWH2G-3F3Xx0Q zf|>G^eu^OENb8nuMQC5}+r%#*^N*jq9k1w5jV(Nr%K8y8%T)k^TU4-Y{o#6KIPB+U zidCK$fe<+%S|cZ_4Z`L=*puOvpsX+!{q38F$1^qSKcY(4gUNngLP@yyQEhpNta5}~Hn3`U%qAssR4fo>#a zhdJS_gC)2ECzx%a+J&6@^?Qprxw*q2k-A=G#RZBwZfK2>t0Ilq#!BvvSr zvidJuI6h=A3IzvEn5%Ov`MucoMobtD&#yUp5RFfah62~|h8^b|rn&6}fldk}wZ@1L z86001UoVo-9iI1iow#rB3|`_%YC>U_%@r!C!@Uu@`piny-Eiz3yCvQVor$&~=65E< zjc76M+EG$i%CfBNot5L=M4QuMQ*IVcRLf_iEY|NGH>y;v;-(#>rWAH|5b{k#7{mZ>dP!eH43b7`sqb1!&S6Zmg|-+c1~EV^ z6@m?f)TD{5eGmx3f}0lhiPtH}l_a^DqWTeKBmKp$HS>W%aeX-es%n(Y6A~hrH6&~0 zpU@IyNwrf+nt&6)n01D>YblUtxRFbGU42 z=Opy|x}{_OT-<0f$IzArC!-BaK)BTi<}DI9KGzvnsx>oMoPO1{PmI?YTRe>WwPk=O zIbw{l8lLlLTt+%OJqJ6*a}8J<7R!qk+&8EK(s0P3Fdo~YP&#`OO#uZ-Z!5fr%{hN! 
z50NmD+!=+`e1Qf}Ab_CZvZVYBzfk%IqB6)ylA42 z--1TYi=egKo|PCu$&iZEb7&L?_8ucx1UAZ)U}{Z`@A}xE0KMC|?POoPiJd?)0BGFJ z`zDI8-NVxCbaOY_YYenr=-$0d>Kl(ya+^YSme>k`LKQ#^6jGb?2imF0QId9Lu6q-E z6Rg@u7Ya)v@ymc3+%$V{L*q)a@%Vrc&=^?~M?8FjPUADi4oZcFo9011r_chDd?NS7 zS#xIL7)7DQAV!UCujT_JpOVT*6G;Xv9g?9uSiYgrk(&Rb)p!E+!9y#WfEWBc#vq41 zC+oQp4v^mvJAkIPKYlA|vMmu}UZ6qeU=r{scT1`=&rMo=-|Ll8VF2* zjsYyjV=nY2#}=O$u~^7D$TQw1_#Opr{^x0LK^Pm-8plYRq>hA%-BvC75p{2cQ)>G# zZe+rnB>XR(&M^nv{q(aPqMbsxg6>>eVw*hZ__aHi=4anIv|NX802l_-X~`yj*oON~ zbXr=wVp{i()CMnQ+{q7!k?J-Q{qmF082t!Q{#?$3AF`Dt@)K_(_}e)Q{!>Pg>1YpB ze#%*(08?_}Tnoxi>@^wgT**Y2dD1tLIP=-H;v>$Dv8F;%X&+3cajFFUG11`#K^cTe zo`@uZxY*eap%4v&|{WThPet%N*{-)We021LuqNC~#b5C)#;1Lm+p1&2y&dLmmivf4g z?7;@$P&Fcb{9s`&nxmboMyV}gv%>HafXRgEv6!Zl3zH%n)$z3`7m}PLT#2voO9Zw5 zbT-htk}D$i&*NZ#U@e8BA)@q%_TP*L6h;N1J$)y92=KUCqyr}x+lCRzE5Li#S_xWH zTJDZsE=YmHS{JZnY{!G%N|eB{poZs;$6#+GIdy*+h?$6cP`@36-J|<=lp}*2w0X8; zSc6CURWurXUX*ZDqA3t&ezazCZv@||+obW1Rx&Qcf8X7IK zCb61bxfY2Aht2=iQIjQSite(<6F{dzcYHAq z#*u0#Pf7WtLVl``bbhGgD5j&03F@wlG)OETD8W^=7mMM;4SMkG4sT>Ju7@^ z0h#w}hZKZ_NdJIkU{xeI$-dwe;^sq>+Me?ddYy4&NqgZsoUk`Iw{Ysd;+MGD8?UD6 z?B)ofs4spQ32EZD^2|;T&nJ5f_WX0p zDq=3911~iaNjTa5jA_=1*;C^^Fk~*$;OUJ-^rYI5V&aKD7}{)B0H|3(3M7&6PZM75 zJ(oyxcx(BTf?RXcWQpfBflMeQtbj2$nxxw!HoGO6=+GcQlEG8zbA25(VlL>L=6(Q| zRvq2a2abe{&j54^U_OPCM=5W*yPk-Jb~>rA&!y1-L`NcfX?d-Dks8!;ic zM+^;1Q))DGrpO7FDUY(3+2Cg~Mp}YWeMeb3X*C6RVvMV&Vgz_e1^mGHg+DRVCxyi+ zg-LNNJ|-!L9=@m1GMPY*C?ayPKJim2wPMFw)12pIK9e9A!`pwh(?6~ zJB&SB)=IZeEh(vg*xOoJE;Zg5NU>!sr(BCfU>hJaCgH|RR_1;*HYS1-fskEl&7$+i ziQ>i9q1HVKie$twbul4SHITg@F|MzWhof3_7miOO^9+@)R~JO`hc3KJmCu2Guj>Cz z`!~~FadIjrQI!_y6j)w{HRj247NE)F{#Cg^CdCz<@|F+2(>xOsfYR4YAeLhS3D2k5 z-U8wW1Su4-tY8*!H3Xh*o3j_S1AE0XFChhg4!H)D6r&3+>F6qkZW+-^7c&=@h*ie9S zy^U6mvO8JTsug{Rd*7ilJDwEs{frp@Z4n$H4BIhg$0u@4N!fAUl$SoG-)S68IuB*r zF^@hXMmm=G;{w0sr@B2W6Lg}j*5s_`>|us>PFiS_n+v6el+T_JKx*n!e@8La%=g28 z0WFaYh=w}s%>p;-q`X1HJRWngN4nT^8xXHFDeY+N@y@6m5Fz4uRto*2y5bLOasKtI 
z#A|d44Pz0|14&eDwgx=JRE&aVfxLlocS_6OqkHkodSlT?n*_~N;COCPw8^CQ_;Hp{2qvMJY$>3kAlUpYnH;?i zvJsBLPwf*>Mq?(i1|AqHY%-w_OF;2UW?f}i&!x9T2KZ2#wefF^QFSXu{rH+5RI`Fa zzB*n$n?|@-)`bSf`OA%K1}tO+;DE7&x|0<=zt}U%3d$Qi?42NRmk#O}Bs8F$WXF2H zytP77fm0W2Xq^Trgf_Zu&Kc-&L`8pu$svW1Pd}nn@pZ2X_Zua;!s>%949Fmeef|QX z%xiYCTSDp?Tvxf&zkWwrrFhM%E1Mxtwhx?Gtu%Uj|w*VPDyVa%nG`;Rx{|KhsMz|kI8 zzPV`eJD=xVkT7?tuBG_?GcB9-_iMfg+H>jY{JWLDm4U7>r{H*f0PfliGwF$dfNmeN zgi`zW_!G)5E?}cj&}`dQg;J&}I^mJaZuZn$o)Hl2LUqXyhcFcUt?VNPqse#kWmuP@ zB(a288vkFchf_u=UBfIq6GaNH5lfQTn_`<6OA2GJu>yqcWm08)B3wl5hFWd{c;kaY z(ln6dfZ`hLFam>nX%G@iQ`j%<@qK=f^ZGVWn>#$*ft)b02^>V!rjM3|WqW75RjQ*>#fRj>1f|>@be;?sk#$$cXPi=h!f<$Df1~HMoG_${?BoM$NA&Y6e4>G-kWrTKqls@wW=FatahVJo{N-Sf}etAC4n&v8Rxl} zxr&=*@>I0!68IG`D@g|O5@uV3&r$lP=7yO(6*0R6eg(}+(u2H6vn?5GYx#s_MwGM} zLOU0^ITZ-eZZ0bNOcV{Qak`49f8;E_#2R~KQ;km9byP$FxMAd}n*P|ipQsXp(W8#? zT;cEPkn%)BF%Zm3+f_W|EmYU!R`asLmjoR$%u@JLRmln+gAuN&R)g4u=!6>htYeP7 zu;>JEJSHp0u5ok%nr@3A$Nq|RQdS zW3O(P4GVu~H_0R2CB5JUH5pBoRxVbn4nv0FI;30VD_vvE)Ps>B4KLjgDLZI_47ABl!W zme7vWCMae2s9a^@p^Gy~OofFSk?$w&biu@*aO*xsO;6#|f7{9+*rPq+7yuY9_@#V8Ac0sfP|KnwG-uq{tLfxD-xX`8Me2yEuCC*`s}BOf|resym%55(fo}C&QM`3hk7`UksJA>RJBh& z`JSQkLgC_Q%BCG~MBD>!>_GEG@{aN*%LlU`<+JQ>07-&2W6Wfgr>{c zA0tx_F)d+_dbuZFjC!-hDESuUbKEVR;INL!nD4D;i%mp%lIP*jlqh!Ucm_iw^5jLv zZ?G(UriI*ju(>T%THG9E%sBQWjjIgK85P6Z)4nJu+~5_>8|QX5Jgk5$8QCE|>(NXO!pT_SM^audO}gwuUyL{Y1|}NOArN%~AAKThNhf3nLL_2@x#yVhkFdF%Jxf7p=lMP6Oi6qHBR9T%BsM=@ z{KU4^82bezYsyFJjn{D2NoQh8dkQ3H{?sS+TaiQ~(FJZCPogRN#UK$gX%R~G)E2AF zjUWTQlLY#u28Rbjy2;_2)g*k}zK2*cZlhj$&uf`z6`v6$D5G5FvtB;sm3 zmxVc1oYA4;oq}DI)`ZmS5r+S%dhnY}(R|MZe`Y}!QOHe4>wpH6)M)zUQrA$SL^I~n z^Ts?08SD!(HZ$m9JZwpcQsDZ;7k3QQp!#b3rtte3m!&vkLR8T;(Q9N6*$sVugd}LP zR7mcDFW=YJ2`a}7b*wld$XeEHS_Lw#O;^VbvXm;>;B1w@5aJwP-$bsbH{6u+dlHJh z#wC6;^^~7<^IIVaG1B}CnM)?6c_m^@xXh|+N;(Anw;fj;4CZxxHAKz?%_jAp!`s}X zf;a4!q4BkLJ711UXc*VsHtQkqQ!Z|>Jeb#yk@88)s^S~4LIHw=cJEBNy??!eP>>1sgD?dInVIX z1N1ps$45@q4l9V5q&6H05-mAl)hW_naS$hzkTqV_ed}#^$n_VHTC%pA>KoNg7wckG;Q5>5G2#ZZGm?`@vLT|04G-&} 
zdhn38H{2k2egqq)YR9O&%@d=FbUe<1GMK7F(g+W#9Rt^+*@&K|HW}I>aRevoAG|Rf z8XDu6BoVplas6NOt@}9LMY!>9bKpk?aVZ+dwgXP*v%KELhB{Q07g9yLYT{uWhj}g+ zh`(WG-Ddiq()P_cSxy&%ME6COqay=TR}=H&!U5jWMaTi9=xL#f>K0xN4h`%$?kQz^ zsqOO67SwUi_Cc2zn`OYq@QLcV{djml1$1@0b^R7wP4SS(gdY&aJvm!P6ikxuJXfSM zU8FcK0j_nxfF)Nz5?gSuph)j{_ZsrXv#-J$tNiUtCDOrJ?#*dNKcDe#zB`+jVDw#~ z4v(c2+n!6Mu4mYRhndregj^!3iEML|#eu;pU?r_9LBF;XFX-k;ce=b(r_-Gjqjr*D zZfwEUK1KGFnUgK9pd&+}fI6jY;AaTcR)t4yBwww44pYHDifyBJK6~pvElT$l-&eBb z?h+A&1tp{TANZ$Om=Lmi<``(3#2ar7JClR!-7cAWYx*&D0>N-~P`$bMk0+&Xoqz~>CzBPu`mnD zSdDQCC{3i>v7gkMcaKa6r%5bwU*B?SE=`^jY(%{I9=??wA|>L#GQ&+emIlN1{$e_8 zZvazXwE)vvbqh_Jzpu3@KHGf7XJ5-G5rB$&g9tV7kRz933@plPOG&~WKWsW44R9~ejg2{&89vG#T1cFa#x%}xe*=AD3aqfsr0m%8bedcG;l?R= zv?1=N|W_1urE>7^YTgAI#g1YykuzNy#h#+JH=)?p_u5e5D!?XkP zaG@4Zv%7t%gQ4?=m=gpB=x#r<9eaZOO(PsMbAVLTgghk> z?^}_6i_BM`ZOF*)kF{z16!>bgx=#bUO=53n=hNWHI9YR#d@L|34W&$ZhNLurX}GFG z4t+?a2Lx^J40V8@0V+efK+$@fjoe>U2ht!90J`YDAQupAv)qz_P8lp9RPCBH;20pC z0+%`~bfR_}EP!mEp3Nv)Q4CFsto6I<5LFC%=Q6Q6@$9z|g1gLq-%JNwtwkX}tKC0qMO0o(vw^CNu+-an>!(#V~IYwIVZavPG zHMD*$&#ji-Svwaz8~l&fCG0e(e;e%0*G*owcyi05Hy~O@D8@kpZ_bbPy^L|a zJP%(AsEm*pr?%cixYtK0Y%ejl!wf2mMZU2|KB6hxv#(#lk~+2kddm)>2Hq zF1HCzE~-9?>`IT&Q?FJbKwc^r2H9+7PclKYh7-4~|6-hI_B)%T;S^u)aT5M@Br#Hj zK(o+WbQI@rjQYf~TWJY>Q4s`drLn7c@fme~=RanSAnW&+}^+1{J z6zT{!P72i^ zDbuOlpw_micw?jFA%&Uy@YO|w4nr|NZ2PEol}$_$3e3$_Xg*&c{BPe975LY*|A;GJ zRumJqy(8?NZkdYBSZ2yZQP6^iHDqkU*QFI{tVPvJk(cHo?tP$Wwv|dzZc#LwTS83G zW{yUgJzItnqBrc~Z4?@o>ChYplEBG{E#Y3fJ4wz=-cj@_-E1Q-11uh9db|wu4vWtY%v7WQl+*%kvk+1`lXh_{CDk}wli6KD%NNM%cj|6-H_M&uEA)W+$OH%T0R2YKd`?m^Vfnr?Foh1Hnem z#pH`#z)`IugrC_fD3>Ib8DPotW@VV8AJ7zSXitPFUc>F4)z3t2+7_=mR{4T{%Qp+-Jl9EEc16jZX~&UlfFZU?>SnI`8SW2_>P+ z%9(?=(UAZI8KCqIZai+cLqwg9#C&hk4S6oU8VOYQ&d|%UQC#2-+*a!k7qKFI(EQ<< zZIc#Oh$7S*7!OmV5YdCMg7PrNiw^i|Ft6N?E)OTgv>f)gJdM9;mI(KHNSu#TX@_rA z;nf5muph3r&qgsAb-+77YwaKV`!{ecoRN%#j8jdbVv>L|BHYBX1%^&%6U@B2-LYCQ zaUB6fbaMNRn-ruiEji&REW@_i_{Xvd2Fx*IwSt;Kh1oO*mNS5?Kev-Q8-VNtIFN@p 
zeySiWSRX-0C474#smlISt`G4n=B`pOfLeBfb;BBeuY#m)cA9JnSs6MiLa5b9F@wlt zIQzzsM%*_3wfzjpPKj7X*dV8uZ(vp|e`P+DDu~?*=B;g@Z3FPFM5U9s0$j8s-N45S z|0D}^!uX+QI_(5$J)ZOmnI>NgN(dGl@St`pyC1VYN(et2UAnLKu~-B`n8)l|upAd2?xp4)CnT-?!l8;N*6 z%KRzLxwYM)%S56hZHIS~5*9DZ$(NullB2DXh_TXe2CGq9mci=qGUO(!*pEZvdpZ7KNG)tG(G8UMiaHs!{j0-0YTzd zAvr%EgUuKlod7hd{XIYkdqRMl#Ebcg1vuUHrWWG1(wsfYHJ?o=luA5aNEA&SPBk!# zl{bwCf>sOW1|}zvZGzf#qoD!`rxBuWlxKQdlc5yOBZsQaD3H^Ng3;bKEi>Ns$>yXa<)&at|? z`<0MF^Biv5WAs^?{mB*^-}hjtN-T-osGPfg3%<(BAGf9~ZNG2X(t}Pn;HUxodycRa z?WH87j4LzXAXMO9h-Qs~+L>zs%rtp4(n8`dMJV0cyE4=qD-2j4=(0l%Ry5Hd1;xlB z=BCnul?=2|cTQ?sowV-;*hSjtLT*ffy0F2=`3iL6##r7F=yyK zAXmcgffmOmlNaX8#YW;BP0k&=sPf~S+%XnWkr z6G|JeI;pWRg9?E&F|ph9ELGrqX)t+-=xp6I5uGXs{@Uwc($X2?N$6$VDNd9c?-Hby zLQX|jsqM^UDJ0e=Eg77sS5I=o_q5n>Uf_dnge3s4%AY$`Ez^?tmiGQv8<7(P7Xf<3 z=p8PglF?uQqL#Dbb7?S)rKimcpL;^w)PVmjEd!pCk}`Ge{Ux-r-{g2XUXGV$4X*)^ z0TTjMx1Q=@*7q1>K@YTM$A>2b8pyC=gM|bPC|$hztUglTntI^D%^Vs~pu+?N889@E z;eZSnAWM?*?$oe6y3&VDM7ZFnG`FaB9stKqdA6vEXd>J*BjVF&UZt&E%RRG1OM7kA zR_#@79@=`>>rAq`lW5s=JX7X{jzP`O`G7P9RA3A6-HajBy zNjmv{$5bo4JM&YYf71E>tfZK@2j0(>soj%aro&n~to2Z5Pnw~TYW5PHy}9=4u6KQ? 
zFjH$Y)z-JBQ#!@-d-boqt-J_zw#HN)Dpi|P)lBoK)oLy3w+^~CLxUmBy`7lI#aOl1 zsL7YK4YJ@KA6xCr z>e0N*o_SNPd8B5;^9Q!wW@X5NHY-xHl~V1Jq2-~gv!Ac!jj-%{tJhg3Lj+{G4LiP4 z$YM%Zwfp_iSV&hF8jtx(Fm}wT)cVl23huIw0HZ~6)It!ZdIzfN+FBY zRaaNqP4k&q^(5V!3bIt*6)g?ZzN2SbN^-7R719Q|LtSvZKo*Dx$O55oY*-40 zL*fXEh=$@o1x;l{0vb{SUx;9X3mJ?CBRFVafCGdC7brHssNeybq`FogNxxP?S;mD- zQ@Hn4EyWljt35+rY#K6*pkyb?P^5WM_HLTUsZ<-udH0MOn>R*JhSE?Xqubcgn8_*3 z5;G}JRKrDSc))=V9V#vqN}v>2N3RFVj)1R2xSyyi|~lYFB}nUqOJ%09I>8!=;~%rc1+$t1Ezkr0Jps$ozf z@nK4uqfB0I))<{gC_hG^K?EHRplqNtFyev81QnAG8Z=;txM0B;7+@$8P>lsRR4{Cq zAjAfTjB8Y|ij+oj5~A2BDa&YDDKeonWn^$A%)}<2+!6X3nPj3(j$#tumXi5lb45(V zBn_?JHW3qL!k8n>gz!p|I=MG8!MyxLC2l5>rXn9Un2SmiB7=X7D(}iF2?7M8A_!_L z?xOcwT*@ppt0Ga6FyC2IubJyycbR>3XtM&lWpAZoAxmLi^^RGnXW``LA>uxBoj+uu zL_EK(w9JAmI$~qb6==8Z3<3lQAE245@;IK`?Q3b4{&?$3w{%zaqh@M9c=e+@9ai0} zUUW$J*DW|0uud59puk8fEI?FjSa4jF?cA#A%uc=^Gt={Js-1n`(xO|mCtcE=+jke; z^X&gQ2hIM_&s~U$$=AMV!EvGXpHebLM$>^t#ZRLB@|fAF>}0p^|7|?pk#eGgU32E2 zzZy{=8WoJSI#eJSIpaxay=c@0_Suyr0HNfmX%XY}_0NP09~mL8LMF$%&Sk)EF#qwIe8 zAU#;}G73R&OB$R4Pq(#nKS2eiDD-WDd^$fhL|GcryFKb2>+_V^=M0^i(5EnV08>D$ zzf_vfXr6qA_H*oLp7)@u!qN*5$*y~P(xGItv&rMh0tK0oO^;37W;Ul*8qm@$x-|+U z7^MpF)5iF=HmH8{)LaqN)DmFh{FXV9y+Y!LSG8G@x-dh_fbt2YPmF7!UR^dD8@P=8 z8nMT;9!pDdu)I&z(et(3>1=4=Nt2M#tk6E;B+0t5`x)1OcJ!_&2oe`w@KT~~lJWvv zz`N&bhMDeY{^K!CB^EZ)YkR>kbTq8xp5T?0A!OdvL|8HfAbEkJQ?dtX>F9ZMKzv)Z z9~td#IHtoSFQ({7itdiR(KaNRfpWOFW{WHI{DL?*BIofuw^E-Fc#9{Ul!MJ4*gTBD zlT4l&gJe3!PNJbM>N?86-8~saIun0MSx-ZI^3$rb&^LGEcbr<=y#e;B-m}d zL8L+FuKbyVf`#G48dDwBURUKQC$T-|fvBk^Q$MaJiYVCNH5wm_Uk2d1(8iQ$U1xrT zUCdT9LxTOHf<;l8a?7MpY0_44C@?H2B~~jp8WM^`O)ZQa*2;83h590~rL6YxGpCcON~1Q(w-PlPWtJO)2fm8H|ej>5~Td{=a^D7{WjE zAkAa^o18J^{(PSEo;gR0PxGnrgm^`Nex6LFk9ga}lkBa5gY5lMwzBPyabPnZXnqa574hM#Y!ElO4Nlr{T^;Qpovj1m2_rw-|(H z=OE_7F##$5rxYnYCFDk>#Bo9TAOrRr30u4N`3Qb8O&)Bm4?0-(xuzB{uvQ_+#cm%f zLmR(f>MDQWC0qPRnrXWr?Ed2QCxR;qX~PRjB37PCn~zpe*9o?L*)l(D~x*Q=Fso158 zod;lL-bYCo>Q-pkzy93N02%MT{*_g6YB#+u4CeB4RsFp#v|c 
z0QzzbbF7_~8W-EnMs0ceK*oF1fn@qRX+tU3*yGe}q%5l$E35F3)|XKn4jXTpRXM_b zH+BU%C#E7qiBRDvu`#lf<6dY(fMgHtp)sc4(bpa8Jrvm$u~>^yq|JdBul%NSjz(66 zrgS4k`)M|cOx1rdjr}an5bch40>agiJjtsXqO)X&gnQRR(^=$4_ZR{S5s?1~WPmX> zZAYVCk?qwUo_`W9t=SGC{=h;)t%y_3n>3~_XO>vMs1+>Fk{JI4FQJ*!y=WJGGJ&vA zf#GqDm76jv>+a~WoSk0g&9^_6Rrr@n&4)DAhDXj zwbxaHd=dtrIvmCFgILpYM)ZG4UWXJ+@=B{0wzzaR(bRo#L4q8PK&eT*R9KDpJ^09_ z8H5rq>)hZwpjN9^`52@2z)crkU4T_jV%JnP?fIEe_!_IW<<_@h#F@n+Ql?IKv6&G{ zjfv2E_#lRyN5&cyZ=ALLBZ%xite4D*3KkfD0lu4fa%v z8TKvj?EVp*P&h`6WSOy3X)%Ud!R#V(7m70cXH24T288C~4xTSYxeG@srr=_?-8Xrr z&qYu3b-IF@g`AB$Ct&3~c0IavS!6>C*iF!6^Y$S%`&-sRDytXJh*E595LIr$*MevUF`^qmhG-y<+0wUcm!(Av@`M<%>G=CZUilAE zx-`|W?tnVrx%#Q`!2&y0I)_srC+7GYm+e4VX3r7YN^zKa3wn?y^VBt?<#c~L-DynmF*wBDJ+-s1nv9RG-bhvrv&7R3|Pq zQBR*d$lv?8sO9y*K{Qdgoj=}b!9WTZLl?oZN&mny1!q85 zom8itXU0$M|coOlQS-IgXK6yxduPMFnr6r$>Jg+Wbm~C7Y8e!*` zrW0#}oIF5z@sZc##67hNahp@wmLhLz{$=DZ^DFI9UiUueh!@$F2A7+aZDvbJb}1hh zyrEW2U|1c^(8b;BO6aQ_6hIxej3x^pbOjfd0W&I^HCNK?8uViF=lKZT1MHq!wo!)x zQz#M!@cZWT>mH~$cL0w>SLz4w_oE2yXu{C>hXMOML(2i;Qb!4Y8aw81brO!OC1S?D z+l0fidIOKxY8w@TXn1=N>`P|4MiA4$Fd(u~YpOih8SqyW>m5YXJ?@?Kc{5>j}2cG%=ji*p1kV4W*edp_~uJ zT`?hL6|-4NF!mo)alOy|secO|;F-t`G24q-y71$M`cw`#_^_BfMPaf)`od(B)6ROQ z_a{_@VDYj&12HHpn0e@2F_u%`m(0gW9=q!@{fPOb3s;bo+)tmEML}USAS}$?g;C?$ zzw!8K`!^F_!d3c6_7fqL5jjmZqpAbIMVxhpMrIew@z=kEjkeFCnI#Zyc zUq*~PYpEJDUpxaKb6*m-2B#2|xoE#d$IZ+A)&k0;w2M&+k{AJ54yY1>PsbIdakzKO zybL98m^2nWjnLk*K*sszC(G_Gu(xdK;rf|5kpgZ}9J`j$TlI@bnz72@#VO8ZJ9hgb5qfqtr!B}oTsRt0*Kyg-IVMG+YFc|r6VH8 zOLGu`vCZjEzzD{?bcdv)0l*nnC#4(n>?i3p7)05Lj$tX1&W7js+o*lo;ihfWhB63J z{5oyJBcVeRPs5K!AMypWCiow6dT(P}(m(!B){i4xyH51(&_I_d&I_lXlneWQL#o$f zebX$1xdf-ZO6U~luEL~~G~bJbG;m@t(x;pd5o(pFGu0XWd~SCsU6s-p)!b=_P^L}n z{l3X2hFM>HJCNCoZ$)A?deON!lI-|ijF=HVKe{U1RFQE-jTCJ~P)eK095I!AICCte z4pCn|;!kfk8hOVKJ3d07t7&A@C$`_2_L`bg!+DLfl+mU3>^iyTivuMFJ?YA1D|gt^ z(3O~?DKXGQ@VGR%&f_`+kkpVTHY}{CKgq17p+Ke#el$Oi3<4~wl+rjD1t0q&< z5rJaX<1PGir?L@vbZ}U5P&)@0NjX?(R$t7r^&3GD*E|!Q{9$s;<6CZw6LPdAOIpe2 
z9lmE+smGBU%KjJVXAlyhaDV^}J#WiXZh@-I7(n=nc2?VlmjMp-E3l^p;E7+#ac7Q0xRq(uvMu+pRImgdkYl0Jk0` zgP|Hpg>S1(z0KgP)#tmg+ZRrf;0Gt`<%4%#V9*%9Z{alUi*Y##C1Habos#yY5r^aO zC&4AfXy2v#fD@B9@L4x*h!9D33m@@;-ASlsY^~$$B|JnihReI=G>&M%D6_GiTr9}s zI%B6?_xnD{e#+Y7eC&oXgm0p1Bq2eqKe+C7Mx1jtNK!tfg-6J&v@<)8gn)!p>nj*A zuiZ5R!bD))d5)T}tVY%-IuS0cn7uTXOVOH`Df&~P=_p6oPaKenF!IRLW3=9*VJTLQ zY*GPZVRCUYj??U$h8~g?V~o3A(wW}k=W^CbXyXGgA`NhiI!0Vw{=)3OK%a=REtPP&dX#zv88j!kt7ZUXpX- zU>s}YfZvnOB#qF)4xa`Dl_g%hU@CMe#^`gS`$oFS6pcvr7jEg8MRF(C4^M` zG8NunP*t*2Z0LQj8Z%z6jhZf|6O5@TMxkd4VqUe-ogi~G8kCqAIzq3aEJDpxr8Dtv z)VP2O>vHSpmTrV$?v#CmVmcWG6s>l?8NpBsCk_HR+(c{|?a`*Q4NFD%p;_7>G$HCf z8b`I~*d`UQt;tJgZjL^pGLrPvIU7Mw)FQBlfMG)!5S%^4RV9lCS#ugt&siI1$l{&2^ST zKFfvo1pk&uCHN-FS_)`zSkvM-7|XDf{aLe=rY$*u0tBIUki`_C=!)CIvqqwEiNh>i zTVkewUgKoSFJOS^dX%Eu=m4ZCJbd>XMr;f>5b6c~eHL#s0}sIerB-+ui)@Aox&SI! zz96j4g-*t4MP?+PFXvcPj#Uf-)>)c}tFvyZsj~>WW(kFD6N&|D9u(6A3W^=t zz7CI$8#Pe8&apwgghqu+K(R+%yN`0o2~uXZ;A|YO0n9;be>bPr!e)xUyV+Qmze7X| zqD+_i7$x`cdeLzXiZnQ36Bi+L#Aro9Ax3)$6SY4fd$i3# zIHIN!aqE$mZVk?P+GuFa3|qsNUZS(dA?6kaP%Nk^#hjF*(8@bTl{+Y_wFS$a2APF{ z^Yk?;q6tVrpU#LT_vsGSfaq6u6hlmljip}Td+NLY?s;o=gW3gjf*O?Zywp*o-z(+2 z{7a%kGls;KK;U|~ao>c>{{{f$2t2D|ku=59gh-(#VxiBC&S$*%We|`LTL=aCT{Sem zRBZ&+D-JkS2GBW^R{xFzhU*^LC_6k$h^vZldMkGr_gU5e(u%#c#% z)R5#)g{Gq%u%GXMRD|IyPaUJ}M=s>6VW_qoV^SGQVQTLz970(x;9@@6orOi&D4NCb z-Mp%YL{0Zw_()y*^c+T!N&IMO{Gv!{vyaV;h#1-cdd%f&Y8369Omf!5zJ^iQ4R(xN zRtpGzWSG;E02{)ova`_=oxGfY66N#hZ9wiJ+>>ri1cb^lt@D%)DeXcTL~8w%$yB1G zu-l*TMNkBqiicdN!K8tTib7NJy6i%j|Ec(U*ij0C4l@MM%AXB2$e>PhL>|*p_gn)< z2|R~9<1Fl!3g{RZtpq>wgjL!$gc@$k@^MF@`7<@H4SdQ8%D{#}e3e_$F_GSsh&Gv~ z_C^B~`7oYjOR1IN(5^c`*jyrMSEGdc@B+AO^Zl*bfzKiH# z)S);UJpRiYV0!8;^?z9!gtxH%T-~zMsAhU3*(fSoCx?$Bty1h~<%SuIOF4Xz7J5DA ztdt_8Ie%MsiKquUx3n?b&42bVt^o-_&SParu2(8|U! 
zx_ImWz?}v9kx8~m_Z*FhhTLSH3fDPm9%rQ?ia{a|VZ{-@rbS95QEvR8U5(m3UA8!k z6{vd?nNu~Mav&-~I2NTx(Tax_s4tO>Q~M|5EG_j7XHT15cOt$wq;J7jx4GkBZj#i< zo&~!dNzsz#stfB7aCt9$eoVX~5NywRT3gs*5_U8sT|&Itf#p5c!l4?vE204#%Q1H} zD#qP*G`8X3+@a-A2-Q$ck-f5}6h_IdgRRNMuxgSW6&rbm6W0y>1-lr1XtP(nxqu)Q8ZNF$^&Pb2dg)hztUjoMo z5P#CZ%QZ4b$MPI6%!IpVQ@1w55eDynUTsnM#^eJ~qR`d)&BUL@s>}qRQsHV=mHI69Fb2^T0s&S6S~EDJ1g4J;tTZm#(Lh2n$wD) z9j<4tX3-fby+cWThkKrHI;Ws_rmBZFD2)Q@VUjNtkL+LS#gfl-A}Ed(sL#o7RcF^l+(xU| zzf>DU-Et#HFvwy-1ycb2UHz=dVP>O)Vl`VR?B~>9emE*u5~2$OBsv;6sWK3ta!_+z zfP8Z7dg2oFP?jW(|;9=Nd@2k-&YzqH$y5YWlsJwh>ZxgSBQ5pF(-hpCM(Jy<_l1`N?f4eIpnfhd(IIF9B}U-4}Q=L#7<09SqZ3Ned-`DETfB zt!k6fP~CLa6R|o+@=+2{7ra4x+D+OCdCa(I&WXY(HcN|v2AyEFdtz*g)C((Z8xz=u zinMdj+#P0;Gq)^sBg#W!t5)K4c8k74ffN3qC;7b|bS`md_quDgAcESPz1xCsN=Efd#QI)AUT+AS~q!DR^GWmgUsGb*iC8RSij-oVDq|qb*XDX9TVj7`Kb7zkt9QeobRi! zQ=)N&=zhD4WQi`4UEES@-mNo3Nue~96lzj`x>qgoOb(J~Qa20CGBPKnDlHm1rnYDz zMR*DtC&>*d&X!jac`?o~Vs;984K(R2rxeEENhRHwzj8w+Cr>9mJNnXkebS*vzZ#z& zVjiPA(=!UXJ1?C#*NoVz8L>p8C1t16ZKY{NNX9lTDGS~bl|<9x6!KZi6qk;|G*%(I zJFRF>?wrnxjkhvxv|}1AY^>tsm+#Lo{Tat&eMzZdBViHkkI7_bB}Yr&@2}LZ@_c+u z{wc2eJsJIMeQ4eBX{0BbXCC5zcH@<{xRW*>GuIikag=GxjfaQU^OJEJLswX`P?4)) zP9FYQN*hXBB$gquJS{nOHFn=YFU@?aId*Yu|1&y@l1I)qr-iA-l5|pYau>q@f(SNn z00#>UXkZo$0D%J&!30bOMuP|<5J3bqU>G34K?4j}U`%j9DWe0HzmfBS2OfCf0ig*^ zXv0@tOSN>1Y*0Hjtg5oQRaSz+D?!2b!OZr2n`&s^x75mvDCv?e={{UtDLlx_(+|IZ z*}cmE1?Bryc<^Aev{c~%`2h};n4oY=P$)AV@u-lmmpOubAzltPEep%)^m~0 z@b0h5GJEg^qtUpChp6zN)(^HERB(5v@W2n~2u)~(1VSe$P$ek1d^q@Wb#WlO$hKAQ zDlVZ{>yq|1K|wW@&{4$@Tn+gCEs5KFquyRl{A~d0q&{e&68( zsH>9DyRWQFLRS*HlF)ms1VTqpOIe$s;3U<;7OyCV zMA4gP92#m0ab7`uD)h$TA5)1DbpFIucS(ru$-EUu%(QrF?5in;tu=nGtbN5h#*eYP zb&ED?5G|fNF?vJM(4g7^OBHq%o8^V&Y6saDlaPKLnm&?NOc#|lUCSgXTS`&t94Ruc z&?6-}OpjpQ7Dm^8SdM8?5EmyiPPUk`wA>>4BN{P8TBr|djZ4Ch{0?(^{_81Z{N>J- zJ*)RMXJhsclTup!D+m1tp(?%7=l&Ufe z8pNz6g*x?_9;47SQf!PEwQ@&>DRYrxQOcd&LCl6yp=-)hJIZB@ox8e=qJxC+7#X9a zNBNNARSL~QavWEc>rh23idWni~L>G&J zt9tZv8zEHaY!+2)*fPx>JVUFt&TV5xDPxM%Bfb^$QDRsW{HYR>FLuVAJ;bUYS6DXA 
zt(0L35>ouk8d8Q8>xK&^mb@rZdu^1iJc>Mt&>EbH+V{&lfpyHSt@3W7JIZsSwrQ!MwPhg5B*kd;FHbC4=4=$rwitD( z%yF=OSfn6lC~HX#yitX{29+__AliH$G^%5d;_BO9XA%Q&zD#Q-Fw4xRX6N2qRx^Q_=DY8_DTg$|n>Ug6kZ4wT z-$2yuL47$8=O;lq+($al5r=yI4n()cy*1j`>}DhCKHUAAUbA;U9PUs^b!Lc%m}qx{ z*Ko^-gSXCxtIy;2x5YGLqS0DJxJ7vAqOI@w>UpPFs^!5>6;JW!x+}dKTG?>hy=>3Y zgX-_HDD~9m0qM8(O}exD?DnL#e<;!<&8t;W(NYn8d&BFr_L*5XY*r1smG+W8@mBf$ zu(s80id2K?Dv{-_aZRYCYDTZM!aF;sU0oGnq1{^Upjkz{UizxKbSb`wA_g!l460wf zT2I(u(z}v0b2!SOkd!z})hlySDseTeqAH>qYE}*Q8}L*KR3hsNFHH$T2jZrtIG7cE zsLc<#<-tRpJ@a~3s%urMYdCIhu5J?F+$56F)#2XM@36NLb}O+SsI7kh9tiV5prP47+^>GZY*_jYggxZVw;??w z`Aa@;)$i&MN%4#1-1LB!#~ecO5SK5W^dHBHK9pkrZM}{e#=?@jH7`P+P+&HyyTIJ)^wwNEfw8-8@^uUvOhkM(+da=+t(HD7v zdOF=*t7&NIDp5mf)w33wt``mC zSD2U<&oN75XL@OtX2W*mNXbx(TaEqOlz*~r*Vpv2P&Kt)iiG@y>Q`HA|LRZhFYeF7 z`U^VI*Z9QzQ55mjM?&|Jj)T67^XZ6E^QM&MH@xQPdywczSUmS6((OGZ&+x!gI!?{b zS(l;$94OX+xsy-;4Thu#2@ety9wa0r6oirRK;gjyCZLcIK?D>M#smrp3JQh-3JM*N zKmr5~5DOYGNB{x>7>ouEC_pd{D7Zia5=fu`fd&;|;Ftywm~&X7C_*@B$x~YKp{aPVN9TqkZ|cQ9YL9JGKebw_CDD<2`9Q|ajFoNK1D zwV$2Y9S(_Rmz%a`UbCrgHm}pSSuUBG&19C&8@^hua+a3Ml~q+$wy!EBtiIP)Qa$tj zp5A&@YvOH4X*`~9d31O8*3Pc&d$=b2Gq4RHNw=?2(@ckWu@&vhgLqTGeW5>cfcD#?2$MsyAgN0meHDx zIPclgW`EhpMxdQh5lcqA2rI&H$h501;?(q3gqgiww1f3>u$#;Es-!BaqN+i6|KqR! z{(3^ccS12Y^TZ~h@qWzv36FPtrakS%mhZpc+09<|bJbrvy#CwSPuPFc|Kram(nmc} z{pvfo=+<2mWsA0S>ou|Kwe{}KMC#A?o|%oz?16U6#8o|a`0*yxCf2@f;wH{SkdnA_ zw;E;3&9001&Zjshc6cA{ZxEo5YkV|4^Qzjj!^G1UdxqqN?9C{myTNw2j z7F4r!q=bV~>-%2}$+V%)A`?i59twvz!m~bv8a}x^EHA2gghU)X&n%3kJWwo1LKERE zsa_SC2T{t>()e1bN28ufEGBO8|4RgW8x`aVWQx3@2!zV>8vwUtGMX;kO>kttJW`p8 zMPNau)Xhd7!gj_7eaL!v6TRS;wLv&H&{%C!IR}N0oz4x{6l~`P;@U!`l6GN@~=1I9RERBbd$(`zK!SjFBdHviu~ul zC}@#?eUsMlCx{b#fbqGJ6mHOvKRMNnKjRq&m=NTO7bO^hQWw^vZ{+hVtB+A`y2wh( zK7MIZ>`D?&)uKAsa$Wq3wUKJ0EOQ|QD45l6157aJyXS)I$%25ayKm@Nb3kvkq?DvY zhR0_?XI4#FJsJZZ2AfLZAfZ-$QD9t1!Wyc$BxID*;~bhT%QuuW1Qql?wbay8;=m`n zPeuaA(RL9kha*TFAqFSsIrx?vHuV0~nUD=CCBhAwb5;SSdl~`<)rM^R)W)(@uA?^| z^c*mc#1W&>Bj4L~H!>s~QBA`OfeQ(_qpy!=1O^Ev%PiVtJ{w=h^aC6VbEddto9HY? 
zNh{EMy=hC?pqf~@OuOmM7D{^*z3Csxyg7P@BelVZ4j^X3EVwMR$5Xt~C5y(idd{>r zv3larFl7?N{yHv>S8tLbfE(TvrLV&r8$Pu%eW{@u0D>cjDzK*w8#^YjO6wtJbt$aN zJl8j|q_CzzK#HuHl<}-5Oi)G@)_UkhDnf%(>J1k&(%aaaEusO;Klh#K@@$qebK^_@ zW?boVA)XDJ>Ij8TEAG`881*bDL#{*@!VmPc;1*x&Qv&GFZhq!6wCFHGi4iX{Q;Lxs zh{*~bxcCk;fV7AgRl8^!!|KXan$G_j@V!Xch?nKU3tz>qtD&E+L@pF5?_Vug%O8YT zAKr3a2tBZqSbt;(o3cyS5AMoC{;*(Va8w8{h4*3Lditde-jH4_Wu(B|Ka%-vK~9p3 zpxzN1p+~XlHRU9dMVHRn#!BCs!yp6n7lF65fkucKXl}K1>8Au_UUcjV7QGuHPoO~I zPDi+haQ~zDNwUoFGt2=^%v_MXpw6nUymHk7)T$$|!o1vdq2v8aJ?Dcfea?1<^gPUZ z>h=xf?+Pm%QG&n;x|P3sDy{r~`id$;h5kkzT~0)eGNQm>e`drViuUtf28RdMVquDM z=ofNKBz}$-5Cu<5vk=~hPCH|xPn5{1Mb};cQF%e!3_f=yXnauKu7Zw_ngeu!8XE`2 z#X-f-XHY}jE~YzQg03^IQ$Le)gkVNFRCmM^b{+(w7pTnBdlnxjSGlD`@CfoB(m#qn z+yDr@uKhZYhunjy&qXetJ6?T-$VubZ>D5HDhtc0id@RZwVi-Gikg~p73?-(@b*H2O&)o3L& zqTs|3c(@(5+q{S0Zc_&^QuEYi3vt zA7d86AYVhGz+Xdv)#LRd%+dHtUrBY~AHW-{|DN8ynCM)>1}_GQ9!zt>&Wk*~ZBk5c z16bGRL&42wo^@>W?R$i<({TxY@O|VHf{y*6>iGJeAS>n-@#(fHX9XEB#u)at(Z(U5 z6eNQ_i%JcAb_%Nz#!O|0F>45-X9oA&midM*)V;;fB!nPGyg%WvwdrOCnTlvi{BTA3 zFL##8Vf<(j(x@YbOYGH3gRW)GPEj-^HY0! zv0&T#+=w7FtkM1%u!Nc9$0VDnGX`WF#RgGiH_xg_Rb`ny7)iTOOKYS9@$B@u{yqbG z(o!0o4z$c-P*2&*TeJ*h-32!e?4tE-VCSdB&e zr1Hs>WiU#Yx2~Ih%5gZYkQBGG36qUjep~;hmEtlxyR!Ynlqrxnn1i>`rrnh(V;{O* z>r-&mP;DT3^j0Omf@7f0IdbXe9M)snvxVUB``nS_SB{tEWwE8U#FdSPa5xD5?sRWt z0i_I_sWvRKOzPJ^ z8W9q9xy3RrI-Bh^_@QUdvv@eNcIw%Y6oTmb4H_#03$SKDrT1bE1S z(F!)E-B(Z8iy=CjGTiub)}pcss@Ya;aS-M^q;~iY34Z|M`>vtMuoyD?HFYX7LNIf6XSn&_?i=e}&2? 
z_-77RAE~mIGsVp@F8_>r??juWjKV$5nw007ce6X3A(bx+d})t&(?3Rkh6li9bVgTZ zu_@OlzE|S0A2<^j{Bf#>ri`;{<6Bky)xDlh!3?5<*@~E-=@Z_r@gN843|`)A#4;|% z36XjLT7~e8G_ISw)dZpQrMrp%5km4(=`pD0fESg#{=+O86*AAqcz93=c60t;*8Ysd zOPn6uAN^%b>^*|IVGe8H!{zvk7`z_LtD>u)!(3S>X1L(C+ng*Mq3P2uz!Q3mRXIk% zjFg6<*_>9G2@#^J_ra2+`IeKkT3C>gcYSEDqgYXn%5nK)N7m31r2E{3ye|HBn%fV< z11LOgrxU1$Z;NM>GVBH%+N*AL8Mn`K8XUB>vA!qw`H?&rb@PnG@*oQsPdh~y$#sB` zB?Jkt{QzPz*IyyQL0N%QawY~sX?QZNUIgG=SefpR0^;f521CRV)Cm%l( z?746JhH_=gr7&biE|AYhKVPjjB&;z&MWa6!jzmVn>ei+S2@2v$qI+57k_v$vbR6Cp zAN(ioUH*=R=wFvem`ZLMsLp2TEiPP)D_O2=d}buf#3Q;L&bLZl~?qU)&BtZ@M)+qdVl z7-ZWFeeRwQ7P&kpV?{1R42_(81$I4oRAbrH{O6}3gy|P`Q=>uRXKaHO-C`60r?#|^ zoo|B&$0rF4P%*2euQ6<9T&%KB=bhcFYc*#`j`p4e-s!c)n4@Eoz@AUz$9fRHHNNnf za7eJ8F|QKK<!EJJj5lLt*``)?ea?uYK+%)plZ^>jGrc!;X+=b0!ga*aNcpK(h89{^sMA>?F~JT4myP!&)>WO%6j(k z=hhVk&7$ILHhl=~FNsk-?;*G;1{_BD+0vmt>ZKoTw}d(SIypVziqzsF2&1=RD%WSf zA04Gd>t)$jD7#`zN)gOnH>GrB@r+yb6LFqTMOYdjee%#xP8^ts1bzFAYGR`iD$2=H zC4kZ5_MBspue%mE@mO8-i9mKBpqMS6}!&O#|Wul7Gqp=rAT`@q$u1 zkUY`do{YOB&#hX$fky7;%7q+RN(wZZ%F~w}NP;2w!b&7y@FkGbE3$U&t&_gyk!6N4 zkb`LOp6q!VY$l4^Y*G}$3pSmU;K#bXwWnWyvG4~q7eDA81aTVx*GW-+)VeMFUVZ*) zwDTc(MW!X!7}G=Y+8e0__tty!52~2cerQKde1i3(09v?%mQN>w(~l9LSz)DSny}rY zqMFVFIpiX7ugKJ`%(w&^c0|pR{+W@-Y(xzcLtqe+7>0HzcFIQjmpMW=s2VR9g9qTq z$G{ti1C z@USy3u|3jSw6OLxJmg#mV^C~I-PIkl=wFC8M3{Wz>Ug)ci{9R+k32-xRigdrWe`=1 zR*I;m6)~7WOYfP%h%;^g%)=b48Cf`7fg`T?Aw5(W!3LI~v|pCoKj@tzhw3=J>I#oG zU6Dm`k3Zk4A^xeE0y0Y1fO#k8e{ z9*xeUqfsI>U_r)bw@!;tatu3!hJ@-K7In0vB$*M*#=}pv4JbjEBsg?7{^eLx@Sz5n zBu*s13QaMEGL#|FkE3ALQnGkIurG(Da;%FQ!iL4zUE4qLd|ER@;M_XSBbivRlf6 zv_jC1TzF$vgHAN!asBLx+t>*8eDy&f^+JUtZh*l02S<9eTPZqqz0{D6j=ag-CyM~| zlH1dJRdFm?#o*)`v}0jiavxTAM}8is;?`oqMty!)-}1tdNJcc7v^>jRSdm9Q%D-XJ zO>hf`6FFS~5GKh?)iRDC;BJ6TyjKtg$?vi&nkWGzTAZ%RW4sjW36MNT5gw#TFP!Z-;*+x#pM|yk1m1xPg@QBr2T-T77wgEq z&Ec%sn%br2o?q9av!m%6d24&Z=bDL73yuMU`CVg2w7u$4;>|r$A2;L70r&<&rjv=Q zx}0_YN~%Yx=Y2OcnS$qX5AKT!ih&x8R&^*ksIb;{oT})fLKAr(D~N};u?H2wF^)i; 
zc2~}FVLaU6?)ElJ14nvysJh&3VE{RIVQS3%d}N|MjQ}B!to|ijLIGA}F?J}}w({m* zlsMKd28W_vK$#lx?$juG6*NJN8p5#t32WM)V0XB{L=rlCKb^Uj6w{$6 z_bEP6DK=wZo*U)_nK<E9C0MBStutWl%8TY($AiLz~H(ju5nFrRC4uZGK7R@73^BqS>4fS2)-BP2M!8|o7vM`Y!2JDWV zO^WGt+K;Q8RloZZ$Ldv4MXRDtP6}>`8!)%DJ?7rr_Bmk^o{Id|BiB50RCs9FJm6{{ zkCfH$$Pl)y+|O2uD0=ns(4@*zK)ZDH28bk0To>lYcPOFTI)w5LK1#;{ubIKBV>B-# zc`C6~G-KCCSdA#57{}t!2~rhBF2V+L(yvU2qiwDnikOSsF+Ubs*pIcR9eC+mvRvOXhSG&m&w<`#3qZs1}cly1tNT4iy z$bZ;V?5WM8N`MjjK{aw2&-2#SJ_E zUmQo|4+L$?MGM)&cA)5vbxX9n_o=vSw#dp=3AKWiPrLb+y4R~(VfU&QE1l_1%5U8z zC;<4erZmRWNI&8hY}TDFQ5FjeJo5_-K9&7Gqm|WOh*1~(Y4|u8JcMyAJH30sm!xN# zo7yq^#_XMnZ?p8PT@M~Qa>5OX@l&|D8*TVfonKA(Bz89p!E{&nJ35_PAnp;W*Xx_` z4eAnn=#(Km@C8vW5DXP|`$86WF9F9Gpm|_pZYGP+ERi;x&fe~mD4YkLt(Hkk4;Ig7 zo7ww@=B(C4w$4sDac~kmO%qM9X!uCxu!e~}b@#-bCDa0a>po6;qmP(NVly&8zS1{Y z(&y%aVm4CIRXM}x4tCkduHB*A!bcFsK z?ztqhu6>EHEI!F`HOwn)z$9VNJsE9RQ3d5>j(8Dvp0YS^++H^+wzo--4Xd{4xmX;v&bPEF=eG|1wkA??<#1rgt(y#?%MD&H5!c zDb$@!?IDfKw^LjA6o+_)*T%mCCt+rFlUqodjsOq5Pf=7Gl-0O3fDr%mrl+zD0_QnU zuXL245-<*T$Lx3)Ek3Amz^G%VnV7Mp$923vUwPTD>Da|2Ivb}up%1EeP7jVv8T*UG z`7wd!pK8BhIw$$_S5n|IQj2MYUHuP}42FX~>fB-l%dnFt>!pA5%i3)1u5w@jGn8#U zclK3qU+8dgihk^;)Pmb@^S6rxE|1^Yt*L#J@&|A-Lia^xnbp{6o;s(Rb*NE?Q648v z6PMw*h~@F;JE!qDLK%+#Y?$Hr;g_p`2?CE~T9XoKMU!TEu|}lc zx9JxfF?FEoqn8`4A1&_Lwx*J`PgKw0HA~LY5fKnC0sq%cK82~^YtjxoS({B-xuqF? 
z#Z!8VhRGD**Jz8|e^Q47HX2muSaXdk&By>dPe-s5Ea`fW`(gYVYsO+vrkJv4%ttBm zo}Tv9G_g)NpNX+UilV3Wwkfu_y)U#!a)*A^)J5K|Q}WM^r*Ty$zBjN82~~1?)=**Q zVkbRRxl9J4%^y;u564S*M>w;$ji30qy0^!hAXY2ZUtSdivkXW@uF-WxJ3<}k9+=A> zf)OwC)#cZS;>Wp$2X#_q;*CG1zN_tB*yRd6n_@I*S5 z4bw{`*^mPv)Fo)+dsDifWZBR}z6HkYp2UejHg8Gzs~C~(Ge>NfFKj>pAS#Ni8{ds) zz*)>QkJ?Q*4ucHwT`Q>?iHfIzE40MDu!eY0Mjl@ZAl>yx{M=`o&aXWy9BV!W)hX`{ zQgnKh=Vp_tZnPmapi&Oy3j3C=oh{iB3lHFPR>a|M*k46@l7$g9#x~SsT4rD0-3eyIkF`<~uZ9)6XMMuIZ_hQzoxw>O8w);H(!X~KInPA^ixZzn3jcj_cxvCbL%NiR=~dy^coBfW10gRbkRg1 z!0{)ZNvd0i1OeG7g{eraG+8clmTb{d;!j{hSR8ft;5>OR7JBKnvLeb0f2;;oZ*%v^ z8F`Y>Jv)Y_SlaFZvACD$D3U^T-rYzHvg8l{6A~btX2P15f%ZXN&a&*w7RaHJx@JxqpG1!m*l_c>7^YBHJVbw|6K~4 z>RpWmMC9GEo;Zzlp_NF`c-Nfj@C{Rk^L3A&a@Y;frzGh?qJ6fKlIc889cCsaIV%a(L zghnkFQO3Ae$@qJilty<1kZFRV z^Okpab0qJx)aq@Aa9jL6L?xuk_yWg^4`8v3pK^K6_uSWoqepS&K!46ww>H_UCyzN; z#y_h-Hsi~ihIEU{HAPN-rrvV6Lecs-5Yf*FR@QtNOyKyjydnlcuT+#L1u5hcl&M{a zerdKs@qZ`~F=^cI5Z2obEnJMD8k8GUlS~lXHbTP$cItsY*3oh{QrLNW;`2W39x8`x zJSYB;D+CpV*?6AuB8-2V{I2F{W7e{AR>02f`$c+&Qwc77X* zQ~fuI?osO~oUC^gxGfo}^A4QERJUf>jWzT2RnLLIU8K-q7Sjul#rQU#9SbZ6nF)H+ z@{AleaZGzXR9U`~qo!xdAJag84qR}8FIh}+dD{50ed(x+xNqQRWVz{ytVCK#h#2RW zgos;ibX-nzLq62+10rR}48Xz7yfVdYRiq3OF^em5<_mfM4|z!U7a0ewczz6OXh zyVQ#g&^bpvUg0l#9q=(Rr{t)wppJ9*3~j86I$heSyp2}&R9Xh%~fO3@R3Cs;> z@N+S!e|-@70riW>oN;>)>I;J=XPjpsF9-s(1vCWY1R;*LC@M`BiV(_wuxQ7~QTr1J zZ4%cv@QnwU@4Gi^%fa(*Gy<#Q`IBcsQgbdwz!ZHtjTlQ<(A@gdB`yQ#X~>7%o8$2t zHfLwqXvV#SoPMWhv*!g1;VjX5Y!bnKXcbV_i`Jq*LM1r;4bxPu6s7bn5=f|abCy^} zcm?gJz<&Pk#+=E&5>UW;koYeZsh&kDEryRks2>bi8oY8GjWLFE?1Pi_>%tElQ_RoJ z&QXT^k!jdM5WP!O*%LvDe^fNzv-E?EjNTX?i}G%fN`Uv({4~0TNWV=N3yItGRG1fR%T4Ris7>Vg?@h zRnOW?F@ug@H+fJBx&++>=aq{tqKQj;>b(%{X!h5rJg1`)qhPhxm3BvfM)#(ZJesq< zG!}27#y?ll)~@+a-viwWe1+^&r{;bZ5K5C(-X(dUaxpSyXM#L+7vwE*3ea6h(?8{& z5;oCtew47RD%x*Uc(qRS7ZRxneMFjBw5H4SvFrK}u&KYgQ!|pJg~}?HalDk6#^;Iw zSL@Tc6KzCP8F$x^`oS_g{Gss zzwHcE*Fj)H2M3A*l217NxbJY;}wng$Cj^khni6 zm-82&_JnlxO$tIt$8%u+w;{d0=k+YK48N%w71SVJ2*TYqmb*(MU81h2 zaM@?zXWAp@NY~Bm6#^y 
zD%~??wWJVRB2=cZed?`66yhPb@3})P4Bm2_bTxF+KbQ_56RzTMd$zfu<5m)8*y;)g z_~O8;tC((@eMM|_bT7U+2`K>!;5 zVN=p`>E6#bl}FoB&)>N>%a~q#yPj9vW+*o_dn>}83;y84u2Z2fW+2PgA|Do1+1zu{ zN4}<9DK&sAhprUoY#>B7nm;i_!DsPO!0T ztt!Bl=+=GCXqX>1DGp%EF19)JiUIwh9?*A|{ar!7$3hCc4FNg{?IkYYh-$(HV3@iH zieSNDmzI`jfXEeq1vmm76rpFB-G53-N)n}zvkw}_Fvb`oyoLfa0xbfZ#awK~S8P|d z!(uGf<5SGl<%qsrYbX}CqydS}T4rnAtF{S69?PZsgOBT8C@V`eN{tmuu_)g2o_BfA z*GC>(?|*FZHTIVNxE&dBdNoB|#UiyBOBL;VoX-@Uf@aWlVnt|M^3)N#W4IiKi7aov6g$WvX#e(zfs90cX`!tQE;2LraZy#gM?Cgo{si|Y$WSQ3vk*6Ywq>3@~ zrchKy!*i)FB1%iUOF1#SthON(fB~U_0|g8f2m}leNT5O?^`Xg)*|$CKaR4Y-V30r- zEJ&!JfCSv?p1BskcnJs~2O_ZWKm!d(NFWFIK+}7rDHdOx^PF9ppi}`U0#c#E0S@rw zbJ`~q6SnwLQ>;zUXf$3hXaR`f!a)!`a8|2%VB(tI>GA1y)!H3(huDse?+5mgW_z#w z&2HCrQ|w$Q?6)?Zh+x)Z>sKVe4&?emaXzfOl(+uY(#}KE4mC4u>(aK;g`)I#d!2i_ zXA`AIVq1D2mmQ&Cm;eE4V7pkMcg>@^sjLCg^z2^te1(vDkCSO9_{fjdsQ6!~~{ zskTYvv+OwSEiL__n8#(7red*&cUXJe&ej_9P%Zst9$UtHh@1d10pbo;wI4Db{iDJG zVkkJkhpM8~u7B5K)1H-D`sAy|NBBiJpG7zn6Ck<}7ZHny2#bh}a3o$85h5lM1y!xm zg#vG9;erFU3K9@ViB;d?J@cNK*O=Fsj}G1{A>^DTqoaj3??~D;vi=jYNlv5?N>z|y zQ!H6rba{)p_gZ&bRli%iTf2t5G_hjR2RkznIp~$P+VD@JF>Mk$UOL7A=|=p=49Qrt#Vlk+LX7clVL%qteIS+mW|A4CF!2SnVWjpWhx&w z-|b=dVfSI16ZCB+M@v!rA~VccZ4MzQrPBrv$rBqzlTFfGIVO{NK_>6WLc`uWxAiWDl?ka)~pZjeRr%rd%q)! 
zn31D(9W$t~ku68fNhbHZdt_wFAqY!R*=&;3ph;Yc2}>0f)o<>@=I*xZ?rZ8~Mhluw zdebg=tjHlfo<4S|T-zmtRJYvws6mN~_BmrTZCelK?rKFnuH{YRV&-DzE%d{=d$-x9 zJH2^T6P}P#VwaZO#m<^T@F=Zx(asx}GH-HYUn}0Cvaup=D&nd>4O$sSrZAr=s37dN zGx9!$Z+uG%*Fo~C>`SCgSMDm>p4O&e%Ip4JUTNi(J{6mc^s=1OYpXA*$Tyj6x{YM@ zt}xU#QV{|q9A<4)Eb_7u@$i29;@2S` z&qi!(M72g-+vKBN{;<2_nXv%|+?aXK&8%4P`Hazo%q!)~oA?A<+Dq^5taZ-AV#T64 zf33w^0a{L4Iwvhe4N0=3xl6*-NURr6PkoNNhTZ!aK%p*^sl4 zF?Ke0yYE;vorEQ|wY~bc^V-#~w|=W%as7%Tst{y2kyO6qb~Kb$hLa(4j+Hqo+eH&u zZNyLH$rAAr@z%Hg*EO$lznQg}wSO23NmUrn6}zcC`!gG#Gix%JFkf|d)TspKw6o(!(`W$P|Urw%HJ$x@L%t$fBxqQZPq zUAaaxw~m*~hj(g*X{L6lcBuHLng4hRqEoD(Vp6$S`m7JxMdV|pl9Uz}b4W~^l6Cmd zG=hSebwzH&GvqqtI?OxddbBKuMYd0sIwW1WJ_!jrtdnB6sgntBlUkA~%*Mo^o6AT%x)6(?5_)5lv#b8> zhzjUPpa(2xO)>7_^DZ-W&~)KTlX1Td6}faulAb}UoKlg^E+$*?>ke9a)>7lUe0XSh z&|oZRcz9qe&@h04h6M};$ap>8BRQ7~(f0ML`mJ{TcDoL<=7s?Z;K1QQcl9lIeXZ`u zdgMGK|BP%;JH|u90eDTms~&5-H|^}a-|ecM_A_Y1?PS9|mm*gpa~+w>uu>4t zO|8u=-hA<<72+P2a$Y4Lo4KW($J*X3%kWl@xBJ64aG;=paX_Q7K;Llr$j5!nvJUfp zXI48ry)QsO0fz$`4HyG(0HM)9fd&C6a9})O-~q3h^)B})-F29vTcr0*XZ^Xw))l+Vd=1&9v3DbS8>~&^WvuhcAN_&f{*CgT8C;4ceY@Ktq?4(YZ=NKts ze3Ob^sq}cw?ChzkdNb9suik9+W~&hkMM5v-oJ43w&6c1-r?7}Qh>0XxRUT~_dfU-G zgLYN%!)}$SM_+1c$z-Uqq%(c$Y6odSDIs!BB3XrGqRC`-cguxbI!t$`Wp~_C`_D%G z?Pq@$>Tm!5JnBOO+m_nLtBlZ!~$$WEPuLQVK#99D&~Z$avE zRm?4TcS9Y0jx*dekVF%f3WD27i3V14V>hEc@9MBy^Taq}3no3c{zhR9@cO75JvmH2 z$G!YWTylrkk-{k!T-187l#I`B3<>V@Fd;(PhD)9$pbspkNM?V$&?KW>K6F7j)L+f9 zq9-9htW#=L!2mq-V>xtqV;|_F>ke^%=@_kM{BZ`Bi)t|&gO?Us`W_P$}pLIzV)5nM%$j%n23q3)Tkl%J6cO>coTP_8$91wjI(sDc%H0C~bV0;1W0VF-@NArB!+*|9PGEvi)%-6ekD9nKHtfr;B5 zDkA4c_v!=uz|_GU5cb+JM50V9Z4{L>DUZlORButSE*l55;Mk`O?5T~cV4u;pzs4*T zY`Q85pBf%^Y@e9SH&6(}AkqhITz^M*q)6ELU&k@UwNDUO(eY=Y{3zew5Eka8rJh93 zh!;&zprB^!ZxMSS$;Yp~6K`=sm%Ae~!RO%xH1r2qHV|rW$==O#>lK7oYuZL#-$x#y#Qo=&Bbb5du_D9BbhT=^d~B(1NMv zfc;7z-i8=l;rXC3?U=XNL%KAfQMBV&o4`fm;+03dSCmy2Jz@^uM9V>H&{+f}OrDxj zGt?oFvA`B587h)M474$OCarP;n!F>~kx!E|jY7?LV{r>kgr1Y{mx1Petf-!oGYyfGGzkC7rSC3U4 
znK1>9nUPJSxIgbcViUuavCkkdfj|+JcOjo!gDhYr42Zk2A%yG?i)GB*XK zNVVChrp?}%s0cdZPehKspwvgvB`P|qP8O%Q0lNUDL&c;dMGXC1O-AxFuHHbOcu>0_ zO7C!xP|QB@w^`?5IyS!?>ak829SB_q9D{0!FTOhm{wQhA?BhZ%s>R8xvaL3juD$$gLSI|q=3 z(W&zq6HbzK5NzNe=pF~pp90B zlc?a<#@2wWofHI2@U-~c>Nt`Kg)cH#$>}^w5hKaL7*wEPPv>IlL>HpaFY4ejAcA6{ z2A(yTa!Mo)Yh{I0iYHkigA_hVoocyInz|wCNfg%xSfmNMAp|D`fT78i?zSu>$RAy;BMRo9|;) zClUrrh`lK&353P9{i9tV9F@vYfZI+M#S#sO>lj-){7a$1W*E>sH9e_>V;GFOryS;f z(sFv=U~M<#!9uUvT+FAW3Kn9XDE7b8L>()UD`OI(BTy{7$Id!o08H|z*^F|y7fE5P zVu1V()cwOC6??X~a5-x7qjo@m8R$o&UAFZ-kO(O6?SCR1xX0>p%*K`xoa!m56W8*2 zF!p!aVSmzT7-@hdl;#yDWE5pC+JLi&qHIMSnB3^&nN0-hBz9#njiTG78922XperT| zmG|l)UyTbs+k-nVbKbo0l113IZl>1#%dj}NE4kgHfy2m(u5u_>h>KCL6)(myI~%b= z1w6}O%2#t#yb@tZbu!8pG9xjyRn28~foQZcCZ5rZ_p$&mc;^#UXEIh)DX${86CEi; zRnlECe^bN|Wy~1fMv*>)wE&Qnqcgk4u@JG_Vq}IuB3jiL#{_FIc54i4uvYt<#+Xdx z$Gm#N_ZaY7wdG0MFYfG>LP+j>iOE(%kgtqiIL*_Fv?nk@V2h(Oo8Z)n9J^fv zjQR!Dd0b8#1v z%2=fVxgD7I3DZ>U*=xfv-AoVfYK$`qTSIwcLdFP~&lrIFtH-Ji*_a|XwXE#My%1-Y zL`aZDy8CKTAS6avFIvBON5N;%9Y>Eku#k2hkuYbMOygS>Dc9T43~`KiEOSp7?AHHs z;)s2h)0s|T7g3%BZtSR$`sK22M+!|7E%bJ`TY6wlvODcF7YPFT3J8=W-d5JIWmC7T_j+z8~}E8 zWL5}Hsi0%iOo3JyUYXt-LUGC(M>j>{7PAS}v3e0p&) z?<|2uFU~YQY#dYBW<4P82AQP3?>$_hmp#S`DWU4rY5+NLvdJqQEsHSiAIyZ_AB8tS z-=_NhL_%6jtiEW5DlUwv8fu~sv;M`wyQFf6{@_$>Dq4~0Ekpz-t@%83nIQo9jCKfqBBkn+ColyrD|xUD53gnxP{ys?81~W{%1s z;Q~N58R(Sr@;|J+D;UxR+JdV3%tB=|$LQIwpFjr%G&COPXCbpPx-qYxMv-I^9v0Xd zL(p~)3yQV_rDbD_^|1Caxa9uGoKh8R_eW<336ae8nn;%lJh$;6g%<~Q9mZ8wO*M*&-@{s9S^@%Mw$FV^%lc&-_n zo4{Q3s?gSgY=%hOoqJkd?Cm0yStd8jiU_Ui*93`+%Wpg+NK2|N)_JpO`huH)01>fF zut8)eYz+>tvW-DV&*c#9VYoMDRX(=puMN#&dI2gn)fg2$!ZLlb1|CT#56{IS)9YV_ zm26AUV~;AJjNX9ZzQ0lOYuUsuR_QwzYBNH(lrVmV_x==lPD@B&*TXeHVxPj<3+o7r z+W~leUMViV829+`d(qYiTQywn%US1w47*%)7<5@D#(SW7gh!uy#9&L03c=CHWJVs6u=mPelT=libdkF8U1mgODo zlnlF8Qra!3x$7P?NTm@4ik#hoBjaPjg8#tqdc_opc--D{iE>MW>m-gQaTVx5yxkZy zqvlmcsnvb_u-vmS0LK31 zV@UFOTtii4W55IK;XK>=DdvdRdaZu->~-3`x$Rm6`=l&A3odY24iRdY;*e9S6U|2w 
zncxvXE=-Ja{QS&1T5x`w-^MWfjhJUb{Rh1q5X=2QLYhnIg>yRO6N0JHHw*Nku?M1g zxx4a6<_m2hwD+YFYXD?cRFV0Zq{~OU7=_%V~B_v$5T^ zV6n}*ZxnF1mNOvO*h_cEdAk`FZ*V@kw!FZ`6&6c(D2*A!fZ*X(xjegEd9 zx~WTDCA~-$HxXEX$0aR^iAEdifJnvcZZcQvQx$MO>aoJUYpq(`bM%zEJG|A5wGNXu zu}VF6K5Qk#3TXY`;;LCf1Kf8uS98Cl)$)m}s3D2}mTef-1&Bi-vf2rc(By0s%R?kS z5-hY34v*^3yjm_!IQp)QsW?R4zK`@RZo8kR?`Jg5&liqQ7CPZ2Bh!YhKy5V${>Nu^ z0t%XlFNg%EF(U3Th>dwe+cf8^m%rDv2veGr4cLsln44CNOr-6X=bHxtFpV<(DdMeT zmwdBYjEvYtF=;Fz50J*v}hrlcvu^6g-ky!<>q3(uZpZz1_` zx2Zu;`ADN9#`Fy_p)|LAzT>U=Bv2{i9M^<%#_W492T2YbZU{VQE7c`)+?ct@3;0h? zdZfxSMv4s@0{p+6^GJL1NFc{F&xp@Onb1*EjQTCH-@*TjJa*Ya@`LCJyw(s z*z`QvV6h5LftExfuHnk8%NJOlfiMCQ6wHd5qDzV`2h^Cm_Lt^{o#GQJ(X&lcYkw2l zh9?z*F2x>buN&11GSG+(*Wnv;lAId;isIXX7UcD=1Y;V4f+0v?B}OzjI9WFkS7sRE6P=M&)BYRnI6wK!)i4<{*s)@r%WoXEyj-z50(90 zoE@cDUY>FTYaw_?4Q8*fdY^`Dq_F=r$J!%v1qx=IFD73|>;=emPaEgcybiOf>NfPM zXr)|=;ApO5+<`KIH?QO$wSc&T*8byT66DZ_JLT%F;;OO1f;HNRQ~OxP8? zs-DZ?&(d*K2!xMv3RSA&PEL@qii%0XvG>f&Odxbvyna(F7ZUa|!Sce~ zOg8M$XNg$+SVX;&{Bt~mPDr^Z-okWWB5E*7CR!@l7y){Z2m=iEy&Pv<2UKIgr@Zs8 zO?$h^^<}CJk=!2Ir^~A0oT|engRq5VzUibYpLcvXD5EltS%wg8Amt= z`GbNR^2mbeH%wgq0IR*&OgT`4HniqFY&R!cjP>o)wqzPD^sbAM%$M-tRe zEmT<9r+55D@UA@2oi8mCF#*YJjT31WH_}kOeP!S&=cZ~D;0xVZP+}cfy(h7ejjU;P za;IaiAI_$uy7k6pcQ|hz)xhE*9`**uwl#Gc)9hsz`8UTz7?QmxSioY;Ou_+Rgi{oQ zR}V)}7|hr~EkSsT8ShJg>LZJmqzk${vhYbhzyL%N8eQqn&EwkFC3;sHaK`2%Gt%3U zy0`;WS_U`tXre-6{oRKi90h#n`5YDo4RSiNMx}NllPMCH>#NVSd$p!@+K#nPG(t7 zML)dLH4-1R4~q3;rj_@)+>`)7!PK8A~Y1U!LWxY-=2PUKOEM+yv~CUElTtNq0O-a9E_ z)SQeKYTfM+Jb<2U-R(ww)@LPi#A9UWfAIFLR7a9haRiqYBA;snCli3TR4y7$^VSN& zuv0W@j2Pt8ZP4f}q5yu`u?<=#TFNI5wOUNjiD)BIkLv`_+~nVA%GD?f71|*>zd9#x zLv5`wA&7YQ;5)mMNks9rH}&6!_<(|bdAF#IMlT{zm>4=w@M*RDIHx&|+tucZG5wyq zwx6mIaNrhNJ0{;*_~=n!csQ%xOjN~yd0~cPrNa2XFey*sp`~$kFk1~SXvmqme-B~x zFvyk*(>NMg)jlH`MmmCyfyf8ur!uW8?Bi2mVHU_{+sBB9PawVu#DU%J^R^TKrO~ytbdX9O4v$3Bc?Wsz1IIBy7p@CBvw&q z#Y9RK;{9k%-)z}1N+c(B$kDNQVJ=n?Hc~%P#a}x_L3}_e`_%PFVv4J3)}6(t39_{? 
z0OQ=1n2LKlcD{&xC;SvcCtlNO>bls6W2j^tRW=i`T6;c0YEFq?C|A-CoAm}4%*L@u zR3HEcPoVYx;5!P2w3bH_s?I|7d(MCY)rE>Ch8(Bc!?li|hEvquCgVZUcoW!$*=b~% zhC+e|@a-oi8AAVMi=EHElM+8)eo*jzx^EW!{t^gMnlXhCYXDsJzqcNPpENI2De{@4 zVyBb?{>?O?SH5+qIa>WYY6Vu%PvUO54$u|lZc)1=TVehf`h*y2Q8s(?$fbkOX|&0c zia1Swav);!4CVGh^PDnB9y#Rz4R%LdO7;c#P76_9H^zy7ofzp38O*8YtBur^c+)}6 z0fgHm+3^@a*SpzM>hA-gi>|R_rBWDm?6ZMOcKps$l7i!;DM4tFLW(wAE~A;wj`yyr z$FTLIMdzfNmi1Y5LW$v`RPXp4!Ma@W(>q>_k9w_gMOR^)%FQ*RbGZ!d>5*8k+w&TO zW*K_|7{#kQ5Ju>f9x&8``cSZu4~~Hy+a-r`Eb^ELlqG4C1gs|k`st7#C;*X!#Q=C* zWCDwSM?{1iy{iELh$-Qfg%$KENxXPb`R-K;P*qh;2nzxi0@MK+1i-+c;3UnO#|2SY zZt)>cr?Q|Wf+%RFA4@`Ii?L*Kd4{>Ro8NBZI9O#D{m#feI_BK9=&n|qGyUu&zZ|+yDsZeS=AClbyW{e-5958j8h+5 z%f>T>X_uV1X8LL~KB_B0=A&X#eP=}t4p}sV9n3mw%dAUAtSva;qC}m~+MFTP<_4)Y zM?F#rsdjVJLMlDcn{i$V^W1<44-p_ZNI*gX5fms$nD8KiAk56?yu|b298R7OC(nmd zEJTEa3K0ZRgFc+!nJA5jNoh>Pa~g5d_$hSRR1&^`2@VQ~;GhD92!f!SnlrYtl`U%1 zO2109F6T_yIarr-5_RnX0>VK94-Xh8i<(iKzC1L(<7`CW@;5pxybCI^R3>1d$K)2M0MrQ z{onunxdSRZ+~$vcvtHfYlug;B5s&}_5h4(HAOV5^o3gnV-SdW1I01@bSdymXy)K)H zmd&(Eqe%mTDo^jY6(&l<`uimd|2|ZSgEm3h8>A=BflumOleY=D_54qFv(=iHaKJxWQlyyTyu-;Zocl`O+4+pYn63Ff4L0~ zc9_DeL^q@-{_p1ny}+0KeS3Q6^;#Jcy6SXQwC=0RT4gf7>|Q@QR+aq91hy}wUF0gN zo{1v5R*~hY*0hS#R8dnmH8s^Knlk5A+LJD*>B5ZYBN<)bO`eMm){cBdna8eelU>+d zvXRnBbyO6WccIQjnSP_Y+FtSlmly&;aek$_&vkT&ZW*iNV!XJZfJ$=&ej#oXh>8L$Y7Nb*)2rm&i zGt97q%vd?q$EkEAZ+PX#Z*}xLndJPq^r+u+DJpX>agrcL{Pgll^rx0DSAJw1gt!*6 zS&kUN<+Jq5uk>?t4l?p{R&r^GN-eco%49N`=H!T0oo9)on8M3;K&04q!vqgkyGaI|H8^bZHsf`UL z&X|?#`QA(aQQhpf4?gL1^=2KjFN~+t$#kk3{yCqXsCQ5E&u>mOnK!+zu6lKqx3aoA z?A4h~Ey>JwW#y`BRTb5_&X`k0RYaqYXLzt-ttoz%8foMYSi+N;~Q z?b~9`PiE=5I_TD|T}0}WT8TvrQKKvlGcR$nSV(2VrzIxhBqAaqB4R;U zWcjc-B$~rpAr|iqu{fd?Uf~rMiFH^c7N5d`3h%^;kfwyi7%?v=kMbywuD~nsis!n# zg0A>|lUIIA}Hva z+?4A$j&CIYfaJwU2qor>p|I$dKGm{39auyF%|jO65Oit=o|H|d2_H-qR^nMbLo&% z58*(QQEaodgugA1;crXZugH?mhdmI8!lvg@Z2XzpmRY(oK?LQs<|Q$9&JhKQZ0Ztl z4YorM-Yh>4$^}7Z7}8G;9t2)Sdov$UIH=2(FxUgKD#><)XZAJDr`y1)8Rm{j{(#*snAOMGW9m1ZGmC=6aO=R>M96m#DNrv#wRK|9hw`S 
zmG{Dpt95I#r4R|Bs||J=J1!lcK9aB_Oc2Xxd08f~2aCqL!J8tZgV{Vf_%pzwbvr@6LdP2ONOI zadR?6dnYa}`V6zP`*^3cKnT3qkMsWYwaJK@8b&xX4rk$qZp_G2o*E^)gIlep*VB7i z-o1}JZRkfA&gX8KNS_bJp|pb{Xl*xjAITIP75bJ~Z&v!UqtkDv{9 zDW-f#yOEyJeV)95Vh&iGGjruIX0dE^$0WFY3#~ z_OXcXGb1Ry9F&TMKHUlg-4d@R#HTYC# zmV5@+;2Zcc=3KS>@)*49pXWGy9iH`? zC!on)mLi3GYE$Xo)oIBvqZ$l=sad(HU7!VWj@YPW!=JZCR|pJ7mZF=^0BAs$zw`9= z#M`*^L5*Fyky`tqwNvQ3%BAo{Y7V_bQ^8rHbh?x@v<_k4)(X9ZM$u_#7cfPDXJ!Ud z6sD!@AfS^}%@GqP7IJUbc{Sp}K!*0ziY+upvZ1F(4!&s@G<^YRNv=b4EZXv4d+c%` zD!Jjg#Yu0IFO)$`D+tMzC4AB%)@8A{f_(im%rH6>h>=H9Z^2Mi1)M1jU~$@UV&4!k zGX>5dIpIk06*IK2%OIlp`I9mn_nTh$Iig|^G8rr6tND^>*W{JT7g`^>hvWrNB8e`G z`K1O+|3N1i^&B7KYT{fHQ}$z*!F+7*8c#I0+y)pEv@mwz*3{PHPMbqCnvSUA&BmY8 zP&ZMNox(L;Wz{0qj|8G2)ebT@;oQCzJ75^2!eBEtyKWN%E(V!O!`%Y|E#S4g??1i< z_iY^yyxmB1xvWV*oum`GO4tbo87W@lYCgmm3eeGTqM<#7Xwf@*2&l2+5<9-@ zT9USOo$knT+K<#$3!r>wARDP4)IfDBm$Hy_a6Alp38spJS_3kM*6bz#Xm3n>Kn1#3wh*(rZX!wE?8df^X3{-nSs)l`p*NT%S zYJF?=deJImup4}IIBRyAbaEjPZxVu2wb0%$*q5Oif(ANar1>a)ij?U*E{!hrSVgzw zh{w-<_q6?g{!Q_6l1X`NA2>&aD?=WBl9OmL`~_aw$$^7gsfOsezFsNCKz^NonN6!V zf68OUgPYG#2>(9H z8A2DX-wUCB;YH9n1d#)nV|quIG811|t=C~WOnnM0mICY;IXg**eWr|8e?UCqrN&PI zhJj*stQigEhL4el^#_(YSnDhLLDP9+z}nQ;#Nr;du;U31MT0HelZZcTMaI466C`;s zw^Zq58#a+-r|ZNdD?H~?01ZrsT+}5}!;#zjHh~u2+E_V5kc2HFjy(J?7!kWd^WPFy z--w5LQdT<=&tY{1-x)7@rvcjfGUktWAUUIi_MEnw1{o@Z{j8P(_s6T|^5yHE$($it z76u*dze|$9Ou)$)(k9%=*e?MZL8f_OI|4{kzoYPew^2dK$xdc124lr}rwa|vJqE@D zA|#LFU;|vhPw&UEuy#*OELj%PxT74qG+jE2)slLa)yqKVKLDMvQ%hI>t&xqj9X6 zC03U4zw7>yQJNzU+s|4Q9r?FKqTG`aF4TSfxWus!aiSX$Zn69Uw@*~FvQFWCu4Go< zh<(CCFm(1InrpJ;Sa_vL1MUI^9+ZM)jt+H->6>)Cfk@)aOq$6=WtJp=K3T4U!ZRPt z5;2Wm{+bB#7?J!jMX>vrLf}t4;&zx1eDO$pFd^^<9|6zjj({f)i60Dt-W=lYFduqw z2>f6W^u!_YgF(=XL*gfca2@W09}I$?xJddu`Ut`1+9fZ>BX5C9$rWV(+Ncj4o#~;; zorfjHP_HO$PDT2^pgX8W8AlYQ$CLTc!m`)8_)52m_EK^BWzYtxql_s`3-XJg?3KC`opGUgkNb*Qf9-un zRyk3rrEoUtyw~QbAcogcxX@HYjfaRJt5m+t!@c-ku?!g)mGv*J1v-}bedI8~jqjHR~m z@JjM32XX*O_RKg70sMp$umKKp9t#0JB#cly>^W!(@MYh+_V~-ODYJ`n4cZJ4_5rd4 
ze3tV!1v^7b10T>avifP$3pV}qx%1$b5C;S`FyOyuTeT-?~^F7V=>+eMEgNH9|;1A&nkAi(sO87jINno>tKc6k(XCfL!d+Jg&;cNO- zG#n@Vhi=_u$OL=%K??GxAjJi4!T$U_F5KECa_Gfu|w5AiSP z{B5hQYh;%aOGhs<{7;xaZ!mG9Nd1i22O<7aq`a_@c=}d`FV4Z&g&Y;@5++IJl;LmW z@AKE4;YT1s!*7=}XMdTeDi?C|g*N>B&#Qdakx&h7e2z?HS}TR>8s~=$CaPhsEc`hg z)b!biufa&1yG$#f2oo>-Db#^{4bwBgzVKz=00z;?amb!50yfFri6&$CD%vnX>b^1` zafWX~*c3eCQU3q{*uTZ=A}a`=UqxvRAI29Oeoac7No&Ke2?lfZrN4(UY7-wWFiz8m z-1~AMaQK!BSXnc%pI&h?K;c-?CZy(H zeGW3f9{!5U$qOQnd2J#M=jsFQ3knYlY9`ClCq}d!liK8IxXF1At9b==k~U}84xT?}^M<``j0JExQuw*+Rk)payFif_OEGkyh;OW#MFXK2u3n4yxB}yN9XL(2 z4C~$o9T~~pTpJ_4>*PavF{jC1@}I_Goadx!oL_QfP-A^fc#B1gIO>7Ke^%ugSI1el zc&A+G6NxXie?vQwM5CeWJ&!g*65j_FzKJLEvpaWv`%R%>l}3wF(}Ix_9IO&Q8j1NG zIuO8T)Mg)k3nbIfHs)=y?atJ>457c(QW(G4M%ce!9b`r*+D4gzl>rQZhnWv+68WmSR7SCIOS8d-P5v7c!(ed?8`z` z9(00&_;@Kou&$xuu#;#RH!_RT#^>;WqWY7!X93mF;6oh;@d@CIYN4V69XwqgmaP;4 zjZ)4L0Rj>Q4?daJ4P|~X+AdB=@H|L_4Pj)V#B}c_e%_xX6f&gdTpfPd8mScYrmo$u zn9?UFpSbTm0B#w!6iAyQ23)6Qq7eEmXWMs_I%l8-7NtoDH8O$-sd->MiL-A~GBNJV z{tG7^ZklS}c;b@n(?u3_pfrH8uke3Wu3U0VXxkv#aF3I1kXMSy5*+IQlMoWJ1ZIPR z@WPQ$RzLf3=rp!ehr1M;V*+V2%MCcx*&2a21r5mG<*di17zD)W1YY$$!Q8U=kvF?;9HuSuF!atp-n6urbP70eIbo zqryy}9}gRDbP4EJ&}{CoDZm9ls}F*i%e^Pe|DDAq-A~iNKn&1>6Wp{Bx@=!Z_+6JI z=&t)aQ#;!A-PyH@)!i1ptzB1pd$$LY55M4f_eLs+3hD;`6+kn;zN(vKXerb^T*gP9*YY}Rxrrj&`>coMd23Y0*rNAzDl!=LdKAkPFP)9PX z3Z7-Y$m{^`8JFmLGOi4fs)1ehDa=d5cXC#|7!hPQh9>SF*w}+en{A3)(Y;7xAy**S zSRF_5wKYB|l+@n=+Hj&map4KF2@qqFZbrmDwbhUW3w_jf0-I@OU+qT_c_3OoM8cYX zdmmZ#EJBYGS_KKyb`7x|1Be}!T}Ob&tdJsbhB&9T!#gL55+tD}(l)!sax zVkn;^98 z6sE%rdk?M4W8{DvDb5(AW*%%cxdI-(@4(fbAf(0!VI|-VqK8CwdO_Zk-t~31ulERsD zgB@opfx7>R)(Y ze0AXqK;a?h`ca_xE;Fk^2&9J{J$e)}m@PQhw-?C9E2GpU+Sa{++=(prtP`c+o zC4sr}&xUA7Ft#paM;z020YI5ugOz^tadvUy*?TBHz`mr3Z}udoVUE6h5ET(H7>A&c zKbv!7ua-ZkNl>|9wTGzQesVtU^2?JZJ&aPA#!5PUIhff56^__*+?p~N^e6&6xNW9a*(cL7=}=3G@kJ)m3QalJQI`l}bc zC*23ty{QbOJlL|!`b$clqWw(0qJi%cggBtmvW(cuRZg-PWz8!vRnzFQDHqD(1_&cW znqQ0>*iBfxgP6)XBe*l9G5`G{3L6$K&3-&91s{98em}podpL81B=gXs$p{taIm+g6ahBxu=(+HC 
z@M{_nA4fu@&Ej?=4p2CvjqU+qOI-362QhY zGZaezQ$t4Nq|H~BgbYm&g1Yje8PV1wNo~TAX?@!mN|BetoyhS@_G#6K9*!7J=+O$> z(Oa8bMfO@2V(#G+$N^2DAzV8nlZpjqJ9@18z0#N$MugsHAO}?9U2R)KfYJ(g*)R*O zt6a}mw8F`I6L$_`I8StA#*sWJiJ~uZF^Ly=J_dFe=U=iOBoC?@Z6vD711tb zxD?~5vL38G1J79%>l6&RvL9Ii99Db2pneDP#7%wf8(9!teo|36ow6ux^+Q+Lr|uFY zXEHXgrp#q%6e!9~L9PnD_$v6ECB}1Sjk=%tNX|qhYFLTUEUPE7h*(li=iWsFxda}tzC}mSUP1GD%mw|h+mk8>$Vt;m5qR&^8mxOM$=VHJF0_v7n=hSo}JQ?)Mt1izWA2{|8Fw#ydxwQh;-1EQs9u$cRp$ zy*7aW&danHrwTkMFp(Ige&G$d*f?-T*^(1`Dk{MrHXAO{16-m9xQ@$N1^A9_LZDL^ zDZILMKLi(~c?2q;F~FV8na}n!jzcR`khIgM5c{0rf$|O_Nbe9HbqiO&4`hrZgoLv} zO@oNfQ3sHK#d()3N2~*jPo9CN)4#IlA+tX!AQov-*q)XuVy&%(RP>Nw(~C0*zaKNZ z2>W?Ll5)J@c4PQ>=tI)+sezpisO*~;M`%feHe%HvB06{-Cmd;H*;c$l!a8IO)4^h5 zd$W?{I(AE}?%NORLN$+60gMrlcC+F&z=Dc}a{6I|&5~&%oa$gRDitD1y6@r}((S*$ zD$@KEoJ=Mr%5S1PCLZic)~0{Z+rn3eGPYL_G?$!1vgu7sJBrYE8^e7mwXu!={BPpen=+D|(q$@5WWme||!gEf* zFHxN3=SR#qwrM!~tt^P}e_BF~vBXq6MRk|02ke40kEpQ537lpx;}T#geR`>e>t{!E zr)xvUA*yy~MP&fDby-m6xCtwx!Egp9Wx_GR$olcQfDls+?7U4l1lFS|g=*w`<{h}_ zY3=2+|9GI0#59}*!()3iQN9v7=s0Dmp=Iu#_QOicB8-C8jlAR8a1aiUop?$ESk{Si z2mci{aStg}{PkJOC>*B{D=h`}4TP|0B=Zsys-G+v+ruY|#2oyAg_iX0SRA(;m?KRe zegKR5h9zA<`)1Jr5s(ciw`8$2P(sZ=WH(~U0^r0cbBkLl0fQPSP`GaR#vhKDvE_oT zXUnxB_aNbjRzsWt>+r#b%i#>mI)TaxlB5$kXObqkymi5GXnirqwmj$o>N?q$Q=tnb zqJ|m(CDFPvFQIb-0gKHrnGCcrKbei;qAB!GUr*b&5O$e5mL0V5O9`~;6tJAoJla?N zKX8yR1QpyFPYR4O;UNn3+|JMNK?x3fMyijy2LiB}zzkc03$0TOw3F^j5LdSM5b+Lo z!HRn}%C;UzyF2=k2$Tag$OXf3sZlxIlpYuSc;U{tU;-S}*lg_!_S33Sr~4;n&=sWG z+kugoII>`L6Qto*qin+o2j75ax%}Up7znXA0xYR+6dx1BE)e?LHwD~)0J>K!3K?K8 z5d-`%18h&0J>{sGh!mI7D`GdtKMN700Gj}s0G)mIpyve`Z1xiS6|o;SaDI5?Hg@*Q zzHHsP)qw$)KZIOh*{5L>*pt{#NJ#P|kXxOOCu7aZC>$y8?_N(kx zHEBkQin6KVN~l3oiPT79h^-M~&&nQ&oq*5;N>(6DW~xP10qjeR$Z!Ue;iT>{V>`vp z9#rh??Ck99j0lCuvj1JiV=?5yu*!i*oYJ4t*@>MccIZ9u#-0-a`(t+YA;376H95e~ zvNPM-YOC#z{iE1>(ZvQBY8Tk~8_EXSu$@(aXU&M6{Udhvp)-4c!HT`1Wh#W7rP()< zk%)bE_By~&Vm~XP-~oo90D};*S2)0yfJ}hFUI<)vw!~gy)BX%3Q&Bl~`(&Osk4GMl 
zeR#b8DDK*3!{R>5(H-4f9;3Ury0%YRCV_$p(OjMqqVGUMytZ%HVV`7JBv4@I@`yZ* zHd<{PJNp~$!ww4zvoLmfBCNvx8_3w%*;!VAp;j!f*f|8`sD`Vv%QIV5>_P%G?9vud zJ49`X%HHggXl6Ij>J1xUkP7Q2Fna?AtP!|w=GpIf*bj4gXrbl1OqW@@{mrmTiHL+m zvJ?sBB3ZhWC6Q-mU%K?`&GV0sW9eL;lQ{mP(T>NXg>>oCkdP0#LPA&}lk}OYo(3un z)rRydNt}H1jqv8OG;Zc4q!=O+#*i*uS`3jtr09^jJck@2?KDCLx@#k3+BI#Qj`vD-(g?{MdGg|Fy?W(oj=!5|#vKpH|wA+;S707YSNSRe)l z#DG9300e@8KmY&$1b~4+ASeI?gaTn;P&gJ!Qo1bwkDE9syRA{l5=SsRAS;0q@kBd; zO#>8-I*wqXKq-|xl}PWY#FEFzF_K|5;zcC{P?KY2LnQL@Jb_g}f+EVS5>E*>9Z!ka zv!3PbCNbG{2pjZ-d*E=%MaIVwmohntHj)GnffJAHDn(l-@5UcTSL5$_O5~i*0Gb&< zh7=*VO131W1OX`}bRQV#0MsiDt%m+6(G#L14xs<(x(Nkd)Xq(eT#jSE&WE`=)Qd+- zjVQSWpMo{9D51up1oY=-y`Qb($xvbup^~ATV)9X<5)2E-S}y?xjyu@%qPSR?)-y?{ z;ilwG21-;jP(ps>a6}T(TOg<(8Y{DPo&^0NWb3@PS80=pr1_5>^FY z(oI5|ZW8#OqxE&CNtD&rX%e)cgN+LZttI zBgYxQWZf1*Khu5mOQI;B1R@9v@Ef3dopg_D>D*kLVyV06gC@BMuYxtXC86$?1n7@+ z;ZRC^NvL_Zk^n=NH52=1I00*?A*+|htG(SDWF{8P3UskftHeqQ!KmO&NJ&UTN&@vG z^P587rAdWYlY~4ZHabFM4o2=vPy(Rq>96VnM?8qk@c5J|#D(t_wZK^lLu%vbIq z33J+-AlxDC=fIAeKu|Xz7l$g4;yQs{3sOMDn&hM;70R~6kAy*fBsO^{5N9{|6IhF; zBk>4Xn&^1c3cJC#ZmdbnZ~qdZk6lxvp!VTWf*7Mq*_^DAaBq!7?J-mCBs`?0)x=>W z1dx-*9!YcTI+2*x2%Sj4 zfN}=N=-MffY>7x<3V`cQy|E8B?o4PcQh>xFT`)W#w~o?W zMLGd-E0T_331^seNK=`SFdsqn1h|`zqD2--wNA)Aq8_@P7o#x;A^8%Wz(@et8`M_M zbpYQ`BMgT3>6E%;SLv2ak4R^F#NH!lu5R*(0SyZJsu7{Mp{FU5CL{?Goxn%{=nSM9 z&vgK|P#Fw{;u{HK4PFIn0z5(;;1TG~pfx+w3Zd%o%dE(gL{AVF;H5!{ zKyjUeQv%vetc)Z%s7y;*NAPMLq5YuQu!mJ)9UY+p9auEQ!h|a{BiC(ODZSp^bugez2P!^nWc5mW`pH^JkOStt9dqlGJ`BQ!y$ z3{MV6FmO2H?_nBgN3HkvjW9z=4zP(J!V_VvAWa_{g2PTq9_*oRkh?dT$c0EWxVApH z&)Z-}UvQbQK{vukczM6@c7do{iNZ!e`(J?gFq?1qEVEdih~bSe=~pBC{oGH~w!bGHg<$}p z$o?07KP03x)hi78<~bQ{D|MX3PEUce4yk<@Zo|^sUj7B*CD^Q&M0^6k8MK4bh<%`< zJI{dLb(k;!Tx12k0w(bL)F&dU@?F(Hd}+e&3w(OK$le?g{EQISLAYGR4yA4`AnCm| zIA+MuF|#!zs(Inax?X9-TyuCGEPEmiR=Y+Ve!yY`mJf>hX9*PdYKl}%vzh|@+1e?E zajx?}oXtvm*Q@rst@ZV1%J>yXyXy%Mc*1HCzE& z%PCSz$6iIWm8^%BPO?qXGizVOri>y4pVxH5sCJt|z_zbsJ7&?sL0%D}O+g!-LtPYF 
z6kIIHOza|7jdiP}45W%(1Vb*&C=pn0k5h`AP^^k@k_pW=Uyw8PAx50_8oUT0PN+GX zvsD~la#)7eJQ=^vbrTs_^lpTJka@fVuxdETRW5lcfg%o*yT9IA?sFq1_8-(RA&pY0 zUj}{y1QUuBmatrlwF1KQ@FTK-n@C0T&>6P~iV6o89t3rjm@hlGaG+Vl?T~p%hWal7 zB%aVajjdlW>}u(Pd}p-fun15Z84b+?qd{H)9@oI$`GpDcUwcujgWw~g^Z3sdAs*Z5EjV)gX)(VIGjd{zez>- zuouGiP^1hs-%<_7MC{OBV3D6}Oq^ShAwL+fW{G2<9f~h!sy#-NYSuf_zj@>s!H3AM z0_cDMfgErXr-BCObLfV~<+gxr2omjX!Je!*tD!{#cOm!dA6^JU zdy-Avu&qUcogMNEseAEk&gSc6;S*s5%9=M}?MN8m#pdU+DktLBp!1q(g-@Ek79mb_ zb8b+{)l1wK(N%Pi(H{0Z$;QvdPSm4#K#Rvl91}ruIwm3(rxhY(tEH4}Eq9x|NVNYM zEfE64Su+x8P-U`NZ4K*;2UHT1O5Mcw6yGo^iP%l?5bb)? zvSp_@NXJeq8AuaEYGKs{lMyj6v9&6c#CUy3augBlY|5!b;;x#lkFf32RMU1lZ~D1Y zA)s%%7fk6zpj8YPLMW$mGorDlzpvIHm0UHB6SjDC^kq#k1R&rMHPq6zak3uuV&Re@ zVk(JE_c1(1rIS*fHkC|@KlQ2{Fdg_{=40v0{~NkPqdIOdGDH-0)I1u{%pOZWzu)6_ zA}nNxn9?>0P|U|fw^CZ?Qe$Z%djM)%La5nr{gHD_vxq=DTSyR55>e5vxicM@LY5_n z(8SYt6WsA-gPpLsAA*j9iEJ6-Z2$_Egfq%SlI^alqK#Jd7HROlD|5Y7YLSd+9 z-i><2wngu}=O@uLM*#5=uq-G47bu(I zPI{NQ4%L{USaR{I`V_Uzh~*9!p1Sij# z8djX+Dk~c#l_fIq+OHPp#ZB1aT!RWraf3yZbD@sA>#sS)Fo`>VfZeU(+~ouIFF0q zM-RLOhL!v@#dAvt9hapT!<%Sli2l|=%KNID_y*`^HYsXxR8@f5jCcR;sLjcIfi}Ps zgqIVRHDc8^;~gD{a2<+cC^X_WMxcUm`Mde8bT#pi!1?rHjcVgUD^KWNtOIeD_wG1T zXm0=qn~oELtp&4&zFN#*YLaCxhHA$UmBv6{n=lt%^!lnUtNL`M!JWXUkU`21NU*k( zu_m9)_E`)W0tY$a|1hE-K5Lr<0Y;Gz;aUg%noQyT0Kn>iKyBbt`k5>$3U;x3G?_0+ z*MC7@r5UmZ71OT8I_|c=z5PQzeW!T@#_5M>_Pz=2?-}Kz{+SC15ogwCgk~g0%)x7-r+9uL8K-9BYOc~Wm;Uwo+J9A+m6Dnpj2k84OVRwg z`*#L^D^IUi89yU$;N=;2uPQwA%{SltGVn%zVuF3?Dwo-3ewnSz4Mv@oIhQI+UuScb z%`YEbm0xszVfp2k@B8KQoxWZ9_43Kgub1H~JF}O)sv;`bjjUWUE19iae))w~du^t@ zo;O7%`m}U2U00X*JP@VM+)W~LDJ{9w>(VP-H8r)Vai*nZNw=mxt+m?gxny^jdVAR` z4}^VtbSkPXs_C&5WyR*DDqiZP-cVI5RJFph!ZX85BdVI=Raf;I4AZ5N)~?!%5}%kw ze-ar-Hng%X)pe;?dd~x~di654tiiDN%!b;ULah#j7L}8TY@DmX(6h=*D_>gOtsSrM z?)n*f2CZQo0(r*OkMQI6u3X}~78JFc}RbjOV?J1R7)zA{=l~SoyGeKyNIg?o|g%EKi z%2Co-f)GN4*wIDy6hzb*tEHBHOR~HYCTJE!Nf2+;c(mN|)FnR4vX~*qE#+7Z;?rW) zOll!Q&<>sCWX$PGZSJ4J({7qORg(x}j9IcE<~+?Hshpwnqe0H1q`5RmsxfMYs{g^Q 
zbEA0(c}Y{nj<$}Br&cSLirAJlHn#76=6m+#UU%+K$PmM;O)5&7Daw_d>=Yw+3T>Gp zNP=uy6MuN*cgWfBX1>&_r6Nkq5JK|U8E#r~rc($pNuHCH5Q1zWf@PLV7KL_EQz*@* zNvxZgscuPH+xBKIrkNbkDT^UzxzQmC-E@m_DS6DDVh5FP+1l_N5rLnizK4lqMSvuXq{xSHC?jS z)@r&{ddn>i4~m6Dfr4Sefy0Ah0m3XKFkmnhG%y#G0!W;N0uK)sP(XqL5fV_Lf&>W< zAY543k7<+{s%)xeNfvz-k6%|7v9F@b=9j4uG+*bv_0OoDhX>9_VrhN z=UVTVqWyZ^IILHhtBn0`gV1Q`8w}r1-7D0&4OR9u{`}DmJjJtLc2}8{SNEfU#7Ib( zGNP}D!Fk<(X`xSqzF%d$zQNGH{Cega4E$|II?|E8Wipq!%nb&XZ+OkfM`d<@UW0Lq z=Q82FUJX#dw!&_c&kaW1>nh7@YSuM1ujSsF&xEq?`$Wz(+YG3wFSBZixJZcyp56CO z|9|Y=_U^wU-GAuO*Yv&qf9(HX%eGEG{{Kj#ALh)xvm)x9>DgwU=4sxMw|U1M`pmn# z@|FEN?$8v?(yXd@xnz}fcTnA7*U`}(W_Jf(*;B2Dx|~;}#PqQ)7kVvyR309O-J$mF zzVEz`J>R!|nEGd3+dgbt)z)lImz~U8bu5+h$OrjOiA7}aUJ*>|>O!K%re569}d0^$CM0tuNJQi0R9@+B_Qnl}nb^A!) z5i@n`SmY`6SGJZ{g@6Lfrm&fBFOI`ks3~;ySam7Zw5rZ-7e{HSShKCQW6d%x6-lbF zO2tWiRHpOk1JUkvJnonZUvap$sN*=SSVg-#j=K{g9zKQbam$5w3e39WIRA@NolBu` z<>?PQh3Yq7x|h%Ax6=7v9$Sj%p-9XF0foy(yZiq${T*k8cWmOO$+o%`?@EnO%dl*X z(2hhqj)PKhWQUy@t|_jL$|vtoPS+J4RLb`_yx2-y%JQ`#??Axa^) znAKb3>!?yV8Q|pAi;&S;9)`rON0p%6gVsZ0*M14OOlZ>d#8|FA;r85+TM2A zJ*_K7*X58NEnbwfB*{{l=s`s(J9@Slug@t=k|Sk)5+w5wlb9q~OdAa$rF~p+G_f1{lBshL~Ug8r0$ei3AkTKm=5+-P1VUxfW1Op8~cmN@Tir~Rw zA|b#af`>hn=yDXd4q{eiM39?9iyad*bCqOl?zUqT!$c)DSyrLd*EhHzVu1h&zz9fq zV1WmT#+E2HXuKo~f_PO~1rehdAD!(kQix}fWXoKcQWQc+liHIT7zhU&To6$J2Lixw zVd20q95}FWATBJH3ya0!!CY{`#Nq%01;zjYaCpFA!QsK-p)3~*2?r78T(DSdfH**a zVqr8G31eh%cnE;QLj(dmG$256fr&T^SV%-HBqSmZ2E~EHg8~m45TI~)P%JDYDpYtt zg$jxV2m`UWuoxI777h<(82~Z}5f}|dLxThf4RR>qq28|gY^=XIz4%zYa zFwBl^sONgEoh-9dO${A$Y|zpbGGLLGqwLfvDEGXWelm>B$ehEpD~Rc7!_Zi)jjryT z*6-X0ADmKU=bUNHu>!Ttv0&FKaYL>2FvPd|9- z#<}1CLTPFPiKnZ?KoFVkhA`P1(fUI=<{i&vdgB^Sdt~1HU$`fd{RtBz0O{k~Un7^0 zCpR!)B3Z?dsvvg9($Wq6WESWF%m*?W&sRW?dCBdVbw_CnV5ik*eH_FcOBu?`^3j#p zgDJ_1Kqlh(CXQsjmjamld3)BwAxoncKIYe3+DUIfA^5-sO3;!E8a z*|jwM`^G+uhtANfPNeaDMJLT`^~mY*zDbaAiQ~X&IvmlQG0+2zT$h6U@CyNqUjK^} zfIS@z>v2<;@5}rm;fk^l50IZ=D3T>@=ct*fWlny=Nvb zYY9bXk3l4CIl*J1fHGIJv$lHxu&I9T@KJg{jY7Euv6O@PX%>3We 
zZV3vz_yW96r;NvBk7C`_NA8{M5x1og1rr5nDOb1vyh@_f1s6s0F#bx?QRU{w_K|!#|pRZr@` zJ?q{^Oq&60uGb5V_3-R$g-f#s=1SNgav&ECNyN}sBhM6IS>{F8L1}m_8t4ji0V>vt z&mhxGc3&MUF|7KneN)fV^Z<0Hz4FTmz*a zAh*TsHX*}U&agi(ae!rKLGX7zQrCqUV=1ADH0!VU`eBDZfR<8{Vl(5DgN;zi_#qsa zc@1942d6lg@h^6L^oZ}QRi>m4QU3ffxv@0)${!{);CW=2k~;x^1w0dHiZxKvSl5^E z&jH8=nJ=c6G^6k=WpL>SX>$R!<|v=*nNO zM+a*gM0Cy~%$T$lmX|cbce(9OA`MkG9^fqH6F}i|E7!Snx#jkSEL~ymk2$`c)&gq?L8Ep;b|F&$OW8pP{OL8J2uC)qZC+ViwzdqBQoC$ndFKZ^X zUOE!x13Lm8ihCu>L3&}IEXYrYO#rDnl71*SK*|B&*vAQ~k$|+1*C>@t4df^5K}##IU}v zRcGc@5J_AFYIopMPs(O=m-RgSTcZ8Jw>R_hZOuc zIjX=br9tH_#-hv#Q|;~8vzh!imK7zDt!;_wf;JrHjJ%_-CW`p(osYSdoH>s#Tx&}4 z&Z+m#m><9i$v6N;Zm-`Bxqx{p|BdH(%~t_bs5$~88N{m$lcXUz7q|QWpw#x5LE=2( z7{B-KVDHLI!6rHlf3)pm6c-*?eyJ?rrJ1IiJbjE}&g1Eq!#bYxg-M-<6sf>8kObOE z=hD)uzmo{~cYX+hnNNsrcOy2RFvIPe58*4d%8{i$xqTZStH{D?K7m*Jdn4j|ZZe!_ zdZZW1?eFd59nf;K-Cl;+myth2n#>+)K|qe)lujuh$cO;q+%*Wu<37mA2m&%>B)cFm z>b%>2X;5ExsjZZLNneXRcO9L3dh^64;J_o7`0$i+<@BeFIiFzDcHh9wgt61U2 zhY|286O%y)D0`4&`?5I_l!&mj6?je31OXOyqYs{$z4i$IW<-30R>x#gHCRy5&Ccx! 
zGeP_vZJf7~O7nbxmI{C?Ged$PR~;uU`8&RG4q#R%2&%3nZYeJ0rhxo){+F-bw91BfE}>$A4zy=RzKg$#fy={QH~Eir{D79nSXGuruIice|?w%F{1M zSsUgg=~BxhDlLV6XVHSFoQXfkrG!_?rgRQAsI;)>Cx!i}B>p!ivenWJE_^K(kE5tU zkhBZU04?cME%Y!2!YqaQn;F@V`j@8j51hmC?cd5kZI4s^;t?cm0n=inHF&Yj|51J> zR5f_!;?y^SKYMI4b^3*Wrzu;5PE(g}dereESoH*B9NR>8+xGQg?}Gk7c=oiLF}U!r zBS0oXdlq{uz7XpD(fMy%%GT<&ExQa8s2)W+_GC0&BxvDVK5V`MK{)5r?YXNy$WXP9 zSiJy|J(M1Hsd{!($E&CJ@GhMK1bY)w{HjqZ{spv7i5%E%iJsENT?{B(yJ28@XiOnt z%m89Qoxib}CNvA}h(>OJm+9=oPjzmjR$65NCg^->8^t zA8u43IV%)C0deGLVR>JvuJh&-G|MW=Hdsj3;0p+{Vw3Oth?^SXQ+z7v%y;q!>SVy* z;~rv@oX7~|5jneihp?MHwoHHs8_?%DX=Jt&Q`2!d=Wgf3_!WFa|4@B3%xCR~BV`y2unu>gC^c z?wJ%G!B?-Ei=Wkf)Cn{o-eor)GeJuR?6J4(Y3p5Lxbcl|rPrD=9}VWDb-brpV_-SR!}5~>H{*CdZh!-AJ{lENWV8B&iw)(m(cGwA_HWb4KwEb73zWSuVjPt(Qo~FuG3$Zj-xo3kWvq; z%j4l?k@=FMaL}ebQ?wk1Aci409hXDA0@Zmiqh$9^QJz~O5030QS2@BV` zHDc(16o(x8P2m5*OaPgVLQwAy$ix`4|7)Vj!4Ht4X{X-y0Q@vA4;b@)6Gqm zf(6oof&eoFb4u2NKeOOvlJ?{M>Xd>p=`C7$8jp&ZzJw1v4e8nqQQp z$2%{@4orU_7gocVdu7BuV_^MQKmV~w!59YRn!v@Q4p82T)8@;OlN9EOP}^v~H(ona zG>TU1nR?1XPdRWjY_=raJo>(mns<&T6llcJLb* zb^Q)G7|obAdSW>j?aaLgLT4e~{2?HN(fG1;yx`%H{F(V1;Jzn|;HJ1vz1blJS$g9v3v?EA z#NQQ&3Q}y?NA1nBZtRaq1LZ@c7Ew=xRJ6i`++p`BG&O>vvyhV5pTx=E_`0KSZHXm1 z;U3L>2tWwZeG~>-YjD7R@W!Uo=%Xkt1F(M6yU3^&4l<1MnAjQK;q2m)uO6X&{QG)= z-B=Ioo`HhKxpfTW#&~0WbT~M%`6zw`u`09gX)sNLu z!?XbdNQJ3Swbu67uyJOqMzOf`GLh~b_LNAw@<+xtL5(MH8ATo^_`o9!Ler-K)s=Ny zfUrTNr&~P1kTg3I$TF6toTRDSwAD642YSI$#as~E?}EY9T@+G5V;c%i^&Q1FL-XPI zODr?8D5$%%@eqLR*n;h7@5GphF2N`YLKLkz)YKSRLR|>!S~6Kv4`Kb5rT3d zJrrSv*4dybe;{KuLi;|!phSW95s0@3#2KMz5QPG067ztmh+-3n`#d@ z6I!JQ7i_T>-&)lA91J1QfKgaBa|!O``2@`djG?HJ;*6oFAZa%^6e~WD%88cG3ssy> zdeOEEz$Qcoa6CU*Zp1woZ6ykTq6DL`VdaVn1w}FzxC!_e$KqM9_AkA`_Nj&nMV~~> z{0-Gh=!kxvnisTddld=lSYrYOo3w4ZZ5pxP4vgbOQ*2kKl3&^x`zZzj+=mE7Ow{I@ zFyiRP?j~KLGfHriobY|HM~!`L`X%U-TTf9^G!k5%t}RyBB6U#k1~rK~DC${D#yTi+ z&P^dUbx@Sv)d2^<4G9GvQW0$BM3O5g#*z3}&h4w7_rM#S^!i{4jt8yHWIcq6Y0kx$ zKII~DV?DC2u?QOu9S}j0_jE)dEqwX7wEQ8cPf+!UZJ1~q%3v@YLHvd+nQ-i3x2#x_ 
zEz$kEzh*xCS&$9VA$(?3a5%84E`yeYpf$XJU7haxu9I7j;&4fz=`kRW+C?KzXAsQ4 zg0XH(nSdhBf{(T_lVq7_1w|V9M9$d*$o$Es$dZzT^@q&S3Bw-K%ussD^r324@AFR- z4P6X77dE!dlLC0`3K+n2X&C?2)s$QPFt|A$00wS%=KK?>0#`33VfiSTovIe}-0S2U z@~b|l^!8twe`12DEtr3Wst3=-@I;>F(QXp^jJuoqQMb=Ou@UxO>9s8vH2#ysG#@0P zIno2n(p!;R34m0$<)Qf}iVY(8!M7vN+R(;T=jDS84Q4Y%T*_b?6!5gafEpq~YN6S% zD`~^ljMqHQfo{cC!!2S8UjY?>fFJuw%9nz3!c*H z;>#j04QJo2ch`rAQ0dmxu04j%suf=hq5|jeb86S(G-bBm?MLNz@IUM+B~tSIkexmu zUQmr!!2eTq1o&?f>7*(-S8R(F)&-AhS!6`Km=Sq#pC?4;HJ`sM+{In2nz{$#^c~}; z8*qqu`UGczw>5VKR^6cnMCb9Cq(ght{J}TR2LY0iPoLoOxq4lj*(56HY2JVtZDDGF zv{62h1f42nb*xcSZ)$6dD(Ik0rEW5%tK4=zgZ133bFaU^CqyY620l?XB*o{vL6!=g zD@SxoD=M&=c6@uH{TzmHqJw+lcm&30eSrw3p7^pQ@p{sYn=K7$va@Y9JQ8xxN&)}$zJZ}iJw zK%FR|*bvl-tU^mXXXWu!<|b2|I=$4QBc^iLrn4QLzzz7|l^!0Q&`J;g#$o^h)W|t8 zXmjF0#2Odn?H&E_uQ?}5DK?mMB5SCL=d4G5^5$?h0vd{-H_VCkZ({nGCrF;k3PW?F zk7z6?txxP%{)I+P;KL|61jv;K%o0>uG9&;qucW0-xRzKU?-XN=6YSI!({=Bsp@ew~ zZI+biBl+Eb%W$GUAZ511JJ9FdF_Vp#LafafsF?)O95^wbKyZS6T=gnzy$*Y9T{e~C zZ}03^{;X4|us6{NGH9&)zF?szJdV5#>_bhX9I7;fx0^_~^e)b3;TQ<2^=$%SjR-a+ z%G)dY<-hoCq8r@;&~T}I+>6}m{q8_(6TGdU@Ge?8)?B+bL1oO~1l%SLKkQyMna4w~ ziiCXIhBf2P4tk}LnzlP-cdEY%+Ed#g!UodkenuxlJ?|&@ZaV%Q4eD| zg#0taCE$FUjc#0zc5EpB2TBtkwZ<)ax6|Q_eaIRUrRf>{@L!}Q6f!jN3krNUF36l==ZaVXOerRL^a-@mU_Kbe`mxQtb z788{ZStr#RzcygR1osL;fkhRK1&K#YP|c#=f9x9%K`AOspe#_C#zkrSq96Q=6(;(_ zRo4EU1X)%cof|Irwv(+RHKm5T#TWkqm>%iOmk#kV)>cnstC6);qbPyw%^c$dIpf$oo;Rq_A9@T zOc+Ej(FGEmHo|%|Rn*2wi}Z8};ml@=)$bDPdw6)e4ZH*e@0o-6_8T_YdTg2=AAXc< zvD2rBLb}8Wgh5l3q}5gsX_E#jlOiRTV1B$|d5M!taF-)XJ{dvY+8kKey{s`&+FsbN z{5iZOz5`KkzWadA7s>o~fmx$r>d@vMk6lrkU^QD}_)C>di{K`|9bpQk5uQ~YTv$3m zW1@8Xv0wR5jY4Huwe;cb;WWmYd$>IX&(PB{*nl!upR^s-de{&LWGs1DPhK10r zuJ4Oj#S#b8s<80ETX!qC64Z#N1d-c*0B7u!$iJcea2SnBf8H6FR{HZO1z&k4rIqf1 zhfxi%zUyYIGkYODkYF@dA!K=eX+{?)Jdi-8GGE?bo}=pB!UC~$jFEl9=MAJ=UPRD%2W+PWpFUS0*6Gw0s;pL76}-!WRNk)TPyN%`%bLGTDcUJ zjQnCmAgUUwjges+Bm^u|3JC=T`O*SHolK=K&sM)$U70)PyqfcuqFbMCpB}oK`dWf( z71vGO4%thV)va@N;BbgEMtmV#ja@clmwjw!9gnNU$O>lKTU%WFxLJs%cPAF~)F9Za 
zqtffAFM7V67G&M!X6i3AYgof_%OpuwuFfE4palik(yA++yzBB6551H)IfK=_^1NeW znKZ~5DFUP2Btf{%{SGJwL7)T5(cmBegM$U57!53lVGv`%D24(G4Fq6tFjS0lVi1EE z1~G_X7zPIh2MLH#ilIXb`MH4y%TQh31ayDfBz~UFq!HpH->5_pNK)_1L#A>#`?$_Q+R7H#!lOn#%fO zq01^e*{cSr8`qp;c4pb>!Y6r^(;!&Cy49)PBa<%;f~Bd%lYgzmZ2johYyAvV3*m14 z3J(mP+E3r|vH9;Pz_7~mo8#Od*}BgGNO0gVPwK0mGLvYAfN0l(n8gy$knjwPScp|P z$2lSzTY84`G`g){H^^Dth+ZZ-IcR7=t1G0WGo;~mbVuiA^{#jKoAcf8ljh$Hw=|m} zGp*7r4b!fWF%Qj9X#V}~td8)SXC51?quSWum|^N=_>X3=xs{>n>GHV@lTMeRR(x_9 ztc-VU+oqdty6N7eONDL=y6L8yZn`(=-lU6e(|~T#Q`f>Hx>82yqFZ!ZeCXl~4jRgS z2E$#89lAMngYM0XgXcTL)Scn3ZKp+lhj4u`E2Ipm|}#Bn}Yk+6>|IaI2`InHs8C3MLRz4dfoH^{ASq9dJ% z=+7s*Lr4hYCp@3%TCwL7-6s(}(M1>CD?QOQ2vyZxb*m@3=z5MY=eZ}LYmoF`^n9Y5 zWy*&B1tWBy=-wdryM}CN?%5~0&!<5O-6y(N>U2NRbAR;5@5U=Mh@C+Uu{VdkUfuO8pX(6~4e^C+ zX)ZIXvomsb=swXkNZp-dy*%_2U9!Teq5DMlS$mpgo6qJG-5(5iqWgTJ`=l%zWN6Ij zKGA(<`$Rv{#n-d$B^96LX|*ylGc&W-{4|@U%t>FflX*0B&|^0@g@!X=Z#6SBo6%VV ziAmrCqyPv8heX5iU`!&%U^0aCAs3-YOhskUFw~L=_3&6vqilxAW>Zw-VPldU^s2nQZSRrSRYGG;`( z4^2jM!rZw2xBGayNa!{z2y_fi?Ib9=Y!40qB1(M-9)AK7QEGzTLKuV*ogdZ$3(SS( zzKFW*Q~(O-sWbO!jzi1*gl_SLPHoq=nkjy4d;+l;0rAMGQ;l-cK&u_wH_)pnf<}JX zJ|jGv&TQYF*=b-gi$W-A`>qV2J7!{SpO$S5(zbn>#Adtg)48u79dQ|S;?NaLjHfue zeV0vznncu0{)=iaFISLiWkt@e09-bw@b~jil!Jj>S@Ydqp#tU^6=^4mz#|s`iYppoyMzrtFmZRHexS%2-)Q;(z3&>F- z8Y>vWlVo8I%KXe$SF(>($HBhYcneD@4k6PPF17#?;Iuw<%07=qwF%7@Doqz%n9w+j zBhix_hwLTU9AIQBr!{`_2Hs6VkM$sVN#AG!1`{#eLV4~7{NuZRB4f1Ule1>84*B?Vbe%hjbGf6 zofl6GlcjpyB~(~nC&(;3-j_gyx7i(y33Ih=Dr0>|ADwoR#&oyeREA2&xj)%apK^PX z#<)>G0p^d^!K1X&2|`bw5kfgVL73@CW05eczQ;Vq(#Kt4CI%CU5xJzX-308byr&?0 zZY0qx<`|c~BduwO7}(?RleMLj!B{v{3$43R-wjt^gbLHwTcEcz~{2uR-0 zO@n8x{FOoOak*nnF#5(FF*35<(;^%k8GYwcpO~_=f`b5djlLgF-aq=0jXuw0mXdSQ zrvVKs?u}w7B>zB6KPUocXj*K5K?)?*I{EWd~4%poS7hq3as( z773vo`W#kZ5jtIwuz>`LgwlMZ7Fxl0&oH-5`(Q2$QvvGZNWMtMEg9ulZc_AX03!rO ze@WOdst@()(m>xPV&XGV-)i3JtaOeLWgd$9+!3W5Nl97Ii$Q<`O}sdWr)#HWS#%7x zz#R_;z2G?Q2}%(7{BvSO4M?D?rTr{HF8s(0Yg7gAV7Hk6zFdURb<>JyH5)?)(5=Z;Lw_V0_i<|QFd@A=1 
zWwce_WsW7NU`dAn8GY5a*GF6+_r9n4`5?Km`j*sw%{q^8+{jQ?A56bUj_VclKC*G% z&rl@rPm#t3v5UB8@K)atZ?=wb^=aP1s;e)My9G*+ne(IO{zI&~gsf`6;+Se`Klp#9 zM5=#n!1D&aP?}!s)i+9_KLw2Lv@CJCdyvR9iv}x{U$%iVh~^$@3S(xF2(jn22?3UU z#d`f72B)e(m!8Z=MVyd2cH{`H>U0=8fY1>qXd9F-&o1Jo@G?8WFxc6$kce&FB;G+F zEHZi36&y5Nbu$T?ocbu5;ezvJ8Gumd%^6&6h>DeWfMkidUbP9Yf4scIWL?6_zy`83 zb1MOz`m$tdn~u`#Ib0I8cV{q0UG6Hl@1x4S>8SH=Ia_7gA&`;NDT@yA;mKZFpO_0K zVCxQhJ?~qZ=pqeK=QzMtDvJq1HMx*;AXstb6dJ5$h;7k`>K6OK2S}?)JAt;pPnwnZ z(zBj$sSQ`IHiT|$*B+D0~y!-N&hMbz!uq!VKgz!4t_H;#TbTU zwZA*M$8iUJf3j7e<11CKw)v<+$3n;?Cj;Sp584Nj^oHDqrbh_I|1-!o;73O0&EyLE zLZ?1JQ+=7c47VvyhOG*B<44w3^^FYG8tyFfGOi*@-7gxkhMBg8wf$s*umD3=jS9eL zrFHG=?US?++@7RY`lTvebS$_l)g(dG5mjes(h*Uf6XJcNKhYGe$QkG-rP=|*D5ucfhIxmSE`hGMuihdawaQ8p zGd1|NDg>0^-j98Gr3$ttLmHP=sCkkjBKuLD>USP82{aiJn^a*+@|Xa>PJmcI;PPj3 zoh1(R(_pwUOcJ&_LR0_Az$H^^N3eoG< zpR-urtwT$}?E`VPh;Uxz^POMhXOeP`p`8q~!b0hlzP9!Nzc?~|Tjmnssn;REFmnjt zPzWI<_kxrlRgebbkmoMq@yHD3i~zq$sc?wbC!S0q1YM9Go)ihtQy+KL``yF&aix)k;^)+y7m5R*Qt{{kuDq_hP2UjRP;%b0(R-ds5C1_?#Xd;aqEx;~!VuhW@W1{=Wo&-@m|CW;}Eh#ul$^ z9~zzSS^Z~VD5Zh%)VhXA`T%+?xqM57SpyUI$Nj2mBcHue5S;^?Q|UOw=*Dja?cf{H zxdUsahcmL+cpljMHniNM#y&718dSyYHAcKZ!}S{kdqS0*0Td6xmQ`2>Xj*ebOyvhi z3SU_k!8DZA-<6EwG(Lho74a!dm=li#>p@Ma&u5mm;p8hPby%|0T!IP7AIY>Y6KwRI zUmIR3GkvO=^KKJN6hpB*@ECQ1>3uSnb@DL0_@fMi%8qxw2`$?xE3*{LlEFcxKx5XX za>Q&EEXqr#;zrK{vup(;D%Z6#?Qe_GV&?${<9dN$7m>)Rzi`4Y>L$;EC!4!K1E`T!(hJy7e*aj>gr(HMfHX$(x z@qd<7%;bYU1|KEJAu&*bvTfc}78zx0F&~$~NQimfb$35{^-RXwo57yhHhl(LZsV|i z?=zohu%p{uj=s}ix~t?!BbsRBTMc%O!w);}Yp`*Impi38k>Udf7^y~B{2v)=o~L$b zv`za+$^Uccxk$TaM5_P4FUylC(Q!Kv2tPj91~`?>p>^E3=Nw>9|O^rm@1m*GLWRe(E- z(*dMaeYQSSODi7c zDSJkz*AsG7L9H+$n!?D3nX&G~^rViCPbWV_>o)Hae#^&&tBN8_9cMsBGNA3!-#X6g zjebPP113CCMZB;wC%aIw3s0w}3j`znj|n4pC$lL8JD8d zWJ-BdYLmU`uc_&Pp89^XPZpa}($^U@aA~kPlwi^%OA<5A$Wth_PGwO4v%926CG?vT zIufCa-oZ!?iPY$H@%DLq30*u2&Yb`}k1q+ez1^o5xHO(ma<>aws5MyUo#b84jfwxf z&Qs@#0ZLfR!(@C-O;%`(IoAXlCFd@uo^%1JP@#=P(Fa`dAlR!^g2ZPU1m{!wQ$Gqe 
z(>QjJW)@S>vfUKr-KV`4he#}-AeUdF5eQ2$ov%A@oMPb11`^8gg*S<23If5+a)^WC zU*Z}~H)&4kFnb>tfGG3WOe2388=~&`Tt!ibj*xJk!$28Cd(a#>w^H<^Yh0XW&qb(3 z^HX~0l}i~}nxf8L&YCm89~`ekL(<#e`aUG*5hNADM;_e?s+&z6CHc4%U!R-BQ-8N6 zr4^It%EiEP`{ZSw5x`P6%1O?&Jo9a9mMcX~#8 z9loCUlaCGl(|YGb5>p`{Cnbjz37Q0=Icd|=qTFnNC@5oT+?Ikl#VJ?V`jI&eC-vLN&qGj55--muSYm9#G`vPz7COgL<#c;+^ z=zu_`8b$Ls8L~QQ$0VjF<3<;CSW3DHFgOvaTwdmr>HHmsStDr%wd?M_)qQn1rW+B1Sh{c4VLubvvMAu z)11zTYL}C;QTimyTw|T5mHS!$CB7}%owl0K<7)hPE@U&tSgcLRNSln? zL{dT*Tp2<0jAYOSWWWb;PbKsI3S4MjrsjTVZD;!JX>`410`oQ&lUn_^shZbhHJf{Q z>NOw$sGFBMOY%zf-e9&Fn z&tJ!i`e`A&wLc0zoHgO0hH1ymB8HazY<@)LaiuH)rGrkWg+P_u1fp^E%PP)fsZ=LX zB?M`#tJQ5g?#ui)qpFe+Mzj#Hegtr+nK(rP0#SGH=C-N_VGBn7-|d(PziW zfFd{}ibN$Q5zi9Fl2LI>Kqks+6k$9_RrH6?V2K}k+Ru?hFN1#S<96OEMko*tTN3-%zeD z@e&t+JFjw=cf=JpS~gO(JlSy=%Hc}JiHN%6NC>j|EKHB5(rW%CL_Q}$bxk-Rzk9u| zzq*|fWPJ+B&;;agkyXN=diN~0^n@KIWlSDRfKCQM`PKoCaMsnu3%5ekn*iiNe~h_KC@Cze9IyXqwArMIdwLE)KDr@Z>)REQV3P zMvB)Fele5NdxL@qQkii=y07x4bU!9r$FnzMDaUC@L$4hYL~z z?*TRja*1d*ctQ+9&V7nTK`=Z6FofdfKU<9nn{H}C`utB>{#wZ;9Icue0Wl1%@?GP_!Yo|WQ=KxRwlRai8Z ztYrvf3(BXB;lQdwf(T@rLM%aRLh$OTtnNeUb@MB_3KN7APZ~(NrK^cIGhq(&2tN_& zV;O+QVfd#I;BVHSLJ+5}dnSV*Q|SjL0@Je{E>8h^I*fziRTXrmo(S4{h(gYVP`67^ z`x6V$fMYV`j7)}TaxN?%?eN)zX#B8)V_xEhGngX@3u$WwkNTQ4t@+q2!d}Fj95sl# za)yf>qXe@F`VV=vL{6W7W%84-1So9=r7j?A5+q9yJeUcY!wL=X2q``*k7&ivpds>w z3k}2)9DdweoVej1`h*^1v_814CK1Y@G5|g)37~6Jv(b3_^OXY(Y@kK$64@?U8vmY- zJTir|(-uite9)zcM12HGPp2%JN;W}>I0{je-+7kkDzMIE=q1p<<@Xfbg!vboUg4&G z8wHcpTR*8dj^d`CffWA*m)5{CB>{;J1g!a26C%{H*?(@VDcvEK{-3xZ zV*Uz2>2^W(6Ba@QbuvOp96^Y7Bvl|q6r^r~69B|an}Q@u(zmj&(;^``G1=DIb6%X_ z)Mm`2;H?R1!0^g=6TvKOr)dFXgd)m951Qe`n?5FOnr>mzGbK^-N<6N!GL3BS?Xd}T z=6A?mrHAcTj%Sv`4&QA4tuxy|t(mqs(?S$eiC#Jq<-rAo>ECM+uh49hL?@8Ycm)Ae zT6|wIHNsii-Y^|AtC)ixbzDt;7zNK<69BmV$5V)Az=K#}jEwo~M+mjj%y5AN4FCRE$27i8DCPJ?dZ`EH522;t z+~iln`kEE#0?Y+73&k?`oW+ckf36B-C}%$WjH>5hiDu*sJWbrWA<0uYLLwtMdeqJb zcyyt>k}^VKNHe9g`{Cw0l^RbCF(UOkoS--p6~8`L1@-Z6{XEjkNin^0>2=LdPn^1} 
z)8RmtI-m?gsKqf9VM{k#eBqH=y5St5r)IC;eHKLSr9z)6CA$pnMgKLv2CU~5IK;vu zqvM6cfs~`);v9oMLca$QM72WKRJ@ezCy zMKT)WG_Q#3Mp|ks#^`Q7g^nSkbLcV6{n2>qR7i`7m(6s0E)nh!Dd0lQCN&=(>S0tC z>Wv%;|H?xqsW`1Tw@yag#v12!^bMVTE4dgNUo*!U)gI(1?jt8Q-5wByNR#yLK%EK? zmh!glOcedN8RXJkBpQLh9Y^}}9fF+*5tblDQ36Of;QZ~xwd0o@ z(7i|Cb3sC+@GVYwX^DGy!m=P%FN1Zk2QB*~!A>;^OQ3I&0+jGOXD?G|b|-9rpUqcX z5oeu<4$;M4{~wA-JP2jAI#C=DCoXf{@z6uCoHXh1u44iVInjU>;E(iyd0ZJsD`dni z%Sl{Zqxd7}=MaI;Ns%|vyvxWAZpIq>I|)wq(`8nxWYzy94zEVpZM9wOOM?&eTow8O znJZ}^^fisN@R3*cxSi_>~=U7LjFS%1%G2@@zgyZ#fGp1jnE^0Opm>50;h>OO=Gvr}K}Tc*bpV))F1|!WUvuYb990O8DU`{!$sSEjbT+j|RH11EKIeq(V|6m#AEp;}+VOa$Qwb)6M?TcU2mlwqN9XjCTLKtj_6iea4$4@(9O-h!Bu3 ziOr;=>6b4=M^>Q)TgilBaI-&|=p~8~B3QNu47>;4x7^PA- zMyb$^ee7(kHikB~oMyUEI8IJ37)%gR65?Z`9B%aCBg6!xgam?v3srEV2NxL;JXDMr zF#;n(gbWB67#0`2V1dO&f{g_d7*KSgauEU|#0LfwN-}~0L&c;bWF$n%L{tIX#&s$p8i9Q9(g6VFE%#g9ymPoPdgoinA-LFZ#7Pp=+|J zZg`W2#~H(%v+SGJmH;t$Q&+LjpR$?k7NoN)zMX0{k?(wz~c88s?N{rWdy?1oLsy@X)}s_!vq=^SHt)k2k;BtG}JRs%2MJ_E-MZq*9wF z6S7_}N~K!!o4%|kIg_uy5?`Ub{yKU6g^73bcAR{@5b?Jl-M!v1!TyhCd{HWdzt->0 z7o~!Kg&46{{dK$EYwpLg`|GdMyk39dU)jIX@UK_)>DTLqfAw=?ldtA!@@Hw5wyb4V zUZqvqNtI|x&T%5;Debh=PCM2G>df7(yerf1XoG;vCv ze*ggZRF;v;YV6rmx^I^QTu6U;mTc*jriqJXWt`)hk*QID3%JyYJ}nv=vXG|pgfjGQk=6=5Ay%TM5u%(=?gOe0;^~PpeOYu!VIwBqyey)@O$F%@fIsz zPx~kYqyj4nOaW97n(9{!m7c7?Fg5jiBWgW`)=~<6$Vd?mrJnNn6i8RNg24Et*SLf& z!ps`+7}a&(te;?Gc#K~I2*dC5ghxq=BdmikoT}B8D03c=Ig@p44K`q+9^&-|!*pqY zQBsrE5+2@8iNU5k%nuzt&6$aXenH_)b8#+mRI5WtAc|&w9~xdKYBzMXibe6vca0LM zsaaF7BrQbMaIQ2NB@tAcK5{t^QV#wS#JnwA1Jj651;@zQNIo&5#S^_l96Ve&DGNLH z4pmtB%hU{&?hVNwGd5$u%p7@(JWH85dhzAp2ppAZDkVd1PS0EOsG~YGw**+pTs0Ci zI1GcuWWA4;Bydbfwo|;?#%atPE2)^_s)+Kx?mibune&QSDEAgnTU8W|;mPUI{KdIi z77ZWel^83!Jl}Z{{+#bjwKn-+J~^?Shb%Wje_}u(%=~_}%${LbG)$S% zDg23lN$whu@C4nc+;Ar+c5DM-bK z@=fT{1UmX8E!C3oFPZotm3iiO%UGjxkDI$Q6 za}UOFQ-58$QIP}N?&s^^* zV2X47A^6l38>)K_5=t2`G1n}DiCqf}R|6)67U2au2uzr}zbSs!%0shw22&Y+Tn(81E6U}fp(M)LfWpyLF;5&|(!>E%bKQRV 
z9f&e&n(yhhq&7`+Lh5vAdweg-dcPYETZVtU}%ImHWSh>N1uBP|?WHX$UamYp5^6@YU=6!W}Th&uqZ=QQgce z-+xz%4wz5>H-IKJq^pk<`Ucln%A&|CKeH<)=XL9G`JMiSJNt|=@}BkznI=uK%*4!mxyekqRfNOi^Q zj+b6e_IcIo3+pwM!uW-F_oA&1+I9LScw|C$!|;glVK>sShQJJOqMFjAAC?kyE|6w^ zcuK6e4r~_%fP`$Ku}Pvh9|JE8TY@ZjL7a_y%OKRAs7to8;Hlaf)j!AUp5FAGdu97l)C4?oUgC|Az#^qd?j)2i~0eRsXul4{%F48y!3EnVAB zuRO%vrrA@B@~mkrz(8M1AL=Djs^5qNVHs*v1E)$rP|%50t=@vs3C(2^3Ma8hGP0;0 z@Y87+3KhJ);o@G)X|z4rG6XLX9M6@#+z6M^$P>|yLc4eVYS^^={yNdNA+`n37*6Q$ z{^SHJU5s`xgXBX>BP&UIMWRZP#ABP1ht| z;)HW2c}RDKt?LVxeY2cjZ8%3Zh@D`39hOgV36qF@y8VQOz@UC7Pou`@xeZDplA4H3 z(wGC)1tYXH^W2W2T)F%QZnrefc!F(!a4Gt>KaaE9+u^1NRWNM48M|sY%VZ=hU?tJc zI5tNcHAGZYOsI}JEK#l57wlO~({^~Umm@Mip*`NXI}HwN!wSF2g`p8HehLfdLV7?c z5AW*zuD7A~n1V}cE}RSGa0qW>!4=N1;m>Z5LCMb_Xg|Rwia)MWCRjOG*s<~*=WiF3 z@wU{^Gy{?d4WP>dL*fbWrjzMga?@tsRNTX))-gtC^YTb7$uDcp!Rp@>eR8F;krr>W zyI0|1YShU$>K%&Ib)Tr#k2s5axSb)jFzt9;D=XICdO}FLe!&3sbN(c~CVKl6niTQo zVFi(c*Q*@6PLftRxt&)pj@jRnhpvDpvQw%$FD=12H^ZVk{9zG7X5$n0A-Kf$21xHC z$NSgybNO8{|MJN1?p*oX@*L7qZGGDT}rVnhUDY)iA`vy=qIh`wUV;J`p z-;)~}!8Y&-wFOS0Y=q>tD0D5LsoDlsEI{q{LO1fpEYc>8YcnBR5wdK~BaP0E_gSGp zXJ}Z1XpL`IW52`SRKb^WzD*iMMZ>)fHOQtyXw>G=I%!{v95;!owRPq?@eci;%NWu` z4WU=Ao^l)-I%^7LBBdh<3|x-%6a({b(}?MKfAFr{USB#dB}<1I{M>+m2o%jH4--h0 zNm@&g_gt3j5JZ}GWTA2-<4tcfGxpSk;KR(*mtB@to->_DDOuyRJ{-*$V$ROj9!nb+ z&X5YK$sVL&I=RjoM`1&Jtw-5!1hu0%x{f5H8*PzT%(HS*4+@`Z)&e!dY5UQaI*I{6g8&K_PEWQ79=!?fCi>kzf&RM0fNX zCOHLU>2QpF6{cwfI)ySQCSe?-F;C|h6JafVYQI*{Ct`budH`>9<`BP+SIXqx2==vd6my7%!KdzVPoKz7neAeN(ng z^77V7f#&$xEsA_|jG@PxAE9V!ph-ei9BQSnqGbdg(J#O21fkWuz^^3DAdjtRK(8!r z*ybfRZFJf0A%;UDsHoo*9HHac0LNjW<~m5Jx+ad4xJM+z*Te!n2d$(Bo0#(@tFa5m z;d2Avu^_;faSKWi*~Cs#r+-N?n|PghCNAO6CXQ%KyUd_|4yQ{Sj-F`~qqM9eS)Fjc z$7yZij6xNR1hh?T5)p=SpBmAd?l!TL(*GQGccft!Zel#`D#TKgoA^wu(M2xCM=9@V z+($r;Eu@S;>_{_m4U57%a7<}s^IfX8&=g12o{W+kRAeT!&Dt!Kcf|rZ{l*_-m*zN# z9HcNT<7*QlM6`9Im+sHg+Gv7NRvg2dFw`&UozB#4TaMr^8S)- zx_=PN%69`@6Y^`vnJgCrkC1t|K4Y-Qbd}^2-WZj@QpfHcg${o6{TBA)sF~>nyP7O? 
z`#SMPuH-WKbz#8z$IE(i>eQsG5jKebJYw9TD@Y)OFG{no%Y@c~7<>eeEAN;XwnuqO z<{1?=FGMu|E8W)_$sY{_4-mcoPoai%F{GXE&aG2P*R2bDum{5o&QnQyJZAP0qHiFn zL%q18SA{-U=zTCLZX3buNwTzK_~M37X40#Bfr@eecpOsD1YnOpE>q-4UJF^~9pDY} zer$DhZV>0-kEy{r5y!?_3vjPS1PE{_!7xk^GLn{Daw(V%MU#AqFayM5YoB5UR(|YI1=N`C6xmLooK3PN73m$0Em!w?IjvYJiXsd@TOX<4C~wp?q41lMZ-)`JM2WJTe*XNLW;tNZ}WS9pN|h*`FZN0EC0blmZIyH}B^Ym=Go}KO63- z9q=>dVOn2x;}n|mO1?cIRhqk(WKvu@SYWn1nO9Ao zWl*58{mYYnq;9jG8|8_68UtU`+-UHh`Dvj{GLAv@jV8bZaRhqO0K#hhsb}#37e5 z@Qb8PC+bH0X*TWx@`>WU_;Hc07=T`*il2=)Kb0ss%%co??4KYR*j9}8J)_`JEwusq z$|YB}Q&W{nC5u1FqUerko$Nw4vA+RjGk)oi0^RQS-_NyU5XdN^3Y z>?>DmP)hZ{{n`mNXEfn{#dL8^oo4SJ8^e{;Xo&n#5UIP>*#zzPWWj0-F`G1D)sY8w z5MfXtr98dPQBACKl8G_GDKnUR2+A+cR%+^8(OmlGKi&pQgp%1ExQ` zFI6=m`$Y^Qb)DjyPx4!cPvuyV^F=NQ+E_{YITGUmi_l)_<#jkC<5JIN+Voq((qNj@ z>Z9ZVUo=Dq12w1R>#;IC`^h_QF(nN9TqrXq^OqBdCaMtY6i?1>2`8T6O6a#phO((vZzkC%Du}JsbZ|67syGVI z?;D1*TuYzXD@?GeC>lczEr<48NvqOYX@=g@&RqxN3hSag|5E6@H=bFV-TWr;a5F;? zee<(yfKyG51fw^q!*PD6pqgmAaUCey0EsN=g)_>_C_d>~p$;gH%1_T61@0fp31Gq> z8Ns4V^34^kw29Jq;c!MSlbXm+L|YQvGZORZeEhW*(a>8>=ssCO0$nbYz7L=}51%Wx zM%gLKN)qf!>Pb6vsZ&!x^U?SzfkCrS7dn=pbyjQ16hLsN{l+sKWNLZJ@QVNt)EI8z zvghX$KBgGw(soP-OfjR{icG-#a~wI+u!F%*I8mt`d`RG)$!`E8_bKkpq^SC+0kARm zs8X(G4FVVbLn$}n(m?nPIbYLVB3g53cS+_4n5Jn3r+vM5AWeIHJ#WE(p^s;DUkpOE zrU5G15H8qcn>xhvd3JaQLr#Q^Jto73rFdkdI4mwbR+vZP5a&txXAHmcXVI-j4z(j9 zM;_hw%UZ|AP(M zv5Z-jIX@8QrWD(KUdx%SLT@n%$j;CMc`Ou~ibiySanK-SOh|}{3ju|qaT`&I3LZ8I z7Xz8h#sPxmpg>_0!S41Asu>;BH;oQPmeHznq4T9jD5>R2_-sinMefqOur;Y-l*?|a zF6gP=i&LtUB7Jh@RLfqpUXN0I)8=-icdGlGF|z5$A~}=EWimJba!$-(~)-! 
zBsj#6eWH6ypL|NT_DK#`?OEiL87=2#jLNOHjU^CxA8SHQPxVxcW8_3pg2f<%NI#yz zq_-Fm4(os$4N9ozHX0EcnsMshW1UcIPC2wkOf4Ureyc~J zkTK3FdaP(IjKnjK7{vlO9pq7{a@bFy9UkjvgJ%adZEjaKsWq)C4WX zZ$tE@D3926bK*TSdMeDuM!R}~=5$qg8Q&k@Y{gL@{68|BV1k2*8KFp1xG7?N=s9ku zGYA#Y7qVfRV`WP$h6?13;12W#ow^C2FQ)56!0xt>wmp5+NU0Hk`yGXA11^kd0%^rw z)~3exiJ&=OLYqPTX>?~?tPs(1RtSqJvt@=vyamAUy(G`GaD*Tq%{Th)QATh7V#7_E zvU&YiZ^q>)&TD|URKPJnUbP?w$R)l9V(mmL8!*RJHb5tIl^noJNhi6N=GcG;k#}Dn z0^dWc1w>_(?K0Ki;Vl{fCr^_Wvg;LJbhvccE3U2Aouxc_lSK1kW%47Ic4C4?bSvXzx7ar1DG` z=^-uk3tCwZQEC6_q&+03cB6&&kduly4abL|RMZoDJ_Mar>+tA9oi-<$LVXBRQSOJa z4>@|^Npc^8(gI(m{1BAlIU)8#Iz3^~CiI7tloN+)zz!`-9e3D}%*-`(3g^Hwr;$&Q z0=bch9sVJx$P?lq{^2A0e`EfKEYoANx_OGtfDnHT2oQmf2LgnFIxrwig*d8=ck#FZ z;_n1B*z|lrWWgf^=LtjRSETxaC|1+pN$VbfnT{TSq=YGgVrW!x5Qa=iK&kQrFw*P; zu(XEejX%`sal#O-9)K8@Enx`swM>yUy_F0wYV`(8VA=stlY^gErhFPVGlnWMwfj6y z>I@p-^e1{t2GRwpkV=V)P17_^+AK^G6Z?Oo^t&JF^q&!)oH^|OR~2FeG}!?(*HBH#v(#B0Ln`J*er~IFMi+m6{bxCAnxE*sjU3^`Q z-l1Lr3mRSC(f{tC|DvW@bCYkg(+w`!xg6{5;@iGtF6Xe`a+=HfH!s=z-p82lbAJA8 z-hzmG{=FPEy*+o8X*pV(zA)D}s%my4ebtJH)hch!i8j~&RHl?E@fqJeM}Ec;@uhTi zV-hmyspoCcxj|)^P#HDWx~;_2qh@QE5aQQIjA&&`sQIZe&eGRgH5;_=c(;D>zkV{m z9JQ>jVi=nJW@@Achs{a(2l`*dVWC1sE;O{v|9nmj!=Cv(lPw2hE& z^JY$YVo9lCo`?8RO6=wlbN_Z}S!2!^ExW}jsrG397UQI>l&imVv-b?KD{{l8_2OrI z?9B$3kqx%i?#4^?F-oM**UEaIzshtebnSRmnxx29`jt>K-FDAYEvaPMTEyLGrtT>Q;ZC+rc@#?uRGPH_<9X5S;FsS zsmXl9V`lB2wpy>LcdcsV>lUx&Y@OdOb&IjXOl=!LcleP1PYPp-eX4JIBdX;Id)kT;@C`=H=1WIJG zIz*I}BTz&p5)u<8$zYI(36p5ZU;uekC<`Qw3JeHKKwtg2;wcVRC_h#6ddvuz*CxG0+xk;sFDzGn)9j z`L?CfHC2cVYhq=uZrRpa2))uK#EV1=y+}lF`v{Lg_r>dsXznftx8)BT8T8X_n!&Xy_7@s3OlP!6# zq^{E?HOCBNYGsNbk;emBG+66^QGwB595N!3)Uar@$Ru(BKctGAuplz|&>|lghlyiC z;+T*;SaU=mNb-OnR?(O!a7^%mLkcn=28no(NG>pv~L)u^tf*$TT97NkeJ0 z2nsGPB8@QxCp0c1kq#2ZE-)q%3EI$jfB*##NHC}fheVh{z=LHpAP)r%NMyn;SO$kk z!~<##$YX)z0z;_?YOsoj0>z==BGQO3i9i&5289NS2nywb!ZDGN!5~TogM^qU4#lBB zaVQSOp*UQD;!qrlgO~Syj~@G;)VtNy-OOx#R{d4OYWPnTIXPK5c{y)1cQKiVFEcY! 
zFHhoLo?p&o9zUMPNU6V+uPgQYfROXdV0y4F8=y@>nGK;`LnwC`7VdeweG?uRnJ*XRmH`BS(}~tAoeZi{Vk(G zePw-hUT)k*+{fi5{cahR?-*6YUwrqR_xxc#!)f>+?^*smyStfvdO4Sqmwgh~8(Wt& z*vsj24l6reYhNEVx98lN>vQaEjy@kg$K*br*K_)*M^jUA0~ZsF$cKi)Ad`i{03uw# zz`#LcfrjR9en~{q0ZA5uU^E(%hh$x#jKwxU5C;W8N>K|!OwfQJwc&#VjR>0tAN5?~ zF^{N5ug}w5dw;+4_A)Q23vV93MU{Ek_vQ6ZU)Rj2A9X=JAAHq?s(@9vlH(e`d!{JUmxEcb9Zb`7jyc*f9?-1&F}Z>=b#@SkK}))N}IGu`yBiY-TRoxsB(oxp#MxmtT(e zaQjy8)^Bk~T|AS|lF8?qna`O!K9`vb^PSh_Z2F}ib{{Y1%xcSNkZ&e@c1#&#&M(n0 zqpv~LWXyJV?3KMTjW(1h&H6ORGud*cOmoH$x8Wg^XN<|khN^H?TkZS(Go?q(w7VY7 zdLrXmd|X6!g~^nwSK7_sx_|s#nYWqHHAkF}jpZ`FT4E?smuW9# zTB6p*(I7LNQd8z%@}*R5(A=pqW0X0x4699a#7tvmjUzJO);!C#&(gb5Yt-8`Vv8lD z)-%^@OHo1zQTt1ivZ-s`+EUH&u{7pfq1i;n%Xm=*NQA0DmU#^nX4xqP{-IB%#;niMR6z&#i1yU5DtgK5yBC|u`G&XQ5=in2;p#0 zC?PH=6o*5}h)gmVLoSDbV|wZgRt7b-)GiZSJ_|C1e=GDxX9Oy>x-8=Ve)9On1>4{VG;}`5en?0!iS040EtY9L~`5U4lqE7=3i7+ zB@Of41Y`m-0hxeIOh6`3eZjs*z0W%bDl;~tZ!`WHM&v_Yc0@Luk9e00HzwyVdZ^6H zFYe2`S>**6R)Y=&eE@7dF7}p7ip5tA?vBb!>K?8Hj8pHLmqW9S?eE4{b4wym*aiwk zgF)SWR9+0~Y3q{br{0LF_#IY_O*70NX@nM%IKfS(;qJRh!Gzw@eUDb2KsfgSbuq-23KuHHmZKatjwaf;`GRaG<_`mnhF=x#6{KBAS+1DRM zimNIVJbhWD2`-=i!L9*B>bZjuM%HPD!AHXJgTzN(q$cxL7MAX2F-N(Fo?AiT2IpS>B!Z zfNJxGtMEZvBwF$vYt_H)FSsbqNV>fm%&lSl5!d#}8qOTsbe8!yu{TY`=0kFbg6?G) zPqbjm!}$(&1D0kr4A`>)#mPMh*nTNy96$%V0ZW5aS6`Z>IW%YEfMS$$V9dh7EpUz} zVm3CSF6bw^)kkP_UTV&N~@v+|2Z>)S?E>=lXiSSP zs<6g!qMvb_&9)o5D{gW=A6v|Zq3~j$23V`d(yWm1%$Vuk+IELhno4BcHSs%=7s)1? 
zSSx4{=As`T=VcVdA}al6XS~v2EqEm^KU>q8t#a86?moyYInf1L0F+dJMmMBwJw!b; z>iv_dg{{Rd#;wYnTiMC>`5u1kVpa%A>FFP08ewBdoG;s<@XO??z!La$8R?gEjGK{8 zs20UgMm5~=F_ie4SPME@P4`@^v^^Pt(SkXy(7rI5T7D)K8(?U46l_CE?3SEN#*b^a z1T$nZKQ@v^J9S*{J!A-kJA zfL4GAWb#S68Fr%#!XNLVxT_rPN^v*|%0ER%U3F@&BPP$p-$$Z*9!n9{?KFH5PgVh7 z5XF|;!g*~Zhu{X%a=G+;?efNR3rp53`xXtxdSS+=+>Dr$o05{7V|@^EFDm3_YW8t6 zz2k=5VRigOC}T3f`5;&$EA(QE@mc`KX3D&C5K&;m(GUHh!{jt7@sTf`AW7zuGYUrL z+gu8ele}pjMy36(p`XB6vemh{Q9Lke2u92@TfvNH?%GMqUTm8~bJtTDUB(F1(3KxnhDcLrx}l2$$P9^)UVq++&EvtkVfk+nchT zCx0TAr1R?ppqb&#S=PTnA#lG+A`cjrasp7!V{@Sb!-#XB&%+J7MY-vM>W-fUq!A8^ zjh+v+t333`U0R*>47qK+xfQ4Coarfi_*$7xRzI81>&%&6%|z40|J0XpxQGpYnRlJz zC}%l{8m~zksqKNF1vgo;lhG=VIV{i@w9N~e@khaE(7^3-LIH;V#b{~X;*28n?6@>E zEnPxM1@Z#fLk|eh0x*pF0R@^;ML>zd|8(jxd}3STB(p*uD5RivOu2+`28w9lBNs!s zKQf(lysV9@Z)fR+oPR((8O=5G;U()wGOyakI%+C)die2z3MXP4wJej|Mz&U%^7y8g zGvY8gae^3Of@l6Am^hS~XDSHdbXW*30kG_0qYUCmi5Lm-g)Vlroo7~uN#oAazZ%Hs zAYqbwN4@hhQQwpDe4%boa!$&eLy5{^(vA|=vXhMawXE;+%^YG-F;__?a1j@#QCpv_{QzZnX^;;ED z36AqT7a1vmiY;?05fTm~?Nk}5WPhn7AidKQCs7SYpgxeabA_bR{jHXO^oJ1l3>0J> z+M#5gR!T;Kzg3e)dI%dX9CUQtXeMjsVA9SNlS=oedIF_*tn}PZV2MSyij!MNBAME2 z$HP*~)Wn8R<@G18eLW4P%TyU$CHEHG?*V#l@GriqneY_(v@KRHsBk-yEv4=|LM^Ie z?_yvYW-u<%hnJB*Ku@qh@^Tebn4#gKa%*gOx+gAs)rQ^+@3L5}G_(uUsN(Oh%(|2J=Ko@lK}?C}jHul3Ex&Ob#K7jr71v zm|OGfAacQT7oHJgyi)XebFA^3%;ArkrAE+syE+FBIUleg#AGf?>|2o3Bx&Xoa z0I?|(6Ag)q#!2MtO`-olY=mcs=Ru=U8m&MlK`I>W(V>P6r${r7Nbd8n-m#I&{Cwg-Kiu(sfG_DYz#WnELWhyN?o$p z$YdXJvHfOwnB*snOFP+z3JnkwqJwh&A|><1pzUUNLzgMTolDaZ;>0c z1Ac@=FvDd*W)~QwKQARi(x9SyjZCCx171TL@S2kg5u#u@eLH3N+&<$G+_LmgL8AJ* zcN5MnrptUuU^4!uc(2*MMtT}1Ay_XAaO8?%y?-1t)N*dxDp9Bs{A9tbu>juwl`<^& z0WjZx+Cr!RE(5ra$WxX=7w(dH!d#o;!3}4WPkw#@uhT*7yUeI=_5SytuOW!9aW&;z zu3CZQB!fytyj&cXq2tSenlq2y+oJ(!ezIVF2GAA_)AKUypHdu%zTxu&me*LuMNwR^ z0SkF=c(?SuZ+Cj*0Yf4Wz%uTsaS?uWES$S<3Jgb{vliN;rzoe5gNZgO^qXb2exWj? 
zX)X;MZ!#R42JnsrM-YHEp`?FQfrt<;@>KpI*TEX*=9nvyK)l8x`HTeG6Wq%5HU8E20P%*-IX^t**llHv*3d(IHz3%EOJ0Hx zk#T(LYAD!ps09M^f{lI?gf6dcKY)32h!Al&Lt5+E3xC>?JZ* z2+WbVDkFE01e{vWUW&Cgd{hG58|`c~sfHAD0Y5We1)Z4Vl`c(C!x;{`$&4X{Zg~A} z;zE8dgTXJDs^gC~G!ED_VT7)V%7c!a4gWLQ;-JlOdu-Mf>zgWZt9<=HFMHXB@c=#THXt01j-5F!-9 zf6dnF3vR)e!Us_f?pmo^8>v7hIWmPN@?Eiv&9+&bxxLskzxI?BDHsV` z6J>|oZ!@l-Zjw>DfYJ?#@mjI38%ssi?K?=IZHx50wvV%+PA?haEWb5XV$2a;0niI! zK92V1o$PlD13JIw$s zG>;Ygh@F9N=VSKk2B|w)z1^|oHmdaT$by}zxoAUBS++>Zb()Mi))ELq10;d@+i-iOm%5l_=^-vf` z+$GwSkOcm{r9vtraJ@~@6?73FKU03qc$L#P+Y?iU7;*GcOuWde%>^+DV`Fy$yq-%6 zJGeG81bNXN_#Z(j7mc)R_y=iVCE*RX%Cb=EA&fgJE|v!EqbwP0lX*6}v+J5Grcf%^`u#Z4pv0tk!E_&|r&dzcrxDNme^A zxlgysZuC0IDc+3y6W(u&Cm5kZk43I&oq!Z@?tdI~`DjN^sJ#<~x{<&G>7 zKM6o`jLRKP$c5AGdkfCT+pWX;iDBA>J8t=;4e`KU;BK7od=f?r_pr5DwBSx3o z^b$GG_K{UjO^HMIg*ekJ?NdS*CQj^L*4Gek$4+(^iguoU9i+t~+_s(8;u+N#?zFKz zY#J)swTdv-OP5sMta@62enIbuc-U-(W5yYFc*I_J-W>;*u&4WHou@srZCAB(tPH8m z{Oj><0uf~cf;pKi+2142T#D~9Cox7_`(^DejtsI(@W-%GIo)k71tNMLq!DEF{YX8! zH8%Fi@~G7OBgze3PS)Kk#Lp!jy@N!4TPe2ttL7$Dc_H{P? 
z{j2OSNmC_E^iaZgCqO=HuRQ1-Tr@x@<}_z$H{)y>jj!#?S5DoY7r#Z<-vXMRD|?+_ zq+q)v?)sPbvy8KuU{k)HiM_Kp*Drs(%d^@tq)L&xq6+`jW#L)$0HK#bhS%CkfSAj@ zn?j#!_kw%@6NbV(BCvlTH&4r$r(^~)j*oY?REHm>w5v|rL7{K-4rEymsqbpS8#S5; z08R*Ow0WN`%F13Rj~Jn_sUpgtprOq0fYn>s`1{UY*5u8 zcrB!xkAVYgR8s4F_mjB=4bYZDP5{LdamHXWvJ=q8{?A!e$SKUN4a~bXATBE8>c~x; zyYUN*rsxMFO_0zP$9;!t7+AoxTJ$gsqvFh?K5Jfcu8Qr!rZT*-L=1~SLrDhl24mq+ z;ZEl>YrU7{qsAp(4(*y_?lJ(H)A1|RhMY9fnRA*;9O;Mg zL+t@h2qsk7qyyyLOD!BHbfQfcxfg~2DBj>}J}`*bORm_&$kWA`l+#|I1s{s*b9;OU zI<^g)y%k>J-YOIFiT*(fJ8@zMjvO+rA!WXDs6S_TW9ep#i269EE4B#}^=oq@$@}PV zWS3>liaF?Gq1K2M$;3tS*t_AQU>vGv_yrtgog))9comz_G5u=qKaf zpoW=|TIO|r;4D)^OD{Y6cuy6&aZ{LN?09`sN9OC0nN7OKjO9$j4<4#y36CqIDlHW> zHz8e)R1SZ>Bo~!OVA@i{X!RroCsFM80v%SRWLoL26pL{2H993yIb0f~T9hJjXoRvW z(unQz@gj+EVa>wFNK($Ns7A^$4lCS9=>!htY;T7h2|&9tbt$cG$_Qcj8;Qk`W;6ms z5YC>sKEm!Cyb<>v3UOyT?qM(An|w^%JROtq$dd89GRN*Hkctn8klO4_1I)6ZT0vuf zMRW9svcNVTrnv;;J-%Kw_(_dVlHpYp> zzD;r|m0)C53T&6Jm!ovKVtC2#e{y|ploN@4n&wjC#9nOXYfGdrtP^^YVk# zwM2a*MU-^JrJu`>)j-L`?sKny3wfdH9?ay7oG&Z+!B#VE6vTNgVRzJ(nvBTMa<+-b zmPM%4uDb|p2M-u@cbk>(gN33k<`HBYnm`TAf>zdpmQO*upeTF8iuINtCzuy)q}F(v zv}KXi5OR9UI|7Zq-+ra6jp?|=dD0}#1KuOpp~ypv*>lYAS?tmVI_zqcMGcE?gMX!epTaQ~(O(4EvPD z_VuGtqM8bN7Bo|XPJ%^6VkroDU50&oq@Mq6LB_ePngYl^CClSunmf1 zOOFQz(9w?sIpbxY{}LTiKrjWRc~E`o_d=hC2%T%+`@YVZtoiFeqzkj6_NE`|Q| z^M*c+Y>WG->solZ!eabX$pJ=u(i1#&!d@Y{Qcyk)gOM|=A-O7E*yUj>TDJ7SX6~b7 z`_-3hVZ0E{o7Z0WcR)&VESV-QKxCG8K!Kc$XvkqlrodFjevC5_Jo?I(#hFhL77HF|cIB4khhD&HO*dgDcw# zDq(~;f{VKmP$wiWA`&fURn(X)tUigei!8Au8{ZaB;h%&Fs-AOSOtm(=?CTuF~|@mL|; z;_Je%ejG}k&56HgC-a{J@+xdNbU@w_TP;gNGzBA9#3@=dIK*VWNcMjrI14^Mv)z?$oR0*H(qW{wjZlWRFIH#S{B{5(+&9mX0V=*(i!ZtFu*dAg( zyKfsg+ft9xumMfSDYfKF)~f{K!uUB75YkyO!~So?&}%IjefU#(*5eht(AL z5cwqu(UB8Zu|IXgTS^fmx6HZ>0mvXBk1x-KeBp4GDs#+DvQx6OB(78IE#e1a9g2$U z$0KET5fR!k*%djx#mM-rnCwiC)bVKt!gdZ4t&vo~g^?uKY#cKMz$UHNbzZw&VY49{ z_AOQSJsSbrEgB4)obbY_yOPjg4o)u%psG6O4R|!F{2*9CnGKNW2AK2odzfFH4ny<86Eo zgy>&MVtU(zxq7f5tSUno`p6qNj4RpF?c 
zr@5{ta$FFZooA@Pv0^=fg{Hor5{hjeM=~MDuK`L8>qGjWZK*n3N7pTVCFuFf~IBBJF{F(YIswbUv91S5nBUkH4KHBd_}wCsV6V;PkXMJ5G)ZT4dBmKG@5tb-9U?vadpY?6wg9t<}c2|M$U?dTVGO<=_Pnh$ixZ_AP- z>WhqHyl@Z@CuY=2{Uixj+ZcOQX(Y=#+@;yJywU+*DZ4X@Y4EeIe=Z3(3WR|C`_4#B zhB}o%6tq(`-IPFW-AQ@Z5>AafzlSGuaw)h9N~Fq{FGQxLXnf0L*@Q9%(S0lPg0-r60@dHMl13%=r-a@xy1J}rU)5$ghj+?NFOX# z5^=w^y9JsF`(S5NNEeGRxvTJov}}is=tE-zhhW*r0e1<+$gntsb*@jmUyU2I`YTHIBj|&S z;{YuxZ+~v-Rowzm#T^80bPV_9kXS>?0#ujeeTd%4kOwE~DaXsVNH^Hvs zdC>UQzoxW;iTp{DWk3#_=Mo$BB|3wwDIkIvhC^^NuN zw`<79(TWAWU~sJ+ixiV*OpGl@&ifKuInvG{GerPggEAktIXpgrH=*Nf)Oc27|47_- zo4o`f@+AdI!xb^@US2*H$yZ($&${83th~J;-|e3WqFv^Ub)ofXaU+i+Fb543xu`S2 zAh)+K?6U}iLEF77QJeTH-GhmU6e5Ej9^#e4fMwiZe83!ZyGV}X|CJ=j{Ya=Wj{ zy68}LY{^3XVG`2?-`RDyAhTvI+9v%|SkXwU<1UP5RV}$wsushsZTrdP;JrG-AO6Q4 zZVLm59v!f*^$x(R4y@ay&L>lASaw&sQmlPM{Bbj$jMDCYFyF~-yo_yJg&qQIp>6uM z2mQBZ!mSmph0`M24a=k<=F)5n4XRy0iCg~lBa3{;pTKk!bE7*hX|Vp9SG7W;%O?ec zEG!r#51z5oXeM`a%f^7N7uB8%$M?#pJMVRsp#$ps615E0ePJ~&adyYQI?CC({6=Ih zXphwEyS+r3%a2@Z%!%Ur07>lAo^U2IRmv=wwA!vCwpDvU{erqNO4G7#Y<+brB5E)c z*i9yTmgcRY=xH; z^Mt+Ziltn#QI*xsT)b$D^evXB7IQf!<5gEUtS(*|lr=UGZeuou%lJY|t+p^|`#6JP zCO=-rIQ(cgKlVs@-u0$iLs5`P3@_p}${MKUkrX4mUN28js|z%V3XLEBeb$=Pj*HkD zi8I1}n*f70?o?UTwY~LyGjLjR{Y2a1s!OU}Ta$w=X_;CP|M0K;T< zf#xFA(cpNo8YIQGGPK`Yf4;bX)f9DXAJSNc;G$i$NX7u-A6WG+!+Do|7eFTK!KbOZ7p+#yhV1Yt zO9^!GjIX*PEP z)5Hl=Hee!HWWKkZ^pDuVMiHjq*kf;1Q5HmBX(!rKG_eZn!6C3V)C>lP!9k-Qn?Y&4 zs1~Ski{|QpB@Y^-q0wNv^K3LY7~L)79`i82rVreac|K(^I4hnrmRcQfO?8^Cp&*6` zvqX!RNqa#a3}sas>PD>hh{V7Mu|pJ-@5(^V@9xu$2Vi6bf6>Ev)E?JfmEkY0t#V%B zIph!P5|E3J%zMi=BK5SRrCn7-Hf`@f5KC*LO$U1?SS0u>1I5YrUJr50wr%5h8Tp$e zvtIQoDc-=*j!+whnvm#`?Xu8)p4-}Oc@l%ej*20iv&)j0e2|ex%5nIZm*;5VBig4(CMy0Use?$0&Kx?64mggSgK; z8V5BIIJdUjaG8SSosb!$uZC#VRBI40SaS*!mv%~t(dgS<$dpW59#dJWbu2wwM0R_1 zxude`Hses?-J|H0fgagvWoK)9tE25UJwgI(@QvwDr~9I7t6kGOZ=GUMf&*5#pn+?l z*8fu>(Ur!TxNCzN{n}$UaBb4}U4a(y z&S^Y}J@+)4OI+`_y6yZY$HM>uCZhmyhPhZ=`Mzg!9<4i0YvS;066l+oRI8maMDikq zCSIGvWb4|j?zqefJ+2@?a4pWs6rb1t?$&30wla=MxmYo0L$UxOmuK9zm*pF_hFrjP 
zyFSp3FblSN#nsl28Zii?lStHHyu#M4o%KesLx@IP2!4>p7qFEEWw(ToOsjAv@Ncob zDI|eQv{ks1@1+It1zG=I;2XSw-N)o!1EHY+8lQuW{BCw(Lz1_O(%$5Z6GJZ1-s@h3 zegdg7loa6ziwl}~3cNf3yxCvK^s5^hVteI~Db8wlNm~n=Jk|TON zh&5G$A^rrKlvn0%%XW5c7P$IK%ivjmkEynW(Li>n-=!k(V=m^+FLi2j|IXHnH9P-WL$Gg(438gFc!qT8V+F>+bdgKWXX_S_7R$e#HXMp5?ydftgQjNj z^0t^s7`B!1|- zgGiXW#f26-XW53eV%Wwew29=a+JISBUijmQ^Z;X4m|QFUJW(<-e_9ar2B9d>%1GqG z$q^uALxT`&#wFGT5~i+&*9$jyaF;GFiqUYu%|nm|8M`7nnp|*>A~^s@K)AnmKqwf@ zKt!iJ52cD+8pu|94{!U^W?ehi`uYHP%)=Ge$+%JD;EJI3*X@VL_llaO9ii5|bzQea z{#>;Ktr25#DM3D5WC~l(U~u3ObukO^>VWff+zN>}5=>W%+(qWP7GSwRM&X7|#X9ro zaxJ$cI&52%vc^|zdIyo-VkP^Q?Ka_ToX!NFY++qWTz`2hy)FS4L+*#vA`@qPrHYnG z{BMoyO(lfHmovUv7hjHviwBE6-ooz1&}2>?wE>h#hB4}b7EQBT=8?B@ujczyqslKG zk+5quCX8fV-oQU9p+z_RNjk`0i#l3yW*b}^Tk;Y==-PUVVHF_IVr(qLtt`E-;xGcS z*3LZ+%3r)Bli+NY%AM3I%u^8Cb95j`2_@cdD&V@c(5=<|ZAH;obkGNH=Z_t%p*|)2(!EfH{##L&Il=kGW`FeUjG~f6x(`p~Ffx{A=*tz>x+~{9(6Bj*L^SPHu;D@Zc z)5?+fR9p&a$5=4KK@yS=%Lr1xYUzVhhhuibvlk0>c2UI7C%Z-MY#lg(K?rA~&5hCq z$>O)K|ktvYNKpyt(JfJ8g`GMITbX=l+Y5I=}Kr+%D2xlZcIJLdyM3P2&-0W5W(t zg9T)07&(G*&@eQH0Zl2mGxxx@t@J$2=%D?4Ei{lh)FCHA%zhT8j#GgKy$wXH64&_X?|6OI0F!AAi@wRR3k9Sov0*d98hW# zzS|{X>8W8Rdn&_M3f7dRxdUPpXWyYP3eu2_K0bcpsA#l-lpP6B9BL8)c}f99!2dVY z_jY(YJ_1JpKzJauQZ+$T zxTp1z&QEHa(*&a~+9Q{sjv{mQ$f+QX>L64EMD;u!@$fjQ)YuzPA)o~zYV*^;pjKq6 z{4AYx0FL_XnGvfTl8n5J@ar0YqyoZCar#6DpsNNOAe#DcKpj{u=Kn-L087+McpsvE zRTDKCKeY3uL+q6*+ zfmG%d=2T5XUWqg3OPvm{K7E+mb>JRbKpE5FK$`dyV*ngjShK6%4lmXqRG0QXP*O-23Y4#28>IN;ebY~M~!K29$aR32~#{diqheU-3!-53~2nI(3g9I2fpon-d79A)M zTa0FNImLFm8>B0p4JKKKB))#ka?Enf5~QL2U<|^CLI{t7(Q6g-VCTid!^0QYXyDlp z3(AGliw6kCnW1PJq8=KrMN+8;sYceK6f;O))JxGa6$%JyoFW?!52lzmaIsiWF$$qr zG(M(aG#(faSBnM>1VLOCrP?(XN4ge5Q7(1XCm{}5x@E|!X89W{c*0xpN0kT?VW}h+ zYxeKkmZNf{morIlFeudP=TduVLIL5But%`v6iM>zznq=cre@)nvi zn;=)eQTQAUMWbJ}t{@Ugup04^wdB~4D{;UK z3X;^sVFV=g(wpDP=)dZ3TkFMDmNsP#?X#aXvuy>jW;_3y$A?tDlG!S&9g^GtqqT;Z z<4i+O4-cpP#xE;=GArNdJ%kg#BEy&-3oU&^6kTaa+zsLGL50R<#hkdUB&6j|1v z?+c$T6`xWhY~LZYHmKmqLG%Z{INNCq6}U41J<6M#yG3az&0 
z)LB}O3~^{)9wez@4IQjgFLWW+R8py6Wyg*`xrmC$W~yBOlZsmZl$Dub0}foUv#XW4 z&(Q9YIeSGkG$eVMw$017SOk!S#**-1LS?V;vZXm?wM@O8Q%U;t?X)b6(T+R)h=yP_6XgkHjiRtWmcBi?A z1{ypf9vVC>C??44y;q{U6Wo6?|D?$^=)b$8+ufmq;hpJb3W|rlZZ6@tqn-+tT|apr zm7F|b5)!^e*iuuy5+o)hARrVB3P;0(0)_(;i3vx9qk*wV9t{Zw2LptN2ku(N*Ami| zLb{nJp)-I9FJrn(UT&JEq(c&8x(X6qWtnPLT}Me*I&|h@e17QiXvh1EhYl__HpZ*h z=E7KM=k93kq1Yh*Jm!b;WhSfK$FilFs?^z9Ek_rh?MlmtW1A+?* z4~hViR3C!E!UUY@OqHbGYHDcZZhkdIJ7(H`cbrL@%)$Jx&>WqXhgZr*B)P0+*;+}F zadP58o;2mt(eUtqA$d?xe6WZBA!6Z(V0b7P9t?>PS}iui{3Y7Yj-MU-o`vW?>&N-g zl;}Q<=yclC>U?MUX575YpI>My+C|TxjnFn|=ec~&JJ%DPnTO^~=$yZ_M3Oq$$Z2Pi z*^8Yy?Q+`7*?Nr7RfpxwY{}}R%2}DjawWy-JC1Wq-NPPZ^ou{$4GL@7h|@0mwA+FWSgvtt>|Oq z2mk;K5)BFj5>-!>(9HQ0K*7UdU|2932tr{%AQT7#0%1TH5C((+K|ml73I?M{4hV4? zuLl6y6(j@$WjLpPaUe48#GBR#v%H^pnH*(8Pi*souq(XmrwL!}a><~*1cacn6b2Ji zki;Hg{H1;h)ydaKlzv{D7)6bpjCpB=kVe|uc=BWf4mj_abE|xpPfHsl7(|L5)ai;r zbKTOPc?fl%8SlE#%(y5PC}pPnSZi{j$XgfB?8-_F6m!%($vZwUh`Bb^1xoi_$T$s< zlLtcD0)Zf^4@k71t9DP^Fc#$GA_tUJ%wdh{2=f~RF|E9HaGI~<6 zp-78avK#DkNyE{8JpO{PRASm}o_ggVLy29~b&|G0nTVNLR7E9~f4o*~vv!fO((xhb zhCqcN>7EDjS0vKT8(@ zv-S+-c-71`_xd1U75OtT%?7GKz^x{gyOtiE< zQGr2Hksq8iY;0?aZF-6LGP_kyBnQCiFep+m zu3Gfh+O%cf>X@1H zn_jmP%@*(lSa=};#!ZUnnvb$C597rFU0|p(S;QpClW8v5W?_462%OUia;~8X9w1w* zC~UwG_swo0O6BopT?|71z>}a>SYKdo-odt;37{Uu%=?Y{Evtg0$T0i^p-4f*C7`Eb zS5ZCE*#(k_3XQupSS9rH5Zxv>RsHi?7MQYGG7jiah;Iotz;@{_e@ zD0FNlK_H5Q+CxfzEJB~(px`MYrSBl2;~T<4c~n+84_OGQGgR&qB9LPolbYt+l)FfK z3;4z2SNFGXOX}EoYNkZ_!_Vj?R-z|38?4nkRdJPi{Ql+6op%ss&@@>T$$@(wOVSPG(7c zAJD<&>#3LkAVE@ zOLM7YvS}Kc!X(F_IMRkOq$0O9)7QzH(3Rm5UVvB>3{DJt1)wXRAfhM8!bIdCLk0k~ zcH8}lna@9Nj_jor${mXwuJ{((R7%}!3OIxXUoF)zKM4H8)}8Qyg$(%sR~*N zP%rOBjfh5fO^rT5q;DW!Ch1T}!}08Oq&qO=r4!JhiVi5QQ#A#3tLFzQe`B*BF zeEIk$J%<*2!yy&SM33dqr*o)a-aGNQs2$>S!=}5Bc5tbcov8rMx`yE=bUknf2Qufu zb`I_B%V8Q|9Z>(TV#G`2S)PKJm!~E7Z4Q|=4B9l*mxpBYXAUdzzL8DK@#^`z#=4Gu zaM)P`8#WaNm$H{g%P1kijWEo*2JWHbNe+b{8mfQ_v_$IcDOV~CL`SL9aE&Su$7L*N zZ*>(ktYVEJ{)4AKT|7sS5y1=DAZWUzLI4z^9Lvp2#H12^fJJh3^lXAaAVv`t`f%tR 
zeadCY#XKHxxUI>edvB#NN z+*Z{ni;Xe$*J5y6o9lBO(v&?Ph{0M8z`uWV3PX_Ic=S!C@gXDCa1vuX(I-v z>W+oPx{$|kkvxn;AyVTKyMvB4m-^l~6+aq{Q9kv2s{7w`aoq3REgE%%)ilS~Ip2bf z#B-}*#fwP}sN?j1kqOmVAFoq4ie`kPZ0Q9ham`olNb%8mmF+VXgaa2V>F*`b){IV)FE3E019NjfEN! zTXOa)GuGNGp*RrOR-0=} z2U(SQhy5oxiW>BI>BvVjmt1dz&;X54i&91Rv;$a$?tLQ^gtZI{6rrwb6=|{@F_qGs*j6Idjd#!(T-1L8YSQpptl zquVK)d(%w5A8f^t|8$;Dg2r6;!E*~rW4`fLfnO%!NTz`$Oz?$s1dd3E&$W=WBDTdr zU)%ubaAx9gVkcZm6+~y!)|^NN{Uz})`uK3#zGu5IeNyKU7c=%bILl<@z93Asgmbrc zA_|c8Y+m=D0jZAS!5$Ns3gut`ZF0CXj7BIT7Es=uq|s8rL}$N^c2T20IH<=Y!v5H= z;tJeQ#<-AY!j5fNf5L4xxPzn9!hA7=7!pbQK5^1^HTUr*(wo9k-fi`dDEQok^JVJ$ zWOD`M&U}}eqGZ6V{|Q|y6QWZFtA>8yz8I%jEjqw0WwSC5=SnNUBK3J}h08t13rK-Y z7;x7L*GQ%IDg&^Nb}ln=KzUS%8Y)V!tK^6`jR!jHMhP6>qQ(XE6$NplEI3gJluSE8 zzqh3vyJ}egjLalNH-8uArd<*+0uS`;o_5IXO#*`Fgzf?LTa1pyitbM@(}vDOJMP=$ zppoMAGcJXV;WzT~VYJEfGBBIwV+3;0$?*Rp=DUbfXbfqGdJY?U4f?_g_Dpp-uHsEF z-oMKLykVt3{v*FC>Dx$}2?}Xfkh?a4#;VeRH2ghFJabeQfsO17)kmpvo(cYYNF`g; z6N)3yZMX(KIPEB)^3;Hg7>BU-Lg`c?`)ComP{LrV!YCFR&I+YX_XY_L%0}2yj^tJs ziVn%Rz7f3Z2PEyWu2sm55A@T0BP~`oLR;_DqcHZI8p+`huZ2Ae=^Cv@V>n~tXzkcR zCk+;P*3QL4#JBo!a*45d0fUWxCG~~R)c~K*u;FK~jRmoIN`OKXTus94NJ3J(m(g`k zV_{IVw#YVMjATGrhIE$3orrHS_VTL;^|G8M&Zy=l_!xiDMNZnnLXChJ-Eq@>DOAEi z_u>}Y6c4G@)v$Yr2UV6wO(JRiB_q#vpcfi4Y|PL7W&o#B#C2I0cuenC-~y9Be~erh zlV{m}2)%^!B87G+4+x<`0r>e<;C1a98XwH$r582Mk$$FZOaXj0{51*+_&VV(uXH6g zu%W|TB|}f+_?vS6|3*+!nr)i9l0N)v*2lmY>xndKP1IMSta|Uvx*%-Unr{C<7szxE z{nDEPPdhJeDr>V*$sKm~*YQf%oh3OdSz=TC)FzwL0o-fGmN*_>#|fjd7>JM5Z(+dk zSa>%&{4M?p$eMz>Os&0FTM2yZi+TzjSy!FxD=ngxn zb7>ECIyh|RiU{e~BJ==AaxkMi*Toyki`bobO0d-<5;7qRN-_LFUnfI;(wb`#-T`!Z z&v0B*+8RcdMwTqRcgWj$kW3YzEaT0ZNGnQXQt){I%Fj)iuxA+sOWewB`des&kTFTY zGs+`QQrsl1eS9ac~T*k0Knf)WTFi{ zxEI?t2hPN{n8OnvRZT-EpDk9-PHrhq9gcG?SFBvScnnKyjv>HEUNuz`cz!gKqu|g_ z9_5L{C8uS};*oVK9heqWU6amucf!Enav>$8Ksx^!Borh{iVzV$G!O}ORd*ZJd&EwT z3h&WIjdVvycI;GAv2E~;kT~Xz0z&p2@{tAML947tO)rEHl4HJIAb8`kB&)(5ms|i@l9_m} zBa>A|?Bx0zrT=}Hf<9$MFE%bhPe2WG&%|ZYqHtRvnZHe7a?4&mJg6BNbAcKomN8lP-GS}3VaGvoIX;6tr^BO-Su7cVv6)8s 
zzz^1inaetj@0fmk+Up9Gp)$JcZrt=C6ygA0G)O5=rN~O91+o#n##=&kvpGoGiY19o z&rt~4?-(8I`tfQsXVvs*I*n^H8^_I#o5~+{17n?O(ss+01O^M+TatD?uoL-k5}s{u zzcY}`+JZky3r~Qu8c+lN#o9X46yHW>P+*Gy09kj~6?JFGOn%e)i6f;7KB;0X7EHQ} zhB;Y;xjI%gKxb^d{&^7XNNm-&N}ryTsCJt0KzC|jyg?-tn#R>Kl|y+*5U1&K4`|L% zEUnV^hVP8B87LB@yw|~r0a~1G6l0pZA_=&dQWWxBhOsD1x$dd+&(7FV;;1e&H^wu? zShdYVWaQE^szjV50f{JLRW@sAsM4$m)(cD)G5TrUw3FG&zh*_l9zi1Yf zU37x>#R_0ask245@B(&J3>z=SFimx8JZ+o|++ge-1ED7ffqqYpFtY3RLLlb0b@k9U zvn|;YkHUJ}<&BH4MiK^DTPywX%u+uh2!nvM;ahRQjI+Fm?*)IPLDd3b&;v?(k&WJL z{8~Ow0fprdw_!22?~b@YNd~3DXMpug=@IVF#FN1#rN>cc2`K6Yrh<(grb*a3>$SzL zml5h`uvh$`!nw_rG#FF?KjeCN5Agw{U%irH!LRyp0VjQ9Nx?90Sq$r7QR-t3vuUmJ zd7uXp0<+)!%es@)ZvbObt2P=^0ClKP!K}85Ny8Fcokh0Bcpt)05c2zc{X$Op9*&=mVmY>PG%YDR+&!lwA%a{jNn9S+ETU(3P{f95 zgof*h@o^EmFisG&V{~|c9={1kVd{Kd^18XgcHa`(K8$m*5;cEmx>?Aqr7#8UJ z=@W;(r~JIA6)zpy1*n0NlXz#OTzd>Zd8P#LELq~4 zbtZ#=GWxoNcHL%#;Y?=2+-Zolu_q0S1FuBqZjK5iX-Edh;G2r0M~P701zYGr78CBo zUL6rZ&VYYns@1T~S*sebc-SrG2(#CQcgXX?PUkpvdcJ0$o$-oA zq#@xXFcExuM^O&OERlVgAbO?xQIa7| zj3iV5)QG~k=hFZa$_^rhMCSO&6Z!qh)WMdOz@a)B4tr z1x;Kw#}wLlB8Z}~#62e*DJx-+(tFiNe1vKVzKJ5+H)NN$3}wjOd>8bUO*ah0Lx z{HBI~baa#r;ZM1VfvOg1D(cikfZ$}R1&A4U4VL=Fet2Zm9+Q>1^t0F-wP1@teJLYM zm6cafI4NLi9@Dte8Ze$O$9}cQ=>>WZdEXI}POOJ93A>k`NQ*HE!%MMaYsgE53W*tY zGAMWj*LIN_5uDY!MV&Cnx2CCUcP=K0qh4OQkxexSQO}inpk%&7>#xKMrF^X$PV|_q zTvgJF?hK+;C5^?=tOiy2ay1$iuRmopk`N>jS{F@e{mC1(^Y+-fmD&-PQ#+^dvsLaK zGfwgK8$3dsd2j(TCzUcQB&QZCF{UwaK@`&Jy6_sI;W?vY;tu0L2lH?yS*;UN2zi0| zZd}1_G;ecoot9TqCMq5B$hz*ZdFRMIe2MCi+Y{w|E~~B~K=1Zsx>R61RR3j9RiPH$ z5oOZ)Z_!#1iqt7Dju|rt#sGC8?^~b3&oo);>R((^N750NF01PpxennhYmnN1o3N?It%Pc$Nc0E`a-X4PurW_>W|(v?2v3PDSo@d$$YtQ64E zRNScV1{YJ2+ZR;C-e@O|F=M}j7=>PQLc*=B>~DauGqw_YD?Q>nk;t}PStT}Bh>=cM z3hp*ndNIoXRyX3qzmWwxvi72tgFLKq9H!GP6-yPOF5Are&-5>l!q09z2~^&yKMDZp zS4qno*LBP=uCH>;r;$*M`cC~rFEF-Hcxpd#M_9>t*a%hu#JO0t_e6I_Xdogv0rfNc zm3w>IY6^+4Z13K3aUBshwQ695bm?TrrpRtr!BJf3aBE<2$H$+lA&0T6h7JS&1+Gn=yY+W$~^v8c7T+`@opK=F_(f` 
z$Za}jo=E^~*+c*P99L4i9IlYdv_U!5qf{M;6CT&0_|_W*(_++t`&vWjK|q>c35Z4VaXBF<4Ximq{<@p zLP8nfFp&#VrHD_OVI5ym&G3rFcQ>}Mju*@R#ul1lt5_Dx-}~QNEZd`#a(Vs$hZS3_ zNg3kuG81XJtKa{gLyVjuJ2NEr>kPQN*Ad-MX3bgx3AU1QX2zB@%i2FNBY$t~xa2Gu zaCz%zYsCGr_0{JxEchXWMqL%hvVu?pOD@nwbaf?q*R9tcwPdtR> z2v5wHr=)1c2RO`Uy3poxnr?-|*Yp@YCkteXiMW_pa9B)WT+q*^rLVrLjb zr>2F-M`%i}g#?-w1#Pu6P0h-6XGdvJAO}L;U>XN9gNtF|;Rc3<2YE~%3*~UygAET` zusy1V4nG^KKno6};9$SI00))mLCi)N+zNuS@O@Saq)09urVAX8BXb^gY57Y;7Y2b8 z(f7rHL*g$S2BgT(W_6A$AQhsPfK+9YxVqf{dZ(hUz##*d#1q&6DPnp(Q$)Lo+t`_# z)1iOgKYMPGQ+7`VgV4v^AcWZ@maa`9U(64&FrO)6lxF=9&5QLzr=y1|1$&4nnm!L5 zy+RL>(LxoGpcK`}%K}9~7E_|J%JrS0$t9w|qNV|4F}ZZSt;Z%HYUM9EYPaIxHIn4Ilpy)rjV=8k!J#1LPv9 zVS>)kts$ToK+`X*OnW{Zd_BK>-#U*%>3 z1^%fc7K`ktVUeO!Y_3g+qVpZrrh@xb0Q!zdU)Di*jmOO%q8I|gSRcU$Y zy+m8+PUoJeRKup+$N(q7?kVfE{{)DMcTdFxvPwBh%u-wYqGmVoFYi4yQcM~fU}Rq=0=6)xQ=!J5L4O5WfW;00HOT2uv3!g zq0z>sYs#vTVS2@4azG%ZGt#&-%%Dg?ARyJ@G;Tg8(C6qfNsb7N-=9U(%krA8!H?ox zYB1;Tsd7L~J^xMXAIiwIDNTKauZ(A!2kEE=?{m-i`0|J}PEKp5!1ZAiS|h_z!ytgO#UH7{Xg?L=U4(e4q1zUdt&>lyxSjf1S?#M zHZGwu-V~~F6u3MPfm3cCwK5!`@rt)|y5sQjz|e z0+;Sx;e=AEP4uMDXg(< zqtSO|(f0+o@}N&)ndIXfiZ8{T+5#~3ZVx6V5GQqefM%SY0>bmB3MDQ`k()rtR2rBf zr~y!DIDw)mN#vg5G|^&75l~K~QzhCj70+1OkqUH6E$1<`4mv85edO&2R09@APWVp4^juw&q)3bD)~ zG;Nah<0oh)u#s9$L0+20cNg(n9!k2op$xY0zr?5RZ>|z(5$1Wrx57~W=OZ0jsEbAw zK4EUS@RBQ4=p1gUKz%FR4i-Y3e4|DecUuWu+_oHku45(XuxUtIac&nIdH}Y0Fuh)J#&C59pKfO}xO4 z7Jf(xtho6iX&3?*QT>P%$;oJP^AR2wobWxT#cNDM`=+f395%`}Se(0l%K0g4XEPe5 z%$GJ*!wwo{61JL?`-zw;Ltyi?ad?BJ)5L-dr2;g{5LKiFlAOT-@q>!CK49FmM|<)d zR#a%M9B?Cc`#C*8wLe>1Fbiq-e5zPi{v*MI-U!dAmnewIuWW-Vi^B}wJ1MAUXp(>? 
zV!`R9Hr9BKr_F*N$~!C_E{xrrhHgqQ0c=e1&V0k%n+;4d=r4~oInT$rIr)heILRuw-f9D*fM;d?k1vne9+M}RHRT7R0J?uZ)Z8HX>VE<+UJyb#ilb4^TR zEY}p8LsM~Wc|eRoJO%8u+N6&V7jEBWxT%WQ|HUSlu#Txmxn?d+gJ{yTP$9+mUu$vb`BoXvq$dVS6GYG9kI+muN zlN6^E+g(BxTsswn;HEg+(;NQQqQc)9VyZBa_hGLL`;jP`t5bj5S;kVL4$#}VD-XVO zHbihV*dpub=)zwd#&h1iTTGAzH8df?xE_l~Mz+cz=cj~YE8b>Km-0<_L}}X0>8Y%k zqenv&f3h{1?@8t5$ajjq5x_W;Pzy3<0Nk8jn{sI$m{CX!R*{AR$zcF%NSd@Y1TlX# zbUP%eG;9#+nC8VIb8x%@Why7kS^$DJZyI1PfB;(tp&S$`9tI~MrBA)5loDJXKmG@U zR&1=SW>;plD%)tjNnJHIRf-W(Wz@NCbE-r{M1cVXgh5v!GS3{{N5)agoD|JK3(b69 zNIk$^{Tq*1G} zF=EdUE0qucZn1I8@s=AU&=j^SG7#zAsqF5@8Musb&nQo1s=yB1b$UaaA8(nUa_1nRtV zLZWt3tXu#KhowZRb1J7!G)Mp_060VdPyjSM9I&SI8d2x-&O7hi>gpi`7(=Q80&`B@ z34?Rsk|KftfH08bNKi3kRHz5HMB)J|_V*Mnl}%GRMKYXIyKljuQ4kD|5&~Zvixt`Y zN7Y-K^ai?a7#uwF-Uxwy&LIkqUswbyF&8 z5LzceOtg)n3<>nQMs%Q)=ITJ)p-^yw3~Ew6y&ax0}GAr6f*L>dIi86(WYpFaT&_%~_2(X(a-E zQmV-IA9~d8;?vIiACv_!Y3vN?s@|Q)Ou@6o?fF?gv^sX-8^H zsgA%+Ru(g{KeEB87B`0$A>c??OI~rECFduyUt*;WW}9tN^ydl_1xeuN%@-_J{FMdilEtQC{Ne9 z(zS{dbV~X0p)cvU1w79P*v;g0MkI2+$g@NRwYnqOrX$gS`EV@s`hr-&9J!c^n9K$eE-E|E|dz_!OmsYfX(4m{`~Rk&{hjhqaqs?yMr= zSO|i`vgE4^L-V9BECTpFrv(L+oKeq-sA-B)2|UL~gkW|RrPPxrk=O|O_XCCfK#p@! zYIj+xevTA1!{68-dV1ODHV8q?K_=n+9Xdyb%#Bh@9<9U+1)m%d&lXe0QA*?^lrqR* zq3uyMse~XQj>0btO-E%mC)M0XDd#Q(DJ9;0>|+~|+W^_e?h-RB>mRAGr0UYysf66; z7gEudF7^Y1%m>*_C0=ba;K}4f zEj}n)O|41k(^|rIQmYzM6_F#Ho`QNqei&U2SInfuU`uG!cihA1lappr!kq*4!ZFUI z;3VIB;D}e~XHuI@P_t+>N)<<-NufQCY#Pt$QZ1&XNo^+kN%}KQO4OB!J}!ocswQQi zC{q^8bHk+gNjG`;xWuG9YdPy;8gaarSDFOSyNFVV%5G@TjOKJta z*3y#dq@Dt#2(xxY<(jPZbb-{8Dz7C?J*g*g4<}*In4|=G4j;()1*|;ao4cjNaWX#f zm*ph89GTH%Aq`~lA(jpc^~u^G?XeOyA}_OP4(@HbQR(g^MM`Tr2Ma@5l0HchA#Jjx zE;+0?UsB#=BPkd1AaXpngN z7%e!5iAY_z2CuOa^gvQYWN1B@JYXtFYP^Wv)p*yWb>ebW924|qXMQXf?jm(Q6b8lM#0mBxN1aAT9~L?U8jMt#-YifUsaf`CcFn~| zi7Ym^)9)ylH*L%v9+jBF7%4%>TqSnXV?#(P3;c82%@Fb!$wety!A6Qtnsd`Whc5Vx3DcA6@J5PjH#Y=F zN`@YT267^mAyg?jQZmdG@HtXv@I>@br$0iBbgep4T)T-OJ5n;qkFCP*Eg8VoA{q?? 
zO#1CieT_#-UG09{bLD}#LgbN}`Bea4Gx-3F?SGRPz*t)QLl9&aUa?iUZ5^aOjN17$=;+%a{5 zi^fRehYqF$N!?*nr5!lN&^ALlbB3)JWIrHTX2^J$oRE|~Mj9)V!z^av1|m@}f-A{F zK&}Iw9}g$$ntVe}XZ2B;tGV@2{H%@JrppM5ebhVs;sR6-#;M>BR}LRS>x9nr`>4lX z%FHSSKT3!@aR_EWr17H`;v6A9gXaPHQ8&$x5{4%~6TS9MwTX&-S^uM{Mf#cfCrZ{r z)nQha$&amH@yy?2*;drnk17+jU0jt8y^VT5Dt=tzi_yh_BD{!0Z&Qs>s&xa_ty;Z} zrwx#bnf*n9l;-Q6zg_{v@c_!5v=k-~Q4)D$AhV9!%buX}8uS-hyqzru$=4x|f zF0;|3vfBiv|F8&Bcq~p)$5j$>vii`C*IF%ZL^og8#Z@o^vKEsBQUOU zopV4;;E-v-0^f!cr#Vfxt}89=*s7xr^wq7`CFS@75$mg|t{SggPi<7KNx|{Q9+Q$| z8_%CruUofn-HI&Jhzzr18fQ8`RGybtmz1jDp_WMsB_g8T4?9wNJERxk71}2>7hwy} z=QA=)V@ikI8kuIvexyim@EYW#5qRem&9- zFY)FMFE_eUz0Yvy=|!J9DW#K=MVc31BxMM%NX05zu|*e2nPUB@PJg|D)zfM2IM~|} zOG4=-J?EUHT*2bt4Ri1{yYpkO{bQBSTGeZ1uGgwpSSxHEddjVPiqJ^^m_CR8pZ#0( z|5f^5uS-P0}-Fn?)pFfWo4rK@iqIe~|!5bVra=Ckt?rY_`zv^}0 zKdU!ZuUj&K*Ga*-xrPTe^Gp(4W9Y7pJFaRrgk;16BQRR7=R+Pc&4)bmAy;o+6U|HW z`CG?2ooOfal4?pTT`kL6Ny_t>*3&dZ{Oje=t=FyBjSC4$F$O?am>83ll}*cflJatr zj%p;&A^CSsG+Pl;out@$XPKFnZuXfscSS^s3(L@|4DD%TWmRiktz<@5Bc)10A^W&l zo}o=;!7@^OvLmX*DkYLaL{tDyK(W6!oAtVdLsZu+Rh8?$o*(-ig_QME;?yLyO{B!C zCm9aC^2te?tgCCSn^~_*QtUF(KPD|LSfua%&;9R?v*|~bF^y$YnAhu)f(=Ym z3lWQmgrtA~@Qx+xr2O@dT%2S5YY-wK4x)<|ERKjrQV>Z={6X2Gq)L}CP0AGFC!s}b zsaO!liG=ogUArvfIWncSX7##tQjpAkW~|q>s!K}Ir5Vl{_ACqQb?bHOq>Noyuh;Dr z5jJgDuS?3+t=FxaZG;zrL1mr#S?IHL9-423j297G|J!2<>iNCzI6f(aHd zZlJ*eiwqVHAS4zJD7XXf0R}u|0s{vR76=6%fS@=91l6-(3I&2;GH*me!4eCI1cNCQ z3%}ak5DW|qhPE(@g&%>ag|Q;1Q%cN26_}_*q6!5D8)Z+?lOf7bW>ByNAGLD~X7DHy zg-9$YTplsm@IZh<@xX9cG(0FIDqu7~B3Lv$97j8>wNVw-rthNR!5ut$x`-Ba>vcvpU(Fpm-@=7JnGPe1F~ zG<&Ai&CKIT`4iJWChf2gok)t+wb!j{uba(GOMBXWnf5dPz^hkg+Vy5q(lc`g@R4Jd zeHz;9R>$aRJZ7Jypc9=nuGd905MjMo#c~1x%si({$~%@o06TV^lakqV-b%`ICL=Hc ziEQZe!~g&Q2gJg$U@%L=911<`6956h0KuWaXdn)T0YN|@3FenTR z2LFu}2=NnR;TaF>7{lX&=8 zhG+sINM5a&y!r-4$He5uu|&YX?MZW!H$~5Nj+q?utj(GR8&5V4Fa6q#sA^F&yUsU> z(N4l~!;>g@0a{ofJ)s7`!K!34fW0m~ZEyff{*S)sFjp@c)Xop8%DvD@+zRtM=V*-5 zDAB^_WWmAYRlQ?5i+h)&Fz&Ayg7cbCB;mOfi~lzJ#Hl{}`aTPDf_sYPU$oy%@ftsPLZ2+i 
z1D?Dm$U+gvItcS~Pf^vo;)->wibE05`Q-ZFYUpbf13&52t*OyRxolopIHfNUN%feD5sM%kYd}FjU2ZBNAJKl&XS8Ibmro(%8j7>-%!e-}^u*RZcC7P_TG<1fH zfP`p;6^eAY2f;91VPRkV=MMnHKcoGMQ?!4)xztvs%x4Sfhk}-cB~=#3coG3TP>P0G zSi})Jb1ButL!`1b+Pt1Y%AkTRqQ`AvDd0K}7lN6~L$|QPLOur<7A4AUTB@SGF-X>h zwNfQ6ri|p`$M->on7TE^8gO?JEW$41nx&H61#VtgS%gIGIu|Fdg-{V7B*{gGFvS7U zAB}?0fwc`n9m7f(R+thua>KB0&B#CLiMkBQc`+>hkpm&0AOWx*!x~&H!Y2K}%fkA$ z9-zJQ!l0*^W(5-#)+5aQc#{iQOCMjrnf){+P1clYci}HBtQ7*&tOh1Rc>PSX)w_p5 zkna{24=Hg&4*PT)*AknGip_A(3Cs8fHTfOWGWkEDVvxdW=@+sh z9i7`!SO*c#F5J%~v$$_Bd>{Bwlg?l0$CYzve!$)Al{|ex07WrQVg~{m^LHJ4NbIlC zqf1?hj<7C`xHE)but!*@6zC{&;uI`MSSiv_smzX!A+d#gSBoIk$x2NnQpQ8l|3Rn5 z)J2jfb`5T*xbl!$Z@WNPF#M?VbqP?d)c984upq4bAS*h@vhCBli-)t(a$1=+wp>mW zY9a*nQ_@ZVSkAYOA$lqYs})zeCvfReeCT+IA-P%VO_PhE%;$bTdY#q2Gp4)s>ND+o zJ_J~sQ%+AwE~6PMgwb1l{dw?kJ)vaM{iqfuljCR?&qo?J^cvBKHGDodpU z)H~MDy%Or4=p8Iom^o_NBteTw)Iy~>b>V=a(B^c)lcqG{caX8Sdv!)Fe99IQ1u$^D zU%?(ke1boPE?8L%&UnGHtdto5lEGl%1|s{HaMXeq8TekrV0~p|hSy2p24k46T`$ z5R+tMoOtDI-0>XhU$D>v4V?-j=v^HF^%E>d{clxGNvFJ~jyLLAm~FMMcr&zuML(e2 zf&C}i-J;6Ib?%1cT(o>6&>`urqW6F21)y&}O|k0-(TU=<3KT1}Qt4Jqfnk}}Ilazj z4l!vdEUtvWG)CYK6zBig`=_rHUsw#~n(OPv8~d*!7Yp?oux!juP&@~Pa=?<^V+KbQ zV5y2m4w~C$T?Lynz&gm#B^QB(T%eYZ;Aw18Z}8=!!Afb9%{iluu>B4v{Y=DO_W`tE^5wHO14 z`m2W&`ztd=W-ERAVau45_QWe`pW@e%cFxY-!cGft57+6ecY4y#!9c2!EEqzySUJGI zI#RML2ud%%)bg)-_vpz`@K*|gd;|i{w~P3Tpzq$(%+#RR_$&GS;5MRx2|h>#cyF2Z zAXp({4+_}IU*TY;Fp)}UC*+>1GqL_$&tF}rQbB)pxFxB;7>Bt+yyuX~y(}idXXE^~ z&yymeV;~@%^DkS(s7KvjC1T|le+BAx>Ue9V66$OtKW!t88?7wP9A9Hoex3bPczj3F zx^viE?V|qGy%Ij<^>vW@?=iW2qVT7YmSFx_(smKiw8~#W3e4-M6TRQfUs330DcUtR zQ-mYn{1xmEO$6#@3%K>TGfNNL*6Dsonm&R-|=)s!T-n@2Z64ds_^Dp3N}}iwL9bw>iJxO8U}r? z5E1;Js}?oP6glXvG%Ewxfv&Pb=qlX)0g4Sw7-n-_jG>C%-pRR&;1asZ2b!(|PZWw6 zs$u)hqlR9A&as$^EUIHuZBMx{i+yG0pp($jjpU$=@alQuyc>yCUYX1;O0F9RwqS32)J^uku>rC8jUHLnSbW2 z8n{YipDP^Z+>y^iDVp2+!ME3gfBW!Va{PRI@mxp60)Ec4;X>Ay@Fk*I@=u>?mG$O? 
zt_*|s38=4~5=U#ea6KFvYDZJkZ(6-XSF|Avr|rhfgj^04v_)5T4$N`@wT^cs8C?bb zWGVed##Gv=c?Id5Q=1W@E(f(IX#x}?|7oR6@yO>M)KgK(+`tY0pl3|#!vs&NWB)hyCe;mV7T{Tq;fvN6j>bJY^z>6lOG$5jStt~4Y`dGHP3 zk3v2#pI5G^JTyF(=qm~@@)4eTwp9mYR}r&Z5x3l%)7!RiPrM6!A1)^yK^(7gM^k;r zMPMAud;oA{VLm=Nu!>W%?&b*naaE?=>n|Pt0f1a#&D5ul%Sc`X6IBER4q&Ip5cl<1 zYmJ37dRSeRC-{dJJ&=gEw0<*r2S#!g!lx8r2WS?pEI_5$ zPRc&mZ{Kcmk5=^QqKF0#EG8SI|Eci+9;%@_{MiANG_MfVVb0$0@UM@ADeWU1_4tD^ z(5U^%ry^6X_;K+}xw1+QLFG!(1hPR7pk5S1u0Uf@S_4I{Oj=`&6(d)1A8!-g0I_ky z09&QX+Bt}gE5npHM(B+z1V~H}faoTk+wh>{N~sOC->W>e|9?@Zu7ho97{rsfB32xL z=Z&mJsx|dEnc9-F$lN>Ktt{+mb^`Xs>aO3?Zr`Y_g5B1A{TtazmFq9|-e{Hd${(&) zL11%Z^%S*I4S>3IAAM=17lX*p)4)_{{5iM7I~PP?!8$UG18`POW32J^c~+#PeIUm*0Fa@;h9 zmJDDokkvCjtW)`Mse`PrZ@X7$Eb+;OBCI(zpV63V!YZrb;Di+k$V>y@JZA##uxWn!8^AQoRN5@C zuu!bGLD*ZNpIuZl*LZw#K(BE%N%S)Dx+a0=Bkm%#z+>0nX3|bOcloAs!ZLR z;u|Hi>I#KVmaS+ZW?lZxds{Vo8{De1g5#=g=~L!NH*VG9X;b8IoHQ%1?%bGD6)Nf* z1pgPau`Qlh&B^2ExpZe$$3#m5&nnCw=Rj0tRadJi=mrOAPqF%TSXSlmWmJMF6k=6O z6biXBNvu-MUUn133G--Gn_KB=Hw;{H9ZWEgK?F$H-2k-Vt~`NT$$%sj4YnG`593cM zLd-+nS4A&qjbGLNgVci8dC0mdkQ+3TwyTQ0@Jtc12}$o4MNucg2l1#4B0|V0a*_DU zp2$mj;@gvpWUSez#>@bUCI8gIUXzgZ5A?7`mUW@ZMRs2B<4Q!sk#M>)h1ORIN2sUCna0an%SD2TtDq`Om@K61# z3hC2plv7IYhoFm5Q`Ms&^`x$#)IL*|gtm0Sdhkz+L_#!ubI1PJTqZAH+T5?E@3I&8 z@7tzWRpnL?tp9r4k~qJSIMh)HTCgi^8@(V&W_0O>H<6uzYFT=xH&~uURT&vM*AaRs zOj=M-EPSg_rRlvGREbhmml)>yHuh0#W7mR~o@k~NmX^Yr0Q|b0)C53}(1C{Xx386J z4a>g1N7vkljyR!sr;Ua<8dxWgbfJ4yvS91#(Mz{_#qIH{|APBAP)a) zm^eTOhkG>F*P{bg`5QNUR~zsr;_3+I#NECRLTh=fIf_#3)rgPxULu zw%NxD#z3U9AME0ydz6L^RXaF4vW`|1C$M#z@t8Lj#{YSv*qNoMt?J+_oBlL0bPUv) zdZ(-Y6L7Buf0Zq&_xE&H|L0K*)UvBI5P&qMXx&^Hf>h3TUqlI7r1& z)uQePO7iE68nZBsESwnfOVyAetfYgwCL!ibbO4702eYca7d`b@<3+o&c`e5qmFYg; zsE+p*uIl!vq!ZzSYXKY;&fmgq`f$)#ca3m^0B?YKa3S;5S~_=SolS5T;yy-W>oolq z#p~!od$T6m4w<}IRb8s6)Icz-_A09M&Yq^jCKSM{&qTE$O^E<<5R2VR5NJRs%e2S3 zi7Kc06AmkbSn-D{C;7(-P(V!rQ5oKCw%im%Wx|pO!@TW;IN^=yZ9`-(4!sr(o3ZZQ zx$0W1K=Py$EK)vuO&SLlvA_*6ZUDzEu$ElJfzcf=OS%ZbU_O0jQJo0gw1=V0pmiE? 
zWQ?8dPcd~AzYACfxex(y4Hd3)Ujk03WO|OHktv{{wCw>Ndo;42DD1`v<#!DkbKup_SW_0Wk`dc}5| z@8mMRL`a~4igu)C;On>elzD_SaG)9;1<`@3@bQXXPzY=*R|IORN`~No|4a=o_*Std zSEF762b^~~3XGuQjZ@7q%vY^BG`VLH@%NcljWOox&sKt}Hl^mb@Bb5IV)RacS-5u> zlCw(JHiIkhG2EcFryNy?9~$@bv+eBHULpk$`~7c<_fH(2;o0gBGZ);vt07^<{a2(4 zDphZEJM#X%I#}=I%zT#~7IKIf$=lWh($!3pi7W&*Q03LlI$TEl!hDlzX`2#5+-=?; z=@8D9=r))5_jj+OJ^6FAl1-CiIoTit`}eMs(KhVexGx}crmyCHYpw5qiak3REj;4H zeD*}pFD->J2`*;xhXwk}m|*9e1-qfBI+UpyMxZjQdq1Km(8~*&Z^o2vzGwnfiG|xp zp`j#6uX{lS5vBrFafpKBKvu@XZdst>(*-KXbvuIr1C<5Ne9OG8_;|>8v|_6(k;ynA zaOw*OBzc&eU~4Lw_7eA{6Y856rKk#vFF5v$(Gb2vS=`6iY7k8l57?zxHjV%)B&gkk z`XcTwMN@2j02Rl|UoC=_^hZkiKB>X#pA=Fx&F#p3hW4~dj+`JfB#g7 zR_}c1FLoFmyD{b{b^j@>TNc2xr!-Ydl?aA3RO8DGFF*fGWX!hV4~%B92LY(chPVJK zwFmS4)1>nyz(wYE@~5b^+%M;Jp;0w(N-KX#m|S);Q86aYp9-mpR4>|G^nnCa{BF() zU?#3VwJigTq#JIPRSlX4JK3K)6!ct~RBbI42Vh*O9aeucrUD6BZtoZB%>cQ@tcGh0 zZSY{UHqdl=uH-MOe36RW;Rtxg$hp%`&}T(`FO89Gs?;?-lQGe1C1q-0Pgw(YPK*AP zu}ln~ooZ|AN~MewWv=wK#mUihO%Ko2w&?mXtW?dqNvCAXHTu}&l=k9caME8*n!%D- zq%GfdyqoakTpdw*CM@24oP6(?bpfjJ4C2xiOv#7wfhwC)7hPmpugkU{3g$~DK)08S zXUfB0$b=$NQaz-{(_EWs_Epwms%{szv9V-I4gX0~;2UL3sZSv)Pm1JGqchshY~sQa zDPT1%AHtU$uf#mPDjRGQZ>iLxY1D(t;_MQ zft;ZkV#wt~CGa=Yuw-Qa=-7m!48?XfHQ3F?7pc^VV`-dcRw`I|Q@vB_>v0;7@)zhx z-8sJ@N^!hsL}UV{Qel%*iGP*NRxes-ie)_vA$4R4)2xOQrJx_}7xcMN3{}e9BWBA! 
zr9!5ii2YM4L`CM8+R5)_;%??t!aUaHidgSN(nBlN1@@jRhKf5Dl@bRR$jD;K8#!^n z!N^gmRVtB6-R;@8(xz05vlo&Jr&L`R;R>2liZ!ME>$-koPmQsyW<#$mR4=v%UemUU zKstwJ7zQVx4TpP5weE|;7ySqkl1-|x9}x3$QMGHa*|?TDWxPB&9IKlvUqh= zMP+8}z2;OBJf3tP7wSHb`wLlVkUeTKCUBm#e*9=eJNis1{s9$=Pg82~x*V!1A*Jyt z#MzVr9I#-eK$;+J7Mr?(EC!XbnUSkygCOQkEWB_jgd0*a1;TxBOuyL`AdSYjUH!0G z{@rkkQVyPvQJ>s5E*4VEjFpv{ga$xMZE5zC%x-rPvy_4lWDh_Np4q1!U#3%hSyNOw z7foJT;j|PJ;h; zY_A+yzvE7+=-n(uaN+;_L0}};r2HO z4#K79rBG-sv#%iJuK=oe;1MMNZwdwu>e%-|Ro*~-cdq_l4o;>dZ`e~(lI$jK`ey(C z|Nkoi9{~#i5&>$8y13jS+BA`Kp^(62Incz=;~b0;Z;4rKFdT=CgcOz`>L^Gx%6??t z_K}b^KK{Tuzha>XIR`#(GbVuouKzw-YFR%?xXjE{EQl(75Hsmm>O+W1Unq7JG2cK$ zCX9y!WRi8w3^OGF}RCX_@IAW`UZLYtkZv}QL5{I806KqO2f!KkP5Rc9w{iT3iKD&Fof zRC6Vw%vYXNBI-YFc6+24O; z{@Pggndl3(DNm(+%3BZ01J;i{CXD-}?c(zN7ppENv!m*=c^#c> z4h>%Es)n{84-x&NM>9L4J3BjjW?Oc8d^1U-5t&F(Q!852+&ccvZc~4CQmnu9u9vOp zzo7d6mwr-V94w5=W5Pg0X#H62OV|7v((d=Wu)VN9z+Gg%8<8m%g-dDctncqqWq0pQ z@x9Sr6$`4`qP=gc*XQ+?mtyecS{mOSOLQe(9alQj>fjn>bBjcyFHsd6x`@q!1cZcv zCQ^Z+JR~HNNQ8s{31bpzh*TgbiH1RenFwGc8cD?cJR+eUAs#Kn+`~LV%|i(zVgi#$G?fT~hH+>Z z4v9o1!ZW$f+b-=VsS7kFb^mcEEXCGhGo;p)QG0%)@XHWH9EYj)ppm(!w4ZB*5TD5 z*O~HmY4Pf`7V$3e>Nt_?$~K$vqCl`_&bp0vr@hvt%2uPWsL-d5-u@iz92tv+hUK1* z7U|Z(UDL(AEiIRs-m#0=QPZm(m+sx6$}79AyUN4cpZuNKc6RJ$yZbS+t7H1>t6z5~ zEsd@rC!tVL;SXb=4`g0%D?y zfLLkER86}FT$B@}F9yFL0jiEbVYE7L4!?!!0G?C4hJ7?zS zY%V4!ZiSOkAxd_&2*P3S`btB~S|huHSU@i=oKl;#F2bidIJHQwrqqIHt%a|`rNRQ5 zN>a;bHA2Y>-If~sGM&a5CU4k2Rdg7;!T4Fu03tPFTw>eEc|GE_6;<(NYuc6~P|4Fa z+%4@vFD%y$YkO|apox_W^LyO}aVd?a@@{;bztk2?q~sokRCc$~wO}--d(uT&pl%LzC$yO&Lpe1Jn^R zG8k5PlKmW2fQxyTqe-$HVWcj!#!p20rl{#gJZGSk8Rx+79ocisqYDoop$OJ9b^Lsw zO@hTN3b#XqeD9;-w>-FWg6vS&U#6JjMK3MhHKqY-%;ertyjYCWttO2)?GPk|ZW^pZbU7fW z!A)`;@rEcy#a%dJ6SRx68xf)zl?Vex(_t^VuvL*Gul3RbkI{tlnOD{;^rQWB!bhQW z;D{-NZ~b&(?bFS12bY_tf^O1GUQa+yvLoi<$rN#A$R`c6pILYbKm(2l5U5m)7Oc-?F#wkm?}0c}1mDa_+S@HWeulY8 z=!b>-G{<$i5V9F)_oMyy6_s={BYok2o&fF-r)cJRhE~9>nGicSBWUGhQ}6Ty{|FW{ zM)_=p_OV#g2WfekO8e-b6(741(yJ|g8|R`_WMc@c0@(Kt4|MZ#jXqG1rEM&{ngz|G 
zaIIdHU2{g$0rKo?S+U|&K!g*o9E6|=&P@`g#G(13ox^Aw?;{*|YGDHpRIIA!wNoaf-PxJ8vzM7RViHua}PXUP@o&P8^UYuS)whz?U6>^ z-IEu;3uE1SJlXKa8IpRi!VN>7<=tO=o+a6IGXgK%PLD08tJ|#QrWV-B$d;llQAvtO z+ZOkS6C$OBinp)Sw{h=nRH+zNz%~VdQhrGkb*rC7*rfJmrEr(m33<+WTVt9`I>V2$ zx!3?HPaq-gsZ5i*;jSg8^(p{BP|>?Zvni+yOLWl8Q4W)-QVi;Tqc)r2)NA~H6O`$c z9w%!49E|Z5yRc;_Xx_sc8}Sg^aU!xBX$s7PQ*l2uq2l<+6Ke{E5K3Tt@*3qreIj3u8mZ5+j2*uv=mDAniNZ4fl_ua zN#~I1V#oPlGz!21v>zq?1N|hpDQD7H&{LFSU?PM_gy?p{6E3bD0YnOgXdu0xrjX&g zB8uc+oUm0%0&RS!;GB!>7pSrv+|Y(Oy|TaaCs>CMhkEjlOtmXAC;PFmS}%I0^`}aa zIyzfA3JyNI!@h+_{Ep|7;`kgIt!v;QZ4yFspBh}$qGU0lD4CIUU=F*O=~?H97sFIV z*gX<`vFS37@esZZj9Ch4Ek`7!t9D4Ci3OIFqf?#YT<4XFH6V{{Owk@-BwO>-h$NhYd3sM}S886I!S4-(4B ziw4Bhj=>BqIJ$2GHx_9T3Wj9>ic%}Q7<`y`$PE*$*;G)dxp~hku^gwYZxejME)7Zu zX*u8}NBq`|z^ocbWB{U8`?1ju?w;<@)a!uLz1Rm-oiXhTy-nb=5N;HlW}bm)hjOvL ze}tTYVgppRx#jGHhawArue*77ug*)pVIDNqXcUA2Rf+7CF>YfpP81XOij@c^C@#s= zc*@i%-5#|N$9pMVy|)R1)Qou%a;e0L)0N1eh`IfXr$(SiFq?ePxEBF`sapwy6D;ZA zpvu#hqfoxztyG`1>VdTCnkwvNR;Oiw;$dZuVU9M&ZEtmHoj$ucF82=GS-wkP%PBx=sNMYHSx#ZT^pNNuZS3fC9K8Wr()oMG&#M;Un^&6y=ex&~B3 z%^l|?fXozLFqG~t1+HEuA|~rtnYU<81X*pdK)9%@F4O3~2nvwqn>Zes1jsW= z`6QG5G)XSGj#lc0&E{w4!C5hZbFmuRu-ft)vF1o#dh8A}KI$dJd>saR=<8?mLy~=z z%T}rMn0OP@2{KAUhbe0Vy?WwX6^BGXD3O!0GC zK!~#XO}pqYA_bNV`DdgAOGzeO*qbARV5yRYhRjo!u<-FEmEtKz44W2NGMdXVXe!X9 zEd`28Xh{UQIxWD@beu@qDTw37WAPbBIwEqLO23iFh=&qgdKuhsq<&)yUF@DdP!WtP zPnA$u(gOV(Z15TnxJWBKdd~0oT!e9Tt-xo3)jB3soz$its6w<%U=R^#Xv@v7$C%hX z5nU((q*R{hv<3?#3G63Fg-jXT=s-X7Q=*9)y%RGBv>k@u5Bhw*!`Qhy6ICe*L4c%P(0%wX9H<~}k&`+>`K z{4~oR62vfhKxXV%wB45ha}8>~IJ|5tVmNv&ziQX#>I(hMA@=n2KALx2z{+jy4C~WK z4&~&U0WZyo^{Tdblp#O!dxX){3m-5Qj+A;4`c-@#%l?h+0TQvf%Vl~SaB8fN=yen~ zAAJ4y(eMYAd@gDo0~MCk+a~Awv8f=bNv=2{4h9GppFgm2JV|#=TGm^c(9%VM@F?A_ zxVK(lYo&j~k0v)JCP}>wY&pT5j2^4IdEl73SH1hNVTp%P($A~g(%1-*-RM3%@#*U|Jz=ussc5Vt~ zr_wq%(eKGB)8UZCht>hDm0crr7LR?V^W`~!I17#b~ z!Bh2^qq!1dlA@K0Jt1(E=(XoLu{!@Q*CA%4#b+8{pUf&y4jXv+yK9$W*3|^$w(0fulCQ 
zl38j=w%o>@R|Oew-V6m0aLP1vO&k~6+mi+R@=Q9H5~cuylxnPP;<@v2BZFi<@w=;@2?h}ED$r~lgX)6XjxY4o&K2-rxxd{^ko;T{0 zHpVaChb2P@#D(fQ;*Ifds~P-W!vjAungqiT&}1npYettnNcJ|G}FNeQ=u(hrIZKEquecgBE3#%qmw zkq%xdU?h6Lp!b2QV4UB*?F!u?g27)mEaJg`2m>Q3Xrkz~DG3bm?$YYUF9Xz~iaP*w z`^8BIjKm*{Y7_*)>&Nk7=AR~H-(zvg7;6spn?enjl}piszB(myx0DhU{)Y=kG5nwd zU99*7zuj0v>#UJW48kwzvo?Y+QLq_PBAkAAvVG+Gy}QT@KwTRlYR3D89~kbcjfbmpDiU zXbJ3%5R7DbTTpYT57K%pk3xbaM!bMbCYiNd9zrSCN9GHuZuSoK_>!hjoUonr`P6 zeK7;`;3SuW(m0&AXi|*=v5lTGr(~dqjUe{Jg0edr)iP@w>rT4-YH7k`1tm#in0W2C z-5oeI4gtVv*flkz?`_YOr#hifj=vCA5^KW+r)H#*f!b`!O8axPq&u0KY9f#sg9<#1 zQ9?jeTvQMup_R&)M|ZFkqe5CHYuq&EHaj;%6TfgxN1HI@jD=AYm@iCHgsY#EdBZk8 z-#>dS1telQ#-F#-j!6-bKwB~Z^b_zPhb>kY+|AvJb0X*Sx#6LL?09_9IPZG*MOLyV zqNQ58NLtf+JQs~Z==lGC9Vw9#bx{{}Q5Ti2bfweLN5AMrPrSq{9*^c-WocqS@Qt63OE!#L^1ETsqiV<@9r;>Y{Rx1DxBVbebywYWA70j-x9ufjc3k}8j~w^1?%%HM)#-aLBw9t}wZ}^95i#+2 z=#nT8M*Y(2>ay;-FYCV3W!;x`Yu%fdb!XjmD~V4&tj;kD)w7&KBibgII1D<>pX+S0 z%7S`IW#h#8otFgDS>_C0p>xVCE(}mXk#s0P@xVcmKmbLE1Ofp7Xc|d}2M-=7*$NTK zMs%*KJ3`$?mvxt!ozWNRjDySwX5DGo7{{ybOwE?gR`W9T_buzbtebJ%-QuE+dpqsB zY%H_SW*oK|hb80i(X`fVBZ<%d$3z4YVo_q^?(QDZg|51)t9uv~_V#{tJXU2FjjY;w zCZ1<$ZDf7_zGxUxRP4XJRFf|08u{*bPxt$^@y^I0#UXmCr(}tUdyR97ttyR)-B>shvSJf?Lv@#N+C&*$@^u{ZAbORo(UH#NQMKJ%0K*Zm-=eRWr8u&?gR zx)}%7rrMP>RkdzY-`-{2*1Ahk>t-CJO1`9tqpB{~bY~|Y(J_wtI&W8X?N8Qy*-Tiw ztOvHZoMu!rM$Q>yjNS68Mmn^b;D7`Q87zokixA^joxr-&iyN}O5FRRIP(fiK0j6GgnOg0&`O1Mh|u2C>=;Lq z*%LuC+Nv!=o<-HLNG^2n5W<;w4mq?{bt1l5Dfi}mDP`iIQ3)s1CcqhnhKYXJB z2b`}1G;S)wI2aX5Ts**Ru#l(#0fK@IG*CuN_)zh|0||yg1rjWf&?F;4i3%G+h}h7e zV8jCz7}T*^MCJ*@IA#m3)2P!hK68{iIlClPUpvVcXK55BVa(Atjb@YhKT}1;L~dqA zMfPQ@OZK9*E26eWMX<1rr*>=UB?1w1wPBH{2bK{H>#)|eA5f3-u2u{VSjYh30t*)) zSjb=jgM~|yTJVg@Lh3MIj4@jZld__dY!L0-nmKcgF^zcVHcUBK<kL>c|#V!+3BRsi?Uno6(qv+SSn}!f5o_O3jSJcFnS3*~(@#EyTL7U7gTq zGC1VOW?UjOBJLSoWOhuTaKQqE3U+wMPN7qZZ!DEmE^kW0EKZu8YZBijN+ynjnQT(B z%88H|2i2YLWHlNY9+|OdZ?h1(tXh4#>e<>&EU=!fXVzngZwp@tehUv#+j6bFki9IY zvyx0co~wPL;xEsEVuOSW5*|W)*fpXH6Cq;}u5qc1tF?H%(vS$*-7+w6yyRZ8wt8Z_ 
z*DN{C9U_^tjXHJMEN5J1&Z2si+?nD=VK9ip;M21R03bvj3&+FpD9z#lL4yxu@;2HA zY^j}J5ncdu?|7CmGqZ(u)4C-1TjW}@)fz&T56}IkRQ2q2rqm1yzB%bY4|D&q& z(3K&01|}$GY9nYRKQ_V>4p(pEX2%MIogAsLLV}`jwZS225tosh6fNY6fXoT+Z}#4RR71nauo!95+-8`6T}Q zt00R3m{#<(k7{UDGS;)t2TUO&8H)uZBB$69UAkkj27sZ3ctbbT$Y`7qhhw~qLw94Q zfP_(TLB(Q1+or3TQJ}^LG#3?IM16#`#)s4UaUSr%RkkTqkDzzXPXe|VXFC%!V!=-> zibHAce9M}GG6-sH=`?ki9&Zn~iP`#AK4^hlo8#9*g&29QQ88lD>BBhVR3~%(-s^}4 zKyA={{DwMpWGqxkoiu5}pplLmDES+>0bgRw%~!wvLBELTq%^W>Ll8)iVxY}RnGIG_ z;XGtOVxJBIlFFWJtvG2I>V`3^bW)~Bv!M}AGhs(;AkNmF{W&ByS|c$eCsPu60wf3* zUDQ!_4&#oLzgz_y*Bqyj*epp*xAJ}-Pq|Q$gqe3v@hDLFi36F72fsDL^Wh<`ppsa( z;7g@OdRmO)oe!V7Xex8kf`E@?)sF)r&XqfyfPE#Ql~^ zy=rA-CzJ<$FKTuwM-WM!%9b!ZuPV|*0U}Smd?@<}BQGVOVuVcxMy?x>FVfIK>#vI- zXA?-b?-SP%(Bu7O%j$E%1d=!ZxRwVrje|K3&5cmOSWXQJz2OeGs*d4VUS#|R2+C0v zuXs)H@fd46y)c%l%|WdBa&&x+)(N!DV&VZ3aeS-Pd^0S4k@Byf;~7S<>e9yHCjblIUM`rh}m`Q>KSUbV^ChI7S*K~ z7jN)jr|(TXKP9_)?P(u`fYVW+444pqhyYiYUekSAFQgpi)Wmp@ncwt zBJgXNgm&6fUf*gy?1ZO8xkT!K>9zGU;VwHqnCbJ=S)3^7##IQOk%DhjEw0DGB2q=8 zh2))>McMvsBzC}VjwCaCYTr-5GaJX&3|N^M`nn8+>?Hc-3Bl?EnZ=_?J#$hd$ap)2 zttK@m;4GhgVYGzFm~}RFqXf*AV(X_;ql3OxRgs&08(@J(>rf7(t+7q`|E`h85|nC~ z@(gz~7}onBGa<%0<+ml};67l5irk}Y&|$C{z5oLXya@<--02IvI@Xbd#SmCY|DTA< zSL8rwg1-OoEI5|Ls}1FY>bGC@51cIErOsW2|9S>3H>c=PU87~E;g)LA6?cgHdpnZS z?>7c$K`efRqxKwxhlIuI1WH0EvKoN_@jsTMd!j+5dgq^LPZ*9TP|E{!y=Oy+CMZ;Y zIQC@_FB(JeHZzbn5SA`+J)3*Nmb!k*j{8gOcoO>_j z5YyGwldI^xM|&Y2u#NDSHvaXtq%>h>oE=389rvKg2V z@>K)Ga?hNW0%!1f;ZKFfVv)JwxFUm1@hyB?x{uv&*h?&;n|TaVAu@2<0APTv3a#K( z1=uL9aZdMftewRHJk`XgWprh0mp=MZJqOe%ouJACo>JocQLgxag%MZP{r>u?+E|)a zI4t$(Qm!j~fVNh@JTkw0n~eDy2(B$@FMz?5A%qUcI6$F+h%>wh-Pd=F-Yw~sOPP*Q zNiliq89Tqv?psIUd3TC}(NfxA6gE4WY!&-M>b6s3Q9<3do48L)GoYw80$RXU*hyRv zVd`cFG1@e&5ZfhBRtd*yifqKF5Xw+dB?#zJvHqTEq)Jw4*@R%)_#TJyYR6G-;+4)i zrVd6o84-$GR!}=KG2$~05XSYE2}Fzp)ph1M8I>+vbNu4Nrx622qGAFUEARB1>HAOm z4-|(WQ8XMb@ptb0*O}17Nu@swB~~C2zRz&|C>%w?wLqyPD*C7tS0Zb{&Gi4+yVX{KU1X=#Hwi?0cCi28_4{!XPd@pxA z2(d#qU;e3*sMT~h+z?8NxfCRez71(T@ly8ATAmXnDNCa~7rWUTplo0U7ee_nE4U8< 
zf#f1|%pjpzI&-?r(aTFXeTV7oI2H?$_1q$G`tlLmV;wYo3+xP};wrY0XaPhhCyH#? z0hlZfLSJsZg(CE-&CD*}v71xpsp=ZK_PTOD>{2YHzPEiUxJcx;_(rnqbx7}+b?aT{ z@c>*vqrXh*G!WGUqgDLzfZjVG%`d$q`AIc_Gw25n& zqL_>ecuR4ewli!Ikg~)9z^qCGW-=e}{vL(bY?756kUpdvfETp74VZc93M?52GQbrL zQByJfHzr)|WmTiy1J2bdHr=XPK-WpOAc)e{ssWveHxqWXgCdsUq?PpI)rOSD`|#a1 zdrjz+A;K7B&V^LrQL|wOSgV9`-A`N2aSA2tlXeMfSE51g7{qI9e!7q@a)l_9nO-U^f13}Rd{cvvDpYR#C@erXFoZ5HQO zp4RBlZ4Adg^_FVYlFBqMLu6(zu38ZRFrBXSES=L}N)R{~Qb9p;u8sOWA*a){+Fg;8 zr4(wlL%7IRYo>Y{7+Uqteub;G3LhR^t!H7m+I)9>m-Ui>2W+%x&D_;`rza7u*jok$ zz3fkM8B3FJ96uw@u)SK@4rMVXVWFWI^@LMo=uPqQ63uu%{4yO&=pNBDwT6cI@TqJq zETvhfBWk3;;6oX-9NDV&CfZyO03APgq}>h$;BR&3%eb}Y)-=*dt6xfPTG7|OIq~E( z>0E{=v=f$#OoYv3mGh&qrW#c5TsCK_vIj(RnU}FyUJ`1^-sQ{!!iw-CH|u8~ol`jq z0?P{FKp_p3l--OXJZ7lmBB7UTA#!hhV=$0VVUZojZ;X{}nOc%dZ#-`HTkBmLPNI3#bfsGa3${!KsBPz?)3* zwYK)%;XKIQ(Qhq86jYrIn#|VxrYYSD*IHW^Kiimc!{|)AaDG`z2y%z|=IEmd>&z?++!%mfEK_EO@htQNH;+9{ z6s|+GRX{i^rG-1Im6iQxMyT0Uht~2u;2#i%vNXK|Z#_u{_d>r$Kms1ltjeYhkK=2x zOLc;VoSnNNRahuvps6f+=;c5aTw(r`OlWB~j?pyh8;DlU{DxvHf8joGNk_@F2(@VL zKTMZJdQ{D3aIqJYN^B{HZ~SB18O3MDohq)-l`qm(#1^)pmMq7gA+%9x#vbA^TFwI1 z1u@cTC+~6i6bW<^nI#YO#={1ku|eB*rE0>>l5Z?GxH2~le&bttvuxUU@rbOHfu`{; z`ZhdoQiIDnH5+3(6mfxPr8DF?KTW{65ikYAAlfF*HllB+;Vc!qfYcVcl}bkJf(;tr zlTzDO7&ri*WSD;*Wu~-GuHz!yq~dz$j!Oc=3lt?Cgp7F_%WT7~0I+N2)KG=e^k0TX z+5_$&knUh+$&VPlK*3$tQeCTTTf}vE;&l|=8jJ(HR-8P}@k~!_jY0Geh|HXo2@svj zkZ1G~&I)Sb!)VO{kUa7XhLYp{oW7BO@5=?F-9ZF`WWArZI7GbSnCN7(INkFUy|$)y zEM*@obkXhLbc1HY9COQQ+n}b-B^rHm?3(p~56{8CJRKTIR4mlL`YMVqIMjf{~A%so_qBYymRu{wgJ34g;PQ24xm@u?*TRJn{9UX0ajhw-l~e zK%t|vK}NmV`a0JF$4)p*gDaI}K-F1k2h6A1L_?2{z)w(9@w(ewrgXQxz8c2wh z1}Wl+M#8ZX@*I<(TBbuzgvxd3%{fapZ8%a%f;sMLiS^GTdGcUu{0T_7q6<`cSJ?d8CVg9V^(}yM z2y|$|;o`_!DrT5K$vu?cA`pYF@y@umTYK$na-MnSssmT;i+vA|=>LIifKLo!cm6PPh`!e=+qR=l81807d@dT+$M+P1?8aM6A8Gu#-% zL78nzswd}Vb~E0z!sFnXZEgh3H#8C29?D2)~I&5AA|5yaAM z9Gz`f7ns-xOx9fHo)Oq}d}1Amu3rllg`I8=lZH6{IDe(L6HZmz+*|O zy%0YC-w-~Hq{ZpgYP#sugtX>-;g$ZO)@FYHWxj)nyPe=J0uDF-Il&;qY_a2a5QAq7 
zT5Pr0ATh6|2BM0|9jC=@TkJr{GNo>I34h90z?{d)5JMKL*f(Xq_r6>s8+g+sra3FC zWMOt&kh{=s)o}?hgcfp0oE@0h+(FAu9GDij$B(vK{d%lv&;jS&;42o-!hbc{V{j+F(d$;O~@;V!t$4V*Z20|2eA)hMY>El`ee48O?W zF=@r;vzRZLh#1<$ip<4|o`@U5kEVf+qAIsi=ZA$yUKbVPQVxmoNHmT^xJ4w4#V8gJ zg%&+o%k#1s9rkm9SPa)aJVs-pnp`sIj32CRlb0nCQ}Gk#1q8uR9r?5O0tIWZ9t3T2 z*v=TB#TOK$C|QF~>!F0A52MmE-g}A3!GULp{Rb;^cA}6TQ?$hR zV&+G9aTc0~P}*Z3noETHO#E0NjZPBU;70@Q6Yw$s&LDMmNWdcHQCq|!m^mDn+lU#) z=*vXCmd4RK?~W2DM51Bg9G*AC4C{MdAeNz2xWwLZgP3+M9nOQ7!0d(Q&TM$Au#?+U zJ|$vy(AtkiMj^>VK-Y=cgV=BR#vIG>WAO{N98CbK1%y~B53vI=P6~&(`Al&&DMVo| zT`_ay20{=~gE9p1hBja~$!0ZzsCLD^CebU5aC+lJ6lnsQSnchnIF*TXl`p4_D3h)* zBSlA95Oa3Dcph>R&4r;cK=Cm_rQt1%`4R7gXJ}GwlrGzwhM5S_?$>GEkj6w{h zVneZfq+~(SotEzEsHM-};Uq11VJ9LvoIc>O)b_0rE)P0YB1?I}6@YFd@<|i`6g5tyKH*QmO`KY>E_a{kBxesTf<4Fvejej2owOGeC_~B-d1OB_) z+W4U)6K3}z#w7B?D2@t+F{{)3@CpYLbrAH!8M+LG>wl>roRrQD)epfx|4Usm1XS9H zG{xV$eh5X&C}=<|q-@633xTq!!90M(h4R?|j7?=B(oPN#Px3A(5KV7IYbNdj%+dh5 zxiA_rP-_AN@c;k+fA>F!9*IN}vn3fRZGZoNzel*iE)rNonw}MUN{J_)_yD4c(yywz zcH04&0ek^Q2?vG50Ru$^5sBFuRq^PmE{Pb0WFv_{Bp@pI3fW6|zCVOiuyAh%-H86&EWjTL)zRFFU>AQqdBrqq@W)7Ip{P&^b#)1Sibvzyo2RUIp>R|^Kf(-eMsRq)7kqkzXFwTaW;1NQSA`Ayz za+i-w9_G@ZX|O>9|8g)8Mmms2BIkG*gK{i!qd{d%f-q(t)yFsK5luE=9Ql`9@Q03?*G(6(bR z;U93o1!lFn?i+8>a2Bkkv@9^?@JNkq@x|*@P3FokW6T`yZ*V$UV z(KNq_VUhGZ!vgE{O*Kt3BiT)3wZqH%=C`cAiCkAs9JhXx5!H>gZo=P$r<)iSdE*J` zwk(|sanV4zXt=UPL)rME@lVegk7hjGcuV6;<>11!c10`FVc?@k*D81Oj_gK`apXN3 z$;89Kp+FoI4vGjCXh89BS<0riDr3~!wB}CjXt?1bt13v-BFn7GW0Y~1AYO9}kJo6t zXh;zOp%{iDp(}TW*SoUq7;iM56rKys5&G=s$AhOw-i>Aw0r7A=KroP_QI_vyLc1z< z_5cDonu9PNKrle@@PP1(joom@jPpoS!!Zg0k_arS`q_#_g|CJ=u;@_2fs}Qsua>Py zpNFd&hyqI)-6WchKk#|=eCnQA?4J$_FLWdvQspbQ#LhOYg#23 z&$pPRs;yEnwbs66X-iuk+N}JU^P!K+Bo3wLLrK{(aaISpnBtwlgso>8^TOh7zPaXK+utQ0;dBoY!z zJu{M!WLL~aS*R?@=swKe(^*eh^|19a!vPlzoy>BsZtS~_L`8F@tppr+uz&;~0xOqv z)Y8y|2L}u&L?n&}3zbAjbQdlFAfW+md%jWwQfcT{H#Z@X_sz}qs#m?5o9k6w&CSu( zy*!Wb8ZHnPR-uh$FK9~9D-Rr+Q z{q*Pm*L%hNWwH6(Ar&K=Gdxxzmdh@?5bera;ju!iiiPULpV4SL>)UAK{d3PYW+8G? 
zWm4T;dU|(fMum5(`&BSiUcnY`u!q>vz4s!Pu5=x26}ysku3Xi!AQkfpr=@7>G)45& zWiNaA_wtPS4_>jS4o`InQ=FojvYeVSm-|${lubE{YwDhd#-2TWGy5Qw8@$0X+YLSm z&2HDe;NfY~nr0s=Mn>7O|1UcuuL@Adolf1iCTzoXN0mpcvH=5u@Aqu1sVP*^HI zb~?0uwQc2eWb0YSS44JL%&zwJ9xOnNmD6=DvlO1J&}vqG`Q<9t%T~~~dX-DQ5^E~) zS#vd4hjW!xtZY@V`cSclyx-WXbsrkXf02r&}3V=jO00iY~l{n&Mz$96@i=8K6lW7A$@bu9p7O8*NWtb1# zGha@ep6}#96>^cO4Hyp06&NPUV->3UKQ(rfBoy$4$H~sB9^;|(p|m;OG5GZHX=v(X zO$rieq1Mk1oP?wJ5#D_C!|G>`5D^&9)-}5W?0ai zQBe?~BKVFdQ2KO5l4zcbOlQHweb(ap-Y@*IvO^SULG2XcNwR7 zF8ow7Y!`S^SKqvYK0lvrz4s;$(Z^+>QOkf@GXe9M13G7)SvQtZy4$`11zc<>lFQDy zUm04HU<)cZO__?4+K}QnkA{P!9or%@ZbD6`u09kVl5*u-BTLM7vYwEKPSN6LIxZ(= zpfzuPoTp=yY`AZU^3Swg#1DBOhkAr0L6S1-;+pU0vhm5D$s5gIMTq)L8%ePQjZchl9jZ;<6cR`-ICf0?qswWt`>~0hN@7y6sCQDG z=S6XwI^m~dD8LooJ=L?Y;SvTfqO+&B)^-6#0KFB0O%vzgJP8qGGffM@^fa*@-o<_2 zA=oZdIHs_$n?>L+w+9`P`c9lSO`$#ZwP;#&UqWn5O(9OntazdG5~33m?yrI_WQYpe zNHrkE7Sw+JeLHf9)W_mHVfNW$Mr2hMqk||%VG9hwg7;Jv*-YnWdFgGGC;Fr$)EHst zLXI*v20Qd6n*5$|8>M!Ih#51_AlO9-2kN>-)r)C;@e$221@?aI%p4%8DhHxA?&S$> z-|j%^XN<-YC3h8Ec^KmbEG$vkEyhEJgNIU-5|c`cW+u>2@N5b)MoE2EC>oa!>7j!W zjON=IrzX%0DJn3%4n9+w6dxCa%%`fx+X&2s!5>?WE#!_;jmo2+iSqM$r}>mH+zoo7 zoEAd8=DME#UC<2gxaD%jf-}^5ZUm4AU5F%p9!o(PWRi}981uArC}Q<)nuM>ln=Biq z*51ue#Tih5eK@lVkorb*IY4v3u2Y67+T~VO(157bXBwep^b{=@Id?$x zh=QY~SraW!HMMqN!PIJZ6ZIM*=AMr*zS#&ifO%Ng<(A8E#S>j;XwfDXA-2WHc)WOU zp(biti$|sX8L>=aJhmH%E5srGJ{X3}5-c4iDm~gxXgt@`r>VppB<$w` zYLoOcP2YJLFq$?aJt9(AJa&d-m(A{QJ|qb#sX0nJZTpNnNbAWPlul?2_}SQ*LM9)W z!LUqa%=e{Q=MBldiXMCd1DZ}Dw@P*f}`H{rvO$(TYg@DZwzGAojXonn1ZX|EBK(JFL~I-XT% zW`|;+=cMDEE_ngbWAe-@APAlY6V-`or_;~&NJ>!Fc92@9B=np#BA3>V7bq@vt+NbA zW0u7o(zU1zXXL~j-ruIdgyZ?bkS%vPB}I-3MMtqGQ%=h>s7qB_#*=+QGRg&lNv;FK zNy0u*8Zfz5$H9Ua;*wf4*-l#H69 z=Avf{hz8Mdq6(nMFjU&ps|6yW*MCiS^-12F3Q$Z%lc5vysMXXeBDftfGM=KHr{X~> zFC@e<1JAC3^AHRC4Zb;yBumgEy&1)Dpng&Abask|f?aC&=MRM&p@{>*=cr&6I444d zwBQ|{o8qLds{IHT1ukMy3np?9V?&B=F$pyU?RPuKRDt!O90GH!OmalmVy;u51>0ij zdpH+Mm{~Aam<&ra#tQ~bE!X5N9ExRi#*eX@v7y~CF~-m+5+C5SkQ*nbj}6I-nB!eI 
zGUE3kqLsO4Ku|8AO}G#bpb#v9D?<3Tg)lcNE+b$d9?6&jyCIKiC9-63GIFyNf=APc z33{Z#uh8a33c>k6RwzjhBwrMJK5GS_z;!5rlvCI5s-)yIOeaEuu-B#RaQ$d4jI(s) z)vlyqg39@0;eu&)iX6lg8D&~cs_BKgrQ@M=W#9qvtg0fZx+&>i2(d(w%5Tn<7bueB zkZk@|H0;%AN&@bSq~Jl3_RzKPowNSkds;l#&zE&^9aHXYV-iP@Fjagd{{zP}g%sUwT9ib>3l#~rHD2_c{`O3IL9;iT?xZ7^1L;>uP0T_0c)mYN=@y6 z+7&jmu%2rD>T*T%Crk`8K~+s=-;6`6sqCLYFLSyzq)GkJXieQA9;ECpNCrzVmik%b<5#HxiPB%CU$|O_b zD!7!JC8%DB~fc9qvtK_#s-FlcAwB{ljm<^-TZ*PG{=zG{Q5h z%vwbYrr>mc~1_NHhaFs%q=KRaq<)7iD^$m8=is69HhvIoOVC6fs@l2JwL7R z^l5ldQIF;HG(pU1ru@)Uq&b;|pJ&R9Q;X@GcGpZP2_l{olhMJSCNaGP9i7Z+y+E{~;%haKPETY| zrr<5|)}umB_(-?!D$I2r?9!YhsRsi3VrHn)O`knsuoFw|3B$UtxF_=9!1|s#{oY{k z3D&9x$ET;$86=-F7frDFgb`QR=@a(5ZdIR1&Oj^dnC+p$mRPQ-Y9iJwmQN`eDin6Tod1g{CCxljI)XBs(=-8&~kl(|U3Z!wx;&xF-p z2gwJ>;kLA3jtPv`a|-f|By|SQs7o-6Yrb9N+%{X`be8Mw@=v}qmkF>BAey4*=lqy| zf|lb8t9Pg+$#ff}?iQ9mNWGqll^7(lZaSj9X{txj$()Wl1P^AV%T!{xumF#mpW(4S zqutQ64=mLvVLKG>_hVW zH4s&o8RKS-Q3Bv?`@}7PRtd298F66e4l{W`4pz;?_S{T+#)rhyT8-DOp0=eEp;4?pUs;T}C45H4#nQ(G8B*okd&6YTu8c8{{NBeo%bM8PI@qmpY zEE7fROQ!tT2RuC^OEbsy$=l)9Z}hQ3W9JY8IYFmmMM9*c%y9%BM;}fuKI{uidFEg} zO%1zD%GXQ&&=$~klRBsGiOLxQ$J=b6LqXgE!XbDp2wuHG9E zx{N20&v!&LPVu%zmg-n&4l~qj4G(?5=Oy=3n$Jy3h?r`fQ`-HWG|x#r9m{s#=!k~n z^+T35dhQN3t%RGSZ7Q4TK%Fq2q>X(B3bns$$~S=qVBkqM7dISq0Mz`lIY+Rk!pAJH zhAqvBn@LlK7a^bi6M0r8Zaj=IRD0Rsv`J6z*9}xcRMGC1^30;2tw-T52WZXgPTX9Y zric;bxgYTvO>qw5vp`$I06CsBZtvh}btXI)of9&prtwS?aPphfGL{s~!q5l9WhsU0iWLD8tgl<+YNOGd&auC&CJ|hiEu54~ByC>|=~*=<}Bb9_vZVG=9{f-q1iny@f&XqLaq_ zxG*0y0)!pWBg~`qNISO<%hAzF1enxE!%WJn@$PziB3A+WWg}bUZ1Y)RZ4(mCVAb=f zZ%pR!JR?7v+Zn5%Ls9*q)Cy)i1f3lYNS2BN$c3H(r6yVg8{CPAY{2fO3u@tjgr=zS zWp@W4&8@6@a7v1t$(i~8s%|*q-Fq9k&8eiE2ElD7eq$PABFUmxbyzz4YKh4znp8-F z2x(xxL=ewPSx3w*z`4&?Ys>?QMy(Hqt5pi~o zGp%MuT=Q6zrckW%o+k?m8#n8CTIy(3Q$8%9Ngn|9gk_h+m(z9*ECU)eNM)m_c*jC@wEMLXm^;RX&r*)9h04r25hzlBDtV;moIw`x)&|B{$E|N7*{ z7N2s-ZP1yY{z&_T=46tjqNPO7Qu}!-;iZpf%_-EWv?a~InCCx}I6RfEqs^O^fT4E) zx2wd@{4Ca%Q_e>l?G_~+$v4*a=FL-R5IvS|Q{#h*+7oLo+JgGfO(TWZ@?tai#2o+- 
z#)60j43Y^66g#6+cy&-2L;6&4g5d>)NlT6J$RDtB9;M50r$vl@?C8j`Rr{tE%S9|p z7a`d-@vwtg9)rl`@qTmJ^Rd!O8?Wj{Xrjf|hM6aWIU#cg{ImdC+<+o0;D`>`0l@fT zXM-bTZ-|J{FcqLbyFOQ#fCru5ev{o~e{w`9!x&?XF?a>w0T2Q{0*YB$T3Rmq3%_E@ za{lVh<|k?MHgEH8?}h!(zs(o*vv>UOJX35BKjh7lS6Pl%;;x^%?&_|J^=rT4;^N}w=H}+= z>gww1>Pl8uH#b)?vo%|_Ra<32R+i-bebv={WnpbqOj%A_b(M>rYfc^t}AI} zd&^dAMa6iL7ZpQ9#rm-yH#98I@+{x-E!T1_?a>|;`^8@j`CwF*S7ouyecji6-M_@t zT?zANYTCWyz2E!2FEVzfeWRsf%Cae8%WSqiRMbqx3=Nr&4Gmcp^E*i^rkJvDDof{K zBQ|0qw%;FOuSByA5AB=%fiWAiG2>;PvM6G+UY}yhf@pZI>8{<{typ@ex4F5wIXXHz zIyyQ!dU`r$g~ZX(&(9TOZ}vXP+P8fxOJe5d=dUH_D@&*4t?x%JBP0Kcon>0Cc4Dp? zFtXnUjG)BqB>MXL?C-yf7>=BTZBfk96zffPOzU5JOtXr_9C1udvv*9p$0R_Knr3UO z7z+$^JZJ}=2}K8gAdpT4M*{^!#)e90cz~cXCLSyRd9VNtQs)w3uR7OLv@+S)q1jZc zxU$_KW%}3QbVIgQ{%Kam#JkLyU0=Kw6&FJ1*s71Y-u2^TYfT|KD?FQ8adc$(MT3a~ z2Lucd7)Ugrf@H8F7(_^nP>8=oz(F7!1_O%(E*=o9#O}?7_N+H6xmve$@!-nU&q
Rr*|q(N~?3F>`a*Sn8d0-9kg^QI%X6vqMhxPl>4gNM>A=a0yx2h!2v|U zf(Zy`0s)bQ2a6=u<24)-6j0a$8W|i2Sa6t+11OOSJXF}NqrT3eQ%mZs((ONNc5xd$ z?7PT$RIiicoi=mEbSXni$P|4eb61AXcrvKo4C1ZAH7h)@!}i8!>bPOfhA-)DMs6OvY~P#%^rHh4Zs97fnq4JH;|R({&l?^ksA5 z^gUnHb4T>t&;8uby;~S>_i$0fXTEMuR*aLoxtovqn2-6GpZS?lvdqi8EG~?SEqBVE zf0vr7shX;3YP#_8rs*wu$E~L7s;Zc>DyzDxt76XTtlY}2As@@$!l#(BoN2%5$p7+h zx~QqCIlenKV>32mGrFTYUgI@h<29P2Ii_SAw{cn1g-sP(?iKsLe(cA7>~~-I)!%>7 z$L9kSLpQs$%NTjmL&U;Hq=AP57!eBlG3juC;0};y@R;b3_{-zyI`0^Tr)#{sl%M~8 zRtj~kv+B2=4{BO$XL~+%%g`RB@^p)5mW$9a(W3sM%SELC10o3!9)AoVsEp+Ba2jZ+ zS}g9+kOL4nFcK&W5DbWjkOKdqrXj&Xru~@py6kt|t>2uL%Sn}2+45XEeJk14ua(#= z)#0PZQm3OrSA6=_4QIPys#oiSo#uDwXrZO@kyUC?Ar2BqgF_gkHBk^TfWU)>h{RTC zh=T*rqfTf6=@v+-IkWk=@nXrEzX`a$R~J zh^^5%=hGs!7?rC#+*HF><{b9JKd=xP3JV56mIs5vLjwwsg$un01Cs^8k+6`!#{!4K z#e;^A4-b8C)LH%QG~I(%Eqk`ghLv>{Tl5sgvg`k1CZ)0UHZ2(&Ru;cvyr?Fd&Bv&jk*SotP-R1%gQhO1OBS zz_8H}lEGjSVWJgcyVKfd`Tv?%?}YRaN>=LdGaa?eqBh1`E#vfcke$BsLC4G=Wm1H4 z`6E6Cm#;;M4JU*a+C6FLmMb`?(ZC|=FToiAqbW`&2~ve9xJY0J4IT;{ScMd1fCUa5 zoJd2$yLFIiqo#4aL7tRiw|y(#Hr~Aw7bZhgW+|y$)p%#_d^>fo#K|vJv$AxaNg<<- z%_>{8sQ}>{7?L6dEF2;g4IT+J!ckxV!A1j)#Sfo=xM&d45Hg_#$c|`8h~Od-ib0VI zNgXw{n%#Q69PttQ-?1|{dUn>LWIHx%)ope7d$A~;ZR0OUl3ECL5V0G|yA5H=7S>4>Ba2|a_14FBgd^2MeX zUmCJfKZ^hVd^+U&>pa(2vr2Z#sIM(u=Wn@|iQLRGel)dfmzFZ&rlm<_=L(05j);#m z6OgZ1kjT3z@S)HJf(V2FlF9((`+0}~StAQW)G2+25x z1R|h`M+Ae2x=5JNFfft84kmP|Gi?Sf=SFq!c6!kMcH%oWR;drF)u&J;nWSKZIL_a^g(6*5L8 zw3pUepLFDv7?U--sdL5(y*bGrm7=y-NWdEtjfn!B5+>+z;RqNk8ZfK|2?{I}gW*UZ zm{fpZ0z{_(LK)6`gx`ObJ`>WF=DBMOCxhGlpY&WZZHm+{UWZtj_tIl!UMZxXYrE63 zS0dws3_<`wqQL=Lm;ecdLGa!2l^bC)QaU_=0}-aGlOb8)xgGeI)@LDh4~;z?#6(|b zf&2|^nXGYzwGO65SugJ5-F5fp#^!I}8 zRABQz7oFr=Z4iSMElWfrgV)91%TJjUFBUUe2mMR#rgwFhYe?#Ah_l-+Dm?_}nD`;> zpFS~vJOf3e{oj^Cd5XEvdvK3{O=7vIL2!D2$Ir0ZCXG3^|sjfuDkR zsQBlj)>H$wCc@H!6SWIekdCL0jCvq3pt0)e$;ghwJc8xo702ufl%wu)qoUII}yVODBZzCf$ zS>maR`#D4zJ6>4EBRmB7;2b>W`vkQvw@;-VC8>*`Hrg`GJXIVRF9vo)seFDRyimJw zlXbyG`LE`1$Zj4meH1=W9o3A_{>h5wb-LVjyL@$Dj5^jFAh1Q)*PWiqm!h{LELe~% 
zz<2`fSu=hi5?j0l@bpLdGl&yBy&OXi;VpD*3rF#1Kmab z+t6;_Dr!i8Ed|T={p_C_47^lTbR)DK0i~($$VM38TYzw|2YVU7n^Y$QP)5rKpdvm1 zS~ee<59oEePz?t>;;Cx{eBJ>knBnpiob-f4!3M!8{DP}*q(GfwWC+2e5odA*&Ox_$SaGm5(wsTu4y{|ze4zQxpQCuBt}6bV=9&r3 zU!pLEqAH6gTqzxw`h^0bFsRlAEn3E36LEvbm5ET8Y%UI)%wAvQSecUJzOPIp6emfp zn;oUU@6>5X=<*a;BD2oTXJzazCxMOuy^^e;GP4f!pzc}bsKGKLw$XMr68d)lj?1J3 zh+er&%u-CrIbR$a3#+uk>0(gRn#vx4W>^DB5mLfDlVbQmbD z#WE_3Gjj+tfiyrzfn&K^4J<`e#Zbn0BviR}APt{nl44jQg-b&kbf)*PEHxTcJ6ub# z{J4^E70YJ*OwDzu{yc=1)}BPL;6+Xnfp&KCkHju=HdCoX%hQ5ZC~Yn!Llx%+IiExq zQ?AM%(wO@`I7$S8!EpLeqV)PimW>kmFw7jKM9!W7eUzG>KgupaF*~P)l&)BmeJ)ZW z$)qefvKDBcCJ!YT~_PJv8*EJYzPRP?&mIju0fSnNAF9>6AjWT6EA~v!ucqVh#a5m87vmLY71-_js11 zC>F^kK(=I0Wo^kQb$lERg!E7L$y|~r#zdoIwynYPWCE$aX$PIqiA6fBt7A8J|erQtm?`qYsLV1{~7s%D0ewgyWFK2tt0Zq6U@qzD1~6 zJQv-Y=US|wI^F%pc#*_90Z_ALJ|f~w??3AtMDC`bm%<2|mAEy6^HLPt?1cvC za+Yzb6(4o}D0Lnz`Z)sJd<4q2c}Jrz=cAegbjU=3!~};5*cO^`oP&c)E35gH9H-n+ z*ylGss$qHTeZ>z^&(BmfXkwkX2{p*Xo;VY0z=S$MQ-aRFGL;M{+2)|*qh4%uMy1hv z75|XEZ;jYKdq(1@I4-;*dk*e$V9#)H zA<^8lTSe!saIjJ>$%Ca5RS}di9w{!az4CIFIjS(mvz6I2V=NgIsyQ5Jg=3^N$}s{n zCr4&WD3%yh!%QHV;5rC644s-{i72fJ3V8k)qcZ}G-m2V(k{mqx0>(9@i)-c@v`v_^ zqdi?zbF|pom0r+b^rS@x;lUd!7HDy11x=3NS1JFM1<++k${`{RQ;a4+(MuJ!(64{Y zXgz$!YWgB2cC*Fd&J=?ECMuiukWBCo0uJk=X4NR}oME3v zqvgObzAKIx&r=|wY(!KPe1hHLiuhe)PK{sYseU&!kcm4+c?xpxpl zqP3|jDZZH(9T4PB+!z((MN;b&IGQ+gvxZ?bDG^1d!!Vx73RXP8yBxinw?h@jOgMF~ zhT6JAD@{%b#hH1c6G}SEk}?KEx-LqehKU-E16>@3Jqq~ zifu|bKJo@aXIclnR?&z0`PPi-x_{%w;n$bWN3?P}=_p+d<PZ3NVo`(M2;axXxSV2XTnij08BvGBztMqMJ769Z`&JHn(3W zH<=P`V<-)wLkl$s(q5Et=(-wDJ6j}smmMlQ$TcUc^h_$;RYKFhj?<|gm-pJTN z-f3WDBFy~sXzM%p<*?H%o+p4Y)-V5+S?ZTghj8(F})6bEiO3(*lW>l>G00@VM=C zTpozV=#Jw@w-2vA5>=%(GGwzK>(J_!k>&9P(*|RP%Er^589hA{85*d z>16E}UNM@Tml-y;xk(AdBjnJ~gY{>)IYE^DdnU-C z!-W50G>iYp1jv6tTK*H7r6yN%jEUGA;lu&`w{LG9v^F1Xc2^S2$sn!du=)lT> z9RmCT>H&MSn|hl_MLeXPZs4#%f&>c$I6#5`8YFNq87jz7frg_&!mz~J>uMyQtg?`f zhreJ(V_G8Ofl|w6=EU7E9H4BZGD5P^=o@-V6G|NpazH4p8Zi?}Xtsp!7>R=M2|Xbm 
z=)uB+(hUFs$~)nLUmW}xoFL8;NmQdZY2QxJqGl95;w`KejXIhtL+$t-NFajFM z%8B&N?v{AoM4GRZ@^yMteIC_5F9U{L*Sf4%yH+$cT2GkBYp&Vota(PK6|P-U-e@J8 zS5J2B>2vS5q=n`q`goRl+e77Tbyvq9>2BGiN?S$xvJn*%t|C%|6KF4yt5y@fwGseH zK(@bk>rxVviT2<~36IN>iZ9nn;o^1RvjXoNk(4)JPm0eBk?$(y7LIQCw z)&z+6yK{vE!dJoY^c50_$~}Evg#_a4zUL(+yvhm*M5XnHxD_5OV1)z%w?YDe77!1J zhXmr`0r7zFpm;z$ARZ78hzG<2;sNo1ctA|>fOtSWAS@sr5C{x9$e_ap2qdUf&c{yIN*Q|8W<=D1=@fX1T>J+h6IUSSu;nfx2Kp~d8gT- zCr6(J;TdRd9Ytsqj&Y~#+>c={XS1V}AtzdRip@eQ#zZY!-j`IUE)SeWrc~5RBt*2M z=EXV3ILMGP#q1!b737?9+!a!olO3LeYw(f?F5<{h+}I)_`iNwBw8|tJG$4g!#>2JK z!}Z3lozjIgA$&!{vlBI5@9K2F8k>y?L-c`Mxg<8jK8043A zbi1}?W;R3rL9VlCud=42C zDSF>h$lVxp2kG^CsWi0PuN1M{nc9t9xOs(}q*;l0u(Io=S+%5JuPHj+1I1_hSZBhT zZ~+1*>=K(u!DzIX5EnGqaA5<63k0lj>y(ss!Uf~egW)<+hsMJP8#L=HoOgdrhAh7J%ADt6*!p+ylairFhdiafSbptBecUESkk znLCBn(McO6FL7mGby61GMJ&1bqk0-VxM*3XuJn3%TGvzo_SXp)IG6}-ysu@)1l|i> zqDs{7sx8+Dw_t{NP)nFCVvVK&ly+CfO1BzC2W|1QOk-@wF@>fUG)hWx&N$Z8M1-6| zwkpL~wM@Hmp5<9NPiR)o8(LY*KibXE*tm;RN^3pP(9x*t4rS3D=0eBXcCIpr zIW~@&{63Co)hM)N5T0uB+1X@jcHaww1{|33k`+=RQ5}g&>vTG?!dX{dOrbP#3`se2 zQHmuum6V#L6nk!zGmR9*7*~kKvkJ95QPGdi-$EE0*l&dohbh8c)urlppOjXERdi(;1TZh|a#v23xdvJBT=`yyL_A%eBn>aMCL z+NwHi%fK;yHql4JOd&!@in?(MT9?_fdO>Q8Q`oHv#ZfG+Q+4$c4O6d|E=9#9H70O3 zaW}I@Ip-ji%S}PXiLTvM4w7~Xm$7--)Z!|WG)Ntjxmgf%%OtaA+5=f+yt2Y8I@iyY z=1M&iUSX-Z5}9|4zh8;69px!ezcqbDwY8AGNvc=pV1orDaL|E+G4(O+`d{76GOt^# ziYo(Y80(PTLe4-Uh3O#2e3T{iVt1rpzwhYuqh}pm>;5Iy-H3-PEGu7KWtU5RHuEsF zi!JXdM~rXJ#%hZs2aC2KgvuIRvsQ*FrHa|t)l^a0)-JVOXS>j@N_)-5)o|06*JL9u z8OgV6hJ}WGqZvkr`O5F+wj^2=wk z*+fpVu3>JSE_GK4`One93bN0;9wkc(6+~>wb+n-Knxnj^n5&uH#N9;R)|*IId-*-B z3s=eLRBuslPj4?(i#oP?;MFWCNs<^}DNS6}Q4m9P3*Wt0Jm#38!sKq$w4@x-nQG$f zTl(Gnl3IPuTia@od5d{1!USnjk?Mb!V?v`KWDHX7*m%j}=o8U)s&pNy+DQvfYCP#7(xpdfVMR=DriPwO3pq+oS-w(=f<}%> zLUnV-DGaeN*3|qdA*%hT@M?A=6z;Y5sM1T{a8$M))gF~mu2HH{1&EO7vlIyc5Eu-L zje>EOBq|B8qz@ASLc?KDI4l+kM1e3M6bggFU@#C23IhT`Ko|%JhC^{2(XuG)696mm zXMK1DnJXz7ZxbVe3|Q+-LBTCIpn*?ou}LtQc2mQm1Huwet|tY!&lnuSGh&+S0|sdW 
zuhj^_t!VgMPT*`N!G}XcvDOI>fRzW==8Z9fG28h{*68D!aVrR1ixvE2I1&3Rn`N(* zq+nmOvp-&B|5uoGS1g5N$$l4P0yx(3R%>Km$<{FDngZp2?*}9aR+_F{%e^V2`&R@+gG_*?Qg(Wcrj8jb@?0r_24vzL68L7=vtk9T~Fb z;Xj(u^2Ry6`-Vi(Mpw$p9=)**nfI__AS#G2eV|UUw*fR1zo_{d+y|&dC;}QFGP+lt zMM@SWsS0V8jqjf8xAs4*n{anv$s^lJ))@Tq0~?iR!r z2pb-LUPPG#qp12~;wqqFOCZ4vjY_G9@Xt`fafUQIdoyG&tdTM64`A2<#CDj(7DdI7Q6^aK#y?g#LqErW?yDkKP2wUr8iIV0u z-5wDNNgTYU@AEt?s>bW^ig4@Ty3kT{+&4W6LV|Feu!M97yr9P;5XgMP-G`oqB*Esn z`1eOT`j-t2wMqISL&K^n?{|s@F>m5Udnis(Me@QR(;n1f$jyU4rLbJ$xR(;5n5l4k zzc>DAa`VZRbezPA6Bf-29mgAMjkJRUX5=QiB!z_0c}4$R@JB#W*%)A7AQY$4(Ckgm zsCDJBHqYZO^p2ZkQGE%Y%hhL;xq(0)LOlREW@Qeux1h0jfQC=~B)O6tv%7GE zc!CkhF$;CZmgC}*O1M<>_7YqP3t!d;+puv{=Ny7wIrH(lc(irIVNb@ht_O_`lP_lP z&d^vGV5!U`S%I%4N0ix8A9M96iLwxa8pB<&ag;r}O23blx(q3(0-n5_ZE*w;Hbn~7 zC4+q`3T#Mv+10}9%6`){sMe-FTj|!Lu))dVKktw^nm`G*ELWcPgD(6eXhmTgIyCuJ z*lCkMf)s^Hl?-S;T^2=12=Q>6W=USDsWp=D)@Dk)Tx zMJ%LSL1!2dRue}GfdnMFi3BVl^NCf8=`^)HKlV}0j!YZE5Cm6~Dk*^SVyLUUUFfbv zP3MD%LW9*u2N@9uLxP+1xeccyqoT7$(W~s!1VRD;h43&12z=sInz&?5Px1{3>nuXJ z_<9Lqd3ns_LIgDrVm-sWLPE5e&pZ-HTV*sIF;AKmlZt{ca;#$9fB^|uR8K$1#An~5 z&7h@*ptMnl^Xeg1C2uPrl@k#A!!i(67^Dq@wsQ+)h_i^N-C=)yUQbr8JRR|-!^KWa zED3BalyI^YP<))oL4}bb8}KJYgUq0hwk7n+lNmOw3*z#S$;ho@^e;BabZxYjH-MeF76LD@`A)D{^(kC2R0f;d zO92)aWN8{bn&c+h;|Mi1S3ubTmSmM=psS`Nz)+P#a|}c@M6O<^r=B(t)bYwASc&af z+sIn69~%n^Fz8-B4}k4+uA;T2<#qEv%K9US2Jjd|I|(@GH_$e6Zg@&VXx1nyZ}Bv` zO2DN_ffx5e$1IN%zYvb5DoR}SGY_-_RTjAQ;>mpI5oEF7U;}}8$ zB08V|3kW>5EX~3-udYyYn%jbwY@bPfXKm5(9%(%@^`NTt54HYHj-T65Q}Xs%C`Tko zMd6G*tO#cSKmr`abDpV%mL5uWaU?}_G5SVqR}sv%1tn8G>?O&G8GxNw8@=M^K->|h zDiA(?`0Bies${~5|Jf4n(9^Vyjq2mbWXg%&9l~Rfbo_9hxfoReQ~o#ZSn7{d0tqgf z`G_>Y^9idJb%S;eL+VGYuNKbzj(EdJ0*EtE;HgyicBjssJ4bwm5QK=sl8-qd96>Gu zW#m}JI0FC@U{O5tAS=YqN9Oa4n7t%QI~6#<=7;>Ewfve$FrTI1XHU;sU4t4-LN{nA z$fiEOP_Qk#%GWr&-KeyJ$~*N&o?9RahiEioLZzPTgHJzt@_^; z2r5$Tk(npZPl%RXGg*7Q^Xw>MBl12F@o_NdOjOoIQwWwe)PjQ;QwXT&A57I=0+Lt4 zuSvnKvwivEOYyU^wf3o1Rs%5Hc51cDqli7`up2;KfeH_vF*`3tw?L-3QxYE0ZM+i! 
zrI2BPN}b^9Lj|)fb5`zyzo%Ws=y$|-m%0p@m{QW~T~1%(c=`|;Sx&2rwSspLgg?yp zR``iWsEN^ZOJA(ugG&b|F+Ei{_H&bfr`N;|C!`n{X8u|*95?ETZ5bZUOcBx3^(x&} z*@Z!uUYJBL-mOz;jo`#;2y2>3EE`Cs2kWVnD<8Sq+n`J-J)>Sv8h!~o+zpInEg6(Y zi;e!kj~bBr(hm%mL#2@ff$9NDzQ$S8Q@53tdH$~3A!owisf|UW1kn-j$9`rq>w0?^ zo#U0PJS$XD6Z9ckO!zhdb!tje1c8C@6dhPh8UTy7=SGNjy1pUud%6I$4Qz73{;@B2nn+1% zqgf|oST?H$@RvbdmxWOhnQ|vy=09Zaf*-o1P|;!@u`KsX)xlXkUhu;Y56mG=Y6pp+ zG`Tw&N{OwAvi7IY&cchHtB`5F5fsj2U9}KYlSmJNVX2@FFD4!6(U^bMY+^u9A6cL$ zxRs|g;^kF}BxH1|4Ray|zrz@DAM?0I=XA8YRPsOH*KGE@uFy#;Q4Wu$nL8^kIkB!xC3q{=^2L=+k#}%*L|f+CSUsEY`B= z7VZdXTQxD2JSJkE?}kn?Hw=(q`0JrGg_Zkrfc8dOKfMJomH&Lg!Pti#>Q}i%E)SqH z@^U0cYyjy|8n(5Iq~VjRJ=o$Q-A^=Y+6;+e*EzOw#00uX(M&Ox7;=-@zwsC1b(1k* zjn`M>IBu>?f#5@npsNvriMLPOrS{g|VoU;unlY5+*3&HwG5L4{-#r46Li*_Sn@?H* zbe~)3RpIJQ6;3mcQxfQ{&|3DBf|I9{L|`-PuUoDK5>IMK{jTxTgMD*OcouD_S1?>I zx`oh3o`PB=b$H6ft5&T{DXj#uo&cR@eq0(-stLC0rRk;27^d{|T%d!314Y;-HVOm* zIPP2P6(4Db173)MIU!D8n7=;X{C^=q7l?!i{MRoAY6%z+h%&r?{RQAw+^Mp-KxqnG zXk%dY^&?@j0wyfPW6EC~Ss1}YZtDJ0&V)fydi^$_$3dtTW?WiU=G9EN#NW?{XTKVzNyetwA%7~1I`As_tvAIyP-iR zmJyqYh;Nx0hQN54l!V11G4_k{SA*U?v1-KcL~4>6ag)P-_z6L`cS5mB@5ALF_qvNN z6o!YCObG5&Q8QNE)gi$M*E+~JV9Y6&)0`PxAygvJ>8@Bk6Vs6)PENZRcOh@k30AF{ zoyrvk@57tuVc;85!5}!d=5gaIXexCWJDlHR6;b?Q5Cm1Hu-@2Cbd5VP#9fkC_e)M) z6p|37t*UV`tLf7(mcU?(v-IefKb09=WyYfIxU8@#2su7$Oq)tPu*Z;L$7~I>bu-V) zqd;fz0b(l+N&$=b^#V*GMTwELikS+CZ?VbhHk25ClK)xAUf^e;8|cqQhA*Q>!Y<#} zX}ADUEt%Sz$uzyZS3wl?!QZQ^hr|ctjR1G>m{-2hWE*+@%e)0j1{YeR9R)*3E`0bDz-v33CQ z?Sw1dzMr*1d>yvv85x1nYi5=PrRc^r#3MuH{Nu${QBm%~l6zus{4r$X)IGub8Lt}e z(*WXd_s}hlPR>&nJda{ID9NiV0py4wgz*z37XCtm?2S(8)ca;ZQwBh1V6GL*Alv3< zWv!u9ja2_l;B7O$R!X$Xf}Y2$rYf%A4E0`G8y)D0D`kj zI%2_$GVTm09n4Pj>e1`>t-tYW6G?tCx zo^svCh(3i64?{*QWGZjC7qA&)F|12*DPMKi5w_*B-7?@$8s=bmo}7v{XANOg8Ipn` z8iq(hln zBI$D2)(hczlwexx0Yer67q@!{jQ1je!zLSej&*pmo=4tLqD>y}{y*(1bRjjeYiOvi z#ui0M2`*c{&?>zo2vXuY1-DE*9Ytu{#=@M<)IYL-%q&f0+n4EmIw2SfV~J)iG2Cn< zIqNOYeIg9*y1Eww{6oCX`(%_-vXB9eYH&YYLovb9HeVVVm>@jSN1@o}H*IzJL@g0p 
z?Pvs$&!vH|*b2ci2hAYnV2lLQN@(0iBHaGjrRDz6K}4Nfdq-s?7{qlsJqp%srnGBI z?wB|40^a62Xx#X-qO}EMjJTX#^rS^dp-dP#2&0s9!!*;V;qyex$A#%liUFcF;f%}{ ztU({3c(6vwx!#z3^TIiie9Q7x7X*2cBG_+3YDTw=O;*rlRoVhz%-OEcEaD0U6;*Vevina~)dFg|++E_zYi(PUiM^Rt`bqD+pjDjsg6M%KLNp`w=R6O21oZ(u=GX>;&{sINX@YA9VY(gaLJ!Sg zLy1euzcS861+tHseDL{hqw9w*2g7a=@p+(Bz@GeDK3xH?N-p9AkYK{|lyiygt~2oV z3+0@tV*fW3GZGim#>Kgj#p`W!U9sX( zFTn(CQ93={-P6+Inq;2#5{}l>0A#7G8yZ4}gcH78ZUh@7=@*oRv6xIWrFuj*WJ$r{ zU_|!OCkB`J7l4qeT8MZTt_HcrGsZEKys^2~f#R@Xh0*iA(`vx*00w;ELT3R%crjBQ z=@%b1tCGK*YoD3xu0k0!-Fk`2An$5V=AeFcde>`s zSbDUjU0hxp|6a>418bBN?y4wVt`U8nOK1h7`~@<8B?}c0a(LO~3Y9u&q_VJ=MO0{{ zeuphG%Mp<=q;`y(l%eEEM6rIxXB5j|og!IjRjrF}D!=6hTH3_i0&E6Zka-sBal)K; zn#GDH8U*jM1s@GJtjU{ktQ9mRUJD>2`Sz7BLudgJ`6=^Aa$XS@B)5G~@8Vtc5b{&d zFYbOl*Z67@)6*3a;`i!Gh5q2dS#^*ehBtbk+rmJWASw!ZgVf-~k(cd@_r$1?;?h8EP zw(={{F3V+&RgzacE}3LH*k7Wu;ueV@of($p!+4n)!av~5$Of%ilekH#tYZ}L0Pw!N z3-p6koM~=Y3;=;T9IYfHh&YepDslawNJZW~ofbOOroBUx(neY(-#sR+6ch-%Os6ld zLtG9~B|0&@6$N52u%RLqG0?-4)XPmoKnw~xX^$9;bpz`;Rk~5EGEF_C772cmRSt}hvR zc2ts7#wKMj$9OQtHlwYsy>1OGZ~>mDB7@D@a~|vsezg9nR^(WzrJXEX;~V7Pt%F?9 z3ExZPF3UK=#PrYQSzRwLD8SlzS7MZqp2rRhd=M7uTG2C50)RuY{HuE1pvnHgQvYas zcgYR(vU><#04&^mZjYFHdz}x~-ZM&dyeBZ?vwL3E-4gMZxfG&uoM3HPrUO$agg1C-!pDi4zZlPA}YZ%f~DhfN|&KJH* zzwo>N@avwEAf0<+=NR}?-xa2C=3+3k!rLKn<1VC*>DKJE+BchOApGCZ17ZFa6 zY9wZxq|FNqJCA6#K4J>cY9+pjT`>Xm%E2Qw4yyGkXx_1FY;Q&haTp=VNQ9#%Lc|_Fo z^Ou$qpPAE->id`upgbi>OKEhaPWyHN7Z5oD0uura0(DbHc?~g;&`mUlNpfebI5c)- z&R9jPTb=g?7&aAn@o3xcGLI#F>~@9adk}9BIp^c(l3gK=uJV+cQ=F2+Vo_<>ev1%F zeAXeW>WTmY&SyIi9x51owX;J5z3$HbpXL1w?`LX8%oK|jdwR<(Kf+X`ut+EkWNOm=(C#y}Q=i@HYy|_I-PZ2X5G~QSeExY{=&ow(d+%vTqnS!CrsJ4x^ZTq}xRxI+J8FQi%i%IDy zy+ROuiZC)x4%4WDKanJj9kTQ+C(!OnnX|65eTGD)vhzSic2L@~jV|Xw$;#S+5lezP z)2y?xY=e?9i%cdZ>y8T}BGRjDk!XK39vwYN$`sl|#4)9LKBY{UO@nBRDZDbiBAZ{O zWUV3}&<0&y^3oC!s=dSXE>g;>1esw9op60uXS!#EyIMA= zDiP||d9UZiL=xwYW__lIp{pK~vP{Gp!=N28zFl+3RjG+6r89@S+n_*5&t6!m 
zlXGV0RmvPG8FNJ3?HDG%ElT~aoMDI-G^fNVh6xoylxh-*$}zzb^)neplxs`W4(zjT0Mz54J(F;ibUWM5-o$=*cDMcI0#1`O)OGHLc~K_;T9s=zx~@kGTG#(WJxtv zu81@81F!DrC#8a@+L?^07%^iyc8v#_>GUTv*)_uRzx`%UL!IpZncn)$3V*hUpdBAg zB4jnn<%qnH%85JkV)Cpr)H1)Pcs<0s!_N$LX?JI9bmq)jQfi_GDM==deY>*K#4|$d zlIp=?ho^Lmn~m8+=MGJlanq$)-KO?5(=#(enVy-c?VGlnbITAH6>C!zGsbycogkuP zm^CG22rcipGFwqG&Uq1*LJV~oshMa)MP1-*cD5-P$|m1V%=BWe>Fe^DHx&%!O)N9s z9xSAlDg+M}3_+HnR-&n`U_i~&>*B2!ZM~PIKI|nLHZM`rxK{OnDiMD$LsT%R1RX5a z%d{x#cNOp698pS#Fy&Q7bv@=w*XY&QGD0JMWrYgaJ>w`xt}9=8Wnaq!qU!}=!UIKt zB~vm)%9Bwl7?My4>FTV?D$6L1%#PAzRpV8IX#i-$a(X$9nh z36*(NFwkBaf&~T)XduGV4;EDM(l)yAxcAi+=K5Df>25Mk>IB7D;i@op{+)BAV$KmM zJ?<$>-lX+?(A)I){Jp>anYGrmOKZ+FTCzHAvC`J`Dxp?k@|+N9io6dwuOvyLds5Tr z7-oFrmlK8`gX-vHh?QKudi^$YUC-R5XD-#RRsV0*zQ2HOUwWNRbh{QBB{9lv)4UA% zRAr9DvuOg&xJ4htj762m8)dbS9U~5{UioZuZEtPAMT@I0R4rWHUGjqk>jDKl;6Q{1 zY8RDOH;w7sRWMcbsZ_BW7N2rXp(BvQc^7~ z&D5k!OUk^=%-fC(U~1+svGNir6Z5BP?oOVi8Ks;dE*Fong}@gLG#t4?)E(Q0Jw={J zmF1$=E4T7+E05_!G}J`BChk=i6|D{+kiY{GP?1n^7Y}&ULr7ow$XB^_)~c(s8Vt4% zyXrL61D+Y#%gD~i^s4Mi=?Lk_rD$N_fCy>6mh(lWI%AO7F4;a;6@B_N zD?1Re6zR&{>0SELqkC!5z0`CsHM#&?MCVkOqLqSS>At6iUOYAQrVU%RMr);{wSTan zSAnqug<`z1AEic-Jf#qlHheOYF(ihPYT@xfMrs)d0DyTIvXmEkyQPJC3qb+| z3BIu~iZ@L&_D&&71z(C`9f@&Bm<}vxL?UVx!9hforTA5Ssk%_r3URLxzg37|B*bqL z9xfH`;bI}B9-$FZDs83_X2{V(D@iC;nAjkRvq+0)IV;6(AA-iYsF<78t8-#+?k*AT ze)qe(nz-NZ_gS~7TZAXo`?-GZ;ZMEobW!iM?)5Ix(;_V`QtOhMX8wTx8SWuSfFOaC zv&HD`9$KPuPN?X7bi^PtIL07`aG0`V9J6u;;y&*)GE1aH+{#np-nTwPU()sRc+(XO z`P*Lw!?BFtKS>`f6s~a;QG9q_MV4SeLcQ7u86<|1Rjw(bh$>1N!nA_o+=lDKyYg`M z)t{UP*Y}8JdO*8HL_1AAc-IPsV_G${1M}IzPAgcz3tpOeD9glO_g;6+#PZ@OddHZ~ zSh7-NAlW^7LfolDDQLsf;#;G5PB~rVsw%4evJQpo1?=`&wHxA)6*b>`sq<1 z0RvMkFcK;#9Gv;A6o0lom{0M3XJ)%9n^`%QiNetBAcY8W^jylE5MDJ`5=mEEg-V!( z8Y}bJtJVy4xXj>2^x92ctu$+ywT!mV(8{!RyU-D3tl@3hQWgmvA2C%d6Ni%g^6XCX z29t_{F!EPbdb2Es^!Td zcyCA{MWpIMV;7O4gzE%~a578-;FYW7}?VjO( ztk5R!U~9VQP8^Gc>fANW6%FI?wQ*PpfXX{xX{Ns1sh}I<6jVE>TM#eq)R-cOn)#%6 zQURPoj62=7_b%D$TA}RVa-eZX7QG&KkT8pe7?OzdLp5+fttSl7G4y$hX5;sd+B`Wf 
zEQRTz%J`J3D<3MQO*e$i&@({6at+T=G829+Wv-V8pj$)klvIe`dMJaO9gO(U+bOC! z6}Lg#!N+y2lzPI`txGu(V4>1)jk=K%XO7cZ$)Yh7DRm<|d0a%()PcKlf(X{~(8+e} zG7j!eHeO`PNULf?O9sAZAI?%x#SCb=ywRvE7hE|ECfd*`vxyH)F+xY(SPDRsFu@#W zdj4n;?jzJ9(u_7-$$H#nkbpqjMA-Qi!f<3>3e96#3o^chJ!!g8VtEoi-KJ10_4YEa zfSJX+zfL+Ulf-;uPs;{uiU$-6wcbGPR@6dGQAmvGJy(6#(KmdvEt;~)et6)=DPg@p zXDx|mB;3G^>?$#s@(+WNmYUBIMEip}2=r`RM(5teDzU<@lu*<7p7{!`fSE*#*Jl$w zu#2KXppC_jPb=I-Xrdx$iBfq2k0RWq6fTGzR_bdr^45yqT8LSFnOltRp@JYW3Uyn7 z1}I;!7t~l`wz&vcDQBSI&;mxDQ79Dw6awa75IxaL#OUS6yTFXzk(b`mk+&8nPM~;R z==HniBm?P@{qn@w{ZiZhKs2rivI-@4I|GV`LYGGl?Tb%IySQgAj;&bG&zqz3g))UMzjxd6<2SqaT6Gnv~S z7D5qMEXH_~63C9%Eaf|Ei}WkXz>v(6JJLurplFzna@<dH%s0W5#nBK6*&H#ge?9?T`bR(JAfF5 z6N+s4N#!!&2w{Tv8kT~;m=??F28mTHRl4Z%qX}ff4L5S{e^xfww~5vvRj?5nQhtGh zh(b6D`loppPga`$yQMRK*xgEqB{du?hM|hA7If#q9B0kmVRV!!HdX+|VVxz3H%a0< zm}Kq;<$AN&5Tfn};Cmfa7A<4szKU@}?r0#PZWAU0{~uSCVd(C3{)j7!j0%)&3QEqL zKn0(*uD22@EErbDW&j1ZTN`_qlN>=^R`P!`@eDKiI_;_Xo2)4i*fg!7DI?J8ut`BL zHiV2Fa;&{THc0E+kTE)N%(5k*Ec=k;b>dZcBngUaD56WF-cigY@1)SksnggjVn58Y-X0>s-ZL5)87{lrnCrLesNm7sfB4c6~8 zq1oSo7*j_E|31=N@ry7y{hWbeBhjM4Mh;csmmD~h2__n-Td(Bcod$Tyl4u_Y5P^%= zv}Y*WoB)nkC2wfp;g2=);{Y}Ch)U}%E+we&2{d>tdoV~0LY$y$dmPJn<20Byx3II& zhPFfALsj)pMJ_e^WMbO<{hHG^AH3h{7O0s z@rea~oB$PtY{;=fy9^{A!B`bK=6Wmq_Moqu-T>44&b`16XOC+++>N z^l-2WTnyB=!_wu9ntDDGtxQ~wV=O1?|1dNgYLsXDeX&>(ZHEhFYutDTnKNwB zDqpn@M6m%E(TTQF;x#(-mD;dag}~EDv2_Pspj5SzdqDLll}~v=jDkqxCT62`lZi=! zWVGZ|dk9rxbp%iM5Zs0-3NPW}51K)Tue)iNtrz0a7Bt!HU*h#}@LB`#GGaMgfbl@u zIoC=RS$#9V7S*Q4LVpl8U!cfQuI!2!8b@T`fZEOFkv*g87W9HR!h`<17auyHWB^>E zs52vSRn6O|!K5V{P6&D~dR#M!9lUe@HuUEy0x0~adA!u$+NWo1=TWVVHS|heWLu(! 
zAY-6grNN{~CQC}ABqM(mb!{;3Q1aDPqaVh+H7f{`rMeFY)t)D5Mv^Hs8L$XFvPrU|E0hpr6eHEl!tUxn`U#w&bfClQkSL*Qy~ z{0nMfF!_Y!AGIgc48zAfhQGQI(4qs(olXF<)E5v2)WENT!rZEkmF-1!Mcsjk%4i|Hn0D!f)gd zf;Q^0J{`?it19rKSbS+x0L=%;tmn{wz`gy<0N`$Abl)6V(bNv;Z)@rAtFE$lQei~S ze>Y_sXavB#vswgxqsdvnc|Al`T{Q z_nn{SWF8!})_M&wgcY`I`T`Pl+EXsHDNa+%h-Cs1;XEQpJ3@BezPz6dpwOfx{kv7t zzj5~ug7bJOepNCrDX?zbLqImbHCv?~VcY(Q|TOLL~VKLt>)c*~3u>q@qz8w zS5qMoe1!Z|z~iHF5!1%8s~69Cn~{(JrhckwNGc|qB{Tiz8`vfO-gtRmVg?I4w<>0u z^GtcxKf;{WE6M_qgtx=J*si)s^5Cc&mdcptTu)MlJ|r#!H%w*-fxMGMMaU89=LC!# zi586-`K}_*5QGF66mJg$AhwUnL)F(N>U3L-y2WO&;ERA469Vm+^Qw%ZmQCv%fu;=x zg}kvLJ!)ta1}Km)6#h6q5fMoq?bZJy0w5djc@{;rFtUo!FeM=Y6pa#f2`GQs%KBx4 z(qvit-p;DcqB8{8&V`)P^?o^MS{dx)?663ehQ8(yw1|x>akJXI=8!LKB$;uDEY7IB z5?(O#GUO_hmm`AiVx4yCf{4-eFmQ~vtSlBW#bG^+>cA?)STrlyy{0(N(R?dS6P%!# zs9ml6>D1feY_Dgw`apNaZ5H^~nUxD~u=3RFf)BJGPQ|cIT4#Nii7UZ1R zKnV$Ijccq(X1P{C-!o$U$6^kH5x`4pjhFWk9kprOL;=tOEtcQzhD~mugRx;?S0>SR z1*dy7twvvf>3E+yzk7ycb);FkL6dR%l2s zQ%>No%O{HGngrHlP|L<=!azw~SlhG0QU9e7@;0|xuX4u{=CCgD2;pI@-lAD3Q$?gV zAtr6zc`P2oLkNk&y%80t90#C?Wr$EQk{d$%b5#&e>tHE&*aSUJ;QcXJ_Kq2&vM)Oa zg=dczK8co{$E8Q+peik_a5>Z1HB^!I#k{J-+HB`dfdmQK>eLkA{K%E28|!rqZE%xO zJe3&-+E`izZ6<%>>l@JU<`lHx4tDt3`gzNWvk5A78M*yHv!TAUJ%QZl!j~*Hgc|zC zASWS`N9)vr=QES&Y@4m=D@v*$iKL~E?nH#O%aM|;BAY9>@$uc1?R?W24sZ;i{04u+ z)Ooa)T0Mw1R4T~}$a(p}FGBX{A3OVufQW+BSk)FgeI^5bZXT|gsDfX(Hz^=R0OmsPJ1J{nH`8kiYr@RcW2PAVGv1+6QP?fp0|9`8hT zNuG=@eT@yEce<%(yG90TYUJ2)20scAg5e}W9*5tvKCj8R)pdAE#*iT3|6K$D4~1b+4-Z+ zK&nZE0BLrx7!v>&h-~)&Jc8dHj4fdYlS`XDyc)=iJn{GONQyV9i=K7NB%Ch8 zvRaQ`cJvc~P$}U{fUswjRU5Y?PqF}7)QdWwlcz zl+{aM@3NUo0>3lxXiXWsB11s`Z;qYs+A=^x z7GFjrY*4ish}-n+P|Gh!a)?6o{O;w)eVqR)Vsa5c298yxBSb#|5`Am&Q$l9z%TmH4 zjMNLzodF7Dze6D_L}z>yeAIpC+My|+$er^jHJy<+oun;-KA+el* z7sV~!^=BZJ#=k-dC@rrWJF zxeiM%SanXbgKBLg(?HSX-ES|&h3qOcb?somw8@m(O_d;F`vN5wio)b?a}`3G@xjl< zwA@HQlC}F9+$++FVg2u!wQqXPZujK zUy<-0I=Apd!18CqG&+wQcP(HTP0@t}rL zB89^>QSKsd8K_yaa5DQc9`!rP-!xo~LW~FN3<{i#2CzF&HHa9J%}@9I9jPoTF!HmC 
zbB5p(z*F=nyuAhV7?B@&qOjjMt&b=}1ugj!UWJ|!jxkLXB3vL~0s&mS=8*(}FI?|qbZxK<5l*0ms#X6iIYeU<9${~%;t*;uq(HJSjlA_fQk z0IMpV(bQGlt^}*?Sk>D95qpOZu(*l?evt3s%8e8(5opZGDxvAfn3TgL^URhZK1Vgi z4q|*muh4_k3d>;jf7znpDTRN7s-B?M&!D9aw!~MElA}Mcn38!h-eAox;2*nlmnoE< z4zg3HB5UOXWQ|-@0E5b2Kg3p~0JilVt8%)iM?EdjTDSF~2>B!&#-7a4Rc#7&Udu4t%Msw;S!->sJBWaeD2&7mcL6+FZow%7yjtC1q=Q|&Eq98X?bVEPL(J~$WWKp_Z%A4Djs?~NyHFF z+LP(`{Y~FrcmLh%$p9!`@m3KxGmBoE2nA~*d0#kfUd>k>T&I)Z;hCDINk&Z_D)VWY zWsl)9F~=xSTnMa8+8t=hFjoeZqCdJre3<*)DP!(j6Z4Lz+c(cvhe9Nu{6nmPe(T>B zq9H!G+qBI}RS)0*;fOgVhFD=p=>?KKzjDUTp*-eNj1`lIa$WM&5!^3a+R5D zTcxegR=8G}Ys@v~8jlJSp>TLqhR&1sPS!Jd00#p&(AyG`Dyiy$_{B1W$wKUmgGAV@ z(shg>lx|&6n32A`JL8?}&UWZKG9TJ$Xy06PYg;R;<=HHWOtnkM!P_;q&@$Gb2_NyL zL9!4WWhiZ&)FK;(d-L(@jl(r1&|0>5w)ssWYvqZ&RjxB4R~6d!ipZ4~v?=mhrDzsU z3FAG|bEp)hk37Ug;VlvCt`5z{f>0=%p{kxz6`fL5f$UX2)heGR)?H%x607dAmg=Ty zv{bm@$JnJroYGNc-0Mz>VygWiMZ~CHDQ7_ljYb)#C00x!MI@S{|ALefI$Oa{z&ge z`rNAWDjdR$qvbnE)Yw>pFUD91LH?~XhK(9C$oOfXElrEGu>J=UzU2or@z*_6knp-S zO*_R96>SnidnV<^%#jsJZfd+95T~?(0~I84iI3_U~em5=qM6q5LBBLFIb4F^3Rmb@%Uhw zsF#W`Xnh(JUUN5;)A=LeUP&YAY7I9!N%eUnA3_0j&j>ToY)Gu(GV7?WCYZp;$frB> zL@50E0F7)f@-xfmY`bY~CaYBPJ*cBs9Z}Vhi;%Ji$%>GwHVJE!uC_cFk!^Ou8&;R> z{F>s;Xh!bXJVcREJJCG)akRuK0r$+G*3q}4;(sNKNg*PZa>*IDxK7%Q)E#8Vc(1U` zSXDYS7FuXd)*z*MrQM3?)I=vHIy0^ExtCU4v?ArIrCt#Vm8;IpV6;G1MdaN z6^&fQH9JaT6jp@5-4jJ>c}@`8mKcwu-q4IohK6KFNQOo=EVK={d$@;tgjc;0-r?4* zZ_QTI%(nG2x}`i@+q^TiRo_j0XX-=Cx->G{S}LU|ff`qb7MUlcRQ54>g zEzU!EW0Y}I)@X{;f+flk(cE{nS8F4-9lkc35qZF}m62s?8P-;%rP^SIwTYLQxL5fB z>B^~PwO4&-cr#%$qO8Y-PS{W-M*|JV5JC_!l$b#^YWz4sMMmCRlPRsqTx6%+qrK5? 
z(Jn2~qIVmV-N`kIB`>Yc7%N(=mU<<`B)s9JxEe&uAr5`zTZ-Y9u9n-owY}Rb#I3DG zBNSXq`&C`K;(m|&pJ-E674@#CcxsBLqoanqpz3 z94Sj^${;~>N=bsNgybe!BbkcFeEX}Bj<}{{08Bu$zpd$LYaeeNz|Ct*RAVM8pteLP ztlATEZLS;a5_MSHTrwg{B|Gb#lGUD#7NJn9gI8B+^{T3M-BK3{fdlR{K>`Ps5Nq@- zAxBrrI94s9%Ap`dxa6^z&@pt&jPU&S`K;f6_R|AE$CKI7KT^6G2P}>ELZ&7e*=3$S zu&X>9*;!fv&a+toK^XmW&XyRoJzI2lONtG zh=|C*GU4nnb@1;D$?j~4WOqk)cBgfBCL=rAyVBC~nbz8<)^=~%Y~J;r*^msqWG=7S zmdCn^}5G>?&^2%w|igRJMHnddM(r*zpJK-c~vZj zCyWvgAyU=cXwg$u#W!qJ-62ZHFClYqPrdY%Nb*%PbyIaybyJgGx|z?+XXf{LU;l@4 z{5Ci!VN!a`@d_LSAomzm8ZFDnOBK@%xgCH+-MTpKTS%F81LAvLM%Y$Q-XSDdy4bj6 zub)X`TA5U`k_P-eZ||{Zgn)JI$v!7k0bU&0)H%bNDq4-8LKQ-{rOv>@R%BJq@{ijkjKe|}A!t>`aNuriZiiNL2~8NHpJBZ{o$aOn zrF&3i^u#XT4{%$Gfbp1XC@`8j9A^{f1YZ(P?;vaz4?T*4(B1RCu}|zIucR1RW0X*Z z)&e8Za&b=BGny0uC{bnEwInvwPUh$T=e=uHx;Y;m4Q&5g-fhp5c^@Lv6@1_gK^%X8 zDa8F|;DL0hOkg3GP8z{U4RN}{x%xke)x=2&Du2;`qCKL80p*^AM|S$cx5I@t0*E?G zq`^BP5Nw^Yvmi&Ax*j@!nnD|@O>;m@l{Yc3DAz4qSxPKJ9^%x3)C@AZyyT z_Gg8mtx)7Q<(s;67VzsQt(+@iX8&#H!)S|1j2n?Ipo|xaQq+s#SO`3AP3IfjT)3%3 zpy?H42z0TOod`#hm{{GFO(cars^zNWnHCNJsN^EsCX<&68QwSz#3}BPqyfjUGKnh< z*4T!HEBY~;H35Q^ivsTkd0qnh3(BXu!Y*Pte7c?|#8vDprkcJvh>eYhNGd6_AeYdn zYhUz160$=|&8-4-v3CXj6S8Qgv&)jXALH?aq%6kQ>XF!Hso75`U35wV3sYcp*Vc<= zCe}=A!q8hnVee*#W}v8mY)eZ1`t^y7hjb^Pfvt%6R(i!UKQo(~4jcZ}3KJz6Xt#){ zaAn7bL?n{3`G;sgYc}gx4TtjBA*`a*Trl z>xO?xnk8=y5Q(BdL$Ya`O~&FwdL@u>20|gZ=|o9i&RCyQ!#gd8oq8>B;QoB36+XE( z03=fOF2|onC8oC00^B-yHb_U@20lo2%OTr&H*$Y$~0iJU7Q=@ z9vuN1#p32_079(jHz`kbfoGehi$(@ewdoFyih%$5eFf?PsxS~CnP?f{c%tBj<^T$m z?|ZPw`5Z5NoDKvuIP{8f7zd3oeJsB35X=0p!XO@QS0ZSc@b75f6C^FFoSSI|%^5i}Z6QOYrLyq^#gx?M#@c(;)GSbR~H|NJ~ z5jfH8lJd%`O!!Na+milc_K~A9;cr-0!c*(6T*Oin?h(OF#pKx|mia-}>1d0C4j)jD zJ8{4Q1IJ5H@eJ9vPE-Bw6#lSd*s2=@>Xt34jNa&=nqE<+^8l@bu%1$-4wiZgkoXl_ z0|h5ws#6WH-`YVHk?j?xIu<9m;(r+GV^!O;z;nEyZz7Ocv?{W)#!HtOE6s74 zCvZ>(7S(fJFye)dj>C@+g2WS=LfU(gXDXbBu28I3A7I;?7*3q~1;C|fR&0kIy5TqqXL zBGx?YQZonl3xKzzZ^)%k+H(`Ho<1*x^`L)y6qkZq>S68tddlM%a;8D431c9ZYAIt+ 
zav!l=y)qwaxDfP$K#$Ev;20BKiOfMaPnD8Gz#vmp5>Q=PfpO!8htIPFp=UL9LQ43F zxh886Wj$3ejT}C&0v007aSK-nxkt7+SR@4fQeh*OQd&5}xJAAjg1k+r_!6-zVLW^O zr`(UP?xN^(N@K`4U<<|bF3GF}_&6%5nl0n%P9^%T&gg9QzI7z?@`E6*|J8a*%k5*c z6U^4qk|)?sXjRJRKnrmQm?Vc>(nXL&92`2kxDyTA2&t(|gAm%box{ya_A{s;B73IL zC@lS3et=eTh)arX*iWwHql`Kv>ac{UaYuv?Cp(p6SUyAKkO{~*F=A)4vG-3hK_N{f zKSyqp_9U?X`>td`1~Z|*6ZycT6nrdZki%Cbs4z}nv4lP5u1kl|M$G4}P9`c?BkMo; z!My^JjOO*=5V;q1$0)#{~ z5Z7J4CH(_z$QIuR3xfua4!vdjj+YEs^aWy`V*>}FJXEOf6UXk>i$^pKdh0JAv*v!3 za};XU3Abrn@oCWKcd60WHd4Fkv4W0{q9XJ|`|&;11&a+Dvefw2!r>j>&r^ju6tWK2 zQOCpzAuOzxs1hN8wx+BH^TXx0zIfl|i>C78)LpFFzYH`m2gPy4aaL;0i9op+>8SoT z0^`txP%Mc(Nt#FC%@K2~SoiM-DKHebA^#GEVm3JGqWxCO!-)Ne}@c9M>%_3dLGz zc{c}Tq4iM=AyG_)(X=BGA~HlAr<`r)ohXX3a!$}YRveMi3IJj(ymX+DLT=*r!#-Y+ z4a_BAlgKzwu8H6vxg@BNE1cuR3&2iF8J#TIN*fO3fc{g`-h(jPQf7)`lTwDCza!d&+fI5hO9eX_hA%Dl3qZazp z^?mWS@Du?8xJ5_P5oNnMg3TIm_MK6MCC?V!6NJ3kGk1S;ZP?PFX56CM0V=A5xoEXN zMoAQ@@|iFo>gq*Wqqx{eB}%)i zcBxB{RD%i9BmerBcz_zBuT$NCK@_q1(a7j=qheSo1sRK*0Z2fm2mX*&9^RwL#vqFw zvC=kZfFh+Q(zHFBas*XqhHDh%J-oZAP=MwY`pNjvf>hkf4PeeOwM z!R@^;v9*3xlxS0i_K;EL(6mgCHKnuRW{PAVcqLG}l67pI)($Dd6)#l`5nb~)XGnIs z72~O{C3rnO7tQ;tAl#wo+=RO|=*-1vjy=b5+5%PEvXc#1Tij}Fmhx>YE?2v4LG$4E+o*M z(*VWnoi)S7$%er_VB(r4dm|ucYPD>ojoJ(G)+vXSgyiF7(3KjxuD%OzN(m&ih|$xE z|6nioFW;L#BP46=1YU|9!g^9q769z95EE3!zqkfY3GkVMYha*JMmDMr?+Y6uUJ>5p z_mVXtVGN>p_DvanC!8dV-eB}3u|R)CaVR76BM80vb_X(>+9x{NL<~;q@ju#a8HfKZ3g&H@PbzhR}l`OTp3B6PmSlB#FQVFB_L&@*@ zOd70KLE}iG9QjBgTu3#wB6L7#tsGfDiuR!;_`I^Vjg`fM-Njyo3up<}L{N#rqMXi3 zjjGd(6mb%(2Gu4$#{}mLJ-LKpgt1c5pGd;4cV&Y^tLS^LDbNw33vQ270A*hl;~+}+ zVGqj6b?XH1g_FCKDDXkTe+`oo+L5L;;n-9HG)j6(-{Acx}&b zVqjYKBb;R(Jj0GdAj*(Q=bxFt^?R36Y}$=+0a@#wS| z#u~bsam1-SUz7z!v9PW11gauLiMY42;i+>M@nUJ}cB#s3fcd-krp7bN-F$lhY(--~ zHgEF7N4)?2xgRi@z>vzlxBn4wh zeZosJC1F-xXSTLwtY24+j32=hLSoZ$$9Yu1XUW_aNF9r&P`^PO3{-zX2y~z)Rbq4F zU`U`7oxtWX34Ao10FlD{t0R*HJFDvWulz>Vn1+_Ml~M2v)>xMh_ymArhSYD=0B!?~ zovX$`7Mtt1)kE>pJ`F`FdZd?DCt8uVZ$${WrdGlWrxi-|*-SxsCr%M*cc=EHa%vB% 
zWfZg^E;s;P0E)>c66sVzug7vsqD=*oR2o_UL*C(9MYU>`k~Bc~-88;d$hmHf+I^*f zNXd!-8NO035S%j0kzScPpY5GQYA;nX*&oZ){~$7=O&IhO2lm}{N?^Ben1X~vufj)) za~Dq({4|T^DMDwJ&cb8dn@PU;4?_DIMtwkn&bOjoopubL)#1zOjl@jjj@H4y^Foeg|L>5Y2_;t)7U>D9yLye;E1K_vY^UOGT zm8q!y`|ONJ>ETlNo1);pPoTWG6n*zs(byiqW3wE;Pk_QsI_3L-O3Mz%yr%Zs#H|c{ zg%}P&WNW0YfofIuaw22_cVQ2d;S7Ws$fgTf0CFf#-RTW^dGWP-Hn~`Nf&#keQ|QD4 z`bYp+Qf{FrsMBCyS0Ms1!dw{ayR@tm5Da1ilCbGqLW__UB)Yf<;v>3N(5Qd_lj8M{ z^EJQ(YcxXiPJ=Z6L@`%#GE#>;j|syWYh={}sAYz=bfLQ}W<}aDIk3p>#jiy29~a7D zb0}5)s*|w=|E+eN*DNks{!~Qp*hVD1QBke@LFY1MHa{etA}aa{3c?|^6xsp0Y!vI@Dkm|IP3Mpu!R;?Bm-YJegMO8nFc;vcYz-iqE5UdDc z+UDZP%o#(x16wLall=t5eVHK zI*I|kA1=7o4;Q5|@KpunG(Or`HI!X$J3InL5ZRjQ!=6`I+gHe}B9u%k7)vc7)c%S z%!~d0EZK_&l6&}ALg9jH=oo*S@;pepf?5T`3C{3f?A;3pS2JV?3oNZIb!-E?o<*I> zgKuyMi=rjXA0n>*4F8n@X!zAs?JJQltXbYS`P~DuvbkL~m*K|L0EW zv6}u2l`Bah%58AWvN0aO_66C)h6Yfy1oF0Rn0F{fEMH&PQ=ttwofWB(0-fKPJd&cY z;`Sp=p!0`X7HPLHI-pb1YKDML>FLF_--#T`4w!W$rYyq<3v_Hjf~Q@G$WHm*@lWCm z91UP|c<2(V!x3~u$T7>?i}^RIq5B3ze-zL9C+0Zx4Sn3Qt+lKE@#ryLdjh!6&4UgA zDDVp(1Mr$(1h!Ka zUy-LR9+as67x?1?uoPV*(S8OBUOVjJC_^DOxrGFJz0pP0OzEO-gnFEhncE)}6V2Rj zspXk`H!_GS=_2VR?+5`a?G7!Y51C26k){wf2}6Yv9zTdO8OSBYmQXr)cLukydZP@A z`Z3i=xt=ChKWoP(zSd^T3J8zEvP$}bJj;+1kY9>w51u&6Kvkohi%4~rk1fHLJ$(Wb z8s+&kp!S5iX`5{!1F(eTGsoazP}Uou!Kg}xQKtpBISMOPwwBTM3d7(~%f!$K6~n<9 z0+}Uq!zGx(OIdb6YX(d;k79$D2Q$EK)IqAUO{y19HEuskPDX5S8uR_K21e|LwrQ2N zacE;<0`A&$B%ndn`o>vv)VlTJOYeLG1htYLCW^dp!_tw?%wP`=zyS=}0Gi|!9w0Ce z02+)66X8u0LP-AAKhdueT=A47_pWe-7*Cz&Io#O+*8$!Eg{nX~2D$cZi1%%*1B4pu zgg|+kC8G{DtkWOb2CA{ea1Z*IK<9{qyU7;t&3uwh> zp&+b8;Sg~=Y7`=OGrUmB$T&P48L|+XGQ`|UxsLf&uGd()#;ENWz1pn4`n^=EDHszG4AZpN+A+<5eQR-rlb78qMI?BNG4de! 
zPfu&*X=;IjpjwrLEmpNyHN_zhg=MTZOyP*h#VVM5GAoQi@`kH@5m(N}KbWUv{W3YY zScSzpeAQu99kvG1g+(eX8e#Fsj8kSzG9!{zOXZu)#4!YtLgv zl9j^X8m9c{@v;W&@k708SJYpOy+vF-)(3CB=XVu}kVX>QBHFSJ7Ii`KVzRy?b@ z(vsS?)!xmTa4Ty2v+UVWdvjKs-t=^B({_OHth5~<^gh$yFE8PDyitRLN-~}Vhhbz^ z4k3yPVi;*C&WbZ;oX$&eBT0!yc_3sAHI{&L%c}YA1Vh{W3C-F)9ixO-*$flt>Rz=a z9}ec!own6lr%G<8goV_ye3J|g_$v$-%82`gkMxvMAoxO-qeUBq#9@4}7luNip@s6P z<<(e1w(QPVW>ck>A=v@KAXr_M(Gk&=QD$&JMn*PRMsroG%dA{xX~&uuR}`6#4Hv4+BdYv&?d9nW_Qi*vN_e@TRzI#iL!K}Y@ISo zJ%XRj)1@iVWMwt84@3kD846=4Q5Y+Cq1X*L#;BYzSPTQn3Lm3DX1I=wVs2Ae0~+l` z6v}qRLc!2MAMWm`ElnypoM{2M9(s-l>Y{Y^ZuyRe|pygVwYw zSa~*sgAARs$vHAa6t28gnRYnM%V#M;gX4IYQ?;ELp#{*u&<+&Q;BIDohvRAj(w%E8 z!E<-E`){glcchn>EtIK35tCJoY!tz}eUe%fD zu<8t}%wBU-l}0bLp3SA2OTAqO-Un|Gzo?V~@hD+&t~gebVV4REJ`a?lT%w4idS#j6 znB530h`<7LqPYTSa8&mj8JX!Gq<=s%2$QI9O4^1h)Z|Dpbfq1=k#;!@gA@)WNKvMd zLU9fM3E|wI4x&AHd(dn`+e{@14iM@aOuG{5gJ(>@xfx?l68;bn&MYxu%&6w18YR^# zv)24?zB8Zz0}9k-FhLXrUld{stWg*nV-Pb@8AxsDH5MtZL$Zw7=jTgbo4z)E?fkF> z;Ie1eJ3?%GfKY2*9U&a~(y)B>kzIh;BeY`n&U&{t3!>w9X@@M#6iVEMkzqB3NEsYF zNC^iB$4Scrgqr@c%O}j;9Z#17u?(e{zc3xsA{AoHye#?-$;Zfr^b7N$0}^pD)La^p z5hEM!d0tG0s0o9XRrzWYZq8Ox8qt}U;5jDRu5-fCc<3u*p^LsUzHT&n(w1ESsYvMg z(}^^$Iv6DtPbIDoca@y>UhMT z@J42>mu@V2&FzMRFi7+d;yfEb=k4(GoBs}N?jL$Slv1wnf*6Pjri2$0`lH!9lyF!) zd%ftn?7ZxTh^`3yTwS~PITz>N32G>?xmamsA3TdFjG3b>h*C(A!ytngF>-@W_92Ie z`~#R3SgJX2DFdLfs4xRr*JJq~%g#MK2*JlVc>BV&c^ zsYQVc^W)%f;o-B*q^fcjuWs|r=ereKTr6Y@uyAU2>l}zqIdSn05LImBY4jv6*oGJ_ zK%YhHs0K!t(Mb)jSC=oAGv=>Uzcp7=wW_`+hKz{qS(=AC$xU?+l!YpdZ5rtka<1a+ z6a|_Gs}XHEkF-p+6yK_d_5bPfTIG%6jj)9*6ksNLi1Q~yII%-Di_^Vb_d5fj3F!#Z z$dwWGRiQHhZGMYtzQ}1LnR1UNC#6fYg)g5|_2|`yC4{htr9q71*~6B1LswDM-6mk(1lygtg)Spsop~H(8?V=h2%%RW~7{5iiae$ z12A6VjEMr2q9>{lY2F1D&d%uU(rz?NQ>da!YDAADai*!uF+^}E+VbEu4SI4h-p8I? 
z80`|t2JC^Cqi|DBurdTKJ)`!Pi^U}8&}Xa{@CVv0=}8DHIm{jy_vVZeLJOS`B5c?}ctCMQi2F3C<-%IH&{_ zG|03F*LCz`pmuGPtqOZ~GYYEvqj^k_EukHGDd7-30cxcOhcrC2K-dfws+Jy&lI6%U zuGN#3m2jRvW+d5+3Tx~8XCwxJkYta7EF3XP_jX$xjM<>{E$1mZ*c=^G#AHcoM!$n@7TnSOk-QR~hWkSWk|le`PU81t z;}h(_H^$hme@b>_<1@?lexJCLCCyPzU_TGKOS)w|09^&=xPWFB_@tJi%B-~N?9oKV z97)eqd8VRbhZK)ev8o=98icR3aj?}5J*4|GQnQS?G#iWs02Ef+Qz<+ZZ}MAjgY;f zV{v2vU++~}`Y9=^irkB;vmVhH{&a502}nbsnox9mIkk<~0Uw}JHK0a7;1ON+kw7no zeCAp&=g_Vv1C4&{V7*+oih2pm=HJmg7db8PeGf+GTV34O%gp2ttA5~lM$v*K-;IO{ zA`LG~j~-Mxe&-N256D3B=H-NjqGAFI=*?RNhCFR##0elhiYeU0BiyTj;(+@&cEZ@a zav?ECk^sE0zXXqGOgl6XbU|k*H!!8{8JW;H9OfzoIYBx@9(?Bt9&*|O;Qf|U8AN9Uub{JU z%M8*~$OtDu6ez?{6Hx*?j?%PctosOFu*PG(@JWDG#9u<^m?sH(NhJ z=RP9%$i;xyjaiWJM4`RKHsNj8cT zn<@Ej08rY-6UFy-QK;2TiQ4k}@WFw%{>Mp0*6t@oRl^~A4l}}IO@g3R!W=ObVkAz+ zGL7WT>_ezyqeP^*NW{wkT_iI=v|`uM&Sqqh>^(7Z)( zCU@kG3$&sb!HTqhtZQe>!=SdJ_R{uVHCU7eui3#59*To0wN%-E*3{@^1Da)ZzCF!A z`Eq&0rpZxJ3FggUu?1&raU@E(pveI?6LQ};gx?FX+sf+Vgih2@LWjO$Pw#se;`j>* z8{|hRA`#~ZcTAVjD27*HrLK;4IZ2VX`7>0Hm#2WweYG;MbeKyBu%h0$xa6mt2%1r* zeyX>0m3fu~Bs`v57^XyO(#YGVh53-^C3g@Olqt%)2oth|Ah>I$jL}qzy>4RTw&QElj!QC5l=3+D- zdyZhV1xU8#88dBeQ3ys9$~);FOWXDYqtPVGU}UAwz%2%Heqg;obV!sf#Sx%bIn{z- z5N8;yD~P&@bVg_-6bB=!w=2^)5*OH zBY9pq60B$7S&*X!mbyw!G7UY3mIjxo7jGAQ6?7As=?Dj3G};CR>q4Svw0O}dqN zGO_Byuba1%t{Kwq)DZ&a)z~d&f$7u_V6pCqbP@_P;L#+&reT5wJ|?J1 z2w7$@Kx*IN$~6N))EcrB8&H4``wqaqmY@aOHz9)6+AYODXkH-)`eV5!R(s)8debKaDKUEc3VbD}zRZn)W@}+m^g7E8zd6J^7H>e^#eMmlL)NgdR4kZ(19c~t* z8`3|3WfUPer6OwKcE22yiDjaj3X8561xI1IeQ1CTrRe*rZFEu~n-ZdjuSW@(#IvNS zgaCXtOY)7FR9-^K#|zHs1As!Hi)nly8ik_cgkDy5!6fG^#sQG%TY6kVc))mV9b&TO zj)FX$P%vrIIZQ>+V!yHhpB79o$59=y35p8t^)Wbri`rM%Q}lZ9>dV_oKRn!P5)U_Z zPsT%saf-3}z*1N%*`tSIUkD`H?Gq9yO^LaX-fp zgWYQ6X)!cq$p;6BbvOd6mSi&RctgXF{RH$rFux=O5-;krotPSai34*$07B-GBj|k? 
zZAdeOxj7AfShV0uR*2}r7=!}z$F^j0S!sdyV^DRdK2JpW7y`7IG-cp}Z5E5i*xZmq zL8{~@YDdu8)hFQ}J_}nid}PCOR$dtF^Pu2gC`{GsQ;EV=o#{XXsJ~}HSR%CAruGRl zl-t8fLQUN|6idhp-Un`OIu!qDLw})D6 zim>W|n{kkTK^7A#%u^UX2EE&#@A|~o)kkByG$u#yj5HE=50Iyq8ZLA?m{sA7Xk)8@ zC+B#c8b(3R$`C6$DzHa3i*{@29IQQgT+SOYy+sS4JVm}+jiV-i;me+b;p4K$VIp`D>b)H z=j0@x2ZaA8`l!uG$Iozz*dT69a#wVcNA|?pW;~}6A?(+E1?IEHYF zXXxR7t;rDd-@-M$r$7RBo8i_79tZYRiw_f%7SeC1Pz@>qMXSf}eWK;oHTw$CT@aMh za_+Y2dYcUy>RNVoLK}b^IzCg2pm8a&d)kzARqLXoY&G||JDF)?bQZB(FA(HBo^ zLeuWha{M+fG&;;8U|ba=jSYlV>B|#c>!P`|h35XeuVTGuYzk=*2>ETI$0;OLu#&K~ z3JZ_{?nH7c%*2t)TY$KYmH9SNdK~W95ioL1i`l6>J}P%VP)N&W|Bumdc}O_|LBL6D zLX)oMfAA%@;D>x$4b8~b1!N$Hh|Fe$E;M$SpA4QQ1hDH-0mA-pI3--9yx7F5DTL@T zYDfYy(ru0SVeG?!U`n-!p7Loy2>-!4=%pH8tZsyc&D1AUR%G^j;l{^xO131yl4I=}DPdr=G&=~{FN9sBS?W5pDIfME+5>-=goke!P zl~_gMX8I*oU=CAF8G{WQ4Z#SdvA72rC^l^0b(bBr?uy z-;Yn~?t#;Tu`hHB58klcIB_9Jj-tTfw4ZWh`8sUfB-Mowz>;pNO;H^ubV7u}C##0{ z&%SXj7%3pxV7*>-**Sq5$+orD7H|g;qUuy4R^b4}PcyqPn)bWIvktkmjUh1R z0`ajn8U-{j%~<zagz`e`B|SH)i>BylzJ zn3B2)uua{dt0kc&ak7MYBzHUlB9Yt|i$GEvH8NV7wGITKRV>y*rAxncxKg9ECtGL}$3NsqHuM~tc77_&QM zbSFteZ2_cPXfcbwR8^eK!EuFqxD`?v$KbrWntC-6VPs#H{3cQD`k)^(yH#gGLfN}% zQ}shMvGM_R_U2jhv$E((5Fyrdo^Dv)MFbdqtm^XBS~+-na~!y+Tx3(p20G7Hbvm;}uIaeGYoH~>p!&(^0Aceq(o4s)%`_^&+_U?ZI)L=qN>nERnjcYpL{;Q7 z%;X~yspi0EDK~?eyIFz6KeLRX6aw3a8NM>*H-sA03m3p-UJVEdLTCpR!V-5}X|l&Z z(&-e5a37~;7_Zv!OdSl-2GzZe64Ovn8Tp59f+TPFPGS~erU@khz_}~5@}$~i0CXFH zfN!pRfN!=r5&@MGv&atZxeRpd!-Av2Y6?P%PXI%E-j)W$d=yoR#V=Nv7DC{uGg!CA z>40+GJLzRePV2%`hF(&`H8Fj49BJ}S&o)ey%=!@rQOYUn2VEXUJj?IQNryCIJS-TM z$uz&D_8vXF_Q?=NEPzj8lmIW*kZGl5uWVk{iNS$0w0LUL1Dvd-;Hm8H8>*l<6j>3o z>KLIDH`t28(y7NRY!Z z*jha0idHf%smM{ooJ}WX^&nt~HQLmWu=@PFWnaES|9NGj|Nnl+!HHYOw%zne5I|1$_5_zQ*s0h?J!V7;i83UC=^Ja!v*2QyECs25b9*uyABZO|J-gRt%Ir%*vwUThH8CgA3ayHBIGhNn!fOWF z#9#x7R+l!oPF*}rC5)ewRd>WqvrjBd^^BdS=L^bOY}Llv1j!%`YkWe|1;E49Iy zUS;u$2vubgG#KEg!XJxxn`%pf+sxFp!Xq>`f%+a=6%nRA;qRyGPo{)l&v^GzNK~T0 zJfp&(F;qkla%Lz#cjg#Gu(IS6t<2|y4G%}Us{R1MsL-Xs!b=$RAes&k2GK`|n6Cg1 zU=Y9o4EjEKZEzY+wb=p 
z<2!g2HBuCYvK9sv7gH3*Ffy`GPDC|Em@kM8Lx~(E?x;}24vkbqDpH*X2vbNcy#1`Y zvjZ*vZU7og9W||-&%`5Y9wwQ7iIaB#8YJ2U=Fup#>;}x25oYETP0X%G76`3C%Ft54OcU`I$TzC=>;U1GWHp9HWM_#(R#hKE z3_}%Caiw)JWaYWV8iUr-VQ)kZ9UBJM)ffdLs#R-fEVJ@H0D6 zb2!KqL>c6S6^3HgF*q{}qKK{xA#hY{gcMS!C{)LdOq}8Z70}b7tSM2Mhp^O77TJ@f z#L3@e`F}xTL|Qh(Xdvfb311XmO2gPq?t=?sD#=(6WRT&RHhIc+(v~#Y`b?JcHJPX5 z>9=dRP#_|&kW>LlHzmDEca!dB?;}|+wD?$t6a@t0xe ztfZUaA?Cn>mq>r&ymaFW6VS(V@|}FgInIsnMmVJj<}J?W*Sp?t3D+x3@V`edR{nEB zfgXdPDu|(0lfmI^yh=G7bHa)kl%<1iqEP1QLa~}pHMh)Wu9<5lNRlB&l4a0&(5pQC z%`R)Wa9K3vK}t?j;!q@oVX&$=tBsYEcxkLNyYh8_FfW9sYnYkEEKcQAPT>?KRVE@` zLi+$B4&i-j_CU`2JY6GqK~pG<8EGmi6e?iZ3x!*`0vYgyTu4z`XolgVOiexK)tyoA z?v9GobD`^MsFsjDK$sRH+|E5(Ax-OS%3RaBO>JtH_qYMI4zfXxY-(^6i17yJ zip5-rjEjgPW!zYjtW^pFF`ZgpJ!o?J0AWlmtSd9g&Kx^)OeT}bL>?0viF`7djFnh9 z9={T>64PVP7^zu7NQm0Xd=x@RCEVT=w`v$pQg|3?mU*}ugNgEjJ)1wC6Em^TJV3w` zg9=Ck_RK_THh&dXxme}G((wS{$ZVK|mT759Xb0;jT;Yw?VUGt0M@_pzIx9>ov>oSa z_%WQj+T4{zFs?=CJF_xbIm@Px(QjC?R>`WW#{-0$nw1q5TB3RCcz`fvSGVGnmCEj* z+FsRHJsl4asAZj&C5<7OFY6M*XulW%d#nOz|;219XrvW?6kqdopOdu_?$9y z($7ghO!{5cVCJ}QUG@L#mvvTPfdmX-fB*r8!iINPNjntt8KY2QQxI9|d?N~)F^4N8 z3Zs}5$~+TuT%YR|Vw9ED`m;KJR_S+Ny@?0}XbFjlPTTH;v-IsM&k1E%ScP|nugRBh z)#eM$@W!k<1pepZ?e0>wcbcjazSTh|*m{6)ggN=PvZT>Owv1S;MG10oE5a2HJ=q{%i?t3j&q zl$A;rFF{Nk#g(-ucbz;*T!=CyN`PW5hzfDta6D6)quH&b&?m+vw%=nmYkCmViulXN zu0p1c*puNv)vGr+*f+-1D+Qc1FyU4x+EBs7d8RNM;!-6zLz{1E$pF8>Yl);$juS9u2ZZ!jJ>=*$8jV5(d*fMjoY>P zj^)QSwVc(Thv1pJMs#>`3mz6>qYJ}f`#ffpp zv_uC7UrF+0qHLnGO8Yq}%yN#jZEoxaG45>lh-o_q>W!$6P0oU-k)Bi~cxVRVLYdcmSffK#SIufW1jG%uJ;8Ndhv znG|Gc<8SskCP$4n`N#uFrmAuidP50h@5bK{b(g~u2+D2U=N57N1vW3c7D?{fpDV~z zI(GP?G(VJuW2Gxe0U+cin2c$LkCXdQh*{+@q}$G;-#u(hkH;Dj=dj9wm^l5IYXGVZ z66STlp!xxHI*y|EQb!=BaS8hrW9#?cs>+7IDNGyh$1I1ER6 zubh3Iozn7879FUG?B)Skn~yyEt2@RDM`!#jw5UPEk2z4!-_C^froG1^KKU^t-cdAx z$y}Y6j{l<=gT*4#zX1KNqIfn=;~yA|w(5|AzQ1awdl01WZIGV6lAuffsg`q06n9k#Kjqu&U$E9${{+PXCt&OvDy~9 z-3f7Vfm)FnI0{N2eRG?`1kQU9b8^z)-?8lgs_4S_D6#m>reE?#a6eLb^!78bcu=|d 
zYPpk(olehFhljo<7T|y?GD^`a=1#Z@za1owWy!5dTa};uy9Ty~Br;W$Q~D=|2($o} zT5s^*>U`t|YzSS%l+>XQgI~;nka>biMi@s22_dt|o!$TIMsc-9zGTKj&U>*tN1pNG z%Z=JbSzf1aglwjSp%MHZ7*RY?KUr3yQ(zAV87V_^0aRXxmACHBtv&62Yb#FO1ByXC z+mOW$++TQw?QVT0Y&u5yJ$96S*A+5kWk`si$p@y~%-7aST-bEF-pfnu0Vvol&$>T% z>ob{f3#A{yT%H*j?`*HGIz+UPT*`tGunduXSV=27pdQAM^ds++FA1zlNhyNqx2D}b z{a=qny}_}J?-RXZ$#1|2-&2Op^qz77G@lGav}f5!k>dpiNH z`*UL|haI~&%lwrAEU!ziwAI$o-wa-#YEib787`YU)X3rsg2Rlula&lC_Iq$cx4mK` zy7h=!nBciz<0pZ_o;<}Sk_j)C;u~=tzZCVDb^>H)00W{lOqb=4g6!uP5NB%r9%X5M z?cMxd=7LNiq#=3{@xzLkup5HR@WY6A*edQem|T4yoIv9j{{`7&){cI8vOE%Rvy(m$Bkz1T+u`nRh@TBA!*-l*eVUTr7 zGW5ZfcetDxQD_5D0I2X8^5SZOg%oPGEGkJ&oR}~zt&3Xiss&V9#L)x?;xs8%!}Va* zAXsx*8I#X6o{0aI={9+FngZmLhJ$vqwSlG@&#z+)(O5Wd%N{@{54akCb6Te5Oo>Lq zmB;3^81{WQjp6}`xdIJ6<6g^p1Ar9VBN)j#N3K#PNYJ!Cf~lKCiXHqxBSQ{q2~jk` zl+QeZ2B}q%oLk+jP6FPhWq2ql4sJO3O+L5&XNFj2eSvfU7x1dF3@<`o`n))=wp@w> z7@-iXLcbQ84Iqc{svvj~(j0985>T{PgH9~3477bLss*~$pm-bWpou5RfV01?K!(H9 ztwvdy%6XLv;v z)L~!;{~CVGlAzEyWpy<>WPp+(8Jq_X5+&=2C`))lpn)@?patGBDjSvmC~-xMYGD#o z?53eyLYSR<;RQb3Xw?PL==~Cci$tp}q*6@?(3g%*Ru`)2ZAV;2B^(Y{7I>XO0ttPK zg$Em8{Bm}|8D)LApdx4hj;S=G?I}se2RL1^X^eNe)IuCfQRpJW17Rit-IfqBy*~q5 z2nvXQn5gzXbO1|0w7ajU7&lrP#0&O31R;cS6_aMs?}LJ%QddOwCV4g9 zN%DxZ7SQ&mkube7@?il0pcehTi3M*)wCy1J&ZMQdR)Df#*iwZ)f~%I;W7o3aDkbw% zxGo+>c9b8^dM&b7$*4Adnu)zx!>?(wR z-BT^=y@aJ3cjH6AAV{>0Oe_8zkaBD#t}C&jLWGHgA>Xi5G&Ahe8<46EE~e?Uk8BVs zZhOB$$qayqasEn@1IOZpyy;q1g&g}lw9Pb!hzl9#Fk`wro+?1uA7$_cL<}e^7;9Qj z-@7?BQHuAM|K54kqwMcwx8%bE4Jy3AhxABf4vNHBnv*^AAcQ_UJ_sINAs@wE5ZWbQ zHo@bCf_5o_=W&qqffRL|Ox{xL`7pSpzONA{<|`3BQmfM}c;% z3vrsFitre^{*cw6;QmTGZfeODPqUsn?NT~ZiT`*SJH0_Lcl&k=UJW`=+rJrOG4gym-S_ zc>kb?jx7^!k1N!32%+RoCsfypI~2)zKG2s9Oyn^`F1Q-y1$qeeh~Yz-D-iSwL_N?) zjRFH&ho2i{A;`(-v%CqD( z-tY2b@oA6D=XW=Gk{D0Mj)OKc`L7>A`acQu-Fu?;EM-4QR^@lyNte!HDlaXeu(6Z? 
z{@WokP0DL-fX3?7fX;F~HXI&-NeieFICujBU%jh2601moF-*vdXGcY9mB7gbI{-CL zq9C>0oJb;#;(Hbi+QUaAqESi>*DL#Xok0Q#ebWLmAb@u@aM-(CA?wqqVAT`Z;N|;HksRW1i%{Mp-gMe{5LFJM#0PH>t0b0$?n#7d?!5tyi2G;4QHImZP{KTV8(QiM5bpe0x!#YOtC;Xoob z8u|6gkm%DN;LZBqhX^UDM|A22fuD3fiuSH_WLON)CWHhWnwOIdxONc=6ZO`xLP?8R z(BJs4fey$KIn#*bM4K`gv5?p(QhoI*9Y%rE%Lq=H?AMKG8^dF8t zpcrXxpsa{JO9F}y3ASNvmz3uMYGk7YSm$-Yt}3NrfEu&co*=h?j(FitP^5vOhNaK^-Qin5m+TZ7p(DPJY)uB-Va zlw>Ob=4abLdbys$HLeM^BeRGW2ZJIU!8RoYd`@s8_S7EbsSCskX&&iD*&32oV2>^O z1P~hKd>YX9nX_qaS#x4+u1R(+)B~MNOdb_vQC=!wb|blwOdA*p8d$krr#;LAim?$Q z3wn;@_X43cqEUG@_|{_L1eUEM!xYJTi*L&(wf{*v0LY__Z0JIGz=bTTA^Fuo*3ei@ zQb}}c2~k>hc-b)^qBNS1yXQ`gg5VEyQYp+=92qRe$Gh_h7_TW2cwTX#b7A}zOt{R& z(r8Hpij*R5%wGDkb~TW5`GSy$ON9A~Ezk#=DdJPGS0pE6Qn&!kLT7RNrA7V)`rKtS zMT=B&w49MNDY!kKq!>fX#WP)iBlcgH^sUmkam(=VGmsx zW58qE5Ld@Ih|84AZWZPx)9^PODEQ;%6UtK`A^MD8-`*zthcHRTfaJb=jMOAoZ+aM} zi7dMg`H|-wiE%Y6H#K(eE&e))5-fJSIoIL@S*M_esxbhtCarQppF$DOoKRo7RvFsk zrF`_{^&whysw~j4k3`JwKaF$B96cuol`_(hcQm)fOkribl$b?lD^$ z+&UsR!N}5cQ(-@NqZ6OOb;}B43EP{Y|FIiEmh;+c&Bu=z5h7cbl}?PomeJp&pd9mUkb zm{C*=i-Eo3sM`lo;m=&2Jj^YNOF{^5+N&tV|F{h`3^E+u@oSzrJlZV&ctmtDU(i84 zFaLB@*Fvk#hqm<-?&Gr3gB3SLsWCB}t!}jn+AzVI#Ra&zDm552h>-wTz`z0?TxHh^ z)3s)3e}X99X+O|KI{qz+<2<7u+eyb{L*CZ7iFHI0KK z9_Aa^v4RkM)|Tp}K-sfD$Q}4B$`(3kC7w;F*(W(wG$X9%)!S3`bx$kwq)1MEJ+oHg zpmvft!h#k>tVY;-84=`AMJ=rw|0a2M`vf*-4|Rhj9Q*f6cB{=xTTB!7CWOkSC{)9! 
z4XU#MYBd5}8Gnx+k%7>SaWuFNb5ysASM$=F0FO*R-JJ9NVVs93ApFRB?aV1Ri-Q(k zghVZE^~*t@&7S>S$OCkRye@+(X~a3^D42TVa5>BX@3qBHhpgljmQu%{VCY^gnkA~4 z7VgW?hC?H5eJsDW)2ucqx4C?|;H*5#1r|`RVh|YQ4@G&<{lp{DgiWNr~~SxHClWMD&38`N3IjlusqK)1&K#|q4hJ@ZQ*OV3}hf!&U0YN zvl4(OA@770Y(t}2CdPI1UMV&Y8=j_gSLYU@#Vex<=hp_I%gXu74_|(WTOhoQBVU8G zJSqrwdd)rYj9`4h2^FNC)&gTyY1z0~OTxZVM1%o46p<(}z^-VJ25@vZ&Dv~Y(t60= z8;_9a1H+Sqg(|@0KbyNEA|g_hEM5rEqb!0T2!g<4SOTR2Wddw=vzy(_%(VaCEU#E^ zGsd>J@b-<~%~7+|Aa*I)3o5FQ7v^g%gentx2CE3(;Mw#oYypU zr7T~#(U>b-H+wIZC4aNu7#FN#x!3Ek>fM)}ZR*?AZP)kPlrha+d^TCi*ev6kn=6>R zXpBi*G+1`gpm`~CWe-i~rJli;HP`u)RRhOc+kX4M(hakG;nc&ZjvA*jj>;VML{uEG!I2#-i3!p#&0T4U@fq+C2A%VdmA_4`7l;L(= zt<((}OCX*&y04H2(k+j;Z3X_uK<*_k)C3KJ>90G0Kd*0tsW z6rhV70dd|_u(vRfjYaM`RRU$PcBa@M;$eb90t7S|W3XiX&T8wbjs`IyYZte`fc-&>X(75o0Z5YPFFpR~6g|RSfZ!ff?-C%AtiVG{X zS9`T@Ilqmo{ncJ=uG-8CW0QmB@);GGEDTI2p=*On_g)p71cfxs)m;64cX_j7aqLtf zg;ocWY?;|w9f&YW)hm*)chk5~rOw0KmPoq9Yj$GkQq8R0Jri8^R)E;!I9YSwhH;CV1#3==NxYx~In{W_64c zNtKdg6RZO*I!=ERYJ{Bcfl_ju+tiyRTa|^5BXJ=Q2z`M94T$i-PC!BOAYoy`92$ZH zFq{b}gae_$1H}avI3R`z8Xa&!^Cq1r!gvD@v4*Oh?g48s!5;03Z&CLxYnrfOx17 z5n-Vrp$7^ASb#_m6;6Xhg_MEALP7>4@i2iT8ju5p2n!7q5(F@T!UO~2VZp$n?&_Ai z+bXsEnB~@{%XXEww#PMG%QaKhdo|XqcBj8q>CGS7BBCA5w4w3ujO`4M*VN3;?9A+H zW_o{xxA@eY;Ax&Q`~Ix3O!p+}EbK3o0KQ!zPXKer?TeS=-b!%%!%|WU_HH8q;MP)0ja- zlV?;5=NUCKdZw=2S?Mk`=CbNc;n@scv4YuY=8SehKDQPuJnh{zjeIVj z=V)+_2@4Gr5-BC1;b1^Q0}>27y9fvwj72dP#xe1ihG=%N(H!E@bQTsKH(wW;w1QSx zh=s<^^ciD))qIR-_;ibpZ};{N7q_+bOc(v0?dtXBRiZWSN;t1{)SDJIV)CF*|cH9TK3lPOXHgUs+yZF%xWx6L?Ip~>MdU%-szp0 z&eCYcjMogC87y`;**C^EzpS)p-pVd2<}uzhKJmpEEqA;*EPY2ywVS>DYCrq^GkI*R z@JgMb?$2EQO4^k*zERyT>(5N{jAmG|;96~IZ*_g^#u#&%FPymB{rXd8--;2e4=JCN?GfqawGd(4Do?5XI zqA2r6Ttj&z+L&BsB~OT6%0f8X&WIs5L{%r*vg6W{vMft?BL|U%OXNgApxA%{ zqXIiSlqOJ6NEgyX221=k%Yg$26}QG}f9pBn!Z<5>i@BDp+O76N?b=P&eOf8h@2}o< zw|=&AhOvO)!UKGe6*v&dH+bN15JZ6p7PtTdLS|p8=b^{SUOOx4@)UClTEG>R<2GEaxOuIiU{t3AV(9jN>CZn=!Twb5nA(ZKFXoshS+^8EGefCELbtug|_zngXYHG5&; 
z5x=IK_O!$S9Amc?cfa_GkFkmuhB~k?_)v`z`Fi_oo68{$+k5Z1t)6_yT$=6t-0Edx zfx61oXX6F#kB|HMY+b+A<}nMt-nYjPWLw>vk|orQuuei`(Mjjyj*k)6C6qK9b)P#+ z?z&YKi*nn+!uYF%A=ipC;Kp82%cGAdrhJ-1iOi!(K`bd3$^<0BJI zPxRW|$JblMMVzEgwzJRjv3j&m@^4WdNjkJO;hI`5V(w<<0+5@nBOp{R?&dC{Wfr?c z`^-$n!qsFvF81L*lliwsQDaf;785Fr3gZH-K?R5*f@EY~jeP9AmFx{B%g{EQDF21bD6*ESlRJ>rgg{oCL$fblH zCR7O9Z#NYbv~W3E78%6&jrs$FmZeJDYi{T+)X3UNgA~c&P%ZhOrf7TK-4QNCeU`;)!mekR&*E)O$dg7C##5+E(6scf zRY*~h#nCNM8PmdSiI9|I@*w^vGkqD7GrE#VSf`A<2!#Fe!6c{K2H@`Mg}Y8gkpI)G zci`8|8z53ObO8F6zH$ftJ>Y~76q#lR72rF?Oz^7gR@2v)vGfwestWBp^L#V8d;%-E zF;OJ9P%lpXsm#oHQ$}n=S;lElh0bUc=P*cap0vVb)2?xj$@Se168Z@qWo)43lbLi4 zXEu)ZJT?iOX?Lw`1Zjfm21MyxC`R9=iB*^uLyC%sQ7-Jy6lWw+Xr{Q$ZIX&Ivid}G z(D`6)3IJvncBH(~0`=vEkr1V*n#00Km5@@gZTP{W&QZ5xJ$p-vLc#+FH81onDrSYS zOg8fn#}u%l_Be&@HWZ`TG|WXPscWL7D%h}-?g*WR*M|HHuyMShI%N)A(KYTgXE@hIfX(QS~zH5Qxvrkevnk+{7#cLPhS~D02Y8U_~BzSSSah>QdLs8q#hJ|AVXbPq=onn8+1yRc{;C9kTayVvC zy7>!Z*gGf(Z?&=|@xlkHea^%C++S{T)-fpL#?zV*MjH?e0zycFC$=utDAIyyiA4Zs zD$|4=L1Rf+w5rtMT>2mJqD`58Q6SGIIfJoV&=>Lv(58~ZJob_2Dzq)UEDUl&Xi!$^u81x@ zE#21&P>XG0H}lXXr-UC1U7Z2_PWrhTconGP0)>0xa-@AB z>FFj>EX}*3X%^w=;I1*-3?<3MLJi}9_n|d6-NKS>!<@l}Rt1BT38&r+fPyOxps?6RKa+UW!aoKGx>=`$ z$i>WUJ!vslX=JpL`HT^)%PEvf1NUNxa;G#BCV3-4!f>4u=SCHc6mf}tO}fh?zPTCx z0w~yzxyq`JZsJUo9VXBiX;;TRMQ76BSIdZ2Lajj!RXLpDV8@aS*ZPH{diM|ZaLPX zJCk@|Dx@ryi}zcL3=R5kt+j8hdx`|rA(*21L@=Dm7ZSV}yo{xwyKS9!3>xV1ez}Tr z?#b@X3MU-}P+;_arE>RAP<$CRoCps<3#$L=#&MkWnVP*~(dhQuI(ah@f0*t)@8EEm z+Nm29K$%InfBRVF6@g$Oi#9tUWkr#6i<&n(2XoUqmaH9o{)21@J=pM*5>v99mS%p{ zA8HnsZ0(2>hSr6kiIKo~8H~V2;3b}3RB8sE?Z7%+&>2~l2?7XU$sVoYuS6fjD54Tk zdyN4UP^B-VWA+w|t`0CXObWZ*V|>Sx!qMsw@h6rxPlxQI zR#j6!!36hMW*sRPX$;yrYJ~4*7Tk1olqs|~^HY-0%{h`%kj|sF)rOwpK^7J_aVtv~ z2PHEcWZW*)#lr^VEvqk92R9X>6X#5OD6jC}BKQ|o0TH1A&gnt3M}D`vQ|Lu~E~*sf zmcVZX8XC`0;cQ$bnV0kJkj7b%X_bQQ~_@R6MfN^JhRyF2%UiKM)h( znFN(y4rA~kfA%%y2;4aO@qtY#Nb;n=khOpmt{0ixwJdKPIX@{~`Y9vdk`B(SWoA!3 
z+R4y)vT%>%dG`W?`o8nIR&K$S{OP&S%%GKcJ|Vd-AxOE*oKxR1L_4b$vd%^5KI-2EsG5J$k-b5JM^Fm?2J(gD0ivJ;)kYLura54Uk1);wmbfR#aZo`t!PhP9 zlugbTH+GFPBP;;wJB&npAKCl}8`(iX%DY1cD|3N@I)AVs>9?Y&yl$glAFd8Zuk$fx z=3>9v^d*dv*e^H(T+~YUvr9Zv-?T5c%|cuRta?&1!=5Wh!iN#ueAD92&WXjbx`3HA z(_u?uz=`jo4(&OvC_1olJDf88>4s!e zj)GRgZb5Ic^6-wEs;wf+c4Z}J3yfo(vT;2P-6KV7A{-MNx}e#pL2)xAcL3W-U`bX3 zqqc(PB$c7N8|!N%#$E)|b#Bap+Av&BJhZN{SBbHl@i4?g?{q^*kPAfnUZr2;6sMy2B0KA@seVPru_J0A=ePiO<`hVs7mivupYD2B{ zY%?E@GuYykF~J87kdggYxTD8>v7^mA~30&^zi0_f!yDG^XP zaAS9cI+F-=)J=<7y%E}By%PQ)iR1%eNuSK?Nl+*9Wi2~X%15Pa-cs#AXg{P77-_dg z@qTGqj}xC6I?dszIMPGByFo}T&b*2Yy^l6mR;3V1Py%mdBa7pX2>>=!No~goTMF~y zsYNgK+4agjaWT#ecSW1aiBHIFUa?%sLV(Nq5U^Qjtu;x6l)*bMW?LBdI;;C2!py6vVJeKvS!#Q@aT_+7iA^4JZ{voM z51`0R*(nYP=X>VgYl9CKAD1cGSL$uSbPXg~vj^2%=rleSZB~4wnx72O9g0!3o>aQP z6vThWqT6-KPzKH7TY?nfDn=k0eSI8{%@0-?IoLp^@N0Ef;(99K@L+|=Mpf)3M+n#e zY;YWq20hyq3)~X0t^MV$DdS~j6NTPm0jZXg+f@SWJ+_1VWNIzkL63;#>DL%Oz6Rnl z(#V?$tm<@Ga!ICH4}~XV2Nknur*R_XMPtLSHEwk#=(m`mi^sya0Cek^52q=1#|L_Ty>K(gYtw0Tyx~5s6iv18ev-{jGgqPC^*PlS=_%*r}%9-oN4#LAD5w| zEUb}Yp}|+nR_G<$u+DsfrHM{%%v-wG9c-N`-1u)2Oc!#PK`L$fiYM~zXSHwg(P zF>oRxGtgz%;YFe!$Im5Vk+YPPH21pxEd9Q#%2xO>6pm6IuS4Xw9OLM;q>%s#lX)mR2+I~>8LP5MGdxaXd zCIYn6u!h*>e|(SM>WVH4jom5AzQx4rEzk)~sgkU&zV%p@t`ohliaEDKczFQgDUcETDu708a#<`5CitYJ6 zSxtCpfdR0|{p7Kc-h0w1B8wkbQKXL11`Kz}P&SLk$pf`U+w<&>aFFSjWSgvXhp!uP z-sEa3o-j~0LPK97&FqaO+O#CRVmdCnc9ubpww3tfG4q6y@eGyZ0w1CUa{@_JDUBJm@mpD!G;uJptb_dYKB8??GFLUpyj>0)z z8!I=3IU}#~HZM>_=^|^)=iZEz=VN2Er`cCy3tdXVNVvG;hIX3{zo3zmxgOz$0XP^L zplG&o#%uKaLu>Yot%dP1sXYyLCu{@ll9$;I93S5(QIy<5;TL7QC9UQ{z!IZ9Ns5Y9 z3B9FtFmYz+)#3PKOTYdJV9K}N%tX8b-y|^6P&{eo=H~J1|$U{8#b~_ zDrsi0Th_*_4lcf5b3$$Gy=@jo4Uu*$-0V`Kv>1k8Y1BT<;fV|!xtM5iz9v0L1u>Pg zDE<~?)bi+@lIxWGX3(;xJw(_u$|%qR(}-3;hG2eK&SDhwTmzuOsized6IKP4Qs|dN<5ftCR=A2R*^M;5D$0& zq1}!8y9%;?bM2EuMV^Xn2m!{6{x)hw0ULFWWNWe+L3czgy=?HhA3}QVtS1BvY^0Bu zAa?!Bm2hCAPOTFG>@~j(-bH;`nvk8fGY9EVs=;gINC@u@5k+HiaHc@x5sI1Vi)y6p zW@^M++@cuG{+T1RXxrFf8tK{XfM=SBn!@`;8L($Y8w#KD>WksaaJ#fHDJeel>y4?y 
zXu#>D(m9bFT8^kc~k#m88+?`a}uJEI54UgnTQ@Rx&tRU`YW6 zN3-&7jXhmZK-LI!fYh-pA+i>jj7J{rp!qkkKehU6?QGg#f91k^h^in)I(;IKPD(B97ijU3tq`Yzonb;$29Xc zTQr{0xg6S7VV70K^pG^`!`7945Or}y45E;SBOOwgXsd|O z)cXk=tr9L{{{ZtGTM;Nr{K%jNPhjF&JW)b_f>pA)&7fVfOk{UbYXFNdl7?;M?>3<* z-^pmvm=It)E0}ic_7Jo?sA8pTnTc|S+2J{NQ=iA8*Kt+6{_pd_n)pgCn&LDz9~49- z6f3}XRv`&epExf`90)R*T1M@CiFXdu4~n+@kgxt}@D(SPwPTrSgm8XT3WKtNks?vX z2^Jt!o^h%AmH9xF*f1%uAVdtotP2mpXo*T!7z88bcx`ZMf>-w1(zucEj;Kk-Q@R8C z-4VDSuXJ~rV-*aPwEXQe5Paf%q9SB7iAX3S{{@X8^j!^6ru)quzSy*d(-thGZ0qaQ z$YHNI1qP~TUJ;cSjtk{G`ssyOo>*JFyhzzHM&yIrXr4w*MQs5>@c zfRY>vJwomv+*#O?A3sD%$Q|;)UN=bwUl$rh`nD6@M^_^^)L6F*{Pr@(WE>Ak(oZ^j zuIZ5er@lMsiy~@nX?>j_qffhks_l~4r_O^Ch!MS{H>BxXYEKf}fcM0T=7l@+YsI>!}MDRZ%2l`<+gVNmgGsJ@J|fP zHkM%hxSdnN^Sub{`7oJz;$^iUsFY))fNv6j!yOEz%e`eW*n9ou*^L>h-Qs*MNA zP6Ff@aE`InUn6wf<8B^gyl8}hga1fWWKGOTv!2~#M1H~<^aFzVPbR752ZPRy_i%VQ zG<-BB-ZWJ>!V_0oYz92}oBasAb4 zyh8ltfO4#;XX);=z%JtX`34pw8|+c#dC-kkN{FH7K}ER0d_(H;=A+$;Q* zLE6c^T9TO^i(mHHgGe3Cn47L18`C~H;2?LU+*_np>6MJJfWlOzSD+p$W=)x5o+)lS z-({0LtzPKV1p0q&0Tk`T|NM4*Rq4~QtzFl}#ErfHv+Xqr2rlPHmOSq}E$oDS^zFO( znut)JuhXs`2V}cHDhrrcRb22U^=ZY=3ky;dtst^1pf+FLrmNA+y2%?bSo>NR+ym0s zR-m@!_^pxf=}pvHZ*q|!%G4egwt&Rp12_6tid;j&+U8qR5RJL{9VQbkAL}DNW-umj zDu!0^z7q)i3EYA}LNc4?l~p5w%Y*Y)iwgyB3b|ZjYv8N}gxWN_HzBdaZeOVA!1&r8 z$UHXmczhZ5+H1Z|;Md1Bp)#oVH`*+SjxXE#t~OWfHpZJbVIxZJT9tu_~Ww`PgN#w~0VQ5Ck|1uHjdR%SCHipln>9eq0c=$tK)?-1>@&^2Xy zHIL8cYf!SR#e=Sd)>ktXt*=-yLu|b)78FsfkhBd3wxbo&7rh;`v$4l9xdwYQcq8B? 
zQf&|vW3Me+PHcheE2CSFU%O)mn?^9IPfB!L-Srm$kK3(DqLubVKbx3fy4M%-`Bi)0 zy=)H_cWz$LvUICpj^Zz{mTiP5-mrF+pdig0ozY9huPy3@PtOjPC&s*cQ!eDRxI#xEC2r5&HmW3y9RXyL4kaY8ZuC}b_je)qkGU(qc8YMB_qro^hcyR~$L_LGy zM-~V}4R$?XY6wq(EA0j%jVkG~&8n!0QAT^wf@MK6lF^l(Q7Z8U$S)SQ*5RfsOt4uY z{m_so7}*n5pXZlXFw804WHMYT4p$CUH)~iF#AAt6Il_v-H6`T9XLSy3Zl@q(z!pfo}CT|ZcC~HI2Yb9R#w`3C5WQ+MU zeF+`tMPeV4g+;A12;{74WI%ZOhTfD-qi#0luBMnYrh2nl-G(DylVd@6p#!xpy(1`4 z-L6e_AQcgQ1O_d(W5*sES|5#^*A11BbDn6f>Ef;t6bZwkYJ|(RX-L*f%3tJk%t6|f z2p+rV2x7a8!rC$}tZ0xOvSmlah2+^O)Ol_PQC>3y`-KA4Oa1?ahPr81=!<;I3Nj6Q zL1!hi^Razwfp1wU9m~97q|OGM{X4|ks_~6bNrx~RQB1u!Plj3-WW=|25PTQw85B2e zD!!E<3omXKjx#G~g?OO^ip=V>5qyTFqpN5xwHIy9I=9RI3zQ*O%gbRs1P-7_R&U|* z(F?|pS!NGnW2M;|QCNIWHMnH^ye#i0vURn3B#*!M&0lRdjIbaroF>W$2W#3ey$K|S zyqUJ;oQ?vAqtq6*(DAUK<_LJXoDkj7G_3NM?RypaJxKbt;A5~0f^O*oFu%;geo0Wb zkOIvF@#tOSUfaq8SI;YmO4KcdFE@0o6frm#*;Sgw+#r{S#cO5lZ)a;Wb0fMxIFXO$ z$`Yq!gb7e{(=&ty9FjBtaEq^Su_kb!Y#NfiAkiyQ zZEW%)WOL(ll_{i+lCr%9n4*g;BPqYE-9$FNHT>N}{(h%~UbmxeE~ebI=KhF7669sg zk0U{#11aGCH#GBC`PYrNT-H48Nr!s&i|5-RSV;5k+m%Pb8eMsI-f0O*7qT*y4qVga zwK_8T!GahB%<<2%AQ2&t=Aqre@F4UxP+<_y28)D@0^N=*s#e7_K~p5pgLN2=s1x`$ zo9a)lZq8TGFv-YX3ANJ1%q~uYhs_ zHETyKXKMXysdZA%L{MXB04>f6{1taQRyW6*KFD+n>LFiUNB|bE#_$$oo=qKfPQP z%G)?VI7&?%pw;Ti|olf51JCyL2nnMIOn269ghv z>mAVvtdJyd8Uhjm08K`6x^l|D@ErS)?mHXQuc-Z5TJ!pEh(q=_mVmlBTtRUjU?$H~ zIg;a!6zo2K>#@F>Lgl#%Qt}Iru%LIe8f6i!3F8-vmGTfr_E`9NCRPlB?3j zNKSpuphEBFYW znNI@_HXkTS3Fq{{heTB4!vlthQ$skI?egG1 zQ3B^A=^%0yh18AB^+Ki#E2pqd14;^f;jwe~c91F30DfCEbmMhpoj(5LXFSFc;k@YF zg`!@h*1{0Zr4;AXS*fOdbMS5pM$`mVPsCx({awyiq~SHbQi|*Bk%#C?oV}sf42=X* zDsLblz@VrX4;(5#u%_X64|I8qjV2+FF^y@jvnOe&6p8`xU?Y$}k+cT8bI48HdMa24 zc=9h@4o$^eVM>x9gGiJPPadf;W^Bmh(C~}unmy`}!vPy?e{zB~iBLW36)Ci zUeth}`f~`GsXmJas@sL&iO`;BzT(l%WIl6tP_T7YV%PEC zg+`9O(^El%Q{@_84kC;cYlVw$9bhGm&b?A`jXxOjnY4xd=gI9N}-VWIQC^d z?W@rlO)gCDAuqBDn)BhW?7Wvg=2aD|=N<>gvK)^LXKcCA(uULADq7PV4;TYpPA?nB z<3d+18TIElWaP7lnriqci^VZKo4g5P+5Rml3Q920)o+6A>0s!1DU3-|UTC)%o+@mxwQVF(xh99?--wZbjko_(pA!dgE_Ne*uC 
zm=vTVQ5h_Pr*)Ey{V?gV^ytwc!BQzX^=b3~cb1;FCPq*GO*NDz9G%yispT5Esr{X*Hq zmbE|;Fu=={WJ*aYnejM<|Ns9FD6RtP0q6nWv^Jlik;`YOFG~s<&$R7)K_xv{WQ2C_ z6FGR_p*+{IJNK9Ql+Bb&}UBQrBYJe(t* zzYkvOP$;ER4|N7&!~$Rt!GaDNFwg))LIVU?8xI_+_l+Wcx2XpjSB->(SV9;fBV9#V z8^|H*8_2->R!MB1`#Y!KnD#Nm^##9H!#&yvWJpVE9k(IE+0@Zlp-R* zP!4k`7&!3rfPxAn009nYfCC1D1`a5&3Mf@hhR`5G(uE4q8W>DM3JDAiRj5LZ0vCpYA)!@hP)OLq z41~HufqX<(6E6HBy^DQfC&OHfd(6Rpn?!2L|}pk8FbJv01qTY zfY3042}GbU0S6dFNLc9L0|^Wk2mk~kU`QCn(~boI004l7dc0Fpb4Fi?KoswLd8;o44_wCO2ckcqAd3!pr!qPkz2xADp3W+= z?-^&adj~p|#Exi0;fX|^GQ^qNAeE2`(ZFBGweKwoNSDpQRIshDF9(Wp1(I`r4$yq zlZPU5^KlzzAGf#UVeJt-d7dJ{3JIxJD-~xhYP8Oi{*7?v{wb`SLbAdkVJ-B9W#Owr z3hM(WG=n8FmM?g@wB!&I+|ZnIyy83j6mf}r^wCE|a=kWGZR=&n%P+p(CG&{O{PWL+ z7k+(tnLEC4j2n+J;}l8zV(c=_a#iuLR9fY=?9z0wCNhyd_E^OpqZq{; zb1Y&JitdWTj z!GZ~hAR$5oI!t58O;xe=X-bCWrjp4!8Si1Q!=QlV)#o#8k- zRh;tA;Y>IkP7J34b<=1jQjw6*4&;@HYUY`RYUa&Dqmr3~gkw6#bt-nKqNR#jYN@B5 zdi847tUs-2i&A?q{9uu|Y9^`8RXd&JnVsUuB+kk;x)e zM#XMpHA@_w#o}KqF`kSY<2Wy##Z_@T4ztwowBAv8UCZ!GRMi$0@#J}`9T@ItE3qVF zi5#NYRwJIT8>4(!^w0jxd*_`z`RS)Gy>$CUAD{QWd5K9gt+)~sJ*L$vOpFn+Iweyw zB|$r!MpILk(MaktnxYv=Ni9ZGG9|U4n=~~gHT5Wwlxj-C(pDmklq5<%X)1y=H8r)5 zq()Oy^Jt1SG_?teh=||>?Jx_9ijvp7qUI$PRZ&rQvXpO4^#Vi3B8K`n%BI0_wwc2 zw=cc)o_yz(i}>UExJtw&;wo`fxQ1HQ3Pnn5O^#JoS!Es1H6hpH^ciFcTB zhek|ek{J^hrAbxA;C>NBGi^@THx0%9X2Gv(-vF zQqxNrji6X~2@5ab2won%ge4rci^wkF#Tjq4jPG#9t6cS}dR2rYUiETYQYD`VQ(l)} zPQ2VD51G%*Df5t$@AknYcRXJ4iaY*#Ilc&!=Rg-DyfDL25e)Az;<#LhhKM{w^)T7D zNfCSNnfvzl{&x9z`RS(*deG<3?{D98puW899+U1#^K_5r9{-*9-QxeFiFT+Xr3v0< z4FLTC05gaI0)WFv5XL|pgiOs?2NM9nf?-fV91w#6K_CDO1_HuBFc1I=1i@fX7z_-F z0z{JdKpKh<0I`p%?8z->N&bvUKwJL+$fr9i5dbe5CJ=$Akp}|cAs-)OT&f2+hbIyx zX`mNo4lt9W8I3$X=cWN-;>;L8;cVb&+RG2uL(TE&NGbqq`N?Z80p_HxdDI}d>;R@> zR}~((#GA6g7l3_L9Z-I!(TW?h0XV-X0O-f*69Br40RYnV|5xAtyY`{$9{sPmHHzIx z>N!Q$@LxP1bnh>KE8?Gy>Ra0l;HXC#75y`6I{cOVOW;L4pFqXo9}7>^!)rzyXz$q5 zU)^CL*`Yxp5K2e2MiB0d=2aCmK7m|=iig(xHNI}4cqn1ed?&=E&*Ph)P_3twX&9WM z66O#CSQu+D(UcaaTjG#%H3Gu5mvk99O)Ae;i;j0IT;beKG%O(%Awt0%KyIGwjy=J8 
zusj*>R0F0y9ryeZ^c9V-st8cM*mwcv7FGBlQq3l!&5j%jQzsB5GoA1B25=EQBKdJq z?SP21e!x@_NtF?ydF9j?u{>}kY8hr!!?MNe&=tWw)3FHiFXEUUYH?(dHB-7|{1NLG zhpW@^DGPOqcp#vaefWf_OGRVh9fN7s_^}fe(L5yfX72t|^mWq=Lzi?AycceyvC@{B zp!K$#ohC}!ye1vgyp)oz@&2!zzp^p3&C zG>RFqT9c#+yQQV)Fi{^}LN*IAFFbi7#&X?*M?gQWh<7{Gp5vs(iwPG_bBvzM+{-= zbi{KaDixs-p=`tO199^wKjw5?hFzTmu9qys69zBB9m^uENDXAG*zOwTG7wHjb-zSvHyj8mvbRPqxJxQOD)uggymFRlE z(g9;R*>nl72#J6hsvu8xQ5m#)W?c&`Ga%DJ&vqpGh&Nwd!BCp6^&^1DEmGw`%Q!N1 z5g|tHrh>`t(dcBRan39o*>!-6cm&eNMcXk90!qJdQkGZ2th~ovGV1{f%uBs>S;U6TM_FGfn6lpTHJ>zno#>p(AOu3pq^$ zGweZ4SZ>0%;Lc?CtR7(dRQDkRx1}JCkh>Ei>-+K61VXC7Q%{FBBo?Pec(=;ioI~2! zq^f9|CO(EK6I!OGq)kclm7OtEre`F+UA&zTY1W2_$O#1FpgD{odo@s*QHu&9XRY}& zs0yND(!e~@Krmy0fsOr*4j*v-gL8&XVOjQAW{9L!fXzl!5=@7>ES5)y3?7AJ9StRo zk3cL@Wyu%uJV1(D)Vzn4HA9EFJ}hP?n@tgb-}+!K0EFYu7A}tV>)Ltoxtj7*!qm|I zRZ8OT%c+YY?=k2!V1}MdB1=|(@6;w27zGkFJG2dm6OAhyOfT{KksQZ~j)=4z&WOA^ zjutMMkdm!?+3#*Y0h) zE7+(i|AhxC%Eb|@<>jl;jQ;yN%CT2`B8KCKacC+Sk29)_n)4{A<=z4m(`=sqdU*LZ z1=25ueC>2p8VIc3^N?QWi73%*yS#T%$YfxrqsTWE1*whzb3QV0D1^NCBrF&$sk zki9kk?bj;b7Yi~kNL&OG#p*QBs*8?JgDne2%rmF<#{X-U#3<)7{;A%hga>iZR4pEB zR2Vf!$`(V{!&cPG88Bz<0ci`AnpK%sDHp@wopp1udQ;G9~ZkTEbrHW^X9-CyYs;x1{>kI|u3nv;-`WGg$5QIQ^z_C9DkAktLN43H%x zc^e5$fetxC%)nP=*Ou^KL#-ztj`(1IiNt{h9Cz|VJuMRAK93IL=oDod(*d1}gphq- zriG4yvePTmq(dr`D|5_2G#GQ|R%*;efBg54HonaxLd5(r#Cpq^qm{`!tG#$*EjrxbZ{K5|Zr_Z_w4b`F&ZWWPa+o zP(wNgwhl(EB|4UzlaRKRSkUfRar5bMF29lir79+S)lc$|D#Nn}BZmfZH6qv^Y6@6j zM|fCmVOXAYGYg9}Z zDoMx_kd#BP9D>DFy#(oEpDMb~HerHpLwkfg>EkhL;4RXd7il-gVOm5|$B526b5d=v zK9CZHhTKU8Wp}Wx`0_{POAxn7bvjQ_fHfSk7*HSL+XIPw)3FJQGAU0%THT#fG1Xa< ze$EPB?EFHimgRhDe_^(UwaLf2zedA2@O+! 
zz|~_2j7v>e&0eAU*F{!+r*Q$Mb~|0JR*~V1J3!h3V2_FI_eEuxX{JEklh8 zr`CZ3)N($dqU4^fqjg&b%Seg_C73gp!_E$JA;|+LN_(!WY$#Gwwfq4S_aCavgC{cV zJKUsH9lQW*1D2JHe$*DV#WyO&?La%pGIvuiQ_R{F)j4esgfXj<;stusE2WwS!Bq*7 ziUeh6>|W%*zxorJL2GE>sa>C;&gz^a8~2RLJ9iGYF`t0i=PZz#wWD_+KMo$ucD!+Q z{Zo4Y&>j}yfZ4RJ->S5Q$O!mB5GE!VMq*RQ13Sp%a4d{nrUH>@7veaX+K|K9B3q4U zOy7UpDK+t#tP~Xm=|9A3Bt7@(E1c!k1kme3ZUa@^ee^QQk@4Jpny_mhS1he)3M5}8 zr9Dn-N{2Z_X{a^Gw!t0USiTL9Z@d+vNBi!2r~;6$iZ{SYg1!;K%nM5!uqquTNH8Cd zF@T^N6{*5tJ$OMP&u4pO{D!xr^eYNP(s)3P(EghB|LR z`1C?0onFHq&?T^G^VUubfP{x#`&gBRC=r~iS|pP{&J3>S32veM#nmD|SE6j}&khr| z)3|{}fNMo47)t}ZcIw5u{qe#NdhMcmJf64VOBP3RZ;Ff{KK5IQ7SvP(lvckqsn!IN z7U11MKiQpNC}_mAdX#TH)ReHu&h5zB6Z`b3_}%kL0Bz&SwvVNuo+M;ulCLX(r(l0K zm<0hDJ+>&BlUU!FODCql@oxSmnoC#YsxkQ|( z;=YA*DqaZfGrsoVL+((Wm^tc%9PbBBVX*gcCl!jqd z^z%T4e_Xk1$`-O~D1oF8a~qWAnQ?2z;=oE&a#AOYqj${_*KB(>PnLtqqFc1moaMmr zR`e{r7YjI{XyLlI|4UDYSPJ%7M?kYglkZ>_ZLJ8^aVRUwhiDk*Z^+YKFuqyw(g_ar zBy1yqPQ$&9D%9uWMQoMia8(9m;uFa+3#Nm%BTa3TInyb>aawk8Fd#IQfO=~h`I;Kf zl(RfZu0FOw1XpM#giaLZoJVsr@brApN)FHfdUXpQ;9fb1H7mU!^oDR(0Yb5&ae60D zCHxN7>w$eR>uG7b&%I%?k7>610e+=XlQHR~4Cd8M>7nxba7!$jk@ zD8gk|0yV&pV~_~{qG<>7Mkt~s(g%e{0==m%9Cu|i@=77+WMXySMICQor~KFGpC=!y zPk7UxNHkuux4rB!(MT4t_IG(byFrwKubN-|vY6BhI(_n~__TZr)bOydZ?>3^x4pDLAHq_G7g zfWItXNy|OYihD~aQ8HT)naa~C^_T9j5bBN;L1JBjOOH61G&(Ap&q_ZX57MnNtcN?$jJ zAx5q_ij)ncEB4l+ES`8%-dza&!aqp|sgJCmRl(#M{+bHn*ew$J>rcPk4MmU6A zR73B8J5=z#2OphDL-!*hg??EsB1@4bA6Y&nH#R_i(6Ijq<`ZqO93--+7=k1N1yLow zTA>4^5&-*p$Zd~+rxYdg$^kuSm`B1m3S#yLc^Wk6mqpZuw&G0lwL}d@+{IFA)cNP1 z4u#}H)BrTjo07`R5~;c|AFn9?ZLg$1hd z9f6yfJKsOjU3M3~T7;f3q>Jy7*xDc+7tVYH5nh5@e2+MiR_`7T505Bcu;(R_Y`)jg zC+-BcJ(ZgzDiude4J@k}ap(%Si)o2ttlztviuGQo+rPfi1O8O$jV4|?NCCnCi*(r}BQR<--eUMr;ah5O--uIX!g}az0+07=4RuHgl z)}1x}OpfcK8B)wR3X!Ajf|wrEO|_y->Nh4h4B%(l>nrK}7dc{uYn?7y{tNezg5UxL zmht&^c`rzM5%b5`HS?X9;efL03kIP7#nbg){UX*stD^rOx&dB@ItL&Y*W(|^<0lRBZKT5sjtYCR#)@@ai{ zOp!Q<44}SFi0V+ttjx|~srFu-UElyiOu)oCAiDRC;D_vwYndt_AJh&Hc-)e>#oriG 
z;zAmU6ON#A9TVNWL{A~ldpjuxmC$KB?~v0Q;0>rs$+nYlOvvyDewcdSEki*NkF^df zcg$o>hKZ)8EkEwbR>H4Z!9zr=fjlq@Zo{tNu^p;+cuHG1Q=y^q6hhbpQM~>KNJ|dX z4V`5iWpXA5fJq?kxspl`iSBK$H5Xb&Z50e`1ediikMy5G(Le=bchX{7Lnf`4x0&Qv z!F|66H$f2UOo&wmQsfEIpfs^8{~lMekI!Wd^8HfYuMtrPRZpd0KvZDDhikO z(7;YfC9Zxq?f%ks3j3-1v4e;q^hCT72uJyrz#1}Lsm@5YKrAKwqVkIyDIwDB!vES8?toZa#zUb=(Q9+aE|m+ z019y+4H$sAAQ&jO*=Tia^JyScf@qA(PX%P=fd`JuQOn)}1)2%T=-nl^0_i+Jw1L#95> zc~FIQNZ5uXG^h)`p|1=vG(54 z(8vBR7P}W4GWJU`O^0{1`Co0^woSL_7O~Ox*f?k-p?zW>CN>{p|M1u>Vl$q6#gz(q zB1A6|gJ=+I5E%xsf2mX|k;5YrgH(h05q@GOW`>zzW-%;=YLFNV0wg_(J`8C#>C&K!e#0J?QQ+&EViDa>Pw22`m8e;y${MpQAG>nF6 z42?lFObjBK@kcWE`)mDU+Wpvl&GK06y%rZOqMOcCQ6fxq!!YJ%xNtKrr_DAaZy07! zy{UJ5US~CM0lRhBK!HHQtbV95b3quwgH(ip z9_ZOstSryHRpVe4^d@M!2i z;e9y684kl?7z1OV25Mku<{!!ozWkQo9()hJFSplIyKwNLjB?weDk+?hLW&czosc9~ za+@)@nd@#2H|DsS0fVUM@?-)42vEiWni&*f01^N|7ytl35C9+m01yBS10Vpv01N^! z2!J3U0CFS@Q$;P%0d5`tu?}l_@-g|EL35cfK-Pf`Lp;1L1e-x)vpXQ`Xm>c^ZrxrS zm+j4HNz=PD~8iS%KEEJ zI31w=N~LLes0zDW&LF<%y}`%D!9V@de=6ECi#B6sg}dl>132W4QpO8o!u)%qi)lz{ zaniQcxR>xHEP&tB4=hmZ=YXOh5@aVt7o3WC$pJ>ihoh0i6>nzVO8S=;A;p}5*Y@?_ zZ*)zx9=Bj-^jarh)=eM~p+*;oq?B#P#tF7kdVDVU~L2dU7)ytFCT!H)idU1q2i?pFNvVGVuJoEUqWQr z5O#niYAS8b<5IItTn(2SK$RuHfXA$|e<)4@Mn4PQ)_|&3)PVN(d&Av8+^?GsL!yG2 z&J_kh&}*_9JDMb&^-2tveHJAEUgfb@wjU%zKtAc*P{o{0E;#T6oFE~}L8PVpeFp|& z4VLOCsTsn9^Os44zWJCesZ8f(C+EujL}&{+7&}Kl@rHN~Sl}If7!;V|Gj2|lhC7`F zM2t9!)@F!j^)d#7CJP5jgKqPW81Yk7QlP@eEieRRW{_(USrsVsgO`30JKPIDK>15I zR)J7HLW>dq5C1!P9Ie!%yx8CepjURvv#MGxWU#7M4TLq%Dn>rA5BB zo+aRb&1t>3_RpHK1E}Fu@DU}fa?~D&IFz z0c24Q=vM9m%FtVFFbev6_a_#Kq=6{rh*Bkc0-1Njt5Dz(i(;xET5{U`X{D`m5?%BgaHBE*UagOd&VL68%y_ojj@mB_fKkr^#OmvXgYVTQF2 zBi`o390Vtq4ipV(=N?(yctQZLES-~%viaj`aZd6}OJ*jWhNSg?)Fn%Qh=qHV`IAp- zzyxxqk30J)&B=mO<#st#*#JfrLSP7=Q=L_tb`ZqdoyCbqSplrW_nhSFdNP9PmwHL66j%32o(kl*+)J# z_^5et;6^?U_^*BJo-3h@u>$$U^D#lN0w3vfc4qdmr9PJL2E;M57Cs93?0Y!?T;>Fb zMtcDiUH}q#iy9Ey^QEgf-%`-5U!c6o}G<0Gn@57E{rVEh3r^mhW5=?jb5}%CqrckjhT#URcEDn 
z7YkZrbM7si%EdytNS<3vgK9o5y5`QQRLl2$CsbQmwnbaHXdz=7U&TNFtiJ2>Y;RHB z7Rro&B5m`Z6F5#K!>LR-9uUWZ<9R3)jSA%gAW)wOSy$3aSD~UOQa-wT#!+Q$p**sU zD%n5(TpUMbRmiA|H!X8CDi$jhOH;a3QFT-98mde`ba8u&c5f4RLiuniwN|xsR)?np z#M%2(j|u6Y%j<}l$}ZV(FeZLFr)fDK54F4=>Us5?Xv-BhUA+};d^9GM_<={gU%BG! zo}gXNi@hb13O!rfx&u^+EslHW=)TUy1=62ts$HpGQE18ew0o+mWP(gWrolZ+lz6SA zWqQewS)#%t3dapyIznkw2`_EJVc>BT2g~IpC=Pa(Qp^Z~fdhlV0z`yDA%SToydY3O z0U>~Zt_=c_^?<I?=bl32y8bdulLTze4U}7AvE+(!EHkMlj61Oz zJ0b-|1hWJ(m??5jnNy8V&9o8evY1lDB#<`QrT?fs-E+<{k~VEmTsUewnTGx z-mqF^u8k#eazvU0CSgnQBU^g1WuD16h~QX*<) z&|!V|gv4x63{;arl+9n{777#&AwTYvG*?G6yTx#VG9yjqUziFo1WO|zWxy0sd93E% zqBeHRAvhwu=M0_yVmjr=Y+_|D0=v8fa~&q7hJ2bAnyTUPkK|}XG2;hBvUpr<^5jro z+E?b9MXk7fn$NaqQ6~`V(zcOznkQfA5af~AbfYl6q=5<3P!u#kggY`xA>M_(L9>dz zIm+RrCco!0y}kKUIL55Eu#QAUXJMC0c;~aR^T)~hqgqw;7`&OSGZy!wGyWW{B!Y1- zdPq-igPHr8SHUR=dvh@d3{$W_U97$GHH8l^DcUMPHrWF3Jv}Mk5F`9iXSGxw!Ad+GEJ2f?F6^TmX`xBu~4w z!F3l61MuE0K}!d4E@`t)oKN7UX*H6iG2InI#;b!AHwA=bq9019tltMh6#_)B;Ef}@ zB0len3|?|UPCh9*U7&2T7RIxsI&PUFgt4Qq8OS;xtUDyH7V3$LJ7Wa&6!i(q)&6uf z5Im)xqPz&%iZwci5K?Xmt$JnZ#kBRooUriJ^3)FN;kk)LRToE&fYG}|wGP!lcD7C# zX1d|#7K@!qJz~ffk*88`md5@pi;BoAoJIXO8=28jAo4ncCp=|C!8E=Ci$QW zrta#k=SR$|AvvtduTaVnUHkgko+ix+;l@kqbkfu z%OT1r=D9;IFjvj4yL~gOd3N3H$IC{HPTenF>>{aKGt6SChA`6FvT6@e+OREwaNE2o zz;+p!3%2)UK5~)WhhPsxT*Cu}bt1}g!bGSQm^pys6e1D;&tnBaTt&1m6GY}jC4TwD z6L{`{P2AI}0SxRrc$_VbCCGL{cdrhF97`M|{%Oh)IJ3@*LJ(l$&e8)pT>r-Xl?=`Q zRIk5KbRxYL8?XOzQv08(vp;V3f5I;#1dVYZOkOB~J9-4k2RYeotk72Lb!y}<`#FKIaPt7gx>Xhqf zSA_9a6wd&GCl}XAZsNEM<8lw+7T9fFU!my&hnUiH^t*|j$Z^OyLUp{$Wc!Mvi1D`+ z1s2dn!lmUqR5EsSlhc1_-n@7~_{e~pVsZI>Zhgwx z*`Tu*ClF0z>N(z}x|q9HIdxxOuTVA%xaGPv-7hA!6eoq{TrF5C?%1xKsB* z!nP(nZ<=sGIB-Cahze07q)j?(j)mv#1jtgvkkqvJ2X+PV>7n+1v(?vQdv+PF)ET63y&{;4m;qVOfbbM<~=CA>|? z#1p1)m(~+hjdC6Or)|$gq)wKUp~m~0zMcVMlxnX7SP;Hs_fMJ)V7$+XWKGC}CpbFkag{gQGOXL?~N3#`N-u05nczEJLDZJ>xhEO{>@UBn@XyP^WN`OzV zn{2gx!DZ@1?@q#C=%H1{Lioca?}=(M}y;w$!h={|fLFzyhLinulq5I9tR? 
zsUuZ7fjk144j)oihYt@sX zfPKr+W}6kA9&7y}{VdPLyS4#3B6T=qd?rFYM@;LMS`U~-Na0eBrWueW-~7SZqfR zde&tQ6B3dF5Cj7QAPB=b4Ga>baQ3m; zSFc|aIN*Q_#jkp?wbj{hm8hP%bKY{j-)Y`ev~#m2X5SKrmD++Dl_4!AdCgBr6- za}>xSb154t9>hb}XAyxK070_uu)?i6WHy7iR1*mkJTHrX!OW`qUXCGH(4aws0;hI% zdi>KnZWj@#016|5DU1luGJsav-mYF%*SdZyjO#m9^}T>NMSYhl3{$Nv)@%Q*FckBx zFpk-ohk4v)CT3#3_mVGv$(PT|y!;cNmxX!6HT!)_>x|88xrj#oE?;R`l~ERPv9Ylg#*@vG6~^Ii*SE}e6$bBtshO9X zub(lk&X@}0mb;JT?1*=9znMXTp#g#d1r|iS7mMGxe`n^sv9J2AFsuvt-OBnd?&5xD z;~l??--H15E`Aqx@q6EYF=D+qkr9iH2)%hPerB;MUS67gLL&2y|DIi<`)6DElGwsYSJ#I6z^i27(o>=-)<;zpWyeqxZ(|Y2wbW0Dh zbd5WVZ5s8am%Z*Pr+wUS>+SAy5s&{^nug3`!a{;g)9c+z!o#PSw!E!S;7JZ2__^YoMuF=buh6?HNA}VD#|ro&~`~Z z<1{8Y`S9|EDb-2H&r^40+}xwAcFoIH=Sm`Bf&~Kl3Tb0<3Mzd`w3L*Ir_ByB8|yA( zvQaUmG#*CC$g>>rk#^Ho-K6BLrd@UJI_lbXota*Y1&fW??3`mDN?^uaC!;Ywe;{r<+>eM zdz{O<`t~R-JHx{tdE>)E&)opkN-yuXsu&QI_8uOV#0Skx( zKv>vjQ%WLr&M9vzc`mC>GU-ksBO~afgDz90zJuBbW$RCO6-Jo?X;GxPx_fwU(2#{z z`@(B1)Iz;&#?!s2cK>+K+Nx0=^*jp-W&n7&LoY28kIOyoUJV*F=r%fd;D8OqQm_EA zz*tvdsG+N8ypYgkT|SI&#)if>00ukf{n=9Esu4HC5=44|&y@Poh zaAWDUA=OcG+(ceu~c!Am3utM9~+!D1GZL3uWDC!evs zGUcMD<5S|9-+Wk!H$LY|NVu}19(`67EG!}zn2k1gWrPhc`7&!~C@^;=u$E2!F)u zqw0S4n;um+?5fl?LHV>|ZKZpdoTXaqN?T*()<`+!EYW1rr|Z(2Tsq&iyrW-fS9aT1 zG-K1B!r%lX5-?WI$FEa5onwZaof1|qM8cdV@vBIdQsywvl9JZ2p88z+@Mo`^FbDyQ zhzfZ78Q+Zv3mPz3z#)PMs1V^@7jA+mFGgCZ)3Ga* zFQ<;J&@44N2A5GcowWLB2jYxR$@mCs@zJX> z(|J5h7bjDJ)m+G@lC0m%yUN#$dBgGw<>(m`a596Pv(rv?-0EsYFu z-!7iK?Nim$#q+-t5+F##0g#j}gfPB}ZMrWyH$SZ>p+hHYiqwCH zL20^1QX|)L9xTQ?d(v%|gSt`D-;XnKw>R_f*Qks_iO_Kd)GU zf&~SOhy??uz17lI?VI-ffvx%M&EDHy(s)90%Vg3{jIcEwa*~qaqJ}>Oub81yhJH+u zGKZMq`{-rOTE+!PNXdf4gHhT~K7Fz&0u?A&I0z)LT5S1DpW2$~lM)vrLr_V*S+1vC zA?cVd1*KvRSvhh_b}9*#9`m`{qq)~}$+(#cLz9a$A8;T71rG}x8XO>yEr|L`BF?H( z4Dl6nY%tGxei*3Zw9Th0(vfVg$tkBtW2r1Qi%0B9H_VC^bsvl=?}Tr3O!> zmFsjzj+!M{*_OLx6(vQKNjcG;GOF!)J1M6+G-!bE(BJ_F7!+Nt>%)iYM-!SZl}C>G z-0-zZLYRJB_;zYjzsZp^%p0b;WhWl_7OnA!^{d0}N<(3GSe9nFYp5`|vceiNKoAnK 
zV6d~TR2a2qnZ_;g&~hy^4^;t!2L}uq8Zc(LX9~I#Dcsv>$tiXW5p=+M(TDUsWrwYI^$9BME`MhU)|kS-lK1?_{XDIFIm^P z`T~l#sAK41U8rUN2mk^A3K+wa000OCL?YsVK%nQ@MlmUU6959j!LUFu772ubU?30* z1j2wo7z_vn!azVM5JW)?Nb)4h^0feo-uet2Mx{}4%22TmWhV0CuXptS6wKI2Viu6p zL&a*^77*=6P~S!<3@LE2nk+fE)tK|cBG6f|v?qoMA`39u+zq-V9`BnSV85sUq^+WN1ZACleAb=_`2Sz>oWu_K6LAVW0jNTK;JA^M&}cw!)@&lNs0$xGR66 zI(~N>e#7JqmOSA8wAkOL`bsf57J13ASugV);CTsRKBI#7K<+tNqQsVyxK{oHa)2?D z-Sj?||MyH?_}Bz!WEKH%tv3zkyr8ZPC=|({M#0WlL#K%ipk03%H=n0rwy%6!UjLjF zt1pQb?#+75u%|;jTLC|3>H&hgt{I(uW(o8`Kj`&>+D<|>9DLeq=&t<=U4$l*L*|5& zp@UNc5`uPFIO-=nc1|6$S|-HZf=ioUfFW2X4JRL^6}fplLx7~&C>}yZ0<>cSd;64Z zT-5{)5{>vny8GG!Qj@6*Pwc1+uN69zAsjejEg7Db(7G24u#I&X;Lo&Zfl2?_uJ4{T zHx&kOTkUdWNJ}SAY5Ms{0exbp<20I}&5ro8c;IlIbT*ajpETp)C~XVd zozTV#FTG% zeyWgs0L;=8?g7nj)2+udC-(Q#6dLjjISkyx!x$uMgfIcD`ArR+rDRb5--0$A06fk; zmoWgA(j$?}dvEL*8VBu(dnB>o@~qY#_1=UTb|XpMQZEm1+d0}KQd5DC4XGX)fnDVB znX5|hP{2yQXX$cAlCb_v&-5;_%V8U-Glk^KSTO0Si{D0F2^A=ZUGhB000vC<)9d&p zzq6{aFC#~s;i06!K)?E3Xs(cSy2fQpv>X60TJy2NX51=bGEl^av<^)GJwU?0M1-D{ zr-KDq56Kf(HV%QjKm5=CTvLPR?Fxc(Hg*Yaqpqh(bg<^z0UY62t*(p@YN5XEm6 zAuadmeC{!@8DQmeBC#Hto-`(>oTl7cO0rckb%_Sek|ncq>OMBczk zdo=WsI7OzO4gCS@wh2}c3WG=nHwTj!BR4^v^LnoOrhvvi&rC*Xj24~6SaX$O!zLvK z;7x3%NSUPIc3#dmeejN=ZEn|fSea~zcJuH7yAek8D15xnCSW<_mUQw!DTUpLi*7hykuZ^4%>p zrY%Jp9ihyQnPMAnreRMW6isuJy^L1fZZS>N%g!W;6R>AfkO)fu-V%c`* zV)O3_lRj$Q5w=Fk%29MUzf29)FE-o_-$G+FZcU+MJj=x0+2)*x=l9&zwKa%RHO$w?P!Pbrh&oY(2 zNsOiE=OVbDO;fWJjQ2wUU{aep3%2ntj}@)`gz9qYnWWTaEaj^kS(}`^?&h222?UJpw_MiJj@vorx7gZEd+Nvl zcZZ6WF;;576oQlYtt$U_W&@Z0EpX(RllTghacuf`9q3iZKvB2Aa}t?(6uL);>{R3} z6cr*bn)LB%tEFw}Ozf=dy8ERW#Eyd@DQvNjbIS~5sZRnpNQCGg`a4#%R3}x~Srgzp zgGvb#)K%ZtZy@$e*kMiMsJ`_9}}8+ytnG0>pPlO?+=(hvfa!N78eNcxU&bwp~E z#xDsvFMtRZ53orq?k-VFv1dX#x7?Ns{ukv5|G)%yTtmei_=*^4Ii-#V8fgz<7cB zbBA8Itmn4Wv%c)ySz8zFgG_1L-4ks-_-A2nH%5(%7tL5kV(4u7)Ei9jfwc}6?@FrN z$gz`mL@0<+k)&$Zv}n(uL)V7DkwvcW5ey8HTq>bt>~CV$)`%9vU@1aM#^mu&;REoEgT=P<@_#cK{FEv>A&JMTy!)j5*^! 
z%NaKcS1is#;*7aC1`IhPBN>D@!Wrf6ICNkTUPc)`HU9n1EST)@t`mukOd_GzUDtME zBG?)zCjy926S8u(-3^jbpXV_BHwsne3)BW1q*00)iUuptQAl*AiH1Zfia;~~%Ejt` zA1yKIVU*>0-*%}8^(cb0J=LBG5e{-f*Te}63*bbY7M+Su=mbKfM8iD#?=()p+@N~o zw}m8?3{ikc^dmseg;Jr}CV9OYGh%U!!7$I^gBJyK%gOd(0}iHGaSsWCFv* z{)v@}^}8csbh1nF@aqFh0fR9>n7;lKGddvGvP#8zqJYV8dy>_^SUq_mfp1=sWGDen z$-zwLpt9r80fCz+DFU9$Q5F=*#>9|ltc}4?ETJ|RH4B~ML~`8Skkv)X1I-06?(A6u z9OAs2O=xpU9vtaZqQr;;)HHj4#y+WoNi6j7G*#laSZL3~?u~}V+Bz^z-bkH(7Pc?B82dIpNtvIcjGuF z`p66$J7;(}NW|iR=l1f)rV0N#9Dw@0lvCS_))@<3wlhoMv*p+RV;#~k?3o&r2!uV$TrDmy+Fh%@LR zn~LfoQSms$9}!Xx(n#BpV-BeD2AnC<*4>H-%74fi?P~ZwvqaXlA!HpU#$jACJtt!3 znG5cNHKSL;jE@awnAq&9*ZF>#jjn7QMI||z&A`YqP!b4&y)9tS1<~as6j&&JNYx6B zgm^DITsMm7F87lvB6EgtInYHD7g4vW2#qT*7Xm;T;nD!l(9b$kY~OWI z)>hGebcT35JkPS;E+)~TIAV5-)PbjzC^Ja-JCqM%dkv16qK8aABwq@bTnh5Iv2F*5paxu?Zi=3wNWzf=3$5rU z(vu6A;Q@6UL+XVPpbuZ*>B+h)Tq`ItBGrvp3f8PhZRvE3Ztf5s5}sQZNEEvLPr>^s z85#g$9}=neVuu|Q?g3!CY~O^r*dgQ2PC7rkbJ z%OMtjvY+o|vDS+6vHt+@7myqa7*jVH;xFaB{YY%#ePJp@MW-JEsk(+?E}U5q0}&-! 
zJJEDm`Nm*8pd9#6T_ZkqZyFSk?NSs7--j|hZeUeEN)`gyXz}c3q_q(TJVZc%fJ^DR zCGSR{!zDBQwt1KVUk(ozTClx20NY)Hhz6mNYY=j@xL{vp5=fU`%2VI9O)wT6;p`g| z1i2!OK&UTVMhPE9oV#D^=Z&ke4ER3(OLktCF4;nWmLGw;pM^UYSl5Ln+>mkSDj)N= zz0GX=*$CIuH59Le*c!4BMl?(MfmR+!A*MBp!RxjM(32vLTjzCyd*4wO8{lef=FQA$)n~QV%9Tm z40?JNWC}o|MwNgdoRqxfpjhuV8`NVlA|os|WMiYZ>cMn&nV~xpyW&Tl)(8P_ zfO#uhDOL$r(IF_I%}%5NLndie%H{|oF?03IGz!xQY#Py|YTbQ89v8@pdo7C;Zj~`Y z+dj=Y;adE>Lc`!-MsWx3EQYFVyUn{7Dkvx{Hyz9`FRlw*i#7--u7x`>nwzb#_SO3Ak00POGbxc~&l)8Ao7cN{NC6yVy^BQAm}1?mFA z-2^TFYd4^ZP|E{GXhI?q z)p|gm$dD+NGB?Bt;bI6}$Zr(We8~9$=e*Z~&r#!QzLu|k32Dz?+i#)bcNrE6j7|Xx z_(R0$ZKvz}nNSQ)LYLFu*f*W2s+~qghxOTm+@EB*j`R&DBrrAqSlG3!;WfCRS5Zou z;?D_*rG)<`L_Wa(V`xn&0N(djDeVMsmo))00))p|_|N-mDZ~I9v~``k&N?2Y>6eFq z;fH+_JwYLc0~CwD13B#$|PvxKSN!gj0> zGVu#@1(R>I^{_OAklCcq(k7%ACDFmA;Ljx;yn`q2R$8CoI4g{)GL@R1u_K^UB!nH& zK3Pa637Uhv0|4)TDvnLOx9hqvbRHLJr^*ArWmKpupQNhzg8Tc=(%YMhZS1+Q|AiHHbG0-A$z#Xo2+Xgj@V_Z@rT#!KAC17|Q ziH+6j;Q)-?U7Iza(RvS(c(#b`0DZ$eRLKDsG~ZpB2m=^BL=u$9g>Dm!WE?r)brMW# z+X#k7yMrQ(aly?HOn)^-$f!{O8V49182jS=K<*^KabS`*G%h%(&p66d9P_#W84b)y z3g+B`dKefnN184aYDnbx(N|y^g%p@{RtY394~t}TABTZ82}vtUE)vY})B^@rj}wxi zOG=7t7WWL_kPH~DdTkqxgX^Nk8pAr;eqG@7++vJm6$tl>t=O^w1}>@qdpOdtaR4lEp+#QbErW^0uOep!hHEg`ywj)V)SaPJi$9a`|kb3OM;KT^hNC0 zT@(vku-Ao&9nP2DF`WVdp9ACgVH`r*od9BeGv+tG%p1>`6E_J}m33$MB05OWo(N8o zNs*+SzH~rPga#*w-@QDUw~joE(Y@@b!{*27GMMmA*W4)^Q1qvgxLmSwo$eV=JCdC+ z+Zrj-^}%OfC9DObknE+wVwe-bUT}fB939*-WAZVl*UMHsu_1hXi_xVzT(Zei1|bUs z6dt$bO?Z=(qn8=sEgT8G^eNY0CQcv@vQBE@)AP~~5c-?XIxlGuO=ASLFFYAE9%$FAz%?>HsKsD@IDV_r2=J)F6JgCmM)?hNZ;OeMsDiKr7uLl_8>78J6 zijqOy&v&c=cQFuwZWnSPPGh?$$NGRY#$Xdu_ADmgyu{__IC&o7o~Dt2yce4#WETb>+4&UL#b0%@hq7S9dVQ!%CF zMf#pZe?qjZGfgEg)E^C9#yjH3n6M4g=-Sg>W%U+H7j+U@_@qlW`U@g#=WG!^>r)+f z$D4rlwG0^jq|_(&0;9^^t4F8(D51P zM8ap5&gP;Jnv0=H;7IMv2xgjtO-6)6k}G>HbJ>6=YaXT0o z16vd?X~~aV`Yer$DVC+AE~*uBv5T5?M#%_{QmmGlkIMbyD`}anWk5PEq_-rRr#Sax zY1yN(aj72R8coI}#R$FHfXG!4+W|V$Pg-0G^@iq|LW7rRC)KnnE`V`v;=ywH-ZH5k 
zAW1JQHHtOBwgO`{<{hKbD?6@GK}9U$66iXv)1qZ+rO~c8c5zL@p;FKCa0&IMtI;c) z)IYQ)w%dzm=vo0I87{Ff8$~=rrfycKG88VYkNO~GL);-;iqJvk!L46dgB?F8xY$6W zYv7_rQ9UxirKPxq(Ey!=st6_-@h|Vm)Y$c>#WGpVI^P0oV2@epOC<3vDIW-I{>b$( z>QMp@%6a9mYqU%B7O!F?FfBQZx70%IBnJ*}38mWlB0}cE-Eyo)-b=YsC$QpGCnV1{ zV?5hclxTcg&JC_T%XZTr2*(dRxd#$DXQkKaw_f}Qx3hDxk^*V8jU1<`FaMtR|xDVP6sVkjZ`FrW%bVY%c zuEgLB3Jh+PRH2DI1H1Q> z|G#lJ!3*fh#a4i-pfal7hk^9O!ZOI-8vK@hstf#j_uVq02KYPVveLE(X9 z8k}3v6opE~t#l*d3!;P~Ut1ZaZ&4}P5Q2C*AjBaFt8*9c1|c;L2PfT zF1d5dH(kTSGs9dfTevTAW4R@RsH-7>FF>_I*(k!BBe&Co?d(DeMcjq$S>d|xp}tc-nzN#A+YmZd<<0)ASvhfRHVm^e_W)4^f-=45 zMzSJ8fL`;2EA<&K2Zv`ED>z&|rpMM(zn;v)>^Yz*4#8$8}{oIj5fR)f} zARx+4GU9UWd=*`4dAVMNx$3rM*bE4e%xJF_s0xPSJXWV20rpDi6pb%-=liLrG5}Yc zF<7mD!S`*;M9!Wz+@x~Q8cum-YfFp}$02E229KW|nu+~-%$j){6HS+u){mtIbDybz z`v$bHY|7>lZ2YCMDG5MR0d4P^62U+BO=@0EIU&!0wT|6Cb%jJ))aZ%hUFqBk$|9rl zk$j8!{cD)=0g1I^ImqO5e;DJUI4~xn*x-Pyrh0TNlN2R;UFa&O2|nKMJI}|;q6>T3`I5=ibyi8oY<2*k;X!ivO+Pu?+HD&uOvou ztcs*iEFbES?zVbH$Z`r_?j;aCp$MYqBh!Rp6m>qfuehVX#okG$B_b5oo@8KwMF&1C z?E`2&DCMjNWfGwSixu_;y-C6)I4Fu3K6ZJ}0LIUI?D17egYtG=6=yr6IX5Ct58X=r zU`uI*3kn3Ny>4jFwV)6Z`>VCd&2>TrB{xw}tnLTdnV%u`hXkd3R}SZ7KH8MF13}3R z3V`f^0^bglFBUxD$!06I{kZ0wufg4Fn42?ukb`j&MNv ze6ueMCIs!k`4q+dGvZV6vJK0Ct#) zgBsp=&Tp7`suyKNAufP4PIk6*NHc4R!3y`F&%lidPa5A2k?@w0Rm2#Dk2h`8X{lNXt!XC)dG~!sQ!zOB1>sh zHG$S*qKL@tD54w}l))uVSoZOUoReq(NS&7N;AMKw@^_fl}NY=r#OJ*WaWzd_i#r_Uxqwj zA_Nc{K@@g4W>Cg0tXzqSx5QPlga{FBKzPA3mhJ9C5alrjDlb@+F%km0s-tA z0g6DGEGzJgN&yXM2w1+M)jxIbzc+T4;z4<&&-H?ov&;VEE+Qg?L59O3vWaX$L)!wV z0;mFXG;N?Uf1y*1J!wTzGkl`7X^ztBTAe^Mu4Wt9x|~b znr6E%%q&u&amNRXZHgR}#TU$8EA%@D)d9szQf_qm}jQp_?`$VFYCoK~V zI?o~{<)URdcaijzC`~1$MUx&Ls&DJizDo^F&8CKG!wU=z=|lz;EFb~~NKill3Q!OL z1-0h1k_Kyby#IK=Bi=_m9`Sj~`)0TA>wG)yv^VXW{O{S0>Z_mXry5m#J$?Q@XZp9M z&*>+o;$L`FwB8BtI~x5v9liGxmT1G@{R(}Cc4xSDhIDI)ceUf*2_y9Dc|N<*_1s|g z)UzG&jP^RGdh2DcJJz+%&PsO6TV3gBGebjG{q$P%*WSRx0|t(06ogr(i(2Q9VD2&K zqrGEGtcpEd8SS_tPc^QL?;09sHUrVCn~zRuMm0A;f(i%e=HpU%Qy0x?raUdY3Jn@8 
zP@rJ~b0LBU90d?C5KO?}LaFe;1rvc;fRPbsrRIBT63NhzNt>8PVl|!VBW#J+D`Bgz zs;a81rW;|VdaJh5o#rBN?OU&YS=U#$t36$W5_z}ZS3oFGpn%vxg;|YKjQC2=#C(up z$ozC>%7)GsX(6v;oTa#NIV@INM=V}nC>ho(@2vK@?6lElqt%*bvTdq{`qNF+)1#6e zwG>lL)%2OFcX$rdm|1!G%iYZMrPlS*h-tP(-Ag?*r4qV`B#iQu8o^wI7IDU-{(gnF z87f1=wT!67M701O6o7(+2@oVqcwk6c&}yO6GDxQM=s3q8wu97=9Cs~bMaE%aG|izR z9xU*X-kz#?PYX4IDJnoy+=wJB;|bC`t4-*gvvPgL9uG3q z6gITjv^tShQEcX?C@cFhh~~+dIrBvqSg4SWFw4AESB)`qs{JR@NTy0qq8<`H)iYoM zbHyN4tSAJH9<$AJYNy3oQKv{dDIGHAusAzp=uD&RAvX39qUM(%QeTr;%}n1aF{w57 zsbDFJ+~KJpV=KC(O9sI?swMIEGLNov$lV-r#<&_<7W2UyNOd@>)Hyli1$Ufbh)efI z9U?3U0AbakVMyk0L#kGgC>ACSmQl`2Srn2v(T`J{Q!Ex0yTy;`^Obm6Xr#GDc4uU^ zwXJt;YrB@VXxnfgJRsYn`|h3f>6Ey&HMAn^Y^lhQXzfXR$FH|exwS;%Z%WwF?lbp$v{&34X&JQW8P=lEB0tFW zeyWVwwk$|tg&ByZtF&A6SV4-`YbLt4?*= zhkY zpNqNZ07F9|;TJG2Jiu!0i;Krx%F=ofCyFvLNhL5re(SkQc~gkMAcBJfhXGiFGRRcn zL$yd}8>b**CJmjVO3uGy8!o2~rjN9!+KZ(H68l+x>z9j{XtXOnfybY!@2C^lH5c9J zCo`EF955az#=?c;;NXD+1=L=p#9!sz*9@r++32Q)|@?)~{M^wNVK> zP21~Q-?lB2uo>;!{@&wH`_te4_468O&Cqc4wNWCmxmG>mYHdndbcTj3HDS?ROGPi- z-#?rLuTw4IEV9OEI+Wq#0DS zn3Upd@u_lWbZ5s~qThO1p_SFvp4AmuUENnV+ok$z)2vpFZrVNiX^+12#iA?i)!vA9 zZF60_d#k&*iEG#576rp{;5fA1nE@yiV1XK@v?>qTI_8o!Nm6`VYB7jPM=>ph?bJmT zFDKEQhULIuc5M*+8))G znUe)xE*7|mCo^@=Os6UWiPNddsE+C;Q%Pum1PaE5+0w!u?fCL-3UN?lb}dZqa*{px+x zq*`#n!UGEx6g&xAygP}X>iU$qiQ2GFR4QD&*4plEgasZnXt01n1PBk5kbLoB)itAv z{@i$BL5PfE=m-)Vxn`|6RwgCmkWHl`yDV6E@+dPe(Zo%?#{AN15!%V+^ikYG&y$-)W`qd}zbe}?Wt%2<{%ZU&7DS{12D@O;Oq*qBqUc1^_x4-hCM z;6VTmCgjo%yvbZLv^muRu|6zG8CdjrD^AU*!}A&Aqs5J?ZPOCbU}|BVEYH3~O>jX~ zpPPCPs1;QuQT>Q|p>EbG85KiA>0hL33PX~%w-3@9Xrwqtbj&^r6`d4CrBq#%`J7Qp zLgY(Eq1+SbM~jsMLFDwV1ZP_1dis>FTV$VS0;&hX}?35FU_<&{G>>oqaFu zcWJk~{EzJNYw86cJW!C};J^Wc1qC>$<1%eJN6Kh-i>uSTl=VpqF zZ;ZC)zGR1#mM5_|y->}Dh?$sIWLRWdX+=i)K!pja$Sw(TqGgde&+K&B$=k1V8|Q2*3dxEUk9yHA<0MYd*TgI|%^Ur(35_H24OB6oAg3IOD(@q%L-6X@L=nja+mL2`H;8A;2tvU#s-tKK=JI&*AQ%3QMZzjB%+27rn8Tg zKAbnGwLCJ!+Ap*LfLUgFZ@>JjqnLmB)K12Aj@n@LHEdx;D0Xlfp3@0)nbgYazCy*NKxgi_aLsSNDC+ef?^pTNgjyIf@P5;HLC2N=aGPpqiteT 
zlWGjZN|T0$J{QO$yu~2t^-@1{$uri!hX+R^JQb zemo6uPdML4iw))wU{?9}x)-akBNZ|;+g?=#^eM&3lMjg)8gM=F)K@cr8-k2*Xh$lN zw*P8kAMB=U`X~U`9U$sT3at51GfzCc*XQIR^kI!XKpi}j5s-`JqOgmUIu_vu5x~njFnUF)dg?BEQ`0J0G@wETXe3`~M%~@eD0&a~ z!D`vwQkWgoYgUD*QoJ=W`E*U(mIYqNvyKuXq?S%~))y|_N4)?*QbBD64k0RN_GZX| zs2fW$G|A0IU$>@5JOb02@nMjG4+)H25>o2|m#*eOgd1D zpM`QEvYI%xB~PmYsi&(Ve8Zuzqaz~gd7id^oFnc94Se)frp z-0EOz_bKIZn#H9%k}~vyofZg6zX4E>annQc!wL6GArDg;Z$bU$-mFfsmBsi$U(hs< z9dePZK^@ey5Cvq}OsvJG>}n8XAPxfo_=(3{DV?gUA?M`^=%6FdmtjeBm-gdZW25rr zU9CZMF>NJ#_(;DU0odM!K-Av*t6K5N6yi)<0;ndLjfol5kV!(log(GppoZ6zrTj7;oZIbv;8Ic6$d{P4kpIaJsKjRXSNd##@ z6iVr;^7%i4_qA^JZw`PBhG#|l-`lzx;|B6;qx~%Il4?voA*dJEWX0x8Ya&R< zCmCM@Ghv@^TVb-AG@yNHI%f@8W0DBjf|eLmUHrxZsL!N82t@ki6>mW|%VDLgEBB#4 zA(^jc9q+fIc_}@myI|@A{d{{sy&@fzi?uw{0X1P;V8mG=@x3>8GJ7C#x{MAY z{$KTbgBCIcOIx>{z#bmVgYJUx3R2N!(1GSeRaC~9ZK@f#gQVMz%o#9s!B zr;Q5M#B)EsM@xWmjE;+W-~$Eg$)}#v=_);~1P-tT;Eu-ytzd^^cutE#%uFE`aTv~X z=sHrp?k|76(}6A2Cx)()7FBy$LWu!!jvE~qR|NWB{8#|wQjBXh=c9oRvN;;C3hTWv z?k*Sy6pnjz<(VY#(xuO1YZoMpj__#=N=6}_Ba+A4kq~Ohd|#f~)YvX@Mft`uLHODp zy+Ih80LK|hP&qzNT^)dv09KzbMo6od^a1;UMUTP_nC8hm{*;*TAkvGd7$_%k)A%Ie z@b{Rp1kY*6^xWA|La^_*vLZY>b$XLrh>yxVQ}~Lokd$Zj@*p6^nbYz+!PjHn z`g7+OOB#!-X2yL=NgV6Cm`XQ!-@xpx*>~JY^>r8rZHgx(3BG}DNC))+A@b17Bk0qp z#+!Da{9q}qX&^dF_v!gsR}ShvSG7bIW$iMq^^Fw1we9HoTh#6dw|RHob06XJxCtBxa=i^$I3lSS5%YwP_*LQ3mBqB$n ziwB=Xgiklf8kOC6EA-l+9jI;(d;O2diJ?sIiX#Hlv^uMs3k%EE%Tk2Sk~?OOD6dhp zouW|yh3M+BO@!>$@jxz)#|^^C#d!y5459$$f7>G(QQd=?2*3duE*<+9E;2#Qbh^?@ zb}O0ygx~K-Uj#?AuA6*}>2_cqw-+~#Gm42xw|N)Tbq2#}w}5?^Zp(jvBzF^!)Vx#d zA~JZrKc*oVB+m_V0e3Em?x=lD#qDu`U1XxpKi}c`7sny-%+8*MP*gk2GLCAI6Q?i$ z(*BFJHq4%YTRM8DIHG)HRn38#fH&a=R=V@I4`3V7#b01wwy&?Asd@ln62+mqP!y_g zkUx;PHzy2WmOQgi_*I$UZv9mM;wv(`&QCA~B(ATYXF{YaS~dc=X=}mBW{O#*ZLpCB zcEqC)l+{o)2n0X~OI@nns$N0KcoB{G7b9s1_7*|{+ivnVX48_8AOK9pCLem8z8!0? 
z-a)$qK_5Ati~z%by3}ZQN}v@4#hkM>)Ci$EN1Zq+t->w94D`ixeMAn#1-ZEYpwzRo z!Ciq#H*kHirb;oEquC4ANcp^uh{Fe2Fpk@y?KUVqwQ%{#LP%UL=l(TWjw-H*>1}na zke!xQ0NLT7vexMZHj{#$32k3lUk)@*GL{%=2~r;At&lZSq(_G&TrNopSIN~Pmf+F? z9#O*gtem z$srXM8gVkD%GUOVboVV##f(<bLriD*_WI)}Bt}Dy%PZO8_T}UZi=bM9X(d>bo2;C2(VNMn@d?l5283;ys$c**d~P*Ba+=K zR#OTEwCVu;BbsrIXX0i5dE~m&$Ps27U$JR6cd~pQ zBt)g_AZ|uA7D|ocoQ*Z(G21@; zgBq=_(JlqqjiFRT4qMESgT|L9TBoo5#KNKADx7SXI4=jmj)`uqDsHagxd~p=E~r)OmdGJ zG_>3w&rok!)B@cwIQ|bbuGr1OP`>BloHM<2x;rGjnNWz~TMRzEu2y{B)9|=;DJ1q$ zhgJ25X^Sx`yt{8{11^p_#o*{DKWBxCVZMSb(?|Uc1&(iIL&jkTAIsvtmd`NVdS-9$ zC;CEOOGM+jx2`6(akE)MJpZW0R(#t?D(_-%N{7Uc3eKRzatAl+Nrx{1iT;>}`?Agn z`e`w0bORttd%c|e{{gi)1I$xltx1rvh??!TjfuH8FuOlE#rGn;1v4G ze7o-&Py^Y!Qu;fO5Cu;ZZt)=nP=gWyXc&QEcZ0D0K!C8uYw_xiyxv;Iq8ZAU7N~&r zw_WRQxVFwZqH*#co&zSjz^4QR#T0SVWLhj^16mweFJVs@T0FK`k>9WjEiO}9hjsNJ zNiheBL!+GaE}F#zI?arrfA6%t_z*~N3zG~8>CNAKy%vr@%z$vdu1P5g6D#V{>fNEn z1r{Ej7{KSJSGLu96Yw?$llJD3Or;A2c2&|7u=B0MITV ze$^&>9nhk@Lpo?Ew}VCHZ#OON4ohkH@%gc7MLe;ZL}%fmBsF3n#5`%N&l2aoIK@On za&CJ$rPGMv1-x-oivd8zBL%88Qd36sHf!f?XV<#ZI=X)CW(BHz)3(M%|3QPR5N-@r zwVW*%nu^`L;X^c4A9enu?*oi3hxNq~TrPSZBNX_=Ye_#?!{9r=N8A6zF6N$ft&QYV zdnGKr#4%5Ypl4R%Z1Ze$Al1{r(|kC&0r9ovdLLy9uRyMDYf~W7f<2;NNgCS2D?2FDNju4CeCbyyYQg#k7!569WNn z8#_oeI>wtF&s7R8$>v^GMM}Ya8hlv^KW#>O0K7lg>@VE+)riy@=2Boq(bafA2|{`> zERM(Cim<7#`0<}eCDuRhpt)0jkN(+W*m)-NhWpS()8RLp#UVg%{oyu8)xfeP<42*e{w-60NF`;< z$mN%2ck)Bj`4bp?gx9us0HL`xi2=3NiXXS$DQvGO=E@3dWm0V!pA2y}EZ?Uc9&fwL zU?|XQ>P$0i^yf8?bhN`JIwLNIlO zhZoA~Nh1haN@r3Tk!+R1as@7>De;IhQGFn2DR@;M9!^Vbb(uYmoKwOe6qq8d zuGxfQG@>-1&xfZ2o1hewc1{QIe{!8PiobUw&^7h}ghuH=eZ10Efgpxr4&NBD01y@E zZNrc;`pOY2PQc8Igs3f)fLJ(PTrK_xX(WkJ<5rBy8l|^BRR6sB((Ucd$* zf627LFAcII-s~ep8S5f@>0ZII3!KX#UcE62m2BC+N~-?^y+i>}esTZmQj@6M_%f4h zv-6b^>P>9KAzEGBpqL>QT)^n1PoNDByV2>m8IpNhzoN_=xiU|NrzkrWqxQx?^4ozI#w0L#WHlUVGo!eQo#M4N42k^$2Tn>IQ#xgk|S6Mzl z;~Jq=$P20rS-Lm6Pn25Xt%3+svnC($bjvtW#QZ7Q&zjz3eoed$3F@{% z(#a+_4K#oJ{X~Xn{^4!du6n@g0fcelS?~{5kuOFOyGgc@Zbv3V$HK>N|h 
z!(P*q6pthvYN~&nmiGh8&mzvQKy@sVb{@H(CZ#D96yxr2X*V7)LBvpOKF0ZGK2*X* zb-yyWCmS?+!M7mFa_c-K?^F4JB_)zMY$;y zH%&`k@*Jfd@B6KwT0&T3W=j!MJuHTFV=yq9`)CexeH4xqjJyzP+7%h_s;c=oQe<1) zIe?&LMz$uo2r;68EZ7a70pV<@Wq!q`vZn(ZU(Q5;6@ZvWOpy>RcI8>5ma+9WoUPOQ zdUb=|cqR5!@R`-au%j4>QZ3fE-|9ye@{>R%b5-~)e)uE&D7hpgG+SWTQ*7N0p|``| zSJIUhUd03{^?djraeGDDW-9;MZbr{IJfPq;Jkolg3)?8SaajV1AFPRfII%23CSKMi z9!T6Op+t>d|C$WgL6`Ik)V4HGqb^u0piZh<SrEb?66nglgHv9;Gv6*T&&H zn5h`WH)KQ*078%}9$;%jz@MUKqVDegBal%y$%kV=`Y9!!yu8YC-}3@keA)2Q3I%|> zzOB1{?)tIbzw<0qjfaMUVOTUEjkJJN8dd$H`*yt9f3w#ye?J5WkOG9kaWHTs9N3sI z8I$Bmlw(bmyOc%d4lTb0T_!!ua?L?l-{K~8n`^=(yZ+wVlOEsPsw}Ono-eFpSQpi3 zRAV8ho$XK)MP-3oW`OVtWYDVOI6-QPs4lxqa0^fv}R zeyX1&1E#df{<=>EOo?JuXmvUP)B9;e&!CqXE9@{KvA^^_8(?}N$sk6EtL}ie*%Xw; zg{M96Qw;SRF7C54bO$sji_b-JW-LX>nGv1LkBlDi__NH|Vy$8(YfU8%kuF9h9+J`1 z+<%oeC6aMcOk||(CmhnVy>2t2V^t&S%BS=gZ&Tyww1s5& z6d6y@Fgtzv4O~wEqN-FDpa(e2d-)=7hajlmlT0#6d7_*_PysYuanUSRh8_SA4(#!a z*cF5COKGA)TVilqr40~-EQXgB8m%cz43uD^#Y87a+4t@}7L<3*@Cu*>8D*5QCYktv zU7l-HXO$RgSmrK~&qUIT!TVf7DhtCJyTv~yhQ8hCjIPqeFvOD<7ojnwCQTU>1BV|G zeKA-lh7HB=m5U{$*|*3|DNjZQ%`;7`x%?QKG>g+m4%H|^9>@5WW^p-EH@0KQ_!^pJ zbkt|OWY8Fn&nb?-p}#T<5LEW5jENB(zi2b&!uVp{d+j25rW*{q9KBSfgp?o zDT>6L#X;Laku^h_g3y1OHL21RXc{>R6!%JWDN(#rXFKWAQ>9Byo-jn=6%$b@GRUsd zst^REh*a%~@aL%U$F5LX2&8Sv7Vjd`Oh5GK7aE`g-DP6R6>$`BuC&bt$VBG604LAY z6cR$6Zq6abkECd{*b^8Mgl8gF`T|)U$LAnfyW~NLziY!lKTf2^7yJ<=#UXWOXcMV~ zONYDxqCsP7nI|BpUrby4mf;f+5oqAn$y^GE0I~PRiEipro)r6qiL-Ipe{E~VEow9OYJhYXd= zp;;{LO{kNB*H@GS=F)<#r|qYK?JeDuNg5(U6NSllZA+KTZK==RvrUrJYTHTUNcw-+Did@bkkvWNebw1n(BW`wJccu?ch~b44M(&u-)5uqmZ->XMp{);^@fy~lm0cqtTw!Vah0r1a$72pNGT#TRh= zmZF(aT6)uZ$QMmZOP`ibm)xMoq%sV|+`zskh2syB798hQYs;PoT{NTU(kR|(+u6r# z6D1He3`O!ldL4^93Cu@B%YxQgYN;lMXV=n4yd{Q`cop7st|2jqN}ET6sR7U8D4bpZ_Cg0P&q7QjQ)>3dI$^YUQRFsb)sB7tS6M77# zf}jS#iSignWpBh3-!UWsqsr5kT9Qe<{SxNpJZ-LYm8ZWs9iev|xeOJxTYC9EZ|SoB zU64ad8;TUww{+Q->tfT5B9G2Hj@+B|!KJ%(G6$iF<#0I|YN05_rCBz2`TldI@j~6I_8CBMq(X%neGNbm^%s zU8;P#6d7TlR$Y3xkPI?R+9@jK+*ho$7>SmrgT?$)dK}faJ*FIQmu^XD+}sr5VTk^? 
z(zJQTCYcXa$`Zk_r0jPok}-HGj4tgTop@=K?6sVamqLdzF1pt1DN|3MUMtyoDI^uy zElE6$UfMFF!bIw&JbUbqpOyR5Sf^l9meX9MU`Q3F!A%t6CLv`ehCiGqNfLeUCp^*@ z(^{i&P8F6mTVm?((w3POrVB6S8C;5S6iB@UuhZR9l+&}MV3sPJZZp)))45AEg9{Z= z()1V8A8CuFwvd>rXi&emAfz}bisr>;@-t0& z3K!IjDaNGT2tE;mek!??Wn2_Bm1Ymwzl3Tv34VGJ2c;sUVPYL5G96+xWGFmUWJm-{ zcj8CAX{~2)`kdS2=r-z<#HH#eF*A%yqev1XO9gzJ`*G=x+-nO*$C3zy3FUck^5oKI zpj=v{G1yMU6httn&?wTGvquXLiqDPDgTOg$xXFWxq61H;mK)p@RqWmb(lU`xL~~xq zA*8r68|1FDGA2#PPhT#2mVjfl1cU*9D5jA-2e+j?`8soEQWh(+g1ne(ZYfl(Q#)*Q zw^T7yNMc}DyTHhw_EIo#XQ&N`kS}|H?ZPQ$Y6+Va*<9CB_7b*x88J=^f{2gln;c{w zi*^&qgA;P;-bMz&vBH&STgqO8?S2ntVgZ|qW;~rWm-+&@Kx~~`($XIlnx4HJ(50## z^)yy=DNKI~N*0ADE^oS2Gthi@#DEDxx9TYfDZatbn}RE;Vh!76;Bcx-Z-&^`MZv*$ zck>Fkbt5a>5|<=ymG-*yv-Xs`sNkQbzVV51qBv3)Jcf2DWZCB-w-!Emd74hs?r3nk z;jMh5Xl>+OCz@O zflt7+j9kRD22^f=M^Rn+P#r1w#JW^u>0Blr<0V{Bx1zx=-ND)wm(nZ+eatQ`m;UX0 zl(Su$OX}w?MOiWAE(f5?2Ec^UlY5&Rs+{xz80FO_@X~deA#;`CrQ&LFU*8-9rR=U@ zyp&;b%Oc8D2ei`DY4qvKA40k=FYRnzif+Ov@a|2W3x*0lH zVL_+SS{cJP8nY-8N&PIGfF*}OlSodV69~lAPwn6n9dSThXq!&^F-JTO;_SJn<3=%q zL0p^%apb*;!Hk0EH5=u>M%PDIXf=}O+{Te9gtq?SJ@PV z#;!|Q%vUM%ZbRta!WO+q@itOQ@Y$utUa=$;rwx=jSS-P7?2Yx$X6323X$Q(u4)dk9xI!<+z@<=IOkvp6^h{sc_<4px^`)hWed)}geQDBr zUy3u~%G;kIO>rW>6m#a6%1^%(MeCQUzXKFKt$wc@jSQ&-E{Ds>2L~k46cdnKd^Q0+<@Ix9me{8&HH&tv4KC`osWJ z%l%^tOmp;aguws`f{+gxfQbYf$Y?%L(i)=T6XhlCOp@#IbvzFM0r&v%0Pz49x0|vm z`;-~^3_w8sv!Br?M#ZDn@2VG{u6oCPV1d&J^CR&k;{D6wCB1|Jt zuPn73sq8trBHw*tLbVzR)kLV|xR7lXve|@eLUuQoA(HPva{MPUoz0N1Na#u)V%?DR z{gCuS{)gu8(EQ{$`T80%>vJnGHk zh3Ys@IgQi>6rj>^ziLW9yeeFk)xECza$TZhAE@TG?(e^Ae~y>OM8|2m?p(p`TC06W zIG=Vjsbg82qT<-`5zea%&a7oK7yW=mN02hYlqSZ5u`Lm)^aw?aP%=*P<*lSikCmtx z!Hi&J(G}jXOn4!ggcPBK6~-fwaENiTSTkff{vnODS^m&+7Cm%40u4*n2^oPX0*PWJ zW+hg^$IM=n)g!b}_6TDErWviRX>EjR+p7+F595|LPE|89vo0RhR9DH!n#*Ktv1W4v z0=QajwSu(#kzTHKdxZLx{cKlu`zpd+1X3f&w5>WDx!9(GsML7T*+;uYEtj<-lp4X*DZ^EjaXJ<^QhbCG!A|0iO9Xr3 zopRhJLP>Edick?G9^oiHRYvkz5vZyn+|<<*xQ=?H>xg9m2Y4fPK6F#(r5A8u0*;GC 
zI59HU@veJrpX-*+Rpsn{`rjwL-M)R-0NJp_#v>TjQbv=mY7y*LU&eiRA}dQH`dPID8F+srm=9Ka@G$6L0xw$^5=>A#J)Ev?p8ZH4bsjj3FA62<->)84c*b*CuKKK*Bz`#I*;(#z127-N4^otM`HXy~f{an>4I@n8c^Ifac>JRayADO`EZ2sNIB<|IDb&7*>WD30+BO7F$ zZo%o#?miHgqxUWjDy}L@!jw_kHOw)&1+|2#mqAA&8l7|6JC(>=41&0n#2icTI}&(f zV~HLlp;viRyJuuZC#FbE4PYjr*UA9CiV)DolOJ8NI1-uREfw+UzkpCM`wY4Acy?pO z4N+w3PH-WRg%idg?iays9y%3mR>R*Dof@m?v|09;Z*kunLq*bO8J&KQMFRUDomOCr zrn ze$(lDo}P3Dyq?IY(;qgXqt7Ov!c=Jk(&?~!Fq4x`f3CE2+SJ;8k8sl|b5xh1kY!xG zOoz)6oeuQ4i%x&4badLJnp=+qgX3owyCSq@J$u>H{S0nN`CwBVbZP~EL#N?UiF@Ri zefS9RNElhScP=`O;W>5i4`6c7>7EwJ&*=-tS832`U*PGmw>FE(s+e~Vhxk+`;hfeN zvvW@QjI?w5WSMaWQ7yBVO>6ksk$leB@vd}b2Vps;}j+%rz1kCLpx3%+XEwdkJAwOINi7i z3Q!=xWxRo$K2W;7pbR+Ug)E!%#7(YHa8j0=QJen#ycw?9cw08WCp} zadBwo>f&@a*UbmdhPaQn4JM{Q7|Q9`INic=>ab(Wm?%!MpAl3=D^3xF)-7!Tsa#Bg z1a(=Es%dh&Th~sgP%=(C?Bb!0Z2h6lwhD?OP8~>`CZVq2ku2=1;1PiubobLFJ3E?*R(>b58+M4fx zPa)R<7a)@*)x~1~Ite-gPLUHhHThW7I?{RyecyCzXml_!#=-cG`N%-%b24Tt=0XRB zjS0b_vMwLFuJQ(L=1gnxJ3u^XPkmEp_f73Side<-rqPs7iK{ow9|8@`%FE^qDiclP z+AJUCdcHcsoCRBGKFt=J^4A_$)_BYROcsIe-IbjI0F<*}x)`4y#+fARln;P*r zF=BI5=x2P_=};DB(z^q;)?7~YexskPQn(F|J=a$aPIE-JBHk3oBDyI?=mMS5Q}!+b z<#baSZ*@~@F$~D;qUaj7^4Y!URuy+vNqUKoo02$m)+jgKyreUTFJiW)+k(waR&_qT6Ov`z4k{utT&>3)?ou zdPL$+mjZWr+b`gEUMXL8F1O;R0$-SvY)Y^MoC=gp*9PfbN#l}DgLt233T_Or+#uSm zWpxZ%^C;mK-{Lu4H-0akyV>-yycY?0Hhp{Di{XkkeU7mE4ba1JGf&IY8UiNoy4`-- zTgpu=JWmFkh^O{6h+D+%Ho2bh9#BA}FEy(}aQz~ZCgS|7RPM*7^~h6(C7bp+S=qG7G(uEV zphKI%Jm%Y-?GX!cB(AH&rZ^XaRBT#L2FG*1v1vL5#vZXN%5hstHl-waVolhxM(ah6 zh0(bB?g5VDWU#lv(Q~8kH61Gh#d%_zeYBnKWt#K|d?B0fwkS1#pIXHmNt>MqBP4ek@>`_5c-!JS%okHG6amDF151i5>uWm{bCBEX{B{#f zyDFM~34un&rU|cmMGQz5D~OhlPye3O9-F7TZ4gBw2$xW_bIWT&kZ5CQcVRX@2)@v? 
zb9;R0Glr%QIT1U$L(@X+q3Pzq%T9&mE4-Fyv%`U&qYgonx)!>uYIwRz}>JV#Tx zKNdf2KAM&sWbtIAX+{@_CnelUu0v_c!Wh5!3MvtO06BmG3n`1*2&SpYlWAI`G09O) zQ+P&OM@RLWrldJdU8;7BDH8}P$J2E0_khWtraU2qnr`|hEUoiIO<6`wT@FzrCJ_~j zpgC%aNNUP1CqsIsrd04hH68A&T&+|P)f9cIsiP{WIZ4%YWYu&@4<^;Drd0BAH66~Y zC9T}OnzF%~TB!;ridW=gO-mv&nKf(L=R~G84Q*Ljd9c>BpKfcqWxBWnS)~bZO%Y@c zgdvT&rU8v@LcU$opFaZ($OgQoeA{p1fW`5q=WKS6vDftS{Qqq8eNF2FBmtW~Ii4}t zVAF$q4HN;MtvEpsn{F%fmsEmFGCG1gicM`s(%SaU_DU1!ZEV`LJke_GMxY%@$fmc( z$d^Rq5R+tF;(&uF9~gnYro;?FwO1_Np@ZluHSh-e5i2;C%7Js;)|8fX+xlJ+$+{YN z!(X_j=Qp>|il?|6F^I#cCKzN_Q}Sor$XVUNa@2DqY`ubOzHZW{*(^pgZ@knRwLpHuaAyk zT`{mM1DcZi2N^Y)zmqhp2*8*YLeqnDPz4K3FEB->2jap&0=71sljM!wAE_d z`QE-RJj-UrIK5&Rww`Z+bVkthUV7o(plLMqpy{9tj8=@H*P#Q+EdhGQ2(xJ0p()OM zn~0{7BdKVLH3sOa*=bo^T)5C^s+fFK#04N7&y$R$iBtvNxtQ7DNSa1nm^5V&q)BO7 z2WUPlP4T-lRq^pe|<6_f_A=_WiK3u85tcTsHKn0DxifV63s(e$&?w3|mvJ!p1{1n|S7phnw|%_%(t zl_>%i?HI5^LO_u)|FX#0p6OM`XZln=Q|akUlb@+EXg^bk>3^nBgHcIOK3g8P z61-(n0(DEcBVzPEXqtnTQD}O^M46UHK_fUj5Hw4+5CzS zjMteCZD-m5b<-?r0z&m|VZ{!saT;(BYpG&-Pf{aN_94%d!^7M`i8Ey+?d4^|yiU9_PfPA8DiaWegSGM(|+(Gy(|YTExkiNz4gQ{ts;$xm)4D)l6puCA49cY%2L zI2<4vDw)nP!O@|$AxWh==DERSn(Gg9s5m3mPm79P0Pw{=nL5Rwb0Nx9Sd23JO_>%x z55J|?X}D#FCB!c2qrz$lIz$p2)9*E1Wg6EUSEfuyrxUcwGIb2@BIdo-TBcz!w@mk| zBRiYIn4shk%B{TiL75?a_uD4kC<4$gQ|Lm?v@+Q`DE+NWS3}8(RM;A{L>_x55a-ck zn2}GK#<9cGIzMaL{bVQ87LS09h(96I;uW<~zE-Ql^FpT9Bv6`xlfnBTQ>Gv?6*lQE z#w$w=n;*I}a4L-@m*hR7QN3jQ*=gZDGCjs@NHT?$?`+O9%TH}G#XXM16hwAshLLs_ zQ_2)b*_S!y`@B14N?!1@;5M>2Z)DfE5FZX|{wjzB7r+ z*9SwdYK7sL4%Qgvn3i(X-crXTSQ^^i-~`_Rc`P38rTtZD_qAqJL5$Cs5|VCUO=B7* zop1uvj)}_ht~H_!!W219bGuhsD&8(S8`Hm_jj1^MwZc{qsl^*p4SH!%AIFq}#x!s@ z#GML)oO%_M5(HW_(lL!iB%#E(1DIGjGRDu;s}3 zz2IDg)rDV`a&g#V`v0>oH__`RSYt;BJ5Vq~M45#XH%yn3fzoW9%-qoFYk`9M1;MNt z_Lx00-pj9svC6XVd0~nht2UuD7^V)5=c}8|Ful7IO;L;VUet92E<^0K|#^^q_ z-AYh9`6*qro@>AhWsK0*t;J~E^}!2O&yUfQ;>DXiPc8*Td%(aK=9=_u|59e+GpasK zXhYRbi0A~u7y!kzrnz?33+3Um*lJZYi__?p5 zC<7qZU7FnIa@MOSDj^xX!~p7pU=SccYa`g`&LM$y5p+=!JZZYbDA<6{ExEGyA&h%# 
z>E(g@00#m403=39>Zz+Y^@(e}=~=9-k|H1ZEwV}IToFU$||qN$eH!#cH-v5 zpp)C&UUL(pYqMR66T^yD%iSe+a<|Rm&Q`Xmv)MKuuO@~VQ5DrtQ4tlrB_vLa5>=7a zlv(l0$gF$^t5ngaqR%qdCi9Id8!j`s$xY0JbaJtr7$r9s8@odtrgJmTnEBut*OQY$z)oK=3vp5lhgk&YN)9!!O{JQ<7l^JPPv^n$H(B{km zSGLw@T$e|`)>Lc)UFp*^uzj)B4Zz8~XD z^xL4NU;3r#x5MZ)y+=p&R=s`c_qEaQOTX`XCU^a9u`cz}MZYxt(sYs0FKJ>-0t5*l zC>$Id2p|9e3l%^fQdQF6j_MA{ z5KX2gX3{P0d_-qrMP^lWMPyZEJK1@`0Yf)2cvrEyiuvm05+_EL%P(?Ch@>tFeI^t8 zEY23g{GX`EMD`(ZLSjgnS{C+b0Rh(HPG@mqOo>6&IbKO-M$S0mE}xkbM{_luJKCJ- zERF#+r|Fk|=_C7m{h7XcZ&yE9>9=f%SB#e!G=l+N-@SC*H59;>eq96w8XEdVP&lA~ zemNQ#%;a#Qd+Qyf%AaTlGyU@Pd!`NX()24o`ladP>Gy3<5Am|F~p8i zL%+hsw+Ir@LbGE7`g|pZeROl=d&Q79FxbrL2GBuRVTHZF^gHaaj{YJ(j4F6vAm_@JBF%moL>569n}fFVziFzY&sX2iHu%! z>koZ%BfWR~Z^kt&`Y*@#;_od&LJ_m-h*d5)xz_zu}{2)I4p{4r_di}I^I#G?PRy7ZyIX#_2OnSj084%DE(cRRssx+fF zU{zm2mh*cgxI8=Ns_TSj%)w-I=}jO!qC0V@s;r3GY}qGY28pU7TB3C*p{i1w>Uys$x<6Hu0ie|m)tc5e=~T7a6J3F8zRA!W(%G20gVEGv zOV!s)C8U+AbvmT!TTn=&RL!h`rR==I%ieSD(%KRgP{N?NeLS)1v5K?(Kc8~}6aux_ zw~=>5v9(k1ossx)Nzvl;vFwn?YB6+ZFDX<)R3mE*uk|UK)}gA0)rEzsBLsZ5nW*vB z3|^SPgi=J?0>1$&7~dsC_4jy}<_a)oZuQPDNss#B&~7=aO~E^Cs6Hnw zDOVxY#;PWuFZN(Y4c99%RnXEhpmhX67>0b zYN!_imAr7RgWv%G*+wNwkQDX-8zzME-*Bo+Sg3yTL8@R%s#J%KMo6esv8=(#DO7)f zu4zmJvhOO)AMRWAx$z5Wr9S0Tlu<@IggZmg~a4&paKRL zjzdj{xOGtd=Fl6I8>;zW=#Xh-0*R(!xlOKt7PbMJN)oGs9~L~-4x!GRwTsC6GU$K{ zRcbExL~`4!`~z1iVWLnK&Q8(j5Tw$!f(NQ~Y8Ex|5r&MM58~Mg@t|tADz_`3W^yec z1b7mC$rsDwx7bGfU{<4ExvB|=Z*CKd_STi@*h0AHFs7$q)<$$rQ2my=FNm`Bkj>lx zzymrECHrU!Pi!Bn8r6z=$(pzpu{u7>Ya}PAJ`QzN7=;!LhMWkhUszRI`CP#OW0ilT zOKZP;%0&h$K|q!F$)}?4HBkND4P)eVg-?AkP<&WtF@#p|?;apll}?aVDz4gbFEf6N zl9Y9xCuhHRG0HpKPL?PdQRhN61{9S?oDmw zxkdirQLgx5m8T=dGy&D`UCS1Xp&n9;28s>QuDI{732UdW%MLYSDS~H=sdW?w+Z|*%D>?HT-um6)R1?1FqM6GlZ(3vS@FsDT<tLI&VB`{WO4Q5&dP;qyC+IwQGbmloA1zEV+xU(Dmc(oRHK$I$`j!x zgGl4^NMc68_`zLRHzl5h-q5F}Q7o?PU==g9twkRp)DUensvCa1OjWf>_E`r-v}q8| zGWGV&pt{8p0|0U@KyI}p5JgIj!z!rclc{rTR1T(ZM+lft{j*5I)_`clnNbuueP0?G z0EK)sC_$n~f$W|$H=|v3jes$ggFKBo$r;B~(pvHFsiR~xmMrfC6ydh1VT0A)?J(R^ 
ze+PcZH4MdO^D7=UQ$`wd5wVg(=2^l5Fe02nwM0br(iqIBhM(-Oh zU=;y=zSNGQ1C=~M+(D3H?10Tiq!`QYcvF~ZE*%$aSzwo>r(PA3G{<^Nk3ug69=!ck z$tpymYk{L-ir47~fZ&ykWDZ2IF16M9Z>yn5WznU=7u$|78zl)IzXR+i0@n66tVZ8z zN9Y*8!v_7(3S>?x>MP^dI|H-om+xc??Awf z!>3J}JoP{`cm+OS1;rL#8L1scf|_eJq!b0pH+K_Yg$iWq{Y9(A$&|{n-fP&Y&*a3o zL0wE(D%p^bn;4djpE(tKWM5+@3wG5EqmszRGi8rRy-++j>1!aI$~>; zz^qh-OMDjV_ZkO5;$-&@zzAte z)(Y`XeZ*WupTJV;v&TggRs(UO12rSFlIl$eJ@W?O0`#f9XH0gn^ZIXy0zx*_ zv`E}0z>&KBa21n|#CeD8*~l-mXt=elNZsqO90x`JJk@49bwvrj;ujD-Nqvn1Vh^bi zWQ@aSP%*#O;-*ZcMMSp=BBX9#T)|`^ao#<9a`Fc)3b<7hNZsqWT!|V4`@h!gpCXh5 z=49gH$jyj(xU8s*#2GvIPoaNv-BV|{cRxN*(`0(KQ^y}dw?~xmwLLGckh~zOJSx$E zfv}jv41twLD;`5oc>9iuY-dztX3MVQ7~8xtErZWGZ2k4TGw@;yi}J$3fRBt9+0i=z z8(@?dh)|;#HSMlbKbllr!}-xHa`*+M^&TAf7-XYLIT|^N@hV?nEo>QyVIW9Wr+%LE zZE035ErF-1H<-H34_OzT=RXv76O7#MJH((G-zBJ|TvW&p=oj6bS=kqD#AOi8FB}$b z=3|(#tD{qkg6$#|=^+7`?4oKmXWE6+*pzkg=JXTQ>Y`!6hCY5#yZ<@$3p#U9OEYv4 za3fuLk&%=}OW4}8Qkec0FX_;kNkzp(?V|p77MiIik147HcUN4Z{(NhHUINpkKL0cX zrO2qMu&@Rcnk*dC2KxPxLMB^F{N|lfsJ?%y8Rpa-_VzWMo)bpplK;dNV#*7P04OoHTVjzcv|8WJlZqunYeP$CkRf2t zn--EG>bYQnZ5!kjO`0H7Y)I)ZatqkGQSsyuLr(o}`&0%mP+kEjbwOe0Ld|ahYBpH7LbNqG-nkNn zBW2MN$|rjll{a-VVQdmbUN0XK_=A}!ixZSL_2^{_w)J$P#!72Qn>Dh{00EWVGh?6R z;#AIl4TQ*Lc(2_ue-8UB7%&q1(4dg3%LUN@f%^UooamfPn2^Q!m??{}ZKycm2E#af zNLh1zD>JfjM)IEv`nXh%hGM*`LnYgIWO4jxvv?jdm&>7oghT#vl2Eip(ys}y@yB}E zufFAF7HhWDoQ)q6q>vAxC1CYdJ&(l-?x}sPaZM!(h~(+ftDE{6Nm)|KycJKFb0!t) z&DY45g~ta;&AB$r4w6NrVokya0y%E#TWQcme}o1H&ushDmL8iEVad788;p<)!Vyvt zx3EM{Z^douhn&EcPHEaJUa*zL6tTl_WAU+>h<|DaXX@pRixHs3o zgcMAC9}fajdZ-}^bwH!q)Q_lS>ulY5`o;$-oOPa%<=ykQ{< zpZZuhdrt(?n%@H_mGjHPsLqV*#dxrNooR&_>y)`svrey;bkb-)2uR;~jLu1)GQ z^$mRgPEFjXML2)A7gtl5MuL%q(*|{5;c5ECAgcfX%{z|(8wyhIJ)t&G3#BR{=<9ub zrhXK();w)E;1p~MIK`*{r=|(u6zKn^5(0>afBQEDeMrn@qs9GA;|C6?%o1ytu&)q* z(~I9Seq55>)PB=NK@+%yw$<5j4!9)b61I&cF~2FH73I+1$pJ127YzqkW~Umy5e){M z3Y`QP0dNMC+3ecUovMh~ETT$(Bh~gu2shhP!za!&03l zJ*-8p|IS)@r*ROy)2DLpo!0jPL=ZviFgWRc59w6r6JwZkKE_YfppNb}-J2h(5ox;n~?sNtfzx|}b_NYz; 
zquMRrY4qB;NbR%}@47i#0p|}~A`a%wE(ISI)Km=0or5d+s!b3%Xf!p1RYvWgeJC z!rf`gQ~1lGv}9`!eAh+;v*_*gx$;)ixpn4UwE$gx$4*7(xoB%w2~|%fJ0+C;@h`6| zf71lqvCJ;-0okdCoG&~2(Hi@V zGdyt_9+X)olK?(*wo`J+wo|7%-cI|{*UTlWwy^)C&Aqg8{6E-K{L^DNTH$T(`8v%P zpg#~d6lH%oVSmS#t)8KEx-ad}gy^pnZ(>Z?;nr!!0vg|bq9)s5snqa10cAdgl%C73^uk^j72 zvz-p+Im8)j3Zo`?b|G&(pk=}InMmU}WuxKxm~mtqR3E;rUR zl&=9f30Xwc60W)9T%$_$(t~1BILNZr~jWfZenV5?>raq8~dp?`}VKDm^?7Z$SR!V|xEuooljF zL(PKO#OzeXguxWrsSPOgq9sPK!vX)B;^r0SGIna~Ve_1AooZ(DtRS9%VLBlJ_H*`UgSX`w!bG*Q z^D~ka4kuDN&22O{SvnPuXkMcO2eh03dVZh|8ej_`1~^mu*t2mS|K0e4oP0OJ7a0Mh^7|Lp9u_u(!{kMHZV4<)_RdW~wQ4hN<8Fga%WohAkwdcQ`UbaYgG zsz;Ua@4XZI?89FD07#Xpk;>|Tgt_bMTW+ZODm5TsdoUYp=)r`F1|+n`s#ct_s<{CP zM#E51f1bS~7@+C1e;ppC5cc<<{k}=Pg9PC}`|QK;g^-{B7vhFHNWFuEk>01&?6VcS z&tZ6W_OW}MO`|qknDv7exA*FbVBk7e2)W^YH~Z{%!-YP3?Hd}>nD!t>Sro|__7KkA zXQx@8y=W8*vMB3A4fmK;)Qs7iXV+b{fS>x(w8>v(G>eXc!2XeJDA*cG;cmLJR|_ z2f@J5VQAeDe<5<^V#h&7#7J0sMxo15=nRrH2tWWQog*Z4v-e{snY|BVV^hD3hKKtym&!QQ_Aok!K+1x};A!tZ9g(!+nAqFZ0Q6Yq)(C4mqqd4lZ%!rJ{ zAkpI)874@gq%vnbGhzRP?jaL1o&C0fZlD^72D*Wsz0d1-pMCbYAIv@jRS8R^oOz^{7Q}?rstysViYG^m`1A8^^P9urT&Q zkcFTPR){jO+1c5{EY3Q6k!2NBJ*LL)G}n5LefHUd7{wl)kQ!);n;teX)Kn_s5r>M{ zkJx9Qz3y>^R3E9}kd%Z-kAY-*Y#0(XM&OW;hbwl--3zn`fRMhEVLIRtf|FVK|7!C=TPOE-OAwgH^;c zEB-`ZxC1-m(3ZHL8(K)$ZpQTb13^E5HCX+DU@MpF^adj&FJd0!k6r&3(X>!i2Y#`A z78Kt`=xWPj;v4@15DU7xQ^KP@yNiN$ahSq#&~gov0f1RU;^HctC91>z7&oa9O?Fv`k!i4#ep*m|g#MjWJAvMpImxsdY?*l` zu}iu{+joy`X^d6DT!{i2%(B5F8E=>vvmz(e;~2Q-zE=<*(=_J#nH})9t6m`T^9rTV zxFKA~`R378`Sm>x-tqm?@!)9eK__qwI7>Nc56rVy0$i^I&4MFjnDn1DS+4Jv&lkJG zo^aL2m5&(#cxeyGhJ(DO0+d%W1J>GuVoew$TonS1womS~jn4h~3f^Tk6VZNH#}&HN zs?m^SnE*c&J%;C(68Zp5n<1L|p|z^zQ_$eQXP?jTysQTmbTh8C-SBAk?+!uj-^xT^ z`W1?#3s^MW3?L*J2)ExOna=-uSW3)M)3EPL=>2EaIGO%CuzoGHZ6va+swr|UrVJpi z`#)r-Va9!8kIXNeJ?t7*)m%WUMP*tcV?M1Q2V~mrwpzu7wXWC2P_cNo-aR`DSDa^s z>(l8Y`fA=^!AW~1i}_SAw(`KRGOxJDsXWUAp(UMFDUqL^B&<_-qUw!02FqVqTF_T@ zdr50Bwsb9`IkL}H6)`{Ijyk79uC+`q(8=>(rNsfX1?#Ogx?jv6WwqGqhR$4IJ7T@-KBu8`%(Fb;gA)ek0+n31R5fF& 
zFtW;36viiwH`D-tHYG=QsML=P>V>7MWIlo*ZSAl=C_CXgF8e$(j;*?QiNaz4@;plTJ^cdvU@q2=kZYZp z72H*`i2f9&nkvg6Jo<}!Sb22yNYJ3la&*~VjS`0?-WH}Rh6JT^Eq??c)?p);YWxv6 z%mNjUQNoCI^q_DWV8hCSPcZ;@K#0F@)!LxKsx=USJ8k7ItBD`i-AmJmXTuHY;tour zE=-A%*GE|F4;`3+^b06e=Gk4sT@QnL8|rg53!Z5eQuug~I%!xFOCRa=k7IcnW9g;% ziGAd?mQ>eLn&Y-Al5q& zZE%P%HXhGvx+X0a(}e*@L!J{NEN4e`TP;cz`>3v~1ryf)AuUiqUp38(Iko zmBi3W3rs7tGNu^NNuw&p8-?@Rme!ytU>tA)2y@rI1*{r`0`cc{i#LUYbMXE8^ts7QwK?&8c?^P zoivEZ&G!1z>wT*<7MBynq>7eWQqtXapbF`nR>5#Fy6gapltlk>3g zsgpD-5oR^CosMQrclOkRk{Gs2z{agL3i)|T`~i;u=&5Dsqa}ifhlXOhWN7rKJK3m= zD`V0v(q&~Ar43zm4(O?QDD=@MCs$)u~dcP7Qd3aT%)6bgBF<7COcOX!62z(>8{7#}5&tTk|;r)G=-X)9lRbTj~{Y>5mq zd1<^Hh&D5s#elJl$#R|}oGM;8&kI#oIa{b4lN6Z*VO3QKr%FPsbddGX9G9~ z(pv{|;^V92n5>Hi4CL)U??mf{mZ|q zs`xUXk6vh{;xjmPm->k{azclQN+h=bZqAg{=Ahnduy)F$a7Q@HCsAq6%`x_ZvgWU( z-6xEIfC47z0qYwKL#+g48Ue4%q@uUfVHzpFC`igS)wDX>ecOgB53sUh8+Cy3o`FR! zjzh-E37@6%ltrfAZ-oF*noL`wH0BY?PH>|9gQ()sHqx*&HF|15FmMEcrG7#d&7t0} z8pl1&o~2eaJB&iUUqBJpMj{Q{YUIzcQY&DQBm#~=ezmiJl29Ce)qXU@(WD>V&;|)G zdJ@zh%*1omL^h~W<*|Ng8dz-5v1*(oH(_6eQC{|x3ZkjzzQkA|I`1kUogzdY0{rTU zoZ?(Ncq1S%cd{=wmP7mjTEz!thQv^v%LP;oi(#QH@11t zBIfyelt8}mBCyY`tfqO8hkT=*Q~TC(Z=W?peXjt?7T=-17>T9*8FknV~%Uyz{kg<&adGLRKziUHvaUYpkG% zK;Hy@RaXt1lh)3~GqEVz_f|^VFmmqh<)0vZxy&~9`xq1Fp+rIMFWng60OsDS}Je#^6Mw+ zBT96l($L*tUA!@6ThPjufglWmleoEC57aD{+c< zYW5!@^~z$4j>mmc86@Cw!!6}@lA)v3B>~k6Ypzr{3F>ZxDG}WfZBjdPH`V$OsVt}I zY8qRst&I{oS51vQ*O!RITUbos4y#sSQ*(VdH3?ySS6fArHHhWQFLEU$S4R}romQJl z(Ho|3iZZ@N#y!%sy51SjIu~V>1o5$}EwJRHRd}g&5)Dd1#Kg7cV)SN|w`4AjhaS*5MsJI>mVJcTD9#NN;%;oAG9n0gWA^4jbsZ=#zh55E( zp(1Er!(O=&2I{fJOR7{V=2>2&g43nfQ~Z5I3q$t%06UUXeymzpC~XV3Nj+A3T&AT_ zF-!B_UJ^9}*545Lg2RgbwR4K87g`>+CP_Kc}r*r6@3RJmPaRHjgi z!Hhd%rD&*lboy9505n_M@K-h6?#d3*tT8Ij8l%b|6YWOvAMVJQ5Lg+HB3KWEZpi!I zdDZWxi%R7uqingk=YtQu7We1;2Fb{Qsw~JQUDS>q`R%Gr@4Ne340HwwZ}9*{m>3d` zuq$=97L`^sH>NA6NVF+QQRvI^R81$vx4!5X*xooLW#_J-+TU#nBNTPd8 z2mr!_3k;y84Yr+-q~^V=QX>+^7*t$|_@=@+t-H`q#6d(aZd%0vC=R%|zP^3!1)*s~ 
z@Ce(t_MyBrS4sYLb)QhX(iAp+4ky+YyCSm}XH@m?a9Sah^zvsE>vqfnkP$Q(Zyb;EXsjv3euNfS)~mk$P!C zC?*{hQWBcxV~V(-rf#TmS$70aO4HH2wbyN&~zC05CZ;H#1~5 z5dh6bzCN`%^nTJfK`XDH+6wrSMjINr^;7GEt3PR^015z9qrm(=fCT{6Qc!?WRKR@! zbO5N9!o&gTL(~A0sR5;Nx&ulh6#XLwrFsMc1OfmsGBq?cGBFGQkJ{>~^})5jG*SQs z@TpONel@@Xd}=9>zcd!G-KUK~%7CIID*!yL0ZY-u2AD?3`rmzhn%f}600004KmgME zzC5+L_b-i;E3cl~$^!q=DhAvF09Z6LGdDCdJ3BifEE9mB!w%)40{K{-may}n0Z*;p z56H)+K;;g@`r~6NGCx$kz%?dI!2&$BSXyX&M+f9%DG*&%1HgiKu80XdwemAdZv$Lo zxPQr)r@0LQBmn>*HZv?D2mlNJ1@f^u$rM0<9;0C0!U-z`0t_J_LlJ<*Q_&9%1ErAyIRKto70?QlMv6=sA;42R0KCA< zfGj2z2HgN0PB0Zk@PN`7L3gDO zsm(!R3QSQ>WE$XB-Uw3;;kV0001Fq)LIXU23n;ppf{z|L@3AMlle^ z?Eo3lrUxgv3i_v%Qc5XhjtGvzj%)+~(HPbN&H>B;W=dB@5VVj(h8(G3QvAc$$Ha3y z2(P4HvS(8(F({e3wIXf*emy?w}KarI&AJa3rlatf(5|LB$)6-6PtID4kkv5_XB*U>BV1UU?FqsSuhsM>;FYnrpKNqxfsTJ(_ z`O^VJMTLUNKs+#@#Q`k^o6CX|K+_=sGFco7Ix-#_9vPaB=z{6UP=HJ@7mdqCvmlye zA;YG!srDYj56kswB5CAHW$wfn@gD`+BSfF6i{I_vg3KpP5Ja7PUL1DrI3nCWFWTJ6lu`DnV zQMsTrXrP#=s9ZcyG*~oekmvVnX$}*GXlMuS)tPZlpw2)ea)Tviy%Y~h+dZ!f%N`wmu2nQFK*)%)aX{VZp z$IrjcZYEcVXozUAS4gc^3c6kUsLs^S;{K!%J}Nzhp6u3i<@L|@Vmeq*R8%A&E-Dcb z4Hp{4muY)|0$9v;$q zuvb<-RaSM|m9Us_aA>%on5clLOgy-VR&>R^rz{GELRo7qN^bJ1DzCQlmz$9wn#Arn zA}Y7@m+g=yln?oCq)9!HMVFd5)ojzVu2gz5#_z^@mmtA0GR9)TSTGih1!Ga?ykB|o z_4D=nZu`Oe^&!sB==<)E+W-9<4ZptXt1Y8)#ezoT;(8+yGF~Hg&1miY%I}!+yYZ_X zyLrD_JM0QL<_ugi7)aJ;vOp~i$AT~!Xd{AXk^mk_!y|1#l!k$XV_`5!1A@71G8hj@ zx?IS>kN}x%E)dC+9yA(ag5q z_}K8+kPVLw*^mv{kPWeq4coE}*^mvJ9UJ@Du=NfTd~~0mb)R3K!~5~Pe%XTRPmtGPiq;! 
zrdCB>6|KHjO||wZi8OjzO5b5$Up{AV8C{)~xa0lH-RRxt+;Xeisi)huws490@7Y@@Rp*!tn_y6L@}tCEPjbBC+FYRj4T5y4n67B@a$Lo~Z} z^G$oZk4XCX-JQ+zwe$6e1dr41?(FpT_k8XDd+l|Veb*g-bsc{lAI;bA+0*ym9KYk@ z&<{Od-;*8gKXZ~-y;m>XRaYTaox9h$yMK0Wd9~f|*3aGf#pB#VmmvC@v8@8OXXvYv z{d96w5|LDhNFq)owZvYiI4B1Tht@018AZBpeM>1vZ?D$p6Q#$Zja0OoR_YrrtJhP{Doxp}R9ewLd#C9UD>wCy z)I#4*xgzXnElhh{AB#GzJYXOkTKXpaf$$E+i01 zG_bHRp=rrm4>F-NEQMZAj3p8GSFnAfih}UAQ&kq z0M57roeR-V8$Td;3QW;>v(Bmhfso+Vc~&3q%3j$mt_o~ml>>d46I^h#hE{I1(ClGi z(|kV)%ZA|0VL)W_WP4*fhM`JJmVG+HTaY6^cx}R9|ctQBRqi<=Njou&rp_oc{DD?y*RCa>OJ6vigx?W zValaq5((YJQJWk9*(}>ZB{0fcI2@@&YB9l17E6twrQhiv=J0_N6eBO|P8{^dOF&bu zBEdW-bh@RTW(5QJt<9xC4P-hN%@8t)Jq_gZrxoO;>hx#Y&~-RdxdOd|AQmrA29{3e zk!HdmPl|_ZpD^7@(yD)WxO9`)rzPL~mBpotX~t8+la3VNOfSMn{3nCo$3=7@Fmj{kXQ^D7dLu?o8aAgF6%-W&>p|xYx{ZmLQ)*v96rDsCN!ESAI#}R>x8BHYrkZ=gfQ*r-I-b6OG3%W^h;{~ zfDLnGUp2AxfMLdhWMBW$|J^MTsi4}^I>MmCEH>CQ-rmLY|2+BF8Tnz6DeN5+g$GUM zN>J*k&llx?u9%7ehyUybas^%=hlpkl!*l>Qp=#k8vb(_`f_I#O63D!Hb7*d-`ecVK zVmXIwqJ|t}H3Sq%yEk^@s*Yb&62S{}^F6)g7g#P?rm7<}MHlPqn|z z{9GBicjh{U5)Z#JDyqnkF-`|tSKEMYznWY&pj5QO^HC5L^Z{3>8_+nh`T-gpxr0>6 zPkV900CLS)AAV%&EkppG1ZT3om;egdFoLR{ve<{!kOK))Du|fLxdfFSv1FN`ri78^ z3A(ZOea4H^g(&YKaC&hdO~YF2T&k6(|{PY0d!O&YDFA})6!GwC|#ZwGE zNd7-@mal3xhz|+@P;~!J(h%+nAR}A}Ai@NoN5tmGc7OJP_Z~ez0nK)bK%A>J!G+&@1*6ilp9xIzn+RcsPW`w_5z(m?{QE zhgd`pO@TWwpl=u(94x2+&Nt#S4@qKkFJa08M7sr2aN#aT$;33(+fS`OTkh(?`Lb%H zcs(Y_PZ6hm(gT2%L_ZWE5D2epY*$IL>oT7&4iIiI?sma=w<00yE`)~g)S(o%h&#B7 zVZf!>U|j2obKI)PC7%veD(73;PsRk55?1lgF})(9&G43|Lln61tx+L5*q)5$JAj7T z4idqz_Ui82kvDmYnYlTDfd8yKQf%bajFAdmvv6ZVk(Br?e|p@CY+I*5p@@&KMMa`n z6I2WlfU5#O-?1yBDuOGiA~Lj3;c?i=NHA5Nc}ds~*r!UwPU8-PW~LNRr%q5G76f&| zCe82!3}qyN_Gb_p=l{vkBo(DCV&X73Wvlh(S>5f6+|FcpFG)yVx97*#WI5g2blw>f zB$Nt%+iH=nc(w?taM^p`<8s(L390wxqPiLUcj7&jjn2FI$P)V?-vi>1s(2{F!Y5=t8y z<*;#R2qEn&I8341D3X;aaN$H|>v-5I42XDlZboeQlVL_(Cf)(HbwFGx=FR+jz%G%w zqr7>Qg9!g=Z0k&gI}m~*s{vXHcX#Fmq*NqL3DITAdq;w!YVz8b7)3FZ+YoVZFQuDyPO^ z9*Jfw(}rOHO)_!25~I9Q^28=6+gKw4RNQ24E#*+?0s4qzUVDDd$(1t^ohJK)%c#eJ 
z)D(AIBi>znkZE4Fz)|)x!heS@E=L}>7Kbx?k(Yenkxs6h| z$k6ynVUe-;k8^q4G+WPcl3=pFyt&mfwNlU+7q~MMmuQ-i`2}+~Pv<9L`bl%k*VG>( zJHZ-jG589G6^{R=1y0H(m!KB-pP2k#CL)3VgzG1wh7KSQC^PG>0GMu++|Mdc&(jtM z5Qqw!R6Cg<56~gi179kmOomvPAq^c{oyK_Xi4=&bQOFs74~ z*vOJDt;WH&R~i1ojVB4h`#ION1R^?iD!}pzqLM|FqVU7apM~_s%a>U6&_rq z1e@GlA}>M@E|v$q!Rsw*1}req)Mrkqnl#V%Gt&}&56h{b#JEzoBNCL|CDjR8+1$#&l3brwXV1P1ZQ+wPK*#?Dd^SfS$atFx_)cTmQ>73!V zHP+DH@f5k8dvNsClV;4pX&3X%ibfNfg#HO6-%&O;N4s~Mzz3{2OAfdpuhj#`n2W3f z^@n%$L+P{NB15&Cf~+JS9!tPpI<(#*HFhnqq(nO!%=WH_hbmb*(mMBokHV7a!H}#{j00hVpEDiUU26myQbk zR5Uf0O0j6<5d)3(RGU}sOOgT3ASyIKJ;WXjK>bB`9k7wE)t5ee$r&Lf7K{2F*Wd5t zHgOw%ks8p=v-;2ZRQfqJEx@f@VoU%}0r=hB?b*BmoS@L!fxpD3k}|jdH8#AovrH+Y zsHRyuiwe4HHf+0clMz60KvcGidaIL+{Zz+9fUM#1YGn6Vr1*C>lu`;dWIJyHOKoK= zCECb6$nTOsEVf-KhZd8Ouqxapk6h@rNrO+?PY~rP1AI!w@6I5rP#8aYnOoCyvRSLP z6B-PlwNX$T+bbD3RX%Syb=Kyh(S(|gDu&v@<``odYV~%;6k;}j7;sMi+?9(H=q75P zi^E*7fqTyNs0@qW5OZ51o9?V7vYJ@dAW8PLY~4h@yt&9+uK8|Z4iUd&ZvBIqKd~fm z$Y(;VOFTg%333v&wSV(nQ;6y5R%A-LqjwxY17qF z{z`kD=OQeyoj9+)4^R&$zGw2V|KT_|h=!|Tsg0CT_mjcSg`GK{nB$OdWn>@zj zz|)d7$)GiB%iSn*g@p$46EF!B?sFe2$qY(Z3zIy;9SmQFrSal{IWXWT^1_t}+teRK z#6tB?D+(s7|xa-KqJ!yF;BLrk?k$^SwJSf`1Ze>JlW&qP20%|8)6>_s_) zHT~l^unmQope#Lc)tlAFj8b{#;I|iif+iZ?5)F57$5ZW&HqxW_FPf?LkV{c>rnSDF;KQaDmQ_9$$s#r0P4!XNa-?tB57gQjE)=ZGB_z z`M)EX6@pJ}bHOz#4xP-JpJgQ^$RfE%h!Ir(MX|W4*;=P3IeNyBM8rU7aRt{&$B#uU zxA}pDCsXnrEkh#@w!V695uz$iqO^&xM)w!yW^V5p<6kESQXE*iNco;qiZ;9qDU!%} z>zQeK!blj%i^1}6| z5*nn_;vGpEoW64rW}2F;gWw9zct5yhLo`e1G(C#qAYox>lSH{% zBDv0sMz2b=OmwCdnrKKm;?oJs(8n;x_;l}YH3!=rrFHT_4Cqkdti2nOQXUa@>l#fgc^2eE-gwHTyCZzjj+Y8JF#cx zTU_Yb9%R~kib1A0v`Jw4(M5*@q>~TQBR|>_+ngLL12X`HmglV-#M=`7Zl#2dix6)b zOUKpTVn`NR?1v)1G`QsN4p)TJfbcUa5sSSAa$9o{qX~1K)B#^1#HpIpPAtQH#(OTg zqNuVCnHk+A@iWb(pu7GpuKZK)i)aUk++Q{p%q42Kk(sB$pH;>tNHG{F4c2)6)G@{* zPGC?0z8IOLI58s-k_<&ozA(=!JP?t4q!sdBEu@(JB7|H9$fGJ~J@_+sa(q4IuW2YY z&isS-{29@7rqS_v7~L$xl>}bPa-{E+39G0lg)^e*`3gN#Dhl^bouG}HX>OC~Izl6M z4FrwLQC5r0y0Ru?$A1^03Pv9*YMqRLcmyXCcOW+uf<+=3+=JpB>CEUGWJ=tU%Tt%| 
z0x8gvN$xWk^3xK6i-8AF>B4+SS2q$1bcWf)VCG@I1VQvL2Ei25fuK)RQo8^@C@__@ zVMJ9?JIJ}RejcnJ*wt6I@&#w4@du26Ls;CzYjRFy0Xq)!yXEi^uog7i4qK^c(jjM6 z4NfNcP65;W+q0!rocAN$BY(1Ls94Qni^EjN!q$uHuCj676O#bH=DRh7^HK?a~TAXoDLeQ4vk zDRj0hZl?Oypw~X_(RuF0>scd!9@gmd>Bp_96+>Scd6-|c78h($+@Ur!Te`1{CY7;w zniGn^AoQX8wb0-@AXtioEmO0~YhMu=PpY5S+619(%g=el*Oyt~9AJRoL;{2J&nQWt zzZWN~!Pnl@HBz9xe7b{r69-BJ7@=Cs23>;}sb8|Zrx~>OFyT6cN5~6oooEP}-X->u za$007cR+QB-ZMA5C>?h+i0yD7e1Nukn+z%8x}iQByMy&(NQpbxaib*K)M4(CmQMUT zSR=dK=q+qH`e;ppGLo8R8FOoeSYcn?{sw-Ac29Kd3$GNM&F)P_7a|V5qnWCgx4QG7 z7FGXqa*FKJr;okO(ayQ8Q$TG>BXY7&|Nm{0Iusm7EZFnbqOwP!E2J*2fC~R}D*M2M z^W)LY?#03j?(lqA4&Tl0olMrEqPdMcm>+X%_x$5<54nS;q=@v>ssUUow^u2)KsjL* zu^$WPY*VY`J{)MJRkW}S$Uj6Ius&VaiaT+RL5tQ^@nR+hB4R%EGJ`=^W%p43DzQ@8 zJ!fK?171S~HZbgk1DTmzOj7l@^RN@?jF=iYNA>{h5nG6??yNgw;ZO>jxSv3czt+Zx z8{=!0HTpznXoP`P+Klbt(?&$iIsx2Z0b24n zC@#RtYU9vErDj9e3DFVzsqtDM$Ke=ZkpKjcB#1rhZnB#^)s=?Jl&dI;q9_`yeFD}3 zTLR0cUaFeMX%?5H93e~u_$l>-(^?CPoMw`yNR`)Vv`t&3JI%9V=^}NS2+vnE{c6e4 z(P{kEc_XLkX?=1u)#y{=VqH!%nW~i}sd<_N2ri?8#Ufc0GAbNv0tE+%BvR2qZ7fQK zv^FAY!ek(ca4YqS`m^uzM}O^;c)xdZ-)QI4`mdJvv~N3&hlYlT z$ghEf1`zX0T&xC-1{TEv2PPT{Ox#l_#G32_2AhF^hXZS^#f;jcj3kL6)f@duk;Z~EvpA!LasOy!YutCy1cilm=R^(|KuX1hvB^27yA zriziD*_45Rhl)oA2h0OSiZC9PN`!F~vl@sLM3~21CN(0>lLf3?>f=7p{>AG@x+601Jmb1f&HL zFKJjJ3@#!jRAA6iQIQ}B)p{jGcF@GWkxr`Gsd!aG=*nA{MH$(uwL{LwJWczz(u!Ck z^`|PQ#x+T+4qc_|g{0rfaaz$&cT1&$++6C8aBV_T^DVW$ro{(ufW+iiLK0EoCQqRN zbfKX^1jH_tR4i}dfD1~Y2Q(l-LxVzL;o@-hZ%L^sMUr9KN$%4!?RKcmH$f>2*--)f>!unJE>f0$(GiU1YIPpM#t4}Xw%&# zV-&srvr_$Q3m3BK7v9vm8rtW{k2fLT+vp8@IfT&feY#DTYo;rsJ!t}6P^eJBbdW%B zD419jEXkxY`G{a1Boihwv1nvOHY$vgWK;yOIA9P$#G>)>Fp*FsDnm(>11SyFkstwr z3Y7?iSrr5f#UqL)4MdX-iQ=HFj*27kkW@G(9Fk;01P%oeqaqtRK#Hk!OcYEcjKYcs zV>t+nB*H~SF%`8a77wN}`LI+l4-p9Fp`tV*lj^8MBpwt7%!9{7sR|1d9VBXjbD30S znn)s3hKh>P$dDleBZ+8ODvE(~$&dg+24+<%FqMb|%Hr^_m@v~sQ7jxLA`-b3NN_M= zkwnzT6v0?JP$rWN3WssTkIBTLtd0&3Mq-T2!{S&PQROVC4UY)YWRk&DB94Ov5X_=L zD}%B)BAAB-%CST^79&A)*oY_vlnaYuq*aM%d^ifjxm=KzBq;~7a9EOw1p-S)WJQZI 
zu}m^FJ|+tU!?{Q%la0zWVH%9b!b}iU)CUA2fh;a!JOFuskB`GyUHrHhI_My$fzS|w zq*Tf+HRjUVLXuiXQK!t$n<=^}-*aYqhR2=$t&iL1qdvxuYIlj*cP~*Q8kR%MNB8gU z()*F({fuvEkNRqOnUAK<`-YJDIleD(IB%a8o2$-RI4`uZ?-zw+Mg;*fky%zLx%4j;a?+qQe$ zQ$VshA}o$c1jAGW@V;j;57+j1+3&vdQEthiSSlVB%)>@MKE7&qoc}-pifk|iBtGYM z|M%?nKX0nMkD0D+YAi?KEm-u8};^U5q?hb}` zeboh?rhfr#2gCiHVYo}vIL*^!T$)SsAg~I=)_I>hGb@nXR=WfUnw^GyGb$Xp=rp>i zh-mnyXz1pLxov0IXJ^;^itD_n+}z$m4061v=;|0Ax~d_>AO@j4r*=-`G!BNwX`IG! znoHAMnx?rl&82BBjdN)(jdN+7OLJ*1&E@hq&Eqte=F&V)^IR@X<6N8zCWMbqxpADv zxj2mmhy@uP6&CCO0~;(Z=z<0iDs13@CM-ur#$+NvK{&8r!UH244kHrKw2)Fv6)CP` z-1L<=RqkYu_)#-{#1=tn@#@aVx_=o9x}*`t99mzRcW{L$?#8Ydy+k7p;3~oy7VG zsg@$;cu>@w2bofky1Lpj=|45!xZBtF@i|}ox4&oJDDzg|_H@u@s+Ll*X5&Nsl15WU zYL`bIx%Ag~>)McC=_H-9GLPpyed@gU>FWHuj*i{#>FMmwx)Q(nLr~<9H1SI3(zl*s zg~}(D{H8jiiCM8Jgd}KLFAH>&{=?ac zwoj%rs_LP7v*B|;{rk6%HzV@#@wsnvUM?>_#=E+?Pv?4Os%l=MCm!dsrjK{Z4|=My zvT{4VCO4vMKDKy`{4}B+BKGrmi*DLDiceaLy0F6u@|r0c0RRC25Fh}6XA$s}H_F(| z4{|EIDV8s{(j&8SLr`>$D+l>qKMET-VZ*b01w5UDzB7X5UMo7CT?+AqA9$z1fAIVR z0e)iPNzjKuvIEK!KyRH_H$n=8!dHO=+#;q-rR1 z$J##)LyQ81&-+kxvg8~)yHX=9i-K`@34b((rjL%Ek&MN&k{qWmBTV~dMQ|X%Bix3i zOkzx<04^F19Qyw4@!Qiv)(#G8X~Oh%vy5v12JYeF@H>|-K0rUb3GCzy8*vR zE8Oh2LlCvtT6tj8q5tKaele%2uORhsG?klRzPB-tFHgE@jI3}UU-4zdL0xwJ7Vj(O z$p~!O`fUXlAgq3iWD%gqUDR(a^E5}Mxo5SMvci{B2GkQY2VT?u26nb5Mm0GdC<5tk zJ3OHUz$>2N+6Dq-lf;n}jG&v*eoQaqqmC%)=g7t`)aKZjB{%U35d<};&xFN2kDysD z6akne2f?^KL*)Y-N0*^HbT#icTdqBUVu_pq3`U~z#;#J7cojG`ws5Bh%5R`JujmhF zw<(yM3scm`^~p}e65mT(3HMI3*3JWE(bR-M37HL48>7$lG74R?T?^wAmP4sCo3c7H z%OE&{RJ1k~{(CpXs{-tw9NbKS|3DLYb5kQ9ksl#g#VFL0hf-{O+rklY>{w@8I4bEV zvF1FYP6`s`+IpCOg3>;s8gNW!+i?mgWmKxZt>rAzpHx4kX$uOpBgyYm0=Y09fR=<{ zQTwmz6^@(DCeeTsKrxVZ8KxHV3_!S_)EbiaNi@MsC{k-TrPReWSzytFJ}W0u?`^0A zcM95sOU;)>w9zO7$)eL9bQ!gcMA@sg2oz4kG9nOULwLHS_0?I~73bB0h?yuC*qvnF-200MCk zUhE(ESm9Fq;lQEn&K#?#}?!u-3BHW+DoH z#!(CtUVcpOK*kCzkP{k3;_D{2fZ}3D-pBd`gIXRT{G=bWIC(nT7?zx>q;gQZ>ExtV z{JE7R?PDXW29vG~PAb7*7q|}gj1$sksJfGZ1@{7xUmKx4*qfysY%+y80Ancp*@!(G 
zAyJUuD}KBMZ2%@&hZ%gMkPr#ZkfHmZDYpd4z4YExm?S6}OX8&@=M~?Qz}#(uqblJZ7n2#ql4g$Cer3WIU#)UO<*Yy*{a+;*bV^y&vO z)AC3Y7N2%iPq@*QOc2o%Cic+Zv}@?+)E9bDV>Qo=l$hpY8E_!@?^(wi2i9cL zYNs^-xhTm=1vbtis(7S05XmUMx@-X+oB<~Id>=`h?}?&@Cz!LsSJI&=ggT~UN8aaZ zqyxSZ8-+EEcgBEz{(@WICEa~7TyvPPjLCbVmlw{L+q|}!Ln^HyLtvpMfUXM2xq33% z6bzKoE&yGu)Pw@N&wxzQ+j6xUs2+fJgn9f6)R@3YzJ&!+R@g0VsJHyeTyxwKC?pjq z)uuz6);&Lo)V6)L>2;C}m4kiO0bx`xk|29pl!kVmPBVs59FS;zA#Ys~;5talN2Gtu zm8IqlvFX!2E!a`o+B?vyAq?zyIG-boErTo+GY0Y-zySxTRa8Vp(7I)GbYinq>Fa!eH3(v8|iQ^3hY0x1AHK*Yb6GXs!d zx5<+WVLle~Tnc5<{Z%-_uJ04qs}8ymuj#7<3aSER$A#_%VM4naa_$O3evf4tdMFQK zFk2QSfv`l^$8+a!7lf?m?pljCXSt?ON*U+)+T7DX-HTxa&$LGtU3m<-A*wd~H{T2p zC%QnGe+nQk|m;=lS$7~&^`Z&Rq_lI5)h zNyTlmHC?JcuTGw}Fel@nSm^uh*weOhD+%m)dJt;zu#D-O#gOzup1b5v*;-*Vl~BQn zd)O$jOALGS9TZTAd7l%+rE?)O(esgQN+?{Y`Aw+bP0I{$8<% zN#Vpc5Ta#hl(spZf>usS8A;bb8v5n4ymlE3b(@eH^Ja;|mnjToVoi~uspmnnN)-eR zGm`N_T0B8(qYip4ni~_GIQa*^JR7rCgN4Q7#2 z5t&dufXPtofO~p7@-O245nl2{x6ONB9F8S!Ht{}2$Z)wi8q9jgh@tbh-(HF2fC}8z zqnGFngSaD4OXQs!4Go!|sPG`W3?xaGu&7iW}N2ZH26{v^e2li%?E47l1r1-cBjELAi^w3mA z+&#ei2^scgG-2)EDU}xs?>v^a`%i?Bx~(_;$uvbYo-x|&RGYf3hN5L?t`;>em!{7L zUFw8<9bQYRnpeyzx#NFl$ zM_5DsR1_Hb^|AB-BA#mQC=S_)FCeJ!bpQr_nC~6IK&g8W!&;(17^Z26B3jB=AP_O| zA46Lp72A#h(UA%Ga0fn027Zyc$A?v4FFHT9ACOCVG+B-?e>|7p#yyD`6;$yUXc20A zvl@B6BlY4i_VZlCV?`;Er^l2bB=D6F#L2?oGCw|%v+Qr&RA?PuCo8HYRZE*$$J)f} zH$`q|PA84mm^*U~Nl;E8C?~)=KZTaa+k1qkB@s+E!2q0%Qzci51)gw&2Cr>OYk|g* zE8hp&(}hpBCBN^!%K=Y}Iu-3Pjc7bw-H4s!*p8o;DJR3~ghMlFsd!p((J9&l|JF$3h{J+N#u5tc!W_{}W zY6n7gPlT)$CPET@M8l~iI;o%b=p+pxWbO(QF8GY*xiy*+eQnY#(Eu+|+W22A$v3I+ zF>R-oX(j&Jr>%s7exN=KNaAyvr`Bmo^=+eOsRF!4Y7?I47ZNp8&rrU8Qp35GI%&VQ z>Li(=H&#P26*C5{L~ASYs7rMpL8gRjge)58pBIJtqC*}tB**!mN!FQ4`;zD~c|vvE zIJF*>yJh)rSDP?!-72ZViDovgm1Vo`)Q@HO9-dUk*E?;x@Ys-}-R`hM9KfsT8)8o? zTv6Yb_+A}*q2h8m#~WiK^1$L5)sYW5mqGEfrLnV{WCD;AM
$qX&+OjMAiPi z5jJD(?$r8#?c7gmsBEVam~~@-NYik8r_nZ?Y3>WY9U%RUr%ZeNc@yHk(0+6f3-JNU zy`mCzDT~Sm-3=kBIG?3{ly&zMX~ujhe`9d>s(=V0AHchl{A-BzOhv#^$%1y_jGF5z z2`I+~IkE&GLa6MTfa7=2Y2IIZ=L$6BCR8>B+BC#XUTIaJ;m*y%T>&COFWwoy4=$SJ zxn|n}D!G|td_5>iYV`&Dqbvev2yu073|xX7ZbIoY=J;yk45%~e*dwU{dwLhj0yaQS zTYkg+p*col*M%!Zx^24Y>1ix(>t+L{4%C;+2yj4pA;Uc}7~d!=-524b;R&dJ``Y zA1^>lY?)GpBdgAtfYgf$)=(+9jUck3UJSsP8lfoHz#&DU2Ex!01w8#4V2G4$g2}gn zgp=8gMf&0&_lm;p=_pUlkDJS=MD>|IDG);nUILgARR>qXiAk;l=$J~AXsruRyHg^> z5CGhb6Y*7+m8U#+u{xWcns#&Gg37_TK9KTnAaOyqbmL1Ii4DoHSLkYxWCp=;cs88T zNg#s|L0F9kf2EvuHiNc_=Nz0NSu#5zX_iRN>E_vnY^wA?F`a&lHeJw+J05RdoA5}G zNvQ_vgD3|d>t#vBh5Cs~8(TOj=3a+d{cr$Fj3p#QpVF#9gPg)oX7m5oHKBoWovi7EKJp{^;V!4axC~(C}hVe`G#8QFQld1v`@<`x# zY4TqWU8J2`d$Vxn|KNi=ix$NX+GR;2$s%>u}&5+MpN-5&TSX}R?DgDdriQLB*?GAYksQb>X!Tx~X;TSL*v8GIaLM$VN@*Yfa?EDWgGX#uv@ z&$ipSy)zjOkPs4V)<`N}%5G5x?nVX>uy7h-(sIC4e6pzgXv(jo#mEB}KjS*d3n9}m z5DODCljuv4DBDmOF19-|Hp+2(Gd_dN82rv**z;aC7_*%EEJnw>9-Gmd6Fiu)pH^E5 z7$aB&X>^la@Z-Embg@i4V9oUOC!kXTEq1!EsU8B%chJ$((!Qa*O$uDni=A}s^bYo@ z-xCibOA=0UnWGD+@a7zV2VN{nrm}hB9vY%71Y(!n{R1*8t);bY8$?I$v9=6`4eNMk z4G5?AcdAXx{yKujmi}M5FrE7saJ*TbgI9P?PGMNZhfmNyH)spyB@!HTAh6sQ$;NG# z_=x4Ynb_or!LTk8K=>q5qXN&&=G+>@M+YD)7mdIFb-HE9c*knRX zyhs8^D1C6GANZaWXC|htE%0Pf7fR+9hM-rFm^m`5 zo5C~bFo@yjRhGCr8T$SqFL;wH$qj_|ge)rUP2G;Ip~2iuI6ali6-*NT+kd@^K}Awh z1dvfD^4 zim-)orhk@o#(PT);%_+Ai^72E-1)5O2imSBB}W#zjwU4qeWV1Hlda_u0g(whh*Sij z5~=l8WOH;4clw0){Yz%pOlmksp2${El3b$OMQN^)Wy{a)ZUUW2lIkKH(C6Z!ch?IK zSt0v%S=c1%b4B_J7v;Uz2=d1Vj)qTmOx) zmMZHvc&E%=a~kuB&&Q~X^w~t}B*N%$K^P~yjg(wN#R#Iid4g^5$v&RULS1KC3JWHS z@u0dWj@bc3RI0=JLP(@KVgn-zkjQoRihb2KHW*TY+>^52pEe^cr)v*AG@h<5sOI2u zj_qn{04>t)dOre}pN&RyaY=1t9wWlY$jL?oBmp8LjHNxRD~+IQM8T24>4bXM_uBFx zBx@NG7TUi1bvyS{8PwfGS72geaz@K&dwn6U(9ARF1f9&`&@E8rU2-QOH0Z<7YQ@_P zsnKm(2JO;)3Bs&xYT-h?5bL24A+31fK{OeX@osf6xzQbgj*+Yx!86Z<$vd_u`)CEZ zNGfUIyFnW8E@{h}Si6+_*O9eHD_IjK& zJ63CJSzhLt=s`#KthuG1HI>?d?W@30gA@#mJA(JgG}@F|kk- zLXj`8mI-&TcSAN(v9sH~h5CqQOK*)xhYLDES<>IyJqF`Ta6V~_m8(cut_f({gGMxlxrB|B+M5tKp3 
z(<^=qm**ooPOhkA0kxTg8va5?s=*rx;RbJ~9A~=nZF4|^tK5~0TN>J#cgZG@vK^M(eyU{Id+Ol0vzwm`fP?B*w==9h1 zW5?`AaJ~Wvs>npb)~9+>Ri)Uli!12|(gemuKE*4$hPv5BMv>Ld0iXx9S_(ri&JiLY zS;}x+V(PR$Xq3g`%>mgdeu*SovhL}e+zL1Xd zQgq`W>}oe?X&*89CNoki7?<|i72rvaSqO_!I*`&{nZ3U%wS-IL8%u40S!@Xp1+X+&thTD@QM}H!z-s1_EH_`%P1e*Be8LtFBbu1!mz<%M&BzJM%!_FO+jV(8(09}S~ zN5MI?LR;{Vp@54@nKGVI6_Ggq#VOWv3(93Bj5&Y%bR3vuS{Ne2FO%D+M^a1OgR~Nm zHd)!XGKibiO>=yPU2tNnJN7OChSs+z2%d9BqAKo*iWZjBj3Qq0ju}}RHrj8GF}B(a zgfyd>Cf~dBP~;H4V;;KOfo%;(#orzrCPU!Vpi$4u=#suN()4*q+BROk6Za0>Gor!R z!B$>LWaa_GV>O6W`V>lX2*7x9DtW8Rz;Sw9w3tT+QyH91Jn95ax^1_53OMNa&1x;& zMb)rTc}N^vj-E$AZK^1v+LZ6v#b;+FF;AeyuvY&mptRx*lTUo-7mtuCLCkACNrS2T74^!Xq&N z?OYL@s?jP1K|r~>OC_$tEn%idaHCTS0_M(F@+mcEo}uL&sF9pI-RVR+y5#CT!?H#x z7#=%`r40hUDcDXlQX(jJ(h(SodF;feTPNn{{0W>>%u(V~H%RfIOE%mDnvfcu!L`bxN-<5p|tTjYLcFW|IM@6Q0f_ zZ7lSmAniuOScwOjwId}6QNfTn-2x#`H3idyf`Wvu&hP|-d8H8W?Rj9@Q$=K#sW+2@WV- zECod>BV2aq>OBQx$Sv0ysDxg(mZ2X&3Z5mFAuzCcmf@5NMTSftI2{Bjj-l8pIP05v zIwa~7h|1ec6)@xI%}p)dnhj~Aakvz1(eS_KPHriuG93&rwjCv^ zq0;M!fE{RZwK$qFb0C6Ka4-JRp%d`kp~kr_46GWGN1S4MhI6z7q&^B=WFg9AxCKjd zV$U(Y13_mNcc{AINdNlP+#}Ub8^xIEK;%r^%;E-&yx1_dSvi1HU*`w`oTzGgdY-%$ z>pftvjf}%Q3@)ud0Gg1XZ#`l*c+xtq7S5*g{fJnWlCR>gT2W{DGucaq^p-^v+@6KZ z>sJb}RbF{GC!AQO{rLvEP*H>rgmvw#!y1Xf%jhnM%|(wiFelG(71ylBLar%y+RK1% zTjNbRfE1^@6}Fl%;~ED~IgfiRwSF+cAChI=O^erw)yhVj z&(KX;-wYg*LScBrT`>U;dJ$vHS@|z^L)3wg?F{5y+68E>D47V!n?+E_|BlM@Sw7YI zO3_tJmlK@U)sHV_9HtJh#JUaX_}cRTWibw4WnhkV(h2ZxI8Xs+8lbSr5mAW!Go;Ff zDzx<|Mr3~sv2Q30=;1B>C=$07sfVxht*y{_D-;LE*tCSVYv;b3#NQqaNuI8V&IW7r zA>aMA$CRGiJvKeR9<5V)Ku;cUrr~Z$l@1OcK|ua}-#_Zj#eoTn0v>&s1}76^h-sjl zsSJEi%rksamR*YdWTXK&vBG3tbsO_yb0f6hHqlO4dsHxNmK?BV5ZpE2cq*$2h6V;% zRO@%A%4poRuM?i*oZc81Sgvgu-!Lp-`D35JxQsC-`qc^9KnAmRf_~<&TMJ!QPcP;I z-x{N!tGRs2mSI6OfrV5UdjLo&w+&!1Rqp^PaAtNuXy@|)av(MX&P{X!ix z1zErb04sRn|1;jQzomyy_|SKP`L!kMh-)o z*6^ow0Qe!=18foj3cy|Knzrzf%$YCk0L5+!fY zYu~k|?`_(eT$@Hc7?@sU+&PLv(l^9AlFf67SchZ-v4JFI=r2J_j-6d6mn8S3jxuT? 
zQNBrDR?-iTbQk?eMNj(Xfd>;j-v4^nE(;481KG0(IRG%Ip**_?U1zN{duP8`U;%{& zJwvNTlrqN=L^Z?1I5tTlOOoRcAH&chL5@(SB8E#oM{?wN3!^CT2Z{2M&PJ7&E-Fn& z6fB?sRkJNK$<$0y*{6p{XEMp=wr<<2o3+V9zb^Q$ZLeNORb+b>U}#uqSZG*qo>8mS zSRMIT z71540jsme4>*KJ`o$~|}w=m2o)#yMH1C7q1b4X$o8hFOkb?~s#sOC&-?zu2C$y}FP zdF3bS66GX3G#m%xKrHsEg%hzWkU|tskr+lEYUEJEb({|R)V31TJr40ANBYGsnj4kV zs;;OO?TSfcS*J=o08Pu7o|%$;F)43lU$m9+;%)0%4h~iVU~n893Kt?BUeTddmf8*J z45ghY2BE@{)_AFS6A<5yQY7&fh+Q{9FfqAgPk|Jdlqo`o>l~ICl_bbhvoe>Gvos9z z68_xm%SM-OdTUdvd3Q;+t4k{=Jh&J}HW&w)os%z>RJT&HArqxwv^six|VS!vG*J(t9d3H74 zx#7^U^s@X!l$SAmLuwH3Ze?>{IV&JxIjw|CnB+XdrCj(LAv(Q;>9m%qb6Ux!l+vn) zChm|ZT8Efr){Ln~muN!o6Ayxl*pZjT(c~oOBu|e-kfK&i4Sgo0 zMI}D2Lln~zvozDn?HTUf5o=LxY!6R(H&lm3_)N6XB?yaHYJ8_edeVKp9$smUd6joq z<=4@c&|upQP9-A-G-RT=E%H?vV-aHQA=a&Rc!sR3?8*jTzq+LI5#^5V=+^F6c0c{4 z-Tm-)c4&7$v%E5EbZCP`mq(asNVr94{q9{amk2E@ard5>n08J|tDWNMZZT2M?(l4O zg=ctpyW=6=)7*A?cGKOmrMtDcS!8Oi&#QapFITPLTHUP?rKnzOHhV_qI{@ocP0jcF zp_MG5)>NSsw+`1mdlXEi8lRJh{Mo~#}RB`C|agy6Zt}u+_$WeA8 zuER292*hYYI-)yWC!fxB%_?qKXjo`iXjo`iSi8Qe=yjrLowe*&*>%nA>>$|d4GRqm z`F-8;sOo6BYB#lKi?&&4s|&#@kF@`#szWIbhk}Dcgd^c0gM-0@2^3(U0S5}j0|**m zpg0pANO*XFSU}--23@a8B_qljqxvgLC)}UF1-du3&7(2J!DD6H0vs1Rywo0tb%a5P<{Z0Rs;Pknn(MMy-5KvN?T~%9JJD zaK96rbcRj(kqz$A|Dt=5;dUA&-uqJ?lQ_}J>I;Pp(?D<5oL>n?aqSWPW?3RVk%$G#dHQZC{Ql(AV?S%mxjhMH& zo5h2K2h>AzdzE*Gt6p1UwP!@}5pZNncJhg$6^Pf1%!RpT*Ow@Fey*1&b-VT_iVf*z zhXu>4sLn#$Z`LY_qPSzZ5iw<&09uj&6M%t&fnZ29A`OFqKo}4Pg93ptAQT3K0%1@n z27@Sw18ESbxf}riGoN!v_y`^ihi;ofg&+yLC{csl#8kC2LmE;)U zl!r-d2qYA6>_0nO<@Eupo?%8!;2hR8HyyrI0Kd879y)hG3Mh4!SMFD<`aP+VI5<-N zd8#QjJ&W)F-7b_h|L8u^x)36ZTRM{(5GjqX=yk_UOGo7mVKefFWN-v`s{@ofyAiVu z2{hvLC=mjJKO|!0lo0p>Xb4d-scN8L1jC+S#))%+sT0)%^FT1cfTGevs)>3KovU^O zmUAn0q=I;+IX$GBR1b1Fqk^Ng)lWa*Jcfg@odrjvt7_7hvCa+Qr5cz&LRPsRqQf!(aADBE3G8mEoy(YRRV2E>g)p6*3u966D9&e7>4(T zr-vb1@-z+3yz~3#QIr~L(hG;n1!8rYAePl3cZx0aetB;*Ncf~^4znb+vN~QL)9yN>*xOP}Ec8dT*ZmbLG%eaUgvo zK&X=9rrT8tMA1~WSzExuajvVmRpL^h^p>2ZzfIc*Bxi(Nu^82}#%+S*G!ojxIp4`L 
zm2zg1;;qL;&S?Y$$KZ4T51u`d`9LGobC4Eu$@x7Z0hsQ}?&;SF(ER)|F8H9fEo@;> z!p7|qDrI^8)ENo{dH=dG0OA7<1|vrgZR^jK-T=*&L~0Y#DcG2?iiz}!NEF*j^=fp> zXDljiN@5pl!u~X8X|jxdD#|V6Fq)%3o3sj!roW_?ufU96l)9jwCAaD(9OK-3Qh#bZ z$Yr7YuD+#ZfzwVXpjed4+#aUD;{=5k*;J^1_lk{>;#k1UMMMz~X>4VQ5q zr^uy}i0&#&5zV2TWSEq^HxajTE6X5!OMo}=`HmfWzyj_7dvYtB2r+_l<$ocF0&r6P zJ3cx&spjz=j-BucWJ`%dZ~tCU%Xe5E&-JIZsA^>sPaf@*FJ*I0niI>kKO+Z?#FmP~ zi3ONl6>nl$KACW}W`ehOZpV_Mt)fc-(Ig-z2tGTTbmJQ3B(ga~qdvOJV6|;^1yZ@U z9f{(jIqqP?R)Fepu@LTDT9SjOeU2wo{|6aM+PM3W-r#}DhIXXKT>nP`HeZ9 zqsM+HI9TT+StxiVx zawsGT zz>~s6czzmYaIGH%2}I*1Vip)ZZiHYe)VA?~oSqewUr`u~@6BTiU@Yqg9|vtKPcL4y zs$!;_Vkf?e0&MRh**LcbaGZeyt#b+{n%GYOIE`L3q+x70Z*MHLiK!9^Z1getrCKe` zP$R}V7eTqIE#NfZQ&Q!#zabjY9)?@yhcA;MMJ3zJQ!XaPIJ@g0gk4d2AVMr$3b-{k z(Qejppk&2E>ksN+(-w{m6-}*Ea@>xvYwi3bUG)k)o(F4x);DlaL^?uwY|>a6^%HRI zs!NZS?xU%qFmXo^gyQ%cIDn7DHf}=G(Q0r{Y;}{?-&?@rv0zB}Q&lZwfJFkGg?>oSpK3L3f!p0LG2+l(mdCo2b-;6!r#u4^4# z$EB@FKRJk4E`q84lZRw+vt@)y_uKF$vx7RAzU5k}5E%hOphtD4%k{Qfe#P)h+9jP! zG(MUgX#8F88Q%-+YlZTb3Bv9U6h~!@#6->+P%VW8?r|z{rRZA-%@kuHJU_?(uy7vS zp7Xqc{A4p>AM6qYhnIS(FYJUx#9&1W2ZO?#m>XeX1k6Z$$u zkDFCf8?7l9eHz&J%PR~72iZreh2-g}T4|Lh1Yv#a_1UJG$MSLN>%KhgTKbv_z?J-< zuM0FDqbiOr5>gXxVYhnU+(&>5L2!NeA3gFsIP0Vvmk6SaAhQcY!M=sA-^+ zI?na!NA zF=%F}b`s5IWV)BxiU((z%^d1Lnp;I95IH;j43H`sUEA|~;gUJ4Gav_D8s@4b(s_2P zx@R&!*UbTWN>IEBxdL7@m=t@?&gANgW1PYO27nL=zm+^#0=(gy>OzmS8#qi8hzw1 zn#22bqZ`k5zcf|Y;mELQ>?$#$*%@_C`y1n@(DOTZn_h)9az zO^CqQ>B6z9iv`i98Zc@eToXlArp#4acnM(!fg>QS>V6Sr9qqs7z-9j*Y=%GB{DDq8 z;qF-V7u;=Ex4?P{DOk-5VC$q=Mu=e&N`Vf?R>$d%dUp02S8V9ocWHOa5KZS65yqh< z+FdIby*C>o@M{x`wmz(BCdTGI^A&s+1s`+ApDAm$1zMtC3sOGD)tyE>-qs>hV%4gMZ+X z$46n1GUZF@JunNnWKI>EBCdd}#;~k_IDBF{9i7 zqfV|RYk?7!)2FgmkgxOj(`%w=JpA%Y&X*uwzK_j6<{@Gd^r1n9Bo^Dy$mH0unW!PIKL$Ot{2lnW}A7FO>^`NQ$$5)-3ikcv! z5W)zXfoh{vRXS7qsmm#}o&c1Gv+``y%jBvuG4V4V)t^ z`WIV*MeN9>%_K!C3X<|H=j3V;?w!9CfXf+4=hVAa! 
zo_6eNi66$GB6&~_1Z{?3@6{4!NMVB8`t>caDo{Cf9AzzOtD?zE_7-}{%{WlOV(1mj z(%CAGLu(!=q!Q5Z1+kU;!HRHbZAf|wCv#foBmvJWi-wRablgCJoL?1M`D6Kyu>@K; z$zgw6zM{{<#twyHntpO482eDd5g=4s?idc~5ql5D=we(Cl~K4KeU0qTC&u(r=*G>a zQ!-(~HzDv=o>@!e*Sz6hb1hrzaY0*0^o|DP0OoY~AUsHS4TVajNtpDGk06 zozQEWXwBktWaJ|}jP_tHaW`;f_JxX@h7QO&qUPo}4?0Kaq03hO0`QZ31ai5(-^4sE zic)~8vtTcAU^SWzLv${NQNEt9wRsjL!9mQfns7W&>>dE{R?$K!-;pntIu55p)GK^% z;-+gSfBI;9gIg>JHF>N(@&QY3fAbrkB9+X{D5b=o0Rv0i^%$g{WV^Z8S4W?&nkICi zkm=Lhe)p#ClubQvS8?CLeFiby15QyIZV-54nAH#~oficf6$l|li`0mqv56Ed#)#C{ z!ALFQt(dv9GA=X6VR}E-hF@GGOoz(VR7QhgJ0e6fLn~i4rEB8 zEI5fh#zh7+uoz*1EdJ6h1G_l2IpL1`5TYGlU_59p3Y2pth}qny-^cy450Kr|dY?-a zbUYb6&`VBX0!%X2zl2!>;D?0_!I`GMMM*z3`5NOLt5}msCInZ z&uwT;B%mJ0lvBaMbpU#f_fM2q#sgmjEA54#qWBaQ9TH)4GN5IL@>nE_C_p(iBsh*{ zHWw2Bd2Ix>pd78b`qa?$kt9Tdc z6r^NLK{Baxm=+Hp-vrZGrW8R3;u2>$MAKqgjoM0RL-9rDrW67-K#6iKd}HD@Cuw4G z5#}a2)<<6b_z3c}*fI#@DmJ1wIt8Kf62msOS`w~lB$4RYPTiWgC@*w-bwRt4=XU1Lv>h5%*oT8UejGEa-B9-kaXz7SO`IXe`G<_7(T zE{cjHoC2=^OBfk)6_*l-id+5=nO`lmu@K-YWrTL5SW!*(m$S)_IJDt%LSg0f8^hXvm7~bg zeWVWIj5*U!?Ue%p049d65lvhHPGh7sm<{mh!%Ipaw5BzKWrCUr*JQz#I+p1^=o=VZ*=ox7ZZH5Fn_^WYEb=tpNpl}B~d z2pmenc!kqdM*vp+Vy&)rH3Rgg(&0g(C5H(&wJw+)Rsk^yi`1ySP!9i4k{DxZr|+%S z6tyk;w|f3BBF+J!oU4Yi}R_9Bzs8~ zcE3th63+~|h_0J5;$k8%XimNVMQ<|r&f`YH0?8?_n8jvZ3nEZ~2_P)tAq7Q^;{oUo zWc+A&163BTJwJFHz+ErFsb>>xLa!A$;4>0}(cSXnK%7MZG>Wqxj8*>$9ltFM(3_r@ zc7#E?e$?7O(nfV{5q+4xRArbdkSC6=sJZ1@^xMGsX~nyG(_&}yLx5bBoQo>F>hV1+ zCLRIG7H^1SL)qmXPV=gIYmN3|1;5UN5nci>_*=n^8&V4gjxNUvplBzso}|pM!{i0d zIUR7u*us-TRyo8=SVX(|rW#Nt2`-XDhZBRr0zjkWQ}Uh`0~eVI6(xL9e=!&DL-e=E zw(j3W>Q5J3MV<6J8x8NL&3ik4Jfs-(__5!xuFP=HPI0ii5LSqC3;{h0Wz4YeTE?-T z&TDiC{wICyhc(d5F3%OG4vkEba!v3ka^BUlOLS9V{2S1A*NI(2L8c3-tybDd0SmAb z9_S8wvYRgc_&QZdk#_6-+V0>a89{)g8$P7>T@M#FDT=+deNrTxV6_HG5Iy@sPm+Yf zl4?9}#Apuv5EI^%BN#>aZW^l+ncInQXpwb1h~-%#>?*WUOd@-o^X9RWYAqo8SZ_a&EPScxT8wIquE!Ca0-Y?S5`cL=F+O!<{ivBB}nvI zAmmmTj9Kf-&6jA5O0Kgu#7{Q;t1qfW)60IK)kS=o-|G^$VO|Y|k9N6D1Zi;7aMts1 
zY=*O^K$nq=kM=&q!chmvbyJeSHPlb;o)-Zi57>Uh^@HcX`Q2&$fi^Yf@XeV}bwLV? zG$jx+Wt%(tq=KLsBlxC$^v^-XqsgnX%L z9zc0+ly0}*Ya*2_`3aUu(e{w34Y=gP@|*dm1VBY5Wfk3k`4_!foiL^7=T?47hITAz z>;C@+915d1I zh^H!B0Lj~@vXJPLo+#+Fc37uzew&g47Glk)D)t-2l1S8alw*9ZN&wVqJnU}TLkFV| z3RS@qUj*m1YZ5<@=nq&{kARtwFuRAi9t^m9C%N#s-Q@{KHWY!QKEgrz;iW}GaV40u zItVG?NW)ib9_dY6fplccb-((w=v9fzMueW76=gN&oT(@V#PIudC0Rpcx~D;+gqM2G zye*%zqr-KncDdE0!*o8PWJ&HFV*wvgGCoI@OsuA7B#xC{o;jFRL;N&E&w4^2tb%t0 z^hF}i7l4eihBwKVj6+nEL%su}0|qjQFJ0DsOek*fGAfYDsh95VbVui~c>8~wz$;sk zB}ZrRg1c#`n?7LpT^{j-Mz!9zpn56(`_P7x`t;vzbFQT~^@o|$R!vD=6g^h@QL5B2 ztlx2DAP)c&FQsGuA!vAnn#Q{wIP;foI*fO^KFv6cqH{d=3bPlaf{Tr9a+TEWhQWJc zd|Ht!Ce4W}=Crq#Hy)vV<)CniG ztpo6CYT^bO+=)8j7w{$bWzxWudz3s(6sj@WS=XF_?%9R z4|ohPi86uD#sj^eTv4R7y^4q)zKV$EFe?gbMA?Q)Mk&hhv796_+?t|j0xp`1h$t|f zAS03yhz%hVhPl%wVGD6F%_Q6rAGFvA65>Z}kROR7Oc^fTH^Q~yBdZZtby#ZQ5mSK* z9)H9e0-{*p`iL$Hr1Sxzpk$t6C_ogc982OM0o2(5XfM?ZW;Zw?1`tJcV&@!+{LPKm z6I`eO%5;npC*ZUo9TXhs*E=1~AcPQ_%lLI_lHPIily^7TO@8nwaSy{7V~jU3B>^k} zApwXI6U$@ENJb>%G?_+RMmAQK<yLWYB!snLl5)hE$L=9eWHSFRw0dE-OnfOg*YnUytVbCz$^}7d>4gLCd))obX-KRxD{Cl z13`wvXaK@u6~jdZf%71Ogg}P{5fmE~AUF^OQf&jaM)|^KssGGVy6X}qWa+2|U*FOc zd1@>X?_{IRT50HzqNblOt8c7PeHJa`sL@a5r*5@*_GvIZI!iTwek*SJ#3vE7*o3q%gdMyn;oNG8h@WN!^@qBevRsk8+bt)H^wAq7TRG@JtSsECM* zvMdVXM`Qqm0TvB7l*IuK8KGlcSV%g8q2d673<5lOKnw&PGH~F~VZ%aSgy_`F-zYYp zo|RiAP0i~hO($8styNCxQS!&csEu2uDO0VDHBHJ!PlKtv_*VDACY7$*@w7j0l2tM5 z;xWKNg@raiV1S~-f{Kp_8z?wL5+LQV;Nl~t4-W|p&HxPNiUc9hK!sA2_-b_@Z(m~x zSM9O0-FA7sT&Z2w>hzH_ts$h8Dq_V%wk#=9h)L0Im<{>=Zc|Nox< z^6vW4?9dUFg>jjc@TJI$mn)p@&LS z7Hh@!(>^>VUr!b97TrD1^<+9r`f8=b3!krTu^$nsTcc|+=4PWjadSQoakAVD>o42o zU7IqV25LjZ!iGfBtR}Ob%gu=C`i8hXQLvqlH5pFMWnO1|lXY1+Ab!d+!Q|r!Lq=^m zeX;+3$MuEtmBpEH{C2-z)^jGi7qZNL%gfpLs19jX_r~pI>|B@c8~eXCaA;nqTR$U{ zy6_x()39Q-5?M+rnPPNMLU(@oIcyZJ^ZCNf-9&^Ij~!J96MH$aF`Jc}+3V%LWdObD zjCuEH$G5D3o@eXZ-k6xIx}4WEOLYw;OM2RG9iOJ6JpJmD`z5qr+L}~Rz zB;z_6#b3re%fnTcykFM5NCqFH*sAZThRpNnZYt_>rlKxtDqbq;BBr9H;^`*pW=<4l 
zS(H=_g;$_6D(V~&2oe&I0Du5PVh8{L6cUfgB+2gf{GQ)=Po8?4*-CEDWgK zgupksHG-v>$D)M0=NjcPD4g+`bFoHSsl3hG>IPdsDRE1Idb2vS zXPQ^(^geC$&8^J<4k9HL6b>j9(BcD#JxY($BW+YYWV?)QE|{lwsK*cIp>vLg*6`Db z&h$WsfIlgmdA|dJIobqs1`-3Rq(!D}rNS>iqk>IkmNO@3^gSUMY8k2u3Z?Kcobr!T zLTE{_jeM#jw@Mc5$quN%vS4Nlm_rC9$UbwW$`k)Xg9nWSJ~LAd1XitmrbLnww71cf zz@5sw7=_7{`2pK>5H15Aa2u0Z={+xl;)Bns>d4W$4yy@ROB!(tEZMJ+N2rIy=*j9y zwH5h&9S&emQ-e5gWL%0 zgKl#}!eMA75D}<#h8;O5M6_a`ix2Re-v=(5xIj2f zZyF1U3MhqGYLWAwkOwWv-wg){Zw_7@hYA_ywp>r5BVc6oT3NAKW?E`j?z|zlllXR= z{OXRkJ>ao4xkZKmZjRTzI$!}OGy2jZ%_}@I4j`3~SBauWE&v@PQ3F7hy5D~`Y4V{~ zYLIUyLK9vXLl;~;riJf^Hf#|(hRHSy<9Jgia%?O;_VuhlrnDp8g|7*jEKG6CE@Z=c z3!2;(lUAWHAIE1S1qpJJ4*b=sLdh8;Z-ikZO^`2)vKny*XOt`*6fMKm*P};SW-&Y8 z-l@6AFj5#xhy-J<;vVB{Nvq>PSY_XQCQun;6bhp#G&oH7eipLf)G4qG5ED~i35wc_ zI%yAd3SuUI9sD=%rPd;Ip1~-jtQf}Oq!5o-aENHqzqa{!=!zFzfT$v;uap$GGu7V`qrdfg+?6#}x|PLKQ$9irm7>5Y?z7Fnfgy7DY!xhnG|YRf%>(n9X!0 z+o*ZVRP%^yN9!2hH44>lTKJ*#uRcgrJniJ{+DT&H4qhZ#QONm&C7<-x_s3u^8WgQI zD;J3=^sN`PXGy&Lx^J3?xVx9ZZTelz8tUS)Xtgcn>! 
z({tg`{suEOD0Zgt4~L3A$)ASV7$>twpbAuNOZ|b<-1zfW$YLtvo;*u;mTa{Q8)+g1 zY70-Wnv@VLD*v^W<-eKq$TkUs+9uGpmQ^D=(vYNRLhb+a>@Q``GSio6sTrR!SKbS` zB%jShwPPvm*fR4cnsnTy>{j5?hQD_Z07O zrY@%*wqO;iJOwXdI!Wta9i~?14veDAGWn zFhW?nT9r>mN$w6FKD7p+s_he7Oe0o-apnqribS+kWZbo1LlIJ8%J(>8_yE|s?_tu8YEf}9ejnIsL=e8!Ec!)BbFnmsH)op>=9pJ>$@ zu{ayV4oa7dUxHSqEg3Po2hj5?!tR_Y|p3h>pZnnB%-W>5O8EFK%;T) zVU0MzmW4O~s8c_7E(0h8YW*9)%DMpxUF{q|F+E_KBj+TPRAV+lVz12i8~~zNL=8!P zIm>1{|0$=U>XdwXQ}!`t*(CK~>K5qqO|!CDl=p^hX;VUwH6mBgJtVmw++P(VIj}rt9 zlj=-6LE-?K_001Vi)%rmcm>p9@|+|v5j%iNoe^i60?wpI?yAIgU2ZGMsz2I;&kyE0 zU{O*xGkF^sbm>3PU=rL6CT|D?)Ci2eb{9N{Aqcc46x1OtZH{ugakU2$zgSvZ1U&VX zMqRW8(CN#(1GYarwXY#J6i7aMZuYxD%{NJ#Ad`I*$=Osv?e~Z=YI{7RUJ$)$+M;7y zmP-?eM!8&mOU!sj%9C!wxxg_`+#QpHO-y~l5oXGGTo)IKbwvG&rC1flBdR9MKK+93 z(b{0P33nSykV2V$*-Zy6=4F5{7;QQjOIH=;y6{?ldWX%yfE@fH#>iJg(7CJ=_yw2_ z%ja3k2yI#hD85%iH;EPUNl9z}-o61hks{$ImpFpN-3k)yofu)sC1>2llPjr49fL~H zi5uh9;EfJ}b+e4K=8SD3lksck)v#0cNJ|A~WgmKT3!=z3H|@+-NG5pBwrG&Tr?7k%0y+1x3~gnR}IzlQSjO%}%%|bD2#Ugdrv_ z_Ghu!kWf@W!g`1%)evOzQx6iOUX|W9nc{LoL9c%7KGr6N{86`0*`&6HC`#^PbM`LG zxzB$=(VLaDg88t2=InaibIs6c1m1CpB!Yn6x2%-edbpt;Z#ulO4xB4|rVQ#K`!Or) zD3W4&u25tHC>*t=1JZL#Pp0S|Z`B5o=ocEZpkEPVAhFRpqAF{Lp z|H2wwY{wO)cm-+FddshZLFla3qzdV87PfYZdh*VhjCubi%2xV~7bHX$1 zXHlEwZ-#Sxg0`S}O2a(0GDUBGk(#0C<~YM(rVDp_@A)beF}Zvna7gwuKJHzAaxF+ji6bLP+Pj3xf< z)V!3jh=lI(e1*VAoy~1MM_tcaD31DcXev|a%QHt$-JD4N>Fh|t zl(6S^08zFKo~vcNAP+4=z5ujJQFR(Kfnbd(8h6FDRkXt2c>X&5@p7$w2!z|vSC~yd zh*?uCAt@rM!xz6%iEJ*h@)v~}28Am@_APY>KUT5o6r-Gq-d!KB{+WR(BFaWwDFTca zWN-|S{9k}LOag@F<+QQ;GyhfpwQ$@ZSC3KQfvW)7w0C2~{toPqKmr?hRW4Wpq!gyy zZ`*)M22~p3ExnfU6@i6WeML3_a*3t`MyH=4Y{?(2x~=@Jn}eruKUuFe>8-pO+E$1z z>0p-*?m&Z)=(9eE&jH0C2G}!9hn{a@C2qc&)k;2-V8?(QKK=wE(&UmtRKe*IDGra< zE(v&iZP0D*%8q)8r6gsgco6`7Tqz@4X>)>Dc>YmA#D)ckxM5Vn8_06euTKySEE-f<@dw2ZpCrGUKfXWEATZDP(s6!eZ1dC zT*y|Kk-rubiB9TJN0^Cqz%4bHre3<>abhy5q*$~XAosq$w9KTDLQZ2~KBo{7TQ_Ux z>S9cEoY$X2#c8c)(mxqOu#NgFhQr*UEGT5x-MZkyN7#Mx5M$CSLUsuX7D?h}lJwc(6u@<`X?=BOBqORDa$^lsEW}YE}Kp z0aj 
zfOqpsc$}xmKeD)(=gK7J=do{>ub&Woi?tC>DuV0tRQmw z7Oe{W?DF>A5AL8g_FOv`=FX0-SC`KLzt;Xp!U&~^=V>k!1{dQQ!c6lhdN~IpdtgET z66~YF6+=rrgq_a5HsqE`I<*K_`<@}ZizpAA6i@^acQP}SWCG{ zwR4gY?oyW&Ln$EKu9|#{=$_6d z9DWt?lI6CYCzFV8(%!lG8F}kD_-)4^RNz$Vz@eEV>_2>C__^)DqbAG2ba{#5*w;11|-(A3M?Xx`1YMhzP zA;V&j=Ia9=9pZ>zD8ktzU6?fLNXi#`3U(d;6a6!u$=r|htclOSSKLFDV+bn7u4XJ;=EF?Uc#9N1>g#%>SBt7*k^iWI68zuDA_C(ho#|gV~SKJ&?rlEb3kq<>e!}JCg8^oACbufH_5k8 z996jiFipY(U>dt3xBIG0TCh|EZv#ov0wY~?{w}8U9-6YRH zzC_p6nRaQ$W|z+}E+R-FqNgUA6A{kDC*Iug6Za4i zyfX-bXO4*M4HqJ#`I_DE5i@+`hi8=r5r21w$H&R9$*)Q;Su!utsXE6-Mn;xr$$6Ie zgyD`rmEEWJb+s&M%ieF}H{xZn>_oA$tX2}`Sl#-4U5#B`sco&)lUE-f zR;*NSocxSjN@*=)FPYrj_BMW1!q-ca-w56MM?1OoB#G~^Ew7AI?{ZYvS5;r)EG@fa zm8b02+(i*4=McZ_Y4W?}jI}P|0)+w^iwrLCpo4}4EEW& z(^6~8HMP4=%Reno)%s5U{Dom>l+vCp5 z#VWSbt;~EGUb8lf(YE_feK#gwYhGR7#}J_b5|9wVox}1029|bx>2Yu78_V*XSvKK6 zqc?y763eoIIu4dNj#X88D{U{&H{&Z~OXR(BW?^A5FXQ=k-?H6CMlRLu=T%$VE%WJa z-{n7Gm)fQm9}aVAY&Cg~-?aJItMhcLCh`?b%07PmmDkO))d}*1-h?zL5x6?c^ zkHjU8JI!NLDPSsbBpwn+B5@>)IT8~d4u<1_;Yb`4E)W3>G_U|ZB0#C&uz`dFWI%C2 z1~#xDX+VP!f(0Vb!2}vXHvr)OD?qBNf^=}*(4dS54$E`Lg5)V4fyO;U1}P7oJg&ocQsgrGPJh2ud= z7!8ON!T|vTAS@84!Ei`Op@D=@oYc@@B(z{Wz+tg4Dqu9A9tsRagTumZK|unC)HunZ zgM&br1eF>gC=E-4v8aF&!vPD}U;+#nDA)i4hDD>%uq20sS^+~Sjt3115R3-K02+*R zfrG)q@gRYr0i6Yf5YWLm6%dVv1aSr6fCB_$0F0z)NGdE82|{QzFcMm1fRKPOfDIfG zphN&f1&0GdVqswc!Dv8GV5A0zh9V&fkdlK34TJ~EiGYxR(O@)CsB0^LLkEXp#?XO- zp>Z5XB4H8{5P-X@@|9MJ=vBV0t*^1OwY8Cxla-gdId`!U!8W#vib$@hRx{X&hKM33 zY9Agd;(aZlrGYdcToG`1-JsM%tA2B_qjdyIURoWZ86Q%zb@yo0U8zCraXH zC8#bHO1C7-Iag<`Z|GPrCU#;bEVO7@Oa3+cN0%&zkrQvyzk<>o(4YWC!TKm#Sv7e}%~|O(|4JDQ z#fyQLGC%+!0iA;b4mNQ3fN@dD&wlT)kqcqhH(;QBwYA&Y!%C1p=iGvR|hIRWptxR0Y3s3`MbK-CB5N|J^iQ^7C zyE3!H@iy4_!o)qh`HsBR*50Sy-t2y{?RZCZd}V8BXK40Rp;9f4t1@xiv?}jqW|}vX z+qwB%y>TnE@xDy+uBna3Bc45M?O9h{_v^YgZ`HNcU5%T&nr-+r_3m0z6UW}L*}Zpw zQBCVsZR#eDUTn6-1%+(2ey3JnTbA;bIZKzb6uGX+YU-+d%r@7@&Bo1W=9$Ht%6q<@ z-CZiCuH9Yc-lgrW+VLf>-O}>Q&t1)3MAlVQ)m}ukYbTbbhR{$%twhAsBXZ(0#N5?g 
zgPHm@{#u?DTd@`A^~H;3T&Bv4KqwZh?=Ug-FuUV%55|4n%%|C`sfnX*a{B zmqAI*#L?Fd2Lg-+P1UqpQ-EnZ!g+I3%t zKn-WpQZ;|w{di7&X<2Xq0}`-71Pu}(fx&_aB51&Xfd&&m0EC6&0*Hl#TeGZIn!4F$ zUZttl*PYtksn}~eHG5Tabvu{Ijhbe9dwaHKxMqFG9^NV<+oc`3A&Zy@J~0txzBJ5f z3^|!^uzvd2KHB7+~FFlkC=AU#qpPOg21(D_N zI&Eu$`0H^c{)ewT^WFHAg)8}9nVNiDFj*hxDs6WBE^V#e+x58Tr2Owq{zixK$KpC) zS5+>{9Ue3Nnch`bUHOjj(c4=1i^2-0O?6%{DW*H=B+1dXVkYP$cVva_R4P=8f}4<$ zrgU1VC2{H{Q}flyR>WJ1>dNl~(HlAYZk>PP9i@JliI!9PmZM6&T&e4(q>id+l5|4J zo@7s=og`_W%XLfY#>V|+`PQav}EjhB#;PA1!n!pX7F$scwChmqpbUt-r61 zj-C}o>$W6Xs&z0it&Xv=^m+U;KI`R*T4`KIPaic>mbKD_ix28&7AHJMzo~Uy-dRdo z<(aqT`&yE#d~Y&BeL5{|N+m~G>SxcTpR@dFNxpu_Xv%-ZD!n`X!m)4BTeCWGmJ?*4)4YH;XnRd9^E3Eu;VR6!Blbq(C!HMwa(R zO7eNcG(#5dFl-z10~JKBONe~nEF9g$LsrH&)R zaM=Y6(Nl~uA!NVgs4K$nqi*t!tJVI@6H@b@IZQ~Zy^6)1rr9scjj)J_ksiyuU}G$U zeL=l(+#JaF#-qjdj@C67nrDwqxMQ2$$CGxI%>G2Ku+29ejzwvWaf@6p>T^(eF0LVP8xIvZw5&1=Z;OAa#Ju>W%}w$sk`t>E^-xo)XTj4(X5 z9@g*>5+T;jkSF7z>NTclHl@*tJ%zpq1srf<*coR!Z+TFTdQT?bpf3@A{cM|)ng=?3 zR$s?WQm>seTF-b<%_Yy)eyeX)m;T?x%k%XD2Kwx}R30@XjY1?!LAWN=U1&AY>vf}r zMIA(jcHB2+Usy|6uv#ql%$j!xGGJ*kk;V;AeNNkH(ds@H9&}xpObam!zP${3l^K*f zF|HRc6urd2=RKQkTjoiW3EUO^9+tc(ahmMNjL8IPOZ3#lVe>+3&_)io=_(6OM1mYu z5U%^O6esgReOv%>G7m*45W_D3x-Li$WkOqS6|l*2RWKIM5Ao1V=At@YGCx2tM<~o3 zi~*QJQj_6<2OS1>tGJP^cYStIJ7X9I&9T!f^&@B=Y}b&dO`k7y9mWmO)-?_N9qx^y z_t0e}aQx6HCvVo2Tfrv#oBE+M1^RFrcnw-R+*KYF7;Ca@MShbsR!tTwkg%l<`(G8b zeAvld7%odVGYl6k^2&rQxFOCuyg+YcCgn|XtqlIR1TO~qYP|}0k#f75I|K|w8Tc{K z7?#sx^NXLxdLcR%)x#W?w7o7=xNcB2hrv5)X)r+cj6Bcv3zsYB5n+~i5T4JTnucXm zdK<7k-iY{lijeq_%FyD=0WBqgKmXhh{~geC-ML}+*y3(3lZh$taHg+C?|#o$F_o;A z$6GCpn~b&z`@~1_vm0SI#uzn~YPH;O2AkOA_A9YF1{ZD~mKeUoLB7@4rWCLM51E%F ziEFT1Z%|Rhe|=)s&H-cFuR%7Ey$y`PWYZ8)K$BP-?92p8hh;s{SepL{>nsPgTGU`S zl>sNzSz>v)-=Y7uJ>w4Kr_Tbxf`}`bXCf7| zkWFT9dN^{@m}yaz%R_+JRVvs9KW%76YthayO64Yh5vlPP%xR|&yDg;9_K^&X3vub} zr&`4(G`0OFRF^#Epy)ZXP|kWeV<&VUc;4?nov}Wecdp7$dI0+f3f;F`#?^~xV-~km z)la?6$=Jd&;7IbMT(WB$WG$+moZ4<1nFP`*LyUH!;gt>V0i0PdoJ_OU-=xoAb9F(~ 
zuIVxchc3Q^P#3$aZ>2UP9r2o;zK9w(z>STpiKX_IPUqVto+(+QCG8uk@>thhmZ4k1 z-S-PHF6ol8eJI=EQPM|j1_5{@7U=d0^wwVc28uB*;2eIxB%~aEn&A%de#^m-w*iNXbgpk1XEH^V7Mj;pG)+^xY%>_D z-i<1OLRXky&chPlFT?g4?4#c#I)Sr4dfVv0YXzc1G&WS>*M+NuIf6z+o8R}wNl?FM z#jKLNHP7+FA30F)Ccu9Lmt>*C8dfl6=AC?s`Hswc?V(0D(K6w_1#zO}=k|h@ z@K~pY3kL=%M4^!sycoy22)Q}4N-Yl+TR4%2>YMTZ7$9(<_;eg_o5t=-F5&dOcR_=XL$u#FT-;F ztb5P_L3=d^&QgUhn;pVin@(1t8G`0ryXyXy}Ow8f%EVN+Z zVtN1}dkf;=&R2~7P~Hs>rSywqH@qV!c9)(2r(cEU^~9@h#mK#XR2U(MW0bUcbn^*H z-f6|+&<6zUXgq*PN6j(#x+CZKyR9qEU>@;h8$N0l_KBnW0Ok!{)Vf~dB-n`qB|~R% zV234cdYEva6qgz7Qm)WEEv37Z>y#}L#*HdRHXf}8L>Cq#A-$5YUZ@J+n$CJ)Od*ld zX#ZmY!9Ilt=BKJHH`aK(@i^ib#@9o0XKY^6TojIvjt4+=vLY{QL2S`3An!js`Tn9c z8ieyg6?c161JK!NR@Yk@h#>v8r%?*~@E$d#x1t@8#k^9=blg}T%C6_BJBdjOOs^sy zRX{*_>GmDN)_X^W(@y(IKg2WqmBiH#Z6ISP0CevTW1-WbY8m$(25-j+Pd;zapioOi zqy5<|2oXuqE{)Q0KB%PbmdOv2R8S5P$T(`iGY`ku<@Z1JX^kYnv{|E^1A5DX@gUd zNhTaUOQoE<-eqqi1$9ne(M;~~d+YHzKV;Es?f)=PxKsj0`qsq}g*o$$R@ve)iNRBv zN4@qGoyI{jpx>*v(qQB@gegdKrnY{zyF#YGE2lFt(%_r z4ZWk>3_YFv7y6j875bahgua_eCANl@NAJ_u(Nom-e=W0ZwRBqP2tyPg9zgU^u)3%w zv?+p>yEjA|f{Dj@Xlw4>d^1YyUqWjdz}dE8%FeUT+M^>(F6>E-%W-J)(4*!lbu_d! zi9mt~70=V)X#tI9<2%neGiavd)=+V-3c!Y#oq>H7E5MC7nc zMqwpVC}m#kJE<5MzAi(al*f<@4EaP#E7KK;3N=Fhl(LDcq$HIIvGq~8 zG7BMJv&_U^a}pOBb06{fQEB2-e=O@b%!($bM7&K&hd*@cBij$S9Ge-S^d^~8Gt%xZ+CY%~(c{GyTwTO2$oO#~#g zlgg}!rs!s>wZJAF?D05(RJa=@V-mt~om1qx#UU7!4XXsf;n-NHmdXO27HX+qjylt5 zcP^m%_%`aMseC9BWHVrnngj<}p-m%gn-CmsZ}v+zi=-N^xq)soSk%^|&;qJ1#?Q7yzpY&>k^I20C;Ld}v`js{7EI-hSGe4zcXX0_hB43^lO6y$?mv0;P{4zFiIf#%W=Vr zq+NjT*@~nL!v@I@r=bD;Q`>S>0`~+lQRpwrGfY1rm@6b{a1_yvma`egwE+Z?VbEFa zG&Z^e{cMN3u4SsC;q=iLL5ioq2J$HZOM)ptUEV7V57-W3&qY!Jn?&t-$zSz5&}=Kv zX((Ta5v7c#?szvYmH?%+VOzDA1j+k4`dBAoStwJS8IQ7`f$g!ZH10;RP^RkXY( zDZ;x!3K;nEOG@*%F(GXi59s^(FDeQ{LKGSan$i<}o5JqL0cKYF6xBEhCr||~)>UGB z)9Vph53LTkc@uxdV^d}91;sn}n<&s9a#V|#o=B>9qCMZX^YwH&hXF-T)2M!ARgx|S zX^`5W!k<%&gIcmFs!ShD;5KL|qVyFNBud`&dde`(zN|$cZ+`BC08~IBj!?LO3oXmH z8px>&`IhaJ0@RC5hl;KN9Rk#qJ+N{cOv?9cib#AAXUC_5Qiq~b3$aO>nuOLM(zMU? 
z9L}-ZtheIg9*saeQOqyzF+=DiQO!Wm7B&y{EbrZp|HU;^w2c4~) zPFjWV?nsoOBN|vjGg_qZ_neI&w9%>PP%s_KiIv)Oqv|A-&I^h{OIix)PZ33Z29%MV z+T+LajRMaopGjcN7zvlU2o;cEUi>!b{-pH0P){{>6JL*W?EWle9ZkpCE<}E7QBS8z zA@(==G1WDSpjIGgu%5x7O(wC)|5;)_I|9S6TkV;OHo4f>plvD^_qYTUCL}7Zc)bg4`Pz*wyrXf%q zp5q9nCOy5x@9o*TRnL)h#00ixLl{I{lD8+4y0)M}H7;`|IDxNW%whGBOGsU^rDIV| zG%B?SFbQW;g$-z@1SCV@TZq%@6MK@VZ354=2}e%gw?z^015EX_s-D9iOB#|{QDT`X zirfxOb(E?|%7psP!P9L^wbf7c9i@+dlk5qGsTos?XWl9Pa{{TB$s1-pXUv>nWR0Pe zf*EOU8#dRQfOJGMwUJ>-wJj15mb8k-rD;3~fCMuiavKA#wV$IDaKp+^|lc37wz68=VZfhHgw*(F4N+QW6Q4Q=J* z8lB5&2|;ztlF~iDR+(fGWjAi0Yuhkos#VD2Eb(ddXv|IU3S$8Y`GAG`39Li35;+QD z6LZDp6rSZq1-Cwml9pVL7oVao%oNS5MT}26qP0}tK>RFuVgs#~JM?Vq+&1I*39_vM ztW#+T>(rj&7zH>QqE zf4^y6ww|7uq&y6$(5Qk@rjLzy{Dj7|>Jra9fS@6GU4DmE-iw zbE5Qpz6}>FCN<@eBF_}$^kCWS5Qe5j9Dgy)fCr||rG*`(=}ie@1}N4hqxfh=*vw1I zLvy*|y#wjC4eO+WQMqQCWR}1`{7X_-ab`L%0XHFi9!$`6yYhI-3TdILiUD}?k*}HX zdUB}WWRPGG8KXfxUE@a9AD!U}zVZPW>h0!+pZO8_FLGkIeoErzM>XuKN|M(JRq1#a zT(Ob~m-38MQ41Dg((>FFE^v`OsCCv=1i3LGC2JwDtmc#)69$?_;pikYBif9DT;{%r zOEJbqbiE^p6pV~x6L`h0r(AvBRVBJikvbcWsKVo$6Fy11Wlgs_B566AUF9Yu9p@JA zgEN&UP`9)E(;zqE;hzQ8nJ9UuJ+8XjeiBJV+|MjKLx`qn4#q;Wq7MFSZ%jJr0DYj7 zMqE#Z^T9E^55?&=q=VY>Z6h?v1f%aRceEhRpGgy)Ky? zG}{WGpe>fjQ&{rLTV1eWMf%iSBlN((B2E;4@Xrh!#>e{8<)RA$6gU8Jh$56PI7af} z7F2vgzElR9;qn~gLj%V~JU$ik!p3|$Un5Qd^rT)BMR_SvRuM78sGV_8+6^TiNR&bE zf_5aVzZ_821;835D892x&rq+PeI zjKsExOmWV8l7ppdnj%EG)gJ1i@%N=DyDv0U4-+4$xCZA~kD<2OY&%vJ-s5pZd+hz$M=*tiajK(Hd$i!JwB3j_3 z#CD>9$c4ITQgT*8U%N5@S09HECxXI4^xB0k|L3>mJGiWEIp3!b6?e|v;J{PepoP)s!VW_ql98{oP=@ytcG5W)F~+gkTSjkjN;*ole0~3 zIohoBV~%`!Hvg5(ijN3qgRoG{NgF*XRF2PVBuFQagV2MCO#7cDpCv85pv~&H=^BAT zJm=d55+{c?CiJ*t$Q;;)W`ex|g~Wt#l9g4kf~Z>}l%ok3AyLybJPpZ6H{;rlR}rE^ zVz`4Sw<9VYuA_v*Cf${TB#DS8W9)go&b)k{SWM=Sqjb}AmM|;ZGSuZWM{qQrg3|JB z#$TvSAYDU5*F&(>FV~R{uW)E=k9ZeY%vXtPUZU+CuqQ>CMu~F68_k1LP;R*Oe*&|k zAriSxlfWArn(e6%KQ{>#8F??CLAB$MSc&AR;UTrH)gws>DOAF~_8~cg^K0AO+-?`! 
zEc1Let+lMvrOGTI8MaQQrb3^zbF>625cFcC!U}|>r?+P{DpTr6n&Bq(u!3?}lpNrk zCJqq#rtQC1XQioOa40tE#*1JSqMSe|@d*o2#4PfBnATm)V}XY#iokh+YgYD~hDVm` zvGz~|_|%D{l_#5myxU@onxJ*fQk_np z3EyI-t$=rzQOExJ%VKN?9+rX-{jaMC%yXC4&J*P(<@+GGn>GUMpGA=KV&~OGenT?tO`vaN!jW_xYs9R1}ae zVYdsIh@QOoV5#6hfwH+erSS^E7t_tlfzA!Ak%iz(w5--c7=2lqQa(a+r^mBWf{3)f zg%Joc!En#zkWLtgIQ;4xs5o6iO5>F+KEoSPsH8zty$D<~M>I28ewk^3+7chs9f&PCxZy~@Ao(}Q43I~NG3{ynZh|#SRb7}IGOs} z(oM3NUpL$8*)Flm{-9SDjAzj}+zPMFfE#6tN1#2Pc7DcibCzDEWEhyU_%5e6o460| zBJR&3pKt{qOd^|?)e0zZ3k4MeT~)F%VmrROd6lsYkKl0I$9TPu+!9srnHx|6h)VRr zo1eow*nKj+Zlf_YpP%cnMAT4_1`}z6+DvK}2bK2N`5?+sEif&@Q zVONdFZCrz?-t&V|!#s0v?>v8d$o0*TW-$ZdCoa!1;S_*nz+wQL+V|%LZs9x%Y&6p? zu#m(DD5os&w>N-EInrpvMf7GT6c_sHOB|MYmaJ&qYRMQW(K+B?&IET1fws{(+?|=` z832+iD2Z9A#zvBogr=MOR|pZg%$jE@M)Qx`BjZGCcTpV-ptU!I&CB#lLdLh6aV;*` zRdjF&z>DAEkAbu}jeQ)M3~&yMJJox$i5rG9zUQR}B&8Xjd4#($SGnjwCM(cIhevL= zSLRhTpaTB$*mZEMP)09$tnK$m)fZx5=)r5al?>Aph4M&mImHfDnIGnZG?ZM|MJHD= zLj%lFw=wuuZ1lZHC9pW`;L4+WSc|W#&ST4Lrm1VdSh!MrIjfJ!u6}~$oycm94B+eX zR7kH0&9qi&DI6T1fV*f;XT@!hG(Uoh^b!RzB=ic$;I~_d8xKUGO*v7jVojc+C{GQ` zDo;^Q!CUHNT6*1yp(DCFg*OiU9^Q;cg5fi=f25drNxSAC4AGKl{+wnLgo{;3ZhLqHQ>PaP$ z2*fVSI;)tmDYEq6f~q0uIqz5ZGH#H>RsEksSeG`nx2q@=7nmo2_VDtds3Zuyo&}1v z>3)hkJwmPmxiHY*>~Ee=xsL*wMDr4E5fEk1zb`Mbl2Nk_mMq)l)7K?+YoD`jMdE)K zLK6WQlhAgw)TU%pVUbo2iK>F{x1Q*@X3iSiW%EONYGcODaB8m#LzA07qhAdFJTp-e z?X(nzubs=Z)lrqW)ZrxTEm|Uhi$%TtaDFl?RZ$*!9iN`1r>ZE?^eRtrk0rZp5J z95+l5Jsb&aq-9Dl>_+Ye}W{}!Fnd598Y3FRGxsgSU#>uX~yMS1EGlHl;C;94bI}bj;AFB$m2EZ zIf3J?h=y2**F0U8{?&RQKfmg5bFoh^G!Fu3J>#XpefW-S79lt8==$4Ld-F#|`0L7U4aYGau{%x6%m1`C;#N4k(TI;uhj}n+cY*~Mofr|_kiSva} zXyX{0AZk_BkFI4(kkuS7*zB^RTo@{-!y9m#(Li_;xRN{X+cT`sCeE6q^I_tM?~R;Y zh%#;B!U54u4+0}pLA88zg&h!ya>X{AwjoMlv76A^tL_Htp`N3#+UWUa3>{+`LL)dL z);12xk!@ofMoXx`QNFskXizB6!}nBPE0gz|_Q?fRj@ebWzUKRWH z`~k~TIM5T$G+5$Pf9?UanIl2oES&lyAC z5=r%HG>W43j4_A(gF@|mL_!YL@uVY}&+XQ5CGOTY=(pm&Ie0-+tn}?n8W9M5xEkFu zGWAp!xs@nuk=iDNCU(Z-u~EY7jR8w6N zY`Wv68|3MTF4!Qv&SR*;#AMk9s%l6)^b&(*($ZKx&<0o-@Z))@5X8~}p;tK47c0gg 
z#sz$c!KDC>;D$U|G)F9&=ua>f)Q1%z76Xs~;$YE$bd|bxH}P2O z$$wb9f9$-Kz?51c5b$s)o?_brz>vXeS=@PAr;~fir!1%c$>Ssu+6xxe%A+X`!rQsf zN^#ocZg0Ben+^iftdvd(-Bp|zKGR33UU${%o}!cc*PFz36lVi8CarH>UCjIvuBLCS zHNvna-sUR7$7px+!}RI4&dMf(0)!QzjzxQ{%4H~qP_MJg_Feb1<_da+29_ok3C}ic zjf1$cZY$aoM+zF#K{I2MyaU4>Oj<6e@_7n_%~ou``IlzqJL$P~k$=4%zqO3LL?NH% z-MEl6qT_N}i*;fcm{<*Op24#HmJW%27BF*HybY5R zh1=2-LvTnx!iHoX%{Is33;ALw)H)EjgXf~BSFE^YR~Q;^;pAQ8jNYnl!w$K%a0~^m zan~`cg_=zWJ#e&iOG9oQetGC{Jii>53jY8n{tS;i6CTKo5sw zr+F||pmsz~eMYD^=%qdH-GX(U+u2dTWh49GYQL4R zLMXcIsZrxuX=aT{Gz*<^kLU8J zHa0HU>&HXX31f?SMyf@6K_Or3t;_g$Q^Sy;k37DGAffEF7OxyAisK=?(*b;NcN>`> z^1b8xam!Irq!Ta-ejaV_~2 zJp^LBqyH>+F9wLTm?Gqy^vN)}OpbOmNv7j5G+kkNn&Fn6*p8;{INz+92Mt3+h0WGh zVY}+Yc}h3L>gOO8wRDfBzl7p}ah7e+kM;*JaqyX-?b8iy?lb8Z3BWsr&ai`#wB{95 zg_W+_q75FQ*sJY68j4%UHnXfF1`{=qa{ZWr|t z)-(y6rLjL7G^V@COh7o9gF=l91j&Ls@Id9!emJ%$Z~pw1_J`f(8Ss6%C*2aA|qBAAEVB<>24KzjZz9ZznonioP| zrzC?Hs%UlxD(awk&+zRA8HXFq?#(YRM~6me=0gem@Wdl_f>HRk0VKXEX*UD7gRZ}I zB|6!FMn*S4hPCPeoQs4YPpEv14cO$@1X53^dcuti003QWl1R*U8)l++IZ3#9$|d=m zf0A6#K9VIZn?BH9q{E9^(e^WB;6YeI+QLAXJJ!6Ag`egF`#9U;!Ns#FE#cVGNr*kU z0ZDS^dqUUveywrR!9hAWA+DSB3~O?5Mpeuun(Nvx5K(Y&AiiB-#79OEvuALWVkH0s zsw=2)raEC&cGK7h$Mhi$3nv@N>~v)lMmJ9?1}`9F1%<+Asxzl{NIx;k;Hyp#QYxIk z5bm>!MoyjbwjPNG>+m?pI5AI5giqjIfaD3P!%)(F@opn^jRPpk45YAS@k8#cq!E)x zAd$sbLzp0m4{JLNB<6xg(oNg4!qdp0O#d-PT!Kze9N>ABXwsn|ifPeyTb7$YT6&{S zKj$6X@Q|pjzIsw~y-CX-3E53eB1+=NkoY`N0<)>z;^t&ZwKYYIT;!NH?tXGK{Zt&A znc%LrSwV!00MyI$KwE)jJI@q7=?FR){T+WwLn}doB7{)bvI64gxgL#T&ZkRf`-O=0 z{&H^Ja1a@Dx1oHdvt?+18v=x>K$Rk5YD}$aun7~G+AKrF zQ(ON)m63*f@cq-XFtlh{Ee=jZKmZlNK@X1TN)s>tdSfl!#IwkxvT`KC9L5c1Ibqb+ zljpXqt0aUTA&FtS!BD`oJ27>Eh!IdCg+Mt0CE^IO9vxZYbrdsd^p*V!G|K^6_CO;m zz}8S=0E9k8F#)o#I>-`aQzI`uJWX)feE-EoM5L(4?<#5mm?l9G1UU%k0Ve{B0!Ab0 zOZCqCOK#$s+HFG%4Kqws`OOH>UhySE$#EbwW$FNZta!Um9M3L9Lw0c zx~sdpYrD5gyS0mHGW!{<8=|3^mF7uJG)D_FPZk-o%XFG;&8&#Lw72KZZciR<&z@b| zp7oL)eXh$+zD}lGcFOK7UxsyFMfbCNa`)Wq(Ha+5_xs}w(VmX3aPyqaJv^gY@mA+^ 
z-I?yJMu$k}^CsP{nX2e)=)2Uc`d=CJ{ruc9{K21dze{tP5z}mo=0e*#%kuwxIn|hI z=IN)~uHKN6uc};VS1x)QEuyNMyE5$zsgUYBWj;V@zrf76T^n=KbpK7%3kk8oO z+@omHr^F;H9>pXpmATSouE(J@bNBOeD9;?T8cnr4XYY9a!9NPkK|Puy)UwGlb5Kw1 zaf?Vvd2-e4tf^W(=jI%;QCiKcL^~V8=h+-y>5PYGj;g%7dsu0A=hU3=P8#!_e@1v# z&+tjQN0wz}c4uehE6dHgbM~FOz`Dfnvcx!J%Q9 z$UuPOVNubLkkIh3csLr$1q&sipr}Yrc!sC2qt^4=P1;a-ydG*#((!yDOWGBRb6(+oYXng6*V`?>ok^}IAoQ%|a@ zrk>N)qj-AwuU4YkH1(98m$mTp)LA8U*8k^bW}eGqd2X5Kc`oUBFi$3CR;DH9F_S*# z)rzKg6weDS@e(i5iB7CUBNk#I>e*V=@T}oEhK7cyg@tGF3h@{bjc8P3c+QG!JjQ&O zb1wI)ytQ}qzf-=vr0c2ao!;hc-sX{>-qAbOymKB#OYNpNx0F^^Q#B)(_N4x#q2CPJhUbx)q{pmBpQ(9PiS(q=%Om31aupsU{f_+%U}|U8j&184tjoI9 zN}781o^21;7?F>AUO&H&XI|T_k8Yi|e%Wr;-mcbOd%wn8Z>gR++goq+@4DXs4Y`$h zn4mt5rq5MW=3Hfz78Uz-KK-Xpy_a{se$CC6W&B{cO7((psaY=)n%hkzFW$a>Og&zi zJud2BW>J5bTBiw8m!@?|S7oR&L1@qEBSPH-sWn$iy>HqX%|f9p!!pLg1hF+`Cc83Z zRfcA0sZP%HP1=P)1TCXi-rdF1sS<=Jl}i8IT{D$i8ll%uB*@zB)vLK$*MGNVWV$;| z-uvqBwR+V*SHIq}YfT-hp&H}atxb}I1$rCbELwNb94n2k{nq0lda@yXg6#Kt0{I*` z+q!A}H!#VAnUaSMjNT*5 zQPt3*#=S7qf&S89*fk(7ZPye=rs8Ng80E+Yql^w^Dc8zvIg#;Yq?$qyA;dDwj$0Hj zhH!Z67RDQlL|Kpuj1nds6+n0}f<-VKD2M|n7>Hm35J-?Xh;+a6dVws;4o2vQh|X2Q6XVLg~dWag(L%` z;^AmWL{vn8KoUU_;b2jLp+Uj`2a5&;qJ+f)!-Rte76dqu;9!c0hKh)VqTvKUxI{E0 z6(J}WA|RH=LcswAa%hAg!2t&q77+mnLb>QXX5xRn#r@4lUpA3{psU(^tAt3hTBcjq zd*07<@tjC<=d?4ct-60xG@Ji)N5t|=^~$R+vz1kAXk1FBWV`9z^lr5j7LKf%JY=0B zImr?gjHRh?DeWs$r=qo!3rwSX`ln&h8at9;+Aa_iQh+#mFKCkz~ zKKDP(-zJiLb**sMr2eM1c#=TTrAlhRnJJ0$1}a315BSN z04)5{TxNyYBfE6L06;_n001Y*8o4IuwFE>(JH(A?C6sbd z!V~#oRnnAhd~vIdETrS70cYsKQ1{=1g<$G_Owg)GBC>66m7>F_2Gc6j zVVOs-h-L+(@ftEM1@>?&T*IkRl|vZ6na{9?4(*$(a5uJ%u6VEcF8uMpgDF+in2!L$ z8u2ySAUkh#{3_eCp3w`4U*YF5#ZwY^DvA{*6H*icgi%$IQUIaBCk8kP)RO7M({<2? 
zzjWIeV<~v2_2n${)&q^Q_~r?^Do)_EL^T_KCOJ=4CKF570ui+q%>9p3h_^Y`h!F4voZt2A3BMWQ>{UV z@~n#BlZ-PyER@m07Ehr#fXFT63cfri$wQYI!5V-+%HMli+&C7$&teY(9f2f(54MV) z_(_L7={amH`8WIrT%nKI_RqG`u|N``ql3gNP@|4Gf|m!v+7X9tMp#Kk3SfxjQbvXc z7}^qf3lWTFK+(3=$SMq|U{Mbq(UL)C5*_Zx8gaF`XVN%`Y*vGb5gW-#gI*OXF#Q1u z0#C+HrG^z8W&}&cVkIXDC@SrAG^mk`xM3_64mo5-u#zDk94dtHp%#=Xh(VGYSyRd@ z_h;m?Nz&Qx@T@yAKKPpKHHC&{0W)|E&u$O3>_*SGwYb1V=FvL<7K)5)FaykC)Bwpe zz|fE;eQ*PgJ%z9nxm*14lei8#-Nb=3awslb3cyTTq{KM{ngVY`nGX~s4yX!PG1382 zY*G>3u@9&NYPPP#EwxZJ^750P2CDwkts4(QuRXX77kwC7?2AMgm&!+l9vqie} zt8j8uzkc5~l2W-_{zK%vQB(k(M(eqr2Qg7pGt!B-hmPN!rc=kE)nQlz3GE1Z9+)+? zl4MS%C2KM#Dep|j?kBw2wQ6_zWsA^+{0RtJGjXP=k`b1OUHA5nR;4g{$b zDS4D2SAYC#aZ|DwqS{(;;vqg+_xpRNB6@ZVMm;uEi|R zE`kpkIm5Om&O8GX!EJPP`v`8|fI6QZ+hjEk&QlW|uP0@+sKp!emEPqE4r;Xv8Kbe1 zX<^}G0Cgh7C9=r!{x-o7BvgW9uQ;rMsP}!GBw0)7)Ul|L{tz8*4ui5G8pb&w8~WB% zmXRH3fGfhFgV9H$`<6tHAp|54Fu_B@v4{q%roE2X&3P&(Gk22sP!V5tOC|qV5+jC`TT5`7kIoxf0zryC?;RT?x|nTm@or0OsZzPv5L=W3(P6{) zhd4i^L$V59yO~KkI@TSrMfpfpvOw+Ebc)Mst_R*=kFCyi^9Vh)5M^7?cG)xqi5Kr!*3gV4@hMjL#zA z()^SOkJd$z^)cdFv5kNR_=E%VBDrJ~Rfc8_-wwSnX?o*`upJ}0ZWOIF)`641t-Yj2 zP)T1@hc&&U9^eHNOpDcc7~m6Q)2sf-t+*jND?|r&Lu~GgheXu9fOww_{3x*5X9<4O zC{gG+uC_{&!p*Lad>!C|^2vxspLq;U<|G^BE6vHL?Hvp}vLWacw6R?h)M=sXKRAFmi{= zEceB-jvI_5qs3@cN4@ETq%ad zAGf8t-pGTKXB^wiO)-^i6rg(^t@*jYvTm_5h2bJ*zoC@OEK?|)NQoi5i_L^p{Xjli zL53qj1*!c5OCfN76yZ8lFvE$ZVVb2JvbWq)c*@05Y@rS2=$(y@BGK(WJkoV=ySl1& zf;K+clrD3Y=xC8-KMOF&^VgsdTB5{4%c>l8n?P(C#w*Q|MP zD+pd7hfjBL7KBrRaA?N@UNluQBd$gwHYeW zq@3oajSKxeajnN#wejpeL=E1vGNNZmj}QSK{PQt3n3)W>1o)|n%b>}fhdf&5<^0Gg z7BtW^wI}ZLny9%yr8>`cbxx7kRvfjnaO2|QR=@tNO#<4s^E&Jdoe4@VO*Oco zSR=YM`C+zI0;}V0PUq2kc^p~dVFXrD6W{KwL!I*5SOX(G(L%aAwom6MKIW5ZH$^_F|T>A%_B^W&8Uh=A4Up7nb4>i z(5M2m2qxc8QhCpD#z-iM7c-+J2H%sVl~%Y>3$0U3m{80sV&PBSFDEdxJMeHeD}R6t z{KN3bFAs>~otFaWp+5r;ymsm?7=8fV&d#K8ff3ooB*TR`ic&L&{H7^euPNpc2_A94 zO~nzUE*ok=q4pi@Vh{YLsN~ldZU;##fh|u}4wVnI{0ynKcDzM0LyCaUM^{vAIW&A9 zI8$g*kO>!gZswFn_y#A3Si;1=C<3VP|Lf7r$T1NQzuwYwD3r)Y$dGSN!GKDJ 
zIhv*cEf6PKD?H;<*abn59O1OwPZfzzg9UlZTExN9h9Y|kp*9>zfU1_M0k$z6m17{B zNZ{R6Jy81Y{?4D6_UBk0t$o8~{UYsC^lH7ic8Xw-WR+ER0DOHySr#R%BF&5uf(ZLf zWNMWHUe+h#(IAoB5r|TNU=ezep0j6EC@F?O7-EU@^x~5~c4n-|Xp@#S9(zUN^V~P1`VLnaw zPv1o9`EW>~_18R+IUYI~9(`uiWZC)qmCcDntUbNK%b>kel4yIs^e2TrN>z2TgqRAK zM1$lLMyeRleHxMrlWn&eg&=_OLNn`57X&g2<1{;+`* zA%Ph@!t+f@K4Ua4qCFEHcA`r?pR4Bb9nGJHhjFHW2|IsB+9^d3E8#$i$Jf_MNQh`=lbb&JQk*-zTy_ z%|a%UA+BcX3k)ets}$S@=V6c3(zA#-iwKTNs+;1CDDep6kcEY4SxPpxLgOEIZ5Cjd z;pWq1?Mt3sJVDFoZLd}i)(q(sLAQGywnzR5^GwC#8&82X8*UAhoIJ*)nP=>^h*5&r z_K?kB3};Ufu-&V4+WCM^z1b-d%=cnXkxn|ls>{cQw7s$gyB2}^(%wZo;f#{vGVUG$ z4Ft~=2#?rR{*|~1XOc3LmSLA-xy}jH7f}m|s!zW)`X;M+5hy`PdNfV*ApK*m`NN@{ z_VR3ENK_+Brtpvi7f}oxeY;Sqr67ZI15?|b3GsBR>>4sz&FN=*z;KOQ7WFG?pGI%O zbk=6VOX7&Z;bi1hw$}6ncWGt|>^>Vc3PET$VW&kv$oa8D->=XXMrLW;L9f7CisMA~ES2fCnXOflmV8o&f}3ojddRBUt9%el|@;?Uj;b zKQg!VXU*{OWI>az?_-Zu7dB`^<->8X95lh0yWlSJp4OQ|qCBE74Yoa2d4gklTp|$y z0&64lS5n`3ngvG)MElg8GeF*?Wh?I#86VnnK;#6Z_$0E^gCA#HIAl0CXPDAL)X|Nm z>~h_l4v`bgu^p7EBn8lsHcvSv7~+pyn{!8?h|oXZi^n==v#!t3wzv;vIdTd*!QpWn z&t~A7I!~Y%X5@eCMK)ZsZZ|El$jmM*^!V(yK_AMD?AFvzLw14b%nroLHiz=@5PZ&L zmkAHC7`YtN?F;ebSj_>+guT_`xIBt37Av2Y1fWS_dmvfPG=Xy+L6k18@r(5EhFK9; z?q}EGCN4(L;=JCh3#JI>J^UAwwN+|Kj58}JH=$yC6J|Q{Hn)p$&E7b1Tr)7(BgZXN zK+KJYJ(RN|MCmz)z*Q|5S|qKvso);BWa(*)Y#QdM(6$4vRVu(>c7lHcMz4)PG=6`k zco;xi+#isM>tTksCVKTql!PJ$2SaNggWTTmwqOwcnGJFV7?^Krol%yG_E!+-{Q1`{ znkpKP-S60-{jwXUb@4Pmk+P?@*KRBqTv_Zx-il+~og+FK>+V%9U{3FDJaO)A?`|26 znLIgtH#TC)#NXz?dv$x0VesZBUT+TX*Sf(>;*BMCWg71s$BFEZ_mZx5FZ^s(-V0-C zMl=;TU(UQmdxe~oOdR8RkQiC%aaR)A09Y8$&?-s`XZ$A)S7Kv-OkU7+Dt)N2AR>s-d2x%S z)tVW;7R!T#Gtb9~6{YlI?K%Lq8e&7h<=ge7H%#(m(NYkIk*TR?5;d$T0%vN{Lh=p)sfUH8VP77eX+n^42dMm=Vopd! 
zcRHwTC7rG;=dj|mbPz?(B6T!$;ECWx8lxDTM?ZyLt|Lf^kvgFhE_vUoK%QG!TBsC1 zD6|DP_~BfHABp)2@=fv)NSNBnIXXBI6*Ql4KFSuRiZ|R=C@Y1H2mSPV9$Zb=yV0#Ec0*E55VCU^f3hvI(Kmf>CD{!(%$QXhFH_19E z_GDm^xzwT2(@7Hb`BB5j5nPk8S1Sp0^9y_e>~Ch&Ghk zWjSDFPs7NB#&oFY<-+J#LOKmSiXn{)p%C)|1=tQJcrL-}T<6B=Rk@jRKryUBGBV&> z1|SaitktYKm+@r?B&soEI3e41}2Oi|6K;PO4H7*;(R@K~Ef5LQspB1ZTS65L3h%!7m-<3iYN?4h69>^a26yzdgYcAc~v; z^V^O;t|`GIAT+0D$_Igy%{*Ws^eiQr*Ra zWhCw$s9|?2gldX0lFq2fg3j6sK2Xcz93Mmy_b;WN1)e)cBS*-AmD3W0`~U|LLY^rp z3L(T51+mpi!w)p%*NcDTu>igZTT+aooLD;GXr@1-L^&W6_vtSi)6sCa6t1cNeU|W! zoxyID0j+=A2JvIbp?=0+I*Ek%EO<1W5(k#i6#;SH5wZVnTKtxL7iv4T(FAcV^t zqorgYT#;l~f8z;z9o~qMo7{T?Ob@oVv|AVzHe86zEG4dukv#=)(-f)&gcOR3#Grpd zKbfD>=*u$emq^TX9cn@XE1l{C8S8+>x+$l7&2rTg$le94Iq6BVo+gT3`1m*WG&h=Z zFZ$rhdiH0)z1%Sv9=gzzqo}6%G05YkOCZH-u_C45kG#eKTN9y!sQAQS9Q{d? zn9yRgfLq4g23`&fLs|GTee&@ajq04Fhr&2jl%R1Em%V!N{KtQtdsMkKNIwX0w-xzaGY z&o{F$I4k0n{WCuDQdgdxN6~F7L=f`MG@cX3hJw{vDY$p-EzEsN!3=`lX62aLwpqjP zm`1EI#Y06=Cy6Bkg87^x$u9;v!@}L=S}cKA;RfwX-!hyq!?ie73?Kr^k_V0UO7L^* zV?B91UeY@0dudQ5xV7TBxGH_egCOq@hrAi_ehi?no$rLfvvSAtc9=2{%vtoLjAX)O zM+`i)QF3|@RkHDQ18S4&l#5IV84@kg2@b~A&TDCFm$RVMZ)*aqQZv>cZCNAUp_g&^ z!xLd=&N%4{U(rxWV*Io9MhFi{#S&NJK#Vp=s-}zpW8rbNFh@U4c|bFZiFSQ-7dcY$ zfk3c6x5+lYE=<&rXTign&=Ekgh)YzW1TKJTqlyRO-YP@hlrG0unmL#H1rYyFWe(~E zljkevAHhcfgz9x0TT@xEbxzc=$W+E`NNBqCX7e!m=re)$!Fs8pBGjUl4@B69R5MWZ z;NeM)F$$q!^i+_5WXVX1`hWU z+**C&)ulV8gc%i?bOaWeGzftG%rxR6Z6PFxcwp@#x`Zq(baW85^t7-H&=6sO$GWl= z{4=$Nv_-swB(nVFM4XAGTZH?PtWG+AlIU~tFml>w;$b*@5vZ=CJKw=N>=7g@85W2C zkmk307nAw;2Y@Nsthnw=K}Qh_C3?K%Zr)&h?nf^t)Pndf~w!_=2);FVjSw|WgTY)!5!Ei4K#-%5hNw+E*I-r<7#qWE!r+$McLb+`aId}dm<|#!s`iD77#+IIGi|>%C?JO%!qBuvD+(ysCH9y0buY0& z(_hySL+Lh5d(@2Ku_lySFUcjh1Sd3AgLZeE8spJ?{6>(b8x%AGNZ2UVR8gT=_9}ag zo(vTC(&kGFrCZlqRF7dgslUNE3$k-shHoycy%E#6mFp@!@*t;+#tUo`Lw*JLc#{R2 zERo-{PzQZfvw#qDrNb$CrWcrtVYV|>P8%2bfo4~_Fd>>y)4^jk!Fu)|dnuZ$XoyR= zu{>2rq>{-=BAT8ECpaYC1OYu3h0ih00C9h#*~-xjct%f_v(%?dgn0o3HIn9$MEKB_ 
zMhC_U^uZo^QD;-m6Kqh^Mnbrd-{H+69%{lv4B2{1c^v?EKckRM*&+fNNiMf;v0=HT6hD-lew`<`p=tw&q z3o&l)wrktywyPi_Y3o$pVU5UpMRBTFZjm}?P&Lv;^R5qD|2(Vr$nx%zoy9W$;(nTI zA52eCfi-s$Kwb#*kZ&ihH@k*}zHIF0lgS3QMF3=x@`$Iq(RfOsJ;=HJ9F6@zoC0Mc zPP*+KlL#iWhShu?fO~?OL2hiQ4!1Zv*R1ncFu4pAkSK9#2A#dpL^=tyYPO&h4-cyo zIQ26`y7{-D6B3Z~DEb2__|W_i7Qbn=?--^4=0jSFclX-4xmaJ-feFkJM$!j>kb-*F zK7WQ_iSlCVnKvYDY1&FWDo>FKyU{?$>!+d&9#Xh(=o1aaBQQq~*=HnQ175n3y%DE% z!)_DQ$fp=AfzU%Vg=>Er$eXt0&r*K?RDya)FbVz4(pIk9d5;3XQgnF)i6U)?w~fiq z;(1gk_yAKt>5Uz!&P1aUXH&CO;{h7he(KoTsFw}_v~ezt|56tPgu!r5Nxhfk@i_pe zK_`!ceCK@I=btCTSaS-dnT3oN0ElY!3YtF7z_fPKI?SP+drBnaQJeB4=pYG_usfRr zBiX3;KMf}VEVXFR)j_ouR)3I@6dFPQVw9x95;H__C=LjTu%5C3Ar8i2)Bi7`Uxvi_ zp8q4sdU>&6kB9pOVyTR$KIpg_5*S?Rgj>`;I|^0c4TOml8A9P=@o3J?pvz!xOpan6 z^qxVR9(K~7$Efjwatuk~F4OU#$#twPf=4hr$O1N@Ii=M46d#+?S5pE;m%R**vB~FU zKY%RYLva9%o!54m(Yn^3XS)4SV&RB)Mc+&g^tGAFs#xhtH!~L>u!Us0Fd7yy z#rn>MVqb-U%97>SDjDdF#k7!pf>C`$8Q5?V&KhDhe^=ueIRh**%S1|H+<_)J{Pok~ zt7gf}AVCspzNr>902)@&kuL6Wxh!S4CIdH*tzQ32l2>7%;GDrx7lJhJEjf@}s!S*d zj|4e+a;%>WU$5O6x>*$;bz=CtM0lu$5Wmn&4?S;HbD9hRSK!dG#1g;EKPpn!g{t;a z@mRDV6hdZe@?GM6`e7=;s&rA@jz)5FWDEs``u+xP#_iwM&xM^ntI?Y4j+2YIlfS|`#UkP$MR}x$jlH=*N zEkx@Du5{4~Uj1!5N~E2&=qtAPmR;ev@^487d$4#3+@h2UYZjy}iGMwyf6a^-5}8p! z>UdhzYz+x)eB$E4zoqFsiEjcXJdha6s1-us7>8C_O8Rl`H6U8JQ7|8fG16tGX3eh&k19%3s$2?v(v22zbHmU@&ik=k6Qb? zXl2P@9oS(v^k_WiE@deu1p2hb7RhJ3QoXYzb-4-x5m=1i?{2o3tK{)U$P!R+G4?=4 zm#Cp*+0$k`D51TE&UQDv-9x+$?=9n7lJ`Iv8iq&i$59}THvMB6(Sh*!BQ)@3{uicN zXd`1?X)K~f@8_3`D_YlhI1qs+35NwI$096mi5pwQSOLN{^V3}>X8WUm%W|YKavp7nmuI!D9k}X<#mjW9OJQ({V^T*#5J84# zj6u$=#nTA1OBJpo*{gMB@W6LKIS`>>iW}vYn2bF>!O1C&#RXGou?QseNN$#TdjDy< z!g!p2v|YOp=_L%8dOVJq?17<2Lu=W1uO+=>@{h2)l)L(!atYwlO~0@<6yKTmvhy7r z^fAyv&5`tX4jL0&c>~fqmYxY*Gauf(K32qcy8+~xr^xon^ylwlA+upbQbetX#{qVZsryY%ifR(DAHB3mik_ z4nZ202e)wkfiw)up&tbS$Cwa_yOZ49YdNM-BEhVhd8=nx4Ktk$LD&B?j#a3j4Y0l& zyUoz_0J*0kVTApeDuaa!5-ii-z_b`NFC;EA{D5Wbm^WvjRX0D22~n!RisQMg;{Ve3=HEz*AH;? 
zZeE`&nkP6vf}1H|E)X&iTHg9PP$HYlo?WHoXbh*<@=1vvt1s9NoZKXSX&s zny>Ngly1{HNKaIKX-84~4la!OLRjQ|yD^#-PXwI{v1x{b9kAm4b;>^R*f+;!RdcxG zxXpnpv}Ux{Ok^3^A}8xnwp5bBjPK1vYlFRMT%xrCMv_=Sjs51HO|)SsRw8y+I{szmkT5SsR;!rqCjdS$@l2bh_MofjHQO%OFO| zsrNwveO$aD48zx@3C?ab@P_&5*xolRyl_bnTO3xR2$pb+3B>LoNbe6+sHro1?a)LB z;~cJUqu(@4CP-~LH)a|{lerZ_!ByaWBh+?W`>jp)>R;N*ut42v10&T(MPDw^t+_Ue z`P=`lp1rL{@4vDIzd8;cHv^CGO~Pv9XA~Qw!U?3QJ#ZA9M}v`JA;NnET^Y|15xX}6 zj9opo4X^cMYQQ;T=ET#miAXEC4=p=~mLdAc%EQBuN=n~udB$vIVv!7<4iIbEx1Lz| zdPxiKsf&1E@PGqv{c<_mj;e7W7d*Zi+m#%xHaAo)r%Om)&XhJ|w5@?w2JM1~>O3!h zfW|+0!4NKWcQAU<_6{p_#JAB-zS&xKvNu!v8EJtrvN&?JbxU0t=iIfr0r8GwJlyIA zun6|^p-EfNACr8mQ}(&2ix_ZO9EtV!bx8nS{aW7)bKgF+$S@|2A^VUF(+v!%CA3Ey zWVl|ZJ>P!1X)xv8wakAmdL6nswNa%16Ob+9tw^iyDX0+2Fq8h^La<_ z#Y)Jsi(YHRjIG2^MjrB#kDKi1SgD8G%!V7yr2}UpTGcs;2fl(r&!^X6M5$97%zR(i z6V#1SR47$+PXg1L?_sld0f*AA8MbfrmG1=e#0?1tDO{4>)nTQCxA~9W9tYBxAXrBQ z*IoxS{24=cuU}>c6R{E6$OX@w#B6MT>!zcdNw>&-JC-}kiFQ7NiXbvlo&$yTfx~u= zmv1gq!k}ueLBafW@ZX}YUQN|zhh}*&{ zqGgyqkGhsU+sbEeOu}Ly{Jan?z)ck2j4Tc~If(4=M!d@_U{qE6WxvWeiv(#xaieT$ ze>;tt60=o0sMPj!Cs9FYQ7nm?*->RS`x>JYg$#xU1;^Pb*;}L2>{P^SL35RuG(xUC zjj&cE=t&gRY_W%tMHe{Nn26A3qQR<%e93~7Y~zwG%M=gSRnRUIMJoy%S~+K_Xe|Cf zUahR+(cXL!4@v6)C)XQ=DFyf@HtQ}B?Yi6sk_F%Z$tK$3R^8m%cf%IITdgX`n)5Sl zCW(BOcfnxOO1>UE5IOBmr2Dh3W<%M;a`IL!9&;ri1rI5X`k<|r=!|a+|WRWYIO(usA zvtqM=bK9Htz?8@ep;L~Vc$hoqXk}jIrm+bO)M()?gS7S!LBzrylyVi#HNgqAhKkDJ zC59k~a3bO``?BHu5ha|UbFg@-O--o+u&P}E=1l{SE7U;zTJ`RT> z46Nhe-PniRNR;GA!j$*#5q|KM=JAHbd26< z8&TQ@o?N4V1eSA3dBhn1ZEtq&mTlmNB*_g(l3HxntqRCw=0&p)z84 zFW3~p@*YZxM@h0iZ53;#|_@NJ#CpmcF zrqO}bgj)Dvo^I`M@pZDh9?D>m7j$R0PC(Lx{88)%Pg7jOJ~@I8FS)jkguK#62uHl> zaszv5AkY}F-J9(RF?77Y@~CE#8$d-z)_fp*I^C57h@^7s?z(axqR{>T{*v69>f3Om zDz_Ld!tM4LAdzOApM>*3>pr|G(VmjsJ3}vt^!tw4K>KJGM3u)zIyxDEqK|wPK#C z8#L}QbIuUd-Yq!)?(E*ic^rsvc&I@M2Kh3h#=_b+QNnS3gy+_+4n$Vrk0XCvIkWi! 
z6R_`yqV*G$SS%VdGe(Rw3_5ZqP`8mvPS0oGNH&5g9EFtPb;C6I0B6rNMDR#?H#`XLP>iu z*$6rE;Nm0lL=M}%%FgP-?l5VV8vF;Yw9vNpjaYOg+=Oc(@MB5%3~G~VRS&-MC4zS- z92uUau4o&M04BHT!O=8|k#K;c7J86m3e=;ffp>8#YXH%2YimTYu^$chKD-&PCZ!{C~ysvA&UtX!Mj07^i$zv9Ya^gRy$39$Ad zkqvgPsvH0?8N|ZT)vj!5F|#!fuFtsaXj8%Jj@ib-eFD>Fe%sdVquL($BF+&oL$pzq zog_s7)eF-3t<($GX13=C>sI7o!p zA|a+)ML*e#*YHC0x$9e#6@)qSa@@5D30rgxgT+XzdOEGno5J7VDD@v+$wrTrS@{FoqX$}pFZzgkz-^{B>k&M@pA>|x zwJr95%eXX(OVmYb!UMWNz*fbjS+RZ7xDc_6gPuWT*~lh=DKZRPMy3O}!|5<7kDEzL z|IcA;i~)Ngb<^1@z=Y`7HPTa2r=(&Ftkg;teo6q=++Vq^>0w5 zkFX)(ZW1{u)(Jv{U^}MXbJ_gAHX!aXSc(JdsOT`~5@0W)wwbUP2#|p2A+)y9>Yhee z6fN_(^+S+~myRr@r(wInW`6IV4N)+&L#DCaEJW|Exn-udPS3CFystB%VOqv^70Hx6Q3kLs38z zbyt7RMmARgz|7!Jn_CI(te-7NynrygFh8|ffBhiF5ENiuC8VL>rdciALj%}g)x&BKomEr_loH(d> zSCfl{fnl&|wd;vH$PDxOl{_fVBvtF3YQ%Kf!0GGS+&@=p-@*i?ZDazqovWuw>qh-p z80@^;fwh^~9(k_@M!S0z3bF)s#uk%(B1Ik=Zrl8eK~IjTuQj)}tVo;W!h3VLr)^(l zC(X;E@9P6=6jF?w-|ra{yIa;O@uN_W9&hC#lmg5c0eYjT6(sKB3Y5A3gzwrP6|L_7o0Z0Kw0U1#0#$wuDVUs_f<7cB60n`wSG5WXUmV)1k{bDEB+n4ufGp@N2pBtxNVfT4Hi{OCxX zhzljtp3RM?CNYFgH$-Hwi%g5xB*w^4F(kK>pZwj)G8>y#*NjDzoLq2Gl)7=l2p#<=8Qgx(W$4sNer6Sv}lMW zK*)|+mStr+mJUm@7)uP{Dg8KjaTl})`@Yp>K^Taj`_ ziiHe|Dz&IXL_a-P@H@80g3`pW6_ti=nxPsEtw9~6JnSz=n-0xiL;w6U5IodVF*Y+M z7e=EMr(NiTXtP4*JTY984J6SqG)P#$fkK3xMIo)JDl0c2>&TWWu1e|w zwS%U_g2zjhhYFTRv3@ii05|`Kd`17P{WsMc@5K;J*Iw)8dncZ^ZwXB=FCMG>tRpc2 zqT*ZKvi^!{wbfU&`f99WwJF%w-TN&{4121ud7Z^BG3t`M$Q}2pW|kOq#Vx;TRkNz9 z`l_Vrs9 zBvjDgK%oJH2^1nC8W<`(Sa^s)0t65+M8MGSFepp_F+oBDLjq=3P}xHDx=9Qx*Og1k zx;~))FHYpvt;Yi_%u-XTmXuSel-Qdyd4x{LN28KIbVI(vNMz5LMb(_gAJ>r09EsN% z&&fD3I%gJgMsz0@xARVm(Rigv6OSZIctx8|j6=Xi@x>l1F$;E(SOB>(Vv9{IcOsU% z-%(7l@J|doU-3qQ^3cAPfcs!hk>^5DJ9CfIuJ$ zf*_E?L7a#40BU;NP~GA}2eF>V<#{)v&fbXSad5S>kUS1<%5(tdkZ%Ghg^XF9h_IooU5emsMlZfJda~q^oK+m^3FtzW z9Fe7vQY@dIdFFHLo9LpHUU1T386E@n10X-<1JM0xOx!}zpZ*y5w<$sO{_dH3kBVVNoT;8^`&gJd1Y~Av5FU73}<0T zoQQGS)F0od&^%g$&k$sL%(7k{GaFWVqSWxYV>ZQEXyne0`StzIg04f_dKv&Ti7CR9 zoy|h#6Yo##J3SK2k$hq(efB0$PgqoMnpv*Mt($asv}iW)Z46m#NJgX?xwLEi#^ 
z+aZWj6AhGix2sK`TUGiSMkrpb`$L|tQ^)?{mF}W{7TG~;Lk|^7T$Okv?9f5VU;!w^GxuO> zlouK1VD(d7l^CEShA`lx70mWMGqZP@QfZ-ZAEhJ1n=D4AtGX-fEOmAH&ucv?fT!~@ zOI^Xf9!#B<@axOY6obtu`b=x1LpS64$i`;Ln`4Xz_m%x@q1?jZ$(DY40?R44iH{mw z=GN66V*cXIS$)q95XC*;k6}rc^29=ACtD4AoTvGCLeHW7>rBOQ>|#Nf0w3Aipe8DQ zB_YFgL*0VlmLR+1G|s_cH+$)O61f`*ESPkUYUTEefGrol0Xyla$fe2nS=1deHfNb_ zp}dZ*Q8905%RXXHcIap1?toj)`bU=CouznZBrzIXEjShZftA1EKmcPok^>-;^;DdS zFxz~U7HJ32B>vfb9p4aKWK#`UyGP7a-7CM?@|YlpyJ3#Wt}ha!+^H(qpB_sYqmeJwz&4!J;@^H)P?XG1UlH>am*M&3$a9) zf9w`mN<|yl&tpXn-E+@ zTzuSSU{aWhSov(LbgAhGf{L>~yotOlB8@jxm6iO5=8u0~FyX*lf78X9Z4|oQ;rBvmOJ}5A;04qs3q7{cd>KP%>4{U#WH=5f4bMND9&k zge1enGY9WXBwh4H2Ya`?gAa1PE2l!>BlGR?H(eBe$2G$(A8G>5dT1s#JM9R+@8X*` zD&$-rm?rb$2K~pEBC>(;%n^#_QaU?PDla|aoezCj71ue?Fu(}W!qI#y95+$~WA4T= z#S&TMtvf+xpu|Byp+{nvVTGPy@Y^eBAdNVtEH60eg*UC~Bf4|T+xGk@+*2{+P8p`Z zT7-<_FvmZh1~<41v}?{gCY+lr=!%aihsIEGVY_s(Fr^Hd5UO@7J5$XymWVN8M=^qd z*#K!WX^*myI)|^_kseMEUvY_^siF!p%yC;kN{V!uP^bp7 z+NZuC9;LO~b&l<#4+#4pIH{v9y0=up!cVYfkpo*c?=HwXw4$(_x*sDPV9W^;#0fuC z4pXdgY<0j`SqmK<(0qGR%J2Y#4o-pi>}umf#baDYg$-=A$5=*1)aghLj+*HOPIU*2 zJA@3m+aqYaB@)=Ems83L_sb;yz#Mlq2`WJf+ASrSgVm?MdhuOro{w6|I9b4dTMCdnBS{b>RM)iq(yCvIf*&yJ`TM^n{a>%wjlY{AXyXKRdt8khPZ(vf6jJ7+_T{Jk?@M3_An?L`f$3Z;U z`pGlV(zfu_wa9h^WKz-i$73?Y%RaAhiAVd9$^}o47p@mx1N@@Ut+zhln5wCaMtG{( zV1X}7hGi-8)`xku%Q&a=7b>eMWt96Jxx>a@A% zBJItrutjK#0=CI!l7tHDWio|-P_?CjYtsFLjvLT=D%b-;7^Yj2RD~AOdXAS5G2ynZ znl485ov`3&go6u656BH60=romBK>>f5#eq5EE0<2Ork9tXKvd7aDzO82(?M@hI)5O zhX76-@oVcuaXHysiEy|xRMaG~sq8E-+<KvFeqaDT=c%`|>qk+GlFa8S$p%T*q(PFfw=rrYf$4Ec zPCLLx+H?dWn?b~hhdBK9wPuPn9EO;_-V&46%4zF^>_W_YRHN;dq*$CA$CKR_tS1z z$fMqSp-ZOSFj7DRVw++@3qoX_B8bV2i3&x_0w+rhB@m&$0KRtil>;mtudz`Tyj?4J z5jZU#qu!r6r7YW%seQb&%C7dS&_Tybf&3U@@C_r;qaKH|DNq7+W078j8nnJ-XRa8b z==aNA9KRNSx4-G#0+B3?`~zFnlFBF#ZX}kG1JUQu*$!BEQr5ppGffoY>34qps*BB_ z$q2_8Wh90*;=ZBGbYwVs+dSwd%m0XPQ1!=KqUnuXn3@w6C6RZMTCpTB!cjjj7-0r9}Hq>3`n7!MC-%Sf_HADnObopR>Dsqqr zI2>T(lk{Hj$Ra}wfusF=gHEWMYf6;dHyktbLnujSIx8kOH 
zYUR7@ToX$KMF^ITg!D?@W$|9)25pWpB)XHT6}OtH<-|jdky{$au;vOHRN)x;$ZA{@ zxFR!{!jaulDWcD)u>^?)1#nfcBnAtKL~T6mIKggA29dmECj)j4r&1|I3v23;3+2FM zM<1OK))Zkj|B%bp;-3wXZ=sBTMYvrY7at5f5#N0h6m&>fJ1_7AtQNR8EkYnPH?-}+ zv#O6?C`LI6m`%#sQs!@Z2Ju5qE)k-^)fv(v6KS*JE9u>#t_vwU_2rTkQ2rjeTelI} z6!D3~g$dkEdbf+%w{m0kAncmIPW0eE-dkulgRo_2uOxdK^!f~MipAB4gjj8 z*G7X&fa!{qdM_6bwLBUa%)Nt5!Eqa3rjTKvL$(R&qWvNc>7fR67z)~Y#DQCdZ~w89 zFNvG_$r5j12s4OZpX2@+^>Kot-Jq7=3XjkZO6$VSS3PE&{k`g|fFq4VBwW=5kIrd! zWBUMjK=$mY1j%%zV6Tlh!2IBV$RHRLrlOS?I8TX_0WEGX)RYicaF^LQh41EO8`ni4 z&e`S$Rqbxb3;Y^CD&5QHN(Ea~ur((kvC)_x@U#D=gbWVgxh}L7sO}JK z@pti3UvY1hsQn>$;MP7Ch|=2-fiQtXZiYAY;SH4_V&8M%~2fLHZMG&=ep0 z!jk=U40NsxLP&-T%|kZe2#%V|jgY&Vj5n=3WRXX`@I~R(0CM5V={Q;`PC+i1=AiAi zeEGV(pY2v0M%lw2bhrG;;x!Kmf>3o(;k!!lpOJYmf3T7jw=Een@(amLEG0ERyiY`j z_hCa8;p{fNTSSI;4ah3lTMoR*5H_sVmIcT|+22-B91gmS<^XQqe(;ezz=w@|Viyi0 zd`(OTDLnQk(Y0jrgcZd@+e3RmdXT=3t3cZUr=)CaXfo+t%`L_nAmQ?mQ#wX9E>M=k zL*UT_JjPpc+I|EtDrR^2COEr*Sq8j{c`tX-W8c>|dORHf(^aJl`#MHo&fKEZt z2Z=4D=tkcc-U_ z0?2arHv!zjX?8xB8OS`)+r;lD01qbFw~GX5cm$_Hbs#kk$7 z1kGZ5f}m(|r~&lTPk$JoeP;SEccKbj$pC>T4Z43(eH}vB^h%LKhf5L#yv$bTebOFa z>fC3NqP=1E1_xT=Td&P^N;QEqGPzi&!BE2kE7EiR`7)CL!hh=i@QV*o+&B`avQ`El zsrOQ0sE6fb5Tmzp-uCn8e~O@}NIdyG%r2LQyx>Ni9{af-A(ygOL|H~o5=k(Nn`lk2 z@B5h9bk4H6-#{(4`MRViBB+fBZ74VvLbXXzgT_8Ap)w_}f*_oq=T?MFzlE#s;4n;PDwl=%*)0WBaVLD0P0 zT3xR__PF9VHnky1{>c!ngmRZVjF`eh7Vn5_kl^ph2U9p{|O* zxVCvCpu4kG(UrxVF90z-D}q(&0HTwuQMg?vmn{&N2U5k*$is$KlE9D0)n0ENkli-< zLlu4RGf-9jFxonLSOxUsc^Zcc<#OsOUYUCGuU<+6& z+PrdKq=~o>%`$B#^%Lg%noe)XHRb{n^rWYLe?3GIRg?|hoPgaAD*`6UGa?Ow8hY5q zE1O1_N-&bHzud_?S?%ncjw^y)pMm11B=0V3Y}X>lZQSFu!Q*M3ei_iRrznq9kEeWw zmQYnH6lEvc0N>xk;C_W_4#_?J^{a=k0H0kPJYrb*tkM-y1K07<4AWyoUOZ-IJR zF-;D4m%fUtUCGpAqiAC;f@Wnz8ao-YQlRviOQ?b?LfV1gO})6r&GIEO1)C^fl+rOR zH^e%Cg?r@xplOm=Hat6Hf!lx>we}M;F`uxO#xzI ziYzc{0Et+UnqnhrhgL*902oJOG7EJf9=H4>8DeAG4jVT`+CEMtRq+$125>uQw|QpN zKBXN zV9)fnCt@e!8f0x-7Nl%hwFx@c@}!u8VTKb_%`xP-b#Y19xC?>NzHI;!jxDSUm14!A zx3J>;vFe78xh_GGwqA!x+=af1PgqS|E{?l?ax`YKcFM)tVoyWv8zP599nU7G3vOBV 
zb1~PvqseN)^$@ph$xa^9V}n&8ZN@_f;RDDtZnPxJu{h;%{Y;!etw2APbnvO6 zlPdxyun{atm|tAL+b$b59WK)tx_~&RGPU?BN-c!hrlzNT;Zha0m?osZXWHy`BSl@I z^A^I0IFf^UcN~jpP!VFT(|~9MX-hK097O6{~ijp22IUTkMJJ7_xn90trr)< zY2w^T6tU?OEY`)}oXb#?uLfdQ$t7uEA?&PO<>NC*%H2N3s%+e?p&g4cmnJpiOf70K zTT={3)7Mj1xafzrACT5;ThpcB;uz`yE2Lt;30Lf4vLkJr>dpsC(I8J4|MReO)@b0n z1Mw#B(9e&?%Gx-%yYB9LCMw!*N@J!4^CXDmLRQpddw6qE4+m&iX4toNU zP1IZZ?W3kyA*v!SAT_1e7CBrPx5n4-$RD8J=d{IuDmH-b*-$*y^Sj(1Y^&8fMqv4YwMIQBpeCvvD0#7H+PTSfX0eY`hSew zdCF%u_&f}g(M9a+L~-h_Ol->m>S*kxGVGL&6h{~Qe4aGq2FofDgi>mPqn?VFVGNNN zDN|zuJ8FNS*F`yLKky)T=sJ71QUr0Db+#G2S+M+3ggL?9SvqJRp;zxLHd%)QRaNY~ zQv^r~`6xZsPkdMx7NVrGL{Y0_)UqU6 zW`kh`xrM@+@ktc3!Um>Y5+jZ5V=3Xxssx&*KGL~ccGq}_(lOY&te_%t>KgaM#-<6+ zC;zsqT%!KgCl|fcid!sa32M|<)@tzzx{~5u@hP6g+?MdGD#iPxwK5AB->f!>ef=n+ zA$!S^hsevF$VMU$es`HRE{7iJkdX?u()@_xf-9hp$dmGlF!?pu5obMKzmx1Tc#kMb z(6AI#>*K3sa|QiWO_W$RgA@v9D}b+~ni8bGPc0M-G}=%XJ!9m+yO$AGTFKRb?Jk|m zjr%yjl)%r3d)H*$@C&3eqPGZkTu2Lu+qs3GL!U(uTM%jhHKGxk#;<=PM>-J-DG9_Z z8SCI+fVbZ(Cvh^2!^aYQHxHIZWB=EDEc+WMvn6{`Tcy#c>J(gsctY702a5$NjN}&S~DmGf|k*C6F z%Qkh-ktqYEs$>?Eb})j+L+7mMv<|`mX=T%YK7dgKSE+X>Ap3PWCICT@Rqc%FJ{SA2-tk(&P98 zrB@UsAPnw-`(@6e?dQsIocD2NnPt+;2@LqAXVM&d;mcc zIqIqI{*U6amosGec9)|d<)PWT*+rqGvfEC@*b{rzdm+Q1ag@DMNM693+}bF>BQ6aH z26}(N5Y~i3&Pinkq787J?DCFQq%r7)`^0Ey3lK%kG3xp%)9^Oh504}PNuB9OPEGX&$Rcw$MS1nBQP?5gHIoGj?A%3;in71%lHq^h%VG=3%V9xI z5g{fZ4n_=8-j5wOZ&GAeWIA92wm>n_1}ES4%Z7Est<{Fk7SO^7C7Vl(Hv2U@m{16w zGiSsvj{gU7)g~xUe`9G*-S(ekDbT6fj{Sb86_R(z{#(_;1gO4Pyr+7OSqL>8vapC>h=cFUNLgn z48Gr$P|R2Ap=u`|YAS{e*RAPLwUgIamLI5sqxd%nj)cF`43Zj$x#hmtId{mC zJ%(4(DzpKg@ViD^(kdwWu+pc>kvlA~&T33(%^vonV>@o##LDUuS$Ivi)z@#U)uthU zlW0}fttVrJ7!z4cZ!$TtF)vD=;EFq{BC@W)mDL4NNgJ?>6d8mxgfK*(EtC>>DW6#G z(NYG`hJzi%4-^2!E`^qULC)9xR(EKU0{}9#r1n% zHh&k6%A84?X+8jvsR+t^0%jKBJVYjCpo;KuL_)8C)d(ba2H@qW*bspc%L(gsf55j3ILx^kbh&$R3S)E2%R=w4f6I`nGnK($|#cM1$<25J(XC<5@Iy^l}lZ! 
zs8?b_X$}TBVWFj?EQQ#aI(BxQgE=542p37bRfLR=^Iq@&MiESk$w@momIMOC*b8~e zVYNNNYvU`242t}~INX6O*R)84-7v$Q8&Ta*s2F_PRMT=hhzOH8!fX@HKvK#Cii71Q zeY@J#Py;DT#}ltPRR47EHrUVuD$s~llzBcB>XwISYbDEm;`?lea3ll9$G;p@b!pfa zfX-IO)>g^*h|$-FLgz;aWGFSB*HC&(S zK)gCV|Dp)m(44dfULg6}DjA{pO?9MZC|WVUd&M`=L;<)jjm6fM6#hJHs5G?%;NPZoSlw0b1TXAuG^w2-pG45FLt;RRRN1oQ#c0cQf2(pUM|^B~j)ZQ`}gNrw%sWw4L!`8)os zjyY|7O&a55W~n@hyONsS<;ZtNQJaJN#w71V%h?X1=ktzGol6E6S4yk*xs4`1$NcqN zT>f^xT<<=UPuoqm^YprUO6{rBTN}OAO!D=j_ouN>Z1^WpBS*#z&#HUG)5}mwxzyFA zM!jB*)KRN$HR5hNdb>wxks`WnV?~+l>``haAx7)Cc}%)~_o$`BSA^DDr=A%hpS7L; zTE+W4%VyQt{kra}o4=W{T5opa-m*J0VVH?bSr8e5#Z=<66$W2yL$P zRD3cm)$G+l2+bXn)YIIfxAH0Di)QG|Y9DbAc^|2vsaae1rgSbDbAF=pra@+H)V>`j zO>Iu$VyBdBVW!l}2My)&gKD@tkuUX{SE0N-n%q$-)trma_BENSL)B-ITGP#{=RNIT zo=T@RovXy06y4G^xqek9g`Cp*?weCKe#-o}GL;DL7D_))?EBllv$e~^YiN6Fc5Ua{ zeuo+)C~yFTDc}OafvJEX6pa+|03jh!AwdNOdZ6I|!JyD!ARHkFnrI|r!J=Y-+`b$A3k^cb-tm#nK=xI<89d2oxHodF4Eb{f9;%~$(uIzn`34_Hd;1j`5+Q6x^bF>VAk{Hv7Ib>KEgr z)&3o|WLYg=w7b1Dez>=ITWph6e$NEI+s?IM(pp+eYat+EB%d66mQp_H^ThJm zncKOUtC(u)dZ^sVB{F)uC#GX3cUg|Jl+FIf$3`zDIdSo{Uw5(aAr|Co{LNnU-0fn^ z%gu+LsqKj(u0G!7u_D^#EqY?|1sfCb5-(9k*Mx7CTHm(fM@wvpKl3@CO){fq{*hy?`?IFxv zXtlkbf1mG&I(TO3Al@>3oVJp^x|-~jy80;f zMo81u-B3c9Ovr^KInYD{v8b4kAV&p+;(-WnwHt zLkpq#BdbzJ_cO^051m4V1{$jLaW{s}h-o^xMwyptVFk zr_9_!^i&ay#BdlJJS2z-h6VLNL7^rj9DwlZJXBB>6HEmTOaudiMMQ-}ig=))j>Tbs zkW}L#F&-X_1&fFjVJuFBX+UVCHw+B~!XS;t1PqD_!vFzcDkK`JVGu)|Mg;@XP!bpd zKrS946a|B@1e^*9C>p830AVO3I8cBvFeISGV3<%99WXG^M8hB!6%#6`z@SV+0tiKf z#)E@HgQE)P{j)ynCz5=w)SA#5pKr6GG)FCeqZT2jm3WcMNu2={>XEGv0fW*Wds5f8U77_=vf9 zNvh>vemefD<{LZrfwZn4$o;1>Bx8ZWqGEzIvTvUzsb;}pfM>(Yv(vNOyWaz0Jhuso z`%5sf|73w*lWo&~`^5YFf4zVE1JTEX)zpID`#pc(-$nOj_wD6&B0fv)^!7`1e{9?( z{+H_O%HN#*yh-M_`}wl}_1nKS-n;KE?%^J8t7~I&xXLN9>E7s_y@+b8=!pO7?(WH} zh>80qE8Bg8B{yoKj@bB!zp1H)_xInMcnyZg$?4^Rc;0{e<>xVaqpu&nCU&>Z!9xY6 zh{=Y3aa?5oy=S6}%)k0?C;$KsW`+%RMTZOF7Q!X9_C1NukE+ukH2y{rAy(ZdPo48i z_2!>fm+pV?Ey?xsTfXxo)fd8bZ`-*h-`I_R$z7khPfwiL(H6h^^;7LzzmxBD?@g@p 
zFVRQ#eoofAPrW;F!{vk--8Q}3yxAW)+Q0k$(VYE2G=t>u;xNO=p?)tw-lN{RE!!o? zfg}fVTpY)79LI4S$HAarplGCvBXKwaj-9acDca=G0LTFe5CDTzq02-b>fT3XW4bIE z?NmfWoMDX+VBkD!sofyze0Kht@JcDj@!_dfWfJdyWb8u+wR_4nQmjW!I5QQmsV87j z5n49$l1zJHkW7$LLEBX`@=q`|jK^G_+e~8mQw>*ujm0{OCj!P0Y`QvDdLx9#d={kJ z4L`!+(KQj=u}WDT&4puhK1(s*Xl1i+UdT629O%d6!%PA8Mc<)BicB?a>?BM214LY> z{X$YlZ_;x=_;`BbGh{FE_c=18-Z+vKJ&dqHD_Et?$U1=G!&v#4gQUcT-r@An6W@NJ z`0q2R)+x+KD?*wOG_VOb^a|k#F~n{%K{bl)KSo-EQ$GSDF@k)}44b-|P`D~cQwR(fHV@4b8<#0}3Q&_n=l=67CN z^w1jVM12*nd(!;9&&jAQJ|o~^Tnmboi$vSHWz?lXvg5Vi?-aO+5szR{>`&o6 zYw&Cw?4p?mrq{v>OdXIi+*3zsRjON}cJ?BK<1&p3X?WO;9vhRCBSs2RQSlCBr1!;$ zjoKGU`vqBla0=x`;s8z6N%;j$>LB<-6CLWV_Dn{IqMcwPi(LEL0|dN5+F!wpVVUHc zEWj;3Zh|@P=yV!tXS%m|ue!BvpwC^0EU2C2RyWkOc)l8^ZaF-kianX+`wj?01~ zK}oU;yI?wTMmd~;W!gFJM2VP8Qj|gwLVr4;Arkkg>t}+1mE5%fsB4&D_nBLul@4>q z9!cHGw?Lc5dmZruR>h-Szjaj5<3m>(Tl!z7u0>|E7T3s>YNBmX1w*%8?-3*JDF;jX zFNPY(OmxbE^BZ_cPy;APUCECk*`UQafc)N~v{Sd&hZ7&-vVkdAee zhB?Xhro!xBA7qMCRuU1I5Khe6H)RTVjMJ&i^q;HsDJVmT3h2|gEoY>*azqSp-2XUn zAeF5?Tp*Mj>7VYLlm6JSN`VD%DtI{@cjj(1q?MxuA4AUzS+hdmk}Co$|N9z%tVlp3 znL5pUhHhGiFoTT3jvseRTDtSNMUCw)9@S$gJ|km>gjq@C8|68}&D)>da~=uDSCn+l zLbrYgiiu(aa}gpb{@!FnVzNwIlod2pV^wIqaIdO3*D0yz(T{{CDSW&4rBAbtPJ28- z7)q8NRK;^^0m*EBt?h>UYa9m6MDw5$CW0nVpYWN3Fk^X%WEE5(x5FgSY9B@c@w#Ko z#%k#BV;sjTaXsEFt{Q18ygM57&PYi*VGhMDwa^wVzN0&ES?_I0b>g6%_5 z2e})9ZsImoae1C-a(Ifvf)9!XcJv0N45BwDj-9SKH#xdlEyCVqHeMEQ9Wv<%Y$lu; zY}W@`)h&gFe5!UW2&$)@xBH>18A6cju6R9SXD~W->O>Pyhbmx^hN(NC#>2FFk>R}O zGJ--2o^vXjVH4g4CKN+sc#p+eF14XKjO%~%V?_3K+}e6Gi*c`PVhww6pM+;6wKbmg z4U-D4GG{uUigT2-OXeeND*4xU*thl=#B{l*ROV(iuo?N$@)#iJ2L#U3y_|S(A#WxO;vX!yj{1i5vL61d z?FPLvL&_uPM8Y|wJWSM(3gMzSOzDJ;Ah{Yu=+J14)K6SwLZ#aufeUeq$mU=dVL#2O zQp1A*OtkI2{*={;ad&b^(5+qj9omu05pQF z!J!4VAf%xa{m#X+oUM(TbCihd;AhlCW_WQ**1P_I#+m2i^--b`ImA~VXmn;k)H}m| zdJZ@P;T&28b^{5I&SgcU)nEqbO{Z6sQ|Kp?>1;a5$I>sHjOcYhl5`E!ROvSt4*L;wJIm*t^E|rq$+^W$Ww`ZX z%@lV)TjTQ8V6ev5+dPZ|)VRl^SiAybK}HROZKiRH+$Qz7J(!+W#m#o}_ng0Me@ 
zMW;Q$BDt*4p3$|XMo3H6ngTzqoO;cyI{d1NF?TB?`g9IB@$_kWy!Fxh4|hZYGe+F+mb}SRU*F`9NNkS$zbblaQaF;;He z@w}i=UU7^9S2rigQrWS}D%0fQVf#$8OOspT;f17a#ftQ;WsJ|X%%N-I8g#hVSy`#l zb=!#YfM(x2%u^aDVJyw%Zt`Rq2cLmBm^-dHCbll(KAQ5Poc83%gEp%&(edi_9uU%$ zPJN8Xhk#U#LeE~SLo)J?;9?=Hp(ZDyM1>TkLX5AJQOOqZljEU@igy?ZEfWFi<6vTa zh7maB_?cy?9-`1gt}F2=L7C_6@u;2oKb>Y|>ZTNJGP$AY4JYHeFQBFN^SLu4BC-f* zkbOA+3wnoAR%&vg&fBE!Pf<|9n~|t<_duciM6^<^;pB$GoX)GaX?gU>glRs1Piv39 znAoIn+GK)``JLt1zsL~hB+6cEIFyHh>`XLXpb$q> zvIYDb7a%JrSfzt%ljFqWif8@FB|Oo*P4M9VZ5JbuJVVt-2Up&V@R9y{o9hCh=qXC` z%vW_rgz*WjP+B1X5MKJzCjYccwaI+GhLt?Gz@opXs*sfXvTC)~35AH+S;JK*gjNV+ z1NX&)Ph92-5xFW!v!aa6P5xDGBK@Q`6wfB#yCMH)R+vISG;TwaxNdPpIU?F$XcXjz z*%n+Kzlqw!bC38@^?M|tBlUavp?AAzwr!&fW;v^S$&CkD+FzmX1ohA zp|ONK?iG4NKOWa2TMD-D29_rt*rstH0JRi>M%qxTkRz!0NRZDo5cYH|m#n;T_p`sb zM{yv>N63!m2FN6}q5L1Ez{J8l$;v8+YrrJ<7TH~em?7n{_)ZlXqN2IP@~sA-MvBdG z6@rsJ(AW}AmZvHio{RE+lgC83(NAADaMXfGq4$pTf~Fgr5^`D(51SXX;n7pfzHws1 z+RgK^z|mrQgoP>ip2zAPsY;H`5^jZ1$VMSN;~<<_74AdR!$hjFEy|ezQK+7iw^MK} zgx?9{HFctVPS%rXkTb2kw+GN!e4x_sAw}>K44n)k=yDz{C!bW503!kToII-)a2+qI zA_c#0F1$`qPUQ(CHD9R$mRJ~)mRWlG;K_d_Vt^_+yk{X)P#h>u!8*}Q1u;y~pek+v zrAD4cGgT9nEDiJ{2&gb+q>u!Sl~~;=CDa*7lA`=EU7(34CrQE=Ji8DlXIm&Uso+5J zT|Am`-rLiHIAOiuNIWi-t~hX_;)^$n!$hR8O}#Jhbd=^B`2h6-e)y>5-t=STxJI45 zwLpcGf9Hkn9t;6Fo+XobkI9MThQ48xGRYhUI(ur)2-zZdrwMfCQ7x1Q5OB6zj5i6_ z(tt%bW2`6l_JESSzhrif!BNiL(R|15_NwJLB z^PD8Ze0q%YD!WC2rlOQs!Rc^`2uBbkxCJAuN7%$Ff?|An`T{_=F`b}r8g=?c36?XG zbd1J_QS-zP{y-Cuv&f15&|(_#srE%|2cD=|`5TgBPikP%QCiy9} zhH#IYQHNU~S=OGxte}L#FiH^A{I0;377e}f)F5boeDN}p)k5eF;sPNjO&mHyM;`ws zz>OOU7r)c7nWFa7dtSv=ZqcBYyr)Q9e09b0IDKU*jQ#;5Ovwp?&wk_2e}v z!jl4u0zjM@^NFH=0B1*hc#FvZgl7QCJcx!Efu3E>cOWE6BoCwG{oaf_1E?JD-(kyj zAM%SlaQ^;PBe7!(fGRkPbF})gPP~W_+|)%+j!VSU6cbLHT6h2`O~Dm~dWxoo8er&C zD-8#lY|Kx^)U+7i6vf8HoX^f?4mp1*hnbh(1ZbIpO_j4sM+T`9GR*@mjZpB-l2Z9Z zK8IW~PNGW{nbqRaHsy+y$Ip9XYyC^H_iJD<8RlcNL{TPey9TJM_ z&weIp{naEr{#P+BD3?B#uPBvW?SV=BWg1k0l+ZL5HM}iYAOrF zliJSB?R=tXMJ!*Tf@&3w>H!}Dxvprn=%J@X;2se#qpUN?J)AW8Y&5G{ 
zsS>SHlB+ia2W85Z^AwTs!&40I;uARI$h}pN{&ts>k3HI6=iR)zNp)6WX$?^zk7zHC zKr)CY5YdC)hllN7%C@7M6OeeenVsZIE1a2dpH!SLuVszD2 zx)0-PtiF!>?F|-7tI^y|luVj&=+LnYE$uVsx6_yYdrh*^2{rhV_B9tUMla*t0hyef z4WwB3ekR(s>AY$w=5<}YW;6P1lr#os=Ln9#8qb|*5T?B7c}qBn1Z=&3F>y2mY-D-{z&hgm1 zXJc4JR!two4S_JU@$mUZDWT_SKf}HqIM619xG?(Ljb))2kicdWd8*Q;T?7jhB2rx; zuS`rY>ahtIqFzx76zJH16%-lA_^8m32N`#sow8TkD@o@}ID%8QW{_-Z61JQSU$K0c zEKySG;JPJTi}pk@;i19a@a&!_kH(@X;Ek?G378N<%o<;C}fnaG~FTuNv+3;>C2}1NAe7PNAnNYI-e3D{2eWbI3uaceDnONzDP9l%+Mlrd>qvN_eBU z6lfhUCDA+A;FdF=JlgYla5`Tl(eaEA`1CD;FG@?uT zS$8Flp)wfjD4&QZ2^%x}b}pF?A&8P&EWkN12LX4->MH|1A(0V4LL}2=Qn!u42_aB_ z*;^Tchp!V^q>A3lD8cVLJ@8x;DEJkH>K-AR5QP|q%dfPga8uDK#2U!h+Xg-At_k6? z^AwugJb6|-4NnLmxaO&l>B4qb5h1Bb)D)3d5;YVt-+)HZmknJ@?V(`Oq$EP;t!k$R zE5z1=7{)d`6w&>ZiYwMZ5mO$6#ZB`LWk_JIMM(i;n1i$3+vu9Y)BVW} zNidggEr7gY#*26@ zs)FvlC;`b53CB^n$z!pPtd$^6$apG5w+X0Bqk{q#3sw0Dset;LHi zZAQ-HxWMimL{VNT%50-(`OH6S{U8iPK=L{ik)K)A96~3Mn>kWE)@})Slv5fdXdOpu z?SFh3G20vigdfUBc6S2Do!V{Nh)skI(Lt9gvR(KcJy`=$D0$)$6Ib3nQRjyG!kw-Z zgQ4KA6WduL+ctTzHae|%g0S56E1Ffnbi=6jorSW5A-9C>IDYzxhl9yB&|&3+<3yZb zA$?gC7#bfhWn2YV3y<$e;}jh}F?5Od;J72Z2S;@8*p1SjJ6ONqC=#F;YSM6$F2o44 z*p+NXOlU&iZZo5ce3`A~?qr0e$0%b;Bd(Qe*yTlpBF3_WBgHWH5uY0n%+8>YXaosN z%sL}VsWPqV5Z>#MQ{3q5Pm2N3nK6X9)gB>88IonpS_90UIf=Mh2uW}SOPzrDnO z{px9YS{^M}l2aiS6Im6d%?_`8vXDhtY1cyRX_L17uS?}k?#3ZSrSPh}QVb2?$9{Pe z&xWI}<{0x1Y(>hB_ckCVbWpt(Cqp{Kk?RCnEel?4n;~1->kdHtM2myfMrXifyPU_4 zcYv<(S2i26_wkLu$VN9oTXQQ}7^ZY`UUrhp9E}n1q>U-&C9z-+M9aG+FDe!@`x?e{ zl1TpVq^_-nqY`vyx(iz|yvuwA>`m17$@aLmiBdSf_xz`clD{a|s~rUMd-dUnH-~L?{r0GOBq( zu0?h9WGz#s5hlws9+sO|JOXjh z8CQf>QHD6oHO5Y?;Ip}7y-_jkJ!If~1?DP6w*djNYTh_mVq#_<%uP$N*IM4IEZox0 zIYzmq_(p)frHN6o^%h;wzeMYdt4MpGzuJ!-DQ1a6vIvmLV22r-sjSY(>TYFElujzt=i9^&S*;ji10#%cVB0`I1e8o(KnB%I98ZDem~b~ZIG04o?WrP2M<G)Y~sq6 z#`KJ2pAIU>a5-bBn^2^TvK*xOX=wQgX<+ZdLuP%gsv?T$$VQ@$!U5MakrTZ5bZ`+N z$+pyn;^)DDrPu~gLIAG!W>S9wz`TdTL#t`qUky}1halz)6i#(lh^khi*R_y zo0Jj-pE`%`YseCJd=<#2m5H*)u3mW|l(uyUar%(L#uZg;%kyKKYBxUBH^WNA$$J<%D6R^2y;{6< 
zgZWXi7Z{@>bf6JL@`qqCrieN>TD!qh1T=GdWtsI6LgoM-`$fYsLU%a^1V@$W1h)w> zU31&K#ds}^G))G9S11#l?ugdVafUz%#fC?6WQO*CK+bga=Z`X^A?2XA;Kew+CCw? za3*YKgnvU*%%4*|T(4SL2a?P@)K-}#3vu4T&A7FZdCXMXtSQ*;vTKs**~F>vn&|nf zV@U}^ZbrYd)!17IH&}}SQqfUlREmPV=1fGC2W5%6ww-=Qe$Xdw(crszdoY7Zaw2Qd z)=XEK)kFyIOrH=Eg3buxeMAYh4pw>b7upq}#@| z-*VG7hKQsW(2z-|ql(pj}q77-Ia0m|2P(tgr4O&Pr6SMgvjh zDp%d2HgbFldQOD|((TSAcW07cKUS4 zzfqeg^Y>sjxr z5;W^-TamK{SFK%vFo^OxnH^Fykf~FSKc*-~Es0i|nNGStA=oL`qExv^YZ%g0&c?o; zjI2y!zGY=t9NnSWMT7FnMu`gCgecFV;81QLvp%*SHyhwP>gj5D@vb+xiXr69VU`t0 zwT9ukvh?tW@__EP2rEh;Wv4vAbd9!y@x`)%79lOh%eYpATnFqQwk7roL2JvVSqx-y zN)|JqF4@^Kb7L4Lb(X~@+*-j<_A0yqG7|B%i&0vrfIk|K%`t9eNRhK5+024$D6h9k z9~u!6rNI~t&!YLyb*ZDVEDczqoZn%vh0Cy<6)XF8Q5-093rcr{MwWDKhKLDQQMS43 zy(p;d+yZ$J&Hl4$0x?4)Zsz zT{$XCT$$Z^Pk?trNL})u5K8bvghxgicV%2xC_rtEDJ5!gWu2R9k8{Mf`{N)ePes8v z>Yxz^9Atkc6%2XhYFobxz@@gwK};S$q+ShkC>?9E16B@vF?StI8HENM^?}Aa>?lr> zSWljEHhyXIoZA%*qLJe5(05ini2RZAGQkdzL_G+3+Z}}4M%o8)e_f`t#Ly_@u%bP# zUJ}aSN2XfP^}%!HP7AVV$1VB+FuJxDhQq?2U9XmVmH4p`h z;x<5(dzo`HYhaaEKptycHI|QOy`dCMouVC5ig``_Y3PVTISgOHfm3pa3Y&PBKX_PK9foV2?M-S%+ySBu%_? 
zK>gYv<=|5Vx>A6*M3J1H1u2KuNpa`om7f5UHD>mG+gh0JK*^9J(k;-!=_KAL6?9xH z1DSR;8kfl7lHlpWtP&^N5EJtzZo-+q3qejM)skiM)n?LWvoH+eI;#bG7g4TFP0!(C z9}6ro@F;sH)Vzl{PN>C*vR@v#yiD{GSo@s`na2XhB&M>gKx&`OI}Ps#QXYlEJcrwa z?hs|VCLuNcH_FyxJ5rXE(ApS8&GAUN+YUCf^tgtr8ld4=DDpi)d@5pUqWpy?0S)2r#$g^-`c)mfhZ4*Cd#Eyj^=VT$T_cJ&n2o& zBAqT+-1N6mx<7xi1ul%B&>$q`Swc4OJIOW#>g1K4SUVKBNta4!U6quJwvj)Xo4N$y zi_=MPMMZ@(6)fe3NT38n#WKmR_)Eo?HOfsZ+XVtbiF@F9Xtqz}R~Nu16V(KY(u@|O z2wVsWeV|9!7iogAWei2r$DzL5)-ky=f7ra>g-$|`B~A{o<~}$-=WfkI1#x*#BAMx* zuXO+&LdFS+^t;K6n@c6BBHHF+Elyr(V+oLa47O<(&*3c72SPoe?|~4nkdOyhL3olP z2WjK7GO>NU2abey_2aKC3<|YJteNLoBcH|_2||LtN_Lr0njp7XLZ0X>Tb91y8fjaE z?jZm&7VvI0w2F~4Dw3s;%d|GrxL7DCAX_9vK{)ldL9QDC+DRxGfDqmt zf&RHzR){*|JxS)YZ9_)l`3kiUMSEvYcGe-RE{f`* zn?t)$fa}$VDDM?R_7|i4^3e?-Y>as0-Z*I}`DFv{3r<|`w+c}agxnc+u*?fswH`zU zd>=emgUA~QozF3)=Yd_zN27xlU!Ep^7Wf$SpeP##izdHlpmkSxy!DPcgr~MkDC~J= z(yExhUd11T9R&t$$^{7)bnOK~ne`g@EYI%wE|*^dXoWE2PufM%K@W(UYhE!(g%J)o z^yR6_n`7@(%texwoovX>d5|<)FO>Jl`w2~*C_=!SAn*~in1oR?ucQO)y7#GP8HeCC z%jEoFcP{7LOX&6dq4cc7010^?Blh@NpyGqbIn)AnC2*8-5Jt$t?q)*Qhw=>s56aJS zhjNUtLODDXyk=Fb#TfKeS^-PrCkqM)BO8tb8a}P;q!!%n|p*C&qAD%VC`# zglHAS+$=yIDn~!gIF5EG5vh#6=h>C6kz-~ihLhF?e1_Omq8!Qkmnb(k*s2|u#ZSi% z2B7ajNm`;jc_^AYeQx3D0aeF@^>r{Cx*olGcD>uaR6oUazGpImmx=bh;tXqX6S;2qF8>3(w1Ss(9QVx zI34pCNmifv9)X6JYg{(m)I#AyUxi zD4L3~DJ#LE0f;%OP{1N}7TC`|Of{ZfLeU;}_o`D8~~SxbjmL$hfHj_n1+Lne#I;#hO)~Rs_H|e^4Gn*@bY;bx*SisH?w#a_;h9UU;>e9g>3X=d;&MwO>@r?rRSA4M6yi&ilS@|MSSd_ zZq~0;%MnMQfAY|#GSR} zPKys)Pca#6+lip&EGJYu%zk;#{}o%Ps@Y{9E;usGhqIhEj&=qLYK)Y;Rme&BFd1f7 z*rD(pUW{pV<)A8{&CB*La<| za`+i~ETDvh(+S{r#4beHA`RR72cA-{rd6JJv`x$$=WlBlS&qP96nK=1`}L5s)i(54 zm&aqp$9&(pF?_=;i4Uc~ilfF+3THkXL#E@Fv4gxQOFb9$x+G> zI4`|EG`zM607)@enw2&m3|cv*vt31&E6d(6r5#2wNc=+@e!!e+)$&Q2Rx+tU7;B_uyA1Ez`}ur0|(Ofuy~6KhN3-8%p1er z<>ai8p`aigHtmoqcu2!T!AikHMM1$MIt*#y9I%E^W~DY)b8{AVicrB|i&bH3rmZB1>VS;9+%%J4lAf{#I~^ff;yTds!C7}ALyofy%H71fxL(j10rCeYTMf*}b&OG}q}b+j}i zBqL-=r(h@u3>IXZpm92qZC!GjT+Frz7Q;DKMvgY_XdDfSW*o#ne2Ag+DHv8^_U3>~ 
z4G<`u(g~>v9cs@u?`#j0b`U+o+Nxc|GX;Z{f+4HH+Lg|Mu8eALZHG1h2@(XLOl|-X z9*ACTj)Ea|00vZ5HB2a7*-8i=fnlB{=YrX9MUTN$>}FGVW8Cy6{%rOb?h{Gm>LKR3 zW;DBXK-|%^wcT7>1%oQ40CM&+XJ>S$_F-z)_05giC|qZR ziCT;k;>}iE)0|)t=Cp3CV?#b_aODf1>%-6uRQ-1t3TN{mP$ zJTD@{nSDviuNKWkeNc~kEFXjX1}T}_kc@TLZnPig@`gR8^HrxxQq+nOr7Yy8}1H+lUt{E4QqrI*3%X%8ycKZ+Z^$G15vg@C1^L#*sdU_TE>< z#IDGALoQvm)W$#(F`>6B8ihFATRPe}Soz7UXf!U(f{I2$GNs~!qoO2(J#uq2Xoc+h zPjw8v5-+*&cFgD#KllWaS&LC|?#QZ`>J*z3N)3A=qK%NISc&7I7W<*^E5gL52n`;2 zCa8kz@~+q2iBkC62oC1ykY*`nWo|8_tsI;7qFY}q@{^D8Vul^IITE3m^bET2Vfj{~ zRo^5;G)rhF+9Ie59svN5@UP$i5P+cI0SgfvL~wxMK?4p7&;SGl3knXfz=H(}AT$8M z1R4e;7$g`LEQo-B1p@Oo+)sAZZ@+w844kpNt8p_UHT!)#`eZMgL^iKk94W2JY>Cv~ zmeO7-8l{S#>WHEupO&GExG|JWUIMJwVfPEm~D`JD>)ot^aCrQ6BM7$Y)9r=u31iIXXRu~MQBrv{Uej8Ly2 zYe{jcn4a8@jX}0HW;Ir&OzoeAVy0s3|2kgKvG}W(dPg^};dV=2#`Qin2uD#q?r7!j zlI`k#?rJeA8pY(Cz-Hzu@ugi&~~qo8?$%Nm(e|JhQSQYdZUh zwTHDgNv(B%CHVBp-aC;_gs@wiG8O|u7gLrm`d1wk9go9+L?f9t`twV$#ud`?~{;p=zHOab*)3#i$t;H3T zLg!M&o=_2y4m;s=v%*tCgl+tmwopMDVH+j+!r^&7(zvBM|Y(u%zaP%R0)c$=c<=toa}Q*Q*$= z=UARLgMADR8%aaDEsw?!E>%WBacM18ae33?G6b*xPTSy^lIB^;3B3}UV?Pl;afjce z##~rUS#NOqROm?M6VF@la&c!z=%Oa+jHu?&Gp7n})c2p%94`BP%$11hhFwndJ?7No zT24-1m@=ErzM7WU5T#r?v{a;<eU*A5AezJeoSnFDfc(Dw_W};wtnt0{8dnRz*9H#vvzZf0%{*G$%=)tt@L zlxIwqz#8JM!dP)ab}FYPX!F1aoBMlkzIki}A{vJ$y| zY6n@<^D+sgLn@zKo19cxf#Z^^k|;SVCz;12Ny=NZJG@EA&G0elvE|Vn-CyLl;}~O2 zta5%tHW62{Qf-jRqLIB2H?OEt9yB#ld_4rRz)chQG8knW8Gjh}7jGBUmS|Ddh2HEq z#Vo}diLHs4C`*#OXo)B*gC%b0q3qIi8?uqvo)?P3D?&mRgoG?SJV>(w2hRq_1xaO4 za0sV#O8IU4N&Z!eE3hNy5s_FG84#c)C8;c3v)4@^mq9-4VQ&5-^j*bN#dEJGw!c^cWSkS!gP+ge1qJk;J zC|jXZnHUvc!Y)(B?8G3-J`|5iXzH;#Z2~6d4sYAjUA(e65Ij0T1xqDHMpQ0TZs((S zDl}ycx6mG$r9-G#bXG$hnhHSbKomSG>8L_b14tE!nnyD_G*|FMNmTMRObb9|&~b_S zHCqZsK}UssJf+M)q*-}sO-tQhJSp2|);R;LE@NRjoa5{dQ6j7ix~-`!5Fjrf`(i4K zUJ}97K@=>Cc0{Yl?>kW^kOG8)k0ufgjuh<)@97RnduUnZWuo~&O>n9pY8Fj9LN3o= zd`h)Is0wSSMy925p+op+=e{S7azSAH%_15=8UX?YxbqZ@zcE{@S|WCy1s&&i3J{z7 zJuO=h#jzL${Vs$~HAFIQu3{XPCj-5Q-G#hLPbUyxHpl=@fz@XWyQ;5WBRzvl6?M6Q>q` 
z)S!|K5G4%7x$t;keA>enR3w`Ct(c*IeEB~C(Nkp)zp84J;0#~N+B2f`E7H2;tox?Q z1A;_{q8~=_UTj`Aq*-I!tBz4l%TYuW;>Etpm7mOz68@YdJ@J8I>*f1gJG<7PZBT$n z(2Jwg#+gcW6h;+zQq($Zm~o{~^7+cjXLLLcGnO6_&q$?>4G>utsgkl60@2Feh5!y* zsb#%j5e$ii(WUs!Rlv>YN5ew5#3iKSa;7bTMLjJ_-1Twl?LAy=C3;^R-|T>zUBM-wx-n}r z`hV72iY--zhy(1zo%7$WZcUWYcVgSB)m!E1Y-%w#k7+{-x`X|n%sS0oMFksie{1nsD zJ5(3Vz6Up4a3_jbvPElWWPrA4T&I_-B}X_1hCNtx;t}BRt``LAw!}m5k%5?0?orqf zsus%ql=b{8lE1LV=JmoXZy)mv)=xoQcF+`M@I$#pGygRE9Zfi+A3uPJVq4+OcNPqF z7>&2t=y1#lHZ)cqQ|V=)eNNP}W5lg8AJ9m#bKp0RHqzfT9G>KjU?+!WBEdU!Nj-K? zULzU*u*Ga#)fA^nq(h6Dte|?9(oM`G;7zSqcxNM-v~*3!8Fu))TPyV_K{2Vt6J+{uZXh64ZfGM*! zh6g`QZ~J1jJrakoXrqZ@#g+o55>7s>b%0oeZN74N>QY-V&;3rdSyD2M$+*P+v0zZY z00AXzW6XwH8Y4lbmSZzX>DY}v86P^;T&I>OZbVfx&#*zkjJDsa*%*%%!zOo+)stIP z8$v}KI>u;~5pFxxd35@dhb-vXNAHZMVCpDLXNf);vpuRHCiBsANVO(I_vA9`Jmo3m%}zNbwzRGst6-~HsUC{ z<%TTRElF@*9k(f=x~P>e5~mN~@!9wuNk;gFB+D&GmoKGac~0%!{rhK4NLG6HFH?uK;U;bsJ4PQsd!mpE}sN9A*`KxPhm@On1x&Ef&YW#b!=EEb`Hs@E;fNgS9t_hMsnK%&^!jw?Vw~2jY z_Y$V}7h$Pf1NX9&d1G_r81O>7m?@aJpx2Ej6l8F&fP*rjcPu`>Hjr^u&)n!BTv_JO z>C2rd7ygQ{nbY<}<;&~r72bfgx)txcWZSWuEr=z`gsdl_0c0W}^Ly}&+PwgJRhK!& zHSR>8M7<6}NZx!qEBS?GVyP=}{cD8)WPL!68F#$~WC^J)Ix{h4p?0~;2Se3N>^ncu zSP}fpM#Ec0r~GDIENhv4?Yv{IFMJ$!dw;(%x3aRfCTJY3_3#f{-tO2! 
zQg^$1dqr;2j4KLzAC`q zWOfm`ha-i-+H}x3(Uz~|h-zJs<0bE$;iJnlD;sR+wW>Au9-NBNiIloh$1Q21Md%+- za-d)zwjYZsME)L{SAJvEzf;8Myv^n=zX64>lwtI5aMLJXBoOqKI5U?~Vo)(s3<`B0 zXWJ7NHD`Wz*rf6sq|lu*kIoG=O*xIJbWZ^yy$8*GxN+)Tq>2L~>Ht+SWKtQ8rP4i6 z>ff9h6gqX(_C{1mN4z0sb^t(9)&QX(Z3Zb>Q3gn}L089}md39R0ggC>gBr)r(h$`F zi3p7_08Mn}pQQR3f39PB1Mbn4S6Uns5i-bA&`#NAb<$HV_AUbZP;OijQWmR_G}>c3 z6-?715J6rkbmQx6ed!`d0$7e1-ES1GWJv|R$Wu-9M4%Ay+@}x@Ah)8rhavx~U8LRx zRw9R-5;pffXf|#?g={rlhr}FDWbdVhpQE9@WUx$+RYPicm24s_R(cvO{$$WYP8QQ` z#hMj}jcv?ZjHf~G2Ni*NhKA=V z2Ptv6%Tp#Av3WK=lQR^@Z-~qc;E|&ZH>rZCzmKL=<|q*9fO>Z70i_@j)|^dR_r?HF zgpvghO!ZA1qd|T08@q*2_7<|(eGq+Znq$^7DpR4<9|=QgV~IrOV88oQwSZCqg=UWq zRFO-^J}n%mNnu!H2JjFhj*Xd>LcA>o>f-RR3Mw0jWp>qf*xJ+@W{~c=?qM%XnLSHL zlp(hW22#$|WCPi0O~A3g>o1Iq@a;&m_oz+DQPZB-vA8qZ#R1V*f753|O z(Sgsky6uN(5^=})99`QaxsWE5063(WJh|Mh{c&mI9FI||b8bu)Q*Ju9r2~N9@(57SlmQu`desWdYioP7 zWT@K%3V1Uor>;=80)C*4wwaG&`vv)2B!A)2!SnI@E z)Mh(~w#)5E3a9^Mjj#qix?N2Twha2Ju+C!sX+hT$7wL#tQn^)!Z67q`ha~|{DqYlP<=-duCaF8J zpK%#)GYG$D)pIIb7Rt0Zw&xnezdi^YdxM-QYJ%9xRmK53M8bTBUR>GiR4!9-X3_A@ zf3#HbXk7K&^jg{%F~YHbDz!ffDnS`EI6vo3Gu^{Q1&kgt&fYD`04GR{ccAH$gJ=-t zwy710+Y$$t14-X4xjXvqY+}(ShVv9}_A+6{U-Y+{q!dOdvIKGm`;=WGhX%f-7$B*m z=3rLHKms7T9&O_wUh!s;?DxA>+A=j?b)CK>)x<*50^9JT{D{m}-Qr918peB?)ioNC zIpj-jB=Ci;=hV7kUbmmUP`L5q#i`)M%2RIf)w2#jdf-4l%QI?UoW31zR5QBu3YuPX zLweNBlRPwbW2%OnuqQ=EJ%&auqZ%7-<&c{ zG0~Yu+dKuZ*$#?mG)<;V)tP5dwT9jt#aV@iIS)-k&XeZeG)Ixl^HGaL-;}bM;?dbe z)np1*N{E|iS^p?Bn^Lq%uRS`=LH`_}N-3>pH5?GmXvteX$IOVK!_j4pF}4$YWrmFv z=R>tXWA*7=6#Kf?P%&DG3RlI6q6S1aOL`EWffnx@CqX~3I0-ZXvC~CcV#-mf9ehhV zuh%Uh4{sIlEL|Lpsr?l+YVG*xaQ5EcJU;p0cINnG7*A*msq%Apuy~rVOOHbErzAiS zvu4h`?aD_H%TimX$fLCSSs|GTl&Y8$U^PP5%IQekzvB^ga+Wz47sxCJ8f_lUNc1LC zd}Zf95wYxEhOM`yLuWeLrWB|Y9%^bdO{3&~H~lLnn5ajhZA#Ta+R<`E*<@-d8HJcd zEh2jJC|2QPbJ1oo=uDz*GG&^>CG&7FOlAseJ&BZ$8U)&=QMmtb9kkF-$W=Z*Q5dFN z_hmZ>Tht<=H;+<^0q`;?Qle}wb+>n`t-^!s;X_>8%%h)` z>a>#6%%d3{y$Oof3SXr2(3eJUN|88?M-4jKCKSjiOjOdSnx{@oYSD~{-kc(R8Uuwl zqD39WpUtyRI##`Do0CIK=+NT(b88m9)j&-lr*gfB*NVwFb<`O#z 
zMmzUb6dp;km9*_)LCZ_YV<7Aac~_Kgv_Q~5w~p57ZgyuvQf;!1$T26+!6|FHaUugD zujSp$*U|`gL%gUVtDH7UN4ArER|H>$I#IX)V`eMy_z!C?L+$Timf_#+gmt!%@7YUa z%68O%D4&8q#Yx>7P~PX3YoK1Y1jAX&JL;?@$?TFW+t{lLQ?<+Y6r0sC0qvFaP;<#b zG_DDWCx|c3PmE0O#$sN|7;5?j^QMpU<2|nN8RYH^UfeA4NbROOr%+glVuD>bAP9!$ zy&7^0huHm11LZW>+ryz%BA(=8c02+F9&>$4E3qlzJsKVz^o^)s#2qTgC_2?p5n=h3fN+T2okUJG&XY!CL&^6vFK7)k zoLy)99zyvi9fLftNw5W3A*V`mL5JmrS>ZGhZd!r_^ZAyZ*QEAFtRVKVoQEr1T%~=q zUe`tOYU{ef66OdjnS|K7t~j**5w0gd#V%ly>aoHt4W3)nL{Nw4W@5D? zcf)gMKs8aM;kkmO!Z=I%d}a*K)r{$jpbO7U_{uF683U*DLi8cl((H^&uOv^7^u@!C zDv4IWSanaOpSf+&t{{-3xJ1nvIFvY~XHcslp%fGCW(rD28-g-F8(rFy^pp@NxF+c) zNv=DlwSfy=JgpI@Gpf8P$+L2+ky*je^#IgU^6Ecvyx>0^*xF><{{8$4&vUl_q#?6s zlHvbgy$j{uL`qjUw_5wp$w>7Y7llAHQTK#*0KDyYY>-mOU3?tS#^kb5ZTA1IC*_@N z6KrSG>`DeHR(7B5KffFJY&vq21q=*vgrLmA$8?@hiDSeqBTF0{h4@!7$v?&r3TfEj zjj~q|<{I1?%FfzMOr!(oVeiHS7$_xSfA@4l4+&F0;zDe70%uszmPahx88o}tm#K9X z9+FOU@B>e5C#uu2OUcDVX%-)fO(Go0U?NPdq}Sk zln*>Q*`#A&cTr9xYEh9D_Od8L=Mb9GI#aylM!A__zR1waIu}HKb7mGb1q&dBN+I52 z#-~ABgXFJ;(B+6Ttu9_Y}34@agtKOh;ITDo`9ARX0MbQn+s z9lD>0Kf&JvhC|%|!9%2?d%#TYRMr5ZQD}6OBy;F?J$W3h+DBU-F2Z4yHki5?G4_I zh!e^ied7+jg$LlUxaz@hjucUrH54@FB74KbD1Xf3G*%QkHm2m~&jh&@Llfc_HCK(H zDT?4Y=P64>Vx3%s6*J`~f3X!pZR|q|PnKp=ei;YnI05XH}cV!q}k}d2T7{f_Xge;8cp6B4#pX zFr3AetFKz|wS=gwi@@KuvT+U78&fDJ38t=}Mo525bvQ+-eZBU4W}=Z0o=k8w z8*e~#AgIp$OpSX@_kw(+WXh0>rxu^d*rH^jjeR3Dm>p*P_~kyIV9RNZ#%VY7k^m)N zgNG*wMmd{8J{_QHKT&o;GkNpfO?gq`lH4E{fYGq6cZT>8)>}52;F4zHLd+BqTkr9= z8z;L=j?n^2L*+xIHdg8cP)eUdf+|t~Rjyg7ixMSDA_O9a98t=fX5qjly2V*PY~Ztf z;Kt7(2d__HoMbX+>s1@^v5q|HctnZy5+eE=+k*6sqk&TinM0j2<=`ujAn1QqQyUZY zkGK#rUYh;<7>g)bNvSQZ^y;ap1=E&qSTLah^8!>1&{N7B3QhV*D0gC6($c2%CM{N!%K}?Z3yM^~5gYm;4=HL|`gldM1;<5M`zH$+YB1OfDG)QE+j z;w(roz?l(}AGS60$9g?L%BII{1gg|cMoJ5$A{(*Xd3{gpY zgfkM|BRxp83N}Ai@uSDSOAT%!eACLZJ!e89bJ1BvO%(A9#k4Z08INy3awsS<+STy2 zTc?e?dXJ9ai&E=3poK+g!mN7#KrS@%(2{cACf0~Wp}uMq91tOC3V6JGsPuLSNR*@e z#F$*9g>L}7)?~0|^c#<;8^%h}R?fB;6=`H0R$81eq7#x-@f(i&n-!EIYKl0`xZ^i^ zAQoI@Zu@65(~3mrJpm0Ae@Y`+K#gFYY33NhW%4(+=8*T}% 
ze2Y7#^H>O!46y{dYN=AIl4OJVSVks|;TCyoDw`_++gx=lf00ZTJQv*%#Q&^_WQ`J@kx-y~wX5S_5|eN!+31 z)Qg^fOq>IVGG^ZLGx(w0K+-ZeC7^sA4pSD1$-l;U$-7vZTy9xrTqFBFeZHu8GvmHC zN)$-y{YS%?_olw?lW4OvnHG`WD& zJ2R&vD)AXdL4!?aG>Ls7N~xf_v=q~RE*4Yx^h{HeI*ekw2tcO*XBH;){nrM`L*;}@ z4ELQ?53W=*wVb&j>SRh4OEfqG9z^AOQ@qN}TxW20w}2S6$H3eS+V)^B*9B3;)Vs2E z)f>6h#tAkKd55ZmI2F6e#_IaWsHbRPG4&wMkWFJi=VR8{-xlA$j#Ol4M-vF-qqF*X z4Ix90%Ty|0dElfI=OaClc|MYAC!tV8KP60%ddDw`aVcYqk$WAC)GqvcU z9&@XIkpw8D!3<(M*BZA&eAW}yIb>;AZf?N14aAAk0jF#N9yS_=knqGHhld-ye@d8= zQmQI^6V%htvu|WjLOPf3#U`Kj>Zw1+KOnB+c4^ zpsa_t{<2t7fOqCWO11Qp8YbR&;O>prq0q^oUTOiKL@?zPQ$m;cbnaT&fd0W-IZGWo z#{s=MBi%aq57U@XZfDYmP9-JOeN!g_a}kmQJAU)jxj@BFo=HE68R*!FnwViTpuCcT zn}j$AH^4M?Hs?!Kt6Qf6MA5J!fY*U-=2 z)48h{(?B5}ech2l6qBBnyFo0MDPH5DHK(PdPM9iQ8r^1-&te^FsMIYh_zY98kP_EB zk_;K*4_6uiaKdsba*kS&m0C6rz(YDUc4V zV2Uj{4H5_BhXLx`Svw8DH$=ohEmB@Y${&s~%>$wXtOJKl98@2c1XyTTj%q_2qnJxT zL-lx$JFoVG!2-6^7Ig*@>fq~6_{BQp`3{{u+%H$xnJcfO+s)O{k+Rpz>E!kfAUg4U zotdDLZ7-)47hxKwAQ!Ej_11)1hr14>P>~qLC&LQi0VH*DG_e9k;-ISy8YBozLFWo0 z7zhG5U|w1v3We+lC~#i!rf`V;MXv5(OAbDBgBXMdFpV zQ?A@q@s;|8pMxhQ=@zH6@+fCJxf?huLsrfTtT;!ga#^Loy?ryUO50&J1=Me4K2Req zi$E#|cH_D|o)~ftvCOg$$5unWthO~n-fi9&s?tBo>r5(Z+AuT9c zK+BB_IrSDo4I&GaDp(PNJdh%C3SM)J&=>;@P{u$~0<#7XqJYR_+6SnF2pIsKT!2Br zNd!Ouf~X;|${B$IDumNj;?OA1vA5A#vv}@>aTAgr#d!CRzj`w4F23BPl{3evHZCR@ zNpO)uRTXg&n?a{RM9Kwm=7YmdtO}joGk_00w?K>1$wY*bXUWa}siY;M!E{21P)5lK zP)2Qfr3w*%81~08$sz{X2p#r&L!Ur0VnP597NSC1ePI@G4*(DtQ*yj!glIQsCICr5 zw!aRu;Xos_Ng+INwBybNQ8t@(F;E9f>I`|q?gkK9Y{BLR3vVns0;UYnz*tzah1N`- zia`+4n=zJ-z6sJ%eYFm%qo$*#Zjw@)Hp;D(iU4aGy(Y0yi^!djdAj z&Q#36sBv{x6q2d}oso&`$V6~V6FNsTi17}!sL0}oam2EDTlQs)C>j z^#=;&C^V1={RkA=DQCe1a1#;`fk)c*o^a{la{wlggp4{A+4@l$UgHf`4TE>JM;Rrp z5EZ%^EP?=fO9@0thLnIo{aCmGiU32{<^(7KhyZZROURNR00AXH4}eL65E7i7B@Uc` z;ReVQN8n1TY=AM?x(bl5o#8xY_FBE6p0&Q+X{<8a~nlAPP?KpMI`P{ zi`-KA5WXEDAKU7OX!;6MI}MtUQ`PMPgSfMkKrR8ab@2j7_%XOjpDV`_qr>p7%D5u{ 
z`AAhK&xZnn93WGX$t*IIJz@Dzo>iEx5f-{NI5w4|!-C4)6{sW}JkqxwhL}LNokLs+7}*;g>Jx3!Kzoi+8SfSsX@_@Ekqk0nJWr{nn~279u*D_qrZ?h)asct$jHr`_VtbfyJk?!i)8 zVE6I_YV$TWsZOVG^DcMkgD?{Fm~h$_lUp&B*;i4~@tLTDnaj7$b)9D&pAqQTHaLBa zI_?lr0cn~;XR*t7@xTW~-GwDpuHFURVs}IV2JXCX(AgAdz9KX=y8p*3Q4uj3Yjm%p z+vn!Wi9DOeq8o!ngzo>#$^8p*`!9d~O%+f)vmk;Q+$qiq36}zCAhkI-bN;>P!N6Lh zbfd7=yi4hYn}Y@06P5rr1z-;Z_Sj7T#Jr8GK;+H|FK$IgXLY(?7m$!3rbsau#@GSc zSXcoN8*PNa^6ZY-q~_1YAy%R7HnF6aM$Wqf&Mxot>{>_H)(Cc+@AkI1kEhD3%egO} z_kS)zIK~9P+ct3u0RRr#20icsUrw}6Y~iXhgA|C&bOj4l5Do)ARU>dlF)Af#I+vIs z#Vz%OT43!aP8~!sP@0D&1Y$ty30CN;suOn|8FhAtgkYjTh$|_2#mHAOfQU0JacBmN znB7VR0&@_`17TAE<63U202L#`o(dKSg2ls32rdvF00s;|f+C91eJq|vgoxT;DOYQb zhx>H8Jyb2gsOtkM6xh{{v)WlSMF>MmuwulC2e~Giy$~!opVUwfP>V|{Dd<3LP_k~~ zNU}^U=sGbRJ6MBqbyJM1&#ty{Rj-lPa~lss0DJvn#Rp(dn~ zX9!x4NmSjOUq?rmW0S&GeHcf=fCFcYD2F5J+Q5kDE6W_oOX7u9l$*8bXo|60;cQ!pX716TrTs12w`ouk`30H?SLa;90np*hRK+0NQTWB5{xvP2Lq0x2Pib88!q*DSaL|UQu@;n zH+Yx<2@gYNpNPhCqG&FL^g;lDSUcJe*eu3{ftyI$CR_36;fqngErwJ+ifiEfd>oS!O7sh$4^W9-(IKB z{UoHLlTi`Tkf~IT{q4?9aHx#Vt~_huH|FQg=<5z% zB3r%%D&$)9*3o!opk)}WMIxwGrSxhPBErHdNtyOzgLoz1RxF^Z%7Y-3Q^+plB7_h^ zgrX1{Dl`QUGC`K1F@Vf%vfScs%M4ImH);Zg;3A(NomDc z3pQ40SgT<{@}x^R*|Fl;INg9ncxdRlg~n;8cVTg5Z{T3x+YeUg68Z`Y2M5QsxF58* zQfRaz6PCF$=Z~2=juW2FpSI5GP7X711!Dz_-p@lp%w_|=Ok=u%oI4JZm3VGPonQ2^+ol*lmBe#l_9nJk(( zfQB;|8rN0Tc?0%sYYvqj+>Q);*!}2A0 z#rkB_`JIQ{q&R0+XPy_G>d4dXKIe;f%FF9ohi7%NGV=wh|Bj%JsS~D0H`EzKC|Jnq z5aS4uB-CIzMnk4CCY+AF+@0K?>(ujq!ShEA-O5#wd0bz!2glTdCZ;AL3=Bs(C3B!S z1ao6P7~k!i_Nnh2IA-GEXyYn{#y1|3@es38Lnc6=sIS6x0;c`J!BIsj;|^ZRq?~Xd zaq;Vm8Kk~(l)(zF0lub01i;sHh#)keOf-aosX+i^sN9&)?2WX7tqTB`S&Rcf4xJS% zUHlsDiy#x4h0gJ^>Ks;<62`zmGB?MQkt+d!F(Ykg24gfpl*DqRl-qV`uY=jXohzT4 zvjWWyKy`9YIeRHpN(^zS&Y`tjA%ld3Qv0H9kW>zG@dnZ}Oj0bAnTC{-BBNu3Av#8} zPP_-|k2|!l3rY1ik?YsNLqx+graT(TnII>QO-kWH%3#i^(RpU#7sLf|Mnk+9%xE+T zua%epC2m)7O3cC;;g$dqnl8FW_r~5{C*<+C)ldyoybDWl8D&t_0=(MSDDS~yksOpd zI|6qHZR9Pc@*n|qkP0V%&08^oKPv(25bM$hO1dV@>otEQUQS2GTx~L&R>R)Sj98Kb 
zApT+n+zS3)iv71@p#GYR-d3D>$HxMuYQIt^QZJ$oLj6F!vTm%C*Mrru>PWYn ztT9z2RdvVkesF*HMq=NcGXI}1LswUhr|bFTbY;qs-Uw}kz6L>0*#UB z5DLUV48t%G13?S}Fc8Hs6hwk-2Ys^TYgQ!87nHWj%WbpA$uEM_Iet2cxI;s>a{65T zjMN&jQ->+TM($T~eSEry*7O}A{r+_p_^k*6-n=1{|V`ATsvr)PCQvkP}FjihQYrA9-R$Ssof z?0oX&Y;wD8f=$Sb!L6tGu1!|MkAhq*dpi~F$zzl(0M6-+bY;-vf)W`A}Op}G^ym}3J zOJaA~;s!wat=qG>g>} z)-L*W6ov0)%JLeHczw9y8Q$Pz1oXT9tE!*3($`_dQS+1dLZLeTAkfrk-;2(N9J9Kf zdaFPc;arf$ZMHrKf>!_BuN4*be^myf`BjR-;bQjpH-tVWB6W+WalA3kceba)?hdRb zr$^rJda#$wDu(3zrf*gjfQ6-LRJuoSUit5CEjIRkw5ZtfNfjJE4Fr11 zEzS_G)flavQ8~T}6#h*NW#jd?+hr)ckG!-6wo>x^nNLMd`fsGul?uRZtt%Auh-stD zU5}h-B{M4Yt6wM)tXjq#Bo+;emz(lV7p@ho&JH`<#IPIU_7O>7nAmm?D>(6o1I^B% zn{}>ncLe>pl10|A5wuyd(vAL`BGJd|%GoZ)trJSsaCslI=Jl}_2_@hDjycg~Ir)%Z z90rLfB1H@D9$h;a%P!>QQ>RkVvVw89Yo}K6f%r+_H>gk4bWjIV!xxc%i?hUsX$BnF z`-n5hf|#@TK*CqLBUf3~Cew-H|97-*M*Ogo+ z2w@}dB8)N%LT2sf)*DaSB>%)vF0^dkaOox3LOJz9FTN2t-z^9vqoitOIU%j8vEUOL zmH3lTxR}=59lXi2ZgD{dRm}w-QrcAbQs0G+sE=$F{lq-WTc-#0RywxUk&qB>IllO6uV5r!I;%J8xBY)Mu>{732$K1y2}Rq=L{i8!+tNNBR(C+Q zAqU5?)ukFgA>tvv0Q#ANB=%6_xp+D8aZW1c7F5vKe`vycjO0(q0Ex!f9k_XpAJn4A zBI~1sd}vZz#V0d|%(bg6p#IcZZIA>h)b@y8!(P{mi}5^KI1s;o8m()vCR=Ox;bFAy zc~Dp#Le&Y8Q(;h0F?9Zk3C5FYW96l8|E>J)#&>sFBpv{?Lwzu;#*Kp{VD%d&|0FwVdW*XjFn>p-ro0RGd(SnT_lX}aMrw1G zHo~(uY0jwg6I|s3-sEw*7qN%I09)v$p5U~`BN=~=e2cNU8aw)PSi;(b2KK*75UGTX z7-O&cElvB*3ip$9mba|{HE;Lbu_!9gRF!mvFXW*;UOJtX`7RK`jDG-I@Henz-a(Jn z&<72RbPwx3AU*lIrPO!96W>_FJ7htqX^v!W`$or~hi8;N9dj;heU zr-FFvvHFi^U8`p78_`KL)E6u?4jd#1sd^s6xp)Sz!(q4aL0KoW z_7Fv#InFQA+~_*KT(4@76U@8<0^YRXCS?pXG;lqL=++*$eA!dN?o1OS9-Ge9Iu~_T^96tB9B#NfrJZI7T-`r`D{dSPP!(8h^ zw*A>ZiM7JvboB2Ksn}T?F#7ZU+>aW()RK8c0eERA9i1B4EYyE}XYz(Vd+$vR`^&^$ zMj8u6l3U2$)9pA6R~q<`e|FyqaoIh${s<5JOEnjViTpd>PkSmpH9dfhC>c56u9k_A?-CI^ikyQnO z)|!h(mcm%6gG+fY6+7 zS%{1rJb? z1kmj8JKGt2y}a+Yth@tIliKzd4>NAnLsZYX4NdiNMmO9FsJlv=hb1>YKZp02vjd!) 
zb7>lns-<7=Trrf}&exH&#om2YVW3Y>4Ax8ju>?c}-+j`+n?gdo+4#DpAaqiiTeSf$ zUIx&p#Jsue`eM)a1#4y^x-U#i8dafceOx>TzsgaRn=B7MtR?cJ75Wz9j=zGEyuL$4 zAzOD#3V8)6t@JFUep)Us)!5_C$_?2ji0hHzlv)E@%#1CaQ+(MYZt0F1=-W4kB;!`IKJ4v)LfIH{=f0`d$`St+-$9)E(YPnNPrTr9tWpD?|p3}I-YHb4_ z333e5mqHT`#eUP!DH0B${{Vzh<=7v$5R+d&ax6 zZuLtZmLy_Du3r(wh+f_bYbL{f5vfpnwzn2;Wjk!oznuV_AR%~a!rW;t_a$Nio~osB zD?S`hQ#wb5erwWu;g!L&YIwy=`{K&Ud=CU!f*cy%HR5J z?)8dM#~A6&4g;}tQ~HjaLpeSA^*=ai)a!=lG&SsKyI3!|TmD-Gx)QqV|BYvHl>Mex zAGhxwzmjD%CMx~^$f?DqahR{7P9&nqZY`|26AMz!wgjgh`26j3P#D@LYSS)(2jV{@ z@tehIOzmakKS^>s8TYNbtzA5Y59BusewTzb&V!bT*$dAxewVhB^0ImaGo9iG3$ zoz=@Ni2IYrEUj!sg00Q8Gz)yWv+%X|aJe&ddrPp_;flkoij|?J*XO)&ONpfUaSna= zUy*cD(RNwBtl_{J0^$l>d%<+3dVcxC(EY5po4A7ZnsaGTh~xJ^OZ`}8m#j?&nGxfo z^<1QUu#1FY&a1Dk2Tpwm7y@s>I~;8*d>_LSG*K1dTiPJ=Vi{8ERJ5rK4Fd26P zZjtQoHrw3MdLpl6YK(m95+{#&Oee?^F=hGv@M7f95L!B>gfF^Fg0?x9*K>WL$t_vo-c}8I>i2Pv z6GN}5m)h@T`l-!pM)HP!O{;lW+u1bK6nz4+A;q9gY!TmP>)@t6Gj(oF{zyEEzv4~R z@L+%Hfq+>`D1U*x$mk?Q>)d^o1Y1?j>Ru!^bW`6UV4KOuTPW}@tZekGt@lsmopTae zJJW7XMVs8(#su42zIv(DT5v$#(ZL>nQE{5u0XDp{zqbfi8Od_qM(68OrXOr63pcqG ztxBcIORl9@TD%8R;`&e^ZqJ-soqhQ{%k0&}Z{QSdq9d>#m(h5gYqcO+Rs{U@UIW%5 zes%)FH?#1T{Nt9NM>TQWaSHnBE*N+xIlqDZ z8FMI)X^r*v`Bq+tile6d?TM#s7Tjt-bB{ocYE_e9o8OoDKd^KGbWcbxE>-6>A%#AkR(T}I<`GP;jaEeA0W@;< zmM?5GYBU&>>$7$fDQjohB0+wmp=a##ix;odJp!4^u~h+1fwyh-o@1__qn2v zfwD;L=>pWgY|aWAHh?6uz1G{9*CH`0=jC5?ILJ{<=XpME@x6sz#vfEP30e%4tIN{a zeIYGpm}H(!Nne42#+j7tuI%PCL5|uHZM0f^{vNn`rs51dmhC1*ytO|TGOrIzp690G z?0_R2YJucoL3@+qS>DevL>yV44yk9twc!JKRucHZV*Nd)rM5#tyVZH=XLzVK1K#yB zM}zL7wmi_u7~*ANdEITsvF#INHw05q|EsBeM&EES3mG|=(BK$V{9geicR{(Z7_WE4 zM+;bMQF?&6$dhTA7URaxPbo-`UdD0MCbVH3)*yN1fG=HlvU-vysF^+;r3L9J9$Xvtw zH5czWWB12&DIr{0D|j-spwTr9iEfZrVx#tW>)?blb`WoiwQXA|#0@+i_krT~luQ?? 
zhCBx{%vk4rnZNrm@Hvz)ys+>|-p`_lbD6#tS2-&1$oJ3)=iQPMD%zN}&J6q26)d>} zW83;ZZh)2VEDr;)nzVIDCcU6m@3C)OLaEXv*;u*hcXCF1xg~(a`o(hqBmZl-^yS4; zKmcKj{SwLXf%)PYzKJDq?ww)FKPv1V7zf*js_dskR>}WOPns{_0|F0|G|HOG-O}<& zZN*9o{mWxh@Z~x%?+A@<7p;9o{%3YW-|e>@-HTg(NQV}D5Ts5N(;ULvNI;%&FL+R- zcdZ271x7HPb_{b6B1|}3@#dKU8S+#wpzE-IU^GuyBB0R(=(b`ncfv8<38oSAzRM}> z6~o^^h@imPVJ-&tnt|NL+#)g}kUOl)4W*0x^y!VN%!Lq#lI97DhH6ixC_}~izECJPqTW=;YkGas z1VoeG7>au&`5-p~*@XtllZu15U@7EYtj!%p_(?SaUYWATPSU;ldT`9 zuNbFrF_t$IRl?xG>m>>2#EXd(vnx1&5s*QXtaK)=cJKPJeJ>mo6jFF!8lkLdNAo5j zuL&fe47b}w(31h|iN13pj*{*Vsk-ZvCm&pm1Hjd0O0>sYP`V@Bmcn>0xyEvusu#eK zj=DS|QPEdx4?r{)kj0cEI?Ng=E&X1WMmUbKWF{42Q7zi{s;xjqJI} z2S;>c!rzkE3X)hc8J3xl_T6R3$&0BuDusg=>5((`7De-j7L47@h15qwmk( zlncAR%#9*-?CirZ+zQ`tQG)^Z)+*w?wTkr89i>rieJwC%!;TC$D$RE6gN9ZK;+_)) z(;kFjM1VOq^`uJ*;rimdEV6`3q?nbcKkm8!F?JLw0LlRoHUp|JbKHdlul~EcFx)aV z&IA$Cs!R|O3T!xwTzH28pbtv(Ym`G%yh~Y)xZRg4QTKoCr(jqMm2mO*8e>If&%-v% zhOJ}~KlJ+0##rHdk#g~eDIQ@syuH+o?p(wp6DyuWsqoDG{%e3_WGQG=$eu1otoTTK z-FHe?^xFm^vKHYUnNNgbBdwG*zVL=hqjL%D9}$YyyqCHF(HqXC81^T;5M!X&@#Fr; z=L>m{I`nl(70b6kF8YLLZ}%kVg&{j1OHD3WHbTlrPoV8%Y?P`+h}UWgIkL+o^g?UF zw*ccT*hwGJATb)&MCG21&lo_8C6y$wA*|w{zBsEQ%UBdI2*#$9YIiIY={^6a1Cp{m znod}hx2>#}4y{DED+fko2%0{+5eKK4cK*Aq;%Ni@#RPlxl47^%3Ph@pvlc7@&^%NO~ z&b$&-wRS!2g{@$wUtU1?;i~Yyj1pxb%2;^`h?u!U5V|8t>N;S5Cl!|TS+Ol3lZNfc z%1^lN7XIn*T_08iGq+jmtL76bxdR`-Vptr7|)`2Bl{zFw- znXh_a6Ug?T0%^Tru}hX&#d1S`pS>Oc4&?SmgJrG&-*N@qavqXfkGR+Hp=;FR_H&Vb zE=UAPE$kknhTTJF$?gd{P(B!dv-;PMWl@4V&rj?u8i?N6Qxa~D}8GR znIhXNn%p!|6#d$^(84i(ZLs?ZD~ZeZJO7r4W?B$7&%FbHRCs{FH+28NY{Z}=CtvN4 ztY#}$13sr{_-<|`%)QV%G+wQ0<8UEb{E8Gb2rh(-$bsj6w^38}(u^`u_9-$A4l(Ii z2q10bea&~R295{0zX6W{AD$bR@ku3qOp;Fz@oGNzYcf_4Td>(7JFN&}8}NjdEkEvGUBUk;}FHwIEjXwQ!DZe+`} zWgxrivO{2!13|?39-IzuUIZzYRmc=;2%N~<9x_nw<_}kXiL);LP4-4hpS9fZC60#Q z-l7fz)ioC3xWMB1FgOO%To+1Kr~J}~{I^{mO_0->HEw!5O30AHp~gc^T0t)+Q$zM8 z3`71LOH1NOrAjXlM7B&L@-mE_Kpve{%aXsi=D%eqy`MlPUyAMaGVA9(h=P>6uK4E= zTw(7?ajFtB-{ul9Z_yKYHd>r+b5*bLnk~DD{?RROwelvqWu@ouA5tMR+S%u72WZd9 
zWN%a6MV*GJ}p`HH}C!L@hJrkMrH}hs==UVgg6Ia_Mmi2xu@~;As z-KkqJ_GGNa=maH;)?Rmc<&MMIO3gfEQ(tR$?;H%lLW8B)>K}5T!G`+YmA%Gyi3vKW z1%d3wkCejE%3$u*aq^7f;!W@N>kT=QN!P(y%Ri1Bz*;{_8z%Lf~ zR=mUb0h7^oeN{H``HlD>>MC^c8e?#C$qi=099{qT5Ko7;oN@TI^=6^D%Y=nWjif;X z95hX0XRamTrj^-o)B-^FeKLynYV#B^Zyo<_*{c2h$e8a+HVciQtv*ZCHBOmv#dZA(up+uTA2F|P5cHwVJ)cm9R#~{?* z5QTIM>AD{DE}b8MI?(xtZD-DDA4LkRyXAHt0PmkF0uc$203LWIy5a+ZKmz#SmFNc_ z90C(Sf>)xu$qKHhi!nt&ETo%%>B>2n9kkHA8;rkVX&OYvrFvDJC-;iasTpV;0FjLF zHnE`~qFU>MS;3-zB?~fu7kFV>>?knqW%_M9Jw!naf?z=5`Z_4}4+s?8fFH9BIx!nL zazYiHxHt?Tt%+!wyp8tZ6Cxnz)L#@5GZx9yb)zEvT=W9a@E!(no9KG}(iRkhv@0w8 zbM|#}=>5K61mrHp&q$hkY8j9V!mFhHD4cfQj!=132l7N@Q}FEgO>o{zd0RFiC7;EG z+Y+gh@%dJUQ+bVSj6Bf{>;7I+wW6a)0bFFQJv;0kZl6kuW}6ED2Hq)F6B1pSMQ z)=&-T381kWutNoO51^_huATb_Muz@uHxXkJY-0~`=hgBWR5j+hx*&ifyw!aZ0a|%9(nZNojyH^+dbkE@sj%uP3aeWR#RLV21>hKLOx8_cEM;bqrNQ1o zB3Meyx~0JEBcgc<+yb0{*(Kt5%G?S|fZ0LB@D$x1OM}@@#PXE81vr9PA9@1tJwQ^s zFdR0!o`_Ck)n3BLO1OtNgs5NN6C4_ zmi800?VZ~9f2UgxbPnCPSQ#*~>2U+_J<5!I+KUs5jc_s;p1;ytE zmM~2iuErl(Pm#UrthSki>A19IH~k-AhVy=ZXxn`b-n5&o$`bWQ&S+bekqS2TBT1Ar z%aFEal3Lu6bBA(_=QtcdrLy8lO>K~oXzAjUzbO1z-f1}O3Rpfv111=IP>)0lEs*tp z7wsp``W1%Yk6aG#G35}^{wplNb{;W`hatPCbVn2?=+CNG6(X;3hRdJNMQ3$!!e|%+ zFaXJZymV1^C@7!(|Jeg36sLDk&QNl=4f78XQe*`M!Uz5>m`DbEnr)q@o>;pY9&_1X zBVumE{!J%J-UDtqu9iG4wvrPkVC$U#d40YS|0&1>^p^QDsj+82r!W zce;0C2i^X#QDAaoMe7&i*adjwspsRq-#L&ItO9lAM_|HxOGbEz+Plq5+CHE%r(0}=xX10nRs zfPDsBc5ke9S=As4LhVOQAbFAYn7AUv6P!SMhTh19V0OIPG<${{2xY&lmj$w4VV2R6 zU73+hO^>aPJ?SB2k3~s}-3naj#sF80Ffaj7BiJA7Ks|u&+ktu|)BF0gNb77AZrUDi!LMlvEyQi@POgHGY(#0eDW==QLO)qomMR#s(o_qrG6 zQZ6Mp0!PY1EClFr(~6Xm7?ccY(7+@vu0~`;hKQJW%Fs~ih9fvqKa9did%pR0rTsc$ zL|>3pq1{XDB?fM9hu0c4Ru;CetDe0T7WPqyq6);`s1QsPDH6uE)Ip>5Y5ni=-@^XCU{i29T0lY!7Yy9tN}L! 
zIdLDN+ITxWhL&5r6*3>*Et2rbdxcV$A`Qa)>Gtc##p^$uZ~}JgpxYP+-Cm)vKoGOm z3q^ynegT7Cs26mLh#)kS)(JOto8c>zo}11}edp2$6quXJ&h0|JI2evDSku*sg^*du z*=@~2WURs{hpwKwjZ+m_x52?u21P6=NGqpSHIv12Rqzcijejuz>QTeZ?O6M=B8_7zy^%c%}}Ckp5WXy=NeG+8Y=`$ zbE7P>^UQ4m4c&@LkpPOhC4> zNQrodwqdu>$qRQ^qM^e|4QDLe=q#>^Faod&47Aj=Wmu1)r3RxqNk(=mi_4=bW4qmm zL}IrE%L7CIJy0Mx#>)mY5f0~z7Ppbp?>896AE`*>|1^jR8H zBQ3b)Jj1&m7~CQ{0P-CJP~VO);1;nl3<3ZejOv7&=r%x#veHsb7lC3TB4tj5Y6UmR z2td!5U`AoU|9B+6fO0!;l^~I|4%lZ%ey{1<-t8HFczabs&+Ng%8|# z4ZPaG=?A`Q;Ar3>22P;BBkX@Dg++!ExILp#B}^l1q6A06!d=4b#|RM?j(9+aDI9Q{ zh=?!)-lwAS5Ca~jhyre9m$q=>gA0}&)P-+?c7=7pql3>k0B3-UP5=fWGw82uLV~Vr zeadtJezG20_+S}Vf2FdlAQ9-}S0*3;1*!TH)zg8^q+WQtP%oiRKXHLgk7xRA5O!Vq z+oY_im)>Nliaw+->+?Hrszy&5Gb-UkKQyAZdh$N?`s%tigV2M&D|AAC9SX#8$eU4+ zkS_zB)A^ngK>?iD8%Q}n@0%MPoWtfN*h&Ds;J`q)>T3Q>wbM4oGOaOXF+-vBHMo4R z&0Ch$C0hZ@3r<_IuGS3zdNXtkBFfwtO0!BXeBB6uu~qdL-i$-x2Mc)$t$0rwVbMl7gs%vKBsKUU zprRMV?;Xz~85H)xqKH1!ol~5Jg_=z-}_w!1t$YK!2Q5pP`%T8$@Omk zOY@%hdk%(vm?qv$K)B<(&k0`D-G!@!VX*tNti~=T=2cbqsmoq^5epq&>CQ3DrOe%x ztz42^iJWW^`2CFQ=65>=jhn(aj5z9$$3PLH+m3u4(aUVVJl?R#Ymc$3y2Sv9fqm}y z19r1G$k{iy$mivuZOZ1O7eUXksX}yB-~rDq;&NpjtoGZt;o;(lSOqWO=f`}ny5Ya9&LV^$x&@=jlh#}NB$yhN&tS}imd3e-CO2lKx>B++* zof00CC7jTAm-aEFt7p% z1&=Cc4ZRF<6L%Ke?JOwU*X#9qyl7`vAWs)Oi5E3LNl@ z(fD-AH$k}oe5(lx%DhCOmw0VdiF`N+2yMh{}Q@zN^5 zcZkAc=o;w%02jI5|Vh5&*1 z-Nd&7ynU17$o*0g9t#lPuJXlC;)@kW9zzh{0x2S=2p<-LI$@v&^B6h&km8#lfA|Jc z;&w4SE&LQd;bWg~`L(b8b$of)X9bm-QIoM}s1qoegj5^l6&`1pwhHry* zx^$w!LB6iwmA39HQZR(vTSSCxcJA;^FktS1<-QyDh2cU7#^V;*r*dn_AS5I#j6yQQ zL5wPJBWyzQ9C?U%^6#DRPT7KQ_0v%n_U$A-J4d)H7=yN9yAg2C*a^`8gOOru$u!nnKyDbiZ`ztEt^!E+qp@(!QVV?W^JxEDRD(* z^8z(>JvFzmX&Mk{RBB{txJGHNY-qT2XXrDPGBY(dK@pkTw?n38&d5Xk$^Ph9_!II7 z^n)Pgi21o<{8;Z8eFkej8Dv&I6QALj&`h?O}YTm@9-fN)Vr z$&iu@h9>1Eg-ZYm04j@M$m3%ZNlY{uFb2ki zxJ$}{&Mjn#sZ>SSE6S+EP5`GlNC@{zCaEPM3m8W7kf@BTd{{VDXe1Qm3>&c=vwy7ZRsjP4ntzP66N6#7t!D%9o0Xj~8ux?+>1)&0uk!*QSM3wd_A$=&MCLUew^yrqxQujsc5 z^PPtY&MW3`dCbW}$t&cBzt~S#8sdEMtaxyT%eNiX3Kt#OwDQe2F 
z9~dl%Ex6()WA?Nk1$nySMwgyasrW9c&J;xu3c-j8+6gN@B8zY*HDo0hoEl}K5DFo7 zJKjE(m;i*o4IVMnZ^v#4-47ZK4o-{eV%J@(8S#vu?&GN6zR@2#IoB{d8sYW{bAAzf z+>0&f!_^QN5C#LH;Vj8nzLN=o@1hYhsX!po-XRHW{Q%EoqAESya3GL3cLao03y58y z#}aM&eaYS1ct=Qwgldp}2q#HtdYdrrJuPQG-VDQYX}9hB)BZ$(iRw!|1J`(mfj?8l zNodY#fO%tpg7e#9amIHro?v|2rcQPEPXBx{=qx+gQ3%3MS>^Ry5Quc2F@~L87M|=n z1Ydk+;ZM1%&3oW58U5=%0ZqUgrJRX#hZbGkP5en8iGAQ7h-9(w?6yL*^k)?2=REr( zkV`_2j)x>y(!qQ-!n{`ZWK!I)zGPLfk|MbnnU)iX36Z>zB2A}4N`rD3 zwIRr=CiS03lwbT{aCvruw}cr_6Ri7_Pp?S)0G&sJ@`v2Byn*@9&txEV9dQoxbZugD zj&~9TAqckC;23$cuqB^?$x|Im8wvFChr^0{)zREWM~8kJ5z$ZYeG*>Zn%e!zetB94 z~1?vGTm9h;b0gww7pRg}EwmRjMKjs|(c6#$;TP}-3T^Q7;PF7NhMTK?u-c%RwG zgY-8iyGDb?v_@NDa&hUwkPVSa5q#7r$AW3JbC^m+gUll9j8}AQ1s9)g&8NFUn$0!Y zbhm-laAp?RY$xJX>fEmb11AD4;Gw>~V^EA+=eIXInryef{iyak{g{@JaHS9T?mIw) z1yT167NcxQ&3i+9T6f|#1=QvGARJo$8Ki?c0}h5}h!?Q&`Yvg~BGQ&N^UAo%ESDVA zJAyh3Z9llRht;Sy=<3`%WP_Mm>Dtn&fr<_~d2x4XLQo_KI|W(wQZw2!Q~VN1zCp+T zxHO-UH_u;Lz>Bx(LK66+O1trao&85(KEw@pOk?uS|2$r!abE~e(qJg^JoPkh+2$XY zfeZ*_D;8&x*K=UTpG95kdLQ-%Vib`N7!FP$9d#Eo)>%CtaOtEn5Uv$;!n+on)jSM( zTOWoFid8Sk1aOqx(}MHSx!y3#8o9AP#>e9Uj`}abt!my@B;4=LQ#wC&_=3$R**iOj z$n(^PzP)q22+p#yb-;8Hm~+ULAYl_*{~lA{_de9Zlec2qF3OK=$x^f$@_da>$BEFd z8=Xil%&A6@$&qg>wg+%P#E!vzL?$orR3~!;Qzyl`NwT8_GPJbfiAcEK*`MtUKiZs+ z0VtpwBbTMOAAA;oZznfrx59z@JJ1jCTr{S>*WN0eZ9b@{RIk>PiBY;A%G){ML*~mkr%sit%;n5U`lts7^z;-jZ%%q0of7 zv1xnss(f>~l4PM7wXqPcHxnRe+25}Jny@z%4YiFhg4i5M`FluToOw+w@^kjI&VC5$ zZXKr1^09*FNaD`VGgTe0-!5*^LESm2B}gBjh-Qs1NQ%8kMa9cW2M3{@G$70F3MQEZ zuQO@7mtwx#E}9>OohSw+f5F3}wlVeFogc_`jIQ)#9Shgj#dv;4zwghd?KgW-SUAd+ zGQaR(It3*GelUr7>Za{u6Vr&Jo;Z?Mm!W|GTR^10fg%7f5;=rc=1k06hyh+zk8O`B zIN!k)0!W|(E1GAraR$sSz?ZY6{>YCbSuy-%&P2Zia1oiVSTc9HuzsVaTWqi;eg{kXIwZog`4HdGrfk(olJdw@*q3mlY(Q=>wkLTd zp&>Z@Z-Z7R`_Q{=GnF#)1h5NE=tWF-C+-eVaDl-i6T4zT8imRgALdWBW+}xQA zrPC#PV@I-TIlb*sG_LNR@{QwKws!cIOK!#Q@PwFdRXL2GqGg^{+QL4m_m832Hc`|N zR|Rvo3np}^YQOKW&N5uh!r}w%u7NP>^r3CU9^|!0pT9q%F^q(>9kq)t9kuWW>cY9c 
zihv_*1f=SnYjFPeg`xFn(q5koELeE4*-TwU=?D>APbLjW71+MG3&3NZ@4??yp4xDIk{#Ki+5JrQWs#2v392N zI-UYn8u>B*a;5jwP5^ga=szea+h?4W=&6XjiAhzMun@oztU+V|HH-jD zfE56U((W+PzgX6Vi5xuxM#$kQlltT7EDXy1RL<(yhDM_^!ynEH#Rw%?UHfbK+D}nB{OdE5X7^(@4zzc)u{aI~ zyRk5eI~aIc<8B;k=EqE}xG-1K}ABHq~5v0&C~X6nq(gLh3vw?R~x z)jpkYAH42?vrE+~i;aN}wwD^1uHaH}lq~V#7?Oi&dinz0pjFfV+2vEm4jRq|9 zaQrn#?O++iq2HWGNmjek9_WtPsDB0@M-ET40oZjIDN8O&@`ZHCmw`!LY4UB7GB5u8K$Xb;K%@5vB=8#nWJO!rA)B8ZW)yT8eH%y=u^2}u|`>3{U_taj0 z`b7bA8diGkfd2y}>^m4~tS@10RW~;=gp?D{23%#PICM=jhBJwp>tM-=pus9=l|l0S z7lBZqF{B%3y5+wJbw>=4l}QZbA}ukXD4Q5m(-VWdHd`>E2T3nEpT2FhCBUXNdI~=3 z60omgl0IDmr-ik$xjL({w#%q|z;L+Z+`Oz=0fpTC^c@+Zw8Pde%@6o_nUR;Oq9a)lX$-^qKC>@KgP>30*;j8SiCI`@7b&X{s2*OpRBA zxSAYiGjdKHxJ{IwBs>jgS5AUIje~yGLCGa8kTg&Dl7NNe3_r$i?V!wWac`M1GQ<1L zEp3d~>wD9#G5Qoz%MDKCG*IZ|#@co4wA5VsjG!&B7|F<6)FqQ!d22lHx5zUDy4RY}h z%8|Dqtkg8z(5vaeAU#GyxFa;E7(|Ch_I2YpD#T?F2scQ})SFh7gMwSXc;p)KR_uJc zK`aQqL6~u2_u|p8CKk*fLIDG|MT2&Q!e?A5c5WK`>;wpD%aBxC{g;#jnWADGDmiSZ z9MUBR+x3^3_hx!#GM*{iEaWqpeZAa#lwH*?eCj^{AzMajDv$`E< z((4;BZwFE~7~2Mj@Aex{lgeDHN=4Dt3D-&Gj=~TszkPl+Y;XPC4Q4ZUoIh?Qf?sy= zxg$zRbid@2ZLen>{vYNkiZ?(Qa6LC#XO45V|9E9QJfp3lR!?0UmBGKBx0)7RKsmfa zPi(kRfBxJgvolGt`0UE6>{UK&NPc8V|SJ%IvqI(mf4jFd8?$IA$<43zTeM#}~PLPidWDO!9z9 zE)D}4D{20`8F!|s6c+)i2n%A=m{P((F;^+85(UzGU!ssw)f18}nTcXsxQSv&{Tyw& z47#zjjQNLTk4MsNR~I?dBujGxGyOC6YL&^}RwtevAHvJa_zJIgx3O*1Hlvdmo!_fA zJL>t0fV6eUQSBW;X^8HR(dUh4UMkcbdw`?|!@%)uZgnt4CIjBR-L{~?gf(8_&efU$LWo;g%>Y6*uyR7uDoTRHR zUUsmT*PQ5}fTP6-MfqlSWYXx!KA-Z~uKuk4*EtJ}ibvQ|ctw;fnoe)rm@_%Y2j{dY zBnKb+oPDr#rrKNUbm;{hnPKZuk0Y;HeM@xj5i7}4_Yfs0P>OcI5z|xhI%5%mhV)%` zEZ}0$Gtq$OiWxB09_SX&7J5)DI@J zFycx=h8YRmjzxhRXhikm$J3S*UmE=DuDq*?W>?W7xKE83wc_jTOZ_@uV@i*8lYqag zAji|gkZjVY3Emr@;7d-^c~ft$KSt4ngJ#M=HD_ZWsFL6^hb(~EBCLn~&2hqhY_wFJPP zhf*Zc{AA5JZx~moVuQxXv4MOu*j&^;okOHlHO!n3u&0USJUI8*PU$+`YgyD3RAH3+ zXs%_=hpyzGIAA-1eG<%i9l(4Ze_$6T#Ka@1GH3hW>V;+bjnK%oy{slwZ|6d_EnQ&) zu8ZEeGK+vDVe}S_(QEBEP54oCmf{4Mis2H$SUMP!;X0VIaj9*D^5Vf%&S_SWZK?Ra 
zLl={GIEfILr2m-}IAC~rdzkuSLKsiyJpq*+00q2>Ft=wf;3~q*qTVWb>*hHJPSi$dBm>bE}9+;@4{M`C(u>JVqBQQjTZu)nRe@>=I!t+fW0sf%`Y&w zTvdN84C8Qvf?(G>^>v4&0ER9V7$3vD-Btu%m`lYSGs6hCG`Zka7JHU<)ou59G>q4; zl!g&4()tubIFjY_R^Vf`hB;X(6wp!d44w^RLpK^1YQyy0F#9bKqwyp6hH=a`i?2S> z=I9kF>?d}ZezJ^Te>Rf;D2yNJ<%BF&eQZO)q8Rvy@FV3{PO?Xz4FVhiNbw^c;BjdO z=>ojy*C)gQt4yUjS9@6KqYkjbk)(Jq>zBynMz{5AK4(gKY5{5|c0g-p8TM31VerEZ zMz}q+g70D7{X&dj)Ds9Mjtv;7{y6xH#^i#QD6YOM*;tTWT9DQF>o1QBlP{_tHpe2L zewny~xc@YleK&Ve%Y^*%7rtF3<4w!MYOk+iq?V`km9N7lmc!V3DY4-oT(m=OYO~- zOHhEdwO>2!`sz8rN^70q1;78}0ZZ7qjyZhGQbt74Ef5_6Uh#JwUy3Ej!y1H5SrLq+ z-RtOO7C~IZ7B@%?LZD&$$qVpM@Y4nsLaKtN6iIB zMmIg|ILH71`XXs+By+^LI+N2UC)m4k0%Q9pG(JB8X&rIrSYR3325l< z;9;}=qkpJrhpIMDJ4XM1OyDR8+DZ)Il||?>m)`o(Dd=eC68zDq>eU5%m7=-a2v6_f z$T1Ym+@Nq&ho)p?DQVDVftSzstAJgmj_LF7!xm7T60a_1Ezte!X1_qt`N7$Axx zw4D%dSvgjRy^0>&Blpb{%BG2Ean@n}Y9XocC!w3Nr((s2DH$ zs;uVcTj-f`hikgRc1ZtZZNjxWta0T$B;+W46mDLJ6L}BmC;Q^nnp%N&jQhj&4g#9Z zw*!%c1u>>Wt^<;oU#_)z19v$AX0Da)p?3vr=UQHvAE1Yl&$VLS1)Cx@x&~X?o=VqN z-sze(OkESj{CneP_SQQAHP*MTiFr?YUAv3*_=}@r*OFi{yGH8;Jqb^?hB?dKy9O02 zkTY9c1G1$i$ZvXuaXn0xh^i|RC(MPBu-xz^bvF!W7&U-r4GPsMGSd~qnEf6Tq5P*k z8O9+H#%37T7{nGF0mz#~!;oY7Qpr;ZU1TngCpX06!L+;@#%8N)7V~xSx${~BE7~J zK)Ug>>%9Ch`%bNVRmYvW>dI*M1ZWy_wFM1Bdx;3U+~M_|)h!bui}QveI!+FSSw%6n z{&MBM0jN4G^hxY2)-S44%gTG0G4tWa1DCytvgLF6mm2M?TOXA%s~X$)qky}#DT5ke za9(G5w0CZ2dGxufKP~fC^tEXk+ANgvSX}G7u6xPkd{zhnX>yS7uSFp<8mt& zlF@<^BZGo@*}IwP;s?*`oU8Zgh;|hvO10cvfNSF%S7q#K>B7~=alg*;HCS8Z^0eIt z(q2KG`UP+=g5mt+{#Y-(>xRxZSBHC!W8J(xE!V}iy0VM}{9n-3_hjLoqd8CR@5A|^ zgRW??AW!7z;N(gWFGZ$!nNBg*QNaO z_KnY=&)1_gK1uL7PxFYaqH09Lez#Zn=M@dJveR|sDsU0NgjNxHPc@>`s&DyB?r}h7 zm+&7S^yo(3b9MLko;=`)NMUn!oNO*iwUpxvBU?oIT*}riMUV#Dh-CbKeD(S5+*>=o z{+Q_x0BB4eJy0U+R7KU)0$Hx?W+~GoBO8|A|6crmvBgm!8pu|iEsz_!v zBI(jm`QqMYqN^ORi_PM`69ozTHX`#PH-gOZLm*_7BNyy@sD*YDcfodcR#I%1g29Ce z$}*l7X?}n6E(pjRtOk12$12T5Jhw>8-=2Ka@}O+5v$W3Qv3gjwmZ%tC8_^?O2ibf(cwKvmd(@f)$1W(mKG$TtIn=e0l2_dL8(< z2Fw}8uq!3mCImSLZS~X2=w&_8y=C(W4uyqsRSX9i$>hSnJ5KoL*Zvw0c*>xCPxaJ* 
z_EbAf3%$J0Hp(hlh35w)8ASo^V5#Bq*CKk8!^iUe#H*u;fe*9l3l?O%MVHAT2rqYV ztuug)+K<~D^uMvA+>vKYTvhps=J`>of5GXR6a|MWc>H;f z_>S6!mabONVZ!tw`ozBa?zCHQk?Af460Dov8ZM z?KXZ~sF?9^dddsQ`yJ(OaXR9`U!R0*jk?~V1V-I3XQV20>^-Nt_zI{@oF)~f_lauN z3ZIbdku-AC=x1WW;#TlAq7HGl*_>&;@djmsB0fE5I90|Y%abmRxHdmEqN%zBOrji7 z1}u2zc^0~!IHKe>h0Qyj93K5_IG<$C( z^LiI4EahfOjX^M<0HXk+02mBbFv^@Em>%nq#^UOU`5@j@gRA`6-DOUTgaIk%!SgW` zg##~driaD3DmHF|WkZII>FI8WDfp@U*r*AjhKy9%Uw7H6rmwd2c|8puicVhckCI5U zdeNDc)5-BxRu-X{Ss|<9P&&QUooCok(bpP8Epl6EQ{*8|$=Ro(>O{ew%S`XWE8%hX zgGIcVzBeXYc3`ZIySO(TxpY4jS?iTR9Lq}a@Rx8xFceWA6*9uL=O+7AX}8nRe5;ieUwNiZel8iMKM z)aqgyYFg*MW*UrZU+1$Z7nI3xZp#cpCYZAknPLSpg^cmIJ;o5cnvJRWn7j7AFh;m= zdDFf(LQF5p^c4s(CZ+;Mgb%)GhT&YU5SmM!Bli@_6)tyPGlkp=NhUO8661pVAOwrR ze))otY(nafUBMorCZ!~6t5Qu$Ym6^T=gtVJAf%+yX3{h%DXknOSz4!SMqfldM0?p5 zOArkznj+|fpcslO9fF#JrlE&qi^z_HFR}x(grG4S!6Ax1Y&lGup_4oqzLq81L{I7oA=v9!`$qsY2s> zu)6IZ*NVnVEE*7 z!$q>Oq+CB)PYrIgN%?ElM#{cs(e&y>IddD@YJeZp?cLIHk3_LQ`?8+v1AaG?s=aTM zp=xg~#;;XnE7-E*L78aX(89ZlB@=l~&|NCwHN!roR%c(6WI$&9zUXn;Wir>#!a+AO zG;5!I4d~xRjNf^r_2pWk-t^r$7sW|bhkg@Roxp`Vviw$fRyCa;%)Zryj|?(nfECM4 zOkY=qtr8ZAwMcC@>N~37)?V$Jemgs=k?svIir5k17PX%7lG0s)$+IpAoU2SgIN2%9^Able;z$=Eoa(d?tPS{spBA{xRIK- zodh|NI$OgORcX?Wt&^V`zLZbZvxzsyi{mCNFb>ZM2z#`0@2PW~HB45I1WJ$@3?$ZF z7UtP0r(vH%6oWr2DFIK4_1VmQrWz=}(|gDbdKY4?Q*gB#n*|Iz9VP(MaaTVMj5|HiaGv{TYx3q!=!4LhAVizDX?|689_%mqaZo z&?&0Z44d}*3u@wDyf#rIiwDztJVnUthrexoN!O> z4wu4}{E$=0k=X&-yo9Mh6oRS_^Z^>{(AC73IC>~Lqz1t7J0j&bhVKj*a!Yl@Hv50h z?11ybi04%5I@I9SdKO>sV?-9w$jG04omX~|a-8x)n!-7CpA z&VZ;Hr-7!AqOf$(m>%fB;j#P(LR8?5sahvzqLsmM!ESy}1IAy3c>0yhp z)xr@`PAq-+`$Wge)6>q>>E3vO+YkVQy% z_!fak!sk;{&_f2D4mS`t>dj>^=^?85TDtq`9z)?%JDYLD(ZhyA0tt^O|Kz*5CjO>J zt6tjYy&bmS@UE1L4Yex_TygcJcu2 zC$GucOEIFu_f?K1leCSRH-T7=#ZKs3hFV zB}Bap(JktS$5=);v9!up6}a2im~{DJ!cfU8kjak>@8HyUhOa03PU_Z~llUrq!<3)jW3|#AysX)zO{pwl zc8}EzIbPb)H4hgUG#8vp+QQNxFr0?2tFaqW*;56aTR3IiA@C9}8ud6X4d}E!^Ln%i zvE|3x_eN;_H?6%Vg(DFwE=|FI@EOVz6cvw@C$svLrxbhvltTmVLWX_|5U>FoX8)VD 
z?C#zvCB&=d0&fd^3y}*ppH+R)l8~Iudj0sAO64vVLPhjrlwTWD)PB=3aZF+o)0o6` zFUjR+pBWMIf+BQzZvBsvqF zidv)eD7l8T;RRA|W_S@phK7)aMe0zChUAh@Q}ZB_Lm)56(eBk9tQ9T;Z>%Nvncz*d zEFsDCUwLwP=95ICMQPP53%fwdnWD6lGBU7&Op=00Boc{4l8fJ0bY7XsDpv8)BHb9t znLhd`J;-DdvI#29)C7ePiXnsq8A1s8S=lU3ypG&niF4njJG((QFa}~ zJ4ob0C`u@*j-}{Kka!&lh8x)g85HJ6zf@3AP*7RRtZkURxX*r@`86oWhRicI7B*&& z$=C?3k0Xsa~p|ifT%NDnghZ`KuDwkQb~nJd2!}gft8#k7R~Cm(19Q zA-D^Xvw1kIIsH6){z7iK@shhdF2W^D*mBIXXySTr@O6lD};7G+2MVxl6dQBsT; zn;k=<(}Yo&h%7|3p|YVO5ogF~h~!SA>1$0WCKQv#zbexRPk0*Bn8q|F)5N^W6;;9z zY>qx8LS!VA5z`1|Mi_V}-DqZlL=U&;d87I z2I5gE9LuL(Ry0P#dI+6_PDGE=%XB->oRN-XFGI)^o9$Ct#i{1M1M{Z`Gh$jiz$yQ{(GgYo_+Y?G?%$W%?S?QSTL@Z)d#nhhKp-NC) zs?eFN{$q(d=6xr2YV}U1Q{g*xhsq%qD~nm;#mLP+y%{QaHFh9fSslTns2ME6p{a>7 z%Ptx^(T&I?LZz#Ebj;jfu$)&o;XluVh8Ont^eMlYg5@cUF8>o!7_6xn7Ri!zaw<4M zjs%B-6T-9?==vbPn@1GEd1Fk(>=QDTnA4o<;n!a`3m+RX3%oJ2hnWx?6Z|7#%n0wr z7&4}eIb$kff-yliRTe^jMMVo%k+Q(^%YvM>EYMk$g$f>ptVi~B#6-wHVYR~RYlvLr zs5yN|?*0VQ3CCKoz?zU=^s?7C-{g(=COwkI3-9S1wGg~8;f-lVRboY>(oDP1D$K%2 zlQ0UCK$jdALj9~^6jo^ojYvo-wanaf+s7x{yE5RMm3=lQ;`**!M%)lwqc_8|X+wZ=CU(-%Sg`1b7Vdj(A~RP(d=NR4 z5;;a`FLXY7ICO7Yd;7rHzXq?{!Jp}tn%|o~yOUz>)^H9b- zLOrw47nKM8s{T+snx7ZQV}(HT?7x2q!mIh!hfwo3?yh`}{wEGfUHIFHyoPLOSbmMe z^Ars9BYm(aRPP#R;8J(2E4hFBZ_E$2Jj;>ay+Bwyz zGt(Xgb>?o;26}?a^Oo{9j9jfWlnc5IA%Y=or)S32qNS^f_ET)&cMA&j*#y;GAa9Ol zbGe`^*eBH@ODWkQ7MT^SdDQ17qCQt!sWvhzA|fJQk;uuWB>7A7pU6ZO%IiY(`a>oz z=EXjZlJDsy!7J`pp$B#zl%miu$NW zJ*vc_sLqs(DknE0pHZ1*LTc)Ssr=k!#y%I-*eG|Ce|~rV@LmvaLd+!p?Q4()#cw?S zg!RJ0dr$q1nJO1Hb<7uG6E-bg+-#OIKZL;Oy@F~i)r41)dJB2Rpvr|=$@kirA?Pl| ztE#EFrUCjoQjAu31gF7qokGEfZ%k1lic%Od(8}8S;E(nsJi;=y9EVZjg28bb5=D@ZA3fVE{102 zsI1q9tvq|!A$+bdt0Sx~TEkYCX&0u^=wj-nTAk2}p>i>0b5}8h9P2jobE(36LsRqx z@=8+VT{}d%VtW&EZAi;}LkUR7R(&HkRQYF?PQBo2WK zA)>vSNg|oJ?GqQjLbch^Xc7gYd(c%so3<5^$*l~wqFo^0P}%)e2HMr=rTfHGj!5Yk z(v3oNRTiJ<9F`u%M3y;~PPbFY1rhbgOth(NDlsukbkUYQnl_N}-wIu^ zR4g~8*wV95s6uWbCxs-$b)V8fterkWFQWe!Lo+<8IXkb;>vNjCA<(_3C#}bGB!@f; z#W6fqjGQ+Qp3N$~kdmQm^k(ihtQxvY33DzT3g{*UrVo(nyS`7GRE 
zdp>)o%lSP+ePcm}V88b_<{3ipkED7Zc|Q?gpXC`@I0|H12*&Ol~T;` zGSmw>4e5!WxIc3GR8p$+sOs?DMjnMph319JWv^uNekcR}Pzic(W$p;Km;g%IX9Q5Mb%b|F|6dTm*t zqaq8zm6>QRdUY+#&&Xz$Gv7%kaVrM%LTu)nvc2K?8%B=3&AYfJt?AALoe>?3)_QKL zbYxLt)ER6DQ^$x}Ad_fC)WT$JXbhos#1b);(7i>XiJ~wugrXA<$yP(x51m~|Y#YP5 z5n%{r$A5ZWQ5fh)JAZ}{!J|#$mH(SErAJbR@Sen3Hp@yO^XO!q$PwkydxiI_P0swI z^u>Rkai6T8$VoCU5Hh_Glq(vCx|(sI9?3X2zhlJpXgoN{GLIUxoF3M-8hvM0eWxq@O`enbF zJ{Yg659bB-A&{I|{r15;zYp!1@gZ2Eybmvi55Y6y1AAj$!e<_bOZ8$^E-x65Xb$E{ znApqJ7q3_jc5b<28VBN8s=nbkgh0IL*h46um^es}L|pHg)gh42?0zql@=7TVCww`C zSH9(f@JvqTB6kRZ7mRTS3m%cnaXXY3Vu$xkQ;(<~9;@=2*cV+z_sWL8@N=O<@u(rw zo3bA@=wMg7Ax)SLhY(oa*cxi8%Brk$;SY1N2D^{-zYx7JykHl%2A+vE@GPt$xU$fq z@Lo_6MUk8rCkg@5MbP}w>FV&JC#C~=;TNUriq3)#^J1+1{nij#R}pqN1Q(>rGpQlz zci)268q1MX2@OdR)!gPJ3)V+ugr+zalt$4OUObAS!TiBGvya5C ziY|~>bY&Zo@NB4RXl9h{CgtPN;&fgoYoJ-H8`9G63%A3*`M3 z3@>>>vZ~!!na?Q_iBrUh=$IVMiN0G*HAWmG5>sXpES{(%bzvhm5+q45W=^W+D@WoY zAw82wkYa+Lk&E%HLN37yxnkHNrk(19W|ygUR#e@2zC1iF|?2rhoZN&wuk7f3P-a-JT=?EUd{Fc>c` zB`0&Ly>mDFKK8b4+YXM)$_sLTS>VN#1tSX_UP#YM7S$kW2~W7f3^)96S%hV7!n`fE zd4oix(ut-f=ravkv2$cJw#E#nhRHJeClL zVpe8>KN}+R#>iw63bG=Z$(%mZi9{wERVci~FufazAvmGmILxNXN1s==GkJ{1RGzb{ z*?mKs)vRVU6JE_Gtf9S7ywe(fQET9dts&UYuhxZBgU701b3V0mAvEOsqNb%xU(*nr zi7AAgi|Rsobz#I{UJ1$Ygv0>2E4e@|FRDc6p`4gDQbnHG^hUxMI6QNYV?`4p9}T&; zk0WFBJB#Ja6#&)!sD^9IfmR_XJzZ-%bJ`dKg8+b&mw~WQB((R+@tRWD2 ztdj`Sh4K-l_c0U%yevjsJf&zmdOLf2(>3!4AQ`s(RoV?XR+x z@nrtzKm6@dz9v2uchp1tSP#Uz>%7=R>PC!WON2VqA}RUwppsrj8|5W0dU@l`Z#m-j z@N+0YfVkQUQ6;i2$Y*c>JacjUHSki3-Iv}CA6HJXx578;wAYDNFlZ^N4FZC~qa ze_d{v8+@XA-n?}QjAjD5eScI8tUNaR@6h*Bds-m#Dv3Ph0a}S;m zyfN=Lj6}xn5LZb4`RmlnA6AWr5a^3XnAO=Z3&R)|#julM$205|VGV|vLkQF(Gl^J{ zDp63Gd5Ne zHno|Ws;Qw}aJlW--1@CdJdjM;Ov#i?$&|>{KSb@%!bP`X=!V+XOlrbY;hpJpIvozD zLI@UZAiCgSsTYc8vX!&JDuwNZ+8!;5%?+a^F_y+sF(V~XBPCKIRiU|D_&sVhp1zR^EQWfIXkv69L;Y))*^IqUSFNzviO=`^fi&_6S* z5F})3IkQ~9qD8VuW|3KD)~4AsF&k#ow0qEat|aS)xjIFMD9Rcf2SoOooJiqlsNYDVLo`H0G!qjn zi>R2&28&ZiZJYW66Z1I?_(vsdT0|HjddD;hZ>BRjUi6To4(<#u$PNfg6)&b}^D^ 
z2=zRFBewkOXXj#_aZ9GKnHfSZk zuQbtf#EYb?igUDmeDFGEVJ2O2VI)K5$>cqFCz&&zvFV8Hrg~-L52Yt0b})|=mwEL~ z`=Vt;rLyEgNKQ(=2&!RC$*axlr*f~4OiDV5IcGX$Fv-Z{$9luu=cS!(NEWa9`u^p_ z?lGBM;dS-4!cvzZ=sGRK%ML$;^?0{5b8sgcozSC_}G7`^89ws{0rj z+2^Rt%&da^D#F5j!?0jBVL_p_HnrNu<{QN-jNDy($wPc3#VH#0rf5z@uCQCkJ*om( z#;Q|UGag;@5)v9B1w$u>=ba(Y zTq|7e7Wz9+L`WhuRda2R!XtZ#g#<&8IWdlwTKXYL=m*J8M=iK5tOT1D9;>0vmn2*mC?kcWU4I-CC8Hm@i}-@L_;Pb z3;YGm*zl@+6=TG!&WXo@i1i#|fnp(;SDd?aE`AHV@XOk>xR2bk+k%|M7S0Ra=V)w1 zTOcp2BaNO+E&SeEFy7Q$87-72Lkn^wTJYJlH+Y_~Q&@60E6``b3ir)Kh3FYY1v}R* z${OXUFuqrh#uGD}hzdXQic!ISQ?oFS#5guqGIAJ&;F(4t1d;3bWO_A4fxHg=q^?#J zh&L_@a(;!>trYB6L==V>Ckj0aQ3#@N5{YK0H#GFC8uCyrh61mgked-3p)gkxl3OU) z(S;roLMFbtH**SfVP|v-J=$FO6pD2fQz(yW3g?kcA=s`ySHjEHl!9FQGpdag0xlv_ zP?)(A9xaGLA%u~O@Of?+MBzuXv6%@Q+Efi|SR*y8aZLzj_oy4vgqf3IUX;RAGpDi7 zF}#>QbfFmi_Mv~m&*bEZpWm&X9b#vs6YYpQR-E8tbc7=uS4?BX#LPNIWDaNcgeF9`neIJY;Bca$xwnFkrH|?B?NbtUPwrlMa{L2O^r9Pdhk)$gPlpqRE-`2cV|Kmy&1Zx>e@LO zddlk76Dyy;~|oC`Eal_S>m^GnwUbmv|`mBI3c$j1L)^L(S&kp6SRl z@7c?|yBtEy6_G=IRYW5jm4mw?Z%RH3@_j$CH$n{Nc_=s@j)|Pa9^&62kj;D9!gOuB zuXLymlpCT$FrJG>KWf%=C|;cob`?7GJUrQ;Ly%v&7rdEpn2RfF@TP#0}xTiOx;k z@Vk9prwzp`YE|xUgRgGGdsb{9p3IpYMH_DCun@#IVl@_mXJnzgmse(kUC#!eMQ(x( z?FmJL@K{pOX!x0pQa|&-^?IeGrlzQ>s)&e+ij0h+qFAjU5?d;nd?6HiB@{-EpKxA% zg0Dg{p}x-q_i84{6;1d_P3YMsjNFifQ%U&MB-Cdn!9B;!Oz2f7+-FPh#U-#Omr$Nv zf}Dtio<~A|tqA%+F1RHHFY_9 zWb^#`%TiP|5~K5WbrO;YU)V^{DRnEmhV(*$NAk~Ye)iPYKZEe3+#Z?5AItE9ezn#o z;WJdPXU5ZK(Vu<&sLF_L<&|WPh=s@iIY7q0d8xUumk_FMXjZ5Sxehn9DuG-Hp+rQk;>$-tX#+?aiucjrhs14A`gLbufOak<$-$q zKR&51Q58`|DlSxS%6xD><-lIB$-R5P5`l+=Xaj)o_up$S|aClelp z(Q5706iKh5qAUmc*qWXUeS8eM1UZQBr1xTm=2s?djDvmTRL>$sa#WY2-GO`&5;zV+ znBhe)@ho7PvQ>Jqu8z+*HJWEQ8Nbfep(bQ1V?jaylPkg+;J`tgXgjyZ!M`7 z@g}S$CsUPHLvS;`3F=gCf)Dl?#d&m@?^T(DBmT39hvA7-copU#S2Tw(bJlVox$;f) z?3Qx)eac~Yv?D49IjeGD&qQS75C|dW_Pwm(M>8^OpgV&R4!vSHgy{vV=7~+i&%vIJ zT@*D0@}yTX4ZhAaSos`p2)Y?*Ah~GoBt-je z&<(_^oz)G)J6+jsobp#ycHASm=Ca1kJ4BCe2fK@9>=52Fp#ypS714MTWC+453WoNC 
z!5;6M8~w~`!iW}AG(}Z4M_UN?1)FW;Qz1+*438?%u`0~f%0v?9zY)*yT;U_$e>x#j zE~0wH3KbRT-U{VaC3N|qbbsfu1G58s;EX~R0jDq$Y(*GX*AxnE?nb^ zR~Z=7vr?$ZK|v}*f97B}caLO*_n9)(@W|w7T$eJ8&&E8OmG@r7WxQc{rkB+pxi|Ha zawWx^6OU{eg0u3Ib*#sbBAx}` zS8rdA%4IW0^4Tu=kt_>=yQ3)Mqv}zVC`L7^QKcEJHAYXSsH(`yJxh@V$In4TFDl_# zCT2WC7OHMUJVZAmWI~~OLJT1b(K9;nhOb`Co1aYPZswKVbIa_=CL^BwKa!7)oXg`J zMahEChGa1pVIw`MjLBTa!anx-d_JGg=ku{p<_YtVdQu%DX~BG9L6c9=SYO?n(uJM5 zM+sV(E3>ePp4GxwzDE^VZdI|0M+;IHB)O>j3%Q$k>It!=#)Pg&q@={8DtPsb=k{|< zRWRL{LnuTjcCpCRFDk;fpZ>o2G$*}fKHbgZE%#YG3ZZSL}QSmx9Lu69*PpG7BB^B%#l?sv+XI}G~*SzE`~;1qB$Lr5&7tz(3$P(zL+@*BMQ9n9!13KZEhr1nn<0BRkaEmAzsW3{U;tF z5+M_z7$J^ODD}(>xv0u4WU)|%gl=tHh^7#aL6M*gDWs4y#14svP(ti=I$2Ro6e=Mb zqLC9>c&P}5gxuysr@B;6BE3l;)Jyvso{GQm61^i%XEL&~TU8jXRx}Xze zVfk1Ulgd<2SZN!gHx+c^63_jKDHNGG28BXG!t5fx*=@u)azeYznfW+%I)+>V;O@vP znVFORYn{&gB?Ohje6cTf^DIA(+pF+Js4CS6Utf7|>CZme3yfW3#>`Zu? zEAl13a=eX)F)KWlS#B1VX~x>`RXj2Ebg~dx!ZMaycbUr)tgeJCK}4#lv1^begz|}d zlp&cpnK={Qj06dZ#c+sY3Fd2w&@aEg{UUdr>KFTjkRo!+<3HcB%T;Bj*uwFc{#hE+ z4~s`sS6$Z^XiSh7yJ+LlR0=EM=iw)Mxj|=oogv${Eeo zPd&E0G4m#Db3A{P@NtS)D#cg|k0<4%qy!^|1gW6V5ex=ntZKKB(N(TX#hkECLPSIq z66lK>s*w;r@<38B3coyERt@FY$?O?g2JR*4dG1)wIMAxBm~}t zBpr_3TzH+E@@|Gy39rmY6Cu8L?34GbA&|#MNcfeRy;oLpayn~8*@Jl|BeKU&AwlHK zre{GW5y_|~BeC!pBf_8EL~Me2RIz(__U&^+TS<6E(i^k=#@9!CcysqoWz}Ay&PCnl z;uOu=GX}5iU3(xN=R`c@nKaTqRAEzL7Z3A!bRQ*bB&13f#~dl-rjMFxx@_Mj6S>NFx$@da$|+53!W`3$oaZ48$)aSI-jb2X zRn9CvL3&KkUkBYmmJ?rY z@|5c+?6*PW?%s)4Z>2G%=~)gPrIc3Mr(9Y1jOPW_siZ3Yb5Xoe78P&B15vLy@leAfiLn$WbC6%m%nXfdJfGWDd*+DWjBF$3 zv6KB|4kjMWPLq&}3Zn`eo1TL<5*B8^S#CGu%~-Z7o0-jq=0L=wc+CjOjlHD8XO#NO zrcCuJbG3+36=f_dDTflTS}ijp+H1Cv*-FBsT2`$Hmq@FOMQpt-aZ4h~;lm>-IZoxt zfxo%QVk+@$Os(lQO;C_n8XDu2M?NDnJvj{VPE!|O<6t=t1(A{UrpQbb5|hlN5RwF` z9z+uq6eLpP5X2*kv3G=iQyd$o?pbs?oD-)aGa^GH)5^$vULV@0Ayeu&2zx>=v7|?j zl!rbtavu?r#C=o}7paJl37MG5O{yeJXi^g!ao~uA%$P-p@r)s4WfqI)SGABTVHL7U zg~g%fD++Na`&47{oBn=(`yW65FuIXM#gKzqvQnIaat9BOhG;cA#BZcL)!`XcU4OA> z@4oo*=Y=2M!Da^&9;5b68DDh5Jk$0}i)zcqJ~3kp#}2;|v5CYbHnC-FdtyfDtv4hr 
z5h8Pwqukf#dR$LyYHe*@D@kQoB3l=0Pt@cq`luLfWsEY~XxWWYCDkdL5-JI^u&49SSSdax7$r^}yL^0l~Mhl0L!{%X@u!eaJYE(RmufF^$mtA*>Yg}gaMMWb;WYLAlL=;6t zBjO0j+4Z$S^~Hl~dUBhJ%~)8GRfW~-tZ0eLgl33{h~%1?dC)Y_SDg{heC(T&UZr>? zG9)8(R{spoeEFx-@d-OWvioFYXLtBBR5zECbD z7n@6iu@|*8e0UXgF%N%a^)QncHB(6yNzds^J+%+wt$5?3FOj){hIiL4+kdVdwxc(< z&1%w~ny_Ke$e2lSq%$5*+^&n$bW-GK=GdPR-4ICb6gIq)DbJ#c)rR@esg<9n7esBT zVrn@gmF`c<-fu_gb;2d{)z!A577f;HuoNX@86)awM8`NqDq68dD@t*QLo6EB6ort; zbcUs|Afxf8E+W#Hl8hyuO{fzD3DH8-N@iqc1OWg5b5$8cCYFhb&@ty&Az7E(;NKn)O zPpPNG*2eFqLvYDzGnq_!e+?!&#@$b$6LHyckr)#Hhos1q4J)+G{WsP=9HLE>WldWLW;96p$?Y;pqi;}#7Sgqy>+g|1 zTiSg?yP~tK@iwYhPeG~=s(jh?<*kn%K>@`38|mD-&P7CTxyM{_Q=g%&O~5Rijb-CS z>Vgju5a0-4c=&)M16DE?%C#b_M%GO6D}#NfR|qyLz*0)u z%v(E*&Y&Yn1S87G_`1Hfl3beEoJ~c1HBS8Bb+jAiIUgAG^v}NClF*pV9T_h&YPCtt zSEc7QRUNZ=Soo*e)ir2Vt!pPE5rt?<02Zy4aDhWD#nxM|5)7;vLr`7JN<^AvVYP~! zl`ikq8xaIYha{8jY1{==e%s{6Vy3=SXT&$>Eh&cC!3nsMWFA>4`1qBi_ZC4r%euO@ zhD2waaK23^DE3xt*N_FXu*~KQPDy*XSTkdtfOm~`4uH}*YcVKhtLn6lY*JL8*N|HZ zm2*~(F20;neszIe(v+?(@O#owg!rPnHLcCqdUO-hEgf;EWc0cnnNW@&sQhd;V7O_6QJ=)U|u#HoL)a9jW(?%;jJ|(z|OIW{-5)>g@V4TXxeOSC;2wVzUTTZb}q>VS9 z%kh`p%1Wh!a-K*iI_-?i9v=qE0R@61OYmK)Nv7SbP~} z1#QzT=e1LHe3*B!kw6dD7U9g+RY{rT`zp;Mh+hpwkU=Q^dyeE|2)|@QO{CeriB*X8 zGU*2uP5KKr@P#pa^)O(Xc@S#AuEm=GZkv!cYtz;cz%O9S&-a@dVbjG@+|>{hf5dJ|h(&@u^)XpX-!Q_)@9O6zNtQ47MBSO)4PfF3`wmV>-@xE6(V8 z6ex3SV0S5AE=*39!b;o^AuWvYS07k63qO#=TfL=L8ExMvK{mU|Knj zcO~B7a293C)l>+}z=qs)qvPca7{#uYNSfqGr)QKImx657iMaiEh zrDwVcnb&p~UvN(>*`Y@R8`>^{nLpC)2j^FY5ye=5CLj6_~)$lcVV_S~ytqzoabHOQ6 zB%%#}X@&*fI@;ibKq3iu$>8!^H#@ry@r)TSL(IM2%+6D6Ba*@e$N~x~b5HZx^+vv^ zOTspgxE~m@qCNJA21sMpEH_B;voP73$@-k5nrnqQ;6Vo zc{roVWs*9=OWz?IKAk;9n<{axac2+E?-_h!RYQ5eijB0+_(lQx7iInsR(Ar#Jtn9=nCd1dlJwT9w zR8?Y3_WR&o(6Er_J=?^7#1};of&6KSn*hq$p97bED;=me)G&p11rA>^NO12eBn56199IW__ZCh*`wPE-wtf)EWR7IS^n+3GG zb7h=f;8+asJ6UAXLP3@Rg?OMo;*gAsKV`qyN(3~Z9fsubTz*G$e4}ECjIMDd3GQ*w z7I@<})9fH614fj|n2fw!>SAa$x-599m+K4Xc_B+tAEV_!Ng{xZ$An7!4b5$6&qwK00;<604jV`SGN+mkjr 
ztxoeOy%p1!2{lR}T3+qUrmVph6|+j{Y8tC-nsZd~UF!y983qLsc|qUT7F&-%;gd&G zauAt16Tg!=MrRm~@?caIl1Iovbu8loTtWwgP($v@}w zj3pO}2KWCr@3;PpwOVs5`EF9M^iz%BaxVLHX93a9(__Y-j?ku`Nq@L`LX*$l~h7ctRX|d z)L?Xl%!wzF7OOwoO|eT(PTZ~I;C_0-}M#WurBLC~mLAi7YCX+We>yj1?+ zly>AeP}MacVd{WvMg~IHZ=_A=#mKrggMr?& zl^U>^LAdtrG-m0ub>xsWWT0F@Z8l$`V6cxd$reh98Ix2f{$gg5#U= ziowI1an*G|qbD4M(J#*G4c%;$aB5Bslc!O{>FE8d4t2}^X;_JPl`GYNx#pXhq(Z5g z0MC!g@cZ4o`cA9uDTe9Zuv_nz$k542?C0{Drd^@^cqO|?98ei_8l?M#QG+sioGu}c z%?zmTJ5M*CmT00VGUuyc8(CRIJpzDaGed!diAHgA>`2_13N>sLrBtE0q17+?{045Z zAdAp??TELg)KFrO(i6F$&VR)^2R~qK75;Wa?+)!srSV2${%M5cE%cAZ3wfGeXj_zk z-GWxEDPRVGj#87z56lwc=vj1G&-~lPI)7@z=La26B}THr`B|z8ogRYDQBLJEz1_da zj{J{ae?tTgZGuN7GdtZi6t%9xq#Dy@p^;aclTK4>wO6yDz7TA^QbAFH$+e-_vT2Fr zgaqxVSzA5%mlS~%qIS#iE8<{>=y_Y6)mNgD@Q;c0@?1W>QbwhBsmw@jezmPgMGZ46 zEaRgyxU)Qb4+3&wDJ{2rWy%0l3ZSfReuGp&TEq!EQboq_oGNt!f?o8F>nu(8PIY{P zwIoQBa^7I=v%MA>@W10jtL2JIi3H3W5)B5)P|9SuaDYD)(Yfn6KXmT0;Ly2yj%jl8 zg=f6s?DEc0#+BU^X5Jr~$?Wk_cxBHlhr6-%E(%Q_5BiUdG0lmb$F&vq86BHMO1-KD z3cvGkGz|}JCs-k5@&X||vcgFbivxv|kIs%9q!;vn>tW-y2^|z$9|{u0STfdTlcnLb z=j!}!{#UoJ=6;oiRiMb2phYj7EI7vz^YLQnI=-tmIZ!VCgU_}r)1bUmI{~2}K#0T1 z^!Rw!XOfvoQ+hxSr=j@J=IA>bE6Ks;hX$~O?BQM6w0;8j4d zA;wZ<6jwJ3f`d|OiEnHEoBBjvi!pFWJd{_hcZA;SCEn#}2PrIZ#2XY*Xav z++i2D_|LrFXx#6&eU3`E3jo3b1NFs~(FKtdEsQ*M^Lc|2oo`VB{wv2lp2fA5xP06V zD9xqX{3bmT)W9G*Dn6tCP2%76X|0!T2)U;gjv!te5-^$gvM~8tWSkh#@8MFyfLNJV zg5y}ZmJt<{D9A+y-j_VmbO0V9KUao;t9K^tSdH?x!R;T+2>9$r&Vonke6`moBx zX4#7-v=&U7C5);(qLoc!^Z#lxtWgZdM=xi0jt>6hyq%pd!O{U?#wkUcd_>{1T=;&d z%CMwjUPp3M#b)f?p;#vR3#ZrV>I~xh*Mw=mUuYCXg8Fd8i&jJZ1i)Lj9TtMZ^AIU3_*nlE_3}X zvvoZc|6hsC{hY5l8fluTGHzPBD3?=uIUkifZ%*hRgndNY)dOk)#-g#L!!4RYO@P;J zlRCyN(U9u8>*Dn{c1?$|2&Jy1D7I1rPgJbQr7}o7ToxshMByP=X?$4CP#G~6mT%Cl zvt#;+de=pHA8Fqmo**-Y)Ilw1ht?=@k}>9gOz*gqw@O2X*1D6fc215m0z)^+_SAw8xJ_)Qr;$z?|=NUwHc}J$KTHc;|~^vPBLV8`1Tb36+y? 
zcm*n1WseEooqw)X!zAd4)beF`YYmVNHS&_ec&m!R{x~xGX|A$Ziy01TK>z@klpAMT zQm4V1yZnZ%U}qD9uRIl#x^qZXBu=e;7_Ez@y|2~o^dTmHEcu($orwrblwW1|&oQD1 z1}qfVU=+Z}9x3i^XtdgK(Sb;AY4$xOJ8#J;?Y#SJ2A~qzb9BlNpMgOYoYMFrL*qEy zWbtv5De)vgnXR}YB4o0%6+b@4h_{khA=C_$o5PYZ%P@SkIPK3xNb>ocllAn9@8v`i zdAqnPOdLb(w#p(CES>ZN{U2B2DRG*MeN618Xw(vW$V=!dNyNyUOZk0pIKmJC7r|^! z3Mm4}6`#%*vw|rR9(avrkdz3#-I7tl<|p>*3{Ir3y5ycU`BIHO=5GoQ?e%!&jDeN1 zPS4Tf3V4EDY;@jUvZ%H%BF6U2qI92cu9D6Zk*-K|7SLjzAFyEQvUrxy4#{F;Mq_5V z9-^=`NXG)a;$_p>B!Jp>v}|QXvg3OgZfwIu07O3ZInoTGjt_d4BDpS~3rl03us$~4 z9)ZeP0DKD8?QY2j2p#9yo}SQwTdC~qdF$#Fp7*`}iQ9d`NzRQmQgHu@r+96@3_FCi ziJRoM=qL-ANu)WpLq%D`Y$C<6J1Ak41!kJSXNZ>2?1%%-O8BpxrY4Jw2nsE0>+2jb zi2xLRF6g6nzo%X1WFtu~qd3H^e*5OF!^s*M>m#(?v+&aJaaJbyvqPB=F@Z?fVijcl zlVp`qJI+_Wu%Wb=(b6N^k|jRn&_!bQsYz~OgeB1O9aI`Y@-CA`VhqK5e8OD*UojPa zi-Tsmn=BOure9ujSgxGlyG?0c*n^?v%r(=z3kj$xTv=3jV2AkHdO#9S z-KRwwRu9sYipAO=wyVV_s$J?X2t$ejIkyoA#V=_@C+v+6ciN3JJ<1yR>IIE*%B{RB z>`b(d;RYR`EA&JV(uJv#IG0DawP5rH5-K3KJ|B!~*sRat^9IE_iwbn!7OH@d*L|2h z?;-ciA0enVK)}nwJiZp%AqYwT6bWfoL89nsxj!7kr z3bXe^*dvxK#=czzimRwJo69Uycq*%$%GY?Xm=Xur4(BIfkt;P%!B{74=rJfk!Y~qT z)%#XBq<+!fWfUQW3{sYzcLjjU%D1#6vMNWv1a6>L9yP2q6i6L!h4Ct5av35A1Ve*o zRc|HM8}U=xQh?&N#O7iFeM)-0|4(8$Opj%T3YC+cdEQfa=bB{WzBAq-Lk*Y*rmQQ)SoZ%tbo?>49^sKE{IAu-_*V3G|fA%6N9Hx8oY6iX)b=5+-X2 zdz%)sF}w&d!j%P4X7S5Q<@u_Q;l6~T7M%3I=NWHdS;Ubf8b5H55Gx`t6Q(mluL64-F z3^M#;PQ%YXD`F$6RYv>TD8{rE+kI@pc8=-tBf}g?0(Q7A89$9#(8Gnhi>)^kM{QA# zPK0Hr$sTV1lOb|(Ln0gS3a5~)hz~Dh+P4zWhV*qg=HAILU*c!Z5+laKBLpzkhe5W7R3GK)W{g!$jY?e#CcQ9DoSkz`xeM6 zNk71hHqzps!-6|q>xw~BF`q|4?a>!6Nm3<7FaH65Ao)e@Ce{k{lw9@k$ zB0ZDKKZPN_>d!8-PANCQlkxbVv@bhowxZ+9oy97F1S%B8T{1fAs z{Bs|$qSju;nD>xL&OX_08gBoCPo-F1Hx47HY-54%M7XfNQpDt6=ezQlMBR;VeVWgq zha!~Bn^LE##i%0YCC%h%ru3V9!F%s9*9tyIV>XV{HleLGNOH|sk1lfEHrlIGB8jka z*nu(#>qo$_7-y^ZU4QsNh!)mfq=(lPpEK#ZFF{1x?Z7SdAxH3$rnZI{YSbc-| zyxdT45RnV-y$(ujN>u@ELPm_FbS}L~<>UxWm$9ZS@Ho2FcTtO9)g_)HiHd7^L4taB zyTq3g5nRGT!G)>M+~jjutC@2zMDm-*lM_2isKW># 
zAFa2Al%j~C!1Uvzv>QwZ6f>$5t|e6R0U_p$V{6jlcd0e~wR}`bECBG~^=O@mlafbe z2x4H?e@dC)0HO0RJDRs!`-6FQP_n_hV)A3x5;7{G2_2;@QB=*+D$}wl?hN79V8_-m zZzLg>H~wmKabM!K)o{?G4Cfa+<;txvw4OVoY0&>ji64oX7s~U+Dk-guSp;|kJ>iyn zh)7CQGSp?Sy)o-U+O0nh_c_}P$AmEKLyV|!axL;c8Cq6y_oWUU+}(%cn5hAyIQxNF z1xcnsgbu`!JPMrP%mlWx>#Dw)9H(d+h0RqDHuh6Tv=``H{~cyb$v%YF|Bbc@xI8T!vrlgcGo1zEkG|v; zgKhys2&7=Wnv^T5SQ@^-DqMAvJ9O~lpMPiLBW%#E&_3#($FEM|vLc#oXo$p1OHUwv zPr%DJ8}HAQe*Y*zeHk9nh!=gDO_p2)?3quZmf!jkaOMFZ;g0aY-qZ9vsWaA`)njmdkmT~2xq$3x{PiE&#-Qe*^DNc?ADx4ID z`alYmg&-QT0E;aI37H?~6z~|Cw?xaf4^C6bTB?PISeqJ=FB%rX++NW256AM71VcYk z7syiV;DHuJJ3@|Qim3Y`I|Pr{7y%8;HO-GuQ$Y=M;fOkJf_s5~R+M9fU-Y4o6N$PC zK#^FL@)4DSNE=H|#=@ZuwkQaNy@~a%5NQlwpXu(ijG(gAV-{%}#?GKg>m# zg{0?_kX@%i%SI+ko+u-HQ*27EZ=R@h?5G?Sj7cy3@X+fI(3_*`qAmL$jqJbTME~qQ z=XOChERirN`VBZjQ#2x05jm{HtW33^U`Rdi<|Ib!%za<|fpwrqkY+fRg(j3HmRg0=c+ zB^5}eIZ*ZeZq2bx-mt#2yrTxq7R?|)1W zo)HO*^^Cm&WN^J0Y9LG%uRXgbmn}p4MpT&(S~&R4H5>_vc?k=FYl|nHspxC6GSv%Y zePUFJT0zip1vMLdE{(c*nu{`Le9;GJ2r%8WR;^4K$v3E5OBAiM)Bte=uk*P3qMeDN zo?8c!qAL4YN@vCUeR8yV+FNoFTka3yMnj8o7WJe4ycq~CG~PU;M&o<&I!eV)ial1A z7Rc3oCi-MP!`p@-QakL^v^7gZ;^_%!;);lao}Q#G=s_Zy1U+vx=@S>{Ucnvx{)nRI zZ}n~<{nGh!7cqCI4`sr)L#HQLUy(eDn86y|JZE(7}+ASjB=ij}JJ$19TU zwG%85goeOxq_KwpykYS07V;};kP20j4P8!A6N}6FX<;BN=x6TYqN80Y-U85z)dbVs zk}qwTznv!01W=dmp6HwLF}TR6pu^@C0qZluR(G_-q+97QTid|`pP-QC(+RNfo zkrrd`s&Ya2ixav=0$@WvqU~dhC;%3zh$k)P%NI(vaO|J;1!(GorT=w7V04s0Ci$>C7=j@qi;5OcCb z&8F_;1$F}*ThKZbSmJt!_@Ag~W>S=V801u=`4tR4O$ze36auZ2xpCZd{YG^OVP%Ee zBC~p={jbZBU}{6h#ZX+xg0)~)!EJI=0hol zy`~ZhJ&qNz#z%{2f+|y(J{++pOxe@aM#AqtWYfbs$u2~slAI-LLp6w;cTzi1_;BT+ zAkaYx&Y>uI3u;A-4H)klFg;<=ZQC5RLR9k1dxp=W&5tZ~$@yy&P+hP|cMm_l!b}Zw z24-QZOb;-ISkHTm)+GuYA{MMt0LmL{#H37rS-sPEI#WpnhzQ>r7P}?%@l|XJ(kVcS z2+Sx_?FbQViPE&3C2GAgA>9%j0dERhBMpD!1Q6d!k{<=q8F!sJ8&|q5IZAQrRdRfu>ujGg?ZKi0&;By)ud;#yJeNc24Vjj zb7X)MSYH=k&-+y7BV;Bi!&|F!P$E2hj**!(*m@cO=H%CDwxVjpwqb~7Ht^2)1k(&!W@@n$f#F<$#S*QC<2xDwc zy2>sl8ok^1>hFN;V_pd|ywBiS|m5_uP<5bL!oeEJSj#Vd^84(45lV 
z74vl%l4qxl?t_U$`v?s)y-G#I)HO@YV5bx4Pr{C*v>1YG^?DWi4vR`m%wNHbag94i zOXh#}nX97kJU5d)csugz)aO!-oU9tqUwEuFxS0&j-H#tl6k~E(LEQGhr{+75!9)>Y zOwEkZmTO$h^1Kg$#Qa-W%O{SBk`x~}+tEa{yb|T4rTidkREMi@eG=Gs0ZDG`=wW4B2&0D@Gp*Izdo_*el7zIU?xCpQSGKDtWLIA0_^RG`C0+LA0sro@FFr zMzYy58T)RC0T&4AN-od%h)W(hW*gHO6i9T)8}x;ITo3`5$<#Dzx_zN{L72b{P04iy zGB^1sn#pxlV}K@fDbaKdCZ2M1dq_GymAW0PJv@|T|xr4IBE+`Ngq*=76(&&2hJE=5r8K{_*_mpH( zNTH{Uue~~IzWZhxF4?>v+9yo&ZAkObgxb6bFOx9Xyggvf6~?K$HGI^o{_H6T$#l2O)HkMb0D<;xwvzha?Vi$Spb?m?mOx!g`@#VV6;AHCpYmQ5se~ zQ3&-tGlrN>Ae)9*a znk!Iela6fAs7lsdyJvIYI~G8MR$O9Ga#mF%o+ZIa)tSZ;SVZO$K@L4z#d^@!-T>p3 zQnQ9ie(Y~Xu5BYYCGuxn2}!Nm-fZJ7H_Fqx35LA*Lg`s2&ZoM1zHuKJ1};IeNaze8 zgiobM`9tLFQMI~TI8&SlPX-iEwxvvBuMtAHaWFj8-BW|ha>)YU069Vn zpcX%UzWXiR9-q-7qyc=F(eNa%SBphD>6ULcuB7p3ty$MymcmE2&X@ zI3YUaBXqMDbd5FfFGx^R7tZoQn3(gwP z`7CP94rhJ`aysI1(NV@*$9OKLrum-b(tWdXt^#hAs7mqx;g?^UTNW2s5^w91fuc4u zC8~Z(W)th1@3hR^?1kurf4YjAF;8JW2vb6V4S3 z7DINBY{*($hXbJoPg;rSIL^OfQQH1D2YG4qiAmvUN1-UI zQaH*ZdsMreiMx8TiFEM@KUgNq0HNySXgavYQ$aJe`fBqf(_3x97Il zP792Tpn3-SZMjqAH?=cxaZTu(iN*qkmO&OAj@g2FB5)k-U*g$F^H<(_rS~W4!G~ESm+!eDyU?9q?_MH-zFt)d`YTtv1T<5 zC61Tm0xFjr;;a(w>W<~zjfX3%ytb-nXzgH?Sd zZx+dX!AZuN`oY%~_(WQ{urbIV$PHt6V)=X%AOXLyHFOEA#Xch=K28r98X}II6+vtz zxNzWl?9aU`%=efj5R5F2GUWP#iv}h0f9*!V`6HMDN``N#}ZDX3?ZTL>=fmu z>?FoZrnh#?MIXZOi$L)bKDoSGbgA_gW|m;ziG^zP1t!T9etu&#lH_stWWJ*}S9HV| zHb(dy%ZS|aaM1Q7LT0)R;08Iks`(FC(^|C46x{Jf&f2VF?OQy{Wfl<9DD-c*(4H3B z#GTQwD(rkCXYD*?U6;^3DCVX(K0}K3<Eh~m=NVc*s?^ar*3GMM35an z)Z9feEh_uT%!=mDgorho$@OY?!EsB2Dx!10xkRCi(hW@%UO)nwe7^2Mcb67W0T zPH}`+^~l;XFnkI2mE{M!)U)I0a`9}5LRH+ih8s-bB0xc4AG=4!kI-WZnb2okA|ps* zvsbY#M8ps&-TRY}Ix{;IK|%jAbxgQ)L}XW`&klvA4-B614J*+dDX4%^6}iM>Y|xQ4 z)+)wz$CfwcA^L;TI8FdrqsY5T;M)j3GUGg-Cb+DS@m;zI->``|S3)MR%a^W!>Oe4D z8<8Db=vIhGWU-Kd2Pm@k08s&DiGqT3U7JWOTae>HiWRk*0?`zCTXn; zp<`Rmjb~gau((AdB#oEh=_sx^cWDKL>mfrC#;iEEnQ(^-L)+|2kK4@ZjfaNHRGaY+ zm#^|^93f?Cj@sy6+i}fP5hXdR9S$N~7^ChRvpT@dTxx+Z{YVNe-CI+y9Y&Yd 
z`jLaEDwSldk_M%st4!@AodI008*7b-rFKN#WRu`GvTju|vp`srow-#mhT-w*a6xTFGoP>&649$d|>hSW_Q%wXRmIZ703wf zdclpj%I^Py(W(64)e>?uunCZ>8p?33y98dqR*jKKVDBW`Ft%kmfW|J77F8|3GO6p{ zx~N_|VJ^;KmhlO_$v_2V&5uxvd;y}1K!4wlO3`v=zOihP-m%r(jUd<26OLSy-QsB# z7WLQDENuBmyYdnlTo)ZYjIkZey3rbpRsdz+f;0WK3N#(pw3hG5I@^U& zq~85at3Y?%*&_)yl4YjWnQ^;fac$To;*uyRA}LS$A;sQ}*l=XI-~xt^4%h`(XJu(A z`;rS)B*rmW<~%~9XTUh@E_~fxDbCG$U3YVDlMqTO+n>N<-U#$I?dD>iB@BlaWt&re z=U(v@zB@|wMvu*%t|C5KD&p@K*Ypk_^wg$wYic18`~7_$e9i(9HEPkyICfEUB}9pM zw8|%b%3ncA1-+%jySwI*%7M`+ZsBhTnTZHZlE*?hMBWg3buF}xqUSfO zqrDIoNO)$uamr~UHLWA8lbUGOSWjeg4#tGE+&ie+1~peEJ_L_CT%B2olMeu)DP`I6 z+NOMXeb;&(<4A9eZ!s?i1b_MbK*bK8^t21#k<}JzYUGP`5Ve_Ezd9T-^o4=rM7v)n zT1o1zy6$1$?IkW!#UeyRgS-^KSYaD-ivwL1Gvvh_psH>^wi3}2mZL7_f-VV2Gn&!_TeszFmR_J|HCWw)JoqBT=A7%H zB2{0mcy2`Z_+T-$yBaA7!B8SbIhlT|{w&5#%O$W^%m@_C65y8*WuI6-%BllsuG124 z_qa2>ssO9vd3X6jQ<006DqW+jZR$C88tbVV{>00bZ8!Ijqnr^%SyKg?rbjy&>$Hzt z{_G;7j;FhraSF~&AQH{k_@U~>qnTY}U(#*i=lTO{tQXLEyivT`13+lvei-JxfrF1) zc!ptf?ToV1qpncA?rk7y61|2@wC4h`-gI- z^Erdk8G}2aQF9Lr`*3AUlA&g1_1xq&LB(SLg+|rT26W9cLLgkd8K(yZ)oP)$b3v<= zpvk0gIJE8+`uaCK? 
zdm1~0j0I;b3eCfXdY2%Km(G!tVcz*0X>_L;E`y<_a0!FlBn>wO*CGUvxt{b;(xUpP z$akf*>5BiaWIP6S%9gSFpNgDsEvvwl;6z&h;Q7^Di;*G4MT}m+i;Cre>I)FBZZs`Z zH^yF11zGQcX|75g)zSvKR@+)S9jyVtq|$Vih3dX6Lp$WqL4{E8Fn`c67^yt2BLtJ~ zI&&GUdkd`#=ob!{V{~iHFn93PucdOdbcZ1C>NTLnML3jB0+LF|)`eYe9J1k4Y6NSz z=&Xy8p=0yHTg*%<*_?yzXr?GRXbFLSfdKosK;Dta3RR(qL@?sr+HgEH^fLXE3ON)( z{AFV2&sH8>caRp8iL+e;hs7HiW{qY^lQ>AmwH}Pm+T6*JSq4m@xNH&eF71l#--ZdR zHwt1uS`H6bwkx-J#^Xik9SDk8YvDAlZvnj?#f!sP(VyH|UwLh^?KZ+Nfm5Ub34({;>n@9E zG?r5$_DTySbmSVhA>yhJNSgImhSwE3mMeOG@WNXqdv~L8j*VoEK*>VXz!ZQ80ZE1c z!qYn#sjEWmF3dJ{f>p%LT73alb`z>}K@@Uu5Smc&sHB2cG(VRazv>F8&Y3xxDld3T zM^zV%Nl0mgVFB9Cs30!T%u3P$*h>7-*4ln=f87WnMNC!fRNn_@xZ`2HR5iQFZZa8T z1ayT`52MsW&9nhf0u}-s6FldTpasKB(Bc7%Cov1NEX=|zXu&K?!z>H4U_5DI7G_zR zC4m5aiJF#(_>71Pc<~a=7&URdlhG0_5mOQG_S+BB^OO(y(fE+D50Sg!RP|lE<9_sh zyGuv+dp$=-Pd0Qj=4NCzJZwYU-NRiwR83uO5Dohb&k!{kRo{C5uk-)%F&`V>@$d0( z@$c^sPk(=(zu#@`-}2Yq)7RLS{dv97+|KoU(bpTXwO#cVar@PM@10FuPd)w8xv^A7 zN7wH=_1(Vnw?6!~@8kZx_jYc+`?l}==455$X5DkXTh_fjTW;nB%jeBi?>^V6hP&&kj2=Q~T35FYm4$@9s=9@7hm~y(NxzZ`UmF)8obS z1=g&b*`!^Y+TGdVecJgohV1Bu-nY$Y{L7u5+q(U>-KUP4t)`7<+l^h@t#w+mrGLAA z|C(nv8@B)PF|%EJqvtX=n`~#yy^DXn*?nr}f2Y=I-K+|0$&RIo@4Qp3cBg#rkm zN>CSpC;p5aaL5V?4pJ{bAP65Esc;Y<3}`xa(49vSb2-cDRNuFrE!olZ>ZtY4r&o#} zq!#*goOtD5Qn)gia?(2QrN+vfeA0Q&)bK+-6B2N>p@FkJJ*T$R&l|0W`O|W2uWi$AF~YRe9dlBptu5Xg^Q+@>df9`} zrAIAAyjC(gnD1;z$Oy=W2#8YwQPJDs<3eJP@Yo21MWG!gDjsA!`D{=m5ZJ)LpfI7E zIVp2C@mZSakz4y8+qG8NZ)qk>tJ}%jc|G^kD78DPM9M+@ub0+73Hj-i09}^G_ z5mq7tf`Yc3QVw^wr5QrX z*ZEq`GTu!Jaq@jKS1R>qJM(!b)59sBe)LVI3>EjExk`$HjKr}ZkZ?pGYXi!FAb|iW z!hr&T8h|2M3?39jARH*d@xW9>IwlZI9Hr6mK#8E`VR2-DJkJAB4{LEiCLRqQG!}Tc z1Ue>*V@05XYa&QNC>$MJjKQn3;XZnchH? 
zntyosc)xj_iTV>o=SM#^M(6K;%x|FaHs-!nTV0s8)wR`yIa_^IS9MiaT^Ms!3psJ* z19V^anFS*PX;($tf1U*>2;}#Yrj@1li6L>?dHJqPy=0WG;iZ+UPS2!z`1; zEX=|z%)%_pLQ$|Vm8G&&7PP1=OT#QpvosB}ERl|hg$jm+3LaBoDi$glXkme=Fw24# zPhyzL;u5K0f*+3p5HcwF92ycZARrq4AR#a=7;vh<6BdOA2@MKl>;(-M>eA`tJPAFx zZhWlnj<_1Vy7P&)FSR{>4=?2~JI-sflZa3{j`(~noujCqr7#hb%53!nP%tEkhQ$G9 z;-LZILW5!>LMz~L!C}$?fsF_Rfyl&1eoRb=RG?U-OWY@5NK~LmNNB{#T%Glk{~UVq zpC@7BTJ_6RIy%$5rK=b-rL-JHs+NyB?8cYN97d1t=l*(#S3njIp2K8912?!JaT^o{ z9ohoJgaUfdfU*so4hjqrGB8p{E$QyK0NA39@m2bjn%)Jn)UX8t%fdYICj;3FSiSDJnA&$R(;y1fPm+LP&gzR6b%%iP?ie7NxKlMI_Ke4RZlvo?b5HKyuF|0kNA0%YqvRWk2{!L z{kvgH#ay_REi~{zp&}3(5=O;EOsu#vuO9#pc$0}rSx6E6@Nn~QQE*Jq;bH$~`>A$r z->0gFh$NToD;C^+VVIFU{aE`x4R_H{^$hjV@B0CG0MQr5ySekWg1VGI)+mrq-FbD3ND6x|emPcxAn&(pZcM(;n|Rd3vn_fBsO z71eN0|Mz&Cb=&*wKa`?OjA|5(>M|Go3Jc@LZPyoE{j?9|KCZu&9( z4>6y48|8D|yi@%K?I^j(tp9N{TH2cijNzd3X|mjz0RTV%000001CjtN42*~4;&?nB zCt*w(Qq=HKrJtp=>ED z(y6R6L0fJ24(SnTID0Picrr5YcH%gYTE~Q>9?aSo7jL7YOM%<{qpeX48bz`l$J&3533aK}iJ&g*Da$8Iasf*D?}t&rg3U z#X@Or3hIDlmZQz|jOK2qv`>mrDV4LW3Fkxtd17$N(W;*lx(}BgqsXz$+z`NWS_MS# z#NWi3mL>q5Kns}v6FLY;C9F90wFXJxmrDBey|36kV@K!* zo<)>ErK{Y|Obk@HO) zLU}izCvfNyL0Kw^pVoOQ`Lw(SEN#zTVeA`C zL)d_*7lxgjv?~0(r7cjQ2qMyS1ZcRk0rse)<=!0Spq^83!c5QQ*g-DKKA@t^W25{w|Af@T3-%F+PRCI*WqD8oqVGYsRt za~WgzEOgd+!q4D52Z>3*FlC<{BRS=CWz|Rhjj2Q_g z7zuOQ8QjS}X_F$a3}Xjdm!y1gKz!6jKPO{O)>9Fq~ibyf5o=@4jU?)jd%=95aNdRBtLRx$F7{W=-`H=HNjwK_)&JfC#SD%oQ z9y0D7^D8&4#qlS&GrA4Erm=1^G?}iiF*MC~k;J#O5}t9B6$IJPz`^6A5!7XYrN+)w zFy9+8ZK`ern(hrODR?e*X}i|v(s2od@-obZq&%lHd6aL$E z+OTRKYf(;}!ut6{dprq{cUs|!_P`+*FE9{S5~CVrldF1TH2q~9Vg;Y~J5%L!p4y#$ za(X0Fp`1oo>J7s3nI?8;3J;HVvU zf6#xTr3;CjyD-Zx2p;VdBM=6@e%nxUGAu!KYka87VKy`*_Lg;PoG)fY;^}}{@je-y)Cf0nuEQM3rRt%#wVj0<4bX`&7Dl}<6{oUxYQH3muH6t&tq-2(wQRQK*c+FcT|-j zNas^ik0^0wZ;Ut~6w^>wMo%>X>yg5^p2#syk3ok3JmCP7b0!$_ou&a`RfPZ>8D9d| zzCUs#WzHr53H{XacbcO?CS@L+87j_u!dhseOXmIgRXR6f00!`}UgS9$(5c1(3f>cl zGaS8PVAudcqA;n!)$#nJV9~q5RMv471UEz5a%A6Q%ZCo-DLzpa=xBR719;Z 
zM)>I_r_caAWT2UJ&Ic$#xwBTFeWt%RbT6l#PD3Cy-Z#)kHfryZ^U+i_Og~x)ICH&w zUxR`DTEi*>I#7;tg3B<^lar)#+BqXL%M;IK9SkT4lx=LTGg&ROaibe*fOEgY9?tr{ z%ZU@BEC5RXqwRN-o;1r#B9ng zPzh&!5A_56BXO8gl<%xywtkSna9ub>y-$i8s*v}DGXuaZJJ5+$mB=@o7+#7uJ6qn^ z1MC1qGtOrF6Qi#=>j4bU+0oJy0q3F*dk)JII+uZNC&;k$pv!DnJdWz8Qx8fIm`L9* zaTV9pKewfp;Vkhpd`wn~g+oOeo$?fd;!j&TC_0c=&mMJ9RmP~HtYkP9ey@*ii(y<0F}>T*u^jMuzT#vj&5&b!hDA0GAmeP!q(cIL7&ZRKZbJ&l=Fq&xvdo&fH1`MnmF@dM|?b?QM(TKMhB;7tr$=OB! zTmKbgI7R0iHbf#J_PoQ^2xj;_4W~677vmr@G6b%cA8FImwX%IW z+^r&|Ig95wadxiC(6aL(TRS#zW!VD2L#8H}LhrG)Jt8(ry>UK;8lWo_;dw&BNk|T` zLBZWV!`yiW4#~3oaXRlL=`lZn&={mxQg>RW5LQD)HOM!08R{#Olfugbzaza1(7CK{db$N^*F)^kHK}v! zDP%`Hp|}0@Zjjq*Lkxl;yU>+gxDNC=%UUdJ&;^9GC!=PGmb!BkEkCl1e4~sFY@`kC zbB4l9&#t|WW9W?>QpOh-USVSJJ_KiT6sLt&WYS@5n>A|iehQN?Ugr4bc%Gl!h|_!^ zB}@V&`2ohr(vkLh-tw^xe$-R<#a_Ubb$sE(5ZS$(8l8wu>rFw;c)eApozKW`~qAt|ocde2T2$z&gfo3X~t;q<#es?)}5ea_VDzHkHkc+X$u0=HBlrMTOUEF8{n6PfMkxxh+mnB zflSng#b53V9Je{!qJ2hd1eMd2hmkY!Q$96JY4G&Nxqj;bMlVwX`I@6d;&;ftJr z{9$H4ohG9Fgc5nk1!Ufv-&z&N_^?UIFx0z*V(1j9E$ZDnANK1Ukb0LSj6O-BQtz5% zy&nE^S(wt)yL`$FS!edCcMUDnosx7(jCJW@uBbDniYiys+f3<%<3p=fNU_rn($zoYMogJyDp*P7%7 zM*?%S5_Rn3K+2F~xK!SQFr_Pt?@_c<;N7Ke=^WA&yM1zz7~lXjG3g*g#UH=<8jlGA z0-luW&fCgKiQf)5Zxl?4!-Z`c8cbK1Rtj7+7-CP(75dK9cf+)qS>u%wiZO$%sFQU& z*~*3=ysDCq)|o)6glcdO6b^_g;>NHC`^5+ePl zqlA)bpm$1O2PY_}Dzd1{HVx|8%CHQjh~5eBdQ`d_Y>}XyvLPISsTY z70jtig-HJESfL~v=$#eVXz|cLt1!;pDqO1H&J{!gBS7?_BbgJsqXgWv11}$*lNjgj zBrer&rxGGn20VFJiM~Zw ze|FOx(+i?w1B=*f|E=j~M zIy|E*@H=(5V+u}c36ZAS2pXW9L!Pbs@nlo%nU3I4@B12 zK)XTVR_@Sd#TX*0-I=k%ZQY?Gu#Fwx9aWUhhg+=%b?U&IX!GlU;s}ieY=!}%OJc`> z??uUG#wPV5)rGg~j?v+5&J8&_K@N>ip?_PS6Dwqj-@yrb)aeEZHpzk>{tjNdLL~1y zSQTf7T!O_a4|d^X^astuOqjqSe+N5!>*J}jsVH*4gISEW4&Ickli$HXig@S)*YDsM zWfT!a#XKI&WnTv`giDL0PsV8V|JMWZ{cIo@8I8% z+Ldg8Op+J;Haa_krhRZFZ-caW9y;{dPS}L)fg`lw@gZ%LNYeNNf70Mq62`D%`#3Wg z>DmJ6Nr91Wsf47Eo*&uY!M%hpXX1z#4_4vW!SiAyQHUTLRsxgv)z)>v9yx?4+eZ0s zMY-=|TizZj*(Z+&b3);n>MCjUxqB94+eW~rd3)?Gwn>ql5Dw?Tw>Du0<3%2~hZuhTIF7wM-@a9jv5>B@Z5s7IVsO@o9R& 
z1ft)Cl{|R4Q1rX@`~7Y$qxqz!U6+;tYdjdNY+4_aW)s9e0OMa3OwYQ^p6EGo;@mU zJ`Beh2fenQ!0*@Etar4pc$;RVh4OT}!t)tP!Z>49VqO~e zfS#$IZ3}p=CrAa>>WYGAMHsq)VbbU1rNuFE8WXi`g;g_nyNqLM(bpX2FMo?n;A+l6Gg$mGD9}#6s@F&ZdB<)3E_dqu}mnv8^_k0AJ@38!5e`WlYJ#~&oG?42n}k- zh>Q$c08Xh-Odn*$dL{7OUDP-h7a=Gl89~k+r5UhFwR^PdWZWt5F=Ltcz9;0gGt4)h zUUq8`DsiwZ(y$PicvlPCjmm;v{_W!Nrd*}X-pS?s+++453$oV!QR#9}z|dR&j1i>O z-DW-s_V$ss2@OJgJv_b4PmCv}!WZ6bpqsE$CU2B)%=JWtA$YB~)?BG@q>*z)WF_I& zM$+IkCRYsXpj$&=i(|>8uHfY(l2kimOaPRA!6We130MFP7x`6@t5z9r&LfVYGlGsP z(IeRRD9DT`a(pMB?Q1}ET_e)yOkHLymEvRzEVc;zz3|u{b1fba&6x{%DWSV@TO=C> zlu8gVK04=R61wB8V5Y@H_c;W*q+!J>Z<2g z%yf;b-3)X4?#Bo(lvwVvO6sD826>_g_|um}@?CIN{WQnNHilin*~ziff0!?vx-3H9 zKJ*1wv)8zgBMVMX4=K3W=)BVhf7CPAO6q7iuxd-{oBp+8H^L#i0=y4tsMV(~e^tC0 zUA)MwHil%O-*f)TKQXygVPIc^j9WG`?AJju)xsb-yY=M~potGD?iN|C)PZK90w&qc zVN3nGAcwEQzDmqOcS1P3U5jEbLRLW<>Y5;%)!g4PI7+dJaSDfDf!zN3AI1bxTK46hNb5CWo7KS#OtNcdHj9fEScx$P>AB)|68ZZhUbP-|Zm>5~abXJa&a3l{~f zlM9Lj$z|f%KB$`-5VdcClnh9N*g5N~vmbDowrC*@9YWtXJ2D0Pi16@cJ#-Z>f$s*4L9=|0VOa5L!2^`pRM08N=J8)YnM7R>%yu4#d;ff{6a_eC1Yv zXPW7&cFjs+bB))MzRbCgD%-CA6*>1p=kpuod40`w?qxpdpu+vGz$1CmNtu9dU0!toeW3f;R`_gFL`OdIt70W`9iA53qT542kgp1RvyvTO zgSeFvBD?RXPKTPLx%T zLK`KNHJIu$mMN<%XNv$fbU_b@Oed=zi>(i6J?w9?#{327alD9#@j6-~yrq+ZLXTxc zab;b3Z^9=Te*Lzr<>^m&``Jirx6QiMgHIw{cAxEm&nLK03AdIzq2MhIx)PDDtl2*J zd=gFo#7JTANrJsNmaZR}WWd{VaV)t0Sl5qQBk-1RioVSgJ-bpE8Q?P#8RyOdJ|pJ4 zeRrjHmJ84j_PbJn!RwSSZtw~3UFP7J$jff^$k6A zb^sfMK(=vpMEkZCgYzRC7$8GYj<(Hd6~8FOwlNW2fBfZ9fBJMtqVX(zrq{ z83+{b90DdUI>B2srVY!v%7J^czII}N5YQxEw=imna)YW7M1gDW$J+WWg{!01Yz)Rm z(+e|Pirh#uXW?_jWP^aQv34P`mcAIU!xK~ZI@=Fi!lt;e{wM~YS8#_`r~$tiPS>H7w?Y5U6E~E zk2i8+DDxU@z0FP#f$XpF%o%&GfiV@On*~OKlDzoAb8ojLb41zA3P6Tgq-aEO&f2uw za@j+vOPV&&zwUDK?$AB-Vdke3L&$3GbTV(S0UK3^w0%&Tk<`iMD6Zg)yO9lygGf^8lDsh z>XbNwkG#)9%@tbiWhTO#V^~@x-EaE-Z1P%@mu6tR5aZJg@?4gM#%E&Opfxv~Yasw4 z?N5;08mqbSA_8#)ec|EF^BhK{IYI}b&GDAwAW?8&T7prqhqUozz_1(XZDeEx>3fu0 zH=6LqB!8Nx4M77%`FdNeLxld|Q5dE!1SAb|HwXu_1r0{G&`FH((9RC=i(roDW$~%G 
zb~x`o%@Szl^oz7V!l71pNxGoC>@xzJszuVyQ4SI?fvw<^S=<%_P>dA@Rfy5{gH4h+ADRBhc<9>3+x?qyKSM54#5@jj zON=C#NBMFeDI{v-9_Q^CtL=mxb`6*qgvkRoTg&^>+bot`8=2=jkBas?1#( zJfrpmkt{BRsdWxy>Kr!^s@&EuNITWY8|gqyX3F;Rsu)v^41i0up_?(Yk7Z*^oAU@v z_x=DIr5iiS*Itr~JqhJHgXE`vnZQ2kurS^ErClIZuINWEyd?J1_#5`l>dbDm1lo(# zoUvMhy;XN4khpav+r7?hS8D#(~Bb+}zNOK>LuYZJFs?@-&1K6_YNFvN6xG6N}+g%p3$hAIJOV$+hM{ zQfFi&99W>?Vz#hI7(PurU8%8P2y)9@_}pp4X9I5Q4U6k!h)Q>exffrhyhCU%RCPI8 znt0|1E+dL;9zVC_y7 z21Bj44JJB5zUo+NQ)`xNS1fnaOE}hn_7C+#7c;h3Fw7X5T29FYY{wEZ7RgN7E?)`I zLYX@drg%wIeyHpRLO-ezuhhdCOUW8=JlOX_H%D4Hc7*}&ZFtR8PwemEyGqL?quY_! z5P0+@07LQMCVMNum&p6Qh>d5l`?gk&HW`tl>;;h%i4xds_Xd!Z&By9vci0~T1-j^7 zl1XK^wuwx8VA8b7W`e)ngZLxbctFov4*G=!bR?V?ZGIMPqRngh>?uqh&dv#S=m}fv z;tUmzUHkH}_VW`VkGgaFV7r!4G(3dMat0?8m;w2gd6tICwgExFDYf3|{*9{=L^;n% zqutzlhi}N~;qu;i)F2Dpo^|x8H@2lU>XNX;9Ik=eQTn~yO)PFKho9MxT`+ee-%7FwEwc^R7z@5H~r$K=Qk6?k2-t0edOQ+KJHNy8$@QlQ? zs?>(}D+HED$#Vr~2rV2Zb)yiGw9)@rBX#aZ1yOezD%dK4dYHSq5A zH5pmqg3aWIWDB3yolH!vV1WMAq8nkL&A~3p^@$Y?iThXA-@>q>HtuSzwlN*dm=j`h zK>>pUOZW;fuG>Oo>7+QOQ4&v5*AM5;uQas`_TK$YY;TWwEqZV93NWW|0*7$W&v==y5QQO zUfsRbWS8lH?TtL?#vj5!2bW3Dz$i}FW>MAQy!EQl?0t(9acYxQ9UA5UeN$dZURKdd zQxM)SIIQ(c5Q5{(*tVOCUT_y%ND5hLvp5)>Srso+ce0~^ECyG-Wh{inpCe1XW6lP) z&0?@%4XDg>X?D=l#exk;@)m=GU}2)O9E4jIlR~2_8kg?pR88adlK6iZM;h&T0Qx$r zZB>d6Z47EP8q8}QQ4`+6fZ?IU95}vw+Ig3ogB-{}1el=5!i9lJ*ERCYeU~?S z(`|>%meGI_n+1=bzv6_6zFlG+u+zH!O=e64?s#U4MW-)V!y<;^=D1O|i;z1y9l`nVs1mHK22q&T-mDhea{9RKI~PW6oEPQR zTblHA1e*C(FI#Ah$@AjeDudN=n-$#Qa)8`_rl3gW;bA_wMHwMRdh)p+E0*j$; zse@v)2)K15kG?00XYtw1$P;eW5W+y6Gj(0BetBaRB#D@Z8sG#U0b1Bp9wPu1K-d75 zx{<9o4iOPd1B8mUjUu*)l*}TWNqE~IRaI3rA=&}o0oVarR2V|F=yJo6WymzP4w>d% z%~iaosB#t!VsUmW9!QRW{e%)!vo5ND|aC&LJXJ)G($<5fXUwQH4wv2-(yJo1S}aA}j1;3r5OYxymL_+~Ow@69oZ2dTwnhA;cqZZPV5Y9J<=Rq@ z=cJ%RxyB=eAV;X?HOcrTS<>RFA&)&PJKRZ?Yy29GXry=>KO{?#Tp5P+rfcOj<>aM~ zy5zc)QY(0@Q^b^r3x*qDZ!Vy_dXG;71q)=1tT@L|oS7IXZY9-5nj&aKb|Q%oBz%gU z#X38TWX+iTiR8`vI7h3Y5)~1U~3WzTfWWAD`#fa0%6e&L`%CPx!KrlC&i-5 
z5Oi=L$t%~PVtz8pl?;+{NU81?3`;iik&T80OrSC|3&~kX&cYQz0-csHK617?R-y>I z#t^lIrflW?u`Iiwr*Nq|uZKv!VI8-QTgTnw-Y|C!6>O271|mUEddIQFU6~`qm?JWC z@Cd759W#XCC2Dy&5+i+Fb3=bnKi5zJ??6?7%7}QV#^>stGtr8!106~i42M^_h&MrT zF@z6v4|qkqOFIh1m<}pfuw0~64B>+cD5yLeDlDF&;_d1o-Y#lD1PBjEaG(JR7*H@D z90@`JK|aXqVp~Oqoq~u$Ecpz}G=g4Ah6E`Wnr1|tlX5lk9L@a`6P?eyEBN-_c$>It z{siBAa569-7$}$tuhm*G;J<4(Y<}~;Y4^z+LXE@_W_zh6X=v#ngOt$t*3kr+&)w_Q6kt6J(B zZ(oP}Ay@0HmRi;HO4d5~G?g~jbg2#}J^h~i%dc(T`FrNGQj~pSs->mo^Jq$|H55Wv z^d=%+PVnG(#QW~oYggT``*`=!C&1URV0Hd|b$=boJ~$cP{!N1p#0L(1!C>YrMRJzy z&)(m5Mn5fEtD~U8S{+g~ZI5Z++YWcZ@FLr=WH)Z@f?-E6t&<53K*#`r`sQJ;_Nos5 z;b!%+W5F917Ab^4wWr&`sfiRK0KrhmRodO7{|CZFx^2?&P*+ZqqOqLnf&nv)_TXDZ zO*KXR0szIpiN*5}QA7)!AG;VDBZ+yTJw;3jwIUjJNS%ZCQZSE8DYv+&N@aGiL{mJ) zv?my?v{oYpqjfMr)uJ{peBB7`fM)yOU#IVO%yNAJhZ%<;ITaGZQt%#I$Qo2~f=33i(;DZ-h>>c(txoBUkDJMU){^x@4S*x+SC&i9xtW6d_rPD1F7HUrY+Fde06n7p+>2 zmT0vd>iN_@q@gbGpHxp#6R9v4CU_uWLI@H-Fch}<{En?T8Kq9_I9hyi*iiAjEonQ- z5HUJY{N$dvZdRb6LM5eE9%7bx5%Y+Nh>DqsS0&G}l^aM#p5{1GVY%fNCxlPWP%B46 ziFlaIG(0wpxJP>ulHNx=QUy?fuOxbqK^Y;-xpKN=wwA2dIc!8_cSoAJn!Qm$g$|t( z=67=b5V<ee%3v@&lNO&Z z+m#4hmF<3YTc`W2t{UbhAWG_@p%)A`yb22o2BQT-;ue~U438aT@VWx3fV0b>%6C9K z#R{pZ=K-(?0|<%UxP$o-Wg!6o00IC6000vJ0mET{AT$~ZgMnZm3=D!!U{00U@}aY-+qra#fx#g_8?E5R2GZkiHNCd5Eq#L7Xzujz4lW^iZ}{ z9DKPHA%K?E$u@O9@E#9Gr-=F&BRT-6&cLk?VkVujCl|1wE*0j8gkoz@6}X`TvzN=@a~p84c&kL;RFvAWkY4dY2|;{;{oV*7E-^C)lky#UNlzZ)6Ec<@Qn7FMT$7f@wi-eH&@x`D z*sbMMMBo3K_&B@Ii(Kbfv0>5=zWN~W)5;jGd@4;Dm`}lKbvP`x5riYQXqiF?QWo}( zn>5%CWeW@#T%+@}thM~SEC{@g$7M$l8OwsO#Al#b+P7MW_)IeVY8WreDz)@}ln>L^ zC$MK)+4k#E39b)1XQ(w9EFrUOT_RwZc$|+Rr>^u2cWI&eM(9l#vkbH7RJwkF%UJ4V zDEr=|@DRU5vkbj^)@BiG?RiTZc3?L!JHBppCgdF&%4w**dNTV>37eRhW`lwSAqRoJ2v!WwVm>E2(?7;^UtJ-lFJOiC^bLD1jav^s4^e7l_g7S z0YyNMN75?c(z!;8DX|SS<6VbqfMQuN9+Nta7^edJ;(R4qI9CktF8yFov#xk&{Pa7` z4)e4CHbBY0n-OG>1_5?3*X-HvQ4V#uU1aR4+|T?<1)a(|UaxL1qZ7X6P}R&zRek=4 zsm~I6S!^FBFoU>f%;`FN9iI9cAM`MFG57wpwA>&~ZomvqK-5ajDT@*~D#`20>~6m; zp4L3YtrVkGd6ei!1&o`~0pp|4i<0PR2;@ARhe~_rR 
zMxcM>u@0sK`te-8ZX+ELx0QS@hk%?r*gz!cjIm=rMr~CJxF+PaB03w;UR8=30GcNj zN|l5r!C8r|u`r1{Lm$z>t}30mj?DeR*k5UnA$Fs|6{QLcW{sD(IQEZdD4+NZ7$z~O z#5{)A@em_Q!^u5g^GK*okBpst&lw0BsZ0RF0ar2^$PmJg9w*FWI6qx%qXNDiOh;~5 z8uHS^xwf$25Sg`cw`Eo&o>K~U#)sh(N3Q=|w7qb%0oV%=L2{(5)? zB!6g0OdEM{er+eEpc@lH@R56*epXuIV8~z$@BulxYu(SrlF@$E?8U5fK0a$NiIS0m zC4ZtgzFP(vb&>*sH&s(pF z3f)AJieFQ;>TzRAqi3VWZC7MPN4XMi(xM^AvvR?$_F=b~{(?tIZz3o&Yhjdec>GTGvpM+L&d7m>_G zh0B2?hYWU~BnN>^GZWK&0GUBx@fr*UB@CIMs%Ux?H0=QRtU~qQuIR}VMOvk6PW(|b;jz%2{8X0jWawnBa!(!=XGPr@l!ol52Xv2+o zKUq2IP)XgpK8d8Nv3OfX3aWuKqStaC+Kzb&=`KAO2>IL9WvzLodG6kE9p`>6pu+~} zLk6X#_W_qF!Ym2K$vbCicpNSRrsxJpgRHnrdloK@yblG)B*J+a+QtEXkfIPrgCRkR zI?`ac!`~Rm-VE=|a<`7xGJHGzjD%>;=L($Ny|Dzl#%K<0ND)ra445z)6QS!~qTE#t z*%l^ppQ?JKUa7DYJt4pnI2U#*y4JNOpk7F-?(A`%Qkc3^;1t-4b@O>HCdF#*DH0Hw zYR)65&aXFla2Tmr1{-eRj?_`|4%`Y;CJ$)52f!C2N(P3ULA>)>pcsn#B)KO`|6&Ah zOd(r6I;k-7xQB>bA^?d(iBONn7$0rYGQ5S1(!etfuJ;;c09{%8m0r7CH`l?c9+riy z!Lb$We!Gs>z!fYg1H7HTGy2uoYRBIGyBQM$o6>jr^Hh+;923&+FQh9!9hO6(Vi?K1SLBKTA#$F*lC;Wlt z^d9$nqU^_trkK2?^+6oCRI3WHAAnI_Hb@5)ulCi0se}ChVd9H*^(N+$HwoKs-FIaA zQ_P1=w!=i8#W%_%uZdbW+HHtaCL!@^HQxh%F+qbe{r zLFhvwDtVEeK!i_Z%Kb4Xk=$8He`RABw?r~{lf()Ch;_R0g z#WX*S?~kY&jtj$HNaE%cFo!8B#GXCM1lx{sBU_*$&fM0Fw9OZro)OrLsn9DAWo&ai zr|^EKHC{Tyqu9`N4n|zGfM_W7%r772rJk|M7I0W|F%;=%+Ln&Td&n99XbcAebO3AS zdS{Yuf9~5`wn*#cK2Q5iudg_QnE&a_n48{=gHP|PYq-o~RB_`nrSaG+=(=P&o$$Sm z22JC!n5XIjh{XHY+Oxp3Rb-L6lfm83{OPnf*P3UqT%0M}nUb92usOIB_ar6|z)y@v zW2{jiZmd=W_EGC&^Tcu?S36Tf|Ana?EimHkocbgFOA95A7SM?w| z=1cDlr~f^&f9%mc#+{5bLl9GMN;K8QDa;s5Y92Fze++JtWtrmgLXVMx>BU%z>h+i1 zSZPWzsK0zoH8=aqZX31aY3-5~SKEaRmvk|`^t8&N|z(@!@@RHBNjq5 z=zge?1xPjGdNyZB#wiKZhW{Pz88CuS)%w>;EHFSq3$`)l-sJ%MZW5K%De%z0=q=bnD@x_5{@hi18K-`csn3X?Fn%W9{KVYoM=s z`uhHAPYXN=B{t7N!yb|tg}@9S@@RIa6~K4zM;swTwL+Y@UPOV{{m4ZdY} zC+mjvJu*0{2lI(bpf~fylcSPNa!^T_^~qC{0dX8(NVq6e2zhdoOqsz$tb)M<7|tPx z*N7WF-zi*m$C@nxZ|in5&ng~kQa%Wa3IqX5DUF8cTfy(xVp6pW=E_Lr;1?YptZ>NX zk(3&_U86d(I*pZ2AWf)pSC<<07>7bu_UnYlL(ax#G=D-O7sQq{l*0lcce8S_G04Ic 
zP$^&LI=3IEaXLzrD3FKrM1VYkeUJ@`^6e|7@@L>WZzfIh?7B7|3M_v~UcJ#u+r|(> z3#0KS8K*bhqL=X5iMm_)dO&%HGzuSvfCWlF%A0oixlV%VmeMR5^futGKnn@TQ?q>2 zu4lkP=4S6vnBJhsy-&>zsy z5MVtX?M|)1Cz@wJw>n#4s1?aaI*3EoTjs^VV2yiLZE6@Z+dWKb3r>CbFGt@6S(%@C?42m2(?LH28_Tn8(qC=!=kn7thO@I&bl?mdtT&H5U1|srKxcAU6 zYU$i25&}LZq@eH{R2$H58IMc_oXfIj=&a#%iKKQ}e!I)l#^S1h+R)O?hx+Qy#&OJ6x>2zHwhP`@Cam$t*ZR1lMtJ z2y=^%%iCc8kWTuWdGls@ftM1{kAn3Z*5bWP_-H3aHjg!+hC4>zVp!*uoGN-Ti(UqOcIrfm=L z3WGGUw!!1U@YD5NN|KWnf!mJjM(f@@9Xfx84z@x2Mt*M>B%B}2R&1$*O%98X79}@nq^2{(!&uzyr)$1}-3+$|BBLlHF3WBC}5~z7ivCUp_dtpaX2lah;o$6?u(?Bzi%|L)8JDN76h;1hk zHU@}5S%xfV2v>8&iN#?GqD8&Af|w`d9GxkZnRhmPiq?U6OTY}77fwAFkdbmg7zgTX zPBmr0q+t^T+~CYBDRmRNVE<*fJ!sf(l}z`rpVY1TNsYo_xMFVxe0F|eX9liZojY)3 zt{lByhRZ*J)EmK_4QQj>A>$2_l0l=+!J+Hw??Y=HaY)YYfL`|qZxus+2ozs60PKB& zG8E>@0~S`#G=c{(jR(Chl$S=$`c*`QEJBEm_k5F1vZ|ZjN#)0<7+aT5bag(ssC5Uz zD-W3gxS)t2AR7k*77fi;8ss=r@MP23HuYjcn(U?3a6zh;w){y%s1ixpL*o3E=E)SY zQE|}7NHD9BKoqrw+Ihqs%P?x5ec^PJM5~Q6^~LROi0-xZkR7jr1Nm zMVK$3ouH}TbXHLDHUl>4aJk7&(eblvFHsp#jxY5=g@r=wpS4UuWkX?_%(>Fhpt7Yt zK-i=RNDRVbM(sDMj|#kNow2?}R}}^qgu3rA!kHQl%f<2h&j0}y)6NuFVIJd8rydff zOZ>t{BOd`dKp%lGCeE+%hnO92-aK>;O81b*x);g|DfW6lvMOs!bZW%1?#L4 z0K|Bdr63XExyW;Ah}>g)$izWTJeboK0U-359_Hf?#2^w@(c)zgYjH&XEJI}g7%I@XMZ6Q4Mcp@BkSgpaN_zQi7U!y@~f0P zr-;b?p)x!duqji3pE6Z5Go)OvJNMZ9vB>?%0dg_|ez#)$>SVv&Z;47&M_=~YC-eYT zi&&Fdx95}CoSl*)5>gdXCT36GyhA{2=993m*~#i=6QoV>U@Gk#gquWI6M_IBfft_B z6QTJ;iQ}ArsIOQLVTfX9 z{hh2%d|;VsbO;sdy(a!Y{-E!8qMtPywxTR}U-XxOny{a`3v(fMRq3yc<$LyNH&>n6 zwrhf%358$xj2(sM3MkRVYo_Hy(o{RGH@WI80|*zkQJJV7S+y^yh~RK-b`|&P?n8S- zlr~_~ifbd(=pVQMuw%Q2;Lgd*A@M>Ir93#hdcWu_G1&_MsSk$%jJ%>}VA&?0(>{22 z5s`7)%8m?{H`P>x98jA;kAUW*CNj7o06LF;iWq~F+Gr`6A8seI0XWwa6hxNwYQ@|gH)uL9<#4`Fn zqHfr^1+cd)r9=s)>YWj55&rR72p zjHdQ@3}SAwDEIU)VysI*Z&rk0`bJ({39*f)9h@xcwiS*MehQ}X z`$67u7ybxM;Qutm3HDO$+7h%+sz&hCRHWc)I2JbRirvgznQO|&2v(|*Va6em|BOO@<|1@UX$#lv@&Ol~#t<2ZO!h&bdzW0j0EuXjlP1&#T}|#S(h}ZLt?d z3d+EF+sBNvs0NKpvc8tvpch%aP{zC&6Qq#m9eNt}+mc2;U$^sd<9{}y*Pay5Xi{xq 
zPT09ZTDTZ4PYoWKQ~fTpdG}yyQ4M#bC-7_MEwgGK3A-7vZ!(jhK6XBg zv-6?+NWQ8H2}ee16B&vZ*|kb#JYo83bYS~!JXJscYu?q$~4-6YP4`{zll5L}8 z@;eFJ_K#>IAN@A9?(NK?HUtlzdUsSS<7Cv|qi^cp^=1A9vU%0u=k?Ia^Z^R8OA^A& zSc4Uaq5d3X>s7)BFza=}D(oksP~;2;PR)*tX3Z{Rq4!+92#$ILOt8eBziABAVXT18 zaIV%5ULb(p_2YhqdhvH(Yby!EBSycvEE^ z14UtU1}?#=tD2GB^S~f8Yk(OkfabQ2FV8U!er!M0TC9FrhyfXrsF5wf`+2@Wt7n=o zkmqh7M!wTm>TQ!(GD~B8N*Q2($RECs7?H0(s~xgB&S|f^13vIO0Js%2C@G?V6iuS( zp;28~8-y^7sUESrxQ6(p`f_z!;h}Jy7Lmw9Zulsse`WO(m?N1i#X@6)p5rA31a8Lf zx1!est=TScteU(#NG&$_l04g`i4LPBpFk>6yGaBxM>``3 zTMN%?C~{~D()vX`G%#PAdO1-{{T|g?)i6zEgNbKr+FI`jXvlM+1b`~THXdcKbm(*; z#*&S|Tm1dUbAY)3O0=G7Ued|||D#xa?Pc+Y_EAuGRarTtB}yOgI;(Y*k6v;+#ok#@ zKx9w+tp6?_kHmrxJB(vb^fA!VT;x*FhqYOk>HP3rq}*c-xTQ2Gpp~JbwW}d&03;SG zBSEymLUN6=#k;Ishvizv_AIo-U8S|NjxO-tr!()ED?2f_o}_3mT1rb>laActKB78iyHAEE@GEdP zgj-!mJshReANny01O-t6bl|CEC2w2LbNcF z#FvEJB!U>1%oRssJ+O+TXEP8hVg?$=83j_zk+hHkq24$kSUE;r&$}KZ%-Q#= ze-7@7fdUK^V4%R|wDvazz%KPfw%DzZ;;7WHEUn20pL4pA`N{Jl?a@<8iPZG;HFv(XY#$h2LNOCw1 z2PT!67)2e;1M`V=2MnODJffh>@W?sP1|B#_Sg;_1#K|kxr<792{@nl#_~b&Ie4cDD zIId;>h=OJ0o~2p#$Yog+kZpOb700rgFJtr2ChNG8su0aQN}$qm>d)JcQ-#0*1sHG$ zhFgthClF9$B)TglJwO;VI1&UTcp$+62p%X5I5cQ5beI2)G)QnD!Dc$k4HOR=I55CL z+A{O5H)U2Kr>bBp^JLN#@G{>bqkC}rK(at07)n!^C$l^m(#_-q_EQxu`=Ly{zxC@^ zHR{#u|1+y_I5*=V8Ch8XuP>$C$77<{$1{UChqLonc#&~RS(8{?&K z=er{q@~$)Jd*Oq2SFAgaivhv#Bws1!@#(G}n`{IF`kFZ&-)w9xP>p*#*VtKR*MI@q zd2H;mIzl?~Sk+A>-PF_(A{bg!)@mg*fEZ1S=fiGb6FZSZdAn=~FV%5E7n2x2r=MJ|s;HOpN4kW7HXlfw&#xMR9QQ zN(@V`NQW6_Tnm}l-ncieXWZ&|#xo|$bnITvxUd5@X$%AXCPH_KguK`v4#bos$sCR+ zM?qPKs$vev9H^$=jQ?ADZs`}Y^m^vxHSjBw5}*VMK?l0T7@8~)4=Mu_q?{zvl-S6_ zU>R2oQp^uW+AVxR#=)R68<(!NU@uUmn@^&Uf<&pA#>O%q&9@d#SavJl!3`DwPuKX> zDKb|{vrkbH*?#nrAraZCn$0BatA@xfm0~c|kaONrtW-w-P?b#2p_)D=qEIPKZ%Zc% z_Cvn+LxxXu{!UD^*YoX%`}afjUF5HRR@h#M^~%mT5F5bl3IV=^=l#;4NEmqjfO3% zZ3w~8f|1cN&Fn3dahfF;1qg-*h!eH|tG04#(`iY_g-T01y-mj0+@EF^SSNl^{J6^Ic$-Y^aP&Z<8)LTVx>sa-|7l188ES zJ}#(?hC-2Gtet{q|FmEIN%qASf9%W*tx}rZL_qSw8Eqf6wRf(kQO7q#a*x<0iz`N5 
z-4C4!O2hlp3P#=t*Vux$8B3`i@$JcAD=6+UExOt@q4Xv)VHJwn{=_7m=Fx;CjKiL! zm#JueEoc{B8mC&J^hM3l)W|OsIWpyQy9b!dSV&h-2R<*ZX*yxA026?RqI??c9gUJI zP>|5GTb<7J`UpxjRAQfDA)|CDA3%7JNt2N>qTk(22M0zog7+ORsH zW)AQgvl=86k~)q}fys1KDovMcQw)F_B?u@JAR)?=BOpBMBH;I%GqWFYlmGFiA`$s& z1Pc@}!lI5Lw~w!&tf$#nNyaMWNbbq201fQu4$27{tSC%%)!=JY?UW-+gD-ubJ7{mf ztE!r<5(h%xf=AbG>1F~sqjv^bE|g18!eKF)$t2sOzH6$ewGc*8j~?A zr*1#Gy#-d`%%MjQAo$F za+%x|;8ydT_+Z0{*D3x7gm=qU+F}F)F{PJRiwuIs|VLu_&A=tJ!m3oXI zMs?*7?DydKq`8DLa4$OHV_*h-jB{84>j=}$3h+6WB^)CHM>FI|S*VBPN9alkQ{=7W zby}UQwOFgGKK&OMMihc%R8RWhq39E-j z$0iXH35BNVcHv&%cdiQ+doW(~A0*1cuwpp58hPk7BoygZT80z?5~`{Hc1}kfQI}FY zM;Tw1TmjOMz%)$<7=a`OL?$^a4f7|2kaV3#^QH|ySY+4ICG5ep=SdTr#PR%aWY4u) z!6Wjx1D(6Hi19z@w>WPG^w|!r@Hh|K7NG^cG|x@ej{1j?K#STGS`ZTIiKufF#^>&4 zxawt0rVP#Xk`O3)q21JLXwrpGsNd5B4K<*X2-@YfIhv4GdL=~ASJ zY}l$IbJY?s51)Sra09)eS(_1v0dMI3`_gvL&QLlsPfHWfAhK;5YCvhwpgxPi00N)6 zrDVRSXQUk$N~xjCCoueJ+<{^-NYx{a-qKbB2Cj8NmX9VN#PXS3rN^i#Naf z7l_cl0pGMbnK7{+Qx3vFxB^WHcgN_=n>0B~9N{(Jftq953wjAt~GQ!a%M#cM&3x)_e6EM*|vn4pQ0#)Jb_Yg_8^Z3T{~Jg$vg$`^N1tMHbNhr0d#-Hkh^r6 zdn{Xwa4E`T6Crr`nc5Ei8A5MwP(@2a8*$lgh8n%*w-t^Sq!(ylNSln(n#XZGA#LI) zN|#<7tz?~}If+~Z)%|w7Y~XstY3lFvLDd@L3Smc^Qz>&y4tmk7<$ZdK1l{dY2cXa> zRZ1m)#H@r5$wviO0$~_(BObQdMk8U1DQnmrL(?THG<|m!rI;g%{W*FJXy#j^xqaN} zB0!1U$RO)P7*wp>!Ik)5zNMpRwnP8Z8S=ep3|feq?DGj47!=EC4g2;h{RY9EaiDtA znz=)bGP+x~10{9zoEwOa7=picg>T3k%g?HfQY$iYY+-f$$mrVt3XFTq@e z?hrz%0}n7NC(L19nv=hkMFywaTB3q%l8mAE#9 zc+k87h;c;6rr(RLVGqJ_N=-ic z);I?N&{?i%FbDYMr}GfHsX7T&M5>0B>!6aq^;=8~{n&bQ=+MP}S6)B-&v>Q1LY>Si z7gkgYVy8pEAL~Dk-AhL<(WN>hkEwOE@1a?mTc67JH>P{kMz73*|L&du^KwllqK0;&v!xB5 zBJ88406<$os&1tU@9|>#ipaSSBMadX`_q9!jqkDD>Ds8+`B#|_nZgnS6GY1+$utG@ zY*VXp-O)zervgBDqZjkrBY^yzDTq4jj&ywJAvQw7d<-uXJYY?@V&2AZs;29MN6>JH zYZb&)lv81xq@y0QxOPkteZA7Sy3Uh408d)$uWhc*H6#j!3%8qvSpx%$LD?AeezI%< zd%7LONb1cR?FU-QcE~*ti7XhKwEo*&LSY|&M9K{&9uc4C0%|+zhxYsZ;(B709?UTt z8>hGq-^*=uoC;L~&%T4P6lhY;7%sKGsttLcl}CWx5yOj3SWc8qPx>R>Bqu<2E9^aj 
z)+u-?FKwo}>wqg0f;zTCa~60qVUCad)cOCD7Rwh(@!OkhhR%4EOe&4wC6^pgvLcL{8DZI_qHD!v{@rgK6COvtN5>J0jgwP3p!T3-q^ED@n5}mv2cc%5Dp8 z^8)jLqMNeF$yjQu!2D9u%S7V64K&srExIt25nP0*{jb2%}>*o(yttEpi`~D`}n>FD}Rc&%3F$31*Adk%LnAm19coN zL@v=DO*W1W+XEF)#wzNp!V2c_SiffUFTV;_9Ua~6i zs8wr805BF@r`rmvwT~*8sD!V{=yak)!8v(z24o$t0?~f)$tlNnU}d!!zc?B`{%v(5kuPpEx9p?!cRSFq1~#C*x@2Hdk!9RaeRQV{pf*T}|EV-%=Yb@gU> zIA;UQFiQTENUHaOH~I4-ErFX*+C$P4j-!Kf1l0~KZ0q`QSh}s`8EeUiScPX-+$ilm z0j|I=6@^#j@(l_OM2;d&4}MvG;u=d3Cqm5G(HH(<^TFY$`em$&AbedMWBeov%Msk4z#>OW9;4uI} z$3`5e@_h4HsG9#7u*$Hs3hnS786WcNMuTmR&H$Ue8Bma|t+0>@k40)c*Jh{uqvVq) zF(Hhr#Wq2}t7%*z*ucZ}yrnK@Z=u(qM{eZ?uO}w*7M3w_csHXg-?%u1!!a0l=1Y!E zPn>`$Jk97y-I{qEuZtwvuDm*2IZB)^j!*Lt>E9XC)DX|>&?m}uRVg&$CFBx>_+Kt& zsXxQF``vAvN@};LaMi87XJ$6iDWu zJOsSIcH*pNBV;3}<9I+(rnc1Mn9j!C*X01beC4HKjz-%rr=6EAQ8(@YVl;j1DDtav z?NrD|_K)uF*>(s@?a*d^KBCTo8_p@-l zuKIR!h)n^P{9+8LR{{&uU9LV)qvvbBQdQll7|?`7*tjj%l2KF6vgrNh0;{b<+`g4yPT$#i`W>X{0kt@BpR&GFE3aurd39=FO>-Zn;4*qjj2o zE5RO1doeZEFaKni)lhKfHB2S=5NdIR7B=3@Sxz1;e&Xhb_LYm8VM5~!mmaRSF=HVI zJArN}?Da9V`&ZbiH7bB*YZ_iX73?&Xv~T7}h5B2g7yW_1LdBT*3lC5^QC4v|Z2uc} z#F!I+_Dl}vwR{%%{`kUq*sJCQh=}X=g}ceK)3ud%HJY15sI}fvbEyoW~nJ18kieCu}Xx57v>cDyQ#V!25MH zVM;OSJCV*drixr=;!VeUQri#nwSVt?(k`6&xb%8xK@|j<2Avq(6{i?&I#r3l)c{aw%XE z4j7uPhOq%oJC_lrn!=lesr=IY_R%X0Py#ANNy3+*Tysr@z1IF>YlPsxFuZY0;QNitk!ep{-g+6cY<0>VV?ECt-^s z5=l|Q0{_@Wsadj*3Fv9bKr{R0_2CGL7m^BhgH122$4>nhlWgcSwdH8zhC<-}gKWzc zj-MKLj;Bz}g!yJ04iHJp=zGL*$sK^rleth#7iHW!L7C)UOBJE0P#Zz?Bt2u)Nd~Ky z@&}au$m%iFLV|m_wk0zBAh_fXo?vzHS)~g)VfV{nM#K>`n#sy`g-0fq{HySw6u3}| z+PKV)n_lpbh%zo6l);fxI|yc+Q~M_bdrVIc@eX9;mS*@Ifc?O{5zXDvA+!|7F~2(8 zfi?q7HD?L#KQ(QjOmiR2HtWXm=!-*{#L%|ZqXd}M1a z)RN8IO6X<_^k#;7Cp4yzoqBen^h8K{KT5BTtOcMzT#UPu+A&yQzB$UV7-aMfopI=F zoKei2Wdzpf(Ljt>ERhBV2%))Ra1;uV2}2Z04hTEQ;pkAdqeKr;Ca!CGRrba|KXyOE~SL@X_(SvI4>48A4kAkjlXYB=-YP!p%RBf5tpz7GZYGu zFnA6MS4b4>z%n+)pigmWnN}{tYdPL}OjV4QdM~;c)vgjI#w9GGYMEGOi*an-cz2;v z8(he;LwLY}3t5&(x*oS~SXWbLW3kZU8?r#dA;A^|Q3$dGd5{ot!Vo?j&QOct;1CCO 
z7#3MrM!w3oIPm?_3dJ2fd82`%+#FjA3{j@UaBimDiPH&ka7 zTF!HJ5A7bI83}77w7FI`!&{AzjC^HQQ*ITNv}ES+a#4xU_N$-#ivHv8#zrfKfBoIe z$>rZCyPk}2wF~DVh)3REU4M3?pi?nK&bngUxp?Hy_LOU;c?c!By4~tlHi}a*&RkXu zGrvaKJQ|F-#9CA>a>}*3s+Fr&uNT9LSBxspi){J7l>gtneNt|_d*7Y+sTlQ=D#q`z zlXRa=;LT!;dnT1lb`gjMcoajo?UU0yw*nCP+9&IjwVJ@AGLQfPDwTC2|2OiGov37| z02B@iY4A!-{RgwuU68)JVO0FXG+AM9Rc|CRX>cyN}tuR`j3v)7U47fOW>Q zv2LR{6~m0G?02Wy#ZItY-Nd>{>Jvmjw<5hox{HcYC4DhoubikZ{@ zeoDmA;8ZsajYH!3L!m5PDRkwK=~N7&GK)dF5Q)dnYu*{6=R;_T^0|?Zka!~PQ;gw{ zL$vu7%`X??hhW4pxi54JP%&nxgo+_^Avp=#kf@T+W0CMB0rf}qUkZV{aB_l(ZyKFA1q@*JF_>C3`qIwCF43h;C ztxuF_FHTAEP;yWT?mNS$5GHyaV4@Fco+%fxVg#TvpyKg7$P^pPL(#uyM66m@ zqz;;~HE-mu&_dhmqz%JzmqnXt6#@rBFAlsshhO*AbV=ST0}F-k86^28YdP94FFVd= zQfNJ6s=DkyvZf1}buolDFKv)RJ;(KBL~^;xp+O%5XI$ov!yFpOL_`F3KfALo;H%it zu5bbTbqQVakSh=W+io(W;9)*WMTAO>anucI0 zA&jg&h8C!BBUiT+Q{dP1%xGTEbHqw70ls(JqRFom*@5(UdZTB0MJ${&D2C5rJ;SNG zw@bfxuC(>mS4#Z(WPm;1h8f7sx^4Wq!?3`CyCB9x9S)gSWG7Y=e`7HS%Oki+#Nfbg zA8Bomc235FtxYEdNO#4c*n+i;4BYt$V#jMOi|2^-p9J$tK|j{ikjGo>5ISUW>km9K zpaDs00M@8cCQ|JbtE>{gszTw;-1ZNhu@twXLvEPMTo?GGlCrLpWgJSDO3|6*Y*-?V z>3e>j_w`=k%_&*5%(FbA)zmWXNbG}7dyF2by8Wr|j>Z|$K7$eN0LqV_`A>1?#p;5R zRT<80^4sAD0)-FIynY2%9s{(49wM`2Tl#%%TZ>J-WB-JJ$8_}s8|2c)_quyPU&d)z zXt)ma=~j@Wu83a|kIL2Z+uW?LhyRGZHR&~^EAUabIQbosf##!iM}=5S8d7>eJ#y-Q z`0lYWf@Kz-p7ckJBq93dOMsupIt&#pq{TgIyB&OiIA#F07ONRIv}z8r5UIW@BqqdWYz5Z)LflOk!W?IM`oJAll2YY_6<6O8|Ac`@H244;C% zDbS7uqlPkW!Y(Alc@8we4w~99RX6AiqwkPEq0l4xa)kU+ZU0BOH}bEcDQJM$Fx!^3 zWUs5?ZTUpWLYX2D+DCFnR=F|G>FD5SnvMC65s;$j?QtQNqeh$$h$jpz<^HN_g!3Ji zMnfNQ`Z4rK)xH05HA)bMuF!$#I%p8om4bn$!0)IV!3-{P+=ahH1M=Iq5K4&`9mg`b z3lH&mIRc|*6va_(TWtJ=V;kowaG7(MiHP# z>E6xW5kXLTw^rGOMVjM&1v<8IS45DxZrUS;SKR1u4lCy-SGmm825$pPk|2=q#SuI? 
zoEn#8{2RGx-NvJy5ZDZSH&dw&7e;^8w|8Mw2*YOBpaXcTkxN_nwZIZrbCC092AmC| z3#6wa$!y2L95KbL0oaKM%k14Z_++f@9YQ3uuCf>hu2v?tpHACUbLVTJDAdqOhg}=v z0CM_skQ4=4m@5&yj_!O%?~*j1iks;KR1)Rjg!2x-X-=+*e|$vfeCT+|JXqzci0Nty z7XFu#OZ5Zh<@$4yScNNKKx4 z$OL&Z6wJ{8W!?hIs37Uhf4?;bjm+UATnva4x6P88^4l7bx|{wy#2n!a}8 zvHHI*(boeK&>knqJAZDHv_NCY-~~N{l)d0NV>y&)kTwUg;Q*I^dxEEALQRubNZ8gp zRr+$^?rJH!GkfhSz=?lRLei{#SI5aMELDxy4t97PDaAq2vRW1B@jB2?5?Zfqn$>Be z2hY&r4s8+a7+)|-$+gZ>@#k!j_i^k%n#_=0!Tv>o3h|IkL*eIuAmO+vpqx~V(29gT zfhl1!(IGS=a z$=d73uSwMuVs#xMn5(wd`K{uq$`y3#lU|nrT$lV!lAY(B`hlR{#R}enlnI@TJL5%B zGvV4f3$MxC62fs-Ig~%9wpo0-8S!Slo1BC+b;_FCCF%o~;(aSTcq*d1l2x7lsiQbY-wr$l$4Vg;ol0 zrj9QDs`r?aT~rn$5b-EQ^vwkEW3A#S*RH_Gh_{&pQ1A3(pe^l(J%^A#Ve!jsQ?zf3 zb}9B$gk$+TTb=mG&#W3+J3!_E~3QO4ptl?cipq>mTR1ETx&J9 za}@zMjw{s<`lI0p$G@eTrROocG?<#B1`&=q#xuM6frhx7?j9a(G~qi!Oz6lXSd#!9 zBYdu4>wjf}r(q5!R`?TJsq>A##`a&Y{tnP~4kJW$jqEzOSRg5lRuqX`@lX znTMUvF0*eOSz8QgJ<6Y_!h?{}<55WKd4xp8mk08b3pc4mP-h=DWk*7W^I?U{c)QO0 zRszoeAj-eUTD#fN5WUXQAS)WDkgA9h{kYijokebU`CDgZIpXY+_szh-SpL3ax010{NUHUhYjvVlW5$*?N<9`LIWEhRrTlnM=Av=ve`DGjd-See7sB7@|j#2I_uwlp@ zHTd*=IC)q8BFbEZmr9O?Amp(AnlB`DmB|fdF_6xVZUe3|OBzQ_=5nIjmJv-zsmYE; zAR>C}vK2y>RMt(IJFADr41{lM^=dA3Y(0c1cLF`))Bp5x1^mhxX=zVQSFMuY zzHv3KP>8M-~;azh^arAl4b@N2szW>ES5OooU%Y61`*ODRS2wvxi78yD_5BBv5Ur6aVtln;B zx1BhBHbY*t9ypsZ3x1N&1oZ-H4=;9<*RX|8!0_2NF${>Nf>A8+Cf29ywJmhBqvk*rgX+tp9+9kEpSnBnb1M z!=7S?T-^A5cMtSsDGm!+s(0T#A!B!;JuA#zK-{{Ov!k-3ehZHtanvR;&p*Z`7_AYP zv#2f=S_Z?gFw=4o0e!C-kgC%f3(LauvIrsMn6&Ys!2$>BohG+5cKw!LdZ0eC(jcT2 z?l)>@$7yb{r4?viv%j??G7Bt0I`C2xiLLjW_(AmSL>i{a(46n$%@Lb!LQBHjoasqk z0`=`EE^dUh%?!h4l}1klW-QDwe)0^86asK3KzKqnrhY&@mby!aA8V7@t`v3&%P%>w zY_KvQq!qRvLB0icn+^=xh*!)bbYrS&yG3d>r;94~cSJ0*KVReh-Qn!0UkJ%SOv`h* zk!O&gK3hLr*V`ZCVF5ePWg9z_)a619(QsgD5*UQiHqXs!|e=ze)Qk7fA^@E$=qGEINIV6 zY`ok-Uv3JBd<+)^IGtah2A1?>A=D|aut)_<&Y|=az9;Pgry@n_9NUSJ^G+qn3!j8< z>@1RZ_FAFVLnxPoOKE8ft=$N00EF|v4w;{3y?>NLQjVP}fd}xOQ2{QbhEG15^#32{3}9k-_>mkUGnte-Mzda6W$9wy3-h54)-t 
zcdP7@G{&*N;>4fB@-4L-;3!0IBsJ+}N;i;y9%uwk%9#$cL*5a1)Fvf{gDuEi2Rc>qccRgx8YU+1TS5tLYyE$_r2{01B)gL$sS70-3W}PH?RiF(2MF zbja?{wfogh-UoJSf}2nhM?~k?n=8|~7c49#Xg%IW7rJ2UPNl>RQ2Thtp%>zCjBCRu zwA{baH}K3TT86G7XfFMK=(}YNMNpAkUWP0+JxO0ePa)gUD zbBhC_b7AA}05b#{?~tPE3i&~C%Y&2y3Z8|->dqzh)4@gRepL-xQHRoXDCRxl4)##v z+3W*}NdhdhaYC4{xCku4Y4G<2C4&yYNDJ>!dDBMs2w7QZ+sWajON;Z4U1`a`2G4dG zcaLNWP>MEJjzA_8#IKB&nBMjKjWN<#O@NA+1|XvM-sZlI;eC}HQ+kCl9vvVEs6*pU$c9SrOaK0KO09}Z}tqTfZV%Z-Aia=|GycU7}Ly!6WG!FVs3eeA0*xRzf zLy)qKts>XNO+EZ++=;ky>X?d?G$8-gOG;Tla=QFQ@c`0ly$N3VthO_w{|E$UMZtk1 z55oDtcdE#L97^MaJDdd6@L7(+XyNJr)Sng=R-L!2{c$VJ@D+Bos(jT}GPtx|#U10J zpN0;$g0GYD5`eiVRwe=M4VX;p%IcksEaH-Vep-yQpb1HVHWdU!Dh?pje2PVX)HGyl z1YGQ8i;O2%1_=!L0+G44FgF3U4@o>uFBLlFFv+n{vBN zfV2Gd$Yqvq_~XzU=p)zT*To&b#++kY8E#WT=qk_U(yT!vj1U1@y-XV_z*GTa0O(M- z{n|)(M1&-cG*ylT<*&Wm0!@-KISK1k^<*^I zdNsP`GVZkruK%L;SriO|846p@rHNs06Y2mo;x2M`!M zuEHoDb5VC4P?+o9-OQJ;g!f*nw0Fb+@o+5mRBxBJ!`nUWr4OhyXHeAkvxWhJ^G&n|=d{Zli#B9-4{7i`c-yC`dV$-&HP- zLu7Htkq~7!6X__iQg!&myC|E8LWx57#3e+&=c65O^Llyjp7-*5l=#!^zI8!|17br2 z1cbsaAvPmOA|zNT8=h>8Iki#HmF24x#_E#v6b3CxPw(`63`mKYYLtkn3X`duh?a_o zcT`5A)hdx@yR>TVjTylhW73;mg}G%~VGixJ5gOu}P+`7|OXe62?LMlxnOf4czB_d2cB1|0;1 z87l7TclD|;cQvwm$zHox+Rf9|%~OR@-KXv`O}*dm_gkml+e}F}i!L?4o0{Jv%_GGl z#U#xn#Uw3t9tleiE4LJ}vNVx$akEr$B{0xH1&HtNbHBgMuZ>?X)wliEIwpPXdd9Xa zujy*j6($u1?p^lNqb7EFjR-t7I0^_Kl`*q&ZDhb=TQR-5r z!Bwv+Xz^GIgS8WiW$hI0)K2euR`gMSR-dZs3C#q6pzuUOQdCpDJerQM3_4@4K0&X1 zzHZAzR|b)u<2=sGXPz>$a`EI*Ja*P7!=w6x_hUY37_Q-C=bY1a4%)Fp>4VO!Q|Hc4 z8H9#)R5K04L%`7)t9wW9oWeEKR9Zc2nclfgy<68gcZSKz>UZdj|9*FOM|WFAE*o~$ z;lMaqSG{9X<5Whv?pRgQrCO$CVj86-p$Bq%NYP$xQ9i07aw$}0q?l6VwL+__TC}uyAZxlf zU2TbiTq$VFLAdahh;WbPr9C-uqAZK1F6y*+TG**hRcWa_Ia0j^Z^2vN`m)aGsPYTN zNE0LVajN80mFh|(y!?%o9GVWqRCQG5A}s5oP@V{JVkgz(=s*%9Z&fNuA$X(<+Ask` z#D@+e+=$M6g|z?%56a@eKw<+408G%8b+>H4HHx>!cvQx}%OstaS^7p;5Me=t1rZk9 z@X=_@*%ulnj8S~(V2nb;gfR&$4@gj8Aw!0U4<8Q50D=k(Ab{Zj1UfK)f(sHpsGuBW zKn4&pTu>@Ra6twOM=qG8BD6SsrR4e$kxb$iu~urrd2EP@?4Orf-F?I<0WKOkVT>6$}Og{ 
z%91W+={T~W1v$*RpjyRBA}AFoJfIUUEEJf6K;bAjN`VU#F!+GspiqGW2&lo5e4rjw zfWd?dreqL8hYFIlTPVpb`F&EQ--DhkbR=uF&}1n^cKHv~R1ll5%gGc!(d;@ zGYd;g>zu4p$IN56m4s*Pz0g?msiD=mwX!m_ky)PAshjWn{-V=q+iWIdRaIG4osbYu zm4Rld2UArK>eXx5nt3vhm2pkYb25orsFMgHFIEEp1ONdD003i?01yra#6l8rG!#TZ zj>PGf2NQtBgHcc*6pBURFenfT1HynnAPk6M7zSY|hG7^8!eEdDv9%iw^N@~v#@P|Y~ zr!MhX(%T21nZ7(#Ow%a~Np*_oNd)Xn@x^Xkpr@=Ob~N;90w=PQO{nL*t$2jy!oV<;{7tY6Ej3RgyEpMM)|MGl@xa07i=5&-6T#i>7qnoH4d@;=wHHnjv(ScQR{G_16LJOZ~o-ZC}72(Pm}s3%^GZ zW(_eoj4WtM%s)(n-Fvxqs*YZ7PdzP>Iu?gy%*TvUnCAbDOB7Xl)i)QKQ+@*^?AaFB zloXu#2;ibZ;Rp=rfS3rqC+2fIb4lA%An;9+5ZS|kUEj>j_N4zbas@Ks;R5UsQ_~l{ z%3>r+nP-e1DWK_F8S+I6s|6O44r67M&8U&U zV^9^wNx&G;jrjGNHdCqmfInh~(6) zUf5(IqxK;3JErZXxGu@iF835)=;G5k2{W#DjVH*^&$?f|Tr?z}f^623e+;}$-&I}^ zZ1tvI1h`R#JWMgyb)~<{T!GLdC&V%`(eDm=f<~-BoKQ$dMQhf8oN7F&46sQ#N^Qc98-V3vDQJO{8V zwQ&W@hrWFA`q-C{jx5@psyX>P%?UC2H`Xlk|(cKQq#MD?M#i*AkHZybrYa$XA45A3gyV0bt@}B>MOX zpexUyGJA=4aea*Z!&*_7k9FO1X9qzlG?fe*$&iWX-OCXJ+ou(r)oE!CHE?Uo#S~4l z&%#hqnpMz)@qms=qod&IzMqfpt+cd`yXLx>Qew);HgY$ z0L)>g=u##UY*2~3M@y+wMlKsZWo1#9^!la;8^XI&47|#69g2D*iCa7x-@TUm@jSj+ z0zN2H;`HF=EF6>K`rS1J9m1i&5^BUbM`dp(M%&h{G!iUTKVn``$%eq7)fp;8^C8!j zzEL1}<987FKzmbm1EZ8VtfBbC_ny-voAo|z`BxLmwTS4V*Id*-;|@%{lnj#zq<$2p zc?--}-UC9K1>*rD0faoS*beft#EUkC@+w_X;tJc^ zBm;TN^;J$Y#Cbdd^9z?Hs1YoQ&Z5bkG>b1DlYOlI6@f zu8l7Pd4q(ogNvKWSJ-C9mt1?VcFdIxP_2X`USk}RUpq$ezoN?U41g(-tpfe*ryK2B z31Jl^Y0kTC%TiOG)#`=Lo1tJf-1Sq9k=^sS!OD7)@yQE~@~5v>t=v_csJ9ZvkM9I1 z`X$7cF&qeAKWs(5n}dU<90jfV{rb$HnG0v*@*2A&K5q9S({H@fWioxv%;B`Ncif(0 zNg&sxA+#l!x&!CkCtNb?DM7TZYy0`Yw&Auh_$=j`rNzF@O1Ju}iR3fad2hj28KNj! 
zUcK=eEk3Hm3ic(13Dd1OO}>>0@h-%VPfkxTe+uccBUbJ^ea)V7$GSzFc}lw#R=H~` zn3wn$^cKPca6aVe0Dg#(P(rb3DC>!RTW`s6mCFmRls9sC9uD1!l#PT)MFQK3U7!wr z|69kul2CY7+NW^i7MoCxW~z>)9(}TObSD;P6U|M(QKeC7blG(9$|g1Iy8PRhV19!k z!0y?*)v&LhCv}?A$Bbb+K+Gcxl$VBlf#FwS;IVa@{oauB{?wu9gxJfRoipmCgGnz{ z|C=|Z{NJ z`PtH%WB66x>#4AX?vA0~bOIAF7b+lcnE3U!pS@)-Y;M!*g_KxmJ}(z<*Zhz$E{E;1 zlN~_12Yytp>SNDW_;?Y|{bU29zPw$&HJw@DU3OqQ!A%wvnvcTSF0$_b@>w5sMN{M9 zK#gx0xe>paZ_AtcH2hXmXg=@s&)5`G)4~LlG3;a}QMR^MAeNBB-lz@0@zW_hm`XNC z9?x5FWm-ipgrQcn2CdM6{lkw4)A>{bbQmy>b<`XaYr*W@ZpaIYVPwl%%!N-;Q^4PF zwdLH*+2~4B&>wI>yjaQ7XGde>9SvxkMNPyOxC|E_{c(5?Fj6Gi!ap0Xj7gJj14QF) zmEsPNo*==%liOyd zCj8e&KSv{mOyZGYxVdlz*KLEK5)aTXAe3KD&|`znZJWSS8bVGkaZb@~6AcZ6(@=S? zwaE_aY6Sk-+S9`kxH%DVe~(w{Z8kmzb5j?fAxwxom|wDcE*Q`jr@RPQ%hP%Vb`YA% z2J}~EDN{TM)b5LeeOrWad|P@thx0M%+aOHbwB(fE*XDfES-yxlGYd2osp`Sxzj36d z@hrN$7pQKHXqi(0v;|#Zi3!%ITxj`tZ8%zH&*=Itj;x?ssQD>{rX_#VEVXGXLVz_h z*Gxu`fiO~_OJXE6?;v=;7n4)2Hjv~UfL5d@y3TROTLI#nE^`XQ78?LkgfoGu7@L+z zofRDFY}axO&27=h# zDZOTcBZ&WrkQ)i>l0{h#K#W>l((cty*fCnc#owsXA_$e4L$K(2n7lP#!Wvk>F+2nI z0nS0kTf$>t_lwb7(Va-W8VmxPJ({uyKsUDpx?LS2CIixx_u?%2G^nYC$Sz~x*t(gx zMWK5c14oU;J_bVsZYxa41YcT&!an)$$LjV$YlJk@y|hIJpiP4vIS~`;015cg32Sp? 
z+Ix*0?6>n`m@)ZRe0PNUEWj9RV2P@=!7ny-{=R=&qsa4_IPvgcOwy1260HPm;NCcV>=~_m^nDeV?ph2zDEp>4{KX+}ay!N)hL2IZ} zh|i#BC$sv#Hpt{1Sj*hppAM+gZz@Xs+tuhp{wwqgau^1PwVuNG{aZ9gPJglk4K91UE$@+=N97ug_lsA>2eK~(VG(y=mm`0(g zGJHIaz6DgaJgqT^8!-+~g!VbPI%{giG^8*X#&sD^kCdCdoUE1GldOz4C}^mac9cwV z@V?3q#!9eS+s1_Psc`zoU?IW^RfprYF{+bz%b1~&*A!Z8TGWQiuxb~``fPk$#j2{$ zL>HRP6nob|rlJmXgW($p=^`bn(kRV~0f|Zdtk974FP=4D?{)|1X`j)r9|B=S@8shy?TBYWh?7`Aui_e|)dlb04 zHG0T@7?v=YPXIRnL1g_Ck?{Hprrw-kb17AFNO{?bl@iy7k69(Yqo(5+f;^vEV!Uyq zjmf%K4ktmE51pfU-{MU)K0V#(OjCKM(P2W~W-$V~_~kM(FW)-i%OXf7vx9w}|5No} z0Gy*`O|}O*Ef|oN0Go{$#^6TNt{v2)5M7F+5Q0 z+QRh)hkAn$8L^(;k|w=?*EQ32wiL)8wyM%+2P`Uanfds|SOj+NRG5kD+E{fe*91s| zOH`XQ=E%M3PC(gQmDClF>vDm`3-2 zwWtq}jr{;yGLVo&>r?XOvw~+IGF6}t0~q+emZ#E>wPthVb+2nr}?UXqdcUSQo)SnwNKA$!G}y0E*+~Q896GBIZ-mLk3=Hq~m>e7#|_mP!W35f($K7uB$xFTh7V ztS`kQG~uZ9WQj20-0{gHL~&mW6!Kub)7}nc(Za^ejFjYOFbAgCO#nePajt<(uTH;D z-QZbC7!s&Z)||@Vc*pBKBEobFv;PYkX+Q;)SI4ZR#wNKVb=)elG~ubnp8636pG2d? zFN{S73uhyw%XOAUETxiUt?qEe@1SedvDBif_+_vNYICd3C`h)oI!BirB}+lI`G3!= zO&#M|-j{Y-Bn8{Z4ziU2Jzk3fUZ}s%-y<1AvNNXbgjB+?PQTl0ouPN99Rp7}VLgrA zP^+JX)|+V5jhmpV2^)svpBYRWdNM|&_@qylh)PWfdyra|LnMCm$uMOnYvfM*!zji5 zcq@mX9K>~8i(LjS{la(nnZ>%bQRfm(Hk<66+juV3?8CM$W!#)A6!5~X(W&|{XqU~7 zhBtV&zCPK)A?QeG_sbu8r9~(k|aBj`SG+*}AcCZ~$7JC{aF^IBQDJwj%<7;zJ z{D1}+t^*Jux1?t|y6x* z4v)O;&zioILyx58Pa7~LF?8PyQ}|#iX(vO+ZKGT$<+I&Z`IaeH{I*Y*Z2)g zw!X~_;8)TtgOAAYBNF}I5eV=%`UFzv?+`iL_#}sw=?IRkwuAQlOxafMU_t zUG8DAopB~B31k(ynnM1c>$2KyAn9L*v ze&+)}OC|R@&;r5h5AyHq?Sw$(^_y%}2~h9@hv+zgjyM+J>5Y^j9#DJ9<(3&6C>*>JtdC=6Cn!{Hay;%4h8>2c-igs$BT}|t^C1hB_h31Ck#$J#M_u4oJ5P+1rb|Y z<_;!%U=Uk;AvTIPtO?MOB2^|u-X;a@rK&x!R=}q*CNc4{5{zx=t#~o@u0nbE{?S@oZ zvw{#h1hR<*b*udmVq8Iby9OaS%O4scCdt{uj6qxKcaT9y4o?!J(~Jutt=QGL%%?&s z1d>}0@LcvRg$tC=l|#4ls8Li2NIJ2a`Ektm50$c$L(7J>d+2q$IE+QlY$mu);-R!1 zJE)A%5}>;vVcm!P;K-wn)3Zps1p>^my&&NoJqcR$&s&lR0nG)Pego39+uWzB5wn7b z*LIHlR&w8AI|^x&4{SgB(4x=qI{t4^)NB0jEz7BzxH>3V@fo`9c1zAp{!VOzhf{6? 
zc*5}CC`}Kib~(dL{wkal+;>!oI{2R}4*Tk+umal7(#cMFYe*MB@&4aV2DnMpwWlo{ zr26O<&EjBgqLD2TVL5w8H2bR$V4V`KCNq0&hXvH~N2Vc{fpAPkrNe+A)f^*MKuIe= zKxR07uGcu5*Tl*jkf~z~YCOc5Lv9X-mqQnx5lj>k@*!`CX zsaDj41@mGxtVnV{%^oIrNF~Ib*f@ld(yEYsx7N6oXAO=W$7dr#x?-0kUmkcM!*+tQ zkX~QMh{CEfVrSfkaR`MTUh^X%F9)9W5S^gAQ^jhmF~lR-(8I)q%jLsQ%y9x=n7COv zVG3x$?^rGo&jD$k!YxX7r$dx1h$t@~+{Yt{6FEpxjE!nZI4Tx6n<;P%lRXkLkd5f4 zy4-k2tAqpcH@Qs`tOyXT6b73zj55m zcySE?BY?ygTHE6&T9PNjJH>;XV1)78WrS|WC*zUzn=Wqppz}iG+Y~~E#n+#QhhxK~ zFhdgOfg}Zo045o>c?hKNXQ+tG4fx{AFgb>m$7JYMn}4O!g-FbtcL#<;T;V zgisX*^%#S16DWnxy-AO>uI7X8GL_`u8R(R%=%$ppIqmiJSiv;@CltA*a>gqAs653#cs5bNr`R>TqZ~c=Jf+@q$E~gBL3L=j7u@ zp#?H%aKl}G!~s(w>-($e#FuPBR~|T z*3hCZ;6NboYJ5!96xF6`xv z1%tI_aeU_XCJ1X$Cd5N=BNHxR*KO-3y`kyj*A%k+@kI4DTxaCfz7{SXA_!C9j)gJPH~#J&{2{!AE!kfZ3KgyY~rCR zEbSHHa?OGAbc;<<#ttnp5kS@#P`Ar{S_vBj0Z^4KxH@7YA-OHvs2lQ|V*&vCOcPL< zlV53}_7UX;Pffk%hmw)>+*CNS*l6kjG${eqHB?tE-X*BXcBWoPZCiq? za$Ugcum?DS1BmnsSwP!lxp3ts^aK>67=?x33v{cCe){e1=r6nJKf0Js%zIDF*HzRx zw!mF(R(IVsTV2ith1*SBA`{=-#r8;<6(#n63vCzM#l6_>02Qn0B@)*bX;DfrEn12^ zo+6hNNKm6&`Bd_dprT*tKg;K@RebfD&sfk1E1Di>Aeb4lvk{B2&cb;P=qsJi3C7qJ|#qinY` zvgr&JNsT*b@oKewOiu4vQ5xC+;-d;PhVt2MAIq_vY|4GB;%Xm85jCK zdOWNtQ^tN7nKGHn3|%rMQ!+ABKN(wO=tSo3WJY9$(D~-Bo+E!8rQ3JcYmc_-mALEX zSmTjb@!`=#*!|zHG9R9kX}A2Km>R7_5&$Ur029CpiL#J19*a>B1ThT5FbsnL48kA=5Ce=cgdwMxG6O&WmPtBD z`Y@Z)nK_8GD6)|23ug-iRT&=A3A(Zoj|?_wvj`ke1Wyn~Ow5hpEdR)Xs|bf07x1Xd zIyHm@a)AHOqK4_BiC1IMV+3%Z;8a z`2hz12`8EEKOYvv(;Yx|C7Y`}DDs?BqY$U(9inQtwvne# zblcWuVhVaUrth?Mb9>XlIR>u(ijap`fm^w&U*K7C{@ycwd8vU9E=uos%Gu>WYr@SbKIP$riPlm#=tKSI5#z|}P?`&j z1G@DgQ|1Z%Xs(D$S4TcIL;+dotU|u%;Q3;JJoHkwe<7hlZccygW;%fxBhV&=yW9l9 zi0g3pN*~81&>AP{i?TH$u=X34?l@qjHNhf3v+d*_P;QTwm9Wb1zVTWVt{1I)#B47z zi#PtQ2Q6Ik23OCZ*5~U3RReF3J{&ma^!LfuBs6v3o5^mANhVl4b!O?L|1Ff6X_+BV zY%*w}F!x;{0~YhzVn+0><-dGuh zuNqThGi>eROBFKwHNVmeR1WVbry-*Y@DBpYEN2(LhSK5?BlO{TvVqhdUPu=Gg=>&@ zd!tmlJTS0Ld^{8eecm6n5UW;XNyk3Pg= 
zWRZX}4(&?N1Ep6gccq&Z1nPM``m6L+$8;mCfwbI~*=55ZFlS|mC*n6|!lMDJsskSjb*eC^?mO56iCXg~W%JP1Iu8B7NiGyLdSoyXMAA$+ zuxkEQg*E#$`vlE_PF+Xn$pS#=mOyezMi4kWCo=q8;q4|$=VCtM%MIgh4^-*M6fwc1 zfkY3cl6?wVZ8zm7jWFfDEG;iR(GWyus)(fJDUhK=50gca*cg8=$l%+*dBYKjWAq~ZR zne*N55=7yBjzd_7z|bAVY3L%S-2;=&3{ES&GM0xFiZSIvH+A)nlQv{ct>0uhM3i%m zr@I%)XVOLeU&}&lz%bQDl9Kzv8nn?01w~M~f!fak(IkQ!S4z1&$!xs6f@^>a?)+Z5 zLc{B8U?Qbz_J8PecN|()MYI{0CJpW*Jw);1eJW(lK*aVc0&pZnMe4iJwP&T z+kJ_e5F6q4vJsAieo&~05DIPbx-z0fpw$@<@u2{bfsoWP3)5~*di9kTn9dWi#LYBr z$*%=`;oV&oCXJ(ZNr=(gmd{f2LtD~MHn7y{qN17friguO%v&>x^3u?tuX7>*kSMxI z8kA*Cx5+4=NgM5}>YvopdMXRk-V0*w?(;_TJJS@(%&=avFc4{oeJE-y z^ppAI_8JTOEA!*x2qXsK|L}eW;R?HL6?KIp2QtcVntp{G_6X$cqhp~1sDNq0jQJ1%uleN?Q+apy4gN|i4}ny2BOtTYv| zz86qRWbk1i6&0dliJ$YB)dR|>OE)Owus%pXl2b}`S={105)(xXZtDgKoYaZ=D<~2F z3Q+><3rtRZ{Ul9u`3em`}1hrVK!T{K~;t36vraQ zOWs6i319B1L|}^=K)~-ZMOM4x00s>5ml)`nL5;>l{26Myi z{_r(MA0JW^d(bM^M{gsiCK&q*!G^zF98I0(j7@%eOW%4q2*itq&ZJ#P70jm5D?nB}?^=bTW90{#_FtBD! zzO`oS>|0@a$EuM^qTE)I7!Un6h$;(Mb*mj>z~BOy(G@0c{erC%bLk3I9bn&qny!$6 z#ddIoUR~{%h7^jW@{ay1c%6>+{0FZqZ%Yf_bU+|tEEcNb_`>tpJuB1c0c*C6(8^>M zN(psBvz6Y(WK4l$BGT3lHeW)l1dwtTonQy_#`K}^*~q{+8W(v|SQwVmurH|(QtWLL z`x0%Xibm5mn=nfWk2&xhUXeO|A8ZQ>T0VLjeOM5tCM1~)o3 z>#ONxS}C}Ftrb?%yHP|aQF0VP!#Nd4_=W(|9P&Y|VeAH_NeY2Ntir9IAm=w_TG6;E zu>_HI>wM1SC?1@(okFYffQnxxh}`MBCwqwXCc&{O#P$HPygS*(PkG%bJfev-5_J0= zdL0kxWHs~}z_S=j_vz|I{}uFT#z5u>x(Q$DFPf!kF;qtp`SYvA9cfJO<5&pDqe={yoEc*!8GY&W)sB;xLnX@EAJH?0uil+Or#f11@Pf6?H^_J2wKezg$1=bs<8_J z@q7XR@gsu_QBqzTcQgcNZtEN!Xg7C5CgbFwG}E(-D#9kc^^=atS!_4>LQXX^e(b@tb zwxX_Go}lNKdy;L&ZUF7&R?U21=|f$k0}o#!-RH>o#*RqVDA`%j_+?)tzt5TV9N#q6 z!8XDBhDNL*i}7#l-WeEQUT^Focfi7?ppce0`jLVNA(sdkdp&UIXt;nDL38Yt*KbAx z3sep>whiSIyG4(k)^N({dT48E4X5_Rd?18*wT1@lcZ9G7;Ss_;&6-aQAR%-j8xq1M ziFt5gW55xL&I4+B=exM_t5SR-WtC!^8G4T5ldhML;lfpMFY8;{8)-r?YHFw!Qrv2s za8nnq6m{E5(XwF&z)}SKdIHyXF=%JWHl2nXWiisv6U07%037dDK!cJs7;mKCoCmRzXej}oef5Kh+3px~( z_q>jr&*G1Rsrxs-;Qh+5gVSnz#7h#YWZ`d2W*h>=1C278w0PE;B{K0P6?eF@Wu 
z0p{)KQ>@uh0dMG3KQRK#It4h`Fq@>PJ<2Igy;(8({JPx3#!k@QU1kRxCOzGVTcwTY zHcW&ji>;YeWc@oO;9_iI6>i}WO%4bv6T%^rE;a5`UvG@m(HW^esTlCy4mMz))Qgz7 z9xspQd?K`}sg1fn zAhAzpx!U+@5jl{Kfxz}@M6b5w>1x_XLm`%9~%W3cVH?&RBWj&cP}Kx0mP+#Ou zw1{z^YRqMQiS5BLpo}=4H^OR6bP1pp&3!yFKdNY4(+JBrPv{}#XZetx3}FG=KxJcV z@exzY5*v~Z5n{@SX$y|Yu#Mb*Efcrp6OPgl7tFfVNM#-##Dlf%mdOC!iY=X9M5{&5 z6Zx2DU{1Ph<0Hz30EZb7OlGFZVYS*Q=1_L+V{-C&N?f_qiwyLC?yV1+*TBm*lSrMn z%=4{Fu6D&9xX<84JNih_t&=cfrl6^%uR~R=S<<%klNfnAv!O&-jj0AFOREmqwHO{b zt4(S3gV>JJxX7>>|KxN|CLA>03mruF0ZtshEqfD=-Wmf(n|z&BaglUgVjQWkTT(KB z%uQ)BBI@Zk*`#d-SN$ecW;MH#t_|_Yp-cU7*6YmcSbZYU1+H}m>YS-eZU33hH+Jz( z3f$OB(Yk0d7|*vO7MWgmp686|b(0G3OvhV`pfPJ@SGP+e4<9!;$3^Ht5ea~p({N@* zz-8JPT1w;~B@>|y7WQSc>GOsF!sh@9L!Jj-L#wLG)JLg}i%7MOo7ojhPi(tDWEtTn z`eLyY1=Jj^l-EVC&J}bzPNu09hRBZ+RIfbB6=(L9H%fH@L}U@;uv1L8SnOlj{J9n= z=k4rCnT|{KEQ4>n9do5wz{aPc*H|h3_v|L~-EjCy26*eDK;~z;r*bRZ5rp27Y4m;3 zYzm8iqGi^TGnJM}@%v2JKanqTQ_Ou|oaX7@-tkJzf|j$Z9L=S`AN#l_Sm`I86LmR~ z&)zw~jFX#>5Jq6>ZPxw?%cmtHiW8Z9^VDZnonFVBI_zk({GdkAo{4cLc_5seRetMr zJEZbG5m7}Er81D{Ep-ZDE!h3h)iz+c&lewv0iWev0|riZ{U0!?w8!}#@NpWT-ft8T z_zdtwUFGt6!omm06KkV9ZiAG;J>9vgb)G`zWXt4Pk!8bt@m{D`aGx(KzYKOUS!CKG^IHi3+A8wo$QY4>;5s}K*iDxxKaPC1Lh7I z6jNx_>M-;p=sX-{)P*Ya@np~n#FBjgQppt$c&G4&_F4N8nd?W6{?p-Om>8 z=@`&TP|m$!HY&;$(Q&F*2ihYF97><8Z?0JacYO<5qr#j7u_%r3yh^|`&eqH_h?ue?eShZL(7Q-Glgir7B~wINjk_aPnn2NiYzgP> zhqElL;y{iG$_{l@fCizO0!C3#cvSG0TE&a*t=}nCXcI+0Z_XU4+!9%-(L821 zC?d+&crcvSIE+9J&~SQ_rG>djT6o|oR*1k7?Ap=Xwp`f2{Cu0c?2S(du@Q96J$4JffTUf0*0&`qg-*!Q#nm+QX3 z!|9eD5Bg+oow@KCB#v`XROF-=H{9`&u)aJ^BRC(@ol{3}flq+wqlD!b<6P2!UyKpO z+~pP_V*K^co4a&f$2KBKQl|JzKS$zes3`7W5q8E)@`4X`!D9jI*P!f;<-{%0Hlu%o zhIBcKOIyjKaSW*|wN%H76B|yy@?Irobq@9G5bGz5KCCtB!RTZ2NA+v_<&(Lm_+sD7 zACFsQxzJ+XQ#0u$;IkY z46~u7UveW20J+lA z?DO?3=lUIWQzH&kw((0cG5uP^o^jNT(P^rX@Q4kWtGkJ3S2c%HCrOOBS37>g z#4nSS(_(m9$0}_0Ng{jbblk znu9hHC2>ZcCI*~(hrLC(Nj~)A-vby44qn6?%hlq|o(Q2331uNF2TxWtIJMpJbr~0iw0VZiY(^jd6K`_(P{>CaTveHNqJu!u1jv$s;2`j9q1rJSu`lxYF$=Y}SCKcrSc0fj 
z5x=Hehv+H>;HsbX5^wP8~6W6%#j!=aSa~(st0{rx}RFRibqeG4MF=Y!e`krexL0L{1;Byq}7@wn%oZviD1n4M= z=+cdZTs%NHb0A=_JWG?@I=t*>u(=Q}$EhStn+>k-G=z#kO;;k85-}W*G95$lvKW!! zgRXdxAbwCYqqgNKu>a~Em`4|oK*NvHSe7;*nDjPVED) z=M2mg|BSx{Xaua>{Q@gp58emr=cpTc;Y)uLl+5^v++~36Jq!BX1}TY>ZM(1rI~e4ewPt@CE=!)R!*vQ8e)r>hI6L%S=UBm|pziy_RKG#JJacf7cq zvBBe~axOFVh*ITJc^CXdxiYE!F!*(uGE+z9<2h|X?U95}#60+F`GtsAU^$|^V4^GrocMdh{qWDtInQ!3qzM*={K z3C%ZB&aUK;(>x6a$9-_Uk5hhC$UXB{M=iMQ4mw~0r(ZAFa;=Y?5hsRNTbS4iEX!GS zc&%Im6#Y$=35mj|C6HgM7SEt&az_#5!vwah3(p5GH4#P9U?VL0v#{*}8cT~Kh4cgZ zu&@j?WJ1-rHOwRl>|U(`YOi!|sE&NmX?br$|4hqBy!1j~A#>ruf>u&D8J*g5i zEuF2U8Q8$X;)qH7H^(fCtVfw4wi~UBmqE&epgA6i3169O$+?>hxztR^FT>mq~2SXx}_LhnoQur7S)zCxSYM58}mNL0> zhY;`-cOWd0fkAZrIVmjTBNEGlU47b3uD?+(pI@;Ito&()&nX~lj82ZM#A1y>`H&c6 zSYtR!gz^N|7~6;-9ljbf>k#U4S7R(mf{@77m_lTsW3<&6y#`rS&}xWi7Q~#f8iI2L zw}Dp;QS1R~lT|}(4M%RFssD_AR$F0x>Ks6?pMPh`V z8sioc=fkGPEWu>;#MBs6KZ!M9sWDeUK|VOCG2J#o4I(v06qRTObkvxab*$fiJ}_$V zL_hFmmF2%8X3#9s_PsWa%$A}Tt|R;Z0<<=;zFGqQaS!~IJXTYQmHuil;cbjpSWH+A z(fPMzu$oI!L9Rv<+kk)>E9B3t6|qBK$SbVp0P2Vs5+G1%2x9=S7^rq5$3%qEZV<|| z^<#O;>?FG}z3~43hZPM04FMkk|CkBHmj?_8TK?so?@B1!>xl$HQ$7i#Y}?NS2-1{j zQwrpILVv;Wgu2I*t+tWe)6^${K0IpWq<+<0(MvnB(rMzTh)%*qc&TUx+}jey2QDk zj4z*@X|0m|7%ZThsQY{>QZwIdV|&f$V>_BEc#e&+V=^+jd|u^~+??jDsxoJlKC7zo zs_KPa{FJfa>&ibg|L`tEbRnMWh1q%nqR_}yvt3R7%Mu`<{TkxvxwBE>e^VV* z=O*um>TgaF{Q@}yNj3m!3dhBnLM$JW%+h0~HHmM%tZYd#ORMT-Uk7n>x^f+iS@AIr zCVjk$gKToq2gM~iYqSUl6HV+CBDvJj*Z@StYY*?*-?dlMmfAb3l}c(--P6G6;~TfG z+>lU>5X?~RW&0HrmO$n;H$~qxjWj5;wO8~Mh>^M+vzA!Cx(s(`C%dXklkZ1Il^zb0 zieoUyhT#}2>jyr5&>907r*bh_kR8;E(b0wvpa27DG;)% zoHNp(tPFh@qIh6^L~#&AZ79Sf-r`2b!A&uRVMZJWVjY7+`5*}UJc$-aS%+F_!$X&q zBf5;B%}1Ndtk5;L`Hk4ByXQUc&nrXtmfhK1{dz40RzbE2|es zlcPXrj`Kp39f8B{H^00E2AvNwApi$3$>ezQlXn(KVqQ_5da74dU#~N?LVA`!%yt7d z+YNe(wh5$c3i3?n=TX8lU=l?9Fl;M4d^lWVR`l@Lhk<;xBk^?!O;$;& zqJs4c0t7%721 zooY2~vubRsXf0u3BAI%4rJ@KkK3T^VM-Q@7H;6kiU|=vb9*aX^P#6pbgTY`h7!U@7!C+7z3#mPX|M8`H@D(MT~K5McIeD7qKe&TygVuUgMP9;pzXbaPn;nln(z2rwu 
zBd=n}NeCs-tk6^}N>yoN3U_JTV-ymIW75hT#ea8y!|_9*ZIo!%Hl9IM`;Me%5~G!?)I9EDQrfAPV(V z_shayC4@Mp{qOk)!wn*>4nl_e5-_Qw>hb{nxT4k&VF7)3Lq;$<(c=V4H@$?6Rjfc` zMFTQv$QDmg6lu79VaX%R0CFt8mhvKVc?U$kQ6myIWkCUE9~HQspU*xC9`(#R$y-UO z+J7L@Cqc0+msW>MlR7rWG#uyrf7j>iNHOWmgdeib)!SXBxs+#Sy2s=Uj5VAZN`pVf z53c|t0}QLzt>3G8-9?-PzL;aafsh^QlgFA$;P)LX${y_jFOnb+L3tW43S7uPj3C^C z-v!l<@XJW&TU7W{ggn)IAR!!G@|KVh%0g=32p#Hot~OaX?>IHtx;g1KXBt)w9hU`< zb#^hMip)@t$8!>oInz#^&uq~t%q?1J^lDY+zXuWt>)Qjeu3d~JzT9O>ty~ildHRTH z%Db$o>|P}Sk(q+?&@OIy9Xks9n7+L@0&mu1N*xQeXw@* zfT>&ABg`m@J+_!*ZBcATmBVpOPLnM?@JYKz4r(5Uu-(%1 zkwl8r9*>jJUC)CZR-Zw-6|X)c`$8+wn>4 z?Hck4LkB4FJRIB3B`jMCPs(u;GUyTv zbun2XnYK4^TI7YaaPwi7&XTQ!&t9icb?_`{jLOxezmHACNYgA2w*h3hYbYDltG$8w zI|g7O`veE3F73Q?x)+p!iUKlLQFmmW@h8>c5rmD;1cG)bkM?<@3qNLrjYGyxhQ*@b zZ5i5w5sqmyl9*i|T{8n{FiIM_tB!A-!32Gg&P0rTa2wPZCkNzU#^3U3D_vO$0{D6Bd8)p)HrCsDD$IZPq` zNIQ*unB5@)?8cGAIF7aJ7_?@#0S0-}2KLlS?FIssJ4Y*{FTbWU;{9St*l!@tqrM(N zI0R_>5KsZoxfYaGLsa1~l5h#C6*;#yk9U94rY9+~KDI*#YKp}u&5{VbX3^WU9H0OCPhG5Pp@HgSTU4lzuSLGP{kKKc}uBkATc^D3gW{T_j6o? 
zLBzsMHQK^mdNYFC#6>yqha4o!!8l9^LLw`NsKWA>_bUfr0;63Ob@=zEg)^d2vkDp* zXARC@9f{qlty@c}Ds5}LGVQHhE9DOZuI2gXylGGuBkKfkdQR|D(=T|O+M1>%&j3tJ z)ETcCHD3qJZ#)BJx?EEO%oWCOp#SEPTTRWruCe1aqlKY%Iyj{25GHc1VIRa`ZyL&v zP==SauiGFg0Unt^pbys#M1ncG?W`K|@vWYR zxT={CoiudY`QsM{H)vQ=$kGBpLNcK^d9?ZdI-2x5;7$s$+nbJo)`f z-^rL-Foard078UTXHcx2xqlojof*If0KO@-%Olib!~uH&p7&rv;{~doL8>!m8$W>$ za$ns2fg;f7a{@;!Ur@$hJV?e57s#%S4*-TBFT<+vQ~od4Sv9$+(I6A(5c;Qd~cj`Z1&_xFpE}jc% z1bbxzvze*8rUQ+zAWL&m#n%Pc*r(nb05E_78i#s+mEWP!VS&mo@^ zV1&N>JlqHOydMULrFqdN5iSd0TmT=XnIJTefsss5G^y5U<&y6M5jBJ%cQ82A`GJT9S>Pn`9OCD52-9yp0 zgGiAwx=}|X`3vUHt^#khT7TKfJ>U}v&ByZ}SP_xHCRJi(kb*}}(H(_)8DP)-k`tC) z-|4>7Vq>7jM_FE_&ZSwPRQDi&rywK#E+hD-WiN=}>S;=c4XI*3MhQLm8)KOn2LCdy zI${o(xG^klxV*J^erIx#T^9z>UhN7i0|JXuhBRCd8D6sWz}FQ-?;(^0YA;8W5EvG3 z@6bv+4VLYEV=*P$=b-^wv8=KGW@QVVo2#sKZ9k4P1vY6jL|9MxcwHXMN) zb`+O#Qr9DIsCT-ISat}bB?fq37$K1<78`-$$~aj7=1>{SK>=+I{4k`503IoJPx}2G zBMDKct09uG2UM-W=m*;VkSupfPje%4OM5BfMz;cdRZWe!QN@yWMsNt9nAUd0>QGda zO86Jf0US%&JX=Dc?+N1Y?b5ce5{M+B^ zM&OPdvz+ElS2FZ@bfu3;X&QZP2hLQ7gv^HrPr^Th$H&g{c_?Y?Rx!%+Q;c1z&(;$i zN=^z)xYqze;mLHZh@xeOMjRL-9E$Qg2-^aTS@px=l?b6d2(Ta>nevh`rmciL31#yu z+$?T}N9(cbja=^rS9cRWgjMWKGCJd)(cPRVbM11!I9Q=wGIk$B=i%L#Z-=5i=hAdl zTY`ya1u~Sgi)Mliv0)*5p`7O9s=YhU0R*(i+>Ts9k7gTBgo8>QZM?XtJ(*>lwd3fx zv!?f8S6qG?q!haeAf@H#VFw-e&`TeQS12s&m%c(E%MKk=*f#!qh#MKMM?`A z!u%oas^e?y*0E)AaWT!J={pF!dJQw8AVjnaFjJoe{R)85G1MqEES>u$lmH8z7#^~M zc?X7(^Js^ebHYJql~d|htM@XkD~(;uJh;8x#IOm+aJ(esf=5EMHPUK;>I0y3OA7hS z^9dq9S?T22GCpk^g~p|z2L(AF>dopYHELqceSdZyQ&}#X%%hpg1=~@4^eNdd!(^af zf2p2)YYJ{$xt5}J>>m7ND>VrAknYI`DNE?ZBC*%3%(B&p>b)djwM^_15)x{TK@w&| z?7`bNh7lNW8Z7$A1N;7AJ87++24H)|u&!^-xnzk-MB6A6ByrPQA$VycC%->=z)>X8 z*%7b@i5A(4FkqqvAj=(3p6Imk)iM)wDKLncUBvZ2E9_BUyRIq;0_HYS z5-&BzdZpa{9Be&3t}GD222Na0dY*F{WV|}j4vfqSrqe7f|B88JFGW@36TDjxj4Z9x z&^f_)S=4Fv-`9U?RP!tWfJjSDzVIY>+ak)npQAc9B)X4b?p1jvM3SR1)`PFgSvyhj z#fXlbcGS5vg&61z?2%;TrSV@>5z1F4z|}lFphCz(EIYm5#+`&UkT?zr6{xhD{;Jp* zG=o&rQ_3**wBh`g4~rSlqO*EPk~<$zo}_jz#_o)OXS#lyGSTVy#J33bk-itIWQoMa 
zK__XFN}=P8h?VMT7K`9vz@)18C1xAGBEZ;#fexnmPH(o{0G63uv z0<5ce%qu}+=hrk^`%RT9@jQhad zIRundlh2qWE#gdL6CLU(v1LM5Hx4{z(O?0GD5Ov`(Q!`0#s;2e4a z>RybBFlVMsY|D?High&>p$2q_oCMCvbT#C-ADi%{_+*am-+4#LBsxapW)P(u=7<5O z9$D0`t@nPbQx>OIo`E+JoK+Q!wYJOWmw25uS|9$kpi>L2xejsgOJzI=FP7ZmjFZnDi^$r+}EJ5B+sanlSS{@OUSo#9$ zx!M7ZfsIa@xe(;O_#tgFBr9kY4Ds{ADn}K2<8iggg|M8FesNM<6iNVYFB$H=dA( z;3<^HhLJ7_)wUAte9lv-3Oy;t|AyOm zMWz(1KP(awF^cVB{}MN9XAL9JCa{(9*DyuHKAK9NOp;U@bm+Y_EYmteOrA$_G59Rh zhe8#X;Ac5I5wsBSwPp&7?bau|{(jlcqFk6Rt7kP#(y&}fG&isf1ChYeTmiOhNqush zn|i21!(9JEf)nidksgVpa-KVGL9{)w)(@f^T-*XhhI0`9alc$r-**mJLHg^{{96VC+bzm~)eH7W@lr80e5ZZ5pEgi?gd@Rre zuV)04O9XKY9vb{bKtJ$X435!FR~QjOn&N#+Qr;nFcNtGC+H~JC_f$_+DF1-af{X;T z%&a0%=|18i%q)pV8=HW$&xGix@gIG4eN5lh&em8rOE?C~?w{r!QnSKtMc#t}vjhRIV6F+DC6`84ES)nlBarBCKuoNag;GIIHnG;^ z_PS6Yn^YyEFCHTO<0JQ;D;d3a$*!5w!RTF4-N#2aceoVX`*5;OM!*Alhn~Dl??o<` z3K)Tr%V9F4j!r>M6}QQ#T(>3f@1fnF*j-rcJ_1fcdL@Ki`ZzkmcTu7&;;)%X;TDp0 z@Brb%aTge7x&svLp6cW1&LQeP3lX=~=->`rT(5)lk#`7)7o`WNdl0l(RvMClJGd`c zToFkDI%3yb#0#8SQ+R=u3jN+v`0U+X%KFRVxY78yCyzsN>G9fSg}L;^=Phg~g&w59 zNFc5TG}_TPjbEn zD)JzTU5I1LVdzBTArJh1=qofAuSZ#E1ZPyabhDwWQV`lh0}P|sJ$aFjyS_PE_Sr8V z)$!|-#TXRPMmIwCC;uo6f}B3S{*J5vyg70Q000no((49LF7`(HuZ|JbuYm>NU`@VE zlGmj>NzX3e=~Hg(*q`2PtZ_6NsY@sQvh5@g^1)d=0O@{GdER_%wNO~9)96h;eLQ3n0em@Wfocf~*vhheQo7Vx~8c9ris zo{z(;Q1}tmiNHYWV<$s25I`mI+hFVsPOaau%|O+A^?%=0D6$pE)Z*3ZSRQS{k!XPs z9z3Z&gy4Q?ga)~;Yw$8V(VJ(GNpX5BabnssX5-yJFtFZz2Zu^589ci8+RfY>nykse zI+e&v2OAxu9x>>?D?s7@M~vd)IXYeM0ED*a={Ov)Llbw0o)3+c*rY;Iwj`+Yd2CWP zZn?62UouuuKE&p>D2tA=?sekWPkkkqTfbV=XuQ{{zm}z6oohvLlb01sr5D{5vga(J zy$EQe7rY47WIRE6u(-omBZW`!Q?_I#0J&qMuyyQ~whS2@l{B>9i#YymY&Bu1qtG%w zpva{%#03uMUe7jaBD1A|2FnmpY!e((fr?1O=Upy#LF3b{O2E@Z=L%s|+tSkmteX9( zT^(#ioUiJL=O+f@wrCj0vg-X*3H@ZF--|J7Pm zklAj5C{5VR;&Kd7h2o1mU*Py3N|G9s_pC=gm0UI^iLbhf6|b=chh91>+fvWREcaBW zYI2<9Io)frZR?Pq5)jo(Hz1{<;BYYGOfLzz%65U6GJMKi|G98HjIwUU0Qe}QcDJ%S zu*1!m(1CKim#$!L;#fMqbO-z?%FuH4f!2UtF$rfDm=!w9Fl_((%Ruc#iQ^cBy(RaB zOaI%y3_Kwh0j(5q+$1`ALubDW?KHN9OOhSZ^d?o 
zF!Ak{J8Qm%=UmATs2%254ind^CPOz0dmT~%U{3yKVR~?#$t{BSQ`AJ?Tp7f<>S2Yr z9c~)CouP;74qEBweQZO)yqK_@LMjw+Iv`lFcQtLur6n8^&5Y~>C=@xT^41IE8Z`yO zrUo(AXDOnCp<4?A7^fQCo1gfAZFdI4yL}#=)>pQoA)pFWZ%fL%MCl5G-EN|sA+NDR zT>3J>@Bl)tU~Q4hvJRVp7Lr58N0Ovpn;~hEJl1c9!4`Pq$r+3kVH>710J8%E(48R@ z2^so4!>G}4r12R{qP2Inp8-_)s9Os(q|3+(9yAQW1y9M)V3GxUe?$W?2QGD6G=O3R zg}Kp?nnTVIq+ui%{L7LC^NFLK@hN#FAalw;TqF47@^4qFsA{CYv?@sG-P$l z8adQ3lo9?YQiJ(`Y%OeR0ObPsjd96R4S*KO7(AdPUg=U&M_%T?Q66Mhy7&^lZAI>@ z{uOcsTJR?b1`Pi=#V_Y1!<$bN>YX%$6LTq#5i01V_KHv`84 z#sa%-B$k;*x5_5|zq2OU?%kwiXcR<-#+^C1tz$srms;--ay6?TzlO5T5^{B3YN%zy zkS3rRQJn#xCu)u@W z00q`5Sil3XpaKE-1q~jo0}5OM2`1pcg9f321siA(0SDXQf(aCWfCmj6ECmM$6ijdd z1Gs|;9!$^$@C-DFz&?<`BbW+o1SY6JBoGNI009O#a12ZkfeJWiz=3r@1zUj&Jc8iB z0t)<0U?yS*90!Vz5N=glZa z5k)(#)X*GC(KuCA#k|`bV~WsQo9*hs;^0hI)Ig4F+W%-KE{`%X zQxY>X^Fq;#-On((g4lJtu&?GbR#7BGyKcW(tlrW5gPQh)*NcsL&Ge0SFGwR8HzPqzPi6 zVPK&cp3xxohAgWpO|zL4mvYHqq@hGwn>ws>20DzNxVpv1<}%_g$=bNUrmqVlJE|!w zL_&+RBzypZC?q*0RGUI#Fg1i1gJwclgj1%2WyK)O>_#jHM{EX1EXGF0Xk@H(%tqx* z@RT4x#bPj6W`b8$a#bRDRU>63ffz*Q00IbL@jlKJSEbvI*OWRmS_KP6JKU{1Qihco zLpk-!D)kxj3=GNa0V4`jJuI>av_PXI$_f+>G8LI6(o>GIup7DY|4L8XdA2BB5pxk`W?&JRU;Wh}u(o zw6>3k@BM16z}L(5;Pr?y4{DBx!4o>HVk#)f>qaF+Ac-`~(l(ppP6?c_qL&a$CG!a8 z5K>)XCCQ|2>@yWFfn>Ny6vv*NHiRyRAN!A(@T#;ULD7GUy(ASHzOe~ewh~MfGAvrm za_Z(sBw$w3kdOOXjmYJr%uU~&T&O6E$StE2#YsrbhS{)wW>c?sY zDb1|3Lw}fQ)_dclFjkMCMwWQGPu&>il#yfj3XS3{{ye1V`EG4Z+7uBd4wu1X>rln~ zyE`u+QiLN_G!429MW0IMWnDJ0I@w79arootq%~|4`|dS6OK`94tzIiuCqqInwyI`* zc^TX=!K%ve(3~}$k?lT>8_@DgxDLgb`Hd@>Ikw+`>{(KTv{J@OuAH0 zom5X-G(LN^oC34LWu<0uWxZr^$nwYv$w2c%PK-lNbixtLdJ5YZyw9t<&zriF3t4N@ zc52dAXjvw4SZEiDzOswHZfmiK?9ejGruJFLQu|0kmC&eP4) z$-}AL=`;{$2I9N06h8Edkw*v-xU+3SQv@1G2em~9H90b4GZ7KZ@+#t%uHKd|*0L-q z1)}NdwX*89DrzW=CAo|!>m=l)@g&nEy`&nYR8i7jWK?8CEF`Krttv#5aPHLSE_;SY zSa;a>H%NxIj&U6fC5vq>uL$K^s~}etv#_e0=Qn(KP(V`EOi9*l_3hf|eEx z!&Wd1IibH(1L7@iSGQ_cH)3-&dLN>>GBbxVGyCxr^Rq<=T7(V0(;Iwaw{XL7%`n=_ zHm%4u?YmOz+mf9qk!C27o-Ze%4kK2zYj4?Yx 
z&*Qzw<2|<9q6H8az(aTAlc634eXkuB#UE$0#}tBNomR5GSbGNwFgqNhOk z7x_88BRReEHWUE}K%k(SFesB01|c_dfzW2~(8^x~C3K)@prDBbX8#rc6)9!~KlzhC z5j>gs+ju|mlhLpeQ}*a~ma$67YjctCJP&8##PXy#A}S(pFi3=$8PebrBe)mS7!xK8 zhiN_r1%>tHI3Fh8Enhg&@Tu`>ZW#xkZ1djtUhp1;mA%D1y*(Qn+AGo6V2Le>)l1G) z)wGN_){{_rk~~VHw;>Ocxh)$r&M6NI?@A}{B+nwx9dGbn3h&Nl?&j^5?pp5b?LyqK z+i3`GCv21!_+BTW?4+E8)erDl18 zWhjk-BMtX4l%1nZNnKdes@Uy0B)@y2Jt?ZX9u#q7L=MGUED?96NQW(_KfV308vJgT zDw}jAd9@lGmuPV@bXfk5_=!f|dDC@e4eS*?I7R27^P4MUSfU*pvv{6D^##$KwK9v_ zD$5L;zdR#w$}sVqPDs+*^J0X5%;3s!_Z5`x&I@r?alDzNiqK+O$ZdQ&yMt1PUDk;U zUT86oklC{#Z~T4p(v{hM1BP*W4#!}qkE5*Qbj*QESYpxsf=>ydw1)9`Cr8c*g~HAe zY-2pUEZ2Niuz>#U9w$ScJui!x=>;-xJwAvPfVE-7HUfAN9^`3K zI(FiGkW;eUAR_L1aAhL?I)2x}Zd%AkTF8;Z$wT3Y*}XlO9*x#IU}iXLCKo|o>PPSp z=mH8BE#NgXkO5S9R^p|W- z5|s3(S)G$466tf2c$!25LnrCbcyy8!4Sx(G&#Og7OjC6Xk5(>8MfmZhk;z?UTpoaT zj~t4OnO6jQgkYIul3KKP8DV=WNw%>HkK~&;_68b%YSI9zv!*2{m zrxIqDiAP0$=3&FEIvAwl&mPsl9$sc*4`IUsgHxG{%fD=yuS}~radnYy>TjO4AlbW5 z-9~et{11b=s2gzbhLf5CU7UQ*3NP4j?%Rc^i({R@K=>cgf3wD}WB(VO7!^3$uU4_{ z(*XuR3qc$49eqtRG<-luN-GEwvhKrgcC%I`aF+iu4#OwL;yORWS@|N znp%!bFRy{k@t>Y)Bm{x+NJYv;#((E&K+^pyTe~X$qn-$^N{;+m&W!kvW`2Rz+;kis zw7Gn+>~5wHsZLt6>kxxf2HF?#U&q(s{cN3MaAi@`?_-|WKCz97ZQGgHHYT=h+sVXs zPHa09XM%~XdFOfS-g@qb`)$>(UR`^wReM*h?qC0V4vkE2dox*IIu}t_ zh{X-@hPd;ezLiI&mo4zW{nWKpslry+Y^! 
z+>$ZM1c>%~9sU5u=2zY>A;(+!*LbV6*#mEC^+0UOYY;~Wzg|vVvE8Y;C{AWY(-&u19O}Ng3>U!vF!*Or*9YAIF4FecI569K>uhTh$rczhVmEF-xm9A5D7YnY_XI zUf&&A2QPB!mRyO|GBD3`+zI0wBKnOd(K$*2>~Mafk&mlJQCk@s%g0y%d-W_t88hx| zR^q8Z#tsWFkV>+5g+Yd4yeC;#Duyepc%GAZ8q7pN8pZ1k>*t^ZH_S7kREaf6r3Y)d z>RF-gg==j8z~R~9DE@he8_(L9n^NJseFl_F9Dw5c@3ECQNprW$vjsL3lb$3d)%bQ9 zv)oT#z9f%#k8C?8^M$T6E4R)Yzo2=FR8~fwdUs&e0}YTvKgP+D|weu1XYw82&I3e@|YlC6z|jtQsz7m zr*w+li3WCc&#Di)h!m*z&CZzPN~H8>9oHqKn*|P}GsmxmpGZ5aVQR7Q3IFIl)LO(D z8eTacC>Jr2Wu{q3w_i=Rh`bAfv4nn}A{Jsw;g^!?p^Tr+p{=GNn;P;j^W%;ehr)Y1 zmej{tb|+(?UGgjYppfuz;TO`626Qlhw=|GGAxT37uBaZVwVOnVO41(@7@JXyKBIsp zeq0Vmia&dsYes(_>u+Pe6QCvAsw<>tURIhg#08c@4IB zpa4V%_#Q15*|~OOw?g8QsLE@{N_4pTr8gUZy|h0ZX+UuunfW1zCeJ#P%A=;V2_#w& ziW~{&7B9;lCORe}LsXsa3F^HdkhWnESf7-ZE@$&#Ez{&?*8a=qvr7iu7N7_duLH{~ zVb7xN!lwK%@FI;#`&*TN(fnO; zmr)QpeQ#bpJgrP)}O^nWfO0`y%K1C{j>`3>tC8(GFvpqe`w~Q z5vX}D@|v=)RUrHGwZ-vr~J__p*dHfxnr~&jc zBTkKUN2H>PXD%AfT(N$%pt>thW*RMbsWee$$L9fawXeg{zS&jqXUPGv2YBecw=M|h zvDK`z$|815nN$laoqRG*!TV^JZ^@ZGB>v=z@1ayhiR*+a^_L=o5U-4#n+M;R=iBM% zJNSgktbAA~Z@J-$3XgT-^@3t_tcafOnUP08lIGX}j;gvqlN3H-SP%ef&3*NqWX_zOks?29X*G~RXuggw{E zl%_ff{gt;zZpcYWslYXLO5r0Eb0vp-b_5@X$C9tyCP1TlX%?$S5JLjWZ&MfUrN{&P zbC{Qemawp;j<4?iveCTW2yJYC@R@g`!A5EmnABEtXrnfnNwaAFsb7ZYr52 z^CaL?3FNIT3@)ey-dA?*m9Qm~O$AXAmZR(ltrMGFH*V}P61%E)Z=4w z@l&PeGmf-J1Y#`}ac)U2J(Q%|>QI$5L=NChu>I{Y&W;gnkp^D`X06a7hZKQ5|8_XX zr)6Rv-9)?fS6BZ{WaHjh;ssm?>t9?6jmdU3%Om1^3D*g1C4!`JR~nle(@lwRi*N55-{Va87B7DS!GlH%D+$ZHvS;n8w!z{5J>WW2DL*u|^>p7aNNrl4bxmXf=$6ND( zB#Mk{jLu;AzrjK=V-@3V1`I%+UNBzEEXbBcyHrgV8Gon+>ebZ0!b}w(KJcR7Lt2>= zuGy-o!IKI9kuv!kT&HTv9HJ&2rl=zwE50b2bet%IjP3UAo#Uq*SGs@p#WcFoEdS z53|hD*@~l5=Sz~RM?5yF#t?DaVcAp8UwRo(OzN(FFjZBJq8Z?~1L)}9i3X89mtufb zxpv=(6UrDUDFrH}q?yz8{yc=r6ORSdS@6;UM%m&k02c;4X(lJ~^ml)N4z-?uYiM z3_a4F09h4_i00h-UTRmJU4VluCw#m#Ro+mh>6>^4rfQgK4Zxl#Tj8ZcR6*}Cdx`8gen314&J{X zqJYaCi_#OXkmbDtD}wf1o`!AGMb9cXEyXz% zY7uXgTej8fuIh-{8zlR^#1?(PJ76}|h%G)znsb%*CxS~$iyk{DO?70Tm 
zW|DF)2oATRZrI3M`Yz8UDWEK3i1?541y*Inu75L9;w#n<7z`-BH}D7F$K^IIoA#8$1h<(#>_+uyIm99TX>)*j>Pf=% zj?Y%l2gVSkWInX9#H6bX1C15+t*J&>C(78HibK{XZnx5X@vkFGPg;_Y7m2B5GAYGS zkmWXdM-%9&ajg^oolzKk6L034=qJ-@ zHt2*&^1NXi``#`I6v3z(2p&?jes3a#hvg_LcjeF;V#PSqH$Vz?@$ULoIZn=O@(l`Z zvMjgg>2b0WMj^3o$}|95$P?hXT#jexNW-K_1X^9$6{&R)5?m;z74^}T9I4bt-o-dv z<-3}H3B>8%mF8osNaRUv3d9C6w~P}Ly>R#het=#_PCB4aeQG|z^M37sh8yCO`rf*& zE(hr}Xz`sm%rIifnS-0#(g)&aQt1)N7)4->!&AY`oJGYjFK!;dgBj;z^1_Q1c4P2% z33U0wIK1!AT13?EfTQ1PB_>KbRxWRiadedQ#TPDbXCy6LsR<dxS8IxO6KHU z+b|;@#Qhm+(OK99spFsmXkp{BrcpSU6;FZB6W%NRy>r}m5|MD_Q6+RB3jiGA5*GX28Dg(?pG9zxb8DY)BLl%Y_Sx) zW9n6I@nMT2J%fWuBG8gw3PZ+iU;&xNn%inag-g=~0n+^dzXh{8NDJZbAXui#wc6=2=GwGSo&s61mDtb+o?19Wr*&m@aYk6*=e~W_)=- z{E0;^owR7`|97n;(rwZsuDt!IX^@id{d*~y%F+qgy2I$k&;51v8CM{`I{?o!)$sv-Ea zsQKomy;pIu>PCq=e>0Q*)NblNVLax(hc5s@Choy%+It1|U!G>>;3M^yb`=7$N8?TF z&IqaI(>DwJLH(W?5%96X=@Q7yYzn|IC4aq!VnSJQ=I=Olpu{-P58?(lfiK$FuMeGZkDq8tmPj zDlNcFh3-O$v}3ShEC^s3$6Uz~aAEz)P?dTA;Pi{hi;XV%oF8(u{1YPUJ$OxRy3w7& zqH>!&iPZvJK9VIBQeTc&B$OtO|qJ(#1^T*GkhoN5kXXs!y!a;)!Lvtp%#ulYRV2WC}^Z zx|#(kdg^i?NC`NddM>)DVrf+T8P+B6ekCk+a82%VJkYFMCDRm{S*TnwMD7@$C5hwq zk4Yo&gjLU?q=*IWfC(RU39hzf7)#Ni9I4_7ykJ7{L?c?d;robuG00`5juYuvceeWr z;BUHef5qk=6+b$n2xz-;hA*LXhPdDJpg78t7;2mb%SRkc@;w9es71 zMbN|ik`xL$`JP5rwjXyrN2S%aF{rdtdlbh;*HmTLeQL^SNg`ovb`QlW2<8`C=;o&vMbvSqh)Rzxg%1 zcz2#|_8=4vrOgA4!OR{H;6qwpB};>LA(?SsW;2->sQ-+X3J=}KYo9i0Hq(J6#49>4 z{(*DeYQ5ED=*eLJz-DmGi;1%1?87YdJO{Q#GG;u`2Z*#gWOwL~=6S+>fOOByOsOl$w-{PnR zHnO#t@xVmcEQ0(cWB-Nsl2kD|N`@&LLPGZ^GP6e5+FQ+FJ;m_~!a)X~{l6QlNANG+5*g8#N5yWXM=M47jxM&6onL0dX(-a&F_lnY11set zhLwuB?Z)g1<)o_w^t$fq{lsmz45EsRdZZKOcN{7>D{#SuRysx=S2(Iz^Hx*V#XH_= zsa&s?mV}<`jqpDMrg0?^hG4&+H3gepI76zYh{Z*63tHDV!rxG>e9xZATLd!7t^ImK zV)b#XIQUHaHO*-5y9p!n;+RGcW%Q=_SNBgopiGaH-sT z6YaBtC^_rTpu9-Alx7l*qkF_=W@w9MXGSs>e{vf3)TnNaVgy|NJ zx0$?{Xfb;VlZZ)T-D=C|YEYnI{ycrGK@#cbq{vKP`c|C93hpGksYz+PMl|B;kC0|= z20;#f$spz3AaFvDXl}2r8^R*_s#w`<#fnHghgZ^N)mvHziK9w5)M7(IwJat{uUveC 
zj1Y~$Hq1mU2-`83KOsvpmJTFzfY{|ZV|!}~T@0(m<+vTLB&rJhVk+@ZSO$ahp(F0Z z7`G6lPF4J{Xk|xP`E(SRnPNfpTp-s*0Zr|0+iAX|9YJz949S0mMu!NOr=@@#r zK_E1<9&oZEvtv6^@HAzNzw$tv!KLQE(ayMH(^uiB!Le0Bp=~WcztKwM9kX^?eJtm# zFD9BbQV6SIuKPKYHB75|0OqgoFj8f85a=CX^io|BgalW7?SGX>;kQn8`WY4y5JA2i(G$g1` zno&NZ%>NVD{^bY>fk={0b;4ntPw_B4c#V?!>Ss_jj`oBIgKi35CA(e(gRI=;FO*j? z;|TLD2q5~mi)b2tnRQ|HF&-0hKZefvsbc;sgH^e$v$-JgmVr>+1i|SlK)mFK#y|Go z9sdYT1fWF6c9+LAi8@}WtEL)dglWZ{0DknUSQzWt>Yi5y^TaEc0eazE>-_Fz*$P zF5iyCR%ufmQgOr70QVLpkn4jGr-t{0X|hzuYAJD3tGFdtL=cZKdM`7r1!<(yf~!}6k*7K@+5vy8UM`{?p!}gqQ&qAQ$nTU^T`GPR=$3I~1K?V< zvk##NzRJa9TY;Dxv^tBf0$d9hAJVe9^Kf=GtNiASnp_)QLeYm1R3QyBML=7|NSES`L1xB#HQVplbs&?05=aS*2IXk@4L3ZpD8(C<*J9HLh~l@s7x2d0Z`Xv zq}eXS6mBrJks?iizMBwkKU_~mST1)XtE?y!1Q{)nE}l9LTFZB4-5_`9o>$(ZzTfKPgu;!3@juY=jV|lm``l%^$BQMZLdXlMP^xNss_+HnOVrEO@HAA~Q+q+D#eh z>E&*aH?U$Zlpg<4vnKV%1HQU0DDZPmSG)%*C08#uwI zSNp8Y`dJlC;GIzE^%52E5d%w!%OH+Bb;DdtIt3G2EMu|kV%SE;R%!Tv5rs;AoJpGa28xmk6c@d$hS!y#Y?BP*GgvP{He?DoLHERPAy@HM2o^ z;&OwQl)nt>UB_on9cD$mczS=xxfsKtqRCtT>Yy}U0826CdO_QS@De-Emw3`D6^;5- zSe#*da_g8&AfBr#_OxtbpNd;m>8#=mh341&<$Xh)LUyXk8Ty~tzTD(#N1W_X(jZjj zH=&qOLRUH2M>1%o87qEYZ=r9yIA!#bbyvNRd;n~hkZ@vKkUY!z{otBdSqO#5C|Po(F0KCSPj1ZJ3fp<6OZwM+5=VB~rj?U(27o92 zI~C(*x2)mump;P1sVSI8mrfzA1PyBcEu|lwdcx4Fv;~`bH1!X~9N2=~V&}$rQk!xx zP>}m0v<&9OQYt?TiI5Zz{(1Y79VeI~Eai44%&4AIR*K0Po3*QifGtBwgb5-^u$CXL zF_@>tViAQ$^>m@}7mL^C9@B?*O(|BtJU8sy>(VX-*&7sFAU>+X{K-Hy4Y(nj8d)qV zPCnvTg@!1+Kv6>IcvbU&B|r?I_}WfpwYL9Q7oQGc9xenU9Ye|5!<&%xQoND!mjmCs zaT-~+e4}-mjSIQJr>>?Dk#{nh9FsB+Z9y7qHoprX=|UY~fcNWC-!+k-PFgp%bMt^c zhb6|}FA?LimlchrGgSS#$cq$A&r9Xapw{ad4I5%DOXa3ESL3wS zR5lJVDNco78qL&ohzv`Xn8~-l{v=mATcuq47m~SQ2p^7gX1ZFWeJ)7nOHHj^XMaky zrT&oKJnpEm#lv}Na^*anwgOxhnE2O@dnm%R&s-vZ`w3vYACIHKyT_JWZym96~)K z;btTOyAOfzPizMcLQAMOd?KKELn$D2$9itl9Huj#%h$aduUA@C$=W*hoSa#uF{$J%saQgaol%_5 zr%T_c4!?dxE0P@JS5Z{WL6);ucw~A6xi$Y{maDp@4!29JL!MrFogG(#Mz(pD&eWnW zdEBLUHo4p&TEfRog+skf0Arnet%tcq+XwU-)5d;kz|}ayFeevg7VK*lMr#mO>kSw2nPo+RB 
zQ7s-XFFhGqPPfdNzOnWBYU0Yeqz{7`Q9{oQky~@Rvb7nAn6`NyH7lF%RhSJW&GsYj zj>v1RX5AP&r$*;wSr~5$FNKvCA92G#fr5&hyjy`xH?NeV`nSemT9ud2aIEffXLfah zfxEQEw`SxDrncaMH%`}ZO#AwHUUGHIdg8^J`y`1qXSHV-IihdT_2wMxC-M4+u8TSC z;R#syWkrib72$512?%+mR&>;a^jOK(rF`2ZPj=a#mi*iWyi<4<7E@WqmLV2wX0A1j znv-~ZB3s>o0Qv{RRtEgQ& z8!m(O8f(0l@;VCC-&_z|!6XxQy%Y4CIu&R1jeiooeO}cVG={gm&PUhE5-*)sg?b5e z?cNj|=5VVYx(_|Gd4c+MYBV>kaTL{-vVHwJB$?Q4I~DrkMa689H!LJmrt8nE9VF@y zH#*dQhQqms+NHV~myUIKR7GjfthCWlYS3m^<0dC%vt3IqywPIoD7tvh(B$tI>bd&zSqOZ8pt~?2tW#5iz>uz%(uud zofQTh@pzP5>=xH#ovB;l76aVrLa4R5Gjn*T^2Xf5A2J?vQle$z4$!Lv zD5NwvBo}*ZPiA54OS#@sk~Sqa73mkzs-&C4yu`#n6t(*z{5^ae0ffZyJ-0lI(746749xif0U%|&nPa4R@N$+8YXmJcr=+t)o#7yvXS;MS`)Qz45RpC>two8 z*|e(U#gPw@Qv5ARtk6HE-4H8SFyP@4U6GxgLK7P}qzkf21_${W5KfFt${aWagE`>p zCVhIvpM5e?Fp^WqIxP{<1>+cZL*sudK6mfjOFe%IRb0OJJszHJ%zWqDNC8|t^3nM@ z{EnJC#PCb!373#V(M{K>yMO1u;Hc+!0lA)xN%7tLxLonf@qFby+pi(aT>atYWAvQK zZ_b*qK zlnhj*H!q(gTKJ?%Ku=hto+S8@jgfcmM%6q~o;}J@&W@gvrJ@>FE2&LhB2JH;BlVn6(~W;H*ItUsXpRqHd!IEQkE0qe1y2QkpKcJb(P=bG=&Z|NaW zLhlzY4QnoI0cvpR#otf#i)K)Rz(EB>f(66kw}?=4&j*5oAmK2XSTJBz2Blfh0VY;f z9l|iEG%VnvFxbL@!a>joxc^%5fdK$UMZplfMM!KNU=Yxl(uFsHuX z&@{q@0}x?haLU5_BB8LrNFX=>4i=40{{}b|f`BIs2L=XXBm%{vT`3F!i6@;o5DbkQ zXcQER0PCe}ybl4d4saEqSA<0-hhR`mt*jXaAyt#{ZnRvmxS0C=ElWGM*~LjN7^nR`jQ#x z7XlTK6)SNRUhxIY%ssLPjNB-JHd$macF*2E)#(N{1E=h(ntAjOJKo zXmW{4ymY9Pe60?@rC95C3(nv5V=h4GJD`1aC?83xqE#@rpkq&S?P|3ZJxmx|-8{Ex zwA-x4HeR_ZCD(B!x6F{6>&I)Afry{_EtZ5zL5LACew(R0TO1g&+ChL3C@WXttb-0> zi4A5M$2CO-!k`NOFWm@agIo$M1hX!9g`%821Mg-BQMPia(#(fDDVntf;}@lUu|%U`?8I;O^uwc%4JRZm7hxo*wF^F!5@e`C+aP0aj64oe5FR zxHo;pC0_V(z}Zv>uX=BQMY*ijP0+sEn16|wDv_Lf^I*lsq&q~%#b3!;;ma~?Q({f# zUC(xt=i!67GprY%kGu@vI-B5#u?ooRCG zFZi;FcjO3|%Zba?1G3c#L>2;5V6rlxN0~RJy&}L8H!KcF0 zhXfd$lMb<@N~df2ZPgnsCVI}d{(ROyA?v6+U}UB)=@DA`V2oP7X!e7bO*f=kliUUb z;;XcM2S((ptH4eSy=0Rl7;#MvuNtb8!Rs$dz@Cu^=Q3MXBSRu}AXodKrvDwO z_=ic~Vbi`;n0VVF4deNHJ&>@xw0Vs4c2;w#pSh8Qxp-c$&{0EDp1N;n|GUzxdlD%o zOU)OfX8E6XtbnEoOF;O3leKNb?Z~Vy;ZAUo8p?6#c!|A8_AYGQw!CB5926>+|5l)z 
zaS|y-V2K&5NH+b`qOpu>y77n8x=hMkFArgY^TH6?z$F^p5~cCr)V6}PH#J2VNWk(%Ry;w+!E^i$N6npuvB52Np(oA2Y5MK} z4(yQsPfrct11e6v<}5IL)N9Fx`5GWFtqxyOTOhOI%*1O=4Gursx9VJ=^v(g9* zE+&dAYbE{t`j_>aSbvNJ>6W0sdJ%KiFt7ulUNQg%dZ&)?Y#5Xmu&8P`dI5?{sRk6*<;)<|w4(P7Jl zs;s+3H2@y1uLxMDsrWj{F~x(ZE~mc<=L9+CP=bg8EaTgI(2gZO=(E#h3OfMjuPPE~ z8&(~6WRHU~IZUyhb&@*7KT2^u)xHV&84}vTe-oh(2T`+~)8MTucqm$6%ZfzP;(G*b zv2f&Gw`LYkN)xlX_&RY8p~0bVrQH)t&pv95A=)givzzRdI~<{^+j?=t&NtH4mdC<9 zV=8vnHYc=&wd3~=rji58-x&~X#X;!cs`la19M|~M9x0x{)e&Xyc6?(_al3cqvVHO7 zP7^owht;pG(K5#vX5#7NcOhn%V!#WH8spB`HWJ z)_cY0>V&Q+MC=^sK7jj7#l#mM#$Xog$X}U9A72&~*XElav|H%i732@SEzEcw;S$t%@glkZA^b4?TlXN>`j=jJrHQkI;hw>Z{A8M)h~xo;b<>p zVHfR~z;&G$(Z>K$_PauNjay)KO*nRBoBxC+1o-}(nz@3E;<>s)n4esPpr?4?Xmq9v zR7Se{F_9t#wW?itMBIF~nfsR;_b%Jm%h&FriP{;)Z#~9`WBiWn)A$!N%oaf^YVdCd zH?ObIp))|2ypXxos@`c++;fCG#RG*A6}=b>d6>vL%}94do!oZ#3Hej+X zO-Z9%dEb5D3|nbnz&)?sZDY}NQwDqGvp?5(^ciDLeg4Phy!+==?1Mk|j6-YkD^wFV zr=J5h)*nA^Y0us6B19Bs?|$Z*!t>-VWCDdk%{cSIhV;fYd8I_#}k$smWwg00F|E}OCMRh@ku zzM6CcpXNj5VM6WlF?!pro&!QHtb1&2eMN6VQ@5x76qUI3FuyH|x<)*_e-ofSLOJ5q z;N;THn_RRf$au*jyIB@>;eETF-3bx=;-be%=?wlox@C^JlQLE4j(4~#Lb$?(@pu1A zIH+$vO3@`?no#4FWQ-vat4Jwk(E5naIOKnFu!(QQ6t7*4>*}V6#kOa;ivmZ;>A&$C~DujT0gf%IZf&cUku#E^{6B z;NkZ042xw2;^IdX&esM@7wK*4)+O5$r)+`=be+ur5;}F9spR>NFw!`@oXaOp0C^kI zAj26}ff&avGnN4ea(5(7Wjr>}mkD%oo;c#EzP2c5wc2>)%?fgJ?{_kM(GBqNpLu}; zslCXxT2ZDG)P3Ux^j|b&*j^iK(9_fW7jz3Yc=sXB8?&2Us?yu6>iNZOkof-Xd`DLR z9|XvLxp0nB-KTBO_P}z729tdZwY=6i0yMvK_b+uDlRtooPHbw# zLposq>ZG&{V@ENC_DU`VgVF=y(WvNhEhFfmZ5<$j*~pqmuUzv=_>I~~UMFj&>y^EN z!@=zrhY{Np5|)@ls5q{)@GOUydk0)(URJg09?kBo#rs%0kljbAlveh2LvQl$9lrGF zCiYT0cX>PrL5rardr zP;R{oHtqYH`VLkrVw{-jsZ7R9R7;~ik_efu*T2?F!S7Txiuxj7%*?d>Uobg4 zurhBsLpneE^m<3?E7#Hgqz1z!X>5{$F*V$h=H82>*jKO66RDA8NP~7bcuzgJ;Zw)H zRm%^*qWsN8Q#k_TlOkDTazj4FE@qb_azcuSEjZ05nzW+z7iSvI8_L_e(*5D@d|c_v zCxGX}*^Y@~e<3V=o{oAl#H#7upehrRlT3JnhZi|;a-&s|#3yfkc?yk2Py#b|fmh76 zPxSp35d9q(#rM?6`g#no;5@rms(HqU^xhh zyl-Y0%DEKz;Xd!e#T=Qq`{1y&PEu}O$P{T!WnZ>N*y%1F0&v-k 
zDn{)OX7*csi6*!3-Qd0+scTQ)fpy@#B6wYU@%0y1=Q@5?$D!>vt0#lB5Hopl=Q75m z;k*k_U2v$fb>a>B5r^MTN;bM1mq|ehXt8C0@%}?~+9-S(@;Af&G(?{NJ7s&-dhGJN zB@9HHJG5T>zOl|EoWx|~8PI47Vqn6u|0J^g$nw*QjkRlDKp`ZA75)YRr}UMSkar16 zL$sstg3!`KCrB`R@{u=@D%m-@5(;J1cF1HWAi_*)?>#9Cy{~qE*H%j`xmP;+>QYiZ z%B`Ftd?r0$(W8@@Z5q|#JEa6LNt&fJYm2&oigWU&3akj^Ub`p|mV4F(UM9+BMLXM| zU`c?pDrRwsBIU?~WtzHQ)j?2?KTZNu9X^vH5%y%WZ;FFP9MCW~jCBtUU)OVGVMj={ zT*w#_uA(B-0ncotUBZF##cpHE2F46JSD4zzdRxJfRrRpl zxsk5k(-E)-w63N<50C7akXm^(+qiLr?+Gbhqt{B2K0LcRmU)eO+ez0oOVbL!-ti)A zU&fyIfZ6wS5tmu;DU3ei{VS?K{)ATkfmG9*ra?x!{lu766}j{KOT-v{@*|f(sS>=E zkhf+&@aYHdb}XT;arfwlG>CXW=3MLE8Zo?pbA@jCwoo_hZThQ~bF8`XEZc(7U2BrP zxVR)uI}J;kKLo^&*5qmiad%b+n5y_To${0lv*~6SP9}Rz2602hgO+(R3;fxHon|}@ z8#P--x`|T?9pnWQG>z=#LwK_)z>^j)BSI`7%GmWDgG-iljk>r7L6B#zNnzkGBP%Ur z9Qz?|dsZ2Vu}dIeuQZIF0pI@&hlc8vh(?9^cB5(Ip}B8-nL~Q1Clf-1WAMk10~|Dk zO|&^RhR(jqE(0S2HLv>_kt580IG}|?<`v%8@G7*48;SI<9mVde?9vLQpwRDfDuA67 z;~86VcE0Ko-`}+?KKerW=SJgdwfwD!*b=MTHflDDKVEJa8Tf@CU+SqLWY2E%q3Rfr zvLnC`7skBl7+vPZia|nZ2nU#(Mb-!B%pOmK#s|!Po++ZX2abXC!L=f>^G57L%J}Z_ za_Ks+faC$EWTf4f&(Q#T#AH>XzvmkmKAv&ckRIZ11vBV+0tJ_J9ZAqUti~q^Zl%EYlIqSGkM8QI+>WB-9=wO@;ncpEC$rd; zT)Y@Z>5kxq1A~A6&t(7BIo(R`LA;~FJEdX0a;SGU@1zDWER&C|oz|zbAD6~d2 zm&8~kC=zeXQ#|;?27P>t7E84teh0g=B}pT`KH}E_m=5vXOVaN}HNrI|Ng$=*skM$D zQfdNVWV<;{Z+E^34!U{Is_s8c>XB`-`hKX>C#NF< z`n)D~pJ1=FrP^UhCV4gXEsvw#HO<0US>Fn;QIF5zV@tQK_$GEtBr$_-8ilG>x(rqZ z3t>p>;hiPqYt*@Qz?w$G?;^7|sZ%F0$9W%Fgz$GRohppM=cQOUMxiZ?nGYQ%HKa0_ zOdvMyM`xUk{YHtSN(h66_#?@wStJP{S3HYnXEcn$^WlC|m66PTP8_BLncANm<2QvU zVme{?7x1lA18mcQve3cp)DTi!tAF_5`&PVy%5q_R9x{5G#_2({o< zlpP|l3@j48vT#{xYMtFE;=!OGv;t?+LS+j3{ZA;0!!92dNs*S|+RBW+Hk(t8-!L}V z`PxYdR&cw2^G+86il&LOXi{AQ`v&0#@yY7;SEV?B#!h&d+dfPx1l+-pO!${FUy#)s z@wbP?V!gmzX1Z%=7bCw2&gnX(P3F1V^!@O?SZo-OEOppV9~Mb8HOo2!6B~KspMIA!t!09eUXhU-+Xo5;7o~F-6z=mgYaCC?_|uwgI*}z@ zf%46sIJzx*)tJr+A0@S*h}e7;Jf|Ovkqemg%8#?yf2+h|OZofRwApXL%2q7BGB~%% z+>ZPue(qyVd~|E54;m6a?5~E#ely6`-N5ydr&(P~lG67Oc}ldtr^C`75_WRZ*)qVk$ 
zkr7OUh$GVeL%z9T4hCp|k!{!fac=u}-KCJ0Sd}{P#};^Zx)`K%>9BdOLm&=)u&zwY5PHcUZO9 zG-_seOk$lb1_RiPPIn)0@PeXLQXmcUt#=zgfPRA)c3yy_ZsYm+z)wM z7H^_64FIXr^KuhBhQuY~n(RLg80uBC4h1+OTyQZrvjHj7Ba(r4H^HTP5tBK7%d=+? z(FNLd;GY&ag*|v1gyL@TBZp7`78&@S2%H8mg~QbajxnOIFd353|CUy>9{}rGk1nw} zzJM=aSOV>}AxXx0VuTHmX*6N~R(N=H-(lZvYe?hy-SW0}PF=fLb@X`_a9f2HfaOO_ zAIO~5f@~o<0q23SM{F2)`s8qIxAN>?f4NJp`RNQXZ=;W>f3W@&y1dtkf8a~LKBj*( z{-6NlrRrr7>jPaHvLlZAMW*OtC=vzz;~vHF(c@!A>rlxY2Uu znoxcw^4O86W|PSFBAM7cfuv*?;q?r%(GOg*>yIYq?=_>8N1CO5pvy+jQtDD(n0rh6 zfX?$9A~CoxmnF^NHdyn9&CM1Es*0_@UZLf2aQ~7}RL@t@R1*ndHO&ighWoYQ%03%j zZo$KnKw1Z#e8w$mdWExx+w!=R^jEf+;r7@uMjS?mtrjFhA#F(F$W&ub!xHaU0nqZC z5o;Ld#EDGgAil0R(SePk3WComv!9!xAUXkvyY3k!NvdD*H;4~9FfEC;$t|e(gF$sv zd2L(RG?rDd5*uH?s!>f5DIV2mw~UhVWT2Cg=FTxR@5McPxjja=c7Osr&^eUg#UFM} z4U2gQVjjFkn|qsyf)mI9<|&Co4_A9Nt3^SL!NarsId#-zK#}_qK!I0`pntfO6Wm2p z8=CfSRmDXt;q3=%2+~>e2RbQS3+S)Uh6Ynk3TiXrLYKf~)O*M_Zp+=lG+Bn7v7gLT z&M?Yt;_PpDIOG&o^FFA3NC5zSmZ8FPAFHeGC-1R_GhdND-gQ7X?h?Xz+O*zQNcnfx z`k5`Mk1uZuHBE|lg1SyDwkaiN?oLuR{Y-DZuVA1AY;+H6{jz}bAQr@w%}X+754LkA zeL$!=guA7x4GFRr{OJR_lB}5zd%=HD5|}_9Zf=vT%Gjt{{@i(TIGeFN-=Gt==qQ)0 zUMTB8_q5t>qgSrh0{L?;paORKaqVCzRr;|H|C>UMZmtoqdU0w)1;Hz<>^|C={1=;R zgw1TTw#%WE2-z1oK#oiimm-7|OeMj3?kbiaqBtktjDTn+sNTi<*$+ub+}icI01AdJ z2{N?&AVd#c4;NmQ(;LsN;=9dRi+NYsO(DZe!CV~sCHKI%P$bln^P;PG#3Ui-YcnD! 
z;0|;x++<)+N;z{*anBmxe4;TTGI}kT9NZwPf{$3?X2 zs|&p{J!Uq7M=#VqdH$|dbh;Eg@Yq|}nR6(EXmm>Y57r^%uT`#Z1f*dgRwcKY9PG13 zL5Ztox)4PfS!N}jz){K!wj%`uzN^j+lmu?NWS#4V02y)AyVT=YAKEZ5IU&NKXjJc4aYm(VpF+WTDe8%Pf7WC^Y9TbV{AWpsFM(x|g2cox$k z4l<8YoYY}F+vNs6`0YJy1<}e+;2h;|C>y{GLS|8DE%{vSgFB-FR5XUZ(K>G2+=FTO za=+9kffZf%(!d507}Q+?WDuR|I?VlEB~85~ws?tM)2$nswNMe3 z9s>^@)Osb8LHJURK`1w?^?&pV3{@?35Rh3{OH=qRfLjwQ?L!0dN8zPKX85xU&#THjV&?jm{| zit`Rf&6y)h2)YPRPjOfIt{#{5CRuS<j zCaWEu*PTTNNlnv4ZDTOkw#cxyR?G5fQe^=s-0r}GG=PjK9L)NcSNtGasT_MWwE26GjRf^nZ{Kg;HQMQ0tSfDbr zqgtf^qltzl4s6XFXJU+HA+()$OGXTl;elaKzKCen$_hHxU#jG@^C?9#oDHy-EYFg|gAu6DLdj0^qrkbgoctiV*Fh3RU2EI*;(4fJ5SL zx5FxFGiAl90zT$_B6G_pF=LttiESoeXl-AW!x0Z;bqJcSML#Jyk25tsc%9cZ706o< z1X7=%Y1PkqOo?+jG9kXnXvj|-6C4D|@Hp!>;Q;V>&506B402)P_W}ekM4^!o2L5Kx_Kn%_5yQ3+g&+O_eo;}-xsVcXdFaJ$0k=_K& z5(zj%f_ys{CmN6#@Htv}6N*CHU6WQ%76T~`ADP78Aef)kIl`k9A*}?s63*qEm&N~# zlV!Ko5|MNW0(~-S_{02|sz)-agZsV9!U>uD$XV=Ti&GgpwKTCxcR~=^7(X*$AXNZ& zPWTa$s7x(dvY<(2zk0Oi0$eePfjm}oHHPT_p-NsE!OaLVQQjHxdLS*shp(MIizR3s z!{W_~3E_|;4)jh0!Ed*x3JUY7TM%EnlfHPNP5@B~hkFaLgq2@r7B>9$ydSzUDA(X3 zOXF52sVEYnqL@|kD+H=U#QLZJNJ@%`(dsysGH1o(7#aB;mF^~0nlxcj#!%eA6_rNn z7A3G3#x>ifnVP)29BOUs?KG{lo%bg!&s6~dIn~bIm_A0jaUSd zZTQ0UbVc^ET{6e5G$ZWlYV1(!(iME^Xc|R*pdEeI$4_CBj>}+zN+; zMj9(p`Bx>aH252 zqM-Dj20F1jp)=I>>K>jL4*ts!p0Vb^lCy8+`1ZskQDhc*BipO|xpi)jM*Ml1n)vQW zgIl<|+fUTbz+MDSb!-;P0Z_u$6fa5Q*0O?g_6=t+JvC8>{%RAUEf}3C`1r^OmO}>ee zA}jVduAvn6RC<5y!okq=JHS4r)tdia;~6+>5G9Se;;Ax?=5@^R&s5_nJh)enUE>op zs&ox%m9BdL(&xX>KM8wq8 zLfabXuujpot6^VAc+TX&f?w+n&G6LF^3r98Ii%~3E^WXpt217YY;})TzwXm+hPSgd z+3j;N+u{v}sW!bqeMPBMsdNKn6*||3ExoO-$+sluB0U$$hI?pOWV2nv z{%$ZI_4%jMZjZ|7_0zJqPV27HyK>dmqS~NNE4-77%3fCWhqAuxiZB7+hcq6^w_|T5BWts%z5^ zXeC`cb#tAk#WEajt3s944z`zQhW6%=W#1vU6zTX>U0grcj&`pr9gxa{sugivNO(T$ z=xbL@vpyXIhq30d6_1s!T_>FC>`^fvnG;)$3&Y z^sSXO7w-ZLG@t;&u62n@sqTvElvY%O5!H}T6+}pgkPsmu$KyjLVrUt>8b;1)JEzHI zv&*=--xKOyZf+51=Gbb64J$lyb$k@dD4S+NC53jVnY(ce3L$QcAw+eUA-ua=gB6#2 zJDgW*bJe-Jnq=G(qT`E#AHvX*GB&4tB!cklL@UKjB{?$ln=C?=h(%4Czg5*XSJ5@R 
zu&^FLpeH62o`r+~9+dH+t6GBK6)c&SXbcfYjEAWWQIe;Kk)*_tZO;uFWt1ajN~NV% zCu#>7Fpy*u7+|#lGofIrR!#eUDq==ZgRK{j_u{R1n3Vu*ASO7YlsrasTq)}!mW-~1 zkQjGfPNh(pJv5@!7;3X&MOsV=1xmazQ%^W>{Xw`*fCOX%1|o=HR}mnH4McGb*4Ggp z4h}>G2{u# zGS5dRb&|iO0RueHBm?0wzV<#WCqkc)ELy+d6V|0F`^&i8!axp)a>Iw4o_JpK0L{gZuAxz!{OIrMKN*N`M&d(~@ZUsqlnzZ^;@V~r~Lq>** zLP@zE(fbX!XSRA}R-B12>%;~fJLc$`DJE0YS(#%R z<&lwBDm$sN3Pwex133cz^n9sLFGLvN!J>`4yUcG7k1QG1nJq^lu80VgGZn)IqQJ9N zty1Z&r~2u2>-x1h?xma+I>a}6cGw()W}FeVI$21RB6P-*@RT)kJ;zGNBmL8APY>S# z^L+M%0{QGe$25jMTBkwv4F#(PMYs{4xd?SQ83HfA9xW zdP2e0wMn{T=uwkn2xhS<##eHji91M>6x{6UJsA9!`Uw-^RfuputD$kg87IMv}!JuX+1_Ggwjh6l*=|xoa6^z?z!Io5_dd`xRooYa09FlVXTX(Z~6 zj}CQhl2DEnalkBLpru@Z9sOadC-FRU-ez<;Ktn@lWRc2&!TlIGx6)w-c*6|hF3TSQ ziTmczqoZx5^j6UvpDD6Ov4UN4-9vKEpAoRIFBDF@neU`W;owl<;(A6NUpfU1}pfy6#FB$*rO2rlncSIY?+hX zA#r}P028_wihmP|(Blz|(w0a46z^kXhH6-x2+^5YyY+s_zXfbr+^v!)6Cr<7`c9Tl zk6ky2<0LZ6p0-;JZ?~yFy_3kPB-~^IoUPrI6w2u3U)%yUT#G!!c|Rc}J!;F$C!UC9 zbg=e+8btL|4=1mj0a~Do_ipj-;w+PYq;U^Q>g`8KYOl>3yc2*tJPb zsxmjuNc}wP=uC!iNFw3CmdFR453y8~zEyJo{0>DShz56s%kSt6bY!=U$s zbQgF}Xf6}N)#RaqCZv#^fK8w_6!_#vI&$a;atAg6X9*`-8>+HH#vCQflYss>Up1eB zU{d0@xuFcAK6U*A(Qsuxp-|%qoAC1* z1mQeH6IbQ8;tW)T?QWmIJ}IERV=?-cs-gFlgK@3?iIS3294C<;FqM;3p<<811=5Sw z_!^VegtKQota#5I;7PCn(!gpeBC08T`NS%KUW{mva75$M0AH;#xs2%j0RUUICJHOb z5k?iQIK0*U0n{R9+!U8qKe>{yfx?gls}rwa!Q@>AVHd$Z0I!X?Nju?c8e{W10z)7h zSfka5<;eZp#zdS{p+u9-#@x|Gwu6qqkM;b(fHbg5Dhbtad^3eo7a3-xdou&na|K5o#eag3 zw(ns^)ruX&B@`bE?9-qVfz;{Ro7m%Ob{#huzOs< z)9m5TNgi&i)@Y;QYjUB$_=b8=HXV~#Y{;=h87$|s1f0{2y%ss`R<-J0}4|o&DxiTxKp`oCPx&g0; zDaXbN1_S(!I8;}JQ9T(^sMgm~Mlz2!EKR1nC8>YdlNd>w^|D1}obA)Z3m6}az>6*+V{%{SiT*RgRXkwN~a!LdRzI z?v*2~U~@5m4TeqziH*t8Y+HfjRb=l}k}#mqu89;LK4~6JV>a6KV^qZ#!834rW8y97 z^Fw-0(0iJD1n!%J7GJl_O8%kZJR8*GY#l!gS6Ez3LZ+G>#Hk`?+UB6CJ8`D8N0LU3 z0ue0A3wug6izY&z22arMDPKqZxCLqbrkkN0CmPL+;Jl#`yD!O8`P2LG!VR0qLI*)vHbH z}AUigfGV?A*vv#Y$msO$^@*13preBjfAG;ets&4t%JVKXSOi znAg#-oL664d87gkguq}P29MqzK8^3bx3Kg;6eLXj(95CFL+UP$PbZ}Z@Jm#`MxI9u 
z+Pagc&<6Dt1zD%L)e8L)GBZc57a*YFz?B2liNXmunjr8`SH5-%Cg1|XXCG|W&}{bv zh5@#81`$^fdx)26iyx5aM>%?!huZ{(5~7X-H6ibBu5i%t{wgm0J}vwWE*n!izN8K* zN*=se%|C2yq(UD4WKLVLfu7Ucej7i#tBYJnq@-PTtuR=}9ur+`Q2_0j zoR|6!Xl>Cya0Y`kyNZCusluBcTFnOdACQBLlftpiS|T$o^Hk~ofMDPCy?*O05Ii}U z_4u@q02!=gfhb?MKdN$f zplAWw7-!-$knS3vB`fKt8pxg{yT@2N7x!5KhFS32^`j$je#RD@*@Z zg4r)+U=i2Tbr{;(`)a_JXl)8W5g?siD+aDSO`RHTdWU)Ns(yjk29Pc_{22H=vRocl zDBgO1-BdKfV+*uezhOgxJHw9y6MyR!fj82ItLNa^Ak}@jDR<<16rc>?zbl>t zp($IEIFwPkL#+(uWYr@iE<(4*kAVr}usDIlrN98f03lC>*VUQnsh6%t%(-C7K=5MV z%chaew?<&a(pSTvIR6Tv!wIsl$ZZBMSfR3WqcEf9|qB@FtB z6^D!qjSxjJL1H?!fVZlKHd@9y1dc-53uNE`i6cgdr3TP!j)Pkkpt(l70%w#~reQdc zfMDrj`*j8TsNLLv>n_`g+PVuRz;N>U6ThuC0zGhE_u7#nC@ly(8NjE~c*0 zNfMC-5apDkc~zL6b^Qq`fS_`hu}boe^@}l|5*9rt zB5jCnx_kzl%NUK{5P0$z69-G7XZvFavSDHpaCL}qqoTGVbkzZv8h~>^3(f>tJ5EUT zLMGKN4hKGXtV5R_K)|Kp8!ZACs!oSPM}{kkK)v$}d~l(KVq95H$1L|;MY71EBC(wO zI(apHO_~%$17iBPLdb=sa5OWQ!^N$*k2)9bh4`}QGWMKMsleZVS&0)_OG8N*5YJz9 zoBNlj2?&2kQl#ZsL$J6ozL1tUc;h4*I9STEydjtce+O(z7f~v9;NxeBiy_k_slZ+^ z-{dgqAmWgTGjuW_6=88$`9>Iq* z9<4x^Zfwg4-YvR<*MjO#CUS6@&l&>LfP+P7uOA}91<=O|xY5?uSPcum$8MJpOz61=LD}NPp`~oZd35xaxj%wy5$ZzI(;weOncQQ;f zQhE`VW{P)|_!vPo7bT?&+~f)KDrG-I39n+m;7!C%o;!fY7XwPZwnS<3s)gg5AZe^l z8WsdsPpZ=^*eC4_`i-x}ys9$*93Om!rvDF0ej51JnPM=Y1j2BS=P!XKG2irgnGvmS zf~0UYbZdN~0^y1L((*N{%KaB~s!2$l(SMLZP}f*ewl4Y=>vn9Dg7b(Z`H`D*&;55~ zR4{4y3WcLx3P(}`mwGDh6Rf)H^5s%O&c2WNy-vD${>N(Wp6%s*f^PhVLwv6dyWtsm zbbJt+&uJHI`{&ihof|xq1m0na;!IG8Km>>{%*u^ZpC|o{-D?0tH2THfcNqV~C!&Ll*w1EErFLAyjUUp7hH zmVGAHgQieXP%uCZB#Sz*W^K`QgnQ=H^x3c&`VWJ|Gdva4c`$s(O!OfgtT~_(BC7rO zkAq9mwg$?V@f|$%hz?wj;99#^5@`&sG@_c&;=0XdHAbE zY7jT1>1zM38tjj<+TWwKdTBs6R)DJVY5`doJ9))G_{1NQEvUzvA76-d?^Z2qXzFI zLNV57i*~J{8eKTZ2{`<@5mdmAup!;tOT+`e(4r|=6~+OARYR+ss%k-vvyeRt4LctO z{9(BL?ziD)F6?y4dn{ygC6zeiaUcsjAeMh<;UtTb4_stm>-jC;^0WI$guOubWO5&| zm}zj34wig9&nptEOE{5ZijD+<4+)zrWg+#$vxO4`D;h5mO5SFY-AuXaa~EC%7RKCm zz402trw1O2n!zjLy+1-XGU)ISn5<#wU{Du6gRrM_msZbJlBT6Kk)zG$PMP`%r)^(Fyrhw6xvAi=B 
zl*g9Vp+X)X69bY2_HG`FztcLGeAfQ*1CaVyD3C?*55`5jsCCe|ekyce)H+)Rl%m^e ziK5mytZcl@a*j)n8%54<2GmVrq7CGod&2hyD23(X!RNePG^Tz%*YKyoNd(G(8bbT^ zkSO8y8>Ref#JcG&QXC)~pqE*#9To@AzpOiW;Lw(Gu=qN8!)(!Nyn7q8kk!Xo%W5*K z=^`TQYsbau-gXZQR?K*C@oZHTT(IGvdS z9afi9bZwClF*$uZO@aC-OkLN$&8q@afNKpP0$i@(RPhuOh?`p|1;Q6udG-SnT~kw6 zZi^@}t!{%$?cjmMOEsJ!csGaB0!*wRznXI7>0C zYjh%7YQQV7cGKy`c)Jp;VDo-GcqqIsM2QHNwGVe@6?w-qVv}N>9Zm51KG#;IL4@s$ z0L20blS0fDA(%9pS`5`eDfc4AVH!$(Dfk5f6gE?figH4dbIZ62(&bc#v6F*a5>C)2 zp0dW$k{U;2nS&A`9?;w*!8MGyMfp*!DB8pXrhHCUz(Km|&~*K+-74p8DI zSMlprR=qX_`;*v9qCF3aG`&xv2ULT?nZYNqCz0!n4r#QWL_LtPW&VuZ-u@9BYzBkuZW775_3h?epw39>oMtsNtU`~ z+oPr!JBVO%z0>mI0BJL2<~nTCqX#h;7hXx-!WB!$@oklXkFM8PW+puz>WEve&T}6{ z>|cF1yJ>NyJR{Tz20tL#0~&uZ%_G3lbsMYuQh&o zmNHbv6WN+3f`3zm&ENtxxUQur6`DsX4ny3|X|#t1?)#Fxh0+|NuF%hd5OxkR9B+;` zE@f~P+_`JedSgx`Q2ObBIgrXuW@%v;tKa+01?f@F6K9Y-DHYwKwmT^_B4OByedS5p zlpXOBb19)NqGUp-q-8qrp+dpwgWx#nv*iqBHp6tg9!GR2w)q}{;bc>Yg5)av;wwqNc2z`R%VBw|4%z&U6t^x4N7H?AM2_%X} za|c1K4o86^GB*WD$*eGx1M(|x0%T76<2PkuH9qh^gZ zuWhd9PH6shm#mNZ=I5YZia^PW`qvKWC%@$&}2?4M&>2j zTlCKu!*Ayt{{{977;rvl(R?>&1RkPhC`w+*49VbNY%Xg+ke7uq!R}oz{#lnXjE{Y1 z(p*6Tb?k%dEAUc5|yJ5mNu;|t(r^L zByGkHUnya29GilrHpsEF{v{gq@>E0fL`6l@L@O^UbzWFS<&Q)55=MW75El{ZHTY}g zt9$Y8&>f8m#h^*z)EWH^D8Pw6pfFLG}Q~?A6m_?IV8Fi(D zxF{q;B$7PxR}0r4`Is0YIP1}cMb-t!j&Z$rB66bNl~M7K2-eU6fe|GZ5fKf_E~zl3 zIwNnci($d*MHU4z#R^WD_Hd5El6?zg)@y+OOI2@*6K%2G8n!0KWMO|@RPYXu9@ zbgc!a`N`Pc(H14b0wSh`FFD1y%sZK(!!n_P8Ht63SeQ;y*hX*0&Pf#IR~$YzPNJBF zia=A}F*xq3MYVr5n`|5-GbWNZAjlHR#GuSE3o5AzDhbM_3H)9Tn89pG!^QBD=NEDZ zc7Jp0fRuAY!(l8*l|n^9c^9OLT1wgz$$f4#FKojB$?Z@mP*^Kdbm{B;Nt+^WlOh$P z)FM;CsOn&jDl>C3hwIVyLKC~8S_}UF8wI1pyRz{RLf*Kr#Uv^yn#TVI6KyVoDg~9A z1I?5~v|0=D@nwMlQDr@#V2DWXP!@L;x`Z0uI8)7{Hol&z^@}nfP)WgIA`YgCFb4!W zIGP_VKYC^xf;YQd;dm;eP>7}#ZWC*D;pV8&S*q}*VBmlt3-JbcX_qOdQzsa34T&H6 zSqb+j+VC%OJ;?gobSE_%gU4#tqPk#s=-1jDJ+sgBeke3JkpSi)Kdx)O2NP%e{$r_ zL|-+p_Y{&%1bUYppZ5r*zo0H*dLpgEi30+}Q!O}6O#o;$_2dDVJ@*!!ag&3-*DTAy;s)6Ad)kNxmK$XTQyfKGeoKC#l6Jn!S 
zJNEL%K6-oowM^p>uMM5mBG*)bPzI%BQcYbAjHXb<0MwdNfG1jXJ_9=XObGoXifiq% z#iQsdR4z9n_O!YFQN}Rtw$RYCfvrI zyD1pO@_Z*}ayF7uV4g-RNh#CJbr&UWL;gumdM`aEew=Rt26V(;I_tQ;XLq7+N1G*2mt zOhbv(-*uw@u|sv&Y8cDRymWeZ& zIFcQ$Z{imAMAwr!xE^z~Q}kBVqZS(O4`EY zuA|4}P{{3eST~mKeMy{i65|(>7uFoc7@lZeqM$~|U4+;<`c9a1Mh?m1u4Cfwiy>Cb zyyk+29?2}IilrKa{5;BPMDtKY@$T?YE>A+yrt4|+{J5?jf9EXctb^9Qp8V}m3nj3d zOzJa$&NwTZvf| zmxurb5Ma5H!+`;I0RTuqfw08rlBa4qX@~vYTwCwn0sY=%rxCf!&UZ9Su-;|YDxDED z711y=t$}JBnE*&%18~4)2@Eh*Gt~@LlSU_YmcZX2|T-(HN%@tUA>oUY#|tgH0_3_jl}SJZ zNU;CwjysTtR6=2#RWqZxuws+&qriEKWWl6<>t^*P1ew9)g&5wA(G2vg29+|EC_zC% zHy1;`cg2BgtUAfWlfRKVBn0QakQhWlKzGNo&)_Lnv$#+*F~kz1FeN0!8DQCfV2F?e z1Lhl~gE(fephrIE4C5e2Mut~m=gZ0FH0Qoa1tYAOh>0PFiiO#LAZtJ%VH#Ff6hm+2ZA zxP+>Y$>#U;+CwsB|~YG#>*kjY>`a2W7I@au&-BbdbXvvn9oG`-ViPfQE;ub$Zy z`>;^SAO{3vDOM__YHDSgHyd48_}x6t?ZM-)@irZkkZC2J&^L(#O7b%Yh)JvyX;EcH z000000002#BLo2e1OfqrB3YCa1aXp6L=P7^s$2fzYSU$A?8NahU~i0S3CBr!wG!$z zK=6riFS3}nZ3lo&+(vQN{X0saq{_6*fM@TeS%-D%YTY$xVi3t^)@}p#u7EJT?m2pGr$$xR}OQyI1hW%lDOsCMG@ZinY zB8z&ZpoQV!HM((ach1_$jp?XEP*q#Oa%PpYxn=E-zOa%>2#AMEtk}*y^p#@XG|b#o zu`=QC8OchV#fn-CVmiDFlIR?byAV>X6OO2Arb}3n6Rw=ZibV7;LYXjy`Vs~Tgz{G) z4zw~n40K2_3tgBAMw7)icYSS149O@FLXV~h99`9eA^PlEbm~V{)=yw(PoHxU+#J?m zI(A*iM{||$vCAl+1)E$m7@=h0ya3dH9;tshHP9j!W=n`0t1X#2w!nifg~MPl$N?Hs zI3aZ)v6@H9iORD8R^)WTw|oE<~+IM z^nx_E@B0co(iL@XO2xng=7#8N|479wDca>dbORIbkg48gflVldlgf8fQ{24je)zh6 zY4hNZ!B4#o#Kc=omxAd7JTQ1=G}WjpZwq{`%_>s^@Jn>O5g-9&4W#Ff6f!&&Vmqc5 z9obD2BcuR1HvqXBGFZDsH!ssZ*Dr`j~tXS(meCj|IZNi;{JoW z2bxFyk6qMzY`js^2txESCJt-}aOB|jk0^u2Q8|ty&_xV&=z^hXNH20ttl+RY7V48r zt70N>cU=>ueo1h*qicsh&J9h7(y-&S3b^bbx`w{xW(LWG93q+EM~dh%JA|F|;uZ*- zB`rsGX#m@Kj*DnScP7yxGQ|2okUWGQ$F zJ)17YvOAw9)q5=Wly%lLJ=?kqxP^n%%s?bqQE>?8iLQv*bpk0<@g%Z6*u1AQZut0Z zG}Ltk0cIr4_q*(Ary$7a;3)mT-+c@T3PMGWrue=>z^-_FrI?pyxm@H|AQ(<_OWGd8 zkmDBNVOGTZ6#!L0e@&Kt96AfKoiSv5qUNqGo+@eC$$))vLNL+u>BG^m2eo8{H%maH zEXlH{zAhOOUB1@TgqcD74y<)z5L!Rs)1)zz!-9MSUpb;I!Un$YNESxW3iuAF+0do@ 
zFKkvpTC`P2?_;z^H-slMQDi!^sRcUVNb5ooN|MEo+_}8j`|H#0PXhkg|2g|C<6(>%$RHWAY9}qyf-_-~dss z_>KyznbMC9hZ_PAZg_4Y&<*0rNCR8l#PiFs+R+L+(ED=tM_48(@{1ruvzOf$;TXWr zBv|qn#>bimfHLo$Oj`F?0?>IuCoQRav> zt&m_t{ud@V1nu(+BF4|T0seW?7ax8!VyF?rjtlaIFThn6{)zWO8mNJNN5l!bG+c?Y zTMlZrOv}V#nX^trijZLO)eTpAfhveItroG*%6v@i$FR45ss|Z#I#@py3Q-KUv1}#o z`(83fa(Wgemog{@t^!$C{$Udn2Hw!)pirv@TjSRF6T%oS zq*`RiSV#Tf)d=%=fLVOl+YpSD9|1zo!6dL>*lpY3*dgd#4^#xKz?Va~@O# zy4D-S$<8+^=+s>w$We58CBZ$z3A0TbSkfR739BzyW3-C_G^(FL!(K(6Hhu@vw{R1b zzHZqX$q58qsnn3WJ3K%>T0LIfTKf0nmSn~Bsg$9#p=u!UOE3Wc-G`{TE91(l$vg-L zr=fuyCL*t%KFS6qUc7Sz54=hqFnB0HYtup#r|WotT|-6?bG=%uEEH z2W}Gtebu}~%OUMVl#Bea;dDB-l^TD{4*tg!?}^_GcV?)Q;pd1Ce@dmhwi|fe`_#Gd zr}&LfceDBKS-an!S(9C6$~%z8&rZf5TH}`6%N&%Ro_ppm_*$WIgpDKeWYm6nTO1%( z@`5YMK*WIn>~RCPj_Odu*ZCa;>ZRy|7C&Hy-m*O z7ThgPb4LiBfLAOkiI<7d^V>DF^Ao0---SfL=b(^_TeZxsvInW&%ukrYZpkSon)UR380o&Y?HSpmFgWii zDZ1FVCNKv=O1lR68xfK4+Xeg#mO>O64qIp$E7%O`&BfRl*BLeUD%+KZbiLM_PV|egpuYWD6J^i>G}RS2@V0VVKMnlyL`Pq z6D6kc=&Mb%A%LY8{|fDH^HFrg9KwRzO2e((BUZsd;5U^se!aB6l4AqR>LM%T5zdu8 zhBSwyRXM!N@s?eoV)43E=QSlcT+|>dGKpS;J@CoG#v&lDpx~dIiDewT_!-TaZG;whlO_l%`1an)#OUaP?AJkg4sCa@{6l<_t@Z!|EH)q;q#RI>$VpnZ;BHc0 zQq_$vb96(2L!Td@kdLKII)0jZ)r3%yd@? z0^C3b5Ljp|9|aLtM|YCEBnf!njAPz2p*8@PDYKVos!5UJCNw9j^`DR=TUrkQhyv^K zN*#jrVBu0m;VPO(QXV(Mlh;ZC;xA~z%+L@T5;e4ROGs!gANc7Nqw$zG(tVqVGy)aW z75eppZbPU@L@E%*8JNP_ssUiO^RK3uPE}*Go@bGDP*`B*te~prNY5J^?#Gb@nkB6V zSKJq~s5dqD?eJ%X2C_|{vQ_n)E10+_W(6p*1rL z#^~MyH{w9b8+0+k1;9XUQ~;KfQ`FO7_eWMI9$?PUCcDjFf~`4ENL`5&0@UDRR<0p! 
z-8Lu=oWZfJxdG>srx!aJGzzKw|0>f4T=3l5xHH)hL5TZu#u~HN>^$e93)VIrL*+*& z<~AT@4}=o(c}>KEJ2Mk_Np?3_kYWZU&{QvTfKDdW_Ii94qiqfm)~b0Rb}2>j$3wWk zZYs1P2Bm*XN+cHj@7NVWC-|`g{9iTu?%rNbybaj$pH;;H;8qZZ8kQn$6>+)+#LKVD ziCw>3(d_p|lNTKBgjFZ85U8k_Y|Ic2>5}EZFUN}_zry=XhMsClLR>^KqkgZ05{l{E z3QH7=4d|CeFooxcQQ>V8x&@$-7FrgH$2nGr#pZ}b{7R8tsT2SzbXf7Taa(0@<)4j1 zf@?<}kXl8cWB#i$JqWbbH$Hm`qb{e8b30xI5h7`z?%YGs*svKdCI;hfbV5>DzcTaRK!16jZDuOCl*x%EOpW(dLy= z8-$nbL^p!+l7e0B*GQVL5InCO1gdJ);6wX0$UA1-!F#)KcW1uj0qx%ceuMTYph8&{~Wk-MM!GHeq}xF>+L+a)46aKShKS zQcCO3OP16b7?3fKf7DEjTzo%RUvHv%XXt@@zMPUC1@{0*ahU7yQ$y=!5KMm(6WTY6 zE9EsnZZ?t)3%>#QjOUp!a!#^jZB?o{-nN+ymH?drXiKW}cSA?*SfN#grXoOHfl^{D zEqI!~n9I??-Y+drHQm~F!^(c@oHG9Ab^kCuC)*=xg)qy}J#1a-&Z~(coDCWYm5|VIB?&k-EsgD>)I}9+GZ=kf&F-IE zX2*%Yk{<_SqoJDti{wzzY7AFMUG=9NA zaFhnhMpE9oMD$fjKj?RrQgUwIaVKfmqrcD@d)de? zBvWl`fPhvK^iFSVEeN%tQ|au$o?WCVz~xi3OLg#uR>@VL1nN^ zAd~t0G#6*-QXpf^SpLAAqiRe00!!not-)^9)1hU@+z&Dt7}1yH`T1gyQAk_~ppaZ= z^Ne%*30uk%eatKh#2eNnn$$sd^QMlSjoRk!m(B}1*ZL?KKpW-k^upjK2hEA!pf~d6WDpnM_Ta{I#XVrC*Q{}`$R9mRlELMH0 zv6>c}8?afq=S*aBsVBoJ6XQ~cQtnY2ct8@-#GFXF0ktb^0m9ED!=0=+xO}--oe_!z zdbC~j!s^ZsZXl@vz9YBmVKtILe;>A1s1j5pTA@jxJ@ii?hZR)bpir(;#n!%vEO5pN zbT^lMswN&m9rexA?vUrEbZ7_?mRHx0m`+E$2U`e*$E>lc`$Sa#u|u`Ih4322tH=W0 z!L*gicZW|pppq78vv)c3KkZlvrM&W91Edt#uS(Hdjq>nnomU&-3TldN5gHEMqjsT7{YjZ?J&U6T|EavK5WRP&&Wz${3GQ(A<};a0Aesk>`% zQG?)9UN;zz?&hOkv!TTd`MO}s+Cd0^!d5UQIyAu>f3IqqCTYgGq3m)McbLj78ZoO5 zP>mbeVm>QwD^v6QEKIHtjVHewv+B-W4MN$UM9Mre8U>?X#bwAMt#4=O(p=F1Q19`g zkdkO*u5ekU)KcaQg=1_TqI?&Gb@X+B!6>pI6Bcog5*CixkrpyUH{q z6DOx|5#a?>$x3ZaW&>N~aBsn=`mgYqv1{0%ER5~@p^S}p$?skkhN=3XPre)=Y&|sR(i-%SIayD z2susGhF!N|M{b_2^=_7-O6|AnHcb$6ewjWwg#PJ^8zpdmHRgbS3I-bR;xW|qV*6r< zA^?EPsx}SwGF%KuQAKCIN8PfcNo^N!yD%2wLZ)nq zcRW@QaA^x=YAlSH`PCpFl~a{RI$x00-T~5P*M{-n7EqK-9nXG)BkjR!j3AFG0}%TO zWe@d{j)phg463EVHLwW$xS(0NHG62}E|5<&Qj6I}k0=vGwXgbA(7vO?Z5$J)ta@T= z7sV}1Kiud(j>l#j6&I$jIZYV9f=xc>;4P3fEMd+`v!`cT@f9z014B1#=@-2bupv2K 
zmL-n^sW8jAA{NeIuX_}7seC;{0Gb%62pfv_@gp_}Bi+T*AZkRSgnv2Uy=3WxZ;lhXJ%Sr?n7#okK-jOl2(LU3v=3)7UyV z4lwptU^@g}gTzE?2Z{A);c!F->S@%(47*G~bbu&CN8kkeUQ{^ta$pr7ag$*M2lkv3 z&BvQ!XqunJ2vE`Di7<+IZH6CZVGDRkGLsBp!3bSJm|jS;(k>^lUkN)10vl!jdGN3( zB-&w>S|g^^upb9yinwcQ$oe(S!C6Dlrm?J!EcBts#vwu3J|VRcubxp1R1-7oJB;^> zM#6@wGa5{Ow6@^Yd?uLhr17c*6-9Xkw$+}Jiz!*#SFv$FHQ%VEKE5U`U*xt?vQYOU z#km!cJX5yRWI76M%QOEZLH=vWGOCm$ds|Rg>ia|$#l+wP5_a3DoWxm7F3tDGHr}ro z9%%*&FPovq0g6PYyeI~I(iMN{MVX(4IqO+B9Y6#EY` zSi;k_Sh(%JR9rm8cQYVoLd-Kl2c-)IWK5B@a0OG`01*^20UFuh9Vh^E!eRjNGEj$L z>jnT~_x?>Ng9eqmsso30DJ~*KiPvm+0II60#0cd9!U4_!#;d|2Jw@oLp|Vg|ST}y7 z@D)+}RX`LFp@R(~gb;zD;b8*;2#ByGw?WEMEEPP%S}i$8tduiZzWCxxVnnbD9nHj& zVj_zq$@6a%b{f&y561$L#HNcaa!9A_Me;@6PLfePrX*Ko4g?no%^0Z#qST!hr9Z5S z$iD;PRZ1!>{IOqh$t63tM(?tWUkT#AB7s%9w6likjAdL7(OtI{T3Oi=nMrkdt=oM6 zY7sJ6KqwX(EHp$=Bn)v$vQJT(95FIT(LymHChjuFD04I;h|m{HJC5WMITk}RG|f!& z)DO#s<~LZ+t6udI%V^rMZVAO5+R)rwr*$?)S7SDGJ;c!P;DGXA;czJQ6%YW;9p&XR zxm=DxME~&O3Q@A$$pSeRMl^RqkO;v-7wa3QDuXJigot$&mWW!Xq(Ur0@)e1M;>f3j z!pxY-lBsE!;Z!<}y9kdD@!%^Sj&p1sQ|$ph<{uP*Z~z7ucr34K!ZI+k9C5ND)r>6b zXZ+^xdAA*d1H#OHcV?Td>~49#=alSTIjx`pPF+*iw65zslaic5O^VPZwWOsITa=j{ zUl#P_a#6;~Z-qT(SrqCR<+w*{MMRB+GdTSX@t{B)6p}nWf5@2p6z{(#6m&tv@2%k- zDW*%`bXeE7GUKq$hdD*hq3Q|TaheZtE^l}DG?2)6d9K%UzRpSzo4a0k!%Hc2(iYD_bs_+(ofS3PS!@nzhl*|7{? 
zS_d(hfDBAwy<-ud$$VOGJIh$EEYr>M66@+2WkNA^+cTec#%=5Ahh~JPY+aaOh+|d( zg9HW%3=$Y5Fi4;?&RIOB*^P-lv&p6>%|%4S!A!`Cvcq8la~@|E8J|!%3teY;@^%O7 z$X9c|OD{b-vs<8Uc%>fxE}`(%FYo6<*UOc*7*o_7tPq$RHOlf>S@j8poz)mxTDDrP zI;^8`nxbk!(3(Pe>#U6b5S3N7Tv&!5vYQ=iC(9~c2Rf!~1{jg$cd_@3Z?uH-=1~AN6o( z$T+`w&UiyK#>e{F+>Gp&b3Sq>ZQvD&EXrmTS|S&~1j7n9SvWaJHc2@%Xf%T$KoA`n zQ4Sj2kthdEIFwVZ8=3i>(|^MZv58HhOebDC&6r829ah>QrqS|Qm~ZJtMspDB=0B*w z{rA{}!Y;TQ{0vf{a3O*R4U%wYNqurTM_%%m0Xg)aDGZLCX z_Zu1$ijO<^JoM9y8e=&~#re)YN<1UJIA^sW0fmMJr*UWph6XG!{t5?Yq#Uz)ooY@~ zRuB`KM1K8GD185~3B@UHTtr0P?|!=9W>kmv+uRKeZ~skUvmbwd9qU>7aAZ(G1PcTx z5;Z?tMUhfvSrnrQ_QJ87{p#aF!$Q~T zST>`LHrmA`%ruuxKROO!T>x=#V4-1YU{EB`)#a39Zc$tda#(M{BW=c}9CDWZ^qd?V`grV< z7eo}VH_YA)#Nq-*DqgWY6>Cbp%?GdMH@{aN@91YMKX*RgBTlmtv6Pk#h;2*-uztp* zk)HI^mCX!9OSCcCoei~u>|iFrO%0;@?4pP*>|$|yxJTSn+)4LPY`Gpv=JgQkb#YB0drwhnAETuXrOTqKTxqqeiPb}? zUA2Z#Zz{EUq@`2Zev~4}RQa@$Dw}3aZ~2n=l^{o=(4trqWu8<~#(j$@Ro#p^&0I!;X%$wEmLF_J8f zUM&zivC+p_)j~5i<1NP^(mIq5Qqo%5o8GRAE_xt$M}3IA3nhyOVWQAd11fls0fbUv z@lYXQ(BJ{3!hwMTfT@}2JOn_*&&u*O&9tpTk@zfw1O^Fqn7+EhL?gOr!or#wrt|Yt zr_X6KD9^65>vHal?+qRcF|pJhRxhEj8XB+A=sUAqS(#8^4*Eo8MOAf$Cn`K;TUD+n z*}$^;RcDx4_6V0GtV^V{qrAXyfg#hfgNMqX5kMUryi%dA&Hd1hT1I14WR_+#I9X#^ zSso15X3&aUuGe0&6Aj)b+kA`Ed_UNmanl?ee2X0T6F`AMfv`9*B#c6VP#_El1j0Zt z7!U>oLt#)b38l6)u@ zqDZKf%zs=L{ra7T>a#Wi_r7w#~I@?xK zDsKXCE(s4`EN|b9j`lY#qJ?zJJRF?Hvj`4@SkIcLF}aS-=V%u{vZG9d(F1lLg7m-a z${S|OBNu1vzpRV}EY*RbNgD?u(VYX!YUd8pgy_`!#S`v7r&|X2<1j!z*)n_S25Znm z2o?-5|60TGr*~}!a|$2!>ZT7&KWbmcQ|3%@2F3ZBVOShjt{AEe$uE2VRBM+y0qvw* zeGjBuvpc3`yw)iREr9m(kyICbu&hy3TjtJaYfFGrA=c-fq6IpPV@{*ErqP5pL3Gd< zv$9?edc*H?6^-cFl0z?qQ_*RqNsHJZ{m|M;`$w?H*I0~Z-pwbHvbh);yE0}xUrTXO zWJr_IFZB77+r*N~mT;4_&P5%lYD`jE$*`7}$3^5gxdBtKmNS2!i1@6bE~=#0bb(f9 z8K(PDv8j)~CWDkfA%oJobdcaA!?J!xUjrjqq<*eNYZ2%NDh;6Gr)X zuS2A`u;ilVNfK|ah2^WC5njGHNl=b!M)W?;N8APV++RjPn!os%(12=qR^oiVe=*~t z%D&AHj)F9zZ<3unJf^4|tZ~GU>c`&}O?Wyh{pMb`NW6?X&nNzkZ6x^q$5VPD#`ivk zbE8L*Y&APz;&tE~&dUa(nH!0Uw>8CYWP|TC8dIzRjs9i9Ynj7&rC(VyM9%{~^@8|l 
z{n_CE)#qGklVmB)mmZ->8}YqZRyjJREt5Gw=4`}rfW1Jtb10s8p-{)xf5E8?cJhV^*{MvqtrKjrXEh=BS zdb>1%>~^A#C}k&X<(q5A5^q4n7}YUn}Yvx zb-HQX;~8zKU^czWm2-i|X7JV(tztGpA2#Z=AN~9nO@B60+DR-nmo2RjU;%*Ff^twP z0nj-Pn?r6~$nYCcE_8MFm5agp%;pPa4#Oi0E1c7u#WDO}(TSxhqFYk2!s*sojaRw~ zLSPx#1k%Re7Hz73l50QFU|oZ2h}J~IwWEgwgK}V|?_pUm{vSY$7cyW_QE>xAxC9F~ z2XxD`ZU7?e{s$nE13-WXM~9}I`{xH7!uQP zSI{8S{0!bh8YOyi(^c+M7VRV0&^ipRLj?-s{jjYJ4?%Zl2dcqAZAFuvp?tpWNK%71a(^n9ezr%oSKCX7X9*=Wb#PdBW2>U^Y%%V|&c(kC?6UwmkZ z{X#!igd86AtbyBKD%FT!gE>eNm(s*@qoPVny4M8wMSWvOzFkLr zKFNeRrCYhbBw|(PV{~AJcqzN{H)k^Ds#~e!CHiaMV|_fSz-)I0f)~a*PCM%B16e?0 z^;z&kx zVhx-w$Pw)6?)ZeVqxpRy#ah6d*`vO^h^P)U5~={d&5$6qp85z<`|<1D!T+wnIe2?6 z?aFrRB1j!6Qdh2SHBg}qg#owzp%0>>83kzYIhY(|vJeWTqUW8Ru3oqN1v^7sJ6K)c zYkKp2;cifvq(D|0EbNJtuuFYnia8nBRus49R4v%p$da^?2)j>`cn2@izk&zf4+xl^ zkvr!N)2!uU>50!4ubc?a{Y<)+twh+ zQ|BcMK|}%R_GL~^wnrN)tjVhp(6mPBamM#E$W$=*U5KL-|9aNY0W4)2zKCZ%CpQ;I z6Q7U-mi0-#$Ld)?9%_U6qq152^y{j72QL5#)Ee=URqJSkfw*W8%x8k<;-_#9JQo}t z3h^Ofca?KppiCuZZpdQNQ;?1MiTw#Rpjd!wYX8r)&ZU4_+J41Pt{?UU?eeM-xl3v z$EWAh_ho}yzn_=S1W^gm=biU~H-VWtXz=({I~2h&UQgv_=s20%!BFgtgcF$79^C^16>mL~+o>KxwU$pvAr+;oRcVB4WR~!|B zOf5j9?Q_oU%U*s@C3oWk^Fn4j%;l$2i4hVh?ygz1k8uwi+XLW+$`+LqXY8ip)p3ad z)WQ$^&%&4#rSNkc;H}c~=P-zeJn?kzCe#LJiU2Y1rMHSh6aI%Q01HA}OZ>POTRNgo zdeSY6pxZ4{=(D2wet0>zj?_8ZY11DL~H3{ zyEiZ36rLP*?i`6OPXSqMBm*H~Gx%q}U{#ga&8dK!FLK~{7ZVEK*w_>b$&PdgEuhJB zoe9Xmz7fE1sh*;M5J$o*IM-_tL6V#&FupK-lh({iTLUcxa@ z%$)-BsJh}g1A88eLY?0>ZJu>P`o~mW`(X*V{o|4ZLGEi+v=ya31qCY0lQDRl#SVoP zwJ|=pZZWOX_|van6df*7VnX9GP{brM;8FVhu)0C5nk8K@;4@c5NLnkDLUV_jOhN~4 za`tX9Z3`e}pADzkjkgN3M7R*wQEf*(esdA)$)}y*lrm>NoA{8?T>&Cp z_!}LLa$ra&dLW1h8?5hnAp%GyYt%8A&;>JuB@s6mqNi)ewW7?rE__nns5@D1frf7N zCuRM?0TE#?=OnmMx?!VWyE_TcLf7GB4Vuu&8N#323i~M>5}haq8VK-lwS6t>lhe@8 zWk;A9<8VE$sA13IMjB&R+~n>0ko-N+U+hX0@3!%9ne)yVJLqQ{Pl_8(Fjd8M@6YC< zqJF$>UVuYv(N;+yW(B#erGUZ>IaX4Yx!14A!X|S}_hP+TiB1#Vd6>lpeB(Rg36O=| z&B~h+gWoi@&#OoH9!-On(^306A|Le&Pu3BWJmali&hlcK|Hu0j)J+P>xuL>6-&>9G 
zq9(p{Q*pCY9qk3s$w#wXyW{DXk_crn_vni)2#G-pTT##VZooEWc)jLio+Wh^#oij? z)3E#d{TOC6B?j`X6I&u6Pv$war9;w_%acO0lwJiS6}5-aV~ufxqON@Mm87g3PHmi( zqFP@8Pt4xw}B6A7EakJ`d4 z;WicDk~T`8HTvO0MWN4NR4Fj(^~P>qP1`H7rbar$n;A~6O1QZ0_~6f*7iNT^M1>td z;ASvE&L=qqvNo*zD5-nU4>)k#`T#A_*ULA`K?`cU6hI)%Pzhu{LgqYNRkJV%#DRwG zWTz&C1?Q~19f0JU+k|084rs8b3ORykGzQoWdg=l`aE@jm0SM4EPBo6U1LLvFT{XfS z2t5nbk?J+bXl*R6r-!B5*LW&~7CleHSl}X^BnZaFu^l zrl>*N-um^M32^Sy)c-FH1C&QW$Tn3NHX*zQt(;8y@G%AEn>}y!!KV46;k1A7UoCo!&l~FFS+I6Z$n$N))Q`+h?UD0GqD{sH^ zwXz_D!0^Paut| znw{DXOzEndj)AY6(-x&Ya=a96#D*olv_g7uJ4F-IQvn7F5tI-e4I!*3#6V%KZYr-6 z8gA$BDR2A)j5bv=n1CJZeFiGXP_mK%B+)2`cu*+vn49s$)xnhx?_ABt(-{iuvR>NxIH(o6q5fksAf?cafscEA@ z%nBiy0SIyn=pdth>Kitvln7Y7K#}y8<~&5R!UWf;U4PsUq=vP;}irW#xUE9 zul3i(sp+N(-4(njV?bM+bQZC|C`eN%NJF72epm)<@`vK@WD6i?gWW<6A}Ua3i$Zq_ zF}&SoIw1bPi+SF6v&Tg(;Q|d9+_o3HRD%R0&UEowQ5(IXi_fzD75oidoqGc$w(!-VnHzjZWp6-BOA5{TNF$n zfTS)_p}RHU)k;-4C(>T}eSx~DN`s{{l{KO4i+a_9=o=X0^<~-70`W8w;CA>A*;j&s zP>5Rr*&tI{>;G#On|CT?K#c3@g3_%??q9Og$H>a9B#$Z73g5&`B} z!A&{PyaY%!amWbra0(&#bY7A(Y?iO{VO6P>)q7Od(1s`3ktx-vdX4xQ_-`S=MBwXJ z)TAov)C6KZ+*PKOPiwUa={^A^q7FA)%QMq5JdRyPnT`mXX@A%#wCzBdgIGoiDT%~* zxR4P2Dq@oZSpC_GC*Mi}PBAM9h3Qoi8+lbpG(=1#Vc1bgoI7{eP*~&F?3|OP zMU+(VbUdz9yd<#c zX&9_O6Qd5;RhSsD$TZ^3T|S|AELmI8bY-n6!cB?ifSIp_AD;VouVI;vr zaWEq!oS7sA_ofKgsb(sYBSLp2tqkDkPrlqy!d-S{zQ_*Gd*{Kf{TM-Hp5}T~D<-f% z0B$%6Hy9T&x7y++933{FXTJ~~Lh>@JE_QNGom(v zBwLrh)ZIH?r0Tm$aC9|l?$8|yDxT6niXAgYiAjS&&j3L-Py~jhR|+2P9*lpho^v!vGHJ=wc*MNYmr8ydww_KD@@q7@7pom?p9EO|#H}p?TXRWUYOE zVfKLr@j02??vrlF5Z3(4CVZri%$6v`tY~g&0La66#JPYH2fXMipFILTIPHffC_>}O zNYoo~CcSGeaO7ID=%~c=!s6E2Lies^$70>$l{$HyLqC3JUm7S#bRnV+IwoE+ z2RpbmhO`W$rA*4{l}wQ@lh4tH8+Rc{c5H(5&2JL>4rT>1Tk>Frw_V(=dJbqyxTM&O z!&cS0x!lXyR34ELJTI_=V+ofDz=Y=j*ktjI2+Sdnq4>N-#P^*=NF;9&&*|Omf?qJm zngNSm6>=PhMDb3v_1$&Ki?2s+63-xhSA5sr2@4}uQegQ0?7Ns?>KTTS=t+u=-4O1h zYlX%^(9h=Q>f^(VG&t}jY?p_nsP8V)wskV!aI#9J^eXstX=FIn88>EH(CRFyK~CVq z)^dycj*RBlx#Y&xs(Fm@Wmcq@Dluw|A2!&16H&I%&&!6xg1fxU9q|@P%i-n3I-qje 
zc&5CDp6vuCaj{7(8IN*ZDc(^n0h(2l)sPIT<>0-eS>n2Pjh?yB=~GMnlTR}(YS|Q) zsvg))i4gnRSRp{S zQ3P-|z${`bSU-u}M@-1^xC|h=eN%I=&-TzyY-)DftQ1FA((5?0MaM84?9Ns`;*|w( zux+F^H1tU7d{f63TAy^q=7{??)UQrsk^BLs*{ueRGuuqLt$U(>IwVlokYcA!8pDD>EbXx!NVuFl zz8H#qjw0Z>N?_WW-drEN20l4@!oTi@ozwsBuMw-W?m^*Ic&Ig0h~|Q_6bKxL6IA+% zY47=9Zvt;C0k|G(FnB=4)9y7YH6BFlx1wty(FY8V<3VFsx7T-f5zGWS>^Mo`{gUn>Ay~GfI9BBI&emc2zAJNi;~+~ zM|T|=j>tL=oTo(%z^2*8`E|N9(7L+=4L1`UgCiDXV;1qqW!=I3xNg!h8Lk1DocfRM zg>>A3hj|gfr8C8<-CTR3cI}>D?&WqPFSzI5>|XPva}Fs!kFgU0BhZUZu_wR@7?{xo zC^8R!E?z^d(AZs<Hu0GX*)(1r(=1={2 z>`zj;d02xJIszPxB8o~ax&74ni&_%D$(JnQUeP#roT{A`k@M@~2SC-f<{-U(<2@k@ zL_4yx48oR0@nK=A7#7Rayv$D5T-vjbM^BSdZKwKB^jQffoZtqNS%pyuz8jeHxTDgqzlwm5IU~Dl z?XF$$#2f}Rfi+2p0NfX_d}`#_r9_?oRv&-Q4o~&R0JKhBWncjzHL0P_BdE-1-y6Uv z@p5I@m(1O*fvdUhOCZp_Arhgut7J|T{smLMwK69pnNuJJ5Nia@`i)#eDo5xtjn~=C zOd{BKE%%*x;<8@25zR}Mk`ua&dwHx-Ab(ZbijTL?_wcH&Z-2v>k>}K)W zf7L7}egOAu+hwtI2ajqlJZu_oMjCp}RHR05XiM)wH8SAId~PllM!^$6&)*0D3)iw7 zVk8)$^dp)RPrvl*TSU^xUA4h5Pe%&TB|~}W(5pNu(!|O}lg>P;o`7*G9{#oxST?lC zO?d&+tr)iorGQ?&5h-IjvM;>tZ+)@`7|mULr#8|18L|eMnkT~Bp=p1tge&`PXlG6t z12-OmY=NNtQoj>~_lTyo=}EroN9~c)cKlEPT8R`93~lFH1b}VcBjhrOSwmlFvvLRb zS6nv(j+I!TBIP{E9vabnY#)txAv`0xsz7i_K*3~+Ci%b311K;rsu^Q zT3$Y(!qfl}Lqu+<6L(8dSx_N){pUvnJ>Ymcbk(yf09vchXQRlH4j3E5*sYDwanl*( z(WL^Mc(3$o%$IZX=RDC7Ou!Pxr86S6YF$GSxTEBM|dGGJ^XH?;6`m`oO+Uu?Ry6M8PZ zhiwAnM*%9pu}7e!{pUc1jEHv4AuaV0Xv(n~KQ zy)@29R#2!I`-pNK5sL74M-kTf&ZIPEswHBnazno1vl~|wZ5oXc*@eX zf(^F7z)#N%GzPLt3{wV|0vQ@y#jtQBX>fUg4Ya^wn1y))7s!Okf=Pl636q2{BuIq@ z1u7&OuBWh|z~ICH5(PWX$TGT>cq;VSj3t#NrU8UHG@QhTpAbA z)Hm{4&Cs1h!)Qo8p+TjBBtTKF2djf>%#G499_zTL4S?yKEsuq!5k7O-v z8J@(U8)CarCuieD)1C>9i(ayl%bi+MQ>a!)XXTd4#57jD>Ml-Q%Z_w+ywo&QL!L`@ zZd!}OQ(lEPMhmmoYVwo)DYY@GHqJpp8&ny)(8j&{aYnNe6RYBCh?QMte#xE5t;EhW zo2^x!8c(vadQlnG=|eR-uW_r<#56gYS)EvGs%-FB92Iv~RW&Y4RZ+{PqvBhuvicnH zIMri)ida)sW9n6JYiK}JZi(AsJ5-v;I>vkk+h;B-ZeIO-9rd%!Ag}-k5k&BY@Ie9!G}I|kW=J?$kc5So?-nMZ6c((& zT8zAENHko6;g{esIpPy!NPNNeBovWtk~mC&K#&EH0SFyTuXpy#Hy)GiZnLX%M(UD` 
zSawCOqod)GW?X-rR`pAr=NMu+>H6JGXU>^MHol~!8JWx^rb#l3ZkbXZtH?#mX}33K zxiPcXAJWOLd_`eDGGc{@L@&{dR7Ah3k?;B}{^n7;e;oPmD(6Ak=Rr1xTxyKRJ?x=9Y}>ZY!LgYh zH=Ai5H;=Q+M<$cWWHQ-Tlk2*!JzUMLL8{)2ND>-0mQ9hR%h1VoGk0IB$NCyN7Z%own$O_%qY?JO{)_3P1%z zL6oIcN^4kiKv<`&q9R&P>0gWyhZLIW%(EiZ(L&;~SH$mJyAn+Y2_lt_ufOt!N8^pv z$fwW$Ux}S$>hs!seju3xVooH(Dj8pRB;(sHbVqjxvSrMk)6u=|EnH`WkM;q#%iw}r zZ9VIm_2_;((EUbh4~U;{M@4n6n%y=ijEopu@Ut+PG1l}gnomhp9ioEr7YyOT7do9? zG?yWoj}I84ptcMysNm-M=CZ<8H0|3uU5FKle&d^RUqu#?L}YC^wJ}}BuNxN6il%6$ zY6pan^WRyS6`4K742^}fMz~BJ3E6vb3g>2XL}n|_mnm9vE&77UdVWcbI_TrF03Z?! z1|#BdNTjAHkOCol6TnNUqIfhGiBS+lQ4GT{48kw~!Y~Li02pJ8LJ%=!2LM9D0(!Ny ztD~b4Kzyi(na+yoXi#h}B$f}4?shCH_|doEx{wY&AUM04a{;IL4`XuLQxS_ zVvZJCEcq$TyfEs}f`-uOr4WQ={-P;y?1SZW8(l3JHGTwKTBT#O2&DOoJGG3m88c}< z@0P+(Oy>ODgy`cHnX<2B!@r|&lAbN%R&-buw$;X+9l2Ru)VQ9MiLF*wDQD1v9ECpK zw!~^N<#usAdcax?5+3*^I~k&=&_YUpD}}!{$+z>?)$=}6JmLc&z%$5wBUg&#AJ{1C zhj7>MPEz-g&~UYEZXmS_zPa6Gt9q#%hFG`s3$PwWW2#5dl7 z!MMo?uu+QSD(AZu1CAlc9jZ_0w61IHm9GAZ>~@1upUh|UNIX1Kg1QzVnFe|`AGhGCgGb<0VjiwNN%<9}v^sqNk%Mxi zLL~-mQ3!C49tV{Y@L)9uYS02BR(V1p7Y)QaOO9=DrGM!g-56G$2Hzm}i-`Q5);0n$ ziR8YwZh%xJfwBWa+jt3n z#7sHjsj`#uKU8T!%ZjI}WbIh_m6fIjlqU7={seG#)H3|BVh6Lor@}!SB|z&vv=j2! 
zeV{t7*8$_F%3e6N7jM(kHCv;#5>$10`d4#!(@-CTDg!%Y!I=KOMvCE1& zI~v<8em(qe1&#$X=#@u}%pFQ;S6>b@o{gi%S#buAh|&0!F{Gm;$C%p%P4~Xz`B@b8 zDYzt0wof0eh6%9&r2U&1`hRJI`3&0<24shHL;f{7<)Bp?;rkxpq0*n6}zUoIm;lm;p?#&n0KWS_Gh*mH!lo)-0nls*flj9oC_>WGk?o^ zK%QV)<7156k-C7uPBsJ(IA9{A5+^JJW>J%}PSe92*h@m*f@%*>{AGEyj`?i|Eb_lSUl zNhk&B#vv#}ieK>9IN?@kLvWTB+Ye05d>TTlDvwNtfb(yc@oP3>b7ghOA4qW&D#Z7S z4E;S+e@p|aj;ybn1{AI+njJWqMYR5^r6syuE4}qFo7zgNIVjWWMcDQIk6}8NpZjoU zvkNxDcVakphyj%ep|u)F<~iYl zc?u#HBr=F}#9Jm&iS zRg4!dpxjy_P-r(!bh+O;1~tb)Y611k5jvl{iRrE~q9Mraw`mZ_$g%KnQ&6@tqLaW7 z(iP&$h@vwn(SZ`UNB3hB;WhvqbAm%G%5Ub&NoINLp;XY9yV3hH>!zQWfa3CruEWF{&kPVvW%GYj9$DboN{V10PWAErgw@A1k}O5y;1D3~ z5j56LI^$KP2nvU5VjvQeo?|4a62;~8_0p?dq0UrEOaG?qn5kn9o>LqSkVJlmYU4|zk24LVTJLLlGNG2~X_i_};C%MSs z8-$4RKkvgUP;_5VPB|sYkwB%z_v}28< zMCT-FaSb3!h+>p~LO=fAL%R6VHwsq^%pD$^`P<;85H>&&_0aTrV2Py;e)%py9t4ZO zp5jT$2Tm01N@B>qv})QKDB!@_`!Z+}JiM_vPz+J!CBjJ_Sz&KP1;pPapx; z4`yV4AJV z#AF2KmvYw|DpRIe`?+Mczc#l26%xPEr9n0v(}yaOBp}`%X58VdYZjHwSiwIDs@9ZB z*+@$Kx*bwaMrGd+T1adIZVteOJ&vymh#)qJOd9F{tQ?Gei-SWDTc_Euc1(vyW0VmbwTH6M% z2d}#Jlx~^qpsxjF%Nm=_H63MJVaC}p&Ps?Oj~3Tbnhewkfv|Z3R0@iNF6>|^VR1lj z40X9yo}A)ZhHjqXG^Y(!5$Y^@b~>3oYg#rBeozI|ox>UVg~p#se`5MeE?fnKK=h5; zDgh{bffeA%55#eGT&i*iGM_ifM%}o|k@^ppdtB2cBj-P}n2X!!_5T8WL*l;!faTmS zwiO*BZ0*Kb=BxBG&@FIBh)(K_FDXW0(0<4XOKv`Pp-#$At7zIS8`3ziTr*hi{-AgDUB4;9d#_sQP#VTdM0@Y1vhCe8Nr-#3j6eAU{qs5Sra7c9rCB}OW(PePO8(% zU$=E{rLR|D20doH`Hkmh9(Ro8iY@|?_iDVqfypNL<9>&Jv!UEYu%RZNohd~_k9AG* zAMx*{M=!K4P%?|6n#7btu!WHS7%M24q2%#5SJ<#yM~`*k;vX>vN`f z({utjYp23iE+a?iZ)zJ8OmZbd;sK}f>^axwrG-3Y;^iG{G8N{8sl{fsH(##(Gd%IQ z;wiu&xKqD%FKF;{lL%e@5;M26)qVCo6c#*6{L@dfV57%&(7~KTQ22~R2u(6-oGvSt zU~G*k$+;!|HJ$dJb1oxzA_fRvqd=-whskmz%RdLv}de@+dfu zL{(sKupi#uM?-#TZl4`LGmO+L+qK1qGMbaaRUGc1|MMs3SniKu%?iO#BtmAIZGs9_ z5!-jVjgcWa3X34-;?OjuHbd<%;wlFBCo=Z{D=DM6lxfrd+Q!HvjSLqrs-Hq-T+b5n z8;MTEQw{6hjHmCn;{GJVaxT^wk!|$>thZbkeZ+Ax-+lgZJp-Au4DOZ~nEo0dKV+$R zKgd(!=3-q8XATAbkOg>n`Fx)GEL%$iH@doRATyw_=S@2#*+71bwl;I$d>sA1W_aRq 
zpObHMUoN2MzA6MDpUXrT*MV#H(#L1XT{0^mdy)F13Xs*diH%PgjVGzZj87IJ!W0AJ z^I4(1_G$6Cb0+fcsQ9cL5TYz6KCuPUX(c2+sjyjNj1Zq54Z@_Uc=)W&7>YR?KJm=u zzyl1Q#66<1royMsewcBKgwKlJAt&L%CtfhQ$HU;0pfBniB>42@?f{4ofq~D;TjO7$ z#^_2I@afTy%<%y5nHI|c@$~l`?m>+k==b!Ihvs;2-!r*)U^US99CTSq8Sy>+rndd1 zE!I%DTj+4_Sz#+*2>$Z<`Sn6hrjTmSLEmy=Z={%Tc<7%@o9DB?V+@}W@A48SiAw;* z^B%)^>W|UNEYd3jwjp`(WMh+IY@WmZJ;Qo+P=trBi$+gW!612J{MO*_zarlnVn7n! z@g4H-A^m?7u>_%I{sT$m=M*tb^t3a;2gPKuzsM=`X=g_>XS zn>A7(xugq3Q`RhUlWcoQ)ak^ca-=xo7zJqr1;rypO_~)CHU9;H8GrmWQs9ur!y68W z6Hfwr*bPk=BM6ig_ilf-l7e(z9b%PIYp^ow7C?QzV#l>`V}i^-f!S!RjZ^f6!&2Y8 z(aY}8TR`?V-@5^arI^Fs4EVHy z+Jlw%eizC}vo`UI$MgPcq;&|9zI@#qpmw68sdP3-Ai0|gBCbKj(R?l%zFhJ^PhBWW-qreBE4jK=Mo5h=KfPx%)W z3eC+yead90YxSah4QFIXPbr3VdZ17~6eE(&q3_x?Qwl+x9wC%3+Jvl^lyZvZnQ|Ap z+8?FJiMvn`_}GL-DaSKDv#n8-5_=E$N;&<&@yMY}W&OaZ&^;C+c2gc_$?CPN?ttjKgQU_*m3o{l0QLc8X5C@Pb+zN~dR$j~1?pKg*L8td;@2~M@~45|RvpE3 zSuG*TDkO+p&u?CRap?dR{VO((-Xjj?fC1LrVX0ZLyUS%-?L`<^C=qsw=2M*qBqMQc zi))O9b(KEED#=Qwf?E8`x@oSn8c-k?p5+QA>;6_3Q$3!oz(y6cgz(8rfP9AkmPW*V zpNis3CcuDA=79KeuK{W+Md!?@v^c`Cw1Rc#4if!csPiZ~PdjPm@&)6FD~Jp~{bmAp z?h1o)=z8kDg6u9S3OMxJS%THHnBm$k5fg@hprMbsLa3;ohF98)nJ!qH4yWR^Z^rTW z{(l-qs}*s>R7?d6+d* z4;Ki1J=0Gnnx@|tZ%rF&%Q}P}zk;nL51geu&hsmc_&`k9L>&4$dn-X|34%8oKC=HL zr>xT6(t2LdlR38ryGQ;~9+0X6(h*+SH1)L1nyZ9xxKw8@CO5u_h!R3;6@4j_&Z1O{ z2L|H8P#0N7vyBXQLx&kH7NC}$uuLhW9kVClvBKAt-!Y|sii@N{zE5TW0lBsNviQqA zIp5jGMW+iZKB-Lu8b!w<2xvv`gHrW1sB+ko|Je@Q*ccN1)m1r_RlcfO#gIhF75Qso zs0T;>kJN%OMx?Y+m1;}FBZlNBMMPV>SO4w5Cq&&v6#GLXzX>q)a5It&N8GnXSNyTg zeo9Myxa?c&Cl=daUB^c#5EvW$V&V)Dhhtb>26_KQ#abJ7<0s5~CF=Jx-UwVbXf?PY zi_v&qgsOyd;UGmZt}k0Q`zlu`>ECi|1%<45AuygaiI-rAC;~(*ygqDzkcYm(>WLid zN!xEJUkEd(zefmhg%niQa!lCc@=(R%b^t7uSJ{B(~|s+5K&?cxYt zp7niY0=!VG=*tnv0Sis4h-Ty@iO@niSq8jIubt{5Db9_OLqqHApf2LbISiI$&+;WC zh_YvGNni=$CtEwf!Kb=X-AwYiaiBXoL?E?<6r%-QVYM47U5-_3&kn400NV#z9;uWE z!J9VDpC4RJz_)b}qv8kD%Zm5Ko)iYxZhWC2VC&1QCHTI9X@GGMF#_QHR^vd5{QRWI!5@d^aAuk25}))qIG{WCP5VkYj<6;(>*V&R|9B+4P7Sd 
zKR5T8nqd^)F?j#;tcGvMXWq`a1bUrJge8s0PZu`zzUBc<`YtP6lcXgEX5!*}%#{8B zVAFCm!cFFcv&k#-N~RKcrhf#)C7Bci*oC zHvRs7Lz*tgr;}#0)`3Bkde;M9WU{dl7i=fgsKI3JLPOJVN5+9l5rW_7Re()vAJX%f zq%bp2SY-YHx)63^ceftDqw0xN%7xydpEBydh2}L}FJ_Y_i5hVc;D!zF5EP&3rAh|d z#Mz)o-vf!fB2y3l0w0^v$Ujtd-QvP6%#7yI8)U7b?uInJvFD}U0>^K+4-B_E<*urM}I_nS2BAUhsURFzzD+88Ny z5QG*ze>ma&rln)q0U~D{7}2wM>B)f7y%-ZtUsn7EIC!Yih_9AapxcI|A~2nmG>K@H zNqwlWBR0gp2$QySb0lDcyQ%q{i6cT=FF?I18j5`;aRk|28X{6_*&bgeVI9}g|3Ch7 z0}K_(G=hH!_@hhmxrPgL38O=3h{e1?GNLOTs=JK$-Tv^F5IC_kwLaR6q^ z5uC(vKAcxMG5*3Yd|?{if`M7LochY@{~Rl|O~(vw-irHKh(K1iW0$)^0SHPaD$9rR zbN8zzQynleX1MdOZ2xey9LPl1?o8Z3j=bu5R1|+hN9JvAYFG9tvNf0<-vMMLq>c|I z(_0SDw0mP77v>u9>KkKH|A9UC-hyE71FYeMG-;j=<%9;Qw4%z4DvSW>?Da;pFXz_l z`G;@e36ml4u>u8Z@~yfoauCL%?(toLXx&f4-1w>eZ|&JQasX%lnV2^PQ7ekHq78sR zP|^8aq>Y{<@CFDBa6r*s&<026e`8PTPd*r71jU;lc+1!fJqaq9x#VW-K?6xDSG$Xy zsMQ*%ItCESBAN~Af&J4|C&*}7s9t@6uY%XsR4+EtTI@3I3bR-DpwU*w;76eftX*1g-#_58i;SrS2 z1Gmvg9DKl00lEvHqG;4EHEG$qt9@MwFg*@mP9wk)yvn;qqP)S*tr;n}84h+%AKeY{ z;!I>#5`+b!0U>EHr>cN3FetqSb4m{Ru>}~z^ZX!t2YExv`Oy$3^`AGE2jAK! zR-M~6K3e^CRSeJ_1jm|+?}X62Ku0A&xxA6NvPz)x+QTGP5YrGk2o%&rkX^8CNQjGt zy0$)#1Flt%0Y*4qv$w!d>+z{St!>Pvt4HtrdvN>mV_q@_M)f^-<`E=2kxz$4SMrP~KkNi9_^fzfm3 zu{0wS3yz6Cg?KLP(@5xf{#)G z#ym0Kz9T%c&(XnY3g47BJWdI{3#uB}1iMklh2ncc~| zF#gOoYZh~^Zav6I4D3PjujDMIJcln3JlafeCmdxr>!eXhE7}KoNt=4v%I~&jxreei z+(V)G(yCE-6Zg<0H&+n2SjUCgNx+>`BH7;T!PStP%i;m!`;2XK4_hHM_Ij(vz_09! 
zAM4xF#=o_j%yD;;qde^8`i)$Aih_zMivSXAeHTbHcA{0YZ0%-MYI9g=D<&()ZA44k z=I(RU!~}Y+-DH~ko-}6LiHq_m;^z|0q#g|Gs<()!CkYf=puPo_wt zwVSW^MlY(~QG(EB44u#uw;UIHrSb?NYn^?{$WC(Gi?;-F3>tSVCIz>X(t|XslMC#5 z_0@UNVD}KOr{5g>Rc3kwJ~!Bt(&`1sX^A^b)3?0qQP@kIAcXCXd=sQ7sC!Yf4Xvse zi9ZD=cIz4=?jEo^cra+sgJwobcqR7I@dCz#9G%F4)fW&-aJEbY748&KT{6oAIbsGS zl_J_a$YytmN|;N>)rZhov6XO2(H)ox#>2M$13t2WG45^8XZr30h0mTaxBFsoKOfol z+%|mm9O~9c4NuARI5=31Eo;NW%_}K)q8-3qsY2mHyDRBOHNPqy8CvgB<60}C_DSx52Of2n}OX+kwDijL9fI5Jt zw0CkXhE+p=6!P}0K=9ScE?A5yytk{m>g%u&9{n})T1d{khEsxCK7RP&K1Unu_z7rVsF%YMp5g!*&{Fn zYLTxzo4WOu)B!Q+ndt>YGuZ}WW1`xR4Q>2AP8H)!%6AsjQ4&Z-v!m+^fl zU^BHXp4Q3T|FSVB^B-$!XG9X&a8Bo!Gq7H3(0-Ecmo zmT+SpJ?$ldW$-7pZ!K3^c?uFMGYeE68dS}gfh3`9; zUM5=mP9V{8FaSWyP7Sm&wm}BNY(0dIefv;^UbP53*iP2l-jk53Ws7Z|(geV{F7s@v zd9K@ya`0@E+kb^5^`1}2)xEE=41PFb17p?ioMuqvyKLF-SA7*mWMBqJoBEk}SRtk3p zCJooI^K1KAOcw4Xiu$|no_VHa@lVVIxB$tkAtB_sBs=)h#tC^X#Fn%Slun^dVDLsT zjL@uktnI>_*O0jsz+vE0yROn=`9sacvCPz@_X43uE@gxEt{{NSgpv^>pl2C58^%^7 zCODIHK+&Y$AH7A0ox6tbkAfAOW5tBdQLku?_V^64cs|HcSHgAr3W@u)>3JlB3E@Uq zxWm_IOkxZ{qrVlVe6%)Otcgv5MN*}9F1a7@%Uh8N08FaOU}7_zfDLrFYb7?O_7{un z9c;Bb2xmW?gPokzDzZ4zi%nU8N82G$S!~9pSGWQus#r(|tQ>~iSOY-Uh|JkEzo!|~ zoy;T%-X4{W6RgH|l`sL{%l6}ca8`N>)k~dAf-Ofv6oxc8v0Wk)xt3LX{(&M@BfYpH zUd;9UgDo)!daOY7wE0)McS~}SyO9oYusP4eOJ2fBE z3h>6!`>P9ajPDv?=e;V&E}%#d@Vg~mbi)N=5D;&J9t_^__YWZ zOK40nd95R+OaFk4I>B>mmNf^wm^lf#a9h3`D2|j0KIjKZZzl=CRK}uxWP#*aQvXk6 zzSX;zjLTmC`h-@^^?NNFl;C%Mur823^TCw*9u0SGL}Aei zj-n`mwS{q88ru$GvBKqoN%>oyB8On75H%;2{K;zSX4`b0n_v4bG_8sKg>xayo~sqF9D+c&qgOtgDw_&vbN2H^e;CoWcN%>fxx?Q#x1ZUe-}g z0imJ}e=uh(-W6z#C@QN|fBcPg{x*n=sd`45$#yUeo@8)#&NFkmsrk(HiTcd-ayfYH zDgjFC@E{}590Je)XazM8_PzIydy&*y4GrxgQZkE(HU{Md0|NsCI~Mo>>H+Ejgt$O+ z$`!FWgNQlC&eZBSz1`c)+Raqm%+=JSy=_RP=P5m{b-*&y=he29l=OI0h-Qq5Q8h&t zql=_a8M7x1kyAs9Vl+n?#Ln#=QNxD)jhQz(Z+zYnlYCi(TZEK#Yq^A#nu%!NSRv;M z4m+~7d2Nz3#7aRk(yq$85|b}{mK0kQrm!E;^+T%@TD{P2Xoq>Ljd!~$8aGdGOr2gK z}$kLmvFAJJih6=LXeN z|9-{afBgN3ziV2<2EwEH=Hs8g`S(4*GBiUvB$bwn1KW}mgbuQP@_+UZm$Bufac)sg 
z3bID)bZq(X*(rRM9)-}TC{BzVT!Mu%?Y5(pkEVCDy-Opjoo)9!*El@#I(O$gqpEk# z&~oaGDu?=F=*3l(6hD z$`!^&6;{RC(Q&(`kX52B+ZP$#MW;WpWRPL$^|jJV*H+iCq3Xt^IvLW*id&YDLz|Q= zbkVF8=SF4<8-$M47;+v&%4wRpr)s*L(RH;n&<*wOU#~fBYups2AX~H!?G>boo7Y&Q zWCW|<+i8^^4)ea-BkGjQnB~nw-OU?kuVJluXxqhef?Y86GarB-LtN)4s--jmnX7( zxV3FrshX|myys<_FEbIX2x94kjkq6f&kW6Hs&=3{Gc!v?6Puf>g|s8cxInyxL4kxS!qBLU>_P!1TmZ`1>s;kx0gXWd%KVli#MG($Ix*}2!1aZ8{tOm)V zIjo|P%qAo=6|eI6q4KISS@*K~y~Si!^svuN&i83yd#;F*tJ0%2XosgDn}?ZF=rU*! z4ID2kHC!%ewS%^(jtEJMf?U=VQ{{?PcL^CA2&EJLb?%8&~W zy(pBYm_k|@Qi(?<6Qq)vq?y!h%ZSgn0x5uQ#DT`+X zmW{9_fIvbAcX?l?O$J~Fr{xxEZur&%GiW$$&~VtG;jlr&VS|Rl1`19D4VX}ZiNJs{ z!GjMHIKYVTf&w8xz(N5AV1Pmb8MHvc0vU+lLa{JG0}h-JK?rIcc`}XA7EhL4uKXVa3KpQpggePfS(jtc)%Ala0x0{ zK?D-;0v0B)peO(a1u7^LtgsUt1PbHL8yAk(6GT_0}V^2tI?{Fjcg+`BQ+z-q9shY?^L}^R6Q!8 zx7X9duG-Z!)oN8LRYH}N3h8z!vxv+;2_n7sBv7|foy4If9!kshB%DO$nvx)}{u^<$ zr6Yl9L?V$;BSDyXF#TL;cxOl$5sh@Q^azQckjTUC_j|iKLEs5Hp{}Q$WggFT;%KwX zgWXNo5$W~>k)4OH{m|}ub*-OJJ)Mq)mS>*{yfdw76X!$|+0q1YgbB2N6DAN;r!p!c z@u+TIy&^TGOsypB2t0v=)iM1;dGGQ-M;MW{dUzpmJZvgq;)nX`>Jm$&?Gm=RWVYY* zF0o&)msoGA?$y>3xLZq`s;RC`EX1ND78h|56;p8uQw)kk{6aw;Ctf25oJCeO{8#Rk z`Y+4L6wk36H@l(Xa|U8tdf@^(fGnR=DWL-a3tVzQbQ)Qz3};bwH{vo-hV7sI2x8A{ zXZV1cQ6R_V4gh`5-@Ub!RP3-9dS%X3^u_5ZY z51W&~CWc&i8BLfXlBBqzXA_&pqnfK(9q{G(W_;&V`a|$FQ-TCf-TSuDnw%s4lcIcY zs>pYaL(dn|?@o-rP1e^E3YyhsT>=#uXWHLHM13}sj>o}Y`xh@DrP{>@-K_2*Oe8vv zz8Q$C75Qc6-U#pWccP=3Z;RRh!N3?>!RIMG?kw#L&_kQn%#HH=UXM=}lL%+qP8XBZ*T6Iglia?hh$D{DkS5~P^f%D*&xUl9Uep9RNt`LTwK3A+h){J9o!b;ub5BX!UYm1q z+_GGhJFC0J2_KMFqIEByV|#{;wb<_k`;@PyFsQJ3)$GCi2V1!VIjaFP`GtP zwJp=e379P8<+4e=@T}bA%y`Jr>?i^e4-1Jx*~)BI|^Gh+`5^mz~&s1)ON{7e>?L*_*tfYNEpymS^;R{ zI%M!AGP(Ya&OakPtw zHIfnbwb{OnIwTN84%&{69t9B?fR71a`ltx=ZPF*9yglfz2%tsmcf4UqYVeZC$qWLC zR(3Vunu)Xpq5+Q+PQQ2`xeLKin=wN4A@z*ZT6kE80pV@`pRwd_8kYNB72ctK)^Pm*w3h$sY_dy^je^Qn%m`d}W`aPF$93TTzu-y@ul>ft#Fqu#$gb|`goF#v zU!ic4(@r&P>$2`0JVF@*sGQ>mSQNQ)KblGCmoKTtMGcHhS9ZXF`8T0%dXm2^DRIP$l|h%L}pIbr1Srv0X1;J=vLU+Kr~!y)Nl@lfudp=%7u 
zlZ9|D!$0z%=o%n7MI6Y<*#A-kqg9TY#ks{!vNIRSS=cy||0&V+RYH%lQLap;X)gb! zIRwFdizl*OHEBj%X%>ut0)`$31NhW&asyYQtOg!9mYVJO1M;`tJ>{)dP5enDZG2V| zGHO^Yrn=e1XEZV(WmsRgQ{PwI!Oe8#Y`c6Nr3*72%M3*eqmHfXmUz;|l$Zw3`*A-Ifa z!Gkw3?lP~9MQ);dw+WzD64XQkzhWSl3r`OfRwAY3K82E?40wS~^m{+y)R|OSp-8@< z9IX?l_*NXa3n8l&PFeT*1jRj9sEyi68Lc>^`kVVC@18SYgm#rbiKP5_KL!kIvKFPbE;fQCB{Y4;$5uc~ z%9-d;?4Lu$>Zv*jCFfhUpi9s3w=q2&7L=)OJLnhXR3el)U1*bXDts8RxKh?z9~46q!sq#-R3Ctav{ z>GNvc1Wo}8#LxM>S{gZFB`nX+WzI3Q&@dseH0K$|jK8gA_`Q!|f+SQP(8Gdy?N&|U4JZCD+)BOoR zgjfz6l8DU^6$h(G@GQcBT3PAR+s6jeaySNvsOD+`straHHAi9pgLCSODrAl`O>We! zV}AdCJ(6zEpTHcs=QY?+C?{m}az)2#ZWNlPJ4XPq**_;J3Ncs8gqDG+CQSPz@G<@j zH4tvcd7nn88NwZ=V{(RjyElVToahay<~J<6I7YBF=fY+J4i$?fg5XcJ#^|{@AXj@5 zLIP&QifaL&&VLrTTs$?@OvgfA&Fb-e0I)ZJ5O}TsT!(xFB!r7iEf>gV>bEvpjwXQP zosnwvdNc!zG~TS1GqDkoh@~EF2A0-8`>>gvIN9!OJvkFPk__rr-6m1b>a@_ADjKKr zRAaz`Y-x5FG2lAl9Vw@}Oaa4N$1}z(_GvO}4CjghIaws?JVl`*Q*Cet+0uOC*m?88 z?AxdZb$sr&C9-g(-v~Eo*xaYgCY}aXsLW6{ik*U%AsC-o{h$sVisXalqY;(*Go4!!-)-!_F%(ysJPvj{BpniT$AgqM6`*;272PCzrhL4o|qaKz#M zqH&x_U4?r&8pmSVbBRmW2p$dzhxpFt3T`ANd?7x_e$U%TV8w|Jzo`o%A2~KD?DZ(S=5czcm;Xws!rAt6Wh!0aw855X)go%%1yGh*E zciA)TN=7dfD&V!T(gVAdT<{3)jj%cv3Wacg0iT{yj<<9kT!Lr3re`G&+&cLtUT@>Q zCJ&e1fjgyXFRezaI;Qvx z)J>3YEHamN4j>YWW2RFaY*Lmh2~$YVHe+GA;u68&ifWenL@Za=vv@f~+&e_`8j+6& ztJ@3}!BPDI9y79)j%XZV%b9xZfvnHp^&zetxq+KqzW?b~>8=|MFsj>WwOE>**{vG* z)VSJ<4%|*8$n`g^b#HJXE`cC|xr5s-x46xq4mhjhnXdaqi`v#Hm1TJR9^~{lUE{`> z0@0yC@D}#S$0B>(!nV)pTlv{PP*Y>hy3YTC@aoJ$#p)bXGzt3P3N@^M(Addl+2yxh zp~h@Kx+7{L#BZCbLNjtx?*#|)3N?fw{lg32n}kX@LfQzz=#Gr2TF_ev7$q=itKjF$ z7av_gxcy6|IYI|;OXrgF&sQfG=Z05;#KRXAGkgI zg3^sb-X-3=0J)S?ghdG)qVm8>k8QLp@5kFOTrW_lrSjC$gDO;o_CO+>^Bsa|(D;zytvoVkp6D=55Bwus32#kQYZR5i#w>yhD zrq}2ce~)pYe<=hrqB)5Ya$4Ct@4)}d#5x>EC=0nz>VE4Un5hn&zSDi%pw%9)*Ltt>UhU<95JPch;nR2S&(99 z;CQ9g4atfFQLmZI7o9}Twh^kl$twvDzafVB0~GIBjQ1vltT0I_D3Rx22aYW}{^8uj z3@ed!ipgfZ-EBBPL4I`Q%i`^-K{QyOno$Www$EhV-Rt-8y(!sf$2=8vG`s#*cR?45 zj@whF9sofn{VZ>AYs>eFk|nDKD)F@PPa50y4jzIE&LvJ&50!t>|KQFgjv45DU5xYr 
z04_qyF1`TZKki-aw=!>@0YV^>17Id}x!+n(rU=^vs3crM7HGCh^nYs4WSW{V2N=O} zqBPkeKB_hY_u9Ze{LB_BSkhYE{^w?5&5uIWsgnRhrbyzfl1G)Dw}>1pD+HggZk~#eSxL|bYoY9VA4x;$!yhMH;3*u2WKVm zAawBhx%#@OcKRUPr1iaTTvrv&sk=Q}{@$`MM_n)##*JJYH~;mc=s^Y&98jikA^0$L z2Jb#MeHTywuJ0knwFq4}>~GT--)^$Gnmqb=oHrG>kI?yC%&Gz|DiOM-_;(?S?r$ep zk)9HW`(w_#sOGMn*;2Jz_%;xe{w{E46aGq5UG$2d1*5epavo=iLW+=!a9!j@m3|ly zbeOwx>zcVuBT8dxnpfVISY7%vE-3kDUDQftd67yp+NM4;?0ouvyFLgFI4P;a>LmIN zAZAQY*)YLX(jW>hdA?K^Txv36SZ5@7+-OzO?Hc);!PpnbfrS(=Q}#z(&z|(|Hrd>& z0=uweoVa%3h*^awL$Y}@umiQ*WNzS~cweFV4rpz#9%yZo4I&1Nf;@+h0j)601zL5R z5hPoY+cv_bJX&V}KkZ-ONe3>Tfn2*%-N zM#29MeIo^L)jr2M5aHn~n)06G@|^44`6jxKIGl825IR?t(1`4Q4*`sHbkXaD#DWYe z!q@o`qM9!8dKV@>Oo@vLN?n4|&j%rks=6Bl34tSz6Kyf#xAtM*<9KKGUBaseb5m_e z<>eQz^|wZW?V2hIv(5=fhlBVWt-4$$gemZ1D zB^T3iJ`FmLNc-kb6sMt2+d;L`9mU{7MtPtdMUl}ZN8JaekR-8&lIk%dIrLzvbcJ_l zsQ`YgM>zq1d_B#4OpHVdpt7@kaw4|G%p@5>-|KCNMGK_5(8aALvp7Y}wXe>8^6Dc& z5IIy13R0UI_OYt($+70h5k;LFqm$p7X(foSCEnHsKctLp_xe{4b*|z9^^T4vVqPuV z9H~XNakp>OO`=?_?l(`daXMMLo(~BFC}A#LHk|`Z$&aQzIJ=464vUDWJEyb!BQGf& zNl9DC7GvF3s^gCQT;*?bPsx0rBGlMMcUcmNpC^ji9?)ho#iKc#A7ua}q z;*h92EF+?pQ4D+r@zzsy+fWhlTUi(4NU+2-(ma|lqNO&GjFqVb=`iv@+?ljtXV%Y~ zHjZ<`Cgj5+|MgRcONbejXbR^??IDIYWs%!eg(A$PQrlcbyro)kbg59BR#yE3wW*UO zRMoawj9>eB6~5I0;cFN%wFKBv&%($;V=R-QUzz;%nU%)TvsFWN^pYE}u%Ryf%{-J{ zB5Pcd;4W@+9T7sGFI7Dtj>SRg1kRV5eVy@IDN7A7%-p~2)dFR@UU^{y6mv|@*9 zq;hPF1Yn}$lhfE6D2gIC56D!WyXKsSf1;emL{OC4%(HR2RYcI9H8K_)WW2MA+Qmf4 zDGVSXkKf_NUS4K63vmM%AA2`)qW<|-?3ZuUXd)V3oEayvjDudWG#ypA;1ScxmKuP? 
zH+$Lu;&~_4M{m0Zbx0aZ6l4(H#bd`&e83){*Jl}bbkNE$e=P2#0y+g|z&AWPI&8lM z7j$MG8BK$d$??U~AfBo$odb{KFWTxw&Q)MzSQg_b;Uve>)^w>Q;u7DuYJz}+24kkf z6b4vG2kwlk%1y2iQR=3|&2Mzv22mekn`i=Y+?o3=5aqCAOHZKzNRM^r4{#_*bAljT z8f7DXN|vp4K44hmHKha0S#)TT+ck(-y);6OUDvB9$k;W@?v4w{@!KhwyO^T;X4ib1tWNSgpvMYalpcj6uL?aC$NM} zUr}7JSOlwX$w1?t5ztk^l?gIn7T^(*nEKq}i`E$qLF7F(vl&<75(x9@Zh#e~M5)IP zg$6yegl1mgK59Y^A&y9Bysg<5($h@az!54IiShEL+OiT#mG6YENT=7K3&!c86f?A= zU7uV$@9OQnecBECl<8U$<|PZr2p#0mc%y1J2L;945?HV^6*JaBgZXqH7F(pjg+>V| zP<`9bFU##S@V~P6il|@tenq?Vb5i8T%5tkTaA8mzBHar_cZ#HeQldOE@r$JlcNjM3-h zN3psU(OX95J_ZS}E9#UVyGgsycKVxNh7SOfoPGk;_dEv~^um-CWHanY7bt)l`* zJfDkQo1g(Dcw`ACetXZH2}^Qm;MsybYItA!0%gfuLKet@2UyI1f~haOA%4Wo(dZ6Q z^us-&0IK&A3<0o8O%n1mMzzu?URJ90I^PwirBD5%BEnV4zlFrZ2HC#Bz87gq2OsBn z0sG&nj;H#k0OaLW$OWDl$wmM3X36_U2a)0%x^x*=VkrmhVktUm;T{qa%)_Dm)491< zqBFVl^1A#mG@t?2G*!dDA)$&|Y9_kRpj`#lG4g8Yf)w|idt|SRAbNMfh`msBs{nV6 z_C?ZJurI3nO1t4BZPp^bp(v|{Djg&<{uV-_d{_suy46=8nx~#jrU(j7=GXq$KUg4w zoBFP4u&c{?CXo_=i_Y@Ym{ngL7iL<3StPSBt9rK%J~sqAzvOnR5T}&b5-Tk$ecv24 zj``=YR&jHX4|(;88v58SPf{K%d}*hgN*nqRgtM{;_j7xj(a=cQjl&YRo;L+6WqCej z+62lCvA?H&dK8@ju9F62ZLG6W=VPul(imXAo32^KdTXz-Qn|jGXfwJy>=gHJ!V?S! zho5F3YjVNn`vLu5^)Oe&QB%OBIVTq?Ygvg;Ld zv${#T;qQ*F1ZtZVe_B|RtKKw!uAbJ2pZd+a8|aDPlq;MpKjQhq@Fh6?ni+Sjq{k)q zn{Ejk?9SnE5e|39iNfjIJMzdLUoq&2WeD~s^)*E=rw4NDVt)5@d;#h}Q|>iIC}{wT z68IzkLQS5D84~jEV*1ux{_J^j7)n^A=D0ErBP6HzyofH~E%)1V(-{fEWCYX

ZLVLDxe>8TTH@SCX-IB5Y4X6d+pb(}S>DUCP zU|J|>%l$zjqJTtgJYDV22{`C93}*VcRzT_LEN!$hMjJiZSoir!1Q!G7=FQPbR0eH$ zN0Acu2P$0OvVQfPTGWLrSxC!nvJ~&H3xjl>YKU2g4E_fdBIB6hu%=u>96Qozml5Qd zu9h$QtC}dR330QrtleijaoyPZBaQ9B$IMnjj@OGEX^!fI7U(Rh7>d}FM4k$6rV8Sb zaPIjO3b0=wQ)xAeUOH(qq`9941AFHlLZ;zWe8VqYHH4;8@?p1;<48?c&`OJzZN{bT zAL#g)1V zo2~djn;zAWnr&I)As&;wIFb1!Fc6X^O<;KRhqvB;e&vBctUJOoNjjD&eSk<*`e`8F z&9$E_Kk9xq?sSw#7?HktG8nlIZcvrF0mz6A8Yb|TJmKaRv0D{tW4M}#d6!~L!;zn9 zaj^WXSORI+u_ZhbV>Z11u0V9S>|P+nrmbbybmmO}XF!<0zQf_5$Wig{|DflRWb?c7 z=8*`9_W7e-vkg5UU@S~&?QG+I=Y^Zd==t2L^z(p#u~gO3Y*J*bLcGJ7ZW5tEm$N<3 z+8QMpK?|H7lmI%{e% z+66(=#MoJD!pLb(HVpW%LmxG8y;MUjg-2j1&_c%1o+$7!?*Uxp@w-pthHf;8kS;eH z4K!h(I(@b5ARx09*nx(hUnHv13I)_&^#dw(Dn#AZEtFBnQ66Z*rn{5C$C1?_oX>dy zNJxqVLd5{D4WYyOA5Yz2yKnS#9L!G1jjA?HPSPS4=SBdWG>A!=2^kvDAa}qB7?WMD z8Kk^eNod>595$IlT@a>z6`CDUFpvDcDLl!tz)w6vKs<+vSATGSqH8#PN5sq#VlK@q z%3p~CoYVlScf1fGKu-_@yL#GLFG4pD=M{_K;w z#?93lt2Bv$H_Vtrw>M*Q48N?<&=HB$Jk7%_G*tND$WF^LK0W;{p8knvcUY&-C$4F#Y2s$0*fi6$mdujMM~4MP;^HalwJ3>F5+xC8 zhp-bmIFv>9{r!e`Xudl;y4!7`T1Z%!bavbFmhN3sUfrwcRU2<@S7GP~+dI4~YzfaH z%N@CVds^^$3=TgQYhQJuX#IrirPDQ0ULFeW6D5lc>s8M?90var<>*$Jnq2z=VL|ok ze2L;BPuIuQd-=YJa(cE{;AXwR&9m>U!thDvY=gkb+6%?6_jmS2LcXR}a04 zNk+W1J;VEJwrC_D#9FwCu{^wEju=DlGRKV$rIEHs;*l68?FpHhD9cXwUWaS9xwW+Q z(@R?4L1_a)h|-53haKc|m?namgq(BA1R-OOB2JVov%GE<{hv?U#?s}HB)2sN(KMk! 
ze3FxCBxMwj!{9ARGxe)_v9HQy<=g(6BA>|{qvIn6ayZE&*RVDbea=nZEt%x_p~Emr zueFL&w4)oURIH^kr9U2)QpE<)Od7^eBOFsKmc-d zYU5cJ(NRDuMO-3OLp?3jLRWK7imBt1!Dy3@ptGMA^;z?)e~l%Lw-nrv~eiGu3( zWYuJr6LXGSA(Z&L$su=aV>mhTpqV*_Ag6}Shc|X)dP_vs7REO;G&D3c9CSbCnBlW# z?2${%Im*PSMRNU2w$5GEk}=29CV7^&c(rM`iFLTX;_F+a9T$(Ur3{DN_!Gc@LScY7 zJQ@tc!EhK92!uhQFc=I51j0Z-APht?7)fF+OPW~#0aAxap14+`R%xPHQbcqD_J&X= z1T@oM96ZlSe5FaYfUdh4@>^lAsfUo@$LVCvUD*wMr3SKZ(he2XohO-EgEZHUzi^8+>`^4bJIVR+R^gLO~;>bD}?Zj+~pY(p1*fG29O79ZjlRV9# zKmq_g|5|*R^c5Fo2JUE(yUeR%fSjoqs9}x4FF)`~7?<@DkcK4EZ3U@||0<|qn`LJG zG*CBv_`RDoUVPJf)=j$WX5WyoVu2x*rC^$=68Gw3lAzxuy%ak=?bBb%Q}5=?zJga1 znaETa>ch-D_mI(~)~Tw2PQ22HxmBFbnGHV~VuFq8Q?4B&wT#71_L`&&kQAf7VPfBkDHEsVl~e}xxwP{%^rQ(4=_@GOLBrSdD`8Sp_Q-XIiWljK%cF?e$oJ2okSPs1FS7eGX zTn0o4e}&;0Af%!l5Sr4rkO4h`JgR$uP}9c(Lapz8*&7f9hw~blf^FAR@z6E#wy(aH z?N^8!kkA=190WaKe`@Jxgh~R_>N$hD4y5cy-Cm5*AR zL9ouK7-#kMsDqvYahZrIBQ!uO%Rf9x>k;oCDZx`=N)aKEO~4Hv%mj9<005zs z!*Re+Qn%8&rbpr(duuMd=00HOS74me>}1pFU@X!MZmK{QcvIz4D*b~1NViW-;_;l`CogZg%setEn{9be{wn;~+2gdXM*|`DPljK7+WPkQp;uH=3e(#RI6`D%X zXSUuT0X*@eBwRJQqCo~+Fm4Gb4FP%rW0*WexROJaz9^p42*MPpkc~ETBl z(KUGYK_EbE3aocN7h}oXtAoDbwg+V_E5pLz1hN8Lg5#Ob|DCfk*&L4@o~2}#({mMj za4~2N!$(m}qC&}5 z+8gKTb;a(j>0BiEGfhJ9?;9+jQM^BPIX1$hcC zWxFaZ7pX{q21=#40XKbOCQ$*Z2nFviQh=iN5@(E+R-l{jTWul)=7EHYnT)IL=VS{Q zAoFZgnDMto|g=^u9~guMsTZhu>-DFsbDc+)(dhO{W7d z?uU~7gaEg1suYh{=%#9}z_001QX;v7tdcdsSF!rZ)QTA&5+B)VM(?f^Vrxa*C+Gg;hA0k(O%3D4{jk`uv{;B;~kUEE4u*z55#Is=BHr9zR@rhfDIicS+1Pp;^5=2>zBHG4WT4irZD zPfN)|X86N(_&oObFqP=pf{fV2SgdjbZR;F2B)q?ild|^y~fcAC+BYpihd9ECkE$O*fo;xkVt&wDJb#Q2KK4 zWL%$YNfs&ap3#u)qvS&&*n^EYVn!NptfV~6pb-!%u6zqFso2b(+`ojAt(gP0NgTzpKtcL_G^Dj12>0RFjN2nNT#{AipWui|I$O2I(;$CFcM>zs}^5j%iv0@LK(X7d^zpdVgtFFM1>pCS{%#+6#DYU{(_c9a+>|Zfg3& z#WpU^(BEar`Bf-EMdec|8K+F%-7;X+MQs3=BJ z1*s7jA}mWx_+KzD%*wKgYTmxW3AeHkJR!R>q$^>osw!ZFzzh9yQ@Z3BH8`!I{OLeF zrz8T@b)Mfoj(75%!qK;g8XDkuhe*N03W0(n4xTm38w2TB#btpqx>9jSVjhkGB~_Fi zryxmre1oyWKoxCJ4u?ypvH=lc5jjZuoT90Gdk8g-vG|-UFb*uZv6*Mpiy-EqfpuR9 
zkmH-L@U?9J*)D%Jj#J@5J5gADxxs`i4{p=rM_(9*Qz5(ioy-Me>~oHB4Du160S0>J z%v=GYqQZE6srJ3_R)~#CCet^%g1boQ2{eZg(k*)KM5O>ctgbPbZt`gk7|0TDCOC7L zzB<*3M%2c$!2wQ82}y#&Z)i$<)6Q&*=^0H?s`=(`vbo-@I4vFYm|I#%r;Y;vvUZe6 zhh}VNjX$b)Eg%v?G6)`fGL<1F7MqZlK5l%p7>Q7sIWI^w>QPU~7`AxW2%i8B0CYOZ zTk;KT;LWhyQeU$52v{OA?0-+rweuQar3llh)Y70tLa6~K!d`~?WnVnh%0$(bZ`@YI zkcIlJ^5BU)lrpysK8?$kiAhqN9hx<6SQAXQPQd>5E|YWC78Y(2b3Ma4*3wI zQZ?AeoIsKWdU2U#i6vz?sb>wL{Uln+`aL!ta@Po28G8=`7uVoWvw+M37YQes29DIS z~_I}|#jO)`U z4Ak8&j17JSqsA@TA+qU;@r@t~RO?A^v*tc11&q05{bk)f8IcoP zA-JbQk#iQ}^ls5nXsuVk_OfpLUd9SyjgmN3-Lem4Ih6l@IQ7hK247wo+yYaR{-0-G z)5Y>A=A0aS7Zi)TBz3$k;+({{nmN?Kq2qX6`PZZ{2{{(rR2RZgxGEr%7y!Na-alUcVILfK@(xge zeNE4ji&~0qX2Co_GD@a9J2QB)ol8!cIcvm3YUOV=3Cp+{&cvQ5mxgQF*!$IW+zsQ@ zceK-P44CqsbcRP@Oen2XH#GKlV(A&ckg|7e_ylCaFq$c~n9SOI$>u{tL02#~T#*ef zO7s=l#1eItn7!&PsX$w#ObCGq^%%5OQ5vuAWrc*q^@kca7PR$I&|(yK%JKnJEy7*$ z%7qU>ICw$`xP`-0C%WQdB&Rs>jgM#Nxg6=UU@ zqPaoTRc7Y~vTs#LyL*fktUCe{YVy@#{nTqCj1W3ex=#SjP^Ci^J9VK9bc%)zh$;h9 z(ewvoBR|lIOUA2ENaOWAahkt2k@j#Ww1-`pvZ%G9sQ+z%ZL3DppC&1C@gNsLzV~R$ zR8|FZ%tu5dKb?ihPdDpMCnC4D0!-V{ik@EvK`s^gSV*a4YUJpF->Tk>iO_Fjwur){7 zjIx|^+NVC@l7lEe&*p(5t0gXHiDOse?8=r~59KU*@-l^|zz_74Ql;2@HEE1-b5jAh zAA&2^^O!@IA$v(*5&)8tV=se|qyyizaP5s%2&6g1{)O{m==O2~D1xv6ng{?6-86`# z5!_f1PI8I}(TFu*9+ZjbYoY=UUI+TZV-%&BDe`19g_8hoHM!@R3x~um+5wJNmd~_Q zyaOE(n0%2_PwFknBtr>T&^9rhiT_F<2LU~Eq&4Q}nd4Ah@k3vzeVf}(|DaS9g{#0n z4`l@v3_MVEHV@2el|RJPd&co(V@-4qd0&mE85r+{#j>5QcS^BOouLP(I;IQ|b?JCw zEmJKz_gM6+{|qfHz&)=kmh$O|;q5H3#A&LHFtB`Lm%m3YLcjoyL$$bGO>)QO?4epe z0z-heE-fe7qdl5d5N)bf_(JL<=@Vw4vsLFH)Isg6S;*vp5E+u57N2TIf-MN$^}sBZ zNu}baIcs~AH>!si;D$a3o1%539Pdd}U+3!c#j$iJO_cM>8{g-;B`Tgmn6OJ5vEHJ6 zWVSNKKI`PUX6^lvGM#zw`HCvTh7oqgr0rjJtLrZgTl`w&qw}gf?p;^V*GZ_^qf{P zY8X|eK!u!m`c743CjTG<=#W?WK95xgs1&#W9ZV1K*~K?uPs z90i)dXlsWg%4V1>>5?#r+Uljm%#`2qPEb2-1=1b2RQ;AFXAR^7d@CKAb!8Z_XF^Pl z12MHuJYzgHzn4FM@sKWifeOaun1@XJ;lb#Pd|TvMik2PIBer+ewsX$s8SFSE2M6wi zss`Amh2KBlrguiK3o?e0yHgwJkCaC;6}`!pE0KjtBnRE+9CI3~2UduwPa5jLRmhyo 
z^S@*{2s((BJ&OQ!$2|G$2qU%EqQd)BCAJ%yJT@tkgpqVWH^a~XnRLab&`vv9+O3Cs zH9I0iD~cBiF6!?`5_u zmFsE?VtNS*BBpL+>$=ORqtgJPxgTP)_!On! zq41&%-RZ?g(=*^g=4d;7U7VO)Dfj+s`4`Paqy{ z)eQ|e1{@8+%z+z#xr7_km31D6=qi!D$^D>60goz+fAfevH&PZ?7RpY#>aCmm3}3e} z|L#m6p>QMG?N|--bwegQi9;k@L#_t&e?Lu`A3jM$V}lI&fKw!E?`p}4)xtffImpc^ zM@Os*maQz0j63dh*;V8SaX)EpkyYW05qJwQX9_r=oWiHr7v;JRT-^1A&CHn4yaeH8 z##ZK?Ks3yo!}?^MvjRUr4(GT)mGJW-g9NWgDt7UmcpuNY&GhH67u6f|Bg}qWhjhys zQn#P}+HdoVx*0<#PPf`t)tT|fU>+OIS2!E7ATrxrQu_r`POUpBl<9bA4XTlo6)(IT z{%BXp5riYFNC4A^?q#}kDiEWyHDvw=KhI)vp(g@09o@+1s*eOB_M8l3hW0$nC1RlU_Ee-}O2X!6)M zk`!3UW)M9xag|ho3X2%4BvfD${HaP#!G$6msw6~E5`URWjzUEtiBu9^s}qe8l^hL* z&{Kg*PBVuxoTnsIHwhj%CFj0EX-!iSkSF}v!jx3{5X%WmNjSVD*rSx3@`i$gq$GsX zW7X6sNJ;f7QjPma{G^-QG#&Uti@D?-AzqTvxx}@cq_6|(OIP13Fpw`ScKt}m%t;uD zA35U_)nEHaSZ_<-r;i->B9)Eukudw1s7Lt7Q4dkWbdQ88SON#xBd7K4ejtS7sz)~O zRWY(a=+Bc*H9e_eCO1HVu0*b7fdaIuudzt&I6Bu*{2wbu-DIT^+1qdhq9cjt91X8^ zh(!W_&@I-*GZu}mV8#(FSVJ~{BQ6}vbxCUoQvJ`<){s-LssUmG%ae_|Wz85)P{Fcu zS6yRrDag+n<29|RekL`rfzC!Ig{X?4UuB`bvKfOY>e$`~>Ymg; z<~pB~)rqF!sbVrQ8JSvB+(h-msOlsq4J$|tmm>##dY_;fs@--mvJ%>mWEnAzygGDQ z&YVgwKE2q*MjR59u$WH!&^{D-a9vJQLX!@SW1M2AoN~^on2HIG#8f;n727P2v7COP zOBn1&B4Pwl5S2JnGgU*6eT4J+QhASf5-$vtvs>I7vJ_P+VxFm3RHzmD*83?pImPTz ztI^F~BQ;dPA{M!#sEp1)^htR!A@AAzTAnjLqSPms|MJrxzy15;hhM*V(x3|=d|s{k z`!TJ2_C=pR0Df6saT(m#=Q$aB4D zWXbXrjbpG;hG0Aqp+KjvZvGvdH0?X(lX^RWeo{PXI6-`IPLHhR1pCN-p}x7%ZywXqRbv8u5-~AU zCLzXTO4pkN@d-`lN=n}2OKkQCiJ*vzun3MtLMR~#B|Skin;$&3!Xc=k@Jr#kVlqbI z@Wn^M5~e+DAz@a+6BHhrt0x@9ZZ6^X_=f}agd8Wj4wu5PScH3D8{trL922{evi^dI zS$z?=29je{S+Bxguc;_j$s}YoSp#rnM%2VV98WS8BCZxnQex zt5j!?DwnU!$m|;ORfKF>t+7_C)nc(&ELLsQ5gw7KhUnothk6fbO+Z0D(o|Hk9ND8q zB}6hrQbb0gDbaZ7GBh*vtCXsaL27uzChXsBy6`wAYmAhp zL?jb3Qj|<+TI+=QZER|4r1!efkHV?M{N^&|orGQunZ~F5)Sr&0EyasRBX+5UF9>EJKpS~NhoYKjb7R8h?G3FSPMOj!?m_)54A6~H5 zOF|_+d!!>WQZtcK+G|H5MjAf62$6=bacPjA+!L0PaV08ox(CHQPDsTp)~oz%ONkd_ zxm7HzE0q!@jM8gV8d3(&FRHgbbB-KY-Vj)8Jv$h-2lYRjq2Z0y!*z0|Y#-P-j;U9{0blQvz3%-F>ptuq?rXs_C+jGRe8MpUXsH7ct@9cocYE!ERcXL>P- 
zGTO()WfEj1@z~0xa%s1fYHMxj&fB&gmc`mw`H^m|*1a}{5WN%aVw3i!HJYDU!22f; zW36TM$kj!vCsYlXDON@J1^H;S?aU+@W?j@&yj7njAu%=nrW`NW}0cHnMPxeDq)m1oM-N-d`LDI<@M<*K5yc+m8e%PmK=TGSScWnM_w zg~D867D67ehmlsBU9XtxkPTUpOJNEj+Yp(gs#RrGuY+tPKI-R0KH{;I+@o$JsRsb=DN(Wb|UgZQSIt^X8I&r<144CtOwhT zwdBk`B2p2Hqh2+elgv2s2WK1!p z9OH->-z?5CixbcLbSU>(TpXqm^Ij*UT*t;et8qudBOxO;5pu*M9-cxxwb&MmrK(a9 zT1qMv9ja7xSPE1gDTYDi%AKzp=)79~#@M+%dPfxw|<$k1h5E{vr#z|IWWoBF4V$5OVVr^W1k+>wBvK=-y zqq!3CmFZ+AVkm|>K~g0ol(2<-zOe0JJ(=3PS}m?EwU4GJG(uM5L5?!QdTY`}i7&rS-MOHx2D7{Vay2@y0`w z&?dBn{^>bQr$eW8-yv6uFJ*J;v*;5iN}otYC2!IfvMMMRBn9okk>HGlPkh;!g-|CS#(EAx2{ztg{e|Ni`G{7LSU{tFUucx3WJ z_&7d4zRCE&g{hpw9!|{neVa$Igx}l}XcaoM$}1sAMSo=_hkmETU{mOV2#D}3@s5J$HBvl#B z5dA`}BOYJGUOLN!=28ayMc_m`bPo{lubO%ax74< zCRUUgu^>LWm{_P!LToBLqg&ujZeh-T3%-S5b6#t!D?W{ejIhd$ry67F6Q@W}d$vi# zjI4Q7IB&)%l$?aoho@fAP`UekCoyfb)>nt{$XCna+-l(KCyig1N!VPyzYBe|MrgyiXoNa2x9{{}ekCF!eM!CcQu9z%i7Bql;Z z=IFu-h2RNvX17zF0{MbY!M*7lxD@I)Q3~%BQF;#ZMo8vSNg)KCIVc2?pb)5n#O0Mi z!G1$FHo+n+9Ljid(+2}h*c2~h`iRFv$;~+Kc;k*c-gx7UH~u$1Wtv-5YNEo3O4)C2 z6mb|uit6k&w_7x#MCnGRXA_D=C_(J1PBHqKSx7IKg-GZp;G_)d<)n-y zDpkr+z5|qN)59U7yG453fs-*7i~k64mp32D^Q0=&lh-Xo&0m|ONj_@-_^_RcUO-3U1YSs#r@?|j2G@wT`;w`kpGXa3M z0Nj`N0zrrbxHv1F)*<|IFk>VHA-fbetA3bAq)p}II!enSp?{=c>cpO*l$jIv3=&LE z+%u3caiSxmwA3l4pp$B#k3-ZnN-Lv}PHUE&cwpWOUBlGzNl=1((7kbSmiuH_ZpJWw zM9FTaRitS%uAT&p8Yqh+)KYWu9zlc3$$ONNK?46!!NiGn25FfAbTa&!l`TXm1vBF6 zDVIC(@)=`Gs4BE?$eBPN=mJiy0+1hVllzTef#r3~fU9~!o^B~UsWR@z1{733q*ET|$oO4o~$MT6b&T0r%hx`K3pqs3-Ir1X>sd4fzRh^!m!%#SF) z8bcB?!uls#R?m!gm9O29A4&0?gnKH6k^jp~PlAe_4`R3z&>xgDwD@Zo+u~1aCDI ze;}7=rCiz=hHJzcEocT~c%|L$Owf=1a{jrd%V^@Oya4gaZ}MeOd=@UEjxBs&H6A;{ z+brOZnclefg(^$<8g9%(ZjakWlD3OdIBp(}cB})(nuVHJ>EkR+XlkLvlMV=O$qgv& zspbP~B`|}jJ6MA|C&9-JXeuA}!`PkhjvrmFi}q>Jv|1{t$m;EK(F61(T_?DJdGoKUTx6}VTEy|uNzn01t6jSxqd;Kj0pkLeHk8O>g#3# ziN=Maqa@Pw@dmT2FUhxqUWe}7>LVorLIN@6DA6rcGaeJQ@1a&^(D9N^)%9+2x3Q2BZZiYb*lRRB z({PBI?=#GdD0hnr4@-H%6Uydp{e-F$N=0$|EFv;aC|fr>WyWqoNpz0$nIhCGLgkb# 
z24g}=xc`r#q`9vZf#A&W^Hd`^$;Z`jVK6~;+AfrB5zvLQg#eJ7i@^hx!NW~(BuMlk z9ldD&g4Kt6L=e=%Hu8)drf~>@k7-NhAXd}PfG^;bm;L$T#Tkc0MZi#@fbi05?G|W> zmuWC`k-x_a&yswbqS_JBI(K$6je7`5gM0-Kh20#IpdjGm#N;_oNuEC;HVtdL$(@oO z>d>7MK2wPJNe*MpzW1rCqAXbnTFhwEp|Uu-A~$$-GL)OW4yMU{G_XDDmU+&iOmD2o z_)Cmee$=R`bUp;{Bd8qWh=l@GhAWnVI;DtGB8i4as~cfDblW`$nCEUhylesfKuoPZ z>o9v$hw}244s(!8t?&igMXv{?EO0N-8dVcQzUiziMs-WjzjMzchkUC7CkOp9x?lRb zvo%=-q8d=`hy{LZ1X|stS2c?Z z;QI427cMW4Ztmx0It!r0*6!qgRLR7@k4FGdWv(g-kL69YFn@{B;)`Pqs?BEU2*8*! z^^H(rMTy3-k5J3tgu?jW^uL28peDgYGm^ZPEDMuabXJ0jQh)9eu@%<9SR^s0MBZjf z>v1(@v!vEt3ChwrK}94+*l=>__91_H_Qt)p-03_~5_NJR%(o>{@wZK@tcQLq?k;Vs__`aGl@?+s!G~1>i)2Gk~rEz9L~22YY8mr8Kaj=Oe(nQ<@;(&NyF-u0I}3>TC;^EVY`)4VMKS z)}1$M7x%j)$Sd6GmJbgXFC}KUk-(J=MS4;Ne+3mf=|Etf@25Gp3l0D4AW2l&4D$!j z6lIT805(dlHAVH=|9&$O7mk!77-}W5jEDjD0F91EnJ5QF@=6nB+g()iQ*`lSH3*KW1M7A_=J!Oa~vk1Y;wjSYJHcBK@I3 zNPURFkbU2%%Hzg7U}k@J@1otlkr@4WEN>vOUSMkmpu1=a<6=^x*#$UWcz7B_-A0)< z4uUYMs*mLpDBk{Yo{a@V|EGTaZ|O5nijj*lL;`+ngvoncr=ov|f$`CrFm)>!yE)6I z_l~ZXJfo#Vsdb`x3%3CPVnh!DTlBUk#`r&J3;aRFiW7b7*Ipea|uCt?*+jH0C>6pm{ zI!n|;WP*@Y*5^y2fb)=WYpimcK0 z$XsH;1L+HR_qsG^r>ms+I|Y&i4&ZVZvG5~@-7q27LP!9ew%!UaY~3wXhqt)E>wQ!3g+@B{uQ#wd!3BgzPynvj zI&(C8k1vi&N@3i<%Hfv#-w>zZqqQym(mvkWIm_B%W+#7t>M%CPJmSUV@iqpEJZ5^cfdmI*%j?{?z|u<0r|dx1HZGCz4=u?64XvB`BV zXvdpWT^3}ndCWj-(PwD(JY%}(&OhWPS2uxiR=@jh?sJYeZnTAcqG71!?8UDXLgpsQM?&`zi7yaqJY((ofPv;B5>8i_dNI*^iz(Hvk%Za?hea9sXc>(jmWZsH&wh(-GoLsL z87}dME%}AN*`b&yz9qD1vLAA8ZWj8LF9QeFw`XJ)aYTxHN_CagDNI(#5^LIrbn})d z?R3$h-q9k`0+Ey&4(5n%V54fFkzWu7zg0i{`FHpx4EjtXwQb0SxR#1049lMy-6R=M z0Tb)|0txzJm6(%G0-U+dFCjB@dc7U#Z&!;8FhaPZkcbNx69w05S89?8iM^!279gwu5=gu9i_ASQ0fT zUc->V^AHMx6Jt$Y+*KR}0r1;%gA-!BWJqCZLaY>t6eodMw&FdZhe*4%4W1qRf=i&x zR^D24ceE%4uq0ItJQFn9y=4POQsdSk_t_5(QDMP2Z(+zoS`BttGzAm#17Fj4yqyrLZrHqkGcaYvyebPrR#I}9m46f@-B-Kr?xcAP$hi_RG=PK znvuz}58@v=NSD@twd|}v3Fw?)QFagMt~?M7{*9n6ftk&0QS+X94U3FUOU?%&33Xr0 zzW`Bxu8QH$bUze6XJTRn)M^RYlBc7P4f(fnCW05kIW@7Rwk!nmG_3-w1&OPam>F(% 
zY=)h9U3cmD;=Exq>|}sQ8i7K26&W^WAw&l40=ZDa`LT4Gvm%QZxYJnhM^8qKr|Px=fycrSh!uRj$ zwB-9BBn@1a!+;<{U`OD^;tqWThes?cYY&1wGJRwbvh4@*wfWJ9zAjR|2&f~FQsn@A zpw1w?)+_|+FndS<2`6msK}EjJAeT9idO$sKJ{&L2*Ol5 zeW)8>7gW-q5htXlKdkVqeS;H-+5@=B2Roo*5<+D6iGC3T9?}4!!obYC=4?OP8GR<8 ziQrVupK(97CdL!)=sECE^Aa9h+yA>hWq_Aw7A0v$?X~rUFlz%&e+je`-Xrm!OJ0Hu z?$@5RZed_UhY5orv~9$BZGm?q{PX-eFF~^ z4+S7KZ2}ds1M#BC)w5E)%)gi3x6bvnXN{ixVjA#lGlJ=)hf*!<{kTpW`zc=Vrdm&* zD%spR2o2>*QzwH}kc^r)|KQCy)@}Ma_2YJkzB2CMYDLwI{$>IQeKO$XpQ#@d9h-0- z2B24~6C3?IK{&@W6an3^;}6IeztS6L@E_x`iak!HUDcAplk^JfP0|{AAlG9H1M`?KXVYXpmzaB-*O0**!YhAQl7HUr=yCG1bF-TPLGBR1@VtC@nggjAO1p@UU?+3bQ`3c>)0IQuSbPs9)%^$;d9~+&xxq6c;^ZO2h?2_q5nRf|^<_GBaugm`%u)gRvg%KF!hP*aT#%bMIuP?9K69^0=K&1L*BBW7Z z2+_3}q%`l2vh+(WtQncIIMqI7+|m32E7(HSZXp&j=<6+;ATmF{tdNk_$;Y=HItaB8 z5jED+WMV87us_%DAlz`GLA9q}DFa8>(+p4@RtAox_9EfI96v)_902e#ESPSH?ceDh<3mK8w8Za0h(x;C>(GJAix2ThNfd+35+qbE^MAsN-{~w##n`jz(R8Za{`G1 zLbXuGX`MI4Tb|NWdpe%Z)0on9rZb(+RHid+yp~>S>1k;YNlmv%*GWl-N?NHjOe&R1 zrP8vZW3-|bt!PCn`oDyxrsf@zi6Ki=VxlWXxzj&=o-B*dS!caoukR$O)4Vy}oR`bz zaz^DGnx~sKZQ8VH)27FKnaZR&v&=GQ&NEqKjwz;?Vu~pfGlZ#;WGgz-p^~JMPLkw` zfB4e2BjN#u}3k}U=qWRO9=803rQi!>{?$Rdj^vdAKfEY`3{GLR&vIG$puqttfR zS6{#Rc=}kU(a~`n$8qdy97!tLY&M&*YDSR~$3Q2ZC!Tnsi6@>!9CJwgNGxIzNi3pF z5J%)83Ze~dh(j{O&k#?+D7MHTgF$lDS4j~p;a`9K_19k=U5}StcG>mWHMdG!4_AmQ zTnv}PHQ`#gLIM)EZCkb`+Nw^qq7|)Z38TddGKO)oK-FwOY++6{MAjsXnq* zOdhNHsYqhAajB@NsHmu@Dnb>dmZ*xTsG^;+J)$9h{*j>RrveasOgBwU4GB%T5+P%X zDaQO4Moi6lsV~7M@|yG{F)0#~m}E#ek`fWBkO~QhR7izXNIfP(S`v#|)S@Vgq9|Sz z6Mx>KA}XSyK1u`yqysx~dyKTfIF7d*T z3U3--ow_}+cx}gy9XodH*i~0ukSki5GiP(=%$ch>s#@n-tyZhmYP{5{w%Tf|QH^Rb zUgrhd&VUMx${IQ7689}Hms$56&yb==tEOsFlZs4eQj(H*BuIpWQx!!;D^iLRwUEhY z(N^fDf9~H8cN}8UL#@EL)uz^wFxFW~vLYH){|-wW#QM)8|2XS!{oeomQh&6kk{Y6# z>1yQ8nU%)#FRJ&w%!0O!3k4>|`d6=dt6ueb-Bhu?NGq)F364^1@DLk~T)>6Tx9`DNEAcLl<_twY1Z1BxB&5Wym9I+A5pl$4d%l}&G^ z!9|8)WK8Z%k8+ehM>)!$%b!P?-Ockn&+|MclWbndD7F}55p&G$$U-&M)wy&?a+u^I zA%7#2$z(FQMC6Wzf`WpAf`UHYKmIl*cE;oJcsvdn)0k*XV;awx#;mv@vk=>knRrwx 
z!;C0p6jMAyafsy)UY3ZIK&S>{P4r1hvgJ*Fc=I5gHd66g}n$1DLml` zPdIAA4ZHqd{{8mbA-#OP&ez*m{5}2dF7L|SxodJy5|MU?F3WWQVnCh0xeK?*wp-(J z#@)E|+6b3ybQ)!Av!zMf8g*1EwKZzliL_yD8`!pO%eJ4Yuv%?ZrRr6qs-@*zBdYsa zU;A~Xul0JbMeE1~MJU9UYOPuyv2M1mYt4FDWmc+BL_}1YIz%N>TU2WBR9r<-{pp8F zL)}n0)C)C+Izwfon0b*r=0du#QWDRLoRTG(H%*Q2NJ#Fiq$m~+851xlgyG~o9t>hhGGC+@5xpj_nioy|#TQW*(&@U8idoeW z*M$P%VsRA`OtX~>} zL1{f6%49t82|sVwJ)WtlsF5luZ>OP=3bQKH5LQUUdiyFyl9D`-5fZ8pgK>sJsZ{*O zOzb~IL}Y{!A|Vq(Arx}U=XCnM*I8z3KmM>-JRXNMD7NRnOj=VKhKyvfAI?;(b@BH)6Ly(!rJcQ7A#s8Wq9%dZl zKvfg!@CiZ=o&&?oidah>H60{ew9w%~ZlF7d0(P)V$U)`tL!h`}hXW%lue^ao-WpX# zNw!6I8l?C zC~9hCR;gN4NYu;&Ils!yTwmPgPfSS6HbO=$PT}%;*lR{jt!PO}H`{ri`8045vBE(i zik_Ov%II;(j{NXbq?*v0%4Rm3FNlgJJWP=@(mE z;^0Dri?bH98EM)$E82v**A_EW&MXQ!&3t}&2>FZN%jaWqdy}^EXWt#p%#4tTCB(rn z>D6bKc;jyWDbF)X9I{-lS5S(IYSnSE*^IUol}bGcmHM#h`OC1VAq@&eA0O|K5}ycPX9R1)U)EXd(a z$;eF2%tI$cd2nVw(NCxSp=hzo=3V1?{Z2S+Dsr<}B)NwarD6|TjC}IyL*?{0{u#x? zYb54zM!XU4=QsJD(~prXE$`kPJL&E>F*0&LPRFIqok&& zxXl!k$!4P>(%3WJm==qSjGj%{ci7?Eb#0gHjp{Igy};qokcL#GA|2^SNk}FXr9~^c zj*C-PY|s5i9T)LSre$o`TwQh5WjoZIsU~NR4$_$~LH9eK_c1RyD`~oVs;D9; zRg|jO$ULLdcJVh{lU0000006;)wW;6m6cT)7C(^R?Bqv(f@na7nEMaM4WBrbm_x^&@Y zbM-;di3>4<%l?T@T<98Ha!+(ZKlk9q?nEa#gqS(%oap;&!fkT20m6gETjZ;4J^_ZeK{N_3*H9-Av7rbG|zR2Lmv?7Siu znT!642f|!LT=YPqLBu6O*K+M^`?0!3HI44s8_PkTIU;b*Vrq1_xHagbRik@R@igEv8(7{iY30oJHl)rG9pCEsu1rL zR04A+jV|ERN=l9-DJ66GR->!-tfH&>49Yj97b<_AL`5fuvro;^Ln!B1Su4WgqJ4() zdeXpri1H(gPM{N?Hfwv(w>gyJpVC9p!q~&f;^+s;f*D`I^D&W0bY+hak z+e7zS@;}{^ZHW62sItp595lO9^|=jtTWZ;EiMZ;;JnJv{Qyg!H2&mHg=yqfHuUPHM zMFWLp0W8Z6n3j{}m!V$2g+faiL zu*>|B%Su1MK8uPJoyCG55cM&S>hrOkMZd7;^`@_o$4(G_OspsIaO~7dH`_XnLso5D z2_x61Rm48|$j26<7_FinuH#0mqHX9}K;(>zhvZaM3klaZtHAS>PgNwqMUThNfu%*y zqt8*!T%4?;Cf3f=tD--`Iw!~~BBCo+zl0nF5!ECPP)iiKs)GYLEjnCGjV9!(Hcluk zzHMQ|`c@@y#t~PARl;{sOtmUGCy8?Ys}d$e4%Dhdl^f!;S0#uN&1Y4T!xnK(uS!}H zITEXqI6=hCu1csC-G)`k6>Y>}S0xc%)UT2jtYRrFfSi!x9|={x@;-5{ZB;PMc;DovQG395se&UZ@g=MA@H6>Qs)o(5b3TyC{!IRf(ZW 
zlM)qTFbj%;NfCcQeO{@eT7llb0P9g;MXk6DKxt3pgneCXZYEW=jth-cRpyFm($GSG z3ZzOrP9jltRLOlkR1~v=VeeZJ>7we1p1Gn?R3Ywm38gie+FV2xah^;jXh@Cuo?!`Rfvuv)Y_849vJIWOJ@bb-EarbBV6=b zY*>>C-6BXtD^;Qba&oHR3vGd%gImr&vJO9&+(Tyayy_g9kf{xa{W~M`2p5HIG>s{? z+5;$Vb1Y6u?rdTK09mvb2uiwiD#q{ymLhgWPsB$`iBTv_eXunAi4zi=^sxBn0>e!e zqiTdFqJ?Q_LbF>6lN@zYm4=ya-i$J~*ZsiI-xS~;N0?>uq@sLNz?oROB3y!wc;GY3xjHQ>BU7?K7a;iCE{IFQbq2=^}z z6Ec>rfz|FQ0=P!6wE=&ag*7SY%C9k!SIBt6eKDTT4XjDVURR`zCFW3&r$1v4RPRIR z{4|2Mplu059*VgJ^#wNZ!@}rFyp4Mzh(?dnjKUYAc`2U$2^aorQN;q(4KaodwbqzM zIRJ5Rg5Im2TC`X;a|8%JvCq)*N<;&uU=Z+NZM2NskrV;POaKETDXa8QO|2YmQd zcbWG+2YYO5#S^4ol3pmXJvmh(9r#-=JM&O=QR{wfrA`G&&<=9m3A^7W_2QDr#5sQ3 zD>-`l8o9F2g|5e$v}p3{r>?^}KB!|C2b1?v5)E<-?1I2}t4wM0L2jpx#|IuGqbf!Y zz=TXawLw%|L(|bYg6O6DFpn^?BOy$xc>X?z*K!|zB zR7@7`L5^9$q_@}L1&Jr=O>U_5k=SpcG7@n&IJhsSI<*C#)kaBgY9 zeOy9E0!vLt2DHZtr)>Ef{VQ{ZS6kp$9=I4_hr8_8C1-wXTSMiXyvZJ_=@Hx>7{D42nd6X zC{#>ELxY48HChCO!3{JDieVj%_j;oqa35TdR?aILVP*U6%o&3XFs1YmKr8Xw0=Tgq zLUX8k;%zB#bgOe)Fkq%Vs#huR=5`^I{MNi6+Bq&8z{Ux@NP^saT4CG3rGXUT2-uG^ zOJ$o}mu09Oe9|Z;bJ5!EocXq3;dn6l{vZj6fhCAthso@n{UK5m^Mon0*S|-@*mCty zcgj3rrf*x4{4yE%P94eb)I2~oS!JQa1zXbTnxg>+PLR7Y6;NyG*7S%M#5S@;vO$oL z=kRtqdS`is{me=A3cUNL?Y)K4(5bS8(%F|?+#XA73hrAJ-eoUcvz^7fjZ4Hle;2xw z3*%+vE`=SOYsTD%G(-@SFT|Y#_-W$7X&9Es$q(QDrQq|l%0GpCzBUY{_9JBt4z5WH zmYAA(;9uJ<5Ns!aj=*v(_5Dt^#B&~fkh^(MMv3j00Vy2I3DxkGB21CIo5SpEgG(R3 zXq;>2X~x;x#}@p(AXp$24qiMQq}rgFg!tYAzVxHs3#0P#fJH^(8w>iFSfl<=>_d<_ zRyCoMcNyKI*uBlA&@2qby)Cv^iwV~^fN#FblGoCl!Gv)!Cw;(D-rNfwNDy)NSzP`14Qnu_@Yl6E_K-+G-AapT3n8#G-Fan$V05lGWv4}^wlpitP zqu79i#?+Gw#^i%*Pd2fxStdkG^e#+jPa8wRfHdAmG*`F650H_Fw?K!8GoX6}P2z*9 zL$OHf_B=Hy=t_XDm0-!{udvl3k)Ep|7eKDHyeYG#gO@b3s?eHS+y|8yp##*UWRxNB zW_^31cmB6~e{S&6#BYLSubUZ7DR1UUD-@F~Ur1aGHxk_@&wK7NeWg;?<^K&bUlrVmAg3JmDrK>rPWxi4!>IkYP zBeOt-YgrHg5{X$I{C#Ig#<4&uc!l2!Vx$ehdyvgSiHB*Yx6ZRsc;DEn-+F;TG)a1z zK?$>u+3bsc$$!BkwhU~3ckx@uP6&gmpQGxZSm_v}u5NfmD0|Kw%!^V$T*DXdvQMRS zo7S#=a#EI8Mv~%sj4Yetg=_>BSrLRsu@g?51|w$57=QaT4M6c;SdeP~rbGcM>BWaR 
zor>QnaN*Gq>|_zR;Jqvoe+0KKl0>Y#B6GD^({$c(K&O9X`Iq*vr3sfPu5yTjbv={L z6p?BmF=AS)C)g_{oqjC)E}8A2>h0`YcBUIa3$Dksk!icuo@lwRI{OGrHk=sFRyFkT zC%u%ov~7x?m~|Q(Wy$BJr7MDRG?`abGHCcY-|Wy?gQi8B!%GQ0^q2i zoS@Fe=z?hllov;%R=pgfsHLyqlgwD{qjGQmvlaknU35j=A>+X@cc7c!4gG+xm-T#hzfG@EgHwhp{ zv$6}6+INz5eJe9!x4c(uiStX09(+!&ri-M@vbjfr>{71BlmZYLNSKyd+Z@&WAbk7} z6#f(IplSJQAu@$1<><_v`7s$RB#_$Q4P`>uj@$K4nbV8FN3@+q(tLv@XEFTt8+~Z? zvLvVPEb)`KQeZ>9WY+RZWM{p_46Ms=!<5kpmk$eoTPA33%xH*Bpyo6PD$xY`rn z5*)o!%Rd)Nu}^tDgM&-NYNF06HWiZrQSIPscaYDQ%jFPo-wylNR|Jnv1Gc5(itxLF zLip`5PA1N1s-KhBwDzxrdgU;-F3quuFekJA^Y}AYDigshy|w!n z-*W`9j9LqRaKONl_m&GbD|f{poPtY~oOd3VPve@B(u$&Gv}F}nxzjLFZ(04lWwf%R z9|Ee%QG|;^+7i47xD_18Q}A45b>(Ln5N3#!U6(^L1Ya+WOHmC#-71fT+A<{Wk|570 z8RP%-c9cR}d*qw{WW~(#czvA)VsW@Hka5A0P!cXjB=GamC!}9BL#HmLwaJfhgv*R`Be0r6I@-j)$3L%;Hy;H;-wYw~$7F z2oIPp_|2R%x^>{-!_WrzN$Rv>f{dT~p7!74m=s<6p*aI46k%f~6h`ihm7lER2dY`7 zrFcguU%>GS39$(n#CfB(^V^ZxQ$U|FUf{5c7j@mvp2!X#M)&(f?PRNe*B4P+PgP{h zsl*mm@eb1H+Fq!9*pmg=!d4b0Lv4G={o-enbVD#mB$np4?E#BHh8u^Le%vk*El*If zcf%&yifoll7gIqgvvIfz#&OB}fYmD+9b<4-q=M0+_O|eukjhq8oN_K9gtesFNrxt* z_(86_Y5@r)?H6Jo2oOnEL3NgX_4E#0w7M)8}VysfNufebYbp$h;)~Ox!P=bUvfj#=% zEwlXkK!DJ67vJw0zjW~pVX44+Au$6ZmDP(Hf7b}we|mPb)x6kciL3}v>D_(r`&wdwX6i$rY}%E}*+ z_a$#%Xfpp&z?4??o1$_ARZe71q^j?`$HKyBgLt-UyV7mRiW);xhCkH&X&ROV0#d1{ zftuOi`t8_|?*d$@uj4;*up)}69-#kmqU#I1JddRH%zHxk!So*1A^;fv=dRwxDQaunQo)H;bORO zNg+$~%Gu8KrqoU#+t=s;)sdp+irrLFuP08+8qFnMzj2}?E^MMD%m$eZgl+QA3N_S>qa4`@jY=o0vZlW)9w@5eo@8(Pz2IW zh5MO|9>p{yn1Vonp(0Z82ZRTZlaTDAy(TuvcBG*>H@I(7j%`iXZ*&u@6@F@^xc%a0 z{eNt{Pu$IpVRCGYkf`vV9`Og)$kS(w%q#rk4TBpYiOCy|LVcJKQrkHZN+aWP8m>Hax{i9!w%@ zgEBTH4h$#NzO}>QL@ub7KPAj!t7m|pj-6nZhR0nfSxm-QyeI$%DfjD;{cSG(DFfv% zvvEhEj*FWkt~l$RhA736AWm>AsEJf9{|dQMDwqy*R+mBcjzsuB$5E2m@8h(I(B4$x z>v~Gd+N}ijIb}z@F-K$}53fT&-33QLCEx!n)`4iE@Z8ZBF8 zOeH2sj^Qx10%Yv~LMGAgsUai*?V1AoeMY0(8X!-w{Br$I}UjWLle1+-hAQ#B2{LA?z@S%^+(~ zq)retRM80MGH5l294V@k9SCDsj$+3$R6w^&ynqxIS7#PhE}B;cEoD)5AdUz8$fp>g 
zN^wu?5z2}VXStQKp-*=1E8@o?r3P_v)hvB*EEf~-Bq#@BN%n;>*9Vsw2R;Hq5`MOI zTFIf0)G-@OI^O5wV0cvg81a5i0>2yl_IAwP+)6bK;W06PSp*7$0XOyB7p{7Por4A? zx2^&bEz++z`rO-Y7^XpMH#iPfH1mbAuHHn*!Nl5el!Xw`?nTl1X-9rZs^ap3krA?B zl1)YELVV+pUX>`3o$>cCI;JQWQTuGGv&_yysBQtlN(O9?Og#l|v?;J^D933l8^J+@ z$x{K1SsCD@nq`q5%OY8}-a))?XCoOGkVssr6j+FDAiwN1Gbvqg82S8#=LH^9SjH%R z8EDDByHYu$_UGj1*#Su`P_hiN!9|o$YmMYVY5ZMH#;XG4h>*U1rMEoQn9r2iOLlj9 zY%`oNSZ_1*n01mP?;B~gi?q0We~MNIJA1L9j4P`O@RNpG{T|d}O)IbUt~b+KUYJah zU4it3XpDMiwNZ{W{_<*~;UOh#;RiWZ3gkJj_hu&qx3+#90VE+06jC6SAaVmjvyU?>dc6eY~A7?lNDx zT?W%4s}@?Fr@-2g+Z}9qkI1N{g|-t z5V89d2fHrZEc*Rrq%@{CeUkv$dL%ONkZ{w#+%N#EKznEsQ4MP!a8Kv`_kH|rA+OBW7pQ+ol`%`YT91eMlG_J||wa7`rESTr>P5zQec^Q6i> z5K}TQYWk{2j1T{=2;D;IsCxvJL<1ViZc*47NKu5{q!QO8 zR4lanJIa@!yAl?bg(HGU=y~9!+!ytSiFse7Xma6$8nSgBWMyNT zjas{N_y7u{&FTf}1djJbYH^C1jZt1lfwv)dD|m})>AVPDK<0WG6dtz-%X6Q&>W#yL zLG17Lwv+oez5=4S758t$w}+{+Ezan(`;Bobq`T-0?fyue(L1i}J(8$1I^3cJ8aLWt z@mr1>&d{-5@~pRcekTdXp%UqLsQ|Frha!NTW8gEb)@jacaHZp4466S z+q6esQJhu02hx~3LKR|=()SHK*sU0k^yychM(O$vRgFfn1k>(;%&1a(JEq%8=F`Wr z=`YnU?pn3z11UUlwA^n+Cz_5~W};CP@qtE9NCzQ!N}q*S9`e-1_Z(%p3~DjQHjqJa zTsDum%LKCR95VVp3;#R4_+s8a@*l{t@kQZ#etz`~zkRhuKZqgWgSJ1x!g81I0l( z)Ic~9G7Mmg6M%B$%qmCGL2CSwV|$z_G1J%uGc(7`h_-614sx^-l%267bXjnRfB%FT z^TWxP&aMNR^`?kc=t=@sPATX^>hi}MZC$AzvS%D%*Rd_Xi>llYrJ$y(bsXlU^%S&Y zHQph7rG4?nFSR=%vfgKjopeVaA_lhL|ZN>gC>6;Ij=C$C^`D!a2i|0V+%lvQM-p+07I8wdb+~>`FBZFCV0nM zuPr4FqbS%hK5r;hG3v-N`kt}xg1@lRcy_V_$tslw<~^a*2uY!HScNhHJIE$cA5G!H zAoytc%O|dz(t>e{qcC_5fX9sD2Zo%GTC=19nq>-U^9&en$f2Twk5+WslVfL=Luy+C zxNL=dTf!_Sj>GXre>!~U>mq1xu;?zj#x<@8(#Q_R7Z*i>ir%_>xHy?(c$us=m}S;S z&qz<*@?|T4>}y4Wovy3OBl?+fE4^hd1o}fVIpNE<-q3q&jfBro23S>v6oV5aeeNC` z9B-l?4aRMyhLHS-vyJvT|GES&~^WOI2j64LO(swvoYgW(1wZ8+7 zlNZx(2#lPbnhdp8DM{IdTbff9pH(=#sf~@8g-$-7larlNiJRdJba>@$lODG9N0*26 z-w&7;^GNfS0d6;iR4lf{3Vt+@I!uB(F7gDaeG&{8bw0+fLYX_ljVhZww)94O1&~v7 zxm+`l@)FSqbU!pfn5hxj{0U>SpBNokmnUv~(!$Di}tpGwf19cNTpm_Yc zX#je@qw>Vfhw_n?IOi&8>po=w*&Qwcq?i>C%u#V 
zx`US3S~5zeK9hkB=WXqyQBodWTWHjm;M-;U4vqbrQuHv0t+h2u2<(jNdEENy5he%_ALEcoHg#pS|0msT^V;wilH})pT;2c8XbD};MV)w;g&mCOdTPH&aJdWIe z=|0MNgtg--X`wGZNQ=;FWI70UCo{mcyPrp7&rM_GPy3z7(QZoLke!A%HXCX%sF1F zGx#Q{hqbL$iLzYZ$&CEgU zu3X5_#d0ebsz0c#i{oQ(ulYoPmp6<~C)nMHtc7{#4aXApfO{+d<`*32C8J8bh?8-s zZ|<9CXNBG$69kEZyZLW&8|}d=Kz#56L#^+m^=xjB{3Q?MbkWx-6rXI*8QK^)!O|CZ zzjZ>(_7eS4g7VRIv6D{+9ej2un2aS;T+0>k-YTMVw2r;_an;nWnb zL^~NsKmh1(hu+lD-6{cgwr8qZ8D_$U<7EKga_xA?n+K`6j2 z0ukC}1?Ig|y6mV&7sQm^@tq#(vGS^D>D{U{Z&JeU*gK~il9l!UgF6I(?WtW#Cr#f= zfV_@d*~+GDq6Et0@;@~pG$x@^Zh9cByd?|AKtx&neG$#vLAFIX?>#xJIu+6?VJ z2a!gn-F{9$7%e@Qcmo!h+OvNnIq`NU-n~snfbM@}Dp-QW5|E%E`>k?_J=p$1@6R>x zK0Y~b;M23mS2#e|7@M^1K8Q4m9_5DtN9eFoeTn~sLQROY7&v8dSciqm;y_G(5NWI^ zY)oAr5_`^@ zARgLp6*2XqD{n#QAkyeKVl%Ygtr;0BV_Qih^R4^&tI`L^KBg)(gGggdLLeJnXaz=T zFRh8A7?X{{-v0CR*x_%R93aaTe|vMhvh0EkfG0mE=Z+RBN<9t>-JE~;;QT=k&KS4> z23tccfh`A-21Ya*!?NL-0AW)aO)1ua4l2WAoPWGiCz=N!fjc!l*ARt>Xx9K zpSb{UE~XK81AyHW5@<4f_(7zhG_9%7A|s#Es)O|HF1IfS^XecP9^uMkL#%`xwVylD zGMQMv`~?AjLFJmz80wp`AlLHFw&<4(B8~2H>_5}$obTg9-tb=J8I-56UZWXA8dS8reP^!i$gzaZ%RVuoBgSJpYV0GMCVm>=qMfLI^;s`2GF=uw>09y#*gT0*3FJsQ08WXNFy5Nrq{P9 zh9ZuE@e)rS(dKDFm2FLCE@>OYEseNuxZCqU&cc18q{z250rw4Jh$6@g5kGs!FtnzT z3F2M7KN3Q7HoF$yw{H}6%Y}E3gbB7Ym>D>*`GQp;!(2@_nR>ZtQPn+#l*sNc1^HJa zsEB`%&`0kZw{$T6o{B5DBMK4o7HMD^r)4^UHg#~#{P0?n=>05zTT$Rc3`4oj$bN?= z=q+#1W%#US2JT1aJIJsy)NE(T;qeP`DK#wYY+mwT{11=U3(=vWNlSbU7y4_%HA!}5 z&S68DIBuAsoFAjj5A0&#J^-eAp{GiLe72n%to||3`v$>7iWo#A?XsOmhq)%>66*~V za#P$8v}N?Z(WgXINc^(fzPj79efry}JNhXjd(m}D%-O*HtaIUV^u9qB!0?0+An?cF zprGnIp1NNlpMb2Lh$vXl`$oU56j^+n?@W;3$qTo35uqXhT$4rG0Ak?e#FE8wRb4vR4i#@ZU*(b|_8{UN1Qnhocy4G7`Pao`(bZCZuik%*V}q1$`h zs#|6NC?43cB4aC)6giaV1xs}#umo+sKNK_>_$|D6-_c?#T5n-S~%W}s39_=6wEQ<*@hyB z{Yo(dB9N2if!3nHead8fwP!sK`XMso1k?_EMdwKhh~R_JK;s0aU&>SH`RHH3Y^?KV zRm0RM{5_D4spt@yLB(@PSv_tfrsSL(e9(K5h|F{=2OtGDswJo8Au{6%%B-W<|2!`R z=>`dz%UucIZUXC4Zv$fwf#MLEu|?QZq+Yblh5MjHh>I*H9flX3-z?_5xS(&FM=xLS7RGpteeBhY_mS zi{QZJAur9a|G{nLAAS9}2(jt(iXm6BsG$>Bc2ceD~eewN<1{Ryv 
zUR|I+7db=Y@-t>-?FVN>^KzOT#fSHW(j$tEi?h-9gQ#fYf8Y;>#|CUYCvlZ1?y?ur zIkTA2Q0@!nF`O|j&W5O%7yJ@gac~@J3fsDR{wL##o4$w znU4tn#>*REfs3GIDG}jrYR|Xkpi~0wj=fq5G+t=9I2(7*)5CdMPAw}5UISlkAs^t2 z75;wd`6qC3J0yDi2j>r7%?jt`frQq$A;Y5sWuU=dx@gEP0bZO9>U5hk9cFG_M2~HO zL@o2Iv2O9pkNt&vOo1~}a;Lj5DRHi_yxijs2n+M9;TqLsak&{ICM`AI&z@q2SD-1d z!vHw(`T(nk6D6TV>Fpk2X^;aUW8y4ydIUHXs+l9GLWnHPv&QxL2XVe#r9#PH#$1jl zT-%AU%(YseeinKDOp*~}QVU*go;5Vom~in$U}QumvBf0FCSaj5ZYss`=2=7BdDiG) z=$$K5A|2O_QXC8}Pjvmh-qazBkHRS6-3H1Zqh{V6;vbeufgfN~YAt z9WmsralDrcU*EY%?uB~5hJTW@2)X6;L@f10jd5RLbOxdqYkD3Q9PoyqBRycluTZ3o zI_X%Q?d0O#m8GH%qcuNe$4Aj$&pvpDBJM4?TeLe^OL<_bbilVy6zCmnqz7yiYI-ll zdBQcIc9t|DyMWSUuqmhE9nRO=JE~1723x-eY#`bX*jP~Onhi$hIp*YLJX4t*y8A(} zi{FfvIq*M4FM^j;WyhPcr&eLg*jjD|T>{xcVRhNMX8FIY&=YoqI_i*Oj{IQ*T5Qlf z)Q!Q@q3$?BugLsqdl~+Ub&Jzd_ISRwq$sL`&-Vd{CM%&gS118QF# zZ4(p-04QYBMFxhdje=6OuGw^&lR5HtL4YDiA!x7-ok%zv;I4k@trIDI!jOiJR*)~s zde2#`8J8!>7tfOMa#i1=Kj+-gvuawf+X|g+;?1Byz|#RCdg8`h8i1$}B(XMtS(k;v zpdm&;bkATA)}ygNpEOo5a;YJN=w>j^I4Pr-A$1}nlv4w`jXJ0RO!aY}yjlim>+;1W z)0!qu522Zh=#x+iR}CsrI{9A_Rx-@%z7V{3+QuD{YG zG~o$@39m$4+hVrRLZltM($O};o&m+pO6^fjlBArD} zGcEf(szW2M8&Vv)oa9j_B5&nC@v*FEZjkQ(c)!6q8e4fwIqailb20PgqAjmlX^tF3 zov(GHjC`dIsoJN*q;ryy+{b6Ubd>3aRFI%GNLpC?=|^*UpWk-lb7D60%?8WPR=(LT z|9GRW$irH+6sN{?pQ=b~b@Q;hPQ_4F#JZ;Svq2iFF-H#@(vMQN(S39e!epxQZaXl@ zMRdjkLt4B2!07zI7@8g!H37!+LUHpQLN4>7TvJ0#<2_g+%D%eHn@7N)7k4y+u;PnK zxcCIDjHm{29>ObGQ*=V%sE$rm9-&aSN;M!=HbSAM%+8_SW*mo#SQjc{EoGM)gxQO{ za&VPSQkEJ-$w*nEQ7_tkMm>m6jR#GBFh`u3>*yTmYTs{GmpPzS`7|;b_GXTr263TX zW&3*vE-JaKW)LEz&_zQi3_zo#X57>Wi$@=a*^sJ3<(v7?^vXfpJ>HVsSJIkiY)T!S zA#yMGGZG3_J1P+gxeoq{(N2Vf0u7$VrZqb|q41Zgb3(y`{_Rt4bFAeY zQ}KgHieT)7Lv9c^n{gBwS(=V|nJmRJnOp~FucA*90}P@_#(d#=NJLDew6bWQo-Y;a zYksEqofq=^?!LRvBD~y3d0m~)(a#5QCN?vHKvaF6uOpY658^hrU%ow7BdgVDWU-R< z!~`&(`4;opu57z1+Y#Bg#$=;w+A0>BTP%Pp-#X|fixp_iR1u46s>J$?E0vgmvq_x? 
zaif|}QY9HG8qy$Ws7k83W2HM9z4c=;YFpQ|TAFELXe@S7HV6vo8I3wSx@g8jG!4-m z;+SKenalikvCZ&I!&Dt6TY015V8y&pd}0&7>`cT_EnbYn#?Y%UG8eGg46RaIwIr+KP!9Y2Vkja|)z1;UZoAkI55i!wvNz!W67EmIU@eSG87TxB(< z`Lsi%OLbZSE0NV}*C4J`Qht?6-l4Uh5>zUUT7@Xk3;DE`# zE!5k0`{eADfH~Rm@3^}=y1SCzd|R)ymnPRaj`2!IH5t`aGr)%IQ>|1lm6(vnSVe1Z zOyn^Y4bi~F{QCbduCWQxLuXSen^rqw)tWUe)|@p`3Z`Des*bsvBbqCZi~%M$$r8zv z6k0HJk|eJEU~tfv(jMK|vPHmvre@U7PU=#2awpf5-1%ZwgJucjG&-KYptjQ8kR8de;@6Kb9==)Wy&hX?6bpn=8`BiuseqO_PxIL z_`Qo3&5WX4ANe>lC_msw1GfnZ9N=G}PU+dnQVyrh&$*SM*e`wJ7!>Z*+JFMpv6iFFr9Vs&_i!_6Em*R+3ZekwVf4WI;o$sOzJJH0f?Es<2;DZ5Bi|no z=u{0{iJsTvQ{-sy5)sEd3kv>EEWXe9i7zseMCKqI#pMHtBjQz`yn|up1!XQt&13&<(${NRwC!tohvY!QcHrBwq^D8#niAKuZVrJeT?N-3FP&{u?TG+YxKi@>dS z>EToXaa~Ti*n=u<$meo6VPUX+lGiE?zV?h`PFo24)`ev(8$x?OmYv2_wzOHfnl3up z)7&A&JQCW$evD{Ur-o_$VcxD=LK@W|AhZMlII_0lqNSO3U`r(S0K8E$ZYLAg zhw;8T4VFlXp{*78x!Zz>qH3QKD(!Xvy95C_vN>@jQ>%8XDk`fn3{U0ORQ{?kHU>49 zs0{9mT?ZR^0+#|zknoX6_0e3m?~+grb{6vWQugSVj||&&Pl_?BW!3zr?J9-5Y5-;1 z{@AT>c=)xy&K>$vqWVe$^8gjd$Mr{qks7>BeqF7D`$F(wM-!#we}N4ZeCk+)iYJ%Z zbX>9AREYunmPTvaC&C@dRC9M#%5+I5+Ik8`YT$sP4+eK=G34kbHVsz6y1Wlpiv=9) ze2-#PA&caM^5ZFqD(_D1PQWE!2yB<&0LRu+J)A7n5y)iyPXV_SIe6FCYWQ$)@qQ0YG$p9gz-IP8iecF`FiX($@u8 zOK_-T3n?C4mgxjvUAf51c&hOzk(~lNds4mlso={tMG=Q`Ha$1oa_sKwkIyH)V4xz> zLPGb{qB#f0T54H7H|bJt-awk-liM^%g2|8ReS#~wyBE+TieQ>AlM4M^q1i}+)E-uG99Sdkdc4#&0AhCdawrjdKk5;uyJ*SH;L6y zNE3-;@$K$+%pHawWq*Y#&13P?1!)c`;EVm3jtaC(`&XrSKzuOTfh&=bG^`sf>_-6K zAi{(g$wd#Y&|+V!zp-`C6-j1UvYEbyV`%>cV6INaf0FfM(DU!=7w;zCDWMVjhYP2p zV-tvn12C5Z0?sQf(tJVeBUK17z$b5hztX(8+wkZF_ z49v=9KuG(Uc9h^6leWjhe;houh0*T19>@@`5b3*=K2>n+xQ@F%e=MQ^fsnHc6j2as zB8VL`q97WWK&zW|K<`QO$95RU(gwgmI%Om*?2Q7Iz!+b!7fkSmwAL7s?Dp~ z{T_V%jYTK!Yg2^ihh;@wNOr@~4Yx5uZm3ghZ9ibAD*}`t1+5T4nJ@V*U4{tc3%&|($fAZW`1QhRqwXoX|q9r4xSTJb>$6vRP8q_^P; z5cre<(0oKG+;n8bqQ(9!;*$&`my%Uq_5B<2l=vGs)kC4^wuN6WRKnCC{;UdYT`VX)%`+4Jz< z?tLzdo1ru5IY}eghQPW|2x7u54goU-PM0uJ$ww6a5>;+=4=KMa8uFx3k%L5E5GyUOno={G zpxQR$Ol+R~rL#+Zj^WHHv(eqlPD?3O4D1D)SGrA9s;0qyB=DN4Y-X_EkgresM*A2P 
z4~mrJX-+KR0knlO^^!ZC8N6O2<|#JOD|sUbO-5m&DO>m3kEZWrrFy>DXT0Z}^2DGp z3urhlwK_#n)mbrx9(_XJLevury(}DcP#~2NKG>6f<2c$g39hlv5-eD}QFX4@%==Kc z1Wa|JKMIwTPbAM-AEE0l8SU142Ao2NDe=BR-cM5AT)(gx9dX||QJ4ao;W(uApAcik zebEh#0%t8(QyP`Vg3JD_`@O1Wjk2e%^?q4LY`tZUo>mJF>j`S{y?H)-2i|E!2b&Nm?;SS=(ocK?1!P3LgJBJSRW2 zeUg}Az-|*VP`zPEe!+B0Wkk;e=)7*IYQtOBuL}XQ%j)sL0G| z)65uhRCsM4Og2X2j6t5FpNWA8qr=r6I9Lo_IpVk{0NJK@Sjcg|Q^sm)jWGzX;}?9@ zjv3j+bfw*j)v7nqF2cuG;geqx@yEoT6V2Z59FRH_=*LnDve4KXpU~u!!12j9VHZiK z=|=I{R@GJHV?I|%w8lYRf3MPSl7=B6b$53UMHwLfIa`z701^UH2j`crUh{}VWg^4t zIMvhW9+;P)_<}#d9e`yzO;fhGHneU{W(b(nqe*=x-pk|)-FY_%m{<$<{1K(YL%uB< z=7MqrxqhHY;Te1VB5FWuvVbNb;W;!BJ>&Q(oomwy9s{(OJ`gSMpRPo;ck30xI;7B(?0(i0KBTd5yFnh6bdy6zM@*TdPVN zb4pY=Q=iZ^?g}_$go(3>FlFw-rN!lm+5M+R?<-o1rL~-gKdDtK>4a$jNxB5huO)Tn z#*dXEH;Es{$2WdgjqJ)5yD9yNolFQZiZyr~T8A{hco{jGRysv0M_B6;CN$j_t1C0;UHwR9|#%v(&JZACYmnyJ{PfGKllFzO2-ss(Y*wTo-h2 z&s%FI!dfQ9ry?dE!l_!*?Pwb2SI`3?rYV#sd%Xh!d1$wZw-=(Fw&T!*g~&hGg)p_+ z)*CNeIFoCA)0K8T7A|v0Ni?+XP;$jGulf!)Wp^}H^(@!p3ZEMKVtnNzS+{dn5&6V~ zQU9}m=_NP;6MonVI;(|{UQtk38cy3qs?mCwLkF^5u*==%i97G01zKGl?>npyYEcn4pSD#32{v=$xK6+ zKnv(8TmXQaR3YD+nHJy~^`@u8xkj_8lvI(xwdS0|1U5v!XiV+D0uF`pg$1)CCa8Xn zf#-Hl!?;R_V^qQd^TT}-yBkz|w!BJfyIijIcFrk;Hu7;pyqw3wP7I@{&;Edyz5@jt zDk`7sjU55gc4@|tT4~5Q1kvn~FGOuAaV?mdlue}X-fXJYn#hz#ZH{*4z~6LuS!4&x z7|N%HV^Be2loB3sKmG<-lmXVl@5Y&e5`vN60LvO$U4(%ouZCP2g77GDq=uwYQNJIm zfV|gPl10s3D@)?Ufs1FjYL0f+swJ`9h6*nmr7bZ&k4HdFlgi3Ifwdy5=HgGUrB5>N zur=F>JTKbSy2i^CYqPq;P-J>d1J3=i^6+Ilv{`<$#l9_~-lNz%p^#YDwY6!;wyHzK zF@ad9pVkZ#BU_2JnM-ffTgY>MmBrv3kQt`(HK(>@u?Oh-JR2S0zWZ@I?QL@gv;QCm z{hqaV^EVFE_KTpk1^^r!eDb!<5qv& zJwuumT2n}VbPuvykLC2?Ss_^MdsiKc_i%lZocvUd)hZjxpch(k*?Lqcv~iB<2RE^J zIcoKOB}&G?o5dtz;0E9S)X{1q6bolUpDNvZsD8L_D>& zIe7tl;Xd;Csh70f|7hf(=q(JSNJR0!Nf9gUQozT}%DhlSS#@8tmUJ~dst_H;dhi8X zAkn_6gg4arePJz6Eqs`lSEr76S5>dy;O9)323-;r6v8ud>;XDlkcp6p)Y@C&l$Vwc zvDU^BX_`mhI5_QCuz%od!Uo6ytuUhQYU-QO3Jp29IOk*C)G1Pe3WE{$)n#13Xm!(6 zpn~`}QNUFnRgHW@o)bC%$WJfYi6;a>K&=V4&Qm1Pm> 
zemGBR>i`zvJNb3mAL7Tu!5sVcQ8XbX#@u#5DZklaq8*ZK@IlipTgCI4O9!}d={39e zlMdoyk^p#rlzKnDG)ma_*dqbYt(kLe8adokU#UPQ$PV?YQ(7y`H8>>ff1|jNium?@t5j z>zN&PQ;L|PeW3TsdbbdQ)w>*@kM$w)S$YEd8PD2`0xxa#itbhTs|)&GD<*pKB!0$K zbwTsCbFstj;SSfSUa?$-3F_%l+2mLRJv`43}0H7>1x;- z-gFcpu{v#?xZSE*(XE2s;C5bMD1>RfR%e(7%Y`1nS^LRHprHIQPWHvRmUh=qpCKgIvu@l#5oc+khxmF~7$ z_{ zT!QZLm|<0g$*HrHig?AvRep+j z;bEG(7Xws%au8=+q~eiqC*^a>T7l5h1!51OmR>UL^-kbs&WW+KiUpg<8L}wrnKZKb zHNz&JSUm$~Tv5QwB&isuw|NHW6qY5VQNfsKtP& z+(StDGrdl$*sS*KlU9SmDGBFidH~!}MK^jlJ}zxDeGi$6^5XZX!o<&kRWjI6WFtWH zOFuEUL5qbGn5bgaqN0jILWl}3hMS#rZ2mn8{kYwZDiD-6gBl@m&>QxN zP+*TAvjCU_nH+5i2B9vJvx6ri?@kZPkFg1s5PQW_%d3Q?7ji0Mb!qfxPMfm&X>ckc zapxZ#Jj#N8Mk%YIRi`2?} zUG@4P#|L#Ev5E^aDLPxlPHfKrI)FW&k7X*`T!pRHx$y*uoK=~>u&vqoZV&j?4GR{X1?-kT-&veu5;hoTYGE6O5zTc zR3p`FNTq6KQZuQU6~nGpRyS7Xx>JxqjDOXpST;pf3?o)m6-`x`Z6vL`j zbfz|7(Erk+&Uj#eVS$2MVQS-m1#-*AVmB@ZIjpmsX*n9w=t`p_8eNgD~8-M7nT9Nq8D)>MV>ggc%+kYf&rl>P4r|%i!o$7k7wrD@yxvt zajqCPCude;tR}n{!!vB>It5a#ATyfEnawRwfXv>pfSlMU#tJYX1~8oEOzy=PxgojR zmD%090Rt(qtIW4=vDm0pw<@!l+X4{+Q<>^zG5e^-TvSEXvdcwf94Z3_79bD@1q}=o zU_fZVz`%h31qdJra3?EUf$qgP^Vlzj)VG0Pj4{R-Y>jPNG0NNV^;I$GJQi;(Hvfq1 z&aGOlV$c|4i~|4;=Cvwal@F&C$W=VL$_x(-$;#Bp3=b5IK_09*QkHd zt3s9i&(f{zf5MZW(DD=hRfV{LMI?Bu^TB{9^7J4HDk(xB5C|g>48K$2U^UDU5ImU> zjV6(B6)UgK*|TyfR^DO%nSQ3nPk)=N!7$puy5OMc}B_tpq z4wWDp&M=63LV`x5#K97CIXZ5;wM%NE)MvU$FX{bnzvP$v4g~irf7H&0 zQ~KTL(w&vgR=O8?REatq?gLrE6A}=bI6~_OBJgA%Y7i$_A|V0cCnO<3XB;CS4nm?F z=&P~nq*o@rG3n8+M^<%WW>;>VS?bIu;aS~PzgfSh27&|x%ux}2#BxhT7q#@-RDvXo zWiZMi(JP{J5{JmV0|_#_BLFmXkN^o{=*FH7)Hf@d`rdE`;qt#w_(SzG_q4U3guD#m z$e6?dN#?Q+V|5Y1O?Cx|J6(GjeIqZ9|KWzpMslP^RRod%?C%bPtMKxq#23Kcu>rAX{CUWYx5k&KY zlpvvlrKOx1yq9G+Dr#qiTa%x2&yY4e#^#YZShyHycnCNgqT1p0hC%TG+Y!ZKcVz?I zq7i=n6T<-w=hCU6N%R+zV)dtB?wfk=B8|@zk6@D)#L?8)g39BlH=}?U01Tc$*{c?& zNk1dJ1(mTF^PGwIqFPtPF_X8!p~!a%yuwP&RT$5V2W(pr6!HG0k@^SSR^_DM+|PudOeHRK<|KY zgYfR}XcCrulRhE)ycNJJneBdW+A?yGjg}IXJ2@2a+OzCQSu+Q(GHr}pANMxXKKJ^5 zcftX*_PfPfBFB(LN8*Z$h}`G4rNOyc_dLbV#wN+S_`$%Q#{i^}hk^c;F3M1~5Z`V6 
zkxj*G)HralFX3`B9{EDWpyDFJaHo(JGd44b)>!UDHJJnn-%AV*f?zhVym zA;i51P)e&;)PN*hX{$O>aUmL@gG!d5IpK!}qNuWv(s3RUSIUaZ@hTLxy~rL*(rTS( zNh~GhIKyp``3sZI@JdM*VZ2D$3?MdU@kYKI%#r>gj0?7Sr!&;sIBf+lrJ0NNaAs@? zSOvR$F3f4b7vsm^gI1-jGnvtEhif=;^VFV9jw8j|kRL3vjpInsf9NOs$ZPt?y`Mt= zaqqWD4|cxvg}w;|!obKU>gdS*o(e`FEnFQH-9vOmWiyM_IJj#Vj zGt@GqP9J5IBPTVIX>Lms0mGMV`^o)-bTbvruGjXjY`F}`v#3H5GcJ0DEPDn|=?_;+m|AJ2y7~p3RSfDYgxC-cqns6F< z$c$ozc6QpGP=jB35sk}qlS0AtWb?*mJmrzYGc!j}*mLS<(Dw0$0(P0hJvLAfSs;OM zTdv~j4t0uN^}r&*4z#kTPMIiMS9H|?XF%Y0cWcQc-|0dlM8=AXt6$$VILf?;O`=ou z$@qq%8Hce|wJrxCJmIZ+OZ7^uTqRHQB4}mNi7O&`KR+zsH9c=*3x+X>n`(R(6BZ-8 zCo`yt6}Y)jCJ?#3h2pCp`HrG?PlM7~c)Tid+5$rWV%?cfu8_M^Ktd1?{HCKZ)b!Yv z2gS8uYnq=mRMQx3eILLKpn@xifKy0-9-`!xA1mAHKhk4S+7z;oT2M+I6!()c7lE6| zZ)@;rTx-Ivq>w#AWA|G~BME;+cVe@Ee3MaYyv!En7P4;z1aCnT*x0iW=?w`Hb+J>6!w43%~xsW_eOlX>rb;<(eT6lSEW8Dt{U*`c%Vg|>wq4!mUH^zMw zi}pO^TpZswM}~gc#n#nA1zXL`{fu9}pb0kxc6jqVy3gu`DW{W)%V-kV)@Rk+8S>C?vi6G>#So)itn%EehZ%|6nu;1qgQtkl{fYq zE?fP|Ms=UmS(IGN_@{CqdE@Kg*6WD_iE%M$oQDl%9Uv-lQ1tx|l6*hc8@!-;TR{!y z42_drqgtwyt2Ey0kT`RcA$&BsG7BUt9zkA(`8S$#ogk0qkl{MD(ktKKbPls3w)Kgc z6i9(QtQ5^)FJPi3#U2Ggo;;wN^Ib>~)De}oWV?%z$pA%8N`I7y*S0l%t`8^ z2C{DU$xlxy#1z>+pJ*8gmEo0IfCv%2u=)e_u`AW65xyd6n$VpcMBXLKmbeCV5P9XC zi2;6)V-(&54Jt{x3Sds~QiNCGYLYRUKVbezdJkutNYW`aPjA+03`M38OiSVu@>OwG zSnx^h!%V;azWl7|8k6Ry=%s)1O+8=!w{es+MMdQ&p4`KWrk0+NtYDLXwEJXf?Gp;I zQKMrHC^D!zDG%yD$E+bDVx;WdWrsAE;5{ zE}xQ3%=!ye3DnYt^DOQA&**A>Q+MA9Bc4$q2*$#m+F6$BlLixV z9w}&Iw5G|g>Z$_t3KZnZ^;ok=+8A5%3t|}kw(YXymjD=T^-3O)?dB{sLdI81OAB)O7w$$N z6EnuhVh8r+%MMJwV2CkS$h_#cZ5JiK1iNVGB#(Gi-RSC0HU~OXhMXMC zAe=)}lOSTNEF5qp`SNTa$a!EAD>ooW7RhTf|EAG=5|I3Tn;J-rj&CR9=;MX7z6fu| zcws9A7RuzX+c!5}(i-2(D&9&pew3ky5J}*Qe>}0c07w;Tuwwu20>DKa(~>X~iLFf4 z%KAUfJcOMj!$4$sG)Ju97GCJpkq?`zSFB71ytd-bedUkJ8yyWeTV+Gye^4BhL<%4h zXmgR0vbg9~%2_H@xe&e1nLIUAXJF<_-kD;er-Yt2{7gINXr%7?BAd!Jx-=ZZnIlFF zcr&2v#NHpM{O$C*;Elzvh@;qfpjPJCQKc0&L@^gZiZWC%Mo}~oFiJ^0yG|6A|C56I zp90d9e0(&*$-@?fcSYs;5HcP`O%4)SRU!fav65gm8zVwrnNJ}oKOI~YYOwefaYW4h 
zxOlvmggxJlBTFsq0p)7*CVNdDXxV#=Su_-3$%3E}L?t&#n(E`C7=J7S$Er>mMc}Df zGiJ2O3h_J0Z5GMfF!=MJD6K1J-_Blt{nEcyxqs4%nR7NCB zS9Crsk6N@(i5SlVMB{Isqb|1DJ(Ii_y);TbnqFsP$0NNPbVW3wQ30=e_ToCom#-2W zf|CfX<1d5aNc4gu7(|isz+&z|!mKVfG$K^KgChOhOacnr{#0py|eE2jeo_>WD&f zPms&Dko#(r>A|uDf+0tjJv=t-j5y0lH~{b?`YrPhN6EfU%SZ$xdZ`Aa&uj^0lzK%O zZOL*FbkNvWncO08kPQ=KftTP$8}9%2-wusKY}4g!eV3w zx~Pbdf|=UlpmABdaacP9Xt1RXc=gzj>?m>akMR$JJw)vpl2C^`Og8B+FExZz#KQzx zLc?@Lg_NqyC?FH$guD{%8f2@=n6n(%_aJGTj^?YL8%9aGWhc&$)sP<}d}QTg`Bh2* z<02NUxTSJrs2)_9=%T?$6vmf#$h|jvPOP*qmWW+-1n1x+2t_zx1P-f^ZYj@CJnLvg zt4NU=HUirwEx_pgs7GIL3bVvFJOa9*I2VvOw8lcE7onuo%-~wl)3g(&pJPXU9|rf> zIdb?@+un&@BZnIA3Rw0EB~k%V#M+!9nV&z9%snPG1G2N{MxeVtE3vrXV;3}CHEID9Yn{KsdV&%f;I6gc9?rH z{Xs>eL{|5WhBkan>kID!qfkPTph^Rqc^d`5ZEelmtbtM(P?8!gMM>aW7hg*$Q})F| zm~2E%L!vyECy__1u5qcf$jn(+YJxa|AlHSwMlmBdP3_>eNq@;Gw0(+lOTIehvlfqF zm#*GgGT}k**KVZ_FHFtLLQLiZ;Z2^&!8zpot$_Kmqh}tQ070r+?@)VHz{A2W#x7hViIOE2v=q z*U98$LyBN&0zYit6=HU<5$iMO0)ggL4ZH|tyn01gfB*v7sK9q^oxPkaKn>8BB-DEv zVr8ee-;kE&xMp+Lq6$+p;lM#9n55acxkXmi*f>rk;>I@VaXz%sShnwy^E4s?GQU

-|<$2o6CHVUmY}~4LkB-fR)ell-EcnYb63UwR38P zovrCgIm$E2IfzDR`~fJk^a7_1G>$wQGnz8;{l(n1MjDb|x=%IUg>&VU+Gg}efD#FEvt~4iEOZ4vMwVUXKT*}gnjt5Sr zeMubdALRc{mVMWX9CVagJ3(YBpX*J@&H`wFQ^JymXHpHuGU+L@Jj!!weRZzX<*0FU zu3DEHo+B)wz8Y|Va}^50=a7Z=oNO{}JW)LJnyYJ_bP7^wxiyS1WEB@BY5g6+Fyz>K z=VtEVmul7DYH%JaAcCoNJT2j^H0oery(dO042^1#z@`$t5a5Az*=TIG?MO)hM#JNxs*ycvB@u3_ zb2^2!s^m(NLTcr3;97*jAX|@>X0i`_U7zg;xoRD%*}p)pCn={CPXts;80?{S*kFFL zZjcY_>^}G>>j14Pupzn&wBX56gPmxhH!o02C$vk5B7DT)3Np0#rBZee|I*X@#Z|YI z2FYY`y*Jrg3=>dv!hjI1!wNGA17}pqZHcSkLK-|N;(Bwk)~ev3_p<0hfFRhGn}#wL zZmP0v%mU_iBeT`h>LO*dol_7|jpEifV#~o{H9Ar7T9moLDkM${BwTf`!3i7l?ln#q; z#$y}mRTHHh(aCp#|$K| z8m4S2dKuY{XUk`?Fx+|jFBq;hS}R6tt@t#Eu!=t+Set>#9?al1RLD>Q8>xU80>#oa zES$rh1NSwq9!4_9TlP8d2)lieZbgMULV-;L!_hQJpZhVf8qpVvl;^O*s?tD%gOo8--HKv1| zp>V0jtG3bgb(|{sfF*It6@iMj51XjbG+!PtoH@yE^B!dg^|6eaTBZsTF`!e`rFh9R zNfz_uZPk+wQ^iaVkPLpdwK?lp$1(#&qkQODU16SuS9PhG)niZ4Ci4 z+8^!afw9m!Xm1C@cAZhp^e>44_<6Ez zT@?v|pn`zv5v@o`J`|ZE1ne(_WCMXv_Wb&BI{oZ#g;-jSp`Ls!*uMqnxhycHXFXACaKD5#k)tMSNiWZ<4=L~Mx zFw`~DcvlLutWds{+Xm&{f!Zn*B1zbOYOPw=^{zD6kI!^Ns--Dpm{2-`T56|=ML&#S znx|Aw1Hrk!Y7Vvp9uN)m8zL;-D?>4qd5kxHN|eULp)6+-b5Aec+Ux<~#X+u0F>*Y{ z7Y3x{KB@_6r*F*u<>Wf?IedSJz@-0mBMK;KYtBcm4NK5Q$v{j2))bmjvIZFl!B>r8 z{8A3e8AxcVDK$iSkdJp4)X1lrHVb|TaP$9}z#6VshdSv&4bPG7trH1FRntTNucjqU zNCxH%7M6KVTEo)2(LIIoFeRtbd#`1N2x@s`8iR@qe{CQbgHNgW%fJ;AKDgna;Z>Ryuor`UX37|A~-xDUAEz^nG?}I`8OuW8^VsJ+`w)NhWuh+$eL3h%qce-nEOd`}SaZWVal@SvQlc1$;l_|+ zn0AEZtiUN6E3ROTBQn+E+JY2E45grr@>dQ7I~qSvzPqF2(+cNflW)c~nUg#S7&Bm0 zfB_F&Jvbht&oiF!czimK$K&x8dmfJ~-x>$Pl?mffEEE89<{A?KM1WDzyFM#pYi2iuC)r>bF=h_DEY&`MV-LNpd)DG*#@;64G2ta2U* zWt*SP$25Lc{z8B@qd3fg7>YtY6CpRmkz5sIECyATqADdt(2*Gka>K+4a#@=>jh{4r z256XROT?pcAQ+V^l>P2@uC%!l?Rvhv`7%u?z0P^#0-&68lysuftf<{UFJd=j83fVH zBKaOM#y*Iy9L^$elwyL#X`kgC-q=Q>OZ2^rl3uS-5@jC(8bXNf;`X5?I3fa|ex5R)pBDf#WtCc@W8Du2aS+pPgJ?6((X~jx)gB0# zc>!okGh)%uLsL{Vbr=YU3?$9~bqNE3rZXMiVZNsRJcXdxR790O1DP8caYG}DXOcaJu5n=+09l%{=}oQ5 zgU64v3SU*NqP#$uYuQW%LY|LkrtW@r_nzHtb`OLTg5Lw>I82{%igJZ;k}1UHC`W}6 
zH3lO?%aOv7PIARCq#Q;eeqHf&#iyNoSIyPTm{nzUV_{rn^pjOvH~DJ4b**dOT9^53 z5dd?EEy7+U$yXZ=Sy)+3jVKy53^)WSiYqd#D6KMhDm1ms@O48%Bf82}FA^3~R_S)S zrH|d-T_7BlQQbv_R75pYHbu6V5EMs2M4w~E;43REE9?j|l^jTAEq-^zJp`3cDPniS z5w}l`(>_iEGA%u2SUz3;bthJ#K(*&9{eEuM13-uG>vla|xa7LBD<+97UgO5_nDT-! zxhizxqAqQMW{3}nq)hTmhRk6=6EbvU6<7&?vT?_549SoT@AVg1{T(B+lHvVhcsIij z*^pfx0BEPPI-3^H41l?+t3ut?1^}~K!fi}fEyHRxkN^XfEj9xIKhE}koqq1+xfKEv zEe|;|+Lh*qn^KIT3h|d2wJ`=3<7JDdyz zW6e+?_f7}Gxa{h3e-+3A7F`wymxHxikTGL;)6rsVf@pDPV^9{vaCXE7Ur9kq88`z~ z$s)}aE2oO|1r)Fr7OQl>j$8WM67qUAI{-Ru&Dhk`)YQ0BpBC|XLxEscYlp^X`LT}k zZ`+a(e2Pp|)>{lskkKfNlpnqr$T%a$5h`Sg8Tk~%8o4NFFr1-mr`%3;s@T@mr<~O@ z2Lhh?1Xt5oee={elZtuOV^Z~)f>pEc zdy=GhT!Kgk(-w7XE4kEC=>dpYtTQ<&nTY^ELI45~2mk}+2m~k;6q*IoIF~_3F}egVRa&=ye0g+z^s;iB|>4!-0@_x$N9PdRa*oi(&z*;v>%Mf3`2_O!iLNOQ{Xog zfEhD&22Ixi)2AFx*uWl6u%3BAKIct{5fB@(*Pgua7gae&7++7?quc5sGY$7a?IP>2 zY(xll!EOftR(fG!wI7Z}^b-mlbSa-6$8R_^Bb$ zU+c^+hxd{liwJ({DIwQ~*4*U4w>O7D1rg?g1$;XI!DF&slwyYG?_S3r_xq&|>>Hnc zDC7T|rE|igl-2@_eh$%we^NeEC6N0wy}vXMU7T z1}wXf90r<1oV%1eN^_D7CK*K$L_&Ir+KxC++$9oQB5geY_q0R85;zLU2SAtLmL&!k z?Fov2g(6CK@?p6Hi&l+(jb2L8s|ME2Bdu*e$pHOOLTox{RJ3s%Gz>ba-cLQ@53C z#=!(6(f<4)^$E)6D|4?Svx9wweh5)s%(~O)Pbh>%ZWZV~)?)r)TFeC^jlLob0a?F# z&>@<8fFw>Q4W;oPschWt&Qq|nVF6wK1_1@@G1X@08K!>!x->xGr7EYl zsdJb4**(&!Ez=Z%Ea%t4ZxN?ZVN}MB2Z+nm*b7X8?Skk$12TOmUQZlMBBEvl1V`et z)VhRe4beN>C|@08^jJKDXMDtE5C<5)L`j*26VAHz2Ed_(T@-TJ%7V9;k~0|+mZ!vO zB7AD=eTl>UY!JQzu=D+Nl3LN(j6{qDZ`U}n+8LNn8~{gA9R!qbpC1QfOv6g?lf8!i^aGVXg`XlzAakN%JH#a!C4mzks2n8= z>5O8^X@?1F?O>Ot2Dm>=g*}(8pW5-*HgF4r$cm4~1|Pz>eY2&b!I5AyX4qcS?>Y;=V=lyav5l;*-zp7{aT zLe>Aa!Z~VhEE)lzv8^tV)~UuuN^zpY38>|ZhBN>>^5-=eA@n~_M+_H*U{&gs({P7c zidQTYP2CaJwRq_WLz@HV4SlEr$_mBk(Sarx(IK$?3g8HmYA0K5MQ2mNY1FlHsEDe0 znNe)k3fCiSby1l;5^C+gtL$Y_*eG?^GlLexJ*h7OwT)y#8FcvPDtO(D=_Z`>>vMS;gN}7xypaAV=@uLVSr-+EIL9qXHqqX;3 z>-sJU1pwmAGG)=k;4ID8Bg2N}6lvEbQb%TrnQqi2a4ho@L}Vd^San#0P@|mN9Kl$u zR#b`pCFqqT3yXvP$-Q&QK+)zMA+J_56G%*+y^`h9-&0qVX=ViPVIzlO{xHuJ^{?4& z7`$eG@|O)FbVwkBxb 
zAJ+dRSyzV5L)AB*3$oYYBup#==9c+$!FO>1vqpR4gq`ANo8Tg6@#__G7tI)2T{FVY zR~Ed>KO79ttE*Zt?E@MYi{3)8Um$(bDXGnDH-_~LXS-ZPCQLFpvLWub6#95z{6xZ& z?+hIlB*Tvl2T}_P_xyGcvw1rME!*rZEW>PVOZ>H04Fn9qeW9py{;Hs8qEp}zHz@TW z;XR^J=i~njbPcOyj(>QdHUS>UEO=f{LucfR#G;|c$FU*dhK^w~+(2)|D|EDeH3;xv z)X`yQLk`YT`kRe_X9@Wo1MWHuX{mZ2Rl^or;7e?4(4%3)K-^#&otyB?zZgbw#FH<( zr;!TPFTvCG?-;PKl!eJj!;M8=Hp9t5G$FjCPh$)lB^@FxwCFPt8MNHORQn+_ibkpZ zxSwpvIz0I4Q61_XTn^W=a9Vi6PmufU?U-Q4l3@=MGd(VQa>uVyP$^E`tiF zxvK=hp8}46%7!|UEWRRj`kfDgfKY{~0BQm*Gut5-rcRU7h%+}GA8ZR!6Iyh>T~|1l zzoCg-REOPPRWNMhEaDop#>vt`hnO0+5;6Z}j^xHb zz3q)-EWVdq>V*Yj%Q)p<`_eTT{c7t_zHkDSEZM}D+*NjLRtSSKEX~8pJzrlYvj-4G zAa4pgVrx@rZBn-LL@}84hBPRM#1m8$1&N0=0fGEu*tXUO)m2HX@H;EI%qQ9~rp-{J zKVds=H=`RWIt^h%(UF}pYQWZnP+Z<4N;$Cqqz#o_G5U8$2SIBegst!l`AZ~w)nPRT zPL#q$5^ zn%O9V4^-`*NFSQWKnP-#j(jX=2}m_eEvNrr4S<9|z$jMS?7TvTlM_YjpL#Mt+C*k* zAPEA=eHD{ofW}s1{S|)#%QfH`&>evon_Be?{Kcdp&3uJu3Pds|OVgY?2y#GXgKV0Y zyZ*Gox%Fkh(XkMk6o4JeGMU#8(#1@7pEdjzYm1jX6CT4fDMv;bo$?HRmqwkPFv;M5 zemVkL1g}P6c~2LMV;0-g(Ywd8e(oPwyVoINsjzMSJK%>iMOuVJZH%zKk}{1NjftV5 zgcikPDUT}3!>hg(FMwKPx`+3Eg*H3IeZrb%#x z`o8unxn$Umw=Yz)O5Fzx~mL?v%7yLVMP;@V>Ehpdp;P3 zxM*YR9$y{h7pge)BKUsV6;@|(B$1n9~Yh{Ep(ZiVEP`*M4c ztXeGO#e{jrg-1x@Dn30%(6R;ynHhz7H{mDSf4dcmmf?-0`pCb!k^)sJGf_x%_ZDV; z(M$#)yL<@+Q4)9VRz`X3?%*BesT1u8!Bka%Kx+Cf8^{ss;_}eq|M3_?m@m4_R8}`p zXe$4RZ5*}MZpOk{bnxh{tR^BVRxCr$6DWkfN#$cf8uAjqQTk3V&Adz&II6uyED;=R z4!yq`Vsl}uv6cWpK)=7&Y0Pe*-(c%d^s;3ui+fp0!`LoP3~N;*Lv8C4c>Zw;L!+ zW&FTR6uFH^fPC){PO(2TVA=sL!x#GAYyio;!}Asf1fP+lA5hvBo>_1v z1GSyJH=N#t^!HR>4qR(cY7E+j3D5v7$@kfzc`*dF<5=fps`Wk@7QwSA?)uMYeOOHv7B0q}+kO(6LAjWZq5*&Lw z+-07)t{Gk^qK&^DGX@SZMaZnB7ok%yAz!Bo!L@a8AYjW#&k6imO3Dr`ow9xvpb-1aI;stqPJ&aKihq!dqxd16IH9l*I3X=zCnmEAv7a!k z;&5>i*Q>7vb<=n@!)|+?WUmX6LJ}Dsgk5+n%!bnviuF49bzyiHKRGY2UrpNsFmM7q z&w6|hETGo|Q+t3Tlvz$l`(oCME6?~8=}i-p3<^LEd6$}Pw`o|iG)(5iV8@9ENn#nd zIGET43ltd}CHcpiIW8`}xk;twCQ1UT7mLP=&$=tuYTxZuolR@cNEX!>XXULc_qC`B z=g!`iaG>p7vJX;yK{OlNMI!U;!=Pz*HiOeGxSZ`-H#P#ZWT2#a!MO-Xd*aq{kLE`m 
zdhIxIBm%*)k&a@eNF52o-kIMohfXN85HytWzmf9wkV2^8>a*n_78^r4r%rmPEi@B? zcQE!JHPiX|Mub=~V~ZH1^P}?aKTibuM(VW`PPC~xUVZ+i%8DR9&iJ{)qtifdLHg8L zm6oEyrg$kDE@a&$pyCm-J|LrjJ19`o!{1UQ!QQZ@+xE9b*OEh4`WvcKk7PhL2-I(v zew-YJ8h843s^2( zxQwm?`q6YwVp6nC;#^y3yR994#NF+#Ik)~ruMAdVo0U91LNlH|w+4jQ#d4Avo7LP{ z(I7CkyPs%tHe-DjD0WkDH#hA+$BX44_FM2KIUNoQk~DZ7B!o01wNn#|P;U@i8ReoD zOk}1B7c)Cxl*Xd@3n6>gXISljQ2|1)zs~UZ;aa5D3s=+?`rxm|2%eM9t`dozlpHukXrPx|GQ7ufJmNDe~(z;ZSnIe-4gpaYZ+p zlu^TL!Ejk552EvO+qk&^W>}c6DJ#-j*Dz=18t^GQrPO&Bjw@14E=!A49>Qe9(H2eU zDY)YGwd!~OGU1-oCAsA9xD}4sJZv$uV4rNB zw@5Z=J}iB%$&Z8nSw?qhY%X@}cq{e@hBUI3PB%eoXtd^x>FCbpgV74a$faK-nr(_O za#Yii@^1^mOm%`8ihYEO1N|WqowS%9j|iz1_D%bgaMNczhTM1QK?mfBr$uA(WuWEN zd6Z6{k&4-tHGo<(82_6)p*P61W|iQ>DdSd5Y5qtt`pkTHNnhi`l@-Pppuq7Vt$pF! zoOFM*U~D=zR}n|~Jo7LQey?bknGUq|D_RiHQUs2?4N?&iOh^amt*s0+#$FLvG+t@B z1Zd%ij2lQO(vx_C@|s=zQOE}7jfs~Y*zzZEU&%`MdC`^{b6DzRjIqz`)3d`+$Gr+| zuUmm6w~F3D1`{#H1XhDYg@0!Uh)&Qaq*heRhwN&pNgXD0)!ffgtAWz{PJ5rLik3Q? zZ`G)$I-cWTfd`Mmnr)^HXN%YPXrfEH43sPjzoMiJHybF*Gi!_or)t`2uexi*9m1D7 z0n!a(hZQYRAt+?)C{(h!ju9z-lMD7wVK>mPRR>NUHH+;gY>_d-Lm1^y^J7vmhK+Ho zrFs;v1kIZuMm92}FvifP7#8?6BSh=<<{wPM0qI~e2_u^p#B{^`GjxAk#t87_I7et; zrX{d=e<2NwImbZcyfS*iCgH?~ZNNeqse*HjCi0+}S@LnsHqfAtLDt!#a`qYoA$a9Z zRwl5J=y0f?ys*G)aNF##)BIPIs1^c|zLSvI_8&ngn2@+Z<@ z(lf|sIFQ&aE2+L5`p1Xt3o3|s$VS!mSt@E!db_mc6NDqk1~)&v!a;DjxO$1>jf#}I z4T=pqY)9B0JdZqlLSj|ehPK1-0g-F?RKEdS;Q$J+=oTM9EdXNxbii7db!&U~E}DZ9 z$SKw*&MYFrt9SeVf1YIlbpc}mg>iJ@3GrsEo*%F?TOA6+tL9w8i*z`LWQaJ1XoiG{ zLxBW|c*Cj`29F2C7SDPq6c(Q4fNEGcMm087Wj>*qURZ=lSoJC)5_$@P2d0opg>{5S znAZU&A+&^sC)zIcTNI6#g0!Z3rGRCLl{6EmMP=t1bB2D${@aCJ=;&@P#tzNsk)sZ4j~7G=o4f0` zQN^i>XDPb5dm2>FH$d|HR@>HCYsD*DuUEoQt=_#G?B(YykCaF`+{_`R2U*#<%U_=6 zYVPu$wDDX)FAOs`g~ISV-`(f%F7NKn?&{8Vbteo+dWREGxh{!8Z zTp?3rtXxs7!+xZT7e!)sS9f+tcXoF>J3G3$`Y$*>Z-1e>ayE1S(4?_;qzlO7x;7AH6si|J!4kIfd~(Pp;`177cXbYZA# zuXHWCXGOOL!6xM(ia}D0GmbL0Nl^@2N(wR3$U=sYDvl#Zq47}J6vdNBa(7yN>ua>W zuGuYH`x>)eRjMP}6aat(3ou{+0E3d> z${BfRB8NegVrYi5#S5yVnCBQp9p&0E$}kM)D8!G}#YnRXH*8z>pLe-sB*&QS2<>r8 
z7;1buM>WVX4e-bDIE^)(yU*vQG#yWcp`J#{nflJGjWMxiG+mM}OC=2DvQ-Yq^4Gi; zVIAQFgdfbos|PpXn6LWoL=qE~gIc&8#0Skmh{z-iSM$u5X7=TnFLPXGtU3D1!azrz;d!fKl}$df zZROV`zaHvSN4wKRQDS5bLLh<0A)Z4DBnW2=tN(SxD2!TFa(pB?s0?v=5r#`b(O|}f zcmp%~$FmvVptIw#E9+b}ro=(vxv=>TmaxyaGCOWTKY%cd@9clA_~v_W*)A*#0}cXF zih`I^od_AjlOc&@Ek_ZtB8XD#3BsnCNA4&|Q0Xzo4c;zSnSR%JUgl=YnJEmDnN1wZ z?pPSCG~^-|OGds9C%@w(w|1i_#tb39mSQT%K@?&}ib|6qtRSN(y+RODd<hHX@fvTp+RgEoKl2ANqJ-Lj4}t_N(U*1(PIva zBnVHl5af)PDoW6qNm)vp(D}I?k=;;Umh;ykvze35PtK%`nc1vKPjk37JBh6@;N+Z* ztBOxb8eFC2-2m(I^3pPY`^zlr|9^kavIgJKgrUFXK<}6*Zz_L$y2|V;(I+3Vvi#v( zE%Pgfo2vCsX?69~L`wXNr!2%z9^N_m!W&Qc|7>#bgMUqCi!yI{f4Gz@xpLu!`-jak?wi6@BHXF&+LZ}@_ZY#`N*xla85U665fCe zPji|-Z*1b-B;MumFcXh5@hY=`%l0hI_N2QrJvJKKEA;iTeNq5A+Qw|?h(5dnnJKYH z01%OA2mlhu?4IqdCoZJwL4w^8Z0edwlR7l~Z)-v0Iiy8TLk394ymr9A_n{vVh$Jmy zKp!)Q#>ptwaxF~uKB=;v2UW5Ib$9iJ)mzhgf-V@Fgc=xS^@IIMbBp4C{m-3k1c?K%=!%4xN~5vy2p;tl<>;l%s`qs<371B0<(sWd8&}xVNdW zN+?=0R!+(7iIZ4nEJ9Q<`4X)Gio7E1P=`8tFx^DL`%WRKQhTg@sgX5D10FSahT^ek zMoUdpW5CcwjGD~jPXLC(%nEOWqBU#f=pl=z%pfOAC&V&1-ZjH~~g!YE_H12?WjLP0BTxrRjFyZj6Lqcapqs$WB_KD zd3zw2#G89nAk^lt%M1xoln`mvTL^B=6&}2%bv7`{3W(AC3AjRpNH@*nn*ePP?hol; zIn9YTv^@QnO^NVGKr=mIE;}{7^$`ugJE!XOPl-60gBB$Z+gX3%&399Xp-YmOl9M5* z5J&93`V%-&ipSkJdk2tycp7F@CQRCc%i&-m3E?zL4w7)VqeJ;7E^YJi=+tJ+heU_?Zjygiiu5efuFe_e+myUrI4N0 zvKppM9TW1)0}5*K3e>xxIdKZF z-x;LJ;SARwWVj*IQ_MDz`68KBJ}=0O;YLx}W&%SJ!wo^TiJ5{e$QFJ&^38KxN?;Jb zCWWBYYRwrHEvvnro=)0Oh@F2_#=086ZlG-v7&MZzkl;2O4#SF zsou#hK#1Hnndfkj$Uj9{svO*1#Gu8i%Mca}S_%V1!iqGHhe4tn9Ip-5V4`}Im5*BLM=-b{_@#s z`8&L&{t!f6rMnkc2sF$d&{8wM-k%4D2*#Ik2mG@gQN)d`w!QG?=*aQiS6dxdHfYQ|LEq5uK9S{gBV@}W>J0DrR6ZnZw(tg<;( zy9GT?Ic5b4-DGZiR|^@bkeSVb&JYSuc$iIra56m1hEXAXz#sI?qO>;Ue`5MU;B8GL z_TLn^BVz)swHZxIA)Xr6arg|Y^UT|-0Gm4fio!^P6k@B!1cpq!P92cUQOGQD%w6$kA$3zF7-FoF{;;(Iam_z>@+(?>he8;)(>7nUV=0yt{n2=+(ho% zc8TQAg#e8s@&^qsBf*=k#e_&1u#hZ)HAU`HQd1T)p^^rkPW(T{G)VoarIL3Xh8}7T z)8<+tOOXj=pE2r_t8AAcX7Z@{Ln9Jbl`c^$mNnIf2!N}Mwrm!U#gGbuqS^|jcSGA)5UpkX> zm85ixUPlS)BvRJKzy?Vtk{9& 
z)lF>bKARm-rQ71f=()}6Ek%JT&NkS7kP&B*GcvHu{E5xUZ^q(-82hRq6WJ`Zoi$vG zgxu-~GG*%-tz>UFPCvNo5os9wa#Cx?Gbo%eIT?DDoT~83#_-HDvp8!l$Ocs=k zfM1xQoj#?}isQdS;^7=q^> zjoqwemV{1I1v{M>s|4B$x*1Rh+v)v*H34)WanOL$V(e+OY&<(WhX&6l_XqNUx70?X7vay>N}+KaIuR#qNAA`D zfJJfcKTZxdw+NJkttR9c6Y7c3pas68vo;*zCy=-o-=COEhqf8JtOGb1_S>pYGx)IH z7q8aA50f@h5^GSTWm`6I`D}GtodUr=H6$@r&(MmSMvtjDs`O;+$BnC6&QNR~`gm8& zn%%KCfUMr(lHZ|iK+C$yAX4H1pj;)q(<&renR-T0IhKI4UY#lXDd#MWXg>xIromxa zC!?!I|1eC(x>$t}{X;dueM!)zTWa!%f@bXuR{v2L-tTY3h$}I+7&Cx}wg!4}28~g} zEalL#2r9Z13ny5JKH^S-$s8(9Ph2ltREhXQ!A*~q7!)0Or!Uj!cqLKdb2Syv=js2T zGUYm5HRfl7Y@s1NE*(#d3$%@aw#p_8Ig-q76yA!mTXF&grnXqjK#E@jSL!azLkdiB zyx5@4_4u{gl*I1Ey5kZbx1bkr2J@;}+fq)@60+)PADswU4>q!fnZGc=!4l9HX#c_+ z5ZO=&&W1J>ITU4DiO;5lVg^Pjo7-k`0<)vu21gbeLiazxvPXED+_ElA@`A?2qHO`} zS4f?-k=oQ6vwxuMf}r2t6*z=g1M)(Fk${01!%^Y>STY9{jA;>dH%F@V6Uyqg;+H1g z+B?h_c~4{rL?#gPIYiKm9!^NfjJquCokSwF7l;cKoWR%VI#PpF;m`iX>vkzXcDOb(C? zxGfohoKe#3$t)Jf-8%++Ku8RLZ=hTg4v>M^3bRI)3>~FU&|`r-cvb~sAr~)Bt`xUF zwWI+eQY4yaK_p`8?tFvN9*!hg7`QbQ)|&F?QxW*j;SD@~8i@sR*ttZAEkcM?!$ur# zRXA%mAKAtzRzQsAMZg6SiqN_ILg8zIfSW7}0H}voJ8AT$%d)@%s4VK%Ec1BU6^S2P z;W`pqGmtF{xJdY&6=t5P)h88XDkDP1K{{Pl15Z0&NE)?$mDlTnjpdLx%t zr8I7aKzSn+9XKPbO~8lzpdStKNig6Gn(v9~v0?JMOvbpUwyalGZOvAz?4+^k>erFmi?Ck^E(#tI*?`t-_mPKkB)@6EuAP2r^+t?B2;-r z8hy&wwGSr`gp(~+6A`Mr?O=2STu0`)6h@wRIG;gKgOjpu!cPEnrqSe zx94Y^Ehu&MhZSw0I)@kL`N_@xqjm6_z^}h3_zvX2r@G>YNC~ouRSnLvS%i{!voztl z%pal;p6@M#D8zRO0dAd(nAU@t7Z)&uIMlovrK2q*87mSon3PbS0t z+8|;An>v2S@#oKOCzl(xsEy#DP|4{VVJ;_l3A5wOJn|UKwsU+B@>!+ND`jE(uZLPo z_w+z>$DDcgxJ`XH+E_|70qX(Y^FKWey1o3K+=P92DOwg2gw2sp+{w)Q!3sv4Gy? zcl}O@Tdfub%^Zbed83I(3 zh;ZF+3d$iLSq?`Q^P9gZqUWIFkztabgExK+ipw7}qWX4Ww!y`|ox-zwUR9Z`I2Iw? 
zhuutZ{)0bJ)x9Jm*9ZUFfaMI}er7g3)IM4yY~pjjE2;~~xi`g$e5Mzw-V^(47-P)3 zh4EX>3~nOqPU9iUxjQZ?p{n*N!(b0U4M%G7!jT({uO z_Ee35R~E;x4py=z(I=Q0j{q9?7!?r|A^ED5ZiSc1Zx@Dr)Z{Yez_AU~tbU8eB&aHN zfR-ACH3(?UB5*;HD;AztJ0VP&&EtL?JoXCB0XZ9Jf)cjG8GQ(;7yMTZQVMSst%t`5 zUyS)U7k|BKCtrxqzWCgJ{DYO{YPQ^O$)&QjwXek+D|GKGP#Im2FORU%5-po zMOx3@d@#{uO$x2R0O!3L)QigbI&Njrjp8DSj64}Xb;@~FBI3W*kC+|xi=^m9qB_df z$bgBKY&o7+^le`(WUgObXP08G&XFA+$`n~za=j@yf-P{`%5r2cWyTKA)KkB(Mj*-& zZlF!45R3Z>Ekn?=<$Qs2;TBe+^Z-+sn@woySq z+M$lz>Cz`^c;Ku2EoA_cGBg}PN7c2u4vpgf7LbE;7}ywI7NmXLyn12cLj?;Kcwy0` z@O>Broym@bKdjMr)Smj^iwqLHYD~-_COvPctiuAKuVh%9bm?1LipSd`y{$uu2T~)S z;C3jz^7{{!0T8>Fk=fCxSk6doz=C-SF;C=9z=N8`ll$C^8%bg42=_S2Ym7!G+%B*mHR+`ykAvz^ZB6 z=Td=|`w7?>nx|5U?Zu2S0#)@F%tN`s!xa`Sl*mT`e7u92r7PGV+UUeN(gFdXTY=Sc z3OQFMV#c3jY=C18=U+e)Bmx)pGpH+#3C5(YI7G!xiE|(EojXl}yCJcKP>8lf5o$Hy zmu^3u5!_I^^bq;DYW@OUtp+82&joUa6&7-Wtq{S>Fr{2sRF|CiA#B_H7dD?2gYhq`G+b*AwEhWxrX#-2 zF6AkRQxkk^8Pl*}1TSpnTm6Ce& znJ949kxSiahREKAjs(xIpD0%Wx=0(so2Uy$b6&cYKqfPDBqb(>h9hN!9iaB6wFQ3_ z?@cxy4nCB(;U)5}03yb}xzmYecYYe&tk=Oh_$=DN=lNiEH%RyKz4kz402SSo5LgKU z0>~_eMctwfm0<3L|B90TOpuU_qag!D~^_v()Znx{ZVq3r^}I=_P>>`|#;8 z2OK@9;2t3m%sc#M)bUVz)J}e(xpwM7A@74WNLDwlk#V&)p6)6N0D*+*FKaiVhKgIO zE-l>#Imhh$-d?I!l<%CjTfV~SQN3kI?T`ik6bYFOu?Uo~;V__1kF z`2;d#>>@Sp6Bc;%cUVNdAF*bg1#p!qtTh}Nb^hmSQ$$KxbJdPXfbSz`&q}lc;zSoAi?OZqn40+RIINE=Hav;45S0YV8F<(LPO6bP!3T_XseIZcY;&g zYbGg{ffFu1WrwQafj^X-)o!y=;I%Xcz@?~rRz_vq9P9vSiQEL^BfH>xst2=;!@g_V zzrEB7aWe1cRk&W^1eDbP%D9*p8-R%<2HTvU0&%~$w`3dFz}M_5T%TV2B{HP%#=tPIbour75>2DX+03gPm_wCCiR|XAf%_*+EZ+irq~`$-;$?qlcsM^)1eCW z3{h*P213#FrlzS&G@W9n*{PfPcRKaBNm+aP`TRe0G*JHe3*_zpdHN4ro&>0=n(R#T z{QsWmaoiur7lOL9f(VzQK!UG}L6o-?qqusM55mTb8=A$an0eA`2IKgCD~^w2`q9U6 zy+@nEUj~9(MZ%3OuPW_A8Yq!(+|1sNR4%55!b9Q(;iAO#1&g5?Wn{!TRL+ev7xMeh zQJ4@KLtf*;XD(pq`#t{0gHJ{E3V+cHQ;5G*G+E^k!<6#ryZ)N~-kH0M6W%geSb6gwLux;Aa3-t-OY< z-NV{*nb_LCJey8PED!#s);#){fOqGdsw3uRg(wCbY|cY$rr3N#HAV`_TXbd!X{W51 zLf6T$ntVI7wX6t``$n}{&3bkQF2{aL_j%296IOBPa*8>vLIY{9>zr0j`}|N87H+^ 
zL~;?0ut@(=t!CJ4OMyT~RAKH2+dI?Ggj3bI`M+afJoFpgU1OnB#xJHd8P735_w8RKc83nhf3(CTb;I_ zk2^EHoXon@agGIOPNFS~IBnK7oF1psLY!jprY&qBm=L59lIFq^t|BSF8Ax%VAHlIC zZlD40nqp`n2K_!3HHsCY#3A9dn1&sk+ShdHrh)J(sw|;{?M>&VwvtV4txXLSZ&Pd8 z)M^u60mKlA4QmUZi`Gmuy1_HSo^1<>|;6OYdyKqMzR5X>Z$#~cV`I#W$!DxyRx z5(3LPVhmBx3^ZI4maUi2E;$G5Khz8a(L%`8k*1t`jAZ5fRyG2#IRw(jIy# z=Ye23cL`~#EMcirokE%teoDBOwTFJOt(K}|hDqJ!d57()?phVOmOB$jfL^D9mG~Gm2DjBdP4drWWXZqj+0T&<_OGn^5C#i> zd2Wx+*c%}jMyfEMC7{jO$kOIq8=IPedI^EuL-E1NkUApp12%K0sUgyX z7&oaH<{^TN5M?coX6bAAGR?qQR$19bSD}^5PeY?AsYu1wn0;gJ$76mV3|=n~l57H! zZ-HRslZ$LJ)<5@&T$!z+p_tM(atrlKNLe%Ib3RZ<3m;*(UDOZ z>@nym#rz6kl3|8Dh#ZKC?5{e@uV-s^`{Y<{vazTt5L{G6=vtK(Q597QQ$-3S;NpQP z!aQ3t(i?AZTO`yGg7piCrvb^d zI1Eg5m>zdy%pIMOK(cdV*KestHBJ6dPwWu`^mfo;fvtDnCc^VGrnmV& zRB|_aJ*YL&O>Ra2A@sSmJ`gR>Bin?w4Ks#(=KX#2!SnpoG_>Z!FBR-S1r?4ZLSui2 zmYBoRG1JUt0yF`DGHL?26nu@fXy%^Sw=oikDpz3~QWK3Ph%0Yg2C&&n5Dl`fWN{gw za82FDlN6^}dFz$uSPXxb3NS;!!6ENe?Ue@6saEGvmaz?UW2(XGwyjFtC8Z5>F{;fT zTMl8|3%Fqu0};B^t7I{OjXJ4T0nuFH%glweThPV*S6>@|KJ(@_l$X-t(FO{ybiHZK zE)6a-s_}4&uXfNQ5;n_MP@7ihioqhL6~_KiGcAnYi4a@d@UlMOR-}~GAhhB4U0hc% z$_6h}G;~mc++8;frIvDXMwLd3FlU*ALgw1|2^VGHW6Ca_bg9ha=>~#Jv)b9#91B%Z zvw~`r6K!q*4R>zl;t8oUY^#5#0a3f!5fd;%{fIP6-0E>mV~N=`syT$}3C4Zw+eN&` zUt3Xe_EG4cqQ}aLl}m<;y+5SZId>Yktc z*9dxYbq^vFG_|M~^iB05>tkxx_Mnf^3FYY?%unH}x9*Ati^|T6qEDdTAcRfT!-Ptp zN6_Cv831}WDqd@PXNwtW4OnjKOvDq%gpE)402R$B`UtYAri;G666R0WzURlkn)W7k zZSxqn%Mc|J_IK#SgqwLNu}wE+IW~4clsp(v@cGHlch!&9$c)17)YU)bJ&k*T(MlT% zt0)Hesyc)%5m_&z8C8v;AOS+hvN|PnwBTTrQW(-m5RUiiSukZ284VE{Az@Z2x&%q7 z<@Sdquvy9E7?o34j6>Uugy<5g06mPlZjR2j?*7n?Cu|5HMy!ahD>}&{w(-OaMg;6N zRVDJ$;X*@E6RBicLqjGe zwxve^{YEc_X&DG+M&e5oVSjTt5Y|EA5n0+$23hlN35aMc03symkc zn9;1~)l{g7{*}fXM0h%|#|c}!=`gWXGpgt$r8b}BJAL+Twfms5Jg^4?5UxrosboO{ zT?7jrK6 zdkyLNcSv5j-1PNFg2Ih3jcL$XE5&O+-m|@JuoT{Hw;|Dc===hl%d&JYr%1UR&dGKL zsd4ZOe&tm=7y^IaR9zL_B`?i#U8rby1CqT59QOV)T({(2BrTd^d~&nswlijB8bnIg!_&MotT6LGFq$@MUNFHjf5RCD#q(rExFlwRU^unsijyQBJ?iba>Pl 
zaS`o0<58B>Y9QE$=XnZf?*UkPA-lprx?IyFZT#~#iM^Td3rRoq##J)*LtOGHo+c@j z?oLdKNsSMpWH_t0e$;mh)g_-kfubsA%o6gka+WyZ$ZR+W&Rnhj?^}WB>zxi*5Hcsa zB(*%0<2N;H(;~f-8`k-$05v993Mr)kMdtfF5obcmTIr&1MWHIc)p+74S5?(VhZU5?vh*=|QL!W9d<507HfU3>*m=FGf~ zUw{h|94xfaxPR~@x*bk4wwger^1o>@JHLm3Sqi1HgRq6O8RJqKba#Vz&5Zdd!qHC3 zW~2?^?i=W!L%V`R$gkziNY_G@)mnT!a*VMd!aG0G8U=zcXG||uBKFV&yYn(XwO^xxgw5(iltRn_a2m!yf>%5AYs zUG}|8@d(klYX@BwLpw$oZ!RFY%EU|Xr%&{HL{CEKE#D7@HRJar<=hL}N9i1^IrJ1T zncfeOg<8oCWzbJoZRHg}h5IY9>CpEV%J-wA@l&#-OP^_k?f+v=``^i$lh_%Xd}8)Y zBo&d;?Ly@-}(tThiKds6_g{`c0FsjmjDrU9XfhppyHHc zYQM%k*{5iJ-{T(xRYjaZoc;2F&T-OROrU;JX!G(v zkX@muN9KVJo^gQ$d!oseHR?yHg284 ztxnVO6sD0)1JD7aL-bb*e2%FEITxnL8I)KOxOdl6R~V4Uo`t003*%>;eBE-})iy-A=?8V( z5$FTAx(6Li2%x~^SUsuh%E{F`pD^QC}Pks;4FqTIkpGzlZAOzcfQY( zOhP)4&o=|8Iq?MNs>lGYN2o0#lGLaq_9(&sVrMc`Aqqr*%p09e4w70Y7Ys9LTzY}Z zY~?gwy{MCfwfmiM4n_8r`Z2^g7Bz?A$H52>Wbwyr<;r*S+}ne!~wNi zi@~LVHQNwCeG0WY9`=&gUF+*`Z1khWe^F0wo8A*m#R6fXg`eZD0-dm&G;Wx-9b4~E z-Yf_RlVrtC>n1dGfD=?Z(7+yHQx-45cRn$jkU)LiJxL&&vZ#0QH< z%n?qRtg{?`uxM%X!l7wq(nKp=#0?-?n@AKTXfpCN1@c+9MkSliE2KLxf!sbN<|Ev1 zdWtF!3EwH;gqyI~XN8^W0qIX;Le7j&=338X6qQj3D6@S=I81Kwl>%7d<~N$dm+6@p zLW_(SGtGRNMf%Qhql~Z><7j0F;;XM@gSlEMnE{1<1h82NT*K?>c~In{`v%C!Linru z%~Z^4IV*u4^-fN>!pGIAa55v6ZIrTA*?h!ir>P_-VYVYSK2ztQ7% zrB3g#a1QMG&l#1+8KOq1RmA#@KLe`YkEa?pMxLV7Wy_Q|P9Oci{(R7*3Ig%S7X6z$ z6j^+UuX&Wo+2GLXqe~BT=&=Bsc$R^O69fJ?jh&#Z13p046ajh0eT<3#+g27}azodC z--I7&9KfRqf6ha(lxtutpWzfO?x7YY?Ee5XH!a@P{>mru24%0`Y+jyaGiZP_j$lo< z7`;enz^QQt&f^671oW5&$TI zc1AJJ4p$s&nQ6*xCbCowBkjc^+|H7~u-qN>1wWK!C4~T3+XC_`U8cZDBt)2juYJmI zBvVLQGh(;)R98fHH4@pE5F5>Gf0eUtCSp2KExNA)960(d-;C22+>1-7xFaLIL8G+S za-JNwy=HP~hBZgdJpg;eCdQq$*gN#IP0gtUS2`%62BxhAMHCR5hWq9eq>hX-I7I7L z0Z+`IZPC-H_*?jw{2W9{0>PJDOmqd^fyE|^H_##mQ>I%3eHRcwxaOpk4zD*nl(*4)Wi3Uu{iBG`B05^TSTji#nUY@!J zSW>s&wg%3Wqsx?QlnqWPi+VnVQ;DY!+=9GyLY7it3)xr4iUF7HwT#%3H|m?eQ;I`$ z@u@ncbcu?SkJPHtP*}h=`AcA`BT$pM^_iRb?16Gn7?|b?JQ5WrrHhcRY9m)=PJ3cEs^u$D@ zr`wE}SCu&uyh%OFJaiqj1%YG4+NLsjT_b)i8<_UVK=Aojvf4FNb1!@L0WY9HT0A&$ 
z!d<4iAv+{_KN%(pHQZq%2dE(up~DiA@%uT_;at`CiEs=9LKI`sF}RPJ#h4bBN%UPe=g@T9img&PN6OX0Dewg{JGhzPp=Liq3MDHKgl$>ajj}*5q882Ua=I)@>NCxo*nd_b{aD4N=L0mWTp0*oQ_6R}te%2eX;3g(*gN6z1VF?Rg{) zs9VS*!G&{?8hnDR(YoLqA=LK?+XL<+AOdZ_P?rt~j{HU`C_2W{iAfKw<4I(CI=KlU zDP|=|(!Q`B5RFa%e~cLqCw2dgB;qL_*wr#yhGmh@JH*2P+2V13P|iEj;_Kcu>ev1d zijrHEt27%=lX)t6!Xas|{0EO&poJpLCj*b~7MGYx~L*|p# zkd2G9XP>ahqu(Q^=mLpLJtA_%*3eo}7epzDdJzCGDHlW+I^`wRQM8N6OD8oZxBlMF`nD#2{h=4`Y%VhurCP6L4Q{8l{N|D|Q#v zKdwe;K#2Nk0IMe8>!vUxDEa8N2Wn~#P+X$7c;l>HB^LG>FjV>n0FK%{*(42j@Z{l+eNN8l{X!otm(0h&z`_? zJ1y>e-o`N4)Mnb!qNV9o+kelN(XkI{A3}}_zr|UaSR9Ej69fd>2(mWHdK&AhMuLPh zFh~i4TUv|n(c>*C6#Slj?F$W$u#;9R2*Bi|T?%s_P%KD2AX7Jy-~=MG{vWM*G8PkK ze>9(tE91%*8~~@A7VN~hFbP3SByocDTiN$BeLzvLhXmwoDo+Z}S%-3~c5^BnL$axu z%+mlSK9o9vXW1!BQma-_60#zjSZV?eim<6f|Uo`&gXr?5kT%s}#8J%yWd=TdbbnJyUKIuq2hmrNW zcyVF!m}2k-QY}v;J$pn=*j5zA@v$LnFdT+sz?mnpLcH%TIJ_^QyzeSA(`qs^&qpHV z=bkdL_{7$jtn4+e`$&bJSo_6lR@GNUCmzp@tMXk{iW4!)+U*X1LnXI6K4c6q@(sp#eaQ$Vc0G}l8Tk&h)4hUFI1QDSwG-a!yo6I4`;)kwRp29+7);i=0!PD>BX@ zDK|%M&6(<|r?S$PO`y zN|>ZL@s2GYDwPcBQ!2ICJ+^mhd*am2S^=dS6sxfK@kojGEM)_lb->T~BrpeOh(sr^W-z>9` zu|f+&hhK-PAslv$F$li#%${*zF+=`SE(qd=F${rI;2_)}2tqAu%d*)L&+cG;5ZmWT zDJqrNBn1(zD9>sQHHb*jS3}!waS&&c@h{KLf7)-)Q z5C;-N&to_U1%o&aH1H-3-vc-DDX`1uVIE>(@X1?`*B_07G3(v%Hgi{cJ!?0*jnYPE zwHi>s3yjs!?i$k1@P=mCmSGD?xk3ud9ESXH#dsj5g=;8c8N@LR_iAA|Sr`(oIcf|6 zM%x$+UftRDm2Ia@9ogfW-6q9#$BGoJuO`Lns!vo_s{Eg`?>E~BZY1zW!`*}Vz=J9Mw5|pHkK4~%Oz!8bd40hNJ?8$uv2LL*;UUDZ6+gS zioX2Wky0$Rh&9XGwXlT!*)^|+{S~jhCYF6#$hDIm`Xpk?VOmfUcgaZT;XAxh_PTu5Qbqb zLnuL4)PZyhCD05O#1L4swQS8=jn|u6S2QargtsZjDxFHoVg1aCa9S)vLoAp$mgqtw zF_=_KgXqn$!WFKHxWm96k1FEBkud59p>TE#FA8^#s+z33LM#Q>^VvUW_IuL;EVDcL zUOfLcufN3Kiv(D&KuOlBoxaK$Gy0p*zdNcQYgF~XAQ*(1A z1+7#oS&;)9&C$%wTv^GiG;?$%x1L$_075I#0Uua`A+D;5m}gIlv6E2vX+q?swV3?81rdQ9oipA;pp2wnkd}gt!Bh(;?XKLO(cjt2dLD&8I4_XoaylzrL;q8WGV0nGN6#(@-qP9 zM@T&jdb=DI6vK}!3&3x=OVn27w{{dHPV1QoTQtg$j1Yt(P3D`C9Y9f)?PRfioKG5k$ zb@gZfgmTQW+uVn{R19@`uTP#?qT0@DJtS}1HUJo-A#L<6>YVKouGz?4x*SMILJrTv 
zYK3DCE2z~zT5Zf-QZo7PeOTL44wed_OcA016}#B!UAh zezz?&%5epqX22UB&5}uZEO#*J#D7tGXHLqUh_I5Tw4hytV}DL;xR%=XqBt_3)vu@< zXbtUJNar*sV+2eP+FJ04_qWW0%1XAdzGd*cubh*D-Y`|>!sp<|;3!7Q#VGMIrj*I^ z{evpb8o)Se@rVH$4EL?e>p_kqym}l3#Gqm$2>^2SOD!S?5#1WI-guSDualt=3v6@% zGO2L(UdA~x#kZjsa<3r+Ta|uOq6yLQ8`BP?jo~N-Hm>$!hN52z))_U>Y!1jS_wZ+A zD6KlI@{nQ0a35G!iX-1ShW|l;EAR_1qGwD7HiJ$hj|t2j)G>WK7kHX7NF766ZJe%A zC&5E3k9wfAy3t#ER-T{!kp}-ee=t$ARCm6XAmb()+TPk}4$w+>5@AU|L$03PIhcn< z-vO9(GO@4Fh^jiOYWNd0i|EY8o#xx1|H}{23%gMpOzk z4qSo;Fs@`=xL&lSH3hXW1iNN}LhQc?yUL(K*;6gbvqS7_s6#R>qz-4V;sz9PqUg{9 z2G%=fA@QcLW@-b{^sX2aDlQADUf+vmdneICu{Rw*D9~0d{NE_;CD!lX2#57A{tifu zWfyDpZXizw#0{+fR(RkjWn~x$EWSP&_W*ES%ZKi zmf>@9H`(QZG1!By^&anq6HgwAI>>0%D3b|;+#8PQG+SAk5|fdhIHYZcZp2TZi}Hea zP85X~Wi5KeL~}Z@8DbErCQc7^@4E{fHR=vcuxLTv4`jG=m@lWJmVkY3tIkevJghLIblu-kzS_)D?VL|WvM zHVbuN1z8~>LwXV|&>yLajU z)GN+Te|TF#wSYAeW+6`}uOF?r>i&wvX7l?o2jjZI~cEX=lDLV*pmGdSuqMVL~&vqP-RLJIWIUAgvXo;(C{}^6l>_Bmg5@keKNmxi#ck zM?ztbn~%;~+RL)`Feb|kV~QI=Ed;$6Gb6 zIxHK|mZCYvw;U?-G^@1$h|m~q#!IH~gusCrKrC$#R^qU-$8-|om8|aJ6Vn0!t~Hh7 zcRJ@b&FLk?nt5Bh8#xSb>xc>?8N|<&5OUrOo**nF033^FD55tG{b%ZpfY<;golmN} zlsR$WDm12(QJ)oq(z-FbMy|axVlK1+0Mi;KB74SpDd)B<2>=4LoIQ9LWB`oVZJ82h z4bWp_Pjr^`I3*tOt%Y$itSw?%UL;j@Sq8?IQxaVvzzNa31ON{u1pl=E zX?EEhL*ujBka$(5-Hk{AGh zBTrw8%q#67MY=5lGDd)hYAG0?x_&5GWI@>noC>R-2uZ-l8IMP4)S`-)zt6ypp5okU ztjEZg*GufMNqf5C^@!02%P;MFH*=y(LTaej6 z%}J$zGsF$l;m(IK`vDIp>-CP2QUr~nSo37Qig%~3b2r%Y&l;cKU$hL?l&l(DB~&egckB6+UP?S=ahWtR^>>b3j-^5Gbxa( zn(-F8wR7Vu{n@7BoGUxL{9c&=wJqj0nDvsA##%iJIWWD0hAl0&~o$}xGby_exf z1xQX81-5i2thG3CCRC0HfHdn^9~Z|qmYlPR0~mgfm_rRa(6365{z0&avd1bANvA;F zu7^~c35kBzufabIK8CmZ4GzI5B<-mk3)0yr&Q@pi5O)@>-wvN%S5zBy+n4}aj*3xQ z{4k2d*!yTEguE#%7dk}IS!Xso@q~j@D3QMcGh0EsOi>al_)C&uL>-F! 
zw5>j+D0)yAVEj}N8c;o|&-?i0IEf-1gj;9RCt5t~yn8(hN9&tr zJ98xM*=1euC)fLz0FDXWTsy+opy^=#NaenR!g*6b#8e~Qjzf1gL<&Rs7)64PNkl$* z3sLzVRJ+C2Ko~1_2E-4a(?TViH#HEiF1j7=At3UZAa-D(>W>06sW(dXM6US)+EtgEs7nF>;pjbvQ*cJtk!ba!dpFUc54xWdz5KmQR7fnzb zWy2bxPQJ#khXLZ*Qm=`Gs%QGz=v=jHTT;E}%)zztCvB@PSi`n;v7D1CZx6NCz!LC2 zTA4JkjR46I05-K?hP8wuYRJ|nMS8fZGb{|<2#` zb<^Q_qM1KBXKP-V4=DW?$Q;P)qTAlx92xFmvrP`V2sN_eGZLgiJtUZf^GmSQ02L>J z6@c^o6wq6~(@{L+9}OW{;ZZQva3=!G066@0EjE7V7x`J2h<2S)#cA6UZ&nQ@+8!`b zf*Mp`G-x@LRgD=KX*fMPlbH-N46Hb(uCvaLiLGF<5Y4+J;P`~pj2}XEvTKCHbXFt` zSbxM4^poQr#{!Nh7_|6aLY(kQ`1m0*wIU`bVhL9`Da61LMTsJ&V9@;gH;`7eqkBYh_@3!Lr12pCjJNjEOJdNQO=vlM7gOGs)5JBtH_#sA1K zoB)W$(q}KA$}I~ubZLqct%voD~pZA3$XCC0oeTgxQH6x zdX{f-)YaPj7qo;a3zKzJP8Y&u8u_W}m~~qx@AgJF#$zA^?&B=f z=+u!PUI|}A4cUtCrgjoc^4bJTCS5cYvJprct1_gpe>JCLu5NLdO9H7-6i6k6#2o^m z5&MfI-|ELnY-NAG9VG!plt-$39|Wt6#DXe;Q1NX{R6vZg(8e7Si z|Gu0m7ex8ixhwr0*D@Rr!{My*W?MZ|h^8B5XA?obSK7eNR#l2DXtsw8h9mq8NZuhb zPN-n6tADA6`?}2iHy(;8OX$TVA2lVTrhMm7*BteMKbp)RGC_OvCS#xO@Uq-t)J?fL=CRvOQr==+ru)aJ}EqhoWB zKDWR&*-o>LEbrB$IEXVAd2MQ~zM}aZJivga8Q`Wlp{AQ9rwdA_W3#>y*%)O>uQ&%U zWuv9flAeCDHfP08L*N`S!@T6uo%XBS4oG@jBaP$}C^9;k(Xn|*%ew+r4lKjL+hget zytdFQvI=2|$gAi04Ge6najtF8a096hb9??5_Z$naL)?uHQzDKi{T5xe4agY}dZnR0H@}$4 z9L;OTW-4#c(#GeZ8~&r+BBRL}wbJ)amuQsD#TnuniKiy9SHO()i{Il#&M%05WBNO7 zr4}VSzh@7bWT%Cuhc5$iBtvdh^uqTqI<;47)YIc93~ECB$#0 zLv$Q#=55S*6>1@n@w1w83n6uwm}H4LeN^jeQPZm~)0}T&ybC;095gmd@s1_I;R#T8 zqP1XNgR~~l0wu&yx_A(DA7Cc2G`?BE^Ux*7rGRH+u~kuMWs$nQHeGD$Khf+{dV?cO`K2Vt_sH<;ON9zj9G_bEd(;&!C@{!%pFJ-OA*D8?X)wc zjIy|A1rb$*LzsaJ0mNdOS7vfvt3~cBUEK=L$zY*g864>~FSHaX9X19?&ch1x^>X+{_Hcb>{;>`vg3LLp zBrcn>9F({>=Y3`O&{0xi7o5ZZYRCi|A|MX{HUK-G>(3KIM1=SlVnQSpfhSoJDT_|1 zVtXSZAQ~6E0ipqu0co}3oHE7`n1-myF@aYG*!EF=WXBejL1Ao0;Cq&Zgg=VWT&g_aR(#H}c+mulpw zXp|J!bCR$yB)JljlJIbCg~1R_XhbAjwE7@ey`g8sj~-D<5*dcZNMdBu7A%vZq@{^F z<&i_U6nzp=$X3tLPc={RfIPj+Q@dXson00N@{-~~3YkJMbDUx4#J0boLN!v3!!-1< 
zHyY$;5Nci+vgWVRZ}c>}8U2ioX1mNXwY3`av@Q<Fmhyj4c^N&rnq)klS!klg$@V~ybN&ehp`F{Q}t*4?#yQWT=9=9F2&aSE;E(uOFq6U2PrH)B%5P>doOmfX@#bs=M)fBQ;anOTyYp;s@RV=daH?SyW zrVz9`Nj0k$W!yMAO9PG5f}ncXq}aVAs5e|JXdF|8v@mGe(Ozu=3iN&=lU#Lz@4V}~FjS0d)x3d46RbNALZuP_+FadOXk z%Q56gP#L1+c*ziQq3b{-9;RL{m5f4#Nt4E6rwqMAffjK!3o-RtMpQLDRMR_Mqgjns zYBc-~E23o-W(|Ksd5#%sauq~mr7VT55KGmg>U#|dqRcf?O0zY6VfZO6zxVUAkSBWu z56M&Vm;@o?c9sa)qFA<6a`%Y0jI!CKQkIa>t*Q%y(Ws{1g<;WJ&%_Kyp*q7w&b!Umu-W>fNOi?zsUc8Eeu3PU2C4zdsbVUZho&?7-;jW z^`yF$>ZYjm)5_AyS88D(-CW8%O8+SBg#lf8N37!qIwod$_bk7AS?J5lwWB|O=~ozT zsJmC)y)^d@Z&kGR(#7)6&dxsM*?E0lo7YyZBQiSQ%4X#=^JZblLtNWeznd-$Wk@Eg zv{#xI-ECFydb;eYyId!`p5?FUe$wgnpj`H;{D1TOpV80vegC!+Hho_aZA!Ra)eTe) zZX2EiO))--Dh(^Yt3Z);QV>Coo=PH)k3G#PEsCiTj6*!$ zOOIAlG(9B6!#p1TdqdQt)};dFb;M#{JX5R$!ZS-VL$gDlA=-Fry<2Z3+*oKb!Uqo=M3BG(2MPmWqT-n@Zk{FO{;V+A z;reXP{!6gJeQMs!W?Hvpw)k5#J2S#tzixL?b&r}_!>I3ts3||B#`P+!tQ)5sV@eou zj3zDm7`ijedG8vd?bjs%2CB4P^=Z{utG6!OR#hvlSJk=Rr`mzJ@x+F8(yRWu_u7@z=HJ)5-nB5~eZ|{qOiRE_A|q>% zf%?9UWwfr2@X2wm;J%PKI)zEf*q#_wgrFKikI7+bwu+Objo$mbcX@C7YO?`k%QB<) zWrbmj(pu;tgEA{4q^+SSs9+!zga*U|2f+&xNY0zBNwS~b|2~<>TO0{ORMoWfdv{q$LOGu-=wK)~*ikXUv>D0nNzuc_U ztbAVDKiYYUgw7;fOhUy>Aj85?SiVn3I51$P-~#hQy1aqRg<;F5mJhJ(>m+aLbZ)e=*0J%3vV?mu){F>}je68I*r<)D`Bb z2g85~B-ajzVyS}gwGtGynGSc4E$hreJ@T&=LaDJ5Or#-{DoDHDwA8-6v2(N_?AS#I zIDeL`*RcyTEgd0+AB?u%-44-!Ch`X3IQ|Cks`rLoCAdqc#!95JG3IY3DI!kvz7Bk8 zw1g9$K>ZsPRJDVPLMPf4@*d&NPpIWfm+`S@dssI_ht`UD{qDnXbE4CiGe zMK$Ik5mFU8T^H5X5ggALu?gpvS%l258KmHtq2Sbz1h-I@=R$yJI%at=6_p(wI-D6} zssy(|(_uqifwnpXi|r>J?|q4Qd8aODbfjCYfOR;5RVj{mpLS>>A^XY3V6Y9fcK=Is zqK42-N5wU|uqn`jaO)tmQxMALHRKFlpoBR$3U7Fh0;e5_P~stq)1f~P;h`X{a{!x8 z_isP&AdCeWB4i+9{DP7Gv{AZhc#ZCpT%6OU#ZlQ|9j+rlE#W;tlQ+djN+4|}_L5Pn zSZlP36?>-c^t3tP)Xj*M+mC&(76pv<|6jyG z0Qyf*)BwD#fU+GCbtwYeAa$RDD$ld7#6{QD3>@m)t|X5^`{f8i|lS2+TnXkL~mE1ZR)iQXVc* z*YQ!wxkQ~$31}r)o=-LZku{ZjO&0h&^5>=hxH0x!1`RU@w-l;~dyK84C7blHgz+xtvxxK8zA#@<}x0Hb~?xfBr3Gh-<}H?$6aud3o%2&R$99s 
zvP}hh+{4z&CxXeVP9AAF;?>;*q9Vyz#LHcU`^zZH!we)7jfOJ!y$ZK#g@KDgTHwUkayUP#HF0E= zl_vrWJ9S8tWl9ACUGEo#kTOf>_CDxXeygb`W7+S`u+(#2MSdlH`uRmwOc60wSRNkK z^HU|m9~7JlEmB0u89E+f`nNL0O9N^+GZUj&fz+ZtP-vsBb1(_P-0G4M(lLr2)Fso% zb8kd=*#yOBb!?UL&l>uD8A#_0y<sH^VIvx`G3PWfEVjoF`aT02nbx=$N8>Oi3pJRAZ2` z-9OATiHp0`AAhjPJ`Hz*6IH3~_JB^Dg!M`6(zaKMjAr8;q!Zo}m`Ixo%8W<>Q92Oz zsc;{W+^$XZ5G-FpdQ73F+7rIJ0wO4GNqkc0#Au~+HBe)$Y~V|j^UxIx5PS+V#F7ka zZsrONZR-&?h!98GcQUFHVHP-WTGl@Z8XB2K~VTc<24}?e@5VE?7k(7>Tzzjus^%W-XeBr=Yk41kxxU^nj@Q^WxI}8c z^NXKTPohxruNCt+LHeWuno$luS@SU#Lf-2%wnKxN#%1Ks_%2ZxsrdwvlYza$w$5V8 zK>60uS}h%>U-pnr~e8^pO7 z)3J^4>aeCBCt_usT zpfDDNkDa~@V}>8+ihvcD8HGG57@8Y`gL9#yOt`fPuKLmlDQ{&17zTKv*J<;i^u?q5 zQ}a>yoRbJ>WavEO9FzOHgUrlAJZwRlW8{!iWXYpmoCnj=@0s`+d9 zMM(lX;%X(*!bA|LmVc6 zq2AHHHT7)*k&cy=AdksNBo4){@%s43D>tcqJl^+ohg z^1y=ohx1I=FWt=xM_9|G`>Yg0BJUL_(rrUF+y$ZwV!+EQ-3}wulmD~AeD8I_dP!Z( z-Tf>B{kf0p{f96$r9%$djr`H}v@%zHF?d_ycp#&xdNW?=C_n4|NNOt2_W@(X5w>9V zP#jM&|GT=jL2~d96UjCKj!nmJYQ@3$A`YR`g4KqQKCRjXp+!=feMDaN^I;q5_4eC4 zw5@B}P&*UDQ5HHc5%d%+8PQJDXMPwnB=fv$IS1xn-5kAuqq|w$L?<5bq&-w&A1Z}PLyoZ2r#An<0Kzy!}wso z6yf3AGKZEB1yg(;lF5wsq$^H2XZuZZqOZE2{hDCwmreD<-%k!Zy(5xYuz&phEj*+U zrKaG&ixA?Fj&nHvgkXGHK(leuwzUc)1?Q40;t~IZ&IfH|Z8O(<;L$sbS(*zA!ird! 
z3C1P>0B5<7KjQ!Cu9qyw?oEqz_R<%EY){IinYZcQ|!J->bO@mPW@n9;>xjo11bL?+p)g+ zCh$=uQ$-vJr=&mPC_we0HngFFj>gB0i7fEM0cZvZIXtNaO1bI(M`F<(>CZ&Q10jKu z1t7JOMrwzTe_|3L9KMOv{r^C$Ryq z)=;9XLhMIP(A3)Fg`_x;X&sl^n*nt;Qs9Wy8;4-mtZ4jR=A|_<&4ozl!RRi1Zyv;Y z^etq)`;;OV(|Hp4U?$jb^Y$`{pN8(Ty|W{2&_Iw}?L5WN^Sw5BtNzQQ?Ys*f}o+*l>WtEEN@bBL}LF^9xkAxEzv*x9m~AZ`fh{)iDCo!fbA zAH?#gz`%%8<(Iv#gd&tsO1ZO?=>a~4!0QO!w4v!lQk~!*l^bVT5LwsRq-2c;>>kN1 zaS#iD-X+E`ES8K0smFDzra(7D3~C#HoWRhF#^L{zSrYqXy^U(r^{TlzM=N-NV1*cv zukzRr1%d%>m8Wp;MVt$v3K=^7F|Gy+G{v_C#4*>A!Ib9n$&2n9EL77)tlMOCHelxz z_#r@}zto;iHcqbs7QNgJhd&L`Xav-dB-?h{ls25X(w7L(cvLI6R1)oogi=C0QF5Uc zXhQ|AcMltbwUBmRK*UMRSe2UPO@<<%%e1nhuK`Xa00zLD+W&{QJH80`tGe3~x4Kh+% zNR88IHjb250esh}@Al8pM_P>7D&iBq@`dQLky%J7XhXIQd`h8~P93d>fOw}p2q;GG zbP$=N2Mq=!0jC2(0KH_}*+C6gvpx?85qede(9Q|5hQwH`Q2zE;^aZv?dQ_NE=|)UT zdZB}0G-`3R5RId+7Aj;-KYS2Iw>L@~;>XmD6e@p!2}nvy;{+dShdxXIfHtUMTtQwS zJCtyd$~8%3h8TSp^_Ue`1%O+D>U1OdR7O)2D`*9>LiCwNR;ZGh#w>ym-GW#kr5S&e z^fx&~(B1LqG#~CR!3}6t>s@HU* zkkxwGXt-;nhHx^DZ7z)v$t9I!95Wf$>vNy~Pt|ZiixWI9# z7?3WPvOw`VOuHin3uhom3ZdF4iWXcKxlS{Pbkiv6CorK?lwn|6*6@bp%JUnKKLk5Y z*>##aPIkZm7987v`n3RncSL##4&heh+4!Ns7$gK0D?VIJjWUn*6Ndkv6nhc=NOayI zg^ck|bm}wNy6|V@dJ>{`0u{R89HvQ2jni5q?-jE;0 zXor6{C|#+TenMxv^rJgQRPO+W$-3C?igt|b4mMpR<$jJvcM0_q`c3U#N%COY+7_j< zUo~Dnkgn`yR&-1|DN)MSCD>aU2~(k!N>kIO{7U=Fn+;Kz8{`FII%Wu6UAp9YA8^kz z#q6H5o9$rBb5cC|w=0_Ibfha9uRB*FFKw0pz$tyhvS||(!p+gWX6``zk|0v19YpIY zwS$RO$6@tD?AY=#exZNyoCx|Eum>_Je||mWj3}< zT2*2>5EqV#(%XwZf#A%S?#D3%qAhO^F*3BdhoHH ze1Szo>*Cb1KH&f;C2OqbV_J6xI_inWVcHL-M#SMiPKINxU#}1HWA@Zai({guJtl)_ z#?xdF4b#yD5xyR1B(Tyk0#O{>Egi4|77+ihk%xlLy_1K6EeQ!@`L9mkAfD!rh_wY0dxUa0rfJHPZVY(-%iXzQ=#f95rxZ#{uRrGMX_dtt}7&p z`v}XoQ}#_)Gy87>YDy( z9u;mLQPgj#=xG))?NnA&d3i-uewie6;SojR5#r(D)e_jgF3x>WuLGHKL%sR-q>J>qJ+7UH6;ooA4I5@ZPRP^^GV>QMdRNAyHgG6sG2~ z`U(Z#h>$2QiSXu|eTN1Uq3veF8yz)q9ST|T2=PKcBb8cGvaRKQRi$*PXKEOW-Kdj2 zpLAKqQrBs^r+ZqN`b{)TE5u#1B)uet$>JoFG@KOOqUgK>ttgR_P3mM|8c2c*z7m6W zO*%Jr?5xgYni(Tor5$t5kKNE(S)skk>Em;6ouZGKJ^HLq0LH->b6zvIXXngpo~fyw 
z9kX-kk4^5+sbgC9TC94NQKdqd5ONT5(3gesbV-D?&?#bQEkwGIZ=s_WGFvF2g_1Re z@)SyiRv{~-6e)ycB&3ujq(`WXAc9c23JD>D5VGYUH6+=ikkmteTpp{(vKWbRhfxbD zWTXjMHjmxi)oVkkgl>yQO87l1jTIRKqJYO%_D1 zj6;4(9Tk<*sR=<0LlW!Z#6%ZTvWoO-OJh+g%HkqGGzJ$!nyd~q3pC*b- zyPkNO`6WMO9{N`h`skm3Wc10#r@6EmRGhV3jbWi1zO$3EtOCJ z@WV*Vi}(;_L51+vOpQ7zi^*rlRrx?8%8gPg9|@w|72Hi#f{tv_zbR3mPx{PPqe zV2Cw_D2ReE1kqrj0&$_ZP!L4~3l9Z2ut9ck1zhj|Lt+7h2p$|XSmS7b0gC_|8YHMd zfe2`4aLFN{5CJXM$*CFQ+cS(rc9HyHU*r^JS!2OmgN{WJ)8TD`OGn{?+MN%$IH-$&K9JXN__qlkP)(YLAk$e?wnwo%oqu%Z^rsv{B>A!o=s$=1`r z{KiySgxGfzWK^urlW3gDq$Mz8v{m+$y~xI68IruzzUeU z@MzNo{MMc>PARoQnzMO+HsVF%7@%-)_&8}Z%H3<~gh6(?th7zy-0i7&Fsea84QeLR zip5fNy0=a*+v!oa?i~p~0Y3#36clJ{an`A(qBn-4;R-_BujnB~M|Q z@n~%}9T(wdTMlQd=6I!DMIu;yqO2}IYPk(RXlWcD#l5E3Z40xv052K$A zD{`xw0!ELkPcl5mhA(&=cj|Y27IdbOh&|!Mh>o!8rKU#-XZzQcBx9A_o2D-_i?Z+4 zU&?>-P9La6(f=zPGMhVdwLk@E+dpF7Xoa}&VMNBecspz|utOyr`Q%lBWTv0WoP!8e zY9ewWoUZl#RXE}{aTzhtm2l*vXV%KFb@I@{=9(1m(oux*i70QPR zV#aiy5ZRK*HmcH^fo52c5QJicz}Xs5FCZG@K+PFMk+`$e5(V%FDgo~aAOU9F#yK=W z+}&k*!*Lsp^X}pZZPT4DS#JBHD_+QBcY_T*SS%T3cMk)I`;gk5M5ZAU+R8Ju?bn5| zC{771&lzYtDuGJ}#z3SHLucU2!Cel72h4!4ye-m$Ld7P4*)q}RDUx#WmD+*yWduGe zK;eu9F+?bMSqG&VB>`xB65uQX?K*~(gwqE6A3%a5@L9I*k~?07D)v5|r7oiGG-)_| zayVtkWR-FZ&T&DCFX*TRMe<=5t9*_BGSl2$WEBeC1v+oV&p_-n%rw-I;*ZGC${6QOtL?_WE}T- zbCFHT*P&mm?b*d0%@+ySD<+MH6*d$A?k*}?ZaVRCv7Qoo)enH$O>X#VzF=$) ze#h@p^_+@)exX8sOsCJ!^k7EZ`+Pnz{OG5jpVPvOJ^lH7BKYzCKR>6K8D$0Zc^dn; z*b4NMqMs2p2>P6=c)VXhKPMCO9Bt6&>A~YY2>qM}1a6xNeV{xRj=VyD(hB$dV(8x_ zA@E}x`kDqu;fRQbKKM_JK8YwEM?RyUsmkDAZS;AX^fGKrNlLsTx&uGYY9{Tojq%Y9(aFGC{>2p=dF6xJ>2HhIR<@LELAAqwTel@za z2uO;usPyRmmPyY>m2=(b((tVzl{6!UfcL~nosar+{V3A(Q%ic1M)GJF?nkwSj`zQsmNopm(!rknp{iupOMyIpt_HmS13JRqNQo|N=P~$y{NsFZEc)29 zb)j@!ozq8kx)2p$d{}XCbI4kV#CwZ9D69{#5}^LuaBAk~A!R`#W0wqGtVzHVb;A@~p+OxS7uKOY0fH zJ(SYJPDF_Vu+qe?16a+B5dy6C5NH9c)`VVO5*iwqsvZ6xh#zX5UgE-60tgPhc+eve zy|tORDpHJRwXe372&fwhHjxJj%2pz2z zMYOjhmMwaI{}wR`ox<+iMfHqQMNg56fJbsXq;1wRn(X!I*%z~Zs+fT1qOjU&i(gGS 
zt1$cXJr-;{@tT?uX-Fv{yCle`K-Fk}MFwx7l@~V~;CZ0Z*RSdMJPfZyQXH^CHoDiU$&J=Hh zndE~Goob{d0;vR~$jk9Yg;3*KoGcQf9emfAs(8vqC=LdNL;hsJ9wB81DF&0Mz~tq| zXudRnHWhJ)SQ}a{x|_D5N*hSEx5K{LD9ng%b1Vy?^Yl$#yWv_|c8?#pt+b|(&&{fs zL2UTPm3QH7Yd@L8(csRrEzh$JT#P*MGr(bIFzL+UBC6Pnv{*4wcIcHRoL)r|Ta~=u z;k`vBa2QKr8Fs^uF7<8|t(HX=BPWU-ZTw>5COZ-Y!q<@TT;HH=l?`Z7c2PyVDTr{7 zVbf5&c5mpe`iMy!R6&>D`AIiXT(jTRuF)c~ z>+%7scYs>0X8#m?T%u?iLxe~ zYS0F^cBpT2G*CM(knr+tNBsmP*bsM&fDscSCp|pwknKX#y;JCRLk}@wB_JMeL%R#; zp(y*F>#*H4o|XBijpGHtdUuFT+KeBKq0rdJbQmO`QX!H}Fy4_LcOLX+wq5%TQSaZs zWp&-9SQgBF0AV)ig|l4zgMdT;P(ZK0!ITE2#%|n*8cmi=gLf8Ui)Jm*qUvck`oRd6bv5|V1g2#Fgc(R zFdMkIORJDlLdS67N?)pA&mHCk!<(=E!V~(S0+51%%hYA~J5n(2)h~o!o3iTU5JI=Z zrmCv2C}Q3YyFGcD!Xg$aHXKke0(rvZWYOF$K*`yOkcJ97tAIhl?iN(+L%{%gJvvcQ z{TKKsbc@;C*dSCO8h}+>6*~DiNvKuK^uri(=f!M)h@IBBn$`;ua_lq-(VBtd2YrhG z@j)%+s$05|8$_drA zeq86_5Lf?`qBO{?#Qh-Y!|!BsjF0>fT(BBU&QHlL^p-0O+1jOxjemnWF1uUA`(xn zR?pO4`sq5t$KZtEu#ztIqqxfr<x?Wp-?McHlX<#;H+;gqYH&C?x3NlC5OEtVL{AY9 zZ3f;ISXjLcmggc~ymn2B7+XTCbYd;?$Iu}7#yv1!?rq?lG>xJ8QR>M_^`^{@7;d3{ zg5D`N_W{}qArM*{s@k%M`<49|4oi;bp|NRL^EdK3@J;^Dk8u|_YNJ#UTJPXQc4OlV zl_%v5j?+W4i-HCXYSfgEA3}_x_bV!d7h~6@L3t*W~So6A^#{Wy;nkc$Pl;@k~6=JvQ}3eogQ@&t3NQ{R2!QVSPJ8k{Fll1 z#rFQi!58S1dX&cZDMwG{T*C=%pH0U~oG;F;3HB0q7}L|(-B0`m!y&X~36-eLz#UM9 zWR9#tPy;>%^Z4NFpETTnZrX!t5BQW$)(<`)xIS7P;3)=s4=hDbLI_2pCD!ImNfhUx z-i_gPaV`e)`_v)~{wZ({#pYQi%HjJ{DcLza#G4IHV33l7PsNi~d#qRz_`f#*wS~A?J!5Rj_(; z2Xj6;U!bn~QOV393r!F}$dQM6^bzMIgz|GX21ldwMngHwnvSNJuHD1hH|SZS#wDF7 z1F_W^Kr0l8p+-N_!v|;5CnrI0DwgW_RwbF^R*LAY5n)Flmzs{Mgrni_bXRvuB8Sr4 z=}|_PlOAFN%-xoBo1{fYF}%@c@c@5u;(O$DwEbg~p9SVzxl$23pl8C7t^NC>pMw@~9oLIz9ot0pxXAAH^WIF$RlCac~P}fA%OYk6@ zlNbpm&SNM^Ol*fR5~S*+#zstSC~Z)U(feRC6C$G|DFq=1(_5QL*=Jo*CN@NmSVxCV zTGJ>~QIn3yU{@&;!*WxYQP~}UcdC$@NkRFv%JoknIe_M4JDHMsLz<0JDH{;}Nh_x= zE%IoSVqS#RM5zHO*_}?7plFjaMfQ_A_=TnOq_GGRvFHCb68f;D|1iZ#QUN{!flfVTIwz0s=+!dPhe9~U-(%TcG4t%suWT}6iO7t#KI*_pw~^~v3TlWN|VG)xim$+aBrD1VoHki z$|wyXBRI8SF>6lW7_~)ucP0>W`uuLuDRFKF~3Y$5I44EJw7>zfJrJm*DIYfT8@a^l))2DPIYdX 
zF-(ng)9m(~IJ6w}+L|oJh+HD!63ukbmmIx>YcA|H>k`0%z~40W8a5hK4dOT`)p?4B z4h(XRf{B`C35nBcQWK<@#;62`Fw=0Lan8)_a4A2<1=d^?TIm7_(pXWXK1k^xp`pkW z=BaedjjI4C5Qr$XQ>^~uIG zK6V)e|YIr342Q+}I@JY$sL>n?pm5lX_67*06Cb zRR(4s8YK#9Qu9?#(D7iV z&#z3Rg!x6wf+A$8Ta%oDouSUaWS}|6(FVHiR0yFu&w!_${m)%=0%#^MT$JcUR3(Yh z!cCjn;eDv;F9h6lSz+jR;8td4e7O%2+;!*q9M#&kmO@Y(hP;sc22-3T~84h$}|P5>B=CG*eNtp|I>LQ zw3r;BY)CnYG~K0VYjsm&pqdku9Oqr|HL;=Ju9Q159TRC_vO${O+Y#Q&q5l|vfsTw0_4vwKFKq{J` z6sZYO5)4Rvbrzk{9UO^9?>!0QPzcY=1x1Gp-Aq`+E6>t$)3Wz`3qq}=I8U4Wb%!Uv z&XdxV{nn9Cpf%Kei_>m|TLwd6bruh;QM!_Kj&6IeO2X%iGRmE}qiRZW!bGtrcObxt zBd1&(oEQ^^*{iclog_vq@;{-RR-+;`kIAM%X~!Q3oWm4X>Q|oPYl=_v<(LvzQ`0C+ zYEC5n37OOc)sMnY{S#}vE;!NUgUM@UCG{o-(sFa5>9i>kgpjtWQ)zgLEHrd?{1Qx= z;&XQ-Q>NE@>NZj&C1l#+B!mZ$8xcG$jkq-qAs(j$HQl0t#_&Nr!l@fQ!Yf)z_<95} z%`ZGQDT$QRQhAs%LW|NN;gBRe(^!i20>x-p%B>TwD7bjFMA(H5Rh1gGlC+YQTA_I)GKY8n)V{y)_50PFzn05-NU`N-%Rvz=V`Z||9+ehNEK|1X?4v&7;+ zgLp;`RSI@&%&_3N_t8JvN7MCL#Ad$C=j}h^H#avVSZUNKPvPW~Vm7n06RoI>YDSv* z#KsB-FWN~hQXs0du!K|%huV)YjfS%4KvF70MUkM?s8ARX3B(bH63bj7Dl2iSs;qIL zvoi6~Oay2&nntvfP?fM~r;+OFOj+ZhDU%|r!GZ(_6jO+U7!I#7q6S*Lih4|gp{5K+ zENY3xh{1Hd#NBu?t|tyXB2hQdSjpmql}Uv%2gcV7GkLWaX*G_0$fKeXg=9#2Nn}Ig zo#U+uS_?MSb??%LPI7isgn}3kxdYo}<_6$ynK7HjiG*D~Z=JaIA16^Sb^ zDv6sYUSx786NT8FVdxZ#IWsM8YC*E7n&8qWc1eXUlae;cT|9K9Q}?d6eJs^g&)L^! 
zxP(FpU`^usZp1%d#U`e4%h+&fI+A07*ZCytz*I6-*8#Fv986Ro$f!;+B?dXTHwqg9 zK~rH^xaMGqLjpkyLyLzx!|Eec)w!I|K38iQG{y>dghEm6MkrA6rWKQ+DjBl64-pEl57Av$8vM+S zzM1{@oBfYbV%yASw$0||lY|sd3bx+ZYGbRX+#A-R@l0!v6bRCB3+G8S=2%yKKF_a6 zRZA#TSRfRPLnxG-?`wGp1uGCJm`v^YRQPr-?+w=xu6}jjuRt(tS8#Vip~$m8p9$vN z_wK$iZaP;kK?C=!$DgZKP&iO1C{Q?9pim$nfPespl}E8Wdh*DU$4UN-IfU&MfLVlh*Qg^3WXv&+ooPLcQz3!fR@vXi=z>Vi0k@cJnu4C2G7Fx<_h zUmQxv009Bw4B3=62PVpu%JP)W{!z{NU#iS%>YHD!v8lx2Y6y`q1BWND zgZUA$8vsB=APEBe3PO3@W7})P1LZ-f+}Zn+M+$LgF`qori#t<#@<=Q0Y`&96>TqYM zP9Evbogd1{gWA1_5u7|I|2t1_lLz&2k!ejHDaTdfX!1xSE+U!9BekGLmSgfrv#vw_ zMpaI+NH@3W6CxrJS7Mv_%qt|8hh@WFjG4*)KM6h?iN>b6`w#lM*L+ z;01vy7+X5J>;k|W#QMWYQ`WE^Xx&w4846plK3Yt_76e#NSuDm@gcmAAkoWr|d-yfq zYBZdOc8V6caAEeu<_eg)P#oY|*t#v(f2kC3+2bMGv$)%e3+72Oh<(zkL=;Q%sghje z5-bbFi$-4~y5GMg#{zfB;$d@FB|)N;1z~5#R4R3CL1nCBVA>{1$$pY+8Ml|%K`6ZZ&_EUya~opI7V~Dy=9A< z*Z4goH?v{8Qz5G$_?9K#=rA}#>7BAkBw-faO!JTR<|cEU^$mi>P6)=Qa#Up+9X-^F z0$v(kQB#&j-5|`VKl60Qx`nRQ?Q#-;XbG}IO4KIdEXeS;k> z;&KMtx$JwBSjY~?W5ZAl?~-LYic;RYxLn0tMFWR2FCoLQ#h^83XuKiAoYrr(tX;su zGbu+cYA3FA`hRcNx`bbb_6Wq=aUbVKy8GLemBeRd;t%QRNZ6$#vj*sZ7uR-4)+KZF zZ|+TjTttQ5v~W^*W{-2{`~MOnGYj0}_QTVdU^bMSZt-k&IRlWs2E6<}&q6%{Aiq&s z0>w2W>K;4YW%lJQ4Yp%Xfc#Pmu>8W|pu~~VgwN4ns7%y=*+GU_j}AC~K7k+RoBEnx zPC2mjS$?m9!bD(1ra(U=?Y$Q6OcWu4a=Cby?pJ{;P z;1IW>jN0F@r=ZK8%8EAjt%S*m=)f;R^!@ysXat;crfF3~>m;DnoQ?5|z^a1ARIUk| zmCyx#@7GfWR8|-?sm)GxT5j{7-`Gd;jE*qHqQJ=C7TOTpRSoWzUjdXT#_^nxl9l{Z zrJlllp0$T|cJwiYTZz!drk!Xnd)K5F*3;@l4+n$se!X=_($`}iTck&pFZ++#t@5M4AlP+|Q zAvmRjj^9`es$s+2V#ePY3u>x(FULtn=FyxH_Y|MMy+a14B~s_&x|5gTYV_>lw;<5q zqf<{Pj{zD+L5#(R3!srMKWFe^>0(sSUpT|RbQea&xpYcx6Go|3U|ye#h$gylH!@Nrd^HB@^SJ@>Q%wOSY1wYvA;gcQ z>W!30fyl*_>>Rswso*-E?4~B3(A)uQb+&uXV;6eSN~-CFCFKQJL47<_=_xVci@}gt zXG$ZFQWr#w)D)GbPN9FNaf5?Q=^PHdn2L4b0Pd)%p-~&6g$cr(G$K$aQOfIa&+20^ zHh*I(>=w2C40jSxnVaAi98>k>kW@L-yF5}-l8kYXB}g@ciYv#&_9)U9^7#ZAYdnRR zeXvNDmoPJ#i10^6Wi((&?GqU`Lvz*wJZPCZ@$nKfSrFea;>-EM@Jyp`46|{Yk24<4 zL-P0KQ%@n0q0%M|ojWOT;4G$AGNzGpBUK(yGMq-fE>>(3e{RCeV1f+K 
z4hoY{Gy-g*4|ZQ1MrrWEH3-g->)aaY*P0bTa<>d;z_Y?qZJg1BEl@Xo;XF`LaPx)q z@k|O-3&$~DVaQIOK0Tj;R*-B}Iz7$=Lj!<4u)VVdrPRy$Mm|XLeiPjU%&>ko5TPB^ zE}dMc1y8l~z@|1n1%yBM4)&ZYG0iaB={eA7o+wN=)8z0DJ^ebVEgf;1P3s?iRa&`S zES~f#!>OUbIc&mbEB++zg?fIZ(v%=@a0WDLA*poQDAO=j7YY-JQrlMUtuLYOvSm_i zG-ye5XPU<`Q6#7}2b8kJJzttqpA^#MQPjMj^Mf2gQE!1S*(wbU4F^gwZ>T|18K@YP zwhcLx#4#v8J_^98%tAPg-NTMZ!y{Osp-H^akOZc&hlMHHNqOv%nvS}gPIxwmsnvWX zl|0#sx{jnzi*a0@Nj}F7WYEOPS-1?Kj?Ga{MH!eE zMs56YW?{%c5~~}_lDU-nHqmWM36G667N2;xPfu9NhjdMy>b+*jVP_6RJZV1vPH;@& zOMj=N>9ncAN8O_7Np^h8g@~#}O`s6F98Bt22Q#S=CEXQ;0(H4{?vpIUS@#*PLar%d zvO+*H4G`emH`y?~G*e$| zunnRgPFwA|BUP+7~z_?-WPjKQn4ui*1d-qo9NQ!fu;&Q{+V z0q{yL{|BvUVAWIp2MCVidj6hAp!mU$RA?R@A%1|{H~~T{3u8c8cnc zPsNh6b0rz9pAqO(nuYKkl2XRTn5B1Pv}ly9A5W3u>k*8?3du_u4;qllF%3@IJ5};S zfx5ZTc3{E73I4rkmku3nXQpb~wi>fM{SsDeCGQzU*02GQmm7>A%z$!BDMQO}mGd#t z@T~Ea@mLAWC`WqO>OYxH7a8yx+TqE2^jx`y8*l~}fZQ57$W!_t*~o3=WBBAYhP@^T zOtnmEYwPpwlkVooKIw7wbWPBG9?r`j%lZf|Q|DkC1iAKx_ai<}C@EMPBPEW6R4JYy~w%jsmK?osB0#BQ=th~b_jHnwt)HaPGU@~G_gDp1>j(NHux)AVnI&Dfcb95fb#FHJ;CRk8&ruRI?ukk!j#>(e0Vp6n#aMpfV`t6QsAw z)u6Nff%yOb=3Y@il431wU(E%cId)%N=>YCv98~!x$tVoV8Q3c7_oo6$AEMwgCHRU4 zaYRz!3BQKa+d4|AM$yppBhE?CO4K=Qvjw=w*~=4-D9s~137p6$xx`4+U?MUG_#&Gq zcMz<>(SI^ljZ;(+=sk^pxGV5^GDY!9P=m{e|+3NjZ zKJQWR`oHWf6=i7*pj0kp((d`m9NuihV@xS4p`kK0Wv6?D50}IWW?`4aq@+WptljDm z>ev{~#{EtSL~t@4P_R-4Fm)t2+>Gh)6g5fP=f>9Ygu(tHQ(U*<|MMWJEc(2Lir}jV zB%L8f7e%7dG^+V}+1mk~Fa5)=hj|C|B10j#Lm#HG-vm*dKN~AiAF`ARRs3!4!iEyB zQK8d+NCr9L^z=r($-=N!Y2BzIn4?lH=J= z75!+a#z0J(>GQKh`17lOUgB9ZHt7Upnh`>pFj3o-5=j+UMqmzK+bDSfE?3UTJSMGt zO{^v8h<44K4XkZXDa1mO5(+O)79a^5bgCeQ*t;(QK0R8u8iiV*qIo`Eu#63rKX7+vWJurJvDVN@!Ume6DWkk zY~}H6Dp5VmF=Au|ZE`8LdGMR)K8l)f&D~zGQ9FF(eX;{8^TbVR@O?90RyPLDU#t&j zAnAwe9I?=&$1<&CR=iw3(c03d^y+UiaR96sm{#D&a)(R9FlBgJR3QtgrAcdhdnDLe z!llS9EP|vcegu~^BzYPr8FHu0EL?>`PmYTX79#m*%*3}Q;rz5^gHBD9D5^t-L+2-> zI}4ljPAVLSnM#2W#E3rQZFub|<09;qWkzgy@B_vok>52h*OjEHvI!4fej^c-M0gHT{FgWmh~?2>MJH&D6Kd`a_nbec2bIDig{YVt}>=Z 
z=3KU$3~?N3D0cYvzczfCO`B&4?F_0>-Ov>zjZ`=Z(p3l?pR~}ijZp~8qe_c&by%FJ zCiO8JATQ-C>({aM%iq4y&;yYGo)$uJ%F+BrLYhW1bJ<2*gMmx2rZ)%SNvdx&!mv51!GKL(bRbcCN)PfYOYTYw|JTTj){jOb6r`+~ ziYmby%@`i#)p=GZy}0Nm#cQ5}I)`b8>cTNh0o^n0@~r2qFF#Qx1V)}|BH4<>sXLzW z6eZiIV5nHLOjC*HP36+Or5Kc=Ou>_nabhT^q#`VAAk0SbDyk0BO&lCxGAL9iObYJH zC(dN6Bg3vmlP{pbm+SWwJXU~%Q7`t(s-}n@w&=_)APrjCjcMzoFEn-a5O{LdLT5tD zLZlr@p*AKEE^P=z0}?%dW|o0)H-$^6h~vjGlh1QXj(TB&_CJoAfmP&A@bvOZ7XL=Sf1#dxGh)xri&mNt!E+7Do@a}KJ^W1 z76MRW%73_Fxr{h5qkBJe?07KK(*jS5CT|k%yH96zLYxW*;|NnmbLC0V_9-mW&}oqe z%V(n>%F&*tB8++CYd0tj@|6yG29evxgG4pP3?5Bg#+$SW?ekud;5XUxzV>n7FMk8B z*}#nmOKH|Zm1Gfoj>D;d?I|k`qv2Gmhu6D8Zfr~hLTYY;0E(83bFHRg>kT&;2TT?7 zQqZ+dieQfi5&4EU@=1r%wVYCLKZ;66Di}MMG*+7uOGf00^o$;U*@jtD2-1C(Erl1Z z+6MP+D}EdMrd4+v+!&S|Rkd8e4!D%%rN7>eW$9 z4Tx${hY(cL_cNM^ir;wZVL)espg>Ys()2!YV@RJ*YssnY$u}`)qvb5bl#NV;zcc}* z?g=9*0)L{VN!`qapMfv{w*Y`4 z8#4Pgq5F0BKRJhW#J9iA;Rh!%U!xgMaMG%d?BuR^LuU7wO-5&|?be#SRb59+#a78Y zQD$Uh-pji$FY}&x6F=)k$=UB|()nC@OsupxtTZcCUR2d7N2+{CX+&gEo3p%h|Em1t9McamrVrY*dVZUFklY^v@nz~D5Og=YjJlrI1ZP|=*m8nF z%cW0Tb>v;{)wxwiGz|MNK5f-0g`&FdDCp^caZg9`7)21}>6i9PN9DR7ViG5IO?hLd z8xSjtRG5OsMRrENWwdk7R+_(gwRR5O`zAmn{HE;{+lb|*U-JuvjDK@P@Qisa*^5eZ zhXXwf8c2X_QSjVHs)gdgZ_Q}|J`t-Frp*g7}5jm z@{zZnFcbP_EW0g|;03lhmK^TEz!#<_nb(K+CunUzpp;BRB6%OMjM6quAe$JqsFV;b zD7-V8BG^UZ9YD@_;+aHmQP5;5)=}1Fa4#Nd9zg*GY9Az74fV~-5(aW8u#Q(hk(EAp zbLaITxj|8P8ss9JcrrN8gKO$!IlM0M2t5oit*os{sQTyEaczq3r+M{`O@ioBc)H@OOye* z+;R=~9Z6l-f-LsR2zEVOLoF-Zvxy7MS+I!osGNpb2OczIrI+)u1;O@j$r|@O*K}Ls zOZlozV36>}m!9RZ5d=S8$8aZ9R1U|3a#6R60XGI{%JJ*i?YVV|U;ZJlcNFT4^U}BI z@BUIN_;Xjh4Y}D!vX|^kQY0C~K_=$F7TXUTMZ{CC<$mDejXbeB{gEVVjHvpDsIOe@ zyjny*InT5#7*&{32klqvBvVO(W^M$=nb4Ym8 zS=8>4v+bj+aX(K$9jHG?p7BYcZOi#3?HzyK{JEr(ZU4S9UsBFq&Vkb&>S&6-YsvRB z6_Gak;3Blcyy8n<@h8Vd0w-_1?4y!}E!DBaUSa70pfmrfoIK|<5hJVRHPI;}k@ie= z;iED-d^EFPp~)NEmy=vJcjWG?jqm3v3{Gwee$Ift|8~xoC4)Wq_swDFU45y9tu&%i zWk)ZGmVe_L&fi}tRmjc@Xr7EbBdfGViHnPwbso<3&kD7c(k>MC727N%>}BkR-4e)2 zguPscwl%S%Vo^{m0fv_oTiKU0MtvKv?rBV-7GBA7Qf6kR^u1ieFd`yC5a&6I 
z<=m9wshK^!YfqXTo5p+Idy$GVU$Oea)&3~yn-zEs36KK z%(;p8aurz_D>1p0m`X|MqopkBo-~O*n=-4LS)N&0k||l*#H~%*Ik;F?DX`wXUeYy*N<12_pH0Gc5UC&|I;mR zet*C9-=w~u{`f!5wXCPAzTQOi@n2Y>woQ)}cURS|O+}CD+Lt#`>g4K{)medS-`W** z{B2oL6y!oPHEcLwcPtVsEmTk!_9MEweo!)B(?SMm!ou}M=u8jgiZe=ApRo);=zXqhzP`h00|AMgK~K2kQfr6ClruSGDx5hK|;X* z1`QBGKpaql0EVmpVZj9i$dV{{)FLEBkz5Ws@{Z%Di(wQR3MMwiNE9ClvXJpd6(OEk zl*2G88N6dT{KYtoY=t&Dt|&-Cs*0iqLI@f4^RHopg$RiV09dXp(8hGFN1&UDrBY)ICH~EmJaGRnxuhfF0h2OZN zxB|096CmgZ6M#<0f{;9z!zc`*Ac$cY1Qi)8;DO9BP~)}YyHdh~2aQscu7iqa54c290RWK$$+rnS{BHy@*;BO=cNqW^ zghisP3h^+p9&nVGeVPcrAD-7WCMXWKQs;OHSr?Tq(rs*V*_5Qd5&-Gk(<0di<}q(4 zH_Ut461ku)?YE}SWeDS1Hp^Vp8)pG+WNgl6onZ@C!ws`K8PB(m0}rlIsa?zV;O5|B zk#nJalb7JRx0V}}f>szumJ4&QuVj7o4ml{JUKOd!7IoAM#jt=E%DW8>BdB>p$qV0EvEooNis6XD z97^V1IP_76lH7t*3hq#{Fp$bG!iPnV#3f|#p%)#p`ylwxt&8k?1U__&IN(pd0QeB` zj7dcP9%9aA5+}chm}{DZ0(}qhTAMx*d=Jreo4$#?hv<$>A4TsWS_Dn}%X^5K-#|zb zR5TqU5Q-5YG5CZx$Zj+Wx4U>n@$SSSh>%wiDrC&%+1e$%R+4n@SCX;z9S4JPgRgAUIHoNW#DE43Gv__F;dEV(kx| zjn+QJiA@Po;uTGQLkUHR^YARV=kE$!5woQP>eYh*k0^AAT@FF3!Szy2avsBG8ma_+ z>R6H~hN?=6N*9s;bB0C!Fa}eM8E;8O*fydzsuSoPy~P6{Kj>FiGBj(@7A+(8TzcCC zsX$6AU^yIb$zHUAG`IS})ci?Jl(+}qn%H2gfvXN7csbT-yjk*^poX7Pt4AxtUvtgn z?}D1p)Qo1=(S1*EcfBu^x3>4eQWJ_#)yAHxR@`9U4u!R!%WW=NP|&wCX7hl_2kpmt z0OiKLt&O`g4);ScTAtM)uc20GFVY%V@Vxax!m4fAd?7xrtnJS3HQNP75}eVW%pb_u ze1vex*PdN%GA~7TB_FGs6GTM$r9kbXWHT*%E7{r$r<8Rv_Gy8M%7C(?D4UE>#YCy3 zW)OwWR+*6p_m`dB5YG>05nE=w+E701p^tuMl2L;?ls@0+SBrARqm|Vx3-}d<+MD1> zYAN>d*S7go0_At)ojs0?iU<|S9!?u*Wq8tdoZ+Wel90w2_er7aPYfe)DB|SeN#VPn zDP`@FY6zT2rSV98+W<-zLodPuYl5n3feX_pVuKrw&@Ee!ns!4GD zv=>91E-55YXeLY8I&YFeD(vf5R7wv5m#XVJq%p}?;VwF?549dtkq*vPhJco_>$Rqw z$0ts$Z&w6~6TAh=5seWHmjqzu1(EcotR*1ksI5`H|Gw9l$G1Wk>2?nGn0-ZE_y&ad zj^3xvQEZE}nJiXGyUcf}a$p~vSUf5$9tVrpR~#-X@Tgn;Yg?`W;}B{*etHCkKEHyk zh$PmS!@Ul2!YFtm-z1a`a;Sv&V4j7^pycsTBJ?1uLCirSk~-OX;=YdyUD3GvL1#5; z7=&y^&QvD0(93stDcz?)qv}-m)+~$x9@NDzl??YiPp>2YBP2j^>GzkAnG=$z`PDd5 zz?^L1Xi2^nx<+Jn)k!&t9sC+ulfE{TFGR}SK*{#`<&@^0pUly4-&L_d;!_v+}r?)1%!6d@w* 
zxwH(nKS37B>Eu0MXGk-tdx77vh}|5C%*eKIF??{&S%U&Jibif%PXXGUm)*KGd zUcJpC9~Xg>ZRW#!NTe}|yc$OKci zgoq*qGh*MI4r?QB*m#iPWav^$uHoj|*GH+QU$@XZ(!(A=Nx=_dL|4V7z7UE6*p2v; z3`qV^?8@`j-m3>zw&%ZdInA(XwZmGr+`*lu|BKq1dJMr$x)2tqLa*qi-e{l;sMQ_F+)v^aS`V4cnvo}8Hgn_ zx)-3&s-ty$ype^imru}(HEYMU$@l_7yG*FeK`aCGs;j-GwKMem|pNv#lvuHz)p`~TgOS1 z*Q_b$X2?~D>DyAi9h5q~9jp|BmvXU2OPf*5R2#L?lJ03H)((m`J3+46!BwRL;t2fo zb zw`Rqh>oQlZ%jPyyvZ&2%TElvw$n~S(5A^V+_L*si-r3COfv$}bO}^yKz@fz00Ywr7 zVLjes-^&VDH&4%;D__=8m4~)%zZ6aXM{VUCQ`x?>nrlk14ue=N1-R|E5`!Q_05E!k z$i-o^Im8w?TY%)i8VE{8C7*HL^-{B9SrJJ}et zmvY3PhRDtXw_t*c;R6_`3X$9Cu`7JSU9A~msHB7tSX!0ln_J1nniCq(<0u3J@;kRG z5U6rHFz+yXm7i#FzuEzvN)x?_;(mK;9$7i=(^{*h6KSt^Gjvo(!WfF~WCX^k%!@Pd zq@e*%m>qxI`q8~}6gD{stRB#XnZZ_!Vv}Q^Q2zEfHaWCJ1kGcsY;y6Kdjc$pj_}cXezyjR} zv#Kq3et~XUYNI$%kOG$(h8sw!p7qxsgO^BOi2& zHuGhwsuJAa^ALPCnQ$*wF{`cvL)#rJ2q4Hl$mtP0^w{NcEv&$tv)2V!kaR0Mb^B8n zC3^IErYGxy{RuH5*X9fqk*HUuHi4pA!0q^dFQfM4a$jb@gDb@Q)e^lU+pspZl5VXRtQUwf1z)XW%=c)rGp+e=Zxo~7`DpPjh(j7?y=+Abz9 z-RpNvL%K4~beYFI(}p_YQZL-go6CcIM7i+^Q|aaj0qixu?t$ijk0aGr)wqK@IIrZ* zSNoq5z;?7zdD9qB` zwJ|Kzm|-w-LFl2RWBY)#+dhX<15p*nZ$9iM!G8l#h%@q|Nu0KjsRF*EB=TH37O_2@ zA7_DhJ8juI9VF#91iIb~$rBF~{VYka{! 
zQ1JRjKSc85RF(?q?@QNtBOpLrP^HP<2Cyt&&4Ue4Xl@eSh_3(u~J zGmqtJs5VKrFA5))Cn7Zu@Af@|w=Wt~#lt$nvD^K&D#l(eSVIHo{&mVay26k$#NAJD zoDB6rRDc@J@p zX;}SGi|fz5NjC6?7~O=b^DLe-Kxv^Q4PCw-k9De4hUDFqqmt$6i#uHn=<24&{P*re zbdz(@{n)d*Xgt%kAmhn?s~}BLLZ3!1Rh0RUQv3b44J`bW;gCHw`i;h+Mxt>5_!%hy zn&3hCxK|+}3baHaQ}@eriaTixlvjKCB@e$vu0G6o z4t-u!V)ewv&2S(;&hXQEqojCeA58{)=b6r!;4VIC=v~C-JfxWuI7m%`a^_(H$lowx z#KGZ39QklYJjwRQkJ&eO8-4+2h$A*kV83BN+%?f7gYwE_Mat2hN#CvtDq-_}pt3LQ zG!ufLRAdN(HRL4kE)+o|4z>1G%^JV`?so*_gyjckp2hSj`KBX~H;(!4Dd?y&aEbPZ z)8_Acx-)&4rLAY){|`S(&sbSP=GeQh0emk3%I?%3AwZ1@4Kse?H-<#_cgu^v9?L?+ z01Rm9DJe+^Le->~E><7{%Hx`w(wq9nI$77DybjTk{x>4Mf(2GQqAIe@SFoUL zY9#pSr`FWil%JTU=_#@)%bFp?sT&TJ8PE+)PMHDKkP>>R4yO#K^f&c@13y);po;F) zSgK&bV3jK2f(v<5cvCl<`tx^F<^^?#bBMQZ>ZY4gqxu=u(Fhq#rP)oHe@Q5es#UFE zL0(O|;OSeyepg#n&#tOUGf`Hk$t$I=rlyppil&Hx9ojVYI{Gw)PMR{BN(P1KqbX(J zL@UyitJG62Pu){ho-$IfAZ>a$r8IT()RcKD%~R$po58EJOqBN2`NlM*H&30xXj#FO zDycGCW|v{2Dp+7=VsMyA$>5Yzu2Y_ks!ADP zV3ZXo_~x_}EJz&H>yZK0Etv2kTcD)Lz=&pbW>XJd5=ed0Dl_==4jCAxS-}EK3_A)L zr-&0mMw$^q?C$1?{g8XPcR^FNRcjyWC>EXSsH*D4$KBE8BhdR9_#DqMs&#|t;|8hu zGk9uBSM}QkdB>laCB~7Mk6h{FthD7sACTP1hQm&XuaohH7rGZ6GkPJ!`$ZHclCbWk zNdxUNeQB9Hr6#dRI7nt#Y-V??u0xcO5zGlr!~Q6Zz@ib{(#i)M@T ziqw2#(ICBxkRYM|Cs8Ddwrp`x7q!bSQ8?{Lpta%c!(heUpaDvFOhh#niNU# z+6uQANfaLy6C#EoW4MuRq18;&SbnxuHav z*5Hvd>>E#LF=fsx3X{W=h$jZT4#PtZrId1(B?nT3DBZ{rnM^AI^~!18QEL z<&A=Nyg@wAOywYccvonAgrJtN8`93VlHH2Bzs@H*kwzNCOs7kMNry*=RMtKd@`;@z zHaCZaJJ>*j<C6!`6nWuti}=H^Npm)fUwZ>2h4u z4ShpGOIx%PXh>Ivwr3>H#8|^RQ#wOoHlW$@UtwmQF-WsSnKf$%Iw8BH4>n?R+G@kt zkTE2JW5ZCpLm?>+TS|RPrNBa}4vUS@U>S7XjMb#FDvmAVj8Qj6)zPEGAc{^dik@{r zA5c^cWk-wFtyWd#lpPxEMIcLeFq~4tgO!7ySLB{YJA{h5t-t3Lyw~ZL#zei)slLXdV$MDg7NfF)elKCK z6EnM!US^*yJJ~-&y_CAA2&oJD*3(bt<}1-B+&ANz||A*I`Ir^|#Dx z=BX#uDLY(obqw2w{(&=l)?{ku9<3c@y_W8BBui(0V{W!D{p1w1;PmSld znfsUpERtm;J%$W1A->AZF|)r5QgqzrYamaZcxQ+W z<3R|;aW@9KIz#(K|%l%0E0uq;8;8wi^HK%C=dvQ!C*iP!!QhjKnTMy41`fEjCCRx3ju|oYY`{} z?FN%H87Kt0EChu>HyE_qJE?*~(0_Er@*_S7bSxqVg+NCFehlbFj|HcWW^A{<+nW7B 
z7iN_f%rE32k$+@1UOweZsv?IjF!S)5OvBD#qdEkqrrOP`C}xvdTs2!k_fhL&0~}WF zKt;REWR>icIOidHt{Xb)av;@U(Qi9Ac!@E8A<9|!5^-`XF%Jkl(7#34%LbvsLu7>2 zEfv7~K*z0DB!u^<#++h%z78Zo*oLzTtYK6kr_$-)5TYx(i3g1Ij7KCI>EUj;AAq_4 zhtCGTc!Vb%!QJLaJLSzP11v3eR?I1`=dWmoK>sF58Z@O2^bvncN`RMRO6>x_=S#|1>>xub~{%C>B)K*1WZlH@xT;q9_;gtqR zag9<&d=&wHv5HWWpIApT92RNIF`&7I_mhR)0y#HfQjJ1kICHx3w@H!n z7>s3uW8cULDyR{m_7RqaADz%l{OD2@n{lM7TFEu~`=lrPyuQI-uc$*aAvSrheCM(e zfkN}7yV{xB1THIFfkgwIT4Rg61wanHQ1k+{fEJ=J99D$tD@BUfl4_QrDQi~++$R0% zmQ_MXXy(OhmFPRM2D-G-?~Mh1n7u?;O5AK*O%9ei3VuGX<4ZACDxq#mjL`n73`fX` zYURAOqZ`;p&HJexm)8>3F22j7>AD8FF{QP#G7?#7qekf2Rh?F@v#^jz>!mrCJ%86l z#?8|I3jm0rZJt<-DjWdxRvdZ~$5_VvR>76%N@hPU0G)2gG;2wTF%@by^EoM_w51#_n?0A6Y5);NM%h+^L-cc?ot>iS3YC)zS#OnP%t*eP|53y**W~XgOe}@*UM;2B z07Yy1sr(dB;~(FqhMA{U>a*rqIUaJ=A0cKH$Aaw`(5(2NJys-G%9LPAvoWtLK6rr2 z!@wJZC-zkGp*ZRBA${vpzf;F@$M|Je>k+exng=N8XURRC=`@i9UO(6Pjhf>XB_o_a zzwl3({9T+c6PhG;vhoDwi9#GUFsi}l}!mX>N5yI zKnQe1$R3;Xz^#U7LuQz9@CT_5EuYfmsy0p40qg)nWgh^7q@JKvKTUoxSw=ySTyIw{ z8ETbqHGJCU2L3#~cv@ldQ@l5sN++~NYKgoJCZ5i9CBl|Gq2RC z<9n|urgNxjKhP#aT6`e2Nl8*;Npo}l`JsBx2$c%32B@$qfr`GGvzO8|S$)X9m1**@ z>!_oqrRUoZDMY4IecHIr@WwSt(yvi_>I2P$-<7BpIxlTX3x${2oyp~D8L{=SaSqC$ z%DXvzp9+Z{(QbI^1f#dj8F?b&Gzt(suk#>(@ARCh*P`tJd}tyfu|yc2y@zwF4&#$( z>$xU0YvRK69iwoH*(E84(EEmH<8vzBWt#K36 zNK}smxHVx*y#>2C&gYacG2dxUrj8=qCzf5yOoQ1jNW0%xoPOn=v`kYH+)%Fr5o+t& zyXc`8OW>_Z3FHzAypRS!-t#1pKELxw2L(@EIM(U~Qqvj8uD5cLB<^GZJ~VTF?~S7jfi#fVT-cUwXQM z{^eL|f6M6xy&hkeb&ilV=>7z#8i=uRvf8H2tAgiWVt4?YLxlO7Fs}+skHds8SR


O@|j=iDDThX#FqUztZ+ghM#u0HlwYukD8ny z>DUT;nnzkw15B9k)w=*3ovWcpoG&S=rgFu*Egpj)2bzSVvsCHOL?}7gnSNNxg~F$6r7I zwKY@$!wmE)NF>I=O$VyOy2>=F=6dh8%w^+yNU>~?LS-{}LR#$(Ja!RM6gU^;u=674 z7A$2!@F0TAO?X-=g?AuKN8#9o9f(ZkQB6!)E6oV!VN+G1W|FWL#6fi)+$&8xNHkze z{X)s^eOIm$b&fkonYHRZrlATb-?VM!-9|=SrfG)?KLW^=gpCunQv@Nc#>S3#vRsLY zan?QC1f6lxHsA?ql_a&L-~<-O;e0w%Wf9VUo-O*CNx9>)hQ zNS|C>ON7VO9Z29Da=>!+6CL!8m=^7 zKwNtZs!Ch0#DWTU6C`2%7g?eC4b^nAdzqE3Lj4T53h%<8987_^|78E6NT=+|5IerefPF|oi6pZUM(YJYlNyOar!qGhHf@9f z*YTOz+UX`7kFM6ZSS7YvxO1N_Ev=JN$)Ns@7Gi%o&Rv%*zB4p>wC;VZb~D^Zl4FkQ z8zbE2LS2sKKts`*!NZ|Akqfx*;$BMwEm| zdgXH+kQ?HqpyXS{*J2irJx8iWw+#(wjoYWVMjoySAtow;1tdSlrT^`9;$G9IRONa% zZr?@hV!8imr8HUW=#S53uMDy^ht2NfOk8C1X=}X8v5~$@gMCx zyhqJ8nM?%zmQppBYk)dUu3-W6yaV=~#vKtpdA5(0>@DMXSt^c%>nx4n|#m-I9qJ)8}u>fkG`)88@9> z{j1U-R^T+Yks-e0PB@uZ5-ruOPT5N8N9t)!)xDQ}s(hW%IfcDud2Lm0@Yw#XEQoGd~b2&y15@xZngs#9@ah5z9U zLI2W>U8M6A;;T}tIDUf3>o5sbb)Wf!`c3h*_vWE2As+Qv#WQ$jAv3=$Lgi26Qu$?c zpYZkAK(k^Gt+uTN%YgbM%=u&c>|>WDp42F>=orQu6>xRH8>=;Nqh*hbMEw!-U7Z1O z!GAe~E;o#zhRxGs+m1f4Fz~-@t{fc97{I#5io3X7tNxM~eJ!;LxU#fiFhMW^rvkSpY!%D78aV;mHg2-s@$kwcsvEG{vGCBod$LH8N1RB6v)rnoJ_E*7V5+df6 zHw|Q1_vn~LQS4JMl=mq~Tynpj=FI5Bf!obX^#*&4ZoQVMISX&;dEl?lwu~rHI}X0?u7DAyanreKyp0-1Y87gjg^))YSbYg+ZM)hyjS#+daSzc@FESi#j~G z02-C(l=6vk^wi*o29KEH{nA2^9<)60vZyfuV?U3mlCf9_tS20R>M`4U2wjChj*(Nb zVR}L22Fp~SL|!X+W0-izSxXBkW_>)_H}D%LZEQ|lO*CpVrds+?o+WPJ1Y@QTy6s|H z7;beKS+L0sTL0(6a^AySRH<{jAE$DVBgwgOUCaJck|Iqdp;M6EUJBR#;BQwok|lr2 z7v*Y6UqX@CC+5J26U&e}!c{(T^5!0TScSWJ*k1KH75(j6&;c1K`}n@t`-LVAM*w%6 zs3TozMqV>+FKB`9Ftr&fjk&%AEHg}D+lw=+Ty7Nkq*<&O%2q*x?|5kT2}gBicO{po z)te@GD89YBbX$#ffQ}C)Vzn;M?&f7r$!@&a0)tbN?#6@CSkWm(!fd4+*=H${7h#rD zhK#w~T&u&4f#GGx%8{z50OlN11Au}~_$ZNc4#?!XMJlF2sG3WlDrEMJoodAZA#Dbw z&Xhg|YSv+V47>>Fq}wS6L58LTJANxS-Lz(QgVe%k!LuQ=Vz9OZ^;^5`w4Zw1twM)DBYIEJ$UZ2C*#6lHrce? 
zVT}k_b5;qZ1*%EcK}b(#KUNY1^Ath&J{~*_kZwh;A{04P&Pvqc|_;7!*w?=tbJ`@?0^WL!H*{H(2tk) zsVE8{p#fqXJ6l=uFJ>jPNINL^H=Sh}dOjhcPcV3I_hyJ}dimM8>})ih1Wdql6A2vU zn_~4_ZAjD9(n5seyEh*2c*k9eFU)q7D4>g;l<9T=bcw}mrxLt&f(JIU9;ZZZ#9~_S zcys|?N}it>l$aiC=%+>}5C!lLLqq} zc4%?bs5=aC$4`3kJcIN47wzp&uC&tZitraJ&>lZb#cx@181exvMCrX~{Ov0bpXLPY zE_4_dWvutTiyL&ox_CxSeTy5#Tsq$xlDY} zZ`#Ro3m@0pS)3-btiINsyfoK+P)ZKA*NnKchO^P6ZOAH_55m;i%d6rlOe! zNL}%yInQ7Zd5Lo~55@E9k((YZK1e+_98Kz|9+2NMCD#G~>df!9w{#KKsX9Q8d$S44 zGx0^`L4LF_IfyH}&n5x~cI`s^=RVSj;IPFto7Yp^6J}FA&gUsaQUGK)Yj- z*mlM$659I|XxKa=`naUsR6jF6UK>#W1!c}dsmP%%DU=(Ot=us?@qQG4fN!9a!lOep zT16_3`|&uKr=u%Q-#`m{oqxe%x+84&(|*A_S=AsPxLK$k#Aa|`n|cru232H=0=aj9 z(k&0hj)ANL`~XpT2_M6GLZ28%-w)-2%wlgi^PnBEAy6;Q0`LinvOY*f-UCpzZ&c(U z6m_dNX5yz|bFpFRurYkxrd%9sy; zTEEKKM(GafP@h`zyAO6hXZvQXhisD^WfcivJdq7Psm``;svhMKMeS^3DAgz!g+e)1 zRVFZ047J_UiZ$OVxhigXKM(?cy<9*m!{P}Up?0t|a2JXYDAQ&QTx}^T9N78io~4eH zR!2=;F8~s*tb{X#W~2;#;FQx3So0CQ8Fz|r=mOz=?)@a3cdpI{!#xv2Z;kP}M1*W- z8?_p|K(sY4L_e=}+m`OFW`o|Ax`GwK`3)fSdqcq6_O^-l36KZ)RcC$A7YdC*xMiZL zCys?MOcaU~5y-_co6^gl_12r_;^y)<5L%)S<0jH^0fLV|OR!R7quuG0wycq6ruu@L z2WU6&&{PNRI$PWUjms9XD%GwSFCB=7nR-@2ghgg9&0t2-fpA&hCL3d^G(*iPS792>Bo*Mio z{3C7(@$51zbNlceA-;R2zfYY??@e<#;OMAMqQ-wL;jg%@cOElGeQ|9NNI#a|#q7^SOb%J@vA=lysya*;Uvi{=_o1S$|MX1r}k=c$k!M{Mqw?f zIFk_CkU`eeh=c3GyW;J34ZmtF0EdM#`09mC&^^K_MQoZs|vkKhRq-Y%G% z4C9cb1P^o5D7lH_E$&TK93D>$KQ+G9fqD8w*9*rH?;XxvapnAqJ2UY;sEdG)uwS9AGgq9+odaX5KohZ_>zm5nHJ{iAB{JA`}Kg zlS^$rS{ok+Wge^~J~e<{1-Y8zs8Uow57>(>gLwXal$&H#t{EARI~;s)t~tQR;-j}k zodz9u$DB_223)@bxgc(Y>m5f|3DZn-c-|;p!!4qmfs^293-eG{K*4`l^6IxU1rCcy zlLEds061zl5WWQ}n|mijIhHNvq~lQ_5Lxhxu}3hlCVtWFD95?3b)BDw0E^n4TTTG0 zafajnR$c=Le~AGhT!vt!+afG~BVM^TEr*WYe+IBn z)4O3<)fmZQLc6p6ZJm$JE<;p3%-_Lf?w`Gq!Nlw7=iw7-?1Yg#h~qWF*V6J2%)h+GgiykR8$X;DowK!*f&h|+M6c2=a{|Bv#-sd zN~!-P#GNTpYcjoc*qB5E`+37ZsM(-|fLAtfE9X>L!Npgn-Z&_ogT}Kj3!nOu zl4^WR)bBd#?Lo2c8D19Vz{f{kt`#K9l_yybIZt9vVt&gyDCo~@Jc5fv6PE~w$NzNo zq;j+DD!_QSWhG2G#^O1Mp5te){C?OOo}f;=8JpU9#kvXA=qIm)vR1WoD&nIC6>}Q| 
zBx$w?X|krau&gVD=8T51ZUw1$eg3}_CLD)SU1W9GiaiH|GYP^~V9p-z)nHG>radEH zc!{1XVtSy3rGR{diQb~W)c5)0N>z$}-?`!WSe+5)=TVoqV{9pL7vAGLB^;LyU+7ti zK=E=1FTBkDMaFL`qBoGI@DEF#DmX)#lc79*^zNiox{@6@0jm%%3oRU=URc5rqL`!M z@Ll-;7Ben0X2=-U>zhebgkioZte`~IEQFW-(RL1}}wSa!Jdr7a|fq619k= zqYb@vN(F&iHBn{!dFlk9w6AW!o#sKEfq^~9m8`T>{Ir1gY88FoEm>;$<3+bRcY#@= zEtXllA1?wIsyUNgmOAKu9pU5!+Pcck9pS-dYH}1}y?s+-5wvyr+?;Gh^&E7UMr0;Q z=Ws{#jZ+;>(*@uKRa1k=mBshRSHCR(hl_dp5fd1!VF59|byT3(!(H{ZQTYI_lE}G< z)W513jm-aCX4H_dVP_u(bx>%Ml#`FNJ_5l$!TWy4Z=CfBl605>59+zz^KuS10 z2euXB{0-lp@_Mlz*bsk6^nQn<8O`(R(Tl`J$ixnMwt?{ump&V|P|hx8+}|PNam}(q zl>>>rU4ZP-VBuol%GvHU5LTZ1q5kpkQVl9#$t7~q1B~7|sL}}G|9oF6)2^8j*>4Ae z5rKZrre}5oG<*R$+&N(tX`UPy*}!v&5=p59hwiSXe^kKUQ1pi<^n?oz0=mjb%`}C-B&n|&k^wLpiHiT$Y9|za9 znTX)RDpy%_1(*&Zz-}~t8$5`DHDKd)abp-2|z4PwRC z#UL|B4vK=-VlzN_#5od`)dIajgg5~T=aD!SkO2^EYJocMo)uEJB2s3eMAT3U*QEn{ z17-tK!P(qX z7h!c_$l<4s5y+uK8P?!;I6n2Jkk6t-Wop*U8 zH_%B}&ilm}gED9+OtA*Ie<6%2~S3v3`ZD4GUE!2kipPq_vj6f1)w2?UCQ5-47vSPK*b zmSH#m6FUkMiQCZT#ypQzQ#LNLX_QS_jfquk#4cvE5EHS}hKe?x*W6|mCgyroN!3&h zmE>s6`I^9~sd2f+=$iYu#%gLPT*DBakRHN~E=VF(6opVkQJ0f#o3WUA=^XY#LLpxf ziOxa_p@@Xc2DSK_Rh^_x?e+AY2>nduReiGZTy!HNU6zCzB{R)=QRW%_87ZkVR#Hls zQCA4fGYKj3tjxlaX2yy3IcPHp&qQoR{y2=PAT>xcNt+KvnTLY* zVCSAXm!qbnLCblsgB;G86C~bHZ@G$6F-C$y38W8vDGa z>QtmpFoIE5B@1@z+&Z=($Fphnv7SSE#QDsp7KCT2iG@JDUTmhyOf{}toHi0Be+355 zb@^UX?KxP1z2eMA#w?u)mYFaoF%tq@Q0~Huc1+ryNhyJPW{ znUHX^8ZstT8=XT!V&rI)s*l5vk|Fd6v51gBXE%|1n03r(yF6s{sH2e0-OMwxZzCid zg*_XopVWkHq#8%c$_ax`PLn9c&=^XNDy;XF)k%ag1pbw~m~Tdsa*><}#fufUOLH@^ zRH+29kD{0nqjn`m4AQF=G0^Ub5t$If9~4BYD*I0}JH4m|&j? 
zV@U31%n=#$h$)JgPkD?^Lu6xALN1Z4e*5Die{UU&Il0Yica5(Wak-G%Sd15AiJ4+z zQd*cMry1Q{(koS1*{QyeDiX`lRI#Y4s4A+e%rBys)r-zfM4T$S=WIFgq*`Z7TAdb= zRE@EOQX^8elks9B<6lL!qS9!_MHNM6k%h?k4v|gNV7%YAhDPkO7_y;KTO;%f6$!a+ z#O`tY2;vNVRr6HNH?w-!#o|RTBC-)hy<>9aaXqc{Ue+7>XU?Z5Gw=I(-^=Clwlz!K zEM~LWY&Mt6WV4jZX0hc?;&GJC8ND9%&>Ve2v3~2Cx`7TPM|KL6oH;Wr5^nfpEo+1u zV#dZLR5MjWqgymrV;j$8;nVkeI@3;PI@34oFiA%Yho40_lpL9jsVDr1xTUxp{zJ5M zqHon?N*N2&nz-YeZo26rLMoF*l}Th+^*^i>Rtvj@*e2F{UU}cdKu;T9?*Li|o&WBBsd2 z^tR}ZB02@Bgy?b+;S~|jNO{^wUQry&4pBKj{De|Nl?>sq#;`C%yF-{~zuuhYi^f?YZYPm$S})@{Y#TUQ6_`dj^tNkkqVTD3#2yY<_JkRdc6iGF6 zM#T^f(TPD7R6?ZJ=L{k0jHmc%hV$~wN2c7Vt0kRJN5oN^Y+_Vza>g+qR?K{4Vq6w; zlc8`Qbtr^DADW&@rBdJbecw-MN@TL1dP{GmZR^&gC$G+yUJf4>tSg7u<8I3jnHIUX8uM?B&Y zkGNUIBkmByR~0Yv{g_XxP=|;<^$DW6{URRbrtvC=&`9hG8Xab0mYU5LHaAs%IXrk2 zWzu&D{Ky?bj>_)X;&QdhMO3D;qsmmKGL@-JLS?E6w^+VS`JHlS%KendTE3Af(_416 ze4HYWSssfDmMJVpJ*uK4%AF{i4Ov)ZA+iyXiA-c7pUfaKIcSK8h#W7OShoo&D2OPC zEXY?5G9$>wAT#7HJ@-WIxqCO}=4~;*Ol2!exn-A)Y+>0Yv01h)woNrQ-q1FAY}42m z{#b4e3tH6XHPjl5Yb-Ht$K&WYM91UHIAkm`j(TlY$E{;khIL+*s=h7#)~)N7Boe&| znV5-~)WjxMLKCXgMJ~PgQWmrDVhdS5#CXUhByOY|sf%{hR+L;FvG`UM7%Iz0O0O^ zbEC2_b2Jv_!YWYC!h+ptL5?JfLIA;n7eCD(s;>0#-&l~ddb#o7qZt07XL*>5KK!T; z{jnU3SKs01I|xte;Hx_HB5a-zz>%DBLOAadF)@MnnDDch;1OkP+7QZnS&4*_S0sT~ zms_}qDfz-a=b#Cil6ajnernEGrevryt#?YDPN$ZkjL3*gWg0lOGN!@gG>Emew$4TE zdBP__EtyoI_B!wQ<Z72%QmsUDbI(tLx z6sss9EqfL0(9xsO-q`r;pDUS^h1r8giBe4HD)Eboc*J6PlAcH^)UX%AD@jQDGrC;p zq?d-uMOeskR+ZYtBywhaNQH9Y)rqB$kZhe;Si-U>bQO~MMfGB$QAhcxJsK4iwJ3uL z-OI$nYD_Mwr0TuKd1Mi#Mr=1f303EaB#f~zggVZJA;6LFOzK!8EQ>d->-vcW#W2VV zAvGqbki9;5)*I>g9S>mydX7e_Y$c;pUT`?LwH)W+s2;gcw+DMi~O0eG8JZK&h zQsJE;A)&q3m5m|zF0vx_m^w+P2^o^18L3*;V#Ez9X~<%VdA1k{&RAq5 zlx~P4Yz0}#Rxm6WBuS(|65AkED=3ITJabh_%<5>YGB!aH)%(=E5>=2%E*1UupqE?4 z;>FA>GSqR{HKH_dboklg|j>5xol(3WDJ{@Yr9~P)l z7c;+xQMiy|k1{2+J`jm{E{#Uv>C*eP6y2Ez!lYGE;$}=Xb?6=xyJe&!J`@S-}RCm7UpEsLzOb)oWGkXBi==Ier6Ux{qi(eD( zh3issiQiRflr<)b_$YJSs~AOT0;97hut#vqqx}L6OdVQSYUCyp@}*g|j&_j{e%HfV 
z#%LQeFsC=;jS}+e9G`a1oKWMN7v8}WV>cbp~og8>lgihOsnVbh_bHn@k-(pK=)1{yyESrMXz4_74TR_ECD7E$?Oi z4K7t=t={Ctkz#G?;Xbu+a=m+ArxeFjNI*-z7fD8K2MzZaHw_4Eg$6T7BhQ6Z^3T1< z=e}9~V}zZ1|4KWiI~A_T74Zl=GBjTpB}6_UZe$_^wJ&8(5|y=3bYye2y-yeBk%?0G z4%C7-!t^E#3lwg!3f*pE6)uI4=xH)E?*qQsDGYlncp)-s-T7j{8#9m1k{jXpwqXsK0xnPlE2@SH7Lw?DvQxZrr?Cakx%q}Mcm!R* zc}}IGvlcuvzCw!Az3_2&r!=tbbQ0o{odoO!ewirYslRWeG=<_yM>jO&v}z4TEsZlh zc=9>YsbM%e52aLAY35g{=?EicM_eNZ%$!uL8g4z8K4$Sv;$tbfN{aV_gD8gJq1u}S zA}ON`nl3d@k5&U!zKN?LP1kJdX3UGO{PLTc}r3dfP%15_RW|0H?LjXn4% zIF2GpUs*zs#bwJbeAA2{fMbG8lYYwj&FwH@Do@hnGJ;63onVgLP|6H6i`Ga=k`KIz z76=;iqK@pNI1H}xK5E^B$*1h>xj{T=*q~_yJk#C8HznuvPgp%Nj1<5ERDfi#x4aSi zN8|_gl31pUQ!^04d@mkBK?NTaIY|>k>E_pPDp$~Gpg=0=K&u*8@_CR2ERC4sa|(-5 zB`6`Z{Ju!q`5dvrSM8u57bi+y8KKDH6!u;YxibHe5ln}qAVOF$C?R|sMTDCct%~d8 zOjpU&CR(gmQFs1KX|gzIxR*7OAh^gj`HQD`L(3PbDFVn zhbJQBJ3dG_@yae7#OlKpw}+ok?_c(MArd8y;A4LnC7kH3(%iNyA1eG)7W`rU8-tLLtTTWY1&~LGCcf5rh;f{o7UGu> zT(8#8%`5Xia*2*`t5lM-xO0W^em?=G%#1f!C{GW31P1pLDQ(2?QUQJ@S$r*%M;*qZ zT{s4}O)xu9 z05V0w%k?|AC2{JgzebugY)6IGvz>f^2Gs6%+zbuQjc+-VVTa-_ZPSBe!$w>z?b~5? z4Cpkz@ehY8>tyZBoDq^+Sl3T^K2D*Z2>4k^(dT*q;ciQ zBV3*75U84*1seQawzAlzk}sf>S; zKx3UTR0TJa1aA1HPA(cMRhd8nuxiXk9o-TK{{sGb&_|(nhHY3@Ny>_4)NMkon z4hwyiM(g7G*-?U+1_CsVh+?NON62}}kQtB-q_&(vIb?Yto|;&|K}*r2-5)TENhU9L z*m>5;VS6wEuD%zcZ4wEy4-GU0nFQ`}FOm(0SQP9Tx;KkHxORs>k{bLukNTlKXTMyV zzD#CvR};aHqHBIeaZ3nGek0@cCHN5X(9ur*h-9N8sE#G03jd?D$08C_BnxRn2iY3m zx1se8dDMpHFge$`^BiyWjjTL0Xfzq{K+~0UPLW<3PO^J$vpqaX5=L7;c)5L@!tP@Z z`3()(&h39vnVTOrcKqYk8-2o9*z#p_(Ts0{c7&up$*duVWg@pR7w*N0GC>PN>Ru}Kh8KPkQ&sWakh zi{9*q$lc5diO5-4BWJt(yuIJ>8~He*cWc13EUMXq)*+4G&ZYMsDI&!DOfg0mgJ8Ud z1HF`UqpCwOI-R1$i_e+9wYBAOy*S-zG*PCnaeZuTv=X8gWLowR&`D@nF37qv9yHe= z3T5EGw&CU!jj=P{1j0=f8FPtgfr<8J+Tght{%dOw)aoJvr_=J{wD9y{^sl@yKxsYt zORc3FZHCckKnjwE$U1hMs{*-t(bHfB)u!CqzH_|VurFSi1gVJOzqWmFjt}vM3CGM_ zW-=r1AUo8+;$qOn_wty$+qnxmNl=CDPUZQEPa0@lVwZRcvu7%|mz-h31W!r_(syD?-xj+wPdfr5t1G314B-`b|IJ z{gGB2fDsGjIis+9_c|&Q7uKu+Xsd7eRI8Ze!Nxc%XPi3+#uK2$$PvVLfFjrnXN_d? 
zXsy$p)@n|gTA%XXtT(GyUj-4-^a3=XR$sw0Ib;jC0~$*`0N;5lDSDslVV zT2skIt!vJ}l_qUv44*x3DOaBXnD&S)uMt@ry*JXG3K3e*NuN0pl(xA+(**o5<7{5x ztd-C62FglT&x(V|FI{+cW9(IYAK$^xjHZ``{<3%0Y|wJ`^^r&V{Izc1jR} zVPRk2=&E#x%>SW#j?51=G>9i18Kn~%+K`1Y9GF{FOELI&_CLH3y=Xq8;Cd~xE&Li(?h*F-8Vf*tGDNn18h~C|{S-I( z6=L$X{XW67o?a(`#MfDgY7&In>Ks@J_jc_>E^BHDw5w4G`3t0c67LoTQ$=kgi@zGc zt@J&gCmB(sf4R$V4!jwcDUjmfJ2o&fQ-A2seET-oRaxozA+xLE^|e?Y*gOINAq%`_ zXD0$}p9v0U!)NEcA@m-h8ANSxgs4W?;-!FM!Wk^|37CDG%f>zMre9DXz=lCvYk-TG z-cvCt#E7v$7k28J3W;~5u?Aj?g|1Ldx>)|C8mDRDduA)th#Fj>pIFs8e)vbJ?R%~? zZ+kLi$*3i&T1B;vc+~4CI$~&_?Z~E6%s6PHNS~2}kONc^Pa2+u$At-XGe}gO52Y5Q&(zynz1X^VfpGIb@2cNk6{b{ zb8g>NBw6Iyi`k=~Sx62=LNpf<9SmD?2A8;7WdTv>1yrOvtj0Nz{Jlc0{*o2O*3MKN z2CvR=Vb~PnEetq_FK1+k0(tBjh=D^ms)*5dU|!F0v1YI>1O?0Mm&$gF20eTQh&sWb z-+_RwD4FEP0Gyq3v&Z3 zvCu_?C?Z@c5d_)~kMguQ))BmaL23)DFG4?EKunZ902k`MG+MA%-(X`S`9dY(ymJd6 zlsna*gSW3lVM9mlP})lYM3H!);Kau;UrgE$9A7fr)oB~@qx>%DZp7L{ZZ((q6+#q= z0@Z&203UIIJDxQkocGzMXXgr2xFs9tLR=9G_h&DgF1>qx$PO4P>`+m zyT6RtxvBNC%?-&`5-z#FneMV0-@#3ENS40EIj&d zu52-p1sCYh?K!AAjCRcGBrqMg@(@E_d4{pL;~q=JVjvh*pRwo@z2GY+KVe=Yd zL$q(=#->g{#T~p2Vr=x73&4K;OAk0@_W&!9CTUAmOC@u!6}%v(2>v~M%?SB5IZC1E z<<4`l+t1peC>z1RQe*@Yf^+whTOa zp~-S;Zg=#9l-lK3*$+165kiYOFk-i^aD<1#c$z;P-h%qfoQcsT@DIh(I*+HIGRw+j zu!P?Yc|tETU(F2_Hc@BW4Swy`@{idjI~!Vz!QD_f3aR zwZ-}z3LOZhUVlLC(|o+Y^MZ;o8j6pfW|qyRx}d!1*g2LfBxvOQsUqqlow|PzXvq~6 z;+&oIaa&V2*opbcZiac~q6b*eDeCBqvx$0xTjXBfIWlIqn*(=*M7x#vq4<{$!cr6-5QQ@(S}+D|SxM<`lS*g|l$zbe_x2v&Ay z4=R8Q6(~~rfMQsBkr2w)2SVu~$Pcn2hIZ^TU%jFNwy|fA^E9#N#oOARQlF}8W&?kS zVXkyVOKEdiuYgRExr2!Q z?I(v%d|@@`87G-rkzA6Q&)ZE)YW^*IJ#F|-i$DIp@RIm9X>|4PhpLD=i1k>&SJ@r2 zkLwHQskwY@!n810h11 zqcsx^RByR6POTo$@47B!>V9BR?P0~6-oH}G1psk#EQ8;}Koc4N=0P*%Ql?BefDN0- z!L%(caSCa^SEgplNo~EFEJfaxFz2HS0XD@)l7a+#`V>Wg+jHl~algOxO|-$v7(JT0 zbkW_ff>_IMN?5A`mhlmk-%m`lsHFX?nZp#T%xgPMh&K{*8aR$So;?uNpGu}vetFW2 zMdCTh z6rJsG_wxUwDo0f!*YnnU(n>ZeT4Q9)EQ(fSE(L7OU~B;@+jhT+Hqn(}=l(f_TFl>A z3$~ilkHXEtg)t$Ms|I4uYrVGw1=4n>#rh9(x*jdnfW0#w%=ru}3ee7YO?JgkjqDoB 
zg_xhwdf=I{V%tTlOxKKm`z3S1hU@lmNhufX7lpC6PAA?zTc`J;bCzB5Q2aytNHc$* zuPr7#|Mq$Y@$vc8DIbhM)fN>psm2N=52E2*DrAupf?rtE#i^k9cu>4Cc9VkWq8im& z?feXGvNUe-4n+29Nl_Z>YSHF@`_B&je^bYg3GAmHwD z&R|`{?RDwIPB|qOtrEqaa!zd=iCTu~Y2x$^lD_dJ7r8VSxilBnb&ffmV@~Io)5!fB z>$+fCc`ezdUv0x{J0~uy=E6w1G`v$7)+)5QHI3GzFO{s$(df+sUTQ)bRa#%vx}NI< z(mQy~W>x3d3>JqmRKX6M$nb_6|t>#X&W20bq5RW)@3moK8-+MxPhjUu#nO> z;*dMo5PhTeaMTKbovPA~Fo2@utZ5oZZPIx#=ef*zE+yx!*rMRnuIshh0hG2K&?Q3h z34u-&kv<{N`7a56LZEvG=qCia=#W1l(1~vUgg_^E04M}HbN~Vh!MiSdoR|R`{gfal z@OBch0Ti6Th1JX&X`}*cJ;~HMT$~C|D|@GuNsbGP4rCMAL`n$W0mT8b0Ug?1RftBy z6LRLl3;7C7NLI*7Ej?-#nR7|xoW{GnS+%NLI?|A&)o?gM+g)zvW~PenGh?~%=2^b_ z_SyBRmQj2yNjasdX{CQe61L`QYBM*~nPHvT@x)hhHFvv8gfk^WMUxE0_vLubh{!4P zqocbboz2c>R8?0etE=z$9X1!boB4NVcl4xp+;K9!Uhg>bew?D*9YZ7Q-5k#{Gk2WP z9iFq?A$N6`v%5RYWNZpD$;`@(w3i)Jb{rY!@R-R=c9;p-amK3KVTZ?F$IaOeG7}k^ zgXJpCypbA>#;Rk7iH#j)iB;_CV3pWmSI5=sprfj)s;a80s;V+7Dyp)m4$^xI`8ZrH ztH|9W7Ntcwi>9XL=7e)oQ)Opr9a2{^JL??@r*5jKN<_zh)Lm-nFtK!a*m~$Qbd(Mf z5)$H1>h4r_C->XgkGg-IU1MZFVUh9f=<}cc!+zLzzDM&@!Nca9AM%o${5UBWn@7xc zv>khh*-Ad_BB7H$#J005xqa03K{l}uiyc0Qd{BM#*5iZnVdjfiYH@qgl8seawJsUX zbcSM=>$7EVmOOWrBf0xd&b*_v(O%W5o6J;3bE4R4&f#%5HKe1wDeK%EX2u!bhkd3O zVuuc&3#shpEm_h{Ib*HI#5gvsx>N7@Nato=BC?Uow9`tBr-#hUXJ$A*?nIsO;wGV(Or?r{V3(jN-HHwi72g>BDG6TTA%!g z?(S-2BjXv#&fdGy$;jyF=sbJs@8@5=cHjAD_q&t%)q6DRt7DzJW5>q3xj8!uPw1iP z;mOd9kR8$x)lp7PPVVe%dPYu*$~bl?$jIm?Gy7Ocam?;)jOWjXhH7l=Vm`ad$c$&? 
zpOdFm&WrgU>4UE3`If6@638zpjc!Dk@gr zmP@-S(y%-=WtCJ_uRf9{L~{{~s)&eahIm5sV~KnmCvS)}q<_eGBc!{NwI7{Do_5~n zVZUGU=>2>4ob$XQdVHqE7oVxFJZ|-wdp@yUQ(Kq)^hz>WB99_1k%mQ~%jvJ7 zn!;vW3e#kE(Fs|oE>xi*p^w)i_<9R>FSE1j@M(Czgzufk7fA;z8GqYDl z4~^fs*)Dc58H@R7%jc^p(=DItuzg&&m2}rl9~QcQ|MuOxw{PFRd-LYqyLWHibI;A2 zH}ARSmRoMiE%(`H%YF9QW}9uc%{II2vdcER?6S!wo9wd5Cj0j7lTG&R+qP}nwtf4y z?b@|#+qP}HHf`FqYuBzlHf`FpY15`X_Sj>OJ@(jQi!HX;V~c(D)mMvswbfQ%ef8B= zOBJ=U)rYCBi;&b?*W`9wDXZ*hL*leE?QL`CjgRC!Hngft)l^%h@~o#fot0(jtnbyux>Zb3Nu!XyV*Pc> zvLt6X9Z$8E+&mHXYJx~bRwJ(I6LZeZ-zjtLeA8n3jM1bD+f3YD_N_9R;xxln%On69 zq0u_m_}Fr_d)dip#MUY5WKFBaGOe0PnY%Qm z=*aljy3cI1u@o%3qFb4YN>PF$>sf24_P6dk;}y4Cyn^bUdR5ofk%Ej`c16*{De)9; zUGdi4OzUP^XWrFmyFhV);>ZpUt(unN!QsIn1BV9(g~Nn{gTsV_!-RvvK{QY(6b};< z4gn|}GJqhlcyMsoV0xH%n0Rn#1P4I`3RoaODUcz76ijG900abRKxi<4Q2~Yy79*VS zzyK2@aKym^i!MMo3l%Jeagt(#l$SIl3ZkeYA{um!B1S}m8uJC?N)q*)7!nP+2pU3E zV=;|V2vH+vh>>VCx>yX`pd%U;K@t*;VptoCYD`1~8cI0efelcw&{)6$`(jFH0vEA> z2ohW{pacsp3nrXl!U!5R=m5e41uR$;B7(sX5~-4}0dWnU0Wpn#0YR0BXplt1j1UQ- zMUjLf*tkWU#2llAxP|`+X%vIuqW?3>fRHGYHDJP_KtTu>ND$z{1rRWhNHB&I3^-uJ z!UPN+*Z~DdKnoZhbT9w`6ig6;#DxPamw>2+gn1DYVKx%9MlTF677Uxy8Ve(;VOWD= z4B8?Xg*b>-#9|m{j2dI;ZowD{#*Yy(EkTGOj1YH=1Y^-yIKo6|Y!G45zyb_BU?G4F z8c4w4fI&nC4Ie;MfB=LF-LT-G!Uz>6Du@OT6A~X(XmBvXgF^-kK#*8u&|rhf0fUDJ zhX(*y(}P3rrK=+aX}VT~o}(i7!aA{PmgTsDb&yrVDpt)m^_!_TC7%-7P_AySV?K3I zh(6f_k>RM*`0BD6009{U007gn2mnbqB8vutAwiU~1X-0m6F`oRXK|FoIEG;`h!{f% zAcO!y0000GgTRc+~P*!uNUTBRux;V%U z16ln$Z!5!#00m(=((>F}i;r`3qu0Kc5aKub_;o|fT~Hcb>nU8z8QQtBTDwo6In@MZ z7fgj2O;a{BcM#mN@|%`(`PFq%1YJ9@7ub5@4)87Va4Ymnms{Tm>IKb$j-~^Ypbb{h zVWWRAF=ge*I)vQsqs(0x@~&1I9c+Oc`)@+T^Q_}6*<&TpriFKs*2{ouqRD$}%y6p2 z#cRMtJ=+P@tX%HXbwqjnO7p_HYY#b?CoAJSZS=5e9(HlIwGdrhju?m&7SD2whN=)bQX;mdtdX zTRxb@U;bn=N!d%Djzh2Y9b^EY!sH_C%(JD(htz{^IW}P-mS|s+7W*)uq?nv=iSJZF zTEXq!mu7p8Gd~2e(PNM&XzJ!`Y8gJfEI2b#^go$1oGXB82n)#6*fX6Fn3V*C5w4l8 zDH$xDht2ndmf_r4eV&={?gKG#N7DYwoE*%LoJwVIDh2}h8?af3l= zf(I`@9MAGN+E|+J6@eHU%At~So$NF!IRa?E%5zr|G~ 
zJ+1ktINvI}z;I-oSzF~4G66FI?*cgrNTC=Fv_suH7X!=fUBwT%fcm+*oEtWz;)dJi z(tR#k5`qERV4?5AAks};sd09q;b~z;iNj4|K5`-y6>clX_4M?XEv=&HD5;Y=0d?BGRse^XOSQ0%U5GwcTvI8G&KF8uSG#Xl068}l# zLe+4ZwcmWq0VcdJ+4|2Tb? z82Bu;&d4djC2)%wD&vO}>BP(NEk= zx?s-k--bLaVwGrIp2r``V2MX5&l}sjViu&ZXeSj+GujZs8Ib3b!_f||R}Gz({-1#Y z28=nl@}w(!**5mEQ=v-rVWx4rpNMlxed8m$gJN@{{;xn$98HsrnyKjcEP~`P1!40Q zFhtTo@@$4zbT8`t0B)!}9Y?NNURnJsaB5{P4rixJRBrh^q1>a6scWL9DF$O=D)yCX zMHKV=eyt<4kDE;eeqGyU`Yj}|T&wR8Lu7!TKR0x>?TL1FbKv2AO@KRr+(;Q7hKZ5o zQIN^RK|Tux^&_l?Nz63b8ac>T=!$k;o?kj&MNN0rQeBeZZL2mhUJK8jgq;vnaepfC z>TuY1*?vx*4DnJ!j&~UGw(YQlk9$EC#o^zH(KCYOc2+hrbrVGcRo%WW0>TxZ;-BjT zyOuXWB%;wLS6Qe=W3i%;+o)mOfwDSaV99t9(yVYn=3tYs5^1aQFq?D zk-AgPc2alh2vpq(Jw26F-Pz@uhHx6H(D5wY{hZifao}o9SNh6~a*))3G3m9-}rH{Ro^v zQQKcEN37r|AZptb5+wRW5-Emv7J)J%ZLcNdR0tt$Om{=72S}TwuZoK2qwUepor(=S z+PM3UR6OWtlU*xOF>tg!x=07#fQ>fR^(r{MY7nc%(&hIc8UJ=zi-tQ=M6{akutgWL z+V3cbly^tEpI+9-aBbfPu+g3N4G?h)J^KH{&NTp%BBIp{ zh%GwAMB8Jsz-c79X4eF9&Ovf^nza2dh?r{_);c@sntwAYPj%%osUcAu0Ih*@bigy5WtQ|x1KiB9I zQmj^Qafb&5@FCoJ?vMbHlvzNx#T2ah`_)^3{FFH-5#hI#H?fq!`s&VgvT<9GI8|@iMu?I*RGU^5-aS#_ zf_X~0qBkTc(1zYlCgI^6r6Fzkk4W9V4+mBo@`JvRU@ev6$dzW2>D;U*sF_p=x@#Fp zFb?iJa2&$i=_P10Zf6o=D6SPj4St~{*opW_XMY%XC>Eds_h;x8I~{jcYP5>wF`OeR zuQ>%AK@;T5)_nIbUPNIPe4btnbV~BTzXsCnSFpJ>Td{Fu7+=*b=g1DEGm* zVlkMUlsM!tIf2R<^Gb4@oS&Wt%?S*lZ1BsTzNU9i8(10!3jtn-Y*JS*>PaN&aVF52 zSUm$%45Yvx0{^_W>h(XgUXWB)!@?h{I`q@-=cE*g`+F${Eq%4ErF*AfG0D1#D=n(4 zfy~rCh$-~xrQ4ruLZ^~KdW1jRPq!=8pGJv@wo*-1b|e$nP(>u7u1=LbO?(s@Z52NLPz+UD;;LI7!@ob2hW^)+jr@Xs>YgPLafI6w0=>9* z-cB<0K4IHQi7U_QI=HoyD#gP`VO=Dw;>;vNOv8<5zJAvs;dLeS)xRl}0fI|MaD~mu zi;dcG4nWg23Q-YTFjjY%6MOXzOs>Xu)U+$peqG~*iK|I);atj3+E-{985%P8(o)}O zTOwkN((AF4M#O^pfv;C^ECo8O&PI34y?nZdA-G;r1}#AL>Q`8BQRRM8I+cTG2ON;d z-6l>A(p+>wTxqM395n~obX|V`w@gvNp@($fcFI*EJ$^LKGY&iD3QMsLd8C6Oh!5Y~ z6haBd)NEUnrw{j5??*!5GS;#nZnK6Pb`NhMI80Q6AOiE*&be5&_1)N!cM|qe=arMN zx<%x)@)qHIT3j>TsyrHsokyxCf$NL5$*ssDs7?+YnRX=i4tjp;_37u*pTxg+v!T7n 
zE^fbfRNwuWb>DEY%t8tpG(@A*r*m3vQ>lN>6mVf&$l#{XL}4OQ0E=(ie%60RKBDr?61vFKa5hh_NzR%LYt3w1I0 z>Mt%gb7R=)`F-H9S*r=d6aRNgVdaSlvKXE+acG#%Bszu!$;+wVZpcf zSGo0SIk|(%%98SnE@37XDxbu3D3VoK=QVjhWhGBOXg{r-=3#rvedtCgHs125E3p6- zV(9gY;PoyX7&OWBn^(m7rxI6?oZ95$gNBgMC$oUYT_{(}kVQ>x%0SjIQeA3E0= zch4!A1wwr@?n38jf-Yk`ttgstXUXU@$0K{(>Vj?^Iol7#@#umUZ>La{**r0B+aZa6 zA6KCNLyWa2_qd|qB{Jwzm$#e(2;(lxNF2c(bFSvd1n7c zbbt!BQmA`_Slovl;6*Mx?EHd>Zx+aiT8IykyjWL2TO?iYZq)SA5NM9b)ki2!#KE5n z{gG!dv~lxu5ennyB5ppk&ANcf3#_CQ+g(2F*R=Vfq&uH2VGrp^oKM0S!&yV_rX+z! zg99u8tJ;`39>chVjF2UDJ0A%)@cU$Q?%&*oWYg9kgJai9f&r~51U1rSDXq7l#S#6N zX$}Fzff`2mZ}(R~gf?T$0ZELO2<17gHNT0tQRRaeYmEf55RvEjlAYe7DQp)XAtp#K zE6vUy@`;FVVuMbvF7-7X047HN{3b(R$klYf(W1g@6N$y9yO`W>Bp~Fu`#xgG-ni0` zgT53a=jCs%H(0YQ8j?4%r6(z5z!4OiI*V$9?= zYPcCalo}W-4yYq_Q^o)@e;93|LMmMwqrR7j^dLPKZ8F-?XQ6C=nG<`;xHF!Izcp>O zgrLck!;=n^9WiHKhE;RYI6Jk2{Yy<&L>q5ui?O6WPa#Sefun zJc}0}0S7@TBPq(1of?l|_P1a`F#&#*72wYn0OfCW)F!LTLP%2c3Gs&S#E)FJk1vRh zaie5t?UyeCf_DNI#icA-*-!>v$g9!`byU-NGVSHtL+c@)0g>uMB7&Y5Dd$% z$Rkl<0@XI7IQ%WEl0JidkZUtp=+>}KtCaFlg0&vbnZ;YY|G? 
z%rXhGnc^Y2ASO}=c}YIJ!}=?2p#`uk{9u@43#wWAClVZ(R9hlra0`_uDF6ZAxQOzw zFf9epu#5qyq7rpUb09H@DUQuH5~|f@>BTqXGw>oFl{?h5T@;NFbSd3?58PfKOh`BDm8wW?8Cby~o&kRolj zvaEM$x|^eNhx#i^?^A3W|G1b%GvM(_zli&JZ+=3=*@Dc-FdP_tO^B@$JhPc&m!AcA zVYzqXj<p@WIk+0gw_kwlOJwP z{FlF#Z4J$&pIm@73KYX<@))f+8kxfAwH+XJP8;vdT~`|70Ye?p2obI_RZFBX%-uw` zH5Jo&F4$0B^c`;}>~4;H!jnSDxO(?)vCa01`jj|(X${6);94LVLM`|j)BNEb?B3>O znEG33S&D(A@6X!o<|`DDZ;Jya_8lC-gC3M^Rb4;RHsY*nwwt?Jmqs+(t5VXs;;h3# zx~?YMTNAQ;#iL>#Y*WN6E=QXe4xXM;i|)^B<7%V4x>l(dVmQXH1gN%JQA`I)Ju&+^ zt+139!}4?X>8yt@h(8g;K6)rt^9eR^gDFxs*;~H0^-B&f{BS09`OwHe(4Kvn$&T9y z6NvMpP6{D*a=2 z%QlD6N#^v`gJ&XH5?#im8| zdW&JgvLwO(BiW?X+aGo)Obi*K^}h?JITQ$}WTnYc*Z_GGkeo9MD}(T9Nk*IG^F(JT z^JanU-SqcX8;#CIU5*s#LA<_Qs#eM>Jqw)gTBTZ+ReFMrj=%jEviBV?s}crgqp|5T zySELo*x|!E^CK>qX(b&JqcM`42R#AdshKefualz>R^m>HXVr=K7H+>?h&T0LQvEba z)Yzlg*1BfK<(&UZe?(sgZ11vHpI@Wa=`;v}(PCi02+i7MYEbNdt``kqA(bKN+u$3n zDDq_@EfW$Sg1%(|tUjvVp|@fe-DonoJTkT&6DQH;^nq^^`ZE5t)YXv4gC}9!|MKA^!nI=nN2KtLx%ioh zV_)b#22>-o9v1(eX@z+_X!tv=(3yR@U0JSxVLgX4D|{h!CTTIdM%Z_LrPPa+G_^Q> ze{Y21ICh}dfegTbK1h8$(|~TD;LgKxVh`qyH04`5Co8t*Duf%!%nW1i%TG2;8<=@w zOYV5dbmF|O5{f2kZP0=#UZKR4J*Pebug4GyWj4+m2;*~!V@#{_1E~XD@NMhJLqS%| zP+AN03YEmAzCNUroD8QI+{B4YlVD~`+-NlpgwrX$M)38k5* zFu@%py4m-ARHP$QH`NqSYCa#1I5KQx%p~bQhRHkomkl=Br8-a+tL534(F^(nl_N{2 zx4uf*8LWpjP#_+)4*EPY7W4sIlHTgePbid}re9BAthRz7j1adJR9cSIdzf^*PUt$G z|Ldck05rze^;}L=mF;9|p&X#dU_-_#FN;7b*Ef{W`rARDxukJW)Cn7v$4Da5421L! 
zS+^?gtfEo1(cbhi#}e{;IYJz~c~|khhs6)dl}^qet37i!I^_4kh9oFGDuGZl$N6KM z@iSxlm$+cnWeQFyK!E}S)yDd@+Tz{W?j~#c@Oo`aDORX%bS`hoFu!k4EmrjJb&Dww0!~WZpDgvZa*U607GpmI zMv!w@qkn8QTwK>srCZ9#b`wtkFnQ6_>DI|OcmH~`I-}j-!zD6|8h%VUo~%5jC*_k@ zXi$VlP!War*=a$tKQ~YtypP;^m2Y|^M#glYSCuvXAcpqtEsxQ^8gXN(L`65Z$-=Z5 z!O%FUeSqA!6Btt53(t%{RrA}#IDw!{*X-Hwou>9}WJwdnzW#CBy`{j{^gnTN8`8TV zoH2caMR?F})2M=a;L{1cARg_YZ`}roLezB7-{v0LN*~%yC%KkCg<1c6H})!J<8r}W zf`65-j2o?0Sb;x8&m-+c2TAu-Gk|98F?*8jyV%eeOGi*OYZ2<|Egh71uM!)7v6bS*p*|LRGWB$SjC`Z-!%)t7&w~0er%=H7p6aGS-nre;H5{B zRV))k%(!9D5vfhs#k_6{a}vq~6sI?+tn>rp~Sz*6u)p z*OX}r+M9}@!qtDeax=Fk=B9{3JWW$Juqjc#|Zg@y(&@Mfr7;Lz_^)dcTwf#4xCOBF6}?; zj1fNw@H6A#CBl6}pxwmr!#Kiu<_a361@A4gL90C9TVcgEm$%$eZULT}hO)khZr{qXfB9HCQ)^JSRJ?#OqpH9ss+O2K)#u51o!B?~1tW-BZ z)@~*%K&`Pzz>FajB&YgLdE8~pz#FOp*UZ`zO9EgAs$T)S6DkF3&;5$2lpu#}N= znqry^C#*}<+k`6|lxARV2m{iZU92)>q*8b|0QKDHL6I^ApJ)TLMTN}X;+Q1(hi8xO0k^BjkHr1%U+h;71FjP$p4a$w`>`Ja^qKZ<`h zV>cw8DR4wQy(v5uLaN57IgU-(9Q#UBfwd5<)|YoXg^Tc^uXCi)I+1JYoZoHN_X*~d zm2RDDS*^2ZfhYnRaeo6*a#-?H=ZgQ!e{hG#w#zph6PK!MYLiP+E}BD)oGa{K4~o`Y zmGNYq9J?nZ^PFQo#pz+%^=`-#GK()sANq=R>U_fK;BFx9%j+`40%H)Dq!%p?aW)vK z{jhZme8cKZj+kMZQi#uwQ||{nmbij&iwYYlc-jGf?UtbQYJFPHit1v*Iy?6L8qXjQf;LF?ji1{zgwh&R5N}CH7m@V!qrx@} zzn6k{2*xa(wqU+7DX%jqugqW!0SkwGkj*$J7fJi9mIuW%fZ$YD`H+~8mJL%CwAwu8 zx)xtVyEaEdBE{KPz;Ae1VN&U6W7>=Sa&DerL$vrST7a@B0DUx}m@!4D`N3w+UC1Jm zn-r9CZxvU|EWvUIG-b!yn2`cw=h+McrqA^mJOXWD%G`6MsXSOjCtS>%SB^Z8g)W6C zFJc5=#dEU?4O^Bcbz>PB8fPyT-g>rYPpAW`K(c4mCW!a3MtMY{rSXnV%+_OrC;10~ zYz)E{o=3FJr=InBXf%gEOyOoI!<99#$jboYhzOn)VOy$5B);O(S1tPBClh5BlMpc% zwZu6?c${$o>f5?&Cwdtirk!~YY2V+KlsR)IL*QHQpJvs0y4Ws_PAz#`W&oH_cADbt z1=wt~Kk4>k`|RAp0Fu|;zWZP4TaMwS8try>!Z?FE@W7tSGUvIj360z^Q8z_-+OK&! 
z$_e%Adq&MI1`C3^>_`UE+9n|dAz%Ce0O$-~L^ z-3)>!{9*HN@{EZ$G2O6CXc-X8m9}}qx#o3%8_)Y5Uj3P!EL8pM{xUs^skmy(?lXy7 zQGux?VCxLM=>FyJ*h99$SUY15xgo*iKNFCg zc&?n)tRDG2Zp+ZhS87HB^343)FFWE##1R)iumql%3s;vbF?@S9SluIzadQ~JMhjES zb|*^XkBb3OY}5ez3XJZm@@D!SILdr5Qi0Ye@Ju|lajrdzBy&~FAEJ)YUy2)FRW<<0 zx9VEe3M{qWDk7b)WlC@%a+Xc_YV#u%PxIK|qLDg$hoR?CN0dXSBqwu2T2mtd9SEoC zpBoYQcTLdHigUj$h9fzD z>v%7ILuP9%MMj47?PGE&tq9WJEclp5vD3;R-?B7gOd^xUbL+#dNi+wetcB5x+PQ|U z1kO&R2Df2r1ama?O=Bar0L3mLUkGlU;s-@~&fddXkcVdHhP6%jg2OxNTs^Z;!V|%y zPn=BmrhlWW3joD0w{1SS=J>-tFS>;3XJg$G?U6ec1cPU*v!aL=__oST9>*zL3p_Tm zOR1z1>7#TOE+$;hTr7sM+b zK{nJ5sJ1);Bhhd{H<}v5K^0}Ey_4Pz<`D)qpc#;a1kw4 zuH@DX;Z0HN@N}3YJVF1O_;H$DV;2PnUdY3tfqdaKQVGLvTx}rNDY8E_EEbf}7)Q$}phiHlUY{RqiBjzf;fH&bCM>xl+nfhSZy~g7Mf6BM5(E zb3&G|J~x9%Y~NeQY?OQEF$QGbRu2_3kkF0)vqUb2FZwBwdTXV->ZhDTjB#+e){BYh z`Jo9dm(3;v5e7Qg#f#bA?dZ(w$Dp`7&IQEIAn*WB1Smno7+J7;Xvh`O*D zqk(TB*v`wBTOsZsvb?irSfbw1ACFK@9-OFH%tG%J*%&rTS~1Y0LGHYShLQv-huiDI zLaT*(_LWF>hx>DyvAk|3g47)SUO71#5Pu_ zZd_rF?mu~dq=j$_ORkqWuuv%)NQ=Qs3>)FA3gEZMWhd4YO_JpOoRUOsPjf~nB2?ux zo;rAigcfEN9bG(pYCP-JUc{J7*bFHo?>lIaWR}8l5Ot=zg((Vr9j?@ZR6Ny+?BROm z$FSfmkIf_?T@O^0%cbgJ0?(2sfn;MtM)KH^;7S^w%%DTCb%=bXhn^2$E9cYhW-MI= za%8c0V;iBLQ8+i8;94Fk1i2IuWQfN5#EVQRnF>PtED|zv6Ohhx*&)i;wmuc)Rc!kh zPxckz&GmyoBbs^;h^AOr>7|gpzx69cRoM&J|uG7?6adZjVcd-Mtr^x=-Pb73eoOy)l0#g`nY|ZvifeC6~d@cZ8p*sg_@#B z3IXY*ijvi?p4JWey(gBe@v)jXeV@;Lj5H2?okFV~lp&7!{6+vfgH+YzG9qp5+;;H1 zIc214SvF7N?~FF*kv-jKVIvvid%gjG29j5wZBYti=E0NFt1nd@gxhl9-l_^2*c*&7 z1qnys(9dAU3M@BoRgEfmJ3)d!St>K~_&-umXI;yLZLgXqM9HGRE6)n705ByB3D5;* z>z9%Yb7=EmG>Z32o5&#xw?p;hb0RB0vv`zAOJL}7mg zCrR4&S^cW~;MPSV%}ghHaVPut_|uHoP`E>t+rS|uC`|F`hzqaWus~3ygAK^BL|6qr zQ3ioM0gL=S&+=Jgi8eSMe@+Dy-@BnHJupf`unp=q9jnTynHoLP4%hq*!hB%FhHqsO z7QUT(_-O-+q7 zxU`Xo{JfYI(eMg+b=wJA!H6|KmRxd?6yyyr5SAH(CMxS&+x{P_vSY-8&Z--15KsPQ zjDIqUcE0$^K)6HunH0AOXn2zN3mOb|Q1Qz6k@=_)2g?>S%+R_HMdj~Ulo1*h@MgUW z*Aa*hO!*qfZZn`a!;7DGG#yPWVIOj+fl;t0G|xNzg=_u-$oev3xvomu>=bCcl8J04 
zmO7K3Bkf@u@l%~TkUubL-XlQR^}0%wFP@fq%qRe8prwZ3no-3z+{OSNL8tfAoa_nd z+!$O2rktn(;Y77`3br!v2kJ4P&~b-q*A0oxvT8JGG;LEJ%?L_y0vTjvF>vF= zuEj9z2wAVyc=X~OyaO8lCKq%UzqZi@9&69sn_v~N-m~8htwf>|yB3$@a8INr*Q=;_ zTx$&$vy%<&o)=WFPi0lUJA_oVZP*H!ggm6nX*EJDn`oAA!%eYJrV3RXheTil-;=@9 zC`8(9MIhPcgB}1AJeT-<4iZ8jBGAT!I=nn*Dm>|dMnYIlkr+QB-;q-Ilmfgjw0T?8 z*X&I@pdK8O7z?Ha>_fxpYo{ysyRT9QpXXc^u69CbMZOmZz@vzdqux1pUxCZY6!v`EA;oFIN0~s_k}0MY+SkVi4oB(p5VHiU_N7SCbfd=9wQEd{++8Y| z;jJ1#GGFdF9oT4~6xNqQ#a!6Fcf6mHb&uaaI)Vp18Ee*OxYa0T(-0G87hh4W;+OZH z&JY;z&vpYUQ(ec2M+B`YAB!87awCU9>7TnD93?;0H5k00{Wn9c-o#yzRPU*^TVFh5 zX%mRpGs_vsiZmO$*=S%jDiQuXSmcgj@2^!{9pm|u)_X~ZA$5@I^PJ%_(k(B+>x!Pz z{qphz?k_&5N^vRB=~oh%)#HmFmXL*+bo2K!%EIoiT$(dG#yS;SzWS=LesSpwWCSYJ ztn$5=L)t;K$}_iU-`0@~?QgUO0lKR679l`Q2x9;cwpBJ7OyDM`WHUPe#6!V7Qrfs;u8QURI6$YC!)?ghm zgyfSAjGRPygJq!wCXBWg%pNq6=8>Uv#Pu5PbB z%dht*(1RVV-roZYd~~zIa95fu3?2>!VMy$qFuFergS}Y3ivc9Cy9yNO^;%gvAdF5@ zG4Po#l46X31|4Wsr7$Xm8FnNymOL@C5KI0JDVp+9cmr##5^=2xLwogBzHD{FYE4m{ zKdmY{Jf@Fz2CT?5*(uDMz(V8go&iNCjmO^S?&tQ;j`yENyCu6#Qx&gdiVBNMH0q-H z!+7yxZVKroNg8crQq=Q!Fd^3zKhJY>-E!T!chhUc2e zrPQ1dTLnpqJgib#hY8kIy%JrCQ;Af))@7B^iHLAaM8aUj=Qtjg7+=(pI*!Cv#Yd4W zNg^eUdt&65j93v&&@Vq~7~0K5P^T!zQ#1#MMMU;^K3oohff&a5NT@E2qAP!RY74AM?J@+$qW;TZe3KY0Z8+oAlFi)eK@NNI(s@gvJ zh4btgku%oTso1d<-MaSi4tYA{tRY(s+0`;u%U4xry{kcNwUCNNM1+|@YEnz>+)!mz zWf5hC;RXYQM`*{u-L+$8RQYofMi-VaCh%yEcZr8N2tC8hW+mnkQy6L>htX+Dk0xXsA%~d0asv?`wTq*qQDhVs zTg|K>$8o!IAa|72$(u90xx-659bzG2lKGOX3S*Inku!?sspj^i$jRe~cZA9qn9vl} zQ8srJ*t?k7U<3@x^04fOWp)ejtaj&m&2=&|5JIFz8Awu!WCoAqJ`9`UDCnSaj142H zamXA;QZ5lJMh`lwI4<*^W}~&YwZFZ1M!wltp6oi=L-#8UOQj^!j_F?-pvIjD&R;x-A z=7gl|_3DaL^meXE4+p|fw@JGFZ1eeyDCcFO(Z(Z>g&}K)SR1W2zBan$Iti;-C|FQH z+n)8T4y%rO17v0W?tGehNjMF$timu!W+x12S45yq!I34NB$}Meo}sa4BxvtsJ=?5v zhlpb*d~0wspHBSdG*du8fBo65MZ1~3enA#E(0~I+BG9$0=Ot3f=xjI`hG7^6*BtMr zX*hTyu*|-?T7!f|9i+0HCfa{yfIVNkRCPI~o3b^{L>L{Kdam2;MJp=1G6TAQUUpU= z5n0_&>DE<|PM=#dUB$#R7tMv%yI&{&#DB)%F`lbQlS(ONhytvcJF)ayz 
zh%5jA69B;hL4Y7I9tnhDKqw3dgaV;Z5D*4~0bwv83=D)q0fAT&W>GzYH8l_x5vIJ~ zmQ@|gQLl*bsy42*O^bA{qjk0-cBwN*vd+0}GJiX@@yvT61xX89M*+SSLp}6l;Sdz| zt3%d2!H%QzuMEYmlvAVn>{@EZ#{C`=^kqk2VBxV3L|^(=6?`-+%0OO}g`|cn6_749M+SVJ))5Ql zX(p|g+>E}DH4HXnfLI1M(53fGXVwRMMYxgO8x zHfUIMYMtWj!C%0+X_{vke%=a0-N@ln{`#H@ECt^&+}5zC8@4aeA^K0!sH_5ff?g;~ z!#3{a!ZWw&jb$@DU35c9<)>-vgejFQ-E-_v?yPEE1Gz2Y!ChZSM;Sok9<|l_KU9#@{|F!u;BrK7UEE8w698aDqYwCp?Lw7Fsu?XJa9(k*J zv=tov5?U-(qrYS%9U}}=I^1+gJt5XtV>U5>U83G0#%u z=f2VwPl&ibpT@-NXoc4rSf^c(BIL}KDqn2KA7U}_UZ)gJ(bQs4bUeSR6vh*C(a+iY zMmTH;lU}?!O=L~6%)J#iAz5^^SSoM4CzMiL4EEmIPg!E-meooE_P3bEbs&HbHyzp8 zTRtz7HPA#2oOXAC?4GqqO^4TiAAXU)3NKm<09$n}G>d)I@;F0FNypD0>s0JLdE7q< zN($KevKTv+kd&C`Fxya0gnj=}DKoI)hUsHT3dEjBfQtD3Mqo_A6v7J0&NePADxc-=m4WOI|!e-%cswmmS@Eb1@eUs?! ziU9=3G7Z$V!o}o1{KkSr>Dum16?%OsfX8Qu8`mm3w5n?nYs1q1q?TUFekleQKX(?y z$&qC$;q>Nc&`1r+6JQA&f8fg;{C*ip0>)<*{vk*IIOpA=d9EeL~w!mY`W zw&a}`A65rd&a5>pRd`m#nTcusdQ7bLIKe~c?+Top*gUU#cqMPscL$;W0nK*0v>t0I zDHsrN4YU=%o;d&xlrktz5_*FE=A%jOsl4I`PNXg_;m}`#amAnjX=l<-%I$7vl4ZGs zIM!q8ZrJy)M+=RAc~V9z3y(p1SNps=kMW}o?F|Tg8G)%vdWcHbpbpvCXoh8sF7WJN z;0_zQN}u~a-2-+TAgv~j`ZcEZb zdB&ycz{{{cDGCDyzVe2h!dZmmp2V0mYM=;nsVC^B$3kLIdyN9r)}{v)6f2;ZHBPG} z+7JK)5FRu)eS9iugJu9c_Y{L|VdpS0CmQL4wj5wCoCYb~l(W_As3oET+jjgO7OQ9EM;T~f|qnmw#aHYzzRpjzNf~#iyRcG^hY8X zT)W9_tv_cCBx`7^r9!2mZq8rg>&``WxYfye3p2zR-zS3H zdVjG&S#l~Rc60jUnykQCI*BP-(@8;%sDs5l+}il&o7P7*I-xa~tkfs5V8F+B++=Mz z8%f|rLqLin5p@=z!>O%)bB&l>KuxI)Vgc|!aL8**ILC=O#Ykr{q6VuY#)ZcJP;6}O zlT&(Z$hL(+JAmE6hmL`W2UcXL8eRp}s>9K}qi(3CaDONd>Y#<9PjWyLxuLB9jj9dX zb%Cr_wt|i1<5)<*Fz*+(Bjb~hjP$bj&yUn8778dt3hlndV{@sym)~xka*+sV87$3C zmJxC}iV~qn54+z57f6t^-#6w>y#xx>gAzTWDq;x8uDzZNYJepUK-P>O3;myba0@R@ zUU%~Ckj}yV@H7h}5aCCdd>>u~fFjT+LFL>mW5}I+Du)^3DDFkXR-9oDI}b9B3^~*3 zQoXTnpZBnlz1|NkvABI?`wYiks<2KMBPf%GoQNHR+NwM`wxoO#G1W=PF3nX1WLJbH z_caj0xeN}tJ;#>SyCkItbf20Jj-8J^uE3Z!@1}>(d4338Q$8G6KuzG{nlm@Im~$PM z-&Rsw8sQN5SslaNdO_K!qInbN5!J#0$D-6h4aco1_fAq@mzTFy(3{@A4Swqo3TY@a zdcU^8fxny%N+_BL9zWMP(&A8Vx1uHbt 
zPT=D{(-7JzD6_z6YwZidShLZuFQpoxi09ZU4d3!!4 zXD?ARZKWSooFpATvieJb&Kh zqh?APy45wGxKpMk1cmUEnHiw92fSS{-1_>jiUf^T!J;(zLjF&~N5?LIJG*C0pEv2D z{X99w*0f~kWX_19g5VGmy0IG9_5$83~DP^vr}bjZ!C_{g(D0W8oVhj z+;OqXpQGfgkrrnz76>fgspR?tQu5omm9_C@5Hl!UPcE4QXx7C=b8-j_`k5Rchgoe+ zzPcgZh{rSGY4b=PtpggKWcPzR$!$KJXt896PL|D|%>a7K5EV%zYrvbMvSVR)-+{Qn zkfWjX_mR?EuX>zbN|H8&PfwOVT7Xf;rhSnQ%~y}u+PfQb4>=jPLaLxG zht@RrCTU-%w>cN_vE9WKXKTBlIHdY@-7bgR${*H0WwLuan5GtJi6W5EPjIj_jo6@9^P4xD<*YVn>`^ed14`n8wFyILG52ZpJJ5EFXz8dpZyxXrSwpe1vVZK4szg- zi_4x%a?(hvW={TtrF+sum666Da__CZM%I~;;o9#<-Aob7Z$Xv1LFmU3sKWvONGsb| zpZD`&bp9b-YfA*B)=5?h#f-JOk!ntjj1Jk!41oyy|F<$$PZNfc^}=aJtr?N;IBn@u zf6(d}U_eH0C!#U81OZDICgO6V3P8;k343j$G+MB|-x8IVcgteIqKZK9$BHY%k;GnI zGp5g*?4iXx8-ms}c>{-^L4~d$nr!%jeF1OB(PAnfIsm}`G9&D)>ZvSBLsTe#$brgF zu>Vn49sSJjJ_^)O1F^Mn+Cb~j4ZS+l{On^#wykRNd&=M<^_7=h{e#(*>c!gh3PWrC zGcAMuwZ$<6sW!EUe*kiKCLoCewI$TZpAV@QV;VX2{DIGjZ-G#qamVBs%od!KcEh00 z_h7wNlEnql+9#}$qPxSc`6Y98FqGmQq&~)2*~=`CT?coghnvNX_dZ;iZ}KR6Jj;Hv z7TX~8K5RoyC+>20Mw$189LMuu#V9WVHtO?J=)z zkG%1B?_x=w_L-UGkCC$!UtbF>wK%7Sk0Z@F2%vQ2+20*B0vOa{f`#a%53-V3K8gGE z8W#neR)vUDNn>)hcIQ`ZBBy&NUeZ(vPR~Kc2xnj@UE?V0W&$4^Y@+py!iTqUrE57z z?lX8qptI<;*Xd79>~-ZTA_Yf6fmdFjJ-D-2%|m}86jzq7>Nn|706L__Ad)naUi;TK zGb%DTnZL5G9zBdJ-HQ)nxK+hVMHWlfxgpaL5%^n7R)lgFeRaE-7=$RSggzApx)g+A;i z-8^>ma99wu1g0lXS2>Yq_k+%^O{y4zVy5WI!2=^9qO0{8tOwq3#G4xTo*hvSeEly# zbKFfTNY^B)Azk;Ivr3t$U3$3=Z`;iQ>v{Ho2(63@AE+KcK{Sb8?tB1Yp)9(Nonmt1 z?mZ|ggm^j|3b?28$_#Y6K7;q%^W}nk2;$o#7Y6>UB=R<7H{P;>GCzwPcSlX#88O6! 
z!FCW5J%pB#yXDHs=RrrbLc`aPYuIS7CPue6<&@F3l@`|I1tnGOnKdO}Z6pw`P>Zl7 zx_dXT0VF7(22ltRe;##YEE!LG*dS%mfX)QsKHrF8B^^+C$C<|F) zkD3sCnMj}nrLO$&_^xi+2rh0TZ8M1yDm%jYHdbN~6+Hc3NKZs7z=k@q+5l#huORf* zTVve}9TCr2gI3=b`&RAJs29nz$tdt$J^xuR>l*aMTW>#tqppVrk)V^i5>%kj(RbBP zKj1F5x&pR8n=FE?DSw6@M6=P8;l3Jzc&Qs#xI+wFOu2UH5tUdHg6pXLY0E=K^?Lo5 zJyG6tFAZ7H2Wt-BO7R2#LvUI-n7;mp;wfWZKK-gvam;-hZqcz!>o@x>6L!Xd3jif7 zgbC=&O&P*7E<}gh+x>Yo!k0OZNd)L$LN-mugyuZ{`ZdB&K;M^&dluf;1UgG=QpWol zH$ek&h3%ZnvWy{$pURq9Mx|(pyN-S$N7|yIii34;l2;NP#?XjNkc4Nmz;mjasw*;h z{3swEBi|IOZv;>7aK1?~CEdUu&;Bzk0-^!+lp!?qgTE`X!f(s-MqCcui*k?~shS^Z zqLbDwkRLMd6~DB2Q8MrK>rjWM!V#pHBGfww$xGgRZh#wg{!H!$MxSW7tC1v>bv!RG zdAasz3j%+M(%HW_!yzFBQZA|vLmK3hYmitNob}~hTwju*K6n@HHmwi~`pvXVc`?Nf z1=H)8HF0M`W#*$l5&UQfvSWD&61LYR?`~v&80Y)HLL$5qg0%1^JN_7kS&B#ZY;^TU z27>Pz6$(#}r+J0Th0-@f2(WVfU90jOzIl3^xyr&so}5-8*BOg6;z_KJx*f=kUqCZc*I-I3;RX!J5AYdTW>29*q5IHe}wR7P#>!n1k*dQ z5gV-GbdQh(YNG$7MPLnG@>ScXDvnsm($!8Pm=GrYgCJ`Pd~X;zc6#j;o~PGlav;(- zs2Y_XoHYeG8Xm)&afkE6{QX#qy4sS55)|L*AJRm`+9wf-z>pedUV?ET zZRjIhe(E;K2bPHc!DHD&y_G6M?f*)lv0oE5d#{Io1HdwC^GyfIHJA>rxBDEnozLa~ z8@L$7?&8J+;oGz#Qr#_+H7c)ox>Lf&Jwc^Y;1%=ba7pU<*%qhAWxOTU7Nt1><&}0$ z0*D41$Dp&OWKPBYI-;&Q_8644Ur_O zI$?a)!7FoGnwx0l<@yQqNYgDrbp+#Uj2`dCyiG{9WzAhaVDv`)qqX5C2y!RFoln=A zVZE_2>Mi775ifx>E~r2kT~$Qe9toxW>;1t=^`nq|rLeQpc*q7jj}6yG z`D2^75KIE0)84Mq@Poje^I7sR1r{R19MpTZ>)$M+*9w~SIk)SeBR}Fsk@Sl zEX%Y35X8FViUUh!#&%z8-iwt?+0pPyAZE`(2T_N}t6)F!f*)e>ZsgYwhenYVf|@D? 
za8YpO41l&Tp+|oI5yXL=W{56Hz5}B27zD}AiB3bh*MW*@@(R&7&={7|eFrSQIugEQ zt4cmTBpndX7YoHJGElqGn&VJC%C#NW!;hWTO#9*^c7ynic-6#<4YdPK;#mQFkG6};5ObaYYE8;6S8v!wK=JX+cVp;)22sZckMQDf zyK12g7eo(Ef1Swt(tbF1JURyb84&9pL=s6Bsz}5FujML0Q|(j+(px^(4?39oJ&0B^ z!IuH=vO3RTd+R)oTTOtPKrT$4=V}vn*9h@#)yA@%kgV~!K_d$Tem2#hGi6q8ezU2} z6cnvt8 z;sOs;DkmBWlsyA0GZw$jTiQEuC&^+E){^1Alvs+6oRq4t!B6>Lb!k|myB<$fZs0NW zAyz^FixS8@?A81t_(*tmig}va*w4a+e-;{GT+ipTcCCtTu!F>IK;S>KiquFHNdgFO z+&RlnradV$8X|<^AoPhSuLtp=GGG<^l;GgW9(0OM#+7il#(FQvCqaX26~?3GjF+JY zj8>5Mz1o25ZBe?<4$crnOKP5fLD_<`LTy7RWHn_&syUuHh04HjA8AOy;8Jlkyh1X3 zK&w-C!;$qGC=Kq^QX$X63Bq1|S!b11iQNt$_UR9J&)z-WuT+5pW7kWKOp$>A4DL0j z<*fyw6$1lmUnbvD?+X>6WV@st>zn&qrvz>zrZ^ljQLVIdTj;8GtO=YY5eMty7q0zHP(bTQHS z*{o(JZbgwN4f7ORcr{QUDqR_lcobVDOHi)dY$c3f`E0|GE0+2Iaw_!%umx zy?RTU+C-uPMBD)StDqJefQcjq$ZZ3r$=-f@woBCWA|hpXDWv8-+lm}r0b>Dc0l~P_ zelW;Ny(AoS@6GN71CQ5Rkp<)9^gZcVvoTBCGk1j)3_DxZRr|U!qEWdmysd_Z3tTXA z5bN&RQ9Qi7wfMU!Yu=F!1_T%k7?8ldy=zA>f)R{h1i?6C(imfmF~%5UOdWX5Z{4~F z@W4^sa_@5Or1Kc*z>mIB(OmlMPUudDuo9^Qc#?>)e7rLXhSja=gC3KELD_22p4y+} zB<%Tqv}GZ^!MIDdsdmOgSeTi42lBFbqY{nBV0eMTLu`G%m->bVqj}C|VM{;&$Lp~( zMsx?rh{o<>rR7=i67wxmu?Mnw&sO09Y4%OW7-O7*ft9|yc0Qkjal50|8Vs@1%~i+~{-YUUiXsjN8;ZL^z+whl3EB%$RfQ0Q zGKEtSl`RupvRxXxRF{aHl!$a26_vG+b3yDE3Y!TT2%SJ81S>Rj7?SW1LMXq{<{oS` z2tHu<)KZB`&6YZp)KalGy_%};WtI(s6R|@Jv5+)_3*9c1oE2$Rby%(^m!j^s$+e>0u9ZgeZ_vzj0VX7jLW@u%6a(2P;6R@I0kW}Rya%qvN^^T` zX#RI@S;3$&z}T?PZN}Q^yotSw5zstT7Nbwq@d#psqBKikRI*acIYki$8Y+YkXb^@_ zCQkEaKF!a3nl%76tG{4)=9x${$LAp>0HubXvTiUWMavAfAvp-8PEt8`EQpan6h?+- ztr)>|zz8vGCEau#Xt5fMs(e;DFPoFoI}Sr*BFqpcLZOjaA?S+9Bhs-Xh>*HlE}pAN z4X;&wit}afzC)@Z_6m)Jx7i6UL!mI7g%JwEsTYF868MMe+3nUKx(0oh*W7H zN)48;>aKd$xvNS#GqZY)ohV4BAoO$Z?4gP%Q_@iS$(6|jtC%=pl1L=lLga|ZU=*V5 zMRt4bmW6fZJSLW5AQe1AL4+Ml3xgHTc^|Fg*!|a=Q`NJr(fyV4%03vrr|5tB3(JADV3p6hA-z}!r2?#z^UI9JW1Im#^0IqL(2lW2Is$i!#Svq(?E&<=h9w4jrP9wbU14sB$9#Fi20`7q6j(BBd5VX%;;NIDFp z%>m~*6snctKY5V|GcF1wSFpDMZNDC{pUw32)iOGk& z!q{Jo!-vJlzMSnh zCc=)J2BC!&8*f_i9u 
z6MDL74{Q^Y?03MUc6IOqH6Uh8OUjX~x&ZeCOGv4hhC)%A1?}4E2l7gfEJK>+x_EJLc8$wDx?7hd%Pr`bYST1GDb_-Jciv)=YLJ$5(vF`T}dDNncwL^V| zP1~zxeDg!wQN?_E(0JL4Ti8iBk0FV5bRyiJrU}k-66&RnimTPkl|`X5a%8)WT*^yA z-8~2z%&}CgUFUBApv?PgI7jc7!n*n+LLd;bEa!{QXRA&q(J7v4ua*3h{``Y!3K*R< zP9j-K7*d$#$pK2Mumcd)!tNeHk-t?0UMnLEOJT!cx2kvE-Y-Ny5dFv=sMzua{XQru z&=;T$OcvtkbkTV^rxM>}FYfQ7&A008jOFLE9-Y#aot9*P-JV3H5CdxVo(Qe!%sXrX z1kUljNQMULN=^PR?=G>Q-oARk&k;j$D<O-O!Q|Bj)JLgqGa|!KdL!sS zs-BBi4ueH-xEL5|Xr%TR_#Y1hVp%7b_U2PHRjGd{r^ z5ek$)tE$TB`#Cd6#XsqhcxC*N7!5h8PF1i4^>yb@+* zh_O-Tr^aev6noCn_?7ramyLVJOadmE7^Q|+c7Xk6LroK>GLr0v`*M3y$|OVoLI|5F zoqk<7Q;Hi(2|ZloNn}yOw7LgJrW2=BF%vOd)C1Ew*qS??So2c1?a<{IbX#(gDj?S7J z&{2Y8z|=L~dH75nt9uEB*Cf0>zV)=5-A8pb#chlPkK^%KfSjlU+%JewCglklnom>J zgIM-`63-FyFd0?s6p3 zDXfZ4!Y)vW7zNJ{kR<^B4*_fW-biNEdX$|(K9D{G;2~GC9V0#C(?}b>$NwJDLWz9! z+AXI?;i&}OT48o}dkiZAsQO`{03mkcqKNU_vxzbW5fE|W2B^|~pQF%`g3PZ%6l0*I zE^LEiP+HKDSV4pP3yV$pl5!OmS}*$+s)ELUxZXmB$9l=4{PT}}B>4l;OtSm?JZ>P%!4R5{w(;~2t z^566XN3jsYAOT{V9 zTqR6I1zHu0E`>*imyB)#s+$e=_`bu#fbB1fj^Rh$X3(k9R4b$%$M!tvFP((msyd&)hcJpG8_?#+1f)I^+{`P}4+OKY}5#o#-Wa2p-n`tveI2lE- zKGJa~^s0Yp0VZv~PcSRN!66TdlI{NqyRM~%l*P4v)4T2ArOqJ+2{9YN5{q^n7t{Vw=rua`7R>^OqwE7rN3?FSpXoLV1+8Oj-4E13UZ7)9ZjGZ zk+r_sbaa1pSV<$#oB{v`tZXF_I|8=n{vfe51~L_TSY(`@u`zq4^on8!XQA_&-07Lx zuz<&f?PJ57Gt>(TP|^Fa&OG}<@)&~q#JK3W$DL##z6HG4!-4g|Q_m%K0`-luQj~)N zA16S%M;7CUvZ=xQVX(PkI|9f<=+33=vF_aYly{LD&|CSR#|p;VuiIh2Ho+|mtL&VG z+sQE`yPT=1(fjydfl`l+wl<*&K_AsL3pi%=GS=?&UyNum%v7uTwSBRx3SD zO1_y3K~tTYq{=V9W3p{i!wx`@y;!UIKf9|Xk!MuknFqdEGO1+ri1iOC~~ZPPSU$03qP?;25unfldVNB zacLEUzx02%;SOpCvL
#CerSwr|!-wpn|Uy_!f!f@Uko>QnP*OgYtw5YtMZxIQ8RW{ANVDm@38 zx~gWHnN(xGYC37hMpX#AQgVG8d@ZvWwx{?5ee##!I{XRbzfrAFP#IEUdCj)s4VHm))frS5ZXAJ(d z9ZlYd>5Qe=6g_PVD!o-y!Yin>C(4m)F!eM{E!M-AcX)=^%jkhS7m(&XVXc^{%Enx5 z)0C&X3{3Ad`KZ2k6x+}lT3c0c5J93B!Nyzzr0z(Dn~EpI1bI;+&IYn(em0^z7?U^I zJT;g+1n|D#+yiqiUYF&pki^LZ*6#SnMe=*v!~-+NV(K@VjB&~mlpx@R{8)~9L|5ad zwytQb^1TMSR*rVC8&i@>b77@)lBL0ll2M374=V=wQy@h{Ud~mB8rws;zFbKeFTH_h zJST|-kXf$CuiDdR?b9KOY4YINH1O`cqV*UQXcMm8Fr-jqW+9A{#HG!7)A)oo&Y1|| zAUq^Op(Vq8U6KA4v$Z&Bcrs~7U_I;pPVEdt#T7=wyIFrDEOz?fE$QcY3on*?PKZMU zwa?N&aqX%wyk6qM=Gq91=Yf6!8QTb$>fxPh^E>rYN^V(HAs4BeYr{>WH=1;1gqKtq z#jNq?A()=<#=)KOkT5U;QuFT;EE_(FDI>{?i?$Tei*hsHpuVCR&Qm5H0b|mrWFu)+ z>4sT?J-Gt8P?dR_4oKrta$9mGr+(fiRC2Fa{)MfA6m^F~RYW9Bd7UQeG(I!8M8gT_$r8 z$RciW*ekKxvoHeJSlBB{^|*NCwAqZAMA4c~_eW;a`HoEGI{o}^89Xx{1j+SuuD}ed z4dxkT_LtRO2~T=56_;yI%1(I~bhaDeBv?sscpZi)ok-0>;_F1>?63>CnOjosqHJ*E zndmCmqZk^z>j(EF6jl}ZEdO^g#ibu$yl|+$tff5lU-#1UROqeRFs2-=an=su`mtT) zWm+rNay1NS&hmfj5>0+Xn-X5%lFT_KDKdtL3-a?sAM~-dl<=;*yvL{^k~#Ffg+)&a z9Ap#58g8`#8J4{Sv8h-3B5#b9Z-08KB@w#^8DbRpEb{@)6vsrgUb)L)@vGdTX(b}4 zY;`ci(>IY#fFzUxKa=nPvH&0Ir7-w+O%{J<>fPmQl4Lc0DMFn>W&*hb1}cnoVaNbe z!Csw<7ZdInX}rJ#jl$#;D;XSfnmqWQB(;xRo5y}vDU)zqti;U zm~%=0hc2n4T)}p=Rv25%z9cRQ=ZLu!+({bjYB7d^W2R1|MVDPaiS!bPc9AWb6BkW@ z6yvkN`Zhi~N^~;Pc1c(ZECi4euRcaIE!GQS;5^QLeGLonPu)&Z8JyA)Ry6p z*}ol~)}_^%l;ds*0Pv-ll{q9X$L4kh>#hYegW90k>ThYVB!~%@_?Ds}!lg#ZD+;fr zlNgW%4fL%C)h^lx(gJzWH3@OoLJ^@=Gz}-}TAy=Ar$Fm+#0%7@2|zfln_FlPXi^5b zXWpX2qQjK16PH)G00+Y(WGBeex{eqiQf{!E$!X=}A+h(Esu{ekt7Ta2VG>{86=rtcH3 z*3TcMLBLPyAT54U+DYS(1Tc&vMVlTW_M^KaZg6Un=O#XC5lPwO37?I9`#(tx{jYR&VZ z15n)8MW-ulAkb76yVM*_=qktDNrsDjiaF?Gayrv!`K%@@*dkBqMhwWg>4h|WgI9bfZ^KY^%@E!vtK@fu)In5$D9+F9STCav!UrJNI*MF z)T}!x2p>1#N3rHIa#UIH;SfPqr%0VF=)ty!YBI?i0yO}~F@xB^jfw`Rl1oBVVY+ti z5vJ=ol;qUjh0jrhPJAz&BFG%U&E#y`Jqj8eBg7FHxh~Bpze-*N)t`8tAk#dcVWAYW zGmB1*gV~9pQr#JcLitCX(lVHXiPCiic!csf18*ZKf%6VY-A>&r43c??a1P^A&rB>m zNN<`^hzpc{avdG1)MVhQx8ns;Oh7kDwO&Mk##?f@iY}A95{i_g1I4L 
zbWI!VkvFeIH(nL4HD}Na&l%?7M%)oFaA@Yc1f*k_F-)Dj3Py>JaIp%#gUC-V^yn&s zS1%Yc$1RSLdp#i|Ge!0u6DVt1#cLJGho~q@PTH7(`AI34eGus%F_CGpvcpHaznz(@ z1Ddd^ml+_L>>`j;#StWsXo-xk{cz!Q8XWdKX2DNbM@a^Cs04<5XhU=rB^?~3K{%Yj zp}%;BVJ?R(&>DCQ@yoVd)RPgVm!jg*PLSu#oK=DIQOhOicn$qVPj&ME3yNrNk2&aIYX7{30h0B?#cw3aL*?N-0yK z0lFXmzq~{NE&(V3r(wUL?46IBl$vjwB11JSA#+8AU8k@qOs@)WU~TB!H8e{Fuz&@F zy3kAwgf;-c#6ztL3X?UT(FS(&Yv?J4Tr}7;L&Tt(Co`Au1%d)Hmar59G2bx4ad?Un z9$}~K8xEPuI8=%4)`V0Weyd?=IJ}0ZVR6gRFf~lma>Hl17;amg-B#G0Stl-RI&I-j ze)*ijv9LK*5#exSgd)O@2>+K7@*gF%2$O{omIw>cKfnC1c;xlVr!V0C{_I%rMN)!!^3{^{5-~||fKm!j@&_DzW2vZ*j6ne21 zKR3`3wIY(aX=q*#*If!+(9$%V4qE6^&=i2-8Br0HrYUo(q-IFwYiFVlBihi^056^M z&n2(SK(HVPLd6UOsu@h=FPVYRAZalu|1aOLzJlJ^0sCj>gE-V@NFrmHIWyIW6p`5z zkmX%wDSGBDAP7Quwp_$idDU3?Kp>)&jN*w@Oj;aWhH_tFD%x=B=b@;!4QkuA?OYtr zex#LV3>h6lJ5BaAwb(ThpX$FhKKZ60!WY!*ML{y3eo!N*NM`b7uI@77vkwGXb{SDf z4%Ko94uo+y5k(8v!m@CGnuWuixflpvcp!M;V7PM^@9c#qt|*sluv{;R?jo|p)(a70ziGvR?i!a%6&)j|g?^9HjZ2trh5 z60zPxl)co&hH37h7bRq1#F4)HoKGWBW|i!BEqEY>T?@j_*q;7$zgw9`%zT(C?H&Psw5ypVsP3tfm_4^yps z!jg)jC#w}HredmhUvWi7CQk7XipfNz#xF??`HTz|PfeNp$~S`yV~GVBiI8oi5i&H7 z`X%a?7@CvNJgD@N5|RgvhH*I1!0D=0v+5NU)sqR=9aV(lkf~JMIx2%_)J(oDtLFW~f4~ z6<1VKMUALVK~yvlKHWeX2%eHUL=yxMAT*t6O-tQIltD%$9GMQsf(kyg*$|7viIKST z$SGGx029DKLc+mtU^EPb!hkR!5C{YUfgl(V2!p|(Fd!fZg+qcs$x~$>p)DSVg8sy~ z0^>khAPiWm-+un^Xx>hFq0f|3_l<`)M)M2MULSnCTjSGHX6T6v$UG2^+Yr!-fkAEt zL3?l%H4r+{mf9h~YqWNeW20Rjj5gM>XXG3>agIk=(q>>4_IL}@y2-E~6%shY1fiBH zjL=&mPah48L!jp;0B~xtNATF((60~$;^r-Myqj5SYy3nHEVooYb3Qgfi0<4;M}J%Q zG*{gfv@ZhIcV8$IRc7B_|AW$pJ4L=@R-8u6BV2RLgD$pteN*-G)7W#2=(et^4Kk@z z5V##el$*IqDO?`mCINI5!nG}n zEGD>a$z{S}kO7RZSTu^dp!!u>0Lrn_1+Gp#f^M$f4)mKp<%q$7(E2`reG+@xi%oZd zVh78UgTjC|1B>tNhi7@ph>99Lo{QL3cClA592K-$O}1&zb}4`l6BO3s+bLMej`@(E zS*#&M61a94S z56@0#eLA2K;ZuDVy#+@k38jeEbdBxB8W#NMj^%M3Jx^dl`Jh)4Y-KGu0xHOoo#2YW%#I%!WGnjBkJC4m;gXkY;&PubZ7%#Dv#tk3k@d2jPTtm(-an~8&&PE|?r+pziy z1;epDqsIUd^WieZr7_)>fe?l?<^+d81hR_#2PUbNw*?rtwI#M(=~&s$JjsDxz6<{5 
z7vEWTt3U;0G%Xo0n`$Dr$;P(n?8hDz`t?84FuB@xW{~q)7SzygFAR|rHTW(r8XQ6i zGHA;UzG;j0JEt?Eqjls7Q6(`YI4H37XN#z$A6=5rpnQ8tg=iBZ15G0MTE6H76RkoIm4p?@Ls=1KLQiwv9HMl#KNeP=fHlN!*jw%843Q-c0JD3u0Lr_qCwr9er5P{ zv6<@E#WE&}Q;wizPVON0w&Gp%KsyGuY8w6}Pr|k|hc1`jGhPw2AJ+G*#56o;Y?|X6 zvVb`?2dTpJqO7|UQ>rq3)O9HtUAx!<`?{AubI4OuBxsH!gAMd?qB(W}(T=D(4Oraw zE|zx~XQ*)%gAjPCqKZ?x*K3Fs`uf}E(5rc8>a{uk@>#BeZVkSiAWb$Mix)gr&k8{u z3<#0qeO`&Zd~82g>`k#*}%|ZgO zlF?sEn%wN* z8)G&4=Kz7W?QJPdWsJq|&jE#t>sUX>!aj57WHz-*^zb)SJMjT7@HJP&hgPg`WoPkQ zAZ*=gZ@&&Y@S+7ZdVp3i=s1X7htW?xBlKFMJwceXd%LWAO2mg{&DPEQSV1C^bQ|bU z5?{!@C(y{S<(y2HhMHXJMVV%lu6{$P)W&<^TK7^2{fKu0pn&-jw$FHMqy;RlE<*@2 z`gElBcd_WTzsW;f5nzJx4W1UqpNq_7EjL>w_{f1|bya8+c-%DKHP@8Un6Z0jQQ{Vv z0#Mpp!QqN#Qi{yh#92UU%fEW8uS9Uxu9Z8%4s&^t@ASDiBAU@@`sq>@7ppBIb1Soc zWX(aHA)|#UHKWt)HN|7VW=(sEpQ8%>25P6vhV#%7r1{k^>~n#@b_m^j(}V}$aF3HdKS?GIiL@D(5^k z321lPWKb(WqR%gXJwh6Un^PG2`C5hn$Xz==bknAij+S$Y~iX=^UCwh)aZl3;NCOGm%M*?bxlV@0* z@IuQpPFc>{73F5~TtFI$(n$xloa%tsa_A{oIds`DP|VVW$-`wQocJ*L>N0V4@%&XJ%&mGgq2>v?3-1^U4Qg@n@FzF3QC(0 z%Jpm-{G*|zRJE}6jFab&?Ak)IXB_1s`DA&mhOJD$kjS(3vZsWJ&XT#B8kKd4_Jt(? 
zL;6`V68x&A1XYV`;HCHzjJ-p^%)vIPUHhB=|y}`tTE`mn_5}hAc_`=S1?0lR_(|TU=D-dY3)^q9)DU*Z5-sP^;-z zA%S+Xc!FYcK5`af;zzh{7^R}Em`4Gtm?`PBPo|RTO0ye95U6f&l99o`Cc;KOW`I-g z0~bG?W8Se5MnOv;brDC6=c6`TkTy>|SWTwuRkF#{V!^6V!QMa!$7c?>f}~hHtwRMD3|AQ2g8f8STe( zI8r+O4daVj0y0sPit&&!ZZ?B{izFm1<>6^%zY!A9qWMS`nBpeyjd2`6XICa(>0dZz zPJmAe>rI4NsnrSwsi|^>(&#*oomccJXjyK??jA?}^Up&PvgskH;iO^lQ)3(R4Cln3 zO5Y7-x6mV2GXiN{w=*CHt?-B=%ZEy@^qcfa`WR{kJ}udi>R@iJY(fM`lir`DMlm#X zSfCg;<0HM6pUr|vW~}8=2zv`C0CRHAvyd>(A!Zkzyhu>o@&9ni@En$_>Qaz`>ueYE zzE~XD*{;9XtvJ< ztSJ~d%wg><7KBj|;?!GVH6maW!>IML|Qc^T|sMI4bYye}PZ>ss!|Q4QUSrfEciqmY=}c zyK+>on>EE5@nR9>H9e3N08OGax7iC0&`EQ0U@#kR8;MM(b;t3$)jzG1tV1N8e$XGR zCD?0sP7vFjfCiy(MoZ;wvb5V1B>V{lw3DqRfndNrg3k2W8zP=1=w2TNC@+k5i(B-u0hk*-j{Yt+K1Dd#jf9rPT3FdoZu2Tv(D*)7aK| zDQmnlb(RB2N>RB&%m#l*L6ema1n7Dxtur$u3*SxrotTh`1TgwQu~vU0X_+1034x)I zNH81+cwjB;s3jh^5Pj=55HGxd1_FwF4hrJYR=yz5fDKo;58|@UfWtA)?`&As4X4Ia zYt}q&*INqA*llcA)Gl@nlGI z=BoeyDUx_xbg_fuL1hJ(fJiViG=~n53GS1E&w}QOrUD~Gh|me*1Tf?45CXcwRSuMy zzHMb*=jXOmd6?`A@BvF_owUIR11^ReL`*@c7&iBiwVLE&VEsfiqT9qj^|Yu@z^TmU z;+(f)sVcX6G`^8cOGxk7bzTPg(b_P|B>R6}_QOF?vWdHLGWQ^4;_|V2B%sKU)0GYXS`K0eD7TQx>S_ig zXmVy~YYNG=VS@Bm1OysUL!*eDXN$mG4FPAsPUo#eY~3UT4}=f9U_8){C`K8v5w3OC z6?YFmRT+;1DU11!>CH{mNmSQqU<%;{pI-I7rN-(S zXb0W}PR%!o0Lq`1^kq|LlQX1qnyYQ3P4MN&aIv>>+JyE0lLWG3n;I zGF7!`OtM(xmfYwuBx#y-m8BRI`O9LD(BHeH5jkAq0g+njaO{*p=xysv*X2!;cwExB zy4u$qLGGlYJZ9M~>JNa`4F7>*P_5=mNsY6m3g^0b11mlMJ$h4%ZGEz*R=K}RrZqvf z?N$%}H@Sqma!?=nM4sUhhsEnv41V+P;Q0=?_K@VB=fqxA)Hu)WCJ?@XBr2aj)n-`QcQPz zk?`WDyg-jy!gKh%f*itlV!(iYWpSAl#Y-y(1TedbKoaWlh()y!=xfRH>rfn64 zk(=H3iA`pe9A8>UO;j+f)p_k!WR^lqdCO2f~jUnG4h&9AAgQkz-Qv1BNa26+| z3?T)UqL`$!h8U$3V7bY^n)Ja1hAx=XKB{q3ueItQA)}GTOFB3f*NlAbB?!2(?@A)k z4O*STmL<7Fx=+Y5?~gwaLXlE*X>yHcZF#aSRe-i%oF1q5i+Tb`N7dl`9}pLhqZ#P zWZ96eKV1yT{vEEQxlhvvC{hG4nkjhv_Mm(Vw>=TZ{_;TNH*#d|i3PUO>oa@`K zkWZ>pKrKHas6^!U@jY}*G;A(XE{oI)iV=pW?BW@aKjYUNF*0+Xvtgu}D48#i-Xo!i zmV>zdZTh~T2?)p!ZUzp2b!4-lhZ0-@WY8rVBpO!Z{10O?$}pmderxK 
zw@MIhLB5Vsf%_kJIZ2=QkCRQ@HHMxtPKbph$AGG$Lsb6++}mXcDgq1ROe)9pCg zo<>Oz##3&1B(Gd9K4xlk)kJ%81*Z=02{kNh0xst#d~W5ZfDNtE1ZxHkr6nN(tMg(+Zn-U(}0bRo8?Z$ejjEWu<{c@5Aq=EmEhs z2Z`)&aLGLeme)b&X;w^}lw}CF)$|_(aGC>o z?#_$JrQQcKtp5=A7Y_h6s%UfosB6~ra)W<=d`9qo0t<~Z)3%IUNHUk;$nQO-1sXU+ zMwulUuOs8rouQXr9~kIo8VF*V#Rt)<*dn)->b@gq{tR>@#(OaAPAW9E?7OK)qG-#! zD*C`Q>uVQCV~L=Z$gIr_5R1dnCQD|(?9YQhkPt)D2tv%0U_h3mdWx(bWwJAe_I<+a zuJaUx{E37-J_2-kk?+}kLvL{(VAYw`X})%EI-#rDe;1*UFD#WL%oeYDW+*at27zgV zmSEh%V`Uj*!dc`-MVn-HMB&uXG0#^iHw)63N7;?~;d9x<$EpzYGF*L|;wmf+$q5_Y*j`Blx&fr>8^Mpi3cumlc_bMbG-+l~`D;vOieE zXsXsGfRUR}eXSNH)M8b;k6oR-G@_t`)nxY8OhS&e~sCa${-`G-fr5q-qF439p#0;Ee?m^08_ zb5t!W@*me)@=J<*y}c2%Zc*{leFVR@oEQKE71=?0B6*K|p{BLF!}Se#+%BzKWtG;8A_qdqzY$SzEFpTV|g5}w8rnoF z9{7^V^hp-^LEF_;OaMTJ1dPNb#iFPn2kEgV0D$FE;V}cT(}nv&g=le?f^n6;klq#) zR&KZIXAO5l+i)7mUSR^*Nf}!_7a{}z1`4$9F;;AjUeSVCygnkWfH&|r z$HmO2*o+O%Jw{3-PvJgwv*JcUD!kV8N*wwP_TPnyC}5bI0{KUNse#oXQ+ z5O$@BImJ?8x!f=1AlotjxnEx|2_$xosVwUC~XHigK3T4#$8=&6y7 zI85K?r39`5+!I2XM-}U}RN-#}*m38X^Sp?}K6(gty@6nqg{s&+8fssPoBq}uK2I1|*q{_-q+%7~&2^uA>F#1psgmAbT_^X3v z)f6uqY5YX7=wILoUZi3JZa*NtZ0b}DFQ=l*z4L+hF2Qi(b*i;+KJ;V}ISH~f5XvIm z9%TMUkFvHP5WBmfFiV49GForiwbODZJK+IxA$jnHNI75#ypCbW^d;i6wX=mK)p_gg z3#B6pGVesk0HkYgC!s0HOP^cylt$mJBH}CKhOiuRdsV`Q_E4;78RRCq$9~wZ23WM2 zmRh5}>rcd=KhZiGiFdB+op9#p*3!fnl`S?Gw$pLlHI@f|Bq+Z%J&~5|tZIiiqbn?2 zt%lTA`%7(q%js$Hf-tbew|^z=fcHWv`c}oX1GH2ay4KXzGc9s++jj=ZEJ!y+X;gvn zG)eaD%uL*sLDfiA!E8F=4y?N<6E^#0iPK~#YSF3)_Z+;N9?yY>dxU<$>dWw`QE$(a ztz_>%yIC^BoH_0!Do-R8uA&ZaNiLg(U^sdX_eNY12dpeg{H?Z!CR!Mfl~(cQ>>U9@ zty(IRC}!aVC1wsv+9KYVe*ATFV_xbs|Lfzf8s;Qr@(mk^pM6QuP3%P!2_Wrke&kD` zNWE{JGSBGTB0%xF;xqzzjlFvw`QSa8Sq<1r)ih-gD>)=aVwg_>K%_<@Ly+@8x0e4d zSW7FoD+@NwO)J1GW9bcz5$!iq8j5`Ff;p4|)zpNn_OH8qHnI)?&3n^V$j76%1)B{F zYxVfZYJcn+FBZ%kV0}cD)(E~KTybCQBSaA$X}u$`m30Ah3IPhMKoucC4+yXUyfGdy zDda%H^a`K9-3HGjGeveeEYRq~|Noo;J^@hyRslmwoKA(p`fMA^Mj=&briCX$ayg{i zGmeIHN}XqnjbB)|({_`aqnkO+uQV6BYa6u!U1 z8;`{cBlkeT=Gi;;Sa&w0@H$5nD_2jmxh>l{fKalPmD}8EIJY5(XJ)etZ8p3bjV17x 
zg*NowI2`{VY%_Inrku9OsTGH55G>Ag5LLXQnLSM$#LY&h&wftv=OCm=MH)$F?68c- zj*;xxL5NW}s9GDN$tsOlL=wk1#IcSWFD&ARMf6aJ7j}4|utS@0SdLwfHjSEb>&)0x zjj^p=ECL9mV$>;KQTPt>AWV2#$X0w&BWFhiEx}h7QBhN18yq&AY4j5b^awbDy?tMeeNJNpfY@ z&LB+AJwth_n@J`+>rThP`8ku3K8`+S2uAoOXA1cMB*$_Jv`pMv<8r19IYO zwN+|+)1HmCTK2^1Sps^#`J_YE^w%J0rfTB?2?3!G00II>3PY-?Jqlv1rn$Oxfz{49 z4ZDWTDBPU1q*S%4Nu_EctxMx2oto+)kvLM4mUK8|BI3~V4@soBrAM3_XBT(e?;g*`Zf55IF7xWS=Ea2@rtZJq{VNOq`v(d&^>)|I>gl%WdG|J7 z(#zfRx68b3&+Qv;b`@XtN!n*Gh&}f3WAi?v#{LtL(vgNmYC4rOJZDcE*3-@!`Q;Nx za9ThCy9G|&sYx4Wdny)amLxz05+o=zNNC`I1Po}uMLSu-k)`7r+jtz^#v^u+5W!e{5)+NgAUf=JzJ4@BC;o#$ z+=_3b*f+mH)W(Q!tY;z~i`aMP8H8)2@}Ky8_w|l|^VxUjbKPB>qA%Q|sHk?Liiu1X zDj%8*00<z*le1=dFCJTCcpm`g4tgb*A=XAFWRuT|9`i1n(E^B}A)Hwdb~%qRV$o9>hTpN3z|XDNK_-bq9Gcj5)BO%(ML#z{JR;FzcV%RqjhFThC?+o zID;a`;K9t?mD6SDJQ)&yRfa@f>(R?$Ki|(UBtOkRz5SAf=$kK8zQy&8p&1|d8`CE~ zAu{o<*dw0M=h{E#I=f*uIy#%PBlp*v>T;(NvO2hus-CFqJ3{Aqq3KOxBSe0%M%5Ti zOCv&Z58}Lt&>&1~-@dil)MzJ=z_)wG-EL;Lh1dVP{)*``78?Kaiid|YqWHg{V2qmf+o|Nkdil-8# z;b6KTBi=;RO@rk}*+d@i7XhR`VmosL-co0!sFCB70Vs4QRm$rCCD@0ewzwqB;@4eV zJkCg*Y|-YJ&tQuwwLP13&jz{ofaXoK3Y?8YvANQ!rYAPEo3X!Q04T_@uvprQlsXl1 z=2vgBRK*v6-VFP!<;DPIhk|t9ll9e>$QWKsoxs|G-xv9)N{9%NBW?hqf{dthe(J#U zD}vYpXsfh6rGrN@JHs2MSuuOb*sIE581;{9wBTz<_JnFS+9xuinYdEH0kQGS8 zu@#{HUrshA>|N2ysG>|?CcR-s=*gNTHqohhB@Xg~Rv(mhHC&qOxX zPpl*oH@U4aWp65v@dp= z1~K_~0nEyTk2NVarK4JmhFB{0K`^jl&FLvXWc3h4SP+jX>02--Yb?bpLE^(zpPCQJ zR9q;9V=f}%JyeY;>HNaMFSi8#MVDS*h>vex5Q^Dwnk*Sd?1XIFk-t4wB@Y zTI0>?t2e;A7bnNb%wtkJ9BD$raM&f1aT~7w5dqa{aGpU*L>)aE)SiMQWr*!;$?xpY zC+191la}N(oqC|CwLSe*>B#uP2w~|_H+J1S4W+sbMdXfE8}T-s1Ku-L>A0V_Qx}ht zgyg76@$|`TE7}?-Z7V^aJn5|ryWK@Lgdy_S)KDNy^1@dIZFaYa2}9Q$!~QWE7)T;Yi%=K`jV7@5@v-RVyBFqW zWS`*gA=eTTJ@$NQFA!m%3QRy?_{GJgbA@qud^A*KJpKy+S&kGXsoO{?CpIQFb_>NE z`4|^GY;@^363F`vM%Cz*crOw%qMHM6Y$r@i#FQkUpyr8|K*=P=Vz<&Xbu#zzzHQ;PwWCI(j;P{ zksfvvdlxm%C$v9P;-l$ba-MVz3;yR93e22iZupc8I?sOssMBi9Wg*XaYArtFZy(4P zKwvZrFxr}qp7SWYh!cc)!Z~t%NHTI~rcMj=Lj+M6i4YQD9c#psL4*Zx+yPyjyM~^~ 
zIj%npXf{(+juRFmKrQI4kcQ!_d31Ob=R|!he^Vdcd$Jrc5 zl+$D=f>v%j{lJgTO@S$MCF2mSQ2yhVAzyEMxNAQyIz5V|{#a2GKI))Mj$p0G1Sfw8Tw)VR=cDc(_jXaBnQ8P}C=&5P8mz_nPBaQ& z$}!ifG0@adZ-+IewL^aWbVtn2qzS@70^tQy?{wp+eQ!$orcWQ z{o=EJ>&|->1-T!%MBcKvq0?dzX7IADi$;1H(ExLahUc@7FkTEt!_jSPUxjc+c$ZtI zo?O%prT}lo75UN@D3mEHDTQ2;8$CHHylUy>eCQn~c>|HFSJ&spX6ARHi9_}%{juHf zqjz{TJCJeCVy`AG0G%@la~BNb@uk!*d+L#kmwKF`OC74U3}};Owzg`sq!-|*<`1V8 zkLxm43N@d;dRbJAK+rX_IMTKdC?3-QE=56c4~&O{I-2fpD3=l6DYVF#b<*WLpeJbM zSUX{LEZ=DZYg!O-Q=#c_&0&~Hup=(0nsi^&=asmN6RL@ctDdwJrwOxi991qb1Y@BE zUg^d|!tX|{O1`Es=-^ccPXvIAt3^^w;EmC3BPX~VuM^FGP$7K`Jx0)M zW7nL}Ot>drfFBC(jGS?S!e%;gq+s%d;kL?(WLw86#MoGv=!T@-3f4{ zuPhb*jrHM$qPBjUVuONG0zzMi9wf;zkY16C6Bj?$2Wc4p$CitmNb}e6Ev69S$DENp zA_9N0SKRrc%?Zt@I)vvX9zH<(z62Vth$sVo@K7D!FbH+x@NbK7dcY^NVEQd>Hd|`0 zTaqtNeMk^Uh`3=yG1Pfpg*AtHDb$m|7K-e^ga;_ua7@vvVKddU{v~u(0z%g-Kf6%L z$V+YJQ7o4R3~T=#3G3gqo)!*1O8~(~s?WWe`Oj4~wJ3ZBoW`XJ2?B^}WgpeG$Pg56 zZwHG&4ApN`r#__JVSO0av*$FatQ9CUg%JW}I}|7b-bkotOY1bDPDEC!T&uJbfs4p- z6?X_tP3jf_C@6xgK0*<+pt5yubP!}L6HpVhLmtpcrljSbn?X*UMcm7XpqLdN5HF1f z&Y6SNZvgb2#aU~b=jFMow{kX1TxAF`@sqKJdcDK;L4Hf6k2&dO^VYzH0eH>dMbG$0rr;+8_=n)1IP zKc{x7?^B5z^imzKDX|SF3q>QTcOM)A!a!f!BB*+mPEhr zT^JFs3#u!kM7GZ3b!i~jMuR&y;>7zT)`83vCk2}$3?Br>?wNwuQ4rGHkaCqT3J*T= zft}h*Q_V8bpzx!vZ%tQP(noR8E3=zWGz8L)QK)mS(q#-+3@5AEO8!Wg7RGcre5#&0 z!8__^E2%5vCNLz30ji77QE#++`G+=RbqB~XjKHCvrK^PfL%*|z;;VaL87W2PB?vtM6tSq|G`8y#hE@AEg={IjlcvXXXK#a5S)wi3y;EkX2xW6 zCEL^s@f^1QxLrT00K@l)w6XU<8^HyCbB9FXm*Ys<&!Bh8wj0IcvmL=u6< z@v4};Z8SS(CuYa-9%0jtiVF#v(pEU>nd4=s(q3}HkLGZ(Qmg7bYaZh_i*V(dHIhJ1 zzG7E6ux+=^xB&!!WXo*oK3M=g(W!;URM0Za6k4{pL!rF%X5WN!I z1&-XhV>g2G5Q6BD%JjpVR@hA@=C+4i*MPo$Xxz#0Lk%j(WQR7v1fQYl4Y=WrZtoWXk@eNWI9Y=?CBvI2}@7N`;g7rBCu0( z4e6)BuOklJcjdYWqEhV}nK4u)_6~iyDl7X>7KiC2WOxGyls%w&{b!uvwUXj5(3Hlf z$pNA_p-id+$-)tsXvROef`{ZTbr2FqIW;z5-l0vqW6IChVspovGLt+f`_O0hK5xZq zvM6Hbh?!)^2scZ}Mc23u?WPI1ax)qc@K>@@E~gQRC|(e;8P&{F5Modr z;qHTmXZP=X3?K87NBA$1f;u|kulj0DM8}>E1S`l-oqdc&e~hbf{6ENbHe<|)=kK|U zfC=*lACeKq8I~=(KgQgioC1VG_bX 
zDe)X%I<|ybRlOa(&)GBGNCAM>*S{TB)iXm3fO_ zeG*qB{FK;yz;_Ed?8{0fcZ4Jds0Of_YbEY!M9Bdvl?1qC)P`Lq0%D$eSQ9KfVk4YN z#&yRDk*hh3S1_VAL;+vPUm5DFfJd8Ld5@pS3WTxGczl4?#PLQ$SnI-WTA_WLk0o~2 zJ7US9pacV8ndU8d1}g@T2GxM!H68>+k$bODH6kTUkJ%toyUOJ^J9jK%&~F{vwOag6 zbEaGDHCo5e4W`TD!zt$AQ)rC8PSXCL2X1q46~|E>yPWjnC%o?X@NA{%Sz#yV^S7js z{v`Gy_1b}QB@NKw069R$zn=L1ps7ZbpBAbA4v2(2YEig_A^|K;(ua6&A*KH!Fa=tJ z;|S#EfX9LahIn%iIBm?U#Cm5mL&Z-<{ued*6NcCZw-u`x(;3~Nl%A1iix=0jE zd73l6@=nrUO|0uk>q4;sg#e;mAKU)~;$Y7gYLGiD<+%~=lL9w2i@j0DSpxWgZ(PBR zQt0_c+o^yVwd^7xuujEOcIcJk5TN|86gOwy*N9jd5H;^IhhOMZwr0Sq@%VOG^Wxfx zBWs(Am^0TvDb^{K_qJfI`UWcsR2?Fvl4&`rcTPY9Hz8=DK6hHM8!xAvA~l?6CDyiaA`I^@-!pEx;qw zL6g{8E*r}3V33uN;F*CnQeI&rGK;+)k!xcNrYPt?x%f1sSC5vH(t0 zB?h(j#bNYW?l?BS*2Sn~fF*^jA12`QPqcvVb6!Cuw&J+FY>LPS!8k;E=QAzBKf$&^!hW4+%RVUT$`J&^e z#w#w#8}n+YoYB=dZ;~3bP#Vw;4GP^l{lz@98qk30;CJE+>NRaiJ(hK}xsJt%Sf1o&o7`QIqU$9*gk6%%E?ePT)cW zEEyv1L1L6tXb2oRRBDmvxR$vcl0W6|82$r|RPi&zX6&xN-*$t&SXbE1fX9l2cY-0q z4sy4Rc6zF~G!sR_aOuOnMxhEvTq5by$9KEBIAq9Dx-;JBj=PX>9W}lt(~0 z?iLIuKuFfYEQW3O!uSG6x@;-li9(Kw7}+W+*A-6|R)A)~oQX<0LI8&3W-UD z8O}|Ee301(;hu{qpS|r4i0sTNWWS(aZ_DW9PHDLzc|NOB$=2OQpw-@p6faBi%PM$+ z0Q1$Dd6ET?85$={ItxHb6Et}sVl#GTDF`#G$_6K1NR?`9kJ=Lzsf!U0P%?sV=JnwywLy&` zu;N1I+2P8uAoa1Lj_HS%1a_~WMSbBn!C)Riv@ATC&E2*86r6Vr!ED^6Gusp0; zWZftF(&NYtoa?(}l^p;ODw%;kPSgVwgI7R5a z(H(>by-_*7$BKnsnWbzcj6}+i80fDBMJ3DWu5Jq$CJdA4lW!H{1S#YKXBgUn9g(1# zvxk$Rjf$GVYmd*YVZ2)!1|`u+qzOo#sp*y?HCiIPV32attLVj=Ks+>@Kfc^Rx+_uU z>rzTa!EEE8syjZ%$ahNiOU(@1E?WJ0ttCl`qOeXbUW38+x?Ut{VW=4X4imkWs_DQn zpO{NdoI)u3YqiYRtR)_r?r;9;W0VQ1HF|sgSwSsP@i?m!HRI>9SOG2^Cka=nlddCv ztIAf!>){Ne4XaUQyp+|PlpvncnoxT<9n>>~fyCsvFA;?4GhqceavR2$kAw2^h+!GS zA{|_pNRU?`vD$p=P5CMT!tk`3Pr(oay_GZZy{r(0VyU{^c)OutQq(|tV2P|O8QW^! zPjYVJcbB3Zqh~<9o2e zWitTxAk5aYBB2aNPTGz-8c_9MbM&HRO4#Ce?k$lz(doXe? 
zxRWO_QWId{3rdZ+1s+Vm!ZA$ml5)Nv19(TLcdc>c_N5B*lF4Qz>lHZDsU<7)9kQOU zlf+YpYR3RoFY5OOB!wosly!j#3%UA;1vWdH`1Q}<7x}V$C-_v4{T+UkeZKmg;^_aUI1Y2!Wva7 zls&Rzo;N{)x`AR%YLw^(5|CY)FY9BoBg;-j=;NEi1{S@C#L#<15_qJOd5nUp{fp1V z_=R`^RS-@vRuXb(-cu%TrBxh>#~dF)^7g&lzp*QTMvM+qtg6z#j~N&x8i6-6%t?{q{rI_pz{GpF8m5N zcY#$Ey8j*2+^!J3z$pbP&bY$okMXFpW2TvE`7nf>?$nRIP%P`s zPP;OJ;Ou}DZSh5#RT1Po{XROY@(pc~hL~pNp4r4y1^FSrNJA|`42u95g-Nr?q3Q^tb_BmhOB3JwISDifdyq;ke)bUa&m7;j^sX+SHYX1E>5 zGKvO6qG*6m%qSJScQ7)>*bsdE=>b?vfCwRtC-Cw-t1fr~Ucm$S2Y<{A_!A&dB#BKN zJTzn_kN-xpfUr%(#`$o-#vZWE>QJ}wDf{?)B>@ANiyq-#rADl)N`zA7wU1hX$7~vU z3kFqyTg<-UX zQ6y#J7slLVo;9>E|qqdv29j9(H=jg<;mC z;<2UUqb-d3ZiZa8IT5PHNF>%lnrS^lGvtGwF(n`(OiVuqyE9+ z!UZHh4qfTgzPHjrWZt;vmfU-AGr9FR;aH7W4b7hR0Y=x`@MVXAZk7%W-7E8Gl@mL& zm(}~KpFgG>jioy_mVJ=MW8-IB8|_}(57X5>OfP<-_~OY%Sg2c^AYZgwG}00h4Jafe zBrZfKBzkm=Y5E>gQ>C$*^O%P`<*7{FBQ#}~DyhOUcWz=Okt?O8j2us)2q;EM(S#+D zirn=Wx5Vt{_P13pj3P%ELwYUNj1kfrnA%k4{?b7}CasP7JVee!cdOw+yoF%+_$>^ZTNq^{voo6vm66JPb#*nSipqC= zb@ko8@`-J#tIt2GIf<|2lTY^C&An!>#N*~XWI0}sGJX1dRx+$-HO*Ove$nSyF({}= z+iy4ad61XN73HWm4j^prv|gJxs$FMz=*>iKRa#8o4UnSk0@9 z2!MClmo(U`9tph*QUzTqjlVOK@dVi=iVR zh=7DbEMQ1OVF)EfxG*A|1;^qnGy#Ic28s+65)wX;AR$4(016H=zyNU}0)q+)KwtpD zfWpCp3mFzrG*qb2zySx$4apf90RR9o0001Bf@VUpToE(lUS?cfnc<4rj4GRr8x<)D z6lg$EJkCf38u)9tFwX3dS{Ow(y5o;W@u)M|Il{tSFMn|*1&0S*z4K`__gUytrOPjE*@I3Yf~A(26pbZj$xB}1 z?c_8$N*6;!^Zuri`PEb+>Z>1D`LfLs%j<|U3xEx;2n51_Fdz&FgaSc87!V8vVH^m9I1R@EKr|G7#m7KR%1A9N zDq}b;7$pPCkatM(K|w8*-v-8PLp02729Cd%4-yv8glR8~Xo&`zaI_BG>Hk&Ls2N>! 
z5*rde>`>D|nQ4`j%xR~E{qIp$ta_q|`|OYs@$2aWvI`dQQ%b3&PX-Z`QtE=_z&GzU z3dFUCPCrWJ>st_H;KF+#;X@}E#LLJgtL`_V5fh}kjK1e5gc{>D0LwYjhm;C>I=tqR z#G;fHKmt1ARW57LNJ`8MVWCRB#@m>RQKWtBGMWbfKY`L8Os{>X6T+QH+8@*NHPS0k zHDHd|4DNwPPGB7fk;+7h*w{)z@8C%y;hA%oPe?dECNIOz6jn5c(dpQz0>6o*C$0SeRfhp7aUF*_|4v<|iQ5-eo6o zL8z2Ap%P9{xN&C|sBVfEb@~EMLr%hatlDvm^MW=E!cOD=0@Qy>7)eS2O>ekKdXzHt zxjXCHkSB*=TbHL*t*}X>cxtDnjei>5T3rkjP*f5zh+CToeyy;xRU0!Z-3B% zf+ZJN$y)K4R~(sfi1WhNrY0qL+yU$drY@@zj0lne)sL*sE_KIuWy?A@sRsBSZ2H+R zHg7?Qe}vR94Vo;MdxvEe$*=ZvF28-ONLb^{+IX||0*~>+$EofR}R!7wj; z&SZbv&?|{TvS;0l6^8?wwTZN+mVs$%HjKFCL%ey>HD4Ao9J@Cycr9g+<6H(pQg!Dn z#njV-nONoM=G#QK9`dzx|EmbTlrI@@-0|ClmO(*A3{mea)~{1+Omp0r#kl zM;5ARk8&0&1=EZvW@@BFN|2VoC;tSwq>I+um}9UVD!8uAlrI^83IT}qU<1>Yr`RW` z9g@V}TdKQ%N~uBA6FW%V#}$`%SoYlgRmQILBkoc6VDZS0F!!B0)qO`EQSpE+C}skC zPziun$o#EQx29k5BVFyl2b%wmjvnA7y{D~=KxHAyF`AvhfK&tfQ77lEQ(cNEr65Tc zeo;3IVRWH~A?ueiT>xNnllggmL;LbegBE{>6yvTJbsqU4Di-bBd|1RAuRfEPK)s9j_l;#B>{C@?}}7J zf%lDllfw1Et*~qY5A*XJaH{H=>4Z)-_ z0HBk)_cgy=u@y<^Or~6Puj=X@iQb?hJ7?~~W!@BsPHc5A*3=p6fAO`g(&fZ%bk%wg z-=?F)`Xmgo6%m*V>|wjUW3>3Pca6iIdbBOV99f>rkSw+iuN8J8nZ9rWq~|vh4IpwQ zVWrk)1RD;YS~?C7a{nOEs?hLQ(;|S^wi48bW?ODho*Z+t%vkmY@u)c>LXqkeuLnw+ zlqR~(*_mMe5pfc&baN!TmeJt}X&M|(zy`b}nM5M-L7+OJHNu%kFELjTSV5zS+lwS2 z0!ft-5L6;&%*_>tW%>RJhOBTLdhtT06SijFR!vX_mXfac#?hFJxUJJ4*f7jssJJ<- z51$7I2{MgZmAhQS%mF9-;kcFikqTL8(<3DA5H%t=9gXU(&INhi%{y zAslau;L6dy8w0x38@SiNf-aStAzr5?H8?`}#S0RxmH7nf3-0Q{%o&`#eIG<;JwGQp zILt2MhLOqW6_77#Kb`~TR~<1tTd}c`8Q$n!gdJVCR%v0#3u!{Hsm;K zO7&n;o~>mv)H)~$HV%`6GU$zHT+HxqXMj6kziPCdh3+EraXKqkh`?TB=hGkTG^T6zP6mnA7~B1V3l`pVXZ`F1dr+_L(!hY?cl?pe_gy8t45`0L(f2QEU%#hU)Rq8FA zRqN(r3!qK?ehWsW?*Pn@BTguIN4gRI{BHf0cFquU=(j&jgnJr>C|1pV(uF`z0YSH6 zat?*XP?AVybdJVek%(y3Z4K^qUEn$aXeJD~Jyk&#(LkwC)k6999U3m;LLUge0OnVj zviwP_yZ)pO_dt+@QMLe2?*&Jc&VkYZ%v3MRfw@iwG>NEoUbe)b`&UW%D6bdENw2)O zGn>8ciNXOK0qy5efc+uDb!f`4Moqv&#H_%nJ|pD`9#U5lsUzpQiQrvX2hUj~SMAk2 z?kc4Rw|3lchuoT1gxvM1An8lsKv_b*55}n@xHUMTL@$HBLq|)dY9Y024>*}ce*~_< ze;|Lz4LO*!URfV9AV65yk6 
z3>kU#b=weJihujYjZJ`gVUucK=7GSkcHP|d(>f(<5m;)u&K;?80NY8ufRZ1s->+ID zzpA1dz^_!9vbin%|5v#{e6UhE=cW{3Oih0sCu-VU$)@N!jwX~XhY^GRWqSz~(?_#i zoGk}8LMDLAB~LPg2)VD?U;~C54Q_j53?!n~ya_SYn{z|1`PGDPWei_!p<lK!sUKns5X9qc{Teht0}v@%XW-9n9o#C;-RRIoir{ z;w}BztviE}o3THyYQ8=s6M9_zA2(RU&|Uy=R;9rDfWzMkn~nEVVWp?>rpY8~AhMF6 zTK9o%O-hC2D=)u~q)jOOWixFa6={FEF(O6>sxj!Y#eUo`)b%MCWS-cPDFon zJ~sflN(>@{l3CW=TT2-mVD3Qyy|a2rtfJP&jju-^BLmijwgi45fg!li$oOGP$!4C< zu#o!XvW0wvdutVFiwWG+dZ2MaY!0Fp3SSvZx@MD5jOeVi*?(Mi2Jtb{5JK-L;DayA z0!NvDtBhbNe-yVI;0snh4$IhBO_PPGuFkpbQ}&=fgQH(FjYxmR5aX=GI;GPYWNFCplE)EY%{XBdBBvD%7U|av zR6i21A}q`TC3t37{qW(PKn7s>uOCJ(yQfd^%w?9@`netto6?wdK-dh(^g|-s91wR2 z7S9NBXr?m`gCK*W5Egvz3XKTzlggJs!z0Rjl&=B1D%vJ#y!wYr zQ=V-{KLiH8{}&4oK4{@d(Muc96C6{YAJ ztybYQ_NejL637cG~*?K~%Yu zaYVBuTos!Jmfu)ADH0(DV^3q8ql&lD`HR<56|;jRiMyW&iH+n=&-dTO!|Q_tK~(=9 zexwbRnE8{U@UdrHMHV3s^QGX1fG}Kb5{xx)Cq7}B&^{+cLJ1_mGyRXxQokNUo6AmGmVt?N)3Nv<+*LJ;X{<%VAHoz$3+s zd8(!GRUGP=qZAy>A9=Irt`pwZgh;Ds8-chI)>e%?LgesVZjceD7Q8Jh&euEl8cq)j z0p2lg#q)`4rdri-DE(Q|FFE5VA++eOqHd?p+i7e(v1AFa-rI~ClT<|n|)#6KKHH;8g^xFEoU5PS5Ap`Hj%<(Ls>{xD4sCT^x=^E z!mFW#S(`I}S_*uia5$&vRO~fQ80qsAeB9W0-hoeeXv3*nQY9M7qC1bTyNTONf zVF1YN_K8`;My-n@-9lwHj1fon=fT%9?57k9j!mj(Fv@w=L)qjB#eE~yPV$eUBF(4y zjv95P>T7pzsy;xj=}->y;@St-n;%shNQ2dbnTaX!wyUQr3Bf%;9 zefvdBYB|Ls1zlvL6XunV=j=j&d?yPopwU5lI!1!xN~43;rO&=}I2`yGMK^GVKcmKJ za!<2hVJirbRoY@9Ig!|uPA5WyDcpZz&S@NV0l^#T290WRQ@WQ6RH{xG;$d>a7(v=n zQyD-pGfL7Yf>%3`D*Q7+Gu5V+T(}J=?{&4)sWt z5{INtmy2Ks+giuZj#PWe)x2fL2<0ubsdjb(SjrAG#upRc{837fVy>CBwEPiUmZr4s zvt=HM?t3w8yK^pRIjm;Zs63c@1c8buYm0i)lOSNIOW?;UsOP&rLE0d2-!$OhEFav( zaC>8^&y67z7OV=L|1kPgv zAa(!;pq0c*3MPlNVeO>uh0;JS0Twgp4?7HxGx@~DlcR@p0@l8)SKu!t2v7><_~5OM zol$PQH`au;8#0W@5WF@W;PmJL45*+q;*npeS|tbmcrd9E=mlGlL4gUBx)_{B8rtNl z<1wO>5Rf9f>B1nVOE62C4ZITo_svKvImi5gTgeB4N=!_rPukutq3jpyg zH&vpX!^n#tW>9&Nh*dAxTMrlZ>aWc$k%EQ(q{NB$Im}4{=p?k4BodRGM35E6jHIh8 z-#4-j7JCcd3W++BA^__NEDG%3u8OZL?PIpl5Q5chVb3o;Rhif$48kSN6tk)d)EXdZ z0#FL@+7($ReCHxLLw72lF((?^AVg6b2Sd@jvFqI<1x}Xa+B((k75Be*=Mp;+$Qd6d 
zYho^*O8?gRmnHAZYo>B?G2ii@yITwinWU>7Hlqh^$wsN7BWSO=0^)inbsnT0}@@rENq}H zE_Z)AyE*m2_f@x(kNgpfG;y&lmFeQ!0%$4{-P$L|QWrU*yk@9i^r?JDM5;A)9(>>n z8k`emMdwxoK^iBkVj0stk<5YSPO*~uz8RAVvs@WgX{8JBUV4wZfcleZHOz^|NoaSW zF^5#(nR?I!@ddGVt>Lz4FoTiN37_#g3`xi#TH395Ub$-vCV$0s@XbPshux zW8@ii%@8Jg`Y415W!lx@6F)BK>JmFPcg{$w+xxVD20uN zC(0!OP;dE5UH5&;R4$TCxDXz0p*YgR#VUg7 z9NSPLTkemJxg##%b#C>3jwpE&8LI_WsvgF#*{7lZ8FBE?5PbLGX z?59kj0H;ykpK4*OZ9*Tmw&yoqKV)+TcNDD(dgE4xabRYUfm2mAo_?1VyA(1l$Cff1LEXnQC?+IzN3N4lRJDnzqS_8(@2L zSvNkuNx;?GJ@AmFM*dUJej-8dywvZ*yB*93xkOefg4_7!izgye5&ojvM>GHPYfX1% zEa6QBD2P>&SkeLr6mv2?hXs`8p^b-cGU#{tFwQ1y!lSkB7^#*SCY{Q{QU~s_u&>_Q zNE3G3Mws;CQ8XcHAsWMQBmc8ewFCb;}O%1y=2}5hr8ZxSa%iKDxjIy zGQB+#_{wBdrcF-n-m2{zBSc{*$+Y`U{s7<`=HF@h@BsSoo?N!eC=xEnB2=wL$bl~I z_>Kh!6QOAqQ-^>&v0p&?`Ror~_;YDM~{RP*<5qbY5g?pn_(2~-y_eFxiStA&ZIq|Dsbl|wGnnrTg7h3nSQ=~uhT$9N ziGZjP&ABS!0$#whp|(gP=?apuEJGAHx;?bNGG{4jDi396W{A~Le=G&i@x;@>X3*No z7MwGuA=~CQw+UO2>x>K!V8@3$5loHoG=K}3P#k_ddV64~Zh76SAY3MHb*$$U`8b_u zVt$Qc9xz14U0s|V@JA|(I4D{UzAkT1(ho$_yv5{)!LYIS%6ddVRjw$aF&Sr?|E>G_W^9v`Mm#P^tV8O>KM2#0qA89UnQ;7}5tkI-E=V%!Dv>x2TD;GgxxhK`oc|LRu}(sMQ-6eI9u zcV_TT_a;-8kv_d-)jg{HZH_g3Gj^OWTH+(=2aA9lo*Z!+XZ=kXPj~1wNm3o(oM6;``TQnl^)~*bq$Bsvzy!USgRCj@h8ovf21gt1V;JnSvK`SE1nJ8UPcR0xEe~eKCF>n zgVBnB7QWOG2oqv4OK$P%=>o}MGxF{My$rwxa=C7n0WJIOE)h7(UN`>Pxzh~j<$U(l z5x`~|_sl8%P;!PdotDxyX>fgX_JZiKzPBh10}Mm`2K~xn2`uXfpa3K6p1RPkLLMvW zKJB9YzRSXBhe0)L^DX=# zc2PCA=wlS@lLa8tz!eVhEXrMs^4>$mUfd^`75)B0eBXO8C7A7P#4hRaw zMB;*DF`@EkENCVYheH{FK{6o`U?D+5g8>N~EFdP224c}fM9@$sC`>9)m`p^Ph)M*D z$;1KzVgLvT0U*ty(V#k4dT!OZF%i`DHJv#%`U4jx6(+whuXBcs@BC_X*B#m}s@mSU z*kCANS){mZDi<84rmCuFQrkM4i$uBQEz4QX!^=C#lBsy8m^c&7=wo+s5fyE5R@U*+ zHuL=I6sl-;XLe-D&r~?iu9MEbcdjm&1#5=);O&3 z8?++=f!yxMwUe)1IW1rLIet>YdpqeCdr`TlUK` zO|83KM>gA))oAn?&%Cblq0{%>jYoBM*%vD0I#!1E{rs=}XM3o?5G`-L);+W!ll%&mcT-K!Gk9 z0|XErIB2L~GQpTop`jqrGz|m`i38(;garcxg#uzy!H~fsGO=*LXUBG}{+U|cem_h* zshw@wPt%CWv@>7+=X+OaX6MW@i+-hxe`vQIjgC}bm?Wo}-W8hOouQ%W_0K~XFc^#h 
zCKi;6LP9L%?ty;Ea&ou2Gllo4unDSthf@;Via@DmXY`&_pSZyW@{0CeA_Gww2 zGYTx@){pF8Ku{zO3vsXJk*5$G8 zj&=Xp;csn7WYnvw($@7#x1I?A05f*Z7DksZzgL~nkLpMNuXpO}-L1d=^80z{zMXu3 zVF(wd@ZmPCU))+++toC!_g@&prPM8|*SYT&Ca-%&uh>1JV-`Jk`TsMzu3x=^es$Y* zVf_2KolInm<}+H}{d)PtGlf5sddj3NjMce4<$Ymt@o0M|9^P+}!qimNq(5~w7m0Gq zTb2_q@0|Sqm*e3r%x`u!yF#-gQ;smtu9x3$`NBN!SEc$N#9El#-L>tO9g&%JZ@-ss`R}AfEe&nmOgQe^SIf6q zWX%MhQjvAbROzbLoJCibL}!(IHLicn?Nt8@gA4Qf^3-Gg&opBGF3hvFB@zdT!^w`Z zPKR}PS%*)|-?6-Ir7odm9_JVHTI#%2TBXDRV$pzDNE{lK1_BF+n$OEr5m7M}H4!sU zUNdns6E$tA3Q#H?vQ_tA10Q)~#L>`0=nmV)Xd(3nHZbDmkhamLnuiUkH3z*>$>>5nw;ex!O*y*(qPKwR26C*JNcl^*R>hg1QIG`u57Ds5?K001BW2><}} ziP0hv!7mRL>>aXghrW2&2h0955?@;p@I))aMxy8nD#JWmc0WNi?*Ri{sn%AHL}fq{ zA+3d|@JuM@uAPW~yTyi>ZP&wtb&~4Czu9c?R%wa%s z{LzTikoY3$y6PlgtTx`cs;a{>;5x~i=1mqxsy5sm4bwj3v;a8?-|NLEs3aa_AxM|s zO|v9w(`+ID1((DtK4&?c6-INrubny2mSAIn_(+t0Vi!~5BwH+E-E?@%2qdi#k;M8K z28aS@ps^YxiItk&4RWp|@u34bGvm=ghYTv#6eKTy#j25~ZVQVE0o0(H?r9Y40S1B< zNOWcbt7XD*CQApSnE;B~o;FgdHk0_AK+OLke_YUt&bdkS%aKASkql&^CG}tlT>ddk zt>*IsM7+6S$S?tVwYo@&&D!JbT>=~?K+m=sDv{_Y`E6b=Wl$BMK)X5-=hhU;B`ZgoWQo-u9pe`kGcm{jNPDzInY(-_k#E)#>4ZMmsdo-zDVt;93p2_d z1oan#vMak~W5Id_?P5~i!l~lBhdzr-#*YiomXH3*ZGody>*yS9qCo`u}qM7=nBmchrfK_9qWB1&b~Ctu{v0a0_l zlzEm4LdEi~e}W4q;a+=*Y^`xM%F#KOz*jNW_BeU}C89@qu2syyL|NKl4*4LMNV=Rs zg1^?liiNs`l7C{NJfZyHQ1|9xH0;_e;UDy9HD-{a(g6C4bbvJki^9U!RQCsx5CL>aZ4 zdyf#Igfy`=saJq6P5g{&kIjU5r6%6w52URoYM@nuo}fpbvxzt8lNzP&?>6!1zG-g0 zq~lQ7Q`5svO0-zG*haEAQK)29oOK}&mJ=hv0m9cyHao(NJSPS@VEE|7Z5w^UV5bwE z*Zsl0fa=8ft`mJe0EN3k42(u_LC3XBauIVxI`a`S58H_+C@Und=I*UK5&OF4;`@wD zhJONrY7hsiqSQZ8g8dT{Enyp?{0B06A}bI8HSm!t!6Aev?h=V;H6pGD0l)}c-D3qo zkUC2?{f~GMJ?Oy(XJRTKG5)K2sv%;Y0`)4TB(oS2+37>vEt{~hEDuc!C>}B%phcHQ zsUeT?{-(!ipcpSAC$a=Z;?jZQr=SP|gJPqW9$aY3>~Q}TjqZ>B^RMFJ+*3<~n&*bKF2ny}KXQG98zS#dRK1l_$=2Oeh+RfT zj4L-lG1Or!w>m4YZhf58=2>|@G^Ki}g(@Jeq#v#j*GgYiU1@Yp{&`xg34zsqoIp)h zD^IvjEdA;{R@lnnH5sbwU+jf`VtA?^Hb#Og*slxyiI5fwN%Q^_Nn|g;It!o}8w7+v zFIgZC(GgG#aI1U4C>z1K9${_tG%$s~xQ)d<9ww 
zjRoKe5?STag=Qh=Ft6o}cTa?Y*770HS)>^ACEv>@Zd?~7BxaPls!wEg&kQEU&vjx{ zNBD^hiAD-OW}vgQF~^d}H3NUVff$(B+ExXvxI_+M5cVWve}Sod>muw-kTjcA5i4eh zJ0epp#IB-MMw99eNZoe0jg_K`+F`y5P;1l<;96MR6NQCOd^`0oM#l<`_`YQ=n43OP zG7RB;@ekc6);1H|*az8T;sZyqL?b-*W%$Qej_^-V1ADqB$`7CTjs8McVh3C16C*pa z=LN5veIh56yzwBQfmB%H70&}1%tJ6Vy@pue(6%Sa_n!FcFb4m2Dh@OpK2bUq^z5o! ztWbU8Pm&A_@l`a7Cj09ls)n+nXA*QFd!m2Lccpw@#>!kxmX;NH`a~s~E6s7oJheGQ zBJUu!2F&^UOkRl}evZvlMmFt;<17q{K0x9q=V zwW?s2TZSi2U@L_z>ukiN%@d=P2=Pc6i_X{+kC`M7-VclHHzYkOc zXb@xBOkIS%f(H_HpqgECKqMkxdr4x10O=qc*JomWPQGc>Ynd2CdS4*wB$rsK_J6^@R)AJoOt`Sf5M)( z^qlw}G)YOEPRxw!a&flb#7;c$>D3$5W)l$}?-aFEqQ2|}&Mbb)Y;W*TN!p=Tn;Tm{MqYmW{2kzyE zYryp3l#`dozyR=jzY;Ew(Oe){Tjf2$^(FvnK$X8=!HPC`>{C3iVW5~JDh1jmJdr0} z2^l3Eu#3Q{l8vDi$ZJAE?SI+;X&Y$Y*4TRG*yGn=RkYry+Pt1OF``BIie=H$ZiG4- z#3si04Vl1vg$yU1v9&S-GZ8_9wZVYuvLofk@Mo60L3qv9u`CRE>WKmnUuZAHIz z_3&S+$;OgWm>bc=VME|@1=JXR2I-3R&N<>f<4}ljrDTaQVL)^&b5osFU~+_ziR-X02>6LMc#%KmBh94kFAod zn8BJfHAW29Wb8tEq`IGa>G+b*qZ-WI47cLBiUshna0u;Ro;W^dP*y+I{Kt1qL zCroZzk);6d>AE(bfa!1&YGTZg3{HTs*5HUsr!RMz{)MjGv>XIuqRJbwv4@B*7x?<<<6kZfy) zFd+98F~@~*J||lkiORTYk1}lsgadn|AC;Z-_s_8${wkdto1^(_BmL+ApNwG~QdAk* z5QPh(U?||16|uSnQ65%2-c|zffHmL#GQISm=07gQd_!9KIK_#ZZ{uga>Bjh%ibSn} zE90`&XQoeSLk@0vBn@i5Mj8nFi#MkEz`?ai#M^}ek>8yHr|Gf%Cs^N1uUsbqBO}zE zx0+0x0b>=*p2GO_1RNTW$%7zK@rut!LB7J2xopm(j#>Dj%feJ;B1^F6^MH?sHs#L* z;2M#vPPRfDGJses;MsK~+fW+;$Q5CJu&@W@8Jr5@ z)40p8gU!EA@xjG&EW!NQ2(jY8HeUPJZyF-UM}ELm*dPWJhc7W-g{bgs&vSy$-Mi2WvQX5? 
zQZ@(-I@r3)>!^X)`dsDsJJg4FgP<%35JU{Pfpj&qU@_l>c<`ljO3@(s(h?MAAgL=kNj^n`F+DoZRJ?G(@Ba;n81n-!!E5ZL=qu0>(((TQmUH8GjQ58$ z7UR}Yr-3vmU_y`>9TJNP0S%@JfP@lh=NR=#nfF_@_#H={q zVjh<;>z0~;wc(t`5AfPltm**E54v}p&|hnSt03mH9|C7Xa|U6 zjs6Yo3nYM8kCwV_;K5oft2uIVl$I=?U7@c{kV4XC$F%ND_b`3PM#?1r0phUP??I}; zl^X4ga?8L|?yxQoyC-f7p$OX?Y4yc@EE+ZIL|By=zeh&Cfd9eePtJ0>IiV#)L^&(K zFQ0niDyOom4x=L>a8!RUTeSr0!$d@4o^Kr`%|vP#?-Gh4p*0W_kmA>C9l(N77Use8 zx-(TwaY{S+1l4xfK=#jm8%kyX=$!V?=)sBSV*LYM(Z0q2WCa)&a0>Q@O?!N{mSZ%N z!7Z>Z%mQnvnu4_co(=v=L|CkTrE|kM3T;ybU9|av7=Z>(yon!+DZ>9Do%)wLS~xeX zBdZVMSls=yUd8&rr{^2?f-T?`?M466I>S^5T7X6#1N76x4FifAnN01waxoHCMH9er zBiWQnDjziY0r$wCq%Z~o2{30u1R{;v4z4ZR~Mw2>0C`gUl>Qi~HK#U`Jd<;U| zv&9!VziCpT`s8H2u~vZLD`gdDzE3f(hTI}CQrG05c(04&bbFSNZE8_T=+#MRpY<*g zD?PA4_HO433b}BK+77p#D!j+f=5$9b02}R)-7KRRG@zQ=OWDGR81oIK&Blzi(?by_CrdEOSfGTKe(-g z2br7haTMo2c!)FCk*PTxkoclz^vOq-HyPiaz*t!afd(tC_ZnCk>>|My{m{=2q~Z&a zt5|R#1L2)_t7f%;5RQ@vP;vItn)DIOGB<^w$BtLW)q0G8t%FYUV%~+}PNQ97jfoJj-yYMp;N`F+9-2p!n3V8?9J3H_?^u#m4xZ2!0D z#+{4+QpueJ`izWMPrPw*Bee$PsJk!G=YUzHH$|1Ou8`UaI+l?sdsGv4GS zr(4>Dy%(^B(QP{_8C4dr60YtjoFA?hu0<#J+EV-r(pTX2n8nPcZmsPBr_x+JjLeB1OTRZd;^9g zNCwx!ywlRtx!l)`{>VQtL_*P1Et{uC)SmH(>gro(rhr=!-3p~}3{Eu0z!ed1j@bnX zpN;j*t51r}8DDO=_p`8@%FqyWBa;x5eY0b*ZjrKY%dJh?nJUpdqZdsSb|f?yL%H9STh;5M;Y$3923F4eb+5PTo*aaP}JtH8P9DVs_!_mS`T9P1? 
zCe>X+br2dxC$u4&lD@ZA6EJ!+kDGX9tI$5XF60f>PF$Zg2xoO<*nin-B#DWHCOs@WBXGwp4^?)2kv#}LDf@aZs45YD43R?g zph@8+V2r%1t_-nS-Dvf-pN7p&bT)j&yh=sny-x6!EraLbl1*wE)Q|xaeD~$V-pqa$ zkv_%0rnv`0;EkuzmBeXG2~~}TaSjR-?2)% zEOIS}3_MwnukfrvGOt}H8`I?irdK$4JEwKjbpOh2N$*xHvNLc?Or??_bqD+_v^KDf zNp+&8+ONuQ0l*h~4V?;{2H)C95P*7cIHT0FvOp)WoZLkFD8voZ#Q?w{(hgzLsf~62YcF#&KFaUg6Lk@d*3^jy2Sf1eKcLhC74!#2fqn$@NpX!1lY|jxwf|xM z@)5RAdeqt9=F{kOe3Xoip?*jh0{2nm4eyS?*^*kPaxJs1BtgwB8_*@q$He0L&vmep z`brr)%b5f&6bivJV$SL!JnUiZ!3?9{Dq2e_=QD#Ys5yIQ@N#?f5HC(aHFf;*^lXAX ze5nNUshjN0EI=9&c9g+JCp4MHe&aW{E14Q0d3zf($*p9ko%jM3aOJ2>K`R{O*tpbdS; z{Nphj=H3kmh`AQPVC6I-z(3Ds0T5id%B77G%<2m5?QD!Lue>&hyQgsH(G*Fg92;y= zCYH>9D*d?Fl>?ya!6!lSiaEJ~gUVf?7vo2S%AIm;rxdjgdNW2=ywNHfisk3Sxlz3_ zy~`W7Ms6*x9Pn#cOqN3234GoTe}e{;e_!UiaAy~4Ep=LOV3I}h9^i<6~8`4c%%yrl-4ktAIuJVVgNxWxHNg{1BCNy9s6w?b^UxOiq1 zw_=Z%TS`7|4La{^SE$KTHW4JnFa^CfljjtNtzS4gd z)Ge|FzV6Ji)j3Js6Fk_(uUE^iNKb%deHxV*i3IgpSpy=|L^z*&0KWG{c!#H}YXx_; zmUwUqgkBz&u1Q963c%$BvTD-$2+z$C3RM}^Jc^8I#I2Ln$hMe^qlXFcioQnwIb}^c zTWSKpApH(Y_(*A_x#G{wj4)e>EgA6(iSovjR6Q=hxC1CPMi!28kY_dm!x*p!8^SsT zQ{}Kb$aQTZ2lyz7V#RklaWIv<82jjmU}40Jh~Y7T&E2l1WDQMIi)zoDUsro-D&FXzYiWUV+o#7 z*Pt>J*vac&`2F85Ws9;8qEdsfRH1SUopL&om%Nsns6HFg;&(T43!c>3Wo%Wc2Ge=7 z&tI3*f2RMYzdUub`$71^<9)o3chNoAV*6eILChez7!s<^bG>`tH>r`QMKh*7nLaIj zI`nB-q8=Sd<;-mNs4Me5eefWLFZm#Z|Eu#7`Vj^b#V~V|*Qm%ph^Y^P^*XScly!<# zE)%Pp7{|x#XXT&GsH?wT^m?7E`uiYwi!{ga**|um2IWbo-{UTai+UNrr60*h|ZRTd&`Ocg4V2=lJw$ZNHF4?Zj9^ZCtyQXHx z*E&^IRb*sjWVf>`E2BVy87LI>~07-{@PPWuvP zX0C>+hD@TFD7-4uBiV|PZH*rkZ1{>jp+Jj7=AcO}eR7jBl3NRdNsPv#$9RreC`qR# zGscP~$BXReC+EgZwh1lygC#3%TQjVKAgk3Ft7FFO-qy9Fq!zTrF?BJ>hsf{YUu?{^ z8Br0 zPm}G;t_GFgkBJ8l!ae!**RT56@2V5;>|gZ7nvvy-mDQT(;6V@v!5*hVk@DoTBaEuR(P1Aclf0ACT2- zqfD%9EH><{Cl*K~qLc@jwL~F~t!L{HUopgfhSVyin4_+aNwdbwUSx$UF@B6(HONJS zHwQMI8XPA_3Cj|)YFN|KrWRVc9@d67_k?z%p{Y2e{C*>3*s2H19P`%5Ezzjmx+6Sl zk9-_e3tiYFJ;Uhwd^FG5iQ0d()V3W&@s4V{p%$q{fPiIb*w|q^ym4hk?$|rknIbA; zhgw9PDU!|NIId!lUA_t>7iWW}Kx#)tK>_5$_OJ z+t!f>LH*2>30+P+?DM3Jg#D0Ndus$|pAG^2j63OOgtC~7dH 
zMl^xdol0aXRc+Y!QlV5T-di%cmrU-Td#O|^{q{MXgT&60&l+cE_Cjyh`JetXuV_jr zoA5*P-QzyyeOxteM@-kaj?PK!7@j`q-}u6R*beG#gkD3wG-MAWb7=ku(N0cg9PcxF zmymrSvV|x_CZW1QWuwL&qVbT>ViB^q&SEQAqsDDMb@RH&q8WBHog@cCMh+Qyq*VQi z{Bcx_v=wQaL02R>RlTZ`CqKin>y<~EU9F^EspPNruuCo35ZfTiT#?X`ol(?acr_wR zIU~vii5&dKlQoAzv9a?8nKvU!Sxc@zA^mXQ6Q0a<2s^n4?YMJ29z0BwJ&oLDi}P#``~ zfN@|5uNk}BXO{@Em(y(c#qMFBYk&ya0HU0r zL(8UnH8GvRd@jR5k6g;L*TrCL93HC%TjU06Dk#X)(S)u5j|T-PVkbk@;EG@Phkw$-2Jbj)RvDw zHgu^=8%R7x&|{OkSkK!4U)nVo^NNyy#$mCT00NG$Z(na6ck9`sQ`gOT27fHZ5Khq8 z9YtM&Lh)h#vjo}#2l*^F(G(p1u zdl?cH-X>$1C=*pGsovE&avyqA2IYx0~vDBY^#krxV0j;Rv=+QYW15ZqWrC~Lpl!qy!@G&ai5+n}na??rgc;;XL7XiIO zKB^k#5SD^{Sl=BL@~yJfD_P>n7Z8>D*i{@IY{jrtR$VYpoL)uF)H;^UpRSyF+~bHK zrR|8qzKMU9s8jz^v`Yh4&5V9~Y^MW67l=2=$PK!H-_Z0*FQ(s3)+Dtgx;e=JuL`5b z%W4B#@9e`CTp;TC`%oq1170D4YZG*Y3|*<)qTxb0m`F{$?0O+<(g1x;JPi45o||@boO4EG%-NPNaKni7X*)iSAuXD_#|x_aimkM@%WgyexyO z;tMK#^u`3w$pX&7*f@kCGJ#-lNF!s28iDbPGZnf&HsukXAEMaz#o2AFh*!II1*^NK zJs6kGB#VP_u@2}_tdiuLU2;@gv z!+l{cV~m^2Nhe(~7Uh{OI%k-kNXXSPd>kr{CtzJ55-BrAI)TMhFE#99(uW0uJOE|A z<>z|hwf%b3?A9Q#2!O_WYX-Q2`LGm1_v6T88SCW0O{&yF)@SW8=PnEUQJD7~OkfQl zIt4K!xl9R6oxrn&@ONytiTs;0IbZpt#d%wm04xPTb^5f1&OBR;kOxlk48B7dvX>x0 zVi5U)VTyw4B<9?VMG%~U?WEpQZ4%e?=T6QZv*&ePu&y z6PP1fX{C<<`EP;!%N0J?EL2#*oYW)oykj=FB;d}K(eA$oKUG6x}F>(D|iR0>OLcl6c`vSH$kM&raY<9%8( ztaS_Q05JR&V=d1>d|i%O`yTy*^VjUB?N6|W(KLvy_gb}^t!Cg47`M#?ReR4zW9OE= z!k&uGkeJINECT)O2)OE*JdWz4^gjaW1FUnnqhbUZ&4zF z6Nl-h^ATNJjcQUiZD$PzI?yVG`H*lAeU1WbJdMzJ&P?ftt=q_NFU*UYm>mU|cHQ~8 zf~Y@AM6gI$!+MW8LX;eHr5KoqXOdeP-~A2nRHU+7;WZ2v{6fBFBJdNz%#}rp0oWLnsPzr2d8uXD7y9yM)^#&}QjDQ*{-k4_ddtINz0XGG} zV2>r-h$3G4-465NmL;gH_5X30KEl^4D27wAk9TR$>GhxNl@~IiaJj_zyN6CF0!Q&5 zh^+?L{zQ+Q5cmqv4KF2#4O;R_Pxc-z@ndxJwX6s{@4Bx!hHKVWIEEw zseBY$9~Zli^L4kVkn4R^-2TE(Tb^K69*_yCiX1x?-W7~Q@q>yCw}gNGAz3STlRlTV z!iYfWJ!tdWvw_hIDac(%bH1SolLxFstGhg)D#B?}+ruZlcw{=T{jdk_?m{yVv2c|U zkU6K#WPE;)r1Wg5uW*((Knd6U1`BKeS8VAJ*Rc#Zz#8En z8y34j{T~BjwL`WJfHtAc$dx~}ts=OuwqE6k{35)S6vp&|&%lj~`+&#N=sQI~24-EZ 
z1UR@XKO;F4R*mA;w^foz%0lC549J1qqUDZM8Z?ZA)iIU4my#AR{&7%k0YffjACYG% zRRk!QgPb=+9DrbjPiwI-8>}J!Z@_cvVbfCq4)}n=SoR&q4^j&S18-SVqvgdvVm!7s z-aLM6m%VH46cr7`R=8|{l)Wr@+4bI^RxSb9H6-Z$!srB^kUc#t1? z=Ku61MY^m{a#p#TLKv+qk-rfk|VOb_~7BZiZ{SENC$q@yo7vF^!D0ljfXE=g|@=puo3uyu>4pHN4=6 zi8P(&C0vrM5cGYC znKGQv{t-}S8nZ7$Xsco^N-tI2(s@>wP|;X(M1p{RlLG@*D1<+1viqtI0JxOj~)4gpEkLNF1-b?aM;KO0x|HlK2L&0Zy}h z8EPcPDj!RNi!4sKB{lIZb5Ge&AT1CepIj3ft==CBI4nYJ(dZ^{&7IInOWrefduhxwRI>$0BKAHq?Y+K(L)lT@L3r;(5&0_e7R?Z*H| zbY)JS!okvCKU&B+kuBIj&YHI9*g zlZQ+fY*S|eRE5tn{+3&42k0Gp3j>IP9t@Bn--J9+-{4>Sql2jD@S*p>$d|MxGz&Xp|?qE2+izmWz-o%Y<+or}Xk)Fn!qj*GkxIkOael6Z1=<*%ZA zf3Z;4-Y?@HO=12R^cjWb>K@z<*wQ8E!~J+Kk%La>0w*?a3FKd((kAm)6b2Aaw| zcS}XM7~jvxlu>Jitr?|uvQQwqrN~CFdDD-ridrrfphC29_YqKMdVq@{jg0BLFLsC0@49I@!5<7ppu*R1)NH`>^wkqQL4d0+GMnE*^lkd8~w~G!X zeSEuULm?U6nzGyW1RVZ9eFgZO1PZS)uCrcKtt>+yV}hxG)9`L*TG5FyIohaGTWNa$ zNj1#)nO6F0^ndoWAj=@HU)reJq0i6+@7 zPV}yuekS``i4(vtHMS|FDgMmw3W^kYF`Gl;E3ZW<;0eA(jASW$;uU5UJ7I}WjBT=t zq1w%ObV&xQs6Pz+ko~&YxLg;t!NHBzMNfpL@Rx-?UMh=R^y(sOmISuD*cOVG$RNS< z!tLU<)%ZDW&<~vXE9u2iQU3Lu7tew2u|t?#p7QjPaTn#x{FJz-b`jd?;=L7GqOn7B zMn=_{uETwg$Vv6)(sJ?Vpjli&;f}{+YqE;TJ0ZkX_M!3OZzV#F@@&K zv0igai%0%B)i!2jHc6sb>~BXBWwD||fKOtVx zNZml)o)63i>xZU}|AhoSXKKdYcMHj?En}=3upix@-Zt3+*fH@Ribx(ne7c&(#dwrf zPw)^iv-1#(){G;34^d6kId-|gho}sZ{$Y2Fi2!&7Nb&>(?X%fF5s|SEC_If=0ft9L zkG}1Ak%&G}kKFfUJatR@1s!OI_9dc3Q9OG~j-3d% zI?|_NW{OX6YBRbP(>ug-6XY^&t-n2lGYeR`hZhBoJ z>N19qT2RX<2Z-0&M2xS}iRhNo11KUTsCMN$9l!%p1FR8}4M8qzr&k#0B z%0oo-9SEoarR8MDh&wV?=qr~@jvJ!<(QQpB93FrR=hI>C2zrQ;XA~jg(;?#F!x~df z43U7=7oKxu6d5AZi}93tkupSBswk%1=}~P;qY@%n3G_lET|#U_fhD4XE_`pQ^;l}1 z5Pu&l6sH}q-u}}y+u=kZ{y$1(V2+%7j8>$kATchs-vDZ9E^fBP(SpD077PH9Q_FGY&l)EZ7vlN*IM~>Z7vdDl4MR+6&^#5r$i?3X{h|!9rfFqA(CikZ zGl0n@a}j$FGHDcLc(}~t;wlNHaMMwYC%2i~~$OCP_NK_r&7)(V+HDcz5tgwg_8=^6D`123)qZkqnQExd!XmsSv;Nl9^ zAxc^x3S_3JYyZKRv>dG(b@b)|wS{J3z&4;2rTa!pz#|Fvdx$?57b(%VeuzDKN*5dU zdHBB}emtcr`y*$6r(I;iLS`J{if#y`m@KQZ`+%{VyD@i6gW2BG;Dwew%l{dq#0zMe 
zSl;{Xr}84-HC;uE6nLJC(^j{|i9CxS;jJ+@d9(=3z+;v*YyEo+5i(DB11`49#4?!&K`g~iZaG}yNaGdO2|>fLWGDRDM_jbafnFXnI!mL zM1+cfDVUKymoLRfh*OB{c36cFj9I&L^O%c`!*gGu1?8Amt>-ZSL4Z{S-0`djhn(U1 zLPXZ-srA$csKo7|Yi~qk?qNWT-=%Jy2Qh5Bh|n3BmS!-x36w9!5AYI{4thkR!HQ-T zy^N2atK0nRSA9+jhgp^)qCPh%>K7^^*T^o&rC1{HYF5N=$`z5xrV^pYB0lpMho~*0 zufFnuLjs*3v@6t`2tSQ7?3DPj%1cMncYp7_=n%g>0T=CRKweoscixTaQW5=IMI4VI zT;gC6gC2w<;nv&9KA|iG%QUozOc7@C<(75~2=@t%C#nrdSEzFlPYx~mQ+}0*BI^tb zGh`92_?(DjWQ8kg4skh(p)V&$ApO8?Y@aABA___0%GH4a`F^O0=%Kma-ndpnl(;#g za7_r(;;s{ssh^0NSqJLdf$36?(?DambV6{B;Ro0wV&wUKkCBM}E#vOTWU@>!;hZM8 zCJI7*gU#k%+Zu(M)H{Pr#zYi3QkzEdhigIPQoFQoEi2;x#i zm8#o92QLPz@|4ACn&%(`>j*T^is)97U*4PRe&+;Cc0L3miwGbc4aAvI-yjBE5F@C= z^((1Y8PGQD#r|4^}r zL4?rFB3=!;sW45H%v(-rhhMm<+l&&_SeWPHo4-O5wFs)8`oq|%h!XM43Ej;~(^qa9 z)Zh>6JL(edhA|Y%W?T`)%S~H^ibcG(N*ltz+@Ao=V=n^w3gF4Bkn7Gr>5tJ80MCzh zO-6h$ZsZZRfQY7W*!bK~3@#x0~l{Bsxiby9I}b?vPG?9ubkFD{PO* zfTgafCOhwJ zV|15s%`-_OeYG6f*}RKW`=)oDx{)z?B~q*PN_SuB|CTE)&tKbX&%b=NCF+z)w-UYe z`T10OozDH=%65+s6i|SGKtSwZ*-L~*Q40&R0$q?}E;%2pvYhO#gXO~bk;}BgAQwid zD0W6+=c_r$6?)3Dd|`M@`%#+PG!tpcDL0Lx1UCT=5Pey2K+vZCQ(16GU~tDoF=3QY z7{-JI1qe_G!NLX@Bs!>|!H6iqh?rnZL`=}okYBwF025U11}jMpd*7+j6sI`Fsd9Qt z1{si$Awz|`RNuO!%D!$&wK2|dr6P&Us0UT8K1%krcAXWkR}wnZ%~$iUg;5onksA78 zehIbL-{)8S2@ze;ATFDuTa9dH6`?DuS+&G&NLsC%saLcl5QPdE zLu7;z1_`xBh$20k-+#aP`Rd3=$Kh2-LLNIv*x6B7$CQ<3Rv2Y0jPtM_Y1=C7L0b!B zE|8V1v@6n>7=;vGrex~+b?dLwbchZ2Lx#$#&q$s2RLk$*+ZL+b=@?74wdT1N#3=7!<%ft5$SX;Ei`Vb0@q=OY< zcXk(waAAX-m=u=Dz(FWu6e3*M@K8KlMxkYdGNf%BGEzuaqmSNLxaONMr&OdBONU!W zKlzY5HDF*5`Yk3q9??s=yUwv@sEqf?k7F`qx_{_=P1mCxozbzh&~2JAmhQXkJJM+{ ztW^8y``)FV!r*JiQ`PNVs`;&5->l`WYD5?YheQ@eUAmybgRFpf&ssJ?t$YxUN{ zL|zf18o64Z*I_-99@X5MTkaVhb!BR)rC0fCr=8%mj&3?UjO7s)O*zDw>02px+Ud=0 zY~TB=d70W}FZZxK%6w#5VF*jCZ=2LyyQHRGdQ!7i>(GRd#}LVfBOxRl+iiN{&m3ps zBGw!r7$dU)7?jX9Porx#x-iCwFkT^8&&I?gJZ0X+u}qb$hG?gcJH^f`BM!Fv=gu{Za)aYC{M^i^MMN>=|PMAOqyugG76rgZYCoDRICPXxMPi04Arn|}n%^V{(b$+sZleHZ>dSF_*LO^fH%k`p5Bo)NnH@6|%&{}59zwX|>fE!}#Z)2COeb!zRa 
zXQ!RFyjrDbh=YX;95{fu;Xy+}hdKVCsdIT;LJ_TF!9k}fCMGf{fDZ;d6i~nb8!%|F zP~iX%FkBdb00O2NKt$jG1sFiUzylKyxZvSI0tP5>=m2qZ$drV7761SMm~cgqhxsK! z6DvxD{E2j1Z#BRC4iz$hpaFx#0|*u#He9gqAb|nm;Xz`65`C=Gf3CN73WF0u`j4y8 ze^%5hT`jfG`6E))(yo-GKAFZMt?H|;-hHpy%X{ur#pZrrb9#>(*0C_8%H!-WHSDzy zD(&4A#*11Q^?V+RfxI&b3a>LH001B$5KsVo9!SSDU_CgAr%Y}vbD{Q52>RH~h#~@^ zw_GIz#Z_7@Fg@ql{lSCg45Go_>4S)Fv;{WeMIeWuKeCz;25z)m8!>2=m+Iy+?oZJ- zl)&^p3<(Fuk)p3Au`T>R;&Fl)pLcYi@NiO0Wpw5rG@wu-H=-DufU**AHU&AtbYJ&S zRJ30m#sGOf%&a@Hn2&-X+b%l2xu=8lR^cqOCmMoaL_a!oLzT_&D61BR#bs~UR7aEz zTn-rqi=J$pT_$*?C9&RunXUYCLLv?Elh`GoN4uFt8dl!JDYof1T7F-taWSgz4zU?j zz}JauigQWP9tV7RM|t{~3m~4LMwbXK5_*8dhYT}KCryUqkOl*tNAh$x^FkCCcKsaS z)Of&cz@fuI>M&3Y2a`ttc{){)3DcMt=M_GR;~s>Mn?>L)?+LaBg{#rsu96T|g1J5agYhU> z>6tPyT)^7bSf8^YQJ=wUZe2{3G~fWdo*-4(b*V=pvraNC&z=CmaE+8Kv?l_~omJQe zCL`boYFWDYGwpy0{3y>6e1B=Y8T?4}lU=;g%OiVzgcq9ev0%~BT3bEkJEyf&xopt< z+va_&9AHto|AIH27t|-!|0-^EHz=?zF~to|M4rZ9H<$9JADuO4mVZJPyPuqB_Wl>= z4#Dk55peuLwx1FVKD^mNw!qhi+Cp_|Hc;i~wq-Y`a-~>$KY^2{$U2R~GDpwRlS=V~ zjwly~3Y1sZtQ21I7#4*4?09TI3^R`_0Kji!Hh>?VFAFN*NCCj2?_nK%*#vu}R>&f5 z@c{(a-*kFOnpy=jr^jz4(aI>NCqICr$1F%_IF{qU_-6BA>i{(riWRp!Wly@Z?Q$!-Q zVkYRgK?=&-fWQ-Ob?|)*5~_HDVZdRsQM^71PcR8kNb%SSu8Q2Irl`dO z;p2-GWT%;?EemefIPc?5*rIiK&;o>+9D;}nkb9n=#pU!Fs&vZLtO|q&s{Ums&6>0Z zU44Sagj&0M`iic;jQ0u|kW&c{_Q_ziUfnt==nn>TgCI$pt&u{#lF#fubX(oB`Z ztrpU5L@=`%+R|N$CC`510b0rv17M-Hm4#qtRAZp*QlG5DEvu%|AP@wKl@VG_Gi8{w zYYf4+^7VE`jeCGg5XI^?wzy`364%lCDJZMnTS%6Q%0)|n5GHczH?jN=#wiN)54ojD z`u~{uQ%@UfZUe>=!s@D>X~VWW#?O!^5M-AEIF$l{M}QJ}1fzoL@2Lf)T7kGF_|Xs} z8W}l6+YdbRUf1+o4_y3${0en$C|ei^aSD`UHaUpLu2%y4&>G%=J8*+z5TN913I+Jl z?UwZ5Ia*!C?FdFFn|O3RE6TFI4W=%l<=aByqFN=v^mb+a5Kw1S2w7PQ4-B%|JfYK< zU!Ah@JZO^@|9LhHDup|VNvLoG@TTq|$NAZxrW?SrJ>hFtZ5JT?*v(^LvhT|MWEIu0 z0?(balun(e8<{0t`Dmj~`SOq0D*VY(0GFY;`kfJzYYYL<%mdt>0-|M0t0%*Qi_i_6 zU}ljyPd4068O=X!GMzsW8Zmj@07%3eI9gi-xIqK3{@d8buHZ<3I59H zk*+G8qbw`l5F?+Qtpmngf&(KinW)299U)L5H!6F84|F%gmQ$am13M&RXZepX7PuW9t}C>i?rX( 
zBe<|(f}%#@3Q>U+Oc`hNfrUN<2Rg#Fg6|mbks@fIq51-gWkXEGX-$-dKN+xFKM4=-YN2Z!`S1(@Tq3+p7lYXF;>S|< z1A^H{E<-h#5+x1M%fn)oS-pTx#Wu0U=Kc8t_p{AVvw>4 z=!0N>Wo|YGy+g3^h7v@}mR`6oax9Z(2nRR_q&hq(J+W^Qv)-6r1tF_uZ0XnZnzA4- z-O%qSS1X(17w5rH)!|luZ>3y1hv&z#!^JB#EzI>FEQ6l(oB&%uq`$#I^I%Tq{3tn~ z;~dQ8h2ap}|Hhb?q>b!g&UX0T_H_9z=XYU%O#zUDm!)0EX3bP_U98+i&#Ww~pr>We zXbhf&vgIxLO5N|%ccS)#JrhJGHlYe8BQz~awG{YPR+2E`&^$IijAQJQL1<@V?pCL5 zfEfC~YofMz_2UelYP(?o0_zWsfC>c+vBG}Qnj26!;=n6m-ubM>mNoTR z-Fr~_jV#$t8X3|~&jyZ7?W7kg5Hr_#nv(Qer{HUgZ6Oh3WCB5rr~T_OAG#e?|8$C63f*xM8mrl#soi zsq7iq8RjPeFmo@2TsKTCBo^bzo2CFM#o<655~lW? zE>tO=xG5erv08h{XEPb2y!V#WY@}AOa^E+2%3_}bT39kw(C4(uAGTUE5vTr)u;lU*<|y*mysgSOkN;n4|zKA_}gZ!Hi2@n>II`Jc>=lo1Rr zqm$Efhed1v=2ZWVgt`OtKVyN!347_=V}cqnAm!F2L6KCY61T=MH$duxn;C&O-h(S= zfNb|l3xI`gnVJf4A_6~8G$02N4mhF?0NL{D3@e7u^Hkc}T)vF!SF}&dZy&{+k|cvb zQAIgv?^hc@j1^l~+s|RkA)qhl1)m-oi6B$BaP!tbYduJno-Evm_8)fzQ*^2)rUeCJ znxGKD!@uA~AgQoAUxE~Hy~5Gei=k&El}I^j`{~CLb;eYnOqpYO{+kp2Jjjh8vyCRy ztc+Y)&;Y0fk$&G}-p!T+fWe9&8dD87m+j`6bwM*e1(5)5W6F7PkUTvUB7yK>mXPf} z0gC#OrsfG4`9M%;ogtouQ@0Y?9hQtkqY)l$i7$qBQ+KU5RPIEjI4KE9_9hdrLf^`$ zhT*0cxNb~*l~Qk2MCHE0H`awRV(WiTsF(s?p@*5Z4))`0Ey;>JSs&pWns!#Mc<`|n z5h+U)U_fJ3OgGP8pyV>DN5F9i>Z*F8q#?Bd2dghr=oC<=>F1W%GRGa)yp~@~><@+# zh*7)VQV5)m5PO~-j5$6?*-onYF~arqBB|&<>Zk5gD*={w!v0h*3|I~;8V-^i3WE`5 zV1%^C)Y4)psRXZpYaUp`{o*ydGejcNch=b`-&s-?FY@|w>Jd0;1 zVNp(swrH6YQNwF-^-5+;Ge@(e9`+XB=9QMuhPtsm1Vz~Aa%kEyYHaK&%Eh7`(D_O0 zO)eHi?ttSaosQJ95Do3@Xsm>U>;dx~EWsfQvk5Ekw6a|^l zg`fr^S3_1cgu}c&7qZm;^D2oSlEeb-Nc8n}B%n=-2Bpqe9kYmvwW=nmJE381fR?kV zK@Cn>&YA!_+2rQZPgBr(atjkLFfa-qIQ5Y0kKQn8WjAohp%74gEVP4`@UsAq7)=z zfAW}zNXRfBg<3-(Bc?UXV*;(lAFXJ`j|>ydoPI3J?%~<`1Tn6-cLkZh{015!VkwvG z2BLHZ&Sa&wYQmmZNFXU{)TrBUuogROGVAzU4!2u(K2R7I7fgg@ywlcSH&{4KU$i}lwTsv^(JZqT+#Y%z=5Hy5-Y~A z=DP|z#K;77nN8sTiY|VYf8eIojUe)H(UZ9GW5# z^qo<#2R5ioK&_mw@GLeh_s?%vvgdmM-;{{fStFYG607IF3CbL5{t4Us<^5QAs`ee0 z3TkS2;)sR<;!iIe&-(){dP8RWJsMZ6wc8@+`fe!I^|?{7l=kz_Jtz-Y@3GbLuyA7s z*+f8WGHeXp3tpijo>)b5XF-&jrIO$ds)O>4_I{ndo)(IUipd5_@oHy@tjlo>@i=Pn 
zNXE*OcXax(&K(;!Nd8-jhwY ztk8Jr$(bT?%b#>ijwd#G73>BrQc>ziUYo4EZIR?Giuq)=!3L-6zjrHMC8s!#$epS% zS=y6wRpR?j4ZR4|a?#>x?a=c?J!KEXZmV9_eJfhWpNa>ycCeZe^!j+x(h~tue9Vos z8luh=atlQ4|0is0io^_%yB3X86CheJF|+q{@0xh8p%GOfJK62#1Q!#M z{tI9Jqe)A^0157Aul`Yyb+J=8v>wdaH*y2`Sh7%k5TpI!U{qG70b?K5C@|LL%utJ$ zn^)!RGMIDG#^NZuQhIh~MJ)Gzu61DJz+MS_`|yR$`reg8{=Mx3ax_ggaDk%lFsmiq zOQ|1?MA+DUk8{dO8zR`<*aSkm{5T9N>xviJoLu2K{%=k$_srB11^6r;W%MZPYWabt zO8%4nP*y4N9iU)JEnUzyK!JJ@YSb+?tdvQaFQ0rPvbL>d-l8vi^U8?Tx@kQ|1odnc z=nL~>?%T2~^=MB>*k)sz^^--)^*UCJS4?yN%XClfO4)?&Ff&^u>gLAfNN7jDJuu?r zD9}uF$%5|>^vZ2c&NLFTIFO*t%}qM2e*RWzdA@Q4hM^O%QDAxPVzbT%4@@qf* zK9J2l03SV0@aZ6Wy{6=~nfj(Fm^C5RKi>)IRz>s5g6ct!g5ynj0brf+CNd!EH(IYk zfb3;z^DXiG|N9kWM=E&4)`5*v2W+ER57&hlQ_6+DE*Ap(H=h9oz%Kj;t^7oEBZ4k| zVxj92k_X(xio)a@*0zIcGFJsW4jRDl(yfnx(S65JxZ#f~-OtKYe}Pf}ENKl;ueXZ+ zE2)CtZEnvwUKp(|du#fbERx>w*B@O&Wc=GyeJsmrl0N0NgUJJ*N!0}lgOA8J9U_98 z6-X;;azH=RLrJpJ4`R|b@Hlbvzcn59#IWK5gU8_DYLu;*V0eqLm_~o_q-y;(~vg(bBh096cKw1OJT4M%9|+)NeOAuWvq z{K0?P?%^D#b`5dT4+UT9+EDWl;eXohmhX)Z0#MscCko<*dr`w8E3VIyE{F}VxD6>S zQVrJtJoy)3=v%BFAAh{@^!aNT^T*#P+&xo@q}F>t@bcJ|rxj|kPu~Tbkxku%64pXX zu_(-J$(fUUd7DBO@-ZYK&qm?W?j3-nDw_L1d`f1OIF|>(;G0fZNO;4+Dh!#xloQ4< z&k~dBD+&dipgXQ3&VVXA8<-|J#C`E#4y4Sk-A0)_J%Ec!DVCK~TQC#_DV$&iiYMUv zD+$%v?#S<{)|q3po9-&$9lwn;&dkow8S5`pGLKKP@&aGk2XHREVP3wrZIP68D3%GyV{72WXmz2DpIiNd#ABycIOCcKqo#o#cTxreBI$^hc@5TXU|Oo6@)2_sZB#K1VsYX25R#cCx^5Y)K)vD)`8D!2P} zVgxUO`?6naJDRk;KkyLdM9elV&f4kh0I}CPaLm*;eR%L0e7T53Tsw~nzh@e8?M{Z@ zG~-{uw^4T7%$?q;F|AV#<6*d`gP?>!3l3b#oS0 ze8y0Jt;$q=A85ZTs0*uH)7KknUb5VA^oH7r9D=Bou$-p6J{U$Q8$>F{?H$4XwFy3+B}4*(DGO%r5V`yR_^j3J?=&DF_uQJVI8t7 z$z?|EkGd}^8Jm8fEV~Bh`4z@{_6RSYp=vDmB|s{bz3btQ+RboQNr*Az<){P7WhEOh z33b`tEzAPb2Y8>*qCnoH69ajR%?;%3z&nr^M#fCQlFArrcISW@dD69wZ&M`hyOHtB zcg_Ea9I(fN(0A#(rO?4Cj6O9Z#lghtos{J!{qrLFlzvuD_5w}4Vo=WFUP+eqYmLIf z`X)kTS|1nW^rxzQ9!I%@9^W*@f_C4t=T|GcI~BVDgX>R_%>j1b?SgE7+>`@Exu(m1 z2kbz;G}y6#6MHzt17cDbuesKaS&;}^TaKU-T$zxxpqBLEg|^KZZT`CZUT=kkMIi1D 
zKXd}9ScQrRvclA`DGwJXluH%DEl`Pld;4gYf_JP`nG~+cmsPxenRj8xLR%{p8tNAj zgcOrBLEf9!j^^Q@JLn2242kjU2${jS1#uWdy36Dl)yJ&K)m2JM0?tc~WF=~Pk)*7v zQm$~a$U>kNUf zt!9lc4vGR@12)YX-6i?rdv(EhLq*TZw*E;54VdpwZUFa>SvbsxjU*H(*yRZMMf14P z>_);+H~|-l#o&4_AL-9_O(Gr?MwDKSF!8t?7danpwknA`XC5`Z#KTJ#i*ZL7?f?^M zpz8ljhVzpl*NU@NPSiK0M909yg9?`3=a-nX)ZB9&lyBeoJvysLC zk-3bykj0SdF&6ue2zd~bylNjxz;RwTraXs+ex1dPurY+0bU~4AE?Zd?u?SK<#iFXd_Dg|WF*~?+>{=oJ2Vw5UP1;=kp z>dYR0lIs9LEx7lh7z_eF;ghmdsyk&6Pz62V!E~|JpK&J9(**~lYtYj z@D1sKpgt|7R+LEmEhu&xM|84kH`hpHaawbVvP38s4`l+mcu>i} z!P!n6BpwH7!ht3%B90})LqTygOdKE;&x8V1pr9;`^DGfWfg}zn12l!Px>&3DF4-PgVKGuG)DuaRl&^rxzL zS6#c}`4q3G>9D>xRNWuqHJ*=n{_)0m{lx1lRwee&G==(GAKpzj(X_F?o9b5gwmR!N zW14Jji_m$t-K?s$N%gF>HBG;FrJKHorKLALzL#z~J~VB%wECDv(+-*DOwE)_H*ue7 zVRjvxuC-~ErETgp__9{CEOTmck7bh|D&K_YhGywvGLwt{G7%$UM2v{Rh!~qX|Fz1$sd2Hf1Q5YA&s%J)QY$MH zAOFsXRsFTQ3(+2nRQ!L%+tWm*v3^(I(>E_!Gg~9ymaX(`rC6$bOW(vZ&2gLJUAjZ| zo_OO*E-q*8UN$lfZt-p2GOgBXF01BRrDbp8q`6nMZ7SJB1P~EHcnS(ML4=}pv}e;e zeE*rS5&vGS{PnidZkY7eQYW~VuA9&2r*h&|3tKV!F@Olv#I&U^tt)3~+AVo4Te}w% z?&j-h?}>|tjTR6;;o`q3YpbEbERP6{gH7kuT>rJNn0StJTxCDM|E7+B-~a^@>ed)m z8eMh&C9JyVrb>M(<+OU&n&;q&4)RAJ(gKq3?l#}Nh8rs;LH zX|m&Kvq^8Yx=Zy;OY?rb!ZT;#mya13Y-}V2KzH3~jb%atH%;94&D?~QwWzI!XKArAZR;wT z=DD4$b*rpeov`Xw?P_0CV{F7X6z|z+nEAHXzo|?U*ZohgJJVPOP3`e(@BK>C`#$sD z=$R&&=9Xq@*Pi+FCT?b$TW-2^%TIRb;=g-Uecn`)Z&_)WDs$Hrzjm^xvJlwi#P`SK4 zKx~4rG&E$h!>T@}**ypd5KN#mv?pscJ4QV|#YDEd#Lv22vI`yrAYkC6-=dh{B5vaz z9wxs16H{j=hNn5%+B*316q|~nV(Lu^<-)9@oECX|mOo6HH*2QZM&*`?nNd?On>|24 zm_TN?l)SokmOnegRm+Omrfd9u{2d$b7$}!E5d#G;@0RdpnfkC_*Hn9zmY2aLW1f)m z0iwY)%mai45(6YSkimfiFi<>Ppg^(#5-c8AHV9}GC_ofg5V6n|9*AH8%7y2GqZ6gf zkc0F-4|6*Fa3I^559dUh)tx@aw;aePOzVz=Kpw|LPne}W9QHWV%D%_rxOTyqACISTqSa~0aiSfL!>tqMa3n(>9v}uty_*p2hqj3I00E&2(V&vc&wG7|7>tM! 
zF(O99h!_zgVnmFH5iueLUVmF-ZkKo2S7@kxrs3nO1em6K&$LfP!+l~OYT`3B;+%Rk zWz;oYEj2Z@mzr8CEM1%>f))zo00L1WM9$>lWC)3nup{{?r(`q$kRd1n2>KTw57`~y zFgihBb`MpaPB zDMfsClKCMpWan8Q-phb^tF$x5CeRXz(SY6)qr{_4$tO2tem4>NV&n@j&gD-r zGIxYY4sm|2mZY|8{ZA1iLA!*#n#VhQ;}z8D{0a9{-|B@%G?fyD>j3&#&2$DxHE8pM_W#|A=+lLHnm z7kdpZ>kp-YU9M$z??U+jC2e3%Kso8+hG+ho4cFWLeFrs>1}GzUEnOj*x1^+@=Fh+# zT6)I|eyfC_gm>Wp(@h0P(HZ*ix`0I7DOl}|1hOv~?UwTw`rccg0G)fswdLd&- zrT1!eWi+*SN<6h&yye?WXg7ld$v9Qb$w*6dWgw13UP0yQCS@T07||2^bMIKzoGcQe zE}=&&93tw_yc{h950CmH}+oVHnYFA)9;;APCT_D+5XL{Pq2L&JnCi9B9# zGL+(2oR-W`*#IJeGI|I&C%!{M?s71k=v#1{;8sITF+2 zhHb`y4dz&_S+xZ@BvQQNV8~-eMFSFqn-gpi4tlgZI=bIY;AJ2$=B{T5rCq(D*E@kGsG(lZVcP&B?ytYJry4Y7m&s3kLlf`o5z(VxPZr9GU2(>UdM z8xR{kgdSuuKmrl6tS&?#Q6aT6TC0X#{3<;tqr1je=Aare2LVm#jU1`6v6Ay#jrN)K zDpATQ6R!=*WFv#Q!!+!@nqdgCoP!!MF;T=syt7N{nGz|ZlF7T#1fwkE?2=|wN_MA5 z#)4{!^%)2A`J*t@0?{ZvEKz2KPqdQIZ4N9AK$zb>>K9cT0MCWU*=fSHp@7_A@Zehc_Po`P>C5n zXlR7nZ`6rjbbwKyJ5BUsQj%*3ansY^m8&EOQ|gO9%`k_7b#-TS1VwWEQujHt}1E2=T-t@4g! z(h-&SFsiV}{VX~`;F~I5@!nZZikZqhs`b&L_0&>l=x+0p%ohH50lWjhoDu|CZw@R1 zH+UDESL4l?gb#g4>x;;}#JVxK26ZF})K>m^sQi1ujC5=9<+a>q?Wiqq46QuUPkB+{ zR8lRWue=uNynZ02t|iaXzn-KEoX^#;Yfxw~D~c`kkN@u7Q{4 z(@mNGvd@7T_x5__j z(;q4BO%ZiMp#n5s9i;8w>Pw8hougrU0hx`= zGI8HC11E$kKJsg`U<1n%a1NWY^g*SeI;%8|MmmF21E{HXO2gp&rjk^FXm%NqcP#9q z#l@<_IwkxnSzPqA3p`xeQfi2XGDQRMz00K3G2kjw!bn_$c16jW$jABJgIo%LH7(wUeS7Ns}>$VrN&w<>@2nce2q_@)<0Yt&w{-EI&)Pj)bo}gV%SHaq?~T zjJL=l+8@=yB^4J4W!yxJeQXU@YF5pr20JkVG{JaWibmY zlK~gE9)+6)NW1$jOzA^h^9T%};S0pCnnwZE*1d%#@(|J!RJB7(3q>->F`Dn~KBlps zvHc#%v4L0|*?BmE1!Urq<=k&sc7d&gN=1(**>Qdqz+*X3>oOxf%BErrzJ6W zWc<(4WWSaA3M`hW;LSppIf4k|n%;+1A-qwTg@{Rzhvu&=B7&%1+67GzM*Z|dAVG0B zF^wXlG|}bRW!!Ns*vUCvCvr*-teGg~QB))84^kl^|@*=Oqb=l2UdC zjrJr#B}J|w+z!Z-ijSh#tWlj5elZNf+EP|ma4WNq1lk6Dh>tC_b$;o)CXfX=lF{ww z_)`UaUd14=hyIS4DD z05w*%Ne>1#=j*ZTa-BJ{Vq0iHl%ylX3VV?2JHDK(AAG*vG=f8C&-u_9uok}&0fa;? 
zQ!&9u90a0j0t=*ame!g}MP6ZM3pk5D>4Lu&uTu*gLjD64kg>JL{1&s))9zD~jdJP0 z`-5{I33>Ka9d9-VYh5+oSClR6_C>))Kk-Mpoal~k>-BQaC#Fjy?9seZFboBEv7&i= z0vr3toV;aA0qV<71XH&>0a0$o2t@K z`<$j#Xp}T;Zg`wOB85g8i&P%0P*Jkfk16+6bPJ4R{$x<6MCxps*|(a?PN#1MVuG5Y z^H{0Nh_ZdLHqv*&^hIE2OU~+B@SWw|b1(V$Pf)hlM|kUxwZ$^a@}G@`d0-wf95Syl zc!*D|zuBBK25NDeDR&^AIid#n{X0e+ATe6Lt;9ng!W~ZLq(~8EgxQ9JixDyzjAIsC zMS6_5MOxw-bFqH6Eg&)P3<)-6D)VqEY*dw??HR*O<8JXdb(;V{j;SGc@j-lb6*y1&^mt|l6-o9IJ| zxW|p!VHnaSq5URU&i6cixQ$wdLlgbDH*!vA(?r}zd%T@V;+4pRyIf_+5)NE0(eWQu z%+sE_6bPg#$IS$=Sq4P!Yax153?`56s343OMS=Kkow@}0hdyREY*Wb#=4oFN^$k{+ zeTBP|b*CsiPf(1`4JNe_n&Io zIAqxnEKtcoNhvmskP|A*OWkHMns-;ogzer3pJ&GFVTzzbE0ywCjZ5rpwZw^DzzM2} zsTRA+!ze90K?R3w>DR38484_NC2fu->m=K46g+Ez)$PDB8U;85st=us^?Fze#W+e{ z9}-@n1d-0*lme)`l=0#e!Ke!kB4;|hCDX8}QN+vOOiFKSZlXSRhyudjAep;-pE-&R z<1K472)WgACi@^BQ`8oe53SaO=pI82GB{W|k{pvkA-W8bFN=Z&g{18t-Sl)IK>xuq zd08QDStWqzQa)%z$un+_n(_@~G3Ua>KNA@`8}o3P1$s`CHeEP>!m*Q48AG8K*QRFX zIpb3E(%4Op5Q-`~+D=kfxRwOH4T{2nk+7dS>ZF42l&Zph*2+l3#=-Ha)&S=EOw#;R3A7CGHl|gq9ptYCydNb%sY!BG*~rUs*E@k@qH@Gl5S)GyOfk%OG6Seot8#d)GN(J;ZFT8|SHyq8+cWURS$ z2X5hF!uFVqJb=fP!~`{)#+#5sVKLFsLCPf;=4EstG)a)Q_KqH>qZf#fh8@$$$!0GNcO_cVuYw7q)dq#bF|6hzZeBE zt>ud)R`;F3Ex{6oEdex5XjEFs?^IlRy_CD7MkbYB_{FX>g#uTEi}L;$@q`MMiRnFB#+nYeT!ZNS|)Fs7J;{T6Kh*%LdsLndUoFmO`>GkRMe$ zpmm37PKDQ9Ku_Va7u8Wb0cU|)(>lT2)aoE2!-__oC4_(gF(|}{DZO|_ev}DJ1HpMJ zOLQ7DVV?L_0*TQyl0yj8E9*H}DsTKeATQ)8Hjq(+ld{aAHLwS4ZAj;uH4)AQsb_PU z1VD3fik7)D%D`NBp3Bt^w8p^9z+5-1h6wK8+6ZBO@)jd-5-tIFa;d3DQ`~Y9aFLvm zQ?sDFC_R6;OJ7HF+CmxV7np+K0TJRnxz=Z-jYy~UMmRO&gjenpGfW4IWqXQW9)NRg z@=6on=>N&B!$Cx?0xB+CEsLIF{56lt$3mWt^B$$N;taoOQWI`8HnVQ<6amKI(wHDN z=!rE_2b&rJ6OoEPC|nYAp&tu`l7QcsXVRb~-GwYOIZU+1$qsCSQW+CsVaMQclgw#< zIU8Y$veOJxwm*nlcPO!Zn~8i9A!%3{wz1M{g!z3t>L%lN1VIH?pwbO5dc4nFD38%UOI*s)Z zsjzb0Pnj^NJypz9d&SZ@Oidu?6taS+S1USN3YJ+;tELR)+X5wD9FhXdFj!N;uog(P z0Vz4+$VOy;NyW-3VT3H%Kp^E?P&(vGlYJc@MYH>nvrQFOv>+SRLo^_#VMUQ$nrT*y zw2Lfd&#cRVB@(Py}j;7n!bLb1B0q4qI@{2^K;15-cJ>y_>}U2cLz}g 
zM-R}LcT{fa7N|w+$c{b)5hgY&#QGsHgJIWOW;{ylT6Vwv$XD4`6EeP%+Qwtnge%!< z9!X%+$}+EhAP*NrfuP5VBGv@2hrDvthG$6N^KfL~1y&B=W^E>PYOUs{ZhXnm4B}QE z;lxF3+sj()nBtupX+%t}i_;YG{39P;qYLXpXzSitHk2*S7y!I3{Q60h5WeXPfHq!a zBU!3jodo{#AjA$cL1Cc_BerE$)l zL_hph{NF5y$s@Va7n*;R<@lq%`tk)P%GN&zZ&cmZnB53WZdKP+RN6It>ofwj_STRh z<%VlfK#xGoSWZ1fPoN5j#B(KXQ_8<^=^*F-Dm!70O-O6*(pp^{^+w-1hj@u3J9KjR zqUEUJo=P<0hI3$_7JN{63}8V_?~O^LhCv$Tn4@GHOCpeZcO8MzBoxT>V5#7FDr-{X z*Eb^CX}B5hC=+UsrsK=LB2+mP1{zd=8oXigD!Qm z*8+GAv6UUXg(fr+iVxf%Q;sJN^n>!+P!3<*<(kkP=C7eAJRayB_zMO9-p~7Jk;3)B z-0mU=MjlJU=aTcpuxqm*00^eeTBt)13T0Zz6zjmvo6b#Xq)QV zpXeVqy51sKH|w;Sy}8Cc=(u<{Xplpea=rr!EYXQz6X+X9&akoB0+Os)E)`Mw)@dlQ9DO7z# zM~zv0JCGvhHgW+BJz=_j%+?wWFb9eV%hHC^z-ZmkZOL4^$A}%sMxg%pWRT*Z0Rs2j z*8N1^-o-cYSVD*9*=B#eY4dj;)^8O=vH9uB67FJQoZV4JX<7IDTDZ2ntC zFr_V676m;MEt}m^W1GT9*8=}t) z^A|)FY#kxN!}6O^;<6dIMVs7r5?bU_ik^$o@LoC*5(Wuy-g@e>A+#lN+%pJ6m#Kd1 z552s=;#W1wS>p zW}q1Z4?K6!xf+Cm2P_E>%844qsO<9`EGG<^C=wIxFUMeFTVfEprV<_6C`b2`5f>wh1q}hvtShaA+=#(1m?f_Tv!)180S2-DY(TKW4OEua2k2{y!mrh_4Q3_ z_GY_**kn54YLY|yhD!Cb7D5|p%p(N z@>YTS3-s}f5IjPkFOnJFhrS=`7p5UL?g#;W(6DU$i=-X3oxgQf#Uh&JJK&m;INTNv zz6e^LEXp?ff;I|wwMGQm((ojm7P_)#iM;h-(^SxfZAy`s(grVzijQh#8-?odFKMQT zvMCi&S1;QfbVlg0tPLk=5KJ-fXSwUlYlzdAwILU`SvQnF!GbcPw!@img$2qZ8#V7@ z^;2l0KIyr~hLxY!Rc;Zh&kCSH6q4zZ3-{Hl1#N+AJy=?8N&aOlMsxJGkKU?2Zw+Q{I2`bh8yhvEn?|Yn(1gHl4G+qWX&~(WR2t4g^AqLq@qLehz(Hs1S-sEALGJ zKCk7lr%}MAT#9-FIO*4$rrLWbSMniIczj9)pwuv5;TCzCJ7hgKj%qZVY9l-fQtU}c z$E2;SVT5uJ)O$b;A%z0ctqCg3Y6ny96o>xmtUg9b{b`NT_n6cyIY1#m9L|{s(MF*)E^zX^D(od(EY^*(g-0R6Ml%%z>1yXWanZ8h}ocJ-E@C%i-4pr(|$K=@r z_n!gmqtSg}C9-<#C3NG4kSw|Ow*fcIryIxy4e&2%u%4 z&?6DwKNujOd~n^-19|safX$6;aaRTyVXeqRwe}fCyu)gH*5l@0q|nyy-2o zm6CmXD4m~=FW3|rd;r&zFmkJ{Z`lzCEz(%Gm4wiXha|2O){>2JXb3};%DfL2g zO9=p+H*<3$<$8=HfMc@3Ttvx!W>PMQlC4l<0vT*EQ%esT_Rip6I$joiHT7m{S(+*d zh`7>nJP(Zx0tN&kXt_X`lv{7L0F?}3&{A!5qDVPz;?DJcvfv0n zbiQ$R^O6)g^PvRBz*{~Jm673&Z#Y>bA4{=$Mf%@)Q_aK`!n8J+BEY*z^Z90-pDuH1 zUzT_j&r09t&~~Q=%IM|`IuVUAqKXk)Qt(`SOfZ5heNwCd9AaLK*-ll7Z9QWO4$ATt 
zk=3`Ye~S{C3%UDS55}svpx?AGP&L%era|_&8hk6`JP6MV7h;j@u0p6KK@@t3FLboZ>tpcQ@e$ zYFw@L`+wFZrG?<9dUB0MEl|H9DO_A%!k0RRUYO$)-3^l8C~5Fo(X1wB11*qNi~-$A zj!`<63<+9J`;l1 zDl`SY)()Y*&9XNugM8Hl3(R;a%8#GghVy;<&U|}%AmwPI zU@P65k%$PL1E76@{4eC}ZtdWJxP3~>^pr>^;$aQl?Ck99?CjD5zXDzYwE_+nL?8hJ z1tee)0fNHdJyLoB)uKU!| zFfO=QanrF+`+aAh;8j01r@HcUupFd`x>5R(|m zg2JiTfB*!rAN7xVJSN`qSf`f%Ivy?3(B2gL#2^tSj$OscyoWli&F3?BUMG-t`!Nslt={GO*{YR+QMl5B~s`T6Yfv*qA!-qP4&d*$W9 zU0kl8$KJFZpG^Bpz*{f@HB4|^-~b_y3obW!1_=cyR3Nc|1qlj{4H-yKNQcJ-85|ag zOd!B9+~#JPu4$S|Z|9YT=@{fA^Q7;S>3JS~jLUHve%10U$9cG@T3VR8hp(2#Bp0`7 z|5;~KCw7u)co}P+=BBKiCb!F)RC7EY6cI-QfmD0Y(119c2@ae{#lmnP6&DI%C>9tn zI4~|41UTRUg|mPKJUGB&f&m360OJ4*j|bDhur$tv0vJx?ND&9)0RtjA;1CiZcDPqs z3jhG9rCO?Os-@Z*epBt6YMW}Q_D!`t#_iiwvr4sZs@WksWEPRT_ytJW1>!bZL=f>o z5-&QNo7vV@(d~Azr>Ll0ATqR5wNyjYYnDj)=sML>?VD;0agl0YMZNs}rrI~vQf*W1 zn`&!`q8FQL`#awYe}8>bE!9fD0gdB0Dk2KR(MS~#h=T-3Bt=pU0SFXCXjEWOJRap~ zOmL}4HSwx@U+=!5#N0+s6y-ice27Pe{7T-nRPL%+_T~$#IIlJ_Ezo6M%(6Wrvtk<8 zRm!ThKgOfY`+g0Ta=!WdheI>ZT$gkcCzxU5jkMiuA`YbH%yk#ylMJ?n!MJ&XC!NKw1OgI#bh6WG` zM?*t`Fd#`HIT#p@WP!j1CXy0>I2x^|*+>24O5HO`O_bwQMC3(8-Q92AHX>qf?sol{ zD7|%D!1Y^s;ag&O+a_Kv76FZanF zC;djf?@w5q`-j{&e7`bxQRTTcug+^~ivO|nuDsKZu6vC%WLK$`{A9QBD)-Lv_!bYB z@rr+p44Ic%tt%lCaD7iKJxFEcWuis9ZAI5@ZC9@QGIOFSo-5bbTG5V^*YVJRG#C+z zM??g}@nEqqwye0gnQ@=}%G77h)uIo%!@>U zL`1CRE-IoK()?wat+FNx?w+C2%D*cyxRTdutVdmCs<*P$MOBq*cJ$ihp!!z(ec#mQ zCIkUr(_gM)X17YhIMq1JO_8PI(b}38Y8EOd-7>XH%dT@;iW>8!nwh3+*|^d}#Ycx> zrkN|MdhhM7B&mC7op(j6omc31s5R7BcQfWHvp!9fYwUKg$#$0JV$#_`B+I-k@6y! 
zB2GRQY57?nHsG zZ&@^-+Ue=*W18frNzan{tUkYUe$BGCwTpAEU94GBcPCzY z!8a+BHVIQ_I;l_IsZ8FK+llW<6D)MFLB)0e!Q;5FPoo{iv9OqMK?ILRfeZ&|Kr}GW z6{HCTdV*h=K!Jrv)hYb5O;xibQ%|QRmvrtJ<8iU$G0d}U!}Kvvb+(?T#9SXsPCAIp z=^9=(ai8WK$FamX>{ut}$+a(uoqL#D{$*?7O`VK{0YqTb0SFr4hJuE1z<~^y2^fb8 z6%$ei5R@VVJPZARU?Ktw6ege`;(%I(tfp^yVune3m@^wbR?>bq4L^^wtlB<}v&oV@ z`21N*$*$RF^Vrjd_vA8rsTf`wV-j;r&QjX_v?OZ7-1*|v(hqOafB46kxBz``T&)-~d)YslQ^s`F1=w9vlyqX?U(y z!*0#5u~uXC<~{YUygE@9ufdSOa6C930>ypZs>Q7&S(U4(ecQX+$_CLAm*d2R(dX%O zoaCh2)F}~p|2WtbH_zfEyr=VRQazYzU;ExOJ!M?fyR9s{U-Rpwc*WgTb;ZYH-Q!)_ z*LXP-ua>H+eVC|tL_Df$A>&o`^zfLNdNnOnMKs+*wSQSJ;w|wK>z7z+-%r#L()UJ8 zd?UtJ@+})tGrq6r$V%h?w(r07=NE5?NA6lQyuCHoW@=la9QS9oCko=BslBjo=S$qK z=Pfe)Eq67W;jvjAo7w5vog!4UTi07_)UWT|p|am1q6vXpS6fDy^~3wF9eOuzA~z#< zw>Gk3Od=v6A|ehFkrZ(x4pGVO5=cOGMEj#&5&1S`qUMt+fngqP&z*Uy4V0K1G|w)o9##s`jXGfxv1f2E_5; z(159kXq*WK#Nl`vjERbf1Ia+~cqkkRtbTnA_^Qz1`VvL)g~iK@j2oFdQ5fQaSq=3T zvffc@;T0us-#wCOw>IwGFITZX+go(kmndx5_tiz!TD>xNs&aE8qTa18)IaVaw7Z?* zY2F>8p=`Sc1_&c7yTbi%853{0jFrpSjr?WQ-`;;BG`!d09acf~(Rxw$-S2)hq=An22QLeXaZ=KC_PE?v6eo{HQ*!m_b1x2vh z+X>E2B-gTZomA57N=aep!A9TlGillSxwPZIqAtt9>6fiA$QHd4f|+(of~4S!z2yL; z!@h1*nf{UPWO_$Hb+1!LQjnic7{NzcG?&!XXFuTk-5gGkdERJA`0&K^Tz%wGYOm)g z7>&$V4P)cP{7&Xr}U~E{#xB*GKOz?Cab=` z?oDxf7`B!J`jdTj1fwx@;EgGOIOn|Ce0tU`g1l0y+Mp(hmS|N`Z~I6oEblv|TucV> z9?|h+Bwn+*$?HB&*>HZ_n$`Sr!W{MlR!XZQ3`{R{JiitiPy{VP&4r-8X7nM9pX%5c zalknZDk9KF(_f)%+ev5^Rr?RuF*J>41B=`G!4sv?ZdYY373Z=ago*pwHh5Ce6~kil z__BZ7{dr>y7nbnwmFhmw*>#mHgkF?_<@9m^$U+FF#})!q;7vBF-ut=HqdW^}MA5q+ru(6{2i}NZPf%A&xq&3MC$r zr)7HGmFZ`TVbHb-O+BhxqN}{(SM6x5@xG%87Y!`xY7`0OMYK|gn$&sf;>*uMT94*v&bX3wgg8$ z6w@gYQ8nMVMz~SaDW0hyP=NBsxe=)}(u{&)#guC18JMxC`5%I03kEe?cwjTG?-4!9 z2aCyINQ1R_FKY@`92i8zN5mAiYa|g7FX_LR|D;G(^QAz~qD_8{pvr?tWHTpJ5tiJh z`Ix6FNrz(Ex2)pel(`BS^tEtl_tpRu;}c%YzdI>IZ}&svTWf@u{HP`p;U11{(5OO) z4Y&ufO%K2sq-D&wS#bbV@Z~im3qxDf-ajb!3=*abMuQWA+Pmw)@jeS6jxea_;5G3o zDLFy`eR@4x<_72@iL!Mqd^FiJ{SG5Xz}54Wx|H$o4?YWoSj+Z4qA^_oLwZ&6#m-!d 
z1c7bAP@m7ov|I9JHKw37+_Qqn_W_>}aV;u#2qrcK12!PrxDWGvHfUW6hmww_7`=d4 zlqrlj(WWQVHqfoKAW|=#E7q&&0SF&#VWcB-t~#TZfe$x0q^1FU1>)12exe=p1%=Ah zrHv#i`!nUi=pDu7avD)eIUw2bVSJ1THECvRb2NOI2tlYweJ*rSjc5C_EHnnKb}F(& z2)fWu8Ba}ENZXn7|11<1Bg7E;kw;uxxey?nf;|HIkr4sV0W;qi>sib~ASgE~cQ3*U zM07!S%1dyCBjJ-0CsSS_D{-7XRtsKYqgZ{$E^NzQqCWj&tuXd1Ftg9P#gxBF27*|M zah2fAv6_lza|<3Ul9VD^6bQ}-&;_C#)*d?j@2{&COBF#YPIH#Q;%Frh zR-3wf-HUR&qRc1#$nMdm<`P3@()9emKznm&4H!VosN5}AsfwE@IrPfR_xMMAzt^Zj!q5``TSeq$p)Gos44R1?>}FZ3KE$z@8U zQ;>@@QAqJMUrcKq6%VJAjc%X;u-)hGOwu7hQCS&(&3LG_#*xxRSZSR$i1p!vz40H4 z_UGMe?>w`YcVlDDi9_`CmD#o9Ph?b0rZcDKq*dH8qm`vs^83$tGQ@FMm#(F$#dn&9 zLE+8_1~>&ylx^~j)*=@03L_J5J@Thq)%}H8aO9a-RLN{i++EDUo3bS456U76eSYL{ zl>n5LM}!TU;~yZUP1qv7DqBG=c#MwpdDF#G_+CC8c{#5*SO9LnTx=vDZ@+>rB-}2LzWEDMUOC?-&H4QHV)AwC=;Q zq(J?gKLi|d3uD~9rerYctL?%dI-R)L3nEODMN)}%T02QXyAXzRa2UgJQK;K`^N06G zfgK9AX&F9V2}#FjhaRDJI7jr%h=ICjmzF#WW1>t8gU&ks!fp3XgQ6b}RT=uDe$poz zU!=uW*l#=Vhml3tu0||W?QRr>XV8MF*7h<&>k6kU{BSJ3tjQa$P#eY$@b}cYtWK=b z>)o6*VQx;q0S9IVLrZ!8 z;vTJd>b=sjF45WrXnWmK&vcOzx~`Ou;EN)TkKmtv$L=em(x=mdweL#`GTo2~XC0Vg zbyV$Ka82;-L{9KVH8C=&XW&$`3t1eE;?wY!u{jSLJqfB_A2cvM>|CVEEU&w3nVw8& zaSdm1-QB0DcW>g9VazE-6Y;PZvPnF!3}VD_d}=onkS(n+h=)(Er)-k=4Y6mF_^v85@(5 zIfn)n*AG;S@L9noLTv933sCNwwEnDbgAXR9Low^FxV&J;5T6$PL19#h4=irUiPKfbjG%zL+ z4KKX}3&GVK+(+9Xm6kSxS~ndt3ZbM5jv zd94=;QYfLhfr1_3*KCIpv@m!I4>7D4yHf>}-}0fw!3Y5gq)U&FrK-(w^CqKM`jLc3 zYC-&KOwVHmGa2aehfoE(ObR5>kGhtOF9~M+8bvct=cHam;}sTB7*e>Fh&Bb|&AX7S z5^(;3Nux;#S-~hhHWsEA9;NV|8AG*2P)0@V5|MF!T9FFII3!SjMG+ZnLFlX}0i0O8 zH_!1nNXPsYz3D+98JDvhCxS>L;!*Vsw4_&HQB%M`pRI8ueYVAM6hXF-1&Q9O#6z`k zf^W_0@>Y@>^ir=F9jak^kBK3=aR^4VakOB|Wc&(|q%NjG+dI5hjth_hH)5<-LtUT$}Jd|7lfa-`T z;D5q7$pdJ;ys>YQj>B^mn?ClG3$Z;#(Z!I;~lEl_3hRXlgbT zdb<5oN4JR+xdu1^hoS?7;6TBbj5BMPWP8v>=#1=4%{)amz12$|s8GPt1*9u;8!FKu zD11Sbot`6-uu@gyt2L5nXAj$;TV;&|v8pdLCW1~nc^uSeox3*G$~xXzYMw{{Q%nVE zZ=z6%bFIJidUgJBg$d_nwmomiDQFK?h=7SfxV@o-Z2cV*NZWYnd^&3^QZrBg?n(?9 z%BlsrNVMokpLC*;m|84=NV7HZvMOD6}2SCZJW3K<)>PJ=D_CWJ7vC%BR^&domd@--f`so3&R 
zCMhhyo|P>-tD~fjf&CNiQbmn(G`afpP7x|GniI+~kBMQ6(e~4xu)pVkV)E z1NMB~4i6~KbRUS8hlwr(yq1jw5qKP<^p89%B|zXz%G|^W2$S-^UKh1JR6^ky%eB>e z-KK1N@kkc5ZRRWl$Z`)ZaoOW5-`K4LWl4SYoopY z4g{d<4**85Xg`Y~QFi01lk~JELW;|AEn)lT2Xeb5O~m4_X$AdGAZx&3kNQ)Pz;fLD zWbAWN`IoT%BhRBy6MrRX{U9DNS1FEyk-EYZs{$Ih3Fk_#5eVD4I!XuiF(*Hh(NxS> zarRg97MXhJGURQYt5?S~lhtLeB?SeZFPCGelI(Rb(f=9eJ|{jBvXeLVf!&7+!%!r{EhT z7G|gRMeRSkXh^p!t#s(cbAtc47G&sXq^_hindHQ6#MQ;}`D$fyp+jxFZ{d)y6c4lr zUu<0=x>j>f1Lthqfijyn4)mwVfRPAaaDFaL{{m$FWDB`S83O#1Zt~ZTdF`sv>=z1 zHqAOJX-#QQwM~p0-F7E{k&d7N31=CH?cwwbPh@1I$bfK#|{N~OiYJ~<&L3_;1xs67FQLd)bOEM);&^9zd`W1jGM){i?9M$X@Yab3+o*~#yY%mc?1*ng_mf+? z2W4J@l{X5opQk_dSq38u%0lD{f5r>Jj*YCaq_3s6G9gT*7f}Ca@8d?f9L>T4Qm%#Y zTH|#jsd?dOg%bK}9vS*Gv|sCGVU-J3P~-;yrx6Hd)wkj{C4g-L?bJm9mv*=6h*cJX z99ct3Zcs3+=K416CF5CFiGl11@w!Z)U8mm-IMT-Sa7Xz>2xPOP%4XXg!z4ga))LF8 z2Kw{3qu{9Yw$s>F+>sEH)hBe|fm={wjPVjpA#QIf+>F|m*z7B#t1-sfSj?#NjT9#u`Kb}z-4bG3n-&&GhfCZ_Mz&|`}3tiK|M=qUhc$tB3rO{KL z!7xKMU(Al0sK#n>sZgSs%9TkPR_}E$*{6b1sQ3i>WH1r;fzbkQG-l){0sxuu!#s&} z7LKkfSh$Ii+=tx|LG&qxK#!2!3Mn<<+LrzpyUW$vc2sS66$hl;F0p>?-bJL3Vhsxz zEMwpC;DKwe-Qs>zL{C(oh_&g45{9xwgEWP2*^*eMEzYr>%EDK?NMXScg6I;Y8zuQO zWmaRX%z$$K)<*Rz)*PTA2ApMf;nVD<6-ezC(gtB{F;lUg;Tx&7ZzT$VG-W$uH(&b+ zuG_G+xy)|XYuZkgZ?Wm+M{@|nF)xbsr3k^Mjf?$5< zDb0i?66goSS>WADdVo~Z5*~-4pyI%iN}b%vqb4#TlNwu?u3cXS?k+k!k?KrJ=ZV2q8b1H&PpYR~I6kXOqs+e zl9QWq@Dz>~PXw3WClJMnbTN|fwGuHhrHk0<9|g?CIzUK>gQCUW)TjVfif^Qnm4aBJ zMkk>uu5c@AGGLlPnmMosProXNQzWFmpfnwzObVp>MEa_s_d>44QlKvX&KuNAi{epu zXupOO5yIT}j*#s8<(A1S`g>~UP{T^~j>+f@*#L!Odz24gx`trrMk>L6QsDt)?l^?j z9zJZjDAmKDZ-elJUP+g%g5nfxhye~;(>7_kF9D`!QmKCfu4uQ|0L#JAvB!`G-zr%c zC9P^SExt7nlq*STsw5|DA~ljbe-~uf81yg+tnuwUa6z6D0zH&W37xI+A6!jCOAW=i zl|GW7tW1r+3<+&$UkJZnH9NS_5TpUFW&)>Sr`ev=7YpQZV;N3B=q)ueaSH9r7fa;v z#1s#@?cX;HdhRXO_u}Xd=qg}p3X6HaK8G&J6drT-< zfk}r|J;-^RNel^eIv)H`rOrqKNm*wQ*B3fP?uqN+M zW-6SQD%{UXP~j4w2?r#&58F6!P&{7Mo+pC$DD_c8W@TU)$ONvSH8Ul-w448YgQrr_}^8?GpF>VhuO&Rl%mlAUsoy5{_Pmjc`Qo zkKmHDy~h;e=I!&^?y220bU8Xdy!#(x-6HVS!YV?KWQd^x?~ETVbcqa~n7VBGXV{iV_0_ 
zDdJJNYuSF+eHgbupK$RT|xS=kCz1uftU>!p#j8tX zwh|l6&2!}p!BXx~TeSDyj#)ctEAiBB*{lixhLDu4XaSl)Dw%?HZZ{ zGydfJu<2?fkruPJY?~wE!HgZyM;w=K_-3?Y?;S-3%BHK_^zcUU&hjZN_MJPiRh+x0 zY@YFAPxNfg8$q9#N^z&B`%)#i<`z0D-qI+=`1T#cv5BLUi9}aMK7(+?8J@=NFr8}z z9BGMQf}%pYB~wsF>4mQ;oN{i7(d~3FJt8IDmPC!#iHc^LwycMdW=D9QZ`z`R=A=NY zd0Oc5F?^e^$EhI%s`Ft+Op+qtNzj&=y%n1q#M};0@*W#H;gr~n0_<)6(1)8z<{t$P?N2Z8ZlW-R#} zCEAx;Rj@{fol z@g=!ElH!~m6L64b(8=LQMci(u=h)-mWp2}~4rV|D2PEZ*9HS1SQLT5hS_=9nre?G|f?lbX90a|*>wE`ZjC;UE%CA)wjm`k|I}ubl)${{oO!HD5 z2k1_anU~mrq{7Lo0>)RwFykQU``YnJn%y+sbcQ%JUoFbiM zX7((3qCy{=h*FnmAgE=3Nqljb4QYz1`WiLpxHE)=?$FHD>XE`$@!M=epY1gdLSozb zQGkTokR9Htthjh-#N+00gCMD=IlgIZN|Fy3xS@|m-=Y#N@?l`!AzdAK7EF@BndGp_ zy7w9-_BxD@Wms&r)wxa)foc7|B@JVC@A4FIknh@f(&g%B^Qe9!EeKu!)SRx;41TV1L@Cx8&Z zUx_%4jDc#lwva)hAO5{LMM!=Pe^ z%*T4bG6(?y?&|Pxrh`?P-8Yy5W#zoam0FRd+*L*~cQYmuo4Os9yDG^E4|aq=kNG?1 zC`MQAD~PBX4+tvH`m4NW_{5kgEMmkMB_gBjAv8-8DD329{yNKS~>sC zjFn%##ITTj#>EwMg4g$+^~e9=i*{gbg>`gIZ$zu^4i9>b@+`@%k&L?-bNUr|OQ&Gi z>ttr&8D@U0OI3!#nq|UzzK1Ho0cUAeaZ;InHs4sle~sZ0PFF#~ZQWW4@lb<_PNwgA z^(+hWV4V+%u`RL0p7kYz9mq25sG4x1&L=UCLxLMzWtG7%zM?RPek8CcRMC_{D^X2` z+154qY4%2@zE$j8tXQI-!lg<-=t-A`D0eNIy;RN{sMhDRO9F)x=$Qif_b>z&CT2C6;N1`!Gx1>7#5NJBDWdFK zs8RM;Pco*C-#e^?F0-DCeq%gtZ<>%v-;=cmH zqmBH{4;Eq>0|gF72{>|~&aOE$GKLnP?aSLb2Lk{&P`8c{%rRsLY!itza01|7SygrU zy~D7xCP1LsQ!Ps3zyznnt&rz|Utz9L-%0N&CJ}~tbIlh9extd`ZSKL@aEo^jMI z)n#)zU40GHQP`Y$6hdOOo7&iqd<7hhirm`)Exu)H?%i6M6g|@?=hWC|_D8yy2|i7f z;N5a&$}e%n!1bU(GQr2ah~en`y+%LcSq&~2!28A_*W(@l$XhuAMl*z9W-g-rIk09+ z!9^%2qvnre1r(Hij^gDl!JS2-qcdxPnsg02>wnTG>kmsF3J_>N`No)EqCecEJ1jkU|=u+I9b- zko_cjl9K`x+5JEIGJ#Pgd-#ytjyM-i_NS{3Gv4T`X;Aie{<#cv5LEV~O$^FBxoR4L zn6d06C*ZP|-Rn6KbTls*VRmH}z5;VTzLOetW)GJWpvy&QcE1LWks+9GX5u$qZn(mu z0H3DKE~78y7JKIG>abCYXHrc8lsXEV9V#pF+3zmG<@N%!6AZh#>wd%-+6he}UpX?R zmS}h5?A#@~KF%ENm4XF>PTBkt=VDiHrhqlK`h^Q+h9tDLqu1){)WT^=nEqGP!8OSI&kZ@KoS#4D43^iEu{_J_ z5tJY&cLGpO9T$<@HwI1IPC1)duorW4)O73}r~tW4oL%q|;?9zGB%fN1;3;rkbr5rU z=1g)4_<3oCFA-F4w*4Bwlo=Gb)Vq@6I15Wm-C;Y*Y#N&1-0;-)04Q_?+S9kKk2^RN 
z_rDVj*C~j}^$#*k#EX=lJp&F3$9O26LI_b^J^}U^-CIZ5fy^+`_;`2FoV@ZssNRz3v2Ds zrO%~yr7o(?Mtp121W9*ihN-PjZ;B1z9x_eNPcKAqO@kYz*s@@FL&0`AcZf$Vd;+K? zhNhTDDSoGn0yUn26k`;Fhz8a6dBsCav6@OPJCaMq9`fJ~r3r&fsFiXaK}6&V6G=;j zI$&`Uhsg}stv!Sd;Q?oZ7?q*E$ivN-^rkkOtAA3{F-}&~F==*&buoF4b{`Kes!qD* zrpr1F!D&e21hS(!l`Y`f)I_A@$)vw$&lSvuootvZ>rOUdxmF(R{2D_XF6})tqCz%ePnk8RQ z&Dlh`>?C1$adB8nxusQUUanD$NiNS}7-yf$oZAw&4x1AhX0NKanc!BQwH>E1uN7Q& z;U%d$B^&EIWu00W2Ay+b%-(ICT^gmy#u_CTc9tz)Z@kScYw)>tLsqP_vhtOg{v50) zWopZ%g~>`C6}@8eiutNq!7ED=r!MQ#tQ+OUaZ>5SOR&|RT+#B9vcx$%&1=Vb=as24 zjj~CTUCnY_&1ONvb87P##umQP%w!uLS5lZL4KmFs!zAxzZrOZdY>mAv&efH&g}GY# ziczlCu5hmwzG4=uGjrqT=7LvBUa6zlIkro(Y`j~wrdA3vwkVY>$x>By+#Fl}4i{`H z0vt3X5hk$E@bC};frbR)fIJM7JPip%NE{kV!#E9icyJ_-ghdtFRVDxcV9%|)HMNVR z_TpI0_@0N6hTeBCbkt6U2;g}%4#ok|WFP@?JiLa|&a0}*s;qjyDzZeVx?0MLrl)k98W1>sUfAfcmMRs1X zcw;kPS~p|iwf5Sj@*)3@b!oY3yvVNFvUqRZDA)2TG|%x?qcI-cER8Yyr<n zTC4F{!b82PrSwiaOpU1zai_zCm$u5%WxqL@Q$p?K^lb;SOwCMeotu0|$IX9A%Vg*- z8)3Rks)~%Qh?ojf9WU`O9eQ>qMQWGK*3wxrQ}mu#BDTy-neJTA#PeceuCA_Cro_y&sL?WAR;FX7s?2n^UEP*nV=m*oT}=_SC8{$s z%p>dfSIe6oYH^uiE|zT_a^utC7hk-LPlac_C+?eJ-`%Q|HX zF{jA%QSpN&-2>QJ3xa^G z?&Ho?)<$=zP_M{Fcqr>aBeXn2S)Ka|SYU0ebyddd5b?yb)Fb@qLa|UR6pDxsMavbHcmBq zrzE%7wX`WS%}jGl(#**)lTKqwGMl8LNwdvOsZ%$H`H6~3v!Jq1Cl_0mbP_`?=g5jK zmD?0>5W(XRXwZQM6R>~<#N}auB?AORU?74B6(~srT?IfS1b0pbFtGdqf@+65z;&>9z1{);E;t_$+XflxCp+Vu%KtLJ}jl_cO z!4dI@7y!d12ta^DS;B!t0}zrS@kkO2NyEdV!7yYcL&9++i3P(kBpwY5NFxDhCP118 zMDlP{fIu3F2!{oy;fO#20$bz)gvf;lgaU-&@qh$`h9uEEjfRAS#3S)&Ffc3_7!nK& z2`BQP4#I`5V`Q+aXgI)5)dq0K!F60M)F8FAPwZv zXh>L45|BrP1rm>jhNOXrTv%9Wfbft!9!En%0m8t*Bn^5rPoseXPxH`#MB{Lp3=$9| zJSsR6PyxMj;PK!=-2%}(c;J9MsNkUB0K|dE<7hyD;1B`fa1sy^j)#Z_LIk3DL?jnj z0BIf##07Ot^GG6)r{S={g~X%rBovm$Az`86Ng9Wx0eLu{2}$D7Xw*#rk?=Gah8>W_ z1`ZC02^I>)La|UZuuwFxP&BYmDDpJbFy(Oz6Dsntt1smxX5QUZy}fp-ou>`cFuF%* zhEk2?jgGx>aX6m zwym<(s$#9#>gp;VB5c*|(l%{0MQk=Uw070m_v)<1hw(7Z$oJ;b=dWFg`CIb$$Mh_r1Hb{fdR%@!55V@3rp+b<0J|b;aU(2ZUoUv}0n{ 
z?TMIMPsGGya-Hj#`Msy*ch!=&7ZLUUo{lBR4w)y;RF86hR#gMe8BIVoVUSnnC<8$#Lc0F%w%ysPdwceRpirr=P%(D8T?8eOQ*7_!Aa{u~LxxQ*z zW=~aREYqulH3vq4iOX_@h~0+T5BU#!@^du8C#>99^$mZyW^el-pt3yS!YGM_%KS3tD7^wXtwUU zD?45NDi*wr4z-$Madd6>>$=l(ZlnA4Hh0}=vxKg@>fWkTeRvoT<6%6EMyqyOyIZPQ za4HttiwIhdm#{-9U}uXFb%#9#$y`#U?+!yqDKqdf$;y=!DwX+{)^C?XUj&6EP_(@0IhqS@GkZOCL& zdJ_sPjYJdbjMDf(I#>!{ClOHCGKO=)1Wdhw<+Ao=QiQZ0s(2hJB#1iA%iE5rH4cgt z=_y}LI5&+nm2XBo1Bc(;{H?Oo`08m)oI{H)@A>8k9dq6Zo6fZ!N5x^_rs<#>jFO3g zTO>|^cq*BpvS?d!Urua3+RavZhb2nhfyH2t3FQfe;3&i0$4IN_U~l9cwWX*;5=o=f zmS|TY(oOA`K(dl?z(9YP|_FO(}PJ6gwUj75p{6pT{9E49^h<5IV<*JQ6zW78y9ozZ_7nd=*R zI;fX6nF3-hFEo>^3miS<>Ix|e&NTp41~|;q zOQ3{x4Oa0qF@IUWs=JBsleUV)ilL#E)5rf?l;J<$gaGguj~DiDNF6D;IL|C3fJDvG zX%q?dC=eMy8s3u$x&zuba@cd^!_h8UBn4|3anR*~)%`m7myzV0bq1GWA&Aj$F{5VV zT0-zxFJs=ZjvDS*ht}k9Ra_>TD_gZV*JA$Jwq@Fst&uERxD>5&lnf4)G^& z!INCJ3MM@L)#7tK?wcb~^v-YN*k#j`@Af450F0$02@U4R$`f-d;4Z4A3>*-QBy<2q zz`0-KAS^p|E*C8pUSvq1T|`S4T|}dD5gp?-%fj^TQBO0>hJP4RTunNe)XDTb4j4iK zDU^j=m^wqW$^Qe?NuR+~_O#X%j}yC4R(4Ntm@=O(o0cBN$U+5{pzKxqXf-!aQ8!gd z!h#tNw+9TeJSe6?$CuY9>P%q7oAdETy;e%Lvz1O9hZY+4po4Y?_+*H^vp! zAon!bL@sql&3;Ww=jh)*Q-j}#F9S#_eCkOq^QRh&l2OB@DS~K_PGFQnXGJn~o+2qg z`!A&?Bb?`RRi*fRUI8@mFb$?p+-rU&gf60)PE}PD0bC76xsxlK9JvyE#;cm7OfE`R zgOyz7KT+i657BI4U?eparoc2BffWUa)5UYNq|#vQsR4$LmD(K#VNvb`RD2f9v>P#&&utjTJZZCniRT zB6BS=E$2y5H&lyOp|?=rA64@cD7wN_!I@$d&aa4UwDFFUa6FH6L*YN2Qo0r>Q&9)A z+oX=hNgPDcbqJ9t-A?C&0qM#DPd*#M5Xq_49pA|?V@3qcX^8sBa7c?aVNohF$TEDci%8>*N3(zycl+e#3mD~ zLA5a)p(0h(mJ|s|t~g~BdYOVn)~4_}#gnWF+y(LZKlB(LZvwc5%!+-KRwvg? 
ze4b}0g-|;_i3CJrZ7t&tqfZMX=1&X8=AmF|b1U&&J=jnzv-{AwAhH~djQDbsW0OKj zGN~p9n5IOtA$Ws&aRPS|^vonCMJYV81};oIfI%z;tI$VdW*B*UloK=;6@BzmqK`_~ zOTnQhh@;`xm_x(IORs=_)i}y%$GL{`lEX(ly+giXx{ziiGw!=$a!*lXOBQrhjKMtV z#$Y7)Wj>+w%cIPJkrD8CiQo~rMhPMqbNkoSQV!LF+i&q3o${q#3_aw zU!t%L7urf7iG)M@8;0h5db9xB<|#vH13BRoZWqrwICd9rerd3~1=t0bT{C1<(AK8j zfaN-}2$)2lfT_tEi)%OmGc=}iK?^Y)0hoD-=aAQsgvAaXo^C`Y7$QiQi%Zxng4}lS zZv@uhB?#)^8&`frYvM8WsZ`|##CpaGQv&&>z~-1LhlPqR44lT^95_{!0Cze`=LO@!cassdG8q$jP>grOn!aL=adPw))RT_-z(ga6`5WhYB1irEoVf z{63w>oFj--##VLcS^BWmT&xB4I_d}CK zTf<}%i5-B>=s4OWjsM)K1(PmHq*8~;;RK}t^mT8R!5s>S`8qd)_kze}U593^WIN=f z3GIj;i#9LUp&LkgfFBCubfk@4_;Fs!eMofd!^x5QP^jpmmSCn(5vUOPOEMyi&d@_G zc+-X@H-L32?u0uK_+CWR$sU}^iIMgu7;)PTfGAswcX25rBH-ci*|PEJ(|z+=ZdqyH zUXZ6iW=vx{O>GAW&J~C@GXM3VK*!;KiITK>&(3}+@NeYbmSGqOSl4K)GL@sZ`3QR{ z5meKan>io@Nlcbas9C80ovjAaoHqrZ0YC0h;_Pg)K2dv6DDc5NCYKqcD20C|2IWF%NpLuT%SWRg8kzi-;Ho3L> z1v8HFa6ET@huuvqBtw%R-kFq~^%+06I!_!b5krOc6NVX+VDyP4-qCSrji^gV|L+c~ zFiRTyY7ni=vAD3fA-Y?0oY-#^WgZb;QVpX_x|1aLH6xr~$4T_BnP+Mt7{Z9y{)U!4 zcSK8&gyA?*$}Z-*bl&4T>Z%2v{1EL<^%Kajz87tMMa0 zSxR4dc7r{o4$C@@*iJa{!5r~qx`A!wH0$B%wS2Wa3_1zfqa(!j;4Jyib&T0flmd}- zMy;WX96~N@-YsVk^~-XY5eP*op$j9fJ3|~su?5oc(3I62vd;!Ba*j^wAyq`w`I+;l z5Pq74kfhh@B3zo}UKtVIBT=73gi}ofrT?jqz!Dqu2niY^%S)o1MzB+D#3k|7NszQ} zmmu_BKip1Y$XWvQ!?@Moy3^?V#;>zi1QqV_WdH_o!vd#{US9qUw0~T&0pJ0v_vH-F z!X?{*zkJ527C%#o*BU-IWSq*i*9GtYpbHnI_Tf1!g7K0cuhC0%f-^esD9RB-W*D%?s!=79 zo3C_F#M+Jn7F}=dD--)}W7#TA(QunRC zvjGXVH`N$z(9y(Z*84nqfQe=?+wS-i;tQxaERf;65D>J&mUXeHyL--i2R>#Bp&DR&2?w2A>hNx{{e~(XiT#g{F88DRQbiQ z44y5OMnj#hb?YJh7g5uTgB=Nn0TzF;>mpZM%TW7b;0ecQ0Sw#>R*{xEr6=ijC8I%| z;v5A372s6jE_R!l^LpWt+mv(aFg@DI+SJ#{&FE$_QwE%-^*AnV=Gbz@t&lzfzeM&( zhG?PjeRrZ`bmQ)dKQs$OZ@HNJjdNPpEFkx7ZNtidUVw4r?$H!TvZT2vV=>Tr?$vR} z2{vT#prGEw5fkaIC_D=>)o8(DpOva)(rxA;6J#7;gR;k6QQl{0kE6n~Ei^)?><*`2 zxRc-72yJjq8YTVa0{wD<-8?#UfT1&-02ogwZjkp#DP)0c#>7AqvB!4dn87O{lb<5H zx{och*q7vGxo`uYkvltbBS`I}3ou)8iTDd@5Hw+QV6H_&wRPX*_S@oi&bgNcw5*cJ 
zYp<*~kS}-14(e!Zd!ba$C~_{erW*L0oaRk&VFswJHT1mjUda?0cRw5zbW%cwhOEE8 z*U)&WyOel!;8H2`Bs0)<$Bq1fqyn;ikIo^*$j4Kup7nmX2PPnT)59}Cc~WUIv)IsL zBMX0G$SCS+I%%x;8KG@*$vJ{V76qch9K-xMY$ETiah@SHjKh0W{$RoRV!4>r2)QL` zi2Q~Z1uR1i@2=9kNBp=M0CJM2PLE)0lXNLT!A-7mpe7WZs%lR>QbN5uRqTeWN&>ST zCt3G`aYoNRM_Qe8Z=ra=cIHB`eHYA6u)+%=$cBM3{7B9;u8=0dSsmZ<>c`@RXS&Id zBDY;mVj>hF#-}^0DzsY5#~))a?ath{{x(4hYBnzn{Jk;a$Fg65N^5f3I95Jc8< z_mnWE^u~K*%AIDPzIAi$uz?9u{Ma2{A8~hiHAddf8wKg-#0_fA%bhOTu}*CaBX7@H z77*GQ;Gli+1aqyOg#FgRQPdJj%zbSBlt=? z^S~X-(X&Ig-j%s4M5v3JAV8`6tY-;Ucnl-#*hW;Qtd}p?_y0@|{~vL1X`Rd~w3t3{ zq=Ej%^Z^B4>u}>x#!93>73Q$ifiBD7^h_vUEH!gXIPh&LaYh*DppN!Z2jxB|;A4^d zSUT!A%&P<6KulQ@nXiLK>BaW{I!3`~m6+iVQId zC|zagWcXrJU0Q8S10=Iesxu`~3K^2kiX+c*j)Srh1XaLx_IO~Q_1m0v+cxh^LP;j| z5DnbQ&{u^0H3>*ju&=R5N;8`^mYHu22W&CfH1z7)M7!K$Jw%mh!YK2}zaflZ^K9YN z=Q20uIKlO3>Rlzx6YA>7%~vy3GQxGlX=#&c;eoCtu9tps3(rz*g>IkK9kc4?#xV>$S`TX1D_KnBWz`axv-Rzi?xTUFrn}*ZF z*oOA9!ybw&a0a9znztsuAv1`!qR@KdUWXrZQVlLzXZY}K)T@*|7o~L(0Em4v}PMhL(#AhX0B zT0c4yA|HVp*>*GAlW$Gaw=q@AH+BYX%iqQfT=!oH9eRn*2_zfpv&(Hc3yyCNaxqp6=&5 zGz#6*)FxUUi!yc)oI=2ftzFdemR_#hj<{*Y<+_({Ad^HUCy%&g-p*|7I%Xji8jmH1 zqSXkWMsk^6iW6WtHbE;&ecVkEGkWNP0jd$nbMumto7zqruPe;0Y&RBbhXSEH;{{$+)UEWvkph(8*zzmJ9kM6)s$W~FVGKtkfiMPvnk_woslpI4@ z+@RmCEW@rD;c>j{G&gaqJz6g19l7a5LGwB`tx-fkSvFX7WF%ZDCrsV8d=VL^6~hH) zrU4aT;+OGA#vj-nbNN%*bF}mO8E?unX3`XB&O@XSU#Q=I!%9xBot)6 z;0j(*Sm!)Q5V;f9!iY6C3;|&sYOiN|n3QL0h9R6uXOmF?Ke+`H$>v z`G&4d7!IRhQ3E{nx7Nk3297-2)gqeya6w)SMtwQ@G?Rb3>MlNdf_S{*`)eZ$`S@>p zcx1upuUz;kF*g**0O!V4(2?*V+_=>eK)`pur zsL2Q?gPL3zYCydlr?cNdVJK&uRu&XFy~S12h2B@1ejynH_lzHC4@ZUahc| zHZdsDs4$1w`)P5f(Mf1$di%x}$Mr0gK+N{8p)J{L_Exv>R$w@{+8D3hEBertkPszh z&C{^%NfuHFjANbn*CDWJd@9V{VCB;|ACac<#}=$YClc#3$MTc7w2LN2gJRnn%hoKf zzhBxZq~)T4AwEKJ0m=h;3`c(D836i4tL5p4 z5ek8x+&l(JR}*r0Ot*V?>f5!F?`y!}j1p+1+<_VJ8`D$J$^1OV4_o9a)@)Y670e<3 zfgw^5mTZHhKus;Eo_y6*8mZ;fKQQt1uqBl9Fc6_5hlCi1S;IKrQ7^s-AFx9fod#qu znK9cJ)2=46-GL!UP)d;%fzgXXq`1dw2bApN+wyR3k<8VnPu$@wX08{%Us5z1x&+4& 
zO(kFln_cs(?Olrjm3Snqf-x(`&=w&{8{y3_znkwuH5$%<{0&jRq^mXI(K|p8?1cq) z!ytC05F}UwSBF{dGME)Ak72Vr>?~>Hx}1LzK{_~tmEn%C@7j$zn3IX4918?NfvR&Dc8N53V#*R>uIUYhTg&=dTk0Wal3 zygtlZa>t~o>F`?q5^QlqBMz^|qH16)!JVMbw-IOu*Os$RN$kQ$0zxIyI>K9tDnmPz zI70(-HwY#(fUQQ=SHvJlkV!g`7rr z$*i(-M>1W6qhzHMqHoL&CuNSL=%g!hm{AOQD21mciI?eY7~E`*qWsHFlSUIO>TCyB zL^@$x6SWj>X|?`3U;_W27kpkAFPNem{cS(-&Rfjl)>ug>a&P~^d>AYY$o7F4VaWUG?63V2ZlGfJEa#MtPe6y^FbFs zFf=yMzC56!Cy5Q@WDBqPMJK2i;)u zJPAqb(6+ghip0%VgIbHa_KLhyGP*7Qgww#IO_l7C;tUwcYR&u5+lFtMK{J{U`C=x< zq4D)FczT=WM(1T;N1P84Kqu6Py@b-4u}Kv8CQb>s~ZD-dAQ=`Vu~cv+Vq zY>_m0LIUN7(>LNs7sgeE9eOyillgiIOpp-=COENaAOysAQF6a8{-$&zwPJ?kb6;x< zZ)SXO^g0bN-T(`2(&E2Gt{*~zR1Se(jcVA(>&FZEKu-B=kR}}rYS(mw3k^7ke2dP_ zGu3DU;P@Lz^5THUXEk>k+ULwF*uTRudLUO!mAol_sH|EX3%Og@)~h>WpLXtmoP7FI+sXRjh%@p>;{)N zofzjr`SAi?YtA+JeQ~lTjg23!$%>Uzv=L&vX*i>pfJN3ng2|eBUzA$jq7x4ESeFyQ zH%MzC&dm7I-W_Qf9zFNc=j)XXKyj^uixR35mR8r;JsW-i6hleMl_MqH1hxrggebNV zNYmYr{nQu_O9WyCpr<%$r%R_#L5s!HNzAEci6y={o}sN8;TUDGotY6%*=MOdN{w(T zu}2<;%xjs8yAJfKJ;@gDm>am0fN(soc2s2e75j4dE21m5jFLIvr`e zYHrsg+GqAELAmSRzHJUcmQSDagRNK9+;~f4k-PUyv>PV7&CcJ!0ngu%4^w194&D4D zNUCpkd(_c=xh{RLSG>fTgA4hNMA2wCFud#} zTNuJUci^g4c z45rYp8;?ExQ6fr?oIee{9pqtRL^C0lTvt&W5x1TV)m|_zjJ+ykLewX;#495AFxQ}J zo`4_KOLHfwt_fRi&wojT&KiRaieIuyjefAv`Ewj2$CnYkk=JE;CxYIC6t_&kV-Ptt z8*kqc6o#C~xUZnm4@RBGxct2&X1ps=Ojee`GE*QLE}>`3U&58zt7J%ls(Yj$Xd@HW zP!ZM1AsdQASjg}k_ThlMiH@zF@(GHBs3p{ec31=bkE@l`y5IkcRu~EnYYaGi`?_Hb zM=i;qh&Czb+8RI)d4XFe1V1oe2n7lix8{;9pxcvA^IOIZ7fm8-!+5UNFX$E*Q2s>| zn;G#+y6A9_ZZObLdQwL9Q3&RNLesT5M}?FBi9u@jP5$ec^l;te-^fD{Zj=8^9*y6V zY8GabfBOcd!$rpFu1IiZLl1f3H2LoX5&shHKHzHwT{#GaY`6jmXG~S7Pc}*}ix9t$ z20a`s11;n5`m# zk!?EsvY>KyQX9`iV}hFmIo1YyJh52V3%0#-5*=sIQxhJaMH8IGR}QDq*32-G zc}Zu_3!*F%oY7~}Hd=d#BC|wUQL<0;Q%y|X2J=T;X=O4{!pL_#v&H_B>CF;DbP9u@5Qw z$$|I;3l01lWC~cixoj;WY|$-J$WfNx~V+P8sG`i_IJFqag(5{=zvT# z6N7Sr{SE2a5>W6>7PTTtg0^M1rP)Lyk(@D`uLSI`0Ua&tQ25 zWSMrJEJ!INKAl3pP1oK>BD(G*Mkk%E(48lp*emJnbtdJ5UIPN<2y`3!apxgj?%L8| 
zjznre#hPy?TXkIzz>FNyDY@ilL~C2Rp$WOq$L7+C+6c5rMVWHq_4_f9_l7Z7zt<6U zaazGBMy7C*7aA07wTQwkv$VY@v&Co*!ELYOu8Xk_5KbfH@x7+r7~1R$gpn$#F~G>FqkQ6 zVa8TfV+@4IQf|e{Q!17T7F(xD6EvBS)r37xiFPWPT5-SsY)>{5k(TsVCLsLmbUDI0 z=BEtugc1Tml@>%&Ev5hB6#G=z!af)Dp zd>0JHb-*Iej|G?no3zv)U!em_W{P;F6OoR@WP}!`&J3z0WeU;-^*@q8y#2ex)k$Hf z&+dt0LIU>0P7!4Z-}2|zl;d4-8k_3Qj%C#u@ghjdaE>JB1dx#B6my1iXoMeFR%wR| z!u86G<;q3hxkLz8P*|Hq%Krl%&lq-)erfYo%hp}Y4Km?b!ag?ADI^DX9?Ur{VRgNp zDE3s*8*UKW>WQKTiPqQ}qC(;0AUA(FWnZ@P>Zj2_KnzsJ9gI`AmoQg1HKf2jauPOX zgy5;xyH&dDk5O(AjoTsEE9CmeR)|OF0p(#chlWAX->K(vM^+UIf zP|A)AnVU?ffibG)mx~)Aan(^{s3#JBK-%lG&Q3>UNJOkP5*U*Ovk!;dZlX#hJbbx% zs|4;$el!9R#b6je4y7O4HWD39z?a(e!>4an&+``|5dMQJINEc2OC!ac!WV}(2Vn#z11BwpND4jYh zBy|~QDX>3Ljr9>#G2^O;jBQ!4@5!mPTAmra;yojZop4IN53}LREP34uMTSE=&`Ppu zX&8Xexpm%bC!boq%epDcDJR=_N_8{cVp~MhE++;Cm0a5gv8K-(YK^3jsxt+W49i*{ zh$T2o2*MMAvX*sEI949R(Q_xhDZ@B1IH|Hy?WiNj9>OO*P%Vcz@#w4YXS7h69;<{hq9SElZ zxM^V&m;`E-9U#g{)(!SF(0UdQK8*YfGc(-X&{tvzuV z{pwmrU)()gb)7g8(_c*AVj9=fTkB_{T4UjGfibcM7cb(5xh7F$OBsc(ylIeIj!78x zC8%7JMMjy3P=hF?mypuj9VP4Pet9bD7uAYny`r9~qOSdp5h|TidohhBqn${$BHKBQ zOwL^-XE{yAfQpR}8jcMh8V(4CX&NM8Ac1LEC>V_+0>XjOXc!O-2aE;|@uX4eW^NtR zdSDt2a;wr*D!ox@>F=XW>v($NxZlo5@#;l-r|nfFBVGI7Evq+CQE`uUdC)e|Mnpqh zh=pMk51580s)T9FC4YmstR{tKO42k5DWx#WLQ1DiqfLU#Y@&!VNri;xQV&m=uVg$z z!@T1)&9i3aosXKvvz^b!BDIi6XXi2#$L7`ra?9lMcFqEHKdiP z+$IWBn~KeO6}wSwWnm?$jp}yJj4CnjwpzR&{*-HdxBIOYulDXIt6e-~ zNEnTULdmc&4<9fd8W2nZOLua!?rHb6z0|Ge)^Y3J{nz|WZ!M}9>#IgrHM-U4SE{Gp zT7R^YMmT!6^at^b%=sf)731{o6j?_H^s7 zagA%EvpxS>y~RU@q{+qZ+bFgrS5K*=Nm9A0WFcEZ%xc{WLsra5VvDNz@|`SOmM|_pfvo@cbrGBAm;{6z+Zk{aErwbKTQ8Gy}pk81g7K($>Xt>a@ zEcW`I+J)D+YH%bVVIC)%B5cbehP4A1FRuay}QN5pX?jtE90aai3VKtMr(1_Bx| zcwiPr0}Y1;#^Qm2$Z#P8#F1fvTu8twcwib34-6%OaS$Ox0T_@DARHDH46txOE)7A_#ZAePC zOrNVY(yLn)Et=JCy0l8`o7YMlnTVTOfWg9H zITJkaKm?~@9z4jP;egP9!N_3ofG`_0FeZ?&FpS1R(J&6i0|P<^4Mx(yJRl1ugANc# z777Tap#g)jz@?9pRV{51X_1!dZOuHVqL!AmG|x@V-dyx1?U9m=A+v!6(<~H}cCJXJ zn6D6_Wlc*=9D3OBl{ie(Sf!_=i+gOT>YDkaqN-#XY_^odmME&JAUC8^K^Rh&Oc1jZ 
zTPVdNTSpdlk_K7caBufbJIg-YEfBYlAZ5TeNW=hJsyvyUP8)z)>17D#JA?U z+LE8EnyZ;?UZ(yIOCSA^kqRk)fVc&zfQ4*iwD{ry@)o z&LaX#eH3rOA2gKAb-7xnl5DN`t#s1$@|at{JcUOu?V3rcntoYufWm|Y5U{|&1PT*S zaDc+$q5BKVYlLJo9_Q(v>F~#iK8IIpyQ}q{H7e4rp{?ikf&--;?oXQP&}vouxu=y{ z=?|GE4os~}B|ZH9H0gYH9*NDfm=QjxH>dsh@=19~Y3D3aZ_G7@=Z^&C)$V@kxHjMb zJ3oKyEI0}Bnz69gx@nAc5Z2m9i8i=n*jZtzXa+z+01^QJ697TNp@29%90&tpP#_El z1Oj0&7z_x6!C*ij3=V|iI1Yzlm~Rh2V3w`9G;;sDb^n&|h482SXWOJ7E*gFob|UV7 zEopvm%NDBr>NhX>%XLi(rekcAbQkXbu~Os05W&ZfvA8yYqc0JJ#3hw?kZC3x=6@Q~ z^mg)&Oqh_=%JuKX%|n(Bnm>T8W*Nawfs^t%uwEHFyAWV)07(xscfPbgfSGm8Wm)L0 zSOQ2YXbv@SkMx?*%RF5nH~CVy8l8bG#;W>hEnwJ`g=a3_+272V#zwfRR7e@|_>Uhv zH$ixxgc2@?9$6E3#-?BQ9Fj{*%6|?9SaSIQA>axD)4oHV8W;=U@z|sT0#qAVAcDr5 z0*slz8SSV+csdzgQ1O?tqO4AXG1+bi5BAfhvan?JFL8)T#pwzabh$inyI^?kOmqZ^ZOD8pKf;+0IAF|OkM`!I<*cY zKNF9%PoUOr(6x!6+KM+=OK;z&$jFsgJH`xNcXjAKwfr`Xq9LM_Y15bHjMdpGcoy@6 zdXb>m3PTh1f%~4W^~WKnkh(;)5lYlj2`epq2ar{L9}8WR(v|QS9>1|&u>xrqug9-_ zo_JfeLO~{Q^;A4?l;|2l!5=LJC*Fbv_D$OZc7X%CfKY>%03;Kjc}s8QNmR3dMR;r` zD5y3(k)|G3OZ>1$%%RR2ApQPox=9UPDN=beMrk;^ak@>Bk2Mv_-~_NIBMovtjRkU%7kVoP zF|8nXBlC`AKMmV|QX^ji3sk4OWuOBz`<(vlRq1%E>kDX-FJf*!1=G}oysp^9KfaS8Eq=4-`7w;=0NzF)(3=3ZJ*{48g`g}5ZofC!WbKY!!%H+yJd9H z>INh>sHW>k19K@Ev3?f;K(kjo9YmEIg&>7d)U`uuhKjnDVkAZ{W$PQL7&Tf{=~BqL3sj&F>@xpaF02lP*~kY@n$z?EY(Ti-Zq{^G zO0&bg1htr8+xA*}=nqIt-F@C`8QGrlikE?VM&3n7XkCM!&?W2N+WG5Ew`n-7mEJwi z&C!Wd=&gjJoL#@Y*9>l9B8RCZQjXQ=>iLeGkr6kM3J%@g*YqR_2IvwBJDo#KAE4B1 z2qD|x1Jn@+6+9iW2<9}*gS2{e9FdZ+aWY>L(D!?SNZ|&eNtLw|*fn?v-%HRCZUm7u zk>C!4Q~&kLSY>Mg*%84O5m80-X-4xAD=TDpjOA_WiF894`>|Pz_E)(Z#}bN3=>XD! 
zfZ7qshN65wU!M{tk4z_%&{?H)r}kAn!@ERCV%m^w#R7qSpsIDraPcZiue>jyESZr_ zRW|nrYpoa<3Z$1DAT5(JNo$j5^fnJc$L9G2!aUC?Sls5S1GL|*P3BtRD~RI`|7pLD z$S$`14_n`*O`{QySUQF+21PLKW#ZlEevA{Snj~S98F0#aw<^P6{}@0otvoNAtH%)^ zX@aW$6fS2UUoN70gc&L*E-;PT-j}JDo}yxA<0dJtF;h`M1;gn!=>3zNVwb+sj$(3k zbks&j<>8kY#YhY9&~q^q>!%4tKtG8MOJD}v`j+PyCIW>dPFfpqF;s})E8w*tbKq;> zcVxVTEb&n#(sHeEN`s;1G3mH1gqKwfan8BK?m4x?gdXVc8xsW^o#&%+lshiM zAq%x@B4Or^N}6|@jYaqh<2vj*zA1c65Ho$)HEUo|`KIdm?&1$y+*{i{%{Cm;JZf>>T=oNDZ!aLb;ygC6!$i@Xi^J)dF&|i^Yd>C92L)^mD5+u%{T` zY>i_e!(1_U0s$bG6-ZPIDu&WC+HaGl>TZu)SFBWvp}#=7oeDUH@~NdGN-5Ba`B&70? z<~%QObsEoCXi1S!I5)m&vr4t8!+p!?sx+%)?)sS z+rPF`Y0{&j*!E~-nmotV#oBBIb=uto73f$aPDp_xa+8wtfjS*jgt)q!pl~8(JOh{s z{HvMcQfdR_g^SS=`GrGO??6or?Ul{H;78Ha0F_hT$F%>hJ$Zru(0~Sj+wrzDoF9#d zk54r1tf>Z_-rHxwM_x%X1)yCqTYYy_Y2jonk!ODH_P#R(C_q=mTdEy{a=!n2(=;p~ zDn!sjXJ`DpY?SSwddpdHWKW$EB~Fz0C`JZH6yVZZGy_8Vpr09CFFiKBFu1mbQ0*0P zjUjF^;-s>UN%P?x4{aF@q-@FW)X!OJa{W4kYoUlti@e~!h3w<`hbNm6(lo}8(oV|G zgibLRYgFF;gF~@r2gpaDH_3)Pw0>&>z!DnI-Wu1Vn4)ll)-r27Qgf#&-S8KL@-EqnJ+JKG9VVX4 z%Gx-X4=BJAGA@uuKIOZxY0&TYEJ;rR>=7MrLh zkKvn#$57gzH!*tLF<7+OG4M=O$1r1wjzM(I9K%OTj-lMYIED!WI0n(L-WWa#H-^$N zv@y(3YzzX1))@X*sWFtrlg2Pn&=^DoI3A6=i#G9V`KQMr`lhLsd~IrD5T)w#AO%)` zGllkJFc!e~bo&kEwZoVJrkhHybR@*t=?>i0PMmdnmqE_VuvG7CDQs#OGg0;;WfI*< z)Ja)ct~lXpM}$O|h0=x)SlZ&9jOf@cmEPOWAYVFA&9?NYLJ*9#%w1T5rCc_lP|2CeGVKC6<)0{u}DPY4TO z+BzggWWqXHlH6hV1@@2Ck*ov5BPP;}uWP?#3@fV_KALJ;C5B+^4cZ&&zxQNc4!X%O zGxI$Rc0G;kfm<9LQ5`{Z_pEQ)ASjU5{@P}is z_6%pTZjkv=bcJ{%~xwyYT9PuN36WRb--xSRzTkb z(`AbA(kCRts5HOz|MT@5N>9Nh`7>b{h-D6A%n_ArWBw4=#$`tbi3%U1!w#+xP(&HS zAGLEkA{21LrIxXaB(Kz2&O>hOKW7JC-Q_Nhe(mGJM4xANb9^07u zF2#P3v0fpD!nNWw)^k8t_iTGZD;t7eO2H$JCvYJ2i27atime1FwIzKs>E(QZs(zPm zq@?Ok)%*k|+We8c4~_6Y3I92k7d*qs?YEH2Q_JvK19%7_$*v^U@gR>1K-RJz-D?sv z7DN_Qm@@$iv_H4slA9t8o?T4UM}HvpxVx^Kv_=Ncus_bJe4r=>%Bsj9pyqX^nGAhf4uOhEh|5}bzw1Zl5OrvaodvgbuW5QjiOM1=u( zk9otLSV|;9I$iQwz85#qrlE>ECRx$hB{nsrw-PN30TG83r2ztX+Kng-5QNXW z;b;AGj@p$OATBDg)W4yOWAQ$N9C3;*PGb9U?C9Z`EWi3GXdN;3Q@lrc 
z$oU_00&G<1=Y)As`Yd!SWzqhJf44f3{1X|g2Aw$Nr`X%_r+}ca8AKnS;{zogz8XAX zhLx#dEkAIIZB?J=PUpE1!ENLAZ6hyprk$#Z4EO!|kB%2u!HW8A-IXBLZd}fs;2(8e z0;F#L{F&DPsf%=BHiC^GOq&LASo%CWaV>B|bCW{%b{ckm1{E4bxd!T^?8I2gjz=CM z?8O8(Fm^e~ilwRxR!N3H>7^(yPg2SdmP$h&omhLy^Hf1c5Gg|wfTRblOq||uty)7F zpTAEl{`64wV;yz``7xHEy-G1kHHYBp9nR5vXB*<1uYL*{ZV9qK0TY3}S~Jmr0%pHU zR;$>>wQx`3Y5UaLWVe{fm*N-J;z;;}Zxgd+ zY3Pn=RZDPXCk5+AU<=Gn!y97K3pR#~FYq*R3+sTbtmOp>+Hebn zv0Vn3>>1q1u&!|i2@r+pbBxk9uqN2oHpf+vlI^{&L85L&+(xy?8Pyv${+B9HIS0l3-bf#s#P<)W~~8Er#0*xP`dR%r++ zZfJ5O8{n{n+fT?4vLi zqlzm@L3?^$TN|QyAiselWR#}-jY94wGq}KS@JU8)$&40gDq6ZR?iT*wakJh&I=haw z0N}mDnnhrYiLu$3=P4K`f>J(F9#`LlWV9RGKiSZT5Mk0?q}UK_jDTxKPIKboQ@#E@ zSdE6Z3M9$YbUU?dpbr7ptA?Ti*zhh0!c>`LDo>u*CveH5 zb)G{}!b5X?W0*q3{u#ztNhg2l>T72-bO|h0lfLv|?$CS*s2(l6Whylv`fy*Co=gv( zs1$jvLtXIIc$(WNmeSk{F5K3T$u|5`p&FjOrfi~)T9KiQ-*6xrR5~Wj(DhMVGG^2( zCWuv;*T~o^$r(=g1*?2#PXOy9LjhcL0BwOs752y0-mHbJpmWafrbFz3Ax~Ybf{_c< zw-@^IH4xxgNI>U)jr=QsRRFfCa)D&E$X zMbRq7^s#nXPE?GYXAsTF=9G-;28cY9ij$f_5qxXH9-{(R1&BoEY5-TzbRVhN9BtrJ z4ZL~=DiKZZ$tIE=m|?F@zHR3bdv+Pr>ZE5rO@R<}k05HKv0QTg6=4{ZnYEn+$ynHf zcxIb`Mx?Lr%-fFyp|)C}zycD0qI6mq3+AsGu-D9K*@ad1OO^;&U%kCTw}9+-#y15- zRQtBoH~m;oOuh0b@D#!7z?`h{zImp+YBWKJJaMe#6tMA6PAZA%GQ=jjAuD(Hg ztgNq?Ss-H600tp!BUYI84at(6L_=DOm_&ZK3RncY0fhiPFd`_8B?`~N3Mu-+gE4Co ziAIdJPHYpXX%xr0%4bQ29kOPO8Od6GVyr}wXi-y!C(}k+oY&_Fqv{Mo)7CT`gBs^& z^bD5{k`v9*VCGLtYIp|VTAP_D_^mK_27oGC=2OY3s}U?>n*o`iQ=AjeAm?UH3gRBi zVhPHCS5p`Oxw!rll5JcCZ%(C1(b^djm+C@9k0Nr4yi`0G#cfelH>MRGi1I_4V(lB) zo(Y9IwslJBV>U;|`=*)vZ8gHN#}`n<)YwGfT3qQ0y|9$FNAyU+9E2!wgQc(7slwN& zmkzfY1We-Q_F@T;<;B;w1=Icq8kkTAPKx4czd-^N+uV)&wLFdILZ;sl22fyVv*2?@ z0U3s)5ELzS`~Ah~+_2LDI>oClt3^uh%uy$W*u2^NxHs^b;t6@C+lOc^A&4TpGGO#sN^}jTGwfH6ABv zx5onefRfx0f6#)*tq;{>EE~m8aElg9=VGwpHJ3*9!`N|YPEgu0QN!#i8lC6CXK%^? 
zYs5!=L%+zd7g2ojWHQBUgsh>M451VaF#iW}*g5i4#3=&>1nCYk!Scg4bYvge69+jhqe1z0Y9@`HXp zAelpfkaMKGT8;y}Gd}M-pPZ!J!ao)}zIxwB(!>~^3Vq+|>%F1rqcfXrInSdS)M{K2 z0mjvM*VS`ow@Fhn7PpTwsETATXM^YXMcT{B7AR0&Xsb0XY1M!^gXa*RX_$zO3!W6p z7{A5_nG3;PT?-`t@>ep-Btf);oE;?+P(NZVi1{+TEjqj-b@kjRR_Yok(FYCI7*C*j;=L6IhR#ysJB zbs8KbSuK<5hSTQSxw{@repk|n`n5qC=z7$4N~_i#%vAUCgik|ALYRq_1?^X9)IT!a)+#!s{OG2qd^MSGQ^#6;*dtk5J0Hq%fQ2o3f`56{SyOLN* zKqe-&5q^%OzT4oldOxBBCoHW1dZ$bjA|MX}7yz>mb_zJ25R#D~d||cz0beJ{DTvy{U`VdvG%F>u2))jq3S4T%A?we7&Y|E=_)PiDSAZ#%ZgEXjGWl;q{ zC{SS)ij10YB?6HZzJP#m5QIa`d003+<{qd(hj9+*K)8UQu)wj>!cQvhPhJXeQ5-{KZ0tRS6zyl2!fWQI`6c`7juwVzH4+u2{Qi6288Y)&0 zSmQ7jh(?g7<$xwkAO|#sLxNF3VX7jjqsyx3P(=%Llt4tJBBDc49VuKxIzu!Wh+2@D z#tfavG&U2O;;TTa<9vaaQA`s5#@RLB-f%l=Lx{swEwcaE2UO&3IaTrxZN{ zabKBC)gx0!WgzC^BackY4?T8GpSx}bVjKh(uvBo=_1GCIDrgc7TQg?g5EjdnEkibp z9^#DE=n*R(GIJ(n0d~|;M{@QU8J|63Jx77%P3aDxjS=m%$#IRP1r^>=)3M-Yy zQBhf@_6;u?`riG-w8mP)isQ5g#;4GE^%~R0DCHI)*n=BeLlwqA|Uq zDP7Z=h)7#xQgv*Nk}1Zp9c)Q!krq)zOzL>iibOg?w??Q=h+0C@G7yvjK@jn5BAyWd z58RowY+7cERfk4oI>y;BJXO@>MwL+OsvSaCGN#}tDp4_xs>W1JW2)1bikm5tsVZqq zGgLLKh;*sOfntVK!*NqK$zq!t9D@aMCMy<`jolcp;|i*b39MwuK$LPLM`zb>E|3{F zgSc!4%cg8NBf;2Ir8r4FJ*0X>oq@Pf)C`2(ka0veQgTIS6edzF(cj)7=1vKzP4f8YJ=@2|f8-TT>R z_p|%i{p_=Qzai{q_aPj@x#i}~oA-CAhKPYEMKDugRZkfRF)}Lk290Y`S2r5OqteEp z-W1o6i>9cm8gwnhuE)50jNW66voQihc9KLAS!D0*B>N@F?TgX0Szj;&0Zd1rEWEZjv+1ZUG*+~-FN%oSRB*|XoWS{Jl zz1K9^C;QFG$v((F*k{_DIejKMGP_KfGrP?(voTtHHz|6=5jJKYRK$$gyUZ~M@t9*6 zi0ft`Mj^0(rGkT~&Jxtw**kmhGi!FjYId?%hGeE@bz*83AvHUDXYW(o)uh>JcAA}L zpGmW37s^m*cA7MMmpPLrVKZr_^_erf*~z^7*{|;Ies`DI>8{Is%9JVdY|egXAgp%Z z5reaLc7mL}-~FV?5%%Qloc-DR-OtXB>X6w#hODULhMCop2>=Z(0rMsaBmfi=js#K^ zMM;e6rVbMTfWd%(Fen&?0RbR55Cnq(U;qFN3WR|`P#_Eng8|`yctoD8+yekOrLS6B z#bzzw5*-K>DU_bF|IjI6@@o$~K9PFLaUijE56m{q5!1>+mF^@@69Gg^*KP$lS|?75 zzf*nh5uMc7V9`*0LJgDH6Iwn9@;wWd>}z=zfac88`kD>|n{L@oRU+GBb1$thL* zQm~j1l|fsRYOpB%hR`VpFAXb5L!N(S9=!nqX~6TWaywR)1SnkhSUn|K%>W*t%Lh72 
zI3))80y2d?5aLZz{MtOHvo|&Wq2pMVBPwR29w)pX;0W!X46YNaK4}}4gP8>`6OAf_b^k>>UjyLKNM(`OQmkTcLN2pDO{I@8v0SX+XP%YL<33a1 z3{D2befzOSIP$6*kPaQwiW4OuZln0mlu8IVOW*NnO6KPvNQ40^TyrwnE<^1zscqEjYQIOiG48Q?2K~zi-0dNOT-C1@mF< zA`lC+F@bnj4GBCeS8^L-{Ol3RG(Ez-5Z|C|W^Nv@srsvQh!VlYxFT$GS)LnOVHv#2 z-cKorT56)PZJVCmts^E$*rolS`eSgRWK6SS={rM!d%k^Onyg}yO85q5-``Wll=NP8 z&3v8B8zZ z`iH3~P6`B6I)UX868@VbFzkl4Yx%_S0$9#zeTjIu0oVta3^0xJ;}1nih-G)ijs0VJ zLWzN;Dlw2!1NiDFc%GV|M6I+w3nhMD8GXJS52%Ch zfzG@(=4lY|t^Na}^(7-8d0j`q$Qx?_F5QKZK zdX#{g%$Nl>MjER}>1{-ZvzqxjWFj&*6SLFfuSXInd{ZQXp0aQVj}a_Cn^uFebTH>0 zs9A&&PjhC!+4wv6f$8dh5VwzHW(_PQe0towO25Gk6r(E@=7tB`i5msF%dX7*_YWkh zEf2IVV{~!gpC<6;7QICA%Mn%5kW?@Pzq4)iv;jr3A9ga z(-(B(HhhSWBU+ALNIJAMywkJ5&HpQ!Tck`l(=Wu}r0*aCW1yiBYjJS)S&gJ}*+M#A zxJwgElQYg(Rs5kRMV?mrjHFI`MhM7ThV!xg5%ny9%u8?>_8y`RVW7YVNJ|*)?K~t3 zB{oMdI$e$kmzIguY2_pr?uXgf{fp*2Tbg;4cu*|RH=2=ph|3ybB5b@VMF}~zWD899 zD|VXaOzDn*-Tqavpz6aDbr->`a|1tI%|zU1V2u{B9z#L2m~}E+nWqC~I#6Y_sq9JO zkti_`qZ|IIlO%$>=PJi+!MNY_2bcCr)BQ|*apvQU?^-p#DG~4+ufqZPQ*SG)oHIZ= z1a-s@^wx>~lpbyy7N*L4-n81*IEpFl6Yv7Gc&Ls}G=OIBnce(dT7TfZN}!LQnfU?+ zw7^S_2ilLzM6MWk!a{1V@>8f!e>WSNL|&Z>6Y3oR$KuxfJ4Kes{B89p0#mz8;GAH@ z#fsIR9n5CKP=DzBezYTK?(}Igxl1{5TKyub0^ZuPu7p^lg4Wg~ zy_V!V{-HqQLVfKC1G#meB*dMl0eMh`RiBg=ZRw5b{!75uPUX|Iw9K#L4<>%_Ab=)3 zuE&IDa1dSCs3HLS0}R9uPk5WM{%X?$QGDq_iAWwfGsPXl{O#Jgl!AzU>=m0N=cc$A z0gCVoOv?<2EbO!!bh~|!uE{QtKc7MpSyc!tq5W7C;}az>qTTfRB*;AB*%9Uvk2OUk7a`aEV=;fFo0w zlnfY9$rbT@4qt(Ch-W-~UqM+VNZpA+=2V5*dd$OT1375IITjh}%;J;q`FomsY|q>! zq=BbkH%qAlk0_#}{o+S;Bk22#2ms1ggGbRXAC2N6a2%aCSAb%K4g#8ZSRAdsI3vV< z?X%L#9-UqUYy;86UTpX%2nfG6`4trb(xHSFhzYGpxlWJ^`>HH|>E0OKgyzmVLgc2K z=_#Fgrou+{JohA8-*mH-csB@zjk6*(5q+wa$8x^TZ4?H}>?gsllO+P!N~e|>p3@2! 
zoI0`9+zGm#P-a2sH#qtSq1)X$49*U5md;SuU#o+piXNQ|jg-n=lqwp+xR1t8$p;|8 zafVnU5GwcJ9(H$hhcsG=Dj56+MwIOqx!y!{*~BuUtuG2v>l3x1m+DuWn?z<&>l0a{ zlU@aHaUe#DZ_Sw)2{ss@K5i|q?c3JSh%|7F9&3}`<{}DYeu-K{Ah)PU3rvi0HkCeb z1lt6el0Y*E$aTZ#aXzpO-T(W*X*Kaxl*58fz>-zTHIv4oCo7Geu@!a2Opz7R6hkR{ zdLt_&W=ac27MRc0H2=b}%z0j3=R&dG5sVfB>!^q4a3Qp_PJ*2sOd?PYAGJvl)Y7ns zJL$xH`NhUsEpeu$Kf+IkP={<2JKSKTWruSPut_YxbD>ro4a52z*lADO`sYrY%4Fj~ zMBr(vY9jPA(v8Nf?gGo7u)`5IhDMH5a8|HAy6R9LjhCe)Rlm)f~o*snb3B zObF~)k9Mc{Jj=f?&`&%h%4_4fs@VjZ>SIrP^4bWFOLE0d)Ij|XgF-P?n)ywWZh&yY z!CGgG*IR@LZ14yDKB@0O;dvh+cgcEXDlkC?wlA-pPUhEOuHS~vEXV!&4vKg?HV*Z?YF44uzSnwSMOHHZ05e@elaoDORUhHT;$ zgk4|%uC^w42`EemhGjGql|Uia_c?4bdHG1&y#oOvnEd@k>w29M{_8#}cvngLsq1D~ z`Nq-8x7(3qe`->}FH-LvW7wE6Bd8SF5dNhD61q-p(tf=Uv_|=IDBr8)%IY^9^g;w& zmQhTb)Q4`Z%E@F>KyFzp)rG5M#4&7QPzrp`PD-VJJyP^+k zLMlN#l>y(XgPxL6UP$amHc9tt5j$1U z&o=vpj}JOZ2&>d7bAPte6e_Qk8XOlPd;arqH4Y4blo8V-9QAwBe>@$HqE|&?k;)MF z{Fvfj#2UDfFDroNj59qB=X)Y99n+_%4Kfu+%A29doe_+7)>E(ubNRoJKN?GFoI}9? zJ54j4&`got(liDn(+Q7Fiav!M2!_3G`}(19vkQl~gVBpgDU}+l2(aOl2*~q7522H$ zhqDrLisdsG@&=_re5ji*cAWybl#@{fs$CEF4OWY|>EP5Eo&?q@a*ra0l{h$C(g^Sw ztjwRJGZ%pK^BqW^;{`U=yZ^2yWzGJBN>4n7s1YC90U}Ds6x8-Ytxj#kX<0PG)Cj#q zE!|_XimVY%fbx=6p}qYC-kHSXr!(f<^!5&h<||{DW}aT4Pf2yIs;jvjRt0NEH2N%7 z2Rx0ZVr2*`L+Zev2M^1Ns(0V{+Jd6;5f<)9_EEZ@TCpSf;1>7PBsth&xQO`Us>~@9 zauK6)goKY{s%{9rS}^c@-pPtlbTi(&)!W?quw>e9icT$D2~EmOw$K{8HeY-My#CqA zNkD~z?KWqTFOcr`zx6$ar+|<}iznaFldNn7woXn&2t05>(l&WW4+WS-VEa?iqCXNw zeF!>zDk0OZK8+-eQ+NNKK8tc~3;(?21><_40sRqM9Rg5Sd~h#q69{h3-}=bMW0j4;dGXD*mdy=O5l)JWAn4J zhNv}-ekbN&lxND;AFqf?ZtmYZtJ$z!9nkOxwvTi&q&xgFp>dHBW`h!|4jonf>q&!# zO+p?2Xvh{N)0^a05`9MGB+}Yb)(fQjZxx)uuz{jZ0_IV((Jkpndu?D@_#|NT-qpN^ zcHKoA09a?PgH(=b03N1DF#OlyL`NQLfQ0dl@fhOfVx5H^HxrJYWK^oCfvH2_5lUro z4}eBsLb;>9dtqq>>48&D9(CdV%e7hh_Mx**oIbFGZtzx{@-@ zilEX)X6+SnstXHZO8uu{Q(&|C`A;Fy-o6f~L3+r+#h(SSGbzz=ym*aILK>G5Q^pMm zcFW{@n;I@~u?P)=3BfrASC59G@!_}AMH`wJ^Jo8aMI6ylO?tdWfpaM03bh30iD4a;w3IFI$nzuLy z;b~+>0b2YpBrCvD5im+Lwp`7C+2}9f=~|3NXJCYMic9gGGC5^pl+Ehw?Sf#Nq9_Uw 
z0i6Oy0`dVGA0QTNNKoL2;0*~-;E>P(0`X7*hQ;GU1rq|ell5R6Avhwu4gv^p1R%iz z2uL{400#v!ED!;Q42%p63=$M9T(Dq)#zO%P9UKWngM$tcssM%w3UC-#m|y^h2oWMQ zXwaY`fDRH4Jdj~R1`84pNN}hy0fGw-G|<2V4iSnxm;wq0h=*hx3B^Q6#vV)n9XdEl zh=~XR93UZqfkTE16) z4o4DVB4b120uo~5;jnO6WF#9ZFgVD-xS-&G0fz(%ha-dsjSC7LIzT2Ij0m9*c1OyTmD!{mae0bD@34jAf2MHRG3WvpoLjwZQctAWfK#}2s1xJMn3q-Kt zNE{9c8Xg`N9v~VXJU}{dIFgKw4;mR90yqj}Bf~;l0r}tpM2Lz^dxgUGTPSP&9A0ylSeC*o&G{>&1+^m~z z;-_x5b>2!hOWz`r0wGD7bw71KbPH#rA{k2R$X6_LI}+^pn%d=_jX{`8k}!PCw^l zuKg3symCY*_rCoke*5_(Y3 z7B_in)}LgzvQ1%VxVijCg;vf}_oMaWK4xWhc57}5eHa=rG*G1#nsZT9S>PuicNP^C zR%GKk^MAw7Jn?l!M?^;RtD>tiM`U&pdN7%plY}_rYx(kpSk~h^^%bG>a888hDUHqU zJz0M_)nn1CLs?1v&?NasG(>2sQ&Ihh{u1>y>p_G|x-k-I7D~vy;zm zs?Ryky5!u7`5v2RZfx3}SWG;Uf`}?CsAjf` zt7#t<_n3fC;M^8@Ir+efY*NL4kO#%#l%WMh*~CrM0#Z?i48-E6Fo;#;gK{h(^msgK zg3jrF>TwJXNFad0!UP8VM2xr86waG1Mnz3s0=s)DIsw{#ajf1hOQ2< z2N)pZLIMQiK@qp&&FE*fqS-wP%e-3HQiep8dA;m2GiPuHXK)5*a0X{^28%rC!47sX zGt<)2($doM^78hgHrp&$!6r5%cbFg~g=mNVosG2-?}~bNb$51FaorTl*<6zdpPO~R z%c4^$zQ|Rw%xK6rF8L+NHy(e(VSU;2n8q61^~?%R&*KLO8=uHEQ!~|?*0kzKme*vW zxa+P52&6Jocsw4*&dwBhA=91c6hkdJG%i=Z@zDj}8>8hb(=tCios_MW8anX>J(qPy ztn`B|cMuvzr8Z}pvrDwh++Zbb&EI7ug^A^Q*4SOh?1;_6GFVNTs;R80!J?1wrBzxw zznswv{mCrb;Hy&Rcl=)Y5y>N5C67ieSGAI?{u-(>c@VLnp)oXIXu!~bp#kemI9H~u zGF9#AvYfX#mPxL=k_uZBp6=3TE%T&L`MNo?%qmNAQ9X85vb}U<^+U2T8{Mbhts7^i z4|}FFJYQOE?2yug9{NmLPMOA%YzU7w7S&DO)p{S({t*3aOs~!BVpYS?AfZEq%<_(< zk!!wL&02NX(@d7^T8D=wti!>&8MD0f6tzP(xAOdkPL*{wlti}E`_d7WjP4^J@o#z4 zb7fY{*LZ|e>?@0!x4rhlu|q3WR+iCO8ZQGQ0|T?gIdwKRwn&Ht8zNM5X;PFG5>muX zO3LFv9i%F)%MH?9CjD{o9o5x#s7dV%C6P=d%)8AQDqcQa{G@B>v{y-UTkLJ;m3{Bi z)0Jp;Wo1oeRx8gucO`~~U*_GEe$zapx^vHbk4SDe9lL4WO{vbZ_W*J3PnPB}-HtXy z@W&rN&FAxTbJG%$E|Z_$Bljs4)zMK}re)fTd-wDC4Y3f5rZyFilbviFsZGabS)PoI z$az}j$3ag+WXJ*`LS&zv)lMT59B7PnnnaG0$lIyK0DMSf#Rc=>bDzep!BZmsRHG=;m@R=W;IRvS&T(Suf`@a`v2ihS;b- zshL%kmG#70);Y(rva;n~S^QK>lQOG}C9A4g9AlElHC9%OvzRqTq{8ADPuR?67ImzS 
z4-hYxSya2oPz4ztBCqC#NLi^!1cg6uhS+c!YZ8f^npuUWG`+kWN75!YM0}I~as5Daxy2gu+TQ8DY3E=lQJm{4Gl37i_=*{Lqn`1q9N8)p*u{XLMOa>fT%eO zyPPgx`LdV0+~sw#ji56y$dbrZ$;->j@Bquy2nh+_2$g1`1sa)!mX=SK!?N4%<0+c< zw3j1xLqxNoX%!D4)|D~ql{Z=?k|mb?|NH+}tf}1X3L)VY42#{MW9Ttu$j}Ht00aO4 z0048;000OMNW!s*FifN%XOgi46*}P7UbFdtaswk zoLHw&LwgQ2OmJeUdP*ZVF+qdo(nB~rF-!OZl*9HD1t=FT+7!oTa8plXN{A2bo9cl{7 z(arMPJ{6c(AB^zxB--!L`!(<$nwDU9etWH70lEv`uMKJpFp{`BWG zvC|^ZE7D9gl7`(K<)jJdF#>rkfkZ1jPn_7?O6U;KOjHQ>QU|SUM$Dlb7eY zOtbh`GJ;&vB6fW0qj4&Gh~~%U<|r^pdYT(S`;6QTDEG~{?vS#L&_nsTM8DNZwPwXd zY1V`9iQ$HMt%8~kDVhr48MRtkO>?b7qt#79kXHJbEURv=a(uFic^1Vc0Q@Hip_X%} ziXrK%zU12RGCS^T7 z%cSUb+=?J$8GPrjWe}G^RU86-nQM$L1^4iMne*Obw4l=tYJ}EoFB3f_#C*GRwjG+ghHckNiaMZ zQ4upM#)mZCy0dX_z(j7O;w22}c$PU+flZjDYJpVZ9VVcd$ zea@wMN` zV-`5TCQiZh2Zp+SY-d4u)ER9Xcr^ z$jK>tn@VSP6ElV8SD3APr=8SQ)nFZ3gv1cfS@UlEZf=Zid8mRD(tPd*= zy%WM(lDiif3duW5ER`V!&zY&=wNE*7u}D43e45F0$+Z?>y2_hzW^p*HAxT z5(`E%ZijR<&3Xn=>1Bq+FkIvqF+45WN_dG?+&>lZq=_CkZ$j{q>3@i%+{!VnMI)er zSBdp9%HU}xMUGA8kXmpdwYiHThB~C2I09vwD0kN<$kAj?;_XfbF#qFsKNb@pjRj%j zprt7#2^UdE)-uFSNS4DifTK=cdz#0MU^m^mYk_9dOswLFd1^AvkfdEmsn&tZS^~ZW z1{{~mXDdAKK<8$R=o@p*Sv&zZ(I!*ys< z{7^kCdgJaCXYd(la0vf33BadJ8^BYui^fOWhVj6IHM?i?iY6zjS9JkN z=ia|dXmAp1%N2v?O-ea_&=*bPhL6zphDREWF47yJyXdl+jSGthlWR`Mc>p~yxaL8* zrW17C%MS+PTBu6n8c5bd3QwsVcY{uiSNQpEP9EZwfQSRloDX{z1 z!xA*l;r73pJ>iv+8$Bk6`Umlo5V)1WH;)3WmRgy{tLp5hCN*x2y?Aj*X0 zL!j22%K^4Ymw_Qm7SpJIs3gDkymi%+B~Z?UR1S}G7y=ta+h)!vEd1$S^u%YjGNli;mA=bhUwl82=z93GtnaK=sbl} zH*s0AI|+f({9A%8iOAR(3=YNk$w-oigTfM?UI?qHJ0qFr(Jyln8XBQ;f>*2}C|_qV zk{pZ{mk5=R;*2{!aa45t%MTf z+jOpK)^&9W=8pOiJwqJx3rizzj}{Cq*im?)Kafaa(cNi?91bo>Aq5pRl!4Gqtb#$3 z<1i?&pgmB>GAOBS%lX@1#B{36;0sQXa*oFf+$%tkeJj`=* zT1JAp7t+$u;}i^>coCFFkfx?}i7zJFgq){VRFJ7a;R1C;xuy166%pd|pF%g9Vdo)A z4`a_hp!k1r7YBT*dlDlSz)Dp!lo;dOS!g&ke+tk!)NWwJf_xAHQ!^gBwaB$i zu={@UF!?<)%L`9m%t>6@=L&#hEK7{L&{T%NV!AkyQvQiaMT@5K2p{$_U)jXcF^eEr zX}f!g8r*;1|2Pl##gy7PMe-*dwt4t1zZNzsw`;yR8nVznF$c*T#l(a>_lh6EJjBMM 
zv+1B@HU`a;smBF#1)n_m5-Mtz@fa=sJL;n4bP~GhDak;7zbW$lPHjbOgWb|L`l}bvC!bj%W$j9%9?$AVVeFyOh=`J)AjM4 z_5AAGY^fE%Igi)Bo;Pgao(TkXGj*TFeWu5z)BV+`kTyV%=YbPC_^~r$3y&K(IM~wJ z2U6UX1%@{q%nUX#t>FeI%RK9$?8eugh_Qvs&Xvq1|4mtujVO}OeR1sQsh@Ni4T49i zcr9f*28PV&O+(z0)`n4F{iORCtd{4ZqkP%Q`cVA_Z9QlSn~#wfew8OeerXy`g^4gb zG&U%4v9+BMsifRCDHOOyrQn4P=rc;XK?vzaVr)bjTZdbgskue~Pwbej2BJ*nlUS63 zGos#phZ$3OaH$P%RLePZ`YLx@bK5?l-GPvo(iA9%Vb-FOIu<_EZkZx)nn;|6l%gI_ zbf{P68Pv<^`04kgr%!BtZX2uD1l^QttBB{40(6h>^l*d^UqF#N| z$u$!-kX|We4-8U~XCCBD8XVA;p}|Q(lOf68)7J3&IbdH{F!3z@vIeAYyW!5vE_-%0 zXgm>L5RVslHAkkkC!XVI1)54siL;XtMKNCJeb<-5((37ryusvsr_@7ovc2O-7g_9l z8)~iLJ*w*Dw8rv1lr8$6(n{HZ0jIHn)w1jlNn@$3>CxITa#(V*J0@KyQV}#7R|Hjt z{xa67D_x)mEeEGb2G2mt4L9*F0LB}V2mxb|Aw=rQJHGd)`_oi*qPJpS(mDB&oLFPI z+rG6cu9`h{U7TwO1xUZ(Q`(_Zm$qLMd`1pPx1BkE!EPN-=T;cvul7BqkoL$4gR6Pw zNT241C|wIpbLBZd#(5j69M;l8HN=OUY}@30!de!mEIYhidYG`;aHPQWUgMnmzs`Oq zK+F^PKZ?sCNE&LbzVMLN-b4i{1W-a4_FaB?N;5!*Q&b7rE$*x@eI~$0CSk$0-XOg1 z7gqz?Ljg5eq>Hx`XF?~>n6?}Fb(nw3NqCscn;e zEHeY7Q~yo?f#my6+81WBfRNrK9@Peau$u%qEP-@FUE2(-1aheLA)t~&W@;e@TjuOP4F+;;T0HYbx!@wjN3o^`%2QlU|U8X8+nZhBW zC|sEWg$&$6HBi51^G+GatA_uOLiKF7#w2Kzo#(LNI>{6cV4lg5T^RdlSc@a4dnb#e z_5>lbGdR|7pA6q-@R+vuFlaKU2)SWo&0_G7O22Ch`!96NDoDMc3DGy%0+-oKH@XOr z1qj)BR1a-ig@g+XID#0v6p9xzppYemtRn>rio_dX_KX<{JxmR_Adem9ks@QB6EKgh zxvL0iPV22-*i2;nol7GbcEH}-J#N*KSui@yKP`$%lm8fy z2C2hxD#(X9)hrPy+*X50i?`H_!dqls$_ZipMqbziYdh3BELytw2ap5v6Hix0`*6h;axCxqjkxW4on zN6xs;w_Dtg6me4bkMy!=kVK!R`NbxZ{&1p#zNWG#)f}f~58I%NrUoU8rfMSRItMG( zD6zdUY>P^0iucpH%V*YZ`iUI|(z`4IaGbJTZHOF*dhtaZ5w=abQ!FA9q2dsUZhrOX zqYA>9pVk){$`x2xDR*buz~#e4l^F>*I1**W9Vzg0h2H5Xz9<0{k+e+|#R>WZ72}1K zY4lm3qWLWlkXm*!?XCQ%VL@b0ivdN$){YWp9-ifP^452;r2^(uMNz(tNnXNI6Oa<> z63m=S+LC$|B!x?EnpiCiBpGs0-Ug+4f@{>Y6*yxk(zLlF2W1dL_M&qfUz=-sW*|-L zS^D|Rf_qy}CQ4Sa5lg4zX=Sa4LZU8oMx*>zC2`uorfZ4wwhyro2a;00Ml$2L#Gnnn>1I7kiPI>}syTfn2Cu4e z3MGkd0u{BxHP}_GS5kIaQ-e)tp$5lU!wu>^8oa2eK^k=N1TtM52v^l}9tj?WbMBJ( z^B`nxjU(iE>6F`5(NYc|_M16Ski*rnR552Y^OE7F=q!J@=6-6$9oyMk(8S0`B%jL( 
zZk+pZ2_KJ|WXY0jd~S8f<#Qu08L*auWV>XS(%2fwlkAwvqyyaHKtiAF4$Itv8JurV z=5~sC|BZqokH<;s&4vc4ZJf&?%-uq)#~CQ8dB^|d$9p#~&8SKr4%ZP>b-@|)WbhLoE* z{S#P`)|n1`J)35gN7@T=Jhf5H2b7X2)Ytq#d+9=yjerzuHestne1y!tGuo!uWIEIt z3-5zD8Hlm4X8G-iJHQ0cJ#OQ%{nWf()MIwR3Lh@du+&zAio`qBnv@7XK(PHbFM?!v}6uu54JUVxTrGI zzWJf-c|*`NE^n)g1dyC|b7_KT?I*hw90rS)jvI3Zna464oW(^!cR?jA5jO6K1V$6J zfFBUoh&Kw574n$3I5Ha=@5`CFKgxH@h9gMl=d3+m@()Jn{DX(`=9tEM7ho#TBkpgy zO*(MDI-xKulSTvAyUTv6CNXywij)E4B z1@p5EU2?o?nx7M*$Cq6Ru0MpD#S&qTv^wI|T$p7*mzmypMnR&L!!K?UOTU_QAO!A5 zUktPm<|}aB4bmc>|2OO=<+744DM}tI9fa*-y&3Yi)UbXq@UL*cIugQ=NLQ>nyBxJ0 ztO-ZjU$C+-k0Cgxd{vOQ9)505dX}LPH*_#lYjsM=FZrY+3e(s-V3ELvt(sHs80lSkM^^)49z! zLKz(;jiw|9I`6KmeWxy_PxFwlMX)XP7uyT3T5edD3*=(i2#lWNTh#Jk1;{}cIra)w zM7E(DkK4VumBAC|TN34rAs&x=}3LRG+>BH)#)VgAw>R4^k z2R4Xe6zi1?BG51<)~lYCgU8UxjaZ!|&JTL@9t>i2R2HUaCj693y`Nl&(!awhlB7F~ zSJR#<4Wp7eBn51ueRwd16{|=5T?g>u*8n~5{}`1n6|^t_W12+ zFPlF~=w(I!1P{bX4ko)yBDT>;*B^eZ1^1>~-smGy!TtyrvI*8RO4#0wHpp(boE_^j zuVM0FbgRBefP0Z=w8}hW~ zR3}k*R@C7sBa)y^W_{wnY-jeW5z4H{(Ak)a$7e|S$M}h>^K)|?IOwghoVEn#6!3mY z>U4sr0IGA!w3OwcYVw$TYdB)?C*fIsNryBXPmL#Lrb{}eEz}_Zh@}M5wIHGdO|3=c zewr^S5-NQe%KR`2Zz-WlT>V&Oc_za+!T8iuwv4EN3|PNpr!?^{k_1o_pte)ogb7QY z+onX`m_$$=0TE0JDK!km(6lC2?MZp3*yh=fNfwV!;HR1Nr}6<6rLdDe@;SxMB$EbA z^s#JgNXh$c`;bCjNED=b)U_7_RZAzfX_5+F{HF(0#k(iYcBw8kiNeLPH&IhDqSj?l ztO;mPEhVDa5dPDlA$atk$nKjlJWHpV6ijAQfT!!F$de5Z>g=T3nFc30>%g5HE`2Ah zlC9KaBPB}Gr@u)O2<5CEn>;U3ma?Q>loV{L#)nO`lLqORx>7_Mi3nFJ2%R>?Nec5c z$iM0IBdL;S>PaKgCY%42bVSmof(imWoM7@GU`UGIKAu& zIy&*8Kr9Hnwvf0f4p7}iJ#n0tX4=E>NiUz`-Rbl~`3c369D%dZ5}RUb!z|P!lr%+} zmLL@cTg#^qWprAayqwYtZ`epW-3?H>5tQu|v8(l^r=*>ybvY2+(|I-NhZFYB2uE75 zZZeOShLXp7I%53*rC>rmMS^=enxFbM1ZC2$^ps9-ldYmCpv+AbAtO<&2Q)DO z8uO4HAV5zd*zL633>zXyutJx6L*T0>=U10m=bGV|&!x_f5Ks z?*6{fgHji3vKFI9ofe{LdRWDyBR&#~$Ku!3)m2TbA|_(QBOdXHM?B(P?|SaJ=bLZ7 z`R1E%r7;?lv07Q{8FgH$!s-~+N*#5oiHQ1Ar_)h2Et?9lsZCkav7Q*4nIUO7fdf`G zq|q3S(Og~7@pwF*d+xdCo_j7|zI^%m_1n8QjLU1IkPerGzt zGBW=^oEeU7Y-5`d8le#y5;V~?PH2X7Q+vkUGvpca+<0z0R?nF0n0P844rhpTiXFqa 
z)G%xu8;W{<^P@TY*>9X=s$>=pESbB7!$rpK?2b3qv5wi8ZB?JExtgoF`UPpEjo65_ z9=u+^F|Cj5Gz@F~SjT!9%XsogFPfqmXY&y<4Br^WYgiQ0;n#*i^f~zd^IzC+|0wxW zvT!)^=hh2LPa*|ImE7h)VJ_p!aycy*kq|MS+*q!bqaE!i%+6mDVa%ABiJ8p?iP_Q2Jo1v4g~x%a zSHiG=`st@mr&B>gW4DuqO=dGT>c$erX_hcdvJ$RfDgt?p*Go}xNKY)cq) z4%?%Aleyjg+ikpTWj314Hohw4HL~@H#`B$>$cw&nuvKCV& z6Bk%O009DUaG=0}0j8o)t(CZ&+@A2<6-!b|QYmR5OJ0L36%weiT*(bfi!y1Cjm<{r zE1MI$Yz!u?7(45?Z1DzfJRHurA)6L6GhLX0GqVP&mB{aj{4zWJ^zHu}zQh}E zH(NYw1cQ0@c2C~qCuUDxG%I{v*^-x+B3n6c$XZTKRIplfmrE|}>}h{AI-wxgL;@5RVt zqTAF-TD|I3uX@${(S7%sjEqc5Wf4e86%8kh-o|DXtC&Wtd4yQj!YMqAy*k+`dC<7}ra0DCm?@dJjq%uGFoxQW~sOdO>6}Kgnie7gJ64*kg}9_Si!o`p}0y49v`2E| zqTkHS#m&rov&Djfwf~V>eXw>}DlqH@m&;WhQLSG_`phk$6tRKJpqWs#Dl@ za<Yi*}Z^FC5G;YV`{uDJfT7b&;6Yye`glE`$d}C;E?wsHdK8q}%^t@fb#N41?}g zeXoXLJsW3!<^SpE>MtB@zv@^0iobBM>hSS*RlY;c)9^ApPu|GL?&K%G{PN2$zx?u% zkK8V|GEsD%67<)Y5e!9kcad3(%#O%R>Sru5b6+E5M;_fOJU*Gk$+!Qut=ryZ3ECPC z3J)AWil4Op2!HMx?nU;P?3v-%b7Rj>P`UQEn#9|; z#@blV5TQL<+D=Ocm99(Z6BET2Q^j)+h=|U?;lg#U>#@gP!nm)!u%7jP>%~f&VUSj$ z=-H+Q0SOrfh1s;7R@;_ZYN@4`S}K)>Dnpf_PO0=B8W14k9l46U4IC(7I^Ngsom)OJ z=GHI%SKD^}(nT=bzlh8CC28{(d-G3u=PB6})+Own^tYHfdsEAeG+0Hu(P*Rx#VS^@ zid8Ignaf<}dh4yX-m+TDS}vDsR7^}vjCfI20gaIO695500bnpVKpqT4VE`Bu5QM=1 zaUci=1VX_;7!(M@fvJ@eodMxm{AK^&wz?yH!ZYZb`-g<3AdHH$2FFWzH(Cocn}|!_ z@&JuSkE|T%U;T4w+pEmWDO6EWi?k$|g7r>w?r1O}oWj-QR`D(OXlW#F=1kYhCLSl< zbniO=`IJu$Dwv(>BOP&jOR@A^NuwT;5?K+I*CFCeaJvRzle_NAV(o-2D9P%K)#Vvh z=js_KF-%$M+bu|>(Mf7&_@iSDoi}-=M4sEhR>T~_jzWylVVXBnZsRZPstHSDnn7dG z6DZpF`h3+&59ilFf<{7Sn&p3ZlUEKLO{Nwa%7-lm>Y3wJ$r-58sfOnnjtM=HR@c^z zA-!>R2vlwm%5)|{sNfJ&==jS z4uERZLin5&854Q7Y51!X8O9@12)tS;ux+JM^rOPq8LOB2w@a=eXSbQakg6t!| z^#FO`_WxO}EY6tQ*dfJyK|wkND1sH9Sr#jys6kn{D@^Xv>-*usqUiBTuPbOY$>%|v zz1O3C84^G`0KKaVQeoCXFOsrC?e(d$^Tg}VU708XU@@O-;3S1m?)4KWM0Mhk+DF>! 
z1(e`pz>)fl0QF4|1;MHgxqQ|}PKh^i7TaQ;ZuhzKF#rCBBBJmzSt}GuyM+Ya5+KVz zQYe%~d`^z!+7k3@GRb$qMp$pRN0sf}P<})w67QCAUdrl&LJ6cOiDBg)s;GR8&v}8< zcONYv+=G*vwwf3}hGK#;?gmmTCbc1wP1vlez(cqkcRM)g1JE}R#a)|tN$q`~4w8Ci z3@mEq`D2tE(?nJELWc(C>kct$=?yBI)}N}ROGxCaBY6q>eNoabHn_lmc-e54S2JVB z4JI&Y*-q*fI7s&~+nrmtkJWu`II2dQxct%RoHtvY{9y8W9$8mg2w91p&cpZ(*H*OU zJ|*d305T{yuxUTw+KOzs2@28$niovKB3V1S20=K&^h3DLYhQ|0+FnmdQ$Zyg@~&%R zs^g>oVB~YVWsBb*$y!VmC`I&UoZC#9f+!Xp+2$h6BccN)>CaqAqaIR4L-H0}1Qw{i zg~XJ^;R--5bK5UjS_d1VDpu@)ty;`6zg|fxY;As6M|yU?V{|6shDuw@GxY@Kq*7$y zpg+{Zo|Q>&Z(Z$QHLn%A0n1HC}Y=POo%sOmXWcVIsqdJKQ(Us;?XUX z`&m7(u*%OC-Ke4u*WS%_DWDW^$fs9@XUpVOAf~8BLM1>HGGQB7q3GF4S+AwhDYpX8 zUsf4&swxD_lV<#iyk>F{!7dT%ss^m)=BLDmafUZA*}k?75OMxInqrL!0t(zQ40^17 zlNfb-WOIpoHT?G=B5+%*MV8Nj!ZV4`O6-s>dd6Kn_0@D5=S4jqcEo2^5!U; z4N}x$&0lJUfg!C;fHtWRFu9QTY=R|sJYEs|74wlu&(d53M^XDrHe)DpF+Is~l1K z0C&mtcH>ZmWTT!PSa=LD_1a0xskA~(D!ZrnaKR@PC1I^^o3un0B7UMOd)4bm--fg< zA<2v<#$O}BPg>x0=5YPw7wN_^DAoVf*TA>3b(+gX1#_+12WQiIYM4=u$}GIpbB@bA zz^H^q_WEz|P8)7Y>7oq=5?G%S?}<~DL*S*=L!9P@P05}+uZCz~Bdwl5fryM`6a)v3 z=TX$$Fj8{vWKYGdFe% z)iE)eBc-&!1_P;~%n=s8;ZOo}u1pIMBNOCm``yF3s8#CuIg+-%=ZEkBY(~!>YKG5MP*z z=Tk?;Bgj9wL}E)Gl%%hE1oGp-^-=rNGoJB$6iCwp5M|g@1tfszd<<8~10TWQF`1&P z2yk5xM{J_Y5Goqm(?66)2Hz?FG<-%iN)vPe6tpJntTdXD9-mg84I4}AFSo3T?V zwpDiAxx0?QDHv=zEROjAK3v(YF2PxgfUjQ!6f^1JV+9(%>QBx%3K#lIZ-r~TlJ-wO zxSEhxj`!SO{QF6Q?Ags6IOW00J6APjY4LR2U_n``xscO2`2eoOLB*_A0#*m$t1$)@NsYq#-}t_KGsUd?R0cbJn8kG0SZ7lNipFJRi6C zNd@)q^Bg^kq6(cjV4-N1`b&clCgyz?`JxtBCQTU%5eR2eLd_*75@E&-65LXolxtP~ zHlk#$pY?N{TmYCNU|ON+^3L1Gbmg@&8c43O?HegEVaBZ1!C+yK7w>cuOY`Vc7m@5l za%si_g+5bsa}|&BLK-!RPxIM6Pw2NMQ2w4H6b4#6=8GnTiz?J;a~Cww1Vn{Y4M-rw z7^JWT4PKT{A)w^jXIzUJh}DyTwlEGllx3F=DwovQG%c@kM8EIvQ^WDu5R9jmuQ{mX zNRCM?6UarURWk_QJK-BM`|2CxmXIu4C<`lO6?;+6n1OD;z?SM9ZXpkibJ_?_>Mv@J zIVh`pc%Cr(g)Ne^LMKVzHGt$?l>biUtSE!jretFl4pd_>8J;hfA^q{cYE;fz=%bTH z770|vPL^ShZMr}#;HeV-Xz->G4heWy!6QQ05l0%>imiPvlCi>$MBD{K)V-sr%Rsci 
zBstq-iym5NytEILW9?H+PzvLRF$$-)60t}~?%_(*=qRf~1^?>*@!3;WL++ith{A$O!$`WwnB71#%3mGEG%=wGFoyJ2d)#P*#xBnL?dBe8 zHw%~x4aPJDbZYNXf@@&cs!zR!V3F-XOr04O} z+D%_nD25j{`(m*mEMl?ZS0hKv`9;n_@pBI3O@AaA-14A3Zz}T5GmJ??@XAgs@I*yS z!#sdi!QDw}2r1FmNau$IcA_@==UehDo}Fl!>y+j~7~!O` zJ+Blryr|vibx(3cHm8nkBf=Sa;ITsU{{1y@K8I$U1Err%CQvbsw4UqZDL0*pBtw<~ z8Fiq-L?e>&Se3@AC4Vt!^3NX`=`%}4Z1>m%_7U!3d|0`}*1N$$#EaRMm8Ywjn}Li>Hu*;Iv{+aWO~N#UEIJMH0|g1N4;p5?Bwg!d zDsRMNIM9(DpHJqb9f4%R&bDviWAMo)2i?rxjF4P!lfX+5m5@&p&)1j5`c#ik z)C~>o%Bo#ggIaXb_x|^W<_{={j#LH*UYNrqu^s~dO3oz`Hd!=)`+fZ%84o zr-`a1Zrl_uo?TONSSwhtV_u<=T!gCjHZ4N1MneFNM9-48`1{%7wS!vSMUp8?fDTZj zKJlp1F%pxbiZMlXH9loL6W?S2QN@aCr0C$8b)=DF_2NgmwGLuWZa4v65CPglg35iK z9DV@@!jWpZ9`Sis*B|=={i61(@j7k9l^ZBlXpFgWXuN!vZbyyR5-6^4z=*%WPG#|i z=bbjd_k;(Ky1#*kKbF$zJi)))UwvvS_%@N9-7+87L2_JYr=XZSNS!7CD`zpQvbmHI4sOb#o8n)~M)O9swc#|W`tA&`mIaK!+X8TdeZWEI;F@38 zYl@!l684t7Q$%WO2L;${_u?<*4S8}s(ZtCDRL@W;yL|;-gXZ8hyK%Dhht`}av$5Qv|O6O0J}Qp4}ky)X;ChL#px^xM#ibhPXZ(E`==ca5j>ki7?4WiJe0 zEi02}%u)1C2`*h1j?Hl!l%!pa1Hdzny@NskB_mN&XH_kmhKiQ%nsV~6+R+NcSdRGO`xi&)Io}R z(~`GPTmf4~)#5;mD&7N~nt_?-GnG%`Oga^$ys({_7q+GBEia%Yi@pYNq!b310K~NY z_I(P=RQ9M!9!}a448lo-vlr2*pw_lD%2vX8rIaT#%n=y18;WtU_M=Y& zKp64AHp4(WK@U4{9Au|EZSr1vb=P;+OM4KOkwG%tODfLqYsf!#_r8V7VSj>@h^9T! 
zj&o?tT`-i$!NjrQF?vi{tEQ{zE-sj~^Yf#mP@K~q*i<-RUbByq^zqKRn5*JXHtBnm zlS6WSY}O{$B3X3NY0N6ebCX9GIPCnWC(jiF*$}|to>@yu4AWsX{ba`^9 zw?89E>4?ZNL?Sliw{X0SaNxiYJrO-{5ENlTe~Mh;8P?WnX@Xe{=iQ(Z(;C{+^Kw->lW(+WAl)dcdiCYv;r982 z5W*8;bKbJDM6U&boPE+w#s^dL&DCR>wA`lOjfL%QtQE){AtP<@ypBY9F}X0z4c_P8 zQ*+D}Tfe1dHei!)jOmm34W!P{Pz*lbcIYUq#_;YoU4Wmt$*=LmJ*Vf8uX{k<7%CoA z7GatJ zz`T*b;1_`jJt{BK!+S%k`^)wyId-x2*sl) zq8vetTu_LOXsw-8XX28EZ!FMImbyQJF?Q zi2^C5V*yR421pnp0|m(P|Gh`zpguTs;6gMSi)OW<=jm_VeqUiw;mN`oufK`#dIo-S zlsW+G%8`O)$w!ew5|<)elruvv3zVC63_mBH$IQ-wt%s*21&EE^oHj-$v-$J~6=jHW zSeYQ57Oed8RxSpn%jPWp++rcuLss+#Cv0U88=f^ithR@wXLj+J-5FH%ht3mEAqO|4 zDN~EOrUJN4pj_V`xQ$+>+#}wOVYRlQh3vtMH$f zlw_+}y*0z|D6f!)ApW)m7J(CRYu*dU%u$16;kM5(Li)5ze+vl2l(~iWX}Ml)XA_Q9 zas)xX3(L}BovEp-mvz>iABNFiVNxYq0=hrCn4hZP3|D12Y~{HN+3$|U{9$O}hqkCo zK%UZBWo0!6vF~RSe;KiB^)TK1X}X-0WQ&u!VI-|^b@S5(|JDwL+Wh%U^b?r6vE>*N zOd#Bv-v}0tdrLe5HmIBA?r1#HIU-PDhBK(?Uk8TTetgr7w{5jGBb#hcjI;qlFI4r$ zG3h3SC~X90yq0>60Q=pNcJhrvHz4GHb0+aBvMvoPfF1l~>APx!m9%G00nt}^<5<*_ z6lR<@fw6^qmA|AcPDv^E>-n-IaSD`XniiRjUQt+;(rQ%+ zFy)WoXM(m)P1KEKRC%ciAe;;qEl04tsDhJ`FCAn>DOk{g*{>fXeOuJ($9xd7@JK9V z&?j23J0Hm{4-b=sJ@y!I2q#a~0KpQ|o3nLK9<|rf9g*QH+g}c`)eVFRPp&g6{VgoC zPy|{eqRrFVPEobOELrxol|`o zPT9F?Q?-I6iW~MN0dBDhD{6+lL(R5imNar96>7iWakD>wGM1bcINW1iF4`fh$_orJ zd&CuWj2ao}%xJbYDZldnfi(lSqL8&{D0_aTIO;B%yX1NvVYLI!04;w~AB6H=Eg1tyW&e-pj>($F_s7n028QNm(Hh zB@KI$o=Xj=_@cw+<_GjBaymF}2WU4SQqpmH-{&AO%^hd?ax6Y%*eR^+?Az?MW)GaFHX0@mE9_$s=mWN=mXJ z0ylzQ8;i9OpQ5<6Uf9TGh+K%wEZusR>D90*jd?X_dg?|)hTt)q$dVV3?qAHNHklau zc7kY(Sg;~ko+SIV(v<{fzFy0|HuamdrNWV(o7g<}2}h1k{WMm(FFy0B;3qV7tt|_; z;RH1WhkS-+FD{ByH^Q(+%$TtZWZl-RAUriE+Gzv2+=P=xbpVqxpZ|*>Y4mSB3nRvV z%kbJ~SB-ksh{wwSL~4dPoVw8kRS;II=B0$5p_A-uQ!>XLIWxr3fsv=&PbMrl#EO`# zSdj|Pq?w?mXl`lsQ6Oi(F2|G%Hgf_Wm>WSWN?Fe^nN#9+q)mOapznsz+g;mfNo@;T zAX80}^lmEE-!FVW$X4W)zX33)de$gv*pu{JX2A5`Ck(?;*X8VtnH!K!uG*(G0kr>8 zh0NK`dSlJVy(k2kR=`Fqfi}srZxYqIpiCgT9sk-NEJ6d-P|{ns<=pr zw-urW7Cv=R4lVGoO^o@2Q~A%;+>q4P}9 
zbHEV}&r-$$OR&@0S%Fe)naq^iD~)N+sHREk6eleraE75sT2(4Y-7*$kJgFh8ZPkq+K92_ICr?m_L4#y0%)CO`O0G)v+%Tny8Y7c_u(HahDau)| z9$xRe#iDDaa<1J{%}M&)ZLwH*pWXUE>=pSb}GIouGtC()iClV=z-!brnG zCBy|xFvjIq6u&kb)9SU|aFu&Nq)}^vVioT2F>+KN3P8-YL+R!TS`=pH9_Za%j?0o6 zAQK{v4DpGF0?+lqpw5+n@s*P-m%Fn{KS$Ew_vyeq2Mn32gkoG+%b1Nr(-7{FaUEAW zF)QQVzGN{8@=hl4DO@1tPBKT83Y%fU{@ZS6DFdRIj(CMV7(5?zj{vxl4l=)=3l*XOTqTqD|{}0O?M9&S%sjQeSAl-idq1 zK52-YJ+LFdFOt8)M8a~1p$5V5I;WhdS|XStUK%VsOjj4kFR7B~RM<;td)wK+RzIi3 zzA|i#_>EZ~Lv|tb5!|{w-NPoZERO&_Hb-pdf}GPEMpi&m;<{dTYEyDxIHstGMaNbF zRZH_z;>TwZeOW3~3dse92+0L}U~JSHtcb|^)s~}@2EWV)mufxmNe_2yf;TPutd&4s z?6rR477!H=!oX=z82KGg%$*CW8O6OeW~JUrMIO_?<_QvROR$dvLWxR%!5C?p_>mx5+Q>N(|w?9R_SXqvdM1lB2QS!;=mUV@Wpcl#{n4v zQv$F2-QWG+pP{|I9mC|_-tPOTo9e1eN9UeqFQ=#H%DS%WzxkWLj$tx3V>5PM)Y$m$ z(ARNs+1=fplGiaz*XzyA>k|o=-*xoP_Re-^M|PY@?!M}hByqLXSyNBbQFGswQ_)`y z*ZbC8d2f|_!L~-eRcp1zwMIp6AbJb7i*Y;mE$!UStkb5s z`I?(6({To6LN#xk&b;v)5~91iyC?O=|Mm6s#^s+IC*MZCZNzQd#%FKDC`lzVdoSfPF+1>2D{AFV!;~H*XH}2nGqoRD&tDoYs}@x-TZ%E+H$r1zJ2lEaqV9jC0+No?Q~l23Ty&Kwak+1~Z~1O-dCFE+R%T>%JLSneSwHzXne|wY^;nPjn2+}u zIq}%|jXRRgj`2AC|95ILM&mnU``_mYyLY?0H+|yw^|iO3^n7=5`FpW>d3o2Fcc)!d zzGIzscYku;#V~pA?k?_P8K!65Zr#@9-MvLy&$C6%{cL@+ef(tm4-1>|GwVVYzxXfP z%4)YuCPNfvep_&w7FJyn8SnY~m%d*9bn`jC_w)Oe4ZnZMvs)B5pYvIPB;KMx7RUsd zAWNB~NP;4cAU~$GnReBD{@1bRd~sSby&hM~ z;J%+~r$JkO&KUnL zmZZaaY(x*pLXk8Uh5#TU&m+1eGZ$a9#CA4kw%5n!UrY1me{OrtXV&Be^&9b(5pl8n zWw%Fm`{(;))^NY>(}ew-iIuB-?snve&Oaf zK0QW9_ta1SZqDvr*3ml`T@xAo)f#^pnc9>dgbuS}Dye-YZF zy}i9Xzj98KYUx`3;HrGqcx5vCy4)0|vyaWzYbh>NoJN``Zl7QQzz}fBD#`$JSa8 zb~BAJE4#<;^Xxs5)%fh3^%$d;GDg=W0 zQH8UhNJan>MlG=+#B%A~1y5U{a6L>iAJ5#T~G%fJAS$wnkeA{P(lA<vNY6aLZBc5<>-1`^=wOcUI%wgBeK_ z9~Klz5fs71l*r(KB2k4BRWy=>i6E^FlZ}W5hy`tkOiUscqM{UaL`Yy_N+J!3B!MCf zFZNj;n-o<50Kk%vg@xf_hi1P^vNA+@UHxx2J6`zgp8xm%|9&5rY}Ug>yj;73C~kOm zcdty1%Gj@m?tY7k+IES{c8L4Sr!Cq4ycp}k9WT+i&uYNC`8k0C3Iq}gBt%e300p8z6vzUZAQNPQOc2QgQ4|Oiah#|^fk-x#NtqxDL{W(( zWm+6Ak^zK6kN||nXh5++9pGTY0>nTt(jYVu5llxF2Cu~7j@CLW 
zbGc@^Qtde$7ak;(UxNq@8s<+xu^uik)`ExyqKgX(?iVP66|DmXyP*Qd0&6X%YFFGa zz3R_VOfKfyVVEzQMzz7zT}(dK&90;F$m-VbtJiFh9eywwSI(_oI_gU~{}|J^oOv+Y znRFaaT+L&usQk=0T7n~(G|$9^iAg>Z2N4b$M2aEoMgkTZ111;DBq9;42@FA~NPr{k zH=5|nu8eb@$~{?MQAv3X2W@8+L5R9?6aDrW5I{-$pk zUT@7`ch2*eKh3WHGwSpy;3PCQBpL}GSeO{_U_z6@p;;q>1IKeTP!tX(4w|O|2LuPm zKs8bnnNZ6RyZcesu;Qa7GWRsNj4JI#+XZ9Y;`==4J} z=aNc&=DA+57ex{K7ZNf_e1TQ)4`a2&)BQWw?kmBMj{i!)bfQZBc$NckGgVRip1U)r z{Bf@3N1|_Ce@JPHF^S11{d}tFN1|_iKb0`8m++4!jn-_crN4V#^e9gJ>0IN#WWIWT zDp4)NG_cuU6#($(z|=o;`rl{me^UFY0}v$1>GwzgIDB~6KMzcx$=$Ay0za(vRRRa} zX?X(bCU9k=a_4$+7z$`-BC_W5o|*C`*Hp~{-i)%`e_Q;?TtHI;X<<$q#?D_rDJAAa z#!91NV4vNb^`4w%fH?o8+y0eEM+4Q|xt!&dOrAg3rg9wEP*XZ*#x#eeOv9++#?|#e zt;M4bHur(ElxelaKj0I<)VzkOwh^BCzlz;EDG@?VP8*Y|?5KTJ2zV(cMJEJ&vaOQR z>qLu~KI$CNp1*2f&g2kkqf)enbSh^v?3rCdLNT7IAx?7#mMv}&$KDR zcfeB=oe5Bq(bD)&v*s5KKCSq3fh44{0;dLb#`a=#&w&6`M>|PcNI#DWokz~O(a-QS zp5We*3s|kKCbCfSNb}fTzr=10p`rluqZ+BLE@+@)wRz5iQ>?dhZL`DTZW^l=BCIq9 zAqW$Xjo_UV?4eg(j;YYJ=3~2T=|%JT{gK57!-Rz)l)9J=WWY>~)tP{mIpij{Mc^p7 z_U`lW5DEgO=CK(M%nzK#3PT9(3^~({H)-r6JE|!16q61`i98rIB#)DHJUIe$?2-CO z8m&O^=-@DFaU$NBZN!v9HknZd`Jf~rFA}>VGZp`rYMjFz~ilSOt+>C856cK)+ zlwHFpJh*QZjXI-XdM+L(P*4{Ypa@RYuF#Ck#KcGAw-0UQmd@@E;i|gm6G7t|6u&tC zZx_AghXpm^zw2C@bd+mtXoMjq05lZ<|mRD87O|pL~|YAs^70wC|=Q zG9RpLSVf&Z@p-;Wyo31R9uPF`>ezI57B9J}>0hvdIC{-*R{ZF}Ek6nc`iu0wBH55E zh*C2LPID+(nUBFCxI$Tmy}*uYY~!qa(EMYjd zwnWhUgXc{?eifAw?#xfaFIi?ukVGdB8mmGOuT?zKr3Z|Z6a}46EPIV~{E$##k5CK% zH&R*I4~KHWoINl$4WBE#Ju$0JTFr#LmrF#}PNQpyb_;lkh$f7VCts!6`!x>MKT#+p z`)W|am<8jJCM>zZU#vZ?U;}JC*GjhI;j$ZeHh**rod>e7zq3pCkjh+>WKTt=djRVG9vZ;CaPfrj&oHN(sOoJ7aM9mSLqqGEx+%3_N ze`OTlMFQs=ipXXhv8xO}Fb%yKSEfOJ5_q4{i`69J9^)%0HsfqGirH(*JsHgl`NuMQ341fO~E1b z1%w;>fX+$Fp!(|h&>vk;@QJ7u0ZNE$sa}Ff==%w3GC#wIuYeUpE4(v4iz139CU2!7~_ z32mHkURx2T2x+odhU-R8&Jn}Oximr5C9+V6@yHplN4m%uwC03K5vDMkz#|)~ht~s~ zuUS7zk0FyQp>X^>NR1Y%QQjUbxoXA;Vd--r@K6kY9%BuOQwZv#xAhx_+DjgeRt0Y* zmH^Wf*C=b6sjKY_T55vEKhI(x#OL@s4hkWFy6lhvP)~1Qkw_G8o!}|xXC_;dG`Lc< 
zC`viG@3x8(5>Fyu^bMoIQ}+_YsrMcy;!+s+xn(kDPK9FCRJn+uj;En;k^(-o=qCQ& z#Xd4d*H-isQqNFBa>$c{JtAeog9n>zk>-0Z6urq{#Ure$D|JT?YiT#GteXp;MH|s4g6WWo*cld=vS#FIo#Mv3 zp$O|sl)-gYv`QLHs-|%2IniczAzP(?r5{t%R9_=E`qI({fpLJ^8XCb{OyeZ}@g0T` z1cd73pb-Q~K3@xFlU*t05mUlNx;7V(Y;yCWZU9eK&7d=&vG*I0!JWmp18O;S14<`S zWk55Xjsn8&qH6$?MHS5RZPgY{UN981l4Wq+py%6ilbmcBmpw6UgrUIvg2|Ow8aR{t zD^tb(3{~pSpr2!$9`G@H^3LwGlPF2L_-Ds}fCh=?;5Z>6?XH2EIOzfgBDYrr-+*xe zdk>M=NZnir7#JKQ1~`<-_n}bOhl%A%VZ4CD6~#ugR)9Hj zla>N6Mxl}KB>s-fpU^RyClCdiF(M>%7cKP2NJ)=v#_(WP6p+RjvB2a*8~^3+$*Imb zAbH1OQG9^5tDTu}`l}SA2QD}EnBfC4Mph-*z~K2w|BJt-B5~DyQ znHdV&?HNXfXeyb+qtJz>oX81)5~K+@g%-`1E2>Bxk#50P;lxDgru<(I#vw@Qou{R< zA;FE{OW|E&PP>vksKW#8W1=11hs4xq>AR4ba}O=w*__!Dg_jMDCZ=qj%z$m-)MUs8 zg$2?aTb-H&lLj$?2)YTW;RuJc6XR-tCzWNbV9HAtNImgV^_JcGhp7W8hS;Y@1V$iM zHp*w^D0e=>@eq84)Ie}E_fc*WD7{+Pj(BJ5pT=0%jZyiQbVsm2;PX3FZaXgn-HveX z;A=3BNEqo*q739gP;B>3bt9&8Q^dC*M(5_$-(D{u+@ye%hgmlsRD1GXYBjqAp(Aq! zI=^vYFX`qg3K({NnC;eYXNpX7=V=oQ1UHIKi}%8v?F94zftm&Wx}HUo8Bzns^2(vt zJVR*NV}Nk@tm@A+Qw|)O8w(_}p5U->fW*KVlfj`XjLe%-1<}G9H}(V(u^}_#W+~&^ zmW_N_Ci9ah5fJ@x?Y41j`y0K$HhzrMHEw=N+CE{RCWWUici-mZZu}>#QX@Ys%-u1X zPlOZ;dDzRHw`Z{eJpT8Y8&3SoVV9hcKjIn4AI34r3G^szLibF!pBX;fk0pjTbkQs0 z$Ax$QU{A~a?I&~T^_ATSih26h-hSrA$Vr6g8T7LUbYd(mV}p9%_=lnzBj63Mu>G0Q zP2rLcNzDGtLnvThL%m1q)A&&Vep}Y?^(n&-U zKs=SN-(JQ;Px%KRLz(5_0FIe|HL%8&(KC<%$fA7i3)+H1Mdhe0&RLw91fYrbq)#=M z_=U|aH*j8PzRf`80f3LeCk13UjEfyNa~>TyxvdO4`7hz5n)M8YQc@XM2aUo=Pcb-T#FGiDo@xbWVvj2} z4On0Tp`<{khebi5RqNB16Qv@i2}7mHcc;6R=Jo0*bF^KpZmCK;4^5zEOxtj3AGS7t zS%iZxsja-oPg|8Cwao;AQhIeN+k?C4Ices!tfU8oZn*GIcay%3`AJM;$1pm5L_RS+f1Wv+L-<9_f@=RXv#JB~POlyI%+LA2*`0l;G;VK5k2=>t* zf-f1k#|v$U0g8U+8+h`@2LmSh?&VK-v|`}Ts6F1vpu^+0BR~@d%{p*?f+xmFX31F! zAHnu+v%$}%KKdSUK;KM?sMMV9V<&DRt}JjIiL`V@+7-x6*rER?Y-T{#%8(%zc`ce$ z>4;?~5aQj2Fv|bf|ClROM+s9Bm1bV~=^N@=Ac-=i(2CIthZ^T$0UE_F0-;fToH|S& zsX%HQ zboN}{f@m|sYuh5jPQoSt&ARX>$isjspNd8CibEW5jh!Uym>wN{F6&}? 
z_&Gs2^di$G*#%6898tM}%M27`lR_9c6#@!jnb=AfsinwFP8c=(L^G;rf7^P9wLoeR zE^~^ehA{xQ@SXC&ZHFSt|zE<4y0Ema6VP)Kq%;Xjf;Xy1`smTn_? zeV&m#sjrLVOgbu(Kj&>Cxr`nnl2`QbkUW_XELdxY0`WSDR&VV&2pfC5p7tRR5v)}T|-0q7h3^8kzBy|@Z^h93t>vD8g8a;u-*{sh{g(`Gvc}rc3#Mo;vTr+tWq&l5sZ zf=RuCP`_+OXHRw(d+J4dRMUS5m6OSjWq^<)tXBjrOiJNyx{Io^-W3?NNT0V_REZisOGzRBoyPi0O?oPl0t#gp^|?S3t@}0YRcW=S3rn zqk7I(fr3?AlG8vqL1~Fz0e*Uc%8GtCs5ycpksGLgxoSXyauaeIE8I$C} zFw(4x_6a`jO6<`ZnpB z!!-lk`RLYyVHsWO0+qQo9jV7@TxwFYsr-1*Uvsf*Z96zdp<53Lz$A_*)JPMp&#h~+ zktQy@c`2Jlg6B1PCh#@V_A$Vga-4zz6`xd3_%|2aa19Q`1nE|Jl57vzcq;r2oWSdIIKh4*OaDi^p+cojCK#P>C^ zx##$tKyFqXN)Ce)r?f@7nqk7`;|!QPVK9usGONuT?JY%sC{ z8G@{_lFKy^yF)QbhSwWQwj64~VAmV9WOH?CO`11L8b z9K@|?oAtTcO%LbzS#)L<$JC~S0fSjFBruH+ zyM%(uewkczq-!=ZlA0}ZHMHv#A$rC_FiHmJ13ARVI69&J^|2Jz=S__KV?s7VZkwD; z;X1)mA%%XT3Qvj0ooE1W*YA;-Di7Nui8pInZd6s!mz^OQB$(h0` z7!1P*!EO6v>_nvD6d*~%vVKgMrMTsunh-8Y6W!OjW@l6QRg2rw(!^Wly@ij!nISSw zJ$@C30PRsSao|R%i#f`W+vpr}FeKWzkuu;&im(%XqQV`OE#qjf14V~d>u4py4?(93 zrbM<~34Vp`Wmg`|kk8Ojz=2CSt zkE7bblw96xhMPFt(s1t(J5yF)5=9Bk#?Qw4Uv9JF?sL`l>dc`-du^}xakb?3uq1F zxXzpsJSex|4#Gu+0SklEnYA}%uyN@_dY5%EVYuT_CNET2VPTF zcBf8*27T@&aCc4|6mTLh^1%em;0APHDyN*60S%z_dRBTnafV7Ll%J`iV)Q>dNuup5 zvw$>4d16}OYxWx`?ng+*rDh8DiBdP9FcuxtSf$FHjMX7il}&<7>t@oiO`JHS&*(kX zsXoMiU6MKi<)%fbMqN=jEXE$TX$Ts#%OCR&q0r5fY_u2gfg3}?+J_1Se2y;5l6s&+ z0cD!~o>UAuI$K=yW+gboQIk#5^0L<-ScYZRQ7A+nj`)HJfr?U1F1vbok)q0 zrSgCYa;}0?Z8`$j0(Ux_vS`i)DUpmoeaj_WdG!fHayjHJVf00iPu00nvrVI<*tPU> zc=Vh6MAf`;QZ;98pPxSTD$ATaWFum%=U*3z2f!+;Ge{ z?2`=TG69fwOj8c^W(Rbdvbbrl*qHPz3NZ#6jJZLc^hRKK~i+WVY6cBKe2#K0e zfs>ftFe=51YfklTr`b65H#Q!DU?LM?2nuq^xk7_9*@-{hlU+Ba-5_qeG5!vIhsQ#6 zJLe~zQ_F!tbJasKJTXg{4^+At#p`n;orbOaJhshKkD9(LoSQbh8IL@v;o$t6*ogtm z`AHTc>=FTr*xVc@5OabU($5eQG_E>q`~TmTI)XB_T=N&1vXiuw78OP9NkjkulncB8 zxdGh)C5s*?b#QQac&NkVj&mP|w(ud$%El0ayV8c^rWD@yRdWdcvdNmem!Zw@b0c=` znc8U6c?&`Cz9}q=_~7&r676et3uuj8KCyay;_TH)PU)BYhf^k;`VbLL zC5%$F+N{=mYAz%%DJhwx@wBoyIDHOMZ#1Vlk)KAUJeSJPpK0YY^QTegBFDqB$(F9J 
z9C2uFeaC!qRtRe|GM7@uY!w=MLWz{foGLR&D2TS4qM$3$WNdi9V;u@2G8{^1NV}YZ z2lq=wY~O)yHF{-z=tm!WUdNaKtL#vj-lT5fIg}gBnyZKIv-;Ba?Nuz`MruX zX>|k*OkbE07Va5pmYp(0pbbq?{cOpY>PYb{G%e9M&8DV`b;Rb@QOcqGzazY!4%1c4 zj+Ce@aLa-RD%L>4m>1e#b17qjPp{j!@pwcI4M%(scBClTZiBcvTzG+}pohi7A$N0} zkwQ@qHC2>Do#FuvLKa3XrOk?DvZn05tz^gwJ2Hdj@heaENV6t1&89H@z9LlfzNYA{ zvliluX-e#2?_?v%?@#ris<#q_uAer~Q@#FjB<;zyofe%&r_-fn^wUY}bh7lv0ycU2 zRTBpGGynzx=vgvqtE|&f>tt((tks)piPc%g+89#nkXluxX}PQ>|8v41so6kdi$Pc$8@;{yldrV<*Q z9(3ygOy}4_n3N~Pa)45-k)p3?8=Lb~nVmg#MJaYh{Fu<$!Ul@BXKJTeJv(X68Boxm zZl$Q)MER}C}U5Vt5L?x!)+>wdyPE_H7vq?iCe2*-v3enE$aI6YVEMXkYW+bee zer?g9;L(If*SjlPA9qFWBA^`=ZhAjAkRp8?;Y&XLc+M>o;;S6B@Y@s=9%dWGBlqnGD$t*-uuRk}Wb&$Pl3c1`7@qCb;L! zn2=@F-H?wA6fS_!fQDhOamuUihH{zQUGV?d5{7Jmfx-m<5bX;eZ2i_Hfw31wFhB0_%=uOBj}TFuDw0p|JWmQ@&@)34;zO^1R4aH)}g+ zkkHT|;m8WG-~k4*lw!iT*lx2<;;F7~vV`ocmS%>n<}nZEWxcszC1Yb3QJmhpuDVX$F}on06=(b&41*s}awVWp)i zqQVo+bZGYc_7&C)%{EIjnrp>wXzW;IU>Y%D+^{o~AkhdW42!5RKRb-Ze6$wgJ}7EY zJr0POg+lC5I4DRQ6mR9|4pj@f6fVdQG!8w&N|fg89?WHM5Rf9--2=fbC842`<|UOd z?s~>Rb7AUw|58e3s44ZTgdu2o!_Mv1Ufuuwk8yJeWpJzxkJpGf905POznY)Vl-Sdq?Wj)}bH+S5o`MJWT@KUE&g|^!w!81SrR=Wk z5(fRIafT`?;!DMV0}BVY=~Ikh_U&8k>%eaO#iPkOZ@65^W!$ zQjw>03ByDPKTLJJeiHx2p3lQ7B)5dgq|0S4yg*j!9DR`pRIj6N^j}|q&SXOQ^IzYw zG*2P<$*VMHDQIJ4lXAIBVxb{=Nv6M2vz1>sGyviFAmV|UX8okpBda7N!u|#!LW1Li zhz=TVJRg6CCQp+vs7iAjnGs5IO(NY4A@vDEjQ5j$|Ab-dj}wXJxHHIUX|0L`dId|8BWi#~MzoqCs-iN>@ zUqomXgUQbVrS6n#ZIFjs|Nh=BV0U+D0X!)BX=eD!KCz9!AAwL1$Tlm!ltkP!;IXou z9)=JO*Q^oKhoVu*2pX1ORHVgV(C)K7t^H4%&&a$OpR2vkh(1x$!I#-PwETv)`eIRMiWQ)i5q0*|*lp>Aob5?_kGq z_nRAkmcfBapQZ0O0W+YzWkyj`mSTWEQEGt#QW9uGNqu^~G!E{;pftQ}(D4CBQ?19` zLZ@(a35De@HP?n^7TTf$o#-6NJ|YSAsxh~c^cNCIdfcfQ14OdRS|x`Thr3I*lxMOB!%~HZqo4KbF8%xvT^9CH3J$uz*?W!t}vvp7+n*rY(c+1CN=@V zThT{n1pCtaefVIT9%Dj;H#A%Ph`o_(KDx3A#+2^L5{20FXdhk>l9 zr35f%^aSyKX6H0oq=DGfxh^pOh-O~uh;xR~OeL|?=m&Si%LkYfTh1CPpJqe- z2_h{w^RK);=za1>^P|2}YV!;L!BStBAB~|;1pbwdO3y6S)7J&ei^qpYXDFDDnw+8F zw)uG|x?wKGiWA~_N8fWYwdsw(9`C@Lq6@mfoje%P3u`;6Q8hO#u}-QuyQV5hm(_km 
zQg{&gOP>=KSeuftL6z@P_>!9<#laW>h!|$+Vf~xU?4$VH&%#bpwqmr@a%6)3a}21Gbimin3sYghj9m&?N69!JlLEfsjluL7wNck zR!5Wm0q?}un>vFX=^Uz*ce8mL4}Zy#4M>PQzRK1lQQRacdX^Cu%6M-Bg9Q_F1J~9> zcc3*)$c6Thq5}|5I{K2lvb7*wwWn;q=3`ttMFB80xy_T-1hGv<2_NB@Th;9CX8=IB zUJC%FVFc$Wm~{^AjO{o@H%9^FnR$p3MBbr^&$Z1RWKwY`$W7k6U%r$d>JXXW=`Q^4 zOspgE#54fW@iS=DX=ao)mbCk-UMU;FcC|^&wMx3`Fe}hJ9j7of7y^%y31^_$eIq88 z>mG*e%A45C8{ZcTe)35vq0z!OvkikOl%Xvtyhzy-^Tjceq3l~sRMBhHEwoeZbm;0Gb^$T*I#4T_qzYxowv+|MBIYomW)0*~CS>D9 z>#56-212tOm_)!&u1EhAex{L*@k7vWB+BV&$tqC1sY0z%6iQ{0P~prg<9gR1SztoB2bwa0c7ZHAX+Ll-3pD7e<&8DbXRXm0E>>q>+$o z5DCl^zWfs(a5)tQHtg~Mq}MfVzY>h}xMp5!o{^5Q66 z8vZ#RjmS)&$8z~h2dXj4FqV_aLZ!!!AX+IzjOkALwKa^W#w;)-TwOr4x^on5JR6%? zGEes<+j3HML0MWRz7IWl9~T&syTXFIpbSp7NBW*dnFX143tl7!Za#+8pc*RT4xe9W zC|s%84>JrP)2%X!nm&rGM(~#7!j38vSq)a?z9CRuU+%x0A*vPTNawA%q6wGV1Q;U= z$Xb*AoNn9C+$~<{Qflo$hEW~8Pe?}Lb-HYGW|C4FoQKs;4qa#TmB@b6z1ZzGi9R^P z!V~opr*R~&eNeiwJaVCQ0Kp!Eo>+_n#W@)JQd0DfqsYL6N;?sNRr?)*)*2TfR}jM; zUI~2G=~`>q$=~x8YX2d8t%Zk&&$D6=*jn^^LglXEdqwUBQ-jGxkaVp@Eeb8zS@BED z+^VplHY9PZ%}yE5u^Dn8l637*(>rS#sVHQ6-T@&E$Tx)y5lEJar?LC-f39 zmL>APs9Pn~3T^-F+Acx*FcljO2gICfTtgGa2tQCfQ?{+1>75b~Cv-1VC@@@YL~Slz zIDXvICR2!Uh0lqE_JoXsFbm6)^=F3Z;3K)^(gbxEl;&vIrOu>i*oqBcUpA;`p7X~# zAT|yZip2JQPrDOj0a)XXqr#S2o}mk-uR=vub*a^bOTE<=<9bh$8Ui0`fe|a&I6!>! 
zpRqchJMyNnvqQ|5_HaHBCDUUb4AnY2p!ap!-*(>add6rCd&PMr?*NVNlDC~E>vV0_ zM@^wJC3cC&R;v4SZR7kc69`~(&<^@PG*kAk@+mDjrdFh+1Yt}#iRYl zXg@fk8U*f!41>s`jTl4(EqB?6t`}SU2JC;cRCQt1BXQ?=kaSY$kAu}L<)QPidn(bh z2>g-DG7G*pz(ig+x`4&e5Wp+NTmrt!tU!uJ=F4fNkd6t&DW-oILr)N>izmb#6Y0p} zVvNbnF#_)p*L(}H>DmTH&P_)7+7>=M#KE+UqiwOUGQw#eQpkqGiH8H8WM_>y+xJkb z=cTj)1@Q@9zMdO!@Wrm$S{sbCVn#W1SM6L_j8zYKa8wX?q8p+C2)&d(KC=}t%-U>B zJRY-!5nE;BDw}~ z7+Qo5ynmyS6g^?(R@z#4ywUBb$PsnC(ZO`6zYlG-G2Wc3i1YytBKxpgm3E}WP8DRt zy`Dsy+dsH}_|19~_wEq`5}!?!#e=v z4S2Z{oWq;Y3;>D{wg!;Z!!9#)1JyrwdL|wI-P7Y9VXbirI_}gr54{Zj4Y8#$!cly> zuw}!2vgf%jMNcLLNj6P%Rxt?K!s*&{6|$rqHL}+^9RX<(jw)Sh+%Zsh!Y%bp%!Ihl z_*5Pzgsj_bYBG%Wp}X}wmhY_?K0PD26u+OWXf3iyqOf)EM?@BuG%FFy;#60>i;z;A zUBaFunWDIdOL?yZhR-2Q7CH!f!U$T})=wSvWk%;HFid3uy5~GH>co~6qcEIJDGp$lXsl9m z0~$p4Q%)0srWh2YH%piYAR!Asxjh1FZs_t3c8n$&Jry;Ix z4Wa2Ju$Q)6NHYFltp;aZ2NNO5ItJ!q(Gk&Bv!^{XWeky?2tz-Y)+h*kkiZLI)Yi$n zS=?MrAO*w1)CIKOQPA4Lea&f)1`JTYHh{H-4?A^3NIlg~g6cFhh&oajw96VW(c|M< zR%Rh<sP$%NO4#Cyq?*535ILH4?V;=6B{4MbO#z@$ z4}aRB;cRC~ofFk*TCJPo3?qddoJxBHhK0^HEX(SNU7+!9ymsQ0p%EmNwNzh#ta-z} zL$oEvx=>kLCfc6KMd5s`i|k@9%h^#GW{$ul`8=tapnfRh$+3Vc%{~)nAi{!S4rY?< zl7B)|@>l?9Wy7Gsa$Nuw$QCk&QEE*GUel+Z6$UX(HQI1`5�EWk$5&4tZcIOB`+W zzZPQJ?AqxBxq1-lVmzU~BfHR?0U&8*29x!0YqP+t2SUygV|3BqAf36mHl5o1y=v<* zeGG1M-0&b)wP~G$&b)HzKqdnV5Nj!`T&lDK0Z3qs^?)YPjL>P{$w0l1HW22V3Itq} zP6hM8#ZyShrBBwtEV@0;24u_tghh}ysOYpbwm@#)-vdcI%QkKX$A={+(AZn%nuOwI zI3eJ`SaZzGE!@Z3(lb%6@LuDS-n^xQG)Yb_k24L@#W94M#nDB;NyIa_c6_GD> zYPrf!DgkZ=>bJ(ekczJe1A%ipvS&M(Yg67_-BMrG!2&PfA=#w0RqlhVactMv398 zmL_jD*nHQ9PubvOWwhZx8V{{O0Sz9(Diu^_tso20d=306(o~-rnG`-VKBVp5dMhYd z09N%fo=-vOpnJvXVv|AFXCs#qvwJ5TLTO1W9L{_J*sO8O5`dRnWDbq!hCvViw`{3% zv?&#hM@L@)4s6`kD`$?I|fSJ9HKGMg!obzr62>Yb9bAD*cr&S97(s@xRX$c>|!g{)9VF z5e9owjXyhov0Jt>64u6ggP0Sti)C#fAgZt8I)cQw08`tB2$DS3wr^>#|L0t>RCZbR zzEy=O5o=}mQLkWuM=~`+d9(-8^IIcizeC^?O z#P&v2zy<>H!OoxTt(EcXZ+d(&K>P`nIB0vp@IwFlgeFqp?B*~rj^9Z!?^*1091>?j)ZCn=R0ypS88)j+O>x1tT5 zPJ+=NU@YgLuQ;{bRpbhS1Pl>>{~E&D%Sbaovtu#V05%#N*B>a(4RmUV_{CFh 
z(2qjS6|LG1<70s<*&OT&0GtV(S= z%9R;17*-QB!}J1aJX!TCL_?8O-s$NP*lE~L>Ow?iLi16gzHHcunj z39@P>-wFF=K*I_gd8hwu!Np4LE$XrG#(V@9qH?qD+Zhx9pzqw&H|N%PZO(lq56^eX z$Qj(JQD7xt$h0<({!cbV4RZPkpYz37;wOg8@`b8kag{F?Iq`}bh;F|8*O(vm(FQSD||S*0b|saID_$GXnW?P)rF{Hl`WmDS zq0q}!55h(bMvTZprv+S^=-4YC#$2C7FDLNP|p@Kj>1k1c?p1YX&fa}lT zQOShLNb#AshY_mdTzMN-8m`0uFWUlH>EusWrPA)HX3-aujPdj^J#4^}bEsTVc?*89 zerDPnzeJy+u-ltSa(2##DN0#w^teELg;uH#YnEugjVW(Usso37<6fXo#3oeGFs3NGb7JjKBIXZe)i?q*@#DmgALGMhUeS; z1Wct)z8-6PljLQj`2i^^NJrs8i`k-f=m&JJdI01_Fa?zNCAmHFAbp~?-(GHbTM+U5 z7vaCZO(Fmn`9X*sKi?{2W|<-UYc$X9bR7q2vmZE1&kca9!35E|u=LM+tfpf0LN*r{(?00diX(m948q%6@ZbZ&#^oZDFh1&bO&;Xl)ZJq|~L2wy;}JN>WXTGde-7q0M6#kPNMO4p4Dc zxL|DA-+A&@C#t;+S?BVh3ZrHwD&EHdt-fZ=_n0ZKFQXzX%R}m*TakKdLB34Xo($?J zw+ZNf%blJA;4?YBbnzorQTYIYvYx0DtCtF$xuf}LHG2(^q`p)WBo3S1#f0MKX|Z#q z2v8!(oC74+hq)HBR)Tq1hbio!`*s|F|sF}cmBPU}fS>1&%dObmOAQ;;eTz*f7s?QBWXM%Kn&5Y#eA z4LB@%4l8=(zd(Hd`Su>wRL3pTL!_1=nobziV^~_@#!-E2Yp}G1N?n)h1aDW{ z`q$Q=Re8lX%xx^L)A&;!k;11R&CfB=^}L|Jo6q-ci@!smN&)mJP2}!P)ak@#l8f#$ zP?{OA%}r(6Y<8a&XfgfAmgLx7P;GmO>u*lr4OkDM8y8NCNpQUkWJ{H3!0pP2U)I^0 zWx_Q*q}TpHd*Yg;Dc-Jilus_HcHuR~6bI)0ZDd=gP~$f0!FbzQ$yU?4yqRQj?h<yEx~MWp3;72`j@rA|Vvkz$xqxLw|p1QOk!nfvH=+fb~LjLk>U z5M*!B<45Ev%lGxt98Qzkf#ETVK4u!9gFp=Y#gW8qTgip8QYY`KTOve_jf_-RR;Vx0 z69{uweY8ZMG~&J!{lk#4_tx2aYerQ*kQTd_oO&h@tKavW&Q0sf0LThjyP*CBle8?- zH_3RFP_uD#p{OP9=x%h=;UBZ)L6WF!njwUK)RgIP+C_NlenvmGXqcVMq?E>zKGjFk zoLi$Cmq|C<)W*}xzs|(n|Awr)X)aKjj6u#r++1@rI-$G82-D{WwfA&|V;MMQSoX%l zqMA2Vqb}$~3EYqi;R4GG_R&#gR>TmPi!d%3L_LhDKKboK(ui65T$?)G)=PR^tj2~U3Nv>>!I`85zY@c=@?I35n;csL$1 zY&agM&;Se>AT+?>fI))*jsgy&LINQyFf1151YU6%z9tk^2xO6T@efjc)hGx84g(>o zvRyL4ibVWGj6%R+D3YSn3COS^LIe|pa3C^3IAF-IfdY~sILIJ_1UAUP00$Tr32ZRg|m%U|hl{3qG%l$rMyveV6TeE;I#%OYZyV~;v?txKaL`*@hN^JLS6 z2o|s)K@=wFNO}fA5P~4ZO|3;C4oSPkAPRw?gklUq3{fl)NoFDoEs=LZI{`Tn#0=l;;XOWK9cLbdaENr+Z>qcfV!%*b48D0BKgC`gF#zy%5uBp?*eg#6?d zXGTbZbkcUDFvQ8kCz*b6OY0~GK}^1-NIWaiNu;Gg*GRLACa#kre@K&ETFIVLElibc zfuRWNDW%gmEbm56qr{y1eT;eJDi;;GMJ?FD1DI({+j&m3?By+Auo!Mhl)mCh%JlD~ 
z(%0c7+RNydU)H&@&b=$L(vc}H4=tT2gd*RmoT_A|67%IEkzQo}2o7BWk(NLpIELhx z&z1d(nd_#iS9uyop`Aw8j5XiNOvNi#b!i*{ z54M&K)6kY=Gr)k%8un~VHnbxn#Fy1D-*OF>af~4u-ety-tyc@H>}+L)S!IQVWmN|i z7T%N<{z=CP341a^!XmsO!W%LQ5*CO6K?DUBh`_KHf0jH)F^R8AixXmzKop9kI*AZS zjUw@xsmUo2kxZ4 zv=>hrM_JM>=h;#pkN#49*?1Tqd*Rp#i$+Jp*Y7q}UWljh>&~r~>xxzCt*fiK<|_@Y zUl|A8)!cnPiqJR`iw*wr(uJjWWxl$PTs|M;I(zp0V9so~_Q4bv5Ne zaVrpW^bv2WvCN*`%{S%eTpuKPYBS$sW|hw2tpTV;K{##S_aORjt(UxUSRLy}I$?F{cvX7R>8kVFSh-bOW~i?v7BL5n4hiuNi<*bj zDp|BaOhUuMBz$3F4S6=FsyX6~nCzG@SK@h#UJi~k$fh6pC zT_P&xR*8k}5JkdMgaScnPigL6sb-`$xmmyg9VC^R2m*J>^#kM!hYXVgT$r@E{0|F< zacLY*x@_9nthDAqIaQ`N<7j{H1uzb~RS8KS51%K_jaRp0)eZ6JmN*}p-&vMNV>C87 znOT&-{Id>|riN?Q+0m;y%$}u>pK(-`-raZ8m9ewWbT!c-8;}f;1>)0Q>7wl}TX?uu zLEGA1tF;-2kBLXPbLd%ViDm=@Ab=nV029D~0by`3Fdhp+fj}S-3IxJnFc=UB1;SuZ zAPfuzLcvg=1$w-005Dn}!Oow8M1q0>C{&j(A~l5;CA9wDr5o)DVIiyl@CpX+j3%p; zoKeoGTPnN21Lkk-DkXVngG*I}o`oB9%~H`GfgAfcQ);kX8oky)I38#%0Sv`hZQjC^ zz@Ku-2{^+Q#prB@2KyJ#S)c}Q5drY#!{9gub_4b!(Im}QIcE(mMiicZ3CPNiW|=+U zKgaq??Igb*4ztfv(;(e%??C}QFsq`!5FME=jYN*JG9pK*C7JyYYU7Y4yLUM@_J&{~ zj@gliVsK$fJg|i5SPE+u^?>j?EU)cmz3c-$i!$!TGE-(K+yxc2yTiEFXTe~}w-(-c zHU(udjP?h!NdZn%AKQx-HPMzCeiJ46VaUd0iig)|*_2zSwDXn&+3kp}bb3PzdYR{j zg4J34TGr+}X93hpb_3e>38)_K*?AC5ZX0~>e+liIm+lY?PFRdD8hoAJvJ{YBr$0`S zPSB#sdvjC3%&EnWcNE&P53|*~THSrx)kGU4J)#^WFZwLJv3k(D)$hK}+bdfpSyF>* z#sFC05Jb?0y2r`8#wMH9zxCGK5{?GRtX;hz8jGk6D2AbW8#XlrQR56NWEr!o1p_t_ z>)yA!fd|as0;ds6N%nhqI&uK`)2pemCXz16$#S4tW=7Rk_`Ju%apQ|mhT-ayn3dvC zf+hiYHNuL0e|>0dd9XFmvD^X*FkzQ&79fj+B|SYWS32w1?99VGllho9E-5=BJ&?M* zLj)wcHNHRxanW`{AOqZDr})O$xg;9!x>t7bKf`IesbG_~Y_{sewrrXb&%-;WgI+E&ADmTQCQL_*hUo*Hv~EeNmOb^NYL#VH~}8P zVC3zcr>v7Cxv8U<+_gcVYB$7M>!3s%(Q(a14j8ei;A7lB`1@8_L7UQf2>?8HVil7-$>OS3 z!P($-y|yT1Zf1O;z}X6Y*d^!BJoQ z*|gN!ILFPm&%tDBHv^P}d-7Pt)3wOV*!CXPl#3v=$u5!p`wj+hHM1#76)tb)$&>Fs zpXnpBwr$^{^{64o%W9)t-}|OSkAh^w8<$5(K160)1{?S`3w@0lGn9MSY6hsF^m|T> z1pqcEM}Ss#henPUwB`hkSM&g5(O^~(ou1x>R7WP9+(b9jGD^CfAo5AGQCpjJGyW=~ 
zWHULm9x%%41SqvUeRb95tWZ$mPG2BGX?X%DqaY;S>7$&j2m9<3b^30*UuHF!)8M4t z3jZM6);hEO4ow)vm?Fri%&-KWZ4(nH>421afnPuZ*_ z4>^p1IfHa(@)jM1RlR*so|6vIH^$CCs5yr7>RHP)bj8p?2MNq7YMJy!*J~dQWRK3g zA0P9mk<=aC1eQfaGXI8(!m=CSNtLU}QV06JH-GhRt7Be=`^N+XNG3s+i>Pl2VxZ~8 zy8)X)TLi6Et-kxx7idE>)M+bEL>~0TwpHvY^_`<8th^JP??G3&e>hjy}}5BM4ZO0Zl) z&dQO0`y;d&+c7=RmYZhY$!j7z_%@n6CyzNf#nLxMb$phRfn< zU>^r@l9`$CL4=cpW`t&}dUug+t>S1lA5A1(!4M9G%RSKk;pH4!*cN>(98fViZ+|W= zU5Av9hO5%1U$Qv^Q!(l^-+}!)&8b4?-2^%?!+8)UtKbbe)W)EsD4N3E2CGpAR3xK@JUD-<2WxSr^J&$*~?`a#O@_8?%}XkO&|Zk(7ptd3 zfI-co84Jm%)@VcS*yM3D{`fp{`fd$OSaRt8z#O<)E6w)gfI`xO_=rHAaxXCm>v&s6 z9jY53Kn-5bolyv7UZ^pc_AF!pcdTY%u8NT+POU*h?~j-dNAZK*tE4@A&Q_|QP9`?s z<7e(L9cDX(iOT1xY36N6g@ug_UPAKm`IyLf!xb&xL_d|Teb?*ff5-uW_ngr3E%)M3iHruldAT8K(am=#$wz>8Xxk3m2zcGsfKlyB{? z^2b26)!imb7pWdSOZ|Ig%(Wdi2gJNEN>NQloBRyrhOIRcVe>i=OszWGNeN_6qJeP5 z6D*ePX6yo7AMlvftN{pZMJNQsxa6hW|D|{~8TSK2KP`rjOxYnu+4D$k1ZKAL3eMP` znqs_r`gED+r^ZD<`cr=}W(5AL8om)ezv@I=Pep+#MQVK|3w`$>6tvL@5DZrHJZeHer8P&i5R)NrnPq62f<0FUdUdRughN=cgOz)QOC^QfK#+dd7oR4i-B zfhJIv-A#KiS}Nvf2^rX=Ty^e5H`zY-wCglwmu8;N;ZNT7bv4O(WKVY+C2I!xkObFR zQ2r0dHd$_PP*BS)iUNIw{`)X z-X7pA1xmrn&Ct*!`P=_KSu1N}>mp?qDN_mCc;y2>01pYmr>=DSXK+4j@MQh{W;6Bb zAhYJK$5tkuj7>lDD6u1o&(yWLYFLxxxv}`JT*4xSk-(M|41-_#2}9|pOdElMVltj;bhP%D*6Fd7kgZ=Spt8|9pmXswUQxyuVQY9?LxjtCEW9-MeJiM zuWTop)HxhHQ9Q@#niGR2P3O)l4Vi0WrH?}#QGxBKx@r%MQu&p6Gr`e=LDW;V*c_lz z+q?uub`I!ELI{E-Ag~2&U*k>r5BNHA&gk-L?KTp=G4hOX&Q`{AN7}SxbE^%DB*3Zw z1vb&xmX`)csqKCxtL*Hw;@Y(3J5_77v*O(dN}_>wc_Q z9sKugGinmaxauW8n&X22H8+rz8!7(lRsD-wGRZTWW_w@lw29n8! 
zTl#CQ%D+F=@zc?P?O@qYc2Wx0$pV3@oSrCkwhpkRO}Zn12%9(#+C33CHpVOaa`uB# zyktDT%%-yTGL07KB;0HqnGOxGC*&eO#T4}xVjxpFRv?gp#0})Q$O88%pB@q-c2}WF z3~S)y=C+KrF*M9PRB1R?_+@PHAC%`5@%e!^sNu!TR3Nxf;!*zT(33{*7iXmUc~_^dqWU&9==wKQXUWhVy< zL@3I105qm6w14M0D_GrvM`0T+KvC7)dSW$ys=~`k8Tu4tECyhl!<5h@bgqFwZ5JTn z^n|oq4MHpk&YMuXj8%3u)_P#rTtTXkKm1GlK_WH$KLXby75WU<)3n|qOhpWD3??Zy zR4u9r$z;|A4!QU#9mmJe+C|tMBT8pV#iWv=OO0kLe(p6el--5eI z)z4Efen``6{&Y(Jy;x~evw9U3QgS&anRQ0h%x#-JLqMo;ZqGvBI~SbVw35vOQ6Z+A&Dg{h)DUN%=wD@mdMn!1oX$<2^72gwIi&dvb6m{8RvTV?sYJlmMXYClI?%I5E`PR*R=OW zcR#X^DnObdv;|GXiz@;F5z2$`on9wFJmIoO2%W~HRronqb6QqZPalr&ABVEgwhG@~ zo=YLjZ>wI?1G1S7?9hliqC&guu;Lpbq%XijOj)Rc%DSYACXV^r=GBMf;z#pp&nk(@ zF?OeTDqA#lS;`OCa=Zt2=|sk`q~r*LT9R{aP?GKF70Qt0u0x>Vq0yc*UgD-)YhvIx z7$|p^M>&&OqDym%3V%_^$dj6h3KMS`_J@M0Y6m%D{wmw;)dcakR6m#xWjtaVA#fwU zV$&xB~qHNU|M-rDg9;s(k{;5fEtO85em40sq6_XjlA8PIg16QdNB>0iFRx zMECpX4DZh~JDY(tXlA*qKB0+<2y3tOwnAD-nu}RIlM=j%@Ad*hKWrsut=*1u)^iXc!9_PDavAzs%lsmTZ&- z%$v%o&xWl!f@K_=%49eejYSpn|DN^E@uAys)8sLsg+oWR;^t(kZwZ%C7EkW5u@yze zM38l=L>AGw5ulAMrq_;oCT3p)drKHj@1WJ=()vY>h{VALOB9Q;@-H#@Dab@_gQR&| zFYLtFZ|$^qVR}XIkx*p>9HZkJBDu72;j=y)7AzVvG zI#ezwsamaf8;-J=qBvmbl|OT32I4QWAT^WCY2p|iBy!w9R8!~5HR#i|$g!@Y< z%PwodA-$G)kAfEY-Be^ghF5Iegc3Ykq2q2SST45~>#141bFe&;C4ZW#BuZ2JL+_ZU z9^pD-J@1P`K!q|Xo+|KJ9;8?+8{MLv;B?@o@8fDQ(T8Xh8=WI+c*0{kR4#kpE>(9GwqFs2{uPzp|?9=?dK0 zdAkT5e(;!(OZ8}8pp1E;Bl2G`ut~)J?;&-@3~pYLPV_Uw{JxQB~X<3Nr4&qP)#qDo2YLA zx*qsOBwaWs75hRuD(KY$n@&+q>jJ@jl2}7mK%zwB&}dErVj2gx*+Z;xOz|;N_uJ;i z=IrIlfPCQ+~tp!p~f+%J)P~sW`!E0a)nHly6e0ZYLv*U zM--B>6@_#toA8%E9_tAb(}UY`B-#v!BowKQMa==Cxe!bKkBrji1zWVsZMBIZFLXD2 z&Jr5#{ab0#E>)n)1afY*D?sh%q76{Yf&1BVWRf@yF1wm}zTw`CfFBA*gV8%eJJ;&v zf%7!yHYF=S41X_?2xaKc@7lV+IrT3@YWw8XYf|mdjdXoB%Em;3e#3=2O7Fs$9pw1m z2TRJJxDC!m35(`w1oMx;*JvOOe|Hpwz4i=28?OLNFML6!C2qs&8B}=&V)hC)zgj{1 zMWM&pCN6AhNKj}GqL0#X1cI4VPi*~H7;Lc!paIk#^tkqzJRywe<;cR~J)Y*1^Egs{_X!@aS!eKF&VRaV zk$MeC$De6P% zXXxi70zSf(I>h%ENYOYNp0i98-~) zswAp5e_d)7skclRLaI`438hxcL5WqU{pJh^H^a}5b$s7vB1gp1;`Rs;~p 
zg@vLoEk9a*TXF%QLfOqrRFv#d6;(9isG^bANJRj_qlNm2bfhSR3PMq6g-{6fQKS%} zkRp+yD3{1k9Ks;NaW`dkGgdcbkwdGwtac#`Udf6h($!a0kP2a-qAC)3kwawIY7>b~ zRfBM(YTiOmLP6pCbylDSCcRcTfLq^g{Vh>9vh1w<91%4t#|Vqw_Ih+6rn z6*{?f09yH+By7Q~6yE|#hzUc;C@c!rSHbV2U=ans2R{ma6r|wy5#@XED^W-k6XoU0 zC|RytDW8eqA&B4s2Mro5SfBv|0Nh?pkN5X@Rf#0L!=FnHWRd&QMZ}Vx{XK_x{tq0eDi(2&-aaEoBC|( zv#HM%&1^Qw`Nf$5itY2B9hw24R>mxJo=F9@goJ>J-(hLsZ*TVzwdGsHkG< zRiGja^a`rK+=~Ham=4qYW_~nTAd$+ZHiKz?G--ZR18LIyUef%&nKQqbncvKrG{3K6 z?mTej%&*F?<~MWZSM#e$r}E`j<@b`xugdSElFDx-l~jIIe&0(fzm-%{`BnK<`Sl|! zd{gm*y!>WDG%Qu=Q}fT|H>FGYJmu%o-;`2*Lrgj47v+>A3>zswDL*N{Ps&fqPs&fq z&!v7&22A0BA{5~#7Je04_*rP7gUb!jHm_!jCY_gddsTgx~jl zgkcj-2tzp>RE}3nM%;>MV{n;YnPq;B7+7WvEAuN3L?#hS=0_%(WllJl$oy`8WPU+1 zznkC9@8&n=O)@{9-4>aZ{=9o97GL@>ontjc$<|pP?Q~imU^jKL~rGl&o zL$qE4#r(wlY<^!&o8MQ{rp<39mu!AEJg#G!Obo#jhI*kp)eH~w z3-kM`4DF~7w&tf0;+QZQ| zSX$=mXBZVdp{$8Z0Ck^`=%Cf$Jr4BxVUlL-+f!hc`^Z<-O#E6-R z*d;7Oc6x2$J!BY2nUR>x8)nxSohLG*=|s2d<;x=kv0NGqP$avvDg=l?iA}U+C-_qB z`-8S6C{fbPqO1-PmF1h-3N<`Z@`(3(+29JmOTZ34(<&SQryUR-2Q*hud^!Nn;AXEa ziNjTV!jyARaQm3SweSTg*FtaLRAAwn;|No#*Oo*=Z|54U<#~zd`!j3AunCC$_yC2Q zg_=uSYVnq~By`82*b5(%-&>Zpu%*fm@vz9Dq}W2aUjLXI%QHk75W1OJRdkSjfy;|A zeY@?f(FnFor(i=L;(#+kT%36ml@lgN4U1gZV$PbI5T)N=^lx?YrF0KlB#cd?6rpq| zLV~j#Ai2`pr`0AV4db=wlE|yHlofQZ+bG*wK)2fmJu&yzB5X*I&nruSAJjKxozprmjtJPl20ccfPAVE^3;P`5K*LY zGZswp>*5R>M12WH`UuJ8sWnc22*h?>UGr4n?UFduvfeqqnrMq3T#7XTTfrIpq?vPK z{SqXY&p3wt2Tm#nu>m=hMW5Il3EM)#7yXb)V4J;Kd_Q8W4`TqvPcjh(x;^RWE~Vs1 znwFCJ7-~;2%hOk7)?S;+-j;mt)bt4hWu}bT#oYT4qh_kKJwVY-$Mv4s(v*rdCl?H` zx@rLJJW4>~Um0A)dT|IHN*3y~(~>cyhQ|i3m5vZzM6wPJC0J?6ArsUp2G&>YaqSEUru+yW zvBUf*#p5$+aBiq}fnxp?9PqQ*=NyK33;V~yDAq7vjbY&*c5uaGgxWn>?RyZMrJk~5 zNfPa|HK(G!Sg{LUl>NSHEvCStCjbIp1mE{ooCxP|q@g6+pBaV%BGY$xd}(IDPFrYt zVt*22YpKORcmo^rp!Lp*lZ{1Y(n1tghlT;0p$E%5h=VF$qo1|5(uU92B$X*+EG*m< zTfRYt?PpZE089Dl5v@vN2Go+3x19D^;}~xOpTB1!a=(k97U_QLR4=MG!LL#!eEBRE zo?55U^SMa}&tZkw0HET~B;|lBE_g8aOTwOd(%cu~VY(l_v7jWL`?s*L1NnA4n%aWp 
z=>atSIW!0$pXndcTY}%?(Xg0BNBC%2D4&l=;(*Z;+1X>p#M=pbUeJ1#oeX70uzLDL zim<#fjF00jON2jnn60UclgT^VbVQyzuRJsmHJ5N;w`SNAn_>|d46Zz!-V8|C;A1h% zS)`Pf1^Y9Ag|8*y9aj!uNs8Z8G1)&1U|4y??jqyn!va;knxX9q{}zxOZ{t)OJHau6 zjAE`c_FY2$nWoGfC>#vS9OH@cN0FStGF%9?tmIT%VHXN#7lxtxPifgngwrzeiWjJ@C(S+g%uEM>R8jC`+90@`P`AKd!~~>v9NFv2neFywwLL# z=21)e!6zIvQ$~^tgBV-}o(>c_l8vbk=ov`H;H@Y$hXL{*TrDbwPJuS3`Z|L5w@I;8 zth=QW`_JYNZTYC-|K8@+qf9h`rV9R8Z_@T;Y1s08WkcnX;jFj+l$_7jH4Jg(-K%B? zD+IN?Cu6K~j?s{fU(kUD)wtkRw>8A0NeVXI+diIN7oc0_mPKX7Xd@h$jC}4d=r+B* zU7rG?O`TEiz$%@D(*zJP1Ky|H8cuXl7p^yK$;!{V$YmQ72QM(R6$UN`4pQXyabrty zqMoWUj-+9etd2wIQ^9;LWhrV7!>t&_{tGp%KS0}#Ss-=@I#!}StvE)KN^bRy3J~Bq zd*DF^wM~)DEA(m(Ik`Ey%y5Z#gZh{L6_?2T^qCZ;i$c&;Dn(Rzv_#*jc(G6~3n|ol zoa1ySrW!xslTP)alBz?n?zN0jFQ2#Fe|Vp4zQDaDR;j*89}=ST5Q|f89ZUU`kP%l7 zN*x@G63vpHG>WNV90|Se0`T+g--P%@?D@PH{Dzl!RNjY#-o&u5 z+CTZDwheg>^tjBXynt_e;P!f>B0n-4cE(^QgSED(_X_ONscpGCsQ@V8)uLihbRqpR zIpgu1CQB)WC~2o|!pJd5kLL2dwsqS9Z?Y;lk5a+zag7$}#shMmYxSaa%W^8^HFTp^ zYFpTb7JtTjq*NbOF0o?qA}=s->~zU4J!mNV=sHkA>|sE%%{kB*jk!r7WsNU*ro3L` zxM<#QVSO}gpbI00P655#D4n@N4>)Ys>&bp<^WKr6P+BRlkbVeHHcMP4`pZs+M29{K z*#}|>lt{P{iL!~(qg}15fbmj)8b%-0B65MdU)oUwMST+x-Es^4AzrM}Z$w($NWn82 zd1nw^ARytvqO1mvD6>g8V4p>>0W*`+>~ZiWkCYTZ4WWhQLukvFG^6%lag zQjF53u%&AS2rGe`l)co-d>ctQ`x*x~v9V9NVT58Lp7ZIUr?ECyCVIoSyTiaEoHSmg zj(Q}`f<07d6bE5qj#SQNy3MFnN(x_`G#KhihECAspy64g%>WT2eWp>mJ_hq5_P`_T zi^CzB91yYMHAl8Kl zIA-ec>!U;D`~ly3Dx#+W3)GRg-FgplNuo#sVdaX}uK{E^c6IR$H8tUFoJm)hR6B*6 z+Hn_=@KcWUm`+&4UT6y6W2y-Vj-N_-Wb;F;OH+G5o4k@jXl!ATXd3X(! 
zrM*KBAR-{pWuO$>W;=e`ldJ3$OZuUV1^GNej!|GG)A7LrD@$j(ckys^Dsc87thHLE zNuzHKW#u&Mr8pwgb#OV1vpqZ*_h6zX(7%9)L=vIW3BfCM7kXQh5%(kI_|vDjLF&ms z0cl~YHy_#nWOHJJGJS)b8bmB~gaIH0wqr`xf{-#>Ui}vWOx%)|vI_x!^tB2|@$zgS zbuLxRJvbRlBb|%kvve?V`KHHjB#Y)TS{vrL4;h^l=yYWQ`+_TDe)h;z6|BX8p9a=R z`$LS=$VI`%{5l{u{SeEff%2~nsq_^I{sInjwQxD)_}sLca6BM!3OqJ6&`bR?IFLM4 z5|5g%`9tDRufYw0CEAfXdU|4{K%&k|4+s{D3XCz0*F6cc8r}>JPitTkpDkhiuo9q( zW`-0MpVN#z0u!pHc{cSFUV`ZUp7REW&y{GpaZ@&U*?q4xk1?cX15w03L#}lRDPN~Z z=ex_DQPZQe1k*;kcoULoyn%v6!F4nU2;vTzMCT?@ja`DNy&g>J5-mgeMD-2;CZv6* z#f+y7_3O*eVs{XPscJ2HeQ98BaTx4^M2<2*ysJs`{nYzOH{DOYpL*?M?xDG#azE&P#ym$GJ!HKw+|^d&q0s%H`$6}E?&l}` zyXQC9-MW9@{S0W^_S|%PIJ=*7KX!>oys?4oe$M^a{n-7Of5!9RbL|dxHahoX?&sXk z=iHCEA9FwEe#|xZT>PLvUK6h$y#9x;fB0D#uC_X>{`l59&!$;6#&KW6!l#A^47G*fV+(`F7KR&B z*0{!0yZ+=aDanY$q4Jsw17&kK;x+M#_&ou4Fw}qj-1YP8=R-Y8 za}M{lH-vm(+rzv5>f`kYtA5tCKrtD+ttDAbtsY;3GJ(T>e6?T7+# zuPm-al~44kg1|W=&mh&6C@K?Tl4Vk1LqwMHRNJS@rThK-pU*0=g#8L4!p6n?nkN#i z#Y+@rJ;H4DStqMCb$`9EY=|K~`W!=cdv)a^NuAU_RdPNp#~D|4M%@{PVuoU7m>Jd| zrTxmyI6FklI#^3pNiIn+kLt5(m{=XGVynqj?W#ys+p0lbY8+Ib+N2&;Q`3f6-AR@N zX^(1Mm$bF_rp0NCIGH$EHMtl=wLGkexL=lO@hD{(KSAzTMp+JA#<0b3jr3xayG-Xx zZJD9p)93OHnQ{6nhp{-9$7ZEr?hLH!;g^6coIv-wcsbVcgm`sc6 zt(hc5TS90FEr%@!*{wNcmd~B8WVE3tWL=IEq9mVma-(V-hS_Q%FL;NMFSR=2nna3f zRivtp@~B@*MHWJ)KT}3=5xKFHGD(-@@Y}hK8ejv30(MW$xKjXp@c9t-Y6BG(hN)^{ z;b&nuAzp2>GBd)3;Ys+R~9-HKz!vbh4k$L)^codOMny%Uf{LOdi!zBm zL`0>RinLN;I?rur=vvt5{JW!FeJ$FOwXhavt!hzJpT0_GCO$YhQxu&nDAY)1c6P_- zOH@TIU$q#jqJ)H0f79kQJEC^rRiTev`5!17YvJ5W^OsmX-oN3H_4klsxiBA(5UEU^)mX=9q zmc_Ho(z2keTDB~+t+oHOsH0n+4iz?z<@ANOn<)B`k`VyNzpu!y0%sa+k`j?Pm^IEjuZlbwoFQ53Spd2KJU z_M*3{*Ik$N7eCWaLo-PCys#sRiI^@tCrhW3n(3kyPG`G&THdgQ9efq>y2?W=lMxid2NWuNFnq8upaKmfbf5tbCMY1`U`aq0 zfB>n0$^Zq%fdc^=Xg~o-K?VRDFyYAs9%R6Q28yjzh=NS@iw$>am6A!~BurWeS(KzT zhjAQ7m0XN|bYmkAJ7Of4K@Kz}QO;hJnM72h!xOtcYo$Wl6&0zJLrO zzy)Mb0RtICxS)asTu=c5B#>|pBA9S=P$7Z?M0f@Rpa234IJf|XWrD@1paYyAb#*<% zEEz&(EM3LWmZb73JZy(J&EFv&zaPp`xFC1MM)FdWqmBi^-wr2-ah4@KqEJ$vyH2r9 
zIec^nq2U2e(12M$0b&6OCO{BDf(0T>a6ti*IB)?%0R$2-umA&!;2L09U;+^i;D8Dd z4heTCN;3C?mi#!HCL8gkVV`oCFL=fBHn@ z79a*N5CI1g2|Dlq8DJ>1)Q;syl1yc3b2$<_axpT59CU|op;X4>M0}(P-5JG1PKcOd zhNTn@0~yT0E;Gs(B|$RL%AIm0Nsz<~C0?!+WESd@SfFku4@2=KrL6c7L*0uLyCp#Z@E9T*lMumJ`))smIf zEX}O2Yg^T#8lBNqOU=TzwdZYH>+}dBXCV>(?m2XotIps3j(TamSq+`(_nLODrZgq& z)H*Yz8+zsBP`#N=S2I63c}O5l7B_aAOH52m%p(5yAB)N3@yBQVc>nyT{`Y?~J3Bi& z!Yg`5buGdW=}uR?M@PP1&n}O>&dp_#;iZ)sQW~0}nH6Nc6-T$t=Ks)E*vN;Lc>#;D zxPyl(fz1X05CB>L69CHQp@=*f2ZTWo1ThQ)AOM0e2!J360{{qv02GCx7$tc?07l}m zl=g;vpm;V&l*h%h)3YFg$v`07Sua{3yaS>CXg&5ws}8>$b|Odf!5c$a2kyY)ZZiAI z(La+2{*1yJ1ZrX0;n1GQ)P)Qi0ZW^{@JX1$J-u?Pwn;wlf3eQ~AW;I%IP>5t@)ZUP zmE934!mPU!Xaq|aX0J0BIGNi zu-IBEgyHE)LXHcsfNV*Mc_P^@y6W>#z{D_QK*oTWY4ds`bS3cIn?TYErtb@QR8Xm) z*6QQ`?GkbjNkghOQE)==@~-7}pbnW@!8&T6R##aoM~y}YE*@L+5E~49V^|Wfw?sAv z0x`EqIDUBYmWlvrr-~&AWN0c@g8e5xx&eXVA;xdhxQ+;eL}I7XNfC}m)$=#~OEk9d9)L2U#(v0EI!*$Gp^0Aj(Ql65@Me(l#e z0{&UiABD7$)U>M&DgU$VvRA;mOWRE z|K|l-OYol6Qq2idbnk{;z3K*Ys#Z=6+pVLN7a6f~=(RDno*nAWEJMsmOTHYtQrVmv z=L11uEsSJgY76aef32WBTQJ?NYhRS|u(z&W9ee9^mPBO=y+N&kLbNvzuuEoBWX4U{ z7WdGnYp>uja@fb(m!j3BLrym=B`MJ^Ws~FPWp3L_&w~C8tR<)2Dn2>4f-Y2;R11^L zg-o~hUpNw2!A!gz?ye1N%)W6GFV;$=~OK;pLv+Ylfk0*UWT2QCUhf5 zjvA9_gHKXvdvGjGfr003dQj7r&eDYE+Tpd-)%f)`-cX|GMqtE>7Pue+j)M~FI^8M! 
zmW#a6u~IU0&O>|;e8~elO)Uk37ENQ!D~>#CEUzfx5%5>}V?@P7L}<`+wv5)PjcJQs zFbhZQs5snt9WuK8&_z7*f~UkCk$7FL@ZcS7!}@^Jdj-2ACFi%VBPmUy0Onl`#BU%6 z+vOcFQ7764)zBMM+LL+M)FRG$BW5J1FUlAO{-V}pm$Tz6eW?kbiz539k1 zK9iyQ|reGMU*F&FIx-<9Gxd)Y-E?Cd2T(I2$~Zj#N;J9tU*IivC8+BwbrW`xdHlyJdmbEn0+Ad-?277 zB#a@-H7CE#3V^q3U6o>@%N||#BK)D?9zS3{NM3IDYxHt})GjMkD8F^JFvXWvBo*vc zW7^gjfUCRR^bkxWT}Q)HwP;4yb<>4h`#c^uVJ9uVrU5Y?BZqYOt&><;zzP>5NRsf} zGpy){+5E252UYx^(`Oqkv@%p;Ps z1w)x|-d0bH7a$xUKTwJ4h@>)+E+ONuuC)xU;GSRE44rrI;KPumVSI-7*PS8);FKUV zFx%Vy^qi{XanhSO5LF}V1&k$$b3!YwxixN>zv<2g(x(JwmbqawxC_G^3sYG4OMlQM zL%)%R5Ms$WI@b}QkEf~tNgThZUJ~Q)++C8_Bl*^CsL(A4e*uYU^T_ABca_{$(X@zZ zpH_kByY%syH5I8t>b-{5p(pLWy~lqFo#Y4 zAonk8P`a#fw=(Wuik=7@sj;;S_*)?+B+L5f|KCtwvl>w0vAVW7#`_$DeSPgi9$ za2(3~P!7|s2-CGG?zN}7gv2ylKS4&e=!Xi5rN$fe67ZonWqT%%G>emb&Q1l02r^eG zT6DKUMU|epFEdUnt}%Qv)*}?BlLPL?B#A1~Q>Pk=I&z>jdU4AkON*yjm|3je=%N*x zl_%Be*90tXg$~PS*qV#fI*$ zq;AZtt2ke=bB56j1Jlbc2*Tq)vqc+=B7GB#Im?@8#!9GcAo*-+ffDAoT~JviITOux`)SRgtG0hcg0oLQHK8(6HHkC0P=%R*f9VUgp5PWUn(c zm4P99D5mV!!hc+)^TF)z2lpazx+#juA{D71a#9=t#-;RAf0{*i7I`98$kc3|;27^C z2y=hQqLwk92VxnW3xKJL+biQ%V@*$<#&1O@55|7XV9zY%e}!>5J~%2tkbh{#SEU*? 
z;EUrcD^j?#!BjXf>)685rmro|J?fGO@CkZH{+jRX{`t&=6_bX%HJe0;Ojs^lYOE0T zOOk@M(V#1<84R87iTa2%XtSC^Y9^X%WPiDivoNa2yBsMILqE6nSs^3&H7Y+Z3TJt7=eL;f;gjA9{!tBnY z*?c-{dqLYdLC7HC8brAXEYd*ve0Ue5R3{^kbOY!z4O)QeP9p|xj5qL`gYiVlzVgfk zomKC37vrS;=Y;eG_q(T#8L2IsDmqXHBZ=_5AW(fsIJfT2lDS&^cywV5<37_XVdm=d zlK?qj$mg5&YVff^{xAn=Ms1zt9}+cpv@E!2L>qfDbS z5#^H%PhPwSR{_}!bRHO|?@ookL@6cuGPrQVX%Z#v<9cvR2`;H-4&B>5;>}qv8)PL0H|5 zd-Pjz?rO_QR=(XUbRJ8-R3%8j1I*lv2jHV)v7*BnZ^2Xsd*#kx@@zYg3uns^7NaoUYdc z+D3tf4JY^LI+E^&S*SJrEc>QvQAZrwy?NBK(`b}FFh8=h)2{9Rok~~WUlCO%>TLL> z6|2tn8^)RbPsz~%ImqdhI%yVN{iJ(V{(zO2HJ}@5g=_&4r&MD(P6G~qn0cgx*B2J6#3ffP8AD?E+4pf`6KBd#-QM9u6L)<%4js$m#b*93u=BkK*SjPcU1qcU21$ z(<#SOPeDql!Kk?s7?*RE=XG^1xrq{(Q?CaKAqZS`m4aRnO*D!Q6e>M8G`3q+^j9Ez zZ7iZJ8oZQJ(L7k7I=pb@`tvVyssD2|({~dhR>aL!al8L#{s7a*9M2OIeV#zWi56ywKbL)^-2gH?qf z5_@q-%Vpu3R^JzDiVOdn`c;AXRh*U!+9*TE6Vh57HP~L)QkIZKaD$pr*|)BjO!O!B zF53)2bk~92H8eYBRJWw-6^1Qp&xnd;DHtY@M zF=53#O{+F=xfu6h;)=TX&#I z`&LRn9gqZrkQ$GhU-OF;n>nyImNWMfB>QwjYE*_!4*89jtj^fy6>a4#PjY$ z;%p-Nr;=qmVWuI->QB7RTI!%3Ttr^B7d*Dw0N5vB9(~qTdK`i{VS(h36$-Vf< zA30%ejouhpsjY@bZFz!CYt`?l(S;sYp3a$lR0>N?fT4;{;dy>VDSmqNpKATvwSRTZ z^-eHW&_=adinI>%WN2c6(FyP58moYJrl}p2c#LltJP_PQ>OOc($2p;+UDc75B~=8m z3{6;dSi$Gfu!NSAmbqHI%-;}q}djY0kd6(19`w!JO6MtZnR(~)K>byQr#&oZuea>MIK zl?-}IlltJS-&c-nm~F{YO>0!a2Ay znKY>jZ~3)#El@NxQJ>eu8qLA_tK~KWpYx8qW}N$LSQZSCgB|N)=2RgnbNEPJ`9d-`S*_*T3+( zF@>)7K$S+Yw19Xe#IRDdjnYvGq#L~sgM=Hc3QY6G_X#M&!Agar-boS4S3!osU<=1r zi^g_S0)VjB9<(#;vr4k)U-!saGm{R24jTHs;HjXW)3%^NmDg8VIqdX?B+`{)>NVq> zR%a6#P$bvIOzSjjNpmHl9G5O}r+MzN!9`S+O0mK`ArnHC1zJOr*1z_+ndb~i9&FpA z?!h6tO0jVPA1AF}_8q|(p8AswnZS6Fb#T*51%9{7v0 zgK07rAvTM2JSu-nXlIQp7@^P26y9}9$R4d58+OC2U{;lzfZF#qwA{nNrmFp-*SH@_ zf1=V=FS4V)!2*a@qU=zBN~|Nmo`NoY;-_eQwt%95%Cgz(;8SLVWOk2_^(rILIbuf| z$?~EN5Y$1M>@SY=oZZ4PMgCU9%P;=T>yReBx?jWS7z;0R89_-6)(}h({Zddg!IT;m zUk%qz>BYo%wq{n0Y3E&kT`hC|=>dvSVxo~*=L&5vA1a{%ww~j)m~DMg57*nKsu?7t zs_RN^nae4~P_M_Ji_XY_+;#Obsni*}$0_h%nSgl?88NfO{C7*Fvo+(W?>LwQrDsk= 
zoRa@-Ct@O?#L*Q}DQ*!^DZ;Ufn=H!}9wq(IIK$)jc`jPqpQ_4_nbAuW&43e z5V%;dQqwte!cSQYWt60T($9!)oUKCpd`HVN?dE3k#QWxi1-sK+veP-r(sm%+JY``a z;itsI`Hn!~JtwIC%+>k1k$KQ@GXm<9tdHj#KJ&Q{+?l!HYrjQwmwlPeF6$pHkovl!;Aa)bhvJ z6JH=ggEe6k@qB%^=5@|v6?C~aU%Bi%GG&vszgi(3px*Jw{xPEhLQfj#Z}kYTnJp@yTtCXwUsV`V zCb=4Pm94D7yqwi*_&rX6+{X*06i|mXLQ}k$%F^7N8BB_dyXP(b_NC)B@MN2`uaj9r z5mD@k>Cm1h=*j~4B$qHOa)kG3#m_lkE&CIadr|$ZVk{~^FX}%h9Ie!3yZKEJNzCB; zC}i2VV5;i}d}U#ENMSWe%*dixM8Ts!+XRR%T}vek9QeN^sAK>PBf1Rcoud^2jc^cF z`nSBhV3?^Ti;QwYYZuG|Te+$2O*koWoPRQ^rZ6A@b^MEjMP$MVLN$wI?p-j9jM@eB zC}s{F7O$f$u-g$Emvz@un?AIy97={_sykq-cMQnT}+`k5p;hr!q90Z3*UKQR1Qgn_rlU;V&TdQm`D!Zb56{KS@W%J(aY z%+Qlo>)kglz{|Is>oK%83!-WdEtq@AfO#a^JO)#=(NrgPx=Tp6oyHr*$s`({r2iMk zb1LWu2z_@bqD~+lHN;%2iCM3G6{FXcZt$YED-jdq6Lf*WkTL^*NgGg;Fxo^6%ZlC5 z=}KTCH#n5Q^fD*vT8&9jNA{SIu$%IP!?I1n#CZbW07gPUE?sE*c?Jo;(UeH2E8*6( zkix^_Sb|};N;JA8N5lRCpb%PtT4S6mIIPaKSiY20L9tj^0Tjge-rkpKGd!jmqU4cq zSJ=InzT|$(86X>}0E=J^IC1UtZ(qO-n45Rr)HE|a%C<&JP?87hwO_);t|_ej@>-}4 z+Ol-gwYoU~o}VH%%DVi5dlbGV)iDYq1-%_&Wf6iss8A&nOx5frL-U_#V=q@_thyDg zb2nTo-nPDYQ_(+#8MxKaoUR|#rFBi-d4~|XKbyN*jl84H11V|Bjr65D&h$%eip%x@ z$v-GfjV7l~C(;)NaXBo!n*Bz95~<4Ia%I5mBE&)2DPRGgfPogjK>8xL0O`v?+;`QB z5m2b9nbpL)OTX8ANwzy{zSw1Q$2}7Xvnk8?HpZJAz83*dXnSYnUivIZE`ow1 zV=t*e&=hKCy_}$=+2T@)S#JezI$`T3&tpEJkH#!}UL`q6M%3$qU#iryWcpDO=d73g zCkaEAgYUr32mvE!UP*&`sbQSRhh~RIS*lZ0dGh5pqLQd&2-x}=V#AAbGjbROy;PCA zik2nlCHd5M)8HXwUT~u7TdAe099;GEI#ifS7Ppa?96MG7!8X+{6)cn@EhBE=0LXH~ zC)y-bEp7W-zrnSX+WsXn%K`@zN7_&v=pF zAVs3qS??6VGL!#pWX#!ezviq;GS83q1Q>5i_|*iffw;hh{zJCX5Tu7{ z>sww~1Ko}B1yKwbobxt#;oNVwCuyWaDVIzU$~dGGF*R6;!;u0DPeHkB_Miq0UjE7# z>FTeaWdyszBd&{#fketgqn#GZrMG>=m@$Qnw$$&!?sR-|L_VxFu#&~mC^!Nl%H2I? z3lZ+xrUYA)YpOnY!TrM(bAN!+|3K<1^RD~enlnEu>3KM&}Ufak~=6kZ%q8W3B{1>{@5ayn^^I@i(H=S zs|V^XNrA*npTqoFAz@t^q{2i36O}{? 
zLS5pI2(1d~s_;Pcn$!nkmu%ff_%Uy_^(mDMfkH|geO4j(H-g-&nC91}a%a?a zLFP*l@&!S?cxc!mixVZ$WTF{$UAXyDe${np0UFs}(&CH~Gs3-R_S%xHAy>>DCHlej zC3eLAv5+9+i5h^_ZOPlf7kU$SQCTFXJ2fKH6}}`;cF;dbq0pWu+~e#68jskbKWb5Q zWaYq+gDwX|KY=l9l1f6&H8&aIQb9os2cf)@ zicI7ST~|NaWwx~^laYM8v-bUkWZHg<8~dW16AX{SN5%15(|qytiO; zim?a{MFEu&n%k#tAu8$d)TN7bu5L|IjmlbSI$4aMDfw2^@ zi9`pL)z=esD2SL>>Ve!o(UU5N{I< zpsd`mpit=O{rBE&SH1y?BneUQ*(p=E5DdvDdIbOg0H6Yu0hTbZrB-KZ>Zb5N;@vy%q~5nT?{av0=G~#HA33IIre0ZwZX?It zn#z|nLcF=eMB<|$Ck#b{GAr{9Y_yRmSz;P@MKX;mlE#ACaL(kyhH)q0yPq{XDy?mM_^2J;yob5qn<%(PO@M_t**n zyHp`5qycbHNZH`VGP^RMGI37#l=Dra(7M8cy&D*AyDh;`OGl)VchQ} z?t2RDPP<@>^61s4Hclb6uw)XIhNxv_RI(_Ct`H4XW+$nr3Mo4`>1JJ?Z@pOF|Gis0 zkg~Cgzrwqe@L;}*<&9x$G?rIgkT66b|HwI3kXSBjSjNI1VHNIRZCYnk_HS=w{x|u`<7XODkCm{y@-r z>je;mMYXleXRGlX9X8$RB^Hkr@m+T-JH07Rk6fC1$W_i&$EN*2((J$1cL!3&gF{1t zr;Ws=PSM#(QHkOjguM?jFcVso>B5*ip1BzRyz!T<}51`7)d z4=fT10+B#SXf#NO&_IC&5)udr0%%%U&r2Go-{wpUU;M3@Hiz`nkDls4&diSXl5?{% zMyu!NB5DVMSBR!R-3KzSHxW6+fpm({?;Si9Gpr*=hx{C@TPfZlg@<*!x2c0_+R>!5 z?)!Ro1H%DWN?G@Rk`jOa`9HJK=fU$8^4Vy{Led>HYgv`CMWsq__r|O)i6$#1*UvV; zR+&|@W>Xze71d4Q)YKFfqfgmYIh9mb0f~gJ z2@Z*$SWL|(?Jq8~p`b>>7^bA$ge5o0ybCvmQA^Yb#!cc56Dli&cgJPbG!uYekvy=V zJo=P#9`6~Wu^HP8%Q^-Dp$w}x78Yw0r)Qj2F)G?#w!N@f<}%9G z(#j^u2ybf4K?J0BAh!d-v)$Fqt~YgNbT+!v+*#TUYeSncVmlrCSXLd&X$? 
zIEK8&V^$uI-3_Xf$f`DtnbYMOmu&Xv_KhIfJH^t`ft+1r_Kl0BJ^yUS(mdC0#&%VT z=ey_Ih3uT;DjZXt$(U~WRrXNjfh74(-#!pj3=Lz!aFanJBcU@EIXMhQviK*-yQ;cm z?ofdWMuNyXWz|T*lcRrqZ*| z^b$^AUsx|62%(h~(hXC0M3YBYlQ&J8j;?e~y9F+Ykx8P!L}OtIB{Gpnnz(A%DUwsm zic~PVSTq!kMvEAR6A5s$^JKGYj+s4lJkK&*~d zN2{aNi*>@obG@MfL}6ipfd~c+7%*VaNFFRm9@IR~EX~Mi`Mx(qziG80-w}|T9SGTJ z_HS?fJHCFp@9sWg=XivPmQ=|4%hoaH>?#YQSr!XA@n)4+iB3FA%Yu{$!H@s}qJiN- zQj2dkyDHjE7XhJiJ8R}yF+VvTat*o4Eq3eLvcodiGA!njxwT$(0gYspFLmP>o_Ywn zOq~zJWOg9tvQjPE7$R79AT(tMGPa`{*7_>DH(jgMuvRmcnYqSV?Z{=d=iq^yOQzW< zkPX>j+TelMpasc7!@~p$2^=5&*lip*6o*_3d|hz!XwGoA|j+jlFM*Y0*WV^}_RIIr^i5O2OaAKpnL(cmD# z1PP2JfT6j8W_NV0g{9WB1j-WFgP?I3l0$^JUAqZ2MH<&ijKG=$vLq^ z1O#LeC;$@xfr6osNIW2j0)bE%5D0|AU@#aA3WY*pP#A`SAc_N#mWIJN00X3-PkJ(g zXO|&g^f~&B^S@lE(e$0;(}e&2kNG z8HeQ-(pneUeaIA9cgen{0rRhLO0MvEPAz-KoE?UVz#1`s%v*Q{h+@`Lx{8=F)~p`D z5zrC5SO`!6O$$1TSVAZSts*~X6-v3kPtxsSphMyH2PPY+39T&XpobT=l|4mZH--Qc z$zj|j!yTgA?`EN2WYmaEfucx#_ihojJYOYJde;Z9f|+R~gbfv@>onhIFj`2ew0QL2 zArNAmTB{2n->*Ly1Vow}KYqvjpvbvMl^p6zkz@Atfb) z{9cDP)Iz2ZjqIvmG@jS8yuigOi&8qXYzsRYbh$F|3Hx!nd9yv>x&ksh_qQ)iM$jS(Qr;2J;&iKQZe_eZ+9n! zbX8m-X`Sya<5o%;tJWMw+|EFOT_%6gMDftm?m#-9)jd*E=1^@-eN6ygGk`whE$*^P zQ@(^2J2G@g>MZcxC|_&%{=v|-hf;gi>wAN7{5mBK1eZ4sJr=SeLbAiF7J{2hChc5! 
z>nSy-^W{#C4$;ju55ahdr&P@p}Yh)c9#<9nEji-WQ zt_tjphTo|T_wgr7hK%a-%#yzV@2Fm!QXu>^HOgiH0CGC_j4_j5v_NG_u>d^$p!IsF zn#yJ(+^|%3Yc&E%qSVi7H}(^Ixq!_HoowF{QBA~WrcG(!zQjLs^mwj@Zu0QUUNGA4 zmGwAffxON(F^k+1CrRnh<{@<%1$YhzDP9TJ$pjigp3t z2uA4SBz3{jSSr0pJreN?Nt8>L9R% zC?Oy5+8mVfeUXKxYV)j7tIbhAUxX=KDCsi;tRypPRTl%qxE16~rd(C+7{!KOfeL2;YqWnqfVtZramAjqnh%zDoQ@L#K8`5HT9RtAsD&G} zSdBCNYeo#ii4n@%IU_270l?$GXXfV{zE~&&Q~?*nwm_yaRjU2*+asDA-mqr1z{45d z>)xb#vIs@+P|ZL?i+x^!`);QqV)0g$Lz0`#sLn*&f;sdc1)=F&E053r^0|R$!X(sz z=^Ss@1H;jciev$D9&|-HaSAt4uRh*B+(8&7$>MKBd}}F*){{VuZx1kQP~!7kJ7@DB zeex{!&vxT%m{UF1yk7a0-UiWs-D4?~{X95|)K6Lg;Zy%pCws`z_}?HE%H8P~RSM#f zHpXLviX(%gDqv#&2*iI6MH}2OEWt`dk=;Elb*JQJ3CjY^ZzcwF(2TfmKDLbhHoKF; z4tE)|F=2$y6cEI;$HId+5-yiviHc8)o=|(su~^i+1nFpM0n*@%XqFIDD03}f2hahd zU>&|~%hZ5q(O(JL-ml9XNt|$ZuhKwefu{Ef3XEtLc^EvYwK9d#v_=}Xm$_+nGc?L> z2xf=Nf;V*)xtIX}eY}~HH^H99qKxSzX?zTfCH+bO7f)P-r@Tkk)W<)@c4;v}S057` z9ats)3zD7WkR-lpd_NnPLO-+fK`1r!Ew4Bb^JJ0H_y?M4kRscFC0z6>8c}qKn1tBj z+d>}R4_l%zJx}66r33NZ0GeDWl&n5LW+he*19}6^&<0$|iWJr*9_ms|Bz4!k!_tvq^)z=AiOR_A~UcWM72yr3acbeyVK zs7c?7Y70pn+X^!$d1GXOq@LK&LRwBsVoJVV$rLbQtog&?GX;e6?R#$ujkJ!m0l6O& zodSsQ+g0jxm=*MN)ahNO`Kh-RpN60>$~7zf+FLgjz|>n#c)n+Aod@FGn=q7WX2UfreR8^yfHIL41rj)3{|0frz_(CSr@9=6M9#h%(w}7gSNg<{T(^Zu*OEB=sB`nwu_Q zRY~##Kcc>e-`{P-6IUI@W!nDLZ|!f}8(@nFRmKvZ7=lKL@-(U>l7tkElkx<^PlcwarA<$MdMrBsGo(T#UgtRcwSp$ppy*ye+I{tUi!AMT&BAd!YS1t zr?hW+WIz45kEj_I)JdK-@^Z4MiBsHvv9N;Y>j{Z`10;IcpY~ee&H>0_(Fo7HDRRM% zBeF07jX1GFh`^et09OB{h6`NfuW!=|S0d}7pYC=kN4n7XR>Q1}YWRvLkr+#IB^|yj*?}HP9a3Fvn7DXK5#Z}Ps@mkq00~_L4f} zG1u(SsYf<^ky9|QoIa>0yFwhN5Q-(r6X0i_F4)?Z=1?AIiT~ONjsKFqJ22f5oc~N_ zns*tYJ*LE8gSnJs><77Q4F^4ZdpU z$-Vro9Dgc?8Qd9A9;L2MuBZkdPE5}x!ta~Hnlg}=4ieVKqmJ@^pS$Fin z2~eL<>T?_wm=(aXn=s`aO%R!*sS!{2e6JtELe#-gQ6`GQ*Z2%N={U(sQ2$nS~MKMD;U;Slu+}KebB&3pamun6GijZ1FmLM)&VXUtbN_-ov!Kw0uz~=QfzN zq?FZ2lfN|*z1)ngG#oQ%Ufy}6Be{i2g(0<$m2Iz3!|-bY?9}K@glas_&nC(cC9&(?hluMWnly&P**g@ zJ9?$IVDv-P=ICrnDQ*Il=UC_*F-u{}veY32bl?$JZ(Hg)kv-tt%DD{|l+u@o(HgOT 
zM#(T|si#(SOdEoM5*oFk4C5&fQcPG1%pyY4WghD~PM70P*6U zc8g_4om0pI=D|)E5gtg_Q}e|L;i912cBfIeOFRw+@|=Oq3^NfPr={BMco!pP+>UVp zUtA{2F9;e#ymI-ogWQsuY6uhIRL*A&Z9~RxiNp-g#-u&KweH*yn$_?vD8rc57Dkgk zAWEl<=8d(3-&0_nO*(T~Sm?TgwsJVvabinWfX+o(^*JKlP3I5l4S?NRS^}YEI+6B|88lY>Uq&s4K#NOlr^Xo17MF9ysv9T3jT2 zXc?ibG#B>>M8YFzg7B4uc2Rc_u_{>nq_N((!mCBJD_h1L#r0FdA(l4{)|?K6B4Cg% zgmE^Si!yhXX{9-5rZxp&MfMaZd2|et6vasy&@~L^r28XRPJ*#^AP#0GS zzF4x7UIG>)y;?3YD|OgF6c%kz$qdM%Y!ES&g5M__hHV0}Xmi@BF$!zv;Wlelw&D`5 z-0FIz1iFYR$v2hd#l#DcKJ`-CHH;PSAu$uBQV2rp#BI(vlVN}+wFziKbx8o(H#`0d zgBY|JuJ+Unsv2d5SuWRuxS-^6DjMcPOK+gXIM8xcI2gc8g}s4+U?=p-VF|V_DK+BV z9~nx}xzd0$|M^M-m};;5rSQLASOT(jEXu*Mpon_stcjDH#+l>;edZPoT`hq z3H`mxYYnmx^_Ey83S84FEI5U;vsZ0}7gmO;77D&xd)hFDtaCE7p#w0W^e64<^TEs@ zkgb{8zBYp7L2LwM(c5JII8oGC_l*b9=~^*45C9vbI2qPy1wcLo$PY2NaCW5juYJ^) z6(9!5QvTLli{{JI8Ptaz2PK8)vBy8HfOq;b73m1B4*QGh7^4A!5uHhn)g;CmCUy--oZhz^{i6M>%UJjhi5I~cv+F}VjCHOA4jLp&>)&F^z zT3fUra3?A;Sk}@cpP;VkrADs3=oz%B9sYJ(9@8LfGtzfN&33j}`MUD%9BMU2FJv_o zmN$Tn3gpGP8k*4tOk|K3u{ySgWrLo<#a{Aphal(^U?bYIFq1(z$6oe;cd^Qe27zig z?|n6C8PD-`tyJs*bc-}uLIWFA0Kil9@6J z+T?rPxnHeUuqBIHODHhM8WuR zNP;(aI`*=E59Bg5h!_+Zmh^2{$;zmd+8Sz{MsvlK;PCccuuD^)o>rvlT4cE$kuAQdQ(c9S?D-^p?VZrP(A1>BkFle7U4uvI99=_pzne58^{rnW;Sd>XB^7mO4#;B}5EGrI1IcH`U0Y)4&S58}IpX1(o zahnN6r&3pu#A$!)CLH5LrF0%4Q?w7KNF}S6Wd2tLPI~02(k8N$&?f>=>InF(=!zn} zHAM6&z{5C7p;*ad0lIh9n3cOnbnoljbaDDBKJ!xXh?i3lY-N-2dVNnNv3dT~;6cPS z*GPuqIB1*wePpt7x5VgHXg?CUj99M=f5)T_?;e*p&X;p7bBIF0l{4avS5?R^XjAM6 z*<~-K@SC2)D|Bqy)s_G;JXH>gm}XZkp8EU@L*Z#E5I(TaJz(|&q2Jn6USoV%iLhxf zb9Va$ch0Uy9)FA3ozf2-A!OBrlG4%FkNQEbakd@gfwwO3a0U@^7&#%ZQ={u^ zpQU*_RoW;qZwd=X*~l3wl`E}&KlpU-*V5S=KsFcFrGpVz8AF$V`hRAm`xd1k#N{(3 zT>!XeZeiRh8CYaMGx^|E>H*NTLrM$ej4|TgP&)}>U2&#_M4ooX>y&Ku1T$SW>g%TVct{3}f}l!;QC$Zy zvTVlF?GN=#1}@iO`aOxN=IRMjWLALQObXsaHs{D=DHK#(x2}&y1y)s zOgg|A+WbQ}Oz!56yb+Qb_Z&=Y)_YkdnQ$jvfaSltYAB91meu{Q71?9MfO(iAj;ntp zni;Yq19AxQuu&x7^MZTic)OtmXYRu%mCoXsN=$(X&r zJ}gEb%f0etFD@dm1n6xg(7>>Yi1^!7Y{5quF-Z!J$s~Y~!=TpNCe%kK8FFbMQCZwP 
zsoZR0mqm1O5I;PZoCc4B$PI1M$MZFXr?_-N$cEabhp7e7PIh~3 z-&t1>>ktke}FOx;#ksNKYX5IUQI`rS=d>=mEt-Gn%RHeB4LM+MQ^bp zRsw7ggkvrsH6i$I$a&cqlEGz9)$3b%^|MJl#JPn2(x&+!!gA$^8d`%?(QKNPiQ}4c zLvRfei9kl2wLvK^Nl4658oHXT#H~k31{B0>v0#s7YfQIi$C`Gr)=E7N zrBzR0j=E09zQ7uR)H9}K`;)xJAFHC#Zr7>^BxXS1e15u{BXuVL z>%=pVgfqiDZx(O7iL$&UJjWcgI9_9n|D|MhTqPP5S3=}covmmXd+uRa2T|f;Adh=F zJSc|_68gAl6f1EaufeZ%#p;^FSUQ(_ir!nqg_HO^JYcwnnT?}e9ci7CD&}`I8*W#p zfC7Dd_Amf;tpjgemmXIY5#C~)968BzSVK-MP0N;&Kp?kvJHQmN(*V#i^oN^PTW&Hi z%bSCBZcvxXS?bjq1cB*Cd}8qdc0krJMoz^yh0SXmqtOm)au14kD<240b^_BZ^OEa!h5U#eT2}MoG4#nN5KS;`*YS#I&&?;bT-gR3A zG_sh_+iR57fbg@i_CKK+_*G?izIAIJuaIPJTh2IJd_|#`6tKgJ?~0PHkyW?+T$$t) zn)3K(jXNcok_yw#YLp|~7i-mKUqwyPE?~85A>yv}7axY-71nBt(Brh%_+kLl}y~ zD?%jtTkHX`o`%q`4Ot(iPK0)GLu9@lU+Zf@J`RS+eNv>A0BD&QQ;!iC2NVEv8QFL2QAV09Rux--%D zU+3i6?jz>BA80491{RN1bNZF|H3JDJN9fEXEJOP)N2H92aoghKpNywA>r#t?awih< zzmA3Cymt6Cd900WPKL}Pr6WxXzf#GOz1dkr3d4S{hcHQo;s@LZr(nA2Y#eJHTGSk! zF7aaefwmVSDKdE$M(#pJDI&vXL9m{3>20CE8u>I`a@v!wQi0Ob-funVX1=e$0i^?6 zt^j&@s1PDRZ&D0r8aA#e21Aog2+Lg%LunG6{r^Qogp48BMHEFbH}(Jn0sH{<)R*O} zP0>x7*r!u;|Maz=`fWCUAjRJCy&j0Ye?2=r+SPed@1&-aT3^*ZSDL9ZDa*ek|I+>C zTg>t0+iceOW=%GmRD32C`{J9;q9v60=3G*Q?@g8y+%)-lO`83iP4`UNBqpsRT(H1E zkuag5fN-!_7$^`23J_URh<@JX9Ud_p7iJ4D3oi>V3#)}k3e7NmGj*&Qq^jSie4oPm zR9>m>qbgHUE$7mbeju)MAssDE&$C3MC)qpJxs&KXUP<%=achQ-<)Kpz%_E8nv}7Dx zhIu)~NTHM|P_ZCl3@LSr&VoU;Sd4>bEIG6&ES0Lw1B-+OqS1JGXfQw!U|?`)FtX(8 z)!SaYUUI6-by*}73QP1Di5HF+rDq*6 zQlZ77i!z0cviQovKs*(%w|G`m!K>V8W_l_ zgvNfE)m}ACHb31&N1`YFrL*ahB2&rKlJQLSr=GeK^rSAU$Hax@!z0VOZ8EHzjI3@n z-Hc_;X3}51KK5}^QX)C?PAVE7GM5T3B2F4h2%qq5~OwXc0kxprjF5d9czi4m#=KVV;v3B*n(b{6NUe$einJy5ZSo8zE)LfwPz{ za`_dXFp&+IqeWpd=$Ih7-8+h*5EhW+MoC2@?Q;!2uy* zSU?yC0)a3f3v7K?C(BB2$t3rR%BJSE=QtG@SJEd zs)Mv{N>BS-+tbR@)nscvH`ye_n=C_eAN$mDl9gRiu3ORU#?WK%Eg|noX*T9m1ij2z z3kC!gz!bL(CY4XTdc$|jyXH0>$ z$eI9MA1u)^!ZDRg?>Bh)F03S?GL*Qk*L?-Nu8{!KI2;U9H}(@J08Se+8IrC0fq~Ex zfXzS=p|A<*77do4NzN~xA&Dp4O_6;}Im&9XRrzfW5_DJGiY{v*@ncdzlTScE-;~I1 zL|qhvQS67TzdyXS6(2P)L4^UJkwGzLLCJN 
z;S}S<_stZbx)o6S5+F1f({?q;8dq^W$`nzrzjzQGhP1QcRRLPk6aBrc;ei7?Rmqe6D)1J_?216d>el zC4%;FNJ^sC|N0iN*`uKYZp{Ih1Z+VR76}rhk zkF#}4COH~Meqq(052~(yh*1v;YoM&i;miurPE`OFG3PQk9UZyhdqp_CN@To|jtqKw z9}GQc>i0iO?WhpxwNfEEGC~1GqF%bMBV>?nv4nV4Bw}3CeH{-|%qi{IJoM;V8)$d; zfM@h3=4@28`a#8Z89Vx!O*O4jR~eb<^efZ#cF$HyNO2#``bQ+5R!FA2T-nqSAGt&k zNr9F~&1ZW9!RsS1oz|j)pJdQ8+x-icr?so5DY493YI^b;<>!2hnLbC;5GnisEK;8->32Em4X7$0eNh4N-pk^RO zgR%KJt(1dNtuYvhi28Ot9WdFR2Xba->wp%B}~+>)==f>+)Xj5Sq0#NNh?i8gm1FV!+FZ8X|*=5+}Sw=k7aYt&TuS zro$vF_h>&(&^T_&Ho40EI>elHjTt-innfIQODNKLK*7Y;pkon)VxK767D^1I4IW2A?o%&&Z? z_F9zWVoD;y8|#r9x+7JG45JCGOOJfZFQ_oVt!BB+noBTk-1q*oZ>^QJfeT>eI#H(X z@Bv|aLiTUn!;vZ7vH_lQCUo)raYgr%C{Db!ooK5%*z8`ripYH-qv8%S9&v&-;AP?n&TCo=8Ap|2E=$?QP_AY<$wVG7^GNMSa1`1RS;lHk$J+PLF4PGS zWh-op*tzaEkyw>`g}?yKX$e7fi7}ItMOuqLs`~g2(BzJ*0inWu)e%)41xHC%bGeYg z3!P1Gr{A!7~sY69US^%CG51IrH$W)GmcpyLs4 z0|x~lO4D3H&%cOM?vrAQ6w9t6h`9=2MO14vcS>$tIHC!HqaS6+ zIcqc5F~b8xr#l$1k#2D|+~pKmqfk83!2QpliOOMEq52x>GEGh7MjB_s(E?WwjcKNS z!D_Y2hKW0aZkmd2HegpsD`%&S)bWT4JjNJUcFH3f60u%eHrl#2Y`mn|vSj|vrB{th zawdGWaJQA#y(k8KN)YYWYRB{r9N)4K>)&$%9zetpN2m$3}?Uyd>@>PY0`o6&|$ojC>LGXM!xrcrWSv&}~vSuCrKEEOH{ppM?&5c~K_>+6~f_`WeC;nyImS|B%@Kwk#E zvH>QHzwz&>nvH>i&Ap6WgYrFY4ba}FVh;f|dV*D7^#0PCX1W^BaopEOuI`3Lv4hr25unutq5EBmlR2qiCk1ppUCt7@&@>8Ev zZ*hiUZv$kV3A#uIK?TtinbYy}sZe|NVj-ywwTW?Q(5x}4&NlL_9Nayonlg9csU!f- z&(8hisU|nV<#tpB-xOqSSsCQJsfgP)htSJy5TD><085tx6hy5J44NuAbWr=`2!uLc ztcOpWdZ5CkV^#0&>XQPbtKQ&c^Jqb%%VF{qCR9dcnP$Pq25<(rfc zHNxI71+CtDJ38Wmd|ZvTqwXgj*(l7MPEA*SoEZoN23*|ccMh3fR(;96V--DLTEl%) zv@klAs?fR27n@=~M8(kC5QOFh>u5t5Ac1A=S7O&09bX@`cy>?;KmtUO7MT3DIUJh` zr@93SP=)vb>PH5ZQjpNL_yzhZ7*vGYA6ku97qXyoW&XJoj&9;^_W}Qv3M#@G0W4W! 
zOX?@Gc3;Z`6>+#g?D*U^={*|)#{O>2fFMAUhBQuTfU3^Jf#r@jflkne`+{dt0O_vjH=X6MynSNA@eb4(A0OP5?R6=m(Zh@xVc&gh`y|fFJD~G^+-oYJuiL zFfR4frV=BWq2qf~9~7gA8e7jFwTwL7VBzE})Tjwebc_M0Aw`7kPd4tmYRQ}1(k*Ak zLUzP( zk&(##fifi;bFhO*iaNn71<5r*=O_iA*d(igL6U;MRFJB#f{F}4y7V=93;r$H9Gs7X z2ORV*JXhkv3>xswRfM)B)9(Q*!-e;&wb|0-x%Z$}7jki8R>9fc6~O~)2ozvnr6b(~ zuAbH|H~MH;U&~hjF82@)R+sYXGB|S#GLHe!{q>x$0fp3eE%qJjeUEp&4N*1LJlTVI zl{un@VSlx%iaR4&VE9gIR zs5_&FI3=&m9Nt+j9uIxcIqLvTo#%tsKeFaiGx^WV><0HOlb%3gHVk?XB|C6{S>c5` z`xBW11C09ISqj$w@N8FX1R5fNnZe#rLlJP#SY}6HfBcyjpDIKqSN(u`Xu*<3s(;Em za8>~39D=N*P8|tD4~F(BQDKyCv~0<_;Nv3rZ8oqPsAPx%3n6va5$%y=E!*;y`wclr zBLhY?KoHqi90NeJwn3&B;9S^C(rmSj`W!+SWCl>tR(95j5Q!kR;q_E0ij0s!@CnHG zf^388suvP9jmB&g-x1fPZ2{jL=>-fH~ana_W`s4g?I0B}0(5b*kmla~2vLfLt zBufk)bP8ihUbmmap%7h3F7>&9YoUBm97X^&JrU0nK z^0j_0j1^zpY3PyY#VJ`an}6I4{}@o~Q(ZCRf{Fz+$QL+U4Ze24IUD0}$9LyD2y@r~ zzKf`x-7_Eg5;;v+K)Dhm;|cqpxZ%)0Q;? zmdH|hWas7PB~oCy#(Jd{O%&4v&T+*(A|(&*oKhTi1D0%v+V4NpJ6fBN<77@c+*nzK z16ph4>GH$dX{I^T=Hy(uk{B8_DF`k|)kM>Pj`%;zh{ipoe}W40S#G;0IL)%e8vlTd z%y%8DPf8rEueOj`fwdOnfymQ7c2HNsbvKWN!V#$K2|2A&34az3LU%%$&^?rPIPk{% z{*e~WunMXqM~lpbjTOILEk7?Pa+C2qfhQrpv2zWr5HGbZdg{|E*+>NOJ4E*r)1UElFmAN;~PPp;hj zOu)cV&_H!?C;`(0&Rv35p(?|vIz1h2CjKn zQP-SnBWX&{s$&E-GBd*GX*vemQj|L3oK+Xh*28@RTO4bDj z1=Pg_H%tr-5udBzq2Qq+V0`xSJ1^()!pEhC-o$Z36=(Tij{NzB*~*%;=2zl+FWrx6 z%~t0-q0~C(mrvv4x?#>#L!G78Un_l$>4QJ-^U5L4b$(|t!Qjb!d@_bwkdMTUApIgx zR5cS~;o%V}71H3=J@H^O5f$<(p^foV4)e}03tcpy50(9`m~ME-1Etpr{+BB7oY#Ew%Uj9L7Yp7;opA?v{)Cj zyXli&vHMnsT@$ph3W71JTW^{4iL*B>v$tomtA0HnO(c;>Bzm$sRK>cbDPRPgj#X_A z4iBSX1Xck=#R|9}Jn+jb4i3iYT-FppKy@%nzXsI|!UA8$J!!Q{_5;$lYlhrp{Gm2x|HKzAw4l;uYeWPf0f`0(QWyvU1JPIzXh}{2AYc^X9)J7T zD&;>4Nl7YC)cj%pD1f2kKldQDg&f&_)>52{*f=uM?Gr4s%M8DqB|NIq3n|(&)XF6^ z-V6CvYW*+UvOh1&I|N@y_F&K?5zbKJ(mqswbS_2#sMmzEo}d4A!iLkt^AaY9z)*Db zOut~c%!*=ZiLjjq1Qv=aCYIMT6-a?Zqv~F80S9AVSgXSF>T^P_i^VPwfaqJ-gAI6| z2)AWG;ACeYe69njl6`7IOv+#c0Ge^G&EHDt5c}5Ix)(}`JhS@~=OKTt4?z-qYei7% zDK9PJa?=gvt$W3zifBH&?N1CLZ8A@`NnOiBu?>-`=hTW+2Czi=R*N 
z1jRVk)8f14E+s0G0UdC5)H0gF)p#OrhYpfXs8cdE(l7(+w*!@@pGS%CZ`UEuUf}cI z`NP!oWb%SNqfb6OiY&KKYW>Z~2%PazOy4nrTUbf+m^*nZD=bwpD!DnPZDK|dqr>U8 z5&%g+w!gY+82?!z$?JPVYTMkgCZ8>RzWeGa3mfY|%nF9G9LA1y1p~xa+`eay+{Qk5 zjt8eN8in-SQFPv&8ZD#*EgCcG|DAni`6HTFU5pnGWl^pUa^%Jw6K>8nl(txeHx?b> zLWIiOJ{%vDj0l*~=31e$C(GecmNR{N5gn0T*VgGjCu|nov(t+S4 zAN$5{%S|+yX%cwchfIn%aAMNtCf75J{lwNn+JGD_5Ry@MJoD%Ef_&d}Hg<(2UhWc; zk_o^kNCINt-Bnua=BI$BdsZZ%cf8y$*#GDbn8XffrlwW&TXL2J_%Na7X*{*r4!y+7 zSn18oAM)wMuTB=~Fot_z;w4W{7M0SBmv1&i2t)$G8J7oupusfE3!9$Ny@xp+TPi!I zGCuG#W$pj@J7r1pH|Z8uo#`N~e5f{jtvn2?$;9)mq(kw9a1OI0N;K(`lDWH27dia^ zVp?Ay3VE++`IndlWa80v86;4nI4aZJXnH=#qzVick4A<{Hb)YRLjvZ=Ii`fHL>L#A z^s`Yu-0E-3P1Ek}h))b8*_$L1wOn~59?3qw5TdhX!QrK0_5HG_z@#sd&q~#+ zCj>HgP-^=cpC@3qPX(b>&5#QP_jVgxay^z*n?TO3NoLj7VFqiq<}ucX6_~;MfPwr( z>EjmUF2W}V*A@;8PA4Nm{N`O~l2PqUarC$V=um?M<7aQzH^?!?l?bUXvo|6vC~AE= zFRAm};Qf?vjb@LN$S1~=N%O+|5Pg{^vR%aj66|D12w=+whp|{{?}lI+R<6w&F;8D;bf zSjBkZ$S1K71`=}eAxhBp4K*`MR)ePsAb`#&h{y)<2tivjP!%{s&|a|#2*>afU2|!i z|0iLbA=MP+Cseb;@Q-fo5xURgJRH3y!BAeomm??l>;fy|cx?T#mYfB51LsXCM59zHI2TDxEeDlx}zuy<20$(Y6yC6 z+JBLA&}w7J!QGHr*mur$9f&5&%4L|%qSv#KOws-?4cgc{(L`UkJ>?z8fCDfSy6Q}3 zFJ8{d*SHT^h=g#e9*OMjrLm$UhC*@Fqk|V+z5u8S_HD<}a79;${N$t|1S2^~$3ks^hGMqB zI=7d%Kt?DoTTtQrb&9%2@frLwc3ZKos*f`S1-C5xwO1lWMU3yaI?I{_jMl7`yP(?({7Ado%K z4wPc)tBh`ZE=@PbBbw6(m)HZCmmgFJfkKoKoW(_cgsg08e@^ALDYgFVsJW&r1Hi>W zeW!nv0O=Z9%Am1C`vebH`_r3kAo(luj*J@_?RSS0JSy{Ne4p9IYBQcsN$PvFW&9lo zc_zmVfpbdKLBbk<3tRf1U`{YV8dE~s$B6|ML2!r;MD!rXr8I9Y!H6m`%>`cNYk05` z&PC6TLEUH=j=F!R#U8TGvqZF|#|viUtD!CQ#ltShzeN@Hb4E@;jJ1Gcqp)lcL%fcYboz zotXn3PBU(SXZH=2#K{v~c6OKpN;$Ma>u7M#qZgx@F)jn}C6w65HJWbW#TM>VhZ)6G zp$17T)x;WuK0hVy7=F_}plmWS5lL|Xmx}EJu^2hjV=&B2AxZSU^VmRhsq%x0-9Z<; z<_mYVh$q2w$X%T%gRfzowPoTz;!KfUOn;lmp9J0X8)o&8yEXvBk?+3<%mjIWN9wu( zOyQ_b$a}t=>H%Hf*=S2JevWQ0n7oe#c_Ew>{pjY+;w?ysdlUU!?i0J7#OKOA9uL(Snp4Y8E_d_{CxQ zY;f_8Cf#X9HRB$aQZwslR={+CY(UM+s|y==5O{K(+ug9J7Kh%Cscrfce^y^B%j2sb zbg$Rz$5q$bvsyb#b;K*fd;VrN`hTli^%cbBCYdjmseHMHIbK7`sbY$f94|>E#TbN` 
zI#a^slBC2Y*=l@fr<`PV4d3Y=Zz9_H5av?o0ayV35@sj(m+0|tcvbO9(b)SlB4*?y;1h_H{TChFa2+|#}lBKFv& z#OAT#t#$5sH>KRP}~R8c7KjguX=v_jq;m%Tt5p5JY9=>aVYAc^vgHu z&-$C{1A>FII3UQ4#j!XGiHQ-8#v*z7O54P&EL!rWWu~4}&BgKCL&`+FUNJZGN1BM4 zmW+opJ2c!iS=W?w)iQGlEph3-IIhl_t+vYgU0c0KR$5g}z1`f@&@fPqoRT1zmJlu` zmN3eKFi3JvBvOV^7E&}x7$PQB$+fB8JDO32?);>S0}+|D*Sd5q0E$TWbd!j#w_})T zqHkdCd`@_aWAS-pn7Kx&c}_)J)!NqbjtS#Ip(vnp1KUCBT#xvQ;Yk3fjB;DQp?j9w3anWA8?&#VV-Ib!Zd=M2P>J05Q`{YqQw5iO>KUmJ13-kzrYohMyP^ z4G@bxAa4eEh6a^bwoNzz@78(mD9FRg;Z?jq)h-WwBLfY5#HLdBTUeistGu?E( z9xN}>lU(QQwWjIMoiDB09ntNMv`F+K`kl6l)`)I;2)mjA(J;YiCNy00=vq(o!s}WS z( z+gFD^mD#CIbvuQ>OaT#jy|#thBW{X1fv-BPdWp=^j>@ zTZd}(nD^u_0P5Yl7O16bF1>B(|1|^Zs`k(D_YYFbujSXu?9}D|y#J|{daW!qrK_gy zD(RhClhk-#nR%DHxu$uhFOFN>*WP}=da5k87_C`<6(54AEoi!^lHX(#=05#3KEf;} zi?Wa*i(kmrt6+l)=F+a}QPoH9Mzo1ai{^7xU8*15+`U9hT*Tb9t3*9iM8ZRNh-E*w zXz8?EdNP-1#N3mrdU;gQ(wtXi7hZn(<+3tU-)2i!n1*+q@oPWxdDkp;jm&DtVquUl zk&rOLQLOTsC6&@;Sr}^+OzMy&m!_DcQG8lV?Ij2$YYby+4w0fpS+2>&W?me(z+Lc? z!E@Ek&T9wV9Wpw81=wsCF3oUnTk_)Y*U%J+(4d3)5h0oqK|nwNfIt8MQ?m#F5EK#* z$pgZOKntXxqnsTm8A4V2+vu|{Z@@;20)CKm0H9x*jE|*`j?;n!6ALxWBlY*5Y6Y2tgsaL27q$@J4Y1 zOjS)BqKWw$mm^o3|4;yMWF`V*>VxpbqD(Obu6KJ1z?;W9MgCO!&etyTg!Y8Beb1sZ zzQsf-_)qAIGD-cO@L;2$miyjm=X}DkGl8CPNl{r-=7p{>k_QO~Gq= zI5_FUwsZIs_(GBMA%x1qWM&XRNBAo}Imo?jF#&orJ6oGF0x1Yu)!ImVbiHKgfMPWH zr-Mv8lU@rt%2nus8T5h*V~Qi{Qw*dw6pH#}(-&i6fbJM=Hf}(ZAl*nI#o4)D8#bLW z#Yic#Z%u(bZDYoamOyk)t<6HtO#(tSnVv@JOzkba6PcPb3EgQ8eVsZRG?2fIKC zGs;OI!j~nv83j)AcL<-*7|?0D@v#}DqLAaXc`*73*95`9RH_69RTS#n;;@r$w%L)ax4|d1okr&^##Lr45grlA}Ymg1fv z4rR;aiI62Us8JHaSqzOvRqUqQHeOKIn0mUH(-sk!8v=zamFJWaH>?{zMv4DIUE$`} zl+2m?Usdg7efx-lE&%LsspMIIUQmI;MCTt#)jyS>A^enR;9HBbK6y{+32E!-Ec6?p zSe2HTz8lHJQ=x=_UMSP#QURnkp&^tSa44h*MI)#^ynb)ii*R@J0%^GbT&$M14^iU* zOWS^wm~YUDVLi7$7V4(rkS@VQL4D3f?$Ey?k4N{WP;EW{XVEAKL?G0Y*P8CI*)+>> zSWQF*B0Ahi)+SK0ekly%SV?I6N^4}L{;HeAc!tFHfEhiGnkC&Z3fQuID`}ohl*FF4)4dfzTk@~Woz=eUob$AE|En} zJI62$76n&|DwvGcG}xNj=)s2hY^sRb&it4YH;>T77C$0*B$C49m~;e&(=CiV*p6jW%dh7ER4c|k_Pm@!GV 
z`$m*hC^iO%o1|Ag2?LawkUnU^~`$W_>p{T|zyw{jOMtkS8{v;02`XX6v%Xi^g5s$MiSy`U)3}HYJ6q9-n?FQA zGW90>juICt0Wl6nB(EZY6rUU>m_x&=NS^5i=6L`MTP=Z&W`ck7vd{NDevO_&?_T+NC?_k6ynRl&p=?HGQk#dURf5Q^qi@aB_5h6b?B>=P2}RN zIbSD)Y^{t5zI60O5L-_!`F_f**vhyU&ZO42dDE(8M4JNSgES+gYLV8SkxqL0R^dqE z!&?hclo(Ay5fm#Xp(dujD2-{~HOuo_1Ws5(x-o71!J!B&CS-|Dq9@6#6)icqfh)YH zPYj%dN?fg(kLOt0(+Lklry-8|?!ARfbf*RZS>?Qv^Jwhu}~FC9CY1;O&fRiR4NPhC586FH3X&ZG}r~2mK{5;G41#{)7zJ+KG@mo#gS^mHeR$zVne2O=eA(@ z9(Tm6O|@ue0?QHZP;palM?+34cMN+dhh^w%hC(Gli1Kd{2-{5lRQtJwZx_P$H=9 z38sr(hXzn1M$`?qEqn66gA_Ou)1Qyz)O{ogemjv&$|iW;p?pHZr!J(W_vuI~iOG>d zNCoMEop6<_bfjX~nmAO$5VqoQOAs}Jof9u?NVS&w0WLOYw`8}!?%V?tS%*NR3`|hr zoC+4_W9{L~vc{Pz>s9HhLoR8jw;T=*9G0hx8b?5Ndm_uXW83CPp(-+B79s+-`yPXb zTf#x){3z^9Nxc}wr6ZgO>YM--X5QowH60rwgM>|S{2|ffk>TizpD|73E1a9vo!p6B z&lCSm&@ZwMpPI@jRmxgIP(36~WJ-lT)ng$BtVL`@OfKaH%A}*3AR2V&L9v-BA4YyG zPTn8458$qeT@`B2en_CtZv)V}LL0y1B#R6g>Xiy-njH(<}%fZkklJHHO2RBP{gPR85K^ zV{4NWno}#RE6}csYqH#W_+`Ngb$h&lPx?IemiAoj^h;S|L7qYr5f?Y}_#Ze~cXrMu zq!4RNIL{*sf(hm>kR8&g{Z6x?5M@}|7(-F1F3FlENyWN5mxzcm9V(9=ic(Do1@<;+ z86)#Iy-2(uaB2Y20&Q4SCajNVoT#xubw2SVX(Bq^YV#BFl^Al7YS{^pHhFfBY>)wl<$eIjd<<`n3KxthrftxfCGlhQB==>CYx`7 z&`I@x6^zD#RY2mbIhbdF$|<1WMY^vZ$KZty1MaX~12XAFhVKbouGH=+E)356B!=Na z#-JlEy7DPv>ry%hc{bJMm`<81Av9PjkJeYUV^O3WZ`J62)Nn?73)_aLCbCiGxU;T^ zkgA_p|Jes`)yNC@Q^fo%l6+UH{5jW~qz5mgV+mb4>4}thmA@9=vo(o_1iSF|=OjyA zdXyRJ1F=jBNMuXg};>i&yVJV~NyNGRI2|!89 zKT^`zk);-}M2e>(80W`Xru5@rORD07G1!l zue~A{z)_3mQ0i;Yv?(!*Aa>9-Yv|x8cfA^#5+Js*&Oe-l57IjmR=Vo#VRQ-2d~}TQ z)FSa*m(AjCj_9e$!LFLur0Q~=o2|ZNEX^VWqvz+ySJODbJm*1{B983Cxkv&?!rHd? 
zJPM|!cl>y2O(H;aOIo1l|57Z7qB-Css$}XEiPW&PA@usxhWg(bmv;F7G)#t^Zqdqc zzNMow1VM%ZcY~HBAw}EY0>@8Lx)z*DHYLQ6>4cOlj{5TKn|f5G@MM?}jRqMB>G-qq z1XZMv+c@BEmFC-YTGbaoi*f~!Q0bwu6F?nhq7(P4wF#oo$9U+Grn3>UfC?F^yFQf# zTMwNE1x$lz-pVF&DUt(a0=FzRw0*6U7BVT_NK>O=TtGdRQECo}66P*0K_Y4{B$*D zMD#I9WkJexnH92iesnnGwYo&|^`Quqo9Y(-qYeEKDn}GnR2qCCe70qxNXbw_TwHLq zo|He^rdu5AE0&sj7)3IBSZTqsrzDl_*cD)5S`sA*ibRD9Yw0OXOMO0VrDPgJ>aLYX zB#?8BZgsQ}q4#8C_SNprW9Z3^c!+{%&fJ)5DfX~PytiDMMGpT-Ezp)-6*(Ulp~?5s zxCpMP@=y0GPj4+9-?kvA!*$4^~qL`TjS1LJDmO!c_6Zh z#;FO_j`3D^?}R!NUjaYF8wj5Oy)YyS=h0yf+sdee9jvsSAH&bB!ZV%(+KYIS(-NKl zw7-e?tBhzzaxzA>(ZKK&(MfVm&OtdX`!@sJm}7DpO`^yP@7{3bLfmRhHos{MOgkEb zk&xr&GX|&N$mUT|$i^n!$A%_%Y(`?FCNt@c|HdZJQq-c7tI;6Sl&B;Q!R>?=J#q4^ zuVOgSQ%oD#(={m$m0};#mPN%LQIaJB_BzP9Fsw?F@I-7F6(Dj_T1- zUwkX09uRT1Ek(m^r)bCmm!R-$CHjZN5f3YzmPTQkVCw5c;_2^}7EJ|^YPHkEG4g4d zG~;ET^b+DYw&g3L`KR^EW_@EGo5Srmj(|=zW#9#cHiGZXw(L|W1r!m9@0C1M44SwR zp$nNmkI@n%fVBak%PEmicZFlLK}MBelj4Vvq)gHHq`ftujle89H6-A2K*j!?=oLVFdhXIWq34DeIqeFz0c*~n3kjAo#+<;xEY zjx|YhB2{5Sl5$!BZ1ZfDnL(1JP?Og^rGJYS)`AyxlLc)p>SpHCWn1Fd8*?|IhN8$( zWXi(zP$V@@jvOIlnQy%FMH&`qg}jK(ofyP8G*=*8s(CSSw+25!dU-JfSr^x1iR*-C zRzC?#M+^78*f2bIjHit>{l+xP-Nb0&8|gP6rIXRLhw-zm+-{h52NbQKkeIbsaE`vu z=PzcDF=*B?5-jUtf@B>jp^7+Cw~uU1c|TJ@*}S-Tq8)AOMT-tf0gfFCiKl{%TPNDl z3~^(6umc-5?`V}CZ!kUSO{a)!fc>usj2VZGO{KkT(qWBHZp;cUP8r$1mu`q z>HXQPv&zK)=90v`WQ*o6^US2Q#*}GnNeiNnImNdF+koipEDTj}Akfcc@LWaFoV-uq ze7=Rpe=>;GLVQ9&@V0=YxFBk=Uf>^L#DWJ%%PUkk)U_~uNKKC<{=>K*uDWT6AjCPQ6XE%-Vn}UZF$i()Tp^*OEZiMC=k!fO@wLb_ zxOC52Ol(y3O!}OC;8z8g!F0#Qk8~doo7(Dl7H}xJU^=r&+QMK_L_e+d38x#I7V=yUk-P5*Ic4F21@yESDrl$a zh=|)Q2-LKdAAF^*Z3zL9>J->={X$MusaZkZ;Pkd|rzVd?;cXJQVA)0kgDr+_u?P|Q z2w9-mh)g9!wl#y|l&@F79>4Jr1B5KxylnmnQ6}^vp1Cer;Yti~9cQ4;FseAlYz3Jo zU5*4_C73SALYES651~1}#l>{)DjBeVx0pfXl}3!*8Z!zi`%rFbOkv*^UMY{vjoRSF zlE67}$PB0xSPW8xSQ;rt`6L*VBU)!V>~GjaZU&U>6=enTiP&^RrI`4Vamk`|ht&?o zer!`QiJ8ifg-$w*X?8AOR9=dRzCuhyhQf#})M^LR8_GSGI*p#xK?3J~nOx&zXk(H( zTO133rX6WeIg*(>r{?e-s*9+?Rhk}Pq1nC#t% 
zpv6;9;9WZMy!Bj{dGUq^L!sS8(?g*~Y&HasPB(2y!4vj#8We7DwkLN#tu}4Ftw-HPvvw{_XcJa=67VlfvAIYU3pCh z$^DoBJVx7yF!VL5s}N#ur(g&poa^-cDAuzQSUhVOhSn*KU|OB92nwKtB6$_)j4~BZ zXhZ^wpoTRll7py6MhyW~caT$$hx@=5gw>L6e9U71jFD1fy_Q zk{B~W7!!fhYB~=}F-=-;DI{rI9C3b6gvv++{O9boHy39#ObbdllJ zcPh+q#&(Y*&ZPzc$A%)=Kr%GlrjpojFhRtm7My4vQ7eZ|;~V*s_~U@rz*P`EA*Ogj`Tjo0w;db8V>i05SDImmyZfLr<_1?O=s zm+6jW{xyTh4)fK~S&?#SpREIOnR!U61U*bL`$ER@35Uz|GU=swn&WEm1U$6^>@#Xn z!3&iGP)!#@OBj6X6a*xvne(XA!cYebI8M(EgFhWg*!C~Quu-#&ISjO38NEq6RN4o} zbka`Vcj5I@v>bQAPcZV%OEm~rO*V!q5$k4-LE|a42wrqu6U@QkGZelWpL^ZJ%RaAU z#4#d}9E9 zwcFFt7(@`pCw}%sy0e?0pv$uqQ>GM-#5YVmGkEQ^<`^uExZRSpqedSB#>hznjw^5Y zK%=UJ?3;tONFgtr#&OFFd>p9ThDX!Cce(AiTz%?Q%l+p$v$)t&7jdwwR7dL;s=MlK z)EFLwnR_c?cX6hjlnhJ80iin!cplft=}x>83jr+Q9Ugjjuoi*Z*&Q@ZZ3v&F%a;2b zl2dHY&okI!clYrJY370AM~XIk=t6p3_`bUp@E+Lcn{PX$VL9*|v(hrC^H(fET?k@0 zrm%$|U$h}-fCnC0JBC_YHNGCw+pfHI?IxLF$Bb2gE;GMD8)Zo(A-*< z_i)}oNJ!AON#{5!Iodyh6-?Uu*JkjtF&ql*^8q~qL_(7{a@0W7>QsDeR2fyW-#P% znvFyvDfj4CQvP7@PUs;Vr@^UE9VmZ#EJ?X0q6;!PEKMMP>Ws?Gr2F_In+3d<0a^>S zCo#aRXp;sgyD7}G*bRV!4YCQ@8WlZ)D(fr%%pwbOg43p?q$F7?ObJeADvF{gio)ek z0+0e+0%pikV#%&v&pR#Um8F>BC8`OxQU`6LC23NtDB9S|-nn9o@@!*jgf#gr6SG1H zp`^xnHkIAkw^3u+tyZs`k~SU0H3emI^xNM&FX{ce;yreZks1ChZHbR6XgQ47rFW))^EqC}_m&>S{dSA$Mkj(RtUonr3>F zXf!0sbQ`hPa5g?~*o!+}D`IiIdAYC)Z+HL0R_X`wBkMM zYgE%WHkvl0rKzI1ovE^7k$h9+<>f0cU$d{r$JK4RO3(0}XlQ4PKfN`Gi$Al)y2am5 zqFK>G1L9=xc)))JuC75&R8`H+)a%2eV!_FGg_@hm*J^3%nC;nW%$2QsdE9-5n8;^&cx9GH zMeg@g%d)W0@_J`yDxapu#%h-T@uFef9p0f;+FfN`Rlf4~TVN{`*RIX%RvV2Q)7Z>p z_F-R}!+Z91OT3Za|J|U~6$`d2#H-6HH0;){y4Uq}V;yV% zuD{J&?K{@BytTVjiAG1`;ZQ&T4+(d9typyGn&V~F15_U>E1F_q%cgf{GrhGk!?kxF zm#XgOA?I0J;|*$F@6A->RhOk&ZS9&S2X(G@wPJzwz23CedRk|FvARAEiZXs*037<* z$Of0sZOm0S=C*TnpZm;Qx^L9wU1i=)MqOmoJzlol`mxKTVe{{8`1xGScbn(aa&vEu zeS_VNc5QkNG(b290dTm)D;9>wKJ3D-!Z-eknFnbhyJUO`saE0yH9E^dwr0nNbd$uc zzbr@VAU$3llxMZND`w%(X|xPmP4mcayg@GgmR|S#MEpY0XprlQx8$oEo~HTM4-5P1 zne`*$LZR86c;QUF^BOxZ&0*_ym33p6_0=>pw$_@dHJHbSo#t5a3OP-e;qz}beaEiq 
z!!)&4*j3nNXI=RJ6BwJoEEEfcQCv`%3yKHhA!1@#Tv${fAw|G|dpsUM;Q<2h9!M^5 zK#)L#g~Y-mf#8A&Kwy9Y5E>+i_&!*G0UD4IOwdAT>7*-tO0r(8y(WE0yU^jQ#Q0k- zIp@pv^I>Fp=VYdGY|n`z#lqe%%*D5+pki*NMcF--T8Wa->Y^Yz{8M^`2p$;V;eZAv zA{Is5m`D=PD1!zX2uKo4c!UWMEKt}77%Yv23-F*F2r+A3)6$&~!lO!ZR5GQqm>emr zOC2KDF8|&6Vdc0=L5bGVoXoltqhwK%>SSn@QDFTV{zycT=kCYe=2V)`w#F%hAFf<+y9uE;|0S5#SB1Cw=0OLF~_ytJ= zf-wQn;6MWehQ=dRXvhcSA;LjnOe_jxVo4s12Ssr=*Nar~BBqP1QQciGR=~`f^38s{ z$-1TWk?JGIGBd-oAM%mvbLpZk>NdJltBy2+@EL48g4~)s)HKPJ$YJ6QXPKG${(ED6 zN42X;0a?i=4f8PZFp*@_Yex0b+?d*0UR`6kw>PG?&+hD&r4CtM(w4M6pJm_WPk*8R z=WqTG|KE?wmOA$)dsdd@lm#h>*%l=!eZ+>Alt)QeXn9^UV?ya@CQHt;r~3DNCOXJi z-(@*v*`` zzx7Hfsj7Zb&FC2IDcFjx7ETh@qRRDYUQW5<*{PD{a5|_kK~&XC zDkxE<&Z*0D>LkVbpr^%m8+O~KW^An^MQa(EQYYO~xNs?ocEUWmTZk&1&k`r6YLi)W zGan_n&MeXE!);41r&LK&RZ^)5ZYG6Kc9IZk;r1G@@uF7BODB1wq?@QMic+T3rTf0r&To1kjlMun$bC0@R#WhwkY{Y;j$y{z(+s+uPiR8$U~A?jas_3oE+W~3yE z?V>V0X$bqNXPeB35+cN3%=Geiwx@0{h1Y#b(o8u$mcHz!CY3ZR`6Ol9a&~387$e0{ z9hSAC*w|SLN<@VZCnwS2!&_?a5TdHuI|&jT4G^c1fC&vaJg`6kf)Rm|Ai$WIpa2OJ z9wI83K*C{yViFbn)a)|z002N$FM|RG1$lZ#4f}i~VB`NSQ!MUZt!(?*u2`IX)t+{= z+wJx&yV>#kD+Vxt{Q3XQyX>xVjdyL=_J@7$?;{wF_h_)X1CQ`b!0 zIcU&$I3NzAVWGjWh&UEFXyAA>8W1!Z7O>zv91{;LE-V}=A%@2T1x5pc!EqQKC*px& z(ZH~Pg9Z!{$I*~jB*!@bgX4jQ!vzI{24YMwCLCyhumEGiVFC^b5)KZB2ZsqbC@6rC zP%IP+#X?cR1dD}Yp-`ANn;p5Aom!zeG$b4Iih8{=-(V?%+=*$ID^NEMdovN6a%#4R^XoxhT$Fn1HM6e4l&oD91UYCitc#UVrJ073?wCs>W z4pBY(9b&)z>3?~A!%pnRzHHOp?#^o7Y}-}Musw!fN)U`3Q(G(u)+gw)@&At>c&4?h_ z20_v^Gh64DzOQf8Tit%oU;jIkYj=iQbMf}wiKrny^~Urr*;GAM^Z9&bl?9Frpd;A< zDMbVW03ZMX006_15DW~5MnuwpJRW926r>#0KE8_~p?0Fro48$Pa|TJ!GD0a{#fimu zjIQ;=_WMl|u~#aCP68RQm>fnJnh3~h7|&#;a9{6b$h&Fvv(uR!1_Kdqm)1115gdeYs2{*9m`VZrXEX_ug6-Nnoi*dZY7q*NHRCa=+9h)p*`M zU(^Wl@7%eTE~q<1fTHMBxY7bofES>G?zPbs^mP+nl)i3`jK%uyd~2mW0*G&Nd%4CD z#Ehhj1ey+_md@HN6y2l!$ zmi7v!(xG)`vemF4Q%0dQP`r(adc^U|gjf^S)T<@&m1pxYF04sFn;ayDGOn=a)4Cy> zhlRmUY8Wc3;wU4pbJJ_jm;C%9$U{yIrn&#Hw zm>+@##{gRd;Xq49Ysbr5Lx^pSMMOc$>ns(;Kf&moPREvq+Xw_Rt-It$+}2H$_FB9e 
z)>el|4f&P`uyEgMaqtmL`nSl|o@FLH0M9rW_-?$Ef=OU1M7ZI}vyy4Se~J^q-UwJk zPacZkKle13D6~pJ!5e{Dwttcj=ouEFJBvV+jn+tgqaX>;_S5ZMqrUuQq$vQgtdYUE zVO@9_L;_3!HcU<+3%BrhpMl{C*m;WxC1Ww}a@9T}!1(wHJUGG)Qm+GQR0H*J`ALri zS;rR(dLGYeYM|Xv>Vu&ne-M-#$l77 zms@&3H4&`}9T030<8*xoY65v>V%#IP8;In%&*xvv?44%AuS2EChJ)B!cg1flAQ{O4JI_;! z$EkBQCEtX$Px1x3bX^&Ye^&Jvdbqb8Ma>P8kbVhiA4OdlF6&&L#{<=qHj=ifJhnzC z;6&!_TEUAcq3gi{?&8r(nk)`{`rKxKr;|d1-J|H6%b~B#Jp8Ucso5Qz<-2gxFDY=z-Jl6y%`YvmIwQ-oYV!xS9)8f$=dsO@{$Petv`-yaZj!?SfIl|sVs>iM++ZkW4{S5G87i#qC;u8hm5cg8|91#xHQoK-=w)B84atr zZ7O_n9b9f410VoPXKfQF;2G-;@*R`4*8ayFp?iGRUNQh3w36zvGy)Ujp@Be0s)@`f zJhHg~RspdF)izOJTgY;P@7n;j`Rm$G63R1JzCuqZIvVfT*7F97q8}I*>|)Fiql>Gp z-_O*S8AQABdbAiX9n>D|4IsIR2t&x`7Ff~-%x0}|tL8X^7;v;*o`pi1CZ9VrkpQ(# z6SoU{^P03uuR^emfq;ueW%M|F1)npKqv%d89> zZrT{*+#|t_3fFcP*lq;gEF)8yv4F6=5XKT}HT>Gvp;q3hK7%oEqoH}j!F@9F6jSkj zC4av~Xr|DNP&6JAKk%+ViP-_$oS5}nn5n@1LM;1UC&F;FAK+$RGwpaU-e}{N=SuLB zqww}+gF#H>9bK*~jObw^;49x6qhu7zO3|;d`MC+Yw9|^^p+Atd%N{SQ;1-h|K?S3` zYz(Y8u{D%#PgoX7+d1foC|ucC*^vb^XacnZ$T+oq5vh-zI|flN(QDi%CrdfDc599O zSpH>~Vh+|o>qBz$A{MXyS*n5!(hc3^_N;;HND$#8-iMV6W>8 zQ~KG$sqJa;xmrnLl7>Uo0#062(G??ew<-bA(7*g|K@@bBN&F}7xz=4?V6i7sb3PrL zry=_nf1{ijzn|Tq_p#cfu&}$$w2kX#>CeYRbhh;MWx!6171@|vvezXS&&0J1LFO%l z#YKS>P3=L*Kux$teYmRzA!&q$ECLqvO>6^1PjQKa{SRkwy1>}NWSzI&Tv_*4*m~V! 
zWJ|;ej<^^U_qLm2HxVv}+;MT?ELL>0^YQyL+p+@iY!b-Qr`%t*<(l_xv1dusa1s#x zydp>OaCj6r6c-h1I1bZe$&lMkCP(r#1X3*c=9pP;w-zYWa`(>;467P2*`yin?-b%Q z2$0RGjm5DFdf*tu>OUv1IQT4U3K7#mFSmabS>upHUQ zSVRDCVp#lDA_gQa7RK^apmg+9paf22UF)*>V0$&L6?p%Lv6}-!iQKjd+d^9Bw($Dw z3q;KjLW?v2HtWx9Ib^~Er2=i>EZ>>^m@mZLtF^ESY$`-N)k&UL}pi9`V9`)&!f&I%E;>$R$@7i?H9Wap~(7yq7!R?YM8 zA0uN{0;+=f)I5bKVQ$$XOrs*aA)qRth4#C&26>U+@s)!~5T2Usq0F5EH+prk47S~{ zH2Pv3@*&L<06WmOpHbQmaui0DDnc93*(PewL23qLCZlXCMBJ5KtUG4``KSuG(v-oE z)mvc3K77QS~i5s&D3fG3}8PJ@W6Fw41rOgTdMD(i3wb8 zBB>h8o;j8(kQ(}IbdTTze?0t+B_C%FEBbYDv@^2kB1|qm0SLpI1s&Wj-lbE7@K-Mw zb35#7dC<6n&S>i42d`;|d5K;bf)Iu_&sf%#0N0KDQLfqQfu{#sLD2|X0UKkNCu{}$ zeY?G|6)+s`hk{`%U`itcuZ^W)D`=>`9&Y?$D|os9#8&W15NqtVPln||u7Hsq;60<$ z-V64a&`JO}Jg2g0^Ibg*J-DR;M~c4h8<({9-@(F7oMH)){BD$d zxqLaQg$KOhP!_5CbEKKrV0jkG{Y2R)!<1t!g~(Izk~l$O56A@W{ik@dP1H|H6J0ne(dG&FvBg6rr|t|k zdi>UwVS1o(+}!6{f*I#PZy|hHJJ8?=hz~Q_8lNvX=LgR&e{XoqVGAN93fna=$h2^= zT2ydH_PZFmRk9G8mq|qELSXX(@x&y8eMw*Mow3V13S1dMDBNJ*7;(nX?w1eIRmoED zN4`q`P8?U%=Mjn=D{;QWgtOoi@Co#k&!s;{yvst6Ma2P73V1?t37}-q&qB)L3CA>b zR&;B%GNqhv;UckGaV6~shDX;`k12rVaq7s;XMyC%G#e%q!LjqZVzk0|Gt;-Xx(Lp9 zO^QJ{{L4n$XJ{~1yU?v&pl_-6nBnY@Mj^A4gRxmUZImY0Md#9J(uXC(Z7?SU4|Lin z$kSeXlqt(CpnpH6NJV(@C&RXDxaS!tXat@9D%<75*hMjjMf3+nu6&+++!`|e)LLQpU}I-0iw{w{h`kZqJ#Z-Juw{L zJ}!;OQvHCELGiapf{~-3MM1KF`ox|ZgbEu+L5UaV-o#a6lTjYjy@?CFQ%j%wUhA30 zXc6+WBTHhpt3{V^5#+ZL^$5#EHm%5H6pX7;cQ=9UGz?YB7dJ8(cWs`%7wQY{h`U&@6qx5pNYOGwJt)li+hq0DN-BV4rq({?fC5R|+T< zW4I%jSInz{Fxe<9l3*95;Gw?FAGdk95(9Q%0xCOu(FbG)pEq_r=R6DhR8JskKNWBG zw9NA4_r!e7JmSg|TDsM&36}N314E3R)C*h{i6U8sd&7mpcML#S4GUn7l-v&H@MR$@ zNx`!8nK#7pwPn&``(8HK>mg%C?$LnAyF3saf+Ps-o2|n<#9+rJs@!4bakd5_EqVho zY(0hD%L`Os>5Z$kAg!MOBbs=Y@_t|goEOqXl1JqM6cLAuyKhincH0n^BHh;jTPVo1+B>`N5liXDi3#^p0ar2Xw(nUQYJiKSd&lyj z``WMeKrTTAP^P(ipMp3Y%4!szVknZ5OJ^sJBKVbAp5?>nz<5;h_U5_ddb#y{>bM}n z#q*s4^3)C$FnJz&+HQAI^VYq|do4p4wg$Zy-*MM5IFOk@GN3A@!a_eev?s~uv;_ua z#VnT^;D7lJJ;`v4w!izBR*R#m@4qC%5j+l@R1%hsO;6mV9^T++_zwgdt5%r_AUpcU 
z4ZW!n!{IpjYymz)ez5@&R$C23WvJKy|2ZL9du#(s)Eq#<4qY!~fc5mW^E2@&+P+Se zj@*L%SdMMt&~9unE91j0+Khe7p?%v0B&?|8?`Yfc8>n`?)pd-F?DxlsE<1gA06$9} z3$v|`b~(l*{A*)mLCTBCQ>qd5H+f6j$V*N}IM@ymGN3TtkR(Tbq9zYL6-t)t)S=Ia zk4#QNDfkUA`DJoq{)h&BRvBh97>}o#LqMufJYc*>gyJ><(*7<2$G-r?>SGF-1v`pZ3M+U*{BRhYhR<)Kc zz#EKFSyKV_T{g6l7OEJkiP8*y6*kD7q7!M86J}7V?W5Hb=vr(x?X3<Z^^~&D6MY zfZRcy25^{+9H8l={m~#gFK+Kvdx9h&%mYTJ-L1yN5%?K;v_? zW-j9VP?2hO7uX-3vJfV;N%X(063kFo6`pK z#C=?mv$Yoyg0}?)RUJ<%eUJekH=)DhrE}XXIAgeQ3i0Pl$lCfQz>j_sd##WrNuDxc zg*c9dpnMRbdw`Y~0Im)_<%VtIq37KGw^a5HW35p;!s+W-kbyw}QZCIIi`K%W!{Y9! zXCiIpJM2Pr%I zWsG9~ynGn~iq0`11$@u5%Cy05!1xokrdo$e7Bw0Dg`bhB1R*Mo($GZH9(Qe}!D+fv zIwN{gQpXdt-jOB-N*y}EsA(9qpSpw9xAbiSHwC1RgO`?)`?Em@jTEb>5mk1Ud@LKR zGDJHvxB?svS&e7TNC$$Mhzl%*{DioIH;#$TXo-7ai#@N&IpyoogJW%7}j}bEVZNe1QAgu>-6p5CsuG-y}5Q;8s zhtJGUp=qT%QqK%yX0e&aJDi==A>ngRM4T~`9M5{vu|2ij`5ZB0$s7*P-9PyojR4f( zO&wL^L^p5KFl!}o6 zn~|qd9$5|zJo95Nq-p+qK#^Sus9o-a>%$dMbQqAt!8J@BlH1r)NyyWAGU9?688LZqP_)vvVq5T8UY}zcTmUFggJLINsBwM z1Hn!6fPU7`C<*I>Hm(;v1wzv;-e>UWN>~T-2~rzBBUBt2r_FGWuNexG#zJ+EgNXqj zbJXCWC>KQF`un0Y`+0>KI-ODjo%Gr${LvP2M;PDoOWRvRI!eXWk>;<8Z6gJBX#`D+ zRF5I;Ad|q;J;9Y4g+kp`Grlk@%*Z6i1bojdCm6Piq~fB2O=mCG=tVCspJ~Ccd6aXp z4}hPseu0#8L$wIXh>(mt2tXofat1x4*=Q^;edP{|Zf9apxG>5Y8G2qYKJkJY9Fsxl z*97+SNg51zK3ch*k7xiF5}AgxLZ!y_%u6GN6eT%MG$AM?I=$;T;XJD?Cv+e(`iS;q zLtO9xx)^EbR1Z$KAm~-_8yTM!&2%fk5rt@)CPf(4f?a%wrNKS2Yl6}XIqJ72R2l>z z)$sGoaYK8?MWR)h)tO3hOu1`-6k2G|a7?tfGM>3297eA=X17XkouQ!)Km&K(FGW~b z)6v!_J3W|6nV6guchOAKeoK;s7Y^eayk+mkzset;>arvVZ5vT=VoA=rB)m`4;l^gB zpSlH7kR>*xMfC*;zwLNbK7m^s5QSo+?IFa$0%xPO_71$QK|tdXBnl$VccHe+=dqa6 zfW(80N^BN-d~!X6PnUlLG(68c%``SDOCy;zc%tUg2}0dAfvTp!sBARp)`~pa6ae88 zUs?^3qFaRKLiAjtnIl~nUl44j=g*zh(tmV|2bI9F6(Ca&77%8B5P-Xiw})d(C5WcJ zk+hf@;i{~aUo?875=nMzr==WKb?>I|<9`o|(}zNzz$@H|`kxI41zvIGrEZSvS)7z>rs)1{=odYb*o$29x)MzmQxYBbcwg1P(2T zi`-8$J$B*Cke#=!o`5(=P(@)FG_-?n;-SoK^fbeS=A;|~?ez0#nz@5nsB%+2AutG1 zKPF|76&k0{9Bf^!Ob_0bXf33kNDR%jhfPVSn_|<*?TM7&6Xz=WL{TMtcz5ZJtVbS9 
zTC(aL3jJ0Y3+H!E2N!?TO)X1jqtDiIb+!**%ag$&n=Owl$5va28M0$;A$S_J1~=fh zsp97?oH%(Y<`^iqK(aiqY2u1{IZP>*J#qBZ#fcQZjt)ye-g`Pc&KF?7L)}gRo5fF^ zJ$H2^`A>(3Bnd>Iw%-bS3s~w2Lmp7-{lpFSi6iJsv5q1@iCb_y;QPfYFDDl%?5|fj zUQKlFD;#8%$j)PG>^&tgFx{PUdPtVB*U2x!t=en$dh`+dl^m;6B2&fmoWm$&6+3p8 zaDw)1Ul#>;RwRW)aY)8&FH?TjW$wqLHhwCHJ>3jodz0%|k|yyzg&_k=DhQ8s%U~pb zkmL!fm5(kG-5pZiM#_f{2MHByCDaZPfupY`XXX8)yIM|4fTNK^RcH1xL%2K& zuXSHw_*45_!7ydy=kul=Lo`SEjT5$QNb!TLz5qT23la37h>T_45+@SPiZhdcNwM>F zamY0>DuPKwsIo+%4<6NwTS7<`=k?(&J@E3#e-lo&f5?peUS7)@VM~mO5EJ1oMm|+7 zhb_>FKIH}(WYW4$LBz7ponRJ&M|hlY+!5gl+cH`jFKTklIUpRBCu%xO zCTh&iJ)bz}gwZb1#wZ*Z^(A!bpRvof$)srp;iiS~rgN!jS0{A~@L^Mxp-4x4$J@5l zIv(IK8LWU)WeSk}z{LG=KnCC-=@m%G&UFTG)Gs-?3zd@^kuo1Taud(J3ZSFqC4bFV zMeeeLJ7MJz@16v@)hE8e%OPiNTeW1ng{!z^06AdAbQ%YplfV8Hh=xZe#E=(8dL7Q+ zy}KIO-uE*WuYJc@#;U|-xf8j%ES}pnSpj-a!o>lF^4WJph$P7!TXmBb6|Sp1*&oCX zhXwt`#6K2k9H59Tn!J!h($$!4lTD4TUa*R9dgV1Fa%BE^0vmx?b7dcT^O8nb71sXu zi8_{fG((u?eL=3^bAu?_1GnsKg;@$K*{pKDcW}IAoTv_8i)>Mr=-9NYF=%gdFH3QF zhn$xX`N;TPJzmA|zGEhcuSQiSxW0;io1T1r0E>$?UjP{{&1x>Z5J2ieW;v9v6+3yb z?-5ZiM=oDfZ)1_DOU|m69RH~n`c7^Dt52533ela?u7FVNCgJJ2PT3@cIiTWyNyBE! 
zI+sW;(D#|Uf?%g}svE7ry{}W9mAL_G(d1JP;-iqHTJ4O={adG71YM_F*bMJz26%<# z%i8dlO;MoGGt&7P52p{*4R_vp$Z3@3%ebIRgbv;+ z9DhbOJvG0{a+ViS-e?QDc8LBa0H4yB6x9c)yp52uYj^O2XA^cZgrhf>19XL?FwiUZR<9ofSHAb}cJT^Qh}CgkJ;5>ed%*bFK> zz>lPHD1-@YfV2}Ik?P%|A^1NsM%khs-ve?&ii|_3=U9LkWIo)8X+7>MF>r3Li#7n0!%MX6Bn>7io2nfjj3Mtwf$5({I5H|X?Z44K1UPRAuJ2)k2KXr?2Z zv}&PJ8Kq6nQ6L_!1$RJRVfv3Vv^|=9O>o(*C8j8&*i^Oe4&#NPG?d9{2liI%j1Q$Y zrLD%q-~7$t(BvjUF#yHPXfh;xB0CkSMbeGI=?DfK(+fFi!)UsOM*xa}&PxS_t50O^fg505^g~%BkdWk+fLcN6rqg@(-m?6}j zEvA}1K>?{)wq_*EHl)?uLJBev;dD2Cg?7IH_|PZW(#j)Nih?Kzf3Kjnu^%aaJEyhXdCikYq(Z?h4W6hr#+SOWT>A{3i z9b5z52{X5Fsa^iy)b&clOh}}6IB7u?N#{mALuM~^7DbKhIzzZ_($9o{O-muQK~?5J zR_Pg8AeizEN7MF9zm$fRiVVMTn;z*slhkODK~-Lq`oife0-gIvjh&w=61s`_NN*U@ z`!Ib?V4fwBDYA{={Dg26;pq7LRI7liQxr`B`zKN!dX)1A5G~b;kS|se6o)4N8XN2+ zI8woniO)lzew!_Ab00U!Pi0!QD$EJl}8gI?it23L}om3S~}-8 zu(r>7{%{QAJ9?V3$=qm)Bna?=lVaji+LJ1ZbjBA#JCv{l8QKyP)bE)6{{fEhJFsS8 zL~j7p)}U9~nHo-CYA{_;V(7Ag5kkn2@UUjFXOuPX?JMowsKz$c=^Yq`E9W`wr zQ{;<+b}O;?Qg9zmvA``}=ZrzXz1;+kltwmM-AyK;Dw2)~Z?%i(zo{S;byKB+e#qr) zsgiJq%eKv;UxcO+^068n4=3Hh6-r7R2yHRMr%|aQg-BA3IHnIdy=gRM87ct9DQf3# z&p=CxNYd{b6d`e^>JyC@v2%7CrZZ46rx7DFl(UgAdZtbR`*KR@8Pd4BnEgb(EFg97 zR@qpk6>QS7rRl5@Vs8~nVJC&tl--bp>Vp(xw~=i+rf#4=8@&^68qO+$nc9;jeyX9S z)|e+{bh2WwRex^ZXWv1f1;-jco!D$^Vn9LDj%Ef&ggVw~MC-=)n?z6f`-ugMkHlEI zr!(XegicB!Btj5T8kC_{oPnxj2e~vq2V-l8YS%2*qEO4Au(TWJJ#H2PoC*1qf83MwfMlu{vi|YdCB2hZ}>D3N+T=TR@l%_fia;H(t$6=z1hBA+P z^E8=BSJ^mQ9Y?YZm;95Nz+3bx4?aSo^XZ}#^-c7V5mQ5=^PhltUz|xbA-aO&?-!t= zp1i!3C<^W;=pRYDf0X(=Gpt4plVT#s3SG z+cQddgK@?GJ7vcdG2T)_VGc)Y8sX;LdKy9}r+o&U7E|kF$$e?sYe2(-2R)NZL+gCZPGZ?*|d2jxmG2iP}my{Q8`jsly{;OYATh8Ar23k!**6k zQ-(ApKR?w%q1h8z2-6BH-$igW%0ci760vdKBP2|d3W;QGBjAKhsy zCnkqU+Q4Ciwj+!aX33(k5Lc~OiQkMA`N=&y&e%B>y+7zfnW3DhKC0jOB@|| zY_G$wGK5KwrcU)p3p*Ajfx||rs)|m#_qs+;{G4$SBM+lO(fkyx824+DCSZ_`J8R>O zcG%ZO!2N3xxj&<1HC8x+Pt8I_s!xEjh{^K3x{`eu2(~7;48v2YZ(Kc4KL*(cHQW## zdl=yS7z;iBv>x9Ae&iqb)7*duJfo<_UZ?P-%oHn;xo|j{Qg@{1N-Z4)oO65+4X6?& 
z7Atb+`X2TL6bCKCk{m|SUMJDrt`T3*RFu=p+YUuVf73q8qUrX;$eYo8o=MaBh^#?3 z>rT?3e6Rq*?jXd(*&@}AhTx$37@_nfN{O)h6n)xECj<`xnuJsmC=iVX7*GM*GVd?!mfimMUfQ;6$&5ExvsmcN zB85dL>zR*EA^-pYKu`e%0R{n7r>c6nsj@;Ur1}(+si<1Tm|#rU3u0Z_S@&138&v!3 z8YS%I(s!(nP8vkF5IOQ!cQ10-TBNiZ94k~qjus8x%9&ZMPE^QYSF+@?a#IaUP!T0f zJxNp_<(bgjvvjjZJ|^ul*DSv|OO~&g?5t|EI*&X_SEK8sM4KTTygpEle4w;(AV=e1 zOfV)I2XatwfHWWtV`K1tiUAp7ePj#*IFQ3$DeNIfIU2`7p>Z4|KsAnXj018skfR(G zkfW3d7!wRQIF1CV;BYh?kOMi818H!ez=H}9$^wN`kYIrV8eBL9@PQ&ig9#8IoP!AG zz=bO$xBx)}6AdH~NGfRH5sEtqMZ{w9LZrgJ@I*WmMO+_}1R-cqnMorelB5)wJlp~z zflyp69-fpUQB162<&hJb8$)e=r1h$#}WsURvU6mPP(oq&$n`?T^9{&XPxDP zWS~5m+O(L3; z{AO{PYc9GiSiRL{lV&y6)La|gVOF5Bp5-Cg+#QQe8C}+hcCT~Eki)7<>8+C46}s-$ zvCO=NHuR^4zM4&KuP*v{vs&hzgfk=)VPZqt(w614krDM{Q>=1VibjS3)%@7SFJM3c z4Gdtwp%QR!S(kL$R7oJGO}ci=c$IipStE#JRHzhHhJ#tIiV%RwB%spa&@_~JcgIOz zeX`Du&T_Kn-|DrRw7TtW@|wlY`S(AimL}`kwmmHm1np*6t8}@sy&v0zGnOC|gJBAf z<#M`}uGOoHb%(L^2^78Og@vahc(GJf`tD$8bC@m#uO#C$pIF z_#c8tt9yIp-F205QaO;JQcADyRGXd6EII3|>z^-k=z#>=f!NaAZ2ngzZE4r(UEZ1M zuw0?xrGx|0C^VtxXy%E=A6=taY!;2qdPh~dtj$%c!b|5~ot>R}AZI!B)}4tQ&BG$r?z_2ejtBN^VYlVj@MwX>)y^$O@3aky1xB?SGV5kwUwcnA<}DSWMgIX*C64m zRL6}mKJPmT{%@>t0t|)*7BCVEW=yggnX)>`rB^i9nMOAwL_eg_au|`G-yCZR4mLlH zx#~AI%fv+UIO~_mWK2xJU|=v*8M?9!vraz{+Ysl6w^_sdYNEu#17pI%e0WrCW?qe8 zI*K;1W15}LMgTIU6m0DNS00YD-O886(fK7g?(;|3|{^1iWWYlx^;LI>zX75i$+&Ta3KIf;5(2 zt9Td(&h}U^o(Bs-ZY&&IA<_Yha1od@($cbS&^rK36PILYx*U}}hjxQYDePeWTZ?*D z2eV%j3&*BZ&GpaG#;Xh251rc?xO2OE-)J|oF>t4YCB-KYqNfND#utFhGzofR&OA(> zKfRn|0a~_cWY(vnBMUqU{W?rirKZDBFM?-y8eGjV5P6_-Bvdf;B?E+r5+)pQZn2Xr zhxAY57~T-Ph_=D8LrxY}bTMz9nIyMepoua3ILehJ9Z`NdJ`ZqU5O#nLD{MSpSTtNR z-AQy@#mN%I;xze8zutNRZrgawLzJeyC_?MQbR1IO6)V{o^o010)8sJ>F%t4KgNem3 zUwBH++Jkf^7w@?>SdnCJfWYdh8#zw2CcGVT zc1&58xaY~5LSLn)*;=>-lb&EoS|0-?hdKR>T4#&Tv@Ff_i&DCG1&{$Gxgm2?TR5kc zUY0(bxet^`^)-2#f^r%SbQPMPjHLiaZShSMlz;?~U`&nw7HXPLg%0d=@h#9Pde+pNMv(L#Q#{%G4p<&bY&if?OI4^Wf-T 
zNni#yiBM+@5t1!Dp0A?z=#BtPt7Wyk*!wDqVG2_}Nl9fa4V-SyZ@!yRp&U%5zqmyk@NwL(GU7fK>O<2?=PM~2`fedeOIV{CVpNCm!6Cjmo!Z;wJ3;Cw{M92 zG-ge#gu;nt;I{{x*vSCrTLzu9&P=5~PDoU#*4X?A+CeCYn%x~a$giU^7lfuevGAZv zS=k7SinNd8*ww+2yOp~rUy4m+n<`;r8v{k(I+h~|*U-I-Fkp~|xEO*VFhagi6zzi& z?+&O*hT)DkJmQp%C2!Gm7HMfAJ5jI7!GU2N*5jTW_Qm;z(Ur4L#Il1YsGwL=y|7|A zZgJjWj50k&aIr22A}Hu^h|%9lcvg|9Q5UE7Eh9&X;`R1%Wx8J8~SOXww%?&^T)#2+I}cXGjyj%+s^Fe#r#kl(LF zjOB@czzr2TBF%VM8pU@`;hDCIZ>-I(bLt>h4M^o7T6w6{r1u-Y?*7!p$LvX$tFDY4 zeW%CZeUJfX9R0+ElF$H!_NHCYcPFDbDoHCv?5Je>Q-Y{#;L;#8zD;Ua;$X*S%a~9T z_sm>ac1XENSqv?_ziJDQ%W8J%)?%!*;=2r@IisGYtvaUqY$ zK#T!RR#o~~nLhx%&K{Ernj(UWO#tcpab+BkEGl#ZsptFf1j1Pg zdbR_{pDoRmfn0`lB;-RV-jV(K{^@NAPlwij#U>&H|qv6NxDV}TN$rXg7&{v!iAPBkmaJk2aKNMt6 zN_isfWvj6&UZdZh$xgfZdg7B2?x_*eB~48)X?ZdcEiESN)*L2v>X@hxa1+|Pjl+0q zLo;Y<7)F{~g)n%TNi4zOXxX6TrrzPt6jmLeCWzhv#8t;9|5FGbSCJ{0$*%`v#=x0C z?>*v;Pe@hiZK`rQp0f4f-fXx!-JL*x{~l~f>%&vpSHPuaxk-$LguRj%G8kg0jNo%ajl~(Vjff4xVj<{dV5lpD8>~5($N8cX{86{1$&T4 zEFPvIO#PJ31ZsB}v8CzEJ`)^GI=#*{SxN@!UFO}=OoovHH1-xPk`&KRCZ^!x>IrI( zM5O4%R32&XQI~BSP%+X`N8Nb{N->+dilEaHU z6MpC4-X_a(p(v&#y_eh}xA_>l@wFPJC!>P$=EZB93 z;gnoL>5$q(2|t)>ph!VgGGU~&+o@k$%AIMPx;L?plgVN%3{$d6f}>%{OO2V5qGj2R zM;w}BTSiAMg{Vh;Ah`m~L^EIX(hKfXQ8tUXtoSM-I$V>ZaFdyENG)OxsH-jo2!qv@d^Qh z6KDQ8&Q=+WG5w5lsxGbLT%^cMt(4NqrxRifPk5)pY>y_BCKzKdnv>=zA%D+4$>#!# z)UEc(Pi{}?G-*IWC|Lxa+`f~oKiV`OBB@jWNiILl*6i@O5(}76FmN0X$XmT`aC4-h zExw%w+uNm~zW|(=jICb4--{S0x;IKIJg?)O5ftPWKd8<_aD-mqNR^=^geE*casKVd z1uAu%FpbB>+K5eQ?pmvfRS~f}$bj^ldW)Pdy(>#hdZ2l;WC2!L@@$T0Q@k`lP{sX7 zM81SCw;z?IVpY@i1e;Q|zTP87hF zDr&^kmCKn!U&EZ7sqxIJH7J?qKCvg1#zfzhczce#iQB-(^6BqAQll&(ln;#Nu*6z0 zWe)m;0tN3~-!^cQt|1W$5509>{qfjG9;IA-k7Eiguxc(zxMD-<2R*tGm3`<#B@xks zST$os#vdZIs2B+w{`8WmgrAD%v14kKad^l*Mce1Z|D$v{(~ zjKY8P9Sd{LlYs1_XMFjRV8Erlq_lhoZ_C1EKokjG63T3#J=GMw9Qjk3P6tPx&^Sbt zp>UwZZ!1)XHgmH503ZuqGX?g@Y6 zu@`Q!A@b>#{%s0XrQyyH4?pDkHPR+kn^W!E^a>H@$>9COsZ}24&<(?L{^kn{ge)o& z#C(bNHNB4O`mh3pGGFC{Tz{&dF0Xd@)QB8 z`~l`vkhS}SPw^^#NJXalus^RqqF7avX+-$ILtg~#5cP{SU+2OMSuk9&7qBzC>k30v 
zZw#qWyLI6dMc5<8J@Aa^prn8JYRXuKONePRfA|U^y1{$W+cZ-OJp=B|)2|7azWz|) zbz!=rhmk`p*RAk`pKK?g2fnkIalkG#R2&lGYDzAZ<3QT9ieT%diow}>yJ*qWny^aU z^bm_Cnda(9H*Uho%-l?#seeRR6<%*7IrY3aTfU{cggQR5@fQtg6<0J7Q#e;8(p^b}FE>M7PliC!8y{{I z0g~QL0;6pK5D{Uk^{3B=u&wQ4fZd)#AVC&f0 zpXdsGk2iG2rMOEnt*7XCktv3kz-=LdbY?c%V|)?@rP8vEMsT-a8N`?d4NT8uE<}Bm z8$(dh9G&IhY5bctCB55Y33GeugkCK~oMBKDJo1L9XBGjUk#>x{P~AhPT!^s6a5m4_ z6R(^=J|7f7ZYR+nhpF@93%yHa;8=KD80`jvAVx;`(6_L2ZpEPDYp4}x$~O~nf&nSS zUKk-Z#05|uH?N`wm=@M+@_;)QX9JEKbiI{L3U3}y!N3qy3QuFH(6M#ww2D9GYn(Fq70WSC*FbJ z1$Qx;{Tjyo7e|};Tc9ur!AMXI!%$SCeIuhk>W1L5A=j~u9J~5}wigAmF7TiQ^)SFr z^joi~pb-HqZX*VxgnTxjMWHq7EO!=%fcMDc!Jw;oL=d|eJOV~Ud*o0z?Oy5v$pQFUQ$EgXikVWI~^`s zMqA2f8T~;5WRs%46z1}@(%&7eI;9Fe6GCv)&y_-MJnT}Xoa>p;;mS~6A?LB&aGLQ< z7t00%ujYbwfWn;%A7&kVsu4=}{FnGr3hUOffn=y91=|?ry)=o%Y%`u9zL)M4W4G_9u=S{xUtz zz$P(Zhy%)xYI-xhP09Z(nwMBLkvPC9A>edJo_Ie5;Ed%)Gj`Kja2r149TNVsD z+T2^HaT}6lZYpkgZ!6VY;EV2&0TM)_Td;*%@+89&-;aU9C>u`xIaJ8JJW&@`1-M}2 z{xKf&*e$HLq+-O-Os@@4Nvs@1^p1$bQ*l$g7|Z)e5ylN`2&~H3D8+A9JnIe(SlBDi zT0xTKxb|&?b~z{n>=J&8RP}dk}o1Td;kkh%&NTW*MvM% z-yB6mg9+CaR}CL)bdw<9m(?=OaSB7kIgC*T|3hH}ND%%`4UIdV&>f-P)NhfiT&UHU z#~hvJkTQptaL;OCN~IMJ+ncZ+_|&z9cu|illHsc1#oSa-&o)sU7KAM|(j(qDeTwn1 ziEv}#YZDO-L$@LZ0>)-b0$8h)UzyXWgSV-3=tl-DZob<}k&oC&e`8s_RE1i&D2^uu zs;GvyC{*HunnP^1w&dJa%ENc$aC%T<9kn!@abfLLg)T1CYuzHt14&m-1;c?6ACF|; zpi$sh-8`vGCXPws3P#Df%w-d-O}wp&l8k>}rP>JDpQunOZnY}4LN`5y#*K`V1qf3% zPTCv_-+~d{%tEk^N<@=2VE4(K!H2=Tu{oC{#o)9dKg3}d2T0lK(*f#5+0ho94~Qyi zdr3Vy$08t+<5D9@tx$@!T9tl*9)^XAklJs?JjI`^HtmJUC+#RP+TKjk#S%%Q<11V0 z4*Rl7#>3-I=N+sRln^x{{A^x{B&`c=gwUykIl0sV!GDl6O+8JoUZ`GKp^JEFgPUB2 zKpCxIwApg@#GWz$Swi zlZ2%i(X!24iZWeDOjNgB+#AY)+{bqeMA45V=c`f}+5?X{Y?n@M+TaPO0}*i($JxPB zS!jAjg(eBbEn`E`G%r__lvSg)AqK=t!kZKObaM)hQEY@Rf*@@H&oTRE1t5m>Jz$?j z(z&0qI8=ermQ+OJC6!s^V=DVe(4knx(LntSfhG=x7_CoEZsLW!`riRYdQ^y_q&M7Q zUbsdi%p&ONfs-o`Jl$+*?u=TfaI3K@pA@V^JyPtMkUYt;u0P`6J{)H@YO~{DwzU`y z=}RtmtCQ^uv?+>aTGkjXS;I?SV^TQVX_IqHI$L^NbCWOTLm#%GX0(3qaco3BCCVYT 
z1`}_cZ5sbraTEQX_!%BK0!!#TxV54m!#g)GofuY#i($|`6J+qdA2d3v|HhD>W?Ayr zo%8b>pTOIOGknNK%3(xYR@-BTPrGEo)e9Zrt|6{7NJEsjvr zB$=vsTR@*W4YRZ5cEh*BOXyxoNrivIzNZHg=}!iyE@Lw0mOzb%Mms-`Qn9hEBY3pr z0?$g|HzIq@1dijMJkw9dq@~S))pD7SMYar^+Bm6KyStzh-3T)qGbVF>NnwF8gWvN0 zW`>Z6QPU)^0a#jv6Qc34ns8RpLmnBv5QGRwyl&L67^n%rhXHkFJ}a4P&V(%sH6j)e zOzcQm^!MK{4kgCJhXU+bi~|fo6_f(5POLqJQA~fhzp0DI6UOy<-URh!VmG*yR24}b z%Ln`SggBj)&dPQtIU6U5%cacP-0k~(}P`&Z6zI-lBt4c&%8;7FVMVZm=FzEk?D@jG^N z2<|zYwC2JN)Z=ZVXfI{l8A6vUayf}h=RF$dBQA*4r5NKg3LCl~ebwM}pVOzL-Ncww zcB)#gcZ|THV1siRtf|$b19i~9Zk)p>Jx%2PuG^==pF%Tl_(x7) z;DEM{?mZA*P*p|be~PMWn(UlT>GWUEKFYLTr@B5_Hm!?bN)p*0w!wX?+lZ~R8ZUx# z`&<_`2lszz>~Q~wJT0-rVMioE`?#7+IOpw>^|B@cF-3q}GXN3Q>EyN0l=$h?1%!Ue(-3-f(sL*YV{9AN`Y0(HLn_Kq4=A0FNEP-Y2Bnh^comzw0qA`l_W{)7h zc8BXCAd3=i!pp#NfHrW*PoQEH2}hi{q)|Q%sY}QfSP0oATADL^HPssAB@V?6#fZfg zkl#c1&h$i>7bw)8fMv9xzb72!G2471HUVALCx}9s@)NNX&~f_-koY9}Pw4swqyGR0sNstjY0&O zt{8?7#b9_%i&`Rk9dnUh_Lv$QLW3gto^-a;!ZsjIa?FQ%ea2SYd5haX#^$0JXI{(*GscDbYh@?mI?Sj;gH}|?x^1`ln~?T!mlTU-JGUMG0PSK zJ!er@7%wFUArDEmWB8Q0u8w&WOz1Wa57NF$G_Qof^lg*6~D$3D;q*X<& zgBh}l%%Thw!r7mjfp$v??mTOWVbnwi=M(ywM%h=Vms9tRDvI{u_A;YO;zeR69!tPi zafnXHqu$(kcq)ICP_1<_`G=^uyPbhG;YR8UXw+TeuB9jdo7dwPE+!KyA|&^2aG8#H z@s~#KCa6xSWWmI#A@Pa&Lh6YQP*dzYvgHl=6Fo!uPAmP?`@D~%WadMFq4#_vIbibW zrv#DCPat*CW)DrU9F?^Ja3pp^ommd^4|h8zzpq#x;?Xv;v}(4%l;nrcXZA zXiu4E&O|KYJ9%>U_)e7=rk8RKM5WN{XuW(zmhy3eRQ{4jk1@zgx@4t+mg0q^eDTfS z(jD%|l6L8neO6=Rc%rn3~1gKu2D1Xbw-P3q(yqwq&5M{@mM1(z!VK*08sHv*VAmmY?qd%n977pcx!eqrvsmzlB7l7{+R4; z5(GgI1jhqy0v`ex0(4Kj@3&Tj#-Ub}W>ipM8OQaI&o?aJkbDvqb|IQMc(`W~g$A_y z8R~s7%vQ||)zVgX%w<^i-L*3Djy${DS(WSTsvbqNnSbJEA1GT{h zSO6J(PJPOG)>diPvs7hzpFc@y6s9Wq!9~4e&&?wq#-b==s{NfJW_~b?XB@`iHcXNf zJw_Wx`JCQUuai|$9@BN2p$*SgDV|x40}sP!v?Huae!HGl8}8(sl(+~QAXFAH^?lM3@%DxVJe2?E`zA7Qs##C zicUc&D>O`8s6fGj12}A$i0E(ufeREGXhf)h!6B@H0~1PsprQko21^19>;h4tp#cmO zupom509in$zc@%hu_TEh%p!|Il{9%&uDLrkl89(xN3O?^VukQXpFAtGX>bb{M#)o< ztwY?XG0lQV879kOb88r7n<@q|lAOM%D#oCbMY9+L&opQeECdTkEG`hCKn5kT6F5jP 
z2*Cjb5J2W4C@R)M1s86@Rq#Ot2#ARZ7RvCpn8oKsS(d+H=v1F59hIf_wpqN$QshEt zF-#h2lKTqdY#3!(hPee-o8-5&O6SvzF-{QixQg;-Ntm-IkP#LX0#Gm;mIwr5!Bki% zXe1W5hYAiLFpwedQndn?&*!;C8^`mArxb15)>DTzj?2muIty)S9BX>EAOQgbEXV?) z_9-h<+UmNgspu3ohzboCPL)n=96r)Fj!ok@*%^n{Q#~LXm#> zk(!21lji;=O9|5Lm!%X%ahH}+m|GGBQCGRuO66(0-p6_NYt#0nwp&W3dsb@aPFvDz zt>ev2tlC6XG`rh7L$oc_E!3m-4|`@LG>HgOt2|p44J;%o8VM34V89R$?ltpNJ$)9B z*RMoA4$>QkeRwn8jiX$9bX86ESyjcKw6#4lMLSn53ycTI9Y|J-?zz3p8>&*>Dc6pQ zM3^w+NDLnmUAXFO8t;N^>MDAL67rE=X^=q(*P5&rU-fj4d;>s)E9%re=0**dRhgsU4lRxHf-C zG9^PLt1_!vA75FkLq&#HEfNzQDqd;@RCs^^4;mmah=5Qg5Qbud2n|D#fkXufMqyEb zf(56pGLpR%kx12Et9Ij{V;4gQxQbVO;Z_BZ;dt)u)jNbYj@#0gD*bddjyv6d(Mweo zwXXJ3`WH`{>IMmk1W?g>T2^VAnPfFhYnql`Ned6=>GL#CvrIoE?qM*aOe4j_M8r+R z#KcWpGNL7-CGHFH60`E|$np-e^3XU?@iYTF)!*s*o@mr zzW`$ZCPJb@LScY{LIp#yOj2V!CXMjOTH2O`sb#dWL{VjxYSI$Sg&|`Qp*yv6lh$vT zOg8Dnvz}u5cGJ%7uBz7h+&35XWM!JES@W20o7$%7b>7pAX1HmeGnvK01RFMpP@#Ni z!u9Fm+R-41E z?fUN3Q*?LVeZH^lvoOG-oOcp`U)#aUs}tSoMmNkW`jKfIyC?6A<7sv%(rM|xap0xi ziIls>kVb8_%H5Kp39`8`hG%RQDF*q~6Xc9g$*YpMn$Ut=>XK)=qE}BmXTG{IO?sH6 zrr)?eJi0pQ6i?lk_SxC1t4y|nkf8{oLc;|L)RrPmXT(spbGH?mmYICEQ>G4dR|3Fh zB2)t?>Vl%kVDe3cx2KNk^$Lnjv-_ued@zpdS5KxAVo|A{ZWWbTA>Qy_TN2eFC8|>M zeQF$;L{#UBsMLOG*R$V6L`y`=-Cf&~_dou)yPEW^?*f4Z77JTi#KfaQzw+|DYe$~z zm_$A%kqkeGlhWk*k`N=;Fz%4;T7ia=)r5 zNP^29f>dKg6A@RHMoElGGFhCqXR>lLwMoP_?`vc-6&`=aqc^H7a@ALhY`pPkYqV8c z`}_(+;h9xXAlkByEbDCNA@N^(@^xt*?jHepDr% zHqmKcT6@x-S=p}pquuA$k=AIBbl4i+H07xC1 z0#W++Avy$v3T;geIfdcHf!WA|D~7N8Qbg!NALb6kdeRXr{RYtw-!*Xeo%qbX(f4ui zQMR!w=kagNYjU1_pCH7;Mrx;eFs#lE=${W$iNf<_9e|2~u?nRHoji<^EGxgq*l%{} z;@DVz>zj#E(onxnxIl*wA~X|JdI)pAL2@)(;e{Ja-RD@W8hrlt2s@*I+_7S9k3Bn zSD<~J1fr2f2-l&QGCPK`gX)nshQZA+43i3rQwWy>=*k9W!?znW9fIXb%UNVLI$wbo zCN+?rg9q^cw<&@gppf4(`!>6<667ME89+z_<2esFOEI(?$ok;)yaLwlG`OFln(Dxrz!CP=KF~Y%67!hhh2xDrApj=OV7Or6(kml}cA) zqn5->68_x0i7o4`hue)=m3s$Dl8^Q~SwRsB=*Xj}cF}y<DA_2>JW1-R}}zz<8Td&`<#_lsRJyeNRL!?qf zD>R3xKwk)sX^f)JklImtFVWt30P8rSxs)XV_GS_y)-@FfzB9KbkhsCf(U#-e09pSM 
zr45>HPX@9v>V(vpirk_0Y+NyDZc__uL1lt51S<&07<4U#uLkGBZ(cP?8MhnrXHa8+ zFdKC^93Z%9ay_%bm>ern3TE;&W+hWVTSB#pp@KC&|^t0>7V7YBJg}DHpEr8Ng8vuCId? zbdkw6c4}#Euiq;&;OgZN8I{74+Haj|y4hJj1=lM80jjY!2y<{JYux<76Pmm0@*i-* zbg`|NgCXBaJzEk~)Wq-ys>h#A19t0emL$JhWsaRtYYmeLsKi>OUmz9`bPiz`G=(M^ z>bhF{U@N8(Q&g>_DzzR=D4|3Pu|qakGfp0~Gc2gVU>r6@T~Wl!p;i~1)2iSoIU4=g zpw>-E{l4=q05M=x>l^!;A6cqZR%+d)w_0zh{oO~@F;$3^Jj+Jdg->>RzG)E*3yVX^ z33CT6ITtzqObl$|P4J6q{XLYOS=Iom!=Zzq3pMoaz ziuAsU+7sy2o$$m(;ubKXW&)t|e@lMqS|*^MS9(}$%mq05!fzrR^_?f;!2D6LWBxs4AgP)1LS#QB zMlXp)_`B6c0*PWEvzT?kQ5RUqU)@S@i~mY;t2f#C)z*nB*hn&86FCjPv~*tXy%=VL zRxQmzpUZ@g953rON38E&=%-D%wFora9f*pL0?I>4s90Ak$D0%AOQ(%DE*g?sLq*M% zvt)7W1}!%@xacHGD~#^f%M2+M9+a|E0>>O|*(L3e4m%2?Bhop8z&$E0EYjKuNp{p8 zy~1~el6x!?2{5UVhi8*ux5Ln&C67%|UHED0zOeRQ_zO!GzRL8dY1k z&9>Dj;eUo3R4V*mUv=5H0KSpcT2vQU{kxNtIBag-RVhj8Pkq}Q$aGs^=Xgc8^7h|C zcTGUWYkfLV4QJ;!E19+%tqM3vdV=6HMtW)empT9w?oML=b68^v{a6ctd}VRDC^7h( zRF@CCl=>%pB;`Lr)U*y^UEgYCI8+6OB=RAsp&HVM0gP*@gNB=eSHanOgmu<%jQCj62hE4DnLkUZ0L5bp1(!b-9 zV&I7?8^DZV&gBO7Jk6TlP~8`S>gB1hjTvf!~FMPOrl zAUFToHn3%qtAc>R>Vj;LTw2%Kq+K7%cSC^U8id`8m_79uBm(WXsIjU3g2dFqOT&z| z$H7-DMkyBR@oUV+rYevyHA42Q*p7~DFVWX)uq{V=h`;Dz5RGWVIk%L1X~_TDClMh& z6HaAmrKn-~25lQG;)q0lwWxYSw0?&6)Xn6D^3=t4a19{Av8Sz@|6>6@mm#qR*#>`< zBHsm8Z1tfgy|ENp$@N~^nQd8~H)#;7h?qq`C=|0$0PFHWrFQ*g)F-l2z&GoV zFs{E~QK&yM3VFs24-p-d_y3}g#tKuOW?p_O-&WhRa0z*^qFl%8$U2St`Qy&wT*VL=*xZyy-yT2d-l^`S>)EI^3R=`1r_4ZiqKcBSO|RS|-7_S?(~H zJ;B3(ZyTdtvX6IU=(`=}tD@t6Y-BdbArRcpX3l0V31}vZa$>nl`fp-HUK1i$*0ofMu!o zPM7yldsxQ%A_}X~YqZh$s^zmg6q5)^FNBu$ z$4OK2|DIqW()4V=@&Ot1T^2K5IX*A_zym?V*UiBKr(%*Hs{LJ^=@6jcu)POed8W!C z@-fF48gK2pLmYz+mQLJ{R%F1-&#aSYJ2PEDt+kJx(}M+;fxM&WZwyXIDQA>&n8xi- z?reJQrQrTa2Ty4j+l&{ZrUsDYA7Hc<%PEXin!~=WDvmhYH_L$3(v}7wcW(-eFM|ve zWn~}E9}r8UY;CTiB1n!A?yavDMOwsfVyxCfA)8@89$$gjZ;{MUP%(KEp&}gTYN_qQ zAa*7V-T}(zhMFq0Cp~yx4YNL=7t0=)aLD^u))V-8pYKH|m$5SWcXqWmW0Ruh?P&@2 z0l5u_Q33#miw*7BA_Z_)eOF)Uo*l|O@3l=_?l}MMR!MK^T738P47e12pxg8EZgu=jJZmGLBbT&$@ECelfa#)KbBJ#>s+f-8ixkW(?Aon{EsaYK-8U6$S^xfRd28B)*~7 
z-yGT)hTp8Y2j=$b9cOV(Wx3NK3#M=NKN2k_2Ml1F!5{*+C48>SSn&ib^AULTs~$H_ zi|YZ$J$!*I2+0o!jL?w@Tt*G0vbYm#RZV@X!{lSfd8+to_kXuNA~&VPr~G6*Lz_}H z??O1X`}<7@dfaP>FXI4bZCkkJS$HB@YKC7*Ll^{3# zgcym6185Q?p4vVTiIONAk%}@@!PYDq)46Cx;5~gf0ViphIg0xfxC2x<+*7vvg&3Uk z5!egDg{cY0xSWQg{foNpXLKDgA$nE5(=;$FE*IJ?8X zKg0x%NIK{>eijI(Kr^tTnF|*^piRf*Sct`yfbJe%Z#Yf6jw}7;su;&XO(g)x*4VaD z%b4i`^r8T}WBmq)5Z#8Y$XYZ~9@$jG8dT+llU3Z8k zyV)Ih1`ea9MP_1Cj_1qZqhvnXgj~0bXBE>F0$DN}JA!(#m>m0ia8|o0zZIIyIc~*5 zMQ3syOF2oy^6dd~4O7SCA-d6`7C*^9GPkqq=b&YNS4QOHBS2w7n?aQC$5&}Pb63?u z#EIfs1ei@9VfOa4nPJi`;KQby@iEpV;iswJh0yB%2@fpRuZ>#7`;-?=f8Ez!@wT8g zI+2i&Z)#;0r{HvLBqbojr$wV8oAeBJv@8NZWsvc#ddNyE9;wn%hi#C}Da?gWX?cgu z#49{%*%(tFK$1>o=6X?4FJ3eo=)o60&5&-BwlSFzZ_IKrc8RV23{3~}&iRfN(_7r; z(+PMe!|UoI3G^cY>B~jJNccFEWAEG1M^QP54M|BX2^htI;t#h>tOai{ohqqh#K?8b zc!5;;yfIO3*q$Vy)dVa~$Aj@fW0+Nv5jo7XiGT67%B{*(wfBH=@Q2&S-HVeyuT=CX zXVPfF49o~a@tqd1RIX&r(>FC{yNnr^R^-mO zEgg~V|J7TAIyCRox&Y;Pgf)_;CAFk61S4Tur{qHErxBM8i^ z##?Ezf(jh*aD9{fGuxl*a2r(wZsv@mC&iXE*wQ)Sjsl^P@4=4_+t3nW?eMvLz%87? 
z>8a@$x27cL#2k?9QB#zR5DIKYdrq?4Nt5C7__wqI@9fu6XRA_Bb;HAFlh;Rc{#PR4 zGg=e>KWAZ)fB02SSiokb5a*$Qj5#(1kVjynq8V#f?;d>Y;5HQ19VTN=T_>9Y%7d+V z3gOapA<*iuLMCUW>Ztpy0hT-$%jyPHHayZ9YM;ntWt*swQxZOwGV+nF{37RO;fcxi z`IWcp<*)oe0Q20TEQNXxF9vinGdWYo=rGaJ);5c=3sO77-+NcUQ7SB12D%Bq6B9OFE z{Mf-6Wyi(={G`qin3RAe5vj(mzC(!}GeVlHuXB0YL!qPxio23G7hJwCJ^bp zj6Gf&MKU}35Uyu+@h(oWx>K1wXB#xdXywNnpmw-$UtMCEHEdhC6r!?r9{i!1x4HH0 z=L5!mO9XhLKTS3PUC5_uvPpBVzE_tuV!I@lv3V25t)dbM&}WFKFMU;a07+dcNq+#A zMKVQT_sDfSq&o^66v%E8Yx+br5l=+e4aESor8T~!gJ9EDyY?@PifWc<)2(DbA; zJg|+NTx3Q8++l}tK?Q?JA{5!nCdL9OB*tT7MqC4G?2v0;uzYj~nzY)n^@YTT-G17@Du|Isn|jiX=vc z`GJ$R&v9#i!THFBl8h^ok|tiy0IIi`)vR=5n!3iCNt)t}<Qv zsG#S@-E-$a*MiWL7A`t=z0`%{AX^EzQki7&m5Wk_&rA|fzot*oaMa6P<{jwhytXCE zW!u)%v@dfbFp$8l^Ze~On84-I%d#P2f$(6Rl?JbHC~EdmG7yc|SO&5iXxAxcqQ(r< zu6;HdgJP(A5+s)RyK>ab1fQ!)6HbpAuJ)BA>}xgN?Pv?zY$;@mfak=hXU%g)B`y@f z3cq(U(57Aud|1z(#3O-XRiXr_M-VSngy*5}{@@Losw-D9B*FlvC7Fvf-As0&x%!<- zsYJXgZA_uqKN-}H(07t+)ilY~>bx~$jH`k;+We-!9myF2#6*Z~0Aqo&8a(81l{y=K zbyM3(w*pO-m{$@CY2CnD(fPZ!0e48kyE(U%x*GOnL*Sq&0(Vwb8CAU(4)|6T(u^G- z(tQgO_)3$lEMW78Q`SrUHHEQ^76%oBB;c*5V;Ws+v6RLDtDDjzZC)(;ewQoWB#>HO z#9=g=K6E7KRq&FC<%?`gAe$<*fkkdd5@$mX%@lX^YSbNrNeR}^&D{?ru8uifsVccJ zgVlNMXOH(NV(Pi9F291q1z&=`uS#EIz+t3byxA%-jw|`%RwJT@UzSxhc-5JPoVbd0 zf;fz7&6KfqU$c5H)i9SBYp)S-&nqS0H7WC{TZi(!-F&In1pzsEv4_k+se@zk*+7!T zjymh!pL#^S@kWnByfIJL6Hjs&;xVy1^F2dXs=ncs|Fv=ZuC+-510}2g1Lx!P7L_!b zhW8t=VmJwLzp*%_fTl9tR>`@Z_UCH}sbpwRzK+;ez#USFuJ+Q9_Ra0Mhlr7~7-4Eq z#-?1)rp^tyF({2v=8Z3J>g0!m?A}UO`Y=~{O1R1P*?lmNE8t|^#0>ej--de>*Vew2 zS|?1zO8cMKJ8HFWQ1MN+J}o*hOFg*E z*o~#ttNg?(AOS~ zkZ1gKBR&!N3h2jW*;sh5V42IU@)&3TOprD6QO zD4k6&ojeCX@CD{SgQ0oQ!xUVe92Hpsl88iu{@i=2T#bkc^rjk!N@?Ikw(A3uM&rhF zD`_bS+&rDdq1Tjbh7RqJV{fPrqz*=_tK^Mb$$U2w@JtXJ(I|b31Yds7IuIBM{AJOL z#Une-ECt@o=DMCNOo~pfP$0(ns zl@)co)?7V>OM$F&?zhuZfnAU$-yyl6TAGA5A@qAmn;KBOE*na@PIw}kZ zA5bhnkz{Llspb4#tw3KR|m1$XCU=kY@h_LJX^~&fgp;M ztt-!Y01l_8L<%k5J_O;YD8OjHG_4Uh&=+B#LQ@q%xKAnFHs33!;5NL%e}}GBsptT` 
zd!D;glG>(W!La;CHrlLX=`4dEu*z9>=c?9p@RuL)13~Wah<#~uZ-&VxedU`6>;Nj< zA2Z#8eyLI7-zg3+^2^mGrSa>w68`;o+`X&&d;8**%*U>RYOgP>QQ3*Go^)s1Z(F(8 zxHh1!DoOj3xcX&X>6^+e3_`?WT912UOop1ZcNQg-i;*}FtRHL_HN|8XNmfQ&cN}98+vVNwxS48MTsxWkf%BmGm*82 z)L=-!oeo8@u&EV2N=;=h=Wa&U1KW|#$ZCQ!(|A@IN)KpON!N}Q9?t+KTlxza*cPJ3 z%eHBUjLpFfGKZ+L6;tLZ0X`HO0j~rHV4qh0sI5vntg|kq!JyLm3mo}xt@o*A?OF?f z;dR%i6ayr$4*gyo;mJjF&2?w_^%r>ytBRu|q>W}YAb~>FL6@e1c^tMQ1 zbCrZY!JtcJu+xx)Vm4w6g$}t|Z^N_{mi-UvfiFeRpO3T%Rx0tW?*B_~_JAJfMZVAA z)_EZztvRN?t6O^kb}adUdg$Q&7VJY^g;u7M^#MJL8|2P2u~67phbodhzcccqF`taN zq*U4|08l`Q7X-qX0T)-Rzi{cKD%Tsr;C%S97 zuuGlf>oBIe*B&i!mEqJq_GL1m`qYntc4ZW6dv6KNm<<)40x$}hlSmDYu&BhOhUQcL zLAgL9V1;_GvlZ&E+C#ToX&h0(51`;=z`6%h&Vogi}G+O^l$(gMYsqYU`+%YUr9221PdAlL=%$B?oB&o zprBWN^W9$*hyZ>7gaDTck5bPd%TR;_Vv2zT2n-2RA|6)wipzIwwaC&?iAX4Bhk5Wu zM>nDy5siqXMiE`d$~t!ImA1Omh%ZRvlsvqBSt9gAg;TD_fO9 zC9aYwQmn=t*&!yxsrg_{nAxM~itJ`ZG~U}(3EDm>7n9~WKM0@mp7ash4bLPvC&?iv zv!VLv#{KdjXlVLWR16|`-0ZdTep;wBND>V%k7c|^J_lyV;Tuk;=$Z!(Y$`LHl6jMz zjwv!_hqYyKI9}FVS3JUTZrdVgu;=r>GfwZ+osVP98uZ)cqJ4A~O{dNDSy)1v!Ax4O zn3w(@RgS4K#N%9zbDR^S2`FD(cXvjh&aBRktnOAfR(23_9cQFFjJzw-SVogF+EHD# zN;vA}8rKaGb(jJSwN_OcP%2WXgbu<7K}J%olT}uK9@)u8iSDk=MAQ=n3JQMx)&Ls( zlCaqh62=ufR(K-3BU$q7&yt)ip$W(A!h(eY3k6K%g*Le*>w9di=O9W5_6IS&8R2nV z8ehvAkIW#&((wyQf3cL}ihrad#XeHZipAWKTG}0NF053Ctu^0f0 zAG>w7tpM1eHUY3!+GW-41~g4m50F2d4*(0J9bnlYLD};8fUuw}#PJLUAjR!U5~QpI zXY>1yDNGlSY5WJ?YO$3n3I&=Q{h6-<+N;GIN3zk+wsXu<}5frrNMx=Z|&fV#qaTl*X815}v|QcQsy z70Ufi#iJUf9G%sKii(tiOg@#5b5#&cEDUB$a2_V}d1~v(nrAYHX#KiLx}7c(_XZcJ z)nf~st58^cO3#OqVjN8!LE(mSl9qfb*L9h-aMNC7NVvufQt(yoa#E%p6>`_A#Y-x2 z;-R-GN1sKi^x(_*06*SuI(&=u<#rCUiS_)!bHajNj}7v-`ta=-2+n%6bASBbS@)i` zk7cg@`rhx&*_q;}IXx|3Z-0C^T~EDG1KMcHoG+qwOrC>wEVreJdC`u!I<(_3Vo~I~ zSAElN(#zWCXI1vUKJ~x|7Zbz6(gp}mcS6yV6ckDkSleJxhG6GJ z#mrp4)k;?u%J$*9Rg`d82I{CqK)A=mPI*{Y_D3-6m{BS+WaUr4^w3>}7ba`ppPjK=W87+yJ&SVhl~3&0~S(gE{I#kxI(1dEf!Tdo$M*{BaVP?5qi zK$!y?Eg30;SZ*A}(B~OvP1&CFK=DRCre{50drdyCnvpwHs}+41sB2r7>_>Wrz**j6 zcskDzI-6EZvkQ2~PE#UOl~>RMDWos95=l 
z_BuMpK=|i9@NpE!DH&IopPY8UKpa3IS>p&47AFj@wL~mkLC1g2zXOtRY=Y_ZSIemj zk*)rjTpGBuBjj3Ax?T!8sJR`-@~deZQQniT04VGhd~)B1m3$~i7AP6>=o!GISfsm1 zn{Ez}=!xazln7*bbJ@2a$Mo$PPNuvMJggyjPi*?8iJfnMxmpRs#Rkrt>ulrfC7}DI>Q&x>%{l z0ga|UV?$OoAlffRO9SZi~rm;0Rpi6>1yf)!ed*oi_72II+JlP5lM=JgF-4tDl>%&Mag}kPDAtRV1nleC6ulXK{RD{ zI3tD0^=H}D-}<}+ zi1#zCorxary@?N1J2k=6rnQyw6mxlz6T5hv`7sT`ofsfOsY$79OBIEY*q+cb41W}4 z#NvYzo%^N|%Zqir2Z976Y#5~mC*?t>s4AOHyJZ?i6F=1B=qY--@TAjLllqknP>1um zhQ}R!EXV}J;^09c)2wMjJ`~CMIIxHS4W25;#35SiKBa@q;&7w!j2v2vhwE3=mzF9i z4-m;6k>$WswyD$BnW~(`QhV*-=lK3WW1u&NeT5497P4ozLN-V>mPR=PN(Z~=2O%7|&`CTSaOGMA4;ajMw1H4=?9H)_)TqmWKdv;9*Eg09lb$C z0}FzX(K&$zADymgL2B~DMt``3E+^p#v7f*|ff(ieg7)FjU+yGcEiGy23Xs^$c~aT* zDmoWAz84hY$fg+(;Wlj6h$t~HxyW7CaC}1o&q-ckPQepu_gM|1{Dg-~OI+80Htz10iIB&Yj=5*y z-qR8Vu)X}XIGIkKHSB9plwVz{2+cJ^1voJa+lkI!S!~U?xigWwGj%cBJ8`gHCN=(71uve6n>y|J1+A5o;^Hda}}c3PMk41>N5j%9fGLl)t+gZSOUe16g;y&X=xKmFfgH2a5>ZZ?qmMxgbpJxoD$;x&h~nDf zrW|pi4%q0_I5XrZh`qX&G7{0Wtz!bgf+W7BS_U?aVgDKJoVs2JYHks?rZ7lKp4Ob0 zPLZda%da0Op?qcy_9GJqN<%Vb+^8(y(J!vVf;bgI6sQ2dlqM_!LYxMm=MbQz`C85l zLWu1r_@jd8?TD;VIN~_Jr;m`O2MJvWhG4=QfqJ7umP(6JnkW6V@Kp)dM-TMS5PoEQ za({4$7auwdqAX5rIK;9H!NeTWp+j-LgAT#+)J=eq@TF1T^Fb3SGr_1V-`z8);Q@jM za=dK+15pAvz&w`NfotZ6>s7%iFGD@q)-{4#aR`y22G}qUIg|yWkza^o`ezm^5#orp z3`t4Tud^t6)vJh5qkMLR2x(dRX*Hg30)txB-#I z^+6&vp$#)&&>0&2)B08@3WyXILYa0ROwyS!^(hO2T=t^sqKF|IB*lD`&GtHgoTSzO2W zLf=P3qS9hY6ViH8%0GtC^!-Lkz!GHR8JToW-Hll6%Lyu3VN*hp>#M~vX28`Ugs@AgPaQwfrE!6`5(v}JHD&y`pt*6{TYtiB$su^>ak z+oO}F8J6fLk;~ykAqj^@qEM)A@AiTo2OuC1VHkcq&4;%aeaF1a?~!U+^A$Sd6;8g) z)9}OFv-h50o=6xr3=Fd0tK0htCV75)=Qp=E9Nr#pPhJ!>)xAC5o;}_kZ*S2WlVW

IiavP6dN`10zzLCGhNNM?h86sFb3aq*Tu^FsXT;&_O>cZDY|S z28NQul+M$grw`mP4s2$ibnmPnLp7-k*9{7lD7yrG3oHmHaT_+&f)`!zT~67`RRt4HX3bK?-)CNTQmQI(Fx1?78}c zVL$R=YUSzasarO@@e}Bw84Taf@H4R|BhyWaz#N+qULwLg43j3$(0IU34oHL=ch`oJ zpC;&F!PmqN?b|l$a?B+j3bkQfEh}!Q_3*xUW=UU=)NNfr2a41zc}#^?9!jD+6rHdj zF)6xm58r9q;nY-QEFwrtcQ}lql#@F%Btaheylp(RES!3hj2?|~!qXIHl70b(;VFJ6 z$aXhKIZh{Vgb*?WNfBiaOaqUgy=PVn~m2DLN5H*rrVU3d#a(fPtGzz|%-$o*VoUW+4s~=CnP^ zuqjZ0NGA=PQo;ie>8_Y4_l->*hbF+9!eC<2xAS&7Rd_f?Jb6u{13Z1p(G%XJO=u+* z^a{3(8qMWEi{kjNk%rzCk=J!$O@hN&?A`E+kWlE;Bg(i_z*Y=>ANei5pVKSA+7vdg z(V&5ng(WCEq%6Rcu?9-1$Z)Wv=_V`blnXFIAS%*rgaSE$oiw3`+D9g7zyQ(SpN_c0+60IAm7QxEi;9Fj5XQHyw= zWXVBXY2MA1cn@j*1@4h-o6mLXDhiczdq1aoAFqWL49@1|3jdzdo$Sjp&QDbg1xJIo z*DB~7o@J!|_qHB?Bhaw;|7GzD&^{zYo#N94#ztNItjHu*b|a$pzf z(kB8>u`w;AYWMnA8KilFv41^_9xMsq)ERR$WhiU^LZ+Kvc>9-UQya{dRLD64;{Fxb zRfR%gYxl@8UI_E8b*b)OK8uJB9&w5Xd5`q|)wr0v6v*z22l+Sm{i{z<&cPP}{{`ZZ z6}|42M1gdY!pQP3>HBnLlK&FpefVre%YQYYqnosGNGn*FjFn(VzxGM`uXC*)(2j@K zh?3)Vh+58_D9JlqkbG^rwOmT>%U=l5-IX3M;CP*_4T<}&fMMiyKnnD?=Hp4;e>IWx z$E~^eOacFkm>I919WmO83He_N;ruUnLpKB~F&ZKF8nEv#+2NC^dw&}8@X@@y-!kJG z{;%?@yr+ggYU_XXL@gFuHFAX-weNqyqPCNV7eB60RQFNq27n+cb$F!#U}1Uy*n#v1 zDgeuy0Rp7$0E(KZ5*%O+fnWptOtPtI0sX!Ny*QUUljN4%S`O9zRRJCWCIJ`$_S=Q< z`|5)%YHYRK+d>?b6;X+Xszp@NvKgeBqSkF9z(7yclAbO_b(OL9Jynb6xwFp6l~*eV zcDmBJ>|A4KYQ@^A{#f?G=N`hZtB{mgZHB1V9*}mC3Zs=!BIHK42~lYoN!3P7LYNG% zz0Ka{L)Z;^72-Au;hz?wE<{6oz0KZch$mP8z-{(6dz-yY4o+^0c6L%^^)@5e?8&Zu zZ_}l5i1u}xHha4EHgBr8DOCMs9NycU-{z@oTeaES94qxzd_P9AaX%J_brTtg3;hFFNhjyG6gv{s03k^gcxE z`SjTSsIO5^w%}I@NXt(n|Cx%)Bc1ZT%%;tT=sZNt&*#=xb?4Z*@i~O_=tm7vw6Qa? 
z8$nHIT6(dqh_82OM*VJqH?~ld&7Ssl@^!qyN(9-~+K& z(L_s~wA5#;KJ?HbWpm0#z0H%>2T59Q)22-`qW^PVOiH(Tn|*JK^)`E&%dqs@?04lIRlag5o-fh-Qf1R1>Z@taBO$Y?2UWIkOlCf4yLV(E&v@O}X_p6X>T9U|_~r15O{oZlT+3c7ISqH;}u& zzs=JOTd(S{MPoCGL+BH=2m~k?8WBWdX`IDCSg5iC6Tkq0;2?2eI1UHHpfDf|2!z34 zKp+eVgu#GNAQT1!LL!+kP{efs3Rv%LMFrStCYGki#mm0$+#WqEG4+KELle| zU0{5P6;OCL;v`E&KyrI@!qw)A6!5lesoSh%S?I;5-hV# z!N?((L+fH%MHg4MNdZLdMfepw2O7bCQN@0q}KRK zhg`k=`DV;DQx<7s!5Xdj72UCe^OtZU$#6D5hu6CATYMpD8omFrIK&}Vk4L@4dA{_P zB=bDx8eyVpFW}nc_3~%j^o$B`0lEs3N(+pb6EzraAiR#e^YplpQ9$+MDph`Nz zyc%gbhhF3KHrJFG*|q?5{v-`qxneU;fqjWkS;SEbD7*Zs)JNa zTXkmlAWNnCg)l;o4H8QOd26z_ylKvQeAFu54=AhvRbRHbU zeoW>*SA;+!)4;&Tcm*Md6gzLKK@e!7$COrPE(QyX$28Kx$fH3tNS620Hga*SOyB@i zK&!vt%VXIZp;)H?%d`A0x&_R%)0=M#30pQD@#P5I7g@}N9t(4J47rxiQpt1GU0jRJ z!XP;(M~~rZkov8SKzHnTiN&I6cqcuNw@ zW1yabu`?Ur8D}1&HyZu2Y970-hW(@)vA~Wlb#f%U;LUgxwzn&Ad)4qR4-(U{`F_!> zR;oRZ;Y0Kg;C(Wzklj+t3%_a{yJmVd-g;t0@l^Pk<`qxhaEpw@YG9nVh>X%2m5`t{ ztidH1-w&o`&cy{rp$#=q;_Wy#^|t7F)tucVsW8qw&+aFV{%BAsk9lQ{k1zpGK(*vq z#_|eGdU7a~_m6dX42#hp^O)$NiTPJoxOn%ddX>L5ZRsmWk$bo1fEdNrgowwYc-}c> z;;{<1aSS@5zZH+I_?hAUrffdFfNwJ|9+TXrzmnYP2+CdxeNbt?bPs5+E4aiSvdhlb z0J;V+VX>lx?g((l)Q!!{syggF?wAl={=gD?Z|>NIul&W<)!9I&vIkNoz^v2r-g4Bo zY!7TlLuZhg$w(tmSMSo5nXnUd8>-47fAf@~nn2}PskKb2OCmL1BaUAps3OqJ>6n{2 zZbbp2a9wk-eZe*nOi0T(sMewcnP5{z9s4GQ(s@DZm^GWRP!{iym1#y8%et{TRz%|_ z7swnt>Bb>MzWDFGF*Jpe?!3I#jq@F~+w)zM&kCYuS9qzm^}fI&DB1Q9#+J+#P_<3~p0Nrp;-A$BYG#GR#Cmepc-+k}dZyy4 zswdlM)aA(I!EDl2Ml+oy<{Z1QA;ybh{K2HdG2k5|2q!=^qIht#Y%@#1Kme6OCW@%x z#E0i_-w3J+_;1(SR<5zQ9|+I^W8Spaq=06Lu&yy-3bfidJ&H)VFD|C97(2|i=l|dJ zQV>c1%EE!)E!sbNRaI61rGWjTji!-*9eceu{NemPapDI^ui1G8fvCy z%%#o9eO1?t?PqvY6%=YP%2`Bpz3+ataszusF2iO+{AjRMC*2C1EVS(mO!AMn4t%2` z%i!9~4jPd3 zpnEEil|>ks7_d?V@y>)XgGJ@xBJ1P=Aj!)BV*(G#3xqMnL9>?fAf!ok9s6-OV{*^> z<1A&sK-}Q$ZhCDftoWXyDyc75_A4^tSOMDGv=jfGYkI+fjLnesHWHz;UQ8%mrPC5I z2KHj8jgqvp5!~nySd5l{r_pxRz1TaiCCILXoVFOM$pp#K!tP>nbL9BC>Ij#nYZb-V zI9u(zn1Ks{1~0Z7-Y3y0BZdnvwp8X|dz(E(#fwSPNP~*jA%SoSr@9g-OOw!Guu(=U 
z)sr_E3_M^mTiw-@5^K=4L|^UcCo6xPkV?=8h;wCc*BM_`__||R7yOfvtQNy^Y|TI; zkj`3+F3y%Bs>fPFOT|~S7)Clt1+JQvCGuL?5=|E~tX&2mB(PMkEL}#IcIa>moSntg zEiA`+AKmF$?C+n=FqQBNUZiD#JtsyETU;dOmqxgJr>0Z^n~{O3#)%R>XrFO-Z^OdO z(I@bzpf42^?4n)zDbcck7;f|4697~t0Q4X~2s5W1EC#L@YorYlLAp!>z#q$6Bc9i< zoP_jS+#U>zh0d`%EQZx`0^as_Q(`gWaA*SDKE0hdqd}<%;GYRh^KNZ!EVkV?Hd>0x zcHz|1;97F0^1V#*+r4W&|xg78LT2_XaxOtNYAaJl|Kzr!$LXjZ*5PFMM*dm?hPy&l>DX zaw5VoJCAfXlrZajbi|EanaO(pEcSVWnH&th{fp&5i=jlfi5gn$KVav#VvLh7TXnQA!ElRY#r2%7ZX{?Dto#kezw_dS7c4@QVraZvj8#Q z!z87zLY>FI)?(5azCN$geoU8g{LE`HkG0ys7CRR1*Xh}}o@}vx+;?P)HDoZ4BF`3M zH9ue8)kGzkA<;P)`qeepDYv{8ygI%Bs>4%xVKGlo-xt_g>G$h~43^)FQ!I{@qJ}cx zhcu?AAOY+>Yzz25n{{EPDYe_BE{fMAMS&MO&Q&qYiVP<<)vNb-?WZG^4B@S70peA$ zZf`sy;H0xvU5e({t#gW*HnY=d(r{{@`JV{*Qt8GW*HWwX#_iC(5^V1c3-)DdRKZto zNpE1AG_05yEQKucT630?qNid_Jfsj%e+SHJh-LVHV2ahEeCqAKxR)$`v4q>6OKqVa zzW7u%yhDnPT&rN&r_^VzNwt&E-8Lb_+GqU-=uSxbx2D)y`SKMy(9jiE4N~^H_0a^N zqgHp%;OeA(@_<5AA_d71pfIXca?px%jvc+}o1OO*lkLHkFv1=yQ{Wb{?xz?kiLIe% zU;{p!5Vw|M*bTPs-y;T%UaO*u#q-cXC{5P&_%Fq3Q@MB=MEY->m&J4x`$}*E#2oRY z^C-5Nz1AZp0UU(=N3kj&Ti-DMKt{;m5~SD?F!ZhU5{yHN36r_eQa+4Hq!`TQe|Fey zp&&+5OlUk;#!&uVDk=7YfVFb3n|gCe2scU-<3M(Yc`NgjIXgv8F?ayET2fw-N~FR)#8b@q4lD4NZ)acQii`MzEd)D)PeipbSRM6dRzM&U<|(4lcVFpxxD&NbYwx8`ni>pCO+dk2lnBX19qwgiE!zQ=yuj0UY zK+Zu$3>?|8;P6UQXujlDp4JZ?(k}upivh6FL^@6G#vD z@j%GMBqqV`<{%mrp_xeh{hFX+_^u3|PpH^a6cz9Rg$tf1Du(2lRiI^xZN{g`wnyq|yj5K& zE$>XP!8Wr0yJ#wBLW&)%*7RK{!~w@9a;VH|k;(>KEjoB(e36#1#f@@O+O?Yaln8&O zJ%9r`)JZXn;Ml2|@&}k@xtI};P9X9r#r9KYGuH2o84)eCbi2d0;}yFgV9Ko0RtGa; z5bzbZAz}@PrS|f;zq8lkW)#Dq$^il{`{l`X-=9JRm{5Hq`O_i+hIzg<&>CQt*ffM< zJAxq;lZquKkI|cQ)r4Z@t2yDoy;TmHu?t+%7Gx74Da!I_#aF@;4X+d>QT$>w6}(_K z*pEvv6f3cYd=>T_iXk@KJQRz;VFw&h%gDjz#U z2|H_a73H8uB+Cq=SY;glBV&Y3TzsuFGO2Dn2jFGxFl>gSn4M=5j$$;cbQJquJkK{@ zHfb@4X!cPo5(l`4Hp$yL2Vvaym)V7VrYkc}Z+epYP({}u*bo!sqLP3Xg8cs`NwJ3i zwSDXBzk6d<69A5{r)dWg{b7c%8aR;(v@WIcu=*-@v(ym+fuVoEk1&LE2{N%nHb}>3 zFS-=$frQ0ot(habo5w}ELHde`7q^2|#(+-ggb?^4*HG} 
ztFZ7*Cncj9cx-j+-YS@?^QBZ^0GcbJj94+qM}F8(w$Bd}wc8jGcxeK&`eAj?SN$;6 zY-qUOewcz+%Av@Q7sM0(1GllgMtiWS8Ge512ox^`#|E8sh?#`B0^hf9l9Kz1<0tQZGfa)z&2hUsK(A=U>zWg&w=Oy zDCUSUCJVY;mokUZRx(!jVpP7sPKQMM8P9`Ab=%Yl-oEf>umz@OT&}{;kvkmV7XXj; zq&@@KSt9cqtXk5g@~SNw(hiuPwo=PAP}LZgx9s-dg>9)E#8LekU|54)EMRkCLiqBS zgF~2}@Bs>DXCBGfE4f9|{G(O0FsL=Qn7?3Hq8=NAVNLP#!~WL0so7`(Y8>IK55o?)a*s>Q7cexA28wXWg zO~Cyzto|d)$1oAj4Kge@3xd4JFttxvDuo!fEh(PTM$6V0Kf6P6_7dnic$bVj{k<;X ze@r|tOt9}2msq_p_9^~JbFoW2yG~q!WND`?^M&n>kkEOrpH9iViR{6BB4_ur$7wF_ zeqr^F*|kwVH!3EL@f7WwmrGYV*$#<1z7eNMD#R znHW*lGJp?=p$gf`iah={K+K8@(~$m=Q*b#XRj$J(|H}Xj+c%?V9$+iP8~*=Wq|D>9 z7cGX$m7>}z<@In61jsiGwv1no>26kg26t6)_a*M{y{RQ;PrAZl~EX zvNPu2m%{tyngMOV+S&o+g*u)(X!Je6Rw+>WC+)@@di`jGrcyWi+^&sB6oDNo$}?5S z4{9g%0a7V6kijrmWbM5N!w9U@c(Yi>Z^;S2k&J8Pkr zVwjco^Z2Wp*6vGnM$#8ncmBKFW~ju-q3R6|-DveuG-un=3~(Upte#nPUL@nS;s;}$ zC1tBHwy5ew%gue_%CDUYb)M_#p9_YUUHoD-j}3+iE9qX!Se)u$7!puWgSi-e`pBXj zIyMt(_u@vvPZW;Wgw;p{oHRy-T0D~S1$>dhl`{vQYw1DLe} zP7-IcNIrtY5(IuoEeKz+x37kWt4W$T9EF7xz|?XxH3RxFE1~?8YeK-NmAy_Zbv0w8Ovw)Fr&oyV3;Ol416C*ncv_6Fl-y!b*@x<>H2K5OdD}p&ph-* z!)K1Xz&%%tc#D8}`RHU7@mYwGgp^ghpa-yw^f}n9W8~1~RJZP`%4mpXZqN^7gcP|AKxXwOJx*`(y z?U(2T6lcDzqaHznufDAixr`}N$^2<^b%?c4$mWE{JhPYT)TIf0w-qfQ*ng%FOn!97 z%e7+ryYA%87>M#n$Z9~q(z{XaY5?j_e@ZeVeZa+yGOtx^Bl+BrJP}abJ_FszM$c8D z7@8@bY15Xo&pTnm144@puc#?Q6PCg8gq0LWpf}DE7zoI7qlz621Qkjas?iYu;>PxO z0u@WdS;0}%6n?7EN*->u#*#+TQq1A*1aV$;@2Orl;WH`Bm*TV!YT@`;7&QorU>)klAJw* zZn1q3h8KJ-Fd6(%=M97~Va}R-=M4%|RY+FsBpv>kH<$3}Y!CptHE? 
z&+gv|mv+GFn#(;(%OyPeB`<-2MW#1%C#wO`Ej85ZwdAfHEGqig{L^|$IE4q^MBL&) zMOGoX;}Te(cU2jvv1n5K!5x0fg;&{V&qG14Mkd5PJ;IS$rPfqsEwo{%?GX+J#>1C6 zM@k3p$2f<*wqZ{sjC@N^ZAK2pF5#(`Z>HP`0b09+8Ylp98!;xq8STa;t80`*C5To$ zJ9;FK4(m(l-9<21fH~;_tFP!xOg#noXi(*k^0AWa6V8Mcf;dn3-2nff5LNf8N zkSGoc3M2``;{u6CL_+`+*ddVw2#ARa5+o!HkO{|xV?sjWLBRq9LxToH0s+zBAc7;DEugzyJeWk4R~_vrXN7Dj^eDSy@?` zm6euwm3Wm`G*?A75fej8-7IRB)w>*(Lct=r$uP>Z>auQJ3Km7g#1JH6!offwiI7B0 zL`W2nL`WnU4hW>dU?6#rSY$w<@qm@AXaE3!A|^a{n;bUT9dBw72{Q}P5L+sn@R&!1 z;-fTtIzuBl?%_MRqFYqu^O}UP8vIBjVZO$rr*3a zWTQ~f`NZ2qH#3{g?Ec$y%QKboZ8~O6bIb}46$)i*v4Njf4~eR(s)h?@cD-J!Wg&7) z-PYBWl~r%M-TrWp6)oLC8^R$niTrQ!Q=vF>IhX0uN|$u`OS+_1A=q@!%&~-L_KEpE zpYF58DipF!7Wa0t3nm@5*S59O73tDUH(&B|*4koM>2k|nSKH@lof(a{nAM$bdg^`Z zkEeOk?MYo;zt_Cxb9&9|==F3tDtO@On%>pUiD`3m=u{|)PITI>wA@BD z7Urj0wN-_3b#toMpna-P&~;;l(pT2hDiot8RTWz2r%-Y_Kf|d~bmr-0Vb3P`)V7&3 z*J%{W-JEq=XFi;1rb59qn<)=^UMlGVzQuN*8Tdh_1aFigh zCL@8SQCUmD09kPGz=4Eeu>s?uF0DSg#d>8`F3iz_FP%d1(Up;%vmPx)wN~0-&uX<& zC}=BD?@XZ-^@mr>RJAIp6pE{?)}?u6t-7*OS(OP-stFBBm!h&-1(*wWvyU z)+(Yp>yxat3{Ki4E?7CinscGIPOvf{D5q;hw`eI51p)uOnV#`@Oju|Q*Iw)YwioYT z{l$F!u`mf;wqec1n(>Er%a9REiC!=*)6SW-Z8Nfw$z9S`zS+&N*_+ulk8QU3uKbp+ zZe!71=lkx;^Ri2qozZ1SyAzUqg+IJ2a$c90d^bneA|LsD_JTzIWm=N2crNRd6-wUH zDpQThRYHqSdf1w_VKG3ur}#onj4JD%+S<&6IsubiuMhxgkfxrc7xy8`ey2 z8JWp^LaLgoIT{{!sv#SJREKPB7>|wDu|oOPf($53L?n*~2M-Ah5(tO|g$g1pB8>-y zFhCfL3lhizgT#Ww0`hQF5NSLj7Aia_KpGiDAd%Ij zmNssCL}Vk6+akB6oSKfuUp5VIvPKJCmpnXMuJ8(vh*x+vOspwfbYKA@LZKmHVG)r; z8Vv_S!oeYNJQ_)a%$PXi=42OBVrEW0Gtr4iV3Iaa;3 z+|DqsPzn_a;obG~y2GKRY(-byxUV9&+&TqAmxq;hbw`(EiIr2NO#D)bcnv-5;Sdtx z8LPv$7938F-{qaM|@=wr&##% zk~eyg@@)r=Muk#|^Bcr@6X#CTZP4_m>6llq!K-IpeFn>Zq(P@IEp0R`4kBIBm6OhV zCY#Z+#x9pOE?e0Y%KNe*p&|CrC^R4*T9oFTS4_&_Wcz*Mt}R(@W1X=2+owgSP|&7UwDP4w zvAbKoJHiWhYi%#W&;9ixO#2X8KgH|o@Q1&8>xcEjyH0q8a@T+K-{=uYy>!#_9`vAd zKJ-C9(G0Yt3Q;5xwS_bDD{-s8fh}p^GLUKtce+^aubL6b_09V*z;-XDJP�L|A0RzI2 zKr|o-LxDgT5Cj7Pfj}4x28IFwU>F3$C<-Kj3}=CR003hF%wY--+?-+NC%ou1=5;I) 
zHk1Pcs9E-(BndjQw_{#XR;UJaDkH2V{qJy>T9hne%WIQ&abTYM6B5IHQQ)D`Gp8zb z<#k0g1wJHHkOg60sz^tY$n1)wG*y+}8B4h@s?Sh6*7G=%Or%P9NnvxYe9nGK*d-|# z1Qi;I)5^4RjoY8CN!&oDTM*9HUXVACw@hFcKvsD>?$!(lZgB#T^l0G09&E&|HM(*5 zsvSX!r>Rwb0aV{Q@~p)OT&4)K!z5C9kyqgx$%y1O$l|!uIODaRjy#vqumy)12u0s% zo5_$#62J%~zDe3N73zxCFcpwQ(86`@yNPb{SuL@2fRg9s%IF&ZOXkoei~CCFI{g>U zfg_n`mBqDu%lqUUPe$-q-#AL+2o`7O&FHqxQlBnbc&`~301Ux;SvYWz7Apa<;b4V| zZHXPcsf9#6m=&`{n^v>qITh0_li_gAG%`a~sZeT^nvm=v2Q;8|!a*SofEXYg+-aFi zzg-&%VqLLCoulYq4Du}-Vup3RCG?L*0r(>>p|z8L6hp)vJx_NDWu6QDC)$B)*l&;r zb!2tTHRpT`)`~)eMQ3zOeqek*?%TT*lviQeWE-&$i=6jj)UHQoSZ~0HWij zQmHV4=-69Asnts?RWWj8yuIOy+RXYu`*CiA1)M&e1|x;C0A7waja|$|K>1wjH#duJ zEd_|Hz_pc^EjV>4k+`a`;j{!ZCk=~H5uhNVU;vx#!ES>QUTr&TPW4%ov@|hAOC9|- zr2|LV5r*oV8HmluU&=|yV2BO4eAh5N3d;pBw@Z^VBPE_ZK}fxAro(8t?@w9YvDs1V zf7~fopWaF$>~fIc^&rdV49*)$Q7r{?0YcZ>Yl+ih5F3Ptm6#AW1=#&zZa(L=YtIU| zU3=?>yJIXi|C#?@kYmx7#K53(KAAlrp?OowxC?Wo6@Iv;UN^Gy_WR&hR10@|>5PFh zl#5hih55oZ#{r8h6zlUKxu7Q;Cjjm5zp6h})|nf13L_J1KzyG8*C-1nQ`7}}l#`O4 zA+*9VSco7e6J-U6UX$-3>xH0Tb@bX)c!^Dib=q4z5c``97i3C>^DROjyms+2!8r7)I8@_7ey0M~j`}6{E8f~H zvyj1(H5dpyn?TkQj?!kidj81|*SH~TfgXUh%Oe5_66eZQfv0JVV+y8nigoj)GE8?U z6Rf2)E=2+ZLV2KMq1Yb%|HOF}a#u$?g6d;k#gKUryGNRPUZhjA_d4%B7Ase0p!&dgQeS6;INpQ# zBwtandT^v&Y<$3rcX}TCkUij);I8?{HiD6K-igx8w`Gf_oUv zB&y5>VjLf(zE{sjs+o>^*f6SjdZ8ALAx^V1si)cyZ4l}hZ}l%zrght38fh|1iJ%sx zSAQIo`|^)@5Og?(Q&Hx>zE`u*#7iT?h#X#jlS_AI0&JQIT7< zY3dY`Td_7&c1XRfJszt^TiqkWXYR7V_ox zoXcHb7j`E{HbU(%Dx4_n{b0g`Xiu^o1&vAS%H|J|%qn$-hQh>YW0p&~0}?{Lt*!$J zYrvL`F)&Vl$ksk2h}r9_=(DhCxp;{sm!*D58H(XuC|>5jR6MfG%Komx@nE=;fJD8Z zIn6970`~Joma8xoNf$mq{6DG#xcHuvm5UHa_^GvviTXzcwebAz9}gE1msGoxx1CA_ z)LFDm9s=C@JM-%sv5>A~8|EK8oL{b;E4sr*-ujS&6F@mGuLc#Wp-sq6#HXmMBLh>^ z0r~dl@6AYC$onZu$k{Z$JYhl5UM53GpC&hRU$!RmIiyNXy?FJcdm4+K%2=J6S3Z@{ zjH9DmA|=0Wn#HH5{io=+%_*D>N-*PNNYO?0nAk|*_c8Sa+yWcL583$}^hq(zVz`?- z6%5jyKOH$Vo4NDA&F$8+6G$-L2$_Jl!>NPO9YzF|J{v$#4buVeo=fCPEq!ef!rbWB z`r9YgTpg0aRXsd3T71WIRa!V8@Qq(^k)@TGVHjkkJ&TevzLms#{J2MthsxQiUZcor 
zqD$;`>@7j>6bDe}0ber*qXChBI`TREtz1jA;ZF~TT2)lld2WHs4AG+Q4e@=^PxqUt z6(1lXigz0ycnR`DiG;r)LGr#BG+AgQh(L!zhoW^{EU!Rq15xdvb3)Gdp8Or%e7i(uNtZ0fw6O;s1pY}a4&(jJ{y9=dg#*3|P5P#L{N%_J0WqF8dwfvp?ancMx zQaXmp;WWALaFoQ-yK3{>m>)AnD*`_VI3!Q08;r>o#Bib!)1dP*5}TXVf0No zbyrP+$bw{0kMQ8z7kRUyw2%$Wa*>yNlvvHhuF3mWH52q5Z3Sn@FflNLeqBp2w*p%r zzm|@0hTb^UeD*XO_3}!$G<$qx2;&%RPBBnzg6RICLP*YhuZdzTM3rz9b_@u)Pn|XS zE7=8XBV$LI%k%R%1P8eGchX>uq*0+Jk`s4NbE6Nzlb~>D_JJu$U%(GcVKTGrziB- zIlNokN2-3<2No3`f?|_an9j0g9%Np#ycJmm2?$;gQ9yXGcPIj@)zEK%V9O9^?0qkM z)zTPE8n`xp+NAbasvDr6)-~MW{?#`5;AOZL1bl$2?l>f@40g+EeXEbDI41&5p}A9T z!pnY`mrx)kFF1xpCMpV39_~x(Y4&_dW#CLq)Xq)fuC;0Yl>Hwqh_$o?ye1{KwM~nP zRy^9{zl)c>o>I>Xaae&H9kx+Z<;TEwa~(6RtE{JzM3Qt_4QuBq4-+I^l4@6`{7{+Z~1Hfv8af!&)OG$b4u0lu3QPY_K429}we?b<8p!`ADm!K~xiv__g3{ zN6)D?V6_j5*!+MVBR*v5iyQ$@-rXL`kUv8%yaDQl82qpd`3HoPSe3cOAB>orL!~?< z2kfsX-sL3KT((qt(=YR}^FgGC&`56&vL@sQV%IDoDvQ9WhKR2eO3<-UfY|)X6BY{| z_K*%6Lac0O<)|7=A z#nNe#wGb!>#5NY53^}!;5EpMSwukrYc%1LYS^*5?Dxq=4Mo0t4CkZ@8DQ?LOX$aQ4 zz~+|#vy+7ejlxgWX>$zXO|nAVHFPDm9u=f&-6|x}KEaRdLYxK$Di8$7R_+#I*>WZT zOCR~tQ#k&#VkqPLF2P_R<|rD2k*cKX#f}|1pGw@ZgSeP$?CnMyT&X5X+iCDn^bgz?=KX^)LMxPa==&dJqwS{VJWa? zVT%Ic!%Ybn$)@W2y;eqr3Io#)&d@&x)tE4PBRriug=AJ7;@MQPDM;QbP@oV084D`_ zV?)AY;W$U4wMSgUb1;i((OCS+=@21|xLE29bo0;YRXf!mpIc$CD*vm0=}prz@(hU$ zZZF~6paqm9_*3My`m8wxwHTG0?U!?dg*qf$+mLxEja#@8?f6fCFrk#}4s3V6GdA2KxB5cy$6+8HJJU?hB@%I86 z4rG{Q5AhtD(mhw7YGU&vo8_eN0D^jiW~r&El+5os{TZjT7t!z4hOp5HoE#o|gHc|D zdg7TKi35Vj#_Yj5z}?C&jxbY`1C=kUW3mFRY&%eza!S{nogJ<<@fO6}EhtgG6IuR! 
zuoLkRWYS-;em(&*0sw#cN(`7i;Xq@w;zdQtdV3HqlG*n3x6^p)8kS|y(Xo!^xZJ(B zVM=1L5b6W2mnwI7jDP_=*ANUu$1QFJ3ZAq?+px-NKS-XvIBU6=5ehtb(zMJ_ zZGpy~qLk?lh1ZXN%P?XMn_N_z)Ym5cut5+1gz&?r2Z2lpX$_1r`px!aT zGL9b_UpsNoCNDf*m&jB(iozya*+k$>j@Wl$M0#wKV*6)Cg^px!iH{*s5>rlzQD8YJ zld$!v?=;=PvvXEhq4!!sPU4MSGy_!NVP}nYWJIh04kB}s%zM8anCK@^%)!wo2sn2m zhkkZF-3h1*pWlX%5d$dmIL#notr||8HL3y;oLVZ@3jrG)Btm#m#t)PlE|ak=2@MEv zB0wt(aAeb)r_P0>!9S0J5=vm1l^!Q$hp!s;&(z5-+&5xvv|qm5y%KCpur@+lsM^&< zzFClo<=yWu@X6gIu8QQ+Q5HwvYD2KA)Sz+j3LQ7yU_U6DPApAFa5*a1Brvo}dZN9` zfxl{oLe32sT3-7X&mE5{mKup~2Io>Px+fpW?5;%psF2H!_@2crJyeCfthnsWX4T5- zV0~UtOe%x{lhE8@skMxmi0W$A#O81m!^IxFaRm?6!OlCB$D z@Mcd(=OGH|69!qEjFKvBD(mC%t7FkG9&i@vq<6!-1uI>BMsdV=WrPSgj7I<4kt(&H zBqYVawz#m|h9#X6TV6D`RttQj_|w`xm1hcy>@YcSxr;Nwh7FA&Nrzp$;HRjE9eSyu zRWjmmN~OzCMdfQ;^j7wE`M2XjwtRZU<77)fiwgb){2?U(Cvf3IG`fuxch`YI8d_j+ zQg_J(pSUj#WUKMz}zD={!8fo%$euuYPLO&V68H0dJS_Ma`>l3or~_g23c#)Dy%8gWps z(&3qdP}=9tz~BxxjjgdtUnm=&Wfs!BHLbW1BdceM@fRU90vP)thCPtt-nKo7pK-ja zt`%c%@q|k22}rCXn){t;o&|PCeSC_1-UsniO-$lPj>(<%bGMEU<$QNyaf6RUow>1BOv9i9{z3z z^rzbqD`eL>VgkA>=%@=dO_;m(e#XX#SZX9ehG#@)W50A-QI>+oi2RF3{Bd*Z$C^iV z+gg*h{;2?g*Z`nHWURmkvf6G96!*SyQ_NbtT4Z&*I55r^6Zr!<4D|x3yUOSyb^RfT zkQOUE5?whK$Xaa=)^fy_(v*)c6`Z(YXij`^bR5UY@E7`$Wqyqr=LdK%A549>i(WX*_~#|t=( zE^A!98OFe*2fXe*x;l7=b@ps@JsomAA7BKj{G<1nu#AykbsXA4!X$CU|F>V*K}LWy zxLpTMFFY%J5ChPMvCyc z_o&2>oWi~d&bw|KFfDYOjt!{OaZon2yd2Quu5Q4xM#llEJlmDlz&9V^sOHc}n%|p;9|EwBiRPd2TLI1?={^5T3&r1DQ?KAM<0sY!6TqI?Ro4e&MHr{R!g0QXJ;4ydV}K}|8pLRltf-|RVj z6p~pKkIM$?C0>`Of|(=mgqk+w?nz)Q)xdh}4V_2q2F7?aR-GnmU^Ysv{?hk>r@hqW zaKUwt#cJ=rz>@T)iLF2wMRG88l>9wZ#e+VI!`)1`=#Yz*pe#&aG(&aG7C5pVgRCLP zztJpCpM85--__<~-Y0$UTT~4^?)CrhDO0@N!Vm@(tuoqS$${b&n?!7Q-7M@jC+yKl z+jJL;Xv?_O z?XtvlY39`BXR;Jos7zm^=I8VyE?{a)(2TCAJ;$o)fpaW1hb@knZOq?DfKkTTw@@}C zp$!Mr*gAz&2B6)lxBr-b%%2TNCAS;!W>#?k3M`oaPZ+>}v#W(54OyQqg}C;9BqQJ0 zv-5M*?4Nk26i^PSnaG;bW$VbD!)npu%cN!wGSbqm4@%9%AtedByUm-8xsx<{O ztjW-4UC9l~-?ac=tqiY8rUo@%G1U(Jd99^oEcLRn^4vIm_8H!kOXhz_w+}JAnS{^@ 
zh{M#rt+%zpCU|?(1v)fn!OHemL^+1sl?A;sgeEl`$!ng_ea`3|Uzqf=S& zr}dSLtX5r*Z<&K@ixp!CW~aRk#tMdiQXm3Gs<&6ch5ZDnSHsN*d)Y@MsS6ciG)%`>xN$2Rwv~00n5bWiA`}x=UlPE^J<8_UI6^ z-epFkt}Sm59>MV2$9ZeeF#KuYF?%~A%%R5Mml;Y@0oOwQu|AR2LPw`5{CQQ?0XHNJ zBpe{cK)^}Z`{gqE6y35e_U14><$T0K+K$;bs?6?iJY$>a0ZnSmt$Yk^fodx)3Yb~C zoq^L+&ywlFU-&ur8Gg*}ru(>QZ=Dp31jAT zs~ZxHKDBDE9RR^*4CIN&GI(wVpvpEWNLe<5qRiVE$Yfr55J^w$8tpMu{(761+WnmD zm96l8%q+ldbpwQ$95iMWlcR>4ped9p@D zq=t?d-+X8HWMTiP3bFTMgSHKM-UP=Cxoa$iz>;cX5uy!)r3z}(>6ke@0`-tYP9IEW z%Az|gAe!5FT4P(LqE$z1!HUQr+%)mOzVyNLGIYtyz6+E>+3zjs`chGd@>*mp7Cu}y zh+F$sCkv3Y^jDnLzX5X`%A|8tpaH`iV>y3SQkHSMtT7K(06f4wjE&36`yxGWNy%;h zmhWloA5r~jobd{ww6i$A5`(Efla=vWCV3C*f92Sg zW^Gv(6HKX?C7l=gyO2zovtCkT;&$VQJaz#EfPyMl zTzf#iSArChbjb5MQ!Y!Hd~yW=m561J8!Li_2a4a|$6mTryr^-~{A~RJ@If<&8 z`6S!#X)WWkZ9V7C?Nr(|o${QGGN+8r=JVY$vz_8HPfbld70OJ{)MaL|T$q{d6s9|c ziGx@)W)Uy3{V8KK*4Y^oTf$;%B)TW|#`)alm^QZw{Ui2>*dLntA8hGKr7D#yJ?KGa zjSbp&cWY^RZXb#5YtAg2miF0;ZZE2#C7F6|l`@}V)3!x7&8unYXUgH!bf>Wj<$v-F4{3+tfx=d!|t(l2~@~dGbw@CTE)U zx0h#1-X?9b{)2<5h^F+r>MyIonzNee?bsmGMwXvx+0EX}d{x$tPy5w)%;U&G5^IMH z-G`&oq!7!d)2otn7x<1dIn%mlt+n^_L_>xVj4^``-tjb8 zANm)JClC*3BT780tz=_#Tl5PCg$fc55HN^9p@HEU$}Ce--Gh{{#Ee3bjmM(_O~Ik6 zqx6tD>rRj&afoYpRG=7F9(`$V196PvWU_EP6by%|IulFhsccTC7>xy@0|&iA&RdP& zyvq}n%c#Tyh6)(2*xX9WOIWy3S;pCQHv&5jzG_8w-R31_i|fLZ#9g!Zd0g4Ve=;r#=>CK&^a-&hXAu?!H(| zF)Gn;ct~)t@+(Gc-YE~$u+bQ^%X8pB;qf2=|H8?_yIU;C8VLj{y_p>z8a>StPM|f; z9waap3Kk$DJanqq$|tirR?YoT09j(H+>ii-1_Yym2cuF(S$!6viK<=n^f2L}uwX=B zczA%~0ik?nx{;H@*JD{m1Mh)@Vc|dm2@Vhe0v?C}0tyt+fCB|bQI+GE&PF&cj%lhx z7BChLh6O5gIas8)#zxEER1yQT=61ZI%)4EYKspzK;jmCZphAW<_d=f$u}GAkNdyLk z#$$m78ejki2ZRI%yxvW6oZOQSryJyLe^7l zw_-?TrRI*2JB_R=7Q$!a(xXJV1$sfL085grmazyO7Zu~J1U zDg~P#tZ9PODU}Z1;Dpu>Q3%I;p&?0KkuLLtGj&K}=tGh+!dow^A?J|fnuumYbSfRP zvazxCf(8ii)!0^B{Tka^HMVt3Xd_7zTYXQB>82VdN#mRPuI`Qdny$vz);eoEI3}uV zbxvKbFKe2*UeDC7+O4f>!`ePZ*F!a0omu0J5$)SpPu6oaW&MxKYRs|O_^579BuONxaq`}}lU?85r!Ect zjwG11*}X13L>|ifXDEAjBr&AB6G`gL$$OGX^LR&+JKfbCNyx6&-;rdFB>s_1S!KH? 
zl^oWHBt=RjIg$(-{_b{1Jfyp?unKSPp1niEJCfY7sw#LO%c9zS>`XQz|BfV>b}uA3 zOIM5PdH>W4?~WvfHAi<-T@Kx|G*gHq_d*gflGw5N$!e(YNP>2qidVHFNsAxE!8_gu z@jiIR`ye5Sll$O(@IHY%qFujfdF)98w4^~L4Xk<|B)YplY0%G4gVdVJ(wa2*)q|KG zw3!DJJxEDO13zid{FDbPXK;uHXK2vu!5$6zJOH?0$oI3?6{p2i5+41|&^N+7BbJo;r7v`bP@U5%A1#pXW0OS=aYio9wpAn>qbk{#h|48)BooDYXtozbkB8>VV%oF)q&43FSwN=0W8zzRIQ+@*6dCFQa#c#08!{6hX@ zB_>WTs8loUOO>>zZ~AhI(eJ~h5SK2CEokKk7b{h`l=8v_?YG#9z`Z%|g=@%~6_-PPZj1bM3NNyVR7tFI2_H{Botu<%_En@u4WnvRp}0%J#B+F&5U< z?|htt7^+jwuc**eWQEPB6XR*4^Tnb@icX?aM5k$#%`rM+?yy5scOeo_97_nq5w{Wf zyq${p|2iuIk1tB<4E632pUZ!IuOs=DSWGI$<3y%y{WQ>-u&e9Uq7qa^g0W!Sq7sOr zOL%$@93oKguwbCTLBSy5@j$@@7$PvLZBadV@L=%ZU_jtU5=?rYp&>3LxpX2)6G=(FTonG|NF--LF0*=~d~8B%ixUL!-o& zl)bV>gNP*e+KS4uHRbZqF|&pwbtNRBH#2!M)E|-<(jD1_Bv}`aOs{e!lvrDux800>jG00;;O z1LCr1C{02ZgNU(v;d1F8h-3{6zs7*;uF+t9CB#9Tw2901Ai*V6Ea4 zSrp)U=ITg;O73a8?+Nw+8Z2BOfH6XoXD?M@18tkaQh@cw}12wwA{~bZn zhjI1wa4#wyp(2DZSj>th{SPbm^M|477mV_D2uG>e!8NogVHWt-L95NB2qhT+{v=R1 zCtxOy=H@pi6xs3rH+p$|$ofia#e|?x7T9@4`>V4$4Et|&Z&oyiZxC7?A{ORzhJ%g{ z3$xWP3%_;?%@h-cp9h#g(fT@6_=(FfFaNknbRf#iEf2-;A!A55lck3au1sMp9AqE& z2|rDXaE_CnEe5M*#rn-03u)$O^;AgV(3;c579$u7bUQt4Q}Aq|b2lIdCevIK)g(X) zFbPQ^mI2~4P%Q`V4L^g@o?a7cm<{_}SHpqSh&G6!Ds=bSDgc86o>wePxgnG}BI_ur zcmf(gfxRz|%wjbuPCB|vVF1jdgz34B(ga~3Gs2&#;Kum36chCOji zrPD&&A=sUaPe7(=1-M_sGH?cO(X&o%p5sT?1XV=JKZm? 
zn+%A|g_~Y1u58rPY<)N0|LzQNCVd}Zi!12%ED{BXQ{fgJq+^89_ysQpp+;^F9-ax+ zpIF0i{F``b1fzEDy0}tdX3K!tT2qEW#4sfxh-c7H7I`L_Q}+h!4nbA8%9JBCgQ}4# zeej9%*Rw&g2Tf1bm>(+vI&3zRVLK>uMK+yy1L!motQl?6G{)w6%mr01wV+0#VS@D$ zf;6At@9WYt7M?`mZ|c&)J`hz>BVjnMm$>2luD$ftwZ!>@B z6`uZ7JNTn#%HMqmgAoiSCPh&;t`Rfygk&$5)#%>sV6jual9gIBz4P=LXIAV7f9YO4 zt1*g8w&GK0qK(U*R=4bkS!=`tP*ItGV>9ezAG2XWw}`WYVqWs0|@DZ1p3A5*wMLQgdjEJl@gO( z=tm&`CJ@N`yaGplu|`rz3=A0>AD`_R!5+)iM=FoEDBiw=m*UR%*B=EmiAloC4~FyC zuzR8L;*jv`zq&1003b#)!8=_43?n1Gm{+lRz=>trAcf$L70)0=;8Z0S#XgXmUg9F3 zmiAdQ?+4{5u85mE>)stp`;<@MulcSs3Tfi#g>TU5OB@nJOpssJs0`ScIJsrMsyw3^ zfVW#@N5MBO$|>zro2a1~ol8}c3!yj_@)PiE%OSAVnL_^=FG&0D{#rIJy&pq=x8}1 zfZCNa2FYT4R?dG9#vW>$p1(*rTUAb~Ksh0BTu#n@S9SOaw#&Vvo-#7Vi|AWpM#)kt$H zMH)YsJGI2i4JRa=9L;~Iv4s;vsUb}`ndK%>PB=mC1mPqyYD-dIbg^f9T;!ba1!qFA z5uB`eb2tMhWlh^${`4?U1)L>V9RTNmdKEisuea1P^sauh8KGP#_|2ekcHgw$wZ0h& z*&clh`6i)dZ^1vM#I^UP)rb#ov5n~U^(M6Z47?4!DU_TyZ+=gT5ZAa=OQ&Incr%;o zCE-oh-yDv=g#hc@fvDNjEPfGIew_XKY(#tZE90 zPFY5Pg!F^uCdt!^+`NDhj+@-aBqs6%4uqSi1RoaMtjIGo@oy7^)*SH6+%kNO>^7+y zy(8SF%R_FPIBU~%X`5Je9RX5wFJ_x83vK4YHV+#Du1!pmjUKEozuI)+uW&_*WvML}Arjv~{9&LpPU>NC9)muBekWod2^NTd*<|AI>Mw$9!~X(|@vPMXb}3-hBz zClMgh^a^8PALCBqcmiY69WM1T<8D9lmYg=Uv58z3}6*x^MvXnGGPxtMvF6=+r^Glu}0iHi6$cetl8 zbNiWTi7#-7h&#|{9+2BEVhJ6prF0=aGn9-8a`sHk%2GJ?GASmM_X?`i5>gk<~C(P;3Gt5bGo~`~ru5kSbAfASY0(%X$CGd|OypnO4z72Bs;Z z(KiUvNo8UT5%hXX98#ZRE@Dy+#aI(e7bF#&ka4NpO*kp}VH5btcbS}c!SLJz*a;Fw z#>5v{6H2)w#$?;8>~GW5ka@wFL+lbEUraa?%cuf>Iwas?-eIP@DrYh>igwT<3uBFy zF8-nr;YQN0Wk_73=2p0Eh}gU`v_faS8NCXYA3--{gE`UdtEg;t4TCvKQGb(5=mVYT zdo5BMvIX<9Q0&^c|46|cP<^;#j!Og+Of}CZBzS>f-o#4^J}^Bc)!W8L(;`AAk5FLI}1PrAZ)Fm(BJH!tBEr}tjTUDA{C1{@tqEXamD^Z=+R>?JI zHQj_no);VR1hoyv?0P;KF!hcJVH=BFR7x?kG^?>H%UY;i=v4rXrcBB1PU|3a-eJwSmkN zF*n<>MUp*|i+uGMhi^#K>&NNTWyPbs0%S>}rV%1;<+&$s@gv(^c0m)3(n%t}K2NupGt7X1(UQ9E*k2IO>G%3-KitB39h{Arn zgTvo$zg#w1D9|i=)oaIw#u) zo#cP&ICT>BXb2|4Q(k|8?j($PCqbhdPg1U_InRp|hiU4`Y%KgajXmic185_O@2C1dR4;q;u(V_f}q4PV5_JLjY|7e`ZmnVM1}#c#@yrD5keaY}H%Q^Eo2o%Ir)^0=kI 
z%BLijeLKsGlg2EeGB+0>dL1f_59J!PDQV*T>2a<}B^OaDCwh#j9AO(+Ih9~=d#PV4 zsIqocdFgZGNSP{^sWVF^C!I{eRav<+vtyO5p1De`@^wuHNYF?WX3F$ge7JrXmrp@N;X9Yc`?uMM{H%bZKV;THvd+#mP|O6#Uy9A%$02D z%2&Ku9lMgUOspE^Bb&4I%HFEdMfOT8yzVQDoc$etWn#*4?llkiL0}n5a2aipAs7Fc zX9$*O3Cg)3ETtx9Shm9_uDlOaot3Z?e^_Ppv{N<~OZDhBC$OV!$=2I6mfOweQhG#9 zzC4zmWtNRm$pnsMLGC-5f7GvpO}qgmuzZ8EtOAv#VM1J%JH&KhW+|M+klH)4iiyLM znmyv72F6&Dop8fj(`q;N9<-DK1`}c=o6(M$M9ai%^bWm*w46Fda*wCuv^=?m#C=X` zfO$YIsc0PhsFopMCk!h3Vhi0h+Yl0K>Bu7Iz8opD`Jb&(Q|wp9_bVI}IS#n1Nl*jg zcM&UYTu05XD?Pyn+u!^;Q|I$iP7F$$$0FT-IfBgsqn5AO9=Bf?!6%*zO+ z=LZbnc?q%9W+ZwUm_If;z4QR7GGX2Gg3RZ7DT}J=IKtNWGgkM6nU^RkWG)=rB zQWrkcoj#T~iN8$TZbte`q^{Mib!`|@bSb3&rF{Wl>fi#*o?HR*)p;sn8*mt1atZSU z&NspTfC)f4T2pKcs_#V%RDslfER#@^-4t#!eHzh%yjvAT;?=*fQ}TB}6zuX& zdwF#*^DEz`k7Gu@H_2_2+(R&I;bg$=4I(kpE1ldT>c=O;VD1_^LeqEw2Qp}4`7e@|omyU!a**`Tw>oavIhVJGyDU$Fwq`(k7@g&$vYDvm9yzY-!zrDG)|Hq zmoAUwHTNmgb^d>X510cQA1#VuUqT|T(~%aEa?A-H!o1uH*zVVM`2I3OQ~FK2G7&mk zYFA8SP%S8#IJd{Py3)%QiMCH-D%@J&*D|{S2J$gT6z`C!E^~d-TGYO7MI7AM#&j@> zK4;IY;PW^_65p73_p$HB{Pv}K_}0sfTmOQ;L=1P`wkS=(rN>60*xD2*!5U2awDWYP ze4B|MvQSy3cB=yk&vWSqOODKs-|?#}2#raY86iZW+DI_~L^O<}3|o)#31g)n!=eew zKta>0*GYks09LD$I->hx67ty-afhLTq{N~9ewk;#sBK9B{tqn(F~QFkuy$v>e(;07 z3XBqsY^z6=$o_PD%?Sfk!jzHci!9*&Wsl(tH+c=#N_&)lKL&{K8QEJxkx zn)WY;a#z@658qA;I@K@nj&mH;RYUEQ#t23B*plz2XI+^`^1M>7mPkV#Bn#tvCy4@< z{IDp$O=0UW=32^CQtD7}D%#Lq$9-E;b`7UnBN@;hi&hGzs0CEw#xvQEC$GG9PxoHH z!Bs>+UZFhzgtH9zjA`R6c;Ko(PHonQKl>Ec%>CkSKCNsu4U%F8Z;A;u63181#z*n2 zUn}px1-54J4cxX0W6sL{_we>A#%VVt;Eo!%M7nLF@$K{W2Zmfsl!lu)!r`tv2pg)$ zKzkP++-W3*pl5rqJ=O7IXNp1dt+%KJ-&LcGHUP#P?&<|W)T=46LVj6$qH5;Z=$^P` zjA#fNN00Qtt15*MW;dQy%4U*>JRMSLC?bu%v^Du;YhJM$lq*j!urQPr)Wf-F?U6;BB^l0xgmtu&iRVNJOi5m00qx^=Hq@6Wtpww<3=Ih$U zyiKWhdP!yzzsD@`B-8vHLfFnJS88C!-{|>piZE9_7{ky*(T-L;@Dsi4ZY?ha4tPi; zAx=HAe+w7~1N&*gFvL(|wHjU?&;l=ml=jggAHZDl;wm1J7t98{b<{ru(fBWfLPhR# zfmLXQ(P^KSjzMa-tghYK#kt%IS;fBAnyK;;9{+8c` z(GqCd@UPt`H1*qH4c1v11w9KHRBBHEFh!7;@KSX4D9r?Kem&dYeI5tN 
z6lePEU?Jz8YA!c_5vCJS!Kj?Tv`a!L0lnTNFPPcTkNjv@6@IgX5?JclT&|2fuZ5|m zFbsrP;bkjRou*rd$&Q+bEY;JraN}Wx>*3}Otn{D6J!wqMY2%E1mw=baOW9o8_ z;ZkZDlGKg<;b&+3XbaHDEia9DN4_k)n6pG-%Tsd~bKJDbjK?J4jg^j>j=d&+fDGv6 zos)64D$T{Lu3!ZtG^5=E8y^;uI;7%`sy?e@C-hR8a6umrQz>F5kTzJ8p-*Z#-MHM; zX;jY0Ow2?YsCD+a6zNW~E8IEm49N$z%1*-25rs89OX&L@y!mgCs+B4ojieEp1)W5V z5cD90D6K}+iYbwLtB{FGVu>O(LIR2fiXB$1cQvu=M$l1oni!`qX5}h!pQi`XRX^U` z0LCuL!SiwPfSb)v+RiqErh%YCq!?a~h|nJ9T|LEIhukn>HP8XM$2Z;>BLk?B-iH+G_f&U==55IR(tM{oo z)MDdlXg-~7dA@UXD)|zh_XG`*hvFC-l&2wcEJhdxH)&93SRM8QOAU)7oc%O3Olu@o zYJ-FS*>T6nX8u~#&^q8&L55IDG)uUr193t*O8_&6Gr~E0ymfEWf`Wt=R0$?W#1P1N z&i@SEQIRv-yt*6V1p}*HYe$Zj%e^JdgTH*h01bjX-ya7K47%Mg#o#)c--e$6=WrVL zmnC4yn89RbDMMP)DkCFkrm3K&f7bhYTAda;jpvpL@*Do0`Ggcp_34d)JTUgQ?_yN| z&ldpKi7m)81jKFG7k1%$;pUTMVhVt>6k350#MR`fQ;wg2fTZpddre}&c8l97?dm8S zEQn?&+}lJaAxYj?D3TFJzB9I2OZj$#ycazXQ(Be1ag(Y-WMUlG!r9#ClqJ)`Dq=un z7*q1v%%-{WnZWBcKV5c@J8c^7Cd_+Nnv6I&lL(W~SbkVln?Xh$DIU_ui?jfxVo0Qy@c1A15| zwI09{Kk<57Hv=RT_TYUQKy?W4L4&j)bD?S_GbU(x23jU8$!OmLW_1`~a2$xys&U}n zEyjT!z*>O5C=TdcBo2U`5C{4i9+PW)DIADeBOJKe864;fO>ltz3mnj(>R-t?^f%Bm z`3=yn(>I`{#5VvAUi6G{eAc5kKs)6PX!BwiQYUZdO+}mL3_F{OmaUWy$0f=DW*Frf zYI!c9!?4T!_l}Qp-iVk6w~ovjq$FZ+j(J%#nw@4)CVTeucpf0duh^WX`g6dRd-fbv z)M<0Hx(=oPcD*2hrWG&KGdz6AM?Fqlg^}6wGmR3E&;c}!R>%-<#~5vg^zSNUyq^4~ z1KhU2u-es^fi{%g`q@#AJu7;*Wx1uD2M&*eD8s;%J!(5#@)0FMmP){V#v1Ms|-_GE% zjkNSkV{?atQJu*DrI5ey$s{Y5@;D}W@Mz^;n-|1jswtlW=LcSsSL6&3dBQ|BC!OFK zKfCwsUpD)-YPx9YY~m`AW(+Hw51SqbL?Qlj*X-r+n ztV&3`!s#32(?%v_~4~at_Jy|D}EnB8nR0d+*cBe!3|D{jO`%q52)0)9C0PA2oA<%*LOi(rQwPUR4s8!e{Z$PK7Ju7ALWtD`<4$V5=L z{WL=!x?ZBXrH!bCP7%x7I6aq7`$A^@>147Tp@|GJM4F3{pS}mH(HV$4-$(uoytw`^ z3XmbFrv}~6Fm!TXjL=ECp3gSZ1jSDeAw|^Dxj4L=a?8%AFI$6SZ=s&{3R#3{%y7q* z5O%;bU6Kdzj-#k2vElBjClH-7wKX59blGzo!oi+<4}7S)j(E1m+fk@#R-`ttyE^}s z<2iX}?vGLVN8pLD_XTYst{;gB7V)C|$6K{Hzf=wSP(JRc3)6`r z?K4X*8mS6qeZPZjg@_j6F^XQz4|A!iHue04FU+Z-6=zEUhwb#(`LI!EW>9n!CU?;_ z4%AK2^2S!Owc%3{TKU!<+8k;VUW_PsgS}t^3E+5{G8|l10kGB7noIcFSV&023vjWX 
zA}rPNydW(U^`gXJpmbh~pF+wDZzbv^hASu+9EQ@6)p8+1xcS(0Hwip?QiAY`iicrL z6*se~nsf$fVVFl0#!6;e`02qEy3Hr@Hw{B9}hLEvC)#@pFuB zVK*au@mS6L+=3sMV#BJ?Oa$U(hMWKn;9YQ~pqRwgR;Y}Z;<&;@@soSSZ6CXVm>&j8 zJ>RC;=UA1FR3F#??`11_TI@$K^Cju}li-IzcmA6?Ev!&Nq(ttn#`lW8O`J68USI@k zk@{#O^+*!o95uyYYsJ0*r4$|k4#&zkTU=F@80K?Xu!}6n!iGA^Q`^?1j)6g51g=Aw z0DX*s0chlY02U3wCxcxZHb*D?Uinw_`kO2}a> z)Jd=~U<6yyd0}#&$K4J}{ZC8gF?edx7y?(m)OH=LA0z>+t=bkQ*6Sb`uu1z+4gs4Z zzZcQJ5T>#K12e$_>_s9kgG9D*g*qJcZZ z;Fz=zs}Z29%^3S48OT(`Qp>N61Vx#(B2h>a1*!tO{#aKq)q`3A~j6(x6qE)#=2 z4InB7Jm$$NUtCC~1OJmL%d8%t0o$A_flMni)gjyLG6MWW|(2j2*Ux80i*$0Nm*)C<6UEj zWychg#(q3v;TqTFLlA3?&#oz;;IOr>va7q@?QVBF@{#SMJj$ay$~#R>=Q+1O9AuF=2?Eh1Ns}N+(}9Hm z8c+ia7)JyboQ3Q#0YQYv1XExrFwn5zK!LJ2;KKoqr2qsdbZ{Vn19X)R%$6z0=%JFK zDYfCoX8jywew@xIq;*vm2V<|ZMOlzp9LsW#uay%qU)={T$<$3*>~4(1p)7l{(=vs5 zTqEzxa&Fu@=qi=v@vz(r8%U_A*ozOCj06oVT(EG!0u)5pV1W%4d?+9S=0b-G3lu;= z0t5?S5W$647h;$zMh&}1QaWl$MHy=R9E_68DZ4lrHS?^SO{#1<&Oxhpzfu^z1~#r~ z%Ce8g;Wn&M4aYLRG0UxQEg(shL++9^wRaMzJtaQ7{SVl%Tcxz6nukmNQIeQY<8q!yKHsL5}f4nAc{p}5dgz3i^v{4NVJ<^;NH*2RHi&Yr$?{0 zQ94Dcv&CXC771&;UJ(!oO^*g4;>v4y2rx*vP$9xBL}bW-p@9MpA)*XPu#TRsrQ0Ax ze7LZCgz7n#U9qnCJ7(e$T|7y@Cyo9;jZi#1-QF*OurIG<`ma7bSY+C&QLKZdSV~rE zrRuuZJrVDC_wD>onxD<*F^_q~Bc6Qn@0XdGS@fjmsU4*!jp;Z1?EeVa#t_UmZ z{G7PxX#aoTu$vhej0p__Fs_av8j_)pL6jX+vI`0h7D;BW;ngk@uI(Z!u2zCL0|Le< zsN|ww)Qp(Ag_MSamWM9+D8=_skVqR#HmorDbu`MM_{9`Pse7C)lBinQN)QYP5olXT zf&hHAjCKm2d*(W;xxOOa&#JQ8blK_MThW&Os;9~3BHPw*Ev;Tlvs$CwMyFog-tFbx z?SH0|o_e7_&wv(Aa>)3e*VtMuv1>@@&^2n}FxAVWk023VMIAp(U61qz5D!GeVd z4y$7{h;QCG++ef>QL^75L_8X%(fmpzfv^MzIY@(?q)8^2fN{ZOU@#~y1W+V0U@#LD z3xjD$D2xvrCNOA7Adu5+sGxAcSkw^@FOjSsJW#z}eKi**-gj-YSJnIX^}eFnef8B> z`|2x_5T0}$mRVJqm09UoZ0@S6s;a80s;aUo>#)+5F5)2JzORN5gjaVag=iETA1!TF zVOU|b+kF#+P@0!cdPkmWz`!2>hD z9z1Y(AXCk}RoADtDw1TqZdK$))wZ^^t!?Xm>M%7|b2V3U59a35>2y8W>`_%csH&?! 
zHbjW1c-E-ide`ac>9NbSOdBNw)y&Lq{=-Gm-1DCIESKF-9*=d5s#uz*`Lk-w%1ZTz zPPH05m8R-C)~$Z~I#ncj-Kt*PTva4#RBu&9l3OG>U|^0e^skm zbyYW2z4e>yw_V7S#!GQuJ(Gx8Q$ZEM@Qt#d=o)m+T= z(&?J2imIz8A0k9UyWMqq>N2%bv>(*_WM;1B>9>MGOYV8k<2&W@(P)nfns_TCTh&?e z*>rb>r+nvg-=*Ct*)8FV@QUnEi_q%KZ1s!~GmF!(gBc>D5da8C01_YoM*H*a-h=XQ zv)T|&Cz(~mmpu^pxZ;(!R7OvoG?AfRDK!TlfqnkaK;Vjf`$ET0U{WM(HKXT= zS)m=|Pv>AEn*Sz)@FyOzGZR>+OO??yme)BXNE}*WJ!5@p)KO4Ek|g-ZE@20kBb=#Q z9`%cw8o;LKS!D*wQQiNZ-a+j)GEdeFN+3G7f~t*d&&#+35kxrxrenELrRNd*5-u}; zg^DpweXLsGwnOZ_aYnfGxRoK*T+~u|@09}g2|*e~#_KGmcS6BhChP~yJIY>8A1QlM zj~UOJ+z7eskgIcYfCQq;ja(Fg1o@|4iwVOo1fnzqDuhh6c{>Y)si>I6mnVrIiJK8M z4vsSw_Jzh$LsK-C{B))f$Gs4Ob3!d=q8KxDH;vNqVy5a5JRrSHB6WVvyllUWL(qXn zNGzrU?~-9RAWE!CLz)5FXX-N$v?2#YtaY-g4S1(B#vfJC1M>4QoU;W;_2K2ej^Wod zylgNpts4bnZDk!R&M<~9q8+BzTsw@Bju@+?;xQ7+MOz737K^ekmiurT?u&=K48^nm zDbRy!u}T8iWXg@8C*&~RGW7rnaCU2pkxsb9!JH>c+1j`<&^}99LdlIx6T|2R%Cy1n zumq>F=?;2?Joy+li(Scw`&+#g5yKdV*En}?MzG$+?qive+osfZej`yr49qTs-&hL60Q~Y0cBXBj%>d|zUQ;;u(BMAi6li;c;iXE#>j9$o z5sLXXy7T1>(zML~noL}RRfK0*+>#heT&~@cl17em@CIpH-8d3D%5j{t+h##CE7B6; zV5~2nG0)v=$4cz>ACn**Q)Ws^iqK$)RML<{V!#VhPz)a`hk!6etxc7gk%cY{>`h@OV zs!4cAWgbX*;B3MFmS41v1iZc@#()iR%Em$b zNtjk<0L1k^BPJZ6Ghvh?q|JK05y(cX>^VDH^+j4K8tbE9){`)6-i^lLL*i01u0m4E z0<4fgVDVg7meFOhM>zZB=C-<;lNFVq{i?A}Zkg^H%21L6stPo=7v!p=byW%XrUJ;- zB#SV1XOok3)MMC0xTv^{xdkL-w?eN9)ALoufnA#?x&HJ*#IG7Utz9C(YQnaKAfB_k zv5{aEl(1^1?F_l@>0z}d*o&DNSl2PJN(O%_7`nQ%Yq9!sHdX;6x-l8S`LX=Y;~`R; zCSct}zGDW4<2G)$NQbIYy_2e{(+7hWRW(Qu=`=&MOSDeY_)hXEG1Z=;c|}-}_O?1h z&@mH@b9HJ+9i{pOs@#AChm|rmRU*O!Kt(Tz?=qc`ohIT$^(Wyl*}D-PLs11%iJmSQ z%Rs9+e2wSxj#e5`H{jrt4BZ_psEn%XjR{-28P!G0T~ z5|?0vD0b8@RC9n**@G-1(Csn|1Sx1YwcQm?r4v36ya+z&R`rf(2bOI_y^Cxh`e{zA z_ISFbpBvzLlX@g_Mo|XU^R$NSG1OCOw(i_4BSozxp)}~-)bSq=BdbuOlPu5Y4u>W> zs09@cFR%hY{$7&Hq^X2h*sMOXY8N$R2ej_ zom%5M@3`_mMFkYqz%t1a0)X07fI4L`5INeTjU)lpAe`z%r_Fn+i6?N}HZ)3CRYr37 z)JTJqVq)0?qyb{@RoRkJY46EdS=bST=Caf&gjj3Ar5VlwbI8C-o#UMb`V=u`aUHU(&gZ5U35h>fK16wsM1^=Yc^=oLj0qK|#r 
zDPC#RNayB57>C4ts?-j_A@I|ZQYys!G$l3Nnjl@z6CV9^+0-?(^ll;8ekzuUQnUs1 z@HH5+(;jGBy~qX=f4bxml^4vBNSvLueq^Z}`cpvVAYeu1#B~INW>7Y%UHns`cxA@_ zX`n9OH1OCpKQKBW2vgy4G^kSirFU$?0_m#lyZxo;lLST`L^

tM0LuQhQxvd(j4AV!N8>3l7 z^Jd0a*{o6J_h#$4rm@N^t1C7i&IE<)e8aKH!=3fj(r$`$qgGaR#@r5kgjC~jpO8V9 ztbu9V9K`#DOF16m>>gAIJvNNlU{Bn5Wrf+7DV+f*QqGCd;619B-m)IJnK~-bAt7LM zmj^e_5dj@2TE)3v)R2I<59fg&;Y4vGNOL>w>1#Rk=~?n}oHrZ&_fBw>Z!&a@t<`Q+ z%?6h+Dqy{72Va6lM5EdVjk*CPYU3hO#Q%tddV}$>R+y&Pr+{Ksyo)eUv8MB&e3}u{ zMc14jZ1O}A8IIp0=bRN5B8Ev%6F}9hb!0lewc-Y`v0nT)ZF;Mlmcif@p*9&05v5#7 z7Q2#V;#?NN1n4vaAu1S0&}RaPf||7ihQ=z14;Xi}RLM04rpSKx*VOy)^pp4s%49CH zpMtnyF_J47h*I90J}?n~u1P~ws=l{I8OaGK@{#3%!L&@@1YsRBE)GWeIr~lH zwCG4bq^M}nQK<7+bJUNh8nWa_`ch7}W*^hEx=h)`Hp7-VQq3nt2h~fzsg2Az$D@;h zidIcho8fI-&2Na*6I*FmsJ!fBd=D}Lbq=oLV{Ub>sAtUenC%dW;OjFwlq;;ypvxy; zq5Q02FX~0X$~oA{D=J>1vSE+6qDt~>5nB8TE$0TLYaM057p_;$6+hE1j|nBj0Y=f^ zX_S34{sg66NVN!-=>#Fs); zG!r`ls6~^Rwc^kAaf~46wES6^wx}p9g_JfLFV2N14h2)FoPI4!8idp-|8z*DY{aAG zEd?M68_`ZA7%mT=Ij#`Nw`sNSVd5G|8C1`3x-yK?g64ol2P7WQ2nJ4gq1Dz@xEZo2aaSMVp^q1NP*k ztv=`H;P_GQZVsv3Bj-zy7lctlF5)!wB~nC|PddhIgtqV#xH$iz$)TaCfI0q!AbGd( zDx=rs-h1`VwovL=^C3E4Z8#bmhUlO+%zq3P)+xIuQK`thAI0hBr#CxB*1Dtz@?gKn3eQ{GY7eFkv=gY0{6%((qUeQawd^o%#ucna7)TOIcPN-?^n^vLOo6%?7;=c z!!?RY*3iQ*! 
zYBD53g+6OgPWCx5*AUTWbdunpEZRAhU|UYwJHxgxF@U@wXjx$!*0J zdjO@FA!|YGM;k&=B=W=D!V??>Lk1r#T5mV-IgynJY;ysgBS4#{L`As?07i@3OEV>< znm?_VU`Jq4KK|KrGV2orn8)5)FYiR%f;_3UB?6-$EY>EDdL1i|T6b!cU@D}@kQ0;d zsL@d6DH6)wLina@{O$T&jTsjg%o0vgee^Cs?l4JiR>&^dd{2-CiXn$db{;m|8&P@T zJtOtGTjs_MTHNx(T*VmGpb`GG5rqx9V(Uk7P7MW?MH`WH0dtqCz}U#t z5m$RW!`fUxb%O@*k!jiZ&RWPkLxQ_ff=Kns1r2a2SM3zr#0D&qx;ErdIAwD;g{l)T ze>%Yz)tixUqo{p8YxqiCRXz@F;;wcX03Q{ZGN&bpPA<>y)&)bSDWjHv zx=;RR_wy4gR4{zZEo8tW;ck*#q-QTv*)XHTXR#rPRx*Jd$pjxC)_;KC!bT6vI1o>Rs{s}E+jY|>2fSeljVhB;x{KFJ9 z!NF3+UM@-ijI`h*pR`JYd3`F>|Nhc(VkTzaB3^|=RC1U3mvMsm-e|^f+=j|~dyf5t z91HL2KfgnUe;JhlXeQfI879V`Qbe{lUwmHra1%ZG|LozJdmP=PLo%Ozx-9vu{cL2M zBM7fshknMC?uJZD?mH-?N;xf|TK&?Y5UAV0|8;-ufgw-m8L3kTU%TL+Q_~73PNpy` zX8HJ1NfGcys3PqRi4w^B@qPwNLy2v_Bx4jo$nv$nqk&)K0cE00y6-gT;yC$Wx1g3d z^X&QijE8QBA2u7lJ!_%1StAVeCwF86b8Cz%4PvQtky~Ly5Tv3pVWMaz(rfOrE2D|4 z^2&pA){2X*iyg*`DljbRa8cY6*i_zm+|(qR6rj=9t-(uHUAKwk(pF?BeU^h7DA4_M zKMkzUu}Je1jOmsiRXa=xDRc@76Z5oWIVl>8V!h+7Jez8D`yY0?EnXj$-MKV1zp zkTZ-70si5FWf_Q{I4h$K4)(rS#&@7)MhjmuKA71C1gar)0|YE8LXbrc&Igc~xloN8 z5M=5NdO~7ASiG17sE%rxTXQ&h?zjPK7HYDBT($z)1XAY`5Q4|E=1s8a%7Zh^;h_gk z5VBA(a=`(5a)OY?Fdqew-TK5uEarzI=XGP`ATYQQ}Z{ zbk}#Rg>zio%5XdmoZL7dBgnabEAhA{E$_}6K@EYPbQ`r|U&seGf~isGhDL)!gd;HR z2z-cZr5*vy9XITJQb}~QU^xV#1}P(2{C)xne!x8`djK~;$iI`K2`dRqNFXH==;5fv z-+^u%-ZURsPVsddakkPsuttxPpsI-p zs&Oo;NpMj)39i{L0;6pheQh!oV zNvfd^Dn$)!nyqKs>^O@=y~bd}ERU9k`Wlx|pl1CHOzT4;G75!1#lj_DB=y3=Y#)Xz z{fDq6rHt+OPgZ4>MmALueUF*G--vSi$&ULsA)k1^G-(y!j}%x_;U>QrPg(QBeTF+I z;8ep5;#?wIQKTzxVF-+v_jYXMDz*})1V7ORiZ$x!7wR^>5d<^`-Bb4G-vl1r1u__sLP;y zgjPIETYN&#!fI#y-gJrm6^_U9S`3YOJXF!7#%}}5k-OV%_6SO?7ec(>$A01#t$RtN zgPW*kD}os!e>NHmUr&pujL|g7A-I8ugv{WpMmW$7eG`qb%e4W( zPnAe2tb@+O08daL;5YN!c{x`!gkuNX?ZNmW5n|_&iY-;)^la3S z1k`#-)}>Ry9}b+k%>5aXzW1?R);lJO)yW=AdKje$6Lr#7BG8b^3wCs9JDg+^2^d%m z!KtAt?HwO*nl}ZLtwxn4r9VqFLuf5F0CGqJVmv~{gnHIv6KhzOSz_MdZnbsi$Gy6N z!JmY7&t^4oGIBpt9u`_sBD~fJu`z-_FGBr(g6+dPU#Rr|2`f`1Da5OD826ZFg=l=#d9MycRokA{}i^mz%sGS<;*W 
zsY83MfX4WAq>;%csTivwrmkYLMC$quLP%C&VOj+sG%+VP3kpfcq>2u@qjG^m8bQ$< zoff`XrWY%q=wW&)06cu+)w63cYeC>d2o;;-%aQCUk}*^2>zQ(g!f zO2f%x+vp1A$6s7XaiDS`xi?0F5o!gc=0DQT=!^+;3O7B{nJ;zx5V@HAyV`0}|M4*o z1$1`c@Rt3be+@zmqToRi0)qb!5)F9n8#LoCLKqm8NKI$Ec!7L7<>gZn!LKgg3!&#< za86uBjPW%&gkk?%2`s<-gc;=`NiHOfz~kumGb4Dge>9g`Txf_V)h8GV^w29xM#}L( zf+gxWt;`BM)pFI{GZW>QGD;%O%*9dunCBYvp`f-TuKt>m0;Gyg!wR~BH*PnT_uXO` zbfVq(fOVsjKoC-@qt}-vL{&-FyI8mX+aG`oykiO_HRw#iipjryUBp=taG#Qb;-E_M zFB(1&J`J=ddP5;T{_>++?#hwB5Ngs5~x#BiX*jn;dYvTadRsR6r&%Q$RbA zRq^Eygvlm)4(2kjPZj)=cVQzor)o?Wqq&7GD=}B$%A2Y*Hdu`z3x~ZI;1bZH^|$2XSnl zWgbQz5*Y3R~*k1PSlj1x_H=< z1lvGz&Pa{rDm>T8L>C&mT_M?uqS3gv%E9&X+7c&3HdtLsqg`0j3i%}Z4Oe&GeD`-tYYE5?huD9PIvLoW5EiWw{~QWMEfv zJz8{Tdtuq26Gp)}*kb04dGix!bsX}-Vv57MtT1Cj<{5;+Mfuwp;XBzOb>LljN|X?M zaDis`cyH!`hZR9U4#_u@s?ph3R_%!G`%y^FxjpxM`h>`kS;R3-9eTVPtmHzMHFzC1k3uDX#u8MRug#@1k*L=vpaQ-C!(mbq%v?Ayf>!Z4pSUqvs> zcM`E*I%8~>mz9wUui-Wl!mvFoUkuK^ZzQpHPq65(Z*ozLVOIh=4QobCSsF>Dt{17} z8*s!FL&Cz`Bdfe-XPF!A{d?Ka@CEu)$SCS z%^V{>9Hwa71wmz6$Qi0pj8t2dAa=7l{CZ3jzsAXxBBGc|+ahYts6-QA2G$sIivXIvx;2Y@}bl3-sBNfi4|oA_%X z7&QZ1oe286eqr~z2qPu9BX6MXeGpH`hQ-Q>lEN27QBz#R{J9Q_7aN@SN`3}q?0adP z0e_>B=nZ<8yJ1&Xt`n%tTb6k{AiJV<{J2vJ5skT{hw7*wGruRnp8q29iMXbKI!=ig zDSH{U8tL*u22oyR5n3cWf>*=l#y5&aN^-pk%yCIzsX4hOldBCFqhH{$78i5lxrSO?zUCL2 zMzIma^u4FA!5=@r5FFdHcG0P)6oBb0q5kD_H-OnVfWb=>@%O_r+?BV7i9?VW-xSQb z6*>Ej=bF@F;%z!%tAWf2B0l(@4j46e0~f@HmtYp{C&|*+#E{G8mRvR$&22Mdw0#(^ z6*u&7Lq6n||A%z|k~$u*Vhnm!QQZm7ZX+Fr*#WY4L^myV} zgcJ=ujKYYTRb<0l%tRoYxJ5U|)xm@58e|xU*7AEWk3`@~U|?n~F6QRBCbjr{%`YTx zfhG--DGQPoi#fDl#2}`vO_T{Ww?|t!-kUMXnNYl0?u0`9uyzcWsoV+f$xMbkC+NfC zAWE^=v*;0*9D`?>G{+DoHNV^>+6|>fNI{Ke^^$S?0+%^#*d^mg8+;N9aC}CLa%m6J zPAl05f&7Qk%gj4LQDe;t^1Wi*F&A*lTzYA1MqO^Lsr&3V`?}FAM=->Y6$vVnpHgN- zQ>Zg~G973$JT>Z>Gbn9}S^|tR%dKg83L^yFrXy;hz8sSvDD9k@Bu=J3Ed`{(^m84< z5m+K~d!-JOXvtb;jT7F?YXLKe_E1+2lVqjPQ_@sCVR)7zFi>#&DR#-3&Aaecinam* z(@mU^$eEUcZJV^qcM`PJ$l)hwyA>@kGsIj2A!^+6r=ct~iBfdbdsM-|fxOSaM4V?C 
zb`KLy=Y#-qoosmj+77CS3T%2qA(SyH)NpVzI=7HzBUSG;OE@@K_rl^jJ_uBJ;?O z34wg{UtIC3F!3BP^c+JL{sXg&e_*b!O8gN`Rs`*H&xs&&|lv za71LEkNXxb(a25|-|dQkQ3Sro5~&dnGG^AS)nAfgXPDfGbhg74SwK1h{6gJQ6hV>z zh?K5C!BX_9FP)5C#6;qAel7z&qpR08aWYs+{3=p}o8i$)t|sAzG5JO&dY`FzJV9=E zH`J$KpkbVGVGbd_ls@20Ldc244-7%KL8(b|Y)24e1V_O<%+8WfhC&0saB^5uh8zy4 zRXVPQah#(730ah(FD~zBb~uk&1~N$ySq3m1C|xRaH1fUg0gi3vjv23T1#Ern6328M zwpba%9Lm55E;53{0E6ZZr)v&n3{M#$=}dy?1xg*nhk@E~wLSEM&*R{*Pa6Qs!*hf; zayT8tFD?|It5H{|vOqG)xG4-6CYts8pjMuG70lYJy(X&kua6KOpr_E#gTF`Km`Fl1BWS z=>&2y&QE7EL!$YDUxCL@L9-?$v#Ujkc~B_Zn+8LlOlkzZP6{C$tFD_yAq-W>j*!sh zrH4O4#vwM(wHFFR4D}X3gt!Bqe zq9u1(5=XCOtzyXzXGuoBlC+hYd0!!s%jZfdL5sLcI7QK_Y%91zz4bSb>gq4x0X*pyvVGq;isum_$z`h+frTFzCh7i;3Sdeat=)mobJPJyBi3 z53m}om2$hC^(U@eHhR+?nf@Ne(;z~mq8Y%OaAdgLE$%Udu>tDu93m+Iag!K`lcP1F zwdf*EHX(NeS;@#(DCvobe-ROp(gi(NKKHxy|G#wt)&UIy3j)WLXUk<}Rd&jD&dzP! zO;PoxWT|PIShB1tb%bz^u4X;MK zyE{zXy{q2pnI)n*EzRCe_Ma=%`k}Sm!@GX)RX>*h`TzCl=-U`k_x?ej`}9LUw)XCK z)z-W_iAO3l^jSSUA5EV!6MpMyNK9;KI(CD5sCZGGpOjYKnj+wpmDYdFs zeb8skYkexWtao#Fdq4CmnOV#{*R+}Q8hvZ^*6PfhXDmsL(wtUlh=yLYLOV389b0py znx6^Gbth}w-I$Ky~6J|O!R}mM{Nr#7-NJnGVAv3jEC+i$jnq@b; z$|lUT)l7HBSag2Qc`(NuorVx^@@J-&$a+jksdO{X4ViRvbA8Sd9d&&OFl5KOF1MEK z*lj*$%6|DYpOuQ}=jVTRqN3W9Q61F{T76SC=bVFiJ@fPCv}Pk5>rEra9~^u( zgRpqDA&}YhF>y>B6UT8N2009J7-Nt_p@=Yn91$jvBf1vY3P5es7h0~3)f5r`sUK~Vu=kdSClOjv+O08t#~K#t>J zpl}%EKp?yXfN+=tf&iC4+P6o(LOSEoDV5nOn#oLAuCkrWSug+Qci9TJG_UMay{L}L zt9Poc?tS)ozjWqadRG=6^yZCAYedXiyIalkH=Ue%w75s6Bj4PT$?8L;@*Ybo?#VS*xs*$|iF?#ryN9U0`#tAw_gqb`7pa!KTV8J} z?HcW&J-WMkruACwtLOBns;26#?j6%(npb*Ik7=eJv{C8xsGh2{XOiVfG-j1mc`Bc4 zZl9H}&1d?lm}#3&I)^ZG4Tr=>)zt2+dD~n=X!Ta z@9b&b=3O4 zv$gYG`h4e8Q@S6W>6Y%`e)qX&cC@Fd+Meyu(3YFDXZu1sOZSuWJ1mixo}AvLN%dl*Q@B25pjTqp_6Q&UA5=Xk{6-mMd?_9v-Ykt}>D? 
z?GGgPKr(OCyil5Ehz2t+LK!k)q(+L3wT97VmZB_5X;$MhHIrgEm#sH2v4x+vXe>y{ zHx|o+#xz4owPdRfv#(SKl9fJv>G$}2na!>xlb+h;jj7R|{`4l&6KS4alSmT@bV8ca zm%d8+0H20%q1YnQ1_fyv+hX}@)1+!rGS^!*XGYUfp?JIG13{fYzKOiF{h+F>`F(`) zDNGIS-K7e%XRfEat9#8G&3}(5dMBOqq^;}bUW)sJ4^jCJHqjT96 zfI^XG?I?}isIW}xncEr`ifZp&Gn-vBN#z5%yrzUi*=_LfK=kUiXZ$(;GkJT$Cu}P;hvtpn*aJ z5=#b$g0WCIEI>#sh~lWASU@~vpja3cBn)6+iBKpU7KY^k1SGic2Svgm>EM8ugaafZ z7KwxbVu^@IB$!}%_yZCT4-*`qED{L`10WVHPhR&vb~knlGPr_IxKii-@LMw z>FUBNH#V<<)F|s0%0jWCX{oe4`Q@UqjW4dG=8|mbq9kat^gya^%%+W|Ya{}KAf~zL zJKlL9XEnn*%LDn((!aUW_-R%(LxyG(9?kaA$S#}EjJ9N>Dfu>`xlm7*DDnVjlZ{M2 zB$s}>i*(2>(yeYS`CE2X)%-KL%r4 z4CSrq}t#U6G zV-{)yzY0T9aBZ7D1qaLOf!K-=#Ga6eXiKzZrD9@v@0Jdfg(1QD8sYMmL zwVuD^YGIfS7mFfnBra2)3|g%CjYYh(ZCBnNh%t}w_?NAoEt03k0+14*vx(~B(EtP} z`T!Gh02mAm21HVcOr+#cn54*}4-95jj8W*rOKkZyJdxnLalFa4fMI$`LOqISKF`51 z_VvuJv5FLtKx4oI?j>TEN$rbrL<^Hm2kqRh#Gr-Ky1Qk&60Wg#l6Qw7F1!i;nPeiwSxhMd`sn-OP}PCHmYKj}b8hM&zP0mX4z*Uk5~E z`S~zLI~RHTgQN2e5h^T}M)`A3rjb*KA#l4kv(Vc`P&Pku+x8NE5dJJGMC4N4!q|!y>c3_$3z4rIij?J_|1bo&)K5~ zTRYSAj6U=)Pm_ZN^!&P?`x5A-PyU>~$kS>yK~5+vg?h&E;ACrd(Sc&*lGh&*8H52`)JC4kQ$w_-!O`3hxDbHdeslOJH zK9_w<4}|%4B6*KaR+Y22I|4FNOhDk#DPf%Jbu`${1HFgMqCrT?2Zo_0A~oiY*l6__ z?`$+U7x^6ditKJUCgbb@BsSI*2$j^S5R-oNGFq_&4hM}6KQlcQ#G0b$$&l)FHCY75 z?eDnFMl&Xt+K6m{Hi*rSki;J6a#Lm$Es>K}YZ_0mM@EiLMM|Y`WH?z+2+9c51uogU4h6LQINdZ9d6rYuHv?Y=0XUE(?rn;Z~W^36`HAYCVnu{ zVy;^169Q!OCkTzfq7CVxA7ISAF(R(ZKf9q*B#LN;gEQg6d<_MW0K&-!!ZbCzV}VX- zydUBWAaF@2)gVF;HcDDGWrfWrj)&uc^Mk?B;UBuPQTQ-JjMQ%BButm&lptH?tSm|! zHG?z7w~vWPsl`J?i;lUM)0mzaO(c4t@&QUpBj?c9_3-J+f?wEb<+i(>yG*1T*|w^@ zT;SN0A{1JH2e1%@Yk*Nbh1`NWW4I6#vk_;!aWJg_m+fH%kXVXWyxgG~*ld!QIdZhQ zk#iw6j~a@w1y!GasPXyNgan|5u8YVOSV1%p+{+OXaKIHS%Em`@Of_ZU9m+0s7l`C* zH_m-qvgjC#BssGK#u`2x!Bqet!bg4x8NP?&3UIz$NpFOeyc1xHzP{HJAEn~P31I=D z)X0pDA{i)!WqF*J^C$2I#S$x1eIG*|uJOr|{}Ldq2}I}@cj91c=Sf#nP;NWDn76y| zGPKDmzoSDh&@)04=VDhAl@%<73yV89%uI&%#Nm!AE=DMG*P)s^qh%>({1C29}#11Aa*(O=;^J?f z;x=wKj&746?Tr%Yq#I@N;pI-lOQZwzo-%C%{Ev{u0&OFw7<;? 
z#wCuq>+H;{gz`|44XhbaHG@R_ne#;^$;rOV;~6`r(=QEA+q1vo z!(Kf!7dg(!2Nw*=G%+`5o>PR#lx=q~5M6i)3$4rLOW(8cfG$ufb&;{U@!QBDJWg$C z_{m{MDn{6{QH%r=X{vX<$mNWGjtuu)6Ou|RY|>pvC}x#~z$Vc!2+)ud;&ZD=)IV}S zP)ZXK83uW&9xub^nmF522{9Tm3&~jbpPMY<4a^6(QTRhYN@09tj}*~0ZaQ7mN-`EX z{SA`wC5(d%@xb6Ye#r)DU{13fB;;eRO&=8-tX&4xn?~O$*dsO^ooAcA7AkYHMS+b_ zI%tqizh#*+GLC1^)z3$O#GRxXgiZ#uYb9~edOP&~N*QTv`Y0vQ%~T)TbXsveN!&An zSg_J=P)ae9Xlh4O&Kcz>@okoIf~dM6`c6jff+Slb_+;eyggHNvRN@d&hiw~*1sN^| z2$1%kM6_|2;G_Sk$J5%b=5i5D0NQ$j;*kJjQje^`~uaA>D7nLXZbR-PU!a-GciC@DR2RWb5THy4AnC|KP;|={E{#6 z;LeO`B%+kWZ__&(&OvquKpGGYZfIa)7DgBuq-G_lx%;LmbqJ^9MJY48@f4VT?lzzy zReY4m_&d172ov%Xb)=ccT|nwfcXl>Q3WZ|D_3e{=eJ>i;`c9pi z(PJ)#VI{0^zg}cM_qPXweu4;>6xxR-sk(42cJ!Cjc8UnQZWR_qVL7*>6P)Z0#4{}K z$~fDG7#V3zyL)*$OlJ&VPUKtOK;XzT-=UVm0Ase(QDRP7ejRBqnCMB8)bwNwlB9b} zcW`(XrL~pc5KwT(wbWsdg@q}qanb|j`hdYG)ep|_jhsVPMoC@l`EfQ=JWdgV?g$Or zEZHZImdTf=zHm@#!z{U%1lk`t&VZl@2+(G6YQi-ZCFf!$1p+VSA!MK}!XX*$e3CRb z`-DrWtV(BSNsvYtUQl4_1gJNs|w31A+opKov3pn}2L)-vm(CADX5`x%c?LBXK@D!SMmPftDI>4h( zimq*No9QcBeDb(_paM!er_v%S6VePQ0iaR`Dm9?e0xA#E3@BNc`bLYJm~;ub45G@| z+8l?=Ryy?Sf^N9BSYP2!wBCqY5tJt*Q`LCc#RcoLO%n*vy`ZCgBnT%ID_l7e&D&{s z3EKh>PLa-!7mF)swL8-ikizlScO0gtZ6V6Z(3uP4dXfw+&tdW4=!sd06cHtH-vfHo;Io1KvW0D1E06d$D~!KRH#l6vSp z%ID^9__cU}xHnA)*8!~}Z0n$-yx9ofvU4JSrVXR64e_Ldb{QGGS=ug^A2&6`NkTsAL5Y_XQxtYXFj3?J#rDZt zSS&&dWx54UHyUX#x1v;hbwFu=Nvfd?|KG6W2I{OL5T z_jZkENUVvck#Y4HoB(SBj_Ya_s_6^hWawJua8s#XKqWmr%GKvAkN^tV+jFhy$@T%y zo3ut~=zxdNN5zGHU+Rs0&N>-0%CxBrUkW^F11c{@fj-?+7cqDdw4BBbN@+UMxW937 zS5RC?ToQz|f=+4~ij5&^SfK1MVTH#bv!F~!2x&rd(m^Tz`;$S0H(57-(cIJ-pm~?x z;0}jy2_Pg=*64t?6;mwmn9gpP)Z7h5Gax|8m&Nk{jj}_m{_tm-3c~^Dgl*0!1d2QW zPGGIp=8Q8DJ{+(EB#;|g>!-vWi=2)nZ;{R^nXwa6XgS4{R6Nr^v?2j`K!WSDYviFfM=y!-DMWW(0a!4 z!#uY&nR;4!F`~%Dj7B~L>*0P}U_X-S2}&JMG&C6X;U{u@kq7&D`fxrPX%#~mqA>%^ z5k?Lo5{kKjowjnv4IPnlKv(^uqcbsc<~n%1}PVticebKE+nJ z1a-r~qS;%RCorBUezd@St$$&;m}>eJHFyTKL2)TCD%6sVRIa%7PA0z^u*=XC4DtIt zD!yX~AyK-clXQ2byjJ*4+L7MOr8tmiCbW^kIi*dAG!CoJC|I1X+2XomRFFF!{WuD< 
zlp1!AukrpjKr&>YN=J4xalI4pq!XNUXc7tfz_=Q5q<6M1DTDUT(&KUhty?1P&X*#P z`{Hhnv_?9IPv#n}vf0vdByngu&Et8p`VxFgo|-XSHO`SD;s*vJvKk*aHh`WH`5s)} z{BYxbZlH5yN-~s~Z-KK!8tsKr+`?cuo#wb!K3ea zHW(un8(NSEPFFQnNE31)9VIT`g1du4KF%Q$16nxh?);0IU4VduHpZX{!P+q5(4z$! zKfwME3CjZQu&X(4REY35j(Qnh+rS+Gk>&@E#b(C2Tp$LSeMaup3lYL8Q9LQ9JL566G=-GDv`RlY&NPST z1ks_HE5oD(X%O)nhXaTs62{~VWWITzT{hTa> zrxt^_Lyxf~q&=(0rV24OXu48cH(-uCzDHtNjX5c*hLln!C3gmgN)G&l7K2qqJOdFx z2ET^*qfWiCP#`UlnLd%sKjxDVHKK`0kSA*tBS(=Tv%#7DsgU)5vrp^9AUiJz4@H`; zHTw}Hu%Boa70TwsFf+aMdv_Xb7OOvUO77&Ff-x+2k!@gcffR-k{Il8f$b}U7z8b<` z%g6NobIxb8Dz<;VZ~1}PtG3u`Jx?`4 zM1}H2nFh&PW1Z~u-)6)|gHgxQ;^fg_yJ}+L7O}n}vsn~tkqg<)@B&;%rtkf^P#F%9;~no$JMBfgdQXIX-m&8Hf177>Vd_Gq6#GlMr0SYE%_O z{4D%;5t-KfXu-m1Vc;{I{%2qj-0#_@F;&a~WT^A}rpU zWvjJUsAU%Ndw^L|EU+A%c}bApBrS^@k<(sl=kz4_X4^59QYg0JaEFDOxA8{qNpIy( z@?hf|ijTsgR-d7!C8}m5R-2h_F;|JK)3ydf7c0R>Dm3Uaf*$1QO~w-Lil3yPb_r!s z(jK(1Nt9^?2b}y)(rP+mChbYa2y$4Iyb|~_|0yE5r5APGy5~&~7m2^EGik`E$fMhN zwk-|vqvQ&kqH0Txk$l(+BA{f$GEHEV?&~q9e4U2@3JP8|$v7WmkQGK<~oRZ6#FRMABKtyLuE>3`QQC8aqerp5iG=-i1 z)`s9oila0NEN+0J8X;nhj-=H1oIZWTOce1yonC=kAGM%MoqTyNc^y&?h0% zsownY$SHbUKzfWzE;d;iUE*X_dYM3j|84Kng2k`cGMw1SyabBUVJaRBCBeDSUY`V% z<+O%a0RIUx#73deCv`Fn_%7;Y0$z$ANW_6Lalkb~=$JPsU0k`}L%`;) z7=X9CC0~ked8$;rN5|e0|8wWoos83Ph11SqOhyF5k7`p4r&7->ew5{MjZ*-2knOgw z9DIU`+O8TJNO8%ycj0>$#1c2ro+*=QLD%V6VREi&mxBA}i>-rZ0vD@ttbdlSbb!f2 zk~+W#uqD1Ha}^|T*p8783eFWz%chHfW^iJ9byxhY1eLHx&h+HjLCBt^AbfN}VB#Bi8(+HHnkD-t+KpXfdd zza=3S01)w7n{+`b@tk|ODG+SRd|Qp(lM7t7pEkidh zZRQY>(L&O|rLWzXQpRdLvVNf_>LC?nCraf~H^CfVu9HZ8DtrvFY#>mu$$n(gAvIgh zfKrJ_ZfP*NBfCA7OLW7l@k(>lP*E|N^4-+V6f<_kQA`&Q)CNY67BE5XPB>l?gt?|w zatXJiqa*wOW&hpzVL403S6VHk34nKDF&~e1d4v0I(#0N8xdc_$UPy#?$tJ zqca`h7lnrzx#2LQ}xSf7$Ai84?w8-Wx|aXo$m@ zHN%KdhLO;UzloH6d~tKlFl30rSXyuZf-=LHyBPl9CVN00@iF3cga9!x9dV}Aq?D*C$XMk#lI zZRp;)*wJS=IyRv{+|c|42}eg9&;2DbBc8QbcaK=&MS=rIvHoQ5J)06pQh-mz;wrn( z99E8VvVg^$X7E^EM@N0%sig^%Gz1VcnhR5h1sN|^=~O`-rGRFJ(EOQE#ovrTI8%|L z`qNNm(`EB+NDVkibA2oElUWYzTVq8DRP7Z0Pl+@k?x+OtAL0^MbzaLS=`3F)S=sxY 
zUy~lr-y>Qg@Nk1prz(=7@FA;c$+q+pSB{$#;^$}}0nO@&rOq7i%?0O1rA_j4ixf^H zl1^hmBJ5&^nDSve$l!8D4h*2FiM1Ri?||y)(k*)@pB8$96cnpc0S~&ZBVicr*Pp3f zwx?S;mWlg+FL~f+Sn{7q+w5G_ZJt`-HEBm6p%`_Nb)q&Hy13C-yotF za=&|?&KTUHJ9zpzoH5U{?+N!0^q)&D<8yZS30;SJvBhT6K6D_-WI%obgH1*E9VF=B zqX1~=HX0xBz!blnPOuH31&&3fY1r(sXQLhBT7B-cISDGLkYX(e#iV&uP?6);)}-wL z5J*T&cx@ZXlf_m$TV*i90(zV0*}@uvyMz8+9~yyE9jcElISdts>Bf@r(!zflG%y3D zKf0FyIMh@(DOn2@d1Rs*p@XrKB@}(wcF4T4c$ahrY1$k&`(4?rE0_HvLZD^{CYZb+ zjtr1u)Y;U;s|%r;Q*LudgFfc0@NZGEh7z>_-W9OQ7FytThC&5p5^O{@WF6t9=RR3U zOx&#Si4I+PZv;+s8QrAJ$mYqPjEOy6RFrbroapd~=!#~%Y!>WR;@NK8G}w0*dD+DA`gIRjGWNBI4W< zeo~i>**~_kk{mq+qrr3A;Y;8n>fudGgA;a9f|QkD&$!E-6~zQU(r)R*Z66ZZb>PL0 zqT8I8^eUpIka#S~Rb*IL27Dq32nirF=@u}@o}$2swKO;_;yHrcyTPg~(0J|z&XOA{ zj^qqNkI9Wn5;8VDQB36`l^%d=c5hJh{D%V%&%14I=m;W?zzY4yW;K(@aWnl@?~d_M z4<>k`hyG>)Gzd(%hnv@Br{(lWtvlq~ry>C?nBf+d0X|5Wl|GL)hQpudq*PZM^uYrm zHoj|RUE_-|4<~w)lYJ27JaQp3JdoM;=RrrK8Os_KA@ha|GGCBnlYuLCI(ka|$;2OP|`u;a%~;PtaBU_#%3 z)XfY&;Eey(NC&!{l=6O+@z=gJq!~=~(q6Ep1!I@~4-f$^{!<}!)J2R6soB8G$g5G?wj-ht-LW(5oM4cC1 z-h+vNpdUp46kya*GjD|Du>$Z6LX?(4Bjhx#drTGaq3DovoD7>rwNok$lm_#tF%)Qx z6~NB8Bjlyn!&hD|K{7$bo!)0#ra5_x#a)-$QK=L6j`AXcN zuSHy1+8emlM6C=^I_$Aor!0JAwE5^%vnh6O6^;)K1RsD zTMsxVMUcKBR-jA5Ar6jApE4!l>MNFjmum&0)!%|EZ4us<2umP#3E3#?Q4s&*Js>FL zh1BS&Sy_0u&(I2%&6i;36|8YY(Xn2d?9>8Ra2~cNJIhBFZi!%QwqJh5R_r#urQ|4@ zb9*DfH|933;jf;-Z%>T36;(fGfR ztq4{$HuyF%``H!~4F@6X;f9S%QZ0!t81?pGl0XK>z^-=IhK*B&zsahAQ64a_W!fa} z`e-YNdu{c1%npRL7${W@H^rKOs=>01MBJFGM#?m3Is02JH6uaDC?oEnvmeRp?B zw2Q}FRj~W+Z0mzbuITVx>pPx9qJ*JuGZPz@ zb<3vCaWj~PD2h0ih7p8;pB~VUX|=l?2!AMnV52*o@S+DPK~6$E#Yt>C%(6HNj7zQt zuQg!ZNm;ZpJ-RWcG=vdHAOsDS^mWxtnj@>dhDDTznHoL01@-F$7oQ|TXVd0_xjEbj zo~8w78mJ+KwNTflm2N!Sa?L>VMoGR@vSnaphQ=ktD^A8Nq3EWSX)tte^xGHn34-c<{yZZRPpni~3h2}J-J zftx7|-4)l3a#f|#OdHh@g~8NMju~g%gO=T)v!=1%O-tjNQTM&q5Qy$WCW)KRehp|sGFbPiiR2-6m#Uw-sQ%w53lQNHw$2l9YPWLVE$KHBzzaCR$_rq$$$ z+91IKk4$5DGvqKthY?xZt1<$_X>?3!AexwVNu3FEIIYXA2`@s?dZtiNQPRa>x=)H)pFCmI*iC?*vwI)}TMK$6wFmZPgMsH(@hH3!T4@pwJwK 
ze~_bSL|F!eg|WhxhLxs(jFEtV=U`#z6r8(+vv6OV{Y>a7;8c4?4f&hO#vK^EUK-lE z>Oz)&bx5E*P!R4+4%C=IMX~#H*Iquiz-JhBD^N&6iUJ2O2unN1jNwj#x9OH`C$IZF z7m7^XzS&GO>ump{$~e||A?KY-m2LZB$~3X1!66H}p@n565yl09qcwa6lZC#QDGm`F>jD@%sIllYKfzge z9J~+6@FtsAs+pq|8^z zve0pl7^U~J2_y^Fce5>7!~+Iv63rJpz-S*%0CpZ2X)B_IYpW;Dt{^m6E3j2xQBJux zZ6Ebd9u_DWk9x`=OYL>^#g!iy6AJjDUCW|%A8e5+Y>~m0JHPFNTA)X8S1i-E4Ef_N zdPkNy{r#YAmp`u~5G5YgT<+0)`GfE6vRm8kB_3kgkYk+YERMj57_Z19&a|n_(d`W{ zTOqE|g)iCYsYq9GA<0#Zp_i|0;5psOvef~sggGQ^!)GEq{c^QdNy0I4az4NXnz@hA zLu_&4%jYvO1XHLrq$pG7rz7HMf;-W`&f_f0s<*0DuX_Wt0%9+71@gm6rN9D#iqm8^QQ zJT-)!80|28N)B6=GOK3&afTtc46*a21ti5d;&QTKPOM>}5N4o-BO-CnXMMJWmD9p` zUQuhU)}m~2GE2dw!i~dDIEw?wD942F53V?ZT~$VaRX`|TeKC*ZQl9Z45o}r)!eE4G z@O0gfCY{IrL^UZ&bjXnyOsf5XVpm*= zUE3C3fz`??jeK3>8u`-=yjh)FY2yuP6VXKj_^i&7PyfO!*uR^Ntov>ln?@5VAacEj znelX>g|AQ-z>;v#=l)P5u(XJyBkNjFT(3zY9(Ufm%`Dm1YG8r@jNSd~7A;hwc=W78 zA=IqHaA?F;5CSP)^)Rdj=CdUQ8OULP3owNglK$+U!Gw*;SA+T@16jjj#3lk-!;%zF{B`TU(mgHH z5eQ{~F$OKHrmDMBlr^HyEX+pB+v5=l+L+je{}1QbxNdz$gLf>-h0uZl<7@F=xwI~z=vRRkNZ{Fk zaWn?EJi9p%&YrPRrl?U#xxzI#GWjWldy5dqb=!)_H-5I0nViz<&e()?r43Um;^Vrf z(j>WD%n=zi-UcbLfXk)5 zwxtYC2AhyEGTh8mU)x8$7?G4gi*kYAhXpzPTKGRf%6d9!hdz{&8^zT{336GN0o&(tU$IZ2HG z1AIa=HnEehKUIj(MqfhqxJoT%P9OW>E*_UwS`c=+ z7pBR9FSkz`rUsSHp42Q8JvSBz_``=i*UiYIXhj7cC|bTWykA~ihWV{nW)KvU!EqQx zv=0AvRiSjVZY^h&C-&unWx|=z0_7Mt4apVAhp-#bgdqzp1rJRETy6m` z9WP~T;DBQ|VaUwox21Z=JECjF<=aD9W|WBLuZYMyWXI9?hD7Rg*U409{>fMr+1cH0 z609Udpb%?;_oa(SkT?+YG1D#E@2BUIO(Frv{I;7fGNa)z0OY9`L&`zTD$8ZqUp2BL zr|Lnl6FVx7g(G=W0FmGyJwBP0WjUXt+pF3c+{j53iFPYV8XK1WU_8*A9zpCOru&eY zHG7+aP0$9V;rxMta(&^P8nDWml!VlPxdok7-$H-?%BYrzeyQ)7VKAPP6Ikuf5RzqY~C?yHy|tV&JNaGJaTR& zg0wcGXhmfR?e-Sc!mA)w+mp2PCCHtOTZ}{$5g&km3oRv^aeP8|!xq~F=PCoAD*875 z8d}pM3!2E)2J9Usix_x?{F%_p)kfzKR+k+aEYg9>wilKqZMJ7l5j4+?Y{d~Mb@Ucw$oK2z`Y63XHz z+!8qpkyifT({eBsgt|;Jm)7l3LU_f1ryWUdlXEURp;nu}nMh3NLY)J+(ikJnE4x&2 z$VuWQvVB-Ho7+UDh`w-((R8rO8<2B^h$c9~f_7^&N)^rzX%b-@e6f|oI9F>@yrVfx 
zcDA{F9D^eB^z(ZwiC;&9nWAo=E`K%2=1!#?=HfLBY*?|+&@5$HBOjziX5luArk+ION__1eC~MjcAuA;c5WIq4-4ffi^kjOqmqX$3@|XUD#f+ z)K2J0I@mE6MzRd)O$b6-!xSHYPUuj(E(V0v3U>$sdG;0)ULs{=Ls$el0bK4yRLUgd zt)fPHGY`e@t4uPDqtT5ClcV*Hc{~wD1R%Mh;3t^avs`U|B{v5XHcyAqc$X3qsK+Ez z?e0~WZ;ROY2+aox4@a&mq!$#o=mjd#0fBzFle@MLX*!gl;LUq%QD>1h;WH?FxK|o0|xxp zc7ResL`ed|A^@BV0S5d9kOle^B{>wWI;wgeIx2i zTWMkjc8*R`P6`(|3rQvORaTnR7%fT2b}~h(j1r6+1P9reF({FtgsF57NRn?L6^ikr z_&P(Gls>C;;!znYg}QTqmt?+Srl>g(&|@4cF_O<(h|q=$X@c*oq;hgoXb^l2unK3; zKo~cI4UV8ixZMO}3^}Sa90>ur)H?@CPR&zvL_V6)VJg8)jnlZFeWnNr=9(eHN_JwLNuH-(~AZ|g-meddbxn)Fc&OiC=bB~UM&amkJNcEjNy%nXu-L4$T_Ky z^U^9TL^Tju4H2e>5$fi1OC_~ZQg!@^d~>=3tIVXEu^G*HtfN2Wq{r^eAghF#@qlV` zH{`p8Ow36%8S&^alv1>Ej2U;9EDi=LRbFfxXLT^xB!$#dVbV&K(+4y-XD~j4q0&UK zQ^*u7m@HHht1{-xd}dx+2rSgNkb(hzRB$!H46)rHPMT3A_o0UalQKs$PNTr1fh$Qr za}(HENqmA33=44Tiful_H)!HAPTDRL*c?pG;2QOrJRXLOV47Q)F&>=(&y+r5SKbwj z=iE9u>Y8!ZSVx{qM?Xx?uvJlCP|skw391j(Iit4DyedgL?ex@*o|%wI!!Y3Jl}<31 z1nt9XWk6Ph8i}QZAN-(^%p4yKH5pt6M*;?8&d+_g?a3n(@OmGmKxdzw@-~*+X%_A@ z=M9036jCg&29}{Xjzb_O(6~YP7zM{LAkttiRx1sZ=#)zjFPNG6Ph}}(W?oAASE8L~ z%3@SJK`^KdYBO^M&3+am8tBbvVW6>DA?adaxYA<^&KV4ZBcKoWbL3!yUI{{AFE~kT z7D{<&&SghUa!otSlv;LBcB0Ro%N*%cDqF9HDb10Cw@^kuC7lbRX(&xv$}>s*QoGYX zXOJ_`p)whv^n;krC=6RMZRVWzD39{Z*n{x}wn`e~88&6*{iG#5zdC1dJVmF<$-x`b zF(0PWLK)w4502Q3+Sx1dRm%OG<422q@-t)UVEB#U#!%Lwa3~zkRp%LmaR#TMA%!6e zrF_1Yysu0c%DFJUp?t#-(v*aoVMqs$VEhW`0_J^}p3BRt`RGu=n2rOD;S|^~clA*4 zrp#D=pAj!Ca0xCie0HNMu2KJ1hN_i)`!^;h{lB~U)cjsGe!q; z4>|;QHXXex=4qy0Re(;t87@CDI9@O%g_LxrTn17zYQ_)3Fy@b$WR{LT@7wI0_On`z z@Y0E8$On`J3z4phY87Q69#J!-LxHTw;Mq0G4X8;cT-^iwkqd_P$~}3;^C#!0GiSR& ztAVHr(oLmIIR3%Ma2KADPaq`WJ*jh9DFmLY84WyT?&Wc+eIY2Cg4qHicM&ujfl&69 z92{k1=E2O7`zSlpU8phfq)_lV6yE4SO3?anAxHsB$v?^!d0EL&%Aht1MoK|Vu_?dQ zGt2~{H_XXyg$yFXClv_;2{Wl=7<$5>N;|>BILQL4vpgI%Mu~{Y6f;3ctR!ZE20svC zY6u1e<#gj0AR?hTUu6g~DCOf{H_UYic*F5ZPB~6c8UszOXvnJ?B$(hwIKdf@l$-mi z2xmyc2u>KQDnaK;1sdh!BZ;iRsG-m-$tB*$Ak4uB!3>jX!$DE92Ev#!1Ouw!g{n26 
zj=2fx0Il402q}>8lPW_nh(HqvgnaUbV7S?^35`BU`huLAay9}F*9y#8K@A5%dRz!< z7pU1Op*`nNkLlBd2b7FyxQOKnM*~WQ@CplwmO%!!XK~QdUwpErATd zkdPE6AjCRmHRXm?l9zAT7)Den1Qpnb4TG*}SGJSVmte4JP)IcK$~6H6#WzR@hQS+0 zXppN6rF8HV&QN27lOPC!+Jq2t*CSQW38$G1JTuIulyif}D6o;H=_5pn@S>qS0nLn- zNd*slHld_ICgk^ckd~w*oiVFbUCKbJZU&qy`|vT$k!NOTJPsg0fB*mxAOHb5eiiWg zxx?|0k1^6p3KO!uN3{<)owqMGZ^;J+b9 z|G}U9FZtMZno#0!KzmzIIj7-We_ zFVFjg(@CvFgXx2b;8Gv&G8Ib`<`c|)C}r0b6UOj{i zQl=PX7M$Z#7|fKTS?PJ2X>fP1q`^#7kkDr^pY1F-2v&o>${{w(O2JBCzEL@P%n3|& zo_d}Wwa$fsNRH-U1$3yGXU~~n1UhP?mqZ|?l8nKDnLVRO%x5`hnCE4ccymqV7-FWx zj}bd52?h%+wMC*jcGoCegj{g=A)dfAsr>2^Gu~M znfW4tDca>Eny+#U3M!bRsWK8(%dZTB!$9glxI)1Ps-W&!fuMrY7=C;k0g+1LA|*92 z%0bBRNMIF0u!Idv| z5wL`UL*7uv!Hq$Oa%l4vq813^hwv=W2p5n(hy*!uppl`9Wtw4Wo~sO*qlocR3So5S@5Wj(lWJ34H}T4SON=J(1a%BKE$JygUzX+LoftNtodm$ z6oNH^z?2_ch=-9>Eab<@VH^cI8z&w72tz*nfah3e$q%c0)N1Gsq|-!Gdr?a-s?z#+=8J>@o>qkTf}rkDRGXBp&ywF<3%?g$x6t zF-Q;EZ~;9uMW|rj5gbNM3TRUb>sV@-4@V7=4?~f0)FRhlf~$c?BVf(VKkl5OFGFyv z2NCqJT$nM0v9j@F#d}B>vpcOKSh9>OuPh%7O&Eka!N`C#gz_`%Xt|=~s*k3Q`Y>cc zCc!|^fCLuQU>9Zv(~mO-6BK%oQ8R|x4VCGS;J|`LWST}uU~>Z+2Qtb)!*g1cG?kvp z#9<(VieO`;Q1-kHIadnKgY+>2?}AbC>{uvV4kQzh4VF|VEjrO0LD{4*q$ca74xAd5 zSO%eJ$gs>9o2a}Lfe@9d;0sEm6d@=Fflvsol%cMLxQ6DzP%Q!trDwQ0;m)HO!MH#+ zq%nrf8sP*QXAK;>iBXmlLI@E;{8?~8(K%x0XnOcuJQp}{W#vHZ!V%$ubX^dP$+8?7 zWhkTsB^XK&kTQx;w2Y!fOO}FJH{9nsJWiRfOf_aFdeTXZIR;q{vYR<8b06;c*+fMj zG8@?qwm>kcFmvO_lOZdIM=viaO=g=fu{>r;Nyf_pCtYrB?G@ z3PUFc)k&Sy|25C~oV0w$Od-qnFQ5`3nw41|)ybK&)jEk491F=yjtYh48?74^hQK^j z;&4NnFUS;x;DWqzQs$8ReB|fxEE9)a*=fE@VHn3WO`X=2Fz&lDQ}L*00^yo7^I|XG z1uIkDm@a6AnRN;WqL<+D>`s;Xfta}+At><(_))K9~Wx7>=t=?ZI`!};*rS+Pl-*h#- zG_d)->D3*1X>J=#RU`9t6^-m+szp9Y%e8hnXOXlQ=el(YjF%!Y4LhrAzZYlH%5hH!nL@*8xv>dz3ZM8 z?a+*6sSu{>%}O<;p580GbEtNc_NGybQ0w|aEmYjs{mQ4ccI6#4?bx-*l+2W>NRiZM z&8xMs7G`aZW`Gd3Hu`G55YK+PXXIWnGp~um2RZh0n>oGV|t5+Kx?@ zJ+X2{Dc%vQ2(g3}OZCt7kM?z}y*pMOdCSN|SgEJ|?fvQgwP|Bz4wem~4dMq&1U6vp zWmR6QvYfKAt%RssPwjDxa;t0+)0tJMSxZT_X3Ce*p+MWb!>{w0xiwd`Ifr6U2-~`jf@swag}BWz`exqr+-!2G 
zj!TI1>dt*1FX7y^_6!xxYj`9(TJ_L{$e-(8+g&&0Egycz$9rDp7cJtLj#p6&y(Z6T z4R2>Jqr=nPXl!k63h6E*r8WO~yVWKV-mg1q_ZRU+MoODA@?z;tT+Rul`pwW9*-fc# zw>_<nBh^=vN@VT5?zZ~8O5C}5)zH2$y^~3T z_L`$qw2A2|L|56W4KclCr?2ry2-{X338M8NE#9G^e0s0Sg4UmJook(Cab;R8ybw0} z9n33S)7}x42>6|axGmx>&}-eyUvFXBnVT_R`YdqiKQH+f0lK7S(^@R^)a{ww)Ry(K z5Lr7@H)T<4mtrMZkWUt7arG+J=jv>6TP<6-R&y-8H?*o_QCs898H<41qgw@$m8uHe zs|wniv6g0vTa!`>-O&`Z9u>y5N-2*F)QBdtH8pU}}D0E$@c!KuwM(;-d zPuz<>;Wa`!;o8Cr@6cQb@$^1T&~2t6T?jQ%OK3vZ$OP?_VU{p0;YBSWM19jrRIOj4 zX#Wzp`VzW%C2A*2+#;0-?4ELwyrdSY7GBa@vk+5@w-CSars_`K3Cg{B=YB#6A#Q7W zpN_2=)zV!3+TRpSE35K!N4=)fxi!<&nw4T2?v+(!?U+>nSwv66aC~qRpWtUka@ixMfdr3L!K%KPmF_rJHnaEk1=; z?G&$lPL3kt+UB>uA$`eWuB0HKAfPFokB=v2$&!XSCoL?rbdobal$T@-f-~ixWh;0y zNh%0P5Rf}FFP%lnt%*UBUk*9g7=l1LZfQAg{kUHtsbLzXQB;cFPMb<&fI-dq={%R9 z{-kQ@>E|_l671g4aoHsh2s%cb5ja2oVsi#!a68Tr#6&cO`p^PHSh*TlTB*=J;e%((A2gRxNAVO5Urgs+m{ho~f1QN9)XMv?r34 z31_~RaCFp4SEEkDq|4GWO8OU5LjC)Cb+t<}o4HIWICg-;R!?MZmIV#vsB zujff3*Lt^ux_L_?*KKPI&mUSPZ@H2vDG7^`Y((o*k_c?Tj@OKaYKv&7W=M#+y{uLB zK2vRJsfnmgRotXhi=*BNfofr!nWsJ#r!7MDomvPo^x>uM*`@QjKDt_3%#`=^w5Khw zcDi)ZxpR~D5>3|R-ia0>)zEtMp}pn9*O6(3-i~jG)_$$&X zc=mtm!WH*^?rb)?^Ucnp&H~R~L|r`EF*;A#Zl6S}I%g5syt@cM`c2ZF+1(EBRoh(z zR?S+cXmoe7qwl?nr;%Q1?WU%;XH%OoH6vZCvZX1_JlbxJwRg2PQX@h$`qbh_$3{&E zXOWJkWv0E*S&N2gq?fjNBU`OYbFF`AXs=~1S)z$tRYcP~H2dZck8EjtXg?}1&7ld? 
z>IF7n$==KSZtD9|v^HrW&`UIQG~^Xzi5K_&m1Dwav+&Pf=-2sQtd_3s z^`HAIZH3x@k+kUlWH0Gb)?dFJ{U7>oei7J!4XhC56|Sv&*|hd_m8)~*agG=B%0Ax8 zMOJ>5tIB43UPLOJ%41_Ibmx%lvKPk}t5=bOwV6oP%r$z+?)y42(bgUL=4AXC*=gh~ z@)*?=S<_RdRU%vyk$ps(CVLZ2u3PPm_BPpyxk>Y-O8YMD+a;dzGiP7j{%xVSnr4@p zEo`!nw#)YExYlNDA%xJq)A5WN?x3y6HOp~bj_+jSZw}7jrS8pk@Q}zyi#BvI|(2F--V>zwzY;{YvnU4JmPk*bU^^-1Al&79jL91=q8DCXhO)gAe| zRJKjpnYklcEpPruN1s;Y=KM&Av5)25T9vLUo{o^{$iAzsn|sZXuA!bV^@KHgs?^h^ zZ%)`E&WY=4>4oi?{(Gpe)$4^2l%rF5x?1|3^wWRa>Thv75$+*T)u-;QRG*BVrjyXp zIeCw2bT#^--sViW&WO%PsG5-w>bgbW$fZO=2$7_VsZh4A?$nxTqeCj>bRHDKpJt8Win-Y<& z9uXN%dF{0>*${nhhG<`2)6m=saj2&IaJ)%pA?9e4jKrvS(ny@`tX-}8B&znWM78TK zEvgfqoL5zopxPo6mPvR`BELrJd$rUeYsu7VwXpA*cb-1ma^BOqcT0KlilNmxlQ^yO zCZe%5s8uz=8GTlPL}WE8p%H^+v=AMR|(BZ*rMA1*+=gKk(aoG#sOW_lMWgg z0RR91024q*$0ag>NE#(kj^i*08DtPK02lxOATl5zQ>3&27y=z}79q&iHqS616kz_d|2(rL zMRuSg9>7_L(mLW%-KIrUYu$8Z2=!HVi0~D1p?JU{TqFIntABmL760xC?@76um3C^N zjykiV3#}A zg?A!A!V8GMw85j-Yk$a|Ce-)nR}3dlPvS841S^Vd-GKY@b=rWe2iI)94qulEPjtw( z7@fAf=rpwJmMQ)`Gu$!Am(q?rJnG(}HIlGmYT5X}`xdl$ZuSVDOIb(8x(`d_>;7e> z?2$D};B+NSf4DcfqP4-vCYUYrtzfhl-WLb!N^$_*_waVZ@ZSZ2LNVrpL%w*s=)+V3 zW4`x(uqQqu7BJ8=w3E~4U713gKNzxr#PS7%=C$&kgg!p)a=;V--ZF##)p&l%h(}lH zIxvYzN<%J8Wsre{XOHJvn;Yjm?x_+;EBF=MU?5*gs77Z!m?&u+j^PcBkB8$jvxY_W zQ?C@aD2Ta{%mRc&f*!y?>UlzK_1M1K`z(7%(?%r5@3(oLRsSe3MGv^oM9_u4@l@?M za*OWBie5QZxqeiGMV;nm8tH3(v9XJdRqh5nfu?5xj&p3zmz1aQ*u&@Unjd=OZJ9$I zHjILdULv?L%%r1A)2qXE7<|K^4wy9qZ@YFET1*qAdJa{{6BWwhJt#ch-QbopwVqfy z5>N0C{zi(UPpj^6QGq$QGOL$caw&7mj*1z*m?64UpfFe#tfpoTRdlcv1Q~S0hBf}^ z=Hz9CKu!ZVVI-zWB!5tdz=B7C3Dj zqpTzvhtW8iT|FSsB$r9Y!iwDG!N>Adc53DjqqAu@jX> zD)m$Y&$)`1=P6+g*3x^9gPGlBq~3cSxIINywIsV5nGPUf#eS0#P4e|+`hu+(ZHH-D zUxWt?Gn(~rrBp&gd_ERm}RV~lAmVHvo?UA3rnJuY6jGsX(T-yVkOkxFiU&A5^7IeDh zV5$kHsUJO*_2x5&eXL>faKWm}Cx1-44#eY!3sd(t<;M=P z2v?5%#ox|X9v!o-{ES(QzLkJVtZ}r7qCbfmf;m9c3O2~M7`o874{k0cXC~L>dU(%MC znjOp%dPhNIv##5aTj$!OJd-D0L6ut)&5@FliEzNlBwye2E*$t-+w?LiuIm0 zhQC;b8586SB930^*F$Yy84Htae7Ec_v8T?@5BnFSDJGuaIFl8H+!rwV`4?=UsM&U} 
zfzRIy0gs8Dq5INhfY4GPd?WSWVE?Zwj*C&(bWjm-A>6(LLgkBBXSD(&kt&C)PDt-Y zl5*?kd6}Vp=<|3VjbcAQ3Lv(8`|tq%(zYSP)SLmd%UNu=^Ibg9Vwlzf-?2SxZ-w|T z3smu7#zUekTp^Cfyrpna&kOK98{s>w8V6GRSWe=T{h|B;#m%#>9@I&&`OBTc3m~0k zaG+mp+3IvYe4m2Rk$#RovB!4JICxQY%cJr`^gl7!n~}$4n3P$_#aE4Akp|%m&LEf2 zl;x|(!7RPwcADTYD|JXkmOTGXSn>r1VYv^s7W9rv9B4Q=YWYc771cm`x}o*=Qg zt>`BT(+`&_R7+@o#0Y>QYuU1d{*fJl{?Blbz`3F6noK6c^`HC##uxq$^am{Av?T$B zuO{FePl3_i9QCB)DKUmhM1z>Oc0Go8x1n7Whhd>>2dK@}uP5`_H+Vb5NMZP%Q@vJi z-GSsL)ix#a#zo9*If>;uLnj}4>XI}|+V1})b6Z3_lg7impP~9WznB6wrhKwQe>asd~I7{@&L`d>GNT$pl8=s*wWp8b5DVdg$ zLM{N77gu@=y{OPeoQ$^Hl8g6Ks*0(+HVSw?J2#oP12DHe_LS*f{=|#a@j}P}Rr^n# zbS#ln7fs_Frd5XpM}U-dceefyn18r%-{QJ*WAT^Lclt*B=WlSjivEraxEFC4_x%P>ONc%^}ktYd&iWc5)xfyB&9H1LYi z@qu0MG@}po1T2aopXBQ|TNkRd}_m>R^7cnu(6P@jZ3a_c(dk=L%w&0wYiQzLph+B^3_)|)M5 zX_;V|whZJLdViJBl6Jsr!JwrVSF6_fFk$NA0z1XG1}2FT*cyobuu0^4oe{rZD8nTM zQXhqUK1A8)a)2U(a`<-ol<&K;BNpCF6`pgox(uP}8CqL)S4*bXK7j!TEl_0wXqMXX z`@bdcE!#4f{ThnqlcZ$v744@p&9^j{POt}XVg;nb{~R2s{KZa(4Iegeo24s=)f+Kr zyX!^kA}?|ygspfHg|Bi4WWRvtR|PfuQuE`*nI;?q<4Hwt3ZNJ=TNrwR#!$(AXOdA# zs$sjYErnr=1%AaL^{8(7w~=RQKFLItO?8BFxWocfIGNkKE1=wb#f{WAqE{#(=8|nL z@dvlrtKBf`=k^%?kF-a!RyI+{upI}{MpR|0)(VR1yypTv*DqnZ9ufZ_u6i*oe;i{% zG`P)z45Vzs#RF|9i@i+Z6kHVhAUy(`KyBuz^RAllYsIFtnA@m2<}<&C&7jImfsQVQm4i;SU+~02sc9732E_60M4=X@Pd2fuLRgL=%tlS^ zLe-5dVBF#7BU{UQ&e-&E!+Lp=Vy@l(lmLhcAoYPiF6!^`ZmcMlp5~Cf#QlD7I{3sy z^#6u=r|(N<)M-WFXUYPi4M&DSUtuL@yvFpAzJ*Ko)I`a3en5%JE^5WF>ZN@4yX+kR z1}M&VNi~)mE+?&nUMTK(h;ag0JdT0NzymbkTz;!vN^Dsv-`tXZa#L)`nc=J@y0q6xPFOZ z@Z6C1suOWp4FWyelaLLDkZN=C>pIO>wCh}nF}!4Y9@wZ`*T zl9{`th2OS{VOhS-c!%|Xjk`qO23x!Jg_mHWta?ynCn#>8y%0F-d9_?y$dFmsR2Eq5 zgwM-ECtt^rR_-m3$}nXoA$sm|ZIt&ORr=nU?$O{sZ%D0hXU+<=a17jt+r!`k$eWnN zXqDbgL<^bzY9x}9pZ*hWs?&TbQyffcuRWMr|BO=YLZxHopN`VulqxDVB{C(iS8q(b zmB3-A0o7Y(j6No^B>^=Bz1A&zvGt*YQzOgVpFH0+RmuF#CrjO6@?5|Hj6-Dfe`67u)Ij)3Ayt9%vLz@nllwv9g!za30J;HmFmoyI>N(sm*24BL?r-BFTy zG)&H;f-NUh#0W48rzdzxV^{QH&`sH}m72!u1TGo_YMPkv3xr898x6ffXXP5G{MzW; 
z#cDLUaC$zHZ4e+7l;MGb5$Z6F%2?RY7(g(dZI!#Fg009F*!(>PQIPsoggtaBm%RzL z5o}OhnuQ?tNss7|mcLtwYVYv}rRc*LqyqJD%Vo1g%y{rPHD**sr`&DA;`$dGl5osm z@hj|^rJz?b#YkX^qG9zCR9L8s#D}C2Y~Q)6QFOz}IRxc27oR=PU2D3KVH|^ZG%IWs zj4Pe4r1MEf!wIfZr^~<4CN>4Wx!${cY_(T?ak$Ypt{U_*26E6T4%L+CQHn#gcLr6x z#@Y^_E8T@;LKN|}QYzAI36RC6UEpAw>x;ycF)=XkH(~byeeg>N9B$QIk?G>*oezJq z#5{1k)vpJzWm)%pra{hAJCZmcaj_<>dsA#j7sP~`%0#L3#5fD)TjNNgEAKb6SR zoc#lT(@viSQb#cTI~U8tG;KST{?k175}0I#n6`hr)}8i#U|vz^frFHU28Ig|ERSzr z@UIRI#E=j0Bx#i_sEV52kn>hP#geY4g^Ah@^E34S)9HX!AP|9bkWRNiGK(}g$Cj9W zpK^6mNWxBt1IT<+54_P41BcK$`|}Ee3NtlfT=amELCFaLtS1#V2N}eA(L*ZV&#{tP z8;Ee6;{>{P31|Wm+-zl@pg(K*z!n#Hs57t z4LAom5LnKW!*1(3Tl06?4#CC_Q!IU+JTM?wk49rpJv59u7Y>6mgVP77iQnTN<5N6& z-^2xx#3T}EAR6nL3^)r>P*$i%yA1I4f<)O8rwV6b9XJKUl#h$`Jc$o9`SyM2zJ|1( zjKFNsZQ?s89Cz9Pp@|$`m_Eb4-U^n|sW_Cw>)V?EE-kg|W#6(r*iC=%jWh-;znIWIWii4+Y~pT8s&d1DRX-Y&P7WbV(L z^Lg>PG*!p9u<_J+M1K%`M_F9?h-QbMM~2ivMO=tUrYZC@>!4!;TzhYn<~`MKv$({J z&z8*G#rt=M&J?5dj9VXcMt#SPoE_-+n}SM}GAx{?t&}GYY9O7(>A&L*7~51Z^|B2u zY8Bq=!?ZaTahK0rO8-R-x zdtwT$okTpS?2q2dK}0 zYzJmKqMI(wdstYC&J3}9^lL|)udTgL&>efJ zvAxbgc(|ja^4`pl&Arigse?uRjQ(D73Hl@ea`XzGt`~hRHcL1wSA?m`faZSFZ3czK zeqAFVIhn`%tyD(uSpfs|_+b0)sBa`xuhp6;s}UT2cu)P%SyR07Ivj=o+eVqxiH51e zB@5X#wKIehLd@gi59)V65V1EQgOBvzJ1f$l(XPZ|_qbAbt4uN6K5LJ!Dse_>k9n3knX*;3I8goo86b|HJaT8W2Gj8?|%{9qD|z&?oa!&v}8ipqHivW zw90>BI0nf%61bz%)#bH`6115$|FohcW#P<`;zMC>vws z9zqCAmn6iDkG~+o zZ~^R>dPTVdB;IOoM37tSRxKS(yXZauPP4RN>AKOx2D7tw;+1_7e@=|SZ zpHmW7mH-_FdWiJ)o$kNT42ELX_mU?|e`?V2Wg5Q)UiTm5hQF6j8n}w*#rgKiZrxvJ0k@z9HaLY3~m8quLI5*=>|Jcr$zE);9#l`d@E{K)K;#2UElI(nB=A zhR@wLeKu!g{Nt!+%T-MmW8>g6wqqp?PMg%D!f}RZu`eIu_sjWkTh>1(M2|YU*7UoT z3Z9A0;4dds@mDCqzq_(W$ES?VVDYy zul|zl=^3wG_c$uk-n3HTH-2Oawbi0)URL*wse8Q%-rwCS^T*B1MtpO798=pCl&<){ zJ1t_NIJ4Jl#8VD|6P7*LU^NE^X0>fB5tSOxCawL5n!Z*UW1T&!bB4x)A#+caT&vVk zjSEOhh5|0759+CzH|(+bEQufLdF7{~=BuBnXiAAYM9(c5zCH$bcr&O5-VMR`9Dj&~ z=C=j^-3TB2{gC)wS*)@3cydjudv;F^PnNLzCl_S)XXg@htn_||D+<}#c|dT-k7TLf 
zW{FqqS0xus*GBQlySi*c2Y=y*Va+N{g1#~M_Ik)xk^VVK9J>26$5GH%cEpEOsc@~b zX?04j*p)(5cA=mp8|4~`%2v6{MuKa|GP;@(Wwhgm%y7gaSMTs`rkXkcrdxo|0 zie-Q1{J(=iWneO3qy&r$YKTBYO|l!cP+qe(2Bqm4Q|9yfeF5$Wnb+rNym4Z9YJgAL z2Y3jVrnZdjuXXRw-H^9?PIy!S)>n8DiM+hi~PiJ<9}MI)=+k4!mgEYfo>K13BF;xZTE(#HhD$%b$0O(hLJNNknw zz+!IW?FW#}LI?X^I6+>7+s7f4VJxvyqIm(=4)GL$ z_LoshW6vL+{CpQP_tK1fzXL#3#Lo7lcshI|VH&|4sF5;dM0QsjE6?=cq75JoWOi~);TU&e_g-OK)s zHGpR$n=Ys?bG<88GFngixb&_&`PH?Me&$wR;$OpVf(6h&?k6(!1zS_0lt4ENnBYph z#vqN2hF63Q$|~qiAm-W_MKdyFAvhQ6Evad4b){E7y9*G`tJQaL zGxhh|^JdmJXNTj)QP$TY7F{YE%5pzMujc%iv{`m^uZrai-@ zgAS=%iN=$-P*fxj{D6;t&(>AHz{W@8Y@d7%e$+5(-wRQHcpOcbb|7INNiwmRWjk{` zUCRP`&l^oF{%Ag#^WbLHp{W@hyX#ZBkw)2WLm?_93?c~^oRIPBnFCF<>Af}Kl#m?u zH{vazuWBc2h-dk#f0hp$15!KO8i0KEM{+|aB>k;m+$Ha~ymNd(Inh z~(F*ZO)E{~`uNGQCu1Vcfs9a?yOR%|0pPwLNX~ z^ehiCAsGCd_}at#4_fN6JcK0|0~j&d%%0Zfz=I=jv#5kBH7|1syIF}(e_b;`5}3XJ zZ?w&fQOkQb*=Kw>J?#DYZWiGCx70uE^icclabn;jpAzIP^_1D*)wrHn8-VIH>$d=T zh39+o96Xg_jcaSTmFZ+{kceF`Kkl`1C4GoBi%|LzT)uJ{|LJ)uBEnkqP!m@t8dLDE zwCh+hXY@D!ho}dq+l2ojtRjy40rz>eMSR)D8(U>tevx?_LuYI3J(EjQQS($h@i*=tu&ug&c<`%b%_jDJVv$eP91Vg((^8}W0jJ6IuM%QylcT0*G z@@Kw(O(K*WwZ3PqL$Ja_w=2jx&Qrv}(3c(<{y)5|*s1lL{+}7*>UsoK6`?!6`=rrF?8P!pYBM4m6sgP-GkJm>?p4E2rMYMcDRv`r((`~N?onPk+rJiJHwK`9fu?}>Nw_Qmsb-b>dXnr+?ssxv7*?3TgDm@oqKI+*M<|-92VgTX z3P2LI_)X{*+<}%`LEt>X|8>Tj*(5D$?rwx6dua=U#=&6bkGFO)*SDtz$%`BJkA5SY zgkDXdzrb=tpJ0^5Pm5fZ80gn1utJ1?n{2v$;(>PZF`W0rXdCbM_<$_j$RJe zU>8mR;6r?tqy>f@sJ2m{3+MM&*R)ayO?yC;L{0J1Mc7qOjByclM zSAx?9maEgIFvpQP)Z0pZ8-94i{ZQjSlUKRvg22NMY5IxU;$)1pIk>YEpOO$Vf%MjX zhV{Jyb*34v-!;BdM_f^@m}gy9Sf1co?w_Q8Z5$Bm&lS^E4`(AZyFmS}vH{OW?rava znH-M})&{|CMapO?5e>43={;%1U3;Bzq^C8HJxaMznadZsydE#_z71rF4Ys4s*PmCo zMZV&6qR;EZJhb1|{^jX2m>s0E9gqBbcmCgmx@gixRkEN|wUc$$ySGWfTCRC*3HS=U z~}~Czrm0_$tx}dpNxEs^|xuEbChA^qUnH zeHs0FQ(^jY?rcA}bTdnDEd!MltvGzbAJlz&-e+|m%v0uX{vQBUYAZ;-N=i&I#7WI= zoMCLsQyOJ0l1ybzg=RP(>ZLKP`A7+m5~s%M>tVeY-Ke-&_3>{g&p!EFt1Zm}il$hXoWLJP3^c|m%O(6+;Kqr?n%sW{nqG-I<~(6|g# 
z%C3%eVde##fupVAP=HbWZuDtF##lA3M|MyP`g+5n9e>3Xyu6`R@k9XIdGoxOjBL|< zE`PVMTUcPLfzjV z)!C3N4-{%m%cWjk3&cXd&km~?O^E9uT)Pa`tN632FM(On^OIoXgDBp$s) zb8V+Pwmnl4Wk-k#L-Iaw8su_9p5i8MMb2RdnfZ%<8W+9b>9Ez4X)**aDVK8)z5p5tb0C--1Fmf%T z4U9?rHkBDjDoKtAHfl+Z*0tg-@pd%O`Eo-M{0+K zn{9?0L5iX5Q5VBok0Ko)+e#NAM@F$#~ zhZo86=^2Fx zsxWeNs?||3>Nl z&*7z4Hn}8_DQWV1wNMM?^6=Iy(%shBfIp*ym;LD zi_*Qx1Z_Kwi9COR?5^A=GxW3R6Bi@$YrK=RG3Z%EY~}z?~=xjxSZNw}>CW zKXFEGA*hPZo7hNAH4x*!$A7xy!@qHC0J+`+gfXgs;JUFv5REJQTtTG!T(JnXAG~|O zN>3kTl#4M7*K=<@&Tloe(bT{D1CcLi1ml0fW@%>;0>8v(4+Te_==&`rHpl*|e+eEG zQZ7#*oDm&Y$eP(hjVIi6+NXfH^Z+G6f?^|6>?QXQG>s5n3yRP9XnAw+j>qy-)D(Qz z7-XDtFEWzMgk@VElO@VhqnL{U7HD7t0|xtLcEzdF0?xfEl%*P~Q07R-Zy{xXFJlKp z#|qD8IQzk^xvBzM5kqSCdd_!Em*{8h zD~ps=`g^IE)A3BF|ELj4`Z#&Vj|SyB2md0(3T`<1me4kZAy^vrWUJm5QnAm0kZnMZ zLd>ky9_+s&x(oh4d6m`!7kq8R+uuGI(==$#sf)Q*U!UNC9s%M~YI1SzwNA!9hqWG} zlR0r0VTGQ^=3lft z9g`A`e=>dB18}HU_DUo)se&PkR>++GaUcz;|jO-R7y2Cn8~FrGmeG#{+Zzhkm|M zt!l}(7gVF}iJ6gpDkJn;7}9PjcwH3;YO0Sd?of}mQaZ%)Vqd>mETxPBMe!9NrZ-Iw zC<^+TNO39->mMtki0Qx-)A6IA8$ksJ??l(oKkewamJ-GK)!Xh3 zkk`|cUV&uIrs^6|qex+|x!{8)c&}1gp!tsiHJgKgdCOaRsEz6i4g^4K*S zZDxGEQ4r5Tazw)CbugcYPS-*_fwjqTP`hCYF<*V6DhC@@{K1xO zR#lv!z$o@>D4Q^)RQVp|gvPCB#JqVD9|#AP>qY!(6_f-efKC`wj_@I%_?Whmu0DA# zDF&CA&wS!kSDhU-aLoBeM?wqIs>1;|2AtAP&Ig-4zhwry62v_*B1|9wX(ANjQ!#Yg zpNfLfGge8xhLysXE%^5*SNX-bE;32n67JEp_OkY;=_WGSrTm2H8f&)q%&&Q)_e2^N z34e?LN}VDyiM(4ec30+SNb;sLSQ>*<$Jvpfi~pKuCHg%Xo82oR{u&6hlIGJW8{`#a zN5+B2V@)7oRr`N5&AQ!22KW?roA$Ciu886kT4p^81kZ(D`YQhl6SiD7YJ&5)oe@YU@ z;<+8mUt~5sJ3*t=pWp*0qUrzH7o5F(Cu!JC0|t%|nvFiV%G#9W2M?OLF1f|oOK%f$ z`)EIl$4T;}6|HFN=vxYTjWgPQsKirHwtEQ^pmVV)vP6>`KXZJ%cDlt%H-pepCikjo z&OeBcRha)(<|oMgK5Em%{t20MUd_V>IK;tv84R z`($)U%79WNS_Kc$-xTo~+IMHzFjbW<3NB_Fv_&b_XX4h5!FMr%FaQM@E*+2HlV?vA z@?>9F67%3S7;k2F%`6I}5}EgbnFQ^X&AW$*qTR#%6V7|Kh$iDAbaN=QhMmSzL}Jc} zra==%;UUHpVLpe9A9Ii!1|r!*;_g($p>q)i+b9!iLExi=mIaS{;thzmRId^RDm%TC znOEov5WR)c2Y3(>m*HRwNR|p!{@OwNy%+lkWI_Up2f->a&}NP{2h7#MO(tjt)?QIB 
zHIWw1c_JU{T_d?uaQOgBl9G~=WW2n69(%aL3$E~>2P*;}0z3oHy1m_*`=XyS_J!z- z&QQ-;qNlz(TfX_t@7-*%(UWzqZpPT$EK!?V`e#H=QtV#X+t&>BT6G(L zHQQ%)&8}I$uCwYlGxlX(MaEBdywznbD=RNlW~__MYaJi6!(EgUeK{3<-Ftg^(RbOV z^DghQ`^%2A?{m)=Gj}aq@}2pWbzM7oo73?ZpLsRo;rcz)!^2$tx2$L8;{}tIu`cVf z=JG$|;_|ZZ@>=#&)_-NTdf{KkMU~mI;zHfkg}X5Mlh^-ROx|RMh|lcqQbc zOK%yOkr~tDKSxI1L{@G+WaM7R+RUo{+q{@BnX;~YT(_LdOro!h!b3N5eJxx7y&IKFw zGVk>F_L)6h{d_SiGT!s8VyfzrOE0JB>x{8pJ!ZXKy1v`_k}untYUeL{yl$!f>Tl}r z!tyuscb(tb#lLJ>uIQ+G+jpmYi}hIF?IKZSy4?Am_ogbIDpO@nOjI8OJi7YjjXBwR7?6;n|+N+L^uCA`G>FJw(>F&<*mM{PP=H}<; zZb$E4-`cM2+TY%N+1k3Px_obc_dT8>UMJr(KeMwc_Ih^aTRvXPt?bIKyvnQ0%A3r| zmyCS45}A+x_>XV-n2+xmkMS6f6Mylo@-OeY+w)cKUA$giW?tk)UgS+)$MJVt^WUf ze*gCW?(J{w|406P_ROFC|K20(*Jt_6*XJ{I&fk2t_o?Zc-sr8!-ZQ<=6W86juJh{t z&gR7bU**+&jGfKP?9R)(yV;%m%KW>XyS#93`Esr9z2fWYd#X`nZ{ynA*4eF|ICVBGXnxMnqjqws0xKTVM3_bnL@B_S~+2y{M;m$188) z@^v2hsT+o%a|!1GSVq zSm3Y#A+Z1>(P)w+(w@d6!Q*itqR@bBfJ6ow4F+NX(NGvDNDwVjA_YT328qOCgGACI zB#{LpH5!D$fIu3Gfk*;r!xJe}n@}*0#X}-RQY4AMaA*|g;UR)VM2L{UGzXK0i7*#U zavIJV0f1-{1t4k?GjRgo&NAt~wlN7c?L3a-%~8HYC;!o*t&rLz03A3ia6tiimPkVs zX;DP7z(NBC5FR)rG*BRM00f5t3j`3Mz#*Xl1BeC=EF=|QA+-$>6*5{AphN{pf;5x} z^q}#O7&MCv69WyQrqCT$hgRuXby)m!9jt;G0`$2N`^$4&>(R* ziUWhg(I`ZY2}h(^L|O!121kM+X$qiVR!AHf5Qigb84`(R5MFCq9{cvN>PeZl%gC@QHoNOq7)@TDC!}KvcE4d6r~;% zr5+S@;Gx(0YrglhGTxml^Y(rId$X(SGymT|mzE&o1d-XG~J2L)-zJ4LMVlV84 z&I=bWT;|qw{@#VVs;a8GyRWJX7q)vYJ?-UGPF$F{@Dq0}RPVJAHxHAV)TAb*Oehn| zgaTzknV8f?lO{E(NhO#ZrA3O$;7D4eDCc;Nr(7=LQe64|ESa=FD>w52K1Xkv4`3YF zCn9O^)~&UYwXu(neK94pE>GB0T^e;VuH*yAho0QG%O5T@L>itiS%5`Ric-{sQk0?= zr6x5gHK|EWN+nQAO=?n;Qj?mLnv|N9Qc9@=N~uXrYEqM$)TES}lv2xQ^rRL=EsC1d z^B@AjMN!M5C`B#Hx+N#IENWREOp{KIMOkjyWmy!pEQ(qbC z6Srv=(=-pXd(A=%IS{nYvrof3T?{(78n@5c#){ChwxV;t&NwtPtC zNkHuYks*P|6=;VE85n2)9XNE@3=2#G7aT~^1qcNsMFJ5UA}Hu^VJ|2^foZNacaQU| zlh2LNZG1F3FT$WrtiEVDud8D)c7(@dqd7U-%xArtYnyd_@LF4(S~_kVbIVJ5+Iys z0R@CpNP^Jd8DIf}1rnHG5FjZe3kqCxNT4ymiiHf&l+Ycge>JNe?#J2PL%hfNwGBVt z(k~%rTVEAg5ahId9O(U)uRZh5YR{&rGS107%R$|59OoK`k(-;vYc<@qeOo@waRXh1 
zr!4>(009RCNJK~=MT;sV0?jLE1TYW_24v7HfPf1X2S}}=!T=N(*g^-bkTlF@GxQJi zJPEaHs2%dHv&Wq6X+92?zqkXc%2o*g0000$0ss?0iz$+jC>#l57=~dO1VIpkh%tr` zV}LQn7=?^V0thGJ5Q=)N1=s<8>JW7Z-Wgt#%^_ir&rKvvMc-gXj*tX1|4(Lx^#=3f z?D5JKx6&SxYZabSdvNEKTL@8p#mxV<1ue!;>QgjG9Irm zEmBg@g&Ry{c8{0i^V{S$n7gkH4B`l4)2+6O+d}Va1DGDZM_f5pVxE=b2-6|V9~WRi znX_Wbt~L?I!+Xd^lrq9cWbFV1wi^TIXO4wP_y|_($vMPFi1Mb7PLyL8|EXYHD{_qw zxe(FK@uBfEJ%fA%Gf6GMxXd9UOg;i&U6UvuC?ht+@*!KIkX=6b97@t{O|o(KZv$|e z54=K!3*SBiCG@>bbM2`m%X4C7Mp zgFxH)V@SYiMIe40muU)&ANr20Q*}oWJ=rWZ-WEhAT6oKK2BMbW>dEoHXHd%(yN7yeogU3^D`$0}v z=<%>D5MTtZF1fyNT-$H^f)7P)D}KR&pqL#FKMM@ru(@9#`A|v@Nep!VUMC@TgGZbH zHMsKq$Zj#&n@OFTN-10-55ZBenM&|tGrmKhx z11sgvE*jzfJP%19B?2{{J1fz=mFd%31(A^8Qt~V&-8-u66+Q&p8#`!kT8?DDtDH4 ziynTC$eio17W$T+X`cRq)GfCGs{JTs@ z=*1aY8l|tn5EGsRY2qhZ!QDc1^RvW8w@r$qTb0Irm(*jbi_+wu41uNC=*(M&huG06 zB2qh5j4eb{7lWcef(I14warKBi?BO3tWWDI6)bs-aJz&nje_w;U)5p6FSOyD>6sG&E#bFn)IG@-qvMoZ46Hdp(Uz$(QYTw{4 z@X)dK*R+@LxwRv4#dZWuB!zcup@`nm=`~Ev%^g6b=#CW1T!4ob8D1B{=sMuRXB?V` z#jc>NWy!vx=dOmpd^W_dH+%HCXpf^ENdX_Z*w#)|9KDwceQ)?M^CCWuK9Yhya#4~f z>0^8^Rp0WFM2h)HL70VoOd&%=&^|g7`C`!2{D>a`mB`_mF%= zaztZhtK=~;UR&n~z?0LuO{Pjk!I1cBgQ=HmM-Jz8?cR}5oPiWP0yqIB{FhUc%p=ny zeEYAHnZICc5Xd41RZ_tZz7 z@FAHqv=eX?Z_ZF^R$?T5-5 zB>aN(oYWL@!3}T{>WnmE;dNGak{GO@M(<&4J_%_kJTb@tpD+kY(i6$P+J-=gC`mY& z1#4_z<8+jC=~5aiH3UGElH6jNe4`;mswv4;mzlw4I9N-dlI!|Q!Z5Kcsi_1!{0v7D zr6^WOf0h20P{G^EO76n`@>T)~xDt|BML74Agz=ShWbOvxtYg*l-8q%Yx!y}=X9^df zCmO7^dxyjCfg<29QZv?*>HLNU@Hk^f@B!>`Pkkl?q=?ofzNhT>>Wou_Pq9mFk2XF{ z9CcKy@`;A5o-I)56Zn%> zr|?JhyQkN#e7dN|;c*VJL;qy`7GeYoyzpdd1ORo&Dan$&l^CGH9wIqFh9d)tAmhaT zAW+BQq5^;d70`zstinLWcFKR*wX;k)qD~`0Q1+62)RN}ZNX;)gBH88y7F6^{fECH$ z(Q@kUuVx%n5^vnceI}{oJ}A(}g5jx~&_pQs%%pYv1%6*RLsKSHcl82$tIEF=%818_ z;=SWHEo9X~MMoGQE!7w&kJ<9CWU*J=u)1z)=Z8kzp@Kz~wtCCA%)-s9{IuAOLuOBx z86p^u&R0WuMj+h~qQI9SYRV6WOsJ+rocyppI>T4aK zsNv$==gR9M7gsA()L9sV^iF!=_`!NZ)8-~p@eiu zMNUUFJY<#&#)w7a?A1gNEb=vGIZ$q{IT=m%u!6a0Kr{9$m{zsSKkzGTknoPM|I3^| zyL`I~aF352d{8o`qr>*@2dl5DsjFH!!gvvKq4>7A^|QMI(gy3 
z3~=3iXz=&vv=;%){ma}z)v%g%FEp!yZC&5aDxA1z8TT+vn$!(6mD}r9Cs?&oB+Bhz znM(gKqKCVD7cBkZJWCQvQztJoRDhak0k9VdyTNk*h zh(KyL!OrHm?pK}Z(TM17P9Um5v)SXO_jZ}cT@0w>fEGy-Hb2rde`bt`8q0tbNKQ0< z)xp~J4_KJptFjI(tuNj7Qj+cv_KedpZC)hHTufjD%5fMge!nRXs@BTwdPFT}zsg~h z>|Q;@;MxEZg~Y*p@zlgxg8BFptd4DTj&`0_Z|O!kWlYj={vjjJn=tRj=3mv0LA)cs z!f@wPg$QEU>f>^n+B6pgs|oW}B4qZ`%>?h4vm8LV;}kHAbA85bT82G7p%Njy!9`K~ zQT=U&)xnyD1TC88H|rJzS|dXZ;eg;lJ0Q6KAUtmU>GHNM>&> z4-q+!WuBa_SX}DMG6=MNT|2n+jfhpj4(YG7;3?t}0Ms^;zy@M9o%bg(VwHbSLAjj^ zH#aRs!E~UEO1B|vsF__1F0~T!q*V9$PxRq@=WB^#Tc) zWN$}OOgn|q)ZD$JSWOP!wJiGMfGnYJ0!YkZR;g|R3Iiot)w>M%<`64>OTtq?n}xVb zz@*OYirWdy4%%#VUm`sI5bSfn1VAlBWG8#&FrMawGjJDL3bCo$n>4vs8|`$K-tQ8t z`puSadvY_8O~}W7vPE-4IZy=z_)$xw`1-LHz|o1hnf}pYsN1i_EGJy7xZk@> zf!F<`Xoi@h3YGx^o4XK|dhVHLgS}YAM29WYBTgoh(@T1K#Ye2=l-9M<%(UM~LDcWB zgO3T{ZtG~CVVYs}UdhFDB&WxHE4bi40L8%g>%-60q-ei-VFn)G5_u8%5~RRAu#moYiaP$8vSs+5H(*qHCz_N8>7O0) zXUKC*aon%4YHc~oAbTDOQ&36K2R26y^rUvu>FMYFB$>#K^tZ765O>HRL5fTlNuBiu z6&FZYm^P@h0%}{tOO@Uz*E*L?1@qb{F%n9-3CV}F!gbf>4mvvoq8itnqY?UlA#ZWb zIT>ti?s}!G(`Q&?w_>u!2^eN1QeYX@Fo-xkqGBPhy@GrmTFrmbeh~!f_&pv|n6+e> zYuF7jvX3$9fUV%YTKA5IQO$D-;8zMz83=?<3Kyac3IJcx{TTwL1|B8@!M~`n6_^3k zW*N}P!=Ar^f${nAxTkA%P(b}s!n4}2U$#CT@K~%qThT%%>BG!M&(!Y<`ay$DI%Vd| zSL(6cD_MaP@tjV(VTmzBPDQe|KX&Ahh_WKlS)D>BeN~(At*6$OtbZ^Vrw}{*4q2YV zTyvp$5?=P_=H2)eI|8gSCGW{G&)Mr|c%Fofxw2bG5kkB#s*(>|986eLcg9Ks+##EZWI^GGDG z(U*C4S!Yy9>tAXC2y|y`wzx$Ru3vK_BUzI>=+#auRcYsJvy<$nVW1@7Chev-i zfwq?+FtL_VgG97QYn|j%(=pROHEDhvTXmL3BPAcc%eVb>&7S^E_kdhM(Z(ED?~E$r zN`jG*m>AJ&ynK>Bg2Oi?QVLJ{R&bTK+sdn%Gv~vzwUig;a3xnA2Qx6bwe7OmS^pH> zN02&s6gvi=e^o0~`lHUgZ*c~#mIK&V=E*Ic0g4IFZga)j`szCpXB)6wfK_cmj3bp& z1bu)hgJKgMs+>4_3ax&Z79}>9*!^M{j|8$|KhB}@FU2AB#L)X*U1h^yA$EyoSlV5h zHo$x(bISp5p+u1z*CXk91|1bFSME-*k3lL}<2l>14!7nH(&? 
z;b52S$^SD#`YPMf*7D0PLfW z0z#~@L_Yva?4?nMD3A@UCn9FpxEP+X7_m@>H=&UVth4JzQB`HDQQ2bz1K=iW@qwAJ z8EoJI%OwSN^}%Ypsa?s3hOO?Cg)J19rvEPyH+5~1Q2LgfZWgmnPSO0T!zc8rN zWb+%!fGp-%re0GeY^=0K=ui^5eTwP=Ec0b&KTv_X$L4!2N*!&XE5=Q@2zMxVc@8Wu?*u*v<)HILwvJuT6D9fqpT4aov~bUjV{IPA^z@L@F`S}f%SeUQzY zQhK^}<}Mc#xsjW`&=Fr$ltqD_k zvk1(p-|?Wj9(26W9<&m7J6Nsi0ZY;%t<6Iu;;NV> z%JBQUvvweMC}<6RP`H%I+Fkn`+rdb?`m6g~ z3^bw8tP~a$j_Fl$B7j$K^H{AQT$JSV;J_=&wy?U!ZBvR_uS0@EXKZ_VNg$C+70l_2 z8I~zhj?cvEJ(mMoYws0wdur*U5GZqdM}T2^Z#)`)wI|-9J%s`%IOLOW(}>Z6+rJ>5 z^RwYS5xATioj-r)pE-s674<)quRecU7+~j&3pvAcY9T--%}24xaqI>3aBOm1AJxiln0Q)9L}aw6_WQfJJ8Dh~ zkn6e=P|dlQN~KhDE9u)VGbQdV^Te}UQT)2#FWI?SnMT@PlWM);4=+_eP4)Q0gmy-t zxQ4Y&xf4xB#JW>`HTPsQLoel8q6SIrKdyzs_jj>`zQ3e{Q%w-9NY}yCq82ox$w-Ix zTRC&LYjU*(E-ldU6V1fT;o!P1XCwV~m!sczuwP#+xPY^mm zaHb#X42<7*%Q3gS$4U0hiM;k&U?eX2vH{3)lS%o5UBo>C3n)1GYLRb^yStCf&Pe7T z!@kiJ3S1iOHCg8_2`nawy31ME^q7K3KG}J%b{j7{73Grmsf}*S-Lv_6t&V^e(E1yF~0It z7TbhuT14Z8g@o?B4%#aE8Xe;lukNdxv#o(wm+EDxAMDO=V2F^voVjoK&^1L@ z77~oXznVg5vTuGG*fY!I%+vJ&gUml4(`mZeQoy~5%d4eyl)&#wjE1I|H^NWdbZfYu z@x>n^0y4AVC>hahDOBic&HIT^ad#!$J@uGX5in^R`4mBA%Cs!l#n~pU{;^Bp3U*g*bOe$eXhg*qg5+ZDK0~=fxCj=f z*Ri20Rw^k89LLMo@WZ9cogNbp)96fU^Z+0AJiQ{>zGC!BSWSp7*p(00{A5J*dEYa+ zsViS(Xh%33#lMaL1rto>6x^wFU}n&v8^cWEox=RX^x{`wU#GA3UhvjYmwrfq+S--6 zuLf3L8Hus-Aja%vr>>7$>&l0dYnkV_Q~Os~GB@)J;zRLV7mbPaQFazZ0H%{)OLH0X zI=b2Nbdk=5h-RO;0@QJlR80~dKE>hSFbTPPNtXnB|2xxu1Al+(@+Ye*0jYk(dLg#o zFg^Lo=5rB{k1J(idGzHtq`OxudqgTn+`vSb5Eh*YJBAA)clNdiJ=Ki`^c4Vb@T$QS zng*P^kxoU}0I_rlvFwwa&>C0{DtA#~$@GJijc5?p%d$;1SFcWnpAS#P<|-aHaLX>B z3ceiNX$~p4KUfW@Z-_W36%EpJt0;I4oJ8n_ZP-lsd84&sB|B!{b!ESB*Ck6+aMxs~ zR~AV=Sd-YBFhS)pTu(7e^5iam(*s20`oD-PbjY8hbA-=aOX;hCfS679HK`J*?5eD` znr8R~D>4<^SW>%WqA37YTFha&?*(-cjfRbsJS!)N)IHw8iaM<#>q~{03>^h*JmymT z?Nqk;67bYF_R0^|9#qPTxgA#$6bqB@3cYe8xp{0)ihQ@YS;b2XJB~=$y;7;fNYVX` z)uxGQ2=i3k)7Vi)=cRV~%#Ueq>j7+ozVAdYZWB%|cnPI)?3Y|O?wz%I)M=ZU(PZP< zY#hY!3{UP2280?{0!r>@V=lgbUC9wL6P279SJk{)oz~#sXW|3PrbO^xvItvlom|Ej 
zzrp`5yvn9c*CG|@|0-M$xFMQv>y^F5THL%qG*^oZ>h%hid#e%v(&(5Rz_9<;6Q9+d6?c1128W!&3o6TS!$PBu^3n zA?l!DYC?(~{Mq4fHWEXIaJ3z`UtNTEFVHU}11>_raEyfA=uf^GR{J6I# z)&uW|FKOemk)rLChC)#Rd--K7z+)iHKK%)Y>nNG|&LUwqJT$uXz)B~!XKyVrReQ`| zJ8ckpdvT3KPva0+bT4H?i7V@2ehNG~UOD9-ULX#_LzTP?t}l=e(K@Kj3`Mg&q7rDC z=!&(Wfv9q=n-m(Eghg}?mB7~kV)4*lIrUKJ((}j7YrSp@0wlVmqq-5wO=YuC>4YG@ zAVp1$GZ-kU@C_%9@ZtD8~+}HoXa2+RWuvK_y?gkwE}mr z7RkzQOz}F5KJdu}%HygiMD_w6D}mOP7qz=}RV{);rLCGhPZE{S@IsUy(k14h#6u4= z9?D5`O$As&mnLHPP0MvUoUdK0!cJt~{}QcM`aWuMvN95G;MQ%+4Ku!Y|>H8o?A zJ1V$OpuPN@8b9z16r?Mtaz9g6veP@lA4wBv<$!dTsjR;;HTuSn++uY3XK76P;L&#Q zEVhDLUyvivHgY*;Km1EnDpW=SoNrgc&52nwBYEG{<%^Jqcc36@YuL~*YM__FCN{v|mHoG-}POzndY`to>pd>0>b_`wi3ZNhqO|F#DFPe}I zXJ&EC^RIM==ABp`j{170Zx6*d_U#UpC zfwqNeWF@YrZ6^jABaR@c#fXteUe!biGuTs-y>pP18~l1{I|yQ-`NGQp5cc zL+Mcitu6+7M$M2FVP z>Jx>}TR`FGL{XObFuZ*pS51&gf==dzd}myE*{R0C@KyU<7{)WMeGePbNqrT@ z?t_MGvv_oB+Z#L`1s69AK@-*nCN4%v~&*%YXU zD{?JNi_ae4%&rFt1EQcbC|=^8!A6XJJ2&bL@fqct=v(6V81$J9ATy^yZ=Xa~i+gSu zM01K3ZbsP_y-oE)Rd1wW|L{1N_-tBxW-Xl(DZ2lm5q<~+|L39z$;K94s$(yQsAjd%txArQj8(`5{Y15uM-&eJOI#A#HDCkhQLxTBi?{ zW(m-nusTh{Rz^U;k`l`XvobMtgD)h@4)?(J%dq}$)+5Zj1471c*L?d?sp zm`)hnJs!ULJ2!>%`6m-AfHG;}cVFt$pcz?J*# z$b!Y z$GDm=))`Fi-kKDb$2{C0p@y02!@?ipyD^%-6)KA5>#K+cWiZDE zXCMbP0I30sE4#dX{G6C5+Pitq%0{MvsgD}ui?P`u2=1)UZZ_D0dIpnQCVOWI2N=ls z48kqA;PlW0|3Keb+Xyjgwj-|hNJuhDU|h|#-Nd%WL5EX`kiEWhruGR`y5AJR>X3=Z zRndw$mu3{0ZL;O2J;BBP7gyQcXdtS-&~1%t6X%NB-|DtvWi4_ZZZe}awC(?g2Xd$9 ztk9&eDXmGFY|ASS!Xrq+Uj_l?(5mKWCZN!{`C+ml3N{_33knC>^ed9kNc1?Q2~-Eq zGJ)ZE2K%-^?}=0ZGwqf?(Wumea^wi*gIzk`en!51#(r>#5mp(3uz9inID;d`&JnY8 zQXIxg*ixQI_?a*HZ$3!M^w)4`Li+|S9}`85K4Q#oltx>VP9wWjpR{}AxuJQP&dkm{ z&n+F+j$XhV{AX+PlGr$(V#z}*2?J)NB$r9nOIEdhO})ZP9a)aT{A2N^ zCm23UXxd6F3=SKm(^d^QRffTs&!MRQr{6CQH>&#e3Mu7ET0XrC`a_!_G(umFK++SW zV+mNNV#aE4u(;Fk(AwTJk+KqNg*Asfw{dbuVd?YJ2a>PsV&J32haSp2& zj6z3<1#tHtjs7L?aUO;-S-11azFtunJS;6b51c=74~NcN2og;^Wb-3HSPMW)L%sJR3GVZ&H@CZeovmRwdWRlMD?x}M-6MwQQXBIy?*j;A@& 
zEm{3r#N4>mPg`+8yqp2=(R~iN`B64I)EXX?dP-C#+Vc1QB~Cf_&g9W`lIP1I-*wgr zHd&)AF$;knk+7}OceQA?h>vsOM7847P}ArBGnun(RV`rQ!%TNWYR5)A<*!prgocqw zn`z)rvh72oL1?1-YUcQ`0V0*7c@1x2# zvCPE^L41XXhb7MjR8uPwX6rVC=S&N^2hb4Yu>U{~XwlNrLmK5vJmS|E_cX63a^M)V z)zI4^ivL}@f+a{tn?vt}p5?;~vnit8sYuMw*6EUWZI~GfTL3{ozP}DJA7tW#e0s=! z^im3Yj0Xf83+)`ccz)!oE*f!sG$0zL5kRNHn=c7CfqQVY22FLaE&$xvNg(`1AQD~1 zbF~45154@l2_%|ciE-(34DF>i^Tt zvoa)M^VjFyw2@)65&1KEh&WnNoB`4L#GIj~OD^kd;<1TRyM)ct(kuk?>!(!MpV5%R zsb6YqCV`FyM*LDF1=nZ(pTn?EzmImsh5R6h?kK5rUj?__7N25URep9*yXc+<=y)^_ zt>v&k)W^=ZHM~NTC2QA9c>l&^K{YQxF}hlfp|1lXBxjFT!u(|F1dC!zLcGZo0Y^1z z0!Oe|J1OEE+*wETIu;OVTsXb3hW~Ze%uk;$O-N6$+<|~u8N~+M!Xua)v0$|ZKf%2% zMJK*ZklYk*hXT;$MnwYx`J-+&zCPO3=11j7wO|9oS_5J3iU^4S?TxyG%mJr)$ z4jgr#4#*&BFL~%6j4n-W$#Fm_;q^90VMI7xPFM-nm2e8^-=+4h0$hK&O!J52l>n!RvjJ`4IYkzfwQ{EnW`W_hgX#i zR%D=jO5PVMmR(^xgL2MBLWPEvCq&hxwsf?IX|&$x^mDAMbb|kmtxa^OxGuWj#-wq3 z+k;T7_mDFRDiwtf`2b9x$4tvfH_MCM!(+Iv$q`)%K+7M*#gJ{pY?n38Xo}r!3fFgP zlL`8B&g+>hMWYL8utR?6O^8^`rU2z*wy#UXK}OI5zSQR&f@Mo+^oJ75ow3yDa&#(w z07xDocO3<}Pcq=j-KO{@%!_YT1My^PGm(Ecz%FBQFd*bKpfS;O(esz5`Kh-Y-ijdQ z&x*fTNf9!C`qFl;Orh_L4elyt%O{^s%3 zU2w7yiM#S_UaLn+jk5p5ntR8?F)&md<+Dn(GvMoQ?7c^`_dCgg!3F3-;*JOD(B7^h z95!92{NsD%0r%5zN*Ma2qBVdon?X_;bjPHnOd34m$G79Hya0CnF_5tl&3#);X$Cek zU<~-8$_jO%7YCY0w+L|0WBRXPFFLU@bKAqA5;EP?OrJ3mpqa@DGpCYT5FcqX_$Nd# z+L&d*V*>>2=EOjHCTr2fHSF&N_U=}=1$+u40S8#t)BYX+yW`A!c5rk{uvqMOr@~oW zl}F;*KW#w?ig+uS1t({QHDlN;-m^B}z6AZ_A;7z!Z!g8Q4DeU7=e-V3aXa$`tvaBU z+fbYZ1J79-m51q%Y}dS0gxroRsG?!hT!i(!SBmC9HeHuo*q7*DW~y$O&*JBfG=I`% z0$fM=EALx;foy~dxzk%>BLg-D8IXxM>XjqgkDabk9&2en-CLFEL40zxM;=cRRFEAx zi=+9b3W6B^9hcLs5TKBkR`s$O5x(nF74tV0%ZuH94D zWNN~DEBcv-S$9j{T1xH$`E%76i0UXwO%s6aa(Jw(tS@5>go{{`@spbY^A;EzSrPm0 z5uuC+F#`^D47AcBEwZLhZSg?hF|^nQb9H#7RQ|Ag^7Ld1@m=9NJAbBZKB6pb*;qwd{y%}sNWmAc zVj|w_JaR;#XB8DypG>+u38ne>Jxufy{vJuuZ`C0cWk+5>cKS( z^OXKq*};(VCjv=1#$ZSO`JtcGP=chL7m9Zb6&Ud1y5T0oM8e4;t^w!%{ zGCKJq`y9kbm(^HCS>R$+j!3jso*#b`zS!tiw`wVCs0hfj2wavb6NiInwgR?=@ixJ8 
zoFAZdy!UjP6WeEEEc^VwLZ|2d)SO2(M+bd+>T{c-6)Ms}pde&VU8nwtc=M+t7O&9(*O~TO@;r6F*&# zBgv^li5x6(5hXw=vT?pq$qo8f7&4)NgrIknyV{HV)ENE-`-NNg=8NWxG zF48*blQ2re{=%&cdc*Q7w$bP$&q7 zM07TWF9Uwe>w74X;t*zSlPzwj67n!NLHSHLo`P>V%`hwBv!XPB_%$OICH3-QfG%iH zQ4)Zc+agPZk3-6>wIn)UyQVnq<4Fz%?_mtq#))miusjhEAL4!O3PpZmmKT8-fgNe< zTLmRwC1g;|bcRud@bcegV}>{{PYy#U1c>8+H3}{DvuIjrMSvuc`6vW#r%2|Zv|*QCoLUes%a zxY$6WG8-*u0gzc}tLfxRX*D@9KOvhzw;u5L!$x&zHzhrzs+6qQNd{f=5gi(Qtm%0e zXGM8(Ap0X+*hm^~P=&shO^J=G4xEA=F&a>sLo6z_qu|-oGnKB^=I#K`XT1G(vX0|6 z^V5A4_<_;w;O}+1Jd}4$7;)jexII7FVqFM;jz}3(Ft~SiYOV-0lq!;kl^$2(Tuj9F!Ndzf_Jk^!y`|%JEsTE#~YN+{BQ*B>Y)=jGL^y5^p57hr(Dd%DVu^dRBz5 zp}q`ZZTk$8n>hsf)u_KN}X*>;D>*v^;gy;s)F&S2fTy>`cwsYQs%1fTt1k zQ$MRUCsN3obEeGzQr_v6M!;(XPF*_Y8+M1AGDLVerM&aN+$AZbpzA-wkBk{k^_bKl ziMNQQPI)oOl&CY2iF3rDz2baxz-y3^D}g(x%dL|Azc|R}1Rr+1ePmbmzc%~_;z*Ud zQ<7Qev*T8UhpcJ~oi;EgPQ|XHpJd`Rv2*o~Ox2bSq=!XD-Qb0hTeWRAG;Qb9on>%n zp*wQDUo;()sjeTe`jc`uk%UseN4;goP1ja{eAU?Rl@n(v&=47I1Mra`di`m@Kx>2Z z8#vJ$>*uqfyRFR4_|s((=9C+WmkCC5G3Q}w7U)u4liYLk6&VK(Z;ky4H;<}E!C85( zzr?!Kx&HI$)b2HU>QWUXH*B$M-BEi9S{IwJ)<13+c0QgJ3+)<3vJiQnc-c2uO~luo)lMLJ(4mmkr3ULj`tc+sY2W$+1>`5YHojcH;-5>*5 z_Wivz+wtmJ7A zGFvsa+wXg))Nv&gJ#R!!tqid91&>c4^iAcR80dOuicBju7ZMjSr!;{>DtO>~_e-r{ zxz)Eo1C2xlJ{Njq396s3%(x7tkCUsc!m->NII#`0*$M6u&Bgbe;c?DD>z=Htc2O{E z$j0YFjV|tvQEt1PBwcs|T&y5L+=d;pXvDBeriE=^ZM5O3=KOX_Rn0x5^RbFq4q0JC0SD9E40Q+&%()9om%(ZzA05Eq@~4 zPUuUxEef&kCtFR74j5u@&!C+PDX*>pVW_W0rt2Xa-xS4jKw0-{#Kk5b-9CcZlDBfF z@N(?7o-|;bh#FV-C7Bdcg&+-AzK&CoWpv_O#FQ|#Hi8p`?H&L|jR*^mj)k!(a$2x8 z-$RfrhJFgaA`$9_JBPGcaaE#zK0_a$I>O=({QYu#+QAH`YO*M`q8Ulp4-#F`!%mjC?0e|f;4fGpG!eO$Ac95Z}J-4tF+w2^-z@vM_r zf#QxS=7##X^{Da$4T=DwWTdCsmeDeoz_!4Lz+bx=WOPW$9)bV}bVl@)eWzCkNxC-; zHN=CfW<qgPB%o+6-l@{N#oK;-bIr@N@QN4pPf-G3p$k#)+?wRq6Tc@mC_3Bg(jFqJUprzULSgOBbMz zdY-~MSYYZHXN}Zy6;ni}*hR40;o>KSZOwN~J+pL)ws1}f1*ul->$AR-jGHl-pBG#2 zrsX8842lqMWEwIYF8IAl?>yf`+e;wN=4h~9yH@P`~^8XPjI945_RsTEpwHt2*@$6@sQ{?zwROeAMU`W

Ea32Mt2N%^9Jr0`7j`A@ocd_3~TJuDb~kA5__W z8%dyF*H%DNj;mwGolU@w{M;VWcFUzv5{4-|Qj=fxP1y+IJC*?1ZXR$y(WbzdOj3J` z9V2v#RE@D}!G(2Fa?PTw@5#=08ZJO>WPPyYVf1qiJ?90oUw}&884GUH=>^a1ZPIZV z`f5y*$95wPv@~eE1VxgaKH*T6O{ti~;TUrfAbZe_V*>jUwSNQpitIxsSbXC=gv8m* znAwkAnl!)bOB#iO`t+hher7jybDqzjOWBZ_p`sH}1ZTKnyohi(y#?x?;N z{kLjQ0J)CKjfq&rY^h6IHVBJM%D}5A`^ao&MgxJhpR?H?8tqtaZx8}ycC64r>)i}l zB}DroC7RvZ^qroV_*khrQsaS@ymR;mbvDwk3@7k)q)&r~0+D8>;FPu!CWCHBgvgkz zqJL?%Xo`Y+VIKxu5&N22L1a%*LCI)?NX^78!nDPF&A`_SSO?_-R}$?Z%`NYJiL)nq z!#LxiLK>d9T||Qe0$}loxLSS^$>6LVQKVQ*!-31D{;Oj@BtSbDa=`g6GpR$AZ6Gqx z<9f@QvN?z7za^1G?7L>j4EkQPqL9gtS4cs)RW|=NkNHLLdzxcJ*}E=;vYb6GU5e0M zY?Qd-M!PQsDwZ82E!cKf1G7~icLPgY4jk-~6gkOfIH0Vid}ev%$kObZRWwm0Z;0f0 zbNn>h06T8r9{NUz(LNGfm3FJI*nR>ozdfjb=@vD!_Dr&%kA9aOhMF5N)2n1{@i@X- z(G2S~?12H@_86=H0+N^}o*c)DbtW*&?)29rFD7ypWLnWl6JFPfuQhv0LWaet_>krt zN*mys(seZBlIRO8rhHz(S>g0rB`X5JHe9szJ8qovNDxQQvbbm)trLdyt|ddu=ZGz~QD`Ba7D#TaoynBCK`L9`5*E=Xd_ItE#rKY)ML4UveRljxCNvYG=!!m zcwc0dCI>yFrBF!18h{gP*5EE(wlPK2s9IyA=72Q8lz9v~l#8F}dFFfG3Vj$d)K z9-VVEq|bxhInwjJ=g`lq>4ZvAQdj|NkGIURY5*1uVgys?$8MCwZ8|(o;~ea7vNL&L zD|@tHZFHs%vABK*4^!l4cIq5nY9l~Se%=s7g)$tOz9amZh6UkBBM?VFO_t}VLSYOP zTc{zMWUfU3S!S%!cMc3SQ#H5Z!;K7%_}KJX-z(A>zPBiY_+GDtc@vs8wD$tAw!u(H zpAe=mL4EHy1z`YH^A1lnujTcieO=UgQOdVgR|%X1!h?3}S~UK8ELIb^s?1hn(~H>f zDx+~55ASxmCeXTB5XLj6ASGGtt@ZKwzr|OnM>P(=R@DrdMWBf-z<=W;XC)BO*BM5? 
zhbU8~we5-ra-2g!(tNJSf;5t5hpqN4mjlA!G&N^I0L^)n4c7#3D(i<>gQ`7M)v#W~ z3K9hZsK_xUA^gf^@=p=MHBz@Xs@0B92j8M+#Cg2YA0)Ty5&p`iI1qlet&qdFT78i& zO$W@h*KFU^K^jBKE3@rwt>QY9&@Z;d336rxKYEraaVI_Q$e-T+{uXW4R<-)ku=tFsfU|uKPkHv$P_k6%V47y+4$<7q4gAb zGh8OX2}-WR+a>Na0Xw#R7PvtgqzU9^Z{4#zz;OZmSmeu`fc8MwDp8V`SQ$_--Zi~v z%5R5&V?UxnOx7vnU^RaABRIn&7sd70pZH7|t3oqy_lTGEqT$rUZyNJ|Nz z!MrKP6w|@ySg1Bkl1wNwbH7BQ`!!e_Sake-rOdxoLRp5Wn4mVM#k z8zd2{V~f8>T3Kaj=Gll)bmqYcfhp+a#G>=LEJl z)DkeozB&|_3#cBw8|-VU5#cADV@6bF!>ITa3r&#N59zYP5K#^63c@(Y{xlx;&bSszzU!D5CUAU&a4o1=WB|aV z;AC@W%W0P54hgRHC)u}B6`E8daH*z63VMx5$ zKtRMxQpagpgi#@hP&R;CWO0Jc2c+Mc15K-w{!9@ay`nOq!iC=4_PX0E84RoFVl73G zKh*>CMoO>FA$?5SBwv_y24&SLPl7k+5^->)FG1quq8hm*=rp2Z?8Lh!1oXd z*U-1q)iQmgHiNsha#}^9X`9%$9iQHfZdtN@5@9-`)dqf z*Vbrh4Vq6UDWq0WdXh=UiWKB#YCtk@_OcaY?r!vn;tr0gt7U#t)jd<=Cw7%3uIL72 z4OeC}tz%62&UAmoctI=T6H0juhbG{Jo_ESuoaZO`8p)k%5!btnO*C6$e@`*VDa zyuQ)&zQs6rj?bj(IxEAbDy78$lQ3ZEBR#lQY0U^7hOz<)YU4}>@ungfPGJ2L-VES~ zut60TgGFOgbu?L0RdR|+!lT#;s-6y;fP{Y1x`>iG$&pgBvILF080bdrtt|#rNQPj@fh3RG%Apc}A&Q1DKC(lua2fRGAB?HYk3R#C zGXZyjp7b%Mi>V|4;$^W@6<(>3wK?_oq+q_z5;}(S5fT1!MsVf?`UpCXf$YRF^%l+y zc+sBZs;5G=6kJ{4CMAHg)ve^XOTnWq-OUt{eR0$%!Qui*Q+P4CY};aHEL!~!I}+XE z3`@%vSFC7PEZdkI$OJ1bm-3pnvjR{ezDi}M2lDz#=;PovEI6{ETFY=PEqy0EWH{*# zsK9En`a)q2&fI8LA0bEAThHd_>LAZ)glNR~Mg0I*+))L^vJ*ho+T*bp5+eW5N^j7? 
z#;VI8KW7CER6c(lO6rvJti?Vmk(hstiyr~n*4*I{5QmC7w+Br&?2kjE^&!iRnDDeY z#ot|o%CR=35+sb+Bptyh-vZCZjXRa7aX?yh^NJ%+cWh@+p1Of|j~^2t8sY!=9KZi3mm320u%r>-f}b}$d!hl}jjZN%$i2mz<2%0~2I2=#R~$Q?zr zYeWzo;G|e1m=Q)A%KZK*;kENm-sny>=^H#M*&I(V9&dB|BK3!r*4WxUR^yr=9O3Li1W2|hDB%KjI0aF1}0ly=tAs-DL-jH3=FY8zN^vgN) zJQGL?1k+`gmX@a*mq2=*vyZYrH0znK)*f5iytjGD%qozqF`Qqs0;$!S2t0r`bvASr zsF@PT4}AWPMT&k41u~d9dO%_PUm`_`= zQXrRewQMQ#2H0ZOT+Aj`4IBli2VtY)*{8Ngcb+t|Ejz5)OgC;-5WKFOOvvTk!sdR3oq!_Xst6vtf|aNsPE zoH{FjbobV3Ky8^+I(_a0M5Weg=x5vHm=J;%$eo!$%0zC^h+Hs<5Xms`An2wzW)7neKT3}0 zY#a?`DDarcJd7SxGKc)tkjRez;9qo=&{bpY`#JBd(AS&6vWvlvqeC$aXB&un88Sf- zF-XHPg>z=0q2Q3kC?39Yvm01wz44rl&@PbM-OLK)#^cGXZP~z)K-E|u^fkP~V%;Q= zF)Yk9Dp&cAAb|i0sL1=cTTI0r?FT_gooQxVmu=$YAS`ba|_HjYFcFD)BR#g`yVgnJ%*yd4L%lpwA! zOsD4*R^?ja)N21&?VrlAsg;k>!sn=N+T@CCxDfGGpyCTca#zl@PIx z$51Zu7>H`FMnQx@(2++z3&RCbW$1v$oQmO57*)y!!XSz($V5$SCe}n)Q_WcOvF5@w zAEw!`Sf#ou{pvngCA%u!6_P;mu!=WXRRA|Y$iHY;-4&7OtU43)BFa6Brcqd-x)fLlL%ptOS=q3;|DjS>0*Gy7YRaH7|s}cy-QC`){S}&V=S;_2- z79%4(5z!FG)o~m~4@0C<3m!Ni))-1nAr5+Rg(${h3{S~p-s%kNdaUc9y@P%S={YEn zdJg_|mQ-g%b!MuXvjc|I`~&R)t7kyWryaIG9F1rtxDSp;0|!_FBBfnII&R z^lRu-+RlCJ$6=DYC46hFfk&K~D>SlZ0LE>X%5D0G(@KksIxYXWE(jh#2Ji?=h}wP? 
z^P)5!xFV3Np;WEE5i5n4qP@qH1#rP77VJeA859F@br4)5fChs~4)-3>w5afOHPdK~ zPTeWpWX0{xtjg3qxJWQ8>~w-jfH#yvyenK)(NzYNG(WVmIG+Si)kq;jdoAILkRgIX zKSYkQ_FNL5B`8{QVnLmPM0P1@SzJ=2q=Da~Wdd6~NcVe~1wY zD{Rpge&veZguhi4K#=O8vJL6z2_L&VgBPja zkwXryPHh1^Uv`)Q^+nhEXc#dXGS_8}&ZLs|S!m=41@;4Z?kmuAB&WvxQ!OP3YgAWK z$;NzexEKAAa&jlw%}g}}v`zrqHvAt)sw-zHfyZ$Hr0`<2)xeW&IQgQ4b0u=x(m(uk zS>@@cy`0VafS8mvLUXjrN-GuGzHX053tzzL?KUH;MShy~l2alo=71AVeTUG(MQWfRy0yuS~&IZTq z|HXp`M=q$sl=sktP7ic@#F<3&^71|!dRajAvEFViccu30%B2_!;vsI_r;v~v!f_Qz z+U-WJ)$mmgs_<5{f(SkU(BPCMZj!}GsVYTAJ2!$gtThT2^HT`*uj5M54hyePL_=%A zyCqCrYowgc!F`;Sk2_6-jr`vp{yF0(osnuGIQ!D7U8h4fT4h&Xl^5W@!Kb`*i|~?( zE_5L+BEVEUQI?IH^BW=TUiX2C68hHY7VU4<$bp6M!(qk>@Y!pnu#UOix(aTpYBNsD2g92Pz4*C*I|H4rdJ5n{c~X!%+x25rd^sw#$?Q~fX)Fpg zj&+5~?ie)!b2row4&6<`JywtS54f!|EBg#wl*z0Fw?*lI@*id33Rp0_5I@oxF@jBI z285{~_jU(?Y!~YA0*2Yuts7Y>yaeqX;F%b~kwM9{L@uotb%p|=3_KNP2M^Sk5Q~f$ ztn_g3KwMAP-id8-^#ekWI_)}kE;SzVH=ocHWPu5^mH%!shm@dcT{!i@jJ#Tj262lT zDQ{^5WhCniH)S8G?^|O!J_|G)lWpKbl&0+}h*z?(;~FOSA1tK>v1z+v&T5dTu}K`zLhzwDbZ)F=Hj4xS zv7g(C5+n#{*ctw{P~&YS)gyjI#5bn!hG+0Uyc5Eb?_;8{YsmOTu06 z#DI=tDmYLnVa0tw9u3r5<2uwebL<87@G$NKHI`@hBP2E|0DpZyl)VuLWtY+-Di!Iz zq{s~bR5SpS*Q5J^OU;)4U}?!cM`G$tX;ZNqqJE@^nQe}-70k*&o;+kIhyn>2T8QXu zWQkpSJ2lAbg!C$EKi_Jmv4hdO2}G8sRmvb(oH4-oEY`@1(i%k;9jL+0MWQ8{5t7X$ zIpT7F^HapOK*B&^)DBw<5cO;_U``VR-eB-t5D`Bx5!}mf3U$?c6Qs#Ts3>DC_=Cq_ zktl=kGJ%K_0l{9^R2~OgG-$(Q>EcQ?$AxwuPG+En?r?bNCxt<1x?Gln+a@i}8 z^8x@Wnot}pM_LRjKoX8Xc~z{DNsecHv=7Z=Pk$Gy_6)^&nW!Vrc<5QKxRFs`;`1fdY-SHS$C(EYw^j(7c<{Z%5^$uGI z0foI~{%S;`72n<4v>HorHTJNtY6gO-#`I?#{v_>8oMeLlO#mhhxEP<=Nh7cz*Z7LLXhT|nF zTCS)PS1w8txAr$+Vha*Pf#w1tA@HHKKcOJU_n=%|A`Q;`8w*+rRT}=)qjvTG?m+d@ z(bU3TfB~s;FT{qxb7@4?pmkrMUTx7(E76L1F@<&5tgw{gZAry_O(EF572t9!>%zoi z!Pb4GUQ_NDryAsRQS4*Av`q;h^+ujA;BU)?2tMf#5UMMwrA>DL&P`|v7BW1jS-8=7 zy(Y(98z&~f?w@uM&nK3eKd2%eRnh{?i$EpyC{w~a2q;KO1U|i7_V_sg3{YwCBvGMa zM>s}O!Y(^I*DfuN06R8pCt(XA#InC}O5a1Q*GiG(YKf!N)Q7r=)YZ1G*|+wNav_(H z_QK&U2?99q&+xP+ zmHOwwhcs%E>+mIFE(hL#I{_MY^#P%L1<+1`aB*43Eq}?7tQ`jl!LsL7g)cZdMxw0b 
zHc+ZAP@16@4Cpka45JXmK^jXML1c1IE$_^i zHk;A`L@TS;70sdwF{~w>Dok&n{c8jz-WX6ZrX)fFS5I(Pod4?=2}}dkRe6;DqO^3! zE?piFr8Qno1Vc7WjXyTBXkFOL3r#>1AO{d03b?Z3v7$Q!>n-R+$?L5wQ&H-*Byu2_ zXCmnG1ks?=8f76kh7lR_gIK-)$6%&S+JbMPvc*qm0RRIElYmk`Y)wq?L z?EW*0kVp#w#xs?e?slU?8A;Zjih%le7HMtsq%|Z`Rsx*IKZ7Gnz4Yy8bviuR7fWU{ zg*IwdjTM1wk(Q0p9&M1YbgeW_2YS`4+s?nn40KJKe&jo&9M(Dl_GjU@ab~avBvW8F zX7Y0=<$<;jgm$eerfSmrAL}x^RJ!S+C`QHPiABL06j0+3>bh|PH<~n|Q-m4t@)17a zceim=p#cYdASFlD|!Q@Kh8DD}aQhy3 z42{h&K*MUKQy-G#=$|uH=B2T{cgP&m*%HTP*0?QSVtVF-45WM}Wz2s#OyGg0Y7Eo< zqo{RCVfqOM6eC+wuob%%`b5hIzgAtONPml^Sw7Ho3bi_7tU;A5X?HO)h_1}{X*ibd zr-FTVdc}>7Oo(|rlgZ6QQnLMU*BDWoj+;;ZD?q8!$QFyt8tWtz61mBBQhk$5oe%ok zFiW7}c$mlo760vL^qn0m@e&q95mN5gB&uZMTVnVJ6`hN!{$g4zU8z7qb`La?3%btW z7Riq^+3g%UAD%iJGG$CRbpy(ZyabS;jwVxO6R#4}4k!plOleinVsJ$$3l1QPINBg3 zWFD)Rt$=yjQf77UC1zo8;Z;HoQcmy1oF9@-Zmfgcm_MZnt>Ux_=6-u!`@v~Ag|(uU z1pXn;h$=G?Iu8`fB;r%z2j>2`Exf>V4B!ejLbb$ss@C**L*dUq9I60Hbzu_`AXk3{&rfOQlfJ%qpx&yIjc*;Kq4S zh{KUoJzyu{$pNKpIOh8zH*}Q`<4dX+EyLn@fnG`t5kx?X{hG19WY`)FU!0C3B54o2 z?!P&Yw%J*gH}sQrp^JkG?gZyZ(Ma_CiI=wp?>mY|E!EiMQ%6G0!Qhl^B`9kd!Jm8t zwJ3p-r2TR}fd6b$p;}<~gxPh#SvbM&V^ju6$3URKI73~2-#!^H(4er(ZS^?tTej_T zJ7c+Hc!yP5x#+dId7CmLEDkD*N3p`zWY+@$j7JijcWDK72O>tMZ-On{BFNoADiDT6 zB#Jxfc|!Tf>HnKtEMn1-M&VR9o>0SMP~HAK7FcNTT$nN$FK`HhEGUsd?S5m;s-t?I zW`p;6tm;NAwZ69-aylvJW6pdL_(bZY0~A_1;RHMcXTDoG2v+kw&^t25sw0-u!yJlN zjMSASwGfVQKF~-Evc_ben>Pe0hVM2vyQ*WgM>|_6H}4O+ANCJ}Yu_g0$2?xXVr*9} z3wA;Tu*f=93Q(^jKu*kja!$A;*PO;d3Rd*}>z_fO1QCnFwkQ315d0Ltq$ z9~@v!kpsiPuBhE#vTrO}=~Lw~)(tf&wAVam;Z}+{qU6mCE5wAlb-7koq1H{Rvh1 zZY?u2O~KTksb7w=ImYIllg4Lirc~@~%KO8&tI#9|XVNa{=1tt+Qi@sFZE1Jr2TN2* zeR^=0IZU^Q$ZvTGZBPbg&@hCSLxJj6@a=!;J5*UX$S48_44A+jFz~g+Vq87}!DAQ)9p36)iYoRfJFj;evP2O&6EifN!lGCGBLSQv#iLvpm* zl0zelfzvA@Sw;gtwyVYHW&7Mqz%(`-)08jq5VP{-|q~tDU0Ry@rsky>4v!px{84l;=xqQfCWv%gbMrk$}=bD-vK1`tR7T4e2 z2)s~4fvBR&B*(Dy3QI#(EL5>VHB)r~gRoT9s#41&wOqgelcbTy{stNOH=O>u|{=2=+Az)`g?~^UKD9Alt^q4Dnx-c6j%XCNp^jPb>HhPT zdXXv*Cn41sC883w6vglPx8VYx-Tyb!b=`+VVqD7SFoY2DLhuOLP24o$Qkz0Fb32D% 
z>>Xa;*5z1h4b}~b)0&vq7>i9v^L=?YaUDz(jl}Frw2I23cBzSI=7s2kw5NDRT4yK( zC(wM#OVAHNDj_Z54>}LB3<~ri7@jJqF@$7(CqLqcM7MC^?fX9LyFO0obu7rcQV0wF z>s-#keZC_cy}Nz4LkNn@=E*@d7zOM=X&k&~e?#c??%V#_rKQs#nxR6;ju)^3A2a$O z`c1e~KIHSc_8Yg*R>U`KHr8%|xS_(9R)tXiv`!=>oISeJj3YKP*s>>8GSV@uF^+fv zWAb1B@0H`L-PMt}Z^SJEbTaVu}aregHVi|)(Bhdr*Xl3KoQ%WN8@71EHhx?0R71)0F=w(LQ;`Wx zQ1~QfnO}KX75t4ZDdgsqS6Gw>N^Q#O4E3e7p$sb(pJblIxMZD-icpTvJP8r$bbMo| z-IbWfUMQ@xw}&Pv_9P^dnP|0=$rcfFysWM4R2)SEgsjM&VzV{WAN}DbOTQK(A$l_ozvqSnvt22+ZQLtC>JqoQD~84 z5m(Vj1<^tiZSd?4|H9c#YY$D@Xb9FJUow<3beAXs!*BYK4#N$DA*CXoVIadvv@n)p zLz^=wR@hWHmlS?*kZ^Ej`UO)(luOV?P&&{naQ!u(?*Bd6N8QJ-Lu_aYA6mK9tn(+Z zAB!;iV(!ge#a`&S%4uil6h%$J43TpoF(lsL;kDk;oywiD9iLg<$<@{IRMb*r_+0VE zm|Q5iB)NOSj>EVG8e(pnP-XYgAXkR}Ejvr(u_6-}$x;i|V`pLPO`N*hv zG?@6ur;($yJSivh-5ytV>-4W5XW3>#Z;^{5XIKR!ou9b z{P*+m-PhB{|Ij^b$nf2Ly?)PuHJG0X`IxxRPgvJB_BGd3Q$8cSi1~3Qzi67gXii#U z=nSku4`ZMFd9`>Y0e9xro2nUTm?pcY(We7vT4Lr@#YZw^Il7|TRkG>%8s~!;g=yUY<~yr zMeEHqdS2w^&9(LA+uhAI?J?aWU2$E!AY|NJncQ_;sxivRrNkA-#cd0cm>{>MM_4(R zY-?-FT^pfCwNe`~5jsI>H-UVX!)9kkC8#nhLqf%5W7a}8K2~IGCf1au&m>}_E@JxR z4liaYP%n>AUQ%q(d7Ne_8ta2v<;8usMP*_rixIO?!}X8y)0k!kSK45gxh zONEM^E|4;l`cV2%m}X2Ko>6nr2%>5%nj;z-Ivi>f`r-5Q_mk!`1kY^Gu%4kZo*Y76 zhvW_U5>i8Fb*HFqr?zwk&gNXs&>)-`Hm5g5aW`33HElInG%aLCXNuZpu4G0ZUcd~- z{KO2y)F~9q$wU83#Y>^fmP^q|%MOO`sDl+T?N+1-2td*T16Kin0Z}Lof+R??oFfXG zdlNtb!r_omC=iQ-!EiVb27|#sFc=5`1p{Fq5DW%`0-+$u!5NLk1yy*Z7=xcA_j5dZ zF!f7pmzAe9Oilf!_I!dQ>?@q7bFu$G*rqh;IV;>Gp7J&U_MPxg!$$Ixau9**0fqhN zlt(kkU;9^2Y;tdKHh^UNUh<+Peo-gsUSG;{v-I{UcjYO7IsnkatNAXriw4ouQG z2Z(vvR=~Jv+XH*z<2)DNzQus_1g1SlX*EjK>zbC|^8p{b1Y?x8(Q|_RXu21r4G4PM z!YT|qvomJ#Z3lLuG+9c2Hf}w>Ul>suz9LYp@phY~>=vQmM3|P>1btm|p)~nKkM&2N z{gBaw(#(&7;C+zC#~ut zm>uA|c8uzM4|wy-!<)x)=X{~Uc9@rsxsil zHqG9UfIiKeV+g6MpWb;6v~K90o~?U;p)4`RPR`2Yn#i$oEc?k{L#~)9gK63IaF>t4mAuo~Dr#tHW8C3a<2RF33AICR&ZfPfZ!qheN=?qr1b}eN-yF^S zw*mdmy{z(ZYR9=7d}uAthEWg3Z&ys;5gD0lHziae!3JDbU<0ZS>BH01J(WkbKy*F9=D&V0>s7hD+M!PZhFQ%V?~TTaZZ!B=Er)t*Dlz>BKApG^}a#&a{QWa(jIl@YFA(>y}2; 
zvSf4ijGnHUJ1@spu=Uy#%5{PpXRQss!$@O-TUu9iz8)-9*->8?j$jTMBCx2-UTnx( zQhzkmj{aw%8{vL&FCJ@T0z`1*C?y2{!Z$W2#_lo__l50rqSvCsEddg64E|RxLMpwq z@l*=;VNWS4^y@J8lJ)XZYferi@xgH1Y@FB+oG`HcEg8cJsZcba|1-ux#7SVa;jZ-3 zOPrZDH{8x2HZ^hFX_PEhZTfOp?iRLZFdiWyq;v32!|V?%R>3FHR_MTJ7JIBzV4Kxs z#8nt_t*|@Hn{4bq^J1A_=Hy>>bPsrt_h$GXn~Rj|Q&GP#U}V{wtO=v z<(|1nrHxbYXwG!iFGJsVs!}P+b^m1jTxu<`osfx{!pC4v$Z1u#O8=7v4ecycUH2pv zwI38cC2Zt@ShET<8IB?_~zV zQ?QcXQHzw+g98TD(m~hv>$Jy2y@f)J+^TA>Q7Er9GC7^VaBsj~a6!QhzO2?L;P4r(6Rt^|8ba)D-FSSL+xH z#dt~roMS*a1**tTbe`*}mm)-edOwVv8|GREMr}|&?rthDYpW0==>t&7fsMN1oDOhz z*h1YZnn7T{)aTKw9EYC3n`dZtFAPz993N0HflT4v5uYcUmw5 z06{SaP;S8hR^$d~0jrx_V}Dk}mcIZqT%^$(VShc~CN}6o#97md47d%VG>Rp+<{JU% z5|abIl3TD1S9-HWID+#WDjx)rJQV*qY^{}T7hP?qI;4=#<;6zJ>`ZT+ZL@ybL7j@H+0Fo_5Hp)(y3U>UJjU+PY=dkW1zv^ z3Y{wCLlJQs6l4`J+@H+m8WD_g z>|D+7$i!?I_R{=S>843B6BId^4V&8y`>qS~5Kct8Lnp9;T(r^)DBPg6aWi0wk>A=9 z3eSFlgec;m6}~SmTj73{f9y7dTiOv^%q##K!>arns9OdZfytSLRmM$&y)Ppsn25Hl zy39rVvbRw%FPzXJYhVvid%VLv3k|(m0{CZOzR`Wnl@HFP*?WW!lf%nJRz@ zX(}L2-DhB><4*8Nd5Wm?jdOOuAI^oQaA_9?Dl}yhE~Re`#$Xd4=+bXi<3obc%SGsd zp7WZuWWibY~(1~|9bIy5_A8<34Ueb)JQ*>%OUqKrMNCZ`f^k_07X@x^s~M{8QiC^oEhvx zNIvyYxN9T!W3B@Y)N(NJMI09{)I(GQZMY)-=GaOB1Aj8muI$Osso6{d69-s}0sM{r zs~(^eN64S4;&F7&5L?>H7vwWzU#;Qr4BN}gZkQ*fOzwokJ7TwGH}VUm-J}N;q#V{o zRTj@VTvI{mi8b4>dzj4m23N*tkaN&8r7bSuc;TM`)&un5sH)fr9_Ifk$E*=|1?v>o zk!AS(n|71%2n{t}CGO*JdpaoQojIg9bARsR*uD_`E0`PPfYzs!$T^O=R?wzbA2y0a zTA99V5=UrvxX-yg$IQY8z^Rs^Y~m<>*6eyr;&^xw7>|+I;Np!9Gn@nONc;86R}qk%y}Zj(4^ZIj{X6XWAz?_Ls=KhYjp z4$dJzzp<`9NDB-)^T~p`?#RVl7j6I|8IJ42S6=5wtNs=^^@Trz0pnhFyiv!X6~VDd ze3as=`D{N+XyKUS6+%IRRdU5bptHMUkclD7_Zsz`T&ccH7}<3mmeR!UM%FIIxUtno z2|+NNi94J@Fq{|5FMrE}gW=GH+Fm&D)5lIh+C@&Q1={#uI87-|KS*kR;T$Ax;piST z3}*|WO1b~t0$HIMaDrNY>Zeyei#}Y8(`FE?7fyf7D?vIn58@V%kpEP!Y~h#<*)qm` zV^8`P&N8tNuzF_w{Y345w&YoHE;$)|z!nZV;8{2ok%tkYUpd2CJLx?yj3Ax+9RDv> zh6tYG=!6?UQs3>VOlz$W`iJWY(;$g~I27|F15Q@k8}l0NJzr7opfl)ma$((pZ87ac z_lySjwu0_y@uP19JJ#GwY=fKShJ4R;+B$BZZ+gQf_ZzFAnugS)c@(rdS?Yl>_ax1B 
znJw;aM&&zTMo#aGz~dE;@*f-fx<_*g5mGg$e1b!i($Q$0oNwDzojY`X_l9&5t?FoU zik(gvWMhqke)E8`qyd~}FicK5M$)SH1(at(J7t+VaVgEZL}6xmajCbl_+6?$ zVeN+8@5%1{)pc4Fp;->=;&TueTc_^9AS2z@OE;0U z!}yyq_uy@Vw5b`?n`*wblr`V!W0i$A~wbOY2}j4?nt0-}I& z4>{7}=uiWaE6&?}V`*-*OKvRCCHMRsJn%g4QI54yr{azX<&wiGR!wdI*91-QCh+o7 zlViQ>PEmW$3&N<$Rcs39mNHDsggP@IhnHnSt@@JfD--IubPdiOz@0cnO)Q&NRKxqR zl~vCHX33^rJ3QtFho4saR+uWupi@r8-bJ|BX?^wuz#~kOl79aqMi5F~3Zm+YI5?jS zhS-%ofwn}8n7SmS`lU759dE}=!6jH1r5C#U}|g2_CB z{XEKHuxjaz{kfp<1n3T<3=loNz-b}MOo=#$aUOyJEdbFxR!R4^cX_PVjt8RYqW9Ut zl%IZb;vqr+d)5tBJfgW-NC*t_LK27vf`_JPAM0N^g^SU+8guY+T>AAWkFT77-_UD8 zj{3N(Kp|raof-WYVO0{I6gTDsz$-vR7A}Hs?RJwx`Th5#`S^hU>+sJ|8xK!X(@I(v zaE4l1?3582jEfAY8V*-{{8XpGYY#yMgp#5tR5`38@T5Q8V3X#Fg(ZkTN;37WqRc0KsD{jDy7 zPm1R=I%$knW*jsT30#mKJq)v1Q~&935|(tip6xfZo}If~*Z{f)9U%;`Vf?(alq5+C zHE%r<0tNyN0{>6{TmL_I<2Q}}QV=nI105gUUFkuK8qAKPgaP{vL8^|U7T8bVIPx%_ z;~|2oQT@#;kRcv|j^JU4hfL_burjyQfEQe;0l-?Tc~jwBkcgETZ~#^lhSQ^ahF}MZ zEFMC-HdYl65n=i-g0gYq@rf%NO78K(KYjd(bNS;TvJsz(c#w$KIpun(ne>^1| zS}F8ymu^!Wd$#l2U_sx#1_02gRefLr0HA$Q4k?!U>_DtA_`SR%0O~NV)}Be$59_mP z)FP=yr4FOE3lyRTp{9BIsX9$KjaoL-sAVaQTGnsW(t4wof@svzN28WrGiqsIMlCSR zs0A`GYJpys=*wE3S&k$IY+wMR)&*r{Wn%EGP3}xatxxhTav$;sc{^$Z9LF6i9Y?O* zsVNzWaioPEi=?=THkd#W_h1-91yeH^M1UYsN{0ye;x(A<$-#@kX2GUN!Lx1FL5(KT zAvuWktP-}vUC@VJUJLtLY7lrqhztM%5U{Em5`^T-fR%xDPbWhG423}kL);!6gIHOJ zu@E71f)%z{UrbdQOCbYM%qI*5$sjOc^b@+C5-x#rWu)#NI)a2>g?k9C2)_apfvmQd zQ9y&R-QHHlu$T#p5;hHiVIP3TD{Ld!@NMlzNF4(-AOz+_kb~|7H?KyGsGdi!8zG{Y zAhH;LL3A+YYTD9t>v!eLM=62YBgFMT*-k#C@V}5WWrw9lUlGf3o7t{2-1R4l7UyU08|2b z>4B)p<5e1%qf})aGf>{DohV%7+XEOb@Qo7?GRz53OH>2&pRUB^xO82F!L=s*1EzBo zC*T2+hm}jCo_eKD3;9X`^l6lbMLAHyR+Ot?MDt$Y6{|JE*GXoza#JvQZWl25Q&cy| zbwNs$|hgcz5CjY?)Asn^#8MjLNyKoJVK^TVDq=6r`o|gK@>5p0@`U8-E zC2A$*uZxPm)`bK2m)+lUKWa_euVnV$e(Cy6>zDc;^(Xy#^atT@LchAx-_(?t^M@cX zdd|rYME+~R5b_fc1o{7&7&B+otR@y-S2N0Ir6hBuT}jqF3~i(Yl?Jc0j8G7iLC_DXSB=!0F{%q0P7fFtzgZuo~R`T5a|l_>TCL|3xL~JXRDE`m!bkIIjbE-DXSo$ zW~-8_POBQLaIn^@^r`|>1*saU;;2e03AN49lA8I`W?3e)*+J8-N$- 
z3r(=>wMKSP$7428Xu1Sqz0*mC*X~VlBuo#vO1{F)3iJ@xt|B~+jh*(iN4Zi)Kmq6> z2C7g}JL$Nmgpq7}>=09W5EFuwoX8b@_LY966|zoZOfpIeLNrNgrV5K>fY8ET(~%{W zJy02ip}J5+JY`xH0ujKeFruZ`*3j_Kbd+?{){xjxgdy+B7E=MCRG|nV4X5wn%&C96 z5R@S3iP@nKD!jw(9=)aKuV?U7?R2Y04N!AseOI46IXomh89d5iM8gnc#TMi7y(fhkpm+2;V})X5aozL5CI=nI0SZx z__*pge8@4S)mbPj3fnCNg+U>(fDlC_RuURwSkz`nP*HZX9|p>yAWEX*p7sO+3SQ80 zQHs#c7&P-8o1;Lz!H1Bsne`2x;tF1|3T_F82p;8X!%+ci9$E2dN9n*iveF#qeq0d? zq`(Q3zz24pa$9KG4%(W1*mQksramxzgFyOf!b1A&eMhnlw1M_ykIf-r5qkx|<$*!n zJe|BDaG~&Gz=eRb0N1S7LGOUxsowl7PtDAx^aA0X^tA6jdK@8odA;F0EO54Yrg@cl zfq7?nrg%s2JZZe+1w7;~UA$PlAbFyAgF(k@!^6X?5J(Y6y`#JfyaRTJbysvJbJxGy zxXZRn4t61wjhzt^_F&zhP+iWXR9)p!WF8Tvoykn5H4E0V7G%nFI7p*M_s*WwP;&t5{J65L-x3wa?DDAfPMhAGT6NZZp^fR z$w_f+%Gy#HlS%=%N0lr~qKc6ce*+Y*i)+av&%mpY{xq7!MtkWiJ}^+`y{+P}uwn4ex3wQl1nIP5?+J1S`230MRJr#Ty<&%SarG&tbDX ztbsc*!^@m6t9Z2>dPBeL_Ub;`le^@g`MVfex3ml}5z0&fL*MuMqXF=EtlX5jr ztu#1?VnE6{0D?tdC&zL1srQ`4a;N&li@XDB^V6+57Kbura|QI;iBI%lugP!CCwR)? z2=gc2nISdO;%iFEA)btvAbxH~!K2*e$s0m%9@-RVSMwBN)Sgex)?+_q-wSi1?)MsHL{? 
z2-mb&BVSO?@bWf;s^1N8`FtQHSA?c)pKs3jGk;^*I~l9UNa3WdOl!(zgqO(z3HR1D z8XxLy@<}QQBZ~G9ClIu-hqXTwhQS{4A^qy>a1e*@KhgNkE&G~sP$DKrP^L=eB8K6Z za>wah+-jUTIUg1uD)Eqp&?>-@$#GLr{h@FEOMeV8Ni!uK^RE2BUuxsp_$|qIrS#Nl z)_ZK0-gzzfV4!whjw9Kb2f6|`-R5|YhX*c0D2qOfrX3HL6j&QbFp!m`0N*Y9UuNV$ zbEc=knEv>-UqPRol}z2m-Ya0PBcj4YyMt}S@PS}0J!A}M8kvvr04S1k$cNzdJZ$(1}PQbp<=^)+?5rAy&ii}aD`yg4#CDVcn7yYYIR&RZVSNxAd5 z6_Y={@p{pz-F)7wUG6T2ol*h;;TUpK=J9f%xaQ*@j z_1K-E&)sDp+Y#}mJqy*Y5P$X4mg&joHVo2nb~*RjYPfC>27}%D^nncH*!ra8goIoW z-PmszCTQ{W^fU(DRE?=y;@&Shw?m>v5@6cZi@{IUdRr@+T0*bS@-jbx@i0chU#LOs zt+ogQkV}K&5MNtwLpENw(Z4@&Ok-otr@J^e8QEA!%w4J&kuiEiXCE|=wM(+#eGB^_ z%RK*~dQyTHwe`VCWTBUT6WqgKTVKWDP;RE)RBXEK_QK5 z)|R3VLm-V`*=~XynXKQ07mSUD%Y-rVDSuhDQw!M@ZYrz%s%g4vy3U0ZQfxH9m~6%1 zk%kN(X-Sq5k=!YyO9gPZP#A2;pwX=c+pOUuZ4V_J*F=k%NG=f;t8Fnn(j*5~;;<ChboYn(9Jnr?fp5vz}mJ5_i_U1+8@yq8KoWda^s~`G!k#YkTO5b6)nVf8o0L zneE-#;|sbF>;2LSj+a{n~Mn!dk&z-`RQY{ZF=IQi+-{-nPV@EJ9zF%UP9wsrSBN_DhGWh z%=zcD5@xMl79oF%lcPoCh*|d-T<p-I5_T3R$uR_HKDD_j{+u=GYPf=x@(c^Ed_@idE7)EZzwuSp1|p5feG6srnw9z7Nx-KNr(7iMHVB zG=MEW?D3xZaE;ahOA@P#!Z{!@UXEbJ`a8E@LI&K5%k9S=^9hdIZ-6I2&O~3KZ~Mvf z0^V&cl3VXU!LVtMh1g? zposamJj$k}2y9l6DdN?uM^I@dz_iZ}F;2Pf+!&?WMT%JLQun9Jn4kt9Xd`Ga>frF4 zVx>=_AZ~}Z9x4>KFIH1X*WyhNV|3g7fB|OuxaB5L9XjFw*7u zGGL5qsB>X5n070Wtcw^1&!qR+0rlQKWU5+kJvi!2csov&Ne0;jC>xr+C4f@U`Fla? 
zn~;ozl=6kfid9IzLE38VKnCLp-Y+KAFHy-+WWgeelD#1)lMXp(s`$=`I!IA8Q9cG5_8kZ1_zIP2E{BT*J@qY_Eki{1~N(U0kZ+C&MoZ zQN#a$4REI6Cp1iL((ngaGkz5nX!svxbYt*i6svF_ORpfn8Ge@XK0CZG7xzbiba~;K z!)N3uVJmDihvziLm!mL{yPibgO9hxYY$=;?LI%AuCVl=K6y1@P zrN&9SB@`2=3mNp0Dk6gxbsDDo@~E3ksrxZchLritmQ$X)SQK94lyZ$xE2mhb?3)>* z#63T`>lm>uM43Utds-xtnLo-E+(MU_U3vAL*vond> z^@4vQ;@Dupa^X_~(c|9E{8(Z$)}k^^!~P9e1R^J}`Fbd>!DZW~Arf+;cfLOa-bEUw zo^}c}AxY`sW37Y}i}R<|1eRaw;blm^)RRlP6x2&SIecjeKHZ;h3xqE949d?vx}F^1 zOWG~<*yI~mBD%%Xt32B5db8BSQ;b|FWT|IT9kUHlKkam}SL%V%=Us>^^)x9?D{wYH zbU_0?X-h5^iG4vW{Kr!H@}pKrhM*|&8@ zCqk%++SBseDR6I78a%=+sL`AHfT<}r+jwHqvxp7NEMwG2wDG_dI^9QSMsYD+|HjCrLg9vXJ^lH=S{$^O`Mio6fc zxOfgnmE~lBU6l@JN5aw3Gcc@-V&X&pYbD!A>1Y%`C~$ct44}&1YWB^yoQOLQ+Lh*F zp=?+ZoVaiurO$8+{G-0z2=4JMvrE~&PzOEdndSn2M~(?~9>LcT`;!D$L&!WN6h0XW zPDMg7`L)Hg5ej<@Ep{BCb6{t~hy;j#)5;YQ4mF}r9TkBp_w^1ZiJ7Sf6FjpX{mH|f zu>Lwk!0Y>z|laWbjUO%J2Ch#dAd_a2OsAmxk~Q z4QuGHVP(+Ds5U)7uyZ$>?i(;W49KvN&p`kRj)o}5>9tS-QoR-u1NLN3P@)R30h$H0mr_{-__(bcJ(;u4U8q z6*Z@&dXT8R(pu-alL0$94pmn+K0W;2V?||!zB`qULg}aGdx~!B+`uiU8l`>ea5^W2 zW(}CLcH7IQc>FkJb~17F z`j+UfR_ACeUk+B)?}687ihz;_$WR^?i%`_Rks47H2}KGzMM$E8&U1Jbg07iDlVA_5jofMB`Mqj+rrloUpFl`i-mSDkXc z>*&&h0<^LpqMB#8-AQkxF@$Sa<2q=3q7tAAP%s=Vw-d~qFg zK2a5*3eXrD*SO+3sDDrupa@X12v?Zmx`d7@$muyu$b(8ixg)rWC9Y%W9L%56LO*JQ z@s-M^*)O^CjXXL5ogw5w#hnxe1CD|&x+J22LLfDV8jgm7wN2H7JZlMP2~gwv5Iq4> z%a;FP;oiQ-4s~#l2WWuR4f5~`pjD7+gA^LjzAub}up_8!q@7SDIa)z8pa*ca@{d3@ z>o~3O4|dV~PTFAbpmrXMB;;lUl|w36on+XL7I-9!b70(mmZjo(TWWH*jbhrtudLY<*S zu%q^LXNGkdu9Nb^vz}A|XbMy5L=`OEGtCHmQiVlRNZFwdk@`qk#GX_Dsv>1B6J(Sc zP30jyWIhJPx@gG+%JNghJkxKQxP2)FGx@}uogzA^)z3~Rhlp5Db0s+EzO5=o#soKC4b$ZeJvfNg`*X8IAw11 z(Tp{-hemNr;ypq58H=fSEXn`8^^saSjlRs}hQqhTqzU{#PQ96HLC0agu}#-p_~nP63tNrpxx z9fMVylMAgsl>0>$oybsAB6(oc=wyz9G9LpAYV^cR)(0pmB^(4>)#=Hpm8hNg|DXLr zeeDT7YS7tfHF|P#qy0gSFY0So!!EH=_C&aE#!e1ZEpz!e9}9)?(I+J2VC$4i64pdX z%@e-UKI%-K3F-&0=QYbv$|FlVu_GasL5g;JJ^m5O#1Tam$fLc`PbSdj%*Un=7X+6q zX-I^7VBKiJCFxUdBh-YO`83`jjK{L1JR*AGz}zI$Kfy?khx+JV6qp_xH>g9rfg%j_ 
zd4v}F*+Np-IGpl363WZuG85|e7Ly7iJNlad6Y?XE3q)^rzDeB=1v`-MwT_=EDFDmOm?g^xq9$uv`kj!Sn6IRg(BsJ zVGd~_3I=F!IHnU7)Sr|=4>E4ubUk@loRX8ci;E}$hMpn2#mrbe=%K<_m6oaj4RER; z3SMa5V<-!u1fT#=GGeicmr67A5cEm(j;PZ^v>d&9)QvN2DySf%-UE0G{Vy)PY6^5B z0P^*24Ztz!e*tV|e3O>Ijso&Bi_GL8%Gi&CV}(*Eh=c%_d=g+cv4Q+E5Kt^qgHA&k znkx&p)A!@-#`F1cpUaHb?R~p=()V`9mx&$4Poo}Hftp1zVH+L+me3LMj5AbVwZzek zLA0R<`#80Y1E!Rdr6>O6o)v&&7P#JsTiRe8Y8_d4h-sqQ=Aw{~yD%G;R)v_o8aGL_ z6t+%$Gs*KUmPVP|Xgn~!@hg&I)uTfE28>^}QsEc-TCca>qP)~fv-Gg#FAqBOm(g~VULQyFK@Ee=ST+A3pGp#5<)=p0V6 zSib!UmMJTfDKw4|lE^PoG>oPl)4@(kPD++pTqc-@GW)3jbbLNL?}V_0;(Kx*ZNWuFp!2JSt8;xcqRG(!@u#PAJGU{YRU zD?kCql=TLfHzi++$r_hQF2R^vOD(lp@k~7VhO+QXfH44M*o8s2lP^OML}J`HWFYT~ ztDh&s7~&mL;@6)*^#&rKibb#ips>S=jbI=2*~p&w{swWE5M1 zW~#+6-l%1{_nUgclack*l_4T|fTVTZw%+{2a=y3Z@ZR~C%IAL`EV6j;!hyM;} zBC5*sH;km_?&Qxiu5I^G8pXhWk%2jX#L%U-1R*UbgL9X%mod{!{Ixql48LT1_*=}_ zv@`7Qf#1^1zY8N$59ebM9K`AFis&?`vm^6nJ{cOF|0O}0zuTwu!-&pdLpWp9Giqp*k9j3}`CQuOJg4%ZebJz5XSp~Bh)i_0XVN3ygtQ~n#U{mJ&RIh(zngA%^ z3DL?@TzES9=jY9l1)S~fnJV44W0!oE@&^TS_GsOQFG%6xW1sGC4ZG*LGODl*L53)d zMA(+YkjmmSxFlqTP$-ppY}!A&t9pdv;WvBsIdE7eXzl-+U|a>4kSp7?43*v1ox9T< zZcGf)gt>qmyZ$BrJ>hU@?$7c>5Mz9x${G+o%8<`kDhnE?_?WNyji`a>?1#Q@gJ^H| zP>??1Jb&hUaT?wm9e2ohK6o$V?K6-#%^~^nUHIX2zDy&f7-U9%@)5sIfW=-!UIEdn6I)2iVn@c$6tv2FuWZG63dm@i1#aLgUZ$tgQH zd)!Ish!cuWc?%FkU;#?)2|f?N(Exw%`C4CGL_|skdJozI(*x)ObYV28zy=8(!9)fd zwp}PB6ttN&=bd?dG;*UBNFXHzjj@h#AchDT2}ve6F~Y>OY7F`6M1tNN6w160Xjq`4 z2!j?TC?bb>FD48&j4&F*U<8woe$f<}V|&{3ws~ph1mzRNXM_)uPxvRWPm&}bIA!H< z>TP>;GzvM!u@R167%a($R*K_b^0{-D8wWQz+%w`*O{)c1geV9u+!$#uM@Rl*c|i6+CFQd)%_}VHUG>3?BTIZ)Rkjb?T0o0551T zYYw3xaf)k>RqRX4k`1Uh{eT1lgTbICa9D6_l?WokqyCzpa+fHK)Fy?(7>)@HEXJIl z;wF)z2r1ZX&X3gOU|u#>bJr&jyT;U@T27$}t06T6ElY+Bq4o^L@P|J{N3Ku^ z14AH@LXbreCGe4rBr*-ZD8tQA3NuM44+1iIg@QFEpMN=i`~1%Yqe0!C_e2g{x*8h# z_Vs4pJ`=RB29^mD?VCg*Q6vcE393lPhA3i6Vo+?1i}S#;Lr|uY1bOnqOZ|drDL1lU z^E?;D+McD~v66!*@p5VggW99EaedfoR*X|%*(P?+RR)!-8S}4^K$Iw;~n}xhR=_Ejvu@C!O5`G-FfSDQ@*KY 
zn(ZsY2USrUOMO;~>)xz43rRarDawFUQvcd#z7MK>q?9COh*HQ9A&<9LnHCFzA)d#3 zdxQ)T26s84N)QC&s}GxVa2TS9xWPJSkRe2eCUIA&$SQ_W$q`nngb0>EJ1JA4IDg|2 zNig}klOqjG0u%b*zsRV=j2o8YQ=@M16J<j-epR9g9s)&RG);6&;tKnITCK zYC(b^-90$^O!w)+4SF@-LaFT$rhQOxLQA%!Z>lipPD@MwTuMh`2*+h^q*#TpIb%2_ zv=9uM2qhpIeNZ_lP1F~Vh`1n_Aq5OJQ#o{k0hRNRY9G)Ekqsi05+i#7DI?n&GKff0 zWJ;l#)XR)vgdp901nbd84)oas^1NboG}Q1GODra~@l5&5ii%GN?XK4cglf$y!Rqy+ zxsODj9|@y^Myf|OnpgTl!E>2Qj}ugkHiOnTrI(iW<=s?@o_Z?R!DH*NKYS^hHOtA% z!Ma8-j^a1C8o6FCu41l)SF>v{*TJ!tNOg)81FPW(>u*Py);SuP^<=&36YJ~Cxu*74 zpV>zDm8!n;SZRBPnCG8bB9O2`^$DUYQL3j?G1zc>bBHuC7Pv&en!3HBy4RVr>ueUg z!Ke#I6Wm2YrY{tmBS=ymnB?rGC8?+aw8Lw$EX8Lvu7qMMRj_%cH*1v6u{k~rmE7Az z?}r3ugSdzz7Ov^~ifDZ`r9-S(!CORx=z|==7^JL(o1iGPno$x9$nZ+eQUh9jmBlJJ zF}_+w|1tk;#8w`g&*?)}b+jof8&Q_aPM2N}N|mj>o&;4@#!+P}p{STuQ;N+GSmmqg zRN0`afXYXON%c`y5Bw@Bm#9RdOa-MX!Bm5>A(c-R9Zo&W*zdWMz~#G3#AP%gJZH+L z-8r=?wduJmS`;SOm32*Nm4g}5sz9K%oGl8iDTRE#P0Qp~<|SoQ2>pYY-FBp)WC&zZLB0~mB8ZV8z(CFmd4W9j%LMWn^@2t^ zlnf<@gd`!$!Gy`$_~{bIoqQ#UnZ8RdOGHm{czweI>hzlLUF+O+VMFQ9q5}!4V8B|Sb}lR!Uy+YaF9*Y65Uk)~6hQ`U1eq)w zo70hD;;PLO`O7aEnjT6r?P_9h}+oE&B*OHp0m6ghL(#Vy=`gkZFA? z8xOAN=874EsVYcE8(9~IV|?QCLPNWK=AR1PK1`CYp_P-(gUz74ieSOfCZK)g@!(@k z6TwWa!eGTEXE$?Nz41bB`D$c+;Umfj@*I9H0f`)0Je!nLI8*OFpP!hsh_b%S+6zvC znQ|nxNH8I+>9dWoLgHqQV6g}{dZREPG^qI?R7p%JSn8{}*^q$R^UKawy-;P23M3i| zizErE$fxcO(GV|ac}5KibhHqZ5>TK*Mo`dZHojTe%d%PXmP!0im45En?XKW~hJW6d07p97J7E;9CWY~R>#ef&DwKNdDFVdO(IjZ49R z(&Nb@)4c-QXGla$P|616927yUNFfj3B?QOhN|q2KNChb$S3kv%AtMaicQFECZ-yj? 
z2BDltQOqbfXHf_Ni6Wfhw3O$bKx3Wh?$SOyBp1mdM~ zc(ipxlo<%=f?FPrF_)vE*j1iQ5<>D)OCO`VJu~XLvPr=V3`54qG*Wd9?ifReH_!)W zaAiSoaunq*D+GfR!4N2#LBW9amDPz#Lu6Pdu)Y$SC~OY)JS4_Pg0l=^hG71sF}k`Z zuM)$gD1mxHh0w-B$q*!=U>8ReD1R0Y#?op+ktGIL1=Dk8ug|?9Zm7kAvT8>aArXOs zheX~a$bna(5F*UVN`fOnHpVQ7xnUFO*vgd%2#x3kG6G@kLy-!i!M^f(m1lVx)?K|N zLN!Hc_6b*F*`+yx5VmDRq=VoJ9Y2$N>KzR6;gJPdW{Swc)K{+BN>`{z$e65fu;6~9 z;FJaFrgjNtKPvZ*gnKJ8_*sEQD!SST3ldT~KI2;PUyc5f3h35GU`)Yg4WxVoqp|7RoPd{`QAGY_WN3 zTGR63-T6F8>Adda^A-yEp5Yw<`G~QQ&!wMT^5R|tm)y6{$;Er|v6GL{&jk%jkkj#i zQdA%+=#9n@Lg>(F5b{yyHbs!g(Jmol8NnE1kcSu+W6W)ggM0yvhHH)*^WaH?%&3MA z5-&wu2nvZ==?r?6BJ>Ik6qYp%J|_=M*aWdxPmo9x*{qRteG<0zCJ>_pLhe5Zp@dw7 zKn@wq=HYCVtPJFN)6$(=j^R`ww<^t3GKr2aBeOyh!4#K+mh{T9Lz~*O7P91C&T1ab z!B1YUuY?okso?4Elt={H&@+*S?YRjO1*9U7C^*)pAR_uTv_iU(8L8K+q^w<1Voo^= zt^y;M3K!gw*-`0H6(j$Gi8L~wPCXi#(dN0?<_kN^GAf&lkP9_fjpNJDo6forBdr;Z zs;c#zrpSfJC&)LS+nUH{9P<2%=ourtgt*}5@o z-cD^mArllt8Oef@56W>hZUpo1>|^KLluK#B zu?yCk=V}HY#_@H0YSUv^Zpek(-pPGjSsIk!NT|9I6-=CokdZ)0NSRjF%Mo#c{ODe! 
zrVCL@2r|dRr9zHz8{#=EL%ARwW0024$O~%228ImA&#_Um$V)cQ^Sq_m`Jc4Y$Lu|y zaEchcNiG|oAT=ect_gvD8buQd84V!{;$?`;x+}e0*)+=-65|3^vlQn$gxSh@w#_#4 zeIiq`;n;@E_(bp=PFX7$fzkp*Qlgm1&KO9+NKr*W0SXdhNR05U(3O}_N`&AvqwE-+ zZf0{|Hl6jkw zHmx!ltTT(CVM;+K-DcMGytkjI!-{=pqA_|eEQlaNn4{SU>@y+9$teZ)(RyLEPrn0` z26omeC;fXJ$dMe${ECsbPYKy4b%)XL)5&%okl~yCtCBEvHWr72m-oE{+xaY0+Gu~7C4nkm9$E9 zbK*35&-2T;C~sD_w*f=z2FuMi6Vkkzt#JnG8k$t3bP7ZtjmSa)@u;Sw-D!IW*czNB zCg(Fxyv2J_%mb*H-OHeO35RfWO(q5gKXt#M1w{L2v9I_{it(5ch@^9vQ=Y#0^~WX8 zD2d)6O^#NFG1Cja^lEHZ#N$o&=+$+0K0_Jt1HR61n-KBIq+SnpjOE{jn#<6K|0Z;e zFLPX|%vzLfC{3RP-4uq{Y%dalx5ZT^hNGW%cFUt``1ruv6pFCj*vvm~LPx4-QJawB<*Rr}%$fP}xkD#u_bXOrb!_4f zl}H>C&$8EG1V`xG`Tthdlp-A0@vO_@NwUI|=Xj=-L6-oO>w)r8i_9DxNnk@v3iBxJ z4KH*?h)(>$9YfT&qu;j|F+0~p(DTYFT{{Gier8(Pi;R$f#36~``L-yL=?`|hn>6Hi zQW`<=u2r(vjrg1emRwP`vP-vo^eo?U&M>H?YKb?7;w8|A;fo|kkoyGvM;j=^n@8Iw zZ?SfG$zA8nZJSV)VTsXenZA-M29GZXvX=9Y8+!ftx$gqSs3MrqJu?cKlw%Vb+*yz8 z&h5~BTw2?|Y024i#{|B^D@UIs4*UG^0g+q^>BSA|&L$pZkFW2l#*MAedMfN;$aCD8 zzDbnK;aH*jP2q^n7MV=ST=h*y9%Z`^nDgHdY&&<LR{3-?u)#vpNZ`+|s z#~iZ3gwyz|#5x&)>asxhUU63$nU~!4n{veQ)XVVQQk@h+L4wq-Qrk|)MqMQ7k%Shlnfh{ z{Zd~&N3U9M+=*zL!#}6pcg}=lbwMvk<;9r9X&Q)9=G^Th|Hf)*%+AQvUEXRgjCZEv(V*k)_>_;Mp@FOOb;)v@%*z85Af{4%lzQ&&}G-|^lv+E~!^IMel>-TY` zaa{TC?ASwJS&eekZXyVF&96@$g|ai{(gf>x8X=cSY%!BraFPQ_- z>ett*ExSr`EPz#z45VUS#?_%HamHgZ=n0=>gSispRiMOPvSmj9iSM7Aqn$$g-C=Lb z&nC2$Z5!X9eq#-#w2Lm*Om~=my%eP%NDezdBtJqmzDR~GpIuazG>DwSEmjGFN?uMzA0^bfpdwJz)_khp<`vY6#;Q9Lb1-n5t)so4k0_5&ky1VDeKZ~sdk zrq+7Tsw4>})$o=)X*ZiteC6{Fz^uC142a~15Sj5>M2Q@Qt;gJ0DbeG#vZxUpPjZ_6 znCq6-Cr7!txa*?~yNby_e+8&4pLRMwmnNEafD9&I4u-XvNl z_b1Xg^zWzU{2S)9opq~(W-_sG@rdf+Y^Ke)GjKo9D%Qf4EJ^$;;O?i{Vb=iG-72jY z8r~I^ZX#D&QExDkzR3@1TgdBQU6r256rGmW?Z6FXLrJ|*L3+q5UMVlxAZxS3)M7z2 z>}zLctq=+#Ybm|65EOL%IenPWCfUV0lzZqS+YsNGo2@vwSpI!McGH}SEQLe* zrbl6V{_bb4xdOQYwuHhS!MPm^&)WtTTF<-^Kag2`EA4Xo)09^~Va+tL9eXr$()=8S ze|pHCXyi4L6J7ca*8HY=LwZSflZ&ICyLC`y5RR=~Onw#okfZ2@;cB?AX{;!d^QHAjK$0XWB z34+eG;ac33+=@=ZnVK;r`dq0lLZ@|o`gp6L=j<6fh}j7ds9j}HJdJ3 
zAfn5-hSJ3MUkFNe&dJ23Sj(0{MQ;C{1)#y0UDxXm4qq2p*-vTIT~G>RVncdDg?BkA zMBvNUq-X1Jw{UKhG&G}iG3+ZC5|%a;YbB$;o{AG|PS)ZTmOX3F4D@W{3E)dlI1?7QC4XP_p(pLbJ0>b(pxlx>R!9da;BL zHVOkzKx`NMrx?!XffEreJHaHai1)W=J0n!>qzHEJB>tzyiW;h1?{~G~di2C`IFj;* zwn_~v{jWglc&#?|%AC%l{0n|~=-0dr<{sK=>s2us4!|@o2nC}7f0E2<+EJQXh_qme zTf)-1h;YuWdTfcfcA4$#JE$3?2b>fpCK#^6V#gN! z22P>sG|NVlWwGHf8NW`*UMPJdkCcxv2P#eiX_W10VCelBuQM(-&GAgtosagKwS*YN ztzOdZ$Sn>M;wsh1_=aZk?V)uzOVVZYHe>k4Ixd`(wp=5dg~>mJM`rH1*62f25l4+$r{=cak$ZD10GVd~ZO9z%Ju<5) znUkZ>H+7(zqbPZV?Pjf84$!Jm5W(Wt@rUl67$d|uWeNQ6VwGj2_Ep_qb0@&=2$y{^Ph|eT1C8gh2+A+ zDUGtJVwF!{Z2n>tGVdPIm~92e8rPDs-^KZme(sxeBtp>1s^_bH#C#HEKKjd&V$(LW zA@x1d{Pe%pH=5@LTsRt5mUA#!KLswY+7A$f)TodWkO>)iPH`}XDxp_%J8`hF$s^Vq z0vu*}#plZMy_@h*bmfYsaqV3taIQ=Vls%MLt40&k(UlSaWWr%Eqr~D8%E@4I5I$&2 z3;(wQmMbfKlv%<;Dg1M3156i5-iO8r0ip-~QNCf+>zP@{n;nNMJSX1q%!}C#NXn15 zO}=(JrX}VKBfyPHo-p>vS{clJv5ZTYoU+Y(sglv-fH!by8O>pk@DGV?cwVHVmzl|D z49?f%XMM{?1u_KHt1@Ul!(EJ&PH7j=XL;v)Jbw49?vSU)hWVFV7X<=;27Y|5d%e7V06_fVKU@k15gs+;TJ2r z59*NbCmeg5M-Jn0-mgL;{(!ayOMX7QrgR5dJXDvVJz|WLDa0~E9cJi&Q;wYbG=k+o z`=IE2h`$9l9zVrpnDwyQl|d3KZyrT6kWL!7M)~6$6_KMJ ziqZ&+KT9MKp;6`VKp9Y)oRp*FtWE(PlbI;`+xz@hTr`SPCe5^sHrUVaPUCV|kSdvKBRPW;nIUrITwV|v!feU-* zEH}8Y2>pk}Bu6~+3IlfO(DmFTFRlt3a2x-urgofQ1N6BqK)!3ylyOkxmZpGp1dFszZplrmzPqB}}_c9?C` zc^aWY05P@5ysQ~n(CvRkk<~+a)@3yXW^??`dzm?0Ah~yQ7u(;Ns<|A>8d6O)&`oWZ$1DMi66U* zuS^qUj^-Zy$&LLPY44>uuyP}N?K z;N)oO*|XNVuFJ{W(<+$>u6b=YWE6(LSjLARqUN`7T)Kv2<5- zgX#fdr&COUZ;IS;;ycKcRG!A=(}U-56LE*bPz>Z&LCz%aEt2~vMxTDkGe_T*!$PI* zxuH}Mn1+!}S|H}(A!=0lOoR?;Wc#wUtY)hsZDVQUcy{lQNS8*8kjzeu%oTKAz=nZa zs!U|hjA@K=msTEb_RWUbEwZZ-`a%?f=w-vHlm$Q27juI+-i2S?y;hOM%eRry*1LXL zqD3)+K7Nq}I$mcI$2%_lgpP=R@-B3yB8OQETRj_u^M?~+GIh7odC(E#u9tUjCXbOC z9D6p+f8Wk8aBM=UY%+RDh4iN`&l{2}M*_Kp3ij)x>^ zAV_ao?#>i(X=TbxU%|x;&Lb)1w*8)HIbM3;aS3LBDU}H z%Xn#eiT(-V7LgJJ`Il=7Em3o-yi5b9Q)K9n92$^i(PfmMv?EdSR9@xKJ(Kf&Jtv-i z#@OKV!!cSmo8RODQ)ENWw7fwKe<6CidxBW;xp_irK#hp0H)F_McY_L@S|Z&Zy|=}1 zWyCTqe~1Rr*A@cPCCBZ#9?xI=kfu+>gdHmUZ;0JJ*REwEcykgS>yw@z!;pt&imQ-i 
zWf?(ED|o^8<|9j{NG+;2TOrJI`8T*U1tL)}_{H1+q8%Fi5Qn%b(ikm_r2zZU37wI! zx(bM{96b65FkS5J#1XNveC*mrn3ol_^Azt13Z0H)5Uz?O(%2C1iv0ODzFH+*i3E53 zmQq2KrX=X-Z|?cWb(uZO`3GiBop*jmR(;#gjs`IXI3nyOBisGTzF36wNh{3>9i8TZ z{rV^~2Trfi2rxfgwdS?$e>W1H6QrE7r0s08A1k$?G_6@7iS(>w{`mEfnn!(4xgEojvnfW#fxXp1U=DP=fIn^d9!IYagcugBQAoW zc}jW@R#PQrOZ@&icMNo>(D%Zi$-SJLt#4fDP*fBY_2`WwNo={$dD`-}O%f+sg$sLz zt^s|ey*|5s71NnL5^X}Od8h|JnNthT3PMZr*-hLFxBR%!NrXsZmqPJ9 zhVL2Q$Z(MFa3GSJZjb5!-p5gtj`3gc7B_cbi*s)3@j ztHYO@Fp_Cc+Y^Y*LUP`MFx~2K1aj7x%)z4IC6nc5<+VGbQ6549A}5MPjM^LV`Q;|j zM;1&%tYowH0s9-kF2)9(Lz=LpOl7COf@6cu2z^KEJgvCbUcy-#>;_Lqv4iF%RWE|D;3~=>}S=UXw}W_h$BxhacS)~Tnv!42)zK!l=Xt}2QwR-K}N6i57fWcob` z9!CLdLntu~|JeMw>3@UrIrHb-IAjTZD+}nf3HnZ#m_De)At^a;P(+Aw-1B^eEbFao z|U$j@+enqD?{60cA&)xlj2Ey7BtuQ#A{PVC;33}knSpLpgLyf#WD zlM=VBW_a&Y&?$#*7meOcghY1ET@NaWlKN>J1wce^F~L;@k^_UI_`>H>ZA$hIQ+)Ht z1Du_d86d!hLLf8kbQbr$2{64co2lFH0|hz2#ck`QD$g4u6uc|@nRE)*LI-({b{EJ{`(67b8le5G1w3kK%K)in(Az0^1go&Sycd)dho4ioUQ= z67~5x5o_oH^pX_i*$UC=*qsoE@V1y{cgyuMTLiql^f-ttzOrpbh#Y&b{=cU@d1v%M zK`e#6z)CUSWXnLMaF$00Evc)Xf)T_Ia5A&Y7`kZv4~jRXVLqCab<0E^f|4gXXBFzg z>)vxD-pdMnCh)~)_aX*X1P}&?A3nrWGycJ&nLjuVdcj>_SMb~pZja2ZccR%8Ixjxa zdFu7k<(v4Jb&ur@n&Gp#-~6vouoP6yPMfjmcqrnSP^9yTo)AfYK$G+B0nQ{3DOUbw zVGvzS2fmukZg)D)y}9lHym+V}d)B8CLD^4o+e3^K{D$6|+{6eqWbjZ%%&j?wHDhX! 
zz%>R2`J%xPZ%$D^8sMa&5^cwHG`HLhZXG=g&tt2i=0+|H7 zeKUK9w>7CnDVIczrs4YA<$$T1CRc_p;}*fqzd5`5^8e*sxjoQ}uD4M_*()L40^C(H zGraQ=&S9SojAI>N6vku_x_LnYRiV&%OslXV-?~Ex6SQITXXC~Zxs>^PiLyA~M%?<4 zkqty9jR2yfrF~o?>1N_G!^3^DEiPrFE!R6>WgM z;vIjEyI8s+0`5(2YdW-JHZ`w?PLaQ-5f6kIoq~1qNWMAs{t96chqLUN z%=~bQi0hE)xD=Zop~)Bx$)N^!NZ%qFj+^2d+~v;>!<~0jw6Qmz;ckKjIn3=jr}eXMB*ym;lqvRRmw&egqM6` zLA?q~h|2|4TRjl)yDYl}BX|54-X0*mrl9GM-tCVomicyf{YtyQBheLB|m&> z|GKixJ)b3KD8xaYY*J1@2pJ&_^r(3TdPZ(7sK(aVESl4{-OtyPgcr!<2X{c;iY95JWnZbHDa^nHxX zAhcxEJ7`3wgz2csEf(`ZzENT@wKNpH53)h`f(ZtbY`aTKpwuQnJ2H(J9O?5TijR@P zwIW;q0004001`C)|F@mQ0W;Y1{U;ekIq8tJwu%xROY?VwRKSPS9kb8or z6Y&A@kOD{gQ5*zx6958ND-4hU9?pn`CUs*5r2^jFIs5VL@zfss;m#G`WR5s2w#8NA zo&M>Em_yglMKnjZt9*@GIkpvLtmI0K3Pt5nVs!PtJh@->TmSVU{iUbp<$2YdGY^`d zyaWedvD0>vJsh{=u6RuRjZ2Vl7X$@StGlc6YCBW)-$u0y6cb7!wsyl%vI#30il}7j zXoxe7;WV;t!9*GzI=9pU*OoMFIv_+$m^MhlH=zl`aRGq7pzuH>0RRya0Faq9P!?f0 zZpPI#3I!(x%IeoPg(Q{JKGY;PxO2t$KF!Xkr!g8)le4jONhi}WU8QJ|KQ(paI!QaPzKX-)Xhc7O6d;j zM4E$vTA$X0np!Mw#uc>}t}-#za+SxBs`1+jYkzK!w71SR(L}MLNV8}jsaSCRoypC^ zCaNTB^}?(_Pj9*WouSRuL`5wO#pcH%w_;q;qLZeY5~(oPQ>4ZQg;rU}b^-(sil|tS z*t>)*Te!R-aH(>!RGFeo9uXExRVhwJz+Bb~aYmoGnx}(1D1T5XR$M76qO5T9QCv~- zB|oJ%_}0{&&vf1=Okx85lLS-<5Wop=rSv9zmogYd6n&%lHxBFb*aEdi`rsaZEM;hwu-H_HEf+No+Z(uVnwv1 zSW~PQYsG4@sH`}vW{FugE6k!<9~KQuf;D9oSy2|t8nR3lkwvnEtoUldnp(M5RIAmB zwUSy9t)f=b)xDZm_3E>#S;?$sR*Mz8GFRuSxFT2OYPhPc#?>7~WmY>CMNt%0Q4}?& zsI^fdjfx5;Dk>@}s+}mRVl;|T!9+zxMMbR^B`&I{iYiu_%2Y&EDk`C(nx1%buDR-d zf+oVJ%ADpoeQ}ER6g<6n8a2@HwBV`6Q>!M&O%|JM>TP;h7Nj=)W11S%CQRa*Bx>4f zT5B594=kFdG<9g&Uz&L7CHAEtTw-Xs`6Tk~vbtXhu^slG2DqGmWM`3M7rjAC*1|RE{_zVI}1S+KfgaG73}_ z6e_q;(9vi#8bxeGGeKnxS&5zCv zpZO8^zlKD4tj?667WlqXGMCLQg&ytf#PVy-d zk~DZoUUm6L@-O-a^8f$DeYpBQ82i5O`wI0^>^b+Bsm%29DaCX)olfb}yQJsCn98>8 zOV&r2rxo8J@!=5~%DVgc8R(4k{srGYdZ*>LzJ$a2&F^Z0=2L zL)=t_$GTv)AhRwAiW(jbfjJW=6i&r0e9;A4!~B$n$7u?NlIa}kU<|ATl9^FN?3xA< z8V8gFqHol$kP1 zSSf2PFc^w`m@v#l&4y9YpN6xthN&QU!#A=(#y}OBgrJE>3A5_#g%Nd^aPX@oWDrsp 
z1Y_E}3X6B_c5bH&&=zVj>jx8tK5@=Xn3x9vaTbUnO5RxTdkMU}@|psxCm(S7pzVWV zAAEhVK*c|l`CtqZe0dLoAZU=s18EqK+bbR{)Wf(30WAazGVzHA+=5i7#vc!4A*vk2 zLM$ExxBRp)K@Magcp$oiHH4fbJigrl?YbS%!p9EUK$;K1CSk%}XpJYB#vKQNhe%8% z_Qt`4igCb&UL15mMEps)Q=6%JidA%Kd1b588ewr*Jj=oASS`z6Nv-Z7x@bj3L@N>z z3Zl)ad1~DZn0BJy$zv1~zn4ri)EZ7ADr4|e{L>Gp@ z46UyuOg;&ZgdSBvwNW`Lj-o+tP)(3qr_|^47JWQt&0~JBLF(9fjv^i>Fnxc9pcsf8lOW<)%@K{UXQ(D-%o$Ur6&aZ+T~bG=I9o@Av!RT`a(YX|r8c=t zWG4QE6H*gW59vaJAt529s_P!I%iR}S#+KU@_RJoRE905*B1B#pwiQlAhB8w|lo^Gj z3{4u8z&4?m?SUa1&D_?lY$OEd{K6#~#y~OcM%t24>4G*?aqAXz!QK`KA+Q>z#%*Cj z8Bhh8aIrv2n3$8pEm46Kt|p8ua2!TTMk}mhu~aNFREiA(BKGAXgn;6O+Cp1~7CAxV zXd-HgXe%PRs-mi*MU+!jQ&q1btJu=#BtE@P5=2s_#i=|MPm|NkM4CiQ8xxI5Z1PQ& zp_n21L!_Zyh%OW{Bry~lB8Ntys!+UC5em}uZLi4h5TNK9zLcc|UMRN9fg*oaD$ zqF+O?XUS|-L)1FitbW*w>s$+tbI3Z3- zt8*ic#QhxSoX^#qILeQ6K|F|<3xo?VS&*P;s7;08Oj}`Mht78>+0ZgcVHaFc`=s^DMmac)zt^ z1vG&aMXjKUDMQ7owhGK3T?NumjS3npu`m-AOqfGCDu|kJE;r>?T0;!kKqW`kjKYx{ z_u$`FFlZ`ywNPlEK)G*C75l1?f z`GqB}GDIdL4R>NL&XKr|BeLseE5)c{jaWrU)LWhU6uBg-r#h8rMgLc)Ad=#x)nnu( z;g4T_<(ElMOyU~LWkg1#Mk;KGhD?o7Vk3dp*f8*XUp3){5}Z&jIt>#GoFEWFFwc@G+z#}hB$P41EEJU` zRFwoofeTe6!5q6lK>!u2}3yv z0zwEuC?p7iVJ0LXE<{xy=G7nwhuXB&pbM62FriZo079TG+|;2;!w@Z?24i^9f-XqZ z=KVb4v)AHIZHDuT$cTTckjd9pJJLG$|#cvzn2|ue9P=6$~mk#(?1) zqva5XJiL8#&=U*onm92dGxLy#pMQjgI1%$0Zb&ZTkjm`#UG>s=WH_^kE>?8b1wl)f zRSr30WqXhUjf3UjI0V5s+`&N`WDW{V3-y!?MdEUzIEwLz;u*z}i%3+&wTLA}ln^5A zVxmnn(L@u6=rA!b4$(kDL79X;7-+a4q+kFL#$mV+eo)Tg#zU$J4~HI}GnCFyW`;yE z?(cTs}Olzb}K>VKn#wlpd%Gf!rwqRRHNQE%u)1kDH5u?q3RTb$b<$_ zurNau)B^bwNCSHkE}TrG5X{yh2B86jKGYY4=7z(7Lc(B#!F~3Dl?KzxCxibltF%p%PzfavfY-2HY^loMgl64Sl~#A1VNNcg@IBDXqA8% zPL#l(bitzp!7#Nb#S%u!h7trjLl6uXPCCLMn*>4dg$M}~Apt-LR1cdfF$m^CB3gz{ zbb%5LqCpYS3oHXg(NbK=#UKcchM)`|VnDO-1~q|v2EoFR7;MB@@Fc`dA>vk|2^1Ws zq{F*PgLsXuYjhiKhIkT*=Xsv9oRc6o8w!O&p-@=PA)J`DaU92Ba1724jRG3?`y?im zn25yajSV$6Au*E1?8a&;7OSD5(}*NQuLV^ND;A$g;AYc>>RgH>MxiV?=FF`F2vV&Gz8w@7UC;Ll$^ zGLb31F+9H#QX_4Vi?@AydT&AwyzSjZvjB&R3=~?Ht#* 
zZqf`PZmgb)#wvuQGo81h^;);BGrZUs=Zx9Wi6|FkqjvSxDz_M9im?h|tUT?qi(ESD z7`KLDGh-okFc#EWX_l4-ktgy|#xTb6i#7Nf)gsuM)Le^UEk;roE3aN13&UI-nm852 zyE!CI1@k~`+2un3_|bp^SuCNx~^DoKmFpNZ>;oGHpn)K>Y*}J8T~O z)WRqbLWSA%CKf6<&s_5+XdogvMRpdbCWc}*0Z~{;$YfK~ZNUp+i=Bp$Zh}AT!_78b z*tn4l_BQQgl|?i}Y_V;dzzf{Oh7C~$d7y}Wv105MGZI`BBMoIgXytMYg<~9AT9{Zo z5VOyC`3*X84;1-XQ)slATPL9OFJ_XhS=hw z6s_5_CGJ@4n(66ih^A`Ftpxpo=bvK2#JSH5#E;zaqmNQlq7hw`(K;pg7ueuxjNpWE zB(BjQGQMJz5>g2(hGOAEXbMGSqy+pzNA|F1%B*BYGYsPyZ5ExyN>;-PqLL_7LSc$} z5{MT_M2x*L2_|7LdY!%?RhO!%wVQU9ZqbQwH5cbb+$JW0zfi@HkGK_HL|#23iRspt|mqS09fDV<*Rf5ckIMM6}BR8;U7&V{^a3=w-d zS`CK5!!mgmClsw*s|jh~SmV{d3Rk%1`qZX6)v1|ctHDe`rb-x1w-!WP6Q)y1=#-0Q zMOItY&^R?97dIM54MxHyXyOie5Q7>t6RHVCg@#JR(xpV>(sb0IUSRc|hlG)wGc2q|9OKNa_{opFe|rQE>O!QH_DCka(;*p;Y&M%ui%4xRJo*wCS7yPF>^B; zE-KurX@noCQGb0@p^oS`x#fNupWaiNa9xkX_oXB!sfdY=bVep( zSXGUfcw&(lnZ*-|&RR3emag5uY#)TPuyRUSppz6@F@c1Xm8`Sy#+#~&alF`L=rN^k z--s*qfi4R}MI|+jvBWH58q5UQgjJvk3B;0;NNO39A?c$bMnU#Kqmx3Fq=AF!(xCK$ z$ZU-Z99Mt!pi! zz=f(mFxCb_3o+3J-o8(!cxWWNnGQ;bDys~k%P6I6@H0?Hde(j9xAADa+mm~B)vFf_ z2G^jeibYa0WtaPPAw)!EMr7^UwF_?z$_qjh?W6K&UGL{lM#&jZ4 zi%4ko7G1DKJ|6}WEMpi~;2BCDz8X3*l=ybyX-+)L zuaWTRjYkhnl)OYXA|o5RNTnhoB2uAHDwPUlGMP{&laP>*Nk~YwS`DdIsZ?r}N`2q= zeP7pgUDusXr_^%oMakfd8w?D!U{#IZk#_yB$5)A*lMLrt(OF2 z450{T7$aj9>w+`qS$O-*JMZ16N^eXg!*7NX8i%U+Nms zAWfDwj%*yq7-Ni46%4O}Jq5>73^@?}^ZNJyjzjFZgKUbTqPjf(1kK9Da9HLEV!{wD zu%skB$iP{kkqpuKOF}4WVhSs+h={b4j(A#^`_WD(wIb?hth7>a##)pwE4*-qBje1o zTOK6DA{DhHw(Z-v;wIFB!K@ISFpzvf9B3lQ2KrDJTn>x~2EiF93o%twRb`bIN;g|{ zsgu-9ij-QVs8Xv+?Q`5N@&D%E`y&hvpBV1>$aJAxvOshS zMnWgw7cea39luhQOZ<|T+j`4VG&ND)luY+HbNaG>_lK9>@|GEdu5!4-bKo@4WZ+|= zG~Rd|nXrjzVXp6bY~7mZq93R#JQ!*1nf> zz*@u2SqvM-4)HO_Y^0k#He18cV;V7~#4ILeVrGFYSx^a5;Xo%!1yPM;zU-GpU4#|q zE+dChFqvb|Z3%Oh1s#zQ&wPerLSm?ks>H=0F}27jA{Cj?f>b+Uq(V}8@Kk=` zC@K*Zc7YTKhQTsh7Gfq7@q|9a52=ToB*=u67Nn|cgRn|2>Z%JeG@(ch67+YKTTbQ0 zCth*d`!4w1vMV8BXiABPLoMA(ro=I`qguSS*Z5jr3##37|u;i|Xn_NTom&HC0hFVYb@pTo8nWsyK>|xk`O5-Tx3pjU;dl!u-(6{uW<=QVK} 
z`XH01%s5H#T>3QCREdV47KDgO!VH(885$4ONSKvKQHn*Q7(_@N&1gm*ee_XBG1}-s zu^>Ys(C9I`1o}Vy@yA>kA}k*X%|gt4COkx;Bk_k?9+L+FG4NEu;P(q%hh<~qusDNH>vj9EE>KBfh+&B*QeDE#Ot>en zu#qaL2GZPx%BV(FI#3!$NN6EUhQ)$Va0QhEt-%bU*Qs73sZG7uFj0T#>THnKD{vUm zD-fzs=Yq_FcqVjbC<8CF5=8zAf-bDW3o^eJYN#eTO%)~ZpIQZ~PX!DWH1+V*0?p9V zj}%eDE>cn9B~k$c#Yj0SRST<_Fwbx*RjH98sga@-TWw8?TGLvKtg?i4mT<+DtY2jm z^P11!iXtK+A|fIpBBB$W=tL(v(TPlCdOWsil3L6JH?$!c&eTJj zn0zu4MJi$;OI@U5w(Tp@mMcW)nkrH&toEYX98 zid-FaB%>Hn&<9tY0$FHGcr2(Ss1qa(ss&Ln6F8w-!4CN4MSz!ofv9cFy*0Idg5f2 zw1weA+71)!LTPkuv=l=-PDDZ{92`%mExf7`bkH!adHyirxhQD79GjUab0tWGNF;1z z4!Q+KE4l?c26{*8OkGsBkU3-yio&q-i6d^fVGT7f7-*c3bYM9LbueY2-8(;MLe)^R zZB#W8tB83qhFFY_^)Y&kjQOuol@zCD%0(+;v1OtlowQPEL4-nvh{8co6j-iu`^F@1<%5}7ydqyLxzJ-;OAiz$U!R69p_&I#eN?2^i|DnWs(FoH69jbE$YpZHT_NM5 zagAq}=~_)mP1UAkrf8<@srVF;6hllU#H1z&2dS1@Z%wr&tztzhTG3jItznsE)>*@{ z$_mz4W05ton$^T?wbi2@^(cy}h}A?fYJw^=EQ($tA|eu%NQj20PCNxcm^cMBADb3V zmnLdLhGJy%%O*6DN{EE1vh8$Ar3uy$=FMpMIJ9C>&_)vkWRMAZXxl&$M7657KC%!* zE28PM;CiTfAH>n=69fy=zn8m$`A^(&*+1mD=?xc&`A8c^7%IGl(Vx%IMFLeENU1K^ zd_N(r1T%p}21f622nn=?VL!nHo}2}#ecva6!eO9;E(G^_!HiE32o*Ih4?|IT@d*Mp z2+IrAVz^089`ewkmj?g!dx{K?X)%_VNcJYr}ueVIawBq}6Cl#mP;XtO{Jf=;M6 zkQhu4QLhRc(Of3zy5O-)FfCKdh-GSIIx;gd^O1Q=A&5ehqC_RSRaO~AD8r#CLkaOQ z6isRplxjmXME;k3-?zk_lT77KJ6iZWGQ92t@^DT&6Go=f&om@Z+Q6aFt+RH5ImD;I zWTzEgk~qk)Q)7pLwgs|v9g0bi7l(?qh?n!IlLHCqP?$P20tf&A5CBt$0U(UTD9tg1 zm>K|qcoTq13TiW97Q`UPAY=$2h5!Hn0000000000nOXq;`8^?{+7_s6vFmD@)ryBn zztVj+?ZMvegz`uxO;cGepC2(qT`v~UxDlp_DS#E(L%CV<0dBb}abC6hza7U1V`_v9 zNU9|1400;7cS&yGf5Q1R`~PL=M7Mx>kVHG_5Fi?%W)=+-CXH75il85yJb z!9&r!r!q{g=JyDN&qCK4o|1?D$X|8gKsoo9dH^Wq51UX2L|z|YqkxEMHOuyXk=>o& zW>A%CwH@0;N|RVIYIH92++?z=08CqM%lIU;Vd9RWJm;e=^o2=9UiE5B{}5Ryzir7y zmOq35?JQdZbE4$T1vO~BGV_ei&yytsk701<`hEB`P774%fh$al>~?rG93+Ew8HowT z!hn-}ujA;U=mu^N3I+1}^hq0Ij1U>t>K=}0hUE7td4fiXi$8Rq<>qZRt*cn~DWf$E zv9a<{v8LY2>T2`i)R=T{NbFM=xyz1?uvbJ&6(-EFnZ6}NDQ(G*V%pR0jtjYTsj_7x z|E@y>O9N~HHJQi2{Tq%e9d_MfY_6*(lVat1fSDUG-3<23p-Rl|kaO)52G-|{TuVOz 
z2*rb6`;9_xEKtPXf}yZ-PGCTD`zQ8bG?M{qr?3Go{Q^|gqJND@CEuN87xK(1OM^wz zUj5G#++hdGGl9@7zo*n9FCs(Ae22c@(>>ls&c z7-TzSgE(P^B&y~k(rd8gnLsLLGi(p=1Dj+gn;pJhRHh#D==oni&TUlsaxV?Iu%LC9 zcTRy?-tZhXF-g)2g>yf|^m}y>eSJg{n2*CR1#k8M(Qq=+-oVJU3fk)GEDO?T@U63J zH^lkGu?+?=L$9eulo1;#UOKPPpyzSdO*3}TJ~GXiPE=UdtFm6%u5XDjRCAf#d<<;U z=n6*7w^Yh)@4e!6u{h^wVbt}YOfX0rtRyhmBZu0`GEiFTtO6Karo)k+Obj);IAt2# zT%A;wFZ?Oo$YLPcB9=hDKJ*)}jjT?w>gm~dx|(A3Z2Sr9W#*)0bNs5^84&S3HUfNa z!n7qrULr}#Gtr`iEHMk5Dk&3Lul7n>g<%Isvlm7vh^SLWxfH0gD7II7*ggk?jD_xm zN8^k#QB?2+e)0>EB4-oH2H%(F08+RCRT!Bx(4fa0L*25zUs9z2#P5oRGL$8)>C?>O zXhb1&?V(x~tszrHZ?LIMz;m)AN_DZ@qF7rvk9cQi;hgHi&USV=wgTlXmrY8M_UZZc z^2A|5$(m}%Ak?^T0RO73jeDH~D@*<}N<4j0B%hMU?IHUrod?F0iK=@#bSQ!8+2NG? zbA*iBBAuAA1&^H{1CPMtr*`G%{knsrsYnCp zl%`p!lb6Tj8OrS38?1g!%tMD_6ZIZRz-%5G$w8qmd&+N-l(!ODjy@DCLh{j72>VHd zie($jamO3E`{H@1J7@VnL6mWQNB1!8VWk?}&ukimV~I9^5w`f0F1A7eFzZ$@yvl9@Kc)qij|-rFi! zM9D1gbo-+6ktSJ5nCwrueQa5D-pEGC*l-o2uXGgeS#l#;Sdo0t$=h#-_i{X6LSSN_ z*mrUe?x#?+JPA+pRjER=Cfk3{KQK^?RbK-kaGQE-bP{0DG&tGAkjC5e0MZCqerB*K z-ma#5Dn#ye^YhFJ(3Q3&v&weo*ehR*kr5UD)&H2=X|7D_RQ^d2E zO4#|SUh=l6{IK};UXZgJMX>Idf_haBc`c_sY1I_m*V;4ZM{R-b-yLnXFf9fMfrRNH zP0=AIH3FXNeWAmoBdtc%$E}$eOMsqLv`RvDC^VH=McY)0hYWtUAbc&p<>~Xxfg10- zudaC3X)>}oE2bwd4G|9F)Y~-qcdS#92zrAP&FlU^W8Txfk3%g-cW#C3TSO?hPRC@^ z4w#;rmg*H-cK&3BhHs@D1 zOAXyIjmWNEk`_HLIQS{Eq@7d*LZLQ_`8#AM{#l4W+4K#VOJUP070w@rpC3x~;!q|4@Qvv`?P_6`DVVxT= ziwu<$yy4a;HS`HfB{czOLed@flGObv$ME%i{m=c8!nc2>a7Nw-KH};>>#F(7E)TCx zJiogKs#_FsEwa&(*)(f|tU@A&pm93X@mDPU_LmrmiY_2c)zF_^6)HN8C`IPnaa_&; zoi-d|6D0vHqp%UKC?goRyNwjI?8u!dz%6av-J>$z1Fd}##4wT1S}D%zb)eX}X^@MJ zQJiYmBW7ldg5pUsx|dL7#hkV-s+Q9A097y?m^=TTu=yJYG)>ktZ4OKadwkEnDrheLQm`E7c-oXHB(5W`LJ}oP z1hn8>!fN$HZRmG^krI^g!XL>?Yegh#^3UA8C*5n`)~l~TwSp<%Pqo! 
zaB`Dam%Sqna}$-szoRLf)~JS%MwghH_|HrC+ccWWb}iQu5+SK~tPU|o!7K2GgsVnW zM;;1^Pldw*)l-uy57A}TGkzw%T8jg~ZP0@$l`xm3=>i;gWLTsG#A|PSl9G90e!$Ew zBlMe=R*}84-mY~IvQtEo}())qCmF?xJR1JZ3x8~1 z$DjW(5f1U$>%;M?`Tq${+v?I9VeokD9~_^S9Gc%Fn6I%PhKu+MMAZ#>K}R90ClA6hcKst zR+bXuftJPUR6Vb1@l~b^yEIb(mY+^ZM5|h(5{Mv2C!RkuiN`g1Cu@sLIj`cGMW*bH zwAR;T#Cy*={RPG=mDKzl>ay}P$KTsTPG|HR2)Tj+Y7+4;Oj`HKNq575*SDSx_n}%^ z9&FqxTlRcl-Lr%%BXam8nuKIbtS=fP@khY}<@~#c3@PHcq(&k>)J4Fmdt8~AD+ZSG zvwkd0+ba2+lZ}J08&KWIPqZ)B54KbSuF$2?<65|VUg*NkHZc38L5}%{C8rF^jLuC{ zl>{Bl+OVU5?H@EzWtdcgt=d}yZlXfIsmzeOC!bQ3xWgZ0DhD1*{(0`zi>v1u0cQ^X*|xfEl7{?~F@H_)SnCbcX8~Z#UM?K+c*A>X^ydjqU=( z_udhk0=PmOrTid+6@r8$(FFTxH;9!dVBE zsuiWHl))S<@F;KY$J&SYi3AG-1|V?+0_I46)C@oHPe}7?hl8L+qx^`R2$OC;Bv#&5 z&~MeG7%0C~4)24^w;VV=!18^efbYiN>Givbs6eiZ_zfr?udsEtSj7qj?`1K2WM5C` z0UZ84|4JJr1eZlb@p#g*4A{r#^Uiljj+E=wLv!&JZ+B#^k!ueo(e(Z=zD-OwqD34G*^=gx`m$A2*4OHjh?Ph9Ls8|H6qLbNuLrbAg$RzNfbtVBHcs?Fmn*|n ztenmQRXI%$IN3x}&I4_|+iVuZl<+t5GmK^1hwk8t-Hn`(xgDG*s1l;OVnx8|KYTUN z>t{w#rYMe|{c)^$02=UXNwfj5@`nPJN02pga)+-;Wc6a!XYa^c98u$n17V5ds2~B9 zsdpNS+r@^4rP$-HD~PE9S|BeSy;_zfpJ=I@L$7guBa+B65|MfM&1+WUG^{Pn{ws7; zLZo0DWjMfdQH(w^_G30`Q~A(9`D4auteU$nOEsJV*+q-(-t7gv;{FgvfPF#H$x8qT zK_v#7-V+|AsW(um2C^xz0>;v*`;T~4y>JOeS8)C?YD?<~zumu@cKPIPe zJ^-%Oh?zQ$ZtF};$9ywjKrwA(w&Bf>f$Q5XB2~$DbP?DJTCf{v@qa{~BF*2oMAUL{UOO03;AY zK_M^z;RlHU0f3+w3krZBgdZdX3IPOSC@cU15dJ73AOI4Gp`Z{LfbfIFfB-;n#kk2{ zX{A7%lig@#DR%bdUSUyaIY`5l)B!CSVNj|Qsf|KhXcFW3k?Igc*Wi67>Wn>GQtEoT zNya0egLn-nA9w~`I8J0NQ}gCfWVx!f{TIt7y3ipd87 z#!2fy%Lzu~&b@tw_dUbLXH3h~vFU4O5bQ2Agb>Fe7lifDax>qpDVnm}ml2OXM}4-9 zDL0eJbTe65E<2p?kIA**R3+`kg{!FL3{&Ic(dm_ zlyQMSUvH($nBrvfNJ6AAQ z*qSWmbFAV1Kmn}lM_?si8Q3Ytc=Z~XOsaAc5RK0A)8kfGs3rEr$o??P*v1wy zwTS^S%zBb{2}Ew*kEES+qv(0eK?=+9vK;~ZxvAN?djO41hJB&jz^mTo!_!r*TZl~D zWXVH#Oh3ZNlP8<2Js4C6b8FL_YnC#3SAf#ekS2NM7LKS~?jDB&P}mC%qVd}cP0MnQ z25Kh#Nth3v1@O7NzXAPxRU1((2!2?>mi}Xqe!jkYds5LRe}=dobD84=J`t1^)X%@* zEtrS(tHm#}wNKufxk^1+q-$>(QBDYIz}V_l!C1J!{ELOrYn=A>ES0;QB`BQup{dIjk#zqcy7@$ywq9lhSRx()S5_hEq=z%9 
z#CgPs(*>CRbIb0d1)gI%G8RiGSO6Ey-LCU=D?in=ehB2cMxs4Pdh1FQJ7CPnZ@{$D z)(yr0ONL7M9mKJL;o3@pFm4A(hG0qYe2t;_z?>ug&S@gw3!IE$^mGQ~&<+p5y72_d^68Q{y0(paf<1NRi{#qfK z9AqW{VsHx2w96RLxJlM}yU&zjYHvZvIEag)rtlSMA*WO^FBZ^5(Jj^ z)w8$ajNoS{jPmM~6!lmB0=b2+GTFZ64ylH5j^89=o!MF0R+R2FY=sq7XS}0xY$@6K-w;Nc4+ZCDQoEuGVscxgsJ5hku-GG_UAE zt-v?*sBkez&~#YsN|mm-_|rU1ML z6c!A(MQaV;3;W(}R23j%fDd@>vk?lNmR)Me^=ba_>(rGfR2T$!@*`~yf#n|!xG4(~^26j|{SWhaISryu8U}m-Q zjV%col%0Z;lRZxk4KIohW1d6h6*a$0TWh}JS7UUo-5l()R>J0xS@jLfyk8=eY*%!a z?MFkp>7`SV$14=c8#ay4d^F?#P$7;&jAZDtExUf+;RKPpS<;<&UELg(-Sdz$h?4Jo zQi^m9wqT)F1O1#WoCr@l(@}d-!aMTYE`B?u2mKd+DoQJtGhQ?CZ;SwNi((gyVbF#b zQKf}Gp;i1RMsmb0_lX^iIER1L=Sz#x;e!*^2zd^#wljw=xmol#9?r(a{%pG3nmwjM zue2_-O;({xF`nd$rZ+%VXeZu&Gb@g$l@2Kte?uVg#~WS8-N znr;m>hGg)gLq_XAO|Y4cAzR~8WN*nstc^K$uybjZuRA5(&1-q}hFzyV334;h=*F6h zKH(XcdkqMtlO1bcQ9u@3i~sE`EYRkuzE@ZLkK7w^4dMNIY~uR4vYm%W3eR^+{|lQ^-HmL@_8 zM};fzI4ZQKOfZFz#N5hUO$L^G{@2`I+}A+!aWtaBQOV?0H_cLkTd8H%q3OYbRl1VR ztP>eov1qa=t((uQyxR}i7Y1FQ&mbQ z;dQ6A37$D1Wx$~R91K^tTWzRapJL7#!DT&n?srYhV^R>z#nJj%-mKKAjehUk`AZ3% zh?XQ$^Rs0_(0$hY3uh^>K9tvriu4<+7t(_FFw*DP)eY4ws?zUcqmN`)#GWdnV#Fo? 
zZWAjhJ8`G~p0NRVOHRiX8}PQ@A(y%2_pe!A(Q%G+UB%IfTs$hbGSHruMh4EOa;^+x zbcapgSkGN0VVX1RQ8c@7qw+MY+C z8TR-=4=;tY9jxsN_l{C@j8K26JP|Q`-&;Ner;S^kBOuyRxtlIerF1EMa)@#7G5r&S z;CE|zmlB0_4I_N5hH()gqdSxKg$+OQ9UqfgQVJ22luA+Cq#%M_GZRx>@JjC;y@Q5D ztsC#2S1#Z$q{KfthigJW zE%tGccW;}IxjwWVS$_U0A=)e5dR_&4i}q2l7t;l0P}o8VP)$@qg$j_u;uYR26R%hN zYEeTe%;q7=#(N*7r(9dLvR5J-=DQ`k3H!}jD|@BcW9p)ny%O3q(<8-0;V@_xw*<`f z7hkEqyr5Q3OShl1nV~KbLg7tQE=ezkb;-6?_Np?D7@wu_jgwD_P6G2`7PcSk;jkdQnKsPU>e{ykde>?(C4Wj$Tb;1AaOInIe2)C8K&{p=Uclc)!heccYb`uO2 z;v)Y>SlKIjAvL8nu*zOVddA0LAti;xws3#nwY*>$^+nk$x;Rqysyo^+a7*~IS7B`l zLu~)D)3~}1u?DJ{DD#uE&7i)^ZT2SkUfC%4sSSbcb&Q$P6eq*Vx7_>L5)efz_4Z89|*mX}zOFb{D zXi;FOwG-V&EE1%$T}cbA_2o5Y-rIdiH_VZKQmnkT$Hoc#@16|P}zq=>z~QMuw`QrboJm@r*q?C=VbomExS+BjftR4y*@=@pGjODi_D z>oHtLiTJ(KJ2gJd{(faE|^WRJ-juH7=&0SDFKb6y!vYReZg=9=(m*m-|$T zF|cJWFm3yHyf7%?$!-Yt2KVAq!&7;#9*Jxm>&;5PCLOp5ncHiHh4leO-+`OqBZQ#f6g$*B5lGy2+?Z&J0sWqnVH zoprr+)?&H4j!-i@!sAZzL#Ta3?E*X49(53PjQ3K=b2sqi+{sYgsn(0HSJfLa@-Hhg zvd@Y}eaeW8>MuJ&8QIN>ME*IE2!PvA*RLo->j`(HlI}l8G>9&yToe6qmg=&%a^<>b zp~0@ZNJSA*jLTO5Kt%)qlH$bIE8Qjc`_$@+w^yVnYMd(fgh=f)aonE+(;w&@-g>1= zq4;eRtMaiRSF+l{U>A<`-Q~|*&gv>*(2Wq>E%gXCih^LPe9uzqU`3Tg3?6s#Jwk0q zRBjJ>Z`)rh5fo9D3mrw2rQ-rz16ySRTA)Kn`_>U22p*fFiTHY@6Okh2sv+JpX*L-T zTeMB_WL61K*I6dQ^=Lq;wQ037TB;Fe(KSm^h|n9r?t<=e$p{r3ZDSPOx7S`kTBxoR zuFOo= zG&FXfaIFVv(Dmw&0%vB2 zA0IrIKzzNrzR`~1pI5*9jjZn|34O*k=jme7_HkD~)QC% zkRhc0Lf3`tgeG@`w=xF{fw0fp{*3f%(tP&OiYXl&c4m6_`nbv&nu?aDZXgu(OhY?o z$y9lSIr8;U;Gi}(7~Fv0!cm1~-2lm@bnG;v22o#0VI>>>#~?!)xK6b3OsYmb!l7zO zC&$t=Gg_*w{to;Rx5?G3Xrb<$(L2+}BJ#&fs4?gj^TC>hxNbaLqA!Hi5pI^ZWDFws;@Idw%7u56pZ_;LJcANj2V;B=iIz>Gidw zQBa2ln)VqRXPW{wOtZ=z*96b+5@JR5)8vB@^LbeK<$9mo-VM4gt~Ww7qRV{Z^CP#(BtUOu383 z(z&w zoT-`@;}xA6jj-iU$JTr?|5Gf6^+N|k9WMAtxeG_Zsw7w zM|p=MXD{Iz)}r|=CstbXLOel{e=#HEvwWD;@~Q~FIBaALk-Vc1{*ScG;Y(Ar!%S*Q zys7c_;-*V5j&|2)8oE6rbFe`O^iJCz@7-hHQ7fRnuT8+=fmu>L!y#<_WEo|x)U&cO 
zGhK?pRT9J+4a8peRng5RLQcCN%@Pegot+^#dBG*dSlcRX@bBx+r!hf@GNfn~g7@g4oqJb~dj6=MDHb_ZCfQXs>uM6=bF=0TWOGd64PKp*a1hhd=WCxr0S=r&?yX0YPzU`6r}wZ$bv1K zGXG@a?qCC^3go~O?VYV41CSvTh94a1WI$4jYjZ}w3DnBL8M`P(tO-j(KXWM-OQyDO zJ@M3t+-Y`aq4?)n@XqdGqCB__brKW*|wptG_SH%&!g)>5T;f~TS+!5M^ zHL*Pb(~KsPCOB<(ufk&38>S{*LxEEE7{K$*WCU6awa%x72n zYRCZ>Zb&c}&%#QvZIToFWM(Sr`^dJJ!bd z@Du1>QAdLfmVYxSK7YQWi4=(Sfu(mL9DdsZq+44wywx!S-bx_zHykM)y0K3JHPj~G zbK|56CcUIpKL-Hog&LOQol5IiD=OG{Jd1fJ|JjHK9a^9d(bYXCdUaJ23u1T`tx^|2o z@fr^J23LtCb-E(Bwz1eFqMIxDLm@hGWpE;QE?emzk!FZ? zl##sh>|HLGEKdAJD5$Y}ZdMw<>)R_=Q{I=9=CikS*Gb{_`JTG;pD6MtlJeN1m=0Lm z-LTPYaj4^nUH+cr7-R_8ff|~>R^CKwzVf}j3>92`KH)rdUeRJLivS1iey#+p3luxP z9jQBLtY8@$1QD6CSGa1hDwqs1VEbn2U#RnQ5SchPn~{9=WuS_p6LhYwa9*8?9kkWC zn+#}a@i;^&A6(^Fpc_lbQ5jiq<#d}09qCfzax&6V%;f}(8YotxqSr)5`&qzBZB3?e zH;B@5KQUq&$rhr1ik&O6>i)Y-2V>O490!D`yW}_>MsHsiBK{WSHU23_8zAUhb*)|a zZ|&CK+}sK($W?{25|CThqKRC2vo_HdD=F+B9>TTj83E(f`((xVT7-+F^0nYTqP$VU zXO?vRGgn27gGz;$t?IP|-+I8Gf2D%oUk#yJNj#Qu;=JIes zcY$bBF+$0?BIBdIgBhgbS45Rj{hTR5XNNA4o3jBqI=zs!2ZP_%T*_r)9>StOL;aLr zkH{mTke`XEO0nK>OZ_ufozLgMlufNe;x_}eh(_YnvNc#3LVhet(S6$@|)#6?eT%;{$ zX9;viAkt~q&6}xmi2Chn$CU$q;S5&JJzh)GHM-ET0Pjdew5xDMv{Daoc>t|j8+y5L z6~!|mDAL4eD1I8EBwQf?x;ol5@ELYZtZS^$onU3M#c@y#G~aZ#vI@PVPw<`II87kb^Ojer|(2njEy&Fm{npt05^lFT5@#EDj58 zC2pCRrlkB0y|tFFt)_JdwPXGK7xQA3{5?a9&{Ol~FJCjHu0NR-a@=KvP_ny)UIA{S z)^}`xFo)RIT5JZd2laBnL7c1281*Z2WY+UA4GTBKNjg@@DwR&*iG7RAp}Y%ca)l1c+uq zLZdDuGIXKSE)+64oQVsCgeFI>kp>QoGlL8Tg{A{C;h7kwL)4r~ln(jwU*4EgEZ@Jj zoO?q}+xWlTbKPsT_0-;BQ|DB@QWs(up>Z9ZOr?O$^JtllGGM@UY&0Gn5CiJNb$D!) 
zJHXt=Lrpp~%C(FKXClBxi4d^S5I`CD=y2Rd(|HEkhlc?Jj^p8w@lYBDH0ppxhsH() zV8Gx(VADF9$>d4kfXOp?CeP%VJdUfbDXFTeDz0iOYAd>WwxcFgOwCr*T}OA;bWKgw zMdY4I-}m{QpL@~&dtyq(bjL*J=I+*3%y!g7{D_m$F5_ZKpA=1Q3M#@?%|`l}NLjZP zQT0*NKi?CjW`F-w|Ns3@{fVB^nKN~!d_>ARCQpo>2qpZqkKS`#^W8O*yJu?jM=2BA z8#__Ad$K2cdakELj7ee4?e#?QcHi9nVdndMiuIVRIN>$waW> zU|iZoL4t?Nv;yX#;9`T24%pkqMlU#yr4T~l&{?>=)i@dV6|vifSS9s+ioPM$%U?fT zp+&1&Qie~LQswl&lP0CkJ+*A={#aARZLBpVQ4b_6<20I%fk#LJM<678kXR}an9N6q z8DK^PcL_=7f&@Wf!6QF38|lzta2gouA=Qs|rRKQ!b)|H#TpxSfxLkhA=cK&v6hDL- za;Z^~G4*`C-{mKSEF%;g3JnYdm`<(_g9!|W5=%h|G)YrnA@L*`2Z9y?;_!j+I1ms{ zy>{eYlD@jOLaKPZ;hl+H-QiTS_ zI}{uTki{GvfM_HRE+dx%k%`S(&;=3*^jfB8`$G(KDLO((o29PWdk?w9b zDs8(EE!B<^u4lK{pIP0bmC(Qaecaqq$dF^p6b5gl_qGYrDNkG2uLcNU5`{DXo{SRV)k(^4h@e`38Bz%L@E}-rczvW*~u-&8oN@cqzq>yR4iar zP;_iERD~fgE+Tf~f$aplh+IfOE;blvB!q%JQie90c`t3xPE{|}g&b40Ni{Wl-JBuy z67{CO8Vi!;sRlTF%pAi zkuuy76c!g4j^dPb9-LCd);jfE%iKD(Y@x5TsFpczKW6Q_UoP^+SYq|9+@Vu%PFE(y zSRnR+^AtonIExbkk~^{?1PXIO!e9y6gdkuz4l)av%RvanB01l*RB7`1`NJwH{<)U+W5(ozlBXWV@0|bx? 
z#p{VmhiVb&IgAbnO{SqrWTu$dLzYq{sWf|2}0r*RyU0a0;4K!76+8kybqgDg1dYW3M-W6bhRG|t8`^)_)NA*st+Un8>i7JdE5p{%%jIx+=WF#| zs!rzK>MQ>A*6Z<7Y!P_FVHqkT9vF;^5sQIAvsfH9AQvpd!FXVNNVqEiflwWm41P!~ z^`doMjZ^Mh={a|+b#}i(=#?Sl?0u0TdX4I>wq}e1$vgQgZ#vR+bmR&Ekn9PpucI&K zql^C2ru7~=URTYFaeKJ#Ez}mfl*-Gk^469N7vbZ&lvmVuM9WA!dy#vUDajczBQNs$ zrbXWDeN=YbRSK)}#*wb}YGc$WRntTy0FtI$8 zbTA78tGLTxXlNMe_=<(I(26BtVr8_=dzW?@8!F<1=tXRA?vq^Z{b>MSH47; zlJzw^l})XE$!#$%U1dedT=7dR83*$?;5sxUEFj)F3MxV(!95%lU^11>K}Gs7;K1wx zv#5~BV7Q8BEEFg|V5n$T&7LDvC{y2;Td5VJr}Cs2*CSKN*~+C~i_JNg8WE;xgAX7a zECtQEXk=UxIyRo=k!&y!6hXoO>EOt`gbW6djz~gSiE#3KitibH6OrU5T)DQIXov#`*?K=6Q}aVR)w5KG~yrg)+D z5^4&yraT)%?NfV9E%)@39aHpmiY3NK)n;=3T>MBSmM+nA_?h{wEnXQJ4vu74v*0`^ zAS?{ZHB=fN2ty}ecg7-sP!CO~9UKIv19Oq=HSX^QiT$VpTC!FtBqIO-1OULZkqEFb zFcgkT!|`}bQ$@-s&>j;&%m|}!91_V<6vaRg#ZU~wAPj>b2m!_zWRNmaDxv~F06j`A z`RzkLj@Zu00?Me}$rqkm<2uRH*((k+te2X3ZBn%aJm5Q03RKzxi!Q#t!c>7ZG1u|e zD&!!@EE__CX%cUf_|jft6(KYpOd;z>G1nB~`dP-@=ci0LLf1gfj6-)Ze5z#fJWZf9 zQ?aq}Ai7yXqjYwg$9zZv5v!SGAiHtUATH~u`iGoK`Y1qDBN1!}a_DNYZ7Gl4Gd7v>31wd|jM&7aYjk`q692>?0md)*N{#v zuv?dQMn-DZ2`c6Ani(|)1$Ky`CE7-4V~N z`h~V}HSbq?Ojp+Qi469AEIt7^0Tzyv8bAY7Kp3iE-YDONx#~A6x#<##R?Off0G%ZO z;2;WQLjA{BY7)E%vYm=|7Zt%n^rO_I)vg1R^^L_+a8 zsh#bC_Bw}e*cF`wWg@OTYk2IPp)k~1QdF$~3b`C^f`fYb?bbS+BXE(0vhNoGljpIR z#5N3%_AxE0KV}0+LlR-xi&!}~43c@n7MD7bA{>zXb5vL8*PpS6+uD3&#agzF0Luf% zNwno0{{-HZM&w;#BvWwogv$U?D_TKuk8@Uq?m{dakkDG{Cu5$N=2LZLw%=z6LqY=u zO1ZVp7{W+6dND~j7)k*pmX?TBRQrZ7LdF0?g5>FUnnM_2U###QX5Yh)V`e~^K>md= zoLHA=H`|C3QSd3&g$-Sql z;b4bpG}jTn&*kW;q0MHBCoqL_v=T{_m!kHa&C{HR;L_^8pLEQ7srf@lo)qo!028<+ zxeftFsukIHEwFwg>fr9<`E+wgGW-i?9sg&?lrqeTjn$`WuT&@PbnLKM1bHr)1UHDa z$#1Ul9YblWVZ3ha;SK#U*5`9!GMJGEhTXAsG165_d5MZv0-=bqn>lNo%`3$@xs90V zCE64TnEY=qcJt!%cTp09Gb?aZOGw&bSElb9mjC1Z(){+MJ${ul0huuGX5LV;{CWCw z=RCj;NJr=0Y1ngug6iQ-2+?%)yK*dDV2ds3b0(_>*JL zeblcEj8FZZZM|qi#qX&;k=PwL!VoF<3Z}$GMZDO~xSF~|QcIf%OBZs)ylS%?pFH+c zUe>0vD<6KKW1f2@)o}SZLppE8fn#nu#E-uj*A)ulYt`@F7iwV^-BUc(k8opy5U}Nd 
zC9fXCiM1O_Ea1%+h*pFPV;mxzSwL;g-m4jnAFuQ!PVnx-vyaPi&9?al0P5(nfSxM1 zeRbP4|D}ykNNbx!-4D48Rvt0E4IT7P5U#e8W)W0})xYG5fX}hmSRXN}N(<(wb`Hl_ zVx1wQNs(upVQ8b#xo#s`l%Rt5dUBM z88Hj)S(Q?jYpoWnZ{lB)`Pe2|He6aJgOCs4b&bMjjJzm|`8Bbmf61RN15Y0`c(klu zPRkBmxAn4Dlo_d{V(H;BsyZ%vJ4g#=M%-ALFeCRs8IPA4(0Lhfyjn2>6rX0yz=?!- zz>L5S%#P36l9@pf`IIsvkwTV_m|5M48Cr~5GXo+Hlyhd)g{ywZ43dt_6yDOJnV@g@ zOqyXNLim^&jh&f&Glf+%6ON7gjv~UK*9VioL6(>CNaWeorHxuqwD`$cZ;?v9-zCd_C za7IH1XWyo1>C8fvd`g{>03gms&TQ=D3>z-3ok3^}&bhPt!dpLd224k1BCcTZOu}ru zojg;OY4fo&9yvS17ROf4zKwIj>>2acq#r!{p~JImU$lI-&>KFb&v0egeDsV)PS3tI zvGp@+>zJ57Bh5(l!)KOse1?#h7SI5C24@1THdNjt*^#l0dHl?i&d(C^(h3?tL2#Nu zt3f%y7iciFfu_xsme3UP!)GZpM3fqkHYIQ$ew0tX2{^&Y!$SVDZz_#x@FU9(d8#}- znT?V4Hx~1z{oLrzi}jx~6MWZJ7j>XB3BnVyn5{RoL1(*)p(%B*Gq4l7#Ci&Yqfh?~ z9qXt(JcaRAR}l)wgm|UCM9)^`Vx5x|I7xk@4rIsZ#)45U8m)(BccV)Ngy9K^$R6EJ z_&E>K;XobKeWXKbh5q`R!xmzj=AwCb8~aJ;#eqk4TTRVN+bUgfX|YZ;W$Cn1aQg^k zaC&lc2%-Z#y}6a^E(NgPU!D9+-3dn2J7&F(mq`6Iywfkt@J^Y@)Y}6~88|}qkULO9 z6Im7mO)xrQDZDPmSkl+jLMQwnil2%QSRxN|7b_gy1to1PjOF$jedT5Tf(Z!t{ATZ^ z3!JH}cY16LLKZX)!hx;0EV&e!vmO+&z_@poV3UCMy_NrN!ol&m)9D~f^qsKHZ&WWk*r7L@zK7;Df*k8zPCI z?1{5ToL>U9`>Lb#+0^s4xe`mYwyo+9tBy-ub|UxSAgUD2q0mB523+H8QAjw=;{@Ck zmtIrGAA`p%i81^}fBgcpeJz}faztVgzec{V5BSQC?mZlx%IWGA9>Ow(UC_{hftCw& z_Or65>G@{-ELBOx-KK4R{PbT37s59Qk0q#?il_Kh*G) zp^_;3`G_(#UXQ-=1o@2u8@`3SKKVWmt%Oj8@S)SLP}vgsI!4uTS8BGRcI(w3AP?zJ zt$c#|2dV1xN?-2#l-UCL34QeYf@aQrnBzx%%($IVr|6qgYp8gl@Wl5I%n*~aJOhZJ z9xa7BMY}4PTlPgBXFK=2nb1Ygry`|5G!^)F0$K^-rpR0`WK=fjQZ&^|!Y0#HT+xzeo&wgQP#X1q4!J^YztQ!nme;J{MJJ8ooKLu`mfJOgrvd3(# zc9;S3jXDzVbqgX(Bap&%pu$}K#J0--1@^WT%mtN6 zxmTd;Mm3nf6YSX(PF(~{vdA0M&SeR1?wGcE46brHs#${iK^D)CumnZ0jXyK_xLX6g zD|Y4cEk>P*JOiA^O2_~#EGeB`yj#N23|x#R@(E%x6<3uUC0QO0AjgM-*2CR#_|ZrZ$^#{;Z-J{m&jT=iu``p|nDFyAjSqvcan03sf1(&i% zFHiy8X&`xcJ!;ip(`(nR++o-J4Ngc9Q##c|%!uRseXH7y4oY_k=R^Glw$K>{a0raz z-{(sfqOUt(AquHAR9hp?rPx8a%OLnq2z1l%5vsbXWRIuAw1D}Cx^vdn;&g7JT@O(n z)eFPty+gYV2rYhTDg+f2%t`lYQPQnN&mR%*AaTb}{HQEB6O$D)8>EnakQ?ysAN8wOKyR+ 
zKOym&%U=P9{ykeV9VJCGnRTSZNQYA^#Zm!Ahs7Ir>ScxK0Rz)oDo$WhCBEP&ztLAm zup`09DX)vnX>nPf*oeauMMGb9F~W}74HXwZ>(N8hl|#2(Lr_s!N*BRyH;RKi&=KG` zh3n%DtAJAW@2qOBEw`{vHajmTxJw0%nHW^-tK~j=WbQv@?k+k7hI3S6SOvrD4`ypz z4LZ+tM2*56ktCg2I~2evlkU&St@khel?okQlLIGyd}1QTC==*x`b!hBSmFMAS-xv+ zDNh8hiZV)es4-CRtt-ntB9d9IjCvrHQM7T0CEtszX9Zq8#`dC8{>xVn1=7w%?-@SP zN4Km0DXUvUb#gCz>(&F@Ef?19l>(sbprvd*#3rxa^~pqh(51lv746sRy5M8|rLzaE z*0;QD^}0K!&h{J*xCJcjy!tcQW8Hu1Caa#)IJCxdnWzsd{D}uE*2r#RV{g~n@VDPz zmUOmO1G3Swv%VOkZ=8)92s*nophs)WyeLK>AtS@56RhYjbC@eLW2%&X25`+^PVH1R zo=R$D#nH;Z+}Z~)0UX;AEROzn7tS35Rh18f&=C}IpZb?eCdKhPrRHXX{4`)>WvcE# z+g8D?$aEwEG=q5Pd_>RKQ>0LGzj4h*I_!|ar^zN!?JE-APy(%a84TJ_p!N+Z*e+V- z$SuCcvAubO=5r;|3!61y018eh)gx5e4?Hl+NF@v{bJ%I1b~&G?B*NLZnWGNVM@u+e^S}UX zlxyvb3S_676qo&vK+1b4R?XNkGdJDqq0dbF)X9)qRQA>LwCjhX$H`4lS63neOBiJk z)%3F&J%;Hg6e`Szb>KYRN)LQ;Bc-t?;lEqeO79R@k?(6(Ge`IBlz%Q~FQMb!f76u= zf#t48TC1E$F|%g`Q5-0n)aJ31^D={S_~&#vu&G$gk>cLL(P0M-F^|3t2#~CY{v!G>V9Lzsi z*2l5DMV$gyL*&Jy(;!uJC8_!k9z`jsEXmhC=65X;EE`F38!+Z`)&T-m?GqtAd0Sux zd%)is&sK)7ecOr<7di!#->%WUjmS42>ajW!rs;nRciXTdH`lMLi-R%pajw=3U2gxN zZ@4r9%{-H@$RTd_4)yS;GWi$85A{rZLe8W9r-GK<=LDdq}nv!mvVJ<9xT^9`kK(CTq{A_`qlR{ z2J5sl7zKiBw-E#A*05Dn9 z5Bh-A3!3gVW7QAkI<@JRs_FBT7h`$N{Lqg)OvE%f;5qpfY3NL*ex-9Cy+JAX*ZXTy zkzcun((gUxX!{^5Q6zf4zZ-BL*WDZ$`o5>WnIGip6{+|8-qFOb_^fe&sfbYUqvwe= z%7L$MZunKSK1Pu_Z9CLF>)-kToo)>(!ICZ#7>=eju4vfqXMl2)2<~Pwi6|G-sbCM; zO_~&^XQgGlU2?-_7MRC&nd+%@BhkOtD>a{HMwx=gpv`7+1C_b_Ln$i(BE%UY-N>N6cx4 z4m?c(w@TxLa*`R9Ifn+Z|Mcb6ibRmjvlkU^*i5jSK4Ipmh^jQEE&Qn<*sGuBMDg;N zhV!uqVF~PhfwP`xXfbOFZy+j7Es8>q>oO@TfmL%IL_*|jyTQf11`h;e4@)QRa6OkW?2EA8|+ z_20C2wf(#QRw z)^s1;;k^S-`0Yt{Q#Q?0D;E)sJ|_Yt%6qBS7tli};2ZV8+>?>fBiL-}#)D7ItoomJZw(LUG7rQI;GCR|~F`4ppP{vg1gfvy+ z6CzU9^`T^WMV6Y)S~&_D#OE^M z(ud%7E~}+c!I$6uH?9w5@Y$o z*m6p6nH+l`Eo-Y&*!CJ3-~lwrT3SSn>PO;hLBbnF?K_u6$%{~fm=1@*;O5e;+RYbO zN2F90?m9GZYII8Y?TD{j`R0F>^=}dD z1#|(P4dzo~p!!=9XXzGD3!-9V1GC$JV-t%f9{mC_`UC4%46H|AjQ@CVTPCGt--Z9!)7Uk`>u?oxI55fBg9UMbn3~h2G%(i zWt|>oq86}GqIm7hbVeFZJU 
zvEi#3Q9Uz%Db4_vd!w0wAJbTit~A?owGXT_1vv{m8DbSE{#$*OFt`ejfwT#3lE-Ou ziw*+ZUM&{bPoML5^|BCTL4<;Rkv}&{;T|_h3Bf&yod_xB_Hyu&Taiz@P{U0UlzcEItIf#}B-vK7oxsp5XnLKBi^s6O@i6I+B-WHSw{xq5jeAg<> zQI8sB`Qzh7FKP@#saWox+~XbP&BIJ0ZuN3TmA&bu9Cl1c?}nV05;<_FW0W2nw#QXq zT8flQRzaGqt7fMu570Cncn9*Tzeoj4`jsmcJ=WD4BbKg=Y&&F?BK1tax*a{SZ$N(TamXw`=Yw z_AnpjpvKHUqn(xVyYg;m(KdmkSWfAzuuwPgF$PIg33pbN@GBf-qG&}f=H0dys;4P zj?nJT5Pf%8M}p|{xcfcjOQ{|1A{NuST$|TQnmQSlw2Bp^#GSB-22eN=oEFo6^I2%C zmhDb=|IIfM1KnzYgacN0f&{147PDTju8uA%ufFTGG*7LLqkuDksUWYQokT6Mpx5c5 zxJG1w)CM>r91dhm{@5|r_^`un;&CvHqJD5Q_Zag*hjT~LU40ym8P(x)pg9H$;gU71 zVz&^^iqhJvdasvemX~6wDWwu*N%N}89x2^Us+Lx@Qdz?+c6oUTJLz;~h3TX!d(k5^ z+TLD!`|9nfdpeNaUgt(0hGCv*WEeY2@04`TjE~7I4C9-JG7MvLP3E1JRKP3N`);}} zCCzdNhgK1zOgxI;x zGcwd-lvFOGvQl}qOlEj5x)t5(vR zSGRQJ+q(H~%OOF2Ycv=QF2jR4_@osL^$f|z6?jL>dYJRD#u*Q4D=hU)-f&%^FSglk z56jRtm6jm4-BMcRt1VNlsl2pA*EQ6W_z|h>p8u7 znR;Yc#bhUlzRWVo0wD|&Evy>^#s zt9~rxtg0%_M5=Nonv&wH?s68@M9rkCE9AuI6Bjtn5y4ELS}GT`#CzROe3mlYL@L6N zn0ikNCjy2A#605jRkTVTUtc=z1BSa|hI`oXC&{JXPWFxj@zQ;?^xGprRtcR1sYO)) z9F%h~4W_qI)G^0VkfFCmhWTI=caBM;IBLw1XAT8rUa21ml8QH7)ku)jsWRzU^*U}M zLG&~)t;(g*rPNL*nN`tzmCe-uulw)nzm7h^zVJWq(h8|>bTs>l@x*9WW=}#~Zt0kX_%q#yrXQXlwv7*Tg|u{ed@{-C7|$J!xx?3o=unb*8kdQ-gqU|m z)i?$jTkINR93O|8PJ(f?qy!8n_e~Dq$_*Xy1JL6RYSG3 zYO4LJf1cV&l_1v@#-VEXnEjgK@Nnpup^=9<6bG|in{n=BVwolXV2nZfGhC)tT0_0m zqoTHIhPhz6v}a~ntN97nDs@aVqpU(-EJ9qm62yuJ16xpWczh4X_Is@E4?48vJ!B4l z#dYu0ykvCDWUgxz);x5j?3%EdlB-KWpYv$E|De?1skBNLpx{Jkuzg+VW>4S0cQ2&t+nR0BTXQ*Y zp4FKi7PZYvOWR0k`^9dov^)|db@hiws-ji)h_^eW+o7FJ2yqRm4ht6@M1TN-`b$;r zm$WR=WlyWaEJLgf&IArDh>(GVgNa}=I0`_Z!I2EKXOzIrBEl!g~jle~rt$Jo1 zq)&P@-Xgv?skP_#v;o{=l=W+xrzI`9yxMSO9g(^S_hO2@OM+fA@e7RP!zvd+P_Pjq zg%5I_2kni3sw=wD>o=Ps#W3P(;}4GFK-WN9g8qz|St3RI)VsnEt7|WA%OLkH+Ou1V zD|^>Y;B6Xo$oVDC(wjP^qvp~fA3M|K^26-A7~i#N0`uTW2gOAhRnTSUcAnW1^gxhrluR38$uDw z+`FUL`Grj&W9;e;a-{5{5SlBRdj4O$necW&t{D8#_`q)7G`T0y#^5Ct3J9A+{O5Ih zORS$uiuV;BeRz?Znj-9gFgQOgp>$2M@m&UyZwO%K<7GE7>^Te%P-*amIgk|3;u`B$ 
zm|$m17Y{~p00|QaG0lq7Kn1^^)w4x!F<7;O`fNMYtk6|QflB<(BU~oFEn1fQh5O@? zN|ENNsh~(vxUx38aM#o;yb0!&%j^W<&;vw?{%O9%YPc~W+)GG8a}e9M5AR?JmwYjI z06J}UL2I%0$zzf?a_`q#?$7yxY)Si_E=Vl=nytML*H)x(4BSNJ%o>0s2|#4iuF<#e z>%ryW#6ej+M$p0FD=uR~*irn($mgfA;vZZW=eHSBUK8doA&D-xYA zH^9YIV_1=F*gO}rqEnc&UQ9r5xsoMhA`#&wT&`UQvy?SE1fCdh4d8v@4r`8$xd`?( z(AGw;wY?zuf-9o{rWyl-E`z__tN6rsF6iVo_Iv$&n-JsH)LuJNfs*GfWo}TFUmgtj zq6KBtQ}3ZafxCALfu@=)vhBHxYA7Eqhe@csC#uc@y-dRR0-JVQqrIm=ktm9_9b(Y3 zh4tucsDJxq-kbwe6T;cHN#X|dhUexWM33CoDW`us^D`Np`<iXjdR+?^21 zr_@l-t<0*6dhx{n2OoXenu-%mvYgi?)#g|RZu$es$0$+e;D>m224rA_q5P{_lkZu0 z2W6HX`chWlv2Ncj0_fV&nhfJCX0lyDt+r3P+zs3c2$dR5Mrce`1tEc)Kvkmu=#0tpc)UNmb0e<=NGL?afGxox=HcMzbmKx>A{s9dRm%@&L%j6G0A@TkA zFFs`U;VO5mSirJ2`a7Qa#d?`lUj@yBT)={!2@V9~^ETYutW4ZS-;eZh<}W;c?U_gd zl87_aSLJ!9?U!+3l5>Yt=JfNCYUxB|P$)42FOR`WdLXD5BDTDO15vu>tS5x18i65a zow1yjV#<&(^HZKVqKfT-rrhVfh%P0Jft7PaH>3 z3_7T!O%|JYCwG}K#{BhU025{I{;|&IBGz*|Lf*Gs}B6|L*@04e{D|MbtrxDXz6;}TY69?#4-BaIGq?&oL2b$7mj8v@sjE`^ zn1$^2L|rNxVl>_i<-(qV5{@5h#K@Bi2%$gN@a}yhP9TBN96p>5?9%I-^hbZI_&}$R z;SQcmbLdr8d-&z(`to^nZ16-SD~Ex_?Opbm%{|+}xkwNV4!rzP9utfFqm{dqpOkAe z+>C+InRD&!a?TUQI-&<-RGI3hCwrf@^oDDg)0#{7XL^w{5=wP%kRWoC|cEg(H>P}Vzv>vg9t8pOi$aN0yYpo9^VM4OC^wJ zctn+USb<8yG#dQnM*ECFEdGlGI|Bs1>?4wvoUe)7#H;6 zjMqgg{23&#%f%&e0+Fq>VXV9;&s~0FoW+H!CRYF^`2`BO-Et1<=p)S?V>|URK%xZC zEdT>pTjZb}7h-IV22Ux2_F4HCP@(ycF7*R7xu}csR#+Ie`)`w-`MD5hL zK^8p(!*Xogq^09UGt{iZ+*|D9P9$pgK~rxq1I-YwIoQrr=iA&c3C#?u64rs9N_0kW z(Vc!Z#Wo&I6=}0XwEYs3}EpY-mSTq&Wk0l@tm4vpj+rF4=fC!S@%_)t~=CMNItl z2&G&>2Nn`}3ChOkGW(KKYM%pD4W>l0o-GpS5Ga-UhC*$Y z%B}Y7QK?yq=)t5<9pkqLA4)vfCN{S$hSp2XG?$Grwi*zozBSh>GyOFIKZ><4pcEGc zVQ7^(38FVuo=1!&_P$Og=i)wuCn|ziReOYI`Sb^rJSyjv3{Uk*H@=heNa94N@wJN= zJPxm=IKcL5$t)1=&cBYvT51I38j>7lPZEf_S#7j;B=IZt2G zezMR)&Yttv8%Pkd^qN8uy<3X7eNV%{UhE1{mow@4>p(q+bJV& z3~G<}#LqiQO}qGo+*$c#EBzmfkBuB_ts*)ii1WuF^l9>JYCK~lgItV5=ey|ec^*=P zo6qsl+pcgxoFtUyfk$-n{0qx{X;9_F75xsJue7WNgF2w81R-1B%JU0XKbn{&eag$2 zUE=Z{2jk_m4q93;MA+wzfjk;i4f)}9)@XZZ_xi3g91#>dZoqY?rAtwHeMp;>E~EFe 
zzwlCC{9P@oZv+BqV7AF>%U~`7PDm(d?O*sg%jxUFwGdw%FB0tvqR$4>*-L2h{wEy! zGFBu^cKV?E-Xz143$otHEwLMq7n?|9*z`xjQ>r8ts2S|6LgTf?FDj?;rINQaYiq}f z?C&!Z^fE3)8vHy(QuYFvUZ#Tt;qTNVmXEj5A9xG*MC`28pl?dH;ZPBSzR`sBj+Z*A zEOXN9DHA~E^>`$W!0^35FCd)C_!bM8Qvm=^?1KE>+SYg8j+mxs3T-mLR6kCkzkkYg zyjl~gfC7NoJ*1e%FMLZZCW)Cyarl;i2o(5|?8&2rkh-gdJ7bnn_W_#14ZM18a6#9tHj1=g$WVZgMQApj@(x15Ou9DJ zFAyI@8!AZ-U-rvgVWSzV5Fo%E0{ESm17_S}OvGE@Kkcu!HTYU;nUH%VC-L^+xE;rs zh>*Y0s1Ig2JM&*R(Sa8&7|2BjL1X{MNwL?rRz1r`qG8EqR{NlCU~m9QiAqTv1&PkubsXP{p$KPj@UMilx?BZ(oQZV|DY{yDqKPM|riCMKbGdT~OW0G6|a9I}D8biJ}R)_C&3UCyq2d}?timSQP61BIHib*BP{Mg?Df2l{;M9f-UV zN7a{3BNM-#s7WT(jq~pbV5sD>r~gIVIxNJL7|~CI+DN0k zp`C+cmdGryo8!xsd#YHGN)=(lXwqtCF2lmSf#g=olxa+3@ax2P0S|JBw^fSof(xuL z-pmRT^D=5`v5!Ip>IgaFsEvcu!~(I&av*rdn-6P^oF@xF<~mE!2doYhmRJ!Y!_&vS}j=h2LT`DyVCIJ(eLHYS*AS0qwF^rm)$6rnQW z&M3qveRhDw$x2K)RP-~&UMs?}cWHO=^3&bf78c@+PQ|udPifH`?coUFz@+MMFsAA2 zqP0P_FA0{(C|@WWEV}Em^eCPZhgd|Hc4VRzx0}l@paGIC%>F}#86hfzyZ=NnaLD%E3u+Xtwi6K=Ja_F=qb^Yd#~dEEnKUc zuVWd^IEL|4;M}izFa4g#?Bm(u6z+*?zUt{1W#9x18&nGSio`SHYZZ0AYSw@z2O42G=o$(*2p)?SlPkXdQLrROWD zto^fKx_R?%y#}q!gjsiM+9|Ftoqcy+UfxTLjva}7x9=Vl2hDnd<{z+#l)_KSG6^K4 z$#t~ARA0mAlVo93pz!o0fa7tmx}AW{t00KyH+jX z=lfu&6Lp~T zdsq((>y}9mpY@B~FO@{2jUC8V(_Yn5KsdcGde2(>8mVjT1eKEEGoIQ`q|{0!)TCf3 zzgF{Ovyx#7>l}IY=2aA6%Jr}w(krAeZ*mul$rD!8^TEq<(yNb{;o#}mI`&X*qZnxi zl-1NRN(}sAVecZ2ug&)Yw&!uN!*46G`TX@BOk@NhhZy`1b*Db3!#8BNhKI}OBRVlE zCWxMHCwdw+zV|rFP98x}R=kxyQ`}*p?a=hTRrylNAb~{Fzmrm2;IEC}K>ozYw^A=O z^^z8S2jLEld%D8? 
z5!v=j2db~J;{zdHN92&Z#hnsa5V;=Z3`K(saH#baSkss-EhniEK)Oxfnvs^qh~@iR z*Pt%rdt_&6n)lNv70iI|8PxqFF*{O&aE{-ez4L(8t1M8$>9j*k)@aKxuxUi_wH|;q zrLmX>I;;E)oGg{8Y3NeAJpzohPziANAUd@8KiAQvjtmSk01{^YoQ6v|M)<&^Y@rYW zg@n|do5_-S3$CDum6~~X>wwBNgVCMGv3Y?|N)O@mjY9LCBDU$IUK$z`aU?(V97fgz zB8xvze2(5`#IOyFwr+b`%@kPNyvPj~h+_MXOa(>51+&VcN!nLPA%r4ci4{x@nD^7-B97#E0`rgTuOtVa( z`*e~36}O2^Szi3|Js``~g14u@Gm!8k>f=X~;9ox9v_PQ&&RlpZ4|*|#XbjtL8y8w% zz?RILzJO)Y@YMVM2Yp=n`D5OEmllwjDta2pXn9BZB;L%LpC1U&bz9q-xYGl_*F9rc zq9g^&&$@6c*%aqC*Svc&dw8|g6w4PJV+gZ*$DvF(y5q#H1;%b(h^Fa^M!?f;AYsvE z$2!j|Q8b*WzOJd(Zn-GR8m(O~Z(^bBIlpsu#^*()IOyWVpcPvJAG^nb>%41LBST&O zLAceGc)f!1Dnu}k>>~-XF8yaCSg+U&5aaA8CufD8O^WL$7!C2q)nW7xmef* z2!`#Me209`P8o?2y+~x4x%1)GHsKwZr1t>tI3+i52qoo*aOf%jy+UMb5Whu7| z;Zv4_IvI(PfrMk{CADE*N0a@3ri(kr>C0h?`u(jXMangVf zR2v44d;-(f?YQsnmB9c4V50e>5Yzz#;vDTtVFJ~VW;}Oj9338CT^0&&A(Hbr{Y8yE zt5oosZu$hQjX0g!Jpzm|*o4uw;dufFXZ~5&IKuJ6MX8sVK>7Pe4ir_)U~Gu?Fgc6E z$1IwJp9xac%}XGpjg>!(Zw+}Wh{1!WJff+}WJX(f2j+>Ls(!dzqC_z{N0fg|W8tV2 zY<*^3g)dlWnmgYJsaqe#WxS5(?RiEVE<`)fcatzW&jeHn4?ve%?2fQ|dto(F#Nx;m z5nlSTz>Cl}1c+KTC}dt1FwV%WEG&32mu$-V+KZ7W*#Z6sd#vYa>H`2`e$XEA`V(k; zF3NsI)V+LESIi*z8L;lCCd0wf}8dC6|_w?tAIBNj6v4@FWbpFj-)j zo#=%<1b}%B@-tOEZhC-uL1xI2k#V*|0J99tI9innR}!cj8FB;kVu5YA&l|v;;2^Dp z?s#n~UOHC|g^P-Dl3y!y_4Hr}1#~T=u1X@iob97D20@Fq;vQj!fw`JKS$zfWzPWBJ z1z)%9l z=kcxZ*&Cx99V>eiwAM=p%Ono$gCTe%-IKNXf^8AH<^CTV|LvfV)9h(OVU>MmNONVw?5%Bn=W&p8b@qsOm11|=Lp-2Dp@nakNFHBY3;~-nCCmr6aKqtd>K*teenB! 
z=xh6#s(DTn;nJ(Nn;Bi0X5=X+i*@z=Z^x zgkZ_m*u~}AhP<7pTa%!-W z&`34JWdi)p;v|f`k3w#F5+oLHgO31DNB%>D7LIA&8EaW%$+0Sf4%zO%Mk;plegKI# zG8}wzu%F9gY0>K#{6d4&vR%Z;F}rA+fZ{cOmb7MQSV-J%Y0gx9NRk~0Zthw(zb2O= z1$`|*yW=hV{2+a3*xiL)CA~s%7|1066}a-A7b(-ol+=H<0K)C}nM>?sFlEXQomdfK z0sm~ihyFI6??1uyAA$x5=760K6DSi#5L_VTaK|c}waZF*T~@RIE^YR(X8fx8h93fioGoy=1-62Q)cQzDbR0 zG6(^B-oEuYd#-S_`HsKlt90!SP_`P4Sf2`fdDqD}ZVKb2#+Va5(1GRv$eg3>ez%OG~l z&>nNA9+^bk+QB86AXoopwtVL&Ege9TB^F`Q{H`1NGl3_2H%yFiq!LlG@kneRq&0!Z zcDcrH05H-YA*~{Cm3TIDNNB@O6%VCGRMpYAg^&Az`w+HjaBfB(HT}bqzpTD?)>7YwrG7wh4Njj4<_@abZ6x5 z$~l!Hu8AP_1{J78{8IGA{z&UC<4(f!1H{m8AfPx*88Ob6IJ2lji=`l_m9A0B}svW`@ak` z$&j;2AtGd;#nZ|r?aE2x-DL^yTxO-(g`hBFF=o|h$;T1-b`zYR=8u0iz!C+Wf!tvc zp9+bu8Qys1;+ZR+L9U+S@^R)9S#2y1$Q|xsDtR!i3hkZ4Kom=p?i2;YIkG(mM7b@) zSXV-$wNOrIpqr`0^`+9AH8mi~jUwA`Rz80>Xl#}yi1>VoPYk5_QVH+|L8S(>ZK_;g zB|h?IVLERYpH{5%uyt->7NPFFUuCo%+5CyZcVh!2U{rvxKlgsYwq?>MFp zsHCD<8Vkso2#B9x*>9Lq48fg^3AV}s;(rgkiW<>Sn1tS04rHAW>nX29XXA98YUSY? zM1X{%#^hk_3`9JmRr~OC(PE*4Iy=x}>+|x%=0gB*YT$z#eMbdVpky8hRJy(<;{%X3+USrSTBJ4lZJvr#<~Kn$qL}EMuHEx5yb!hx|&9O!^K78AQa?! 
zm!-**faX27df?fUfJbg!gUSta-^8wa3qfEwbQksw4eRKial32=GDeUGC{lR`=x9L) zXY(ANreBqQilZ={;pTMe3p2Dm$H`1+5dg0;Xr*^{pr9TPssM&0c}Pu}|8fGC_EBBt zZr`ND7Z_`8IVr@vlh&53jzuksIcgb#7QWOU{?~8}xIh`VH5i7NB!93h1PYFfo<7Ov zL=XT*>Ttp@uQbr5Qe8ziT!}N*4GcnM7MmP_4YyoM%PhH0&CyVOuM;%+7PASuv^tTB zBo2xi)oFm;GCy)rqJ=iqQeV#KS6%o{J?Qo14G!eKk*fg53rq+GLg#c+3U=e>j7#UU zA+``*BUpyuWDa5tY)iAnjYk^pG{X0s^!0|aPBHrX$AI-!zy zCB}&)DYIv%GUUaSHDX~S$_$qQq5-J^C5y2!N#-P>JW3I=Cq?ECDQO&)UMoAyOwn^F z*ObIwve=OaLVv?M!usuj&=Y`y`mWW_M{-eG^~BV|E!4U#r{UI-9qkXq>l#jkM2K}+ zS3jz&OOu(E-FF@cC5ZB#7uAbqmzGP(bx%u5L*Jn)o*mj9-eK99-5sh8(RQ{|uMgA6 zzqKY&9f(yo97#|mDq7d-7U*40a1`AqkOu=h1IeFQeK6v1`ntx-mJXno1YFM9!^S68%cW?`fEYS-4+JO1v@gly^$_?$00hLB4GA{dD`hb{SU> zwGd5Lh3%^WdLXh4>1CfaBStt74u}u|f+HM40&DnJwk9Y3jQ4G{qRWSN_U|^*tSUn`F zUZ_@vrg*e8BL5$YT&zMX+$^Iy>8X}g-RkwS1CjIwyULWlbR|Q2k;nUixV!CU0T>3^ zwUT8#_SEw@ER!;4@BASgTuTw z+skIV?6!Y@v%hT5y561%k5H}9)cX8^&}^Xp`r0JD!h4nJvtsQ7v>k~0AOi+9qI-$m zE3(o(JrKFR#GeWiZnf&BrnDXqb&0B%x>w3XMXjRniY+|)>BlNN#LCMm-`#D|oVtSU3*%Z*{Tu^DO7%Bt(Q@;jqJ zYLo+ykYh`$2VyNS7zhT%LHY@i zR+3}WBxLC9_aJFxO^WW4bKi4Zp*8NjISVyA9*9GsKM+Tl5??kgNU-_7PRshU02`oyn~jPA6o7*J%GM6Vp`Fq8k^+$$?(t;5)#dB- z`u~na95Gi9F_+k)`xS)(Y|{mxVAr)SJiGOw-5&j~RYNQ6Oo^%06w>MA8Lx0{x{PG> zrEkolC@Fmb)ll;ck7yg-kPOSv4sC{JXihtG@0oZY>J2|ekcz2j_w@?z=~TFcw{{YI zZk){bM)mFK%Ep|#?Uc@j(hUZx*+p`&?*q^=d-NB!R#%AWI*9S7?D8|#WmCv!7Zd$ zj{NG(*&8%QKeNS$xCqZ$(`Q~Xj62p${jq8@F<2zDOzs&8iN>8j?Fkkb|i*% zBu#$wY0faRlJPgud5d7XStTW@-E~L|;Kao$Q zb*uh}>?pTCCURTL6g{@XAmcG+CXX6HA~v#fnEHva+H;H*MUn;*tmEu_t!x_lwIW1E zSihGQ_5B!LC1e=qP;*05rW6rlZkSSaxRO@&pd^CD7&?P&xnuN<3o*nfLyV&3C?k?5 z(=<{P%T71P=_#tlD5i*X&2farkx=0R1quYig$fp6(BJ?Bj>CZo4Gj}4B!J-<4Tu6q z0}B#dz;G;>69EGh2!spFu>b+G`Kl~~g=0a(1PkYZ59lYd3KLYApuz;`*AWEX$j-NK z-*;=_7SdhVkmuL^LM(LJE-YtpO#IanecxC7-YRNWe^!=FBF07U=<3 zXGzn!_tRhFH)Q|JAy1E@Tka1Oef==T#39TruYbZj<)j%`A(v={%61Ma>q7s`8eWy(1Vi+7uHqF!KwgwqM+v=Id@n-BbtaA`u zEVhENf~~AJpMq_9rQ3LpzkzxHyf5ru?H>m(H@(v%D(@00cgnO|p|0t1(eeR5>Ne_- zEz2EG3(r%l=mHkv?vn7R09WQVwYsBedo0l!+%!r`O}uxWRrm+u>v5Y59xhj+2}4C> 
z%T=;22CHTAA0J=Y!IiL#{?+tee$=OYuj}{8RgwHH#wW9JSi5w5E+5N9$)zzh9cy(S z8Q{WURY;@3h*S*07Kf-vGWml}9u)5Q(p@kHbXA)1vOs2aXp1#;x6$Iue^+mh7wf{K|zV~eLAN9jG$w$5bTT;S0}kxC^2#bN46Y=7BO18{Ve z3`#+Kr6oNzEo^pn$`WT2Tp(Ey5LX%>GYvfnngJeBD8=Qzh(X5;v$||cG4rOCtW6&~ z7hNv8h1Z;0rgt#*gbOK`6*kD^PAp#1%+4J%VfiSyohe)N4QcKLG8D3ve$>c}cJfgW zdWBZGIMNdoc^A)u{M`9Q${VYfaj!u==$0p&y0WDakz8%(F82?CnZ1GvYORN|{?KlU zZKTQ_NQ_~)(lW{XVdEr}4~8UBo%;utRaA?{KeUraSPs*JV|Zw2c@FxuqsfgOVB}r5 zACjgA$M%uB1X?qDZ5uG|*J8skC))pu(1oNAKTTE{98vM;R?0TKI4WBNo85-lR1hz0G zm8og>(V^ky^~T7oo?B`(VcPuHXhy1ly#`sWQe>Z8z(BH#R*>s;ULzFmlKD)&iKq#5 zOzA99nt3{Pp z`~=rv`K$r9;{NpJN9s?8fQ`Wo6+>g&Q#vg_(rNPZ6m)D=pcl6QHj=gl)P!gYFtf-O zfJ-%T_y{lg_-Q{_kL4s_!T9rs&a1Q^tQRZ)h@1B#QLQGmtPJ3+Yt*U){RmM08G-em zQL9{Jy{QLkYFwtLU{3#jqsh(J*-B-BW=B3eyM%ly<{@XV4-=SWE~ zm!QI2GoI3L=By6vChoy%6pw={KPxM-8P3-V@OYyI81@no+scCB3d(FImg4A~oCBM? z>jw@8QGjfvou!V5AmV)_;uv4U1^>Z)R+cqp?2YAs01c%yGdwG%+c%a=U3EBVn|-Na zVyU!Ru<4jUG$QNBUMAvz54Nw$$6jG!Y7X^VaY-G^cyZdjK#}q*R7Z2<=VmlXZuME> z`>f x(Ibn&kj`E;s7bP(;HKOUa3J^0eROpKED1#kdzUQ)Qmu_H+#sEu&M4vP%we4BT}F{1305o2pva zNO}VVCsTHqGawy(!s;2`L%5sLupZu#swa)#ELW1rwM@1cHfXnNemm8DnKIE=!%h83 zsy^=a71%_!PL+4LPB#j7FT4*qJ@H0%5_ukK0$zos5}z1D7G*L`{fe)uq=d?q`QM2Q zNZd3uB(-d!?`XXOhagc^!poyTRUQN)Bv~$MBfGhYLcW67n0Ua4!5%O88tpudouml^ zZkXVA{&D!ZDc`HoW?Vi9m=zNrdB>w$IXa@YOKK`E+p_`&%8{qp@2PPd_*0FWBi;9* z6tyByi)0o&n21+yB6$2QX=^6RGZEk{$afk74=Ga?r>AyFKKmu*D@Af|Wmb8hBzSKz z+qSVU&c&U&Fr(=E_OJu{RM+;TVC4(7}Y=(h*CB= z^gI0%VsJq($)F?4KuOuT~Lv!OE`f^tpF2M8pI80U?W|E!0OQ3`I(iGg%jgDP_Q&Arb0+WQQmTEZ zhDnx!|0;QBoHEQ~5HkySuP|u|K+Bz;o$lhsIng?f-hOU|#k$s!{=DvOjuP}yToCo% zRwk-`?J1FbW0f3X0O zOMEfKwf3Rf6H~)F_2t8x{t3stuD5Hm8n5 zqRw)^usEV7)v~em@2QOO5nz(aCz1~?fV^L}9_t|z+hiG&SP}vvW0_@hkx*M}5xXO> z#gbu+SIjAB1SQ=vMOUrq)jUiO@{Oh8;|d7T^Hf8F;@(Dph?2;Tb^`w9196_nu6D|Q zxw+T~j-}`FJFwp$wPB|}v#8xKbXcGRGMr;#ltljb=Cx*>2@m{m3$8!VE@BT)r{9jN zJwKz3D{yKV5DjQ5ytx^z{-EgFv}0}p6c8w6VdoXL$}yINHuX%vy9)6tsD{JlnS)6k z;h`?M3gHw$1+~iDGk_!GH~-c>3A0C<3Qc(z@mNiIn;XmIDdr=Wpf$`YE((94OgQ3^ 
zlF7F4Lz^f51#sh=eVLdv;@-jYG5^}edJ8isCcqgxDYad_qZkzPZq_^%_yh`<6lQlINR)B2(HaB7n0 z4}4b9^iXsCC`Jc^J{1LiY)D_tS7|B=3kNm3##HeB8ik4iO9#RSc5Z>wt;wHS%*{Gj zM__jDWBI0=g~SSE-}C{k6^S>E4m^puq1cb`u(I2Tjist)DhW>D(&2I-7ZqO+w9JEh z)Iv;&1pahV+PC(8`Ca|EQb5|6n@K3ILUmIPF7?*{{83lxNl7V4f!Ba*4$N#s7lafo zOxiJ6ftxJ4INl-VII%h?)MTl*tMDiPcy$L zbigcoHyS_>ET^j|uVuP@wQ4SOO#($Gj_qdK2pSP+wmQ=Y5y(=5NcnT5qU9uGCOLw3o9m|wmZB6`@{|K0U6DQY|-R7U4W{hzy6llM9rzgiKXWmyolj>EgYOg`Gy#o z(CIiiR*lAdZ~EfPTE*2$(;Ll1kLWL-5F%pCorn{p2=kE^sN z%WZKe%Sr$}HdX~rMTi+q+5|5cNw^W*^4gHI;7U6y5wy*-j7{qn9ew={8on9TNqW>G zBpS86<7t#CbN;UTD#)9J2^qOFq^KiRzuSt{n))P$GIg+#kLiJ#e9vf3?Jh;bi`xZ9igj`2)AsXQ1ptPfFx|_-=8M^0~Y&2tejyN(|mml>yV#sZspuYsqWSs<)FX2?Ia#0ZO8$iLKP*`Ch4l-p#!cNhP>0ItA?i>M1 zN}v;18k1IM0E5Z_==vol+0yl?Dhrips-ju2Ffwu`*>3WP&KBn&s)3to9$d{wy)S=S zrOGa75l0|`qwE3tRzSI$b=DHmVToNMap3w#pWi`;<{S^^KJhA>C2PH84 zH&}3_b=qcpxnd<>LQk?-u!%l#@P@I@f*TCUR%*_Y>aN#ahN{d6xqw1jFKP3j_@o6l z#H1Oi;Z&fZKszRdLmXV7RJ5?uDym@wdtI3S^Rh90gt&G2ERX`-aqSwg-3w&!Q}a&A z{k$V?Y9otBjp85xs%#Mh%vMmV)1BwD*i|WiyE7P!plF){k^D+UqGj5ZZ{I>e$Chpf za$(V4XDJME-xY)YuVA%`>`J*I!>O&Uf1YWvXbR32TA3IOj*h&{>$Y=lAh{%( zrZF2#`9D#*7Dwt^bzbpo(^s6O7(P}se}KO`(}mLK0HiRixLzW{=(htckVkX{>0Cp2 zcJwt28d@ouO%8}Rw8%m~#TfJ}ysq^YhKDSv5@(5pZLYX8(sajMG)dU-@pyj zCd-JYmT=uT@Cz2Wzrp4PcoNTqIplNCCPj8l(Al=1_N{!#RpW3!*H5t`rs>+zL8Pth3K%ztP{ll*U6L4M6 z-y?675pEl+Lmvz?Q$5XlBfxw^$vF5*_Ih5~Q z^HBw1F*IcNgyJ*+Bp|uMjp56~Oxrr)9|!pmE7)v~}&Nx{7A4s`v}3X{QO zaD$tEz_kVk{q*j3E(&yhfLW+GdXW~9_8MAb(~{zX7SKmS z0>SZ-R0jK>yNm8A%*Bsfsny>TuN+(JG2_x{!2y4;Vb136Z(kv;giJIz7mW1q#-llg zzrzl2`Ta?$FWX47(clhiORqxeTE-)i&E!RnM90O{+H$1_{hN@un?~SVMgcIT4SK7A z=2#(7K@>OgGcvlCL%8|A@5-kbWqV0Y(`(lAfd)&b&2i@;B{mwqo55v++m+S3hzBd!9lsz zHB(>|Qc*^3l*E7tDsxB`hh)LCvM)(E4PE#NwvE&>fPGE0{OGF_>Q9W79v@Z>v zziShvb25SiCxk zlvgLNr+6ue^8AWI6Z4dbmc!}B)gy2$L>@eAWHSK4X6i<}l&8t$rS5X*al5$Wl@LBc z8%T@d+=A~6arOl>7`1{-dSTP}0GCs-+fHCYkukk94~c0&9^c@M++|RctSR_h9~jUn z7tLaC??%nI2i{qPiGNs{M5bk`09h6#v5#aB#W=d>B&3-lUy0a0u)2fHP-4z*GHNZ~ 
z)^ez_drVmF&>mnK&`d%{<#R)hLXsH+sBA9j$r0JY6B%2@DzW{ioKM!!=zn~5v2+3{ z2mU#9vK2Tcu1W$@iq}{D(U2t>R#45-jRJ~zLTbPj$U?d}Y%e;LK1zHsrkdxN-AUDi z%%~_UKfwSj1F3s!-5$-G1xwk57za13Ia!~gyTwcRFM4V-j)Si|Qt_b15C%$dQjQaK zly~DIu0TU4QTmfvfTY%!axFZnRwh?(@@6o(U@9H!fE`i&Z-R522{PVBuWp8*Zj1#X zvB@)a8Tv5TPWMYjkB_lhQM+`$14>*dnoarQ&ZwH688i1(ww(qeJ~GT14zKzifIf>3 zuDrkrfMLD)UZx(+9&n%o_8eUA|A&ANl})WLs6*Y#q?U_`0@YA_!pHB(R)w8ikh{4j z$h*QV@+??3)S6^N(?oW;P|DsW{4jZ|Uyy7k`6p-dlqjf0HS<&67ZKi40Ctdn$x2aA zT0n^JaIc5-8~ib+p^j{bK*23KPNSuglMGr@V#Id;*Nd0$Fmu&JlZXEL3-V8_;3J1T z-9U|7g5O?B^%u${#dp9&GcZn29SCfhQKvhOod zVUPA6I7qfAwGif&x1e5O{XPiYk|VgM`9c_$d=KWG)Y_SGrCP;J2OaDeqz0SyV8(1< z02tM}NYFZO$U@S!dMDf(Zp98UGan;axRlZ?<8^;GXz6yGX1_sjKu1JJ^C0xfc6SBz zg_MGqPA}Q$6eOawVFu`3Z7*2PfMb0aW`I-v;^`L{uDywDJFsO<=ny%u?md-R9($ez zr8e0r({ih~qv^x~{)#wbdIkV)`5CF+V4y%g-e9JKO_~%v@kqRwL=_aPD$(jhQ9glJ zS}|E>0@TI=^%2{h=()55++;;By0r~hJa|?XnerilS4wyR!u7TPj$={;3!6fzd7LlR z*(BxT<*$Pahc!{&_6xUo3YuC<3UtKquW=C+#f*K_tHgbjkxwF0T2F|A zm%$kopOZ4d2Tf}P2P_oOFr=Mpw-Y0i;l1{0m@qHi?+!GLDGQUz0w%V^%|B`k z{l#Ef?7WRdRU)FR$yim23kReIVOK@ATU<5t491obE$pnVs> zM_GCy`!w%M;Z-LmD^mlk>n&=_6g-$z7<8p7&4>+GA(iQ{I(W@#7%a;OL1Y!{1*0@^ z?p=p$2Q@Jle|D;F!HTE4SiN+LS?$O9YtSJp$KNk$(Ywg$#|Bz&vr+#|Kbfcy7ND#C zQ)gDsMm;Z`+5~sw+tfuJ?HU(Ml@qGy9O3&ve)Q!|dZ-;^>qWZ+f1Zn=kZ|&zkcEqP zWPd~dgyMr-@aci>GHgJSgvn|-5*Gv8T?Mn}x$B2PgZhD>Ggix+4oDH6RDX z5_?1fz)p5Mat>lIn9&W`O;Qj}m}Yi%5s zCNT0#fG0g09{}TJ&qgr2-5Y@W%K*POBHobi2Sxb49q&YR-B*Bnuw0Hy`xUsUsaBMH z6DZ1z&Fopghco4UYOKmp3MO72y9hrEY^@&wEb#lu2LE0m-7)b9a5(*FsM&U|>hyd^ zSz}ghGhMS;vkD>Uf0XTEOpSh^%8_Whu&n)EHi(OYz8^+b%WyQu8jz{QFE>XEI9=pB zT)ncY&LrG{vjQ84>ZibLk1$bYUFwzuAA&I@AD*p$(n!%B7`)2AL)6PqB`RWyIO(Xx z9d0pg71E0*LO`m}YPk9+Oc;~^SV}>3enl^#nrhzodZgmJty_oF^vnU8AO=JbpaBfs zdyWVFPj-{NVVIl)IRrrjG6csu-N)lg4U2z)#dnq(RT3S1Uzq)i%PQ4TGMoEmOdNml zzMSqqT~vg33`|$K_DR^kz&@&d4eTpg&71diJOrIX?3>fUrbo1mG}_iSkvmz2e3;zr z%7v2t)jCCxpMg;HB_+|n5`_Mh3Hrn6BZ>2iGS9c2ZxLiQ`#j9T9GH)jz6EOL+w;r< 
znS*^mT0Vik>6FLIdlo?!N8Ta}9REzd2PnfKfYm9;jqt!(>BAFwTY9KVbd`eMFbl3gl)mvvaZiIcQH{S#Ve7{mI#G~yn zVN5=Q)bk~BpCMliY!uw;90X^Pag1Xcd@Q~~Fd^9M^>0!-B`FQlLZukV!E{zF5*m^R zAz9{56E1Z)OoN$O3ENtt;OcsnX@U^%Hk3LRx+YPSsKUK1>1tj?DF%@(!Hj*15P|+Y zCSofDHiGS(HpugY@e3Aa0U@oR-ISk)ror#BLJ8!%AQTk_L{B7SRcfc-=}Gdjh0Z}^fTlB3lF6155i8!KRZ*@DLqtKtJib9drv*Vk zfr$f}thGHTk9(6IIcewK;n7_J10A7OMQMmZ$IiWKu7=4_rs&iQW7-@sh*)CHxe6u5 zmK>BLIkwc1kmMKx~5{ zp#muiM6XP4#FeEuihwjiIN6|*sIrQIUuPM_7i2=pRaO#Z5JGX3nHj0$(G-~6*+fs| z0$hK?M&|&~YUg^eO4JHhc_{7>FTrY`9o)A}q*_ zQy#=9B+Sc`7L?oe4>}3|oBa`V@Li4JyKIy4)xM3NV*{;wk5ahC9plgz4hCP_(Xn#) zio?W-6`Cn5G$Db7KnbRL6lWhT28BK2kjt`LmeUCM$`o7pEFOV z1EQi(Nm5HlR8^-zRWl||Q6BQG^u!61k84$8&nVQZ4Ca?A;!86F6Gt~U#xJX(`NuaZ z+g>q}ToMT`kP$V6DT1MpLHsdKyrJ_mL6Oo_JsHWAp%8MRz$|;A`jFs)&Md1WtYgvq zX$PLf@0&5UA#$8x7Z)y)qVgu)*1ip|L#9)dL#+8guX)`Va*(5~AmY68im^RFZ z{nBVLunY^j7m!&jM<5DhoMkX@T8RV#>1Sf^R`AG#(6u6)p^Kw(4mlMHAq3)8Bj}hh z;({X;3ce)9H_FNp8zUHWcR(pLEBtb5Dg*<$Kn*KD_2y7Pr(GbqisCb;XbKA+mwPpW zH#iB_kT59qG@uPeT3GxLD=E0=d^#mZ@P2gK2hpa<^9V{$`eunf>Of!60yFW7L7`n} zSEW$}2knC;X-*X5p2r3AZI~iTxPVu6VouE2+FD?8B+R7sbcy|FE3;#w6rt%VXo(DE zz0JPsH1ln;1|2MVG^3Rq8w7VzxSAVw71W-1O7L(e*iN>W1qGYO$b^En^G9?Hoo{DcbAtH_oYGR)djBkP>cqJ!4L*K$k7@>OkR#CIW;t436)b{< zjSU;Qt63{M*4hx7Gb#F?-BHZYjE1*cRQ^oZPUOga5&W^!3vuOU;4_>^zDZMWtYoJ*+?QV91!fHH`c@++boKV>9^ zoN|&fw{aZ>iUZ<3wjq4YpwU->;qK*75XuLoj^d(pQX&Q?CW)pK!GLyBhc3$dP&`tH zc0&7iCr0~Gj<+VaQj>s8=q3;9Ra>gHp(YEle0URPKomq& zG!BZ9coe&!53$f@cyx+fScFRC%0^@@av~xZvdCV3MI#Wu!g{zI5%F@n61w~thGheb zTGYWpN5VPgS#s?DMkr`RzFo~&g!W=fcjOZ8h3%>KRd#k|xi)8+{q1QuE;e54=WS>t zo5p7>Wpg)CI)Y$GP{D6cAu8tyquVtyD?|!XQ!$0J(re|cUx^+ED_F4Z9|gG)qL%Hv zdlKGR_&oepHG}t{6Gi^!dR86pg5$Kx8U-DfE{rD54iad z>EVTQQQm`=^Whzx*~}5zEvFr349qsB6NsoGA;{4Zu@KI%t0pv{OV#bkG8%LQA%i^` z!xHKOaYFIxWV=dNP$3)*oqXK;IcwwbILi>0Czx(6%r>EXJL=UynQ>hOM;{m8jI^OB z2y=D+MWQq(|K!^QLELHL4hBmQ*fEg%rZ$zXyTYF+yl}6E%CuIaAJ`XO%u9hYvP2+* zBM1dkh7wlCX9C1p+EPlSaf|zkP+mY}%Qhg979{8*fUmjEw;zKkD< zhiLc@!3j?G)~>>hz;KkHle&M*jk*6s7%rbc2fMHb_P`$OW+!`@S!}(6pn{HYS%%^2 
zYIP$FRi`oaABWUO>UA7@hcLPXbP)n=(hzzrIXHS1J#&6SF(>*>Y+@3VdCY76C7NHU z%s*sewN^NCadJc*`INktP~^X$mVB$Z{Aer6?&sUun&7Ymlc?q2?uEUuKBBE}TkD(s zLPL&m!#B(D?S$~a4Xn^v&q9~v2x@Y&Dk;YTV^9?sgPGtQ`1T=$zZZhFT2t(Z9dR2g zvTRg@1ge(mtKHs)4Pu}M4FR~HyeY!V9(=(IIX#$E~s-}&KLV{w(Wj?;PX8!Sd+klHLDFm^UXfX z_}D+nw?C<7>F8|bmtXdhShA4cu8w5e@u=h9M3_3#F=JF?&j(@2XP}a&_HMXn=s1Ir zLZCpx%b))lGr20Ccun+_vJ}r=4W~)TLvst^iPL%r(nfX3PI3A~JuD2-8^Ixfy zS!vYC?y$Mawby2#LVR=BX&4J6Z6l^ZQ5-0c3Q#RQfl?TvNjRrNHX6>%HHQRs z!qJiC0$ovJWfes~T)})od;=Xq_MB3J8jJ=1b7U4_LuG_l6puf!69x*Ms%5H10000W zAOI*=*!Hx;sxv~7>Wmy_B_&y=ZOpUF>s5kYZ)D3vyIKoEBU39K3z=H9%us0Qf(_ zYi{9ZN+@DxhpZEX(KYaV4Z6!?U96xSK{3Ufz2B*>Y($9ogqBz=qPs3=mNl(%DJ0epCAWv7G59)1ZO?U|m+ zO0GNUw%gTORHst{Co>Kvz1vHD^TyRdp6U9EoYGf`&1sFwrSNy(Xnh`OV%p=|dSu#j zCecy3+$KTjn!P0s4x*ZK9m97<7`D65R}-ISJX_p+*%~=v*zmDP9&XT_5G+X0M?Z##J5WUSQ^w@PVc~UmG~i3WMqCIx4ibU5Gthbh0Hk z?RQEwk7;RkP9oxma3&BVl?Adw1{>O}_&q1b-R5N=qe;;9nvrQ;Z`>%W-bUDB=Pce` zWv3Z3Fztbkx2#7bU318gEq&O%mj++k7j|N_;aDp@xIw1PjU^cKk{dM4!l?6R;$@O# zkB+u|92&x{d)r!Si;%C*Apub8vb?VJoe4Ix?Onr^|7QCfy>|58Q^trLEJHo%nHt!m&&z6bXme~QnI0kc5l?U zSVUJWbIZ)=we7(dMD3Kl zk_tSh-DKl@#qe%wBUQ>Jg75Y2_ZT)Ig z2{cbf2J?rSKNr+w@yLzwlR6clJP`LHf`IViH4xp@6aSs=<@7&Nrfq(`D%eMd}Wm_(WHU&YIu9#=i$VsA>`wHSi;ZN0VvJhB4@nQ45E{K~?g z#(+0Ozb`{Uq&yAz3FRV`kWI3s4HrPD#x+y0gZWgD?^L{n=qSgVpdRSM#gz2~owwD$ z^y6=4d$wqPb2_OJbt7G8>dKBhtW(jcPKe2@<81tM^CXTuLz`oSjU^7Q`LFTCnl{$1 zyY4P`&6TjJrycS~QrRY9TnCvi+k&wDFR z5NAvH2l2oON3}2R8z!yzsDYZl;`Dmbj~88q{N5g7=FbfBnI^dwHg6Xx z^e-)vk+LOodm z_;t=Ot1-ng7ZU@}#WdP>GVxlqiIgXPlcmmPGmPTY`jXZJ5}7}NoNv&EI<12*5%F$P zCS1|8p~;?V2h%o94+cV7fwu_GDWaC&X|HE46zckb=5Hl3DImp`0qLU$C7=Fn;2J{^ zozm-nGWOuwNJ4~DWinX&R*t~I(JmbrS=LaFid2BxD&r7KDG$nGK9y+jh13{;P zBk#j^+m^q4T6JWtXuz9hY`wT3&yx}e`9E`Y(UvrOv0wB#-StBudReVl>7gWTpvf37 zJ7xNZ$1NjE1qFsQtU&nQB31f0N+Kqda2_&A!9~3k3ZNWqVyipBio_0qe$VSG)At|5 z0hDBa{{c<^jS}a(*{-^}b}OJCLg>I{kf>hidY`BvmGz`SiW^*QPxdbR4qYJ$)&>Xv zG&Ua;WyUZtf){t)5n?0GDfM`B4J+%s1qz$PZ8smfBrGt=y+7!QgNF*}} 
z;~(loz!RO&vgf0IBQ}#HqL}R=_~`?hNP26IdMLa}{X)s%F4apP(SqXA(lUAkw++(x zLL$gRZsUy-c?${b*gQxf-YQ-5DG}4h?67Hg86W)gq(AE?^}DM5H9JbyUWV3#M;6f} zc_%iDmzKM*&LRWkQSg)^Ld|AU0mQa&SaLpGgZzR8K%D~EL_8t>jcgRNVUM_mPS>v{K;}p^V5jYI1Sj+=I+x^4*A*GS|d7 z08iiM!&3H@{WK0{6Wq9-s$`Gn(LbW@8M=>njs_W>uj2D2511dV?82)@R3<>be1K;U zNkHEPH|Ta$3+923DD0k}V3!tlIoo0E*)gVUKs zn4QTrnSexfY?lSaA|`K+YFs@ogwk`zVOq!x!yI3e7;W=kMn*NyPrT7H8$({KU$ZWt zMMTOT$OtJS9i(gs)>3%O=+k*CvQ;`dP7+ayb0+8Dm^Q*<_bWOv8tcz#J9o~8<_Bx( z-@IOP*);yN0OKY-Rfv96q*Ga~F9(B6J%t4s>Pc$A2vGMfK^9-lv1^NNDn~c>+zm`J z`|c!5z4I=E;yP!9@3XKEv|+worzv6_V-2-DvMua&SUy2w50Y-0O10XCbFy9Eis%&G z+4|V$wjjJ)3kAld@^Lq}*YY(79SSTPL}p7+ye~7##lh*I;dDze>YT!-FUhsY-5rKE z05Q9-4Y6|sWO(gQ5~C>u+^ zb!?6pe%F;>!+$+`BrSiU?`}mnZ-XT^HgjlH0QGtbA*tFK40-&^6bxAQslnAZRQn#j z8!PPC5IW}F8gknw8YSn-cA*{EhTYkz!2$UCw)ah@4Y9{uV4UWg-=5#3IIx{%LGf3-zP4lxvjxuKX&QD`GC5IzVQMwnoJenfuM1|Xsh;gFg7#L% z@wZ=L=P_oSEFNhDpZw79ocWW{V7gNF3+x&SxkWBdaV%iO=^3Ry z7q&QFE^9x5(e!`0gpf8(qe3qtVwMidBj*vr7ZtW~drSatrxyX-2^;jQ8x-K;%bW`l zQMkzLoJPbj61`OLpIONg5ttNALq|)Y4_e-tv2Cg}aPMeNC}+_&gTDlz)Re)e|QWoeP%Nhiaztl68J1U{e4jVG;O za%I4o`p4W`EkIC?bM`+0f(PFI>>37g`+$v`Eo@CrmqU~GY0;F3`%DI{Y4WpT|3}1c z`2fjZi9%a76ykdl<~{PzvBbG4AHsXvO9q**R8%Q0&G1ZGH>0wOy;Z1K|4sE4^Hr# z7#XYGijJXx0KsMF7!0^ryxYOBOOsFIt=(#5EC*|`u9M|?6fPyN9trf4>7K$}I7H0# zP}-#oxn-|cJTlNoI}{)~4V>WIn>S7|k;G5Jqo8LKuAq~SvWTuI%&m7H&;)&Q$6Z}d z2PWIF_^aSNUBP1M03T$#BdU2JedruV@!FZ=mz~C*Jt(ueA~4YX;y31iM^D;mWXs(~ z6#&j0+D>B%Z6pgLZB3d88aO*X9?g#bx3#tNy4iCw!?RpE>O5-D0t(@G=x9$FbU9B) zznLXFAOL2zH77?uVrSdAGuqiG->?f9Og@kB`v4uWU6zf7GD$@_;Dh? 
zFG%{R_k#>r7I#D?F}L`BZhF^eHVv7C`qh!+NcaB2p#v;(&@qQd$Bc(@b_WeBAlo!f zk+$*~klYL%+ZZ~CmoB;=+S*~rhb*>~iMIu<90h(mB+bF=FzgWFEa;9*ppU(h+Z=Ij z=H59Gy=9qOl4Kd1H;(IDwpfNPPa|$$QlMgm+%>RiSV)#S;cV!|IsUmjRhb5$vO^9# z5Uoe48o&M3@rkdHf>-bm>{sBzKJ%@&K8AcD%!w(v<_cC|ESynXhgq1+8iO3AL^b{# z-y144lt>X`5@@WsvJ3o6dWhX&v9o2Xn~f8B@Ej&^J!}>ppRAdffCkfcap3*a==bcf z)OGx6FlMuMU5cy+YwufwY%+1XX=+Mqr*A<aekeGT)xATMRwG-0RZQ1Dy5gR>Gzj8%} zV_3(-D^TaCc4UFj{GtJT{8~?okoi{=0tqKqnB-?nmR9uGfOJ)ecGyotl#OMI1}p0; z11G|^@wd(9lzeuV)lc$FgL8V+Ut%Dy(;wpb2paAl!FU6_J%d8BvVBd74h@Tt zGO!ybDN|zGuJ|F@Z>CTmV{?>9^T7cL_ebZ zDko0psltoPH2}!f4joqS)XX6#oO`BUT1E$ehs@;!4+Fq7byBT7i_u&S?~^@#2%e2* z6)@ZjhG9j77}AC-_gV5$JB!@&M~B0}aU6D8P}Jzuz1|-Te2>;YWdgxa8d` zdm>nX-uzCX*cfkqH#bEaV?c8dxv)vK4JLUB z>8ZyLgBKM#TAi-r`D0NMd0M%Vaf3p5QSQpngCX81jx*lEe>Pw;qQx--A8BN?P?2cT zZ^|%8ikI{pV1^<}%Xe=3|c;aQT(f#{n>_OfRvj-i*u8J;}K019u7(a zuIaOOGJ`YX3vEBrZB7Rq(m0tdfE9vP?zEG#QV}?gA5{{cm zgTpl85!hxvk542{T9~vVWs{8CyR>e24f`C`SX@tU>tU)x({Nmp@wJX;uVffSg81{d z(xw55de0aA+Oez`_e&NH3M3kna^gPnJTrw@!V3T3fPCoNdXX6XPKV-wh zkxj&>q-jeEs$S^c)0u;*pcx(XZk zj;<@&Mz%JFzjfMx$sf^g9F1YqEPt zya7^3HiZ`|KrSTlF84Om`GeUW=*;{6WnBw$KeGzo%a^%tv1pbx!|&2HhE~m_muwP_ zGO2L-^bh4vSyS;$feFn-J|9hV?2jeqx~{<0cMKM1$VF4e!xHaH4Kj)bo;W^0_}juF z=cN<5=ecad+*&Llys@^N?Z@9pUG=NmA>*L0FA0sm|-?g0cuye4`AwOpmL#8_?2njAs@1ZV4wR3r+P#ifbc3mxU)P#pFq%4VAdT(u*S$-qzFT#2;n`_(UO1WDQ;P49WZ2lFN+xLfzyz7c zmtUBc+Zy;Uoj+(gHC|wADL$UD_^>)TCQ*|Z6bH19czn<}D#yh_YIoENMZq7#`;DBF z%;qYEe0xfauXA0R(hxwWQ9AW^K#nOE93253XW+18c{v*EEE=#xPZZ8z1@`Uy$MnU+ znRUSjm-j>tF+0%2o}%==t6F4QMkT0kDQTTl@MpfXc(0RA*{75=RjUv;nNPci;Vp-< z)5hz_E~k);NnB&OSaVF*Mfj}npiC~ueF;Y?Izt7sj+v$G?kF?Na;*&WdEu{B&wJg> zYQj9mP%hkBTG3w6wTr(yuo|M|iWHoQ0t{+knIW*uNt0Ef>MX9&GV(MyhRQ+CA6(8h z@fRx&MDYE~bae9IK`WtIvMYvSFm`E!%l^q)J|AKR63uSzx(S_O9X39A%nIbt$ObMp z3XgZ^nt0^BY4AKTXb!AT4}OQ54Kiv4Wv__FU~^JdwO4k)oeB3e=;Rw$Spo|?`+63z zJ%%hlihO^Q(Rg zIpHUjGxP~ftzGdRdSmI6wk$e0!O?^h`2*I3RO+oYD z#jv`na#t#HLqj^eQ(+uLj_!8?6E4M`rS@UO%8^Ld5W;=4KS 
zP(2KTz9%*bcrE1D_RT5z^;^OFfSY1U&RuL}fk0>h7w&j@Fe2p8KoSR-%_tY{2!M)9 zE4cK=roV%rIGoj=oCXuZLHB?^n;R`^s>V&Mw4sbSe{&E;9cepngJ+^a?U zFHk4MUnl=)KP$iO|a?BxlUIZ1N;>T(a-T{r%NU7j3N0vt4d+c)oNCu(yvAD%J`4 z$Fuz`#Q8xHUN%jB>rf!^Rs(KETle z@?s1PYA#x|fO{rhW`54u*{$*06@08>;Y)Ve<#KK9dej_bB2)~Aw6^N3yAQH+LJ`iZ z=P$a3vPUS$kPSkVQWI^~^b;{SWnPZvd)oYNKO(%9oXpN#G^pp_iQ_XVyFYC#WH`g~ zFx`iMgRm1x?n9n{1v@^yzuOlU`rPKKj*dr0W)~^>YH+UD6{dD43t@q*{4{&8 zh2Nf*T#XW#DUN35LL8{vH`Js}rY3wLg+ESF(|__4K86EJCiAic4C|+2=1nRgdGhuc z!Xd&a@sOR6bz2S-EtotLj9ThyZX;YM!8@iVGZRgT4ymJ~-jxFgX_0b^e4lx)I-}pu%RQ9t8tAZpPJyXwh zWT##;1fBaK)O=2^M=FfujfBo$O1Rsk!NNG?JtFB|5Y*3<1-)@wwzX zj$;YJu=zuTB0MaGC;~nfj`B_d$#&nRH{qnc6S@m!L-~#}YEa$F?C(J@5|+6#@%y!+ zQEI1+yhZ-IDaK|1IpV36CEBQ%VRa0@|zSw{A476{FE6%zMRt zoQhy#RpD7YA-X}c!24$H!tJqH#y@EDp%{!2)i6$n6(d30V|ly2% zk#2`^Y8dK=eouqqfhdN@5jZl+Y+<+`rYMc>bXkHnG4f++=M?HgD%ItrZ6rZza11FI zlJ2Mc{8}@O%Lii^A1^u4t*DX7Hn{sM^&Zx>1i#Kpj_gqL%NEeN%(GG|lw8U&y^?mP zc0fnKi9^hE&xo1T7POVvDX$KW0#qi&G7iulitWYN^Gqc}h*%hPXbWdP$gh}lP?sT3 z-Y*~f>tAFi81fkG&hHg$`UsSlA3jWivqD+a)ZYxxbE-wguID<@T`F-ul`)Hu(6yVB zE*;;+4CrF}APT0Bi8)pbF+P?TuejzBp!W%#+(aNs0_pcll;Bb{eO^{6YkUhZL=8z) zf}UcF7=d;HhUkG#kdIAP4i2NL=vnY;Z^o6a zVlnF$%9xg;a;!0xPZkZWGayWlVb^Rv65d0H@yD6mTy2OP9OnO&tpu|;`#6Sij6lS4@uG%x7~*vT%$P-jG(uwn7I|h(kQI!)`~4Bn&|>6w#;C&_EKyK z`k6VZq0C^G|9JHq;%VzJfplzX?_ff5Y=ICLt`4@SY}$I{@@n>?ts+u`*HL-EzD)2f zX5A&RV@l&EcnfjA9(W7*P1|<0QbrQ)Y?J4`sMSjN;G9bYlpGscWw})XA)N(AyBas+ z3e&Zc8)&9zl!@D7qaZz==H0m};AaMA?qd0qEfBADFSbabjF)-*u9ltLK{jbXwh%uP zUhY2)*KDE`1MA&=Ib-#MF$}Pzcomu0A{KG3b3+$g3di$#$q!PpQRDY^a_i5!Jznym50U(-?KL9ba(X2~81oR5y~hZc-5$se zG_hbxOgo&0zmZUUg1~sehO^0OA6NmNL4kYV)OJp+b3l{=a7Ije`Og(i+?;(W^#`Vjp+0s$ZnCN}oC ztukP)(1$?mTe};fq$6drneoAK zO7SNET6|jPuVKkJUE_^)NdsR54CU_rY(Sf23zUXRlu)|lGg*i2FFT<9u{bm9I4y(f9Z9~LbgW#mr(8_;m!D^J11X+K!c?R z0ou1W=J_Drpy#9D+YOKW8BKZb%LV?5ox78rT+IjE5K*Gk^7T1YWeieHnK)lPvY|j1 zUYG+-l^tDh$SN|N=suu86UAc-G3AGnZ^Trx5#+Iqxw8tczOjta-8nLJZzx$&)MWni zs*L%r9Kc2c+&uO0un<<%Le~K+BI6yLMYpliI_B{%MPw36Gy%wp#9OG@UR)8fDsoDb 
zE1hVs>`vlWD66EHdKPJ`a}E>|)r-O%PByD5j!~Mzq7n_$=D9(yR*E~geCf*LoR7yK)|*E1&$(St z(bljG?b{0f6`vRGPAvx65%tGldh|IIEfJV`Xo|QUE6$W451TC%Rfjog(+!&msu*{x zGOCzj4qcXnxD;&{Jtn&MSNttJ8>JYs9zBqFlmLQ;x*c=Ipa%!N3bW4FXDTERK7dTI{wZcr4Cs!KxcEu881@3jGka*i+*> z!m|=65A{Zjg>6?y(|J!2SWhU5?qE1Tgyalic0wUSlL<#BlvYZla5_=!`VZr^H&kwW z5g23d`Sf6^dh0J*r14fJU9>AS(6{L1ssnj6e<9WqC$kR0>z)Js2)Ezw-hy_SJeCuUE<2n`H4jaF0 z9z;cpG77WJ(02lK%u7hL{8IHmX0&SZfe*Z^G#HC`aEof_W!=muDo@7=eKX@03GN3-i(*DKv*krY_D_ZdV_ zPL~ye3L?j>*r^pqJ;w-W3T8zVgv=7{a{!S=*_nw3EcH&9SCsI6Q3(iwG_hg+_8l^ab)aw z4xMI%vWlZx%8w{xuTGkx5PJGM1`LpILL=bT>#EZ>)8hVw!0eQ?j7b5Y-53C^*GW9h|3SNEejtpxXS;&e;tW2NVnH zL$Ihl4}@)_LF3GeCIu|e+YyB_>(E4XNbb|G_svbBS~f5s3D%vXq)3)Ki>b2`nnK7F zIrO+kofX=)75zWN6mpN+)Q)tSXqOE%BftI-HmEed$bM=k`ahRaRGG` z+$hOYGEul|&<2lAxix1)r+L(szLcdvp#J}Vmeq2GXK)6Lt6fL!yBrlH%EQ0QjB^n; zO(m6GNKgr(h;t^&U2ESEtH8jTvfeX|f^PAFrhpZtHVVw%hSOzL`h_O@~mSzzoG0UUgK#j%$#$6->-6A>K| zaE(L8hd^QOdpT2wP2-Gt6H7KN_r228apXE=D`T{=0E*MBN3G(}a! zq87cVh@wb%V^+-Mjzq2@#5A|s-@8EO!fWGbD8xinBn zgR%663+3NHzQM>Azgj!iMznz*HZ*Ec2z)e+m4(%>RVD4!s6tImU8;QXsn&0XVgfxF zuaA*%AIT@RiSQ&$WB!;BOysOgLiNTbA=rCEBfW6;C1;AWy2g}A$$lJR&k zR`Xt@1K=RIkRpq6A^0dnAs3M$RBj$Z82Xu*bt?n)PBVmsC^K~Aaq#bQ_CBhSD5JMW zV_-V-^#S%(@8*hA&7_bWU$L!8>^x5eZ6Af)**tgPaFza<4}afNC&6FjNlBqG@50Cc|u) z53{URMwnvdSSb>WTQI7sf)r*^!x~o*5fK^1DM)pyx^KcB0zdU2ylOoJFD_GlZmR`V6nhIOMUW06NLGtFtnp+_ZipdQT*!K=7K7|YSvVR&Q~ zPfoXIt3%i!ob|%BiD#jsbf{t7nE&|OKZG)JJ&H%dhYDYM<-TM-)Q~@#u#sD+AI;|C zEow8mja3U9rDjtr^Q>dbg-p#%2qtpoOsHOrObC@8StkH+y3fZa^a=C)Jqb^#6KZ%B zQDd&!b_L^^xI*VRZH4=)T2-NEZ&k41nU_Ep0{z&>F99L^;l~J(5O^nh%zT7KLdbsiv*u#%0?DqiiGgdFIRF2HkZ&lT`a+lWCW!})jpl~sw^}Zn z(aD?&k4bFe7;{YG-W6MHaR?EM__y41&rNsLV;$?zR1;lv)I}RzVbY{am;UJr4}~9o zIKpFr>THU<7ukGuYalwKYM57prADd}RiTSoJyH1+g@RO4)YR0UhLEvLeHGK-Ug?I@ zU?&~&yrdFRpNUuVi#nAY|CU$6tNusj2I&RQX0pZ`eB>)`@Q`aa%<*jqUg@?mKewH? 
z71pr9zEZS9Y?wJZ8iMemqHI|sBUzD68E>>GB8e!9k`{}oC@gqoJ~vDzF@%XZ(=eDk zld%s&Nsow9m76RKRTsX`Ky>l5%A6r_s6W#Qk^u>6LlrtOI(|VjDiL~HNA8ZI|C+F7zOc23?2Lmi%;DH4VP*6bv6A%D_ z1Q0N=fWZL|9weZE0~9=PumAuC7${(Yg9a#AzySpcIH1760t+T^V1WP?Nbul61riv5 zKm!I17&KtefI$NW4Hz_F(0~C03LGd`Cfs6YY(S|cMPBfF82k*TbD7PY81E@F`s z5fKp;afXOJF;P$`f^OqYZX-5BHXKt;Rn=I-#!9ofG8&deluuf(d6CqxG($8+xiSqa z>?ScYAroVErZWtusSGW1nK0d}WB!PL72&+h!n>!w{4(>F7}L1O@t=v}I-{vD27?q9 zLqaGLG7~b5SJ}-TA#6SF#vLcgG2%3tS(YkWmC4EvvxV7H=F2a$T#w_&nwWFUkZZ*% zw)|oji+IF0t;|BqxhH12aRHq6eM{=CAp$g-<>8mUgVovH}FY8Ij1oKLn z&*$~;Pua>glX>FB%Xq9q>O{yp??fj$(Ft^9%o%aQgA?pLvXYRp2Zki*Gt(F(7#57A zX`lu%AOHrjKs^%x$;-u&i8RV#6h<+|AY%*wfDi%z0Dur<24;v>paV_+}TtnNt;KG_$my)~BQqGO@DIvXbm(pB$Am zNQiB|`i1JHL0n&Po>;$EEAn1-vuU=6`bkf{RsFe0CQfK@v2xh>zH3v5u++#CMa1p# zB~U;I8XJV2q5L5FU9=xI397%@2zuTaitk||wm7X$A+`$$Ib-5$B5tl1KX@`P?hbwW zsD57H7YGpHcY-L#z4)&;3A^96&qDTF?*o9bH6?z>MVo2yn7+n!MeapVB}~ ztAX@rPDY@-)OLNrh!5RlYeOgsy^#0W{`ckZa~*(Ed>p-KI|JX zke73xc;oX1VDs@g41$~@wvC44VX+}+A|DiMLb9w`zQwQB7r*f7(MZV?=CH_;gv4u39sa5i& zYL{~i)~-mWX%&_ss{z?*Y4BJBaxYUnZ)&*l-+m03bV$BYAXk@9{DnHk_wkemXpMDB zuhA>3C%p!yB-E z9Ut9_&5$kSMI1YIE@X6WHuS5u{d;j zoMz6~XLwqhBX9nCG>Tg}Zp)$yYRvy4S2j@!0-8g_!_Ls%;UX~`>o0*euFsz~NjEk+ zN09;n2su>!)o%ikdVg%>Hx{#*0i-6F5F0-_2|M-BoA4(Ao~8eRZ37mE48I&BbdyYx zM#c`LqbT|R*NUZY8*`<2kE)^u8qRDD_Xkxw{N+X`sXg5dh4xpX&geR*AcQ5c#((02 z41K#CnL~>cU-lJbX@J0~i(~nPt{Pc_+{L(N99B0d(3M2OcvDW|7Ir(Q@K_M63Fpwl zsn?cX#sKZ~Jb#DR+za_5nEFT>5BCz&%z2R4}nREt3N~q(Lt`G1poep zP|^Xg{xd#uhU&LPmONBd``s^~hz47gAMj6k8Fs{x#C9AQG|tNZSWChgtf0fXjPj>5 zQYOnZ-cr@jhhD_Ky&Tes#f4@7!Fh_GkWg{nR1jA1;Xcc|@;MQys{v5tQI z+9HX=qjap-ONb|+P^vvZ3Kv3gx8q|MO9+_@N7l|X;E01DRQ8Ms1qj^3=E{&Qu}}oc zQyk0C$X+dkIH4B!%Ep1oE9^qUNU8#o)WiS3tedWuhQ?H)laB;GksRpJJQ%IHHF;zc zYV+5q>fAK@qEgrdJ^IUxO;~Mty3|qt)sIw&a^>L2=EknRtgb8ya~koE%E^@^w^&=! 
zC^bD5%GDC3>sz3<=F$;noo}utjUzY03dx5OGXK>0oJhy@(1b!a5KNKox4JT0)E5DI zg0-wZSMypY-S&VlQ>FFvb8QS$Dw16Ep4UD+#OCWkaIG2rCFFsMcG5t4f?AngOUe({ zT9hU~40&S2FW>PPnhT;aIEpcezJz)!7tg6vM!!Sqx`68|Xl0cm?O^FSR@Vj5IA(|n}jHZhW?QqN& zf#GR9N~d?hR2t2Ss)990$jE%l5{#%K4C5=c_s%lATy+LZX!P~>ZG1btKSv&ESY5p! zH6N5OziLZn{XlRWjx@V_O+F`r-+mI*PVV&2a? z?*0(q`8r@p4x=;?)$C%2Z1#PLoXEGVjqZO)b(ibb+eUxJP?x`^j0z$jZH(~wPunRd zS}i}QQ>Ns|raV<;~sw2m$1o@!}{M20NjHLQKAvTqGuaH4+aFZ3m80FS zkt!{bg@TqD3OD(r+k(UcIVg=RKnHZm#s;?q82&D`Ct1`)!tN9`fAJnig&fKNH3w%t zGBkZ4PaPG5*%%;lJ&T2fYyoC+OGc|LorShU^v6NQk$D-U07GdU8~0FBEtqs_d8_6f zF#(Ab5Z1LgMVzd7NDhcRv9j1`4#ZP$AfvkND|D97w5-*dQ5TR+)gK38u`EvX9Q>P+$g%nM5Y7xzHWOq~> zm-@!sSQ;BuUZ9f}X9@a)$qe4aL|jM}U~*M1iPs9T=Yr68gtgd%ITjPZWJU0tRx12S zn+5}p?GS@o#o{PfzP(MOqEJdHek24O)*=#u*waFS0JX`l1=<@%i`1>u(_hz58rn(p zPM$Bom3_lF5yke?=$2@UygFQzeWST0E+7}|Lb^=gBC;*9eAt`U3ODj~y9FntR3Dmr zI#Ci zaP)+M1B_WwVzVV@A#OS@`d_-o?l?u);p{vMmvMt8qAl_auz1=lSzYHi^6glv3>1T1ETkI~ojii9`==OkSxO`4) zUf>E*-`Qch&p!$#e0D=a;SMH#QOH=56UoQD261gNwAj}2Q_XR!q}_?A3pc@&2Ict| z)1kSHLuHJhYes`wDc=H~CJ`dX*WfQC`zHZF@9+T*?$rd{y@mmUeqpHv_w{8HB@VL2 zS`o}mgxY3svtm`>nGmtBWB^$}roTKaL+O;vrp@m4pAo&)^}e>sfmY3biW>)F@X>ek z+gl4GJu^|EBZ%k&sKDxOxpG=q-b-M+Eulz~xn=lxI3ZlUNNwafKcgq~2U&Yt%{3*Dz$Em z(Z(BTCOSD{7+A&I*;U5CWI!>(OG55~2f_*^7GFe^&{QM@<(2T|f-o6#?E?gE6h$>L zCF=qRm;vxX-rHvkfDuNS0RhFpqA+O0Ky0Fd5eMV5ZO+xHB(w=vrjj7)xzuuxztGh2 zm-@t*IXW@e7E7@&-h>k2jdT0yZCRYD0NYDV&|sqKg6cgHPeSq$T({LItA)|s#|a_2 zVq^U};-Afp`K)~*6EO=MZnxOzCO;Q`A3FBtfY3*X(Cz- z^;YaNcEvYKdDAa^;jD zM>ApkX-3Tm*Gc|xdbcTcKJLQpFa%Fp(1oJ54q3J!*C z&-)Jq1XKZPM>RAozz8T6cJSto9ovs{nuu_Upi2ME{o^MnF#)noc9Y%tUT!Fg2><{9 z001ZzX#-{hLIdX!Awpn5gT!lG7KjIhVwK?qBrpyrgMpEXh6)N(e!pLneU7JtmhR1^ zSGzJ1`gQcfq-PgCo+)c%)GE3&NpY*A(sGeO=E>7u`>1t~Ou|MZA0s?c;KJ~+E)OFh z7YvOPArX1FU{s)Upy<$!2A2jH2Ob0rMh3(hgVOh~Bg5@<6s>h>nVw=(WyXivHFYva zVY8{8g$yfg<7n$V_jjvdO!vH}N-0I3c2jTroVPt=QVkI#5()^pC=L&ihYyPerX=EE z;WVI7Ab)@uI9N1zq#>Z!!O(zkfIxB*2OcAR;6s8!8Sr>b1vF1Wa7l(qJe}XLBqg$uyEkjVR5W)xl&}yta4;|*nioX9PzEe!-O(#^)Y7X 
zxH5X~PFiaH%}qLt`eaL;5*jvUjzQ@^&m2Zveut@^j-5{I=T6%-j;ND_1%(4E z;=sYj!^TELD@6l|h+9~YxG-@bA&?m#$q^E;hk=ElG0me2h4_fBSboMKv87P1NiGw;U19Vw8 z;*fw&>Y!+lpyVQPp%__U!~;YEq@;x5z{W&Eh=Byx=s%%_XHXese1mHB%0HeC%9Tu6 zkG4?>Z6(9==`fwbev{l!^(BOmaa%3LzFL(U0iiH)4hb!xNLWAsB&5Luf`h|>OC1Rq z6&NBHMSzwO5HOsC0X~WZ>g(7!7Z+q)G{breXk{h z3J5_TBx@j^q@#l3pcx*P#)k+9COm{i3EE%?bm7nlNidP1B@C|?3Eq4Cy2Xk47)HE^ zPpd(OZz;7>$=FKAxFh9eH<(LhO4-Q{(dIb^IU8)F{4AAv_T#ikomGg_8epKIGfZ+s zLD8;(kr<~6Xaoq#$ObKf5MjV!=}ALE9Q>g`Ac4P9okgY$ChJ9K#j5$uV7iled(heN zv8`TfH6!hGEG53PG-uE<{a?jN|DfUJ1*G>d5lLHsfQ51BO9X?$0)__z2E;~4$Ogz! zIJh4`BOfFX9G8-XlM6hT?)a{|#Lud3l&>9xP%U%QlZxi3^Hp#Bxe?`B2qCssOyqVx zEr0J#sunUjJQmJ@k%tXlV9*{00wE#9wKe~8 zEl#MH%3MlBg@hc0;Kc>u;}JX#7F?hVg@e9eixX|QxF}$lSlk|NymSUp>yXIwVUsyC#jxJtF0(c9kVGv>(O!=(~IqiQIBJ;4P+70 zB@A*U4hKR)mVzNcpDYlHOG862P#}5WH6|D9U_fD`08czp>$Do3b5-l6$WKaZs~026 z(_49dYv&QN)%b4fJP|EDd6FrF%wDtc0FJ;RW5R$!c32o_5XTBE%Buj?;zi`T`MS){dPu3eWWNO1qv(-LEvinAr4jf407qc3uy@UZ3UCk*}AJ8!gAZ z>1gR~+;mE1vC~XMO6RoPdarlydHXW9rZOs}lATvMy-n(Knf<_m*00|;iU_fq?NR^V z_LkmA8FJ-^msxBYE$VDqS?yfaT;0s&tQ?U3h`x?)yy(Wr$IjoI{_GaRev6^9rKX#M zh|c{!?bF>Nq3~sVU>-!s4o37N|zc%OMLL4^PBBk z%=GqvOAi17)}ovX73j2><-Y#`6lYeje!q_CZQ-_bXY>zTJm>4XWX03hbj`Dz*`?z@ zBEBR3Tkg&AEPmelewK5~n|q@=2*v*WS+*^i&D?HTW!-b@>&?YaL$uaDM74A~>Ynnc zXX*jtTC9;RikN2VjLa9(HlnUjbLa9(H zlnSLn(MW&-N`+FPSSXi^g>tzZKskUi0&=-nG#D5u!G(m5l;9-#;rK>0Qm}{sAt1u> zL7*fA<1v)RM8ZKTAcp3(RSF$E^?uY>!e&igQ+JlS^Luh4`elj@v2k^|PJgfdQ^09_oLzvp zG$_PJM#YJO$A#<0#)1MRARudUC>A&<8hkW#D2z1N6Qgvu7fJz?3Povrd}x5&xtp`O zB~vQOYi`|VZrzlMsX3yi`FW!`<6ryRHGfh@OKYJ$BU9^iy3>o#y_ZtI4l>BB$($WU z`-&Omo`sqB^V=-lFHcq5^mMgZTl?eQ@=j4ZwLcp@d;PjWt5iB>XRSZyUa_JtKJA82 zPo9->p|)A%s%Ne0BC=k`)N-nNo)$Gk^+r@mrA@idf_HWXJQO%sHP_^8wkR`OH(#eZ ze6D81TI?LJ&y&wT^!lyQm{MVjty_C`T6;F7g6WjXT5C7u%-Y!>aV`HV z5t**E*w;``N{dEin@Pmg$_NwHT6ayZ>K$jl{^q`2yQ)QfJ-h$<)?#>#m+5G^w{&e& zaCVvQYiYT(Y+ZLsg)0jWKVB> z%w%LoOjfl_rBqPO($4b!NU41C>-H+Y9UAYR-gwTKi)_x^e~+5T z){V%Y-h7O_$Zn^ail=B{ZC>ln&i2#08C%m=HC5Nq_>A9-Px0Tsv!6MgpF1s!X~>7% 
z`-`a0kLmV?@2Bp~eCk^{Abk%VKm9C*UZB0tqMFYAl*%^uXHzPaSG~@mQtOSIOxDvk z=5!~O{?5=IFT%xUYTm0&{kZep!u;tRiRhr?nRU-8Bmy7+0to;x1Cs~<5EKv$jDzuz zP}QN7$LbEZ%ce;nlhGY;y%37$7b9gvO1+Hm4P2D?mjcK8CJmbri^EXxFB%^5rS0-* zgzYn?9%>W;qL_XfQ2Qv|yDif032`IEv)>kgXpBSkLsX35uo()PyGlB`a7lL}#uOKu z?j`99tX!W)jS!wh2hGHWnB@pMKodmph8L!YUs}TYLJG)%UC48)J{FPTGGag=Frk8r zRjw@}kum5qN&x4UNEpv>@uzRIcwL|`q3_Jn!QdQSdzvH6)V8ZiQtGB0tl?G)xy;fM zJsRA=-Gs}W928%Ydb;%{8fOf7Hl_)U%=N3YkT^^)EIpV?gm670a+(dum$ks$d^(jp z4+5q59v6Qld0odu=Sh}F34HUmER4kCqm(zC??}bzvXi$X)5yy$eQ42 z()d<@;M>V7te(tEM67t}kt7sN3S6}+a;!Gd5GG<~g1A{@Mj=X__{xb2sI_Iu#1mK) zLWRWN#a_zC8$w}+k7<3U->$_9kQlvo)+&*iP)<=-rMJ^=tsA2PrK67VXA(#y@3gr* zMN45Nm3Yoxlk`RRWVbHVtBowfNA;>WxCWG2dYyB$qMEzc9o}AU){J3-&rwPbTWBnr z+;@~n>jv5jxbctyiCG`p(TA1X>=3Sk7-Q~Iry2^OQR;?I0hQ-hfZtM zX+g&96?dI{#L zr4UnwklN2#LZrk8zC353k?2Hub_5#X;D6{6YB{BS4b4-t=WKmRQVbUqA7PwYp5JG< z^~G*5fqaeS4+QzI<(O^yA-tmS{Q*#EJX})$c z*c5_H1UbG|aV9B8yFI_*A+|()ydJ4AkgTe1Np3b|^g5i6+J^+%U;UoZiniO#7f=#&(_^f3AUj z?lPFu?GtNCFl$3!wgiH0+9SS?Rw63sxDph&a$^mSXTB;IiJve)63&I zF1I7Go?jEi!pu-Tp%|0b;W@K7waK`ZhxJx5id)Og%5Z9Fux9x2z%!lDDA6YgRC-%N z8r`UlMsf>flXZnaY3qQV=q_$QA!S|KR+dXFhv392JHTWSK%l~sN zcun3~CUV(kOgp4Q2x2l5oT-ca28qPJoG;*{#D5exoW|*7aV!pi0-w>Y)gTQ)y6QV( zC@UGI7!@^uQ$d;l+_aRw9~P+;k%)4R$4QEQTzhhJLktHWen%xzCp#rBC^k|6uUIH_ zdyI!v>g3dE(vtQe1Yip#tP;cr__l?aW^!s9@rp#K;8{uhVM_SO zX@Y5%5zS^x|9ra~HB~Z@%PgYYSU88^S=i)9FeiiUY`0Mcz1`)Em&3%%qbX`U)tkDY z#75TAXoSBHf+Dbbde?TM3r`u$52p)AQ>Lm^&3ZqGt3boGd@B9SI5W~3^86-aHtaOT zcZ-nD*TQPU&L!ew>=zGHgKYb(8FRXqbi#pmz1&67bxXiV*U(SO4$cjtFwa^1aO3d0 zQb|?mN`G+qw0sB4f;$b3(2=jQ5`g2ywH2fEKcZ?l?u#xkg=v#UC#jT$eQ&dNg1**Z z?rGS6qEK-uN7_&8i>Gu+KACxx*iKzf^xR%z$QYVrYzXTm<7NYK5f`gbUSwyRuqXxg_k{Cp^D+5VDfZ8Pp9iLb9%v_Kyeay{gA`0?cmJ8~jXN!*Gf z^sG0s!A<|@^Y>ZI*EvdK)`N+2#eF7lOWhNedmGOYoRbvNO z&q_H}I=QL{zQ=wZcGKIZL}ax zGe>FRr9i3T{dlz5F!HtJ%Nq)LkJTUu+0TNf4#GI`W{Qdli4M!lU;*NymcMCkV*D1B zNOUM@nO&S&vryQOM2DDI%z=Hu=);qVoE704^u-|uhJBOa6R&#|NYVMw%L7VJE0#yGt=Yu5+Vki51dyh 
zrX?c{)xjn38I8=BLBy!ufm>K5I*KW4MkIprD&=ikm2&ObH4(Y#+Z9vLVL= zsHTEY_{aHnig}<;Zgv`mP~&+C9j&_43@tfGrx-W13NDKMBy0x;8_4K2#)BWxlaVkV?F@o$C`!%&qeh|)B)|0Yf5bSQk;uGy+alyB+dIroXHtT}Fsd-LFFx$rSnm5z|W)=ZRYoMArXwO0? zPY!57#6J|Q;^@agC5_61QV`{5A^S0=2W3dg-1RZ(Vw(Z!bmP7ycbqO6Rw6n=Hb3vxElqEz@haVO>;Gs4O6d@no6qtrc0aPf?{I}PbK%@n-%cM%c}A`|7#c&^N~MO83bobF@Cs+2jPM2nIGei zSvdqK@la^_rUGD2+#%I@z19f31UwZ)T~eMCd}smkg)?yk^d?vqiiHZx2c)u0{X*GI z0$Ei~0>4lmqN@?_P-Bao$c5(YpaK2VT04!-gje~f+blrhq4+z2S4A+(jo}!A6{08q zD?qVFWRy2LIq&=`PviRZ=S8ELrTcx@jla_KbVj`o26HHpPa_J-7?jsq8~&y6C2EnI zFl6L{pAj%lOZdKj=EJ6jso-HqJf4Xq4^Rk?Qv4|H0r_A;HQ|h*@>f4ziiO;JpIN_t zLA!ch=FM$2iq=`wqopB{mY^&G52H-9FhztD#K<9#{|CBG;V>Q~%3*S|ISy92dTn*7 zEvpoH%;N3Z%{_h{O6lMS&^wTkbl4ON-X=Jg!nh8k#qw1o^R0e5^!}lhFK+i!RtMgJ z#WgD+`bd07(8*KbGPp9QBG%esi4{|GZ{R-q@ zgSZxOI?VVx3pP76HT`NqQc8tFqN7Sv0m#D*vFBcP zV$i~OXH|2YB=utGaF0cqg?1vgfhm?kzoZRDC$UVBL`5p$ngd~ zK2o@%Eba_O@DoweeuCp#{iN(oDVIf?) zXIf>Vxaa78#P^oBnZB4pyf9dB4mvSSLqyt=6x?94%Ac$##;0l$F>HUNyqfr%ijVa8 z$Dq)SiPHAlanRH-ovmZjogmDqERqF~cVkH(?hA$E8%v7%Qz`j2FYQlcoT+u`np)i> z=wUPtMj=bwtdW$&aVU~-YArf}#57K=WRiWJEUqm}=tH@ljx|dIdW6@WrIJtAo*MBF zYYCC@fxli>-PqNlrF3v1$#1)8Nt=M4{Sv~2zv{^1S`u_+CESvwB&`dc zxoYjd;C>&^y$JnB=)y`D&PXjx;>y?q;Sp4)$Q_M#BV`&S2WTQ}xNz&y2+6lc=I>Gsg(}TRU9Pj4q6^ZSxyrBi{%|gHGfy_VBDLe+HkQ#@ zpoxh0aA@;sr4bBAC1vw9!m=+=jIFoJb|EV~9Y>Seb$LWR1qyBh6|L~^iZ%R}>u+a{ zMd+bc)yq6Eg7!$iju5K=rLk4lP`x2oD;t%0Zk|45kUxq7ET# zNt6imCcl(KU(bzk+z~_i74AYGit01wicxH@^|7{}rznDV4qJ^;V=i~Sor8_Bfj|$0 z!)zi9JB50jAk5(p#IEntF=#PRWqx4SR*lhljCeSmD}}v|-%Sfl87!pAuhx@d8SiAy z*sM{31qu0TQsKS4U*XJLy!GRvm5Cvl{jzuN*{^^=;B&<+VyDqzL%Ezxx!Ou~Hxhkb zKV6VUo)!KTMZzok4(e~qUAL3Hs0jo?PEKh8d|;i9%)n!Cg)q<~fN!%4%*tyFS#IwI zdchxyw=QFA*4dfhBF+s?0X}arvtt3@J=#}~$09Oaw!|5+L7?q{`2;y&sQ+kB5Lfm@ z&@+o!ClJyS+IvF4W#Tv;qQsk1jOlwRw*8sXu4w0TL$O+FFL&UH>zq9B7w2gm2uoV) zKxiq%!WsN%?8(mtqsV>CsCxIHUYY|xA@16POlGId48WGY@Ay5W&ScvKU3)CLuZafI z7A-9-y1|>uxSfKQYW2+p#`F*zqs_q0tu*VN-DepdDAm230Z-h=`BQZRDl%gEGaFX z0^!u`Bq31>gw#6->8L1>rP%YN0VohTQRh%*ClEFD#U)@9NK9M-Qm2?eFy&E64oe_! 
zIvGa8Ng!u(6OGh}1cH+pQzd}v2t@uLEf^u+k7xqz@HG4b&l`}V`Y5c0FDU%_i&#Dd zfyqdkH)qJ^g@9CKkCa|OKzQO*l8SqPP-+~O#&&>g+Q%mW#sMO!PM$=N4G=|ngk;|;PM2P! zSI4Qi@vW1H)5sKTdch$lJ6Cc-VqaCKOE4xs$Ic#fs#8TLB>Pr(x}+vOzEgPwkPbW9 ziLz5tD62d{s-a%z>COn515Y?rc$(C{YEO`)wC_EAB22=er#ve?QNl0PrVWtm2<+%bBCH$&E zwUpx?4{9YCDTkoytOSMBzEzK=pG4DoOIIMg{3mdp)X; zsG&PVmFG%Sl<=!c)smxoT&hJ^hUOquofWB&*ta^>C7$&7R7F`J@i3L0C{rbcvPu<_ znwL0L`FXrXYnzHu@4Tve+UH>7RTT%T`dqOJNq^O=AiWWfU*&3q1X(*&GVO7!fU(4` zCBZ00wC8658~JiL3uoo5N$gui3(`q@9IaQ1xai1NTrH>QXbnkKDfVj;)Nv#fLeIDX zDXPE&k{+SRKks_)#3r6}a5?%UPE?YW-Q>(}p5=7$+7tI3En|zk^3&q3^6DCngUjp6 zd-f3}cinERjW*O$YY)2}y>@w#Kc`pj@yV1<^{1-w^;+=26zLvcM}{EdaGqk(s_kB2 zCPv*qQDa}E8wJsPeZ3Rf|8B<@lnbqKh3=>MaxqZ^r@sGsP%WM~J1$$w#WAL{_8V10jt9!eSZ?Tgzt@ z3J}p~$SS3!Ao;jkLDgh#N7UGH?phJ&hisjTi=IhA-ULN$=Jf8ZE9`=K&th@lI*P+> z$c3`@mN*hIc*EQv0>(s0pnX$G zb#{Ysv2}P_7Y-|e$Y?P3F@J?=SJNgMB0#y&14NL*y$)lRVc}Fm0>+$**?P;RP+3$g zzZ_S7YYkAlW$_oA@ex)QT3H&azn)eeL{P;=FLmjF$jD|-K+|#Vhx6QG(bxg6S~%sU zIOSr_IdTM2#=>OCJi~W}{s?O=O8cTv2y6jv9Raf1)!L}SOrKdzT#M?UE&Lt7RCbQT zESh5j4$E(J%GC8L%v@Udj&MB%z%52A*`mT~rx)@89zy2)r<2??2O3n+g^r_W-k8o2 zsBMMvq8bjDWprbtZz%xJrVZZ4kPzZ6X@fAHJYnkaOK2`Z3>WOXoBKTaTCA-&LiBa8 zM)9Zh6fxgLjs1!T!890GO@GY%8Frq6bSuIrP6fz$siy({%h5)pUOk<<(3{wDp4}la z8&=l&qT75X{IvbGz*{b?c5jJ%sk^J8CUseJR_PDWjwpOn0oO>Op|e|*b-$`i;dpTH zh3!L0x+MaWEamR)Y5IrT0x(MH%C_@Gx7+=~qMth|WRucHZo3R`6!pv9LMG(wcSXv8{R8jU?n6>(C+LHWkK z`%`GX2vz#0Tl3(Q>^n8U>JJxf&%NTRLKfilxjdu*E{RE!z90}3qkfje#*iEbQHEqO#Q&Fx9(Z;&K$*k){4fz)6XIx;I zv-sr!CxcIL=qLAW@nX?W-ENM})|`_De2IIAmlp+;CI=J5IOJjWR>7h{9n9gRcjyoi zAg;XXMOs%@J+%r?ZzxEMRDWzy9j0GZ3Hf3{T5y6JMTxtJeo}6r@#jWH6-b)UqE7>B z9eEm$^p>f2#VRJ8xDv;1^AL!@M+9n*tonk1oDCalYFmZLjm8&ce3oq^>fggCdYtS+h zE$>n`SU$@amqy~~(h_o7iIWk$AXbgSeS@*YpKU}6P&<%dV&N!rIRw)3 zkA^Hnu^)xCMNMw9+>@+$gqS0gOy4bv?GwMR_`jCL2Qmd)g~7Oakkc zX2WYS8F!Fd%g|zpV#=oXNB%T9)}$BuVqk4f^xbmln)N>JueoCp=w$;ZWR#TS)}Ifv z!2kwfe$(37rGhlYAiivOCKQS|c&VyWMokfyhLo2J>{d-@tR>3!WY{Kgc_P@D6aBX$ z`(NZWgypR~g6$s5%ElZD89JNPLdYI1mm8##T+0ckySyGn7^|>`pcKztxZ`3ZT_5!- 
zskr#TtT%60dX*43aRI(iL>d8v?EtFD;2I!64G3cZn1YDD^Shmz5}*71tiKwcoFuad zZ+dofcekFP0i*$%0f+O5ZPMq;lPI$IRzBlCp}@gz}XqUpw{qPQDH zk|VC5Do0TTmB|N#wILPZw1d++?}tZK-xZGC+2?JfvKGzuzn$hUGY+;x& zW5+BE_wiLZkR|imvip|Xw_JV8)}IRFC_NrdQnb=AhVpU@8rS||9A#>V6jy=_7l#o> zQK}S#lafq*$!Rwe`4I8an4a_;W#!MpfM<}k0Y@MG=cfnG-V-kjIW0OPBdbcXjs}uK zqzZ8mhKDFWfi^`9L6Q*WQ>jJvVGqi(ShwD!>n2+_DGcVi(Vz)Wu5NX3#p~-Iy}r@w zmtJ>aF#U8qc->yN2QD(x)ObZlS3{>j-gHRQf)q|B9!DJ{lXQ%u1rp<+6CFDWlQQHu zC&^-GvNBC+T2tTH+6l9gFw-VXBlHnw4t}OH&9j--G_w%LFk2X9%oNiaq9{03g*8Nr z6W`GlM-$QYA#_;4g@njg-QV3CI zRorpdO*>5*8v8^#p!|=O^SW1NB?BsRohz@Bd{EO_<>h78T`8oT5NTFuh`N!I5u}Q7 zU~$8U8_(R25*;n&8-|Y~T2#J#;6Xd{@9uZ^yY%>O^!Uc-Iy0q@Y;?jO>ozFxYL@)ifKe{8v)nsY)x>#i}e< zUVoH!y)j`hqSAec7~E5LA}S)h1CO6WwFSk71^t2!1nHBSC=7*1W;KSF)%Ot~APiX4 zoc#4??->89`VkQl5V|UwM_QS#>nhn+sQO!;V(}QY1R3xLJoS}3!r`M>Fj#>G(?Jjhp@Y@Y9o|Y9I^4mHeG0E|`UAWh zE3rO3*&UFPFnH+h-SoYO=4um$oX<;kSo43-9yAY@f8eZeHs?6+fG)}8|DC*cl7a{- zh5D^O(ZS@g?X)-+1`1$7)z79=vm@H{K2E2nI(Xtx&TE_E(1$SlEKQ%S8`G4g#WEl_ zv5BQAO{oW$Hq#2OoL`yc6#6^0?B53MlNU@H-geHaAe zCz2@lAe2YOZfBD*#q{E`I0`hdxMG3~7D5no<_{`tpVG5~^w=fM7+L|JQH>D|*|8}i z%n~M1_389j1xXBGHXwwUnxF?gUeVZjP*y}kc92s2SLI+#zohvPj01fS$Z8F=!FsB3jZScHit_UD7|DbhNk;xtRl)wT;~~J?~Z}*?Dl2SJ;^eK;nFNCkOKe;7671_3X>uU zR*}MNqa$DJKrG~*Gh_%d46&vqi-RDlxH6rhR32_(4h^jG!azBBzZ>+6bOUCU@#aA1cbqWP#6>lgaRTVNfd_j2M$>)C`#CyGJ`=eNS;{l^*(*X zoAFSk`{38nk9wYK3=%U6fGF)Lm8c%8TLG9Iye}oPitW>^kY)GvVz2e4R)le0|p1!a>7$Ygr5|I3#bb=TSC_r2A^TsB|CW!?@`24#z1Tj zN?omKoF(H0>I}U(;E4xY9eX96!^uB6nW_wVkaCFJ(7C`yxD{{o0+3tw1XzIH3U=mToOg0m{&7XXkJ(-xK^4fgo{QIVn>?F%T+-_d5)6< zLN5qK#K}Sl^0Dv1j4IF*nWs`3yb2+en87NXIKJZvbAys zM$BhJ-LXOQ^3ES%^}~t0LB=6i%HlMyMV(2hm$Z`C!j}bk3!gxDsxhq)xE&ueHZb*S znBRTsu0KR-m{|%z=h`^H(^(KHfdO=lo5WUoj2IDq)V(s#h}JR6fDjcYmb@q;dfjo? 
z6D98E#aCh9D-%~C{7|(T)IOeLXj4Q^yNkGMi|0zT12dA&F>sYR3T20YKU~uFRVEy8 z+rm!D>wKed1cwvioAPpesVn%i#VtKT7-+IOFV0%sG@0;tgn7ep6^#@zfX%$zo4Pm`wudKPJp|1%vc@z(_eDh=R0Xy_Sj@A;^PMZ!FTg`?I_Wmoc+QHt$PRA8I8c-m zn%bH?fl^I12lw)NrMz5y2$wtU$kue}wAVVGPgwc#?6u>6HX&S5(mcKN1%x3+`En2* znVARSeyoAk>iuv~r^5I_@1{$6VRWBU)wUTe=|P$+#g%wFawo(lLb zoV2A-A=j&QHUR24d|-i(OXDE@`T{oeidO4XZ2reYZ}3Id!G1hnBK?~w2~_LSGUHU^Z)(gXYX{DwO2q9kw65DERa z0Z$#$Z&|Qcn%rL8-L$>E;TN#xYL8T93&Z~&g?f+vB@Cp{@d+mO^w9ERivtqn{K(Jx z$}I)0nk4)mUf~i)#Y%79HX>y^Pqh z9%NCMJKDR0tsD6Ih*2r7GCo%mDg#;ElhHoS9&R8=BURzy^#*#{8xjMjrLwS#QUAZA zCJsn%@P5pQTQgaj6xC9jn}?SSx6zXKzS+4U+h}>r={7i@p%6#E3i%)H0Kkz&Thm*| zgpJrowflaix5BHyJAYOBJ$Cx}dyWNukCuHy(2DEXi-3TrIxWwQ#b!6o@at*mz4E~| zB@Rc50Uk~>z*^t4WT9V57 zYXee?cTdJO@?zxjz~N0uC_Zv>vNyNWb`eeyi~}z*yC5YtV6NR&h|r5jir3|XoR}C7 zsva0*-&~z*n_P1{+br3u13ep+2&kQNB1Y~D0WL?WI84>JB2)@FJjpifX05?Id~0?^ z*0p8@3j5d#q@tIY|4oJAdwAqZYi9erk_Qqqcob5%| z$3Qak{xqPbo}fsZX*J$R6)nLJ&{pt?0@~Eo z+K4EiL-bUmD(a7eAD}{fiMQj)USjz!HXgv|0ncB#%RCJ~x zHIi$32f@7u!7)tIxxT;6*FT8DJx@qKjDDU?&aW3wxWaUeG1>p;*=XmJFAe)R{P>-( z3S#m@Zg}UOd-9McK;AxSsBs3qqqQ^VIesxDv7=YERbf(yoFk0w0h2ZakUrYQij7o8 z#7Cu4u5*)P10XKZf5#Xfrl=>GN@&fT$5#~}+!1aGUJB&|VLbd7^;a7?+z5(5r*&q+ z39NDCy9O{fa#|wrjZ~nuZmIeFC2ZVWRq&ur$Fw%|_hdDsbRZPT1I+u!Uv+S5Q|Sla zFf!5hU*Nwy0tkGdu`%b7L4gd zB(krV$V0^e>+RBKJgTC-esoJ#;jo%)x##tN&2+?r#DfTJ_{ve1U~wmbQM_QyuDWFNi#?a$U~dkw&y|%QjfI-8 z^w{r)O4$267+&V_s6|m}{%$7&#muI1iUWm7nK8n~JsFD0X!R|^lEj}F!x9j;;}qdX z1h;<4p)H(>Pc_=!tR>#i*`*=Y25PApRVYm}77jY3kl@`0tbMBlY+p{jFJ`)W@BxpM zlC9_06%iW|A2!z?X{;%gWk|=gJ~E-2Dyxa5j;w;6P3GH4j7pc-W$DNWoZ^~xqMm7U z%#qsAU~L$kpt$Z+g_vSDdBzL{dOa+oB|&_=kKaT4Qbyv{szh*_*&7-w|7s{sRg{tY zdTexL6|(bCg#nrKsqjMeEVTUf2CWZSwMP!1w%@moj{T3R)obPizn+dP;HA^Db)X{r z8s~_S{$5`X3-{l=IzwMWlh0Yhe1_N$^cn@AycU6dvTLG(Q`A?+ zM#y^V&oOifZse}Wv7`SuBK$$od=eG^{0~M_BJjM z<7x&z3E)vOEEl7IUYxwZ*%k`2L7#umwwXr{e?}?`3(qc-Pvblr zoLKSH`ygko5*2X?gS^b-cB4xB4g+46(}A-(L1g z;rqi#0-SuE2s8sv5o4%|H9p=%lMpnN&j0orFJ0yQ{c zZ)l%3qwZG$(@ztCV0qoZs|$ug+SCAUYXd{jaHl?GmnVzIfn5n)ARzoR2yRlxUM~o5 
zrIC&iuBzT9!r#rXw+tMr@o8BN7NC`5Uo_|Nu(z*jAx*FK z27(q2$7w|1s=UOrYZtf%N(WhivpU?~T=AK`#$Eckq5v%dJB}s|P((EEs^TtUlq*!* z@-Kyok|7pxnQ8R2qoSs~CUC-*19S95(3thIssBFe=E=#Zr;Z*GU-ZF5tgj+ogm9xI zJ*TS>Mg4rR}sM6^bQshK6z38{}{qU|lCz9jJB z3|TOeC!eM|n%-MLn>;JgWw`8OOyivjbBGp04K%SVj#rO2*^#>}cGIT|=5}rPX0wj` zY~6+|N`j_CtBjSR3Q$=R%33ntnu}yV4OTZ#LWbsIJ*MV zT9^UwM=E5)aP$Y|ZiFSE1oj}t`)2ZQRGjY$l=6j=$WlFCWDTP>1wiP8OGIYF`F3@i z#Q923tC#Plvdg*teDJk1FzZfAqZc4XGb#(qI3pJI8YsfGU#V$wSqLQODASfe5{x-< z%vfN_TnU+mOeGS>wzNrMWW=u&QEFR^>IFZYr6@b;0NQQ zui|*J_;$nq)Yt$xNfA9!3}~3qTrz5U5(0u|5+UJZP27*nHO$6WiV5n37o2@w=s=Hj&e!*ECF@pxYr98rWI z-&$F2mQKQ3Th2TjEr?V&P3Za(^Pp{cX2GT-Hzd-?G{v#@65c(royWKrRSVAc!K8#- z1SR=WVwoI}zNfoBlO_#5HQKNeqdX{+x%tj$UTDAR7Y3Q48m)XNA4Hv)l%Gr(T9$&?B#)^Vzl2Uy*s;1Fe)P!7Aaf< zxkxJii6qdr@i?;Swy=su;^k_-!vI8g zYr-Xla6lL`YErQ0(%_Y$(3y!6)AiVo-<%72&B7PdioctPoay$VnRJq65zniOPdze> z8tw?hVH^b7?%5Q;PiH$um&tX;_vor8d=Um}NH`IR<;ZX!G&=!A6;n20DG$c7vflS= zoYUJtj${{P=T9X5{+pdu*Log7)H)YCZA_$=zMI5?wR-2NBPG?(k>-$ysjX}PX7+!) zPl3=ejtv4`a)tTjFqg-{;HUGd4m4BViIOt;zHqm*5*m?g3Y5bE1V+qCec5^><+;Xw zy66E(D&n_E!bh^E6BK%l^tfRV7J;V$>Pn4ZNoSBH(pbu#@x`-{g*$7#H>M z97_VmUlRSqG=)GRcRz7Jn#1H4;oPA*==3U)tn8!hliU^66I~T>6$Np92(cBmcjh}1 zWWSR9qF`ej;1O}{z2QFJW7b#>!!yKn_zx;xrQ94}wgN~Ua?Z4Ie8DaY9Sc&$SRpj| z(-Ap=Qhh!2`^Wo^#4kV>r2Q%c5)Rf+GbF@i(m`VZG^Y+du;b*V)N4&KclC*p8}%GzMWUM~;4;nzWg zwO5C+fdxm1ZiG`GQ0B{@9wa&h!I=b~seIiyeEpr8CpgiJcG3*Isk|1u)#RaIwr>_Q zf5!oB7K^5DVD~qTCJ-VL#xul!^0Sa(WpitRjzj}DQ6e)_U{!@VFM<6ccEmgGW`mKv zlulVn(;-kV!=qYez_;q{ZZ%f5=dfdNABB9qC5Z+XB9Rs98CdKh4?XHSf06{9R?7Yu zvN|EJ{3(`{AaaHi_)dpctnSnXD|B-N!7>FevH)F}pj1~tu?5OmAFlp0mCM88jt=vS z7(1Aa9{6H>-(e&J#UfUU_#pZ26J5EH-v46)xvAJH;KZ~URU9TDU`8j2T?JwuhO8Q1 zpjS~9{ju9_9|1#NaIB>f!q`t|>FyyGKLZiHO3>A6=y5ocz5>TCB0{4ooNm4epuO73!5y@@JKddjLRPB!J4|0GzYU?tYNFCLnu<*|~k zN~KGAXR%D{mJ*?srL4q#x?oC$i?hnIq<5CYtVVzcQF>{pSIOE()$@^A$aR<8U~6pN z2C^D*=~^4&*$|WS=w7=EZSbA$Q4u}l8VVB}4Ab@y@muxWs>{DL2Uyev6>%|3CXcxI z3}&Ic#MCt%V{KBJG8N`(t4!2wc3iyg$2%p?$AOQqk9B 
zIVoXUE@;lp8k1&GDNMU)QYqE8i+j99t1DBqV>GsLw&2?>qj8F*Wp>(NBn8VhOF`6A zS6oW+szJ!esw|hvE!Ku?5yIz{lCb|aU1x~u>{}SYGIR%nXu)lEuGDM8JpPWy#y&)2 zJ}WYtHR^b@WVU;~0|$HE`3^%OF5ti&&nZ4Uv+xtBhyVe>*ZW$Y+fys7!v#goXC_5`YZZK;32zE!9e?*T* z4u9l`9jQbR`ytxx|MC&6_xpT^)bWU*_7$Qb{)e^vL-zhlMy%sy-67R?zq>m-wzE5= z4nceUwhgI6G`p=0Q8cu!wmvqt&fl3@ty!uXVriM@{gJz6Wlc>4OA|>u@{*4nB6!Fm zeIR;R9=W-PW6DlTxO{D&wuwbeNd&ndF03_EhMkM43t1ZkC;N)GJGwsg9;f zDUYd6om#3cs7~EMRC}iqKJ`3R@|4+A#ZIMcQz=tg@lB;niOjLHa|YE+%1cRgNh)Pm zQmM|9O1Y?PVkt+nMN48s)G6t*l#0mA`dq35W~UjU&k!74_&4m#&brR(tj@5`tj^ku z&Df01j?MCvfBMs(Hk;XL+v+K!aXl?PwLE1sG*1&xZ)b;TgVA6#7!Ah3XfPNQ=s2)I zcPJGaD3IV#fd*uW;{Yc_AVC9DFmPcjpaZl*0*VTz37l-eVCgcZBsrO+gOX%-IeBVj zt#paOO}NQgg4(QFjvaJP64#loqB#bo_UFc#dxhz&^xlVL)|2&_;dA9!%^ zVF5N24MbQ#4`hHKK>_3kBr05BbIhG$v)6r>>6kbr-Q@`JC;2p1Ns6fDjpdkw&~`HK zv!$RtnPt!6Ee2WTDW>|Gb(Fe`%iIln`_2{6z#<$hMF|fS8f*spkTiHuQR0+fAOR8* zDnvZ6P@xv&0V6(G_)skABdv2(MlreMjOlc1ddoFIeNf+NVme<*c_k>}qh>+0iTO!Q z>275Z8nnbGtS7R;1fel0B?Te1r9h>YoUUs*x#!+P0spH5-Nr0t|j?CrPNsLoK9$`D5q2AoLhvl@~D+`#WZcP za?8hxDaO`BAYg(LVj=_pX=(aJm=~$N9d|0H-c(cfNX1k{RZkTuQ)Cr2-|zSNh@H>B z7Z@%uTr_|oL<9~Qtj=&XZ@5|YB-m@(Hz7I6#kkLJq5WN^CV|9{AFr8Gcqy!Q9BzSZlgoy0I9}lf$U{Th{ZZK>B zOUfN|L+jF3+Nsl>(|Q`ws+lT_jnouQoeUrRG{*jkxyNExt|fQnVkc}0z2l6l{>EE3 ze!20?`)9s-|K>C+tY&MrW^1~(YV|Md>T-ws3U|Jr`r9}5h7 zTBoBf(hSgAe*67@KiU;$eLk;TO zL^X$)fXrm_H`VN`u}L`QKZ|lC$x9kO@rqo=R-&5{Qs+&48SECvy3HduOQVqbRw3vc zgMr4Zb3l&~TCT#ACtjBuSk8MU1iAsM=}zO;4iy(2AhV08jdwsD!|9|>2&1>&I% z8#AHLZx48_;=u0tdk;nB7LWBKA)SAw-RBGL5E^8{Bxj;9J&YQpNGh*oc6+SYX%bns zQT>p@^nBSGn|oO}Y?;&JuG{-6886oU5m4-I8!#4NLliZv6eY3-*brKecfcdk8&Ft6 z7JZAdOAE)edr8h?23K$^Nih*Nz<@e=s#PY{WVLa}lhiQ5DoUrK5`Fr66S<(SnQb;kLCAurCy5MNxQ^p|=1kU5$koZyj zA=Y3>b%n~0h~VCuj|dW}^SZT&M8w(<22;kW;Ws$Mlvw%%TW#pAw?6e_NK@c@D;+W3 zF5X1God7tb@n#tz z6F0D$VRVQh7!=$52~cg`@){pchT|zsM2gkK4nieCXA@oelH??3FZ?K)bFUu5Gk6hd zE`F1!UkPfwnJ^h-f~b}htd5AE#<2nCn|kriX3^`O@wGbt7;zuXBZ8iSGd1JJL?oQ) z!Kgj+1Q1V=2Raxukd(WO+Qg+6P7%{gYuwcB6!(BwlcyriM(ycut9&WJOqbT>sd2ck 
zYSUpZl_aBftwJ+OYa>TLJrg6a%rE%^>ygAwPEHkAa9;$791Ub}SH~EPCPwb$%{d9q zH`K0qU^)e0b6Ov5v}}srZ{OQ#LeDL@5-g#SG=Pm$1469h3=G9k)qS}uoqVY_t{Pw+ z`n^ItoIdaK#2O=lxQ3$B7Z`ey zsZ4?#+r&onY={t!|G11Nq+Uen^_7gqJQllwnQ$W$2eKTL;= z3x&}J|0VI@ZxQVuOnB{TkSs`M5Qeufw&n)7wh$vwC@j8-Qx)%$FM0;?J`8&T1ecL z2|A8?a@ItPMT(6a2kcA^C|hchRV~|COH)3uiup6R9H=(DNy}yuJ#|dOh`i}>60K?o z1-BnLtDI0TdP!I?Q;6Ik>l=}tj7a`eoJEne-t3yGg-zrp+yfK|6gshZ#)nB*phJT6 zs^C1vB=SthY2u*wrl?Xm{X<7Ftqat>dP?g?gY>NXOc8dkiS8C(5p#0ROJ~mLX<-QJ(Pvvv$}Me&VAfjf?)+`54`u{iVG(qW zHB3f9!JYfq5^Xi74c`FQ_ov43S}w>0D&{)s`0)z+Ba|{9qKM%Xyi7!@P9{neF+rxv z)ci&H#VNBB(M4*fx9J;m6jeL9wj+EpP^!ox65I(wq!JQ|LP*lrD6FV64i32UBOI+x z06~ejNYlqzim(*l95wW&qP_}ecaDq+O<0OZs*{c)02PdLX(vGfJmkwkBE&Gri~#12 zL^Gs@+A%NCn$xC)pOSS?-|Y$cD-Adx_)F13>m3$C7;O4C(+~N?U?2u3 z?$zjZb1N@%?uoq}HJ*wIqi46-Bxf8^MCg3g1jq`4+4U>P(cmH*oMV4jh3f11_(V(` zRrSp;u%4cXW+W$!tR<^~jA3xdnfDV&^1`6W34$V_J3~F?#51xZwNKnP12~sNG5zL& zDk1!7bnoju6IB9#^F?=qa1$1a7~3F>-bECIHDE65I^sL{AXLiX?>BeVb2(A-5 zjELV1aqp)C+|yuSCC3I6#HSc6G9eseF+b`v!sWzZ{rR$#jw6a2kBgr9jTcJUomI{b zNAySRY2a(+tgPu(=gvsj(b|}IKC6RQbzlNv$DWN$mIyImr3!sKWehb9wdltK34Hfv z<^d}rwaRjL%o5!wko8_c$LPtv7IHMVvY#19RzlJ)tPtm|2M7(CA!$?T)UU-zb1St( zFJIGxo;{;0S~Qfjw08zAd(KS4ht`#O_-vUiEr@m9h}V^LnkayyxIHi1YD+=wF?(;i z65tYIWGOaCSpt;}EvUhi<)hZvjxwk$H0g;?ZywQ6zfd(PE=fLU=49ZEG$yIUp@U(!oXmKP;0@Gh|eyVYA(l88U5#!}gqr><)<= zGWX}>qFG=@GD_~?BFoT2$|Z)4G2FxgZ=7n8Gjdc_ z)fvaBLOU$kw^9QcRbv98@xpDKQfWYeGyq}TL5Lco#8?xQ#aqB}pK%m?-0cwDV(=$v zsp440)cNwqA71-hV#-1MMQL2(6-JZK1}wS`Em_@!g}}vNSa->RMAj+}A{cUad^rd+V6m*6=EcPtJi1sT`NQuZYJSGtpj{R# zg5q((W5~w>B4(1oKuuhDMOZPpX`Z?;IXb&q2Tw~!KNoBq9X?ScjK18GEJVMd9BGvd zeIwb3A4$^Bc0{}4vQd#}g3*)iP5AZ14a1Legc$Tk%9W3fNK}Cl4IpFR?yxKmot~XU zn6R12kaT(ODcbqchdZ5Uhf4}g%G3lS1I3k{kIzKJElcsltGMh8;*MpW;5Z?Y^5;9z zd_cWt(XpPLGH1V=qjF6@1MqxGhn|XuJsD8CKn)ay+aNF?$%t0c=J(Agr5QjVsgR7@MS!g-5}|kORsm zl@_f%;;=VJ$Esd;Fpi}iZE80bcNu1%62?y+sTLkHx0pg^lflLV;t3Qb12EDkVTE2q3f!Uqw=jWz<@Q}ZB@ zWQ92JMZ}R6qGJmd2zz$s?nUX8V6NSm2#N|DAx_1$bzlXE=#EKrM)m3fWD~uG+kXlp 
z`K&^6@VO^pqo7!GRk#2&AadDp9U!vCg}VWiBU~R{7NJ3`3P48MI9bOw_igQ_AuPES z^Wy?btU=O+tk(wBUi$-hK`Pdzyz^%^$OcIwgEfGq3ikRNGzE6iCD<{kckio@jv8R;-p&4$|h(JjF=ZqfWamAmDN_Zuzf8;M+acOsXEDy%28x z^o&vL4DZO*hK_jQH;!;lM^rB6C<|+lt4}3RW9!RcQ@F}k5>5VGu$|%}teN8vIyz=F zU^^39ew22*BG!4FwM?5FLqrhe1PuXIjWZ>8knCNSLH=Ew$X?^@6T^DAm?=lb6wZer z4Ia`hE?CAgSoY%w^R#A>^}HnVfOjwqZJxjoVDiv~6m2k#oO&KeAe6SAP^KV=Aj#Uh zeI~L)1Q5B~J=HEhgTEU5I;jA|j411_)Y{uZHNbkMNueXGtcfg6LI`{Q!|syQ#q2|+ zN72c&iE;FB`6`Omf?XSb)!UI~UHZksDgEdN50P1T5h=H~aUGW)*eOj(dqFJ{^{s>}<6+iB^pYfuu94eEqBG3b`qhfr*W32sYYf~K zCg6on1ILp0Xp0%{EhTxlw`@CDcNCj?jr&8;XcX3c(YuCqPin?4gX|jCvo6N05(%lP znN8C)>up{Mt<_{rT}7A^B5N3|y4Z@AK*>oB#E6qk# zg;<)6sh4Im@twf+E>&EC>#8nr-NoxYsC}qFsQn)D5Z-DjKU%LX4D|DzB!9l6V(rPv z#?(}mlg&^|Ehn2pd0|dARpIAkBe-dxvc8l*6^xqljm%h5t4#M2)T@2C8FGDY8Jc}= zO)&K`&tCeNsJdf^SgpnPxmAbY=a%qb1!L=UZyx}Tq$%-Vzx8wLQ1@?MKYa-6JigrF zdwL`PW-1pr@qyZoNQt}y!UNS!iF6XmtFs7!T9%4yT*|157*q%6UF37YNL&9U$kBDatIb08l?-P@>^N>0uqc*H|%$3GwI<_MxK; zLOst?n>M{+hLZ`=k__<79m}ute}=lO4ng7Y=aXMUy>xJ<=Wd!=1o`NAST=^Xhi8rI z7!ZV64N~qFc(MMVDre!wAm>)Bnnydkt*$4`E z*)*{-z;hG@cp*pxkXelKiFFrT&0bR?K;2j4M}CO_Z(ptMBTEwjgurZ*?)a2W1n`+z z;q8iAc)RQQ7_rui=7osbeLyp6j^+*)=&Yc-3_%Cu!~90AC$%X`H1xQu1w~(M^9=`x3C#=!%`4+Ms%`DI_Znb}L;0d0jIrulzos zf|GCsld{SEn>|9p{AZUWmGY_rTxQaI!=w-ZL>w(m`Pv=HmUtnRV&Vl#U%6qfeVQb1 zpVqUK!h{fXWW|2^5Y!N6AD2g69L;)KcApeTo<;ta^#C0t47uGte8LjCw__gQ(v#S0 zUICY)SQ)Jq``$W_HUdcneuOwhGt;GV0U{;ebOC~+lHMC*t*{L!RIOo=DYgNrNYJ`q z&<>a*vutve6s?QGdFR?-Dvhq%G-OchZjdIEGh?NqqCZ-`NH#fm@koNZwR=RFL1jU2 zU+u<%O|2d<+Gt z!$cBOV26*IX4oA*NLjN`kF3~}Mlb;fErcY;b>}!XgcP0nGs3_ji#&QiG0|5zW>=Kn zw$f`g;|HsIUm=x>^k2;t6QMyi>cWM0SQ5ZwJ@OKYABl9J0nD1NB&1v5X^JGRg-%uq zYqufZ)?K4hVtI@2aQ_RbWE;0+LPw9DO_aP_rw&;Vx)z=}hD#$BN7E#EB{9bQwNaWt z(O2zGUR<(C-~v3Uk@FJIdrb--xY6=bj@Y>~cpx4BG^F+Orm7!^h$bZ8T&;~edy=Gy zG}&$(bAMG@WP<5A65cwe)5K~}0kx-&$Xw>-Nre$)jzwGQl=yhVfm|ft$3yZ-D@#Gr z#s{-ot0E~9HywON%oh_?3KhkkG1hJ|gSJh*6p8$3JnD(AB!v~jQ}hu$2=`c?L|hrr zJdp+(h=;PxH)jNQ{Nv;**BeG&X8Ry|qn&^$MdmX65}BINuN?8*o2MVTMBT`iAe+7L 
zHp+OqqcF@SxR_|NhH~;MruMeA)ey6RJFDBkchfhboNbDQf+@7z-VoxuB~C-j)PfZg z-!UQ#D7dQ6>kfG0GAr=JGVAdSc;tEmdP`JionVr_&S5 z41CdGhFO-*n_A!r5(Z)Z;3ng4p^U`?N6;9S$5JDk!z#kf4ab#?0|vde1e3%L zU>tZI>1XwCA@Y(mW04<DS_;Rl1;HGrH4bfN zNCpj%I0u?CB=tH1M`Kp1hNsk#LVzeKN-6a=y}eF*B#H=9*elMA7w&O35o@gwj7`xp zI7V(--$OKdTG<12>_|3(VwIE%H-gJ>LW=BV`A ziUz12hH_`h0g|%%^Q2N2NEiUcic#KGJ7L6cjFRNQb1@1u!P6?DoD!hKG!o`I!uFLgBP*hoS?t;-8#6izj*@paDHW zymStQFP&{IU;&;YOq__VIvqstlMldGnr$~Q7|&Be(MFKmO6P8ak(U^ zjf42mGl&#TMp7m>_{#weB8&23!aV4J;$qGaCo7ZyXhJ!f@}8WoA%!QZp;A5rNC1q& ztw}>uD4}yNwJbKr)p3v5h{Af10{=z?rnlLHL{4L}?1)xNF{c@`ntcM9%>mP2SlXVX zy*=<=21!t4M zK|g=csTuRaAZ};G+;Ga#@^juB>rc7u9(#4(*AY9AA!xMw5)Na&aD+mWoODF)fc~;b zw4KU~4GoH(qK-GtdU{S-42T@Yy*-9?2r(B4jXTFD79oa0Tu@6hDY!R@q++9xqUfXp zfR3bAxi=CVJ|J-DJ$eu(LkZdm@^ZLGk5YCaA-SE6vy+0FX%S>69fFPw!~3&y$L8r4 zplp=}gtQ6BD@Eu$!uR_{{kmaGBjK+Gtq#O8r4Z1H+0-z||+XU@2;LoQQm7O2h; ze|WjI^wN^wnMX%1qgVAHw3xLlR+9Fm^|+`WaD78jwIIX| zg{Vy`?v2CznIr+hR&FVWrBt9#`1Ts;`wxg8NP|yUgFF>#{lz$3H3kxhV6pedvtgVI z0qV|F7#V=MAaJ=NJ|B!F+c#ThV3XsZhzt>qNE^&w@z?!w?2HXK2sCnxag3mFlc+(psEgSXn{O@?b*U1L zY!S5Y_eQEmM2sgtf(`p!Uof*F6w=(el%!Y(=ug18k+Aig%iz? 
zX<{`b3LS(Dg(4A=&?SU>(2*EPEVd9zDOE9u4@MS?82urP9v#T4!e_w8ghQ#Z$sMpdnGohLb=PA|J%tkWd}SsW?S`Kqdz1z$7l@l${}j zXJ$Y#O39!#5tV%-15<=T>`KZ8s~M!msURiA5BixwiPTGqABb7OgOcF*HNX(yi&84q?KaKs5PF228!xip)+CeNb5UD$LnDJO*{PDa7v5pmXShHql zfrO;23{|hH3I>B?1sTCj2QyMS7z`u9U@#aY8I+ltwAI?DMl{u0y+9L0GNBleNF-&C zf@JF9rxTgtV;WJIh^j;Thl++6bou{|K_hi!kNlvZnTNh+w|jD;JuY!-?RE!)MIHWM zUi1Gh({JwOJ;$`c1}||K#X(GTnBc&lA7d(!9z!>!Ur+-z$X_ZQmFcA+LWVs0=|S?j zUyje7cP{KpaA^ zVv(+3kuL5fRU{mlVvcFE%5WL0p@}?^g2)Ev%7kpE+(J&t`v2-#`?k}IszfTLHi_Db zWK4rsX>wWy4->gu9?CzAe-PpSqIi&a)T!J>dF7SMK}=+c#da=c?Hp*rA?#pR)eDDL zZOzm=jT+cM4QmKPZg3k~CE1xyNg0f4m`bP{f3XRl;uc4utRY@~s8kZ?Po_3<+ znbu4QBblGW*$5{%9JjXF#J8ESikX-VbwY^g1eusnrz#nekP!LiMJ|u~C;HT&kW_+< z1eZtLqfu<9#Ld51W`D%L|~nv{cm3CfvZ&=xaKg>q743Uh9nuJkQ~WQ z$Hqn!cQ6puy0B0a#{#z#(SqzjuQ!B_?QN@g#xvE^P1mvgqWd;dBWq>cgd^E>dx^+= zsx;BXq)o9|n^d!E&Sq;e%n=ezAs)K)Xo@$aR(KA788Xc9Bg0yT^f!hZQVfS2SjPNF zL)(!{RS!I~_$)MZGNVk%Kpl*nh&hKO$#3jnB!@NDS|g1#)=1;j#_>q?*s+|LaZs62 z#@d79Hg3R~OpJU53^-8?3It#v!K*Zw z>#mj!ZcwS*TU4ye5r<2WlHo!$B~b!$l(ML+hM{E5P;DkP97{!z*QTnm1>S!t46BPS;P|lRhO*dwem#Olv`9Iq7~6-Ohh!_r*(q) zO~#M3FlLom)$mf?7mv@3RkRW-yTU=FSc!syf+Q%YUJvqk(VR}&?9xD%)vvv(5!022TUOJ+fVK#O4%1TlsHLks}~00IC2fDl8BafoOr0RaO7T_YOT z7tOck3RxQ}d4(y~7HaM+L>tu>&nUPBKtvHC2ygi2TEE2Is-|;mjMSr};TnJwhxPKf zLNA6P0^ln6Q(fyC8`x_c4hICQPVdwU6cnbIU#Qa=c6_I;sqQjTaD5Y20lAkUqH*XM zMb=q%2)W8NLnBy%P64W0%^gGla^@&G#7d3pP$#tbtL18=CAOG&HpI;a*z17;(-6_R zf0oRNo^txI3QB||kUtw$LtDjp3$W1^wxl3ci&z203LO&m88VZn<pLZG~gGq0=qrawi#@hrE{j(VSK1iTwVbq7`yUP-k(% zkCQ9MqYgve8?|kzZ1P4i2bai#`ZbdUrQm}EvDc=LDMfE>RWR96Lpd77CFu}6LUk3Y zCAH?iWvm@pN=fiLw5aC~m{X}fBCqY1)U7=eh(^)(tg_y@3qIeIM9a_`zb4R>h+E%B zU$&g8K47YqLGEr2rNCx=MJ$HJ(8Hyuc{^)Y{x*4bXmb3vnID?sFKOW;E~6}IV~y`8 z!?m@=Z~m#8K%eRq#rbFGG97=6JHq2}50a46M#`7KBmzjyZ*Lq3GQ0z!@n9gw1J3W{ zE868%eEj2o+zej;tDmK5tl%F0dF<@ph8)rw6LGIXk#Vuj_J(DC#cROQ_C3KNs{Y=KT0E64Qhtg5WK+qc|6J8E_OjRzA!W0tVpE+Jq1 z{ruI2A|I7PViRTh^cj^Am2T1W|mL+Ig8Wx@f2b;`t0Gvxp`dfqLSoUR$ zh1C3jPMAJDIam@g&&86H!bX>yh)t(W&9*VobGT(BkU9Df**+A=A%BU9f(5ciKf-N* 
zfy~i&$kx$74*8QzT;M?VuE`7~;epKe4OH<62(q|sv0@;C?2D`r+8{yZr5}O|m>@^# z2H-kDLH@TlR6JCWd2tbe7Fdu&>56d;7i2GP9h7A*B9{kLpH?DmggLpS4XACg!li}m zONWRf;zH&N!-Od2g&Z9J90CXo*YYNOps=3p}d8w3%Tnm6Fqk2b|Qn(d`P^O)o;QjKi=D zD0uL!(+#lSgTl&YdQ=qP(skali_cPGXd4Y(CH$k=Y3JR zaqdd-0rq*adOq1xO2JDId_N37A~Gm@Lr*%aVXGo{Ley!{8BbY0$WN)9ebpmA>!f9d zeBSxBo}W@og)}}0S!RH;Z1m%P__~^JGr+~hl)qNdyy)zr0~A`uaL8e2WqIU`t_Y1I zG@U&K8is;_{3$$L9QGe>Q9{}qp==dahqVm0kA$2tx=6(n8l%&9XWIvNG*o*F%0|c% z8la1p3;>a1p z#>{Da*IN0ERn7@^opc)SA+STJNGmGZrH|ns#3<6G9>%kv5>V!DsvCFc2-NRNKx*gJS)9XYn zHnJ(Od_cZl_;Ea(5LqK~KE$GLexL5wwP6<9*dv=jRk9SMb;((6F2?tY^B%pq9NI_ z5FLMgXR>Yobw&N08%`IY`?laTO2$cnrz07~6?Q9E)@_IwnEWPwRj+&nkWNWJ9)rYn zd%6T9vE<>FO*N#EN5;-&pzefJ5_>Fai6b#+O76{hY13GRFf-aUHv)HgTTcRTxi>M7 zr#_luu8Z20;F%h4lUNS6TZr4sYxNA(u|`9Mw%voc(1zD^n$Q)mS$Ba$55>KZ zSVPGWV#HNuGwMc%@qlLQ>7<_*@y4exe*} zed%fus9m3w12%clR?G`q%g_doEmUf@ZvBMD7#}tP{{wXrFUi7j6)qImG+r4o)Uh;- zz%;~TA1WlaJVnG$Dc!CHj<*Tdx4pxJ&!0JgEU3nM@=638AX>||zHcqjJ~y4!5b7ZU zPGKE9k!Ebrx@C!gx?eZ4sp{Jeq)(S`K&K)5D|icg#4#$ck?xBQBcGDa;WO(aJODo>y7$N;0i~IQOT^@6 zq10RPS;9vm*S$g>bNG^J>|hNzTJs+#{1`FR$Qb4W0u*Vk?g&gF>ENJGib~vWQNm9;ggX9s8rC|$he?d6Y=z%6du!>(?>?ziGzxR?ovy^mwJ=NPe+-_ z6f2Z?{+-haU6f?FIhZ}*^le@#SJ8P+n;acOmqf?7 zVA>L`L-O7zLVrz`Rb)M6I!lf94^AUrN8&Z0w85hoG76p6G6u(3g=5i_7;eFOf+*EK z1F6Jz&R2o-Kk7m#SvcLgV!|1#lm3aF`YS9C$=Bg;>B$ZGzj!VR=_{hGQg1iZBTEfX=uJ18$u2bVnSPOGr;|~hhLC=ep;&$ zi6)+@C40naNttWx#b`HgIt-+d@5duTe&0?+5j^8Gv>ibJ1rIT_@7^G!4TXLyeQgEG zZBq81@io;R5t2Z=3L>X|U<1}*2~GvAiZv6D`%WPkJaZ8cwEkjHa}ev<@Jc3s`5XNV)>+n)Lu==}9bi$j!blf0^waW1wH=o{&u7 zU0smSHmu{3jpA@Hqv_6k@PxujlA|@7c2J{1>Q?t-8x~hhuFe7dALo$$MH8?~VFJR4 z$zjKhgJ;HOglLzgQM+3YFHr;l&jH0AECk9r;AA3TNj`YoP6-a~s-yPtaA?{$k2#7# zMd1W*o~)I#2WFBFU7Lv^Io*OWTi*l7q9=TeDj;mj;+Nk`IhIKtiS#n&AO9 z80>ic9Bq=FuQY;2i_YY41xo7`kq0>I3!zmQu#-d2(@8c{S=Wm8XJk(@Ct3_xx>XAZ zy`07ri7+Pvf+xyS=s=h@fYMCPgYU6U{Ut_Lm|7*6LIik4D@H1kD@~XCiV=QH%6hdc zm?=qX>RNu$p+ZbDDL;pOjc!1;@Mv@=qwz?emf<5dg8BsnrwsT`@7C^7wetff{^@F~ z%#Rt5AQ&gWtKw?%v1C!OTT=sYPusy9gPHJH7jb`p;Te$hxiLVy{E$M!-#?w2xO5at 
zHHn`A4G0{YUH~h^2h!sY(2n|xlw!z~tIX?dkA?KqV_jP>_*n+<9rzZ_(*fYX#1J_+ z^C!Uyin~G{0dK)ihbk}zqm6{(p$M*bDV14Z>j7xPFlJ|-#wP@Vh@CbWcgvliD__2+ zqhk`?1`F9WN5%E9aM2){>)EYRrmks92FwR%+_n~7bzGg1_QQNj6b_qf%(jKoJvo9N z4U=Vio*%f779y7bxR@@EAI^?msfj9_ccIdQW&!jIf?nSUgkL%kAG+#jL~aJEk^ML2 zcS!}~L^nbo@7kT)v@R2YvEetqDxJmhvW-VOm30=Rc%8hTFdS|gM8_qE$-*Q5!c}C| zoj+Hc>C~JT{J4<;Kw8KLNo+f!PLGu~sI9J_3&E2QBlH_SETu(iiNe3ZbDjWMlF|+1 zvoBef28H4_bwh2a1idhrLIw%y3PW-`KYzw_Y{1F6#$f1AIlE4f_1dvLmnsLb#890s zB%Fs^juLha_dML_r)h8z^l*!IJIeE^8s+$w+{yrseq)YYu^(QO*#mD9wePXh!*?E6 z$$co?6d{x6cpx~Tlqb(of3_>*80B}J!dto7+1fv=rET2@iDAOyn&Kd9FcrV*#5so) zBuxk|O|W?mFZRe&KsTx2Vu}ZFgET1!2Q0Eksb86HkuT*$3wJf zJ-Oj-1|ImIr2m|Qfq?5`SGtUzL!vij%A`p#RV>inIUP0dVQ{kb2FYfS9ZK1G{0%;J zzC*$Q;lA28n}ApvxWH&X@w@fy9L#4ONL?7596ZGf`$B4s7_(UOwK?r51 zrA%xN7J>=_;9#GWZX-*`IZ)ZWDXRzP;K}jLIb$mpwx#t76vT2A2}ihjV#7^rP{qqb z8gDrF7Fk%=4yPz9RCr!w1g9BUPC>RXznDRT64*CHMkB1rat{j+Su={UDYmOCEgHsx zfL*4{IOr4rNqN!q@k-$KHKi^42jmPWVB#OeuXPj|@euHGXDJGae;F0jED<_pvHF;+ z)}r)zLS1N!2+&T-YOl4G@&eP{-=QUt3VLM2ZgZ&xQesoEK)As{K@B0o@dqa{mcRt; zBYNY43;pJ+$TYeMP`x}?f!=N^$D&1us0}t1o9(Cs7{_N45k49wOeC=9Pm}8EKKWR> z)WrL&gq$Ewx_OnAQ`pudZ*(ns)V{lEG4Vz0Z2C*tysb0=3Lx3WwWu+wIvRw)5yU4m zjWS*Z%crvLp=Z)67S`sTOOYFEaEb`FLcuI60P0qS=tyJaCWRa_dmjbF-c1M*3ul5^)R44* z8H84cH!678fg@aewH;e;U?UP30@{&*nEQKUB}KQ}IKpf+o5u?_vhrFwE5Weq1v-dP zwiWP7j|M+SCAjU%8Bn8E0Z0sxH~Xu^NT&6WO66x-;kwnZYAQ_6f2|1}fmt@u`&2a* zqv*g3=#Z1e3g~3Xn2_RLZ7g8j&j`1yLchPxB6ar?j}m{hNA$R4!Gwo1Xoo0zlN#co zoE@T|)ls4G#%z0TvIuu(UfdhwWfgk!YmnJRLFDU3W9LNwa;mZ;enA`)s*`7tPlUeJ z%d-1Wp_fWO(qK?Q)6-g5w%!1dVuRs#;v{RdAO1C88wmMZYV12Fxkdt%0$IaANYz(p z&9opA_qssLp&{O}y=`@aHnQPNm1-8^7g(1zQWltE0XrGzkxYE+-$zd&5@?#v5!^5} zc1>mXrZ%PYEG9qqys1u( zFW8J&nV<77 z@3HpJYePt(6H$~Ah#Vt+WF<$Tw{x-bCztuczM;PL@m9D3xoSeLZusU9J6;>d>rjCW zJuwq|*J>q1)U_mST?*$@qi{*v7?~BnX#XRn#g11TgIuCL3P8#aeS-{jt*RlIb;2&T z*WBtT8np39GOrLCn6=`G2~TQuwJKqJ@@+hQAo7IMI-@59dl0FH^sS4Kd}t-dm-=f= z_cw)jZSI3&dGw>tms2#s|CoUgXmZIS+z}2ATYS#luQnc`RZAx(s$Yx{ToD+kYL1O1 
zKH~8v4{vg(Cq{t8HW*l@kOlJ*aa3fBErFV+v*5U?^@M);QP-$-;8_mCpmF=WHV*ERR1WIywNu zz5&?cM3q4L0zm_yfXC9R*huo0^Te^)y|nT?Y7sfq=urTU-LQyK-b=qxHJCOoPD#^t z-;uasO+;jC(v=Bu0LRv^A?9TW#)`5N*&?W9|4=Icfg}#N!Mb5OENrIVryRc=w(AcI zKR%;S+mV86ZQKI>@7J>6y-5VNMfj|N+vk%*q**-%-j`m2O_JCv$`36aT1Of_CwGU7 zhYdpGBlJ|>1pzR1k?s^T%~@ghC`O*HhiZ?D!em08SPGrc^PMK1IIZKxYT_9y2H7&9 zqN~kpK?n;dO;uHWR=k$<;7B67-vp2*K?RM9dRfa#L0e@9z~E)2=!js74w!HLz++D4 z@aL4{l7{b~eIr~ds^Vw!eo0Bu6USa^J*1PZVLt3?P#i^wJ*mc?D7!o`1HfjzKm7orj|H`akbx4RAZG*BMA`A^ zqJGCf<1IISlhH_^OQGtl5%FA^-+vrL_u3DK&zB%|y>(_aQi;(JciYjf?Qi~_Us1E={XgTrWFfVWl)AhSO2wVmiDkkRfzZC*8-Tl8pUvB=D4YFWs4t;U?d>fU| z&imFRLt8_()JENu6WzH?To4S3Y!M8vbwD(vY3v-QDxFe^3I^TFXSJ&rU%0hZ|1e&x z{17+ljP9a*b_W@lhBe;JQRFaQfi@sb){~7W;C~|WPeOfi*MDQb;~{$TWNS%)|C!rv z!T&rKE89|as%bcQ44?=hvoUXdE$7ce(9sG=-ONg3&|vA@cjj4o14P~cdXDH3AYf?< zumN}@_IfZlA%xLTyvQ-T_Of@TlqnGm1i$^O0wMto0SE!MEqKPr=&_lCqcMPJU#%QE zP%{=>J&VC!>4xCES9ifBGaVOI+LDW_I5w%pxZFu5ieYktN(m5%F{PGQD#^Oa%8RkTQSSas+q)0t$~HBm6%J6_ex?|NLp7y zbpQai)YlCo6If7l(*$NzH4DY)3t1jg`Ry`u|0*8? 
zcjd&JZ|};j^70KNw~rs`S9BcaH^`WZ^2^YYGYv+M87U+&$~jj(iuEFA%Eg(M6mN7t0Rq;~pL6y+vrL|J zQgZ&puxq5qH-GZK4n(Ojc|`M-MikS;6Q}37m(@(w(3O>lXpF{Ah^H~JT?`VbMQ3%l z8J>o(%U&OPXqV!USns^{U+%xsFNfAY(^p;(L-v9_+RCSDD6{29X>}m7eVdh6u0UMl zu^1*fT!%=CFGk6eVvz8q*6AKHp@%H=(1k-(!j~9#b@RF(X%Fuu2GyWr=ZqaQ;qcGg zs{yp0aG$wVS&>=Ugziq+;9AMlW_mSKF$N=ZHj#pY7>ofC4txqV_`N zU~s@Op*K}4Ac(?rgc4q|!X0@rDrzX*iOgNyon6WAI9Pvm`zSLO=89fqGYtMa82&)v zK#s*Z;FQq7hz15Gf+>*b07JMMOnpFfkYI4aCm{F$gU?XrsaIDv?TJ_oQ!$}>`l;9zMXVI~ibGY76bE;(J5@V-FNWWF>-E)s>C2{-8l7sU zntN0`tuCE($NiNg+TUrNKHS6*(8Xdmi&2UCS4sZ=Kc*$A2K#p@CCl=k`CGnx`Bfdu zi6K)C*SfY$jA}WpSbJ>6*w>I%Nt>25X-Q*yv=Td2Rd#wpG)`)Br8W;r#}K~_1FVRW zxKp_yIW__49{>};fWly4ATS(@#9%-e3=iOj(P?TBM#vZT@UcdZfmJ(QE#`$UtV)O4Aa1BU{SY#$2DVx9X4{9pl7?_`;2N`SVF3|m72 z+WvxyAh0?-{Yg+KjJXarXagBec2N%xx%w*HJ5_to`=Z3ikYXzRpBj`{sX)U6hXUZ@ zz33Z?b83E3g5oFV2;|HdZ%_?OVCb!Cx|KU68slPQttXBi_AVt&(0ap41J1{TTSU-k zT9VuSTAoc_8PR#jks2}RI$e_3G7$9aA`RV2GgT1vg6RkrFw@GUSxzgKH&I)JQ=k}* zYHLK*Fte9dqio=B6XwlR8fJDxM<_FenXTL-27%BK`&m32pHm1M&2|4RcI^xERxv(J z;Grk2hVHNboD^D$W4sOD-qazA5~?xRhw!?f!TB#1f=vx5OBveG*vI zG4|!jzJTT|mM7hH;G4AY>45+eSF9>kV_sUA&-T@2$lAc! 
z{dtc-&HRr)dB0{@9;{DE_tgWRClV|XA{Ksgv2sKL)pyDXhA9grVF8kV0chAp_)R3{ zE7Mgozci79kEv+)d<}C7z!+V5=g?xTwg{HtF8k5Q9uJc>Umr*glYvYO#jRU)f|Ceg z+8^!^+*QGY({c>y_m1p+sEFZ3wwVHjHxNT%B{)2^8dtJ^ebmsKfJRAva_VAPdA^~; z8RkzLV1KqMkZq@1SAx(I&3R3@RVHVmX)8r8*BbFthP#wFbR&F;WHi7FHQ8;>L+I4m zZg`HRByyk)y1yXk3Al^N=m?!G-X_r(sY$7^xEcF%65<=n z=Nr%+5=~hcMC*gRrd}dsWaNpKTcZHB6{0hC1J9yo0fslUIM6)Von{XWiZD+OcK{wi z^uQwzz{9nNNEv}lx-y#u>Rrjmh|fPMRqFZ~_R>bugU?}iaj8Ou5Js`%lg1=Fl4$MN zA42P^fl7|_QP8vk-2i_A*+U$Iavj)3JOTtQoe?HuVgm!`AT9D8wCF&uzgdvE;ihEpG5u zY2uZ-;u;9t3I>OQ5ez3Xv;;LC(&2MF4wKw|_^yqeQQD46EwWS{dvQl^BpV)RgDEEo zz?kh_48zR6hEf=iCkSb0i7lLoctstH4QeC`*4)=#S$=$I zd`V7e)h8qhXy|0h5t?NC#EHyK@u88&w~ryl+O;)yA+1a8CBUa^6c5fA|97;6%Z$%N z2D0FWsuJ+f10>*(u%NC6w*1=$f+3lx1rg29SXG^Ed`7OyfZMC)!n)a zfZ=+}z2nTO0Jld4BOf3L%sbWSv?&E6_Z0b?4{;2=e*IzuPoF)4v~KG{@>T2Io#SHy zzn!YvA$oZBD6ME{pbXDXVdO`Dj>f%D=Sf!F+zuWA5ye5A4x|)>u5vq^Al@S!L*<ZvK`j=1v;7Q{pj zI!t6D>X3YsI`aMx4`idu&6ZmEaVYwVqnFWJ>w*x^f+_?y-(h&2Z*r z?VXI~RPt|ODTVlv-Z0#6+uyhfk3S(g^h$!a?L03c3|6WY+V$au#J`rK z#GW}qaOKlO>H07v;Gzd>TR~(8L!K}Lz{WNy<$I^vV9A(raJ9%pkwRs)sfU9sgd3VM z(d9cC$G~V%1?JutJ3#}HA_miNJQM|dQr?-d>iwsCh;=XDMHr*8reWpg^bWJ4p{~4c ziF_%mG?=>aV^pd}Iqx$_H{$qpJ%1~Ve zHlTV7Z9Nd}Vi+)CuN$;vpW^-w2|`)W!>HS>iL7FCQv_lyudrAw1`Ki_SAT}e{EeVw z*8f@UQR-?r=f;6x zZ99`F%HLMKn5ygk;Z|T%WhNMp%mxRsHzSq*PxuTkVLPf`EOY|BtLo44`{HS+* z001Ci@cXzP4gOf&lz947iF^eJgtiAr`yq!4=z-xl$i8<}rruC6%H{S|gZ0xlT!)DQ&B zoKa1-j~fCYDq3eDrAbVU4i6fuo*xaZ2g1869r5CtzagneFH-WHIg<=z>7;r>&a=6~ zyxLIKxN$y*p)v5!nll;gk9_}~*picZ<0MJTz=w7#($IFi%TfZW^>8ju&M8Nv=9=SX z+=h$=(k3nik4(#;_>UK844fo|JUkq&Hm#0N)v8HriitD;UWb0ds!wg0sfLS>7?sJ0 z$oix+$|x&GBoQIVrPr84R~3>*#K=9s6Qxouy4=&V_46xI#9KM^c2Pj8bQQZ3Toee{ zuL-O!aHG~R@HS5)s#8IU?y~J)-j~SK9h$x!Q43@RR6m-y>9PF*xG1h!o8M5 zCfc>|&UczNsqu}@3_*!OPu->p9=Qq=PO{bP&iT(tTvM0(Z0aYg50IAXE2N&EzmUi@ z)mC9_$**%abB2H&4e$TF@6-r+CFJN|d3SNAiC9c&bvAQLCqC+=a2ZI-pV;lY%2?R& z{hYiIu_n+Z*%zxRNd+p@?kKT*w!3h6$RNcjD?KofEqLJAi!d7DZB^m-Z+6?a-)_85r~>^9io>?0lf)J 
zN%rmkZaJ~&Txi+&+y6R-W9s=}cSNiCA%4?5Q=cJ>7f!!(MNfPPHQ9ZJyyHvfM8?vo(&EF~fEGnD7RBe_tF9?Rf=FOFGCFElvB!(%ioO}RO?)Za)a=r!G zlAOtYh2?enFE*~5&_5d0jTI1-wgn2Cpo!5AHzM)7pGv2=L;Arv%B6GH9TcoHx(@gn z_CAf^{46Azy5?squS)hG7p;Csp_RjC2_c^Tdcep& zZ66qLog9OZId+F27#}X5r>A_(-?X0(>DG;hBslLHC-jO=PuXTPC{?^z{b1)u9qM5zUs|n> zo2>Mu{dQ-P_lci(X2_WC17jGT5( zPnkZ$ABz+|CUzBrMYyd5BT0437YwIjC|WYVc36nGB|-+VhUO4y(xaIqGtgYteK0Xt zRnh;X4r{vBHm})l1@&_=Q$&e3D4m`FnX%V@qpZ;CC%i+YY8I%qEa6ke{3{=Ll? z!-S&6mt^r+ERv3%g2qW13XJrH(BM&$5^@AR2Q4flfgK2ABBvq%>`t6hx0z0t555@# zSO_asr+TI?jn6GtByVf}%3C14T`N!1zIi!e6=CiGNgPO0^?HtChDDFQ;bbtxw>Q+* zXdXPPoPW1_bf4P@gDjx@3AJ@jC|y0ai;^!fnLa@{&Y?MqIE0Ey0ASCw79<{F1A&+B z)HqWm?cc%6G1b`tgTI&6`zU$917UKT)Q>uT>_uqean*WusTaOuBqU>E+o6zPFcT?* z?Xg(&&c;Gm=>th3AN1R=J4Y4_72gLl!;FVn4B&AndLSsfRFw`apbb=Rbpng|H%aR! zDy;#&uN2osIm^kT6$64qxPFckPfo{~DqY3W^Jp77ISKPDMZhI1=?{nlSLx-0(eOo; zZfz7pB@B6JD71d&+ZvX~i%WtwLxF?r4xD}gZzb8HdR1QAI;{sU1XL4k4Xv>XDLvHO zX2e(Pbo>-&>5|O^Q6`u^X)(n7(e>kUiVh1DFWl2g8ec9yA8~(W1mVEZ9BfQb!H^D9 z(ZOn?KTD+IC|UpZh$L`^>^EyvXv}>!!B#z!cQ-CZS6e?}qBuZwR}91s2DABr#JVfm z<)m4t5dK^nXtU2^2_%5Z3?3Y60}V1JswXwXsTpvldh(V=V`OlPi$b3Pi1%2JLAGi^up! 
z0-z%wRYZvLuI_0pM$5cNB!-I)9lf@$nmo$phH(i7EeRe%M&kdGq+j^^f|mD5PzmFu zS&_tvt8vORM4|Z7D$;{sfpGNW<^=fxD@sGGcZ75k$U6=z5a=~6Z2pfpo9Dhp+cwS- z{-p&JEMDmItv)9_rcOzqyJu=pThU-|Ex=(HBV4d?$Dejq)zLDejDi2B*I)=frJ1P8 zB+T}1J==eO>ONC7ILKXl^_SJXKTH@0(EV-H1N9-SbHt)sLDA_xv0FlQ(eKkwyQU0y z4%$J2KF=Gscd6Rxu?8)-R**_}<>Ixu#JHDh`XDp`WXAyd!|WL!Pz?bvST>NOSe4#6 zkEaIZLj-8>BzHxm#88I<02~pY0h-TR+iI(of61Gldvyq0Snl=Orr8NR!l3N6`j<80s<1@SlPzPGgj`HVU6`PhL!EFy$6IRX8*C}cyinv z5Gv7>uN)ud*qDSteXrNDcfK=UI#7gVXfDjHe9Z}(Wo1^z7iK8WYCx!3{hG!%$!b8b z)q@ zh|iOu*&r?$6X4}KArfd}Y*5LVp{ni<$A%HQ+C@Thw_*@y@?vOMx(tLMqP(vwlqatW zwyLS>O;+Ay(WJ?R?w}AhQZnNnJlK>D1u?zCwM`W-g^-z%Vn{kE4p%0PNQ|b-Rf&jA zqQlY;u|GTkfdOG?5VJ*BYww#Hh_ARhaw7jw@+QhCiboIvjb5$@A?idClZq6_m?~Hd zQj98ob?$AQ&j17rKtO3U0f7M_r>;k8_b)$5zg=`yWt1Ta65|W3!J{!PihN=eGms)# z*e+8am4RME7DX8w(8i4G-0N=6dR9Dh3;81-S$)T9-BrhDS$8d9!Ln{FZ_GFLtD$>0 zAe>!UmF?)Mqz zi8g8fv}r~=TG3OQPkmdE1=w88-7O0^vJ#}Bw2Vo;u(TCqWKAh>D4T>3L=IwbC`3`r zJE#;3Sx5u=%nc$A2x;Hy1A_DXrd`_M^ybL!XPJLF%ky$yMznuHo@F5#AzGa%=fo7x zFc&$L8KfwKj2dNXMf(yr*f2(2PsBh*+MyNa;Y6+Q6)BA_Pj{!g;eddz`_szogEgfZ zj)X&H5I2lDECZcY1_>fd_?#*EAXtu|LWT9ROi`wUC0LvKgb50F;SBvUzorAijb#Hs zg|iyJY6%)>IgeVe>=fNik=+DG1A-Gh{^j9h&8v*h!L7hj&K32l194^IN00)|cI_WVf<9VH$08L^hZps)nr2 z3^ruZg1CYx9ln={zNA^%;7qdO1WwTvA5*TIE|2UfYHjmWmF@HOkn#u<$@bCROLl;Q z0~Fj?K8(^N1X9T{l>Q^8QUO?EoVT$sUf*=&SbW7 zd5Fq&sphoOn6unoRZFH~1PgA8$Vql*sE{3sV1eyy;SHac)_dY9PdUBzPWeJDsfUS} z_&J=z#BnwUgq+@j<6LcZ-tLGTs8oyb;b!HB7m+0)$dJK?prTo^Qj{QC!y?xXq|^}> zz6`6r!qzRsaSL;tg#*IWgC7riJa|;!3RiX?5IVN|T%9n%fZ+UXl}U9qBUD9nlc6>U z`NqgXrxp`M5LFask#n_R(KzQIK@guz7Kv;gM~!B@@}|1+;9DvCjG?YswDU$UoKr!kX$4%`k@{N=TH-;R<#reng3N zSeeLyPB&cj9AplOoSeACtD@pKh-Zi&`)#UjQ}lm8xO>W;KY4y~nw<9|-G;#(k|1Z~ zpCQ^C`YIU%tIFg>hC+kz$bt%D+Wmq4@GV(3* z&_%e1`X!T{{cA2Jt_9rT#+`>XDb-SNx0<=j?Odg4%VRZsSpFmnGp$AaHz)Dtc`808?7w+59ite)jO4Mpo6$~f_ z>lbfE^bg_DHKr^1YqURdSDFUf0|y*m*!jL0(@nYy%lgE5tr@7@$F}zN?vG|d7FF9M~T;_ z)ue$qa1jXWz$|k{XWMKJ6bd4j0yKC2j4H{__yS(YdNVD6wljxjh~z?=K{AIWVGnKN 
zkX&TY6vU(CM3{-2x52S0dr9z)9EpNuPYOi4lYOLrs(-jUKqjZ(g_%Lfh`jOLtVma? zX!dSS(9Z;$&3ZMU#!7~R1c7`mG;pIovK-pmA3)-)bo53(L+GFMVC9zydEWUw9HL-1 z0McNAg=`b+<2*)8h{DVS8Si8fQkL+t>l_`MA*OVp3L|5_4rly+oCUq6r#-W_TN5D8B#h$85S%1l8S&&;i`nKL$t`@h%N$Dohm4{zpg4Ydd^fPY?!o%8iYtS(1fi7 zDUu-^nWo|L4_^>v-6ffZ#bTKC04u4HBnafBz@ri@dq#@{f#wy65Hit#)EpF!Phmup zeQHl@b;pG067*4m1&$V=Dn{!_G$%SdQ}5jQ+X83@C{{biJ{viixt5gaSogyc zJ$s`cZE5C^m^}F-q{`;Se74BgkQw|8Ekf?J5?Ld}!otXgBX>b(!`^X{y03^-$t|qI z1G8BK$YW%HY}*UJjT-sXS4VDS`0plV?aTVs=frE<=x7mA*EV(zzL`If{h0-4c2a>L ze{Lu5Eo(6t>$xf;n!YjX$YtJ^0%KvtNbxUkzgShtotbA&x&sR#E%;(FDi7MVskFOf zQQ!sS(jeia{gIKUPD!Ck--_F5hu%=;vLc5Kn#ORx7erlEGQ+DT@3?w1TBU$?a2(?I z^nSOOCc$0Jh@u3uUYR8JUeMsEuQKM)RY}LLB#<>^8lsPyP%VtG6*~bitC_J)sA#uTo91Ay+a# zP&(u^h>tgLn;jl@`^D2*ly#vAt3~R_N%KIL5ZM7oUf3sDcelay0-FKH81XV&JiVPe z%$=y&0I0KmgbRVmj5GV&20gop(O0lXo<%Z4fA5oE05zTg!{gAfukUFrzN#5f6^H;0 zz31tyG$W&O7~C{th+TQ?+(rpLD-=wU6p6Z>QnLW^G&B~$WvWpu0G4Z>?z73aDR)vD zhX7JUc7Y^F0(&8AOjlIAkx__t={?Ys$RB}3EU>})4fL`z*A}^fY%f^Hhy1}S2Hae< zo}x~J1>K;h9JHDdTO?^HM#v#z1x`h#2^M~rhMIJS_A&3!%nc%8F8{9kP~udHmA5H3 z0I2-l!m^uv<&MUQ(-xgs-OB0kDy1q1P%u50a?t#4@jje~@{wTFfp{MnNZBnD?G5T; z#GXYXVluH4G?VOox8Z6EMMGt{Ba{*fZb%s`HZT$P2QB?K)WrD`5GhMmAl8P`*&BC> zXzA9Q5O{E(7#I_fs@4toc;sgaQEZCxeq~XEd6iv^ClR;woKcQji8tuBenJDz9J>x+ zlQCAhYQ+9?x;3@1@#j1+J_dFCHF^YHh7V~GvkwSdS?AS%0uLa|K}t@x%qLJM$a3ni z9hi{SGRN8>UWEp&qrD=3=`KsJM1Mk-0s7+O0@FM|-X51>2u@VOO9(gNxGKYT#LboI zkOMVMcJY*S$7-mhm)S4QxM&URCz|uWg!>cV$14RO3i2xrZi?|O!ba1vYcD;Sde#ff z=P-X@nsPSq5Peu3ey`wVD{G{dE(!1+Jb;474A@Ol1y7LmWSuA1jckEh`iBki5uErz zCsHchY`_C04jifiBjvPnV5Wx}`-5PTyc={h{4;o^NoH+dBX!3TN#nF3WsmQnGXbg+ z@p4GOo=F}KbiW6XX{@wGD>ONDHYNnb|KsNmsE{=euFw;!1<-g*)CX9VTCYTNLY}pj zUf^XFL0xS1tJvPKDm+|3_7@}A_*k{0dP}Mp!1#h#YHmySsO7dYL&l^N(~s4(wSpu! 
zr(x8D`YZmcn-M~00kG{*RL{fFg0pJX63<7a1?BI;^zR=hk5?kZYBQ^S?t!W~fav zbuY{tpfg5c34_;wgrP@x7;CMc*4}}knVe&& zl|4+705lduO7`1n^{bV`-6Vq6>d%DZx9265Ax;)WKvP^>cn@gq?~DeVF3xW8o8)3^ zg71>IqBrrA-FVbZkv_HjQ6kE!woUp0C5^{ zu*E0L+IO+Gl>74@;8VQ!{vz=qh$@q4pkMiRYEH53FLlTh;9frR9HU=pC-tmnHc$u0* z`&8G#)2N$o|A4>^i0_L}VV<=6p|f0oPv*z7rSu)iQ~P6VMDvU7bB zfJhD|MV=XJv3emq>XUneKnhML{ed58^Y2Xv^#whs!~moPn$7*YAFs}XE;9d+cjK=t zcTa>JWx7gUIVsc7T& zpY9srnYSvZJ1shP^49=;rDiXFOt}7&Z_=)Zw9lXliPJrYdB}sOb>)~-+3#I-OsWWL zNIGv5d-bbLqol|3*=;{l%#V;6x?w3bSH5KqjfH#DuD2GQ?2Q zd}TRxe3YboYxrs4?%;;$^B@cifbOCC0I&wI3rcTM?9)2? zE-mkn0JJPYihA_^g${`rm<;>LzNU4@zUbkWq6RU>q-Cl7a15o(8t3W{I;97Yho0UR zk0=2;J!Q;pYX~kMY#^kOu~cd#km{YV`*h9u4K6cK{t!35L8imQ>zpAO1qrdRKeY5m zs3bCBoKR`IMf0Rk&7QC{as|C02{^hHc;6pV*yr{UI4DPbG@nD0Vk)emvO;N2;^mp^mr)yp86#ybWO;eUajZvPiLl+|43$OVtl%CCF(%aWB=5SckI&)wA0fjg~CF-cjNH}K!JxxUL~ol;HDU)K)avIUUS=g@W27G> zX=8{!0xyZFEZNNjh42pox+ZChN2-gd>@G(~qr_KA-O|`^aA8`_QP~b-J@K<8!n$gX z0EFE2PIeKodkyom>&x^$fX6XrwB$M~t~h)*Enacq(jbwVMGVb=mkI613~gG7UET)J zuvThMk_gJEW+=}9G4f*gf9B+@rzQ%T@0@#$EdVQ zhFcl*eelP7a#V@qse52N?ha44J6v)XK6;%^&?$++)NEWXD5S$eI+4Z40{AqdsvB?Q zy_HC$M@%F0YKd^n4qY@dEdF4^n0tkHSf-w%h{}&BOWAkq(NsSdV~%CtQLmrO(cy@r z<+mj9Ctv_S*-fz@z9-v+DOVsou*plCgz~sSHpxT;e-wZYbY))A-NfNPpbFDXcP87W zv#zTZ4@m?pa_kY}+xHQs4{%LWOiBzZLUi~%#qcdl+jm!6rYBws2}^e7gCPq~E8mPCimMK#8hH-M1?h-FyH>Q-(aMi-@)HmKp$2*nhde6-&= zVg#!oXAuasv?C9tEBcl$EMrz{m6*F`&tSC3nKyicQXIS`4@UTlSX}!pNXrjH&pEdw zY`B}stDEER${`EXi%I22sL;wNdo&3e(;z$XzkI^R?w)__WZXNQa-BK*UaqRvmtNT` z$J2~HFpSg|0H8R4Dz5j@H0c2VYz=HVzNbnK8g%al>C`PhJXnA9Ap>SK)(T?F&KX`aj9YJ#~!!j7MuAnja;l249hp4)%o}V zvn#|`F&w@%OWd;dB3&)9IkHo#dnzO`m)>V{N)0CC>E?s%)Q#_0TZHYj2iaF1ZipJ@ zDO;2rd%=uA2CtKL5gTO3wx!lVMY!tNM6W5xQ@9b+;Ot5@En}8in#G(tZ-$Ay`EN>x z!HVIS*k)+TqO{Nz8`B>l)gVo1RubL?eE0x5ps*Du5Q{b#z%s@yR7X3HH`LT9f`?fv zobd-Ye7A^{768hgc@O~rfWZk}0aF1(0SE?!gam~HB7x|WFFaBq;AwabT&m9Wvr8*> zSnQ5|dKjJ+=Mf&^o%NCj+7oF3L%wCw=$Ons(}s|Q9*A#)iFkI*9|_Nf>>wa;Fd!NT z8VnB&3!7R-&L(Q**+6fA;PC?keXOlXS>Sh6-q;o 
z)!DyBS2KLjC~IA*DLw)IQ)h`+uw{B8-35tCdBUM1gslbXps`>9K4~bQNk5<<4GW&_ zQDSEC2NXmAtf`P~G+lH-F89dI=?tPK2NCoZD(cM4YLLRpE1#H$QvvT9A-dZ%6jp>Sf_L@Fp2M~OsgF4~Wk|9UF;Yud+_f~gc< z+7?buLlVq7l3a4Kt*&;l7Lnu$U5_N`Ov^(PSq+_LowUmm?Xn{JHM-J;PIt!YqK#TI z+q{3YCvA_J%$7vPn#fnITu!UYSMYgtUgjm$Rp;egVz$|7L9&{9PUe@TH847ol+7iQ zVC}46ueR4K@0(T5P1RIQl~YyTPpaN5j|xfbi0DW3B8J0_o}v2Vc`8SRvT-*D4hN0} z6c`00H1b*y@tlQ*l~=^7yiVn>=J|Rik5fI#R&BB-U9q;P{xXte+CQG@Ac@T<@~uRf zWvmLT7EoYJf>5F<7G6jwyk?f)fh25}%nw)%ZCs zm&+}#sR!ASz3hnj(#w*mT)OlCk!@VD3^Y7g6A&m!FhD{90hdHn%zAc{STj3HqsOjL z*&)d>dx-g7;`OdqG`t@64lCQM-wUr~-PON*xw%M!nIxslosOp=N=774z22WlW29}lnbwvq22WVN(t@sqa)q&%zTzqpF1leD_3Mj zxBJYk?qknuk0f|6x*dJLl4-NL%AUAMa!5k9kKOk7Q?~4D|EtGLo8Edmt$jLe@F)B$ zvo>!!i*rh{q&eA@Th^TNcQlf+f56E$5DXr_$;O?VURRkjUnn4Y9XwcgpuxCe`Vp1P zEm+02BRn`z=!`LVq+NjzFd7I4Lg6}thkM1Xd*vod_a&3?<~E>0NUo!S&!XqC^kO@R zAiA1(RC;Z5TDzQ9d*^DM>uu7zVn7&>;9w{`-C^;V`|$rIVX}JzI3JL9GAw_z=Fe|QIe&}GBXoNPG^*7Mwyj`hGtpPYM7TG zqr<#0$7N<%=oZpL;-#0vw0qjQlCr@Fj?0$HmsPq_(mW)g-%YbrhedVPt8p=FMTTcE zG{%ky_y8;^`7M#8x2&SF<&|Y3iJ_$egTVlDJU~2u`XKerdeuZf8Pv7si*Gs=kzk>C-*=e)Sx7|Mh2ZjO& zG#F>-KVQ8XTe%Px9X5Hn$PurD)un8<)Rl`P@D}|uI>oWBC!G=GZ3lQJDX(Rz;4nA-xMn1*Kq@j0w$q? 
ziR}PF)c?y3d4d6Mf!{PXMCNZUMJ|m^3-nuHD14l&M&F6xPj?{zLdY2=8Ztyc&ye2M zz~cn@l$E8S(?BDTN>XhZfO6hFl8$5ejz^fFlSJ@=-+2QNckQ@~f|H+Y0VaST-#+dC zMq13BE3cKw@f@%~M!Iah z53G$yM1fJvYyK7f$KcEmOw!4qiL$m9b50Ssf?2>$S^Lb`I79Q?jMv9M>tt;er)V+U zP)*@1U?3@zTl9+vw{#K2bc%;1mGDsmra(Fp_2booEUQ3^Y);}4^%})k$_?X**ef|g zxkrj5Z9IMK_mIK*b!l~*JwSXH25Dl&L_C+jp8TOL0k{-7TSiDh#0_;>fetB}8tVn7 z9V#`E-PdCyzs{;m(gchYCoD)52+QREoC7sP`GwKv5NLq*9>~%zF;Ht&uos8CXJ}7X zy`H)d6oW7Hkn?X>M?|cuC{?Q)RmNyRfQ4a^!OvfbYdEYab}p>xeGDSs+J}zqf!CuB z7!G83XEl*nj!bJNGEC$1k?*u?BN{m!ETIj{#xY#!4~Dsphl=v9@kG2a3Dcn%&GCCP z6ANxbe;*-*y>+#&qM>88H!Or@gnS{Xd;+hI@kKB)+%_R+YC5a_$gLv@W`zLP`HbBw zOzZ>$gmS8$r3ic(1OyS`SjI18CfduOSmia^vrZU31+c^Kkj1{-sPaMMrRnuBMuO;7 zi*O5Wku;2+$tdi|{h4A>3xZYiin5&ORIP+ymJ<1lm>cWN#O_3mIR|~mT}y_oYSoF9 zNhbzpbEZ^x-IKNrt=6J}=7IZsA=#>UC2!$ui9|L%0MTxoLo-T+C?0EyDod#1|10eN zbb*1@yM!rx1k`P!7*)IqycE_%IY6bBb-@n!qARSm_J8nx<(;`C^(vIcKgVABfJ80P z&^dZqpx}$%;Ap^+UI7ltlCM|tqH|cNhIrN`otNXWzgwjR6;&Jok!fD|g7wpE)q`|L zQC{oWFmHP2q|6Kxmcfq7Dd3ZVqtqPDKj4xz--uZY*vA*4UDZ8qmS}$QU#1hDHx$!0 z1+?9^q&<{9;8R=cdleNl8gu-2P&FOG;rzF)9Wb(JnbX-yW00t8si<86K-jS+=L#qi z35Bc;e>A)ee*T~hWaX9!p{uwtU%!Rc?O1J;m&sIvIZ?O%m#fO0v?iP`4sQ>=YMdl4 zbqIvQc~uwmsAYZlw8$9)s?JpfU2Fl*?X&6`D}_bqN7rYQ?OPS1gl&-pAn0tr{aOWj zGx$5CY(Y;d87W~shE4=nL8mo;jqpc~cP*Dnq~sW3h2GZ)m*RYtKB`wfFE7%;>DR>W zKR==g1?>ZI>Tz26aV)k>VSX@Uw&|Mk5qduG0Q_RABoU@EWY0zKo3w5Wnk_LF*J)28 z=s7&N@UKUw%GmzTChBx1+DoGtln`^7>RHieoM789RL|lH(7kcut}_j~QefdJkM|TZ z!DjiG-mM-;Q*zZ#57smH^I||@EkFFWAs5{?9+Ie<0`JI0k>LLP=y4`cQ3yFdBK*#b zoxoIsRA_w#|0(!&QcId_FeGy@OOaI#=Dmg!N};Al=}Lm|)=JD;DKG~P<{Nb>&>E}a zjeKI2bUvQg!&rm%H|J-%trldFTqIO=WDdNv+Krpt|_L^r#O)Ux%EA#c!{ z%DA*ZUE-KU(zJ-5&Oj6>sNIg;3&`PV67C0x04&pmv@I86rZmg~85s}`O7ido5%YUS zEWc_lLFh?FOs$O1oF_t1y)6*^X^~t|c$XXyQMVkZtB2$=mk}?Sd+X+E^!5^J0V}a% zuFUh^LT?7r=i;*j1>az`Ixs4*`g(s`JRFHoe;?=WW-V#>RF7>=jkeT#bD9HT@=Zib zWm_s%@!Ff*8$ZWYaJ@>jXyFNdA&Fa+M54S&j@c1>#zAB0jOga~)O+pT9Iaqa-4j z#rbz&_$rP@E9L8ZO`ZBM+$tV53g#6E< zF5CK6VjU5=1it~Y6|w0fg_KoljRUu){Nih;Md;Ny#>ZCdvAFwP)>f~*ABqfV8t%6Y 
z_^*;vK)_EZBSvJo1Uw7KKz4Y+%3#kxF7gj(TSw%fx_us9^3MM`A;0Y+$%_=J3ShqN z1hZWF1FI3Y04NG5`Ftv(3P~frbhT(hs&ES~q0+UPv$qPH@SIZBDH7Zfy~SmMTLm&^>X< z7f0clPMyGFE96)12h$NNLC1!N1NWKn`EZ3bs8&+1hH)8oXpN3i8c47t%3Opt?f52a zxGXTa$VXd1SaJsha*Dkg@Nu+Q?!t*Z-pz<3JF${Fb1)l}$o12+B`{r;^(@>b0G^4w zD7kdb`|IxtzmVWyCku~aSr6z*Shv@rqCfVR8L4J~OKk+`{xqP@(2*Wf$my(7(NP>#O>-z){`Q7;{t&waz);UAAJd1>$|Bz%<_S7#5fh$CEAG7#nl}9ZG$%u3 zDn~}!)X*GKj-b~eZa_Ipk-HJ(fp#U`^|K8fAdUcxkquD;@L$?Zgx35S>ZvJ<4HoE`-~&@O@Iyj!$^PM~a1H8-q^3h5N4tgv!yhLyw&OVazNbe?wUlYDaEU+8LT0Pj_omK;4y%LLYp(`snms59<4Lhim5DvRN zCW#!|$soE&CE7DdtOl0KY)6%p?hhX7S^iT<%`s9T^1dO**p&Yd5x6ig4N=oT-}D~j zvUIB4G2&x6bnQO*)&D(#aoq1%LL4U0cy61>DK^ zpu5tLE#={Wl@GYaH%MCR-Y<+=Qt)QD57XWN_iVqeX)Lgq0GQB5ZS}f~TkGmuHM@l2 ztg*tIg7~S9G*Y#>kh+YNSxJ5+?|L{Fu}mbf<&fnj z`D1Zg_2TuQ+mz|;h@=|5fL_K-(om%#OQ%6EUr@@~1cXK|QKf6_tn?lb7}N>Psv=o% z;+i>3y#w_{{XS(w{>UXmnb3lS>L67&CBA@bVUv8YJD+*e5+;TvB}QovlRHX`>l>kmFJ?RV1SCGA-&`3xTl=Yv5|VIw^J)r4r0ffx*CKjtmcN4Zsau zAf&J9=nnN`i0iOXdQ8y!bKg_Eq&}F5Ih=@j^QkMpOm7LVnI%OIDA)eokvG8Py2N}s zzAkn@vKml_wK&2IxH8rsq=5jpz$g+E59tE5GU1hT|A5*n!j2RTg-8fe`O?!E@iPVG zg@PR=IZ0%&jz&Xv;;5QbohsPzwfzvlK9}|nQfa_fW;@(wBmG)Aj*;|OM^9vje!ij> z#5if{MO|5}o#GH8P63%r5mG|bAg%>3Q24k{mt2bSVE;2`q4`VB5oF9D`q5ZFR6z;)d6B1yITx$;}#idgKES|mgD2(jxp z;~m4_q8laqDUkW66CU%FjtxGbO9`lOYMh|UE~SqppyJS%l&`&It%Q_FbU|hJBU{vf z=6FSr%E%b_Z2>B-UIiV1cZF4;z6BishKx(yJkgJUBoYTpeDABtm+nr;hZpY z-zDDaft}Qs7SU(0BHYWo^q)fVKQ~@;xRU7N13-t^0qadnL;kaF03sIjzt}3w-mUuB zcSN;u$n#x9LG?c!rM-)Kz2@$xNblYjWg>kZ}mjfMXDp zp5i1qM>fyk2Ym^s;4gFI2fbV4qGM!1z>33krck<#Fd}@9s9mKyp&cV$y#ndCgF-vN z@y0Jt+|HY^YQt+mXpW~gz|lmbxqC%%wiL(d*s$mp71tBoO8VE8ROp2w2F?g@2ca^g zY+z@NxFg-Y9{h)_?ikqCCafOUQchgc)&K%tyniV~Io8IhTNqJslH5oo=%;2zCX0dY zkpVxMr-~b=YaVZc;c6cx$nT`uB0>A|=TuB=tfy+SlFCjrsx!R76)Gy=sJKCVh59N~D;NG1zzXCVE>v5@*pV%;5uDDM=z=$XS)({Z6HQ zqSg8K8l-4726Mqy!zW(x+rcw!U44PZU3#Sgd9NA*&_}j0GK)4(iFM0T=dzrpvC5g; zoON1-15{%kV}o0FmYfEokqNMAx>~9DQ6{wP(%4h8(%r`VA3&E)`-pPs3J}dF^+x_> 
z(;V2YbiQ*C4MFaT=<}^8nbQp^UYM#WBqQUo=6Ahn+E0Uy_xVrNV#0ji#bL(ZyxDtu&>%X}{&ySWm|2fu--~1%M>L zj3&H9LjbCQ_|#mgXBTbpYv!Cm3R)BJdz1yRi?;wsRrSp)t>lir3b(Y5ed|W z3K#h#9R6mI-acwaUSB~r2c#HObuRq*sf?xTcsz0zvclx?nD zt!@}Nf+gKQfDGFzPdANXar&yly{Cqj#bNOg!nj?Ti;{3ykhThb%;HtF7=-ZZH_QNB zT>;BXOG7Wt0(B)_+x@?cZjyw!*@_l+KbgB>gabYaUy4k~mIW*UIk5uAXu?WWN;Ajm zES=b}t!WBOcdx8DSW%s&+2Ej3IxM(Z$+}5I`_w?GHs(1M=wvTdbdw1k)-Io-MFg@E z@q{D8BP)&P-pOZMNr6OBDVgzDDLnze-brG;+BYuM?%0hXC=*ED%(Q^a)^S!4bu1%j zzpqb^JLnNEu1}|q{S}7@s>Z) zGC>ac5iz!h?8YW@{pQ@aWO&<^njhH)n6lkOcnpXctNvjg`8-R%IoKQ!@1Qrp2C#oX zOq!yX=u#x$41oC%3D1}KqBfv2feVuAU?s~7u%6}ce9o;nKR9MQN|AXo88h_m(0rrF zL|&@JLDuFKM@X|`ZDb<-FZ!qMM|c7BR_m2UMC$mbmVhvtaL(JJ5iSZW&=}A(%!&{ID z=OsSeKJY`k*gX$KN*+kzG`{dsV-_M}SHhqn`wjOY9f8_S<}oMGfvN=su(2Ucz`foC zyqU{(_;)>IRxSeL{7Q);gs1srQ85#Pe*`|j$5JasrE*U7aShy@t-E~aKq8Ni%K&Vl z%74eaplV=%40zy|EfOo-vJWBi*)vdNJODsL2x!|>Lus1gH=wkRCqt$?0=*#!t(#1s z#LMgYdpHl%N^?~7MH*wEP8F#SWF3(w>W@?LG~~5uBGf;?%e#;DhkikS-J<|VuxD~; z_$qQht(Np#mD_1d#zIJJtWUhv5TF*Qq{MyCj1#GrjwV~3Xwj<{SDDc5+1D2N6FrjLEoR|QnZ9z%+h%Tl|5-B5I2qbVJDKQqg&nu0| zN(fsxKd*P@tx`zgAmlekuSngb05jPVsp@I3=hJt+?at(@W*O5xN6o$_L)z)G=jmW` zAj+-euJ`)0p$09fvVdknpq>ucXa`*Q0IIyK8YuvgCdB|yglQ?70nj2(HX+=$lI5|j zwa%@I z3Gv8bSZRZ0$)VXG`JPOj4L-TFr`hxdF(PjgBs^SLB3w`;XrO?>n3A_FYvx8%H^bnx zMJ@WObae9oIgCNe*p{knC=_)DD)wS1&c&2z)){Bm#Q1+@EEh}fl=PKiv)5NM;{&1T zU{0r=-X(Xv1PgPXLx)8Na)_A@#4__rIe0bfImbDa0skTHttxk{p<%up6*#j~)&7`s{`Ok?XnatPCiLsR+?4VjD`GII_2 zwxLW*q8j52%}|wIy0m;+QDQa?eZ}L467i)(%;Mp_G=qb+w_&1!5^=Z`jsC7UJL}C6 zYnCNb@kSZr-W`^aca=q$%UGevg`yS=95e0)^Xh8b1`p(1gM)@=NobNFL76bY99p)6 z2SNyD_Ub5UC1KGBYYc5Y4o4p3w1{LFlkq@`TsAEg>ExT%$T&TkazckzI8Ey6lnHk_ z5iT5vO(7y&m?p>cQB7@Y^mEXy41;z&kaXQ9Ot2uK!D5M!00R&cgE{E3@v*4FiWe=5 zHx!nMjD<{5G{vYErl(vyWs=eO71y~bM0g-?GGrf5W}?-fR#ZkoNecxU+G36r4Mlco z5+&iH@WN2!(pX%Pi$z|UTjX77ywOnHc;?ds@gQOo5iyaF2RL|?wZgj-UR6zzUehU{ zB3v+1CALnh&ZJW&1O-g`1N&vxFYU6A@JS2+!B{LvL^LQECL$&*6Aul@gM$eS5Fj)( zAQ~z>G$PWR{7$R`8FNxX>*+V@GmVhTGh61M0*o{=WB%1pfB_8%3J((v%9*X99F?LN 
z&pX;I6w}2lW?3vto!Y=EW*S`dbn1hz5ni1w; zUSdU7ghZ9#J+Ngd$!nJAns{p}z&HSc00#yL0U1b42mk^H5EBp@5(pqTfY2bJG(=cf zkZ3?EKvX~^76}N52?`nrfVeQhXiyrI28l%jg#xlArQ_gTa-%%bRT5o7M>*J}mW&5! z@RA`aLZm@NN>_^lXvzb@Gv$HU{q`IGvYm)9F9*|E{9z@xI=aLgULyJuaV`;imvlE; zA!9dIVObT{s*sG1wwApPWL}8K(V4M;LFd3=gCznn8jS^`wI~{H6r*f0UMRFEQlVHD z#eGT(no?yUMZxGTciFh5@hejQPSz=VO8N}e&m_-ktZI(NwO4Jhw3KBql%gITNSSNS zRY*rY5Uc~4LLZW@|0u%YjEIsv%c^TA8LRBofsEBCJY-Q@V^T{Rw7W4_JrHXh$dLmv zQB`d;RB5A95gB5bpi^tmiWUzQ779xSPv%9F^|It;Y8HCvm>z0!HDq|$$w9&_xT1S8FA6?fI(Oka?_L%i7gkE31v| zK(JP1RezDaa16)HuA;;`d2Ens67O=}MNJaN4&+I#szi=f9mwE;BwE!6eIVtgeFVS( z@niERCn_q#y5wleWogP#mN!MG%wKI+JMDU(tSg`Osx6bUZhTfau}Hj!h>z}v<)pp4 zT~fQbpL_PiL;Izj*GECSdx!E-KG1!iB5DhB@uR#4tqV3P4l$=^O`4R?+@hWN#z4Eapw|%UEuYW$LRC!$i3@;`v!201idWi0qI2yP+lK+U`^t45?fIYGga2U7_vPV_ug9Ddb9E%f0cc{|% zsObn1!m?}P5;0>zmP7{SIN}>s8)}2B5GWPW5V_Ry>b{T>&`}8iCG$coF{J|GNWN`s zP1omRspH`e)I)BxP3k1yC?wLdT4AsQFOWFW-#NCbyWaBDNCpBoV@SkSaf}gzZVj*I zGE9&<74^6*pU;wx-WLPfHd3H1$DYJvP|#{@v_%zK+!Wheo{G4!n0^YnQQLwwhytVw zC*fO!I^}LYKOYKpQG)3pu!9G2&XLWlLZ2Ev5@xQjV6Pw8x6w&kQ|>fICn=sj#0Ar{PD#H zPaA!PZ$0p|XB2+T_yQJ8DdGlTL<~EO07}alxjsBTN=9>6FH|S!z{JaM?X~(8cLzI| z|8~t1&`%I5Ej;OIq`Fx5XRJp47Zl0&F z1i&e!ci7kZd93w~v|*;97g7f;Wce?-q*EbfTuFArOWb<)6y0ujK`^}SfmCk4;MCjC zK98buYKDtn?wh5{R2D77b&J=)!QP(+W zfvUN>g`g5!*@c^b3au=)nb-z*^MFhCDOW?HWHc!Bv?7Hx*cJ4}`8#jpQfz+oybWc1 z<70=R&rM@U98i?JD$v-k5Zend20$jT#s42ED|tPMLp=mYWdg22nrF785gj%ULU?@x zb+xLqJ@3lIWk^@j6oV{*udZH`qZhSB=m@W@dX)bN=|Va1uAj?`n`u4mm6c9#1p6L$ zyYF3laaAXZ+qz%?+~;E45W{C;YkPUH=(0tUwz6bi?%IJ_E z18RUPC?7nae(2IWX-+Xxp=Rsj6|&|*mQ)7ia1?o~QYzd_Ko$40wlYtV2vm^?DiGLDEIn7MRtxey8OwllZwG3;V1SGSyWJF)khEkM@P_wJg zIgzSNTH%5OXKof`17%)CuUw)(#D+bgsRd`k^&RjkY-dpo=OH#h6205e5_q4~OqOgK zzvBmcVA~`jXh!L9E6`vYw=c8=@{yIciF(u1uTv=u7A$n-2>#xfGMp>uh41 zG922ByC9%Egu9cbRatLPM?632q|jJIM(IQ_k`;%LVIDgawebFHUB8AZWJQe?{BE9# zlL_+JZ_)ryIEs&x`ez`h&J+eQ)et901gbTVpmD_LTnXnp#}&bPwWZU-XTZLE10A%b zwUYdh0o6kUkmPDZbD#h=AOvZDfo-3Zrj)7tlELtceP)#)eL;+)WsmB_#t#VnX~LYU 
zpsHNsCBth?@V5dsq+XSpD=>6}7napcp4UCFtf&tOk3Ko~t;TyKw zT%c9rv+QgEwrrH}wNU_^g{H_BG@o4}IIcTWka#e};tG6CLj)#_$6Au^w&g*z!-`{@ zRS^x9PUJWlOy~e+98t`?9cqI2P;HDOGVuffC3W9&_)2iF1`E4x%=rR_BstJO1s;F` zBo4YwYWbZTd6gQqvnax-(B}-Rw~+7$Z~HFIN%_6g1~>RFG48&2wkB1fXrA5(VlPNI zmmN0B0s8X8V<@k&lO_piAm3vUBR=ii$fV#TKov1YDVwtKGWV7bp&k9))~j>#07^{n+8#tZ}X*%TY)Qn z1|FE8g3vpyir`A4P1R-wjxOi|QHnYCn?Ebx<+oDUisF1VKz^60iVgGA(KabcPI^dg zTxhAH*KwwSB9C1_5%}tEuS;9~i&9kqe!Z=!@8cOekF7dAMy_AUj)fFT`H?J5^D2J- z9I-sO68F*a=5s(d@G#G4wxshw07twKa%J%!a-uX`D!o{GDaa|yQBC>&t*<#_8FC$Zs+?L}-N!O0&z_9D|MOS_R1b-Lfyi_#VK;^XUGly=6;WBhSr z2{EufNE%!WfNE3!??8~0oT8NiGu~GWO3_X}^km;mCahL z!bFl(MQb7(RtpdQ=9}bZ_a9XsKU3IEE`e7MJ36ij^%{mjb=&Ne)n2zGr+EBknq>AE z?}#0ioF2Lvys`NsKhMRVjA56n)*+Y*6Yrv-XUgTW?oP|yoKPBh6uNBrZ>z=e*8)5j zZM*>xj?p)jAgz**4=T^9<70>6To!^Tqh)nCfSsh>|9^uZPXQz@SHijhXiZLOt{;vh z^i;#E+(K=lC2|_^sxkVg$Fr0z!gv9KP4Plkw~L*FJ+ZpL4o2)KbmNa?cwtA1<{g-^J;aKLL>xNhO- z(~A88+!7X3d-nafgo$5J2?hms8%EtW4wAE#(H%cE^P-XJ3e(f>W0_{9W`owzr1U<_ z+nInX)AVSeRk^3|>D-36*%A`Ek>4cReW@AObZ^FO3ANTiHnuZ(BdEM1i|nhn~N2ltvRa1GP& ztG)}f_mrMGi6$nNc~fpo|j zCMXy)$P66(jp&=yI{m!;SS?}`5s{ZN7DBdLkY}@^88z?GJ-e{7q&DmV3;DZJ!Xu^a zj+?3Cw49NvZIuGj84d)Gst$2#tJNLT*;u`SKnp6>YSe5NL_noYoKOMhS^0379%*|D zFK8e0Kg$fq6PYr(Qn5NT3i8q8V7g2-PB&>AM5)f0P~j9+A-Xkd)L#=Ya7Li{k-XMWKuMO_=BH5r|~QI=~NB7487y693@D<9g7h;>Im22UN z%Fj{SVSrzk+Elx#QxHnr4a?eVCnA#NIZfIrUsiq&D|+W zKCX(%u?wQ$p%jaJQk|}XO`S?`_+YSfDP=Y(l^;c9VXBwtCX_D?NZsL$OA-j~nA&gn zkXA)Zxv~`LmGkl`qwej;1Wt$C2N;Dyu<4cruNH>Y@$^`Aj5Dnf(Fx7+j@1a%10F=g zhGuXaq~ng0Xz(jp-Fg{*(p8seML1ikAY|{I&!?ZNC^@%IR`IAM#D(Gmo8q!^q(7!x zr%wElQesPhCTUp&O;b2fG`NsfySJQeS_IMX?^DN^^i3Gv1@n5R*-%iEIlL&4w@*uR zbjqwrxvP*FXa1sm|J>07wXmAXg7Wb}8()zl%rm0QE0mPHBh%}~dPbCuDd4Tf$*>xl z0#{7h>}qtrVWFaf#i7j&)!Z8Kkcql6<2mg_IP`bPNEgulcLxgAC>0|Q}RL@h1)f| z+MML{K!snZ3P;bl%87+}roKp2rhS2}Z^)kPjNOO~4IRDbZGp@^rwZSD?1WbwVGKmoxDo^F(Luy-DYC>P?z%Q<$Xx^YsZAX%N4-P5TzB&Z=z!XlaPYNtCYYG)VvH^>jc%+<1|-p70o*}fQCQ>4}_H3tgt2alp=Z@ z#dA(5oD>M)Y(=G*!u 
z!8*)oK@j59u!^`I@+eo;W}xI`A<`L!*Q=En#3wRfk4Eu6B((_#Ea@(}pE0Q7wEigo zHVmSqy+q=?<^~0c#o)?7^!u(;|GSmG`#=;t_?p1XCh8hPJfma(^x=M{iP-k3h#!*F zq%3FcQ>)WO896UOMM?V!Gqg&L!u)FEqN$(;9AStrJg4}PH-BSM*5V%9bOQ{(Y#jW` za=YNc8Mu-DXxB-8+{}q{6V^u;<`nbE`F=(~Y^zzc_k?%ZNxYWOzQF1W2vka=yfaOf zqYmcq77IqXNcTmzhp$Vv9cuDgk6n5PgaBmC_(%*K0US4oQ#r_a=|`1a8?u4>C8q~$ zrwLHuc&0NS)}tCMv#I(SeASSy)KAIhwo8&hrB4LogDR(HWVq*39ICTj|FpcN+8 zEQHHVYgUAkZ>IE_X-!e`Thl^pO;G6|3#yV@^R_v{%-fh+-M>E=@hkwc^ccxS#JF}> zz)}GR6pEeo#|ScLUpUqnCKrY^7;M5OeaiRzOA>hhx)vDjj zA7dqM3WSWU212o5-+o_!VLTF@>oRUI%tCoDY=S)UMdf1O7}6&pOT#uT?mZaAEn7l* z6whg9n!V*Jt9Ex~GqT1C;%$@!k$@Q557q*y?FH$yDd7*>BU?p&Ldj(6uMzrMMsF#C z#m6_&o*)W*sjhI;;9}QWfIcFQ0}Q2TTspn%W7G>@2ZPrbAvgbXUQ+4vsoqK)t6b&_ z2>+e^{{Fkt$X{Mp6;hRd7L&ds@l@07aI)~Z2pq{3`9od#6;cKN9_l2^Bl8vsK#f+P zfp^Hn;2de~EvbeyW0Mku+oVDd&_G4L!)zxNhKXMx#u)=c;GVRcTYXRm7~p|F=;+{8 zFo=vH31)!xfwOF?2M zn>_dah6dPCG19*+h%HfYa8{4M>iz8W5?--K;(@p@NP2$=4y= zausHo8+67%BmkVVjD4U3GESAlg^~wx`&kM^YE~Z39G3Jd)X*v>KBqVe2o*IlN(jJK zDM*H3`@J$NE32}Q77T%K_5~KQ051vRr}X`UtXSw}$Q^U)00?TX3iy371dOu|rev-$i48E$>Hk&>Df%MkirZmgQg|P|R=-HTfGn)xsEWk!+PD#!eSu=T9s6Z8Ocjoy?YC+_)`~nrZA0~#r&x7n zX+M@EuP35X@(`ft*!f;#eqw!^F~tOdmO#t7FSKE*rEUjs4U(0g{h(_qgTtEXw~mC3 z^~`n@yNy`@3+-V{*K4%cvVW+vf-!+DkeH&Q1<&RCC~~y{b%S(EwE&}zi{8Gt5&haK zl$6$(os_Oqj$zI#m0E-?4f_+Ok8JWN;Q~*t#{jnT!~(z9!_P#a0Z@<)@G#F*Qm9-4mlr98P*( zl2E>##2h%cFqULSyn87>@Z_0B40lxm2fS8-#8*g1HNQ2+=hv=G9O2e=G>~Wj+Q{0$ zmFQwLe^muGX@tUxuWJthceZ;0FNO2*TEQ@wtCau9hX@lraX&hh8sXa7pOfrU6%AH@7d!t$`duR5vY$^IRah#O5pz*X&-D98FEFRRo| z$abien9nXX0F#gT{s6~w!xt9W!IO`1SRs_|Z=<;3xGO5ew5-&TBwv2RZzyGj6O&5s zh;j!Xbc?z#FeFe{!_eZFEmtUon4sK;zB8E`#_!7EuJM;GmN4hA4l~5efqUBaRwBjD%@ImU!${)Z+Op<;@GltI?gJU!(k=M& zH3GY;uBXF9630$#>h!HB{-lq4CAcU;aJNfiS(nO<^y)N0NDRleP?D(53})He8Fd?I zFd+-h&hpJ*bgTu4P)e2INc-|<0^|k(8mMp=ClFD?VgTj?Mn*-VnyT^l@3#%&IqVb% z`h_HSDK5fwe+Y}Js;WwFWC48vcLC=5&SX8?g~m~6t3rg(+_}ORu|_iFEp(j-R^d?% z7TVB-SbARenk9@KEg4+E5W#^53J(vtgcWvIU@ebl2~F2Qzf}n{g+NiAuAS(1sBRJxyTx4;w#kg79727A3ChIZvL{q$qe#mr(MgTJB#>gjHaucE?gmP|;_ 
zx;ozBb*^-ChwnUh2OT10&ACH}ht8Z`WwDe-mmy_8=Be!Zn?%H@iod%f<0+MXghq8l zOz%tIeC+IJ)24m+D+WoOCX2a*f{L^!49kkdBGD^8IeGZkUiL{cvnfK(x1CN!Wq;}t z?!r1#K`09b7&s^q7_77ydZm&u1y*8aPT}RXm?V=cyhxU^#3i*RlX#(pifBm|3o81= z?W-h0yIN*6w>n=#L!u}=BoZv8DvHIWkCLT?Ef(w1LUE<1c&kvH^2L%Y%7PZjB`Bd} z>du5D%`D7R%@;Q|R%tsQ#zP`O&g+R-jg7+~M9}?eZX9GR77B~S1~ZGQkvdB9#R*Fk z6A303OTu$+vZzJjB&*^#^cnk7^CLwf;+xalMA(l(Xn!@tY(Akbx*stb&jUkdk*WFo zkjW!}zy^19S4EmL$6^4NXU1WoKQB+foj7+`Km&{eY0`;_G(Rez{?U*&C)hVXE(#nI z?WV-JDUm91-tbT-L}lTL_uWT&0K%c+!S4O;$e8I_PjfxZ^)$>(vsuFB(SkvO^fF-% zLV&<8<9O4IBWr}}u)6jM&1OQgPe*7r`-*c+jno9K!OWwNWNY#061lb7!PnsD^$rpZcJLhx-t(e6vKFzF*sMI?SBD+K;@Hiz&J5Gq z!NK|B7B{xIy2at*$_5oAQ(cJv+?hv{N0W$TO*WrN`OFi}WaZ^JCxCEBG{ArZ3MdeQ zo_rpbHx7TOoQ9O2%!uhtPSRxN5xE~H>LjSk;z<^T%85)8iMqFppLOLH!P}7nB>)R1qZ(a0AP|yxLp#fl75);+m1R+QQ5LI`I8Av;C=>aDvJi z3^|m>(WY@+Gx7NGfBz@Co#=&v)8qr_uAfnQh~^Hy2829QnB# zhgmDpvQAl;#^D?^4(k+>RMlRmt#MF~=tpFRr;3QM8=Cx3HIA|xYxdcV4lU7Vb(SYz zh=(2#i1k7|Sf#Z{mX+1&Y^YW|q-KU$`DPhMP3?@?u5aHPN8MXby{}ePwJ$n?I!Kh& zqT<;;=NlPE9(IPd@FokqI0(T&v;M-@KiGHhDDN0--3UZ_M-hkIDU%`$DF+Zm?jz#J z2_byAGO$cwfJpGgn*JVhtzTR$m~6iE8pj8_aHYW;gNdWRgYO`j&HH9%<{|b%%<_zu zMay>(+iaT`@eaQDb};=$KzQH{iG8>u`8h2`0+0^`1OO92!GXb$FfY3hJC=pYFWTgK>jJ2hRm~em)w|Pjae3 zv+R-f=iXi{1kSOz_oV;FWho{lBy$uU`pCx)wVqDQCcc7g{H_%wmz@sr*boF};GBgE z#y1)b8ZBarO$=~N%>S&aMn#cw%x*~nO@{uV<<0qug$Iu@9kT&peFpY@Q}L30YhOp~ z0UpZ1NoxqB_*F7_+1BRJ`sbHOX8{^h$iyffVm*Q(zB}BNrX|JE#MYd|d zH8#gJM9Wc;0;9srV`=w!g$IA-XR)8W}Z8M@_gTVrPq>!gwq2jiI&coHpO3%!?C2-AhgxM$8rc+HAW!yFWLy4wq( zaRJF&;Ak;P*Tk;)G}Lq;`L%a_0~l13!zscuEq{D?ZuO$Rjr7)4a7Af zl#y*t?Lr*{VJly~%sD$2MTswSZU#B@fSJSlWtcg8me4G#NPR*KB@Qb-}FBP3^-4gavV0cpItcCO&i7>Uzv^oel*q9=f zIlq`|vRm$UaSXUWRpy{C%Ha3Rx?2ra3^smDzh2cOiNHw+FsS$K>IiH6b%X1w6)JdC z*Wifk;!kES@_cl;^Jx|Fj`uRz#hRy5pPa>qEgHs*LMe zLm-wQ%sbv@z2k){v?!iOrK+gyNB%LVBTpx+EmW**p-*94$mCxG!f_ zCdpL};irL_XOi#HJ0C~H%p1v$61m7x+PHn=c2kDV_?C(=r;MzS z;jTJIU4tJ%>~f$?$Emy}%r+gO<(0$Ov(IetC}T|_L9F0p{C*cma(!A2_NSxcalBAg 
z{htsS%)va^jC{OWPP-g!)V709zb$yM^6=nRvT74@+b-ue9(VWj=Qbpjr=&J_jb!xD zKbSOV$Z|Pp!=}$%rPakZgH3x2+bGvv-!_0|E~m%ZAp^Mh2J4QOT~2Dk!sc?wS%NN7 zK;X7c*&`C0X+CB-9u^nOM6s?$N@tQNX|IJYC+^|@D&&>3g=I>&NJYO|Yu=f?d%8Ru^5 zbUz`22#s%rt?nUOx2KB(61R5qL0adZI%33d@N^W`ZL@w_%FRFan=YGF(-ZXPy^O?0dn` zni1YpDr=;_k@e$ZrJrh2!vdYc3GtFhUpsRg+~GZF73AIhSpVsIDCwBtt|6FnYs@&iq*e|Z&x zajARAA@ZfLR;jOGSAE_#bSg9WN1LQQ$_+#Xvco^*7=cBWA1(y5lJFAJ_dT{m#!i+} z+My?CnUr{tA;hk%>UhZ0E6N%OMFOlE+OYUGn+Ysn$jJ+ud1uZ(T^>?VeQ2P6v6z32 ziCsBq*VWj7g9Abi0T5q!ZPo#YwimsD0s8?++@M?^xYgGAAJ|Pp5QQKIDJUjd50rP< zI)){%#zKBL^*CKlnK8*Ujy})c*JJ$-ZoEY_0CEugLG0M4jH0?H6D71F$hj_L8J;Wv z1~J>gdQ^{9av$fvy&sp~_G*7j-TOHG)WG2ebk_~BVS4k<~}q;>{vt;bo^**UCr zv5>Vko(D3C)nd;69BH(ddsDESVXkslXEj%UyB1oPjn~_2Y{D7PE}6NbiGBw~xW`$edmIh% z@_((Zq=Ff4Lw(miRPRqsV_blDbW7nq4#y5X}J$V@0c6;Z$J#$5k*n~b2qfQl|Y&f2O6 zLW6mS6ES%8!G;4Ma(4Dn+D6zqelPSZxjOrnq~+sKg;o@tB`LGkup9Kytl2`heI;CJ z-|M>O<4~g<>h#sp?1@tqnaWe}oy_=(bm!xQz#upmVyOKlLLbLKgSuZW%6CI-qE!-y z`Btj*aZ2#Ic!b>F)W><6^?Ge_AQ5g3O_>H)Pe(?+2c*IeEx@N0x%x{zB>*ADMN_1~ zYY%aheVj{(dr2Q!n!0Dr-G^3tDhl{<`f~g@yYRe=s+Yt^?F}mdQv1m?3Wt_lx_CK< zTCd2kempk(fEILebC@$mK9JEGODCXa20v5|JUIS3k{<_oISe8CcPIHJ^JT;L<*>rG z!$d1Tj+3f>= zpGo`}QROF;D((*kwFRg;!x85%=qQ;B@Ll&qMNE>UG8*XEMr$Tz_)he%C1ejRoNUX0 z1%-USRQbDV@5QUwRSzIJR!=CJ(boUqzy;Bb@dQLi1|0RSk-jTtK^ZHu)#)K*#r2L#)- z5w&T65ma^V)<|h&^=%(W(rHo;?27}A9_J|PXU0f&RF{X=tT-TyQ7WKEPBeW}HnZTn zx_Hb0nM9A!pNe`nLl6u(7aYKtOekbHy1TEJT>h-TO}lwdJ-WQkx2*hhqD7m1F=e8c z*$Gq$Ds0SjI%cHgM_S;6t?%TV6#B@+QgTiR2R_hr z+_l>LW}?+zPp5h5$)vAd^zzMI1!zqeZnSX|2)@)(G5S^ruZfjW9ot05_Z}JJnsX?J zY$Eyo0bLqO{vEAhXXo=L8^mBb5JQ-H7;s=tF0XdX2y_E{@nv#^H7(vBd%Bw1NK?1L z=(YbX*m?(@!FwoX4_zP|vPhr2MCf?F-7RRWvIL&R&z*Xw&PTqaueUgK%MHn z8g-U7mY#%DJ)*Ty&~2tNaHtPqwnd({G6bQCZH{HC;xv?msQD~p6xv{z8=h6Bo-Ghk z@JO~8gCo&;A-9MC+I99;OYp?@u7fTSVRwcDoVC`wN9tt+NLCGw+LvJ7Yvk+$F&_B+ z-23?J5U1~CLX?nNuCcN4=2KZ%N=D&P+IXW%r(R&-0<5e+jzLc1%UaILT`Fm~@=Psz z?3tK|gEx6b6>?MAA&{s=tVi7FWVTu!8o9)c*77DdD%tr&BNoMtc4;yT3zg0BU>>zv 
z4l*$55_@WqamM9u777bz;lz&uiYF!n-%ii~k@aFbXfF!^7%5F=uuRKL_@Sat9;!B< zk5>6{b)>W^v2k!ge!5<7Sy5mOH;|2HyuP^FKkFF3XY{JbQh%qYz5O*h|EgV zS@ezPC$yCEUyW36(}X0sS&O%B17I~uy9S2btIsg%ykwq2yJWY@cT@8mn6p~qohVUo zzv4U3A2zORbq179kO)2i>W&vm)^xROeroAAt@}z~^eb_i4z@U)oVjb-8+4q7$LhXd zl_@dd4Z%5w+Uo7c;evxy5PA^92+2ZRT(^qEZI%2|KWTdiN`+tT=lN4WuO{fc%gl+w zCzC)cF?Zvzl|wiJr1LV^edpEhrsc79&Mhs9petMq9)$X?%V;SPX%=A!Y5lus?3cpidi!|phqQXhcR*yza8fAu0Sm&A59l_xw- zhKzZ$H`sLB0dU;K#36CfjTy_hg7Fvi3!(aPA1HwV&TO6eD(`mH* zs(r4qNyAJ^o_)1UFYNpS^-JgKK_gnKNV28^2f8|QtO)9)MH0DCEl~CuF5#vyH3MD~ zZr1$WSTvbRVioGUG3w;#OE|&By#5lfcxyC@?-`HSbQ1m?C^eT3#00Ek>|j_ssfZDhash!wdMBCgu*}YJ6(u}%`Ie!NcW zleHP`2S8+Lt`x`Oe<+L-;Ah1w)RxKLfUmiX+1-BwFr$2Ox$pLo-F^GigPS<|TNFo1 zRd(ppBgNxIz#Y3KfUh#R?-pJpu#yWTi3wa&ym;4*VGso$Uji4zJf%O+J>tU>xCsoY z5U*XyZwWv~SuRCZ8!ZXWTc4$wES@Xb3`ZmR6*9LUCu_&Ot8LT@+o|j-dI=~H}EInB%mg+jrbmN{K-sFdxUxJ zWlFIN2*(%=7^UCh9uC@a(ExKxy7cZ48!?|U8^}M^s>t_#k!B|%-`>Zo9JDyp0MIvB ze}1r8-+Cx=N!U)v$8hqiFB7*~8Ar8X`zXUf#zO@-)Tf`u^2hY@U3M^5-;aS|{&dBy<4ww`|P>;V@y zvyN<2fUBjStvbzByc3ZF7^gr5$1J-iWvKuPp+` z1DQ9$$U3PIRQ4Kd7evw%#f3=h`9Db{CWRvGR2k;rkx zrps~ShscccnY1^A@RyQ|AYWIMJ|zN21rw_sh?hDCnIj%b8;3jqk)`Ioh7_zfdpnQ- zIoS#$tm7ZYi&AKBIFhO!vU4C&+WY9pkTVrvq|0}tpC5Nt4FqJ?+m?6AT1gSI;@yad zEsVSCE9rI*0aAArW+n`MHWuCXYLshkaqbNUHaoG7+m0>=Q#3!tpO=bR!slYbW6Ms zm$mcYre$cH3`bGMlgS910VG$gT(6{4%v|-|xv(icakSb4C!AOEU(tQ}XLSa%n!kE* zP;KfaahT2^?YxSI@08)oUITzc0Xv(=QZ#Z&W)Qo}1B;;a5s72i@puRUhyH6_?7gMW;0xw6{@>eP zJ2r|XE|h18o@MalX6%iPu1;y|Y|GsJP6JLi7$oYnNnf!x7!8Y(DH@7>sEIVGu>n_Y z(a}5PXiWkES#gr)k*XEk;6rX7pH=gKBB{>r?MxOkvdy&nQB-vMMEciI52p7t=*$up zsxQB{xfT2*wfL#OFe%*R_twfgj~p6fs#gVH-WSx5ny=J|sbtM~{a{n{9;bnW4nKFq z(R&>~vKkXd7j;R?m$_&!W<2r_b%A%#uxFBSI!yR6^9pe>#c=80Q96%GXROrMR+j5BY-}+PLNQ!CtXkIBhU5L*%#V}LtYGv zQx`>}PqL2NB?y{xz0nss1xF)0h$bN$bluip9bAD6TNbGN$j2EI7aJQEbv1YbiZY8z z@9|^Gj~EJB$B^xX$m1_5w`^_5l>QH1k_yL;n$=k|?Hq4MqljIRMA;N}nS6UHKzwK9 z!OuVgQTI&Owqk57BrDP;`J=yO|5>)#s+>K#5xGT2WcM%ok0{?hsI&cjJii&PPNz1uk z)aO1mOKvvzpjn#aBG7)vr4+e8}ih 
zm^_IBz)V6QhgziQr_QhzIUJ6_EKcY>47=jm&kVenwd3cR9YaPivZ)*5hIVpkP&7T6qyiB zY}v`}P7fQfdj!xHAptH!po)JC&?03&FLzz2quAK5+8|<#lA97D+EhIioYiR~MlTu@ z8E99@;YPvO_jx(`tTjR0-5|p&J|uo8f)8qFU#(TnqsXlB)%J43#jV{DLnHP%Jg~7H zbd$3aF0pUph8e6#-bo1(Xql{7rhqRGCMTYhBBC8_;88f}zD;JyhC7ou;Q3UdjfH>t zQ6REefOJclOxr@(!02T=Cm73CNiO!$BDtz&Z$vIavS>(S!w4_a{VrwoND_{pL_Uqe( z+7}KryBD*|C_TvsgyaA^j(`yzUI= zKKZZKWV9IKn|zT!UdaDvU-Cr%?fdQfhq0?&k6(jXU4GY5H^zu@*ZcX-x%SR~?zh{l zW|zAdyc_w_QnxcIliF2U!)vdYOOvdu8(&ga>*O+O*(#Bhph632&sKAtIg@<1PmE{Pg8t&n#!N zVjO*egVr^3=E$54IU@$W9M144Z^fi$Qj@Z})Os_kiQ!+x<`uKCq0aKpp(gy^*HmbSU4dStzjB03wzyPJiphAM281|7EH*91@uH(h%@n^Su zm?7SU&S(?8v>XBhl&0|s4q-Dk^E2cB=4~YAS-Ik^c=4FO#Wc_V#n=2%PFX;KyZ9lf zc$&8IF52a%c-}_8JQ;oZEk08=9nK2$WoEd^d%Llr#W0T;^C_D#v5gI5V{;GUfjn<) zZUb+>xtp8c%@pj0-W$@zpfO@%n649pE=GS|&#U3}V#+^cW>RXf`=rJ0LJ`9sIzdj6 zzK%@9QX(%chLq5+@G4hfNmb0TjB)HbVsx0sl{2xEh`|agEfJ$iXd`ijL@HJ!gN=|J z2`wZZsS%Qhap%jUjSLArb2mq9_k}VvHlfL4VBmdeo?a#D)!gZ%Vz};Pt+Jx0i0EkO zt~NjL(obADPpX%M>ps38SWBRNE;}#Gp#)lNf)Aj1k2pC=h_u z8z>ls5gRd%sEDo@R#N2`s=)!RZ>PGgrnZ{eN=o$LR65W`dgU5m&>AX1ff(dhq*A+- z+C6LcZdtp-mi1V=8arq@ILLgSB61#V zq%#UP+lgUjbaOGq=u$Rzl$scHF?xDAki|4ai@_l=UROs|*K1YRiQ%%R#AZw-mWpv4gX|NI69h(1FV zTy~$f-)S-GW~!G^)#@K1sb6T=E96^9jFU)xJ=javU5uWQNn)H(0taq`J7f)rruY+p z0D-{4gQ1}~7zhIbVK5*J2!p|3AQ%h=1OkCzAQTFVgoJ6JuM5w^5t(Xw0+x+6o=n4H zt?>{giHLa8J`F_`&UgrT#$5U|q_)Y|?ir7h0l@Sb4?)m)qCVLkkD(DP@XD-1A9FE; z{iKfZpkX+4akrlYeA#EjVCC?7%t#@I-flVoRn{%sK{GC_FI+rvC6_JfOGh~bP~RCaX@+Th>iu-Zo4xCr@eR(^|d=Z)9}T^T`jY&3E3Dk znYFmScr;+lx-EOR0quosy~OPbl;{_a;AT++lnC{z(d75g(l*fsvEOUVm;uD?M6kj| zsi2&PxTRtZ8Srg?L9FvP%5jR2oyx~HSUml+-Ag~8Sm^X+s)@0jNee)QIZFjb@7nV$ zFrdH+7$aD53tkJ%oSUdX1}2{V-#eh@0gKDlU}y;zK5A5OV&c&l8)1T^6pP2-ShozL z5(xY=4HdpHc|JfAH3K1HX;TGkp~bTZu&fM`072d!MMu*c2%c2_6&K5)h<3*^ruXn< zIYx>P4_Df5S5r=@?wX4%>5pWJOQhvFt)H3^v12M*!!l?x@g2D@9F_7R^utpfI-}?xh|eTyp(}D;KaAo#vG4NAj zA0nyUACFRU7d++PO^)vZi6-`nuXrY{GcqC^`xQUOg3Y%4j#+xnCO|{Gz<{4dq2zaB z*yFYU=<<|-GhS10XSnOBKH=yFG;t|>Kb^u8T9lc&08godUdgqZxDc{oaxx0Uv!-Po 
z5Nwzb_k?kbj=0zZXHy^pqSkEd(4R9Z1bV}Gl?M+=;}r6W=;5tc~PnCyC4gdlG^{$x}v*vStTtr*I+A(rMA+1G1wz#7+k-9`LCQ-#ai#7q&eI zK_&1AeL}M8gw8d8U=Q^Z?WZY{T7U?Kr#9l)dMs#uDuehu(6TT|MI-Ve$x zW9?Eu7nx5Zz%yhy_&(D^h6(VjX4Je~ng&sT=Pz6|MwZ*tn1{bjW;4bn&d30db-47> zNr-^FK;!T4tQ%5#F>K4c!xE_fp`Lm2020KyDj&)V_Gky=x2E4o9>`$|xA1=S1nlFW zN>)O4!QO*-x2Er#H2STzlHun~>*v!NQGJJM zFwm!WP0*)J!wCf91l_S*wY=a|ExY=T@aq*oh-3dXw6=oCraBDw?n`zeouMvctnY{= zhSu9!-}$i@8g5+QDWB6|t?K|#6FYwNO-`3Z@Z&6oXx6g9wFe_lduKTYHKzvTyEc+q(8bh-Ev0Iy%0cCCA z{^o)DEC9z8ZRXz5(Fs`=P!aXXPtE{AyNa+L01-Zfw z5gGh1lQJW^&4DozDB$JI)>X%%d#C)Ax_1V{=dgQ+R(^{5;2wEqr_iIZs|YNU?ZT$i z22Xd$@B1X^ZHj*BXj{=DENX+I7w|b^qw%_AJ2>`?ptySHYJs+hxh;ypD`27zF~HX! zVBdctNG0Nq<--wmXJJ~(irBAF`2F3gE zN%GEx@o`2@H%b-wiegNScv$!XqF?8VA%G6$2rRqz?nqRXyed(FRjTv%pA8vjdFPOY z72XlNF;{8x@nGq!H(<88t?08G+bw6-N28mU)I#}Nc;|zQJ29fyxc?0rqVj1eGt>+#Vrp{oZ)c1!zrQGZ49yLpY}f%D7OW`=FLZn(j5g%;21vMaimXSwV!x|l{QC)A}@*o z^z{~v|8QOwZvISLSayf#)6yb0X)!FBX9>1a(5k>~5j_{YKtzZIOmgAMIv#lAYdu6M zJ9okcLXV*}S#n;XdVz^pt%Rci0N%kDbig}~keBWYP!k2-2{7=^pWBOtGsAetJQPss zq2QfvBVB`cs{hn3i!dTssBNSP3jr7ak|2;ddLiu!F-$?w;B|QEr zQ^@>sASMbu_HfCzJ4B2;3j__W;&ul(y()ye-66+cy0<&M>~-+lolSzfBa9YN>2P;g z={#lw_`&C5cP=FJUAb1w!hR9b*duyREqS`syjTfx^vM(2TN+Y^OnWYa8HPA zI+jIOJCP$Tp6;O16ci{1GiI~0xkL7XnEDXCGSOA~cDGTHYtWl({OSG$l6rbDLcO_D zG;D=V0a%y|1m}+7ICrXh+Mt|Y?nH40ixRH%V>_!bK`AK0j=tCMl4udNLX!b)nOrwF zc3PtwU7R9~q^A%pkyDX7$*>ta!N?s`b*H8s$p=+crsDj_9kh-td^U$hA0R1kV=Im1 zP8N@n@=2aPVkc>Nha>V$NA|{X=dKM|rR_T+eW##X*au&(fH-9gp<=A0#~l$q?lAGt z$fhjrtd4_5*LX@AOjY{+5=W3mnU)sN0Nznr*~dO1dTqiTk{HIKaHnV)6A@Lo)A=B> zg*&|3tMI~|O<=eqNXt=ahC8f!p7n<=q}<=mWd(QZ>Zk?%RrY08m?GYeoUtP`-VP4( zb_{t!p*rby@+4(oox~8<8GHncb(R2EfKdF~G787x&hzi9D4P9L5+cUd0 zzyPFq_fFd>lPZP)a4abj@6GNwHP6nmO@@^2dhXc!-dIPHhIW8{IOKo0eC(I`vsK^Pe9jXIvpFaS?%v5J=-l(e}BKE=tU54BAOH-sC|{ z5w3-eh8D=371ED1L%QI zu6BOuD0=0ZP~gFu0btQz1d0z1byDLfxHPDp)HvJ#4ixw?aM@AwvY~da@zBANG40$q zNjxpiDF?EkUkBD;)0bY+4u3(%nDq?Zfp%`Szc{JqRX{8|+P?z>+H8=S zq;(TF{5M9}Rjo*cFdNdy4J|u~$;&V#V8$^5jP4OPXg8uR3;iJCZ`yrf$KKf*m_6XE3)S`a#cW 
zu79J2hL{I)}!{I%+5sgGdjVLY*;K-EU$pjE}akV=a(M}wt+ zx$vX_2uGcWD0cc5;PAQRav6U}uwdR+wqZxh-tqui-9u}N_CBLg3ch{MDr3{ zMqxHGnzj`}Jtd&5nchesO;KuyaF9t=j<$Je4eY$dXlJv|ft__iLMP+NlOmdM1qLJh0rpK~utPZ8$d*eH zO;PI?KAqhZEsS?Bq+>eBVdu!^RfKqdw|JyF*!gZe*eMaIVI#ktSjWhDvqeW_f+{D3 zov*|M<+$^~3|*?@RF3nYK(-mzZ+W@EO%Vbilgjzk$U;gAi*oEGk|jY?s7=p{OZ2G#ipr%9nrLTzi>^pgMH34lRO#YX5;;0C;Pqc zER#0Wy0n)mh?Cg`>UthRgn1r8Vq(s6G64=2*^l8{d&K%A8dPu0PB<_Y3 z@l#yW#{2{6Xpg2*3X7v*IDdPCrDD|--pG4lTz74 zcAP+V(RIzvo+zVisgG_Df|yh<}3^c-i8+)l0%k|b0vu2PBPI^Ko= zA82A%!dUTK_GJA8%cADKoc}=-fMx+kZI=A+Yz|e|d9~?a3cv0)D zccbZ4+M2}GyFm*Yzu;~}NmBbVS^J&f^hfNlA^S_UPG|tX;8|Zf&hb>Ov)Em@X?wU( zVu{uft~91Gk|u|qXu)+T>#QANSy-T~P_mSD_yu~8u9fH)>wH9qV)G&=m0e)!QcEQ! zqTZ~U+;vq9Og@7nRnTh*UGvdTp`!1v4$X=ES0{{2c?1P1Zm6RqT~7e(R7q?JSVtfz zU6DKj>--v6r&wjKIlxcUQObdfp3PGyT-y{m;+%wjp20e)vLDa0a7-$3E6iv+=`zeEmJ;U(7{yy*pWdl-+KK}bjHJZraf4wsU96h#)Wz=cfb0UfAO^|goOBCc zV#xb$OJ+0SRZX`r&slONz1a5Z2qunY?OpEGxpyWp`08+alor!*hACei>_5C(5kZ_- z{fHa}nZ!R0k=2R@;u+etl%ZnP$QQ%(ga(p={94Msli&W15Yr?n(lBqBa$IDn1QZHJ z-!21fl7bRIT0}JlMoVEpt;$Hp>d(4yd)Jm@HxjXwX0L; zfEe7>;q=iFfq354S)Jk4Q77jMs<$KO)_W2SC~%Maf~V4wXX7X!1HaSLg41OLTSE5u!<*hT<2!JTtx+&qR z&Q5484WseER$>|fGn=L1*rm0AE!4N=liV6?ah`9)BU21Rv4CGi*MVri>`<}fa70TO zd>y{VfG4Ly>iN^LSjJ0rK0WK8O1*dPiE(gJ64^J?x~0M0;K?pv3qD5FHN^-hEX1?j ziL5@jW9^{x4f&gLklI3Q(qT*uzNHf#+q;zBCq7-i)SDUPncu4ZjCl|8{+X=$tH8qH|VG zCMi0HAe6`^jn1*pH#x(UMm4CnE9~K;bN;3fu{nk*Z;&^%4B=h=B&A2eI98;iq;mox z_k|p$yM0{ASsyJhDRaROr%|~FtP6*Gk;XEna}en$)Yu>Yb8kIml`m-^(+2Ehdpd`( zQ?U6zoUwo*&4*_=%_F*AWZ7ClGw^!~k~PEtH$ce08P6iD6{a~YZt9%x5UO+9ql!^= zj*1aPm(@9bd*W1MnkD-fX(kdhHCOug*RXfM38laCBOW0Ke|JS8olrX)_&4Sgahlp` zIaB1D+t7EBC{Y>qpgmwU)8^=$@9G|+=n(3N6J?io`sGql9Kv&;_=sZ>nOQ%=^FfXf z1l<6tf+!FiP!ALvpbr6st)LHWx=cIAbIX(|GN#!E>-q@+`vB?y0Rh*N>$ToG>$S`( zW^e74TIW(L#NE5Koaj}@D!LSxOYOAeGT=b5*q&G+Dng8VEOC!@tYf{US?&~YN_(0C zBrr!6f!z>94pHO=A*j_O7TK0T5d{-mUByS2xK(u1OGU4ag&gs7vCu}20RpxIrS#pj*b?B?n@#2L?M3}JOGD~)*yTP8&axlQIX zkU6OvbdmOuMAMMN5ZgUW8~ImqDH)>Z^PIPcEmIxh7efA`8!ClV 
zGHaU>)($Z&u_0a^5)zvxbu2`KL6li>>R@ipXlw^lX5$bX|EyyZk*b{mBtY@Ge6IWb zE)xG2i`9rj^yT8qRj#|1%U8SV^y&JQE_rKrPOY}umRQ>wt9y0Jt+KjBR%_AKTC`fL zR%^}GTDAsrr@PVJWbVIR*v{k_AYd&6zCt(}{9q{L|HpUZvm1}^C?~EVpC#-LNbW+A z+{lGE`CxpvldX5%!}B3ZP8{NLA!N>|>Zm9levB-H$68Xgtd>(mvLT|OqM~vMY2=ZR zMh7m_DH@zMrOyZ;pr8Q*CTPG|&KaUsgy4a(3LF@-)(4jg?Y#3m9k#sr_SP|HZ6Q5*w3>#cEQqn1(P02UC}+iBAzb>HSy;j)mwL&1s5CUS(fE z!SKid;UF*&!375}umA)h*bqkM^8D2{pk7XwA8+0VJsBWwhLl%MR{V>_A;$Cbr#A%? z&_cN8;t(ZgvFheGoD%+}x>I~&s+fu_ggK^%82Z^GNs7D}@~61!hOoO3A`fvt`<;PxK$YVK63FqKm*?;Qi96%-Kz{Z>eAWk5 z2UaqO&y%O;z{<0_db}#VtFF$v>*@2IZ>gKx=;m|_@e3&U)tmwe)ZHhJqN58j@(?5! zUx<19Z_IMMk0r0_432*nq{s;PedBSk2`{-I#zgVFw3}276EtL6?F_qKN2FaM@1yq? z6zT-vFe3CPc^{8YYScKsj)pELGdl@E723dH2x@D})@>cu?Q}?f&?bV_J+3InnYsz( zFp{iRjdRvZH9uD&MEjr<_&~FJx*nYc=RQ|{(D+~UiYMmKxer>48r2gGBgJuXiMFby z2!&ZV9j>iE{M5rDCPtG(et@}`65^Z+-KWT7gW!Il)Op>R5#7h%lBUsp)_2nTNQcN$ zB;ALCV%_MpmG1NTrsRVqHVVy=jo#wx(xI=uU#6MMm6)GMEuk^ z!1WAU6~bdI@c>xR>prNo+#6HRt8gGbsweU61Vu z=sqR^=srE<`FlQn$v83!_b1&DRYX55cf@^A^51+oPjr1WS7`jbTFdRDi#jpHChdrf z2pVKspYc4~h#Ph0@D+hxl!loW91#E{E_BCrz8Dau$@&xu09igFM^rHOx2%2i2sV55 zLBxQZi-3@x$BLVsx>v0sr_WitkUjoWYpE; z$e#N3K~4RdzO6?YjfDZ$b%gi-UP%4*;Rh6)5FBFC+_gIRuw2Gy9pmb;{`GM=GH$UL zz&?OI##C$>_qf15u^Mp7hS>OPm%oom6nJm4U?0Bo9_16Laeu-`SA87B zA}QP<4i~sK>_bJn;)nQdT1?6@NRVqWir=)*qIG(qfh_pNt0kMf;? 
z%wip}4=RNlWSB-e6fkbvm=TX7qj-DSRM%M&U*3%&bYv|h*|I_U!HI025*%->XhcT4 zmuLve7^P0}R9qjILNxMbHeGu6O&DqCX~o5xCqzy#a~@isSd^Vx-0*;;rS-Q(L5*9K ze3369ZYEd}t_{K?9)Uin9VCND)m~#y zKEvf88ti0tKpCHaB@vygc`GP*y~SeI$5~pZfS5~oC*8AIpWiZ*BQW|v2Ssn0%+NIw z!zD0VfO`$R`tZL%0{>Y~R^RyeUVU^6!JsA69#KMsVtIdvyci&93Ps&aE?i8`Xk@ye zwJ9mx(^YLpJrH+g+;Y)W8_jhaq68daORYe`EE(iE-N-ha#(w zGRs+PJ*$tKVHw)V?Dq7{!P4rJot^QIHZ8s{@lh$l6yyszLu}pLLsXyl+o(Q;|3_S( zPT&EP_sYpy3k_1aYv@*_`ZyLSG)-79)rX?z5}dx)RG&(6P0;7v&l>G4iW4=KJ-XFQ z(}-57k8VQ}zsL=-3Y786&dyZw`6;!>JFa7In+<5Gj{^zevgCGb#;U8YNxx@7I$;=JSR|bCBI2!`_HkvG&KK-Gt3V?oV`DjYhClO-$3S4_l z^c$Q$lyDFC`5;~@3$AEL$xrKJfUd9eJ8NHT`dq}rwa#?+)*$z4n-2akp^;I)8{8#!zs$Pu-D{Z7P>nN0=SnX;PDUe6`+X|Jb~yO^(m4N z{3GZw7&xqyMSI1*^we>;9;w>3#t}%3vOZT}`XHd`I<$sX?;vfMK5nvkQFpdPXXFhS zJe3scaKgGqAk#H6d6@kUZlGriqo6V8;f8`PtIZ9QNPC@@La)AW`)?8wO5VAUYNU!O}tY*a!q*aUu#he0? zc=Z--=_5y5J~z5uC99-TKqz$6MY9_u%KGOFRw=|J`&2)CHmuM~?JJzEhN7Ym6^;VsBvc|k7JYch&sy}!j9=d2qZM;z!Sp2aG55)N zl4U>(xR{KMi@A>iFA;-z<)3?(D0T9$am&hl7BbhxL*0==9h{aIeCQKZ3>jDUp&WvI z4{!*^r?-(UWuyZGPsXEpA3STDpFN_K1jL6SZ<%s&oHOL zAnGe}C=^jN_B=r4*0#(EpFB-w8U<_ceA?73cb;iGm*5vjbn2hbwDSEj*mypIj8-_f z1E0pEPEN79+6yNnau7kMEH^uBa03+^<$R2bRYJIHiY+&P2}>P{_1{H!g=_rwiVYv( zBbVf^IcR{8Vv?x_s;HVux?>aCKa&qifsFt5Oh9V0Pg?0;2%5P#620vTOoSF_d366x0LQ{=g4nj**d4My!c?As})ef6#mIBq@ zR{D?cpb=moPxqA^?vzu#zJthwnKT^VDWbtGjEr#h*INq z_d75_GgD;>vcsaVg~L`uKtYE9WVZ_Hs%z2sz1KiDQ_@CaDl&WzBsxomH@iYjJ`S8t zaew3Z)Js#e$)~FfH~D=2tY>z6UcrDA^3785js*!Ie)y|Qllnbgk`Kzj%;VE~pnCt& znE-mh2)HT!g~X~cP}90DVQS>Z8nDOeX`Iz3R}=Eda?G&5Y7zb^qD~>7`{zr5X9;Tl zmUBmr^M&(*roy|XYb$Yws!imgRs3RPA8l^ie^5Xv{g*4K2#ADGDMUV{M9nvau@uw; zV}+^z(k^Y7WV}L3pOqF36%8Zu`NEIdVH;e}TPed`{{h%@k%+9dK>P|c)* zWxR=eG#3`e$V`!smhsSvd~AGt6L;pekBFKFu@Y%S6IAOGABr)aMtL4%y>Y-_nirGa z7_f_ce8fL2>2>%RT0}+x7i2)$EU$aVJ2VX-AJG}@6FURc9UrTReP0u}_Mj#7 zjt_-Mtr$`>bv-DE!l!{!5dI+Fm^_{sP#5FVI*0VtPm_-ubrR5Fp zv^7UlLRRhf^Ev+n6wB(N2sj3D?;)JkBw)Ntd~cIFqeO}?CxuE;1Yz}BwcCvkNxi&a z|8@&En>bLg{j|H7N 
zBcx^RH{K45nZ>9gG*I-|2npX8^%g#LYlm^!F~uc`jwJN%;XG6+^BbIhHrdkJ!zW+g zbtuO|?H49wN%zURDRl&97p$rd}zN-^cVMLpB^$I?Y7$5Ln%jhFo=V3 zL-#>pLM{n<8+eCW?(vmS*K+w$)Z#~oyYgh0aklzMf4N5|q425F+p8@CE95Zu{4rEk z(s)wk)>sE4xy`+pW|pgudph;N%l?ub>tik!nhGK}!EA&;p&q+BL9$#QPJtZSTjv_v<2QZ{z$mr-(-W;dnzUHB z##}&8gF=v~LyFTP(s`%>L?y!-${5eP0;-jC6Cv+l3PF1u1M3{VF7Ch<=4J`Cf z1A0E!69^jI43H0k2A&%r%p*0Bw(Sunci0q+42*daQY}8 zhsJeuz?F>le4-v^Nl-VLVB5$teKpck@`o0>wJZFJBkQZ-PO>d~EClJof<(s*vqjV> zGzSeq2j!ApdC?Ncl*nKYABAE;zPd^BCFzwQ0#ciFm=+)~#&$M+V9{(xW;05s^uZqC zZf_%=uPu1=8%htp|&u`6;LCKptdc#t8|IvJ|+E z>;R)nxuuNvAWG_@T~tui0)#7xQKC{KiDNF?dJm$c7TTwTq0plkLzoqCN#h!k!c-a3 zLQT&xmY^klihZoy6j6>^sGllbI*`njjHHNKxRh5Kjfrxr?V-yBNKrRQaTI|eL-O<# zm27mw#nCHqicsM4dUdrWKLS+wHqz5Wct~<@nv^H2LQ05wdh`}zH1{k@C;E7YfMj=C z4Sd;j$?0Lqz_JEqao&@DAil)`f-;<=c`cWoY`XMp{aFD;QR%Ts=xlD0{7et(DAF^k zEyi^g=~3@#oVw)E;{aQ4%Y0l0?q)Qt2waoWaFsw3s!hipW6@7xf{Pw~)BsdEL5Oz# z8+}G;I?sRO7Nw$=;+3~paDv+Fo#L8=vS!=i|xN88eaW%IF4Xl>uL?0&MT(jYS_K#>@&H6|^9 zqD$HgJ)NgnM?+5*Sc5-Vo=L=-xZhrzL6k%YmP!d?oZJZt?mQaz#m~G4O4$kh1`g<2k8jK9gvW#=){q z$|p)=d56g#PT(ds9UQfS0@L?>_L5;3OF z5{@b_3B3ynhTcw!MzvioNn;RbxhK%SPm;f83_91;u&l? zm@u`3I!|GrLCn|OPALd`|M9L96TTT&fs89-w%zH7zLHBsfPlO1mZVgm)Ufr_e+@)2 z$qon7>p^MMj-Y=24PK{@%f{T=NM_4X|N1Aj%zx7je%_55Qn0C z)pd+`azCu{z^QB7G?BJbteF{yh~?nI(U_daNTA@8&>7D_Bhf%JCcH%YVdihLJe)+- z!z)hZO&4$Fu{r!o=JKq|DCNA8OqnYWJ%J{!NYA>Gr>FA#=5Qs&6)Wvg_|A z*4+b4V%I?Fi|1)q%K(1zFdSyaHTHgUO#W>0Cyy}E6=0hWpZOd{R$M-nLxx^jh*pI8 zdv3c01hNqp2lWccBu-TCBt*8q}NL_cZp9 zJRd_~aiEY?qxjY{GgppfoO8a&>i12GFT3L?K$n&OVM+ zQ%&%RTFF_j&<`7Y$Q2(@Rm3}IvCK@{cx7sco6?OZ+oz20?#7e9vHl8vqsMJS^cr(? 
zH!DT@Vt+;}XbcL~8>-t@5BSUpB*f(wMKIPI&!RJF`Nl(=*x`iujVCw~R~wBu$|syf zGaP+#Sip@F9FJHi%3qgoJai+(97W-9JYSP5QID@OuzGLAGa*u{AIiBuqIh-#atHX$ zcaw;DK9$MoH5h>uDqi0y1zIAOJ)FM*xHX1(OiRlT>rR$~kR^8%5Q1ke4~jY-U>*YT`T2 z{w8en8?;{kjM6;YS~cQWRq8N&N>{fcGOK+VV^o%XuY8Yrf62Y#?R~4;v!eEl+{f)u zI6O%Lf~in>spgsFwyO-49hD4Q^EX48(*6)o8~;_?<;jo*XjG^+dtcBqWkAQ z_eY7&=9$i!&c}Z0eCmzcuimfT-Rs@W-p#J<+|K=|yB{zsiMu1G=kYT)KXWT1HDk5r zRoJ^gVyu?O*+BM@_O|~&AkJ+C+S5JHWVam*n zob24V{rl-`YKn={eux*@rsJXfT}JBc$&GsH-1z8x$IacYxV?vNz1xrh` ziC8pNV>Q+zQIy7NtcfQrbJMQ9u?+J!q`8KoT4gi9vU)lvAX*j3jhH4 zd(Vtd{onKW^Nkaw^Ib{Kc$tow41~+0Y<^?pTnIo6wCL?!Vq?ZHw|{)czh6Aj(eV!9KBBoIR6m9zB3p#Z@_LLGM30u`O>y}p6?G!wqR4!m ze~5UvST2Ybe^f(VzvYqr7IFJws&2Noci*y{-YG&fKjgW4yuP=;hfe@f!@Na_jEHFa z5ji%~jXtAqw!*eH#uM8&%AH0jUB7U3F5hpZm|tyLhu3k|t%hkFf3{uD>nOHcrkq~M ztbT9*8AltXx^YY~yG%;&Fm9=LXQ=I{NXUTK01<_8#LJF1n+9rtVa>rQ?_j z7t{47zqOx|AtpDDTe9ggts1pty2t1p^Gz$J?IfOv1fB>6g^2}YA+Z(}Rue(8N7 zJig7HD4L%rr`VT}l|hoQ7LC<37|255kSHjS#+slQ4Z>IsX@W`+&jg5rMOrK<>rqHF zmIOxxhy!G?@F*YyW8+K6NkdV0pdss*jOfCPx! z^6uR6a3%`3xBH>TzQ)$hP896yHneW-&gI=iVczy!WzT)qUG`f9LDCSNW%54~`7(5W z_qxPd7%sA-Ww>% zSq!2~G#EPIxA{dGcRs%wjSc|7wiV6&@V95f6 z15sc&o(RRDFd7&dG-#kC3lolpL1HbG1q~LIBuSJ+NfM+at9FY!(#y@X%)>;Ph=-Wb z?tU>w$cK4%*Lk<3Dl4n|r`avJ@N$2@IR1i*B5U7;;nu(Of~m_(Bj#n{=H1&}_17hJ z(O+GbPh_1--lCgJBD~C{kzp>69{-m9GrU`ty#)__AppH+E{g|tYQ=f3dJwSS%n51o=gncW1%&U4_MHkuy;gF>g!4} z^CF$b&a2s-n$E4goxQ;Z*T0<4&79T+EVAWhYz3ab&G%UmWn*TluBs!{OVr-Q&CJzR z&0AheTRrX8MULr;>HEvgXbh*k*Qbn(zIKGk$jr=qea+ZMiL2LoiI*syd&i1xnJBB? 
z$jYU^WuhdeWk2>QNu%15I=zg?&f_zvuQJW_ABiE+CFCU(G=Lmu{v>`qsUBv59P`Kk~hW%If&SX9P z25UsWcjkBKKQU3z6g92hwEn5qEKhCC`D#?-yc@Gk!>P$zoV$PCN-3&F{TytBZfMWg zhKMg>|9ew(#}G?ZSjbv1CKl;IxxI+~_U^8ct}`eT%*4ZjfjXI$%{%*<4p$MG^~-6d zF+OKhdo9yCPXC&osy9kc6ig8P28%neqY3KVAsPVy00;m80KfAVvOLYDI#Qhofe>)u zh8QNB3HcDrn-_OPHE<-7fx80N+Lo_=F%OEm#1b6MipXiHE?UuQJ+MO=DIAs6l8wBB zVT|rrT|k?^kPXOqx6#6&eC*C1md)3+E}^n{O%sAy7NeLz4_KzVwV^$R33l5uzSqod zUjT|g+Y!bw5kuq$bV(8X!?~vNm6pmv_>u^pz%-wfPx1=&)Z31By4J@~EQ4ujm{rhh zz%C|io~!ofI)!BTnR#d)-9t?|1%;hkon6(2{9Y(e^UjXp4u=t1Ad?(&tDGSoxU>dR(V8M$f*dW%x6CP+QWy4v zwy6=ED9AMBqx+GB1-A4WN?5lYqV*Fsr8fz`Fi4Rz3wUCgS~~%e4rfgEoXRvFH|34X zUWcncugR`P9`d~7gU3y&^AMJYdEYqoLTii^PQ`7>?dAe!>;rkTIiOPX&o|~ z{Vgu!{FZ|$LaqS^kT4!Evr?vkrEnu4TZBE(@#wjO*T|4oI;J^`4cY-c!Yf9=Pz}$` zl*#)=vC9S}UG5~5***J!!RGUsa63)roUNg9rfuy+3_F5ln8{n}PL%}Rl6U#}Te^4* z+&CoNY9+6^q7o+q@@RKzy8*VIBwRv{-~;M9F}2qFP-xb20kJnho~-dQ7~@0Z0m0ay zw(3b?GAFG;gfa79(kk<4D#$#V-88(6`7%8*U!WaBq^FM5x}L^vRGyfbc>-#>KF>*K z8jmnWyTQX#Kaz7+Asj0vH#iRpPqXo*A7yUF&ycX#T)J=b-wSG^w#OSrqUIdj6dAGX zjN*!ZS2<r+mGlD!xS5+EJb2LvVGDT=E-8@qJ{Kqg}m=lo&pAg;t1qQ-E@ zoR*ivwk2(EndhvEqMO~D+y{cI3b!%Y=(g*m#%u#c%5v~GY9xM*>7ZIXauq?F9rQ6A zxw2~q+fS4>5F>U_rWq5xf;_Z<*zZ32+yv2Bfeizt_2xCvSuJ2e%FxG7Dz%ZI)L0ym zqXIK&P(ci%=m8hwfOWOfnWf3518kl7s@cId&B`eqAR2@Nta7U36-U3 z0L9cj?m+|;QRziffP}_0J$Bv!BW#KYVC}~USPANsFrcp`%CV5*( z0s}^QtP_E-aaAELD=RuYbBs~%-;)um3Hy9G#1g}XEZr4WojWHX?&nW_{c=f+7of50V0jlvD)o{cPjJWT2NpF zqf*MI#*H3hVG2`5|0CzP8|75TnJyN@7%q7MfI`^5;3${>1788+?D+Yjot6~m^+Sbe zAxckTY$&s$x;-fi(u&c_PzNJ4YRWeOw~HvMM)nZWu8ogQyKK%&kWr@ifT??YhL14% za>OwB)QZYRL@j~2zQ=^e+p?{tbX@EN$%rZ!E2fCvG%m}%MEbHj5s@54-X8CvXKE2Z z$*~;eX#X7%6fGLfQ=r06<=`|#2)8%~XI78V!==JAH$bMSJfvE>X4-Cpz zUfi_@{nEtj0c=_%sM0`KSfAq))1qlnpq5%|#q?=X#vxJ(+C-_;g-T4}CjwGe0lUG3 z;<8N@?5P?lIK3PB!R<_rD6isRvorVe@ODxe)m|!^6u;|uqjdB_1*3T&U!}IU$7IM#2jw(! 
zm8r_8{~x5PN7U8(px8e+w|2Qb00!V2rYKfm*7*VEki-;iUu{sIYXi^AM<=nkG(F}Y zbS-HmL@V~}ee@}6$rc9)=ZZ`jzY(dTcIZgV;-~PSoF7vdH<27Dv}I~Ba)gNqje9Tk zX@~!)p1iyvIWzG9*AGQ2Ipig>w~JvY`ILE`qHIBmR<(3VZ6LP*&qq$Cu!M*RVQPK~ zziZlNN%{ zc40lQn3qZ=7uG>vAyC;D1Wu#QsQgJCxnS~ zHmpVT`WmpKOq|7WvX18!T^E3dcc2d@!T;m!Z&o>oL|uFAt{s~eoNcR)V0~YGtrD%j zp3z<~ce)K?U!YLCO9u)OvOe--1f7%~L;8qqeoV|jq<>@N_+ay6rr=2R;T5JALG2UH zW4tKfrYH-0jX%seU85IbMc>epfYi4)g%M#k-2w_6B=~$c&(jUess{>NptJF(+g5a# z`=(ix_H#)wn3#CFYIRSSB>i-qm0+-VaYNB;&qa3#^dTSJ{ag7>_Ii7JmUo6O7J}MK zvI=Q+H*7k0LGWYCTG7k7>^2sDE~FV928u&ruJEHLjPo&lD9SJ%7-QPc8fCLd_}|1d ziBf62v{7x(dSh6Z3Duj<^#pe5j<9WlK~J55=!13qS5|rKDGL6%OKB`VJw01wsO8WJ zzU<}^;qY_Om8tkr$x9}#v+<{bfM!Fk;)PHzNBYT)TVu+ zkoH{977{uaWR2qSzIic?dx2!z;p@ zUhuOB^3~pgEe#V0!YG-E{9ts&7!gj48}bJsI8C7=jIiWKAzoa?*hg2xH2&}*e+s#^ z5-m$cwcW-pj$M?i0cYg8a)yU`D`ANqur+m<9x&YKu$DZp4~D!5{YEQOD27;t&eSq7 z6$6G5lwBTW!Q(UFwZ`g`w7!(TLd9H#|Cm`^Ea8?7@o(rDHo+*UI|qapfq!1YcF$Wc z^J#DCKGy()TXR1^EjFJ_QKs;YYu7_)Beqp3sfWnoJ61J}WC&lM^*xf{0Xl4a>5Dr- zsId{g<7>vvX>9am<~W1kq)&vxp@_+NGo@Vw+c(6L=V}gmdftK6-~@8R=+7pa<`30i z3RDzl&^|g3*I0@3y{W#pGoY;x0&ViLiNwq?yLv}k(}PGs-sE}5Yg>1}?KRLA=-#IL z{BC6QauOnI5jd*vd~^<_bPHrbR0 zseUrTZW_}Jce#^R>d>B7Plm^cuQ{P6F=K?0519Bg@UyB zV&z9sRXkVDQwf*nac~-=U}soJblc2x$LG+7nKq(~warse{_y>Ujr(ou;zn-%x8W=3 z8!uZB9>$3R{uf5M7L2XLc$KXQgH7f2%GkZ5)E|IqK^>0#tvw zzgkeZbk+-RTO`dy5Gefrii*$mxGScW8^vY|#=S6pTu9N07o30;W+$}-NNbH0VBAtG zQ84PJc+4|J4?D-+Cnv6)YE%;12R#*&7>ot#Jmgm9FH!`$c`fWuOextGI!&4oyp^p= zv54y|UzR4m(MT-xA!BRq8x<__L~`Nn^R+86T6hEEkg-!pJ0s*I?9&${K*Oe{ahOMa zMYXxw+M9qDIZX6_cvqyuOp%*$Py{7@VjvhMJwujKaCn~3a1>er;7X;Us&5(3qC>5u zQwMVhCPfV)HoMLx9TIIeM3vcv>Y_#$cXR49F-B7ONq7z#xOI( zZvIjP^qP};d*+H*&KOMdPEZMJ&Xp!e*1@p3(-aY6)SMC~KNcfslcuD0VW;HxbbwPg z8_eh`RS^0V{_nM@dSK(J7iTx;F}XUp@(Dz~B z`0j;FVUDbfbGtI)W8M^RQ_3rIT9+g90oztigPiwHn;&Q#Jd^)uojCx-IDfsKX5@knu`8XL13`Dv^#7$doA zhlX}z^bv)FXCmkw%5_uar?DD(P}v!~BP6aYo;Weq`|AUIVWwo)vLCIv(bkZLOiOHi ze369A;0RYa-T6v2VasvbIhc$Ob#1dVf+t!D(ej!X)X1X;1F+eX 
z5TTLKakpxN6HC`fF*{;@j%#SWhsC|`Lf7$|>Lh9+t#Gmh*w+KLHKuFZLnd^~kw8s4 zxmi2AC4aFvw|1Y8#v^`>?yY)i*q#!w)3Dfw-V8ak9)5|{uj!W$qD*bWcE z_!i_QPY;%aSZWu0~l{-jn%+dqSurHKz!ZOq}nBIz$LlCrOC8vA-i| z+vgVZh~!X0TjcQ{#D#=sW@;a&1N=E8QK)8UE<(^y>`|GgA6toS*M@gAB#$j5q^5yB zE<&Obff~I=b?$)=-xZuOkxtekSmU5k#x@ zc75IB_KQ|h?;-%cOeUQOC$|z^*l)>{>sM_|!6A43mUujVdWn%_6QIuh!K}gwfKJ20 z52gpc@?7TIFO*0Cc`j1>GWo<72Uc6c;vr8ccVxf9>k43oD8Ae7ao}cI6~MTT9ip2q z^5*eqKzo*_EUJJzo7~MW2hr=bAq$UYd;V2$-zoTIY(0wVBg+;38Wxr;;6N=j#>#qHg-ls?4p+rb>oVCE{fn z8*YdR(Tqxm#0A8;ZQ(iBO(aBW1Qcf73^afa7c&gR3w96;E5MAZ*Ehh(U%L9-XK8Ot zucHkUZWYy0krMkcpsyJK955rOq~U`-0Fuu;QRcoRtydO}V;lMW>!{Y`7bXO5ilLeT zq=|Bwu?+KDdbhrFDL}`j+3sQ~Q5{CW^k8F4^`J+#&#`A7fc$4a-#z4%Nz8)Qe!#Ks zj@Evl!*}gcnn{jE8K-FbXs!<3(Crpmz^35sM0}i&Z6aU+l$}DlYgtayIh%EBxkuh) z@SUj}8*Yxj$UeHbcjw5_fj5xr6s#xQzyoGQz)qSwiK+rZ?%_?KGOg*o9U)%%Erszx z1LifpG6f(bJPQ_Rf``b;bW68ekn>tKRG{*tuTZoyJyD z3$B7c$Z;q>d~kf+vcym|Rn9S_>+3}5BZ36xdA-T_#RM*O?E+zN1ei9;TE;W%qxnv6 z7mx2H)#jASiZ<@AIoODH!ZzT8Oa~JgC73SDew({Lg^#m$-*Q3*5$o)hBjoJ}rJe{) z^Wc$w%*kh(+4X#ZA-}P2bFdGsI)s^Kc0Ei1U?A8)8f>of1W<;s??W-H!N9PjQl^=c zk;v02{NYslxq}#yP}XTeD#u)z#{e(evp0?0*er!VUIna_$3eSIxkWW+k+^mR2+D) z#2;sJ`mgZ8UeQdJDNV?33^URVda$Iw29L^gsw?=5I3W~X(6<9tjymcL%Tf>E_;|{q zagdErboEae0)9v_N8&6>lNuy63l}#m=@IY5WdhA+F)u!4Lxl8_`_~9=A5^3OX;U=H zy9b`@COt4IW+#??yrW3uH(U0Q5)B%B z#E=wh*#~h4-!+%4vJV=BH_Q_H5K%~Lp-K1lDN`*bnzZ+h?C)_@`otzYmS7G}qfdth zv$Z~Zh{D2Hvx1O34TaSmnUqGk#tvX?XK3G`Q)C?gPceFB987Rt6(@bY87+Pgk}wB< zKF#$A0!o7L;9emrcYwPaJZCK4v0P%<92{#HnBwzfLZ9A1gA3wOdM>JiGnMf3w&)_phXXc2s`++i ziOQWAusYQn5S6KVfLaw90fYR}AApZy8VXvvInFC0Sv<67E&QNOEol2Z+}uRkF&Aj- zO*D*p+2GbvuA#_HM{eH5dLsf1^bl#pR~EEb1I9tas@vu(O4qaisWmaQ$J@*~kG!U! 
ziJvFzpWcBH%HGV!97}R3?rmCua>~`}cWKNg`b!2(2wq@KN9Cgsv3XN)M zHurzdh@)HlVi|;=tYQ2#3k8_?blc_IR<-?Qiv&`e&y_>BI2559U3ngr)i@t1j&QX) ziWLWa@ZCmyq9KTAR5Y)*F0;GNO?tDXm70r2;~MIg;|8;Z&HpevfrFJ%CYIC#VY;2- zL)#13dcs@|f%Ea%-Ws<;%nWmQCX_K&lQC((1iAX5nu@)4SPSGb*D#*SuTH|omRkcd z(5ExPl!eBHeN`MmT>^654x^Y+Irc4jxp>453&-8rQl4 zIm>FR=dP}l+ekcc%#VErrK{sLJyocm<<=)(nM<<_KwHw3$wr3egQ7Q#=bKQrk|7p| zEqP?qkJ^e0@sy&grN~Ac7pF~93Pn8QO{XD<8oX<(DO%GLi_IPGEWoN;`fa^758O2cB5T91orLQ{`&lxS%8I1ryq34~EUgE9yv zrjKO-Ii*0lNCLw17-P$(M@;C1w`dF|BnY04jq!0LQO*Z z$+X^B(%(y-;EtIym^19=eNM$XyEMNM#|ZdsE#}uQkp28o2Lca&qBD-q^4kpciJch{oNM)CM-DS>dMDX{ zJorbrm|0n5Q|p7GYi(Z)6~ZW`mO>?4k_I0#iOM&oA~V=A?5+@HJgf03EGMT(|K+#980~*Wc=tAbv1d)(_l;~F zI&Ls6iH#Uh{|Wd5!~)m+SjR#-NOd2fIXOIeN8Iawep&;-8K_d;=VN)qkFmGV&Fdv2 z6L7{*iYF{WHpE&}({eS60>H%Q_RQ14MsP|V`EW2`@EaM8N15;;w)-#yLWq8IDQBwn zd$Q20&(3%`8j=mWC-6i>96#;uc-7@N6L@)2VB67gEK$hpKxg^#Qpu!1p)AMk6lO4! z6Azqs4YLW5>HH~Cf*;JeJY=xj$}I4s*PC`I={vonnWDEUOwtft`QbN#w`2^(Va(mY zX+F7zQ__6?(u?7mJh=>{WuaixQwt0pZj$kUXF8umPlgf|V1!F4lRXHR+Xk_6ZsNKr zK=Mo`Yp_O8rI)=(EG(WXec_u`3V~R5E3x#MQs_h1HXQ4zGttpB+MBEDp8@P)rto|j zeCVZw^v9za-ubOO5L+~$er4X%WU2M0!V_(!A(dA0oGzP2OKKjYKP@(-IwjtdO4Djn zoTL&}L|Yc53KwN`iYn<@!_f8xsY*o!uv8aj+4O*tiDE5=Z(XDSUqGP0erVN^0?}hD zozfP#MWB6Jm69Z!qEUyg1sY55hp+WYhvAvPbDJq!<9v%hafnHtB5rBP-7&Y9k>9$M z6m2OghO$+V^cT&&#zmA~8p<{;8eN1ECw|z#kw^m(3Y6U6j5IwN5p2Aynj(on9>3ua zqKhpXHQ>b@`m2r^-!>}0v}GVg4PqjB(ldS>x%bZBo$I3_Ovw@56#vuTm)ctjkK04yA;kGQ1UF!vC(8it{V|>&+7=OlQ`o{ zPO%E;lHS?4r`79VDkQ~Fzv)gt!S1^rEjSEI4gKVBMl9U+BpY|?KHXpbdvokIYGzM| z`t)#I$|F_+@iLEbQq+ionR#lNJWra$2q3kwqkcFJ&!n?UAOe5bJ!q1o2SXAs<1=Xw zEq+mx68V~O#}q@?{CiRk8Bb>e7cH1W`t*gof;|_+kpd4q*U8+s0RorhuQivjfYCd} zK}6Lo?1R4a%g?!kI!5LuJC_9-jJg1DDLrVKmVjyQ5o{P$!POE^ELzs1QAp`FiVCN; zCK3=zl#V%jA?k5)sTWFtcRr6tn0LyUHl98-fMb+LXnF&YzX@{aJ`q=+Y_tlsK~r!w zdBK%JrA4tvAT*QeH#Bz)Og&Vb9CDE{QWMy|xA9cVUr$NH;dJ5owNUH|!i=$f0fW$k zcUknV&h2SG$L4scE7qqI$wDGr6KtK7%l}?Td@@(9U!wYlg=8l$F@ftMS5yE$g;PT@ 
zYlQLH1*6wyb1NT?=;MSz&7O+cr*BT#c{sS2wX4hQrc=_ItzV*c_(i_1)1Qa^%GY(`d1%voT@y(O$vt0JDlPU{qpyofFCq2Q z*F_-|o5%XPCWC^HVPDsD&+T39>zek(wmtWCQ4+;Q@_k*?pErNu*EJm!dvyG|Qraln zMtJ#k@Y72Q3uN`c*6_!+C&l=^zpf~u*yQ=wm3kEW3;%Vco?`R!zphlP*ct`EF6ve6 zj|*TIHBoGn0_;j*%I(1pu!|lJ)xba#uqRp!k&zJpfcm^QRiWtKRn6frcTuQ-E&Eg; z96!~o7eeC^OX$cxMf$UXbHW?58jg$qOPRXIWZ#s}D%p1{k{OEE=4zqOCCy$@;n+qV zyY4Psj(E-7R;b%8bgR>qeX~{2gtDypz*>?f`-H?NUJ`MCN?R-~aNZ4o6p(I}p>S^bFgn)lY&-<9cod}9DRDuW<(fc?TvGhKc<)tT~ zbUIX^sy)5weTq7WdY?TZ#)oo469qHRCBo*ytpNemD;9=|M=_9-)0;+z6IWNGdZU{SAg>fvvUWB+KA|m11kpml6rY64xtm#wb zO0t{mmVesL;s8Mq1VIqW1vdj112hEX&fgOm$*O0;Af*)TFWT?gg#35@a(iZJZ#+|y z>v!zj&1t%cy|z z-Tw&{EP9%z)-Tj?qyR*Fhl_t*ju6|{EtqZFns}nWUPt;OS3uj>^+MDG$iQ^{f zW+|9yo25y)CC%Bya+REEUMlI#B-)&LYvG}z!pwtr=XlGEY0(fb*Lxjq`q|pwZSvhR zUr+O7x9P^0`>v{`Kd$9vYp2q(Q)sONT3w6-vm=L8g?esYXY~JDqNY2=!TW$doCwK0o|-ruN*W)suU+%~7+Kospiu zM{Z{n`ts4^xvXBH+w%9b+PeAt^MBs@*?9e+N!Qn%aIN#`Ur(z4MrgH4<-`B2X$_`j zUq&T8=?^lg_KmVJtIyrd61u)Vit!Y)F;dv-DCc4CT6A~j*352Av*E_QN4zyR)4c3G z^|gDL}0d(08K3h0L&b`a|yHqmojOZsDD(b@?Wx zKI`1ND|%e71|hTD(A)8!Ix=gls<@2PcpjgvuKn+{jk?#a>9wk~(Xe6O%fxH75ihQ_ z-dBvMnq85Vk7M($HD+UXzf+68C|Qqcn$^YM^|iHjj&J+7uGiRdzj0i3TgyMXma6_g zmiP=&Epyfl@_RRE7OGsRHrbceaQU-Bx0b3aDYeQAovNjlN?SXXw)P#_uB*E;X_vmN zF>jGViR>ga!}WK(lHtDoM@!~Sqgmy+>)*U`gpNY2$=J(zO(t8)bi>vNk9jnj&@$US zyn5SI@?%TBgw)Yr{W-(;c+NampVyV$E{)yI%(=?9oXqN+$_1GwKWF*d&cd4PNjP`2d?%;nmrYAc z&PU6#-dLEO6TKBrnK3c_5iJqV4iV1|NgAeMzoC7S#=nB0@9&3w=I71M_x#!S$#;&K z$H&HcrOBCIo*rUe<}%(}UT*d}V&WCQ!iPuP-L5RzwEQm^>HhA8x$LX^dGT*;(=N__ z%kHG_o@8DoXK^Q8w2`qi;_jcK@*O6kDPsFRYp(v6f9GdLw&dLO-ArxI%r0p6>R`mE1+lli+empt#+UFK$dF3gwAB+(q1nSGDBr0?$T zulxFM>0e`YS7R-`OH^+k(N%99w^cb+aUa<~xy{pm%kJ;~zWtV8``v<3Y`4v~ES~R{ zyWQJ0Q^!Ts-LeI9w^(i(o^Dyx&D`AFvblxJD#22iO;wTkv-qN_VK#djV}6?-avtgv zJxwjWxTsDw`!6A!Zw^&SrmEe(_LjQ`xPTz@L+c7I;kwY+ZpWnWODgPZdaG@m6wl8X=YaDQq+D~*^!g^ z7&rHb>FqZ+_i_6lYi|;KZ7kl|UR1j*LB}qJg#XGWu`|@g!ifbj-QSQI8H-*oVo`#> zh>M$vSnv+H$)m9~n_>_%Rp;DKC6E7E0^IqzeTv&u^;)$6JJ4VPB|Jo!FrmWHK!66z 
z7BE(NxHuaeRImmIBtAr_*aaj|jOZX?M1*ZeXfz$fkBWD#K4$HiJlTs6tFl?sJ$%?8 zlN|lA^$ICPL+X}kOw?(xQ964P9UM1)Wh5(@BP!(mWBYFAs9f34fmc55X1ry4>UkU^oXXR9Fz9!U7INBnSoskYFM2xKV4Zby9E2`0kl6U5iw; zI)wtXH)Bbu3l4z+F$f3@0uzLw zn3y1e1R^dNe1i=b6D$WdDMoBCtqf2!z_6gy1P0CY&RcaOV*su361DpM7ooC@5@TpcFDg!$AlK383SkMKlPrAfrNp0}wp$2M!8>VS)MJ3??QV zXHvcpGMMaFF7jS$%aKXFsN5vpRR`VUGPDRkt=Cn{jUBt=sPi-#wmz2lrjk0ziI*=? zLSE^N!CxR@BIAGyW7JVeK?VXIX@m)Jp(2PN02&tx9%`8k4jevsFhGL?hKB|i3K|eV z0}TkE0Ha|TG@u|O01K3chDv}z(nw0dM8*e|@j-b|ppJq9l!Bx@U@Qj$>;O9;9~%ZN zkT9V@0u(4!p+Ld_6ACyOjSnbAfT5v|f(sG_C}2fk9#G&xb?ETnqM|}U0}WhwP)R*7 zp(5j9VPWw=Anq(YXk=7mETAz$<3q;7K^~3*9*&3O;dnS6j)z0{4HenXQSBb9zW&YM zt*U&g*#EnIwvP-Om;{MK!-LVt$W&Fwh6~3d-PK*)&HdcZy|{aKl67{zW@m5q(#KZ} z&~N2-ZD;gs-+HVZxJc%TW`5@Sd+&UXi@&(|Uu4o$@zowgv=YIc@ee>pZ=OpXT{@?81u78gF-t67Ij^x3HmRxK32vPd@flMb;&*s=U0X z8@{KitM8lJs(IRXWHZt8QRW3V7?PYGFbZ>oh zijJm6>P@98w)yjIpV%48*36EFa-R1m8zMfM>L1DL{{DVA%q=uKzfEe|TCAAT=W&#l zTDJ%xBVD>W?HHuabR~7Jd;6CyofSIMsR{SLyYnwG_x8)Rcg0Wt-egxKXpZirnY?#@ zAD%b2Hg9hdb>?I3RCX|df`r9|2^1(M7`PJU%}#dz&+zcaR^*ti4k{!ZOi)~yU@?(^ zL`B6!0W2meC`eG4Y1?UU?5iOFEFdT#;b4J;2?`1oN%)gLlWox!ZPEC6kpTn>258{_ z8w~)9{)q+>6&4UA6ks4hf7Y2ENhY(ozCIl3mc8kCa3?WL z#m)9^$G78=m=DL2#4~h7y>rcnqtUL@?$`d|aD4UW+vq^tz1{N`m>*G9R9nN;O?Esm zmpe00&B-^OITKblWko0GkLc=n@VDn(YyU6fU;dHHZ{*fj!(9GkJXJjaaqI!eUWJK( z(f#{wRh6h}2FJtka6B9j$HVb(JRA?l!|`xnWH=lShXo-I2YEP%g~f)06mY=9@oqjmO4gy)4sN<;WphGhN00000000wm5Eu@NhXW#kL>|UT9Arl10~5f?Cz60L z91G+yilQJ0f*6Kj7{nMuh%p8sqmW}ZH6Q>{Xh+6+CWwXhe%^PGqbGKnh5AJSEi_xU zQfm!_#=T-&*C!|hyS+?84TNieIr~;4yFv(X4-`Tas-lezH`Qlt7s!(B?)2Uz0kJor zO8~a}PanmLuE)A!7NDcBPmF+YGt<*iQ+JRP2Y6Xd0i6dpbuvx9n z8%Bgnmk$1bEMXk}yk_}nPPvzUJlERL+w7qY*zIKewCoD6v0mk1dX&n8Op>yxq7_WuDyy_AGF}%na3_8$Pbif(7rZf`ck*ZcY(wgobmWlu`F*u<+*! 
z_ilLW$4_FfSF$upKb9y4(QH z;X+Ts0KF~*+vTSO5NPBQ-7dkgk~ni%;tdz9v#F6=&~o!AXxm9wr<5>zuLWU`5!0k&(S|r_}8`4q?V9&hk^WBo@4K zKW_#vmn&4wFVZ+J&glU3@t3!p0GoiH^SYyX<@u=SOh7iYJx<|H0i^y|8x+A6jZW?5 zaX9)rQ|oatJihm)GCiAY*RaGJ-joL^Lb;;I9W%PC&+H*t1C*tO#x!mnBU^^ZaBr@f{O#5#i;Mq0CZWIi8{46 zns*$f+dS?}jzO_s^X9-L%#|vE`G8i-1VNuUWG_%CE0bL!Y@jh^YdPH{K z@Z0dY8TGw>`mTVx+P9Y^;^b3r#3>9Hx^#u8n=30O5$4k==IH@;sQ}q65w8g6S+gw z1!0*rS|TjOPn9e{Uo8XMy}~zJG;A)BnspG1ZMwljXs#rG4>3yGkV#k$$Y)6je)LLN zs!G&(3Cc2bJ>mf&%_G5f;R*glp!h4y54KC9>Xqz;R3eSyVT}2ge%rX060}~MCpo}T zW32V9%o4}#)}gC{nkCuoC+%smXb;vU%8Wy$zHt!GrM!3p{od^CXl6SIsC+LGQ32%c z+uUH;z47ke3CCL9^YUNAwl&Dwa<_QmAo{AqjW+pQNbwA+kEZ*v(!~r*+o>t@5@1k^ zODT&^B+CKF5zEma2Pj9`9yAbpgXIirutCDY)|o)$6HZ11iRbcBc<28`Xx@SD5^q6++lp+2r~kgyoAk77sdAmkjbPv~6B;CR zMF5VIO6fX=U;+CqO-5_`?Zi5=sIXIfp5zvD$3C6TKBy}`aLJccO!X4e_zVyBFDik5~R2Mm&xyJ&I$GY z+DULy`8(=zXkhT z-ngC9>ok?{lt*;EY6UBrg!FnMbeN$|HxoyFdZ>p+>5P>QxR@5k9&xTJSas--hA}tj zl_*{*XvD}d(FtgAMUs@z%Wa0f`%)gwdw9}B-6Rh$jGJ*oSZ-~LrdNJ=F+I4Js+nYu zFq&{dc?SJXTg9+7AJI_nGSUvMxx7$fw+`Bc==GuP)|>vkjh0NUFwfDg1vN5zY(fJ~ z8JBp8n^83o;$_PY=0zn4@dqg_53ATrsR6_ z7e{K>lM(B-ZMZ#qP^SPdWR4x7nhMk9o)1_cvQ-d(qZnLQUT*ew_^7?vXEP}u7LGcR zg)uckCfKAW7ia`LS=XL0Lf$c@DIoJEu&x#D4;BT7D`Q(+b9EB~7Iw$UZC&*|xyqrT zFz*8JU-hNn)3w#e^+QA+96cTwh!>-04fz64FvMf{`Zc9lJsxwoGc=w$eh0EwPoBT3{B~CbcETigd{P0Tz?D1Nc}-gEvmuHo)DWn7&^y0W{ltaz&rd*K#$# z(OtFZSn(aqOvHd!by20Ku8%DYbgH{u&uQn6uJd1m2<-~KgC+i0cOc>c_j%l+&dn;Z z&wv`jUdWR0_`<>ouvP1N<_4x`)%`D9s|?gwdX}#`LYyY}MfF5gl1!@FAlgIWUF}?b z_`g8YlU^pl6dJ3KgYK_{p~8+pZz{SXif!lbGnW~HyupCun_n*UNtssIR4z}@O|?*; zdB)|TF$1UIG_#)ZSoIkr$o4^rt=IYwj+MQhT!kx5@0<)V&y4;~LM3x(CD3(* zM1am`j)foyLwYc7P33ns)^RjGu#_i7!AP1`EBgpti=v9cTl4~e#K{}J46N9IbE%BK zq?N?+#%vWgD}iM@%0swm)S9SmSlg)_3IVy!QyF;;{M(~hEr>&3f_))8RZql71@gJCJ&EpHg6!m;rra3^#EXSl!6DOHoK6?b1rPp&j)V zOQTpU7}*jTxm4%w8lK29d0@u(d*+3xUL#8Cvp8~`jYk}r>%q$LB22z3EKW^zX4*^F zD(`{P;ngFt>I#H}u75)WlDO2h8mwiG1aa)ZqIFi``H3MRSQS2+8dXdUpJ;V~b8n&% zQ1VhAOq26nN`BG@}9?O 
zP3`6V((*sz#R+ZEi+`&KJfX~C!WaOECwd49##o~wI~wWLgYXp#D%?~ydcLp_N2_Hh zQVe~?ZQe}ubUm6Yl+8_mte)tBmmm&#+oggOhVv{CMGv-u6%Q8E@q{)@n2iSZ%`54J zK{gH-8-4o*gvLvn0d5I{ydW_h0YW6@xUqs;Lv_r>b_aPxSPf8;@z0T9*LBf2bL3~&9b zd@!u?El4G9p96tP`6GrvnKSDb2MKk?f^sTfb3eMy=8{64+6STqt4Ij-hSMz#56;S` z>Q=;#u!78%dfoHy*%}@gI@YS=E?+zY3|)C4+jB>4HqL>-ML_%j#U>=ec5VLjgsz|1qUlSJulG0mafMAzfMJ)^P40)km5)h{_J!}VonQ_zv)N9&6YrdNYaYTL(?N2QzSRd$%5GDer6eglO*&p%1ygh`WOd> z`f3H5&;9?e87uX$k6naZf<+{_{O(qf1O~2wzLH?r+*^>XSl+z#paf=q0-8!Nys$pD z5@gAcsak?-_x6QLV9QKEWx9DJCZM+yw|GsOf|~obGCAXCYxHJy{9%bs`0ZbM(Ia{g;=+?|0&|qRoX1xC zrrsnn)KYu|!a`LbIQKw=?!co|my%nPK22XMm}U-B4^VE-`kZ()dftBB3poaDtZ z=|sP^?kS28!D3Gn(rP9P;4J`9#~;1?KLvgJZ-i&sV%I=rfjD@Y9>=(_MF_mwBh zb_?!#s;nBfR%hMD=7fG!>$RSq4TM$uwwFobhx?pAfX?~c>|=TRGBfMt+*gW5F+g`s zLz6J$qHhQ8@$6eZ;KmjM#a}V37RhX#zJHNB5Zei5-g2Z#7uH*VKqEa}P>I{ETNfOt zy(UjL5yU^mNwS3utE`}uYlF7}{SWM&Qxc=W7o;z=lJJ5mv2)N8Mc~nTf@w6>j*H zj{zJ)H8%w*v+U_Cmo8Jgrt)DkYGMsQ;^3k4wkG-+wP`Ti;`L-uLBK!%GXR|4|IG;G ziOK(P#IfO)k`wkY_?k?)`u>0P52y8(2f$d{2H7b~tR7^g{?Y*&QE>CR(aNe%cTo@B z9zYH>kqJlk8Noz75}DT11?J6pFV{|F==z@X!g0<|PXNHDS>ZeA!}j!#OV3S4^C0}* z`Y$H-pL1+R7=C|AF6s$X4wF4}MYJWlbfQzU5m^GM^NtO1l1zmGlxlu7XqsK{b<_6+ z5u45#)FQM8!4oY7@*c0Z^klJkX!Z3Ca7qI|z#2UQRRU2y|0lvLB6is7oxywxMw$Z= z-H&KK^pB7>!VSRlkvaYW0`&jfT@~epFA4mYb+iT|f%WhHsiSx!ZG*2I{36A`KW?M@ zo_g~u=1m4ywAcjK)wE1rnDanxL};m4!@bikvS19S_HS!t?sq!u=NHx>#~ah=YTYh$Y?C)Y6@&Ftw=@ zVHemRo;{rHLeY@odl~&1nPZzgDHD3J{|uJD6o9X? z3o3D*d?2sv&2*su3y`1%1%vgUf{9KNxV&KhK}uQWKc+rU+aLRU5x!LmABBlu*gtwu z^!n}C#2$UK2pwD6rd4kUOJBEq&ufdj6T)`H1wo}7QVM0oJ2spSX}u}wMu(BBb4GxE zp|DYK7MfSu0=hSYr*Mx{{1Qn+NTK6z)EV~s*jrwHy1P>nFofPdVIN))HAMEgI%*7_ z_O8Y#iqLq4CgQ;O3MT;41zWPo@<*PcxwU=+rE$O3cIfc@4;@)oi-|7B!g+fDHlVS`3U+$khra1&%=m7J{E<#qu#Ptp! 
zU!MCBCR3<*PA(K74!B{zbQC=T?>BdWeLhJR{b?=tUZ*uLaEn+irArg}iykW-AY8u6 z#q)ejlY^k8yCFPE&SIw%5zvZA32EZ>`pVipwzNDAEupvth`IB+_Owpd1m$F3OpMG+Ox2q6bdC|r9Up@NH zePB|Gaxu(F2;9SNt9lA>*nO{+z+FQ+9dTF-&Ih6lx*Uan;h&k~O&Yjpi2i%-Rsv=a zVZ|x{A+ydCvv3FK0{35{%H-@L7%_Cnw$(?_#U4f_vwfKqIsE0+_eCbQGzfjnib{ZH zQjG9+jvZ1kEnf7SiMpYiSF;PyQ)N#a2?RjFvRiXm5wdY? z5x)Z$mKrCRRWq6UWnP2!nWxDm--NR1=LyeMJvtB3g**0lRysc1s6EM&%;%}RSsk}$ zH3bRgZfM;8B(a@TyDl?njm*Gsscee%g$<%`9S2R}CzecRAs$u^Rw|&E-BMK}MqKn9 zQy;_>_YzjjTCL16gRMd?{<0Ream0M^nP#h`Qt6R-9%7$_ps})<0vyipY@~yfG>es@ znuWNv2#UctAx73(3mRD3D!n$(nrIiC5TCG*71g1XcG6ba7c#(wR*l@ER?3?u#EFH) zysEKLR~7``wIUznI3|oz2Q`@6I(d7 z{GMgXG$24|GhsM!y-amRYvA z8X$7$2I6ljRF4IM)+viXUT4+>>{0+?Yxdg8)S0s@pAMWsRUCi8%> zr8&)7)6&U~lW8!4Sg8_%A#b~dk?JD>hhR3K2?PAdPI6(@X-E7^M$YexUwfcRXgLV z!o%$8(Xo#EKv4$MIpS)xpC6-I>8FUD$?~x{fFO}yzTXw9kNzomJ+rXO&ounpv{IrEZ1>VNBvr;$F2b4K}#2C$5Oqd)HF= zP<}yr4#%*Fgb(IpDy$8hCM&-;nU5|-=8p#MT-3>Xct8nsK}wX8ob{q_;YfLzjM8Am z;a0SszX$nEe09U^Sj6KfV8$A2Pb$Q%*1EOIK9sxLX}-Q3HL4F35ESWDLSkIgeyfpP z6hDf|`?@3`O0lJi(4*@`)XX*a{}WlNjsk7FO)O8BqN!Dk2w4~6V{X_yP-TfkcEQ^! 
zcCC#ez-bUBtDXI5lWa2d5dDUSsjrd4^>!aa-*TzxMTr3n~6eW%osBiowFB?Yb*5b_&%7@cbs<@mm z({KUS3_5Tzj{YwrOZx(#JfiKosN781E|}=;>XrwN<4A`Ixsfd#7_vomjt0kl6Cycf zNAa|!Dqf0`+A~hXY5Ulnxj9Tqh}@CkvUOkjnGnNM4BPiZV-S|={&i%NGoS@N)?yjJ zk*+vK6vUO2po>6~SezdG z?=cj^*OF_|G$M1jYNKu-r~Q>NN$sz(^(BE)dp-`LWL z=pO6VcCS zK7-b_T_==05{w$O_-Ca7SSvKfiVx$BqS#&gwAZgt63~t|q^J<4*rY)@<0o|k6k{y3 z@X`V*z2SCQ-AH$EN+yH4!5Ao{bY012n#ZO^P#Ntf!7_Np``8@)}&4hy(l=L$?ok3FfU}}p%W@g|9h*`~lnB)RKU?5j< zNV>yJW)w3`*DnGIF^fB&&ohOUh?9T|rmPe&d$CB2AoWegnK9;L%H9I@u#ocg~Ya z3;OD9F{ftJ?lP4jRtdyt`w9%sfeCv9Vg+_GxGX7*jQe|x0O#LW&VFWZpiYH0GmJ<& zXu0soc+_CF$P7i`W*&HUHyHj};~j7fLW&Co66IHcVsN!>3W^(EhI3RQVLNx3nFWu7 zHBw~T%SA2?V7UAo%!MteLh+d#ibfBhZPx~NR+uS_sfj-nRw?A+m9e#E^T#g&TZGn$ zfUizJ3U;=NouZJnWRP@WL5?s40T%WQ`3mD|1+WtI31r!51<{)qNv`kJF8I|VjGndw z)Ah0zjd(N)CK9n9DJVqbxC-_IR&o;NY(R3k6l{0``?WL+urLQF`f<<3ivdwn35vjW z03u=?XK8uhpI%CXr{Af#>$xF|no(TIcht*KPxf3D2FITLct6yPU5K8~5E-Bjboz($ zJ-=_)Bbq4}{`b$q&kAigx6QPCPkv77m4S2~$Vu6_Yx&7Wo4kv&4Bn#7s-_~N0dr0gTLy?FtQS;-2WS*Jy_d@%OT>gP6!r@?T74TJ{8>FhU<6*qirqY8hIltG22$IHU>5gj#boG zj%s8MF+T@>u%orHF+WtDo;OVAa3-LC4S04~ATdv7Q9L)Xsh9aAL=?pv z``3^(YeUm>6EmXtdm+t5F%y!{%1ucGi8RcYxH>YK{Y2u@WK29Drb)yGY$tytCNDmS zf^TGl%Rd>g)ha^@IQjx89Q+y?wAXs#C#7I9c|7V7S}y1SEDDJ)ovYe z3^P5=H~R3y`6Uszbq!d9nJL0vHsJtXPM75Es&xN|609`Mmk;xg8hoQiGe^ zDZ&7$g`{J@?IWZb=xlZCLDtF7bocO)YOOjw2v80}9Aai#Q3$nH(|~OMY2Hp&4Cb>k zpDv*j}i#*{@YI-jEYuq!jjPHvqKYK##ZKdC#mPmLpu zn9Ts8lrJnm(U6ggKQrL)7A-mAG9#7j8qyDNPVva9U9_IYvne+qHJg1>ST<8wW2ir? 
z?-kL6M!Iw6%W#U2dc(!l74e&^FXM?9|d{7{HZ9nIWv8egqfv3(+| z1N=g@?4Y56;kSwkU7ZHC?b3DpsL5bN-2|}agGjbP0-$_PW-h|ViQMUjw9_~tL&`_t zTDgctCmPYS_IDGP9Veo88$rU@1C4yN$593;Wv&t+MTM#<KZTPHn71a32_M_To1YQcD5=)N);E4jjR?i=yt9@K9Xzf{EzU4^fcUiG%Y)4b zmIpCisp4f(mfgR~T3-m5IwEA>_ZvGrsf1Frdr0!47r~S!1##sS11x^CQ^IU-QzSg} zY}K1_wm-2tiK*TOE62;pxOl49tY|jsPt&@`6oe+T6MzW4ZQ{P4%FnuOD`kQvTvsrX z^EA|J9QZJ~r#h}ZiYUGA$~4nKm7|`jvAd(Pg{X4?48dQ5k&B(A5I|8mq0Ir3z7zIF zb3jjwNtpjT#Q3i30F^)pFQU_952z9lD3<$x^BMwcR#F+?NHoh%U&Zyw?S#xo#j%ix zv?5;uc5@E6D#=}P{kxa_Xm*bjTNkGzRjb^$Og7@qq+MS~=3X;oOUqAe8))dfW~GAG zIz_p~n6{Z3Vocyyg4GT$2Hd@kUP9kmkr2^1B zJQh%)TMKIhkc;{1gfwTG8Zia{%Btgn<`!IwDQcPobd@Va<@EJFYl2uHW>@`bnQvnFylN2*;*pV{;uRyHc0H81? zrlpWS;r1ey!+i~Nz7SIOzHb7iO5Tm*-{eyl zObrg>RPL0JAJL@6Nyiq3@DG*i2o}YUeY2u9c+M*=b#o#CoapQ4&qtOc9fdHGs6-2f zRTpV6yn27-wvn-aZp|K~8&DW_hWW&OGIp^J9We^mcG`%QrDil4JP2i9sJllYVFjz{ zUV+?KE?qhkBl=8ub!0p=JgS*IRvLTAc+WDJO3HMCq))vB(daug_zodCJjHDumZ z!307FEZIFSY<^rk&`RybcN@735u?ok4`iP!Vi8!6z!4yxT(I3f)Uqqjpnulg=wY6b z04nmtw6VH4)kD^tt-rN23kMrR*I%-raHh2=Oo9*@6Th%&6sp=EbMZZ-=?1#ARaq)3 z(pfUpn|3I~5PMd8^dfodN-C6e)o;^>6!INi_Wt!A_{HpF{J@W~A^$$Z==$2TAqG5P zM+@#D=YHS0_EuP#QOZdeo#o^z z+1k7+{VX>A5-I;ilyk`RYXB7=9C%B{iIcAAu?ssz21zd`fN`>-rfi6{)?)5R1X@@S z^Y4H2EQs19XLn?y5n=I3NwbT9L)yp@%yZnlVpV z5|BojLJ4&kj`C2dT#AczctWTCGvcO5c-Xq&av7Dfo^xy5JR8UjZwHIwxXXc(A^@!$ zr=t+~wrJNcL_(-h^@EvFBF_j@cgI{(1a-{CNNmlTpE~(R95FCKgl9E3je3X}} zZ}U7P#JXj(<(ES6I37hWj2gcA5}ZV&Uc?aDd$j!`>JrX06&!twcL{tbmTL4PBZd$qWc{UIXJ0S>oIisMd@C6$5 zhrn-y=+~l`fVuP$C>-H7&L{bwCZ%-#55@CZ3HLOq)K;8@5K%WD6cuJ4skXOa(XWlp z*qQ`!z0`bj=u3H%0JRBt=s=fRG4x4<0tzq{{po^pLlq#JQ1JD_i&KO!(&~ftMM@mR zZ)WrxEwallZ_pooO5TAJY!O?qq)uqVX&kXXU@BD16P%MS2ldLSSTSlYOo$vEBRZ(> zamK_-{<9K<8^a+Hu96I&K_Vgx}?Zr{*AUhq%3Nc2j z6R1Y2d~zjB!&@u|$}8O{3OEYE?~WD^3`Y7goJ1NEU7_|UoFJ3GGS{Q*ay&I9g!Y@s z_|otNMTSgtU!qUg_G3;AtUF~I5hzrCSK-Q?gC|l7F+w4fq1y@?p7@fgHD?ZnDm2Lb zCX^xC2`b1nj2$1(XK<&Z+|%bAuB6aPzOx_n0;L-z)z0{ci1Jc61yyv=<`$b~v%d1S zun;Cx91C!Z32op4o4SM~!m(b{*NC%}xoSkx-Q8g&fgzFDm}Y7K%HAnnZ^pv8pjKgx 
z{ks4E^RTVlun`Fw`gLIF71`^66n_gJq_l;*mA3GwBJd-Tv?Fkm(j+~VooFA19*imi z-DbO*K7}my8f*(*D!4Hs!e8R$KMJQ%6t)HTp41kNR7+l;!nPP%x(JiIDxAKNUcFO= z3_NBamG5Y4*2%h2epX?iC?8>`$z+?T7!W_PlDLr!nG7|_QTRyUS=G!r0@d_KDi@49 zmCZt!_vRTa@Li`G(=W<^iX&MEbn}|v%4y=XF_qedqmY>lscZ)$qS*eYB1dAHZ>Sf3 zzuIm9lnN0QW`;N^R#eLgSB^3`=A@p(u_0^>pEAgs1o4fQS9D5_Azsh(OSIk0IA49Nq@VA5Ah+p@HlM%1&i$-3UfO&%00079k~1t1cLhX z%5LZA4oWW$Fzh{E=7_tioi4<~k_}Q!x>NUg;_f{lw@=XA&Iy9be5gCMA^U`slub3U!_`tWgbr`_)Z%U5wqtc3+dBRC&fJa98 zt0HOH%hdhSFmzQhMMXjkPs0yh&u3?Cxo;=vPG<~`5G;M`rx=riyOKBRF|UmyQIMDj zYs+fyW`Nw|jX*t95O3RcLd1XCnu8JJydaQtQI#l9L5X+3Iq)0(eNeDJ0hlb7;~E?z zFUT+Va&0e8S=#3lV4dX(w&=1JOq8AJC1kKg$62-TH~D;B>KC3FSAO9pzH%6H z!4L2!&H!(UI|fko$8tb(veI-Y0u;YM>HFvfyZ{chtPoN+qyeUC0UX@Pi1ANVLgH~SaF3GiY!D_|;hXDY#*qG{w8dfH(~FnX0SR}^Ab%8|Pb74(5langEbcSjgfmV2 zs$w$%P`Yj~ugaESc0Nv#2#J)%qJ)V0lDdjkW+$LY44=p4H3dH3Lm9P@6SGu#td1!| zRBlSMYJgtYtGEdRPFl$84L()ICihV-j+zN#p&7^jmwU z0+AVGJb^D;<{ZNIkK^1A1`y{t3vs?m4o71|rbbY;n!AU9o{rGNY5p|n;eLCFytU4R z#LPw46s8eaw)p>tmMUe%O?ra;auaNecYRpRimB^6u9~|$QiIC9O(IBP+DU3HA=S6) zRI-j{Fug~={2Jw*Ksc`;wKTsZ6ye^j-U@j+k6zM~v$!34M#yKn-Va|LoJzs+Z_HeV zE!QJQfnP@JsXFb~;bir67cdQ7W>22iLpHG)?41ZPJ&INxCN_;9cZD~mQ*10|Wy2=2 z=k`mDMMcg?Y7hwCWEK_qD-UW>k%bTBIF(aLA?1gtJClowPcX~OcJ3*V!7BXv*K<+v zOH@l~MSVFRYmrN{i;9}gqeTLYB0w7u_(eqlD#9uUA+L!)Mh;gxu|h0Ga{WroEhm1D zn6UZGfk&sRwT?Yu-$G{DOBGQx<;|74U#+TyT&htO?76QB3tQe zGYt7CtF7`WX3F}sfOM)Qu`(XNA3R%91x?l5=aI5!2|+_F1;pW1EmIAE*CA`;V&F{o z$Z)@_<)L-3(f35oc5UT}P5Q`~?Y=a2mWYlT+-ExdI_cPZVy|_UNq0ubXxX0`lTLC z>L1f3k9Mj-kcfg!wXE7YDdPy9d%Ec(H%vh!^iASeU<8cc2LiCzjAlj~FfZ>s?uK zzAUoU$OS&QvMaVjw~_yc_%hHc*R0L%puUywk4;IWiQOlEV8=6!vaRGyU<4VS{WP;J zj-^7`CykV)f*L`t4w3z0Y;F$^(NgK{-X{D$u8l7e3}(uu&O%L8CVbJPttVOlZ#USE zq@viAM5+;}qF-IrRF#u1s2{rAKhS&4en*tz@h8<}mb&l6BO#N$gYBbCAA_k}EpbpA zYCLjC*)o|dwN3{&LeM7@&}N4rkNEC_u9NYWN3#Zbl}jiVxD+V?O0$R>D)5jbYyj+c zVq+bdfmzMn-M7{_*ZxFpTh#itGcz+&TI`+~&5<}P4x=NU0kZ+70Z4)b0TCx6kscO^ 
zz#>Q>3K23y5CoBkz!2e39EwAc#lN4g_}umStL|lsxTJK5`CLia=WE(|#9f?!9mCyLiXi&3oUouX}dWMZ5iUQCIC>RO;Hjb8qk5(lz@pF|CVDv-+G{8nKJL zO|&|#;tTl*2MH3&aB)t?|B6G8qu3Nj^K!lG_q7S%dc85EGF@ob)?R&iYrA{-UA)bl z@8;q~SL-phsvbul7luC$fMEfHgRvGTe&c@k_N-?D&YlY_?k}`h?OWWqTB*ukoGmIL zSaF!@>0*e;c}if0cs-2MLF|*t}-YMNUv0y0_T23HiGp zFV5btQyggvvxh|DKo}q*SZHV@NTN{z1c^k0pkl#60|q2eL<0tmhJq-NghPYw8_l}X zxX~KZ%Cvw2`b=@;gm2+(TVdedO~H#^M2YcTn4T$)dMYa`YqGK`SN$>-TmM@s^6Gb? zo5zifXKZ9_?0Q<7RTkHMw-oKV#<;d$+uydXdRzC}f5G`D91gp4ZIO%535VjTim&5c zW7~z=f9FH-v{KyQX$viUS_VZ1MFz!DRcRa^jKhNk3=*j!5mBK6NmQsH0Fr-mBLYF73Z*nS5Dh9^(4Y}9z=elIgFsXuK%?OTvCwEJ ziG*E(Bm$>PX+@n*|GIRiI)j=wOY)_};*-pb1h1SU|CMvOLEKX4kn^y>f8xQqMdnE7q$LIhuNs+*DzXK!XjL?<%~Vd4C4-F5 zp-Cr%OZgq#L)J(s5Bs%cgWy zH}RdJzaBh5APN|bhXxLUAQT`t(0FKo@xVg^4;TaFa4sMX5y&`TcyK@v4m|L{P=Ik7 zFko=-00Ib*B7$)MgYlpd;XI&VAP9nxP%t7KA{r5>K^8a;4T&^}0|L>=05KX54-kxl z;m}a2n^@01>JjxDb)C!FuV(E-Y|5L46C;~3p1nCArTw0LLVN5?xKh_dpTm1&uxn!EsIke}kWlS?!yA`sgk7O#C<@yvhYHboS zw}fX1qE5F`=G$|9dZwj!_VnajC$HsI;gdG6&z(apPJZ4I)PG5;QX`dx=8nejcUJk6 zbJf;-cb1&1RM(){zduRibBgyH`*OKduhdFy?)UqgbWY-zk#y-*QgvcHKF7t3={Cki z#BwGXO-YV!ubfF`&v%OTr3l}zhrVe4k$<23miXq1&V#qOvP4(53Hft7_M9G{vQ>ns3L{)u4%9P8g>RCib z&99zF{Y4S^3mSrE{#MNU^|)8dLA>3Sc?ya^@G!q@bp36K&&7phji-j2EXrGi3ZcZG z$jzfJ)Sp4*+{Wu~Y7wZRnGF}whwHu2wD~1E=;R{e%9)AEDlBud;`0ps?V4TO#kx0? 
z$)IHcBW*J#e1=y|w>kmTAM|8#DO-CYq+sCL+h6Ohr(GdqJH=P&LUgm0#*D2;i zKAo5&LU`_r7-P+AxUB($2u^SH&nidRYSG!;W;>RUrwrz4oo&* z43dm_3y8P!@=p{;#nL8|FC7vEP+Q96{^gz#?+xqa2kdI`5~7x0E<5(OamCFAAKLC3 z4JkS-B}kV;Jnrl{_O^NVtc*=;D3EzZ)U8wOct&?)e46d4G<}v?KqdQ`TdfN7-x}LE zyF`>&ZBTa(D*56R zy2=)>F&rE>b>jArW?Z&Vo~?@@I9P{TI_9H&{95AEe@SomaG|2HG<6J=8j1xPGExl1 z-ljF&oGa>;P>*a(MngV#!&u>Gl#)`J(yF-TI~JjsQTv;KgHxNLN3Rs|N}L)*ZaL9C zMz!$!JgfY*zQ-Af9X3;&lZ4E1=p18bDSue}hR>mBbc!Id2Fo6`tr-GW;D>q_bRkERI;T7-aiP_?qFF@4)%9~TTIzG>;7i{jXaAehQo9pQu`3cA zsOLvqtrIoOy}ykjU;DRu=w9lS@T!S*6>P4d69Tc=-&ha#+!CGpB0oS11b2uD&yy3@ z(&QB4PaKQxp_|%JIjle|3^Et1A!&-YsYYm#I`Vj_awmnS)L?amtr4m5KI18`%;{%q z1;830Cf7#6wU{S^^w{6-4tek%T%;82wQ4KJFdiC@uK$3{c}J!t1(1)!R8~TYrh6{u zBc1#N>c(^N;Xs{t24uB09uPd@02O4eI0QGT?xY~v++={%hB>X!=X91 z8r%jkSF&~Z!}0H>wjnY!Psd=<>70A?A-lwe*FEomE+A-IoS zM6#3A6{FzEYDx+eHaqmY3N^yK>y9ah%O!r|EZMsttcy$vUw3+NJu5hs;r_wO>+dN5W=pl!NT1f4NU`v4S$P&A2uB!qwrq7pw11;^yva~yTmleKe)y&@YcU^kb_eTClzq|| zbTK!$&Lcy)UDkGD@9Y}cW^dMz*a`%lN4+&YCKgJDHtfW3YnN?qqBNJb-+Iz()rFjs zDYinEp*2wYO4-8_A^O@$0Q&peAszT`{s(4f{xA4Bb96Xz;SaOgjNA$ah&nmB%I`eK zKDla}gtMI>1A4CX;!C0vJeSF-lKz}_ku|Sb zai%I?@VfRpH}igSnPrKlL#(ukPz|wMTzcCnvCMq<(IIje%3zOCJsv5`iWe`lVil@L z0NO-mPKDhYYob?T3N;;y1*s+iH5YSkFN_(GJ9fBD#wRtWyIcZfif955!kz7NsY7Q` zX>wlfbI2sd5!zk#AY{?8pHM~}80jkraC*2UOz-Otyh!7ryq6GAoNeDF+s|3(2(hsZ z@L#p8gMV*~^wEwLa(}n%o!K(e1kpph5Thldl|@0L3Hm;4E`T;o{}gsjf<%L@PRx-& zvya_=t?!!3Z6CYIqv1fz=tF4ldCmD(0F9qaW-*zb14tK?wr>DC_Hz$oQ2-@nEhI?O z0jjfn;PYuMhRSZ7-I>Zqn}c(25;LULiWuR7HA4!ooNhTD$vhOh54Kqmj$E3K~pTOpll+>8wc{pt4 z5>$CHq-5TPfRvjc51gn$_9^gLNMwq5Oi4(m+NDIgv0|Ug4Ue9DZ88-E6HdgH_;+e@ zNZu*qD{f{n0Vc5iQ*E zU}lGk;zzZNYf+`b1|FllrIEV+#TSF~(Kp!yT||`!aaD(>X$vD0;$w^1j%I^aUGa$&Vofz7vonAvX*O}icOYO5D z*IKF092Z=;ApuPIvts+lw%U?j6lY8F@;4BIIDAM8jUZptl^p|3kXVO6iTap zlyN+j8$0u&{fE?9p!{N8dT{@h5Y2y!d`LaG0sNtK?(-%!~&_GH7EaVdl;~H@RgehRd6`Z{Tt(j3) zpL+wyrRYL~^K+M?2twijzT1)xv|%()qCJ4i{56Xba!PU@Y_MNHVT^OAK}9h>1u@(< zxQKj&Ag)bRz!}=CjT70o7Cf?4QWGF3W=#tsr#46dIvT*KK%vqhyH1jrj`ln*k7R!v 
zj3;Y^S^&CV-(`Yo+5qH`wGEY+a4+$I9m-ZmA%M;D&8`!b04`!csewlQmH1AZn`Y<@ z&7=HfqObl3DzDE1k|>y9DcS!-E^$fC2v^b%^QKsXsGR-}r_1^C|31QzkM#fJ+mBOw zyqBh4(NV(}9-2&&^F4;8AxE?O?uO}Npm{>Az6u< z+IaZ_W8^o1_#TFx44=ie&r=YE{Cn4`B8gMGKVAND=+6K)Hzt>5hmwS;;C1L3%>(NE z`)o~9Ty!R4L_tqP|023;(DbiJ$i^1t!;q+dQ2eomQ$%pq^{;@n7W-G@mdORIbK_Tx z3rTmZej0`|G?q{;<9kWJP(aX)p5?OZ_Y;tpk5{qs(J4}@ayY>FP%As#001 zpuRCbMje3W`4?7SHURm_cM=Mw78Zb@A_CB*0M2iR1MIKDi0qO7zhbiB0Ej3y5||KD z8m=F0BP0+HLg5^Mav;aHEo~ShFBh6$4XVi*L8iTsf(e3?~^8N?3MftiR9*8HOsHHWB1*VYj;a_W&Da}IbN#qM~ zh}W7CHo3a=Mv}!YliJUXYs(2b0Y7;==`f7IMqC%_a3M#9kTr-~5%dT;zCES%rK_BP zrfWiafd?n~!w|IPc#8M>_89uV=ffsQJ5;_r_avsyGY@y$Yo2^a{<@4-u*U7o(drV@ z^*2Knt@0S%ai*Bt8X$%&t1+G@+sl~kktTI>FmD#E%V!)3qLxKJS6xk*DOf!{3bHGT zaX_2^C9x8jzEh;*o{B3fKoI@elK&Zw)r9(Ox!~ z4T&R+j~L<`{h2RM*xR>&!uy&-v2}C?@XFF9P+K)Q>^LIC(=|*Jy z-PH~K;VA!5u5XFPwd+RAIvVgzd<@+jtetd!oS3TvY!u7>)IvRRk(fwMUkSWccPC1k z3DlEP?FIrj?WUp*y-qzWc*sv75yy);ZKpHFAkjPyM-n={WU*$){F`!kYOStxt*Sq3 zijiZKP?V!#DLP!?D4C-BmXdlh8RqE74+^<}4V?xh^S|M&v(A4A+U5;kp4P6frWF2x z7nO%j#3w&b{cRwddLQfckcWD^r|fxw7h^pCjvXNJ+Ux9@Grl$)^#p|Y+=~Z17z)$Y zrre@Wx*i;Qd>IH&pz}gT-oqvg5W}JnPbV|L{l=4&#Y%W!|HaX=5$G>Yr%C=&a_eE} z?1`|w41+ z^G;JmNzx)UP~b5XzNL-e;}q1W9&e$54yss8>TbtDLoAoX0M*z`39)zOsgwFAH4mX8 zD!7$}^R@c>d#X9Ti@8cjBgxV5fZ*=>x@8m1aF3iEslZ}T7{L+Knd4=bptvWSr0JbI z7gm}>0U8um@Ec3f%&9@e5Lp4Ilp5*~>|yGucg=2~5s_GBlhcG70FcwT?Rs5sR<~uO zj$75bmdkuF6wC=8Qd)ztHu8c3P|f}kauO|t+p}$he2I*`leFvk?BW9UzWJm#mFB>R zW5?OCxQyF36Oj?C%BZZX`@Vy;;20WiMg=UDXIxmurq20inN*p4kZ)TqlkgKOqVnOr zo=mz3C|BDhmVGh9GRoOW z?1(aIbpUV{R_j|M#lRsN>$?dF9i#eIHQea@g($wP;&3i?$I>*SCkeG4)Qu&AJcA9sb0&TRdXttSyv zwZNZ6ac)E;7MTIrmR0lvl9mAilCpJbjr{ls+ZZI@i@8W-mwx#wa-T&&V`)o6oW#Tn zW#a^_f++J0R0xr6%zyVKA>xIGz&5nztI24E_6Ae#>^svJMA$$x|NEqdJtjlSYl&H# z!Vx77Hur zqj_t2+>am{=v7l&tv^ffZO^#6gjZ;KQDlo`1o^+U+xD`_c^|E##Rnae#0iLlU4Qzg zjc$wLjfW_a&UGk_07@h$RZwDZO_I^f(6CTbb%}Uj2Rn+Wq+*L&?IFZa*t}^a7?SW_ zu(hKrZ0)fXDu>QB0wvu16j6j?lf}auv>4C1=IYY!C!5xsewWo3#9}eExWsR-%&CB) 
z1lBMoZ2xDg(fWw8lERYdW!O?n#Ecq_ngm(i`Yefv;5x>m(Tu@f(g5)mNd;>!O4s+# z@D}FZC^6RJ5Lo&2Br)!W!a8x8$pDCUo>=jFzfMThphLJkOai|KlZ1I;!BpbJ^c>Sg zeuGE^8SXJFoGk)rtYiQ`;DGQ%Dp}yj7@hg>MshaWOKa!hJ!o2ah)#Mx*&vP1Bs0n% zs+9QY>ngmBH_mC2Fz*`)Go6uY6fTM5mPqa4rK#FowZOFnaj5k`G!vIZwpW%{u09rUvY6%+(hS_-UcHtZ$ z(2*kG8|Cv+ZYWhi2q{R0=A|7^s7jznX-mxcNE*7I$gVUw|CY%@CGXgo?=e%{aa5!q zDP+}wm@b$uw?VOtg3Fuiy9o18a@Ixn5g;OBAa80oQgZkQ^KzuE@RYzgxrqdH4n?X` zCf0O^rE^@&p$KwkP>Ca@he=NjDs+Wx1m&tIO&v8A|0bz+I(vj0OY6(!W~Q>_H6ACo zZm`LKi6H9EyEbHf;T_{u?lc}{%G?a_DfRrhwDHa3H>(+b*IO?D&zZ?d$w!V|d$nGS_WgN@tL>&?)U;1SQH`4x+)K=yBbT~}cp>_K| z1KT1jy`kQc-m~M+tJZ9hf$=KNIN8W}6m+;-PIVKOk{mg4dt)^uOz~=K`caie%(!9s zqnElCA%E54w&7#tGsO0gT_$D79O8%&qq{stbg->n#n#Rk1RXO*NQIbiIV)ofHBL#| z)cWOpi{C-_%6%8&V`P#jNrGBpApg!NT+)z5!EEArMIxrtB;q7 zP@Ea(s+t<qng4j>co$(wQRo_DBBY>f%Fja{U-Y+*ub$$!NSSG#nAZa9^}T22f?KV=lpR zj-Mh$krkN5_{RjBV!;%I0v%zmJsMxQSC8%8j3)8CBC6R?F zCeq+d0mj~hv5Pc^2OZJ9i18`PJA(2S7|`l~#!*zhefAWl%vr2wStAJJ(+LT&6(V&? z!PTb#J+j>nPODhB*H9;3LZ~vg$~V+_e)GvsAIM+v*rarWS12bF;;6~M1gh&8yZ=1X z1z7MMgJ^hzQGd*q(#$yX&L^Hs1SN7P3dc0amNImPqZD+Y(yZ+D8I|%2rLf5*s=X^5 zRSG;qrJnI)QiTnJJtmNIm`kZZ%^+e0>G8&>PcL8L<$AFA=32N(HZdvS9HAVWHQkke zAe7@t~`PktdIBPV5_iYH?PFTlido>;9!HnayX15`f;ghDaN|8eJfc zcC^tJ)^RFFi6f8Y$vXWvM2)RzMA47A>N z6E)et@jP>C_{+I;m3mUF!B@#qL!n;j?OR%Gtx7YHGz24`CobNHObeU!vDU@BwW zo$C<_kH3g#iyxh(-s&Snsz)SP5>KOMId7nR+jbhAY}KX9Lg9|q>E2C!8$UMu<%>Je zl2Ax|e1^0KjW^!8Oo(iD?Gzv_kD}-=sahCV#f&y8_kJQ}WjL-mS1yUov9=^G?T-gM z6DL$cel|0|96cWKq|iH8<9N=NN40~LI!TF97eQo%U1S|VG`M%B+%s3rB9Y5-T~7j$ zESzQLuqiK!&6XScKA_m6{?u?(3?b&i|9$WVhf%5EM3Lr}U@vr*wv|0yYDP1E_}m`sc0?&G9_f zzx}keZ<`xi?W!;C`JSGlqMqSl=e@pqg|8AD-~H9DC;#@Y+Er_7nqIBa-Q;UZPwn|( z-ZOV%XC`N+etPQ9%*4#p)J(->adU*@JJ;$AZT`T3o@`}z6x`Mv$~EBD^3`cCcYuF76fVw{rm($5o~ zUjB1l;(0eRZeO=z`!e@&-#I@2`#CDdPH%hc?#u4JzD}KpYlxnPs_?$krZ!ggRJBt+ zvu6EeWaK<^X=_btgDQhIh*tH_AphGxhj?Zs6H`SUK?{K zcUrPa`adsGrRwIYv*homRDzyTKi_A*o-Ves-hD#X)@!}jd&O0>#N?@Z-1~2OdZ*Z` zn3Ywrs=B}Qd3@)Y`gm{nowtcQJO5MFT{H9UG*5kE?@X7hEODi}m(JT0xexU<-F5S~ 
zLeFzm^V?*;nu&Kgsbwj-Q%df3+A4YOB|K=|Me97HdwAVKP^0T^-95R^vE|&lCxW6|2kFDPuh99fUiQ_! zblZMBpTFnOp7UoP=h``Ab*`Pewy3!UHP_ob&E2A-*H~n$q2IrC{=WIkb#|!#nBVmS zl*sXrAi_a`Y>-SOk%ThAG|(!bfC36|L?Dxep~3N>P$&<_1hOtvEFKe#q;f&YFj-Ip z!(vUI2*iPaLjk1%nK&MgWkMsvsx%Rd%0v=HFdP~biwzYB1A6eRiG|}4sdzvUNCSql03(8yiDg1ESskJXM>Lr%hJqq#8Vn7P3<)Y17OPTe zc$pyJ@kk)4Ofb!br&*Nd;_=XE7LZgLjYy;6tO>I+IN(SKNMHcMvB05VO&SM9a^ZO3 zz>s7#D}n^8z|a7BI21S>3LKA0WJM$qM+9*=A`3&qay$ws;6aHT6bL0lQ;{$mk4Hq3 z;h{LznouGW%wvF~0)ikJlF5amnN%VV%S0jovNkv(kOUi60U!7G|L=NU$VemIR%rzs|k9 z{LHtUD%tpWckTA}_J7%T_q@&bcpHE9PS=HGEHCT3$E zrsHc{TAJnY^LxTJ_r$iW)H}=)FThmQDWxKRHb2CNmbi3VdyFCAYW&ij8uU`H4 zcYl9BK0p64H$U?8E8O4T|NVW{SC_B!|Mk_s!e{))|9_SDe`fgq_xeid-z)yK-+kZr zegAj-WUJ`zp4N7)j(Uge-L9Ug-q9W1(c9I#LRZJ=ZLHVakG-vId)c~`E5z*dio4g> z$JW<)+h^UVuAf!%uXR$ME_?rZZ5MeJ@+#c*b==e4Rl2RKe0O)%ZFhR+re3Z}%T>#| zYpZT=S8a9Gs-~$zb#;ARSLNTz+^tI0tDZJ~ex|3krc&p7f4$vX+|<<1eauttWNDUa znR=}}@e!ZfQGHUUj;IXCOX^L14_T2DQ7NN(O}xZLm6-Q#3jf%U*8=^}5f&VDLIeqk z3keucfiZF%8|X2;ey~lOPO~<}h8tuja##(c<W(aCl^v>RCdc=^MdPmSg-)?d(Omkcvi@I6KUvVJe zvYQ7WcTs?~khlj5q=6!^p#cj1IItiX4JdT@=z$b~02?B3aF#g{^xQko(%&YEo+n~8 zx5v4COs5^YLr0x&9vf5DbfTx5h#4DXD08LLrrgcBPqH5bMbGa_jM6zg4H#4$AV#F$ zLBr{TriB7cc|anGWCp|nAviM0fFOc!AYq|GzvAMp$mzwyr4hbIPx5+a>s`)z7Svpx zI~sZSav**Gbx?%PVs*@MtXC3($$lY1&@IQIG zpg-nMf1-vQdXg2Ji&k@I2{nY93N;Rdy88}CjXit6+O3Kb87?G}ibjUY1q%=g1X6J* zxVTV(WXSN~P!MkK*^7aJkAIjFd;iifF&%TacJjg~XCGfQHihwXCN{G$%Q1c{{yXO; z^tQf6e9Km+cwJOf)JAPo)ybORC;mCuljrGYBfgs}enl>>yK2QPvtf(x*{+cx!hmb~ z{#we%PplNyYKhFUsOf()KC{@VQheg;XCh;M{vo2`eq^E#6IvTCI&;j&%=q7&5fo4` zI5aXW%0$C4;6e8i+U`ATvgRFLVlK?v=RkaDW3e?vBoyUR6&Q z6CF_z71K}+*&5%`5FgX=Gx71w@G>&NxDP%nI971)G#6bIEq5YT$JRN0%fZN?QVvCL7ep_3Pd9?K?TLg8Rcyy~lUfbJuf2|8@4g^uFJ?FulPkAwOAHlo#gBp$7~- z*z|;o*KNPauzguxUOet@&DC7%Yt7}pUF_|3{3W;iTkcHAi2JY1o{}*ck&~0r%~83T zS-um|mx8y~&(PDRp)dby!ZU>IzV|yi*S#O#&G{A|-xJsNc`@BJwuR<&O#S$`4&Tqy z!?(lT*f>b?&UyHlkn>LQk~Z@j^CvxK)N?oPOKPgO`1+{(=~VxdVmc?jW9UJfGV`DK zpX~E6spe*8s&Oyk+kJ8;?Za1H@ZGKPnZ?}z?%cfG9zk04+`s!BNQ8|bEvI@Bq=nU- 
zq2LHodv7dhV@V6Mv9T=7vM`efhGJt;_hkSAaG0^Jt@9qmYXrN9_-YaU81o5$9x$v4 z9X0{i#snl_?la%c5dRf7Z}Ztg#Vf8_h(OdJ6$zbLn1y0t7G_}d)VHRdd%d)X7 zOS3G?vau{1OJbI^v80VP1uh-JgQf>N=df`J}T zY%ohj1+&3yFdNJU3J@8Ap%vf&u^`_80SAx*6)Mjc7|{j{3<^+MB4P@Z4NU|Ehd~0% z8d|(=q6KwYWY%PMYqKI=cKUEPjkTlbG`=2hXctml=bUD(+{S4PdLp;IXkA9=(6h2x zYb|Dn9LM=>p5_+?3KtYORH!&;ATdG1I#9sELIcr+3k(*610)LsljZmmz)C5SfG`>g z#4w7YD2SmLhG7^(j3LGlWQ;+`DWeS_00_<2aH@0Mzyjyi+6(;2LZ6Bx*C5RiLB_d& z;{0QM1m9(zFv1WhJhxnjW&(Q`N5pI;{w@h;l5C4R8`$|LAC<$)9nVN!^SmRz1|*aC zB@$>Qn1qHPK`PQ$+9ga6ZImr%kJc}?G4IrjiiKU-i_s`6hMMNVT=#lFK}W6no*7Gi z-)Q9&oO@SS%8PhnMXZxNuWZWP4vjg+bZ`>%j)-NpQ#Dc%{cMh?V?>u_D~)R~gguLL z-cx=OLJz`d5>I6ji3<)b%oHk#%SGIQZZti7(I7i;)8O8;^g^<6Ry|lQ(BTJuuKWJI zpHQpnYK@{FuePMRH)ned7g%D1cYIiR%u%(X1;^!}#$>GGFO~d-bO0e*Gkl6K=c*Ok>S9jp z|7w#rtRlwfo3aOZa^O-*k4i`eDx0?$&kv?lhf4A)KXQ`Mtr8hTLdI*xAf9*)w^amr-L$_J#b;Yg>8F91#iQumHNOB>D_gqn6>Qm^() zAvNe|k7?FTFkm}^!1cC(9vG$Geg&2$bwohlE{x)lArNOz9;Zda2YgmsJ4U6QFf86C z@+l0p)_OM9H=_p!)6+!@P{4vUOsx3#fTE|CY&2~L+`Z=S49V;>=CCo6_n}$x#@-5j z>pbWh&dhNOdqx)AY0pp(lhk=)1B*kYkj{Qf??2`pz+b>gpiy*uM%0K_n_7(ye1MFJ zXv@D4h$-BmO`+d|V?54a?|axDGvH*?>CD-EdDt`dPKv@-nYKWG70Uc zNh&F8nPpolq^?*R^bn282S1!n~yyqRjnQlt*HZ7vQk(#e1N` zOxuulHll;(!}o#;Ok6H~pj##$I-rdIf^GtJRWcpmc`(<*aRYBbuk|7K1uf0Vy43(2 z@W#>hF6d<;m@c((TAHWyHu42(5~?wcjXdBM1zMOuFDX*3CCC6xK(fC^Or&yt(1%SH za698>l9%GPN-->#TT4ukBu-VoMH4R_eKw*6YRVt%Q9MJsEn`pRGJxFgW-FP5z!gfD z`-E^{kDI;DOV@-^kiL-8u4qKl8SD^cJ$Uq{Ck)~PPN=QHZ6^i-W zUCUj-db8P@u|3xS-C>o2>dj*Cu0(WBk0;WtaNiy{IQt!(2fWBqd?!pH#g)POTWJ<6 zkXQ&uZg(O9dxa@~CW~;I=>sJP3Zx9zl=Q-K|NTP?$oNpR4glrq@6Xj-)IhMI}HB^-i6 z*wF$?ihoo!oD)}Ac#7jdFp^KH%glKp??@=QAtz1tSpO!Z6cFWz@b4fwkfY5M5PThfFM9BD; zAqTx_p#{gfQr^;NBhIUfPDE#^MTP{u6c{FLKs)4(2}5Y*VkuZ2#Ih_fdr+qqA!!_vWTd>}dv4bUdocKNV)L{Y$u>x{HJnwd!7>K%_#q>>CH>L&bS+N^Q$mVF8rTLJ=yG|0MmzdLp_6QQRr;@& zxar7ueA!AdGk|Y^srowR>(R+GghE%r!!Xg^NN3C#Ft2OoNMUNmIFnd>Owbjy|NWPB+zCcRpT@74u<>tU>UQYiB}@7(8~Avh4ezrj;rp6 zJT4a_0W(>HbYgd{`J`!Zh6d=Yu|2Hz0aOH2sBT(;RYHNGs%Di!Sz0uzc(G(cZ)Oav 
zv^Fozvaz1WL{td=9=r>s-=0yeR(ws}Y@Uj5!xW8iM<|2q9ZCCc`A6J!7?F&@740%#1Me7u zyVsQylORw))#{1Q8JlC)KzBokd?k(xb9k5xLlG;IPZQZSs{9HR5L+z76z#&-(L{8^ zCa7q%gwpDR9lnUDiQh+r97@4c?3{$h42Z&h9&GgxOTvseKof^fy3=Rm(FC7V7>W*3%e3OCRhC zbW8{{RIq3*x)1v@0__WQg|BjPza$*e$6TnuT!l(gTofUD2*WZC*Y`gvUW=MrRE^)h z9+xl?5<6X$ebM?FcMz~0{RTVLlup&ct7V&OZRY2vqWnw%m1n7HpfX-o~zRLWxnt@d_6>GAyKH15}rfn@aFDcfEv-)cOaqmGcEo?^0O8Yn!glLLW^Zu zry}TB`n0ctGs1upGj+71JG4k_F#TKa75_W=#0M!Gwwk5$!wgb=UzmXOQv`O>mziXZ zo&0wqg<6Y>U^&OAmgoOKQ5|JomRNEu$jY3A7PbPRdw1Gy@0(xBiSVwZ~5*$t;K1@3-+)4@z z`Jb(U2`eMHFxO^pIto%^cL5=cghbk?T!ujAjIaG3zCm{eitu87xe+sUAp`fAx7;B- z1NX{%UiU@# zL!LKfsU_k)J>?memOwxO|My4D5ww8>rsry*e|=e%hvOrV>Mn)9u~eu~SQKRGd01;4$7hscajvJ&Bo(U+Hm6tn7sRq@w4FeEIYwe66tcdAschLxWjH_GN=RB{E`n*V-`$;I(rjszI%V$bfgD3)+i*=~kER+7H=q%)zm~jEh2LN6u|{fMENyx^l{vua1$S#hz{!MK0Rbh8k2i?!6Re0x zS!zw1oC#UPY%B+=(&sK7i{roiT#}$g483?^V2OgGTG?!OV-vF_jbl}cY{+2Nf1EK8 zEF;bIw5>(|erg&8Fl+uRUE8_PX1xq{T|1R6vAM9Vh+Oc= zkb#u+fLc0f7eH()<-4PeLze3R!uGGp@so?$7rk^N2&Hb&(n#4Gvq0(xz8PVPA$9R4 z6eVvwyG!|-qICq_r2>IBt@{tGyT65fUB=;UX`HlIR3-;47576qE*blNAqR;ia@zi= zTn<}1_B8HrmXW%Y^NLbAp8Zuehg%uz7tVojaCI?f;!5UVCfD*g-*Wg5pi`(AsmnR9 zD4pZkU&`pP^&dz_1g0#ypi{UKI-1c-Ih|-D!4Ig@$b*T#q=SS~I&FWntPU;ygLOn@ zB$|slwJoWmHjS3o2}=_E06VSeMiqPqxChh^0hz$a_ZEnjpm1R7ALJ>R17L`|e_)=r zDk_8x&3gh=dB1QN*$UphLWKB;X;&ymbuMF8H(#CbqzzoZlQ&@$3Ye>?YVnXrC{b3+SVjwKIj95N8M8lZo8r~}$6a8ji9{+e`hmE1oZ{jY<-zvih|w;{EHEYT2fI|X^SdNkJTS5ir#MeOBGL{SHzkgsu%ds zZd0-d8_9H!ps-epSa)Y^&^O$~5H1QcJnVCu&ld$l7emlP#c2+6e0?#c`aK$2&j`&5?IoH+81-nR%ps;hZf zMgefPYa%iQMvKm1_V4~&u&lf8GFw`s=d=M{`5AVW|JW+pGl$L^{tiODMaONJWRMh$XlaEV~ARrmC_# zbdE(QZiPAxaN^BKblyE8!I-S$u01gfl;0B(oi6}73{+vwN9;J}zAIUt3q;4M7asap zBs}XVGni^>=rC(3|EX&3l2}KP4Vzn->=6CfCRnE!yF=%IHX2OJhPYhjAv7O4w~}he zSk#_^d4&ktceQseVqSlAnFZR+uOf%mHZJA=pwG1o?$Y-{Vy1D2%17qdB4b`w#jtKG zpFG4}uvdqB|Ieu6&C6Y&LGItKF~&$K4iN43lB81U{xGU(`6kT97YAez(DF(T9lXvA z%_D3GX7lM{y2zBb($HgnE~yzJ7S;?tOu_J|7ly&)%M?)oJR(#Cr%XZFD-j5Ddp%NR zy-fB7V-weIGmsh6{_9+jT<_=kx1dt}nARD-k##;BK`?9lUn`U}Q>~X`#E7qi^YX(x 
zA2U+XKiE-M4trPhe2sid?0JR&JK{*g#alfZYT^+cwEa_j+VC^946~jIQU+aQhebp|B&#gzpkKcV%9&!IM zn2Z}1+k#O7%Y$;3dj0ly^WRG}DxadaiT!$&=6^ke;8WN+om?$l`^N`tVzaQp1Hp2? zR25kfl{0JL4_goSdGpGf6sW=y%6{MUY43T6<5!qPwMlH!Ut+uWr8 zQtC~h1xzdTSwH&&qykryJ*cVyJ>;TGEfuX|cbmmnGjAgIC!$qI9Pg|Y zLq_{8{Z++^haukHKz$hOr1lT8G?muL9)U&r{9Ylx;=(%6B#eig$vR)f{jtb+Q2CbO zkqJ$O?G3;pfwS%-6~aL$jH^~2>~6f!1o$qIhaa0KPL#3_TWA)da^cpZZm`%`@3%za z^5>UG^Jh?0!LwZsG>a$0{@B7s<=|OUIosgEONV*MpeNI;$8|s1h!;K^mDuYdJ9{J( zGkL|Qn-_c ziGe0xAS5l`b$W-Rq>l;;lzMp^i1vrWkjLp)Ao*Hbg9H+#I%@-KXvW?!Q%vrfm*ll= zduS~EH_4Mp!^8&U11yW+%A1pHfn@-PfOr0yCQ?vOzlhV9Vrev%9oNG*O&PE6x}}^) z7Vp+F9tlyMXEPW_vbbk1nu5T?PAb_wd_KZUSlZ$ zwqHOq@ z9TMnuLbi3eqrnO8Tm_5hO`QHfJ>o=w|$EmATRgM^C~`-dd9dXw&dGXaSQwD)fMuKxFPoP8-0g0d{2`cyK3N zbGHIkuBjs0F3Su*uTQFtAVuGN6q_tfZ!t(n%7mYBU!5=&$pKHC=UOW(fp`iERdNwC&y({Z+R68OU_^4u$7#+C6rMC%c5{0b_lN)*bY-(7pRl5-sw>j0?Nai8ll;juW)BOx-junWVPg@wB zn}7kz61rg=|9nF}(c0)nG&(=pJUYMVt$KdcaXox9WrZ_Oxw%K=e+KXuD)9{}OVSxo zc6^Sjw=Xp*et{l1WIrP_Z(hoX;P!Gn;@77H%CueFLZvr;RM7k2$Ph*>s ztIZQXsg13vkjw{gu`>#qIaG&$b80Rp-6b<#H$Tl)sMIf7U2UEk^|_g&b%=`R`$9qi z-{hleu?pm{lLZ#lANPvA{Y7J6<)KiuV1%aC9}riR;7+?YCL~^)mUe*2L9Ah2R92X) z0_UWnJ-P^ecu&rQi<-H$$*tLJzGkl zgRLu20CW+)nhfwm>>lI1eZ?as zF7+4(HSU3844%tVmgaWWZ16=rLZjm7Jl=yp0AJR1UMocuza73wG2*D7G@ILn4}JH# zd-}N_S#M@jI_0sxhU>@cHq`rbPNeGI=2AYc5GJRa35k~wUUoaL@OCElE&KFYaOqQu zArc=TCj3-qpajBqMnXy5gTS){1om8x%@yF}Oh7(={Xu^>pacW92Rd~|fb&ECxidTR zG{VN(Kf>(Tk^w%Y)8K+HbkOX6PItT6RJA-2?xSl&C_BuxVuZGgUnIc=E0!Y3X1NgQ zAihxMl?;aFt2%NAmqH;R?VNsoHS>S`?B9V!{>(o8E_xWS{x2F05k_ z28VvA4v!5k!qS4+epV@%b@1HonPV)viwyc%H~)pbY}pK=#{%M00(>x{Pu zYXOnpWTMOa90lcoFzd}+i4@k%OQK|>F$pP{CdFkt%_NtyWPG1B$wxxz zGhroA&~uK#(bH!B8y(6szP#H0ta1~vs7Xrf1anZytb4^lL|W(!2_J!=h?>7Em)Yw< zDa!zgLEqXnoM0)KK6h%af!ogn(b-ENDs8{nqG7vSPa@CfDkQ;hLy_MZxVmfqAgT7B zQ>(H-b(~K{71$~ZhFxXpjmgs5>d27=!&WF3K)6tkvF;h;0&=xJt^TsMV1Cl_kuzhZ zc&RLm!b5hv1`GyP4x0w^@3TH*f`k&a!n!(eK*TsIzO2Hpj?w7Ca&xf+d}C6TqaueH zt8x;(l)Xy8p$pK~i`nlj1|3V*RxzahIxfc6%%FNnOK>SlyyBS4&vg5m8jF6P3)ik? 
z^0bv2ka0X`5n4z*$F;y-KzQagH7$T$C&n<3p~bQ8o&-n zU(TwxgX^60YA-&gr%&Zk2Cb3LGyju6T4guvW`2=i)H73}$>37=IqI~MP^B?4gC{8D zDh~pQ^X_#-Axz#5)hn>kQdpf;ZJyucH3+Wn6$*7)(yI?zi89@V<)B~y0N+nNW7QOx?0`M9rF(?;+H@Bf|PjmoJT-dksSvD1^sUL zfh72f#jGtd`F`;$WeW(0?|0s;SEmLWQVlID@tzlw8l|IL5I9VYv|Fxtd*!{8YJPtE z-=H~=f@L+h$G~<#7&ns%HW*DH%cU__sQ>0Q)GZkqNx3N#yM~52yam4hzdeZ=diGjo zg`Df`3fjSd2L68u1W5pD&9_j}Td_sLmI!cTNEc*+@vW{^GH={*~!Rn4g?J;##IQCehfrWr1RsH}3J6wQ6w{ppKL~Dz+-blx_jsFx6Dc&$2ScF9I)qyxZhuNEH%koRLY5-5G z9$hUHVWldWlQfv*TM}#G@7&j2)2HN)DgLmm$YKgi{96-ZrdF>}z zOppIW>=Mgcqp#A2=s694#R4d1sIb*~$V`2s3DE3TY-P;oyvsJ_-k2ko5MB*K#l`}T z7W^e(uQ@Ih^cJh1Qp3I-FofvSq>JW7IUiPa#PeDROnB&_OoC1|4LH?eAv2(i%FpNo zhr1lUrRIoHYy^p*EbN$-6j{D-gM&7r>=~A%2W&yW37q#ae>x@K_jKA>H3%F294f9w z1+9}ami2U+ux4%qbmaskMF3%6kPZZ0W`y?B!AYGdObo#$JZc6gu$R0u>Wobxf$F4} zZxq-|3#2LwbUO?e%{pIh(r zJ+XJ}1J^|Jc+$?nY;VS{y1GlQhQh@Y%kyYvr1VrOdvu}6fdqOJ07kk9eaytRnedj+ zR#Nc0kQH+>^>r3xIp4@ud-yu3RP=9phh;|gIL=Hk!X*xp-wQyh;6B~+Du@VqTr|uF z>hy@)(YRKR4Mpm4$KV+b5Fs0({k{+?fRqS|@%XpNAFU>|1bZvLE*dQ|d=Scd=tBXH z9PLkr_)P*bzEIpifITC3tB&QIkRg^3X3EfdFmU<)QETP|&8!HyRfIaa+*&&|??ai7 z2ytp;ibp|sh%WhhruCYl%O72V#uADT^FCObi?h6_+wdcGTpKM9Q&@FAqA|M@$9#14 zGx}(f-j$Gh1+T88L{rfZz)(}E;K0TZuzt{lA{-0ro2AnWT4dHOwjbM+^T$J~HD>}Q zLuB9yvJw1LC7bT}N_$Y@E|cNeo^QqVyGcDr-*2gqaafx4THt)G0N>u*Ad*)=-o=I@ z@Zfk>RFw|fp9VO$R_fZ<_=pd&B#Lh-9#$E}gUf!7yRF<~$Vp-d6JMtxbL?q9&-}bwCemt8hzW{U+q7@j~ zmv&@iL)`GJD{DEB`tzPn;V-AP;hD=$EY_gNWgMvYC;e*aWJ?+5yrliB}WT}EdvWD#- z@5Sa9$aW)CSKcH$F0hhSue*Kuv46B*MLCmaE90L7PfhclRiQei6NF2>p>7@mbD~8o zfdY`|ur#gxgzM^4O#}&S5cop*Yn9J^QV$yxHa!PwDy>gnfaj+|f5ta9bu^Eh3NH7@ zO0Iw?2!(h9JGq<6{qwda#Lo;qNeIWaolAFwd$zC|2t0?g4a7GWw&PD5)p9M(9 z!&oy2w{$PZMSTl59^0Uh=D(~C{7!L!4aHTg#W~ISiI**&B-+ft`eG_r|GD0>Z=Wx0 zepl1oIE1>udU|BoBha z`1BXJ${}N?X5938Pl@B-NAEL*{hEt|WJ>9;Qew-09>`SKg81*`yd_rFHzVhOSGJ>o zC8JaKDK^6{{I#7?B}d}+MjE-#zZjt6+=DLvaOo+3ij$y~5AMHLa1`}E#41K^uah4p5TTp*sGxR9vFctuDaQ+^+oP_%|4O-2|_t2{C1Y``ui_Ib5Sw)haS4RqJ*IZe>oPj+(9>exKEkDZfzom^L}`wQ{SAm3S1{!6^uxZ 
z!GxC_%ou`Tm(lm!`9IuUJ|C-KVKD_2g%jIYZS|sLRY|&eBuZxG^p=Q_$s!}Gu|8$3 zJa0jWg8H_DV;A39{=z!I%HQm9N1N?)?5Mb&A`9?66if{=#jQ+OU0VbfW8b?%+N8UB$OLrf$L0JG2{0=E zNpvTh=yKtk{8pJ&PUt^lAGK4v8s5y!LIds&^`P%VygHM&sTv-+0QfAjAw=1L@cyP% zZ+5?=9U2zTzed_C4isx%50o1K5utE0rFzJS3PGb!<}ZB@`atG&4%&$JL|E~kwlHP;s@oA}zw9N^UqgyEvBMz_M@!-{1@N5X&oei&sxmic8Y( zLYNWGo&FRojw9+2l5c9k3bX~MN$aWeFH*S%z^jX&D7lMQ|23A`K zz}QjlZ=A~-_DOGHTH#O5npWy1JYoPasj1m~W_-mg(pZ0_OEE)4ukjquvjbSO1g#jB z-+s?m!rEam*qhpDhXn+cH;)`<#5C>P|y0I;!e7^h24v@EI{O_E){0Ti z$fZ0)fs7-Er@28o*{Bp|DC@mQSKiRta^j7^aghL{xOtnu;SHC95S=Z%X>$0rQ0OF2 zs|7iKn6?w>s>h{y`AS~1s@kYElvJBchDoXU?h`7}+wmM0R6Vd@;3K4(p~VoGqWC=~ znDdPf2v8_oD)Q_&8cjS&wfY0wv>o{MF=O3FMP$ zYy3)~DmF@oSru&8zVpKB)X{_3*M{B19S&T&@TRFD5kJPO7Jy+krF6v-DV{HAN(GM3 z&9$D7>_;n*Yv!9qc<-&+mO&*`GqFxb@=FSh-!gI2V$`sFIYyL+<_WRw6h>Pi6n-Q) z-d`w|-G1Z|e%73m5WFEqdFGxSB-Kgi#u-h>#bby<%66fFd(QW;ksL98WR&ce2Qb;OBiDL@Mj#clA8$ylVupB8FVw|FA~n)7aNF_A~> z4C%u#R=SoKN@POxf`o&Jp$tS1UwW4ld2e1RghRV)W@$G#;LvQ8tB=OEW~d99YEGz+dT5{Tu@S=$y~3+#H|au`II^VTMq@e1?ti$Otuz zgg7D&r)X;s^S22l_~_GrQCCJ&_IB!a7-r7i4zxylJ4`kmdl~j5va`3dqw;YO6FJx0 zv9#V{nRz+blWDP*#l9N=f*~agv9de}ln}Pr@MGZsxFZCj4vx647m|f+h^rgPv+mDk z5o?(Y!U`uxvzpXvPOfgyl(@-^!!qwMG|RNN*F7@YfpTCjZF>nH8kylfn;9@NgD>5@ zbs_J=0{)Dy>ZzE;5Eb;FO1T@H}7C|A57=Dmh!v%E)7XRg0E~e<)^=Z>uS4 z^7*F0I0)8*R0?Z3XDLbHzUv*MQ}{5<`HA9YGi^?55u(>kq1Q6dYZnpBIPG&y@f(v0 zO&*gr;fMx<5Qj5aAkkN_fxgXP(Y?E#2EPJmqq)W_Y$|uWQ5WvaLV{=WFh+hSR?k|N z&?_>+c*ihJ*iDHT!U^nC!~!h>_zpHwU@p0CqE`Ob_UI-IJp+V};+BCQ5;3HKyez-) zb1%*StNQaJ+^`3_0RKwr2>gajeRLU$DX(wnqM2U%v57No*7T%i!tqyt^m>f^h>FjYCyHHD_Y$0;{nqZlWZ`B2 z={g`TlgS=I6*-Mg-=|V6^`Q7$+R_EZJynS}=%GapB#=;U+++pGP2%Tm7a2^_SsnZd z=v;nnJ4BA+25bM14NiMnfkeRypF|&hhBK!PK7Keq41Q2cHHG zDx}aTOl~<%i;%H!>r_$)0m&&?H%!rtcmF7i9NNdi(dsrW+B1-{KecMWgs>E0yPCrt zJ6eCAp^5UVXe0BDeZcuXQcFqDcGKA$YxPQi>Drx#faX@Pq<3tW2YbIGJE0tk=kpj3 zPQJ@>^eIC<<(Hcb&+9ppMXwwTyIy5uKe{006ZGa-$0@ge&fjZ+SVU z-%r+`O1wwB)g)0646)&$VpLFu=o<+NlMz$Sb=T$^m869yP_;yO>7*iY_SvJ91DIjPx(6X|&FO^+@J%wcO#gf&N(iEsN~Wr3wfhkY5r90H 
zz|2c?saaZ`*SLk^Nn4%d%z$#Int_EToaB-=V>eC2C>CuHBaQwyXdoEPQ(nyw!X# zOi4XOXY~*C#QRTP?0B*ubEmJnIUg{u4voq5AQe5h0?o*l3{6b z#(k25w8>sdNxP77Rr@HCNBJk zoo(EdxlYb9v|b`N^zw5uBkEjKSK$DDEln(;L)u`H!-Ke-PdVnw9c8kZc&w7fGlLzS zJ&7!?2RI~-X++84E3zSPeAIq+-nCRKjWY?_v`3ZYS?Ca#IwKw5YOo&qNJ$dPY<2IF zHP)}BZ$>%3NZQv7cR%te(w0&T4?Br=`&6F+DY%p_>lb%TtW&|oSYk)(2Tr|bna{Tr z_aPy}vK{r{rJlrxzc;cIFp$T-O@qO!Xwjs22b*2)U}&yV(=7_zB6PeftiTh(MEbw457%3!&it5!1KCI zPv#TE3d{fzPu`&PR@z`dy7mz4_V;pu{vkQr_(Cn_>teS%k4%YbxS= zMq5Qa>w~Kzufy}d@VW07WqNY{JQX$?(u-R~BMXg_{KCSpWBZMl(F&nAQ!qFg(HtI$ zR0;0VLDclf?9`MLI#f@{*#fhPtw=1AiKWwmxA{0-HKeG=5HFf{W!2Ipxn_hF1;!>yvR z7Bzmx?)Nxh`Ew}-@5aPBS#=UpzsNy_y?LW{!Dkj-01~Zd)h>*P@-WtJLq#tc<1x`S zb8H9V>`xMWg0-*VJOg4{atrAi?qLGASN-rzbV||JKn0)K2O&Wo70y7e!7%oIG2`nP z)0*@G8~bbYH=8`o<^veLJ^)dm^TiBy`mk)b`S3PPekIM?@omQF-Cs?3mlNa`IAkZ^ zgc%n_iq)Wx5QMIGSd&Vz&ZHYhE{S&%y~6@xy8GP&t4;$R>cXni(H1x-Y=%<3Ue49#baA z4*x)Wfr%kQCu+}|6>D5<0u(6b!n#Dr3&^Ikh(4V^UWo)~KSSXxq9taFeS{O#h=UO|v5r-cEGiPNI5)?PIQjDB)G<22e%jZ{$XbcI-woK{=R9Fw4 zK^;d5yU2XTU^!S#R3yZ?JSY`>o0}V*n^WqHJzM|KF*Tx{?gF?oJA%un6H3MtIW7nW zND%0yEPd{oQJs>}VxJ=mLyHc1oU^G-eYs9al#h2xo*6FcTh=!=K1GR-Gxoc`dHl8> z$LBF?6>R7zAe`pMge!c=9p>xZWDKc0d zb$0*&0H{zo0X6|O0mB<|%*oN|qgD&VB3DCt@5UKk=o`a?4I-!@6g#1i&InT{tm9oz z_;EZFA`IXYE-*l^(K@@Au;Js^i^dz>#LmRtF_V|nG~9;VR*t)G6zKGixz5pvgI&(- z%#5zxZq!y=ZlE*O`Bh?zO;loANC_=8(XKM-C`h1k86aSQKs=7`P}zusdUm$jxji}v zm&qH|s+%*Fbr2(U{+vk==gTq0LGqv%l^|JW7De)En^{$5^E45|+v#-rgLp-l@q;j_r5YSW1Mgshja&T5s(y9s%GUWU z$J}>^iJj$czGu+3*vftouZSST?FZ2_`$5pQm&vSsO#0Na3TIq-JqU`>&_H(<&CpwO zWEu`JnvoO3W==Xs5obhwhE6|-5)p(D2QhLGw+inL!Vbd6UJ;G$o^jvlLZws;Fraf+ zkq2=Trp3EqX$mH6K>n=iFsP6LgnolGX%ubWJ+34Jg_4&o8m2-9JefmBKwWU@hqfdw24ItB?96S4(M zTFn$Y7`uC!(FKq&TpxswbcsoN(Lsn<{0ErmMu4{B-Ft%z<FJ&u}BZHSqQ4nnMR5TD_W6S~}ta)wM<$wUM}rGs#5CtGP9@{(NX zL#{M;jX)Y{0W=l?0Kf(sSfIe5!2$p@+|9W%;!3?mk#=^&AnKDsN+kmX5GG(?fr5o0 z2g~G4V?NRxM2$GZh~^-Q=wy7^hDAJ^y?iG0+!d}|q{YS{yl6;`O&pchO>b-t;%3#k zVX7R}hwsF}`0+Bckr`E**Q2+LJk0A=$}WN_^O?`+$lM`!C*v89WF<46-7^{0>E`n) 
zmQjs67MY39ZZR`9N8EnI$Evuev=kr2)TrS;Ra&oTq!G>fuMo%_0;$kx_|Nkm{MFzog^)941!CIHso95;e!zu`x!l-jKwk>Jz)z55mL_ zV#X-d88RZ1q8aC@m+y@KY*fs0GmRRV&AKJ})}GX!t{m z8Tb>x0Kx$QVQ@ee3B#Z;7z_x70%0%^5C{XpfKVV93W7)yDS?>H0c8UO$^SA7Q0W0# zR^b5@&VffhK;V$wfDTH((f}0HLA97?&^zFSW98PMh?!JkJG9Mo*NY^=eNrO@tzW-v zz&}u+1=L}{8A^u^^krADNdsl6Ad-Z01EDX|6h_ArP|)6!Nusqbw~|SAXJiuB&>w;l zGAZllqrYnco zbV_Ss^dYUxhtjWP0EU;}+PK6~W=K0!VGzk?7kcB$x9F=^p{M1*@oq6;M}iEXl@&YY z<>rtl?GCQ+TFQdNlK#e1qzmy)mkvZR6$B342CR5-c%aEtD^mndC0X3zg^%WmG}T9; z-V$>SYFpu>Woy$_ohmbc$Ps)q?Gd46NGJzu%4`vz04;4^0iVXC63t`%nk%C`2R99& zHP9CB;6`E`jp}yojBWaFa}Bsf=qj`9=!NMgji0!AjYSXZSBnHO7<74?x2Dn zwCQ|*WTJ)P-`hkZnw~He1xROS^bqAVq4YS<?iF*xS2nySk_qqRHXm5T*xvaTYW`3y-A5#(j}=sKQ7%xDVPw2~|+ z%H1&o*CH`FjbqW`f>+whhTbNGBN2WnmQu_C3xPslXD}r|&p;qY&`R(E4hUKy2Lcm7 zLO-??LnR~;VMT+Psn*GcbVTUZwri?+JG?)7oW!m+?BKz6#txg5MnfFC1LZp}Bc^Te zNNJl#O_W|y?f1!BxA}s)_|4Y!^D{&Z%9=$ znz+r@hz8KLxiQ`VGT2%RIYLU@1&TPqGZg_(rZ-SzWT7#TB z2NcW8@IhrXd3=DXi4+|kkj)xEW8$<+-u0aqqA7TA&J)po0lnxo}SBULTKiYMBX-Kpp-S(4d`+mSsf z?Xj4=29zo{WMl(T!ergVByrH9%STL@X2Z7gV0bTVK&C#XRkUxt;$b& z*;QP)e0BW*kNbe^AuWs9i4p+RqBt&MHH>eEqhNE0hW@l@wyr5;G@n9uiUDn+(R{LA zr7>%ikaCK+0v&Bb@ydi~!?R`P6_RCZ$o*h(<*zX~*)1e3wTcQM)p$e1mBk|WKywd; zIpdCcr*Vm~;!mN4sol~h#1svL!v9+nFE2*|wdOIj3|s6wJ>d&~-+!E7 z9=ATUC5f+IjO?*zAAaZfSFv3LGLxv=X02~PKk|Us{}=(h6E8~LwL92WqCv?G@Bf2@-3TPY39!ISR(`j(z)MV3o^pa0~*cl7_CX&-M8X z0)esn$=2wIL@Q7UV?AXlG4#-uK0Iy;04jU9P2 zCvy*e8pWk!^LaS<#7MhkOPHqhPk&-mPkQip*Z=aLAzpCUc6}$ zO+o`}nJ9plkT_E+>#aOC!a*=o>a=_=7(86(+IQHiE}1>Uk#~rT6G4ircfiI-Jf@S5 z08Jr?#vXVjc&eKzMPVhS7#b)R>qMJaIh=EZM>C-0ETwMDH`^|uKss0BokdTVxOdss zn{*lHCf9c(gBB}dJ2>+`+CIw_m%4*7WpSFwi7I(_YNrL+~#CV&{Ps1@! 
z7syY-CL^0H)P&D7_1tOoyGJ22ZyrEQ_HS&j#(O64!C6MFt5&7B|2gkwSuDaiHW~FK`FEHZ!-IY|Cpxu=4>QvtA>ngVQE>tx5k4JTD6)q_yGwu> zRM7l>IUg1DvF+7>FH^eAF7XZYUKM{yh(0FwSs}<@tfaZP$CYCKDp^vlD41DVJ)RK} z9JNttQsH?8bG?jpelF3fLR%2CZ)#uDBd)>d@)d}`W~vQj0Hx9_n#?g42d<0YE+n1; znW+6o`sivW#VWIjbubugN2qmRTdA9}TjbhK_N&Jzpogt;sG{=Y#siEqGpoyK4&`p3 z65zU+mpR7c21`Ks-D3)%z*kKvc^Jig4szc$iKMSU zD9#25pyqSHXEY*@Fk5g6JArT?k|b-!-0oQMf82B>2#&0mW1CZnk#3 zM;zFWoLH{SLJ)$?2JDqaX+lO=Td*a3n5=XZPfbLthFJq;##M>NSN>!8BDBjzF-a?; zKSIKG*mLvH$y{E}^0Uwh0{Q~j2d)=3gm>`~VwVdtAnX4Wi7>Ta7+$FCe9%N-S~1Erxk)o zyKtWUl-URF5t1`*9y%6If~F&_K*HgY$(MI^-lW3@2K6M6`=!LQU{!yh#X3MS8@Z(fL! zW8>*nRgz7Vd%;2CkRo`GxYD1zzi`s4s-&D!9Tr~eIqk_IE?C0($d)p91wyHoL@T7) zx`%dfz;n^Y0bEGc3^IDvIeEYGLFePup0VZ| z4@PR92NJ~3g?6wTd|obc?uB*3cs2fjo@JbsF1109l?Nm$GpT+-Hj?KsgI-XeXC(JF ztNffuVTtLcMMy=~<7VzyG>NnFG5W}?XyD@j89g1ixAQ=(bBkvJF69nl*5byx)hV~Y z3xgDuC3U7!7tN${B42X&#BA4pU|c4b&iQ7iBHs2jBrVbRz^>f7ru5Guu)EEBn)uVp zc{AsihR83<)`e%kyWsWDp=De2hFMBaaI;Di7<0u-26_mpVQV7y^ubAk3vg>M^? z1?qt()R6YT$2mg4Yqf*eumWJ%m?7jxOyO?`{=+k#;&T^a*>8cR6UO5LiBLH9ubpn9 z1p1pqrBT6*ATLHLJ~r^azn#djSBR2NvB7i6V<_IPWrA^1c z%*O`JK9cqtb^UC4obnJ9OL64mk8ybQbur~Kon|ao)F*HtN+IjOrZisQ zuZ7~Wa8qELt}GpgmtP;H+Fy8ARx--eBw#IJ28(@M?6>JNjYRt%%=wL{=vQ#`04&py z72XrrCqtS7%kf2Q**g$epd_&26T~qK&f21M>GwSqb7#-o#KJ8Vw*M4hu9{^BIeY2? 
zq2G71AiXNVm}ah=k%QylSP|ql0Om|d2cM|=`O-E6`a-n?76CtpQtB|ESb{I`ek5J} zIa)XkrW!F*od;&3_g0s=E`-1Y^p@-r#pt4&?jld=WdZ z!MmUP-+LGAg))6Yx5rMV%}M7q#3JnY_Pk7M6m!-^W2yPMO3e11D3i-xHAuHZnr* z7WMiEuI`%LCQbc;(Bl6io~XUBCAXzL3m@bDY%n#s!k!F$u#Xf0R&nbYR}c(P!Z0VZ z2Fkg;Knxl6B9WlQ1LuM;HPxggOnK!C+_Inux&YV0?SY@l*QgM#(+j&ax_HuJ7{U^a z@R)U>a{gWO#AKNC2TPgblHmy>GeIw=p<1R9C#iuP=~C_XTzJp1}GnWBZ076h@)_zI7U)v|J1Ep}-jE`MktT2(EH9UMAreduT|S z0V%ssu6zj3cZj&qfK>JBX1*eA1Xd1BQ&pcxoIUtF?`4%451vuND>n z*`M~P<`m4ZuwnY1wawAZWBluO*qz{ZoG$)wJTVmq!AJ#7@eEA+NkbwIUoaeLAz#Pd zk5oU7a1A~D!7DFDUEl^kqY)U@*ovnln(F^9L|`SPH_FJ_!rtw9IT*vy^0$Hq2%KRw z!r)SlJ;;CY)GwVnV2ozJ91xC4J(3P+6fMiWez7;8DLwPTDKxx;LTC^0oqieb_(f9uuBR;oaiKe$Z}?wB72f-vzJ;& z?XY=4Nvu80r(~*BrvRCC5rQ*EMAE0E?298OC2W}Ai~7vW>b!jdKE&qyYHsktnS3Fw zexIg?JYDs4@RP2fN*{g+;`wp)3IE@Jb3>GG7T^FG)dN z1i1p#&}pIzPMZ?YneqdQU-oJ)okTXRsCl=Mv?8^ zMA&e3%J)j+pxt<%Q7`BnZ9r2V;AN*8uPly(dX2N3UWG9VrZgLawR4wvry7gR@`dG2 zcSQIko93B0f8kql43y4m3TP~rv>XD;8GFyGxj^}KmA%R!e0fwVx}LNF0(jF)!KfaNZIiDlWO~R4PnlQKoMZEq*bo8u zzRa)`yU`jE44(X{QXm*yZFM3M(zJG3xxrt5rQnFJ<%Qd8@fq5T-?AvJ)HGICc`Xk? 
zhkN^)2dn*7SLO3KrQt|859M&JbPWJ)wIB0j$~n89Ip9rxoz-oi?f}w*u^m3grvbH5 zQ#2##lcN|;kF6eCFB__GJ#+YuHK#=Q7s9C)~kgoH6_rQ}{~)tbhN2=vfOX0z&) zAZ`e|93zphEst`;Lw$BSel8u75M;U;RP}Kjut*88KO93?WCZkINbX~%{ypkFeJGLh^0%JK7WAE zsh95`-o6DAi-8Sh?P9uG)5`lXp0N$EvMF93*oAn5`<`uRr%ZsPojlNK&y?7)OgzQ>S87dsHtp?8)G~@0|mY z{1>r67*)C(s1+c5Xeib`EXYD>r>#w^chCvzvu_7}Gm#Bh<&NrD@nRH|h=tCl zEpvjb+36N$@2D3s3+7m43GKWu1Ns`|alp+j2m>lMbUo++HE8G$Vl6??t*-zzRyOhe z1N;tz@J9h+r4AOz3CD@h|2#`<20*=cdCBI!B8O5f<2oQKUDNMq=rYlcMmoUVx6?>n zL9zh)gd;N{G^9pF-A9rCD3>60@M49=vAPJ&i>6(<`hB5_?n_81XcpKUu5cPYQ9gmD zn6i{fJ;Pw9+vJ)l*K;o&$}2U`$Z<{jON+T}i`=s1*#X&tSD8#VA4YV)8cD2)RuveXL;q2 zRw*I{SzM=>MV2!|Myc&KNB3By29%iB{6EtWr_Z%rfdoeOUNJk5wL{B|xXSF8a8#R*JbERb1JtOq52ctz(fTO& zXrfzXbq#?R|DP3}j}JE8pKR4MT|-ytEVKB(I_Bh^$bkG~0vcwdZ3PX)MEy3*y>1Bd z;%>;NO=GoihcWT*H_*_c-o5WC5@R&*#7Tl8=%`Gu(#c@UN;7n(;G*z`;k~Cj_QhikTxq2@;&ey zhWP}J@Fm!y?m*Xn_~H9!_UN?-#N?4h8hjx|!3iLDdT?y?TM&A!x#Ctj1p**6M#wp6 z`&%^X%!w8E+vLNEbuVtlfVaWj@AD|TiD7&Op2QL3f7{5DSddq!;IddEdUpDOE59IY zW4L73t$9f@-OQ!IGL>7ZjT$^wqJ0P3fu#uVqe6uDr-ty>X3j0k{6BJ6i&YEgbppf0 zT970g`u}_DcKg?xsRXY2wY;Ll1NJqhi7}TA!ONl<0c-gTp!S0Ukng)$!v+YXM?dDU z&4K`!2VgJ(#7vX!tOFDtfI(4Kt8e3hNwn8JMp&LzOVMj34`Z_Y;PRYBUAXP};kMEP zB`A^PfbI_(3Rf8>F>T{=J_(HJFv^%z`0y=8q%Hvl8roeoq9WK(_%EamjS;(C%l|-# zeH!XBkM#tzp~yl{;OPLu4m7gI5Lm z@>I65V(4CDi@8#@Fiq-((}KXx;X#sdc(Z}`5{9`Yk)x484Im4P?6|_*qP&2T6=hMC zAYGg^a~RGd%Qs``t#&oVL5kbf=5w4_LKPz63h0^x$z5^Fv1fAgB&s5o%6%|G+kKJG z@02{-4!DVU^0u)7x_-jaR7tdQ02YqX!Slq*rZG+|T5@J~%JXlA%vx!to4aa~ix@eH zhO%gi!Wn9}V_upI-<#`F!4n&T`HVv*f)Jm9VYJW)@f^c<-G$l4wB2YnW{dZS*hEh~ z@47?d3z|((#R)%@Ox^=JY5wA=JV~3uri7Oh@`-0F2l z9dsjm>t&8cb!;cn|O3$R0GG;N`a|;Hm&Iy@*5090QiN`0s@wLd@`k_$x%L8{B{16VNaoOd>6|Zg^!X zogkIJmGIhz?v&HqZzThNgtb$&M^Gqi)Ol4Lih22*2_J;3^LFDYM@{!KwsH$>@<*HW4I>O6-!) 
z2VD)8&Gm5~1OxH0Wf87T8KNxmF5bt7x&N3b|6l52rnRScUm_WHLBPNU`o9Bn`^pZVD^vbF2@N2R zG2J^F8L8zH2K`+k08J0=w1t!hv2mG401RjoCiQFiBus=+8J=ly{-e}A_}cR)iKD2e z_sR21)n|kKY%sNc!iE|urq{EbJr*)m<;nU^w^R*j+g73=-$<_rrR9m7^o|oF2*|Mp zE>X47sArm4jpXS9U~`U=8B*p_#tX}iA(*Ius04M0#BJQf{zbfjEwK6;a!6hd3*Go>GB`F_SM2C;w) z{HF<`bF11RiQKhS>S_Uz^npu04-tu-RRc(&w?!@o#^!x{WZ{;hzf*49F)+paefX@H zJx7!ab4RIi$!X$WnvW8CcTJdfdp%?UoSy-@;XEKI;B=E1D9}*&x;1Agl1<38Ew8|* zn5Uc1!~IKfDK4V>EzTBWm|=z)%Fp*d*_$dJ9?x?dS&L+A>R3& zlX^Gb@uAJVTe~}XS88T@(5ikrqj+}Kj!s}1&(vDvyfnH@x;&US=PJ*Q^5~hp-oALO zc*T==PSu*VBh}tVLM;7RJZPDiXk&;6EftM;(A+T1;B7Vf5c6($PBt?73hhauVl2q9SnPrdw-)<_EtJMm*0Dj0HDy5y(k0s_u0!k2maJBZ zp-RfA!y?ooJv}mVQ8paa&@|DtMrPus{Lv zkf9+lfr70-_?rgi_(b!ieAi0|Z5b2nt2S#6$%J2N@`6z!0%qNDzo%0*Yl(5)qV$hy{ei z62fw!qC!#ukywy`xWK4bU}!)p7!M{Ki2xofAS@D%h=~9iBp@0VASB6y!UKs3NCi`& zVPS&fK>{Lx1knUR5(GJCwc2{lk?KzuZ>puoilwho(sQXqq(z$h@4vg>{r*<{pEp?f zgZBS*zIFYdvQK!YwDOPL%iMMQvHRNh$0+URbfTh99NkqkJO9kH_dJ!(L^bC+k81vz zhiHkGKbw{(Gb^+5=$?`i4R=v*a?OdSV(0^X9{X?3b9iezD>OqpdE3#n&&*b3 z$1V+xtj^Ub+uzR~{Odgb@|nxZ@T$s%MOE~(Q^MV|w|l4u(_E$=qq$$LibjTH@@)HH zrPHX_J9l?@XX(!D%`QxDAx&tJR+X8fG}U3%AyjWs-QH<^rh2tL+w-+OYg>CP1GW;O zQXcLP_izvQU+>GF`|nSH=H^d&Ytuc|OzV?SorFrLglc)2R!mIIMAJ=rVQHEqMUur# zL^OPS@dyhKjdr|aqKG$hK79T#eRx*1718^|_G@SIkgoQ5bHnmFb?&hSrozLrYnwA?93og>uZE4oNNwU{F1K^8Wa8Q0k@)n+xTwO7$W zODl0u-7=}cBwk}AM3S?Im8K&iGDswg`Gw;43eS7jvj-xHZrxq?hl2w9@2c(9=vona zpWNQ7jh>zKe8GeVB$)6p0R#^Z96+IhhY2P~XaIr=+ndy5$RKg&)gy`G^NV|3G*)wE zdWq55>5g_}-!m&KV|%-vAghGC_43{$Y~J2}Q$h_4;(t#AgV;4Ni9_A(mL!Nl5Ck#C zB*vJ;m;^BiVv>r+2#LcZK^z_#CSczHz20G{<0SpP~0~8xH z5Wxo)B0vceM*#x~5=h{<#qzpa7W1N{%feU|O2f-G6x;=)9JN|{hV59OqO`c=Htc70 z8%GI=3b?n=Op1M$mfoyso@V~1dz$)lP?`-5nfR~skR&l;fdN7S!hwkj+uQ^2u$MUa z*UVK~by%aDu6h(^8a8e-`qT$q)@7!it)W4pLU}Eh2#5<8T-j{>Pn&+tPG*KAdv@{` z)(dU#M_HQXmrt|q@F>eJud3F_M3+W&bo8T}snP0Py^NLXW>b13f4^9_hiFVZvxj&z zb(o4@!>cmiGWsH`|J%xbcG_m9d`WqSM`lvy<O6k`9Gk5Dm|Ne5-^2}Nj0>eHku+55F(^R}#f)tQyrFR)LbR)^34&a-v(yEcv 
zSXWxVS*>f|>buqSRo7Q-PlGot9(|Q-Di8Ci=*ugEMssM_`L5D(>lW=OGZE96g_&xY zscS=NxF#g0bIrOCZF5?o^PKnlx7jpXKWJU+GPb))w?{L#W)l?EiO$qV0&4Y@Sy$B> z8X6K48qjCzUZv_gpdZGz%soQ#$f6j*DA}b*f{S}{ZCcl)$&0nuBqXujKO7#FUnPT$ zR)>n;SNSZpOjVKr2IMXm7>kOE1pp5c5M_4jCXy&6)*VoPCQ)XUNe}@tIIL_WsWR#E zp7OD4XYTXNb#!FC5(SSx@b?zd{w15JGk`Gwc1^WA8;fyamdZF$jjkpgj4n0#6-Xnd zj(i39@msfjJ$bo^fu^1#bHVu#GR}9q-ZDR$e%-|%} z(qw&-0EmutlB_<*kTCSZOik939XF$BJ@TbYip;MU${&lnCIxT8Yc(jv^{rIW^uBjd zUlUEd6WEpcR%p-7GlfPPp_Laq*3qF3JTFjI;+ zfuH1|rQv|_L(iy}vaxKyjOHu!iI+)m& z%t0ET@*};fV_`{*fz1ZuQo&KkBfw`|s-=b_dAn=Nmitm7NggI%QZwq?8cVejG*6RF z8@@_*+G!+P1FbE(@o6P#5_yvlH0peFe3YRI1H_8oS=G`(DLnnJKe= z{BC|VCBHwP+<@;hB=;tX@>Y@~R~}{SMj~;{5SBUYV=WBo$CtBTgb)gJkidsh*` zrVF1QnH9kxW`_)62rlWFv%`avZg|Y%PvQ zI$ek~Yy1_DRsEPl7#VCUE9nk7#Qa>th68HEWDZk3q^(HN1ZoB82U3=TvV)WXX#goO z4;)|$#KMWdBoC=7rcB2qYJUgY7X|m!Bk5F9IccRT!piiTc5TV#?Zqa`I8#WZ;-Y;O znZ+^nAyWy(R8Bv_<*{u;#IKkgUCF$uR<5Bf&~zZ_Y=|5=;Zq7#(k=el+qic{X|_(9 z@oD5KQJw)-Ky_0)O)ncMjsZ%;Tn?WVtQd_Pyq3X~H`;S)5|z3b42aLL@tL^3mP$^H zeTaNc$Vl1>6gDgAr;axjIVHnQTtpy+K>D%{pBgccArxAhzh)57ga@h#w@uk|pwy&K zTb)^{bfHo#SgZMYs13lnxA(+sU|fu6Xgue+ND#mUab zGbDS&0ytwh>Pf<9QpxX}aTdeq&y;e!q^;cqrE}LyFkwrbo=69Xlpe>;6B~64aFwCq z%`NZ<>uso0lo_=V97ycO0?i1OG$%tBrARS#kYdNZ_8G8jdIOP*g&ZZOEikq=W`d*1 zlHZtvYwiv`;DGz0qVmXNK;z5eCc=qv<>WDuKuishNKJG~`V$Vv+Y9MLOX>fc3H1o* zIld;fVZqUe#~bm#llqVyWk0*S^9BKz3ZgM!bJ~X^V`k_zX$a)!OVs(E0#WPN)8#4d zdgr(a#ZNnDxzKi?9|a?u`UvHua@fv#=rHQE4}uwRX}^rqOozm0mfr16X>h*fg;kD795`mOuyF&7U-eIN*e%z8L4UHGf zL@YFJ+<`VjO}Vo;3@?q}UU}L|Dl5lPHTcBgu%Qd5Qz@G7p?kq#sb05Anqw(CJa=V3 z1PKkfhcts6@?nXISK!E1!DTpWN_J|$kE840T+IZyy2|zN#dMT>b=7t|?n6~#CtGR6 z1AQh;y%VFJWSn1}44C_3akm#pNVZ4}gR7}SlQkPSWaR8Iz$To#m@N2fSioHL+b$eL zu{9P6c>`(eDYR=>^OL&KRufYa~FZJd(=!5(ehQ31vPhkfg&1)#pT+bUM=}U&$(7ntOf@i_sBLDaWWCv`?TB(&Fq> zGpZJx?jWFRO62DaPuF5h8nz_ZHeR%Jrp1cX5WqskTDpAd}D((Gb$FpI&8k!<+cD*9n639``U|@#QWCgkK{TK&{9ieE0p6 zh0a>j#ye9y?NDSReI*)INdRV0fq6kvf<=(H@}N!S)N`Ujp*y8;PSHRVXi(`{NUCUz zNQwO>r%!+raqJo2Gq^7%yL%6vUlAu{Vi4}0Yb}A#b}0ryZbfZNku+>1KRv=oCB^3= 
z5$1uTPxFK4;b1J=0v?VotaXG6g{z1IoRpfD5*Xq+mdGPpjHRBco-9hBD71&v(9kN@ zvWx{qb1=&37e?s}poDniH(eSDI3D->`!-&*NP?&>AXXmM+ z==~aaHd{h%WQcP2+<0uAmr%0IP*-G*`7qb!A!OEVd{9@G>PJr%&tyReB^i@ec|e#X zffQDbMCI&}r3>c}N>9{z9huH)s)EtR1kIAc;uuK`Pa^a$3<2Zhq01W}7|x>UfUtT2 zry`Pg0u-#Llv*tkcj=YJWo{>9P#QGTc)|N5Me{j@|KzOT;QoYOj6A|I%cHPvB+^{t zW*DMFg+96qy?=eBlazAe+o5|#siAA`p6R#xV{lbt%>HNEGKi zJ(?9%tjz#Q35SQnB6XXASne4>Bop+&0yoqV#}#?(V*sgL#v4TjGOoPG43KdS2A@|) z4G|je+$R9j<#6uJN(iwlZ&}FagF%w%6CjglP;X%^x6j>qq)9hMNurPepNl0C5vqD- ztCgb%kYC=&16}d>YW-H`9+BNk8)NuOczCz$6I~I(X_Rj{96>;Li_E~smA^c6KmAg& zP8p=31QN+Wh&48t{uMR&H&r~DS44URw*K^U5|YQYVE7p~ga z%>cb}sy40@R9qn#cTqcq;C6Fhj)O5}g2)d|f)0u_CEcRUjskKhS-1Uewwo?$w~?|s7m6Jb`aR?o_%V=uw2W1i!A83v|t#_;ihv;W@}P%`Z(sNPZc+{_jmV8 zoFlV}diaKD!@CT@8%qKHSf0Va#}w zlq~22$x!WOvL<+H=M|kRU;7hI&>eAe^=Q}RK%T`C!vtVOn^2l9Ih<7THKjfAZrt^oUAv){3+SKEjhzY?2kXF__wv zV{oAbh+Kc}&kPyiBF|@rY5*8tN*VSa(%Bk_n;4e*#;FG6N;JcWa&xw=V+K1wPu zRs>fWuvP;NEDp=U`eq`FKYXP-nV7HP0F6Y+RI{zIK>(=vEmifqV8_>A(sHkCSx!>E!t>$?VeL2Dx}ln^YfqH*S>vu-33rwl16c7sY(W>4R{)>l0mD>LWZm*0*G zfHyfP!ByB0HU$`*=45VHvYR7z%tm2L1)Orljq)7ds*I}Pm4w8#kM4^vUEID$({XH^ zvSKP?s`uW=rgcw_N@v)O!H%!)@1%%U1!FHl1yoUy4Y{J6R88LaKdBjAw8>VmHNoY6 zzM2rFHW)sIMxWC*TzRC%HfR!HL;)V#NC!lUKm$yOQ!qe{LE6fOX@p-cd5Qt~bb_*z zTZCt_n^-H+JQA~BB0k%vChOdW>71u3PH{`zfSnd`3c|N2J(tmCoQ{Enn#9VQ2Kt$y zZs;cMGk^l5;Q(1F$&UyNGk1khIDrZflZ0KrLAH0peNI#HglfzwE0Q+S1`#1p1@z%$ z2B@}>k`z=cQcR#jW|4@!83t@d66d5c(o>4AIUevysJM|LI{nQ-6V5MGWRQK*2!nYV z3}%w-dh5cBBcrOjHP|u5!jvjQP2?%1SSu0{cdmm8MTNM1Ug%(6z0X33N(%a@ITpjv zq%)6F!yRnfjo1D{4t6+D9D42?hJr^UJ8hNXP7)}^PpSB(FHOxT#i%6NG`rHE=u10< zk61dx$L%A@SptngF_bU>Ju`-^^~MAp>-2NC`~I7G0r|)(fc|z91sYP}~^AuBN{I_|DrU{yec<4=LMuQ`AlP z$Y|fKb~)ZzNv|muUr6bfb~S;sGoX*hN=P@!3*IDUeukJIlu{*N1S~aXxG5ErPiJo& zjFo$HYoFOCD?~V_r#Ch_eimo`#fwJv)uNhXS`pat!%*|I{)GA*s6mj*y;ZBH%W)Ev zP-`*$2NWWtC2rLcaWLHv;sT|WC?&PjS3Z}@cS;8B?&Dx}?qF$}t0-|!Q++*cP8R5O z5xshcdgqfRN+vOnU{6rq#vp1XChfA|5^jnJjJe9B*?b#^DMYpl;J#>{#Gj~p?H7MD 
zNjOoChE7yn-Mx#dvE*W@VhU%xPbudkMJrRNT12oRGhH%2bLsH>%Jb#zXm9S(I=OZL z&*&)HeLlyk$vwlmnz&9nvp~e&&8kW-QePYZPE*yGPM@4m_umXlXN@R2$DKr@kpo{UmO4aFt^GDVKl>zD znA&Ry2YiWtceMfy)A^)`S0N^ZbBg&11Um7yhWYPI@UZG(%PKMVMr3~_xh)zSK0;Y< z8YM>T%HbOwG%{u)?Mc=#p7Ya05ZJ5PC@I z43hF>U}qLa(%xq;gK~;}OxDG6@d`}7LxUH~Hihv@eYAX?mkj^LKIc##bX4)=%wjwAyFjy zz#>FEoKUF;*{GUwhdAX$g)x^*;oWut$~`Z`cUPI6n>)HY41Zj{M~Jx;`A60h>#t-3 zzACjn{`d1?9r zJm1U!7nDk3yR8SfhLw~v^~u?!ily=L4LshMu(9*_ zQIXMoHq6;?NycxW0eSahUr!KM?|&4U_qBdB+p!K<=F`2>EzMi zE}X?6@1G>TCmIfR+&gJF^FtD6f$Ym(4o)hJkY*qsZ*{lz;U5GBC?uTB{&q=mDcuhd z35;lltS$C{ZV2^!>5^#F6QTxs)HtO3iKpZ9VDRZ|+8oQNkx+!>1JlwoRdDeSqR1hh z)UbwqW45=JJJ7(O=t~+X9=u_)-7!URIaot}3}QW+%|432TUyPq0LJ1kb2x}N;0ix< z7TfgO;LK(2kRy0Ok1>PppoJaWEcJ}`_5K+m_!aSTo<6IG*?BmW=3E+UMIzz=+;Ty8 zR4TDyDR|R;-o?%6s)MJ^uYi^c-OKUQD`5wZ}*T*sw z(q@ucy*sq(xZ=tLy{lnI%9d0mRLdUq?Jp2-iITgfPBuual}^(&mZQdSu2X@*4GSN+J%zBExT|A`4Ac5^d;XG z*Z|b(B`aa%BJ!y1!sC{ks$ck#_f zkF)~g=$37=-Zs%SK84S=Nk;HhI>txHsuT4z9(^3e6$-%=SW*Ix)z8$S>xlOP?>UeA z(V;Gccc)I;G9!P})ov*JdLuGG+oWE0`Heec0eA%gPdD@rGyxCl_1OLUL_dTMp<(lQ z>Oc8M3gPraFL#M@Y%dUMd=IqMd?Qc2aMnc5aH|*Ah#Y1{=Sm=ifU}oQTNFCv4X^N= z3wyz-)?t5WtE$^RU4w z6%*U#hk|{Mbd?~z60>O*k+f9E4A%1ij+2CaE!lnp%0S8IUo1I1WPf;K&CXZIqII+q z$z6OhD)xJ8Bsh-_d<%$~(i+WbnHwj9w>4o~Z0R(T-QDg4{u5K=>`h4jWrNOt$QjO! 
z+4Ix79V$oZlVT%lNvd??-$3o`SSZ$UJ`r0A99}cggQIqYEi*J)V*X_PU2|nxa4|R3 z3d9p#*i!nvN&!#(%Td&&y~&cg0A%9rf|(w=xcnY*t?a}r&2lMRK&>Xtbs#1smnjw} z9gtjFmlcq2Eth2gV~7r`Goz1yi&KX?r^6uc^54#alM7h6qSD~Uq;;z&0aHxfDn^(@ zFL*ok;3>HeKVBpjnlaoQfGP8F8IXGZ}xk34?{~p zm4q_rw$}r#u%nv(4hGQp+o_JfdumrCJduxe$09K^Vib0M%)yog3m>A2DLXi+*PN0k zB@g~IN!x%@0v|{-t4!`t&PCV^csw5)j_RJq%y+k8|9$r+RIJ?t1}ppxKkrWr2_h~I zbZd%xdu7pW4sNCkY)aiP*Xs&}tvayu0*b)gZ97!Lm&`N@(~H)bToZgAya`UsN+sM$ z=tc){$Aq%6_#hEfX82* zuAWmKqrs;G3^VhlkY&`AhICKg|d*ADk`fIJzBgrun(0JKb*1ArRh-z2L7F?tem zSLKu}z?v3(cAh$w4;Y_OmznexZqEBKwtdk-CSG9Mi94HHK@Zb%@0^jXDJNbVZs%d& zoAoYC^CM%^mW!9PBgv73)QwER1HI?o(5Ajoo})E0E6VgEv8{^h2rwq%0n20fHSXvJHp!Kr znH@0pmzhzUNwZ^(tQg(>WVy;#rW_ItumbHWgFFic@Vjhe*N`GDZ@k|jX>d};2ie}{ z!Y9Q#XD}H?)FMi{0ENGJ=q=Quuzoc}PY@O5Z-GnZcBF!X=|LbklQPEv_Me z^OR{(&5+#ahBmHofxLT#lPU^=ZvL9HTxgsC+cx8nH38(+B;41jpO;&a=`z!%rP-TK z=fY*6;F1oW5Y&`6&Yw7=42i?oVcX}a*)0bBLRKuI&Gh~^uElIrV^^ZXF-dY)trf`m z8MaK3m`92r0gau>P58;)3g6F@q+~Sv+?}H=x_px=q6Wfk6I!Bg6noehhePO_-W5f5 zE%e|#-68a8m|i}astbdF8rXDUG408O&ZPC8nxb2Xnow{#ZKP)Mes1w34s82WffRy=x^*V^amI z`c6e&OBkx^blz9wVxAannjKMazF8;I*L%CPqEY|Tzm00ngk%zLA{}&dBo6v)MJ;4h z*xPjKEEn5nz|7Y^OHY?0=_AN4y_+F#qt>=9 zQOt~)C*H=nC9vSw?W)7fk1Wr-_Nn2nvn9(NwA2lAy!@=P)a9F6_}x9&ys{sWc4JNN zn)H__%9)-2C3;ywknvr_HJ8OCWGlB?Bpmqg3T(ie&?en|g4WJ0551Ted&F8FGf9R~ z*eF{qSgTqVFLKv}am6KM2XP7c zin!LoCg%RdtWzEy5h^N&4{PCzS#ERiup6t+&Pl_eNRXMMkZV#HK~LEU+U-mbTHv6k zjY)k$>zuIO{H>b*>R&mjVYt(7$0W}P<6K6d!4=9jeDl?ee2Tz=FV-`-c?r!f-S zASe)$#GPkylc}lLuLA2zT`F6A6TWXx&e-jXyiJ|Oc1ZliiTG zMZZLDi5-cr&#RW~Np<6tCd&`WCb`EIt~us%L!G$f+4}1pzR-#Skiz0&u+MMvSp3s# z$sadZw`WFyFBL~FhO#TIxUy z))7T)w{!rT7_>o?-rWE#4^8}^MZ@ZqCZp&R)sm||=*`*~K-Bta0yJ{bovyIe?T!4& z#&N2YX_N>0+>nLEeO5BG8Yc>d=eMI0=oF}iU_9!wwIqOseP&N*k$7>{DEm*Y^%Mqx zG;8AeyfiX2fC^FBNR^Wu0N8t#Fu`m(WF|~^WaL|CqED%OL2v@x1X1p~7f3p365F2g z7SSq?6%o_(gX@9E&>TImLd*yPU~{ktBrFWIc%d5hKh1^>KLkLY_-q&Ajyf!D$TW;< z&UY}8W|EF5KG%V1DyuN9h;*o9Tdi6Le)Yu8V-ZeX-_d(5UDE|o>fGZ&+CE%Z3YJ8h 
z_CZ@kY=z#boRI2}N{xqyQ36mw?}~JFl~8X7M;F3+#cj5%bAM@BOv3#+s)N*+reasZ zv!&5)$|l395&OLF{6eKB8R3m-LZ8#qFje+a=3)!@nV*gf!AOGbjR}ZD-~f3*AhS(f za&Xf13d@hIfbLCejDrQUX}s(}@&r(65L~)+fod1acBy-8xWHS1`4H5&YmE3rFK7k} z^u=Z;)gGckGfdbUuC?IIBG7{?38Vo8>vz@Oy5-|k#> z4G6w1&M42C=_)sDgp5Z&2eY>Mc_0jf)ULDC*bIXj*os;ZHun4P>X|q9oTK1h_D?? z_4hS06pW~D5IZpIGw4VfLdR8f+l*79CwPR8o(k`xk85Gtl2yT*@ax=Q=Y;k2d4I1< zGl8zSx|2Y99D!eV=V}_Ma(vma2D@<2zNK81PC&$Aw(#~ToNqlilKI%gx_v!GgL zYVYm6y+;}N1-_ z&-tU~L$qj|*1}m#wnzS;vx7Aa1i-Ra-A0+?Bg1JNlAXNn7aECprvmHVa=3}K&DJfD z-$yL0n!>Y;3JS7#3trsn-d_`if^hHL(53)RK(W8P^?%?@Plj_77Ivh~H1P~-w#V`u-fsLgWV3uqX~&Lw3+z0JD)O|y+(p>xh9J0V%cA`RNnvXx zKCP*=k{k{lq##kOuQ=iA`cHTtEVpswzGXKhC8fs&VPtcErB;q`$(G{cAk6R9GN<)2 z_eTrE5!DzjIsW6}HKY9P`Yx!x!&^p#bTueGZ z8iQY9y9taLW7*HP)Ff-67DaDZ1Tk+8b3S{|K3`{JO@dd?Sq{%f-K^8w&9Yn!r`$TV zs4cd_cDCud$_$IiAawz?|HFmgWwz+9s<3SXTERgk2{QNMYEv!zE5(uc73jAA=F(Sp z-<3?YUz2()%#+CXMg=(2c11fP&F%WSJ$TQCUMAr-f{KKLV26=mgdo8A>C0u!S9YDq zh4Qw@4d+pSDW9yh@HThT7R6Qpf5x(~ zP@szHH*#CQ~Z;WYi!hj0_Ewr+MW-z@3K zOnim5L|na{vTv_`bIfz`Psbfr%k6dheq=x7X^)3*ruB%|&GUN)w|L6ptuLWC_dAZT zfv$xNdl>Ug1DoVe#Nqs+(XfqW;Yy+iD$$)D_fkh%)7$1Gek=mA(at<9$@LS@0w4L8 zg*63f=bpQclk*yo64|iHwSgdnw))p)m7VP&ua3~OL?sXckB$cKHc_ajiALRN3cE)c zsEtCF_uT%ib)$Tz$lMgSqnGYVD4%VXqbN&uT;W73a&s4Lo7>8c_>qb_fAUA44IlF( zv4i;XN(;G1KcrV7X#?%%Tj}9>1e49;CtqnV?IecPezMc(mKIQBZARAaKg1v-Ybo-8 zMH5r-T)>jXdhd^5y9jgB;%P;K<@_4tnzdQ_z_rzMQ5ctjAvPjecaDt;fX?%k%1Y0opPk3G1 zzSJk87A{_YEO~!eZue~s0`j_U#o2`>zg@gX$=bor^&3%v60{D-yBD&BtS#)=K7ui! 
zEV0CT_H{q<1-F?yrw;B^GICQz<#)mil!wEwzo;s^OcS)HIRVi5*lOtiuWjoaDxb*D zRniy7pd>fJy{ZNp`p`q|3~Ly|GK#63nj(u1cX<9{CVZ`F4+&mvWfUCeH8l zO{wf0?Prv9b;PpCzAH2n9`Y-lj5S(;3=$k_j9ZAb$3#Kym(HZChx$btM`%Gk++u!b zeNUsLxjcQIO`+|*E5W%1XA=s@%#UTklN-)_XT_0obeNZPD}4)*biUGAZAQ`skQ{9I zHrd!&BB%QdflDks(?9HTvy7#g^jjL9gYBTKblilA$cc^o&-VWO#3`U0K*bH^K>F|GhuIy>1x_L-hTz^2Pr`l`cg&&sIV z5+(>6=}tLdD<0wEZU$%vG|~yg1MrbwArlT&@;ohGAt2D{Z;EU|+84$zORFT^4FhAPtlpWiLMwE}pE`uMU0!x3k0keS7IX8~OE+S-Lf%<1 zOs0&EtTeNvs=hZq02_F^P%KBr&l z(8O$GW1$%l;J_k}o~qeZpJw`=uX6t9YNwKOdFdaZrAOY}3O(qb{hWP?%&GUNZheM){FQ;Sa~)W*KW8Yo7v%I=&AJB*IKJ8RS-b4 z#50j?L`Rn+z#ZR_>S!}Wvo9o@Cc4Ve!s5i;#vzq_2DI#h4hVvH7v#0)R1vl}(8LhV zj!y{$DJdcL+)`93?<-6^{?p$S=`LkCi4QHN1{v#It6(bJ*N6#jNH^?!;2Q8#kK0zj zj!c1H^`%Y?5^SH`_mK$37sZ&BN>tDpW~EcWJ!WWj+f@g88cnwerekn|Cglgv1#}zz z8PN%1)B!5xtKKz6-G22pEs(s^2UIQvm>F|au;>Izm^xu$8z`dL62kU1W|!=wiy^d7 zbAkrmZov90${yqxLu>$wdF3vw#{pUlDJCxPsAhu!4$9MLhI#V(SmTq~c3G46W&^5# z^C2$g?jj;0WhaR%#Xx(oun`Lf7B&JC0{H=V4&L3J+Jk1d8$|Qox!L69)#2T#CEw0p z^T{{gZ07r2ewDBH_2}+)BqX}Y^v!SDT2)n*)~Aa~ckMw_bJ~`OzGYT)EFz-QYfZOL zSD$H}oiq3|GO3oOrTeNXtYn#ZXm#q0?o3#=ee8Z}@hq{>2FrP48+6(}TCJocN;=0bpJr({ zpS1aFoh(y|XY!KtY-OcqetKAnU3SW>-mL6KeUAM;Wu|a#c4M}3YuxgE-}in0_HB7K z6Z0Hkl(jK*kj!fD8B5f&32k$Z*_gC5e+{OY%QmTLgK9f-9bG#O+D+HH^v!>_z9%ga zu{LiEG5weEk$jHLs8!kVE~EJS<~F8VHlIURTbV;&F|{ZBq5L`LHf26^J~nk^C8Npm zL32aBv}>){Q(0I7#1Y~2pS>_!=Zr%BEpC)6F4A^hJ_;GU|~2Sk0gOSfUw{o01y;_ zfWQL`3Mk;p&lQ_R!T@45c;VCi}J zrKRW8e#htacBSjFw5pz~o_s`lTuQ6DyK9wa@-|PNbKj-SwB@1a!PGpYnVE-)mIo8_ z#_IXqeI_Cz>f*V&Ov|t3E#l%?JV!mmR4i1)uSrTgo0wRM=O7}U(=#u`bBKork0BbK zWBcOq+!`a|(Z^GWXv7=svqfh-#|Ue$OzlojZN0?I&8x&Rw?B2Mh<6UR?(Xi6er9)f zcRss2cuI$KNOwkOxRmy-t=(H%uXnB2)6>+=O;XKFN;5S}XdBZ*!((}8cUE@JRLzc= z*+H|lGIM>6y6nu1d^5W;TVBb|j_sV0k)`?fTUA;_TUrNEO$)0MOHr|~>Ui;0cvobV zRcBvU_VQ0%>M~coP7=E6tgDW4UibSs^}5{@%j&xO-~H!*{U=}lxz|6Xm%h@^(Dm$f zt-n-z+f}unu5_g>FP&&J+Q)4E^Zsx3S6a5sCUT}p@71hP&zh%x^{jsL?B?;g%!8WG zvo?zd&*9<8vt|(w9B6^HEKTxD937b^{bn!W7+IGq^tYV 
z;pwVy*UWTvYlmy}R&D06s%l?_Raiw=byc`_m3VfwN3~h2n=g9Z<*M5=Rp}B{olPyH zs=BzSoHXatTv$He-s?=M-dV5JdB>dNyg4&jns+Ayd1gN{#b@T9`FyUyd}bv&jfWOb zTZ?z1KJlayqGqMiolirDNXGOX5)xji3=MC1Lqo&D!sCC(qkktfBI5A}?ci;Cw%(oA zE)Vmbzq`D;#5!l4x9;}NcK@$;pPgy9qO;R|?W&e$Ogl`wtxa1}qpPLUj`pg(mEA>l zCH+mA?4*CJ)RvcZBr}njky4$sjINgIc2 zeYA~aXfz_euFhA7%&zRPP^->)uAhIdI{VI7S3`Se+B2VhYql*@Q#G`>h4$b5)Ox!z zyNr-3Zq~!3x!cAEHoT`E+QBfkOsp85ey6r4MzeN z5sQdL#6q!9EEJ1~C1Q~PhX4JItgqBtTD#>3K(JRV9zqJhE! zgvJA6f+Uf^fsr&EiH9Tt2^bHH10(TpEJ*~Bg&_d}1`862OZQ z@nAeCEI=3&I5->tq8K0ng$E2cNGvdb1te%-JR%MRAT&?_2Lwno9*l$nlt}fFXdzkyH?YAmV@^iI6bBfTZD}7+?TIaZtdBAc+9sAmQOi zD3Jpm$|Hd+AdLqjfh?2;L}_SXBp446hXe_X!~=o>5?FZPz>zc<4--50fOS-z-V|dB8)|ZSu~Oa6b={$jUDwZ3&ldQC>DwZ4F(S&V8KGMP$o2PQT<9qL`Ai9DyzBBb)+&;E$-qzYjth$n73tn+!HRYaq~8B zb#L!>@2&inepA0|*Pd%#R(sr^+f!x+l5O}Czzz(33z}D5TILqUeyz>NJsPd1UaK$Hk8pk)MZ*{Cyan)bS_3)__%GcVetoX)B zzKQ^Ft`&2wtX3;^0jQNyPB?;u5i%#ER$8m25-Hv3wnK7uxm>k+X%xdgBXAmL*>DjB zrpSAK85vhWp<`^}}Q}gyq@5&%Y@qFla!tk@I zJ%OnEgk~+PTW`Gl(j9cc;)w0wK!8UD-jFxKO6d~4((<97+vi;I==>Ws+9MaU@(@!t zD!=Iup<+V3jj7N2zt9n>0`ActB-O44;ij5>97=$sX8yOKN%TCG5|Iti50$f3+&m7~ zC)kaHusCxCxhElKL8Vup=inQDYd6HH)ZhvZZ0;PWyZT#* zLsULh>-W!Fm|U{Y)h1i3HRa%KDIg#*lv{Ji)*45R6BwUlfexrC+@K(^^VxK8UBx(_ z?B}fk)&Vv_56U?)y6iBD&=c~x+mAbZl4oZAd^{0nHmNNL4y&hTQLVS!;t-buCZfn+ z@zxjr(q}DqZ2i7P$6=SqR3n^TH)3cH{HI~LRZTm(V~+P&jkK1Mr9o(vZ(RYI;JE&M ze8yJV{ZE_C&83Opu8Q>(9nm}?B5kJofL~Cvmw9*m6f&`92ntKl8J{mRt*@hSi_D_3 zxTdf`ZO6jO|7E0PJ&mqA3dBa0rBR1XiSgu_32CP{L>@dcZHIZhO|xCWQLQKi%pMj@ zMDTdPvNF}}q)CAKM>Est7|&fw^S5Bv*_Z^Qav@0CK>qfD%Zk=E+#anDAYH|3nv(Dr#!W`02^F4{>A89zHZE4>s*4OS^IWQ2 z&ByuUh2!xfyg^IqIP_v`E7EC{3tRst4OCVc4_DWqC-}_W$yV@-s_L|?oFjx2t;WPq zqDOyn_#=A?C*dZm6TS0X_7J+0xL3=SSR7i|7Dm=3^oMKE)NYCnEgTgq87Tr$l4J|+ zTO|b~0ovq_=-fcS8x!dV4i*bfVv^GqEslcN=ek(5ZCH# z(i^k|ZI_{?fhHgCj*F__{e=OnYq7(=0@yIWo{;y>D!akTj*#cCajBoX)Ej+Kxyu(V z^1WzLZ~!jSl+z>ex_TtL8@khICVO#5Y?z68COX-(eUAZcq%AKvwwYw*37e2#{Pf$f zxRxr!)xX3achwI*49rWs&e^218vS5_8-vHt*S? 
z)*}hd2w}O`earWdJ~QLXA6F~g-m|)#RLH*ot!_n7NHZj~phLF#P~B<{sT9T3FL+3H zns3z%tU~cavc45k#ka1%f4m}qIaF8N!cFPmlrwtB!y9lCt1CX(xag2g4I3(Ch2qK~ z85XB0r)$hx6@|!sPXF?dMMJGP8US5|QD=E(;ZD%VN}{?${`vk&+1W*f>UqzP)m`k7 zoHsb1W6AiT!{QK*sjuf{*iha?3yZmGAaYr5!JVy1=10>=I(+UXl)(VOVsyC4{}Dpp z9R}a;K|o0^Pe&s0maNNJ5jzd;TFWW2ew8}W4N(>fb1Tge@K1eejCMn{GJ#Y?l|mq6 zyh7z3fp=}q_qHROYHFvC*;16&?56|f+q+~ilGZz*(R#N8Whb_%j` z$9ofhvHU7+PqZu3dUoi>W%yTy*RO6PVQ~zw@udm9i{@8(23NAv)0^Xrm4y7#;FgbA z5;PjM6_5u^E3k4e3TD&yOLU!(TT8I1pD~l2<8>wcCi$;<8z`)vfE(%5kSQ$ZNLQbQ zh-2OUQzo>;z=<*luez`~gzu2XvG+h1NCyg@JVdj8U>>Ycr+t5(!9)jMM62n$5uh9oRqjRAZyNa6@5d zS5FDxx@Zl(!AZb4#H0vqQG`>hy2P88dijEQXj1snAig+p``24O(ZY4hJK8J)?ub6Y zKV&iRNe!@fkA2_*hdEy)7GUlxD9mV$OdWJh;Sl5?d&u^j!d$< zUbmHbzt0Aq)C|A?!^1*s3tF#jmrF?sq=v5p4zDx9PPd8@0E0sIOxQ!b?zz)*;Z5&w z8~Q=)g{FIXvP|eB6lE*-5^`CR6eF)JZh4@Cz!wtJ2mbEc``zwSm30rLUX64_Ly;aibt55*|Vd5KBIqXZYf{h0eJ3m0`cXiGDr&TIhU= zsf8|#IjPvZ$#3BvtHq)aZ&7E^mmeU7Ah;VD&+$i=(kYkQ!$PDc?q?}UA{hfMSvSE^ z*%ssv-@fGBwAj-{lfe|qqcX@WfK2p>gjX(;z9Wy`zsq_y2VL**7p3ASpNl-ITto`W zr)9{I*~J7EEO@Utn-C7=R@x6`hJ zGE^bUoTJ3wGKhBJa1seHv>BUILkW)UX101b{96kIe&JZ)MYs_O=&)e>hTM=DX{+2Y zIIWEVcKZjYp@!XXC;;-GSylMtLi|*RYDU}O!e1#Mycf;(Xj%bv zzwY*&{d}lTEc!w3h_S{xH9D>4zsz1pwQ4@CMXmVb3ygSYPpfy(Q!`XFa85|PCL$rQk>8t14>vUi{K;y zP!(c!=5av+GJ81lP-RpcXE~}B3W)`#>f)m#XB|-07D!4UAc6ytYGgDtIofyEuiaP7 zLe@Uk+g`gG)E(TH6T!NkQn+%IBSNB|@TN8g+C)?G>Ya~bgQCp3Bw!sga@DJhTWEQvNPvBrDB-(a zuXq7|>|db#Qm5fnc`mY?#XAMNfFxLVTFP{;#=l{D%<>P_vdCCK0Ijre090&wr}`el zphclvj3Y`dd#-Ot(ZGbbN=+Z?g`#x>-Do%2?S_GKnQsiozv&PQsPsK|W8UFXnc3x; zTB@vZ`Cd`M+cK;;bPLSNa%jKqB{VDo-5L|mPwrg$oyC8f+qMAHtmruh9J*480%Z*| zW>zk-qhI&ZCJE^QxKMDzqR-z1{KRdJ4+jG4=4QXW_F5;S#eQk!0@^GMi6``Yov=j= zy@J-y?n`Tt_TcH`v!gv@ro-k^<%4GN`qPrV$AYxRYx(&B+Js?&H6?tPZD;yUE`u9kAHRm#I1Zvd|@djst@PC z9C~+Bad0+}CQi`39T7U@Tq*=ne|64hgZiwGiC(gLf zChr@)nd~oBdMoe5;7;iBxYCLU`kE)?zCasS=|YP2iK39t9F171qOIP}KfXL~5b;Oh z9Jji27bhaYDO>t4`IR2qrar@jp!yMOTd?j(hg4pnTY+F*#xX zXo-anmNN@;dA;e`yd#Foe-WK((t&|Z`PTK~yvO*f);p{EY`^E!DGk3NQ;nu`Bh>Pn 
zc|}uo`UZ;(X!CetQIQt4G~pbpAEy~c=Z81@H1?$&E}W1BmXZ*~u1*7U`NN%okrCq+ymSI>PdxXKEi?dTU*D0t z<%B$Al`zZNAF+D$cN@u=U{fMhJQ%Gcx*vxks<^${#s|aGjZb{(l&}pX>4^&XIsqoa z5{wQ#@X=?G5~F8CJo^iIpeF7Hc#Uk7Hi*KI%*D2olJ9+^W#EAeiJC(jL4D^XFnKr@ zgAr7aidnyJE}G?A6S5Wbo!<1lu;}7a*^+ERFlYEU>_uG0^zDGC<-2!`U2~GgaI(10 zA4hi{ddvtZUpH2gqkJ3Y+NNjg_<*SYvVu?oD~uwQyC-xI{8<6F11_F(D6kO^(mNzA z=10OAHOtJQ9ur=oZarS}i{Spvk(#i3}Mc6Exb>a8P zXjCjOyB zHIaZt_d~kzM17e9c!n)x18xCf+#%zVl6i=SI`ovc;?PM}XV`#VYqB+2UxL0nq$8X^ zzDc+Vf>G%ee}PmM?p4f(DN}5SCR}(Z!$aBr2Ff7Q@9!v9_9(LD=UPE)0Z{9xq!sX$ zC%PG97+r^4O-9}YVE$HWhQkHuLK@V@`&3t*mtqC=ppet4S3*O|`>F6F_ZPp6m3Xzt zrZniDTlsknaTS24`yAX3baul_t_Z;vYEe;1xg?$uf2K)&z zVD+P;iZ;CRzu6m*SYS0T21SAhP zwJGUl1uEfNG*;z}nvAqgp{GtlSn29%W4BDQR8Q2-CM^ZAo(yf0`yn$qvoN2g^j%Jf zi5FdUwiKP|HWP>{$D-sN=ErlYSG0)D?rNkKCF%PE&&r_=2(M5I=S{VroC8--X-6}x zj6z6sDF=^QB(|)wh)MF5D3ixQm=mh>1mPcLLq)k`#Xb*{RNZ;ot9AsRYY~8>uooq9 zh_6oS(av!4uq!*bGH>g}tfuu?i(ysh%Bfe z$WyC*)wjUoYGI%c^v|8fiq<~Cq|bC1g8U{d&r_Zb1M!i3*<|6KB&hkhL;zR zbBGzwUD;BHwFZ6^l%FdM{VT@tD``FnZ4~zg(i)q!R^qV5Vkm8iwIv(?`71!37@}RY z^JH!4E~irTpdN&VbfB-p9q(Y{KrmZGYE%4WcXN?epw&y4?4c?10|SG&rj)Cwm=MKx zsIVo@8Rpa}CG@Z)U0sAU8OO?u;~P8Q)^;6DLHzR+f;r13EDc@znrVT+uoO&8gMMP? zYe^TFW~?FHAMJc)pmS?f5v^mf4kVTvHYX8+*MNJY&~C3Cvd~@T`JT@{RK*68MPLa9 zKNcevV^tqkjMWSB3Cf#PpPz8ILsFQ@cK8NfZ;r4vl4A;ZMp(uhf76g(JVDyOv}DY3 z9nSGryAG(nRKGcs__JWH?re+W_2Psx834i=smwGBC&%&>w$`Vq7ccI|W(j}s5h4^v z!NwhY-l>!F!|Dv@ROH3yB0SV+#rBGm6h|1yQPIGC-Rl*)w$iP_5+E=V9${>HuxDbc zy9qsU#ukW`--@~wIBmI6{~3$>($+#t=xO%UEP(p(P^_}jHdWdgmY?3lj^_^N5~TS? 
zUBT4wqZ+Qp0O-YHf*GLY;0hN}!BStb(l==H%-qx{deyE=LWk(<_x(d+CuV^ohU+X2 zJ&8})Kn=%%alwmdH7QCMS$JmNsv0QrJdRtOMxL{2QoRGE;}n%ZcZe1=*qj3!_`^zZ zo`!fz1Z>nh;fjp%Dc=Ce$k%19Wa*rv?$%Gb469;OTH~;(OgF$WA|kx9QTKGsQlp43 z^`O&(`YCRISV=MK83ikB73vQwWd#>W%8QN_zM z>xH9tQwmpB0kzIC>bi*)_c59+)@Jz~FG#S!Y+jCg(dO%#_b5#qmp2lTPRjAL=xU|x<5t%vn(Hz>m3Wk}Ucb`0WuF?ALe$!gKMUKuc&Yd1 zqK%9$CrM!OvbYytk}(b9M#8|*?)IVELb5Y8S*lt&i4ggjqqcnOC@R;^s>2;>fQanR z#Z+TA#cni0lrC`ENA!dhZo>D$AHZASl}*a!d!N;ohaZCL+1LRG(R_S8R|8KkQ zIngB-znd=IG9q&Oq=wO``-*R>md$DH*DU>afgWhU2dENadUc$9_+1R~$9S;7Dtq=Z z;8zgm!ER!HL?91AbH#l!4hIm|hfl6(0JHcD2{kQDk|e9C6&Uy3w_K7k$2FMM!NlBB z+dEL&t+uPJVOr$WZbi2Lc{8K-yvXl%cSO^y7!y>^)sCw%w3aCo&ROslBEd~>hla30 zU}D7(J4PyhF|3QcV89z;m#r=ANSnmXr~!tNGnl2OPYES$Dz<3Ywc8|B61&6R_&iJh zUXW0UWUpLtspNVo5Mvc6f!iYp`ASFx*h%N>3%jRll}bh``2h{O22G9f7R@fj*z}R> zkL8c=tiQ2Nd<%n=o|Q|!q6E?7VyNRP)|lSTSy#xamm;cKHYkIHF1}XGHuV@nGwTW# zP%oM$)LXHJ;2OhC&a>LDopdP%U#F(4A|kY)jp!yOr1?q#cbVhk-dd_6;im($${p4j z5Vqf?trBx|6jP>jmZs}SoC%IK=>12eHZhuNq}8@7g~?cPOHMX3VGrX z;iI+d8?r&-Aa$iOHzGr&4K4D+99NSkJ46$oQU{iHZtZNu9&3i`0%fA#WN6aA_#Wl{ z=$n)6pM~?-Z7LdwU*L|gW4r||WB(d;G-YWyt#PHPORQne55OiU>tSOKqdAOti5hMt zB9U8A@30Cu_|(X*#u&?t1U0!B_@KUeR;lzbU%Wg4c@(nu!nY(*$751K^`^6p5$aj9 zi4N2HnVC@li>*OW3k|e=v_`pm9J2T_aZnLE`SBUC*MSQ}ge?h^v>sNSfnzXGQ5Zk# zLUgj?lEEjKA=S`k&!eY;#9sA|`bi-I2Y^kps9sysEHC~?1zbJbdxjcA0md9ikSz0# zeSNsq&vtc6VV4@3jCg2YP{6brhE-HJba?FEOq-#_WOr43#;`+KY~#uzUmg{78|kPt zZm<0oY1-T1osl!pz(VWeZ((wiqC9nwn>{Tu@*6ncT((ps=DVTvoSK;K;ghvlSeGKnPF7wW^?aR7HfWF-te~6ArgL^LjbZ$mw2#1Xlo1T}?6n*^DouoU9A#oVetROwxK_ zSR&%je{b>-*l*h%3qSn2#M6kqiN7w9oun5Rp%#uKD3$WA&_KW}t+exm5SNWpWvdHRm2W)$}&rMF$(_Go)Y045>5cn=V8MHE|IoT@{4g8> zgW?)crHNhw;8MEg;klF0{~F@n5Usr*N<&_~(9J@jXw-lOO3!R+Jh8Z`N*u~oEC16u z9V7%N65Z)Ts_1m)ML?KW2DrVS@UaF`q*jFd;T4UvBCcJ+*s^xJ(q9~G1OK^}d#v5a z5`#Da1^{NyfB^HWtrPPxB7|#?m4(o884n9`2ey*7>pt>`ht@tO6l@+K8Ue3I zPhFaPHe3+ApvZz-nElB-Hu#doefb;@Dj`?2H31lpP6`8%%xwx|W{_5qQ3$1#jQ_eX zJos+Edpr5DI;uPDs$m=6N@Yz4Z{y0}^}ACN%b*vFu&G76#GON&s@PH!f(*#1D8LLI 
zX-%nz9@2fITkr7==6AFxQK}F7LcXJo;t&=MS4F;l$Fn^Pu>Ik5Z0^KK8Zdz91HaO` zWJ&_#Q4P%Y@um~x8s$jVEqxfnc$*yJ2-Q|?R~!eFpha{eux*tms%n`F<{_hMQdH8= zj5cJHBy8L?_BRuK8c%g^gXWq5SwE?iFyOa)kR)He3Noh^L+4 zl+>Grds+l}Hm%h0q}(*reLn!X@m82x*@`u6neWLNVDf`O59(ye`Z{X{3@A~nUgW0< z=AzVI7g`-TvqeK#g0vPpG#IF`wactHORX} znE;r|iE`u;n?qWk6v2Iz5+FD#kouXgsJ!^}v@7O}*U+JjMyV~rMWYx#$pn)sr82i# zgg8i1Y}U%3t0wFT#6a6|*y{s689Ai~NzJCxhyKeZ{?GeMmvfVu#~eXJ9 ziihMF6QcJ@dUg&sxUQ^y z;}lTvTq9_};{%K!wbBp7hBYgXLuGprj-k6d0VMHyG8}`H5vOkTX#rsqK3Sro;WVQZlH45FZUK5KHiB zWa1l|s;-85xs+!y>T3@C7B1-aBS)!6&s)W(l-3k08eVT}ALf)Ab3nit4H&itNoBOj z;uoZ)i_`2CM$rD%<`_2FhYA=>lUY)!n_9kQ+4VCIvVh7L$=i|(z}lr1)26o!Wz}x2 zoaJElzhMBeIV669XO?Lk1GkNyl3}6n19L+t*5eprL8WjLk%q{6Jwt$ncL!yaoiG;? z#@d$1p*~${PO3XKlUjOla+FXU&r?VB`4Jt^eK!_uwJzBjj?xTIbOWa~>D^;Wh=&Dk z+IS{XaSB3J4L#-?{I6xV67FCX%9I2vB&0V!01Q&R2{{W zQCUHPx$t`RusszQxrvbxDuJ)~mmNeQ5bu1BBn8fR4-uhXJg0KulLLs`1-i#J%jni} zJQN2LL{LcO(3g|}jMiPTWeqh`v=S|E1bkvd7z&4mR-xGli@ngtf%w&?OUP*LhN?SPt_c~%J=w$lVvj}=t_a`C;}!1{ z;9$_j2H!8bv|#nxMhu+S!t73|+;S%r=7I7<<2@Ra29A2Z?bF3lbaIZP-3t}LZFM~| zjqd>mx|OkmC=y?|!$KhK*t8)rF>w8|AUfbCBn!f>y-so=OH)xYi1B6x_PmyVb@pef zt@c{MelY(zzp7a&6OsCk<~A;24J{MKo0+g_0=I$+d?sxPWpukRc7^KFp;jI`#XJK@ z@8Ts%>2X^)36SE$#*jN?JWc(5XhGIQrFAbhjK@7jW+JQgDggsbB#=eOsFFQ#enODp z%<`>v&8#Ky8dEIVaH@2oQcI4|lMX=~qe}G+l9vERvpH)B(ltw~^2rLGS~$sihXa+YG%g3Dh+|uKC7QpCm#uVH{&XyO(Q=%nrn^dYGE_J_6c?j z6~#g*vwjQV;ZlGAAOQ{U`A zABqWoKqGSlIqp;Kr}>n#dX5!!woR-?A67^?>Y-7mw-a*9wc#h4t;N)`S(7I?+E`uA zc_qq_aU+EPrGK^G4Ua}x!5GJbcSqT#A(K!AVu$I?K08FH6x8msnSKrsRQehB4Lc0z9>Qx5>R}VT1`fCsggeq*PX@%E1 zJyB!zDnQXy4HI!xi6snSPF{9Qi@u73^|)z9Nmc-zUJOu2jLZsE<2g0~&KR^L~bTG(BbkJ7&FigrnBS^kT8OEE?QNv@dcFaOdB6A&w08PG=N0bUG) zLDiY#LN4vUrWd>0UPz8aE*X&=!|y($w#A=-jl#TgU<+8!=NSObXdw-Hu3A@aM!^rv zJLF{?fj{d==(_j2vZ)u)>-rqeU`6Ybmv=f@a(&Aze=T;3xdJd3Imglfl3TQSM5U4s zZk3E=k*zU`3=z-f2O%e>ofNU6mdP?3k~pxEaahk<#AR@A;N43Gh_T`#-Xr5)>f_AS zM0#Wczd*ReEFnh7)=sIv%B*S|6dZ3G${=80r5Go`35ih3FpXB&vYPU>+Xt{=$*LEJ z5yV%k{;fqe?xaeb*fQRHX`VQt#|-5a7zZyaUf62s#3b7U+CWoSLX+mr)l8wQ1@ 
z3_{*AsAGKPyK%^QkTjfRmpWspF^ecG52Y`IN}n=l)bxnPNdR|>X0q$dZve*8f=@AUA{L6)yT6n>$||nG zq_bmPAxC#VsKG&NW@V7P3RJD|T6%c}vyr~Kx!3g2O(Y+Yb=Pld%aGk5EeQ0+kx7Yr z37nmr)P425kQAj>)%|K@M;ca&r+f)iQKjff8M`oo4XO*NFyUvxOJ;y-|VH*rc+y@1S3UpF=2FnPnqrB(_!!-LSqP5%2=WI#Y_& z5DWDPE5U#QJtS*Q*Px;i*QG}ZiB~=x)+!AMLpmB4C%>Jq#WJ3BL!qXvB3f6-|hN-dU1U|iBWN#eJeFq@hAS8fqezK!K zh;w0ei%t+yFi+#oY2>JfG!T`W(<|d@EF&)wV1nZ!ooJ^Q?QKwv`~y-N##A@Hoat1l zdN{HBFWZZ4SFM}7t48s(D!SBFf z30W34yF>9L5?!1PeBz>_WUc-iC3U7{gC?lZe2AW{^hk_rnMHBpN~a26$c!dHf%vSM zE4k4TQ)`7UomdF@n(;i(?)&e#V@EV2Sj~F)T)P-jI5&Ci(*p_)VT6MiUA2JxGB*}h z3E2%46G#@=lSZwzWj(u9#l7xkR%hM z6w4Mye})k6>K(gYr%C%22oJIzCd||GetnS+RP2jRtdjuONF%v)x}|9M728`$ddZi1 z#8QLu%sbNQr1Y(my`prZm^@=MDd$g5$Do>KeQD89-{EA~q}40b?LfA}n+4QpQKsC$ zNqdwP@RP3@h^GtlsUJsA7T%{jfp*?h7S0e2qm**<@Bnh#lbE7TgzO2))MUXE-sAf7 zk$^(IT~Bc0}w_t(1D;bJV$cnD~BpbQKP4Rm~2~OY^z}=4FOX&y13)u%jaN6*jYvcMens;qefG z)qB&mA*b+UMU@F9N;1+Lo5@%bsX)jS6nQ3Q%9$lFQ|u(S{}}_%mHp=x<;3$HD?XzT z7hK~!jD}KW%G-y@0P&2mkDfT!vM^(C&5r0kPe`s%MIW_oPV=uPDTo{j+$Y0H!o^4oea-AXa zzdmdw%TXq*qNv$>_p5Cvh#l(;hgVl0Gv}PD|BIxAtz$iWQ@MskN;fyChIb9Bd@KEM zjE_c+QK*z8iZ<*n18myG4%3Uu$RTk3yg7;t#av!LY1)7u^!MR6sU!}Uk8kK zZtzUeRM(K}rnAW_|9I(%$z`2~RU3!Q^4Uexm!;{DMN@%iB1&kFh^fCDD!PtxzVQTi zlf*J^Q?%}AZqoc)YB2Yc=(v`Vw?m+48c@YvguLalk@J7fNSq>zzb(MUI~32=TA3o4 z5-7?`#M)SxxB3%e!OMj{A?B3P?u5K821xu!G%6S_s;L?wi%J3Pj4<@*#?65@JmE&St;B)bZ3|)z3%QX9SfC&MA z5QkxJHz2Bs;Exs)d?d{&e|TiSnUDkB^w4A zU#qrpJ~hG6T@;e#Zp((Q3kmr1?tS+~tQ*R~8hk4_3J5$q@-)4M1&G^6bfb^Qsj;+| zw_d%#(F%Iw)GfrS(9CjbC|t_Ggo96pRg z#9%HO8hI>_7&MkfZj57;MU{{!mXIj&fEU#Zt8Qh5fiy*zXvRp=$4kpG}cZZK1hZ<*HezX-mRHO44U}YO_WoPvgVC6OgqV$!^SygDa7}EO0 zB+Blt44Di^6jTtUTKChzLH^8P;jm!0^1a>i-yQtbj_14p4Ne2=ML^UA*RHIl+@fh` zsD-Y$XGAPJIy`?W_RvhOOHn;gGNLg4nPj)! 
zDzn-nYdEXJP1Cb|3eezev;4m5vXWK4g%hp(zMcD-1V zvJkVd%(hNO<#N8Dy`chK%+%*!2&=Zr5`Lj1imkK^Dou%kVgIaGK~RW%P9RE|KolZ5 z5e87efj^EpV~;1M$BjAmj-5S8M)Bs%CWIiz*#jYGxy?G3X6`eUend3AMH-9<>>MLz zm^#O9S_h4>S!c}2 zO3Ea8vUJfHMPtirM9+S%&zF)AMZ#eAChfY_%h+kY2$4dGomn`m6d%x z^yO{govrS$nBA`QPKRo@c4=q3I|-~|LlqJP2@+02kvO0)>w0jb;8N07CQDlCX49{jN**!of7M-EQ)KtS0g^A+6SAUi>ZIu!c5%DCjr$Qu( zL?wz{D`)T~U9UueOB7!;tU_nK)m+;m@6mA$1LR-;p+Vu2KS|_#$W6^0Q+7Nl%fs&Y zanN{o1B;H@ku*nAcBo;#FYB)!D(bBis}!kV_)#5?>OQr!m%=dz2NW0&4y9I_JCz)D z+-!0j#>cLr9+K%Wj5~M^L;LuFw?nM60(_;+kj!#VG2u4rfmW7b;jsXN2M`((Cq^Bo zIb;nbCmoxKF+{9R?p>C-krlFOiIHfKN{*Kth8?#1mpvse3(2(ba7dMq)Ch?}58i&S zh=fOYLLw|e)GHdHxfE@*`CM?%d)C^rqE+_nzkEAC%VpO4W7TQ*iaiRG!UZG(^YH7^uZ~=sa1cwI>4H_~i4h|}_{3-)rLZwkARn=l)NLh7QI4m3%4hx5c z!@`2KXcl&&%dlRCx8JQ;$+g<0#q<`*_`3^ly+Vio>8%MVtrn^tByWQ@sUEQku->oe9v`3U;?8Mp#lgPD4Yi4JfJ|KI8=~{ z)5ozke$x3Jc+wcjk*AdGQ6w>3MhxBLmC5CjAQK_G;|D2O6C z9E52w4glgQ;2RDhWnbiveUjSRx~GtS`BQ5NUz)a~9s#A=*5Jq$wnEZ|C16h z^6&qHdo%R}x!9K*d+9PxoBiMTc;^-tS5LVF=GWR_JR9|hp1PLX9n6NVF_+q_cZlJ# zrMLw9nMxxS;*ouzVIPD6cq|}MQ-=ODD1Q*$4JU$&&tLbCLv7F)l_k_Pxx( z252;)2n>|9QExsw@q!CuQjomLP0HC(3fZcJ?IRW70W>W42HET=d=*Btf}N`t4(66C zXdp+8_pqiQ6b1Q3&m$=zX+kd#_vb+aiuQ?t>3k`ZHW(6p+URy1ymY%#U6AeH&vEnO z;rWr#=lx?4K6E?sL491?us!0wn*nds4X`ZH|2+Vzn+%}m0Y{*pk5k709BbPRcwg@! 
z_iL+b55EKG|K&jXGWzHq9Rqo&{2)a%HgS%Fd(Bf@Oisj*XbpE4>n9%rT;r6~$X0 zMX{0m_0A8O&LW3cOUXiY2J6m-sRcrow(_K3)E9LUer}h1aIAI82z4sjpPvbghF}pyh$%ctB=c+q zX1kq1MYKp1zfBdwePa!$_Kbwch^()~rZ~vF#lM&>*{&@ygPDa3E*GT;jTq1P)sHN6 zD@5qXHRA1Yys#FEZvGF3_}Q~~JUN`#ExxcyWkm@6VG*+FGk#OHFYK@0g!b&>=@b4z9Y0A=xT-)4gx*ZwLI zQ|p(qY1`4~k_O*yA;RPtp$Sgq|C1$e?1Z{4_7^Z-e_I<`Ia_}bwy*wHk*5C2iJkub zyh?xB5u?AF(saH!sF;Xs;2+w!Uc`unY`3rvDCBNc0#pSG$aY7vi-)`TDJ}qB>d|Sf zjZ7L#y2a=9f)W6 zUI#mFMoIEGuI7V@5|j7Bu>q53dfJxO(Sz`9GBrT#B`phseijz6^N=UbDJ;5dMo=G& zgat@Iw!RYEx!c)xH~|5z0~8g2ub~9kNrX{UQSEG`#WpS=vB8XPmU&&5O=JEVWMO72R?h)&XOTBHM061-<02GTf0N;WpAV0z{C`@ID8iIH_rJHOTbh#kam(}(ohm$=|W!nWT zn@SQ&c-UL9A#B|KXX()2-~v>k`K+VZ@-P##(SBVhE)t84a0$+c=I+Az&k#`4|k0JZDyDU^u=J9`g{IIkeA+OA75{1~)%J~ge{)XuAovkOdc`XN`W z&!;03AncZ1>XK3PtOlRl{y3O4o18*nl7_9~(447iutP|VTe@m9GUjTZ>T=3Whqo+R zdm6l-AK=nzLxSav=Ht9wZ$N@s>dv(eNMH~_DG1;y8|_A|BpqbXpWoCDrFoJTC>>1t zl%Qm&>8csF<{7rvb^LcZe3-Ql#YO{ z3U9k9jRQH+q{>c#t;&yh>CS~yyU0u8w#a-uF6sGT)?o^*!$m^b0ep3HdMR;4fb4#D zoogX-qP&yHxp50B95prv^J!N~NZZ29WpyHJlnc8~>`4THszwF7&h$pc)+fT(TvC@E zl|M3I$Z@EC+pY`((=*Ok`b!BdMXnAGpyUsIXB=E#*!WqDJC7vk5~Y@q_l#Slr2Htc z84_j?P|`Cc5G*v+VV*5hDVg+``rC?fW8sqbl~D841cInZp>*T>P1ftG!&(sjo9xqo+1-pgU;`^?A*p4U3m zwX}OS23vk-Z~g8GM(F6B1Be55|8uJtDi^W4v$J95AXAq+2Vv)iOhxXh#MsbSs9-ST z1@dI@*9MBB?YxZoYu2EhPuZlCan2i|bavjg_np$XypeFA1Th$xTZ3nAaDjNIC`Ix` z?m_IK_5a-&zRhN_@_fqoK_AQ=z zlUb5;I$9P6hXwug5I{w55~G5ian=?YWunMIq^V!`Q-Vm-f@WxS)(8_-P>N(wkR^!| zzWrY$wy61hi)(6#;ge_)`zhQ0t-8D!K?Y$xXS)Cc!zY}X_X{C_iy%5cc?diW%1NNI za(af(CxcPSsL#KKl%S^#;kAq18JtU2_GTB;c8Zr!fIr__90NYX$;D?xTq&S&6%%m< zXI_X-tgRJm?we5eVH$=LB;Q5Wo{0WHegjbLab#xvpp~8IP6br~!_p$T_M^lL=Fs>^ z*x=}et?)vhVM9BtMwomZr1b^eN@|oqhBS$V?}MRAn2&GbgOG=@(kzcV8NpUF-@eCL zPN2+qzlyVgkEQVo`apaJzuaUR8Awq_Z04EA*4TqrNI(ONmccg)0GEUlV$Vb8Z4gWxx|X{^*QZpS>HT( z{;(N%bccvqaRw2kAq~@vKh@|-Z&l_AGNd|C9ORxgiqKHannc0~aMh9u)NfSlIoG>l z#di;bC2Hf-xm;ch9DsN31lW?KAP4OtIUqg;9?*2Bj(!}7l|Z&9p?Ax_w>M% 
z)sYJ8;{v4hz~fGoRGBNhL5YpkvRP&l7%1W8msCUw%I*)N#jg;f)UfG~BJZ_y*DadYuO6Uh=c#Q?l7JpE&dy}vhD5fif zb)Kytqq*gQbh(91EV)I`J#K;YNY>iWv2S07FcaEKZ^a0uy&cR~hr)3qE?t`qq&U1fLXInOjY8v0#w0=l z2nF`mL|(VMwm%K$44OlIGg@59j!<5-TUp2UQg~2gT9Tzy&=!+eCp1LyM}s^!LwHV6 zP>E>`4C0&+P)p@NmUOVM3TS$oC`k2@ld?m3XMy;|#3~gu6}D+2&E7+Um7R9)@`PfU zarL^>{^tK6hf`w2>9TDj@0ay(~HY_{` zXrY>!;IgNn8b?JX|B0_+OL5p=<|ve5(c+-aO!xo?L+MIADxZR}SSTg^w#U>xz~vqU zmc`29FFU~|$jplMRk3AT{c+D3noLQ^3Et2lsA5n81a*IVI~P7HzTJ9$8bQuTEnq4N zsW~;Iw;ZWJZvyfgv&pY<16;y;HC8x6ToD#sY6wuYgR>sqaXuo>iZnjk!`eI*v}if$ zxadvjXA2QEr0B1|F}RO}5epNh8yy!!V2c314q3#QsfeHW(^foUo#o4=w6R;;QH!a^ zygb-fo9IBJM#ZFQ5K_&cs2HmHDq{tyzxiCH9JnYv`=@D|xUy1OTq`_Prn;OHz#5Mv zzbDww;_jfx7SEF>Dlm26PHh{1<&o>iC8#YE8CSnnmB|lWe#qffYv}-z09uLBoGA{l zE@QjEPA0GG{=g7n)F`8xi+I9?Y_z75%=5q;I+nmQ$$UAj1uxis%Xhd)_q|pY@eQ%k z#6-we{2}c?27~lu?&XbnJ$geFk%@Wb*fltu&_!c>ye?fwrD7#DW1IPn?>MFDNuIj_91O%o#?& zkKNfXml{w#D1n`Pfz!as+F%96%N(MF9HZEmxr;#*} zs6{L0M$Kb*?wOb2+_}-}Vk~Jaj_Jgc8<= zAhR7z$Q*Ak+K1@!2{EsVbfSUOS=!k0P_K|FvZ3vAoe&jf2kV4h^bf1PG_6aRiAb=X z!~?#&>>AhW{(g|ziOmie)X1Ifxi{+4Y*)UI`98DdOdFt!*COO(A{OG_RPyI6+5>!p zudEjr_W07;EE^3Brf!O2uh9dVgf?hE0G{PmR$l(xe}T#?=tZ{`bzWLJ>k@MI#UYET zC|V>RAF^_tM1mqnk-|He7#js;1!jJ@tC>mr;eZp;rzQj?DN!+O-;DiDY8h&(Z!Q4p z&8bJn`$Jb+K9^8)0uLv)(tZW}O?C0(jxY{b!DF{cVw>P+r%~uM3}JqP69r$Qj$^|Y zIVW^%2O?NV5;sUyfBBTbS~p__mjk2#7|D0}kqO7r?ebrBipa8P zXQ#74+Q@UpFU$^O)}DhlIXGVPfZpJCzYh7VX6CnniT+B^rc9?FoD;e9Qo)7qW~f_v z3|`f>ChdIMeQ6C9=!=c>#TTrZXf}f?{xB;(aeE0hj|@}rrkE$qtdd*ropB(5>baUA z3f~6WE1M=vKul(sPn0}yc{{S>wA4An&eLm2@Yn?$@(PCT>5~xZOF<fOH}IAb?D% zviglAq|fIs8X&Md5V#SY&9FFd`IVkjWfKgejWguqieDU$R!q{V*Uu@cDP)3QM1zE3>m2m(qqe!TdQeY*}e|p zT~FQ@Ba<1a?^OP{5gJ<(+;EHmOaclZZ7Yt(hTd69lguq2(rc)4QzIwT=P1*%yh`VZ z3Z0{%)m)2>O$P_ln+<9)OcRW?8C>Z@{V~^8N1r|(k_Tq!5m(bZY<37GgY;MVd`uHj zEwYQ`h-2YgQYClhMPHX_COWx4^Hc$K2-AavhMA06$0xQ1<`q@|WcIFYr@hV!H;&aARucU!lt7n0>bz$|+$ zWKL*Ab)hNXoUUdY!!tWLLZA{GCv|2eSCSDLU06eK_PbsS*b0p=ycwS-rv|JuRmvdi zg@hK3lfxvMbQr=xy69@Fhz8A|fpcPlYoE6a02Sz-bW4a$UwJ_GaEx(A+^!t)95M`$ 
zzhgYZJPVbDtbjO@{C#n(!f!>T_?3Q&go87hJ((QFUA8Py9pR&Vu)zORhKKqoUI07QX%rzlZ&kyM0F zh@@reQk-B+SuO&u8jbu|J0?}E=Ix4KlA$ok6#1F`-(7A<4#=;d$fDD*v$6zl&dhss zBiq$L9TdHDH#T=WMtvr4MqSsDWWp~k$BJ93Z@eGDkoEU=$Z3RDeF!j@^J;T zwQq`_g*#fO(%&N~kYgvGJE<^I&~MBMlA!qWUO`1BG%}zMj92Y}MnWFQStqHt&g60N zx8x*JQ-Hs1uREX=AO_)xI4gHj5QJqdfkTn5|AJz6Kk{-Y-;dOAZQKEL0l<0925?{Q z1gHSYYbtZY%(^Nzi22wMGzcG5RA7lX(i@q4(bR=rI&G)CU@JRVC{YTswRe_Y zL<~CQssd}+cYR*}P)60Q|1IG*0@?|Vh4OHQZf?hn?$AhZsW!OonJ5`?A_ z5^`q7?GKX3G&0N!_)T@BA9qNAA=jsb3jrf=m|@n5R)mqb>TdV;2AG0o@F>A z*rt4sL2TPAjRbu8rYYRrj=Em56kUBMla3OQ)dj>9oB=5PM5DBT*lXA^X{iYP=ioT(YW2B5i?+6)%(YhNzLW-l}lICUue;0Q= zf4H>`()^Qgu8~p(1kw9BLTxO7iTp6>t>j^c;`_b}sVp4n$j?T;873%RvqLm+AV86w zv9|p~`cWpBoryp@IaK|r?Gn#V$LQ?SyxbO6B1~v9-7?%>%t3x2Lh&z#W+d|$SO*d| zNZX~0c*h`SyR3Td7s1b8pfzGx?#h0SdO!B3iYcz3yqzi{9zyHmC24Ddg-svSV*o`o zw}Q5bTc?U-3lWY82)Y~$A5kG4!64>MN{|xh#dU(j&cI*e)3cXckvDrYm?g82uc&Ni zwfRBSQ=9XGXm|+>d2-yw*Uc>$AXNzEiacQ+$v&%h*8|PPX5id?8N@54vmWp6pKCm& z*X3)J5#$Wn7C?$pws=R$f6r7y0vXprvS_nK@65u2mbN^}mrYvB$IRz>r{XO6Q-$}q z2oGg#S?@V6@fajjmJZ?HjU`+}I!lUL1~fOuRIMVd=gw^A`Of}P#j#M|)Z2(}vtD@w z&XJc>P%^pUk1NMUMWDKVScq=*4{Yd zi6#;u+q@JrdLl>wcHoiD`uGyH8K)2D<-(pe4GpPKon}H(D3@4&5;zF?3u34XZPr;v zZxAU5LOdP?#2_oyK2|ZJd_9`lL=-Lr?%f+lsieQ`0hTGNiL^mvic{f3Hi-FBk1CQ4 z;;v^V`Ua$Jlh=Xc;QS#6`QsDn?7C5BdDpVhxS(q}roENPUK$Cb=PKxORYfu4Zli5V zZqGOur@{CsKh3wbCVp_IZHf^Znuy7qxOPa0L8@>GMJT-RHvl zptIjJwr4Zy^CGj2qZQIuGpVsl*ROoS*ukw5HKRMTmfqfaN^l?wsU2ag;yEpgj^GGo zk|}w8pC_$}v+}O(k4nD7LUwLT{*S=8k70x25eP%ck5~jVW{C&g}S;04&$8Cmp`?9VNhX&TYbkZC57DuRebwQA(dPf4S zyj*XOPyXV3S;U>RcSKH+wC&xBrk?HRl-9jJ*nZTF(ZS60S7ih|76RH=VJ;}Z76yy~ z*wwGB03RzvL|i(r@2qzP9C4GoA|hoSHO`Hjwke9DCixX30dW9KdASOjp*iqO~Tp8x)`~EGhf(ksyLNY8e*$6#MZJK^AUeXkl`fbTxmsaSyfJ1m64)oo`#??4MBM{C5(e;Obkt3 z6m6X@e0GJ{L1eVWZ*_wh#JFA&#S|GANcE%Q$Rpc{B%E9eOH~}iaYkBu3MAk5mlO%n z6QsDCU=cG#Q6NMo4POyWf-oIH#AI@KBRPnC9%)>bxWh1rDF!Vw6B#_RxIChlX&gR$ z7-PJc<1h(Ig7PM54#YTOyfa1T$t6cMg;0X#oa5$p!$`#8=`5)OLH=ywDblU$>>~Yo z<4ceb{DDbX@oonnEK7B~ufW 
zlr=^h%obG?c9M*`9fJ5lZYS;9N$b!cW~(r=evlQ2O^rk`(*sG6vAkE)pk&6t#0Y~D zbcsLPyb*(V)+LgV;}D{EOd5$9?LqWv9NF0*bP!3<3*(T3kmRmNh&)@l@Q>_|OwNAO z@i~ksO4Mu|Hky7qNJu7O*eKGlfi`YNa$G?quVEmsVWY?_ri=o!XnQXf1r=$<(ITyQ zLKs9}U@uSL>xh~-BWhxZpd<;R?)X7Gj*&S|>_r$6=(74%QbLn;L?(0cN&GkMe+LcP zeURpL!!UsuDNG=q%yfaK|IFna&zO&1eX_7oP(ny!9Had{la&-Sm>dTaOs3BLjhi8p zsZQ(?>yZ!^bOjnmVF5H8;Lvyr&FDoVkKs!O=fYB zgjt?vW=^x2#nP3Pm6cX`S&Gt@o!T_ZWs*&^xLD3x`Tl+B3;llic=F3Pdg%v0_`w(N z`|rP_y%d=p>sWW)b*oSPkdE}CyYI^saqhWsem1xH%-{LbPd9C8OZ)Yi%{JSvjcB8f zKAO;lF6l#0(MA9KGo8(R<};re%_VKvIdU~3VI5$vIfrxRquItcGqhr5cV#mpx|hwYV3{G&6f+{bUHPi4Sxt|< zXivUISxNTltEx;zRaAw>5V6W!))LWEWvaZ~J3^btMx{knRHh7#euS8IbcADsrYyQ@ zQH!eRB`Q^wXfSCgGn;Ll=q;G7-oD4!`rJ~z_3T4LMlJPf4VBcq($$?7q_)+zZHuqi zxoMp)Sc1)Y5=9)H2naa+xv!!kTaF z5wH3*G|XBlQ>o{}Sq(Q?N>Qo9UE=f2nunpxODTBS(q7Sky`tfMIhUp5;b$;wbGvhv z?8>Zm<*v9)_o1?9gI%vAo#qVd`MM`F)LBfPs z@Bs%87C<-%2Mu@tg9QnLqX9u6BS08DARatG0E7f$!2_fLvH${u3JTx>1`jA;U}yj# zA)x@FP`K~^nW5eAT2yLFR4TQdPG36hG|BQgm^np6bu_O{XWHp>KAmDsKJ!`dX`?4S zJ1m$6wE9vmeKZZF3`)}gD;=5j{zA!3V5-CgB! 
zxm;Sd+mFa(}x-yyuVh1x%BMHyBD$UZW1~gf* z)og=k<9dCTc0}c?-__;yuF8tis-UZ0RjjD0)J%1%Qymo*RjcMc9T*d*lAbh`WK&rT zjnXtHQ(~Eli4Y6k(NVD~9%~~rBP%(x!Sbl?ugjgKzcUN#oJi9_@Su1E}rL5SXs}xEi8s=Gs zn?8T}$03(OH8ckahu=_$>@77c!y53;RbE+Ht*j0?gLcac4W^+FNIzSNa!&KVmHo_H z=`z&W&Oci==`AWMGBP^)^@?q&JZ`zY-TkIjw=_jIHTI0{fMom;KmOEucZa=0EG!oB zvd}Pxu$1+8p?{p(Y9c4u{39Zg(F&~$$;?L9S}p0)1 zNzHUVsWv}-(1oTsqOnXJDq$Iq@PveCcu2*g>QQZ|gjYjESt4tx<)LDj+4}RNC;d0n zyx2LL_Gac{728g=cf)y#o<&A^BG)I0fp5w}f@zTGcp^ih8bTsVBq}6SLWk;b6AhU< zltY?ks3_A#m@c|35C8zkwdg1aEJ71Ury!ivSRvg@29btFTStRH^2j_V_JVkA!Vu$W zM@}1Rn>k69lpmze{qyY$jsPqeGC8cU84~h5BSG~8w&|&obvrf0Dv6if!v*lzAFE&Xy_YC2++W7$QBYB!9HZI_Dw!oLg63)0E%S< zf<&gRh;)-XAB`lY_?uTCK0_jov?Dnp8&M(Q9qg0U`M&SKpuw6&N`LHCw{=2<#HuP7 z6*w?Kg9;WnI2I%{s9?dM00%6<0TC!b1qh=KJm$sgK;z6t4j+{ih7`t;n0*w6i@K;J z1I>m7GR}jF;tHZj4^~624xgb+h)`u&vZVT#J*ZUQ&+468NU#Y+koEEOsoMALz6nHN zCm&m*I1c}ygsd>IC=%!IC5%JDIO2M!K^=(3YK%>RTu zJI{!|j}bZ?BKjQ=eH(ocbV$R5!VokF;`?s=8p?dvc<+b9I{sev2OG()!2!+pec$)T z#`k?cq2TufN4mh^vP}U6_%k#rB#g%|Bpk;#qM?vbNGK5C2?aNpmI4oWpn?P>6cF>8 z6bQin@solfp)eF86e1J~5eg9s5eg9s!_Ytj2@w(^BrpI$goG1_paKKJ2uN@c3}FKZ z0S6p_KtLb_Njb;^0Tvj+12`~&5ES4zz$KspC18Mq1`Il200~HdIIJj&$mD5I4a1Z} z_!9t$3nd|8FcRc2ih>vjVi<&Bj3K}fLkKd)Aj4FZ5eQHb4w>OTL6nv_4lqxOfQ7J$ zQN=)|cU6t5M_=@`N>nf5)Dw>>lcoxAuUe}Zr1%+}2saUxMCm7&?{c;gfEgm=Q?i*z z3m!aVH~x^C5zTuogwUiEsyc|(5S;Qk7mR2f!=3UZO)4u@N;#X>JIks_g6k?Z4vI5) zqO{VV5+fxoFm4B0NrP~N_hj)lDh~elKDb78$SNwlkMU4bwA`lfo4i+0NL((kaq|t< zSN&48&IAGZEI~2YqfN%U_;MeBP71jO|JyEqU}=wnceVk|u!G7@IVctR#GtwWLx;?5i^hn`ghpD;VH!m13Hp(TX@lzt%4;#f6M1g& zw0K8|TL>i`cmiw&H>ABTV!%e4M@sHW_W6adt0yAJMddQF`-P2Vy+baci}~#JXdZcr z7f0E^C+O{)afy+C$oBg?ULR8~Y8* zIxg3_dYUe}`JP&upF(Mc0X-;uSAIrG4zuRv&rUT(ls7{{TQYY5O99JQrT2H86 z!X2Z8gn=(Q^BWmiegp>c-ZA_+7YZJFA(5pt(TU_aCyg4Hrv7;v;8jJsR^kvE(6xt# zmdgypXdXesz_!LJ7=U^dqKV#V)Sw`own((pJf5PVr#RDL-GK{dfFRfNdKa zSCyKF%2?Vt3#ok3R^>MB3EP6Z|`Wo zm&25ay9Yv8x5mk|RgtGkkCEWoI?;LJQ5*HSM?<%0sHh-uoAMjHHSwYX2uU`Hb=^N}BP@YIf`4 zQ<-dm#Q)E6q9jp(K#X%(ctKrV1=MRj*ea{?!o}@E-y*yI&Mzw|PvR2TeqU0lUohKq 
z^{Cad7>Mp!e`*tQUK?TOT@}h|u87B9q{+Di1^j2ED1HE$JEo`<7Mf5IO(GTR+SY3d z8@Abh)>@6^EbC>_R2uLU+a^G?T}h^Dk%OmfI(RWc_*#`<<*q?5qLj9P*?j>tm% zYi+Aq4-)~OvFEdl-!aT*zQ6X5sjUQrO$R%1xBGcTDua5Be9;!D)!qjQl>_V_aI@ST z*vV2b{pG^4q2ZBt;dJ2iU8K4orj+Ul`FM3>CIIy_{SLU-ZQGCf-UOFGt^C*wVm;?< z2~NVt>Rsp?2U?fgYf+w@j)F%}g7RKrp1e!`mKIo6I%Ph2DZ|3pl2a`vyAtDeEZTs< z`rS!P7?ufz@4}5M5j;ycGo7U067Dnpr;0&OK$$c8%}0Bu=Z8^H2l` z(hnx@>e4|9z1x0Vg96V6+3TDI|D=JC839EY=>He2%VCmO=3$BHO@meAgDC?U2Qq-C zOT^~*`_`}Kh|OuB<#BlQ=xfafr_LSOE>=RL-l)NtbNIj_=%C*#fyqEr%>*&@%Qquj})ei@S30Gxfi^q!bm%UWpNBO?NOir6p{VQsY5ki8cPv!J5^A5B6v88Fv>QU zA9oWC)oH+*j0^^e#Vc={B5v6%!3>-B*yj5)kv{Xji~l?{h-8De%F(UlA4ympG}^<^ zBCs+v`HG^wip!odK^2+i5<2vx79Fsh%3Tg^M%eXb zDS^>XE`+^KOC8eyE`cf)Woyh+h8gcmh&NqaR?bs(vKomXTf z(`zfh9%$g|UsR7|C>BT2!61`s1He0-Esx50p9NUB2XmR5!oxVPM*~|^Aed(Il{HDQ z2&Pt}H14-ixsQ0Y&m(?eorc|g;lQd_&9n{Bc~BAL3v^IVDM?G;X{f@2m)V$m^=d)8 zWJd$cD-c~;-Ec{#AXI;INrOl@p}*p)xS)B_k?4}RP5=NWiwPr9crwdq7YMsj5#*UI zN~{rV-uPuT4nN@5j4Sm`tLj`Xk`~8#AP)kcib`^{&@W=ds3}OdYS~7lj}S&XNiCPh zj`%}SL6@M=1lwk3O4A%H^5Y1=(>yk3nocmWE1G-AP|8B7XZ7ac=k4@3mQJ}6kQ<`z z0vv2NnSsNZZfq+5l=Pw5OKWNfz-RQ}oYhk$#X>(gIaO=hD%K5m+D6Q|lG8+susYGh z&&+;Wq;cEWyxT#2&J;Qrinonp*L+}Z9je+!4M*Ni^SkW5n`WwDpm7J z5!M&`FGQIfg{AOU+dp8veA%<&CYT#xH%`v_r(pqp!rIc|tcY1PE!V&1gJ&r)R!`I+ zua};9O~rDcqn*;^Ad(fX4-`%Jj(e^m9+(y?%1mu-)=bYVAQEJWk zOe5NAQtzNBc1u^6o|%A^%p97kV8Bh>H9J`1vsuWqSj8|zLje708d;pwoYRL6D40hy zHQEHP;vnS0muyfmzDm@;o0RcYp##Lj-Jtd>e39m2)sjzJ)wN=!=KqGu-=dO3zw5`8 zv_Zg)7aIF!&gl#1YWR4S(ok#pIC7-IsS&>IO>Ed|b1H<0w;#>YgGP~Iu4L#W|G~l^ zL!XeRvg8*hF&(y9wgq0Lm5Ao!)<%_9^6kg1XO~uLsN~|X{cD`{7sY_T1%q%B zRUB^3re+_P>G;2(7=V16qlox)y2YVU=SH4RA2&)GX-<(&i4EPHW2w=a!q7$3jIkM5P;Xo-Na9tqst@@I!7jke%Aluxb$!F_fR& zIn$VtBVvfI@}X6Xh~c>!=2m$qY$7+SO<+x&r5YMd;Jfm5IrLJ!HfPOHeC0%CpC#b_ zto{GQrJMr+T>x$9|;Fv|KGn&(4{W1kgznt30NLtRWbldurJ4 zN{cBQwOkz;(`lD*(><@vEyy>i^|fM}YMDt|t4iAGY&~Np2&FJoE)WF(YnP8Y3}MRe zo?Wm4iOLQ?CY{b9x$`&}Oif74?7Xx$fre$ltFA<{;Kzyf=D4$qKI-b}Ue9?MVM8?f zE0*=$GQfnB6ZOP)QXvB!37VCLq6kB&buxrnf)BcgI;vX}x;p#)hzQl^P{BtYxatVm 
zKRnbA zCSOCto8*}Pvmk3gqr!L1XgG|5v!jq1iu5mN_Gf}>Uy%DA_eSoBDRS3k@P&^BbRr8e zli#+(KhQaHeS?L&>p(k?mb&`++!g6oa}W-kcK0hRG6ggzJnybECD7#4(DIGuyW@j`^M> z4Ui6FfBu@lLPQ+dTY*!TKKgNy8PWX^l;&1L_AX+>D~9BR^2WCMyqA_xxu_w?*}Mdv zq~&P*^y}!YP;UhbDC7#bg4YN4^b+bRg{u$c3MNm`KLEQUS70`Up5R#X-gEoc&O`XFyb*{s|7#8SA1u?kgL^zsVkidRjlmLtP)fw zeuY%a*xN-`_@oID`&(YnF)Oc`v_{yb^ZI-u0}h*#??8IJDlIB1JgcQ*p93m^hP5>o=K0%QEdRc(GH*L4TkH*pG$WiJ;K06HF?p0_x4<| za2#_3KQ#-y2vn;-Q4s$^?-OvF;@AouzVqr{q$f@LvaHrJF+XZjq4R{gMW&CJPL_dU z7wk3r>O@6QYA#k^L*LR7E&wn35C|29XQ!Fp9uF;TUR0UaT*+L9Ym)CRCmMf)ZMT?h zJeuJ6j?MltN z>TRD;0=>V4O|wCVh8wGs&_ZaPD~GlH?vxEJzN8gw6xz*f`^#zoMBc^@H6#ULpz$r8 zp*-cIB`+h>8SUpmwci6;-hK}1jPvi{Bko?!O_*WCr}8ykQ>Uh@$kF(-JE+NY(bVan z<>9%7JY)vc;aWqUbKmJeMR_6xkeEagAobBbvUao?_gt~Q4E>y6WV_*6yp~%5bMs>o zsaIhUQY^;#Jug7Qv|yNEd_pv}BBGHkhe|6u6_u*Ba%}!REJ7)7jWUh_F$a{97#HQq zINXtpQRzNbQhF#Q5gg0rt^!or486wCge#OhAQaTiP+fw{whskIRUGrX<~jV zv;bEp(Z3oy?}itnw#`qR9AO0oy?WyFXM9Br_L?|3OlFz?eSnpGx=NHQg)moMimp&U zo05V79+%tMMiRg}=R;LEUf??7hm$L%m6l0qsXU$aMe`b37oS>WGO4%#TvBBC|B69{ z)<(D(s=u4wfYq1Tyyh*I`2*%lROBaDxiH)gyOQ)(>qpg;LL#XHJG6XgTzgyOS3D)q zq@_Bh7YEIIq?;4*^j||ORc949fJMZ=+eOeP%`VNI-GmZD05POhNam0KzX_7A8ewLz z5qD{oQODXZJZ7r{zGt2*MSPxIP;agC?TLTYd4 zfy3PP3aTrg(`zA}!jDpA)NfK{q&HG!bi+|))GMRPNKc9?qgzCkQJ)M|M!Ga4sBhC( z4>>z64I~9I1)to5*U!EdXLO8k?`|K6XWak`RlC#whwY?^<`qi=x)5D6~+PJ}0{%bM@MOW%ThHRuH#ABFphaGQM#h<3lBR$TDN`+{aE zQ{U@>K6ms;W4`jCk0ced!`>HmuMsyeb!r9KASaoXy&OfM11_@W(PKnMB3vL z1x=Kb80@Wpqi`TIms9)?ew(pp_O7{vWMDv(!$tG?gUZvgJ91|$bsLSr9E60H&cqJ? z74$G|OXRy~Jp_YLRpTID+1bXN$lzS`4TVMf%NV@%X4^yCga@E>gnI! 
z>jdRn8L#zQ+6D+5=`9tj4jF!?e1#ZOL1qzt-rUMiHNdU5hwP|{gt7U5TazFn@{1633LK`({75G$04|>qBD8Sf zcc?pS;%1xCWG32{p>lbg8L5?Qw&3X8-&i~R>3c7>K(Pup>VeviuQOA)Y^JM!VESSwd!@Q&tbM8Ni`ZCcZW4Ne%7ip@78-MaScOGYp9sEzj6kS&T|(7L6?gSSKL# zZ7xHc=?-X5`&*S=fD&2ATM&a||M51s62p+r>ay0qW-gBmKf5=i=NSZRq-{C7AGCCf z(f(?|hpe-V2kPQl+uM4)9Uk8gR;$(ut^x5-zlIkIrpxRD$*ht-8{Y-4kDx!rtL}Ha zhiSR$OKUtPI?UOCh{KUaG4BkY%O@z@C(j3q(wVmYVq1Vocr_@OYl8N|UIP$`cFwGN zu!$~>OBP5n{;z!*2zGIh&zs^P#R7S3u)=0QHZ=vmDWjR${j|G8(C6nw^Nrwu%u^sc zfwXwG2%CFz8U*w-mW5+(B)!!?^omP&DOyY4CU_|Sdedam5W+wr^wk_J;nza2IQ=|p zPeuf^vOe<=Z+si!HjXNfnqNnom$_^U+w%a-&C+0YEea?$XC$G| zZLd=-F5;yS)h2yuPiiHTNf24qM zit1E$!50UqzU1x+0KvV1Zf zK2vZm2>`9TTp-z-tCj8O#31xI+0C@`6NreKhSM@#9nZYc!x6SILK`u}vnmvR?qzq{ zaC`O~M8zYVxi>Kwc>EKMXF@9sM2fbRtRDU%TOeyYX1Loi^LHz3^633#JZ496mt+eV zSY%jpRNE7i$`M|%5Abm7q3gdmxr z(T6jR67)GK7{+t`Dt9B53|-)eE6y4g+VEmwmDob^GzAsi7N_hHHCU~?JG*?WRM z_o0-{?Q{m@sY?O;NKtLUG+$X8()ksols^U0X?!)gW?%~d!$842xDo!(8?JDK4ioF@ z8}%v@Or`R6;7bxP{*;O)tPwU0=k#yQO!-<|bN&W8_wpbl5k*uLn_L6n85V0@B>Ffx znqz3YCyQQ|Cwr)&+fDMwMu=TuGdwocJyI1SUceANc4a^&^vn!!jGd*KL*R)+a~!WR z0R#y(ZWa20eB6WWxpa|MOMD`Q(J?)KIX4c{B~lWYF`OJ9AnIX7TIGkS?ya;shG3|v zZEGy-U7}INVC(8-HRjGvbG9E%AnzmkqXnsm_!+n{CwZm@0#b6}{%@&@AXSwou(K?^ zzFnVNh4cwI1&9)*206XA4JQMI4b?eCrBM$W#YRn}xo(h;7zCCirp!ckS1R*3nFUw* z&#MiPck8H$(0002ScoyTl*0|%FVEXRmmr$qLU*4BU|R=6nu25UBMDR45L)Z=__v{k z3cr-GNPKCaEgMvhp7fxud|_^y*?bVz=M9M&x;$7()!mosEU<{1 zyRCi|im4i+B_dXCKyUtABWHtL9(Ev{D&{OHOqG$sQ8w!4D()$p+tU87my%aJM&JA) zLrv1yBtvUgxQ=TlNjC&Lu+nh2w5Ybi%iD!h+}}kMOU{#Qq?kiT1gD|n@)$ZgN&KD( z!1kU7Ekfu>A*Zb=$yO~F!oIZuwh1{H>zm(2`v&uNMru2SiB$A?pq@nqcDOo7=QJ)A%+~a6N><@ zV(`8QH4Yv6H@>|CZK)}4XUQ!>qwFb9TSQL(I`Zl=)Jk{OsJ2?OdB_T;&&`@6tdM*# zi}+P6?lMRN=HYj=`u};=BVBXKA4RVTj}j5YDA(K*`hJMvDEf?|8@GPaRbl%cAyDb~ zi8ypM+cRZL#?%?Rhn_NqqT2}5yt}(noSjujb|`?YK8|83wFJf+SQv_hL=XW02bh9% z5bOFTi6cxINt_Q?FzcCG_$6^9RiH|#;LR84lIhiYLQEo-hc$Tyb%kpghh%Xm(8GBr z0S*}&%OEwTV@((C)NYBKOPOsEfF>jsPgNet2xNKAqk<}sMwIKK0bD4m`#OjZd{b&? 
zA?-8M(zf8)$&}AhD|A^otYbZ)Bs>b*U{5&n+|iti9{sM%?rc-o?UfG;os?N8BD)XH zRw*E!u{{|+PxwHmDpm?K(4ax~t(dJ|bb5FT9cl@Aoje%aoB>gRC^M7##3~J(mdt(H z(VQXK6%-tdG%j-kJVj?G@>j7m4Li}~e-)pO7AX6Nh(z+AcD0v9qy^jN2&Mzq8wCdJ})?VQ6E`&f)%|b59PoNe^f7GiIq;N|jgDbhfLlQwW zaZX|@gESEA+;7LQVN*+XK%3&_bx_1wP<5*<3_wvq;gSu_sVuc6fsLpSd>wDBiOiUn zloguIVK?lxn4khFL4TFGJ)+0MP;XAT6kY5nv=vfbkCu z5gj&UX$Zi=ih##byQ6*|3n6OoN-xtJ%K{Y)$kQR7=zG~f3#d8$V7L;^0U=VC{}->n z9UA>n77~+cAp6hw^CZ;VQfd5iLt02-AJ1}`#&s?1=kR*Kw z35bxpoFRVWhGP>dm0zLav;!iEOWJAu3*%+2k7~LNx7tUY=g9Pvt=kysLZ}u!=vhe^=TjJtnJdKi_Sa3x?qm zrwJr;|2d)p?Ky?6)6IrEa!PzpOH3hM-RL1AzEOUlMc|?h^eV+rC`kQ@(|aF?9yJ8( zr4&vWi2?^9vI%~oPDYPg4~pT(Jx0l~o<;XsUFxxNiD`@4X?hmV%C8YTU{l^|dMts$ zvp>4b_A=uAKUU@nY0lZdIlR-+Z?@h_|F`(1WwoRhyf5d6eE(ApSZRWM*2Hn#tg*Am}IBF5|{;=39 z&TsSUkC{kf7S+fnHqWKChC?=R@h<1h4}t>XjMiaeeedbJ$ZL!_yY$+j*Q{c#3Z(+) zJbfRj)o5-uFOefTBUoxiQ=Fk5$li9gBl+zM0_f3XMA~9&B-AA)dpE*pz>v$59C>dd{S9H+!<6%t0-Pd+Va;pD&5Nsh_G z383JLBnfC!+fkHIj$c+yo42OSJXkKj@?Ugea61cWqrH9QFiP%=JzT2s;cz+!y^k46 z#rYqW`gv6Eb6<+2K%KCg7GydgBldrcKU}Obr&Wg(X8G|$(}}AW_dYs@#8c}4t#)rk zKIx96pnVFMcrHe~x zE5^W*2w@=ejC_u~d5$qjw|<}bL3uNBMH5P7D13=RyPH6&!ZWUjh=F^x7K95q*g+Z# zE*r7J>+{pf{2q^7^}TsW9wlc3D*}gFi**CX%yBQ)_i~F6QXzr48Ke_xzuEyPe^L7m z^T~#_ujWXz)UJC{3{WtU@8}RJ4daw?iA9Ox;pax3C?1vNn=HaPg@-Xjk5_}_EC*)u z%Ju)gBaHX8zFM(CLMDWgyYb(Nr&kk$fkn=TsE$6f6f z&JUyZz#-)Xqm3uh=t$C!zS4)(r=TJ@gUgn9wRbey5LM{vJLn=Oz=wsMQ|9YzpMkM|Du6b^RnPI?a`Mp8 z^qxNNk*Ed1B!D2sv?va$EdM)APTPc5ueVBw#L&i6(@fc)>friDQh6S+g`2LQYR4*p zI7kI+so|&k(%`4MudtMbs$_$tROmW(HTR6cuh!cD%o*mYbPMWqiW=O1W1vn3Y zNgcWmwo^?yyBXR%)#UB|AtS#dSjwHEi0bolvv;g0 zX#Sx((*8rWLmMJYHN_Rhp^;Cx;u@H0M;zeS#Up>I_T>H?bIjN)on~zScAiD@panH0 z!qb84oTlUuDK+Q;DK#Si(%Q5jOggbS!osPnSUQP=lNw{iNsR!q3$T(ZC8b)J-P>kA zkDh)idZv@DvpXFZX(H`Poyk1s-Q_7zm=yw0D8T0(Iq)9g-Jz1k>lspG5w1LUd0H9K zj~JysSnKuM!uzIAN!G7ysdhtOd9n|#t$U*wJFY@%kli0rH!^Q0D7C?XIPwdz*-&ptO_BG~4oYd=U4Uf9?*u%Ee8R^0xIeD<7sP6hBN!(?SU$U< z+34R9fSHPO5y8U~vYPGt&5drr*2N1R=zz2jZe5;4?2yaz*!ypf)M_6)8pu}3p#%a^ 
zoEK3L!uXSfr)4^7w{Di<*tkP;>;J9$*Qp_y1eS48h!6WBz($+-0HE@42k1%`d2XJ5 ztu30V7*As(Kg`#F7yBRjOgU5UA=rIE30aEy3T%vGaeVRE8rRi>Ove;{aBz%xUlulw zjsA8OA_}4aS{0j3?1o$y16x|Y{pYNdMPW=rKRscF?~?}tW~^XHD-)+WFnKo>Q+sOv z=gXR^&)}CX@j%h<->i0450hfO_W%qq4@R{8PrfKaU41!&v;tfLU>Opof~jMDfe3#b zpB`gtzVfU5rj@{x$?Gg|rSvbTfbBJ8*x))*30+nOJj(9M0>u^3u#7)po{5~l?~MCf z^|Sl}2<=T!b~TB*^7A|!79c>TlYYjPvu}T2N-jjJ1Uy zbQOWzi2e3)G{Tct$Nr7bKV3)=(vIXc{vEdJbwz&0$S-SpOh29%b~G|_weI>T2Cy8)Cq*~H(Nl#FR0 z*q~>YAPfGE;;$FOt;R>Ogg2zYLN>g)cYOW>8KO6$c zZjCUKm)}n8=qUilZ^g$pRfsQQ-{&4lX;Lr;v&&dC@@xvr*{{|cVwNXht5{T*^%*8i zC`%DO%$de`yPS4xV`S26}0CAS}FP4uLt(67ZoS{{OQ5OZ4xDIbYFf{rM;>Xj2Nm!?rYr zS!5^M;3WWzY3evIeH<5mGt!x6e=FA?>B=Bg%rs&MHdvM>Z`7b#;&)g(`hc-K6AxC; zg^##;N4r$vS$MPEHE!}Wp&}AYSGy;3ZgV&)UZFrqsHhb6K5?MBwGT*xvSZ z^6>lHD{qm0*5Wv!opoU3BIx<{I-qYy4Nb#iO{PKVH~&Nj)y%Aye5>1UIY&EU7br_t z8?p6AD+MQ{!yd!R>|WmQ^?1OS5!-{$sEEU<7}_H{j~{4PwR^b^Uw5^~4~Wn1a$iQI zbHyyv8fo`}rJF0b;)h*1DXdRGyO)+o>L%SA>9M;!6}1t!lz9uTuu3vN3c)p+VecIP zD(BAH=3H?V{GrHpgjG{33|CAT$Q71@=qcG{XKj`Asyf!NulhVlxLSJMxPJrzV^r3KBWifN+WYv z<%dYStL^;HC6s!{$s|O&BzV$;SVYA0SvqgTZ9i6^njczAx&>Lxhq^?;+OE8sPCiFL zkh`y&5AGzTVauvh6Pa!>txpV<3Nz4sLPD`vlN!iIfdr%{C=}{;r$Rb{50GBNC4ZcQ zY^WpU$i`1-{syI9PEZx-PIpl~3#M#Bwt0ljZF$2b2&S|S`3xw9LliG2e_o*EUZXK2 z^9x@)Q%gZ01Tw`aWDmM=L5YG1txT;`{)5ppncVSx;t#Likbgrcw}@Stch!qG&syGSiBwnrZ`KW zcy?HWh_l-GV2q^S7G!&mxcxJ`u2v(qDpMP%fUIWKzkIHHec7FrskReKIGZqx*mHs8 z$a*R4JbxB=tyRwh5NDv96r!a154h_X3WH`Rl~8mD7!5`7+4)mH6ra{(C6T%5MWEvy z!-M#5zp;dXv3s8qdTL83(UrVIckz7CGV@wPP&-BoYJz`A$s-N9cpy`Ta7iP4IWhRM zm&+?T6qEh5J=#J+2T%5K}zC5JZ-d6oK-t-+E4lXOIJi2sAB?()H+=VN&`h&)y|*pKy1sbo!58lF$P zKa;;eXc=FW<)5^wT!Z%0TT)ch)9%X}m21QsTzkqj+~!MM^Kcv3donVc7Jk_i-b-pF zizOx0$)l6+nT;tu_P6QwBU1KYEQ2^xD;1pcw#abWTy1FY>&&d1}H)#E=10?l% zF}I6$55oo{Hcasz?HTy>@Zx6JC!(ORVZ@IER~g%aqAjy)_GeRmB@)otv@}LwY&I?V zi)&;yEio}sJG<~IWL@fVv*-V_mglD5M*IG5D?YaW>ITHc6sM=?^p6mqCIgc!5@iod z3%fwQreW!%4zpW-nwHv_H;V|&UA!Q^VLbQK%*txTt)G#W0um)|`$jB{A<+qlUzqKY z9kmFYS@1;tc2iR;IRb&Oi}qBi?c_yG`!rfkM|pc>a=$9c_|0Wgxmx|5?;ElU{VT^_ 
zzuT8sFT7cqX+@3TUku+6=Zhd=x0B&mN~#$!Wm(d(seBA1EJ{nQGcERK-W<-Et(+B4 z6WC%fFijw@)jg|p6n}*M?s_?PPiqv+MA>~TbZymc+#zN)NjTf5%>-6Nr-LLeXT%BM zqd!Yg3y_3O3@wUmAb4!?j`xVFf7t9f71K+S=TxluY^VP%tQfWh=WNEZqZ28<=nDu1 zCc8mJw#G-KV~*}uhgqc`ORr}RcT3x?mL8xP1O`+e@fRyu1+n2I6r~-O3ou&`zI7xH zyqH13)a8O74w}xL(k*EEbFAbCnts)#S3uJn>|j=C38X~ygYW4Tqr>xVAId7~1Wlk( zs}-zmu#V>Hc+G}4clVUb2KGU?8*`jfUS^KYHVb7Xn(Cs=@}4DIywLVJBTP*M`jC8{ zoD6s{WeKx=T|YuMa+?pKFiU?1Zs6F|sFnTCFr-3kb~_~65d7^$Nun;O^8%YZJiS5e z@OHo!teqp_?VgHoGqKBq!xyJ zF||iE})@t(21V7yQ{vpAr-P+iUxfp=u@!7R$Or^QrEh zEL$dr7oicRkM$@|ifO9;@HBmlx7G%0-+!>bNbU>DJYXCP0jj@r94pY025bPRN}?^O z2}ip`L}=42AT-$YeEKX$AD1)#vl`^Ye2T1)Kq-0lonwPk4_ZO~{c>--M{m zc|oOg;u~Zxzr-|KVarrvrbqAHOuNth)4I)H-R7-s^RI(|ctuF9h%B2y+ask-3fn8m zBF2OH+B8k2PWj+3lR}2kLf6ve`RGcA6sgR>)$i5gZbP}nPGLbbp z$OdoF=qU&h^k#xw2+Bi}+>yEDEf?~6e=r+sG4JhKHLg{Ewe`DulDju_DrIs(rRcG? z^13zJeF_qTSIV3DVhMe3Qrmi)&pKD}%eAYV%dD-dylRV_irhygpX&D*jncoCb^AJj zfy0DFd3C9jyM?j57~7Mm)!4~HDlPf!sHEyXW~6)Hd@WPT7&l|r^mIA(p0AXM$UGj$ z3ISnRxDii`CUfI^xk*FA8upzzim#11Ch48E!MD)T-2a(>M!@u@Cd^5D^&=5SghA_BIw{tV<#1Ce1Y4 znzY%_At6Qe^bQuPgz8ukJEm$1*|RX&vT(c5j!af-G;ed;vF+G)Yz71b-khdnjEE@R zB_v%=l{I`tOxXunvgOXwE|cj_rq)S>ZwDWC9yS{Gb@>{-mwR}8mwcCe++aXJTt%=l zr_CvsrDR!$#0V!$cPzOIqf+U-tPwpw=)4qK(ZI$-LszjPU??mq8q3^=F0P%O*{vR?E2lfirV#ZQl{#sY>D=gH zI&DU;(anRV zd>vMX3>zR^tTN)VZRWC`X4k&@*DDRIWv*4WCGTTfkHmyQ795I21w}*w4uovT08MOQ zq05_%SjJn%udK7y`6}y-m3r#}QP1r|c>%Dat8#n$#k$W?f9X{$!P3x>ZTqZVjP3$Iz&Ka@?`lug-?5k!IlSx7`!P*6}*c)&p60mJVB!vlr~3<^w$u)ttLh7*AS zaiAh%fenxZsvwBMNJL0PAYj3QP@oGYG(Z)MnOMLAQA}8{K*yp%vEcvSs_ zg7NEr{q?_K{K&}r-uJ%uz3+YR^pPN@fDA40n zvA3dEdsiD3>lh!f^xbQB-1PW*4Lgtf4qQfq_j>z!`9A2iT1%(J8xKMGx1quW(6eMs!1{5kND1ZbNP!K@{*Fb;?_uv8x_yG$EDj?8sp@0i1 zF`FkEB%GTNu`&zVgL~|tbGRg9JgADHBDul$(M4(TQkA?+(&E!-?^1~X@OrV)0U5pN z>AkB+!(ktg#IEpHBuU!y_O_i`>SJku+PvGeV_XeqLR;|^w=1Z6b)C}zmXj&qsrJ2_ z^l9}e!#1cGJ@=J_lyhi0#m)%<6m?x^%Uq8N_V?kMAcAHonnxdAVuBml4U#qGT2VJ) z8|d<-(&@oU0)2t%1)tPR&QFOpqrYmAnl=2~gPR$fCHM%c`%mGZCjv3W?j|r<3ObPL 
z9M)oInwO&z5==%_92Bl&M(&N(AGdL%2SSO1@N}gTxDKZQy$;o=O`U5E^n5;CAvL8Q zyc}&dv}hI*bR<%Yuel+|wn5Gi8`8MGvk2F58V{f!YR=LU`r}CT%BWsZ-ejQ^g*f@n5OPjn_#TFgT{B*u(^4U+$9`*r3BJMB z1JbuRgjt3qjE|o{kpRYu$s0%7nKpwbPI|sQMBrQ)ggCJVtI^O<*@36rRdv zVh2=%I*m7(gtY+cc2pPa()3O~bOev0YCNX%RMHB)luY%MgfuH{P~1Bq{qMCa+T-}w zltyW%ji%O^31SU81lsfOd^v<$#J+DzIvb0YVU4N-JiNu9B{)P3UqUf;;}Q?t_JjA8WEli3i_qy^N}#q+y}#-NY4LB_fPMPEIaP7Y{~%AS=t zYzCam=mxAMso{XApT+h=9W8_bd{RU5#40K95L}@6*7sKMb-`q?C=;ofi~4#^AXh}0 zA%-9`MGS7HF)iU^vWPri*9GRBH14#GG+e0@cJMON2!1h9i>x$lih~2IbLC)h`ujJs zWN-%R7p8$y0fWiCB*>p*&1&6LlLD4%qbuKoVmv@r$_lsmEx~;3Z$4K(E;KOKMJRJ| zNyet3`L{tk6A5|H6^7bFFISJr>G?1VkV2CNVVbg?T&7Ajn&AOU!4}Orc@6{5{+FU7 z+`-svXmSJLeckN2pEx7?28N~A260<#HD=>b^jCVb@E!{L5OF7n@|?4{UL&`UE-S-q zILTwJ3?%NjWV!mHNdc|Qovp!{*=TUEKC^%dW<=(iILO<&cnY-@_2X)}2FB?)l*y_I znev*HlyMUbrpAc`gfs3tC7;|)-hD};eaM@6@TgKbu$&tK`Y$C?1rj1HBF2C=_Ob;TSnrU!snzv}(mz}QA2SLj$C{l>x(Bu;Z-K+$1)gnXoPygIjIk?BDyUkR@ax_x03i;2bO1~pj$x&P-)iSp@`OjRbDjT94 zjeMcLr_Xf}PSx9Im_ND-O6Yi!E%6nss+Q!umS^DdgZV4qwWoKN>=&;lhKnX&+ zT5gy!sT+VzbeW|zN6VT55+Tzy-m<-nMWw!xuA9>O1he` z48AD5@>)%z%Ha9p4~T@7c?r-o#aeo)WvoK9{Q$Or3x@jL;A#yaa?Bv zb4Di$jkL`Y!yadQIWNG#NmR=;aPv78W@&m3M&we^w9@_aG@^xzEP4E=Z9eYV#M44U zUQCR`I|5yKayB@%gdq(T5d^Y`%L!e1LfNLM+t5Tk<*rDD5yBqO`9I{;=8{ODdq2%| z7W^uSg6Bn>>k``#S25y`UNfqT9A;2UFAPm#RI961zUqhFW6Nz|78@$kaNJgHapb1X zM_Y!p=Be}}<8Xa#l(i@jxr-Ym{!Segiy$}f(KsXwN{TySQq(o2^0}28|GM9t@voE* z^hxJ@ruS?7WUn<>m41Ya`Do7%%SZKJRJCh{DRfUg&{f2;bziGnOWBL53u%dJ`F0W} zA!OnsSagKlb_J1Eh3keoI+o(Gl}D%g5EC#ism=`;O4WQ8Fn@Mtb3j%u#jVERd&G0t zZJbh?0!}5}a?}>hHg4=VxVucw7BS!I5+Xyq3E0 z={utpO@cfnMOOM|-kSThQX=jSn}F|8D>vTcrxheUF^v#ox2cEG)5Uedl3aUbAgSo< zU4wmkDM6f0qBiYQqpbT1jmZf7q%Z8NFkjs_ycr{yFH@&1yG9Jf0j_feu4KN|n3jxb z7{pmgx~7q5ls8;m)A-(MT1$&Kr}2M~xlx|pZ_Hz=()^LGd0+q(Jn%esJ<|^cvZ!GN zfMh~NX>vDJp4LB9;q|vzVTU6he80N^11>yL1Wci$r^TW^b^dlhA7k6<>5z!qah}_N zqV2h5Xo<+zy!jSbaz?sLn@o)7mf6YhBJM{oKlc1^${`tCG)8%sBcsZbusDblXMv(U zgo2`N;=ItJ4Jbr<%<6^gQ(c$8R?+l@hfc4}@oR4rcGdva$h{N(4C`qnq!B*)B((6e 
z>fLO_jaTxtnoim@JqdBqo;tthaY28cUVmGV=%&bT=BBOR4VZ+6D&bd^M4yxgO zn$Oxt>A7^!z`ldS49`pD@OvTi3US z6701pI-T)6=_)}pOVQ1_T^mkJLd%0M7V;zv6(e#3(Fs;^s}t07>&Aw)DoNg(pvrL1 zeR$i6k`wOl{4z@PJhbIePdh{Loqm#)+DS_7$8r!h;KGIZxq4l4LD9X3jj$l~QG7#!1Pw9xLfZMTeIvX-Fy%cWfP4eF3aE4G> zMhIljQr=Hx`1_V2r`$GfMPx=d4wd!^5iFVkv_G(H1yIU+=5gG8a#GC^ZLtR#AsTOS z+uN}1B*fXjvO*lRlWESaMMD%wH)|F!j0>QNM@6FkCAcs@4O77SvxHH?zL-qiZM13B z^B*PUpDM{%sBx*#rDh=ZWTG3(Egepp4pNMAt0dko9H3cE$4RuH@YK;YMd`*JhX(g#Q7j!=6VGehMDI})lBnY|U1szRLfoyN$qt-bWW?kF{ANQHb`L5R(1(_m160*aPKO$i{S4aBYa zvw|0NTG|6~is(4nB15rOF*IS}#p9rs2|=J@WR5hHj-^RBGWduU!+3|)0o_41?iE6Y z^YJKz&T2IRxfcJVOa4N~JIqwphZs)^B@=y%sa+MZyi7Ony%`amQ`B~<#lQ7%?8t2} zjBiXVjh(9RxOog%eOSsFAI8A+`lQS?+4<2+Wu5%Wsc%j=H8YmR1&b{ZpNTLNX9yGv z_`Qi#f<^Ht1gA#vha>IK%dTKygb2qS^X#g*Bz8~f>|UbM-QPe#5vLj^Y#gEm2Sz?Votom)j@Ml7&;NzR0fKWp=6+*DaIiKRE$V6 zpj43>AQF-G;Jg5+0tf~-Z10SQP6elfNkDWMrTjE;h5p8i7uDDr zV-qVFXYp222^--~Q`w|sf8j{~8sFG5VjjCY*HUVD3DfOie@zNYYHtt}G}?YC6$o?-`=1fxcwRK4$kR z{z#{ueqyBR0F^FK4nQ$V0@4^rneUKNzH4nkcuIdLXrntx+7_n~1kZJJc*H)IT`QpS zFEgaRzx~DBwM%3$|>o;{xCSLOaycz+>k!TB(fYg@Wao5ldA0Iz(;Q*8L??e z#R{7M;!_c&znF3YBBfT_djIWXnI=H+s3nL{{gTp@Z*z(BJIS~x0}<1gRltuoP<)C2 zQCN_|46&)A8CG95K!MAY1flFpu5&PuY7ddp#w&NwJ%D{{JR931ol*=? 
zNEXg8)h|#9m}-IMKWT_cO_>3sShHP@_*Pjd(a3d>6OBlHj*zNvlf26JEgX(Ru8l=C z^I71Pl1Q(ZqMYEqgDKvul1np|Lf)wks|2Z4`Z~-xj6F%+our-GToif~&e^^rVkKjg%ZC>u)#M9x>En z*ckLSq@^X{DbmZc>8(!$BmgawikciAqkqI13FVlyE5a6`3W2Dk%_h_UfC@rdG4&h} z1*B5{G&UCElwg`SP8leNF=8OHwgQ{I+_*BpL~@yeDV1J&?h*cU5N;wH7|~p(HrBTP zQDLY;{!$)~^vqPwCD!;lhN%FA65y1TCJ>nNg+TMB$s;dH&F~oAv0ye4>~a*A5G#6}9r z1p8yBVE0D}!!R@dTnb45pH)rwKg;74F^ykUKz)@xXNn;yeN+z-^E7Y$A6F-IuE4A3&R*vwL3!$}Nm6G6x;jBzi)AU*rUOuWuD zz3YPfmnK5W*QtYiCjNOo5Z#3W7f3ET3aQwIldp2;HK>KZdP2&< z=~M(qpUWz!Q+RtvF$~SMb*|W9f?dHU(Fuo#VM?x~ia2S_;R}M(18ju~FvTrQ6#xw> z2P(8d)>3XZwVq^kbdhd3J&5$5gKS`c3f9z7q`U>pSycM)8$h8LRL_-Uq_)YMB4I30 zewAuTdO6zJ&;1}5M=F8996>Z;5%e~4g^myFusvs|`r#B?^j|;q(K1KR_&w+@I-+9q zgantuQPg#V#A+awxSm;>#^&7-tdT5&#|E*a7^w3Z=@xOi)c4SDJJX+kfDak007}l>PASH(@kz*hPZ2b zF#|4bpmWG&#xmT%*mE*J%Ays|CDus0#^D%IkW!?WaDv4jSh^+I;n{e##E>O)a1rTL zbfJ{=l@&-756L4R1ww)+rvZ7jvV#VKF+s7JM9P|sP^Sf|0-zd7GEP~5QVr=zbxnF? zY6;coRjj3eNP~fN+~e2<;AqRPjy6OU$va7dfHjgxe6h9j>8FCD(?l{&Tgt!_fOx?+ z528(F1x=NYbnt|UB=sJlBvvEJ$q) zqP-meDBwViyMJbtBqs`1K>SR1(e-vc_0TnBQO2hi=`uqkC>b(QxzkNaf_OH_D-8Pd z!J21UgB0dTO6|g+lt!3t3S^{ewgNrd*(j_ED&X6h3Ow@Ljlfon>`*mVChhD9Q=7F?EccIXE zNk8Mokre@SBuPS_YMzVmTh)Ht**Wc`(Y5bD^d#@oRYO$}OjASMWPnm`laSoy1OF&~ z68f=}kc2Qq4ro57BVQ&lq}L!*huD=;tS(m31nYb>bDQCSL1$!qEgnJ#~I=jV19oI%OnEqqJvHks^YS z-h$mZJwzjvBJZaj^RwhJ>Hf#XYmY=%h?lu4vJv}-)?ik6DmjHG(uQloW9Zm6X_|OH z9jKu*Bqi~xTiS13+L(YO_|`BPG>Fdos8Qd>mKB`S#)`;@9c zeZhrFv}8(zU5c8C7DI0UOFfYjEu$4r*g*1)7N=toWbC6$%NtC*ktsb08FCr*43Ctf zMlD)?62|*z5}8AcrB-M!T8fcl;n>9e=v%@$EdeE#0o($%Clt+yVuF*VRUjJ0u}A{M zWHpFW!@K{(!hDmN6SV|K=3s-&f$ zwreS=))ESM6pX9_ik^y8BoH$I5Ci7_AN~x~P6z>cxZkhh768m5B2w<{H|5uaiU0rr z0m20)1^onPLHJ-8hUtx%SC~KNh?8z{wWOl`3;(&{aqwX<$U%NiNHieV2Ui6ROcWl* z7Ybh(3O|W*z}5@bGKE7b2r3HmSQG*lMGTTCM+`+2CyYQe2S*StmJd7-Dhz{_!NjSE zgPVkfDiDeL36~#Aw^Quk7KHD?SA+oy7CewpnBclo7!`v{K@3`|xH8BAUDbwJoW+M% zEsC)qh{CngsrXe)NagfFx(4YI(k^Kt(@snGPD!ALL7EDh5^XxwhC*k7g(q}Th&#W| zgDg6BkVHAgVob1tOk80!3})~Q_o5&KOe8E+U=ihp3PmvtN~ys$sf_Z>h0mB(RjWY_ 
zJPa14ic)7LJS-PgwwYIsv!C*XQVXq4K&60G*8N5nF$)Q7WBVOuP26RRY1{w%P z0|5tD1rDCi#M<1PVXYu4hzt*XC7f)Uq~h+KG`w7UwN_4c#%lD;WR*=6*)^=5Fw@K6 z29f#{u&8@g48~_?!ioxfqPf%yoFuFw+w5tHE0Hi9xkw2@7SR zj8|_pi=j|fsa05vh5JCDdP+o6uc$3-xKg+b@~RX8i=U=AIGQMLjjnZ^m*X-N!Ik$X zR(LU(@5&I2e>-Fp0xp zFhfJrih&Bwn zjNQ&Iv6E;76(JCjC-Ow0q1BHdh$;vg;s(Sp;M7ozM;s6b!Nbrn)e{Ezq61U9hC)R# z!ig795ROPIW5fV$azNbSpyUWw1AI-=Off(-s}ls9YsO>oQ6Vb3U>}rearc8cgA=7V zSZEG}LX=YM#ed_iB$cE>6vcbA+slJSLF>UdOOiC97oNk>TWkIe3Wp=M`Cn zmNyb-pB5*YQ-UCh$=Wc$VStH=_9})X6skxcB#wZB;cG>lV(s;!cd)R2ByK7W5)N)! z@TNnj3_TY1yQ$Dx2Q94?);oi_!QA2DKlI3FG@7ciNDxFJe3BTfcx#9SfBY~E)@p?) zgr$h1%(3!^QiAbAsVN3YXlT^J+V;Z@sK&pd)piPrsT~Jdkcoi`9+hEAMZKa#4pkIY z^9|LZ~~|OK)^# zHp0cBJ{L&n*`O3wlAt29Y_Qc0g<`uxJcT04L<_p7CVEzwZ$7v|OrmTCcBQ3iAc(oM zlza}w1eZHaArpaA6yik@Jf)l#ioxK7R^dP|@})&Ce48obsX<2@CgSJa_!g4`QHaV1fgr+4xm$7jazr0Y zb}&%s#1!FIBVeCK0zs60Fz6L|D?yl_QR#~E&n+B=5{4KM(I%NF23IEvFR0X_$Dc6ACQ_F}^VTL=7njg+&<*Tm+RWyRKG)-;awAIK_%q zI}l?p{1@(rtj1W3y-G#4*E)>=1rk`G0Uam^z(9fnSik}W0~Anz3rJuBMFR{lfCVH_ z0S**^0EGl7P!ND9$UJ}`0SG=oOdx>)0_d8lX_+9I+~jd~8b#x1j?hhrrP&1QQRFGx z_$rUq({ZceX?hG+pRXX1Tti zx^?Z;*0-s%Q)ey5KaS_d^J)$>s*lF>$#LlM$99C{+A*n)$;X%_btDa<%+_StOw@tL zB+L#%Y>Y9BXCvpbMVBSSTNX!NWt;6F9W#_!XFV*N#nD%lJ&HAZ^J6ZHUN@80x1Bid zE}K7!Y__>ovz@axx3m!&b2b~XtP$c5Vx?pC#ztm2%lWLrDkq{{-Ie_-AIoPoEK+jz zL4!qz{e#Y`Z0u-fS1#mS*jHAn~z(hQPJ8k3_i&vXx zNytn|2w@EmC9%_JIPHnnbUxG11fv~wNtQ(}i(saap;7f{u*O*J{fgwRYBX-DnpAn@ z%Hk{k%5xpt7a`;^jxM{46pgCZbPPHpGPM!OF*hQ$64mIG{8Ddd3ZH{=awOV~4&Di! zkb9(&^zWR)&D4^5sH_7mTh|ZC?pwzytE@h*YS66%Z#2j|?bBpCa`*o2M>{XEp<$O)r^}W! 
zI%$)xsXe_0F{^Konmu)w)R^7rjC#_wgldmGwkGJjCh?eT5;9Jb1E=FyHuNA9jUmK= zRvLqe#KxH<$B{8$81W?pU(}zRjE4=Et9PzqePB|$`hgzne30E~ z$FI*CVuHu~?cc!TBg0Dvsy0#%#2n z4!ik~2bo(^HJL1#lXRkqy~v|B>_l$Rn7KbsrcTJ!Z$7RaEi)m%@!QD{T{O8Ylk*LG zl*;7pFf#qzaVvV@-y?Bjd4$usaZ%P777hxh5NQ}o9Gs#T92AO!lf?*$iXWoI8+h!W3sFKVGLeZG)7aE89iFvBSa`-#YRRxvTd7#8 z^s*wUR4NfJwGafNLKq^-&|oEpN{BeryCEC8hde_=DF*If#j*y)2ZKRDXIM@Y?+wKi zRmw@V;#Wq+hohYaB#J$csKKlhF`S?iCM?9ka6-dk1}cWvFx+Oy;YA!?;f4Xth7VN; z!i&#BDX$>BL@5{HWzZy06`rA;N-z^{;=*gUaWNI_#Gnio`xOc`5Qa=_7Fk~5%IkcO zH&pTbT4u@=f=H{7FEGiMpUMvc5(1VQ-e@SUh%1(GFXe8B$5Ifsu|kD!C<7$yXDHZP zgsr1ZnZ0Bm25B3L!B7_v1>f~ykS=COrS z%)z_eo1%@G+GI`1%OmeKcB8SWXVO#h)b`xW6sObT@Z0H3XL_zIQXObaB8a~vccXMk z2s;wZ%+h9ANh-5iS?PAXC@FC$OPM4`nevqQ%qKY{W&9o1KsqG-xUWfbSY=jb_%TOA zVp)i!`Y|pfLQY6XXoU3JAT!!Xt_Iym2nmuLA0zRg8s`R)TsqI5nRt0e=yr%kZgzJ^ zigsz8vpJicot+&yn;D%Es)2Pzd`8fU>RD7Rm+R_eG(~6gQ63%1e4R|i->ZJVA35~r zH+O#|X{7lklgmeLXGOH8l{jad)g!<6dW7cEbsWT%@K54LXsMrbr?g5)w)4utGXKDiRvCq9Y>YMn%N8ktM3apO{A$kH_Cx zXS>_ocXGe9AC+BMj=6RlTiQy7H&pk1lCK;2>1R$Za}pD4u!>kLV;b$)a}r9km6@H{ zH90wC6SLKI-*%Jx#=ke3O=`^96df)~G4>Whe7tcC>%|oMcje z*mOoet1>Gyt09ei5567=>zTJFp!}OoNXHnWWrq1WHJ7yg7Rk<#^Y_ju2 zc6(Rvxoxy7JI3i@y`za}6w+PZi1@+sncPinT)k!{H-6(}6VdCVWat}R{pwa9R=gh0LruooJt&v$~lHlw{zFD6(OPkB*TJ5nJYcwuvXP8TgnT7r<36n+5fP~xCZZCR5lKLuSKXHOjyp5E=!EL^ zK|?pSPVxzc?@=|EZ$5GQ?33QWUdiWN=b4bf;ln$;G9DpF&dCpJvvlmAKD{<)%SKM4 z=|-(N@4ZUHYSQC}M4EmMSE8GS__<+u&B7UnhFCK!GPGe!tXS5LkE&u(VO=o?30p!U z>}3gi?3}S_^f(t=tOrsN8=k?E8tA-H#%mtBFppPqbl1GH8@i)IBOEzJMpvo_o$$=c z$bB7Y-+kPVTCYb$bu?E*>mB|BjaD2)@;0k8pPk3UHZG%=QIciZxSOox@YdgMQ?^1| zi9gP@;)knN8f!bu#@f8kwPPNlTH5BSu(D;YQq>tR>r}1L!}}1L5Djd83~{DychOHA zqseK|i^HmDD;@_wXd@aqM)gz;R}mpcA2m@DB_$G0k|7z1pzFwbL@1{Si|BYlG!ZoV z$!Q`27-b>}2pNyL?8Zz86_v>+H3kYEU;zXWaKHc{;D7}T7(n0w3uNE|7AP)2wt<2I z6G(vI02de{j36!%6j-1DL4gGd5ELLNfPe!K6cmgC2si*i0fK^pfuI0E0fGVq1qccX z#z27t2nqmzpa4Mtd;|aj1O)}7AOHa%1Avt(jn%PcCjM4fbu(00N7%xd5`u~5~tqbVx0o%QU@X!k5CG8I)-S&cly$x5Qv6rJ2eM5jCv5z!Ud-S0(zeu(I% 
zlacJQ)zGh{XX&zZlP`xp^g4a+t#^K^-|CKoGO+`X{{KJyzkmN8&oTc0y7_y1;rs;0 zwaj6bb99O6XQoF@`lE?H8v5uH`nXx|d^{gs`RAYcX1@8EwMwhOvR@^#%UR#9e(R6@ z`t|D*vyQuLpE=oQuW3%KVo!DRxgM>zy6mc`?{SLmbVU8FCy5?u_v4=WYme>woSvqy zfAjgxd-%&yWb?V*&_%ELoPHm#eKcennj5$Ona{k)Cbr4+z~>BA6=kMzo;Wu<&Mz}( z=9!6v=`3bpj=4);&1GI1T3Q-4f9AZ(a?+hXT{YcIW7C+}X-+0hnq<+5XgoysgAfrB zjd(~%-Bnp#SxxlUQQ0Wc*&dIgyNQmTzo8p)oreAA^Ur77=WF@QXOC}u_BpSIJx}#| zo4dK0Go?AxJb8!aLo}PwjAla1GpC)M&t@~%WL9QoW5t_b?f3rRd`|7L{qfY_Mi1xE=-Pel7S*mjWEbAH8N0I1+U{z@ ztLsS*BCH8(Qmr~G*{G`8{aeF0bxqw5>J)p!p7ztzbTY?@V>_C04(yHZ>dx|Re1?W- zSyt{Ctm(L>YFzgGsL`0lV^X9_^Dz3jqo47|uK6f=3_E^j#>6m3C-a`gJYy1OF`Jjg z>}9WW*(u9*d}Jg;vZ-n1yh??q7Ul6|W6D`qY!)`@lSGL&>2pqgnlqE{WD|!GA{uS@ z4uALf6uu?GB5ZMv`7vWG6}+7x+4X97R@Zu6 zAsyPa&Q*tF{KFcpvrByS+4o*2d%ZD@YOn4+UqY&0GkYahbG5nK*d5M&?sJ_hX<9ES zTQoZ~s-x98=%U%V)*>{sH0#u?R#hxhy~>7KWJEW5xBli^mJQ8To6Yv-u-axCoChZ$*UQ}nqpnC@EJ@HLtqf zDrL)4w_0tXaa2}o%Fd>yn%SAEnd%hwSVtnH-qNl{lMN$Q4pK3XdPk+YI_mkTWMovb zB1)nb9Z`vhjHf)6PC{7y+ffsRfh>A5->QD()bzGtIXu`B{R~m=+ zmhQC^G<%ipt;)Lntg=(q_chTwWPXSK{=Lm=?>m2gdwfpwxz)ULV<=s6dDygk(~0hV z%+Z8LII!?~Bk|nUotrgmdamhQVq`X(%Vn!wMOKAmGMQW~*>~sNck?zZZM$XHcE{#% z`ZMnMjvd$hEIL1CvXfUM4Fj1p_!Gd13x^Hz++@@mi%=6p-bHp*MGp~3KJlRwi1AK zpP>xs3cLf_wS>X1p9Jh(;R&P1$)wUWgrNC@3|Q{+cXSO}T2~2tkscw@Sq>o`qbGSC z#C@R^SH&{}g5D?bEy%gW!={y!|_oYQD^c)2m(rI8Els3CSU2@zijNWRs|1BS~5Ex#2A(;(Oml?@Hkh$mt zRiu37A;xVwfLrIDpPh3+GNI))byI_y*Oli^wA^&+ z3Y#o8lysd!_bihzzk~vzs6l>`5DuMb$XG!k0q*f%@*jQYR1`Mf(RC`LQ#p@*s7A`N zo}NoU`CL?1o@ncfZg5flrQRyy=1op$CA)l$@gJPf*-@u+R!e~@y8W6rqI?Jdlks`W zbk@qI^;3M#7A;A;ZL4520~)2;Exs6o{`>fdXB7w!xEZ$E8v}&TQ?C5tc#ksN&PQ*4 ziFUAjZ}9&1nDU`e%%|t`7kAWg28a3Rxs%Aq;`SppTGlFewHbm8YDiK%{wq89 z0@rlwMdi$f!M!w81ap~T2MWUZzt5m%=)rQuaTE8XjqW&*JJS@Ziw(k*!m=~se9aKSp`+V$0Ug$#pwowhH$OF`L5Q1I8E$Q)Uz-IU= zrfz~teAQ!Uu89_wzir|kRs;sQl74Rm1t{=-)yaWH+QtHG^NKS@fvGHl1r;p_d4woyp3uBe+6?HppWuo z3~A!YfLLC*j0?fxXlSxgBkE8|?WP1u-%Bv4{Vkbn7V6Y`uYWQpVNNH2bC!esox+51 za0lXz{dr=-j-)5D>;|w(w3+qE&h~36*|Yd364!D1tTusHj)cNXwLYXIj3d}|gVSXj 
zD@(*md8&x}Hbz1oMfRA-+ZSdLCJp|!9MqM6*SJ?w^srmP9`mIiM4S&-)+puWs9*Y~ zA0}gg_1JWPDDpunpOYS3RQx=OS!R1EC>u9dCNEirg8|t}61Z|J8oa3usnA)FFAxPgs(U9X!{o8-ARhPBl|ss{2*W+6B_8!MrC2vQ#n zRx787v~>g-xtb&wW(1{yN-FL2k25QFJ_B5BTQ>HDoZg;TCwolr9D(lsr27W1p@;jR zgKqi}W7q^5!tGD7^kcGBDcDwi7TfNT$tRM@WD*Vt0N}SN6MH3z`)iwK;*>9R=BN0S z#^TUwlT8+9j>cNv;aEswE7VPpMo&}?^al8n020a3uCq**BKyL=$lGCbW#=95(k!^Ns?G+EEs`K{C#M4jg6M5S4hl z3|f>tfv&J>4+T@p*^I|Dh))iNx_TnJzkAvyP?U|WG>$?pZnZ#~CkecHA~JTlldR~V z(!Q7{*?adF==`=$a!z`CP;*EMKgDOOB+=rH$uz0*a4t(<%5YQI(u;51gij{(J$^F> zuk@%CEt4Zi(<^wGXPGGnQEt^Vw^r~F;~~DpJOUuc-=Kq}oKs363nJ+io_>FV;Xh1oygVn<-vw+4hKyHc(}Ao--2RLR!BFGH7@(-i(t zAH0uf>EFCk-i6}E-ldil#)I^oTKM%aqS{`$c`Swbwf3W}D5a@s3Bc15m+nq2d*upfy z_Zn*-8YYA3b-VHIxe*_dgE%7^%n`PJks<_>dlkVjpPwPoIfsbu=l+Ydd|zxp>hg5x zFG>ZDBd}Z=P_N6H$OJc1g!G~(Q!Nfsq!-pgdKS4oMSX^i_8W??u#pYl|E)WxGcfRr zpJ7@e_@z%w*~I11UdH=$&OE;D&|~q^pq@c1=byRQkhkKmn>u8CvUm6sE9-&)rP(F7 zrfHjb8p?>T5hP2b1rjyX!M#SS{hZR?10xslQ5)n{ly@x{TMM@{aU~^1moL%%3!+FBU7MqJ2AdY082=Hg%sf|i1lCm zW1xPh#m?{pRO!sKPiQOaXM9&=?Jc2n5}w@6&h&4UFer8RwgHz(K8*WfVMH$H>W$csOTg)tEOY{6E6S!*L4y^of>Sc|LcNYu_%0yV=8IOL}pQAUd4K{?kjO zQK;B`owvmeiz_z~-Nm34wZI@{>wPGd&3Sb57ebCHQ_?9C%FYH>apj#%B0ovwEKVrm zCRR(E*Ar`=Y9A|4hEY0T&5dW zoLY&^ZIP>T{elul?k7N3wDP;vP||aOq;i3#==2EbNOmhqXW@^v&zE&N|Fzea?TIB9 zTD3I|#+jIm-ZCk80LNGs1SrAE_v1fo8+7Mle zOHU}XM~*0S9{|a^XPgk9Z$V+Q$2mUT$gh`l{c0E0ucF=a4i>@Q|7Io&xZ$xI$%Dw- zPgM}gKIV}wCGyuFNCvuszWk3(Hqbm= z$u(RxP!gnuH2&x?PSIEm>CRaFXS&zHBbrqyG1m=8R_ncHk9J4au_!L-gJYpE{`&x8 z;XZJX*%)jBwhCeO`dF8Hm6q{ZJhNoxy2gvJx?=cU?cC!*6C0xzFPm*b4BPobbR(?99(V1{ytPUo#|(sv>`y-e z^J_0c&V>tFG!qeQ5Hgj2)7p#4qD;_QzA1XSL&HCPuDoR81V#ooP9Hao)t5)MX<6Mb zw}XR=*nb}coHg2)j59QHN9hYr=%SMXz8t2$}(+roxAJs48GwOQ2;~X zTdza|5ljuG9gG~+7|;hAKY|)9lCe~J+Hf4D;E{%a`=u23PoeIA9&5E~YY!V_2!gW} zIjC`AE+?7)f+6|kY$Q^4>mu>!^0h^AhYzd;f2HC4E#e-P28lM~SDjyzVS?a;v$?ae z05>tO8LDI;0zU?s9rcg>COr1vk>Y)|ZG+}X7cnTDI+ceYZh6A6DM5^In@>KU)PzA7 z;r(VYH0}97jU)5ZFkGhXart7A$IkWhQ>P@Alf`;SjBIfkytrvfV;9M2Y=>1JlO$1;jbc}#7@GMe 
zFQr<=(ROclz<3q{D=sDZD#nQr^l4z3vyoN<%))(_pq6KEarBTnJvNbw=wi1z3O_Dd zy*zcKWmDGTMfSRMXdMm_9`CF83ZcFQ?F}w&-dJ@I$soza!q~fIEuAXYGCiR5A?z_x zVC8wdlSo-d4*JZv>Ti@iSR4LKO`}x-9!M=4BM;H>nN13Ol4pd60k*eonGs#xF0;v8 z*-QWA){H{rp+_;ao^`~AxuZH*(F@Xb--kKY%HGTt7K-WrS5Sa3@b*5O$?Rk$G=-gb zWiy~B^UI?>vrZtc1AMosPk0tB)CT$i)=wjVc#^Y}h%Rk%sRKiB;q)eeym0;D&qkPt z2`|xfrqYn}eBI>=+9xA9Mir{CWbmQ33NbH_Hu(uhDYCnhnZQJi+a((ZI^yqxB_OnS z%_Aq-t%PybjT>(TK1GQuTojnno!EfikyPG$TXz+2Bf7 zH`MTvfodAvNtB8*82Dlm)VOVM2yl00Ld?9L-jNBA!A*oMSS`l@iRD5%;x6Nbb^P7Oo~D z^k@@qD4nH_wWaYTGstlNO%&X#=$ez#I~Za(HvL*N=(m6BICOW{z_ zw1o!=QkIWO1wlb-tP`Voz`FDx@(NtRTe&$ zsQ1uSje7 zSqadAQ#Kve1KON6y_w#yagC-I4wH2fgS=Ha6u?i1wSZPbPX)8-D{Kqt!+#6jnpT^@ zRykTnI?#;MWiz=f?|n|%oFu1ABXbKZ@D2P&h3+hMNA>S|Gm>2VxulpGs?LJzKIj}&x zt1&J(JqBd3ZR|T=$S`R8j0;XxQ|GfO2#fUtA4Ne~C?7YAoCL_eT@o3U(V;~)Cw+sW zdJwHV$-;EVxEEVQZ*t*02|7DA-UVjR*Jv938GZzbfz!_ns?eCx|HSZt3YH?1lk zSl1kj*Se1nj>6i2=eWiqz<8H5j7hrCUNH9EY#7KXb=Gp57$; z(sq|Ay4;vRfrhWw?1`~Rj?1EO9I|rZ{ z)It6Xi(?sDF!v@0?+4zD7ATt{ItEPi-tv%=dC(B$#hsviC>LeQruy~T96J|iEWOw0 zly{hU6*U*@OEc}J*V9RM(FBi3MSZll!@zE~YojX3aWW?w-~$5}Jhm2Db%pkW0!6k4 z=gVaw8CggO0U;o7)?e1XyFXWItp))rG(DsNYX}F!BftTVtRuMg%i;(se@$V{(OwyB zzew1a_tvqaD65|YIe0P0{D}EtG^@y&F4(IbonRTX<3&BjznK2%kkCSF;z)PdU}D4y zX}~YOJ9;q)Mv4{uJk0glut`5)uh(a#kCB*TM}!?|MD;!)IF#bP5{JG&OhiW(+2k1t zVhc58vj1`rX@$2`oogE{P0H_*2i;EtJ=}&iY3G)8!I$8*>k;&y@fS3f01YdF&%gV` zM56{^hBX$0zF58c`{XDThqEo z^@HZVmb@Q(EI?13$S}SJ&la1(#h%_>5P>Yu-R(X@8TvSRsLN3*>;uop5F%M2RMW`V zXgcj$f`_|p+9i4N_j1K_dGD5~Fd~=+=$Ldsb24qXvrv+h;52v;n)4Xo=4pJoQ}q>C zh+;>?PM@eI<~cy>;48gO6K@b?=8cH7XMO}l>8Ht$MfAhn5&(lB9;qE3ODY-ls>trD zo1&52mYD6ztqfPq5gt%BBo>&Q6ISVPgmMt(xg2T^E)}Z(LCTNBU|HdmOa6 zs+7>_pYZ|jt6CX`Y%=BSnTTcpa+l}^s)MIm1wnIl3Joqv{PzN4CzcS;fkk${p%DR6YMY-< zNE0b^A+|-2CK&tc4)4Lj1Xrz+E5|^Ksbiq{Q9Gmnb5JA^!@EdCIJ`2d6B{wR9nc=m z4NPjBdV%rr#KRa6{d>2hUNT1ad!Ft6#gZ9xEFdb~$v?oM;h86k2FG@mZ^FspPw^6n z92bn{!_8_ey1Hq5ebO!+b`hL_*P8(tCGh{ccuKv*9lAe|8=06c$>}c}4QzGrGhaM- z-Ee<=!&83J?rLCpOo5X!)+-*z-dqz_PEcuWikbpJinw3ZdoGD?<}yM`xhwHVBk<_+ 
za*XN#Bbnry@CIz6o^PFGQG(bxMRH?~03~1cqnza1z}FtZsUD^#EF6!*WtYWq#X+$p z9~U*Q`Cy)8Avm@e9$9ka_wCY&XbSJGnar$o7p$a_t!-4WWDs0jqlO`M+^;`+;}PN! z)sY~my`#!aLLb4j!tR`a?01SOS;PcCZ*|ZbO;{4k66v8Uabw^Wc4$7?y+|krF$fGI zC8YU6Ldvr?y?E+g#zvG!NF>|ikOUbZ{21o}JxJ)stbEV6M*PO!HHH z#RY62iaB)56t+$DUQ_@s+|Aa>AgklW8-r~QN25r2HsQjsS!3xgJkO%irf)ejYplIb zh{tam%h_x`O8(xTaf<3K=nm@j8QOg6KGSwsZ0kLNR|NaE!qLO2{*F>D*sKU3 zJ&)ksg@v7p6r$-t{1hPM@BzK=L%q0wF@0MbYUb%5rBkxtxrhy^75jjUe}}Izbp+Bj z%;7VEb)8Zc1T8#`5R8U5PCQp^x}kc}%tm}qEK;gZ4QGO#SoeSuhs!MIf0naZ=9Qj` ze>PKNa$#&rJ9}mH2J zCqkoZmm#pz0|V8NXvjCGSDDL|)$@Hj0@U^(Cgs=)Pb_PL!aG3MaSefC{%XiG^TT~w z?OAukkT!dc8$_iU{797i$1U=%(L=+FsfTGnq+o_h+y$ zKF1jA`E$)4%M3BuUSQ#;^GVm;u7_M)MS_{rELOV~$Iul!9^yaPQ>5@*yQ1J%5C0Nf zEp`*Ua~2n8!(`SMLkR&D+0U#CDlAZGKq=9-a!v+m9tZKRliSrqS~GG}iL`MctDmJC z3y96(MrZ-cUYK)*@p(wcZ6;S4kz+*N(lnSqsVakO&(Y1CRgB`5p4tbrvV_S4ulSW(T3w zEH;O{WPT%KcuW9gAN{Kfx)sr>Lc$6-NW@oORHjjIAfM`T#X*(XuB>#VYd}_O-LH2I zK{?>0H5xkUkbv7FZj=K&HTM-^Yg`VP%&HWB^h8{o@PaGUi+{lGQ9Uj^x(c{cOMYVWA{5z_Zh z-5n_oyR)s|#*c@q7lL~tyqSs$kb?vOLzu1F=D8zZKw>1|hP2IluSMau;V-``Rwjim z)R-B};a<-IMorm}vjhl*F)Ug{0HInF(8*=yT>XR?hmvJ<0V;E%m{Eo*in(`Nv0V`t7XK2`9N7f5Cs@Fxqi1CHuXng1>oZS}@P_;o zF)E_qD_~vQFV?u`Bw8tUd?d6e5&4wB>I}X%g?OwD9JO|1+0^w!AO~eCrXnaJN$o^Y zuEvKy0X(2-F^)mZNBqk3HuHF^-3F)|s`KyI!V z`jgT#Oaom%K5J=@Il-8)z{A|*B>K~XWpWQcxP`4oQTIXSLVBE&G#HKTNlQ{_|?l#^!F56bxpfm1p86!lKS6Ap(A%-(Tf37*WUES@KG zm1lsYf`g}QW#aV0kI+#`6~4^=Sp|Z7YXDl6lTN-EvRx82NUtsJG1gz)kGwcHI4G)^ zqA`YS1SKTFLK=WmGt%9EHATkXk&0sqdhH&u@^8d*vk?v!mX;7M^iuEGokZkS>3O!6 z%^ZzjalOyXsKv3qm)B1ZGgK5C&{CGzULt^j>5r_sDq+^5e+*x0&!anKM!Rz{{j5kc zqqn6y_Cr3AAE_oS+HQJ&9{;h0F@P)%SFC&NrMVy+fueaXihKTXpKD%)2@qPT-{@}) za-woV@O%6f#Ti+v(180%S3g`JBUbdVOudEj-iaGr67+Qx`@LO)4G$rrVXX1N!Z318 zM~UTi1nZg76O+JSzlm<7spS`7MVk-sxRJ1(Rc?tv4*lt9kRwfw)`GmB$M>xY zuFX5A;D0LDJc0%E=QmUAK{QdMqB?x*X?;pt#qtWq=KJ?E$<5Sl`9a}`A`U>>ChBZu z2#FLDO);gN<(RRtQH$l`RNH>L6lIA9j=T}5JKtuciG85(bi)vq6CC#&1TSovpgSes zY#`n1EsDIPHM?&*^<>Ah5R9NFSphvjB+TFI)dFFOOrxoW7td^2i|RZ}uhjCECCG@v 
zgIhzZ)nLD&($#U6%u;6v>`E(qNoeO0Oo*Xk5wWken#cXElCjdlLm)il9k0zB(w^AWHq=irnRO5A zr!Q3Mw!!k@cQ_6Fj0@OG`qN@bxC}*> z`7v)(!^W%52WFMc)iWKzw9uEP)TU&i1=v#%2TItz^T7gEun2n0!>L_u>e8TY+33aC{RuDH(?t zBd--L&YYHrfNF6cCeQ6e%NKEzZ`>r0u?i`%+#`T94M8dCnBQj|bnOCFj6r>9PGmKn zl!{QJe@vHpX$dIfICpU4Pp@yJHNuO#v&!ag%8<6T5p7@fjJ!dfY|+-n@aMs~Nx5~% z);rff*LoccD!bEcvxF{v$Et>5&8~?R&_F9MXc`o2hmW&t`4R2SE3T88uTKsyo}Vk?f16fn z0zJ}`NO}Pp21-_H778kG$hgDKf*XxMiadrpw2ge~C|w+_k7q+YJt+q36!9XYDqMDc zPZ7$E2=3!dT~{NKh7YrAX{8mVh+{*i1(09`1?h)9^oDqNaP}}-SJvz^QO^zc+#--cZID}e~7T+_Lb`O5F3OSfyd3VM(kRUtqVT#6*qxu$f0C}U;0V> zTCr(|?Yt(abG)FtxkiYzHkVI(b#&e(phMGW2k?`PHZ88lktB2p;2?bX@}3bgv=u_8 zkrx5f+I6HaP?XvudFBv2;q)cWM6p8Fj#f_8Sv4Gr*PO0@dU1SW&@?;>(JV#CD(|gW z5z)>*dkMGEF9=< z&dw5EWQ89r?pW_mG;hxMD-Ceo+-)@nt&3z8HL5ytQ7u>T8 z1C&3AE-eWUAcwT04dko}yIv03>Au4BE00|ekdgc*Av#O6doj^RCI;~!{0r3bpgD!_ zjHpy^!SSDkf~9ar`R?g9wK(+b-_)*0%*>);9Ivd=i-8L9g(7%4a`ZfU>#wC{6p-z&HcsTQa9 z_xJyWVq|m_*P~7Vh4S%{F^kk!0#(r}$=>S8pw#C{ zsy~M&kqG;biOdw|;5c^07JB)VX%+v(TF94=dY}UY&k8vDFH$-R#EcHdW@K;uTQ2 ze!+%S^|Roq{3{kjHRL5!G{>hD5x9)?8%~{fpQmT{($WI^D z=pe{;3^cT|e|H<~hk~rf5C^)>^(_V(&K<-4%ipj@U#TDq70IB*s^_S4(B8PHEF}{n zPr2oYGaBpZlA!EB`1`YQ(d}mp2KZCUirv?;#+CE58?fGTz!zB)49Nrl-YBr*_;mV{N;HP#kqi_qb6x+S<0FN^RI`9Bxmw6sb(!+Og|J9^ zEu0}zDK>nUaisdfhw!${Kn!wCvy+55L%a9AR_-EW4~FuzO^PR^X1lH zXUlI21&`Z>BP+Ie7b>v(IUAcRG~I?n)!N+x1LHONAb@hj@;wDiw!(~mS?`FtaYjRM z@DE@7&!0skV#UVX=+oN;nRXA?UCSS&RYUs9fniw}ds)JVj{Q_yhm4C~4CAfTTsL=J zw{l>Ho}kSuXJdAXf53EO#Y@+tVjm4@EG)XXt0q;X^E1Nx)+rmYsaT4%)zG`*Zobd9 zRCBYEuYo+#1XKpV?^<5BJ5i9{hngXZ;2-eDz=v6I7isjIywi*w(4RIF-U-t4oFT`B zt=XX5-sgmz^7RuMZ?xx_vXir+@Z^R(l-YUeV9OP3=6O~|+awj&^cRc#M~H!FI1jXK zRqs^4u@@ak{g$f z=(B!v3`DIqnG_}{;7S@$Hf32sOEEFk9qyo(o{K4{8HgyxU5>3;!TV-hi0+i@zV7Vt z87EQplx4d0u?2c@q9|e8_S#$ZO#<2ieoD3Q2g6*y{%*(XJnt^#CgNeT?H`rB4bs1u zy3a`$Ug*Rlhw2_RY<-N@;Oxw+mLdK&2BLsl$=w->2m67A*TKNSu@&BbP`>XZm;3AZ;r)wNW46h17re z1&x)7r`-2%XV zf6v#7k0oGA-~#6e_9Njrom@2T^DF#1B9afGohHH;>8=C=5rPg@;Tng~;~n&c$k@vl 
znxNx;#m*_5(6~e3A$PZ=#p%n0KZMi2E_TP)-Qc0V2y&!r0bl?yj|3ioS&a?sy6H}y zAGWVGxqFpGl4&f1DW~h;I~KknXuZ@yKVLcoGv&^aY+%NGxd3|7qYM1cv_y5Za7sKU zgWWrnyX_@!Nu0iNw}j?=C=kWhhwga}a^dmAsLXC0aibpHMLD;lBT8JdCNW_Yon~sj zozp+qf!BP1UW0CM^7%&CT;JJL(@$#J{fCSi+j-V?Ol^PS)BT}9bPbPSfm2U^tyOQ2 zgggvU&`c0$>{#p=BhtG|Iv$C1{b0RZ^lc=PZlhC&jFQvI0s`VJ>|2k<&9@QdFn9c>L?qKVh3<&SOH21X2Upg3p zj<)BD9~_$K+8uhdm}3?n6zo^`wn2+^?2b%%!UP316YDn@Nl%xp^_#RsF(ia}g~PCh8@&uHSBDfsmE~As=Ll?$6>!D+_oq*9NGI25wa;Z+A%B zGz-P1=sm!uAduli%R$rUEkeysLH}R6?;HnV`k@q;^Nr{@W82QSXN90xqxCL_RcAw! zkQS5S^~W{UJ;LSm3P!_>(~^Tg2pJM4$67F11y^lF!B9t0}+_GKzdk{KlAf>p~ppluHT&OQlov!U*Wc>YZ}8lKHBa|%PM-D)ioy73p3;Okk zZ-Ly3*0znkIXjAzg(c?Cf<1G+Y4eiAqh*&6e@1nNH8jhr#nJB8y$cXD=+Qvwsj6kJ zfY8Wo^Ljp=@!(fydE}E--ckFLVL^#u12^?T5VCS%Q?-!Jrl2z^ly7dZZYci|Ks>~rnqi#0ytHG{{9Ww0x$hoGtcA$& z@3oxYStkx1!Um4)HT3&WLVrQdsdu*=4uc`2+6*W^E?-4k$Z_!J!wUx_Dx53>d??c8 zeC1iOGi&sA?%klbqUn9s79V=`s)xTWM1}pB>^+Z_c9$6}L=Z(=lK=2Y3un;kR z;MNKczrGxC`x16P$i={Ns;U86q*pBuQ+OIr4A{B26hFNZPhHebT-|p!)})#L9WQv9 z&wcm0i^F*Wyi2Chv851zIo!&kDyndP!n&5u#SB&&XQBdX57#Ssl&yFBEZ;^|Or-mt zjOs$j?cbN*)VPO)>&QR-HRp8aWyUK`d0XBAB`-M1b_) zW&wRFYl-jnN3!%3L!4ZLcS@<%DBJ<|NXCwrV0*8(y^WxdOnbd>@a$BZVcT??_AANc z;Fpo{J2`@7UWtvZXjn7Es~-Iux*M?=Wmgx^qdbv#+mzGTHa99-C;U-MZ8soIGgr`a z3;AL1M=@a?f=EB5`j4PAwBL-zT}lMajo6Vs_zcqOOEC4EZ;M3VEMyahvNKJuG>F5w z43*$y6=O>Xv^?{|woaKpNZ{XkeZ&y}ucC3h9i=&9flP%G4gq5TAyewP!WvdyE~lf zAgoo15jWLcKO1bGZ261}=DX4sAL3vw?Z$=CD|V@zFHe=x_KEF~FuI~Z5Y1E3*+6D& zIs^OwfoyhAwY;JQv(IgXW=V2)iT8b07~F?)K#m`Hzdtz+7j{q!&tX(_gMttjCg|b&$k7#l^ zqz+C(wl6TGd;14*?j|omv~aBtODbRH_nVgIf|Wl&Nk=F8hs>!k{&E3)#oOfnDHTdM}{94lKP9bsBy3xhJ}@FP~?i7l(}Ebcdcz+WwH>$-8#&-oazKAr z{>Dll@1p9(1(JW9f+wL3qGJ*4v`7x>g6I%&V#wQ43?}=J;%yT&c6=<_6cbko1o3ZM)+sX=GV}Gi)<OtbT;q5A%O=OMaqwruZ8twoo+s!R zrmDfZE2dCpFJfpkXR4B7mT4+9;Eb@OjPvHkrTW8x!!YJ5X_LQ9qGPL0-#M+=%XUL zRbv}gPDviNB5XKOt#sVKUtf8S#%_}b!K`6n4*S633%>HpUT`Ct(cFwsvd$0FtsgLt_XLmb!&`y|8KMpT<8bEH+6ac$7VZFVvsVRPXZ@KJS9)WaE9>@6bGw5WkQ9bR+=pQASB@DQnA 
z%0-dRZRv0!-oG07t=TcyY=&#w{6)e`F4v;{FY02O9I#hKxcN9M7Gso>2DfW*+PG6l zTG}BGVx`#y(rUk1vbtAUcsh6b#Uo-2Q}<`fts`FSpT`H&c$o;HWKdX!4N^E&R%#A> zkugtiNbtFQN4aiLA0|x3pIzjGj?%6G_Y&2`HKvzD0CwcS9Hg==U0vmc-)!U?g*j($hX`x1rf{;7twV2~pqd!R~!sM3jO{ z6&Y-oUMu6&!|VM$SVQf$i-m654b&@p!?z#aFitE6AWOqUTcHjoSIG+WMMQmGpXkHx z3M)I)j#gV)+mK$=f?iGLOYH=g_dKy0ICt;N?!idemkfSLD-_h>Yz5;HpSm>CA0(Ly zMX&0@f0mhSoBtmJ5Ty@)A8paZGAWQyYJNS-Wk?XuycBDPZBMc3DS&1$q?l5zk<{i) zOz(b{o>szcg~Sv+tQ_u3l#RO!g9QW@MKTazHx;R zuFZh&RD%9XO_BE#(rugt-(F3@9)(JH@idnc-`^1;YQArzB**V~m_e0CmMmtyHBo0% zW%;4|EYTm!#Q8%qJF%7#wr^ECeZ$Ia^6C(NB!cs&E#%x`jC52+d@AuSbS3Uu>5!y* z8wF+UTli6wibjuS!S<~Iev4FihqFZkwf@J@OZ~P!4N^Cl1%$=0AP9E`cy1JN@)@7x zcc|Kg!9MSLJ73~4bx z!aTP2jFuE_EdO7RKe{ZSS($GhYirpoj3%GRQDe4g<@p~=OvQUzq={~DXoYo`@TScL zChe`}TJ(kmcq%dD^EbEutI>pjCKQN8J=W-g7y)!+0ZQNw$~VwY#p@_u3w$Zpfmh)skI>#mw zyY%op>oI8|5+oEC`q>h&5o3(hUyY=XCtz&DEu1Z6# zmP${vj-ynk3S}cQt6Z3ApHc`7y=@vai0J;tr_$l$c?PBDx!0A}xjUC=sZTSylDR$} zW_A|Ry3f^yYE&|RtmbQ%!lcepWtX~tNq5&Aw8ql)?Nz1)NQMm#2?21Z3=a(sgotWX zBw=dN(vu&E}zGyD`T&{f%LDG`TT! 
zc3~^ez#Kdt4oL(MB9DkY76>y5dps6XMy9f9jxf=0wOWOCpFMg2(dMz~Lbx9ZZKh0i1(&Kp-Y6 zoYRr9l!iJfBO^UDI2txMB!D~|APsOlAO-^sIFtqplG0EYHWbJsK{y^89S@KOg@=eo z1d#_1CTwUZ5Db*Y0|JMdK!S`39y~TW9vYknL?Mj_NnO~0m{7@JFb|EbUKK4;M^!D6 zX7^5^mh2Zq6d_Cu=p|m>h6kSSX{7s{CkS@PXrd{pZceU|VOSQLt zYL%}Z%~{W#Mfl259#dPxbcxS75+))9$dI8pAg(ALHYC;1fLQ1P3gzp<>ZWsCitE`^>b#>F`aWeBSfDd(&Syk)bCLK%r|w0Td|Ih70gqfSjs;0a?%y;erK4gboQw zsnqCn8NXJD&EG5XM|f6UYCMDoE!11e?W}1=XR4E)TxP8@kG=kkJN1xrW2*74B3*ry z9;Wk8=>UoZDllB|&=G-Rf(0tnfs|o@PPhOuKnE-Yg#@GmP1wjV0R#+SXdux7OK~e> zM9u&9WisE)r1ad+MWy&xCPF9~m)==?(xdTZ`HTCUxqm6{zM^%IZI!`Ps?*${gEAR% zLO&EiNg{w`aF`563Ba+DG}HwGAPx!-3>uV0heGJ=GSq7a2Z#JUZH{8nGs}J%q-XYY zLVVIs)!nsPnx~f;R7T#5c6jI<1R*iM8Cu);XnmU=``K({zt-k|y_%X*sF-$k_{yCuT49xw+cO zy!igIjHjsBy4YfRb;Lx=ND?`0-8<}*p#cHF$mApp)OOgZsNWZi%g+x?u%IweK*R)$ z2Z{&-B2YlXhM=nr0LEdWcw~>lL<4}K3&A-2Joi6J`8h;a?K@>9@x|-^R1(7_({ItO z?R>3DY0OdcZ|n3{lJ2(Vw%*px)@!!De^zJi6qj|g?p9U+d(Wiv^*vl3d5`xM{a#r~ ze9`v%mP{pKTpHK@Z~JWby?vK2o%VB?-p~DCTt~Zg_Mdp(W`MCo3fm34p7R0VTM)5T0x z?7#%{9PE2dAGOs4-sihXz=IQK+3Gk5fl>^6DA}$5P_jUMWS3V zpKlsUcqjnFfd`6KE>`!x*(bCuU_4+Ufnq=HEG$VY7r%K7c?%F0*22;jmfnYm2Np}h z;h-cB7DY*tk}QiWw!JofH7l&-yZ?-i-jtysU0X?%)wZ>B69rXC5CXC6M%#w4Coy*K zE0KqY#q5+sNt8rMCX_@;l!u69Nje}TNs?fYBuSDeiIQ|I%d#vQN+gnO5J{G$gM&j! 
zl!PO>umEvkA&FQhk&TLr4kf~&0Rp+`fKVbzqC}L7h!RmE$_C1X1R2T&M7cQbKt%=G z=%65hf|OuE0m_Qw3$Ti?iGl-xfullVg2l#KR8&By36>5Ulx-Bc{eRxHn70&Tr@x1* zrl0z$kxkAHNapgU+liTb%eLh%dfB&V-txEHrK1fnz%7cm^q<1;xSWo^>hYUz;XZ8q zM8m{(zc>7g2;$yycz^Vkr~aJI*{6Da>3935w>U0mx1nKpTA>2dvC+YOOQ+$rVSBe}1*;~@%GaB*3tQvZ~2zd8y#;S{d6rI)3a64vo%|@710xYZIgHZ zq1{6s-?FOb-HpqkW@2YAos08F{F;4pdG@zPc6{c{UX(96n>SI-mm88YH0WZPDCvt3 zAPOcnHvkwImyV$*ijrsExBM=BW}f$J*tcY&^u8YR(!SsNh_C-JegH6YI8qvON_ZKRC%=Dld_f5&dK z$435K(w*71d$rLuNtDQH_qIc(!}W4~M&TjCV53fUeUKyib$3jSaXUtFIW9iDbg_EG z{grK|{yM4kyC}Eq;{ug9mwfKX#bVyrjk?T91|9swO9vATOt3nAwj^7GZ&wg>ap zNy}_E854{5@B;LROxGn^wU^!fmZIQKB~+@d(t!);jAq+W+ArC|6S)`19Tv1GM^H8X zH-VENvqXn1-{&FsmW5b;4j(H+ir>`vSTd`C8EZt}rN1XRe;FWnhm>jiKr8JZ@Ty-C z)FT>3ghkvI=?P#{UXYYMoWSR7LML`)$FuE&FzouZ-)FQ9-YMdhx%xwv z6V$Vhe@GQNb#pqW0@NlR69C0eC8tC{iX`(=Fvd~yLmeAZ+PKuc_GncYL#MYJlcCqx z#!4KwA(D+(qCYg9KL6{4Nw z7Ne`lB-01_)wZ%$31J&WH94Cc7619?=kzM#3+LdRO;NfZF?kj@@ny(x(b)OuA*Q+E zW>O$5s-kZcQ69{sh00rvPJ!k!lf|<))>XMkh2RWG7@ux5tuZWVc8yj@079!sT}%y3 zomJ8TfNVl4zK@cINjpxdV2T(>O-)us*8m!av(e1fBMEqA`Xq|va4M8d?`f-@JXEhK zK4%4|A-z^21G&$Uq1p{Mm5a; z`i&@nWi)20_rgFD%RfTXpYaSa1uuP|q9=jbdPQF$7QqzNPU((wLlU*ghP$E{9HFHr!HMxIU*WBe@|Q zJ#FF|DlK%3m{=D<5D+|={i7`GdoV61F&D!4v01)JVbXYdene7tVJJcRf643vB@Tu2 z2aGdhT^t~#Q==P0+OQwl@IAw7zW5TpV=z-@Q-*^eXQ03Lvn^eVZ;1ByPLF`)Za>ZzN@?jL#jLgj-(%F!q@{E$!C_OSL zqyCxZhE{fVcGAjJSxfafD(ceLPGcNZM>b^?JyUL|W@ltS*A2StK$U-cpw(| z=Xs68!JS-bHKT5xK4|?8ZTVPl5{4*5fM;YO6e1!OXYs>Sub;=3lB$39`frxxAIC^U;)A4RGXLqX!>DyrP>BFZfdQ~wY>94uw`38Zy5A*qX-YXNMu=)GBHG2)HyXxd5*CW z*IfeZ0d3EctF#H`LmuXtV1huyyO0{OadyPH&%z^t;41AF4_;*HFOkkZv3`jlG=kmscb`8h#7#-U%s zA4t|FD2JX>WL2jY)B}~K6jK{6a*Y0EhjfNVVC>p{qRxcNQ%{>Cg`H&g>?i?FPubw1 znK8BYgi-|S5oC7BRiUkh(d=%PTqSmAjwKN99SI#hOd0E?|CtbbPt%lSwaHh$i@WHw z^2KktF=xW(coSWr&_`JnM5x0SrMpMYU=Uu`EEE(O2@G1RdpI*}s993vsT^Vk@<^S= z07f6Og$IJxLC2!0o`McSOV0xy&L4D8nsO+pyik_&J4vlI=K8lOSMz+FqfCf0{Q?K> z&rs;0#UPNOKtplr0UHx`mg$42D|hZl1x^3 zkx#4S-h<8(CIX4{VTi2|L4$qQp!m~>>Gu#|ck->XxmzyOWNh1z4fJ&6Z(XA>4y;uO 
zZUIpn3CQPfQ}Tx;VXF>(;KV2tAbiaTZw##0r~Pv#&RM0U&j5Q` z2&x?8o>M$P8m~`>jFoq&+4`B7=y$2hRr^~U(h^vOpmbjOst6-W;4iug5pCH89u1ua zib_{xLl4)P52s^liqS!^f{1bAafgvHI6SmHATP#w6y|>iIk#OE>HRKmMca;qVW%m~ zw!dyL&bGysXl})lvAYa-BdyJ;!M)Mjd_hRg{kiQ+1V`4`v>Jg0xo=pd@Y+*2J=Dfj zX?l6lSXZr=C|JsJS`~#Dw3d4Vx6>YHoD;ODHY9JzOY5fuIj&7|odg@9&tsjTPK~o} zkfM}R#)xvxC*@*m9TW{$@}pTYhK&Yu9+a$ydsHxw>>z3sNOEd!tYLXb%0xyv^(RCJ zvKOfloxxEM94R-@blLjvkvAXGT#x{#DnBQlAk%4JfXzB|4}|{c3~OEtYe*1+F6j); z38gcHRPcF8(8_4gO!RjmP?BtYiC%ktZ6HZ(>$lwW2+SK74lHB!kqs05txP8RBV__W zP>`Tzqa-i(i1wyw!AVBc=#cCML+urrW==BG;S=Sh&z6)q6f7gSW=xT!`m51!8kw{m z$m9aHh2fn%;;U)y#(?9nOhHPiN{wJ+t-F3Tv4|ju0N(uAZnryXRs_YcM|2Rg%B@it zZ!q+`3MDLzWI$u$igr{*PD(Z@1#Sc1!IOBP=G93i8t=3{gp*NcK^jz5sbj(N_?ZHy z&5H~wcx{H_#*nSjFtl<5um_IGsK-dm{;q8|0vxxU>9!rwv5u*>9aHxR7+1EP z;YQX^xmL5tDNw|Xp?g6`!4~1Wvj$EjZqz{_arr`fp|+zRJZ%RK;+GF4J<4F}okjj! zR91sy3L?y53*+mIaAPR@kdBM)~{*I_SapcSUW)3n~IVRNpy& z0FpL%Q`ga|uK_ojpf`yo@HEOW8&^@gL0*e8?l@i-mJxiK(qo$zZ6)UB^CD2(Bmfme zAyeq7mUDyN{c;w)CBm^GHt(*dh;js#;VPHQu|v=2uIm8U)F7B;)~0c0szMKU_haNi zP#Q({mKPX|H$=fMx_Ye{2Wtq)3p-SNyDnmjtP4ff0Wa`Dbmy}j$ z`M!5%wZfhw}OXNSEsGgmK zEY*1R3eWNBh%UhGK^qF4E&N_*#=y#l*4g4`_x*~x6$+@`7G!46f|M@Gm#>*lK9v=+ z3lFV%#E!Y_6>VgdT>?}J^XiszqI*z-^@ZzE`aEDQg^VC*wMI=tpsc+p=ikkv*--8_ zWsdT`I7mndco)tR!g8DZkqqqp9e<}DpHFaBc;auSxWfF>e4^n_drthSAVq;DEFmxH zBp)&Tg}cv|sAC5#EAwyOaMbUr2Xa^aR3u zqBXHmzSNY2_vQlW?S4-nh;}7XnX9!?04*`3H zVMqLFGb0AXWOok0zeOfuekUocS=a73O}gA9904*?s2rsh2-U*b-Oi3O2`eA#w|>Wiu1G-(-Od;=5vVSTmn>&&;bkcHH~mitFHAtlD?Wi&VJf8=4LhDy8KS* z&x4JPCBh(c?G@g}a&jEEIuqSE{V=oOW+8KBdId9&JGVN}W|J#2cL-+fxX1ehq5OV# zfYUNy=|-KhbVIVzBT0$42t5c#79G=#9vYFNZj&g~$ch-3C{P#dLyBS~i4^OHKuNMS z@NuC5K_6>L7*k7{mf;gVzx~EZL?b+MGZO}s-P+iv?Psc^6sTPsRQ5#PCK_ZVFD2ZQ z&2&vP8K9aC1a=Vzcc)Lfwya{9D9T|4!5yYfVr{)V3Xe|ecRR!et-Q2nAQy?atOTt{ z|K0N8p(bs?_5ibSmmyVIxW~&QRLWHF{@siRX|18~iP^GLQ9lH{O}{4wbZd_h*Q|&Q zdp62~!l*6!UJwyHC16MK=!VDAqDI%kQ=1i3qVg{1ln1RIWt<_=?hPQTUF%xA7P=dP zHQeAQT68Co_(f1_7;NuGx~(OQJkJ;zTJY9$BbKNrjN`NvsXXM7u7;MAh8j0LVT<@< 
zMBl-C&!F9YG=xQ8MyIqS$M>g5gz+d^D<0x!7~*K;Dh3k;`a zJQ8zWhHS&%&RTg>o-?(TY$igpcoP|q8Bv)n5#=dqH5*A>g5K&8Vui9!%t%#_MR`lLwIVx1 zH%iK1zg`Ph01M4nEzL`zIP6|H?<|Re>Rei_KSmp8>ohsT+7at8i1CD$k=nxVGTU&V zf#0yJH)g%~5XPcEUL5!S@=C9<=4tk0DzG&b)W?xH)GIN;wW-siQyG-_pY-2{x4S#S z2UPyb!d@@IyZSG>923pv&_)JI-@F+^c=3<>X}g4pj~%bz4&FnNT{);sYDZGIx!P+E zux3%fXa0B>2`LQA&g=Z>k!L~v&YIv@P90#1TG>Hzn)rJf`*j@@dSQgZ4zxcmU^H^L z6!sP+h_kL(sK{yg*TwiMsGWT_?D1FutUcvJb4YnT)E$&e0VCrLsH~wYmv?Z0AnS2A z@C~@Z)5R8#Nm;c|bJL!4jj937*sxJHMN3&@P(xyGqJpDgd1xZ}nPP~l`(}zwMgo_3(fR2+ za(z{2?bLbrn5g3dwqDU48<#J2|2{*>zoHm#U}6|%k3(7#bL}0WUlEJiwpJ)y!~>5r#u-p_%g{7MzbB|~sfi!CBg z)*eEWQ7DD(ccl$Zjo|JqLyolE}Ds^%M zxpa=*?dd?vI`B$f=4c(C>?F_1Gl^Sx4CTFi?t-pK@Xv#xd5!>Ht5{UvbrN{`V)7Aq z%5=S)U0AQU;kjsC$ma)%(#Pna<+5r^mX@VFV${WXB(BSRf_LTO)UO;8{q!z306Rd$ zzo|)id-=?4$i6NAQ{Qq+|I@>qr6tQ-%waen`@Vdh`jv%g8}mBx zF_)AodYXrn1X%Rs-s&LWQI`Zr2&7bYdXZq7!Aths zyLO-&&QtBIKEC&W92cG){}WTvAHgzdqbE}5VDg;=M#57h=@bk_P2NUBh{FUV_(_vA`vtze-nv^e*3#M zMhAn^%-r~iF*gR~zJ1~$cZ&O09g$}`0+d4TtW~nhg5-{xod%oG4a+?c=5AWom2Yus zZffh;Rda7Mr_Pw^Ik@k+n>6UorFSob1}qA0r-Z_e>?z@&+@ICMqqrRrDd8Rj?dilJ zVu5%%TVXfG5n70_C1ALsv@&-kdJQ9+NI5543kpT#1X?;Z9!{Sd8CHUBxx<=-aTF(# zLXZPuCT#DH>}Ox~r^{BUi9 z=F0HGN*xJDAdM)wdJN^e0JPmP(Z_RUnTv1orSxY^WVxM}FF&%%087kA4dwm1arq@J z$OpgpakR$uQXJ3VU$#jNRaTsASLxcOQ<2po;);3U+~aE;%TJP^_el8^#aL;Gw) zyt+Kk3DrLQ9$VA7`DgSL@Z$08bGFOi9)qT+PLwK430J})_bht7jWTQ|t`!khBE