From 062ba694b59b0845d94bf77c207864cd8be118be Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Fri, 19 Dec 2025 00:46:02 +0200 Subject: [PATCH 01/23] prepared container types for signature aggregation (devnet 2) --- lean_client/containers/Cargo.toml | 3 + lean_client/containers/src/attestation.rs | 20 ++- lean_client/containers/src/block.rs | 17 ++- lean_client/containers/src/lib.rs | 4 +- lean_client/containers/src/serde_helpers.rs | 132 ++++++++++------- lean_client/containers/src/state.rs | 155 ++++++++++++-------- lean_client/src/main.rs | 6 +- 7 files changed, 211 insertions(+), 126 deletions(-) diff --git a/lean_client/containers/Cargo.toml b/lean_client/containers/Cargo.toml index 011bb4e..b6a45c3 100644 --- a/lean_client/containers/Cargo.toml +++ b/lean_client/containers/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" [features] xmss-verify = ["leansig"] +default = ["devnet1"] +devnet1 = [] +devnet2 = [] [lib] name = "containers" diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 6ad0f56..6a820c7 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -19,9 +19,9 @@ use typenum::U4096; /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). pub type Attestations = ssz::PersistentList; -/// List of signatures corresponding to attestations in a block. -/// Limit is VALIDATOR_REGISTRY_LIMIT (4096). -pub type BlockSignatures = ssz::PersistentList; +pub type AggregatedAttestations = ssz::PersistentList; + +pub type AttestationSignatures = ssz::PersistentList; /// Bitlist representing validator participation in an attestation. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). @@ -57,15 +57,19 @@ pub struct Attestation { /// Validator attestation bundled with its signature. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAttestation { - /// The attestation message signed by the validator. 
+ #[cfg(feature = "devnet2")] + pub validator_id: u64, + #[cfg(feature = "devnet2")] + pub message: AttestationData, + #[cfg(feature = "devnet1")] pub message: Attestation, - /// Signature aggregation produced by the leanVM (SNARKs in the future). + /// signature over attestaion message only as it would be aggregated later in attestation pub signature: Signature, } /// Aggregated attestation consisting of participation bits and message. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] -pub struct AggregatedAttestations { +pub struct AggregatedAttestation { /// Bitfield indicating which validators participated in the aggregation. pub aggregation_bits: AggregationBits, /// Combined attestation data similar to the beacon chain format. @@ -77,9 +81,9 @@ pub struct AggregatedAttestations { /// Aggregated attestation bundled with aggregated signatures. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] -pub struct SignedAggregatedAttestations { +pub struct SignedAggregatedAttestation { /// Aggregated attestation data. - pub message: AggregatedAttestations, + pub message: AggregatedAttestation, /// Aggregated attestation plus its combined signature. 
/// /// Stores a naive list of validator signatures that mirrors the attestation diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index 9c0a1de..55b4727 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs @@ -1,9 +1,12 @@ -use crate::{Attestation, Attestations, BlockSignatures, Bytes32, Signature, Slot, State, ValidatorIndex}; +use crate::{Attestation, Attestations, Bytes32, Signature, Slot, State, ValidatorIndex}; use serde::{Deserialize, Serialize}; use ssz_derive::Ssz; #[cfg(feature = "xmss-verify")] use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to_the_20::target_sum::SIGTargetSumLifetime20W2NoOff; +use ssz::PersistentList; +use typenum::U4096; +use crate::attestation::AttestationSignatures; /// The body of a block, containing payload data. /// @@ -11,6 +14,9 @@ use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to /// separately in BlockSignatures to match the spec architecture. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct BlockBody { + #[cfg(feature = "devnet2")] + pub attestations: VariableList, + #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers")] pub attestations: Attestations, } @@ -45,6 +51,12 @@ pub struct BlockWithAttestation { pub proposer_attestation: Attestation, } +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)] +pub struct BlockSignatures { + pub attestation_signatures: AttestationSignatures, + pub proposer_signature: Signature, +} + /// Envelope carrying a block, an attestation from proposer, and aggregated signatures. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -54,7 +66,10 @@ pub struct SignedBlockWithAttestation { /// Aggregated signature payload for the block. /// /// Signatures remain in attestation order followed by the proposer signature. 
+ #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers::block_signatures")] + pub signature: PersistentList, + #[cfg(feature = "devnet2")] pub signature: BlockSignatures, } diff --git a/lean_client/containers/src/lib.rs b/lean_client/containers/src/lib.rs index 511db23..c73a9f9 100644 --- a/lean_client/containers/src/lib.rs +++ b/lean_client/containers/src/lib.rs @@ -10,8 +10,8 @@ pub mod types; pub mod validator; pub use attestation::{ - AggregatedAttestations, AggregatedSignatures, AggregationBits, Attestation, AttestationData, - Attestations, BlockSignatures, Signature, SignedAggregatedAttestations, SignedAttestation, + AggregatedAttestation, AggregatedSignatures, AggregationBits, Attestation, AttestationData, + Attestations, Signature, SignedAggregatedAttestation, SignedAttestation, }; pub use block::{ Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlock, SignedBlockWithAttestation, diff --git a/lean_client/containers/src/serde_helpers.rs b/lean_client/containers/src/serde_helpers.rs index aff4d60..0568f71 100644 --- a/lean_client/containers/src/serde_helpers.rs +++ b/lean_client/containers/src/serde_helpers.rs @@ -34,26 +34,26 @@ where pub mod bitlist { use super::*; use ssz::BitList; - use typenum::Unsigned; use ssz::SszRead; - + use typenum::Unsigned; + #[derive(Deserialize)] #[serde(untagged)] enum BitListData { HexString(String), BoolArray(Vec), } - + pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, N: Unsigned, { use serde::de::Error; - + // First unwrap the {"data": ...} wrapper let wrapper = DataWrapper::::deserialize(deserializer)?; - + match wrapper.data { BitListData::HexString(hex_str) => { // Handle hex string format (e.g., "0x01ff") @@ -62,10 +62,10 @@ pub mod bitlist { // Empty hex string means empty bitlist return Ok(BitList::default()); } - + let bytes = hex::decode(hex_str) .map_err(|e| D::Error::custom(format!("Invalid hex string: {}", e)))?; - + // Decode SSZ bitlist 
(with delimiter bit) BitList::from_ssz_unchecked(&(), &bytes) .map_err(|e| D::Error::custom(format!("Invalid SSZ bitlist: {:?}", e))) @@ -80,19 +80,20 @@ pub mod bitlist { } } } - + pub fn serialize(value: &BitList, serializer: S) -> Result where S: Serializer, N: Unsigned, { use ssz::SszWrite; - + // Serialize as hex string in {"data": "0x..."} format let mut bytes = Vec::new(); - value.write_variable(&mut bytes) + value + .write_variable(&mut bytes) .map_err(|e| serde::ser::Error::custom(format!("Failed to write SSZ: {:?}", e)))?; - + let hex_str = format!("0x{}", hex::encode(&bytes)); let wrapper = DataWrapper { data: hex_str }; wrapper.serialize(serializer) @@ -103,9 +104,9 @@ pub mod bitlist { /// Signatures in test vectors are structured with {path, rho, hashes} instead of hex bytes pub mod signature { use super::*; - use serde_json::Value; use crate::Signature; - + use serde_json::Value; + /// Structured XMSS signature format from test vectors #[derive(Deserialize)] struct XmssSignature { @@ -113,65 +114,65 @@ pub mod signature { rho: DataWrapper>, hashes: DataWrapper>>>, } - + #[derive(Deserialize)] struct XmssPath { siblings: DataWrapper>>>, } - + pub fn deserialize_single<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { use serde::de::Error; - + // First, try to parse as a JSON value to inspect the structure let value = Value::deserialize(deserializer)?; - + // Check if it's a hex string (normal format) if let Value::String(hex_str) = &value { let hex_str = hex_str.trim_start_matches("0x"); let bytes = hex::decode(hex_str) .map_err(|e| D::Error::custom(format!("Invalid hex string: {}", e)))?; - + return Signature::try_from(bytes.as_slice()) .map_err(|_| D::Error::custom("Invalid signature length")); } - + // Otherwise, parse as structured XMSS signature let xmss_sig: XmssSignature = serde_json::from_value(value) .map_err(|e| D::Error::custom(format!("Failed to parse XMSS signature: {}", e)))?; - + // Serialize the XMSS signature to bytes 
// Format: siblings (variable length) + rho (28 bytes) + hashes (variable length) let mut bytes = Vec::new(); - + // Write siblings for sibling in &xmss_sig.path.siblings.data { for val in &sibling.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Write rho (7 u32s = 28 bytes) for val in &xmss_sig.rho.data { bytes.extend_from_slice(&val.to_le_bytes()); } - + // Write hashes for hash in &xmss_sig.hashes.data { for val in &hash.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Pad or truncate to 3112 bytes bytes.resize(3112, 0); - + Signature::try_from(bytes.as_slice()) .map_err(|_| D::Error::custom("Failed to create signature")) } - + pub fn serialize(value: &Signature, serializer: S) -> Result where S: Serializer, @@ -186,10 +187,11 @@ pub mod signature { /// where each signature can be either hex string or structured XMSS format pub mod block_signatures { use super::*; - use crate::{Signature, BlockSignatures}; - use ssz::PersistentList; + use crate::Signature; use serde_json::Value; - + use ssz::PersistentList; + use typenum::U4096; + /// Structured XMSS signature format from test vectors #[derive(Deserialize, Clone)] struct XmssSignature { @@ -197,79 +199,95 @@ pub mod block_signatures { rho: DataWrapper>, hashes: DataWrapper>>>, } - + #[derive(Deserialize, Clone)] struct XmssPath { siblings: DataWrapper>>>, } - + fn parse_single_signature(value: &Value) -> Result { // Check if it's a hex string (normal format) if let Value::String(hex_str) = value { let hex_str = hex_str.trim_start_matches("0x"); - let bytes = hex::decode(hex_str) - .map_err(|e| format!("Invalid hex string: {}", e))?; - + let bytes = hex::decode(hex_str).map_err(|e| format!("Invalid hex string: {}", e))?; + return Signature::try_from(bytes.as_slice()) .map_err(|_| "Invalid signature length".to_string()); } - + // Otherwise, parse as structured XMSS signature let xmss_sig: XmssSignature = serde_json::from_value(value.clone()) .map_err(|e| format!("Failed to parse XMSS 
signature: {}", e))?; - + // Serialize the XMSS signature to bytes // Format: siblings (variable length) + rho (28 bytes) + hashes (variable length) let mut bytes = Vec::new(); - + // Write siblings for sibling in &xmss_sig.path.siblings.data { for val in &sibling.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Write rho (7 u32s = 28 bytes) for val in &xmss_sig.rho.data { bytes.extend_from_slice(&val.to_le_bytes()); } - + // Write hashes for hash in &xmss_sig.hashes.data { for val in &hash.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Pad or truncate to 3112 bytes bytes.resize(3112, 0); - - Signature::try_from(bytes.as_slice()) - .map_err(|_| "Failed to create signature".to_string()) + + Signature::try_from(bytes.as_slice()).map_err(|_| "Failed to create signature".to_string()) } - pub fn deserialize<'de, D>(deserializer: D) -> Result + #[cfg(feature = "devnet1")] + pub fn deserialize<'de, D>( + deserializer: D, + ) -> Result, D::Error> where D: Deserializer<'de>, { use serde::de::Error; - + // Parse the {"data": [...]} wrapper let wrapper: DataWrapper> = DataWrapper::deserialize(deserializer)?; - + let mut signatures = PersistentList::default(); - + for (idx, sig_value) in wrapper.data.into_iter().enumerate() { let sig = parse_single_signature(&sig_value) .map_err(|e| D::Error::custom(format!("Signature {}: {}", idx, e)))?; - signatures.push(sig) + signatures + .push(sig) .map_err(|e| D::Error::custom(format!("Signature {} push failed: {:?}", idx, e)))?; } - + Ok(signatures) } - - pub fn serialize(value: &BlockSignatures, serializer: S) -> Result + + #[cfg(feature = "devnet2")] + pub fn deserialize<'de, D>(_: D) -> Result + where + D: Deserializer<'de>, + { + Err(serde::de::Error::custom( + "BlockSignatures deserialization not implemented for devnet2", + )) + } + + #[cfg(feature = "devnet1")] + pub fn serialize( + value: &PersistentList, + serializer: S, + ) -> Result where S: Serializer, { @@ -285,8 +303,18 @@ pub mod 
block_signatures { Err(_) => break, } } - + let wrapper = DataWrapper { data: sigs }; wrapper.serialize(serializer) } + + #[cfg(feature = "devnet2")] + pub fn serialize(value: &BlockSignatures, serializer: S) -> Result + where + S: Serializer, + { + Err(serde::de::Error::custom( + "BlockSignatures serialization not implemented for devnet2", + )) + } } diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 4eb0ffd..ac84e06 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -1,13 +1,13 @@ use crate::validator::Validator; +use crate::{block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, Slot, Uint64, ValidatorIndex}; use crate::{ - block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, - Attestation, Attestations, BlockSignatures, Bytes32, Checkpoint, Config, Slot, Uint64, ValidatorIndex, + HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, }; -use crate::{HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators}; use serde::{Deserialize, Serialize}; -use ssz::{PersistentList as List}; +use ssz::{PersistentList as List, PersistentList}; use ssz_derive::Ssz; use std::collections::BTreeMap; +use typenum::U4096; pub const VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const JUSTIFICATION_ROOTS_LIMIT: usize = 1 << 18; // 262144 @@ -47,7 +47,10 @@ pub struct State { } impl State { - pub fn generate_genesis_with_validators(genesis_time: Uint64, validators: Vec) -> Self { + pub fn generate_genesis_with_validators( + genesis_time: Uint64, + validators: Vec, + ) -> Self { let body_for_root = BlockBody { attestations: Default::default(), }; @@ -64,7 +67,6 @@ impl State { validator_list.push(v).expect("Failed to add validator"); } - Self { config: Config { genesis_time: 
genesis_time.0, @@ -206,7 +208,11 @@ impl State { for (i, r) in roots.iter().enumerate() { let v = map.get(r).expect("root present"); - assert_eq!(v.len(), num_validators, "vote vector must match validator count"); + assert_eq!( + v.len(), + num_validators, + "vote vector must match validator count" + ); let base = i * num_validators; for (j, &bit) in v.iter().enumerate() { if bit { @@ -230,7 +236,11 @@ impl State { } // updated for fork choice tests - pub fn state_transition(&self, signed_block: SignedBlockWithAttestation, valid_signatures: bool) -> Result { + pub fn state_transition( + &self, + signed_block: SignedBlockWithAttestation, + valid_signatures: bool, + ) -> Result { self.state_transition_with_validation(signed_block, valid_signatures, true) } @@ -314,7 +324,7 @@ impl State { } // Create a mutable clone for hash computation - let latest_header_for_hash = self.latest_block_header.clone(); + let latest_header_for_hash = self.latest_block_header.clone(); let parent_root = hash_tree_root(&latest_header_for_hash); if block.parent_root != parent_root { return Err(String::from("Block parent root mismatch")); @@ -554,6 +564,7 @@ impl State { /// # Returns /// /// Tuple of (Block, post-State, collected attestations, signatures) + #[cfg(feature = "devnet1")] pub fn build_block( &self, slot: Slot, @@ -562,10 +573,10 @@ impl State { initial_attestations: Option>, available_signed_attestations: Option<&[SignedBlockWithAttestation]>, known_block_roots: Option<&std::collections::HashSet>, - ) -> Result<(Block, Self, Vec, BlockSignatures), String> { + ) -> Result<(Block, Self, Vec, PersistentList), String> { // Initialize empty attestation set for iterative collection let mut attestations = initial_attestations.unwrap_or_default(); - let mut signatures = BlockSignatures::default(); + let mut signatures = PersistentList::default(); // Advance state to target slot // Note: parent_root comes from fork choice and is already validated. 
@@ -581,7 +592,9 @@ impl State { // Create candidate block with current attestation set let mut attestations_list = Attestations::default(); for att in &attestations { - attestations_list.push(att.clone()).map_err(|e| format!("Failed to push attestation: {:?}", e))?; + attestations_list + .push(att.clone()) + .map_err(|e| format!("Failed to push attestation: {:?}", e))?; } let candidate_block = Block { @@ -666,10 +679,25 @@ impl State { // Add new attestations and continue iteration attestations.extend(new_attestations); for sig in new_signatures { - signatures.push(sig).map_err(|e| format!("Failed to push signature: {:?}", e))?; + signatures + .push(sig) + .map_err(|e| format!("Failed to push signature: {:?}", e))?; } } } + + #[cfg(feature = "devnet2")] + pub fn build_block( + &self, + _slot: Slot, + _proposer_index: ValidatorIndex, + _parent_root: Bytes32, + _initial_attestations: Option>, + _available_signed_attestations: Option<&[SignedBlockWithAttestation]>, + _known_block_roots: Option<&std::collections::HashSet>, + ) -> Result<(Block, Self, Vec, BlockSignatures), String> { + Err("build_block is not implemented for devnet2".to_string()) + } } #[cfg(test)] @@ -726,14 +754,15 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block() { // Create genesis state with validators let genesis_state = State::generate_genesis(Uint64(0), Uint64(4)); - + // Compute expected parent root after slot processing let pre_state = genesis_state.process_slots(Slot(1)).unwrap(); let expected_parent_root = hash_tree_root(&pre_state.latest_block_header); - + // Test 1: Build a simple block without attestations let result = genesis_state.build_block( Slot(1), @@ -743,27 +772,34 @@ mod tests { None, None, ); - + assert!(result.is_ok(), "Building simple block should succeed"); let (block, post_state, attestations, signatures) = result.unwrap(); - + // Verify block properties assert_eq!(block.slot, Slot(1)); assert_eq!(block.proposer_index, ValidatorIndex(1)); 
assert_eq!(block.parent_root, expected_parent_root); - assert_ne!(block.state_root, Bytes32(ssz::H256::zero()), "State root should be computed"); - + assert_ne!( + block.state_root, + Bytes32(ssz::H256::zero()), + "State root should be computed" + ); + // Verify attestations and signatures are empty assert_eq!(attestations.len(), 0); // Check signatures by trying to get first element assert!(signatures.get(0).is_err(), "Signatures should be empty"); - + // Verify post-state has advanced assert_eq!(post_state.slot, Slot(1)); // Note: The post-state's latest_block_header.state_root is zero because it will be // filled in during the next slot processing - assert_eq!(block.parent_root, expected_parent_root, "Parent root should match"); - + assert_eq!( + block.parent_root, expected_parent_root, + "Parent root should match" + ); + // Test 2: Build block with initial attestations let attestation = Attestation { validator_id: Uint64(0), @@ -783,7 +819,7 @@ mod tests { }, }, }; - + let result = genesis_state.build_block( Slot(1), ValidatorIndex(1), @@ -792,45 +828,48 @@ mod tests { None, None, ); - - assert!(result.is_ok(), "Building block with attestations should succeed"); + + assert!( + result.is_ok(), + "Building block with attestations should succeed" + ); let (block, _post_state, attestations, _signatures) = result.unwrap(); - + // Verify attestation was included assert_eq!(attestations.len(), 1); assert_eq!(attestations[0].validator_id, Uint64(0)); // Check that attestation list has one element - assert!(block.body.attestations.get(0).is_ok(), "Block should contain attestation"); - assert!(block.body.attestations.get(1).is_err(), "Block should have only one attestation"); + assert!( + block.body.attestations.get(0).is_ok(), + "Block should contain attestation" + ); + assert!( + block.body.attestations.get(1).is_err(), + "Block should have only one attestation" + ); } #[test] fn test_build_block_advances_state() { // Create genesis state let genesis_state = 
State::generate_genesis(Uint64(0), Uint64(10)); - + // Compute parent root after advancing to target slot let pre_state = genesis_state.process_slots(Slot(5)).unwrap(); let parent_root = hash_tree_root(&pre_state.latest_block_header); - + // Build block at slot 5 // Proposer for slot 5 with 10 validators is (5 % 10) = 5 - let result = genesis_state.build_block( - Slot(5), - ValidatorIndex(5), - parent_root, - None, - None, - None, - ); - + let result = + genesis_state.build_block(Slot(5), ValidatorIndex(5), parent_root, None, None, None); + assert!(result.is_ok()); let (block, post_state, _, _) = result.unwrap(); - + // Verify state advanced through slots assert_eq!(post_state.slot, Slot(5)); assert_eq!(block.slot, Slot(5)); - + // Verify block can be applied to genesis state let transition_result = genesis_state.state_transition_with_validation( SignedBlockWithAttestation { @@ -838,49 +877,45 @@ mod tests { block: block.clone(), proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }, true, // signatures are considered valid (not validating, just marking as valid) true, ); - - assert!(transition_result.is_ok(), "Built block should be valid for state transition"); + + assert!( + transition_result.is_ok(), + "Built block should be valid for state transition" + ); } #[test] fn test_build_block_state_root_matches() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(3)); - + // Compute parent root after advancing to target slot let pre_state = genesis_state.process_slots(Slot(1)).unwrap(); let parent_root = hash_tree_root(&pre_state.latest_block_header); - + // Build a block // Proposer for slot 1 with 3 validators is (1 % 3) = 1 - let result = genesis_state.build_block( - Slot(1), - ValidatorIndex(1), - parent_root, - None, - None, - None, - ); - + let result = + genesis_state.build_block(Slot(1), ValidatorIndex(1), parent_root, None, None, None); + 
assert!(result.is_ok()); let (block, post_state, _, _) = result.unwrap(); - + // Verify the state root in block matches the computed post-state let computed_state_root = hash_tree_root(&post_state); assert_eq!( - block.state_root, - computed_state_root, + block.state_root, computed_state_root, "Block state root should match computed post-state root" ); - + // Verify it's not zero assert_ne!( - block.state_root, + block.state_root, Bytes32(ssz::H256::zero()), "State root should not be zero" ); diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index 396c8f7..cc44285 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -1,7 +1,7 @@ use clap::Parser; -use containers::ssz::SszHash; +use containers::ssz::{PersistentList, SszHash}; use containers::{ - attestation::{Attestation, AttestationData, BlockSignatures}, + attestation::{Attestation, AttestationData}, block::{Block, BlockBody, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, config::Config, @@ -216,7 +216,7 @@ async fn main() { block: genesis_block, proposer_attestation: genesis_proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }; let config = Config { genesis_time }; From 4c53527287b43e7c9f82e99f15faed1d1022428b Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Fri, 19 Dec 2025 11:11:48 +0200 Subject: [PATCH 02/23] minor cleanups --- lean_client/containers/src/block.rs | 19 +--------- lean_client/containers/src/state.rs | 26 +------------ .../containers/tests/test_vectors/runner.rs | 38 ++----------------- .../containers/tests/unit_tests/common.rs | 8 ++-- .../tests/unit_tests/state_transition.rs | 6 ++- .../tests/fork_choice_test_vectors.rs | 8 ++-- 6 files changed, 19 insertions(+), 86 deletions(-) diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index 55b4727..ac5063f 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs 
@@ -170,25 +170,10 @@ impl SignedBlockWithAttestation { // The ordering must be preserved: // 1. Block body attestations, // 2. The proposer attestation. - assert!( - signatures_vec.len() == all_attestations.len(), - "Number of signatures does not match number of attestations" - ); + assert_eq!(signatures_vec.len(), all_attestations.len(), "Number of signatures does not match number of attestations"); let validators = &parent_state.validators; - - // Count validators (PersistentList doesn't expose len directly) - let mut num_validators: u64 = 0; - let mut k: u64 = 0; - loop { - match validators.get(k) { - Ok(_) => { - num_validators += 1; - k += 1; - } - Err(_) => break, - } - } + let num_validators = validators.len_u64(); // Verify each attestation signature for (attestation, signature) in all_attestations.iter().zip(signatures_vec.iter()) { diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index ac84e06..46641e1 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -135,18 +135,7 @@ impl State { /// Simple RR proposer rule (round-robin). 
pub fn is_proposer(&self, index: ValidatorIndex) -> bool { - // Count validators by iterating (since PersistentList doesn't have len()) - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match self.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = self.validators.len_u64(); if num_validators == 0 { return false; // No validators @@ -486,18 +475,7 @@ impl State { if validator_id < votes.len() && !votes[validator_id] { votes[validator_id] = true; - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match self.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = self.validators.len_u64(); let count = votes.iter().filter(|&&v| v).count(); if 3 * count >= 2 * num_validators as usize { diff --git a/lean_client/containers/tests/test_vectors/runner.rs b/lean_client/containers/tests/test_vectors/runner.rs index 9e7ef36..910fde5 100644 --- a/lean_client/containers/tests/test_vectors/runner.rs +++ b/lean_client/containers/tests/test_vectors/runner.rs @@ -83,18 +83,7 @@ impl TestRunner { // Only check validator count if specified in post-state if let Some(expected_count) = post.validator_count { - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); if num_validators as usize != expected_count { return Err(format!( @@ -436,18 +425,7 @@ impl TestRunner { let state = &test_case.pre; - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); println!(" Genesis time: {}, slot: {}, validators: {}", 
state.config.genesis_time, state.slot.0, num_validators); // Verify it's at genesis (slot 0) @@ -555,17 +533,7 @@ impl TestRunner { // Verify validator count if specified if let Some(expected_count) = post.validator_count { - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); if num_validators as usize != expected_count { return Err(format!( diff --git a/lean_client/containers/tests/unit_tests/common.rs b/lean_client/containers/tests/unit_tests/common.rs index 77c2dd5..781c43e 100644 --- a/lean_client/containers/tests/unit_tests/common.rs +++ b/lean_client/containers/tests/unit_tests/common.rs @@ -1,7 +1,7 @@ use containers::{ - Attestation, Attestations, BlockSignatures, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators + Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators }; -use ssz::PersistentList as List; +use ssz::{PersistentList}; pub const DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tests @@ -12,7 +12,7 @@ const _: [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT] = pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Option) -> SignedBlockWithAttestation { let body = BlockBody { - attestations: attestations.unwrap_or_else(List::default), + attestations: attestations.unwrap_or_else(PersistentList::default), }; let block_message = Block { @@ -28,7 +28,7 @@ pub fn create_block(slot: u64, parent_header: &mut 
BlockHeader, attestations: Op block: block_message, proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } diff --git a/lean_client/containers/tests/unit_tests/state_transition.rs b/lean_client/containers/tests/unit_tests/state_transition.rs index 91edfa7..aca04cd 100644 --- a/lean_client/containers/tests/unit_tests/state_transition.rs +++ b/lean_client/containers/tests/unit_tests/state_transition.rs @@ -3,10 +3,11 @@ use containers::{ block::{Block, SignedBlockWithAttestation, BlockWithAttestation, hash_tree_root}, state::State, types::{Bytes32, Uint64}, - Slot, Attestation, BlockSignatures + Slot, Attestation }; use pretty_assertions::assert_eq; use rstest::fixture; +use ssz::PersistentList; #[path = "common.rs"] mod common; @@ -78,6 +79,7 @@ fn test_state_transition_invalid_signatures() { assert_eq!(result.unwrap_err(), "Block signatures must be valid"); } +#[cfg(feature = "devnet1")] #[test] fn test_state_transition_bad_state_root() { let state = genesis_state(); @@ -93,7 +95,7 @@ fn test_state_transition_bad_state_root() { block, proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }; let result = state.state_transition(final_signed_block_with_attestation, true); diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index e2d230a..5718d48 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -4,7 +4,7 @@ use fork_choice::{ }; use containers::{ - attestation::{Attestation, AttestationData, BlockSignatures, SignedAttestation, Signature}, + attestation::{Attestation, AttestationData, SignedAttestation, Signature}, block::{hash_tree_root, Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, 
config::Config, @@ -13,7 +13,7 @@ use containers::{ }; use serde::Deserialize; -use ssz::SszHash; +use ssz::{PersistentList, SszHash}; use std::collections::HashMap; use std::panic::AssertUnwindSafe; @@ -299,7 +299,7 @@ fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAtt block, proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } @@ -329,7 +329,7 @@ fn convert_test_block(test_block_with_att: &TestBlockWithAttestation) -> SignedB block, proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } From f1f955a9c17852b9cf978a6396b1deff9be2a4c7 Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Tue, 23 Dec 2025 16:27:26 +0200 Subject: [PATCH 03/23] added tests, fixed some types --- lean_client/containers/src/attestation.rs | 106 ++++++++- lean_client/containers/src/block.rs | 211 +++++++++++++----- lean_client/containers/src/serde_helpers.rs | 5 +- lean_client/containers/src/state.rs | 19 +- .../unit_tests/attestation_aggregation.rs | 132 +++++++++++ .../containers/tests/unit_tests/common.rs | 55 ++++- .../containers/tests/unit_tests/mod.rs | 1 + .../tests/unit_tests/state_process.rs | 1 + .../tests/unit_tests/state_transition.rs | 101 ++++++++- 9 files changed, 549 insertions(+), 82 deletions(-) create mode 100644 lean_client/containers/tests/unit_tests/attestation_aggregation.rs diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 6a820c7..302ef08 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -1,8 +1,9 @@ use crate::{Checkpoint, Slot, Uint64}; use serde::{Deserialize, Serialize}; +use ssz::BitList; use ssz::ByteVector; use ssz_derive::Ssz; -use typenum::{Prod, Sum, U100, U31, U12}; +use typenum::{Prod, Sum, U100, U12, U31}; pub type U3100 = Prod; @@ -21,11 +22,64 @@ pub type Attestations = ssz::PersistentList; pub type 
AggregatedAttestations = ssz::PersistentList; +#[cfg(feature = "devnet1")] pub type AttestationSignatures = ssz::PersistentList; +#[cfg(feature = "devnet2")] +pub type AttestationSignatures = ssz::PersistentList; + +#[cfg(feature = "devnet2")] +pub type NaiveAggregatedSignature = ssz::PersistentList; + /// Bitlist representing validator participation in an attestation. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). -pub type AggregationBits = ssz::BitList; +#[derive(Clone, Debug, PartialEq, Eq, Default, Ssz, Serialize, Deserialize)] +pub struct AggregationBits(pub BitList); + +impl AggregationBits { + pub const LIMIT: u64 = 4096; + + pub fn from_validator_indices(indices: &[u64]) -> Self { + assert!( + !indices.is_empty(), + "Aggregated attestation must reference at least one validator" + ); + + let max_id = *indices.iter().max().unwrap(); + assert!( + max_id < Self::LIMIT, + "Validator index out of range for aggregation bits" + ); + + let mut bits = BitList::::with_length((max_id + 1) as usize); + + for i in 0..=max_id { + bits.set(i as usize, false); + } + + for &i in indices { + bits.set(i as usize, true); + } + + AggregationBits(bits) + } + + pub fn to_validator_indices(&self) -> Vec { + let indices: Vec = self + .0 + .iter() + .enumerate() + .filter_map(|(i, bit)| if *bit { Some(i as u64) } else { None }) + .collect(); + + assert!( + !indices.is_empty(), + "Aggregated attestation must reference at least one validator" + ); + + indices + } +} /// Naive list of validator signatures used for aggregation placeholders. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). @@ -57,13 +111,8 @@ pub struct Attestation { /// Validator attestation bundled with its signature. 
#[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAttestation { - #[cfg(feature = "devnet2")] - pub validator_id: u64, - #[cfg(feature = "devnet2")] - pub message: AttestationData, - #[cfg(feature = "devnet1")] pub message: Attestation, - /// signature over attestaion message only as it would be aggregated later in attestation + /// Signature aggregation produced by the leanVM (SNARKs in the future). pub signature: Signature, } @@ -73,12 +122,51 @@ pub struct AggregatedAttestation { /// Bitfield indicating which validators participated in the aggregation. pub aggregation_bits: AggregationBits, /// Combined attestation data similar to the beacon chain format. - /// + /// /// Multiple validator attestations are aggregated here without the complexity of /// committee assignments. pub data: AttestationData, } +impl AggregatedAttestation { + pub fn aggregate_by_data(attestations: &[Attestation]) -> Vec { + let mut groups: Vec<(AttestationData, Vec)> = Vec::new(); + + for attestation in attestations { + // Try to find an existing group with the same data + if let Some((_, validator_ids)) = groups + .iter_mut() + .find(|(data, _)| *data == attestation.data) + { + validator_ids.push(attestation.validator_id.0); + } else { + // Create a new group + groups.push((attestation.data.clone(), vec![attestation.validator_id.0])); + } + } + + groups + .into_iter() + .map(|(data, validator_ids)| AggregatedAttestation { + aggregation_bits: AggregationBits::from_validator_indices(&validator_ids), + data, + }) + .collect() + } + + pub fn to_plain(&self) -> Vec { + let validator_indices = self.aggregation_bits.to_validator_indices(); + + validator_indices + .into_iter() + .map(|validator_id| Attestation { + validator_id: Uint64(validator_id), + data: self.data.clone(), + }) + .collect() + } +} + /// Aggregated attestation bundled with aggregated signatures. 
#[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAggregatedAttestation { diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index ac5063f..0acf1b2 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs @@ -4,9 +4,10 @@ use ssz_derive::Ssz; #[cfg(feature = "xmss-verify")] use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to_the_20::target_sum::SIGTargetSumLifetime20W2NoOff; -use ssz::PersistentList; +use ssz::{PersistentList, SszHash}; use typenum::U4096; -use crate::attestation::AttestationSignatures; +use crate::attestation::{AggregatedAttestations, AttestationSignatures}; +use crate::validator::BlsPublicKey; /// The body of a block, containing payload data. /// @@ -15,7 +16,7 @@ use crate::attestation::AttestationSignatures; #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct BlockBody { #[cfg(feature = "devnet2")] - pub attestations: VariableList, + pub attestations: AggregatedAttestations, #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers")] pub attestations: Attestations, @@ -51,7 +52,7 @@ pub struct BlockWithAttestation { pub proposer_attestation: Attestation, } -#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Ssz, Deserialize, Default)] pub struct BlockSignatures { pub attestation_signatures: AttestationSignatures, pub proposer_signature: Signature, @@ -127,6 +128,7 @@ impl SignedBlockWithAttestation { /// /// - Spec: /// - XMSS Library: + #[cfg(feature = "devnet1")] pub fn verify_signatures(&self, parent_state: State) -> bool { // Unpack the signed block components let block = &self.message.block; @@ -138,7 +140,7 @@ impl SignedBlockWithAttestation { // 1. Block body attestations (from other validators) // 2. 
Proposer attestation (from the block producer) let mut all_attestations: Vec = Vec::new(); - + // Collect block body attestations let mut i: u64 = 0; loop { @@ -148,7 +150,7 @@ impl SignedBlockWithAttestation { } i += 1; } - + // Append proposer attestation all_attestations.push(self.message.proposer_attestation.clone()); @@ -170,7 +172,11 @@ impl SignedBlockWithAttestation { // The ordering must be preserved: // 1. Block body attestations, // 2. The proposer attestation. - assert_eq!(signatures_vec.len(), all_attestations.len(), "Number of signatures does not match number of attestations"); + assert_eq!( + signatures_vec.len(), + all_attestations.len(), + "Number of signatures does not match number of attestations" + ); let validators = &parent_state.validators; let num_validators = validators.len_u64(); @@ -193,60 +199,149 @@ impl SignedBlockWithAttestation { // - The validator possesses the secret key for their public key // - The attestation has not been tampered with // - The signature was created at the correct epoch (slot) - - #[cfg(feature = "xmss-verify")] - { - use leansig::signature::SignatureScheme; - use leansig::serialization::Serializable; - - // Compute the message hash from the attestation - let message_bytes: [u8; 32] = hash_tree_root(attestation).0.into(); - let epoch = attestation.data.slot.0 as u32; - - // Get public key bytes - use as_bytes() method - let pubkey_bytes = validator.pubkey.0.as_bytes(); - - // Deserialize the public key using Serializable trait - type PubKey = ::PublicKey; - let pubkey = match PubKey::from_bytes(pubkey_bytes) { - Ok(pk) => pk, - Err(e) => { - eprintln!("Failed to deserialize public key at slot {:?}: {:?}", attestation.data.slot, e); - return false; - } - }; - - // Get signature bytes - use as_bytes() method - let sig_bytes = signature.as_bytes(); - - // Deserialize the signature using Serializable trait - type Sig = ::Signature; - let sig = match Sig::from_bytes(sig_bytes) { - Ok(s) => s, - Err(e) => { - 
eprintln!("Failed to deserialize signature at slot {:?}: {:?}", attestation.data.slot, e); - return false; - } - }; - - // Verify the signature - if !SIGTargetSumLifetime20W2NoOff::verify(&pubkey, epoch, &message_bytes, &sig) { - eprintln!("XMSS signature verification failed at slot {:?}", attestation.data.slot); - return false; - } - } - - #[cfg(not(feature = "xmss-verify"))] + + let message_bytes: [u8; 32] = hash_tree_root(attestation).0.into(); + + assert!( + verify_xmss_signature( + validator.pubkey.0.as_bytes(), + attestation.data.slot, + &message_bytes, + &signature, + ), + "Attestation signature verification failed" + ); + } + + true + } + + #[cfg(feature = "devnet2")] + pub fn verify_signatures(&self, parent_state: State) -> bool { + // Unpack the signed block components + let block = &self.message.block; + let signatures = &self.signature; + let aggregated_attestations = block.body.attestations.clone(); + let attestation_signatures = signatures.attestation_signatures.clone(); + + // Verify signature count matches aggregated attestation count + assert_eq!( + aggregated_attestations.len_u64(), + attestation_signatures.len_u64(), + "Number of signatures does not match number of attestations" + ); + + let validators = &parent_state.validators; + let num_validators = validators.len_u64(); + + // Verify each attestation signature + for (aggregated_attestation, aggregated_signature) in (&aggregated_attestations) + .into_iter() + .zip((&attestation_signatures).into_iter()) + { + let validator_ids = aggregated_attestation + .aggregation_bits + .to_validator_indices(); + + assert_eq!( + aggregated_signature.len_u64(), + validator_ids.len() as u64, + "Aggregated attestation signature count mismatch" + ); + + let attestation_root = aggregated_attestation.data.hash_tree_root(); + + // Loop through zipped validator IDs and their corresponding signatures + // Verify each individual signature within the aggregated attestation + for (validator_id, signature) in + 
validator_ids.iter().zip(aggregated_signature.into_iter()) { - // Placeholder: XMSS verification disabled - // To enable, compile with --features xmss-verify - let _pubkey = &validator.pubkey; - let _slot = attestation.data.slot; - let _message = hash_tree_root(attestation); - let _sig = signature; + // Ensure validator exists in the active set + assert!( + *validator_id < num_validators, + "Validator index out of range" + ); + + let validator = validators.get(*validator_id).expect("validator must exist"); + + // Get the actual payload root for the attestation data + let attestation_root: [u8; 32] = + hash_tree_root(&aggregated_attestation.data).0.into(); + + // Verify the XMSS signature + assert!( + verify_xmss_signature( + validator.pubkey.0.as_bytes(), + aggregated_attestation.data.slot, + &attestation_root, + signature, + ), + "Attestation signature verification failed" + ); } + + // Verify the proposer attestation signature + let proposer_attestation = self.message.proposer_attestation.clone(); + let proposer_signature = signatures.proposer_signature; + + assert!( + proposer_attestation.validator_id.0 < num_validators, + "Proposer index out of range" + ); + + let proposer = validators + .get(proposer_attestation.validator_id.0) + .expect("proposer must exist"); + + let proposer_root: [u8; 32] = hash_tree_root(&proposer_attestation).0.into(); + assert!( + verify_xmss_signature( + proposer.pubkey.0.as_bytes(), + proposer_attestation.data.slot, + &proposer_root, + &proposer_signature, + ), + "Proposer attestation signature verification failed" + ); } true } -} \ No newline at end of file +} + +#[cfg(feature = "xmss-verify")] +pub fn verify_xmss_signature( + pubkey_bytes: &[u8], + slot: Slot, + message_bytes: &[u8; 32], + signature: &Signature, +) -> bool { + use leansig::serialization::Serializable; + use leansig::signature::SignatureScheme; + + let epoch = slot.0 as u32; + + type PubKey = ::PublicKey; + let pubkey = match PubKey::from_bytes(pubkey_bytes) { + 
Ok(pk) => pk, + Err(_) => return false, + }; + + type Sig = ::Signature; + let sig = match Sig::from_bytes(signature.as_bytes()) { + Ok(s) => s, + Err(_) => return false, + }; + + SIGTargetSumLifetime20W2NoOff::verify(&pubkey, epoch, message_bytes, &sig) +} + +#[cfg(not(feature = "xmss-verify"))] +pub fn verify_xmss_signature( + _pubkey_bytes: &[u8], + _slot: Slot, + _message_bytes: &[u8; 32], + _signature: &Signature, +) -> bool { + true +} diff --git a/lean_client/containers/src/serde_helpers.rs b/lean_client/containers/src/serde_helpers.rs index 0568f71..01604e5 100644 --- a/lean_client/containers/src/serde_helpers.rs +++ b/lean_client/containers/src/serde_helpers.rs @@ -187,6 +187,7 @@ pub mod signature { /// where each signature can be either hex string or structured XMSS format pub mod block_signatures { use super::*; + use crate::block::BlockSignatures; use crate::Signature; use serde_json::Value; use ssz::PersistentList; @@ -309,11 +310,11 @@ pub mod block_signatures { } #[cfg(feature = "devnet2")] - pub fn serialize(value: &BlockSignatures, serializer: S) -> Result + pub fn serialize(_value: &BlockSignatures, _serializer: S) -> Result where S: Serializer, { - Err(serde::de::Error::custom( + Err(serde::ser::Error::custom( "BlockSignatures serialization not implemented for devnet2", )) } diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 46641e1..02a26e3 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -8,6 +8,8 @@ use ssz::{PersistentList as List, PersistentList}; use ssz_derive::Ssz; use std::collections::BTreeMap; use typenum::U4096; +use crate::attestation::AggregatedAttestations; +use crate::block::BlockSignatures; pub const VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const JUSTIFICATION_ROOTS_LIMIT: usize = 1 << 18; // 262144 @@ -294,7 +296,20 @@ impl State { pub fn process_block(&self, block: &Block) -> Result { let state = 
self.process_block_header(block)?; + #[cfg(feature = "devnet1")] let state_after_ops = state.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let state_after_ops = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation).map_err(|e| format!("Failed to push attestation: {:?}", e))?; + } + } + state.process_attestations(&unaggregated_attestations) + }; // State root validation is handled by state_transition_with_validation when needed @@ -688,7 +703,7 @@ mod tests { config: st.config.clone(), ..st.clone() } - .is_proposer(ValidatorIndex(0))); + .is_proposer(ValidatorIndex(0))); } #[test] @@ -828,6 +843,7 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block_advances_state() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(10)); @@ -868,6 +884,7 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block_state_root_matches() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(3)); diff --git a/lean_client/containers/tests/unit_tests/attestation_aggregation.rs b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs new file mode 100644 index 0000000..285aa46 --- /dev/null +++ b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs @@ -0,0 +1,132 @@ +#[cfg(feature = "devnet2")] +#[cfg(test)] +mod tests { + use containers::attestation::{AggregatedAttestation, AggregationBits, Attestation, AttestationData}; + use containers::{Bytes32, Uint64}; + use containers::checkpoint::Checkpoint; + use containers::slot::Slot; + + #[test] + fn test_aggregated_attestation_structure() { + let att_data = AttestationData { + slot: Slot(5), + head: 
Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + } + }; + + let bits = AggregationBits::from_validator_indices(&vec![2, 7]); + let agg = AggregatedAttestation { + aggregation_bits: bits.clone(), + data: att_data.clone() + }; + + let indices = agg.aggregation_bits.to_validator_indices(); + assert_eq!(indices.into_iter().collect::>(), vec![2, 7].into_iter().collect()); + assert_eq!(agg.data, att_data); + } + + #[test] + fn test_aggregate_attestations_by_common_data() { + let att_data1 = AttestationData { + slot: Slot(5), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + } + }; + let att_data2 = AttestationData { + slot: Slot(6), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(5), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + } + }; + + let attestations = vec![ + Attestation { + validator_id: Uint64(1), + data: att_data1.clone(), + }, + Attestation { + validator_id: Uint64(3), + data: att_data1.clone(), + }, + Attestation { + validator_id: Uint64(5), + data: att_data2.clone(), + }, + ]; + + let aggregated = AggregatedAttestation::aggregate_by_data(&attestations); + assert_eq!(aggregated.len(), 2); + + let agg1 = aggregated.iter().find(|agg| agg.data == att_data1).unwrap(); + let validator_ids1 = agg1.aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids1.into_iter().collect::>(), vec![1, 3].into_iter().collect()); + + let agg2 = aggregated.iter().find(|agg| agg.data == att_data2).unwrap(); + let validator_ids2 = agg2.aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids2, vec![5]); + } + + #[test] + fn 
test_aggregate_empty_attestations() { + let aggregated = AggregatedAttestation::aggregate_by_data(&[]); + assert!(aggregated.is_empty()); + } + + #[test] + fn test_aggregate_single_attestation() { + let att_data = AttestationData { + slot: Slot(5), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + } + }; + + let attestations = vec![Attestation { + validator_id: Uint64(5), + data: att_data.clone(), + }]; + let aggregated = AggregatedAttestation::aggregate_by_data(&attestations); + + assert_eq!(aggregated.len(), 1); + let validator_ids = aggregated[0].aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids, vec![5]); + } +} diff --git a/lean_client/containers/tests/unit_tests/common.rs b/lean_client/containers/tests/unit_tests/common.rs index 781c43e..26fa0a5 100644 --- a/lean_client/containers/tests/unit_tests/common.rs +++ b/lean_client/containers/tests/unit_tests/common.rs @@ -1,7 +1,7 @@ -use containers::{ - Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators -}; +use containers::{Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators, AggregatedAttestation, Signature}; use ssz::{PersistentList}; +use typenum::U4096; +use containers::block::BlockSignatures; pub const DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tests @@ -11,9 +11,38 @@ const _: [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT] = [(); 
DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT]; pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Option) -> SignedBlockWithAttestation { + #[cfg(feature = "devnet1")] let body = BlockBody { attestations: attestations.unwrap_or_else(PersistentList::default), }; + #[cfg(feature = "devnet2")] + let body = BlockBody { + attestations: { + let attestations_vec = attestations.unwrap_or_default(); + + // Convert PersistentList into a Vec + let attestations_vec: Vec = attestations_vec.into_iter().cloned().collect(); + + let aggregated: Vec = + AggregatedAttestation::aggregate_by_data(&attestations_vec); + + + let aggregated: Vec = + AggregatedAttestation::aggregate_by_data(&attestations_vec); + + // Create a new empty PersistentList + let mut persistent_list: PersistentList = PersistentList::default(); + + // Push each aggregated attestation + for agg in aggregated { + persistent_list.push(agg).expect("PersistentList capacity exceeded"); + } + + persistent_list + }, + // other BlockBody fields... 
+ }; + let block_message = Block { slot: Slot(slot), @@ -23,13 +52,29 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op body: body, }; - SignedBlockWithAttestation { + #[cfg(feature = "devnet1")] + let return_value = SignedBlockWithAttestation { message: BlockWithAttestation { block: block_message, proposer_attestation: Attestation::default(), }, signature: PersistentList::default(), - } + }; + + #[cfg(feature = "devnet2")] + let return_value = SignedBlockWithAttestation { + message: BlockWithAttestation { + block: block_message, + proposer_attestation: Attestation::default(), + }, + signature: BlockSignatures { + attestation_signatures: PersistentList::default(), + proposer_signature: Signature::default(), + } + }; + + return_value + } pub fn create_attestations(indices: &[usize]) -> Vec { diff --git a/lean_client/containers/tests/unit_tests/mod.rs b/lean_client/containers/tests/unit_tests/mod.rs index 16a5646..b9f442f 100644 --- a/lean_client/containers/tests/unit_tests/mod.rs +++ b/lean_client/containers/tests/unit_tests/mod.rs @@ -4,3 +4,4 @@ mod state_basic; mod state_justifications; mod state_process; mod state_transition; +mod attestation_aggregation; diff --git a/lean_client/containers/tests/unit_tests/state_process.rs b/lean_client/containers/tests/unit_tests/state_process.rs index 7db1849..afc1887 100644 --- a/lean_client/containers/tests/unit_tests/state_process.rs +++ b/lean_client/containers/tests/unit_tests/state_process.rs @@ -106,6 +106,7 @@ fn test_process_block_header_invalid( } // This test verifies that attestations correctly justify and finalize slots +#[cfg(feature = "devnet1")] #[test] fn test_process_attestations_justification_and_finalization() { let mut state = genesis_state(); diff --git a/lean_client/containers/tests/unit_tests/state_transition.rs b/lean_client/containers/tests/unit_tests/state_transition.rs index aca04cd..9fe6abb 100644 --- a/lean_client/containers/tests/unit_tests/state_transition.rs 
+++ b/lean_client/containers/tests/unit_tests/state_transition.rs @@ -1,9 +1,9 @@ // tests/state_transition.rs use containers::{ - block::{Block, SignedBlockWithAttestation, BlockWithAttestation, hash_tree_root}, + block::{hash_tree_root, Block, BlockWithAttestation, SignedBlockWithAttestation}, state::State, types::{Bytes32, Uint64}, - Slot, Attestation + Attestation, Attestations, Slot, }; use pretty_assertions::assert_eq; use rstest::fixture; @@ -24,13 +24,29 @@ fn test_state_transition_full() { let state = genesis_state(); let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); - let signed_block_with_attestation = create_block(1, &mut state_at_slot_1.latest_block_header, None); + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); let block = signed_block_with_attestation.message.block.clone(); // Use process_block_header + process_operations to avoid state root validation during setup let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] let expected_state = state_after_header.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let expected_state = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation); + } + } + state_after_header.process_attestations(&unaggregated_attestations) + }; + let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), ..block @@ -44,7 +60,9 @@ fn test_state_transition_full() { signature: signed_block_with_attestation.signature, }; - let final_state = state.state_transition(final_signed_block_with_attestation, true).unwrap(); + let final_state = state + 
.state_transition(final_signed_block_with_attestation, true) + .unwrap(); assert_eq!(final_state, expected_state); } @@ -54,13 +72,29 @@ fn test_state_transition_invalid_signatures() { let state = genesis_state(); let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); - let signed_block_with_attestation = create_block(1, &mut state_at_slot_1.latest_block_header, None); + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); let block = signed_block_with_attestation.message.block.clone(); // Use process_block_header + process_operations to avoid state root validation during setup let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] let expected_state = state_after_header.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let expected_state = { + let mut list = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + list.push(attestation); + } + } + list + }; + let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), ..block @@ -85,7 +119,8 @@ fn test_state_transition_bad_state_root() { let state = genesis_state(); let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); - let signed_block_with_attestation = create_block(1, &mut state_at_slot_1.latest_block_header, None); + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); let mut block = signed_block_with_attestation.message.block.clone(); block.state_root = Bytes32(ssz::H256::zero()); @@ -101,4 +136,56 @@ fn test_state_transition_bad_state_root() { let result = state.state_transition(final_signed_block_with_attestation, true); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "Invalid block 
state root"); -} \ No newline at end of file +} + +#[cfg(feature = "devnet2")] +#[test] +fn test_state_transition_devnet2() { + let state = genesis_state(); + let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); + + // Create a block with attestations for devnet2 + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); + let block = signed_block_with_attestation.message.block.clone(); + + // Process the block header and attestations + let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] + let expected_state = state_after_header.process_attestations(&block.body.attestations); + + #[cfg(feature = "devnet2")] + let expected_state = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation); + } + } + state_after_header.process_attestations(&unaggregated_attestations) + }; + + // Ensure the state root matches the expected state + let block_with_correct_root = Block { + state_root: hash_tree_root(&expected_state), + ..block + }; + + let final_signed_block_with_attestation = SignedBlockWithAttestation { + message: BlockWithAttestation { + block: block_with_correct_root, + proposer_attestation: signed_block_with_attestation.message.proposer_attestation, + }, + signature: signed_block_with_attestation.signature, + }; + + // Perform the state transition and validate the result + let final_state = state + .state_transition(final_signed_block_with_attestation, true) + .unwrap(); + + assert_eq!(final_state, expected_state); +} From ed67aaf959a481a99db6ae1ecb3679920444152e Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Mon, 29 Dec 2025 12:37:57 +0200 Subject: [PATCH 04/23] fixed 
environment selection by adding a minimal crate `env-config`. Added readme on how to select devnet --- lean_client/Cargo.lock | 18 +- lean_client/Cargo.toml | 8 +- lean_client/ENVIRONMENT_SELECTION.md | 26 +++ lean_client/containers/Cargo.toml | 7 +- lean_client/containers/src/attestation.rs | 6 +- lean_client/containers/src/state.rs | 4 +- lean_client/containers/tests/main.rs | 2 +- .../tests/test_vectors/block_processing.rs | 10 + .../containers/tests/test_vectors/runner.rs | 19 +- .../tests/test_vectors/verify_signatures.rs | 4 + lean_client/env-config/Cargo.toml | 12 ++ lean_client/env-config/src/lib.rs | 1 + lean_client/fork_choice/Cargo.toml | 8 +- lean_client/fork_choice/src/handlers.rs | 183 +++++++++++++----- lean_client/fork_choice/src/store.rs | 3 + .../tests/fork_choice_test_vectors.rs | 10 + .../fork_choice/tests/unit_tests/votes.rs | 9 + lean_client/networking/Cargo.toml | 6 + lean_client/networking/src/network/service.rs | 7 + lean_client/networking/src/types.rs | 5 + lean_client/src/main.rs | 54 +++++- lean_client/validator/Cargo.toml | 3 + lean_client/validator/src/lib.rs | 120 ++++++++++-- 23 files changed, 430 insertions(+), 95 deletions(-) create mode 100644 lean_client/ENVIRONMENT_SELECTION.md create mode 100644 lean_client/env-config/Cargo.toml create mode 100644 lean_client/env-config/src/lib.rs diff --git a/lean_client/Cargo.lock b/lean_client/Cargo.lock index 93fb9dd..910d9c1 100644 --- a/lean_client/Cargo.lock +++ b/lean_client/Cargo.lock @@ -859,6 +859,7 @@ dependencies = [ name = "containers" version = "0.1.0" dependencies = [ + "env-config", "hex", "leansig", "pretty_assertions", @@ -1399,6 +1400,10 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "env-config" +version = "0.1.0" + [[package]] name = "equivalent" version = "1.0.2" @@ -1412,7 +1417,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - 
"windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -1586,6 +1591,7 @@ name = "fork-choice" version = "0.1.0" dependencies = [ "containers", + "env-config", "serde", "serde_json", "ssz", @@ -2311,7 +2317,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", "serde_core", ] @@ -3217,6 +3223,7 @@ dependencies = [ "async-trait", "containers", "enr", + "env-config", "futures", "libp2p", "libp2p-identity 0.2.12", @@ -3265,7 +3272,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.1", ] [[package]] @@ -4268,7 +4275,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -4919,7 +4926,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -5340,6 +5347,7 @@ name = "validator" version = "0.1.0" dependencies = [ "containers", + "env-config", "fork-choice", "leansig", "serde", diff --git a/lean_client/Cargo.toml b/lean_client/Cargo.toml index 7d98ae7..9e72c43 100644 --- a/lean_client/Cargo.toml +++ b/lean_client/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["chain", "containers", "fork_choice", "networking", "validator"] +members = ["chain", "containers", "env-config", "fork_choice", "networking", "validator"] resolver = "2" [workspace.package] @@ -14,7 +14,7 @@ containers = { path = "./containers" } fork_choice = { path = "./fork_choice" } networking = { path = "./networking" } validator = { path = "./validator" } -libp2p = {version = "0.56.0", default-features = false, features = [ +libp2p = { version = "0.56.0", default-features = false, features = [ 'dns', 'gossipsub', 
'identify', @@ -52,8 +52,10 @@ version = "0.1.0" edition = "2021" [features] -default = ["xmss-signing"] +default = ["devnet2", "xmss-signing"] xmss-signing = ["validator/xmss-signing"] +devnet1 = ["containers/devnet1", "fork-choice/devnet1", "networking/devnet1", "validator/devnet1"] +devnet2 = ["containers/devnet2", "fork-choice/devnet2", "networking/devnet2", "validator/devnet2"] [dependencies] chain = { path = "./chain" } diff --git a/lean_client/ENVIRONMENT_SELECTION.md b/lean_client/ENVIRONMENT_SELECTION.md new file mode 100644 index 0000000..d906c9d --- /dev/null +++ b/lean_client/ENVIRONMENT_SELECTION.md @@ -0,0 +1,26 @@ +### To select which devnet you want to compile + +#### Option A +- Change the default features in root `Cargo.toml`: +```toml +[features] +default = ["devnet1", "<...other features>"] # Change to "devnet2" if needed +devnet1 = [...] +devnet2 = [...] +``` + +#### Option B +- Use the `--no-default-features` flag and specify the desired devnet feature when building or running the project: +```bash +cargo build --no-default-features --features devnet1 # Change to devnet2 +``` + + +### Running tests for a specific devnet + +From root directory, use the following command: +```bash +cargo test -p --no-default-features --features devnet1 # Change to devnet2 +``` + +Use `` to specify the crate you want to test. 
\ No newline at end of file diff --git a/lean_client/containers/Cargo.toml b/lean_client/containers/Cargo.toml index b6a45c3..29e8ecd 100644 --- a/lean_client/containers/Cargo.toml +++ b/lean_client/containers/Cargo.toml @@ -5,15 +5,16 @@ edition = "2021" [features] xmss-verify = ["leansig"] -default = ["devnet1"] -devnet1 = [] -devnet2 = [] +default = [] +devnet1 = ["env-config/devnet1"] +devnet2 = ["env-config/devnet2"] [lib] name = "containers" path = "src/lib.rs" [dependencies] +env-config = { path = "../env-config", default-features = false } ssz = { git = "https://github.com/grandinetech/grandine", package = "ssz", branch = "develop", submodules = true } ssz_derive = { git = "https://github.com/grandinetech/grandine", package = "ssz_derive", branch = "develop", submodules = false } typenum = "1" diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 302ef08..9c3537c 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -111,8 +111,12 @@ pub struct Attestation { /// Validator attestation bundled with its signature. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAttestation { + #[cfg(feature = "devnet2")] + pub validator_id: u64, + #[cfg(feature = "devnet2")] + pub message: AttestationData, + #[cfg(feature = "devnet1")] pub message: Attestation, - /// Signature aggregation produced by the leanVM (SNARKs in the future). 
pub signature: Signature, } diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 02a26e3..b176e55 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -1,5 +1,5 @@ use crate::validator::Validator; -use crate::{block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, Slot, Uint64, ValidatorIndex}; +use crate::{block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, SignedAttestation, Slot, Uint64, ValidatorIndex}; use crate::{ HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, }; @@ -686,7 +686,7 @@ impl State { _proposer_index: ValidatorIndex, _parent_root: Bytes32, _initial_attestations: Option>, - _available_signed_attestations: Option<&[SignedBlockWithAttestation]>, + _available_signed_attestations: Option<&[SignedAttestation]>, _known_block_roots: Option<&std::collections::HashSet>, ) -> Result<(Block, Self, Vec, BlockSignatures), String> { Err("build_block is not implemented for devnet2".to_string()) diff --git a/lean_client/containers/tests/main.rs b/lean_client/containers/tests/main.rs index 96deacd..4d48535 100644 --- a/lean_client/containers/tests/main.rs +++ b/lean_client/containers/tests/main.rs @@ -1,4 +1,4 @@ -// tests/main.rs - Test entry point +// tests/lib - Test entry point mod debug_deserialize; mod unit_tests; mod test_vectors; \ No newline at end of file diff --git a/lean_client/containers/tests/test_vectors/block_processing.rs b/lean_client/containers/tests/test_vectors/block_processing.rs index 4dcd641..caec865 100644 --- a/lean_client/containers/tests/test_vectors/block_processing.rs +++ b/lean_client/containers/tests/test_vectors/block_processing.rs @@ -2,6 +2,7 @@ use super::runner::TestRunner; #[test] +#[cfg(feature = "devnet1")] 
fn test_process_first_block_after_genesis() { let test_path = "../tests/test_vectors/test_blocks/test_process_first_block_after_genesis.json"; TestRunner::run_block_processing_test(test_path) @@ -9,6 +10,7 @@ fn test_process_first_block_after_genesis() { } #[test] +#[cfg(feature = "devnet1")] fn test_blocks_with_gaps() { let test_path = "../tests/test_vectors/test_blocks/test_blocks_with_gaps.json"; TestRunner::run_block_processing_test(test_path) @@ -16,6 +18,7 @@ fn test_blocks_with_gaps() { } #[test] +#[cfg(feature = "devnet1")] fn test_linear_chain_multiple_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_linear_chain_multiple_blocks.json"; TestRunner::run_block_processing_test(test_path) @@ -23,6 +26,7 @@ fn test_linear_chain_multiple_blocks() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_extends_deep_chain() { let test_path = "../tests/test_vectors/test_blocks/test_block_extends_deep_chain.json"; TestRunner::run_block_processing_test(test_path) @@ -30,6 +34,7 @@ fn test_block_extends_deep_chain() { } #[test] +#[cfg(feature = "devnet1")] fn test_empty_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks.json"; TestRunner::run_block_processing_test(test_path) @@ -37,6 +42,7 @@ fn test_empty_blocks() { } #[test] +#[cfg(feature = "devnet1")] fn test_empty_blocks_with_missed_slots() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks_with_missed_slots.json"; TestRunner::run_block_processing_test(test_path) @@ -44,6 +50,7 @@ fn test_empty_blocks_with_missed_slots() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_at_large_slot_number() { let test_path = "../tests/test_vectors/test_blocks/test_block_at_large_slot_number.json"; TestRunner::run_block_processing_test(test_path) @@ -53,6 +60,7 @@ fn test_block_at_large_slot_number() { // Invalid block tests (expecting failures) #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_parent_root() { let test_path = 
"../tests/test_vectors/test_blocks/test_block_with_invalid_parent_root.json"; TestRunner::run_block_processing_test(test_path) @@ -60,6 +68,7 @@ fn test_block_with_invalid_parent_root() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_proposer() { let test_path = "../tests/test_vectors/test_blocks/test_block_with_invalid_proposer.json"; TestRunner::run_block_processing_test(test_path) @@ -67,6 +76,7 @@ fn test_block_with_invalid_proposer() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_state_root() { let test_path = "../tests/test_vectors/test_blocks/test_block_with_invalid_state_root.json"; TestRunner::run_block_processing_test(test_path) diff --git a/lean_client/containers/tests/test_vectors/runner.rs b/lean_client/containers/tests/test_vectors/runner.rs index 910fde5..bf23138 100644 --- a/lean_client/containers/tests/test_vectors/runner.rs +++ b/lean_client/containers/tests/test_vectors/runner.rs @@ -552,6 +552,7 @@ impl TestRunner { /// Test runner for verify_signatures test vectors /// Tests XMSS signature verification on SignedBlockWithAttestation + #[cfg(feature = "devnet1")] pub fn run_verify_signatures_test>(path: P) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; @@ -571,25 +572,11 @@ impl TestRunner { println!(" Block slot: {}", signed_block.message.block.slot.0); println!(" Proposer index: {}", signed_block.message.block.proposer_index.0); - // Count attestations - let mut attestation_count = 0u64; - loop { - match signed_block.message.block.body.attestations.get(attestation_count) { - Ok(_) => attestation_count += 1, - Err(_) => break, - } - } + let attestation_count = signed_block.message.block.body.attestations.len_u64(); println!(" Attestations in block: {}", attestation_count); println!(" Proposer attestation validator: {}", signed_block.message.proposer_attestation.validator_id.0); - // Count signatures - let mut signature_count = 0u64; - loop { - match 
signed_block.signature.get(signature_count) { - Ok(_) => signature_count += 1, - Err(_) => break, - } - } + let signature_count = signed_block.signature.len_u64(); println!(" Signatures: {}", signature_count); // Check if we expect this test to fail diff --git a/lean_client/containers/tests/test_vectors/verify_signatures.rs b/lean_client/containers/tests/test_vectors/verify_signatures.rs index 2bca4ca..cfc3301 100644 --- a/lean_client/containers/tests/test_vectors/verify_signatures.rs +++ b/lean_client/containers/tests/test_vectors/verify_signatures.rs @@ -15,6 +15,7 @@ use super::runner::TestRunner; // Without xmss-verify feature, they pass because structural validation succeeds. #[test] +#[cfg(feature = "devnet1")] fn test_proposer_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_signature.json"; TestRunner::run_verify_signatures_test(test_path) @@ -22,6 +23,7 @@ fn test_proposer_signature() { } #[test] +#[cfg(feature = "devnet1")] fn test_proposer_and_attester_signatures() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_and_attester_signatures.json"; TestRunner::run_verify_signatures_test(test_path) @@ -34,6 +36,7 @@ fn test_proposer_and_attester_signatures() { // Run with `cargo test --features xmss-verify` to enable full signature verification. #[test] +#[cfg(feature = "devnet1")] #[ignore = "Requires xmss-verify feature for actual signature validation. Run with: cargo test --features xmss-verify"] fn test_invalid_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_invalid_signature.json"; @@ -42,6 +45,7 @@ fn test_invalid_signature() { } #[test] +#[cfg(feature = "devnet1")] #[ignore = "Requires xmss-verify feature for actual signature validation. 
Run with: cargo test --features xmss-verify"] fn test_mixed_valid_invalid_signatures() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_mixed_valid_invalid_signatures.json"; diff --git a/lean_client/env-config/Cargo.toml b/lean_client/env-config/Cargo.toml new file mode 100644 index 0000000..4b761e5 --- /dev/null +++ b/lean_client/env-config/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "env-config" +version.workspace = true +edition.workspace = true +authors.workspace = true +license.workspace = true + +[features] +devnet1 = [] +devnet2 = [] + +[dependencies] diff --git a/lean_client/env-config/src/lib.rs b/lean_client/env-config/src/lib.rs new file mode 100644 index 0000000..972005d --- /dev/null +++ b/lean_client/env-config/src/lib.rs @@ -0,0 +1 @@ +// Empty on purpose \ No newline at end of file diff --git a/lean_client/fork_choice/Cargo.toml b/lean_client/fork_choice/Cargo.toml index f906f59..b16f561 100644 --- a/lean_client/fork_choice/Cargo.toml +++ b/lean_client/fork_choice/Cargo.toml @@ -3,8 +3,14 @@ name = "fork-choice" version = "0.1.0" edition = "2021" +[features] +default = [] +devnet1 = ["containers/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "env-config/devnet1"] + [dependencies] -containers = { path = "../containers" } +env-config = { path = "../env-config", default-features = false } +containers = { path = "../containers", default-features = false } ssz = { git = "https://github.com/grandinetech/grandine", package = "ssz", branch = "develop"} ssz_derive = { git = "https://github.com/grandinetech/grandine", package = "ssz_derive", branch = "develop" } typenum = "1.17.0" diff --git a/lean_client/fork_choice/src/handlers.rs b/lean_client/fork_choice/src/handlers.rs index 618c8c9..fa9aa89 100644 --- a/lean_client/fork_choice/src/handlers.rs +++ b/lean_client/fork_choice/src/handlers.rs @@ -1,16 +1,13 @@ use crate::store::*; use containers::{ - attestation::SignedAttestation, - 
block::SignedBlockWithAttestation, - Bytes32, ValidatorIndex, + attestation::SignedAttestation, block::SignedBlockWithAttestation, Bytes32, ValidatorIndex, }; use ssz::SszHash; #[inline] pub fn on_tick(store: &mut Store, time: u64, has_proposal: bool) { // Calculate target time in intervals - let tick_interval_time = - time.saturating_sub(store.config.genesis_time) / SECONDS_PER_INTERVAL; + let tick_interval_time = time.saturating_sub(store.config.genesis_time) / SECONDS_PER_INTERVAL; // Tick forward one interval at a time while store.time < tick_interval_time { @@ -28,11 +25,25 @@ pub fn on_attestation( signed_attestation: SignedAttestation, is_from_block: bool, ) -> Result<(), String> { + #[cfg(feature = "devnet1")] let validator_id = ValidatorIndex(signed_attestation.message.validator_id.0); + #[cfg(feature = "devnet1")] let attestation_slot = signed_attestation.message.data.slot; + #[cfg(feature = "devnet1")] let source_slot = signed_attestation.message.data.source.slot; + #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot; + + #[cfg(feature = "devnet2")] + let validator_id = ValidatorIndex(signed_attestation.validator_id); + #[cfg(feature = "devnet2")] + let attestation_slot = signed_attestation.message.slot; + #[cfg(feature = "devnet2")] + let source_slot = signed_attestation.message.source.slot; + #[cfg(feature = "devnet2")] + let target_slot = signed_attestation.message.target.slot; + // Validate attestation is not from future let curr_slot = store.time / INTERVALS_PER_SLOT; if attestation_slot.0 > curr_slot { @@ -52,28 +63,69 @@ pub fn on_attestation( if is_from_block { // On-chain attestation processing - immediately becomes "known" + #[cfg(feature = "devnet1")] + if store + .latest_known_attestations + .get(&validator_id) + .map_or(true, |existing| { + existing.message.data.slot < attestation_slot + }) + { + store + .latest_known_attestations + .insert(validator_id, signed_attestation.clone()); + } + + #[cfg(feature 
= "devnet2")] if store .latest_known_attestations .get(&validator_id) - .map_or(true, |existing| existing.message.data.slot < attestation_slot) + .map_or(true, |existing| { + existing.message.slot < attestation_slot + }) { - store.latest_known_attestations.insert(validator_id, signed_attestation.clone()); + store + .latest_known_attestations + .insert(validator_id, signed_attestation.clone()); } // Remove from new attestations if superseded if let Some(existing_new) = store.latest_new_attestations.get(&validator_id) { + #[cfg(feature = "devnet1")] if existing_new.message.data.slot <= attestation_slot { store.latest_new_attestations.remove(&validator_id); } + #[cfg(feature = "devnet2")] + if existing_new.message.slot <= attestation_slot { + store.latest_new_attestations.remove(&validator_id); + } } } else { // Network gossip attestation processing - goes to "new" stage + #[cfg(feature = "devnet1")] + if store + .latest_new_attestations + .get(&validator_id) + .map_or(true, |existing| { + existing.message.data.slot < attestation_slot + }) + { + store + .latest_new_attestations + .insert(validator_id, signed_attestation); + } + + #[cfg(feature = "devnet2")] if store .latest_new_attestations .get(&validator_id) - .map_or(true, |existing| existing.message.data.slot < attestation_slot) + .map_or(true, |existing| { + existing.message.slot < attestation_slot + }) { - store.latest_new_attestations.insert(validator_id, signed_attestation); + store + .latest_new_attestations + .insert(validator_id, signed_attestation); } } Ok(()) @@ -125,8 +177,7 @@ fn process_block_internal( }; // Execute state transition to get post-state - let new_state = - state.state_transition_with_validation(signed_block.clone(), true, true)?; + let new_state = state.state_transition_with_validation(signed_block.clone(), true, true)?; // Store block and state store.blocks.insert(block_root, signed_block.clone()); @@ -143,49 +194,93 @@ fn process_block_internal( let attestations = 
&signed_block.message.block.body.attestations; let signatures = &signed_block.signature; - for i in 0.. { - match (attestations.get(i), signatures.get(i)) { - (Ok(attestation), Ok(signature)) => { - let signed_attestation = SignedAttestation { - message: attestation.clone(), - signature: signature.clone(), - }; - on_attestation(store, signed_attestation, true)?; + #[cfg(feature = "devnet1")] + { + for i in 0.. { + match (attestations.get(i), signatures.get(i)) { + (Ok(attestation), Ok(signature)) => { + let signed_attestation = SignedAttestation { + message: attestation.clone(), + signature: signature.clone(), + }; + on_attestation(store, signed_attestation, true)?; + } + _ => break, } - _ => break, } + + // Update head BEFORE processing proposer attestation + update_head(store); + + // Process proposer attestation as gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + let num_body_attestations = attestations.len_u64(); + + // Get proposer signature or use default if not present (for tests) + use containers::attestation::Signature; + let proposer_signature = signatures + .get(num_body_attestations) + .map(|sig| sig.clone()) + .unwrap_or_else(|_| Signature::default()); + + let proposer_signed_attestation = SignedAttestation { + message: signed_block.message.proposer_attestation.clone(), + signature: proposer_signature, + }; + + // Process proposer attestation as if received via gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + on_attestation(store, proposer_signed_attestation, false)?; + + Ok(()) } - // Update head BEFORE processing proposer attestation - update_head(store); + #[cfg(feature = "devnet2")] + { + let aggregated_attestations = &signed_block.message.block.body.attestations; + let attestation_signatures = &signed_block.signature.attestation_signatures; + let proposer_attestation = 
&signed_block.message.proposer_attestation; - // Process proposer attestation as gossip (is_from_block=false) - // This ensures it goes to "new" attestations and doesn't immediately affect fork choice - let num_body_attestations = { - let mut count = 0; - while attestations.get(count).is_ok() { - count += 1; + for (aggregated_attestation, aggregated_signature) in aggregated_attestations + .into_iter() + .zip(attestation_signatures) + { + let validator_ids: Vec = aggregated_attestation + .aggregation_bits.0 + .iter() + .enumerate() + .filter(|(_, bit)| **bit) + .map(|(index, _)| index as u64) + .collect(); + + for (validator_id, signature) in validator_ids.into_iter().zip(aggregated_signature) { + on_attestation( + store, + SignedAttestation { + validator_id, + message: aggregated_attestation.data.clone(), + signature: *signature, + }, + true, + )?; + } } - count - }; - // Get proposer signature or use default if not present (for tests) - use containers::attestation::Signature; - let proposer_signature = signatures - .get(num_body_attestations) - .map(|sig| sig.clone()) - .unwrap_or_else(|_| Signature::default()); + // Update head BEFORE processing proposer attestation + update_head(store); - let proposer_signed_attestation = SignedAttestation { - message: signed_block.message.proposer_attestation.clone(), - signature: proposer_signature, - }; + let proposer_signed_attestation = SignedAttestation { + validator_id: proposer_attestation.validator_id.0, + message: proposer_attestation.data.clone(), + signature: signed_block.signature.proposer_signature, + }; - // Process proposer attestation as if received via gossip (is_from_block=false) - // This ensures it goes to "new" attestations and doesn't immediately affect fork choice - on_attestation(store, proposer_signed_attestation, false)?; + // Process proposer attestation as if received via gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + 
on_attestation(store, proposer_signed_attestation, false)?; - Ok(()) + Ok(()) + } } fn process_pending_blocks(store: &mut Store, mut roots: Vec) { diff --git a/lean_client/fork_choice/src/store.rs b/lean_client/fork_choice/src/store.rs index 4c746d4..3296d06 100644 --- a/lean_client/fork_choice/src/store.rs +++ b/lean_client/fork_choice/src/store.rs @@ -85,7 +85,10 @@ pub fn get_fork_choice_head( // stage 1: accumulate weights by walking up from each attestation's head for attestation in latest_attestations.values() { + #[cfg(feature = "devnet1")] let mut curr = attestation.message.data.head.root; + #[cfg(feature = "devnet2")] + let mut curr = attestation.message.head.root; if let Some(block) = store.blocks.get(&curr) { let mut curr_slot = block.message.block.slot; diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index 5718d48..50bd240 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -256,6 +256,7 @@ fn convert_test_attestation(test_att: &TestAttestation) -> Attestation { } } +#[cfg(feature = "devnet1")] fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAttestation { let mut attestations = ssz::PersistentList::default(); @@ -303,6 +304,7 @@ fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAtt } } +#[cfg(feature = "devnet1")] fn convert_test_block(test_block_with_att: &TestBlockWithAttestation) -> SignedBlockWithAttestation { let test_block = &test_block_with_att.block; let mut attestations = ssz::PersistentList::default(); @@ -405,6 +407,7 @@ fn initialize_state_from_test(test_state: &TestAnchorState) -> State { } } +#[cfg(feature = "devnet1")] fn verify_checks( store: &Store, checks: &Option, @@ -493,6 +496,7 @@ fn verify_checks( Ok(()) } +#[cfg(feature = "devnet1")] fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> 
{ println!(" Running: {}", test.info.test_id); @@ -624,6 +628,7 @@ fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> { Ok(()) } +#[cfg(feature = "devnet1")] fn run_test_vector_file(test_path: &str) -> Result<(), String> { let json_str = std::fs::read_to_string(test_path) .map_err(|e| format!("Failed to read file {}: {}", test_path, e))?; @@ -639,6 +644,7 @@ fn run_test_vector_file(test_path: &str) -> Result<(), String> { } #[test] +#[cfg(feature = "devnet1")] fn test_fork_choice_head_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_fork_choice_head"; @@ -682,6 +688,7 @@ fn test_fork_choice_head_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_attestation_processing_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_attestation_processing"; @@ -725,6 +732,7 @@ fn test_attestation_processing_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_fork_choice_reorgs_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_fork_choice_reorgs"; @@ -768,6 +776,7 @@ fn test_fork_choice_reorgs_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_attestation_target_selection_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_attestation_target_selection"; @@ -811,6 +820,7 @@ fn test_attestation_target_selection_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_lexicographic_tiebreaker_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_lexicographic_tiebreaker"; diff --git a/lean_client/fork_choice/tests/unit_tests/votes.rs b/lean_client/fork_choice/tests/unit_tests/votes.rs index 805e785..4a1b688 100644 --- a/lean_client/fork_choice/tests/unit_tests/votes.rs +++ b/lean_client/fork_choice/tests/unit_tests/votes.rs @@ -7,6 +7,7 @@ use containers::{ Bytes32, Slot, Uint64, ValidatorIndex, }; +#[cfg(feature = "devnet1")] fn create_signed_attestation(validator_id: u64, slot: Slot, head_root: Bytes32) -> SignedAttestation { 
SignedAttestation { message: Attestation { @@ -23,6 +24,7 @@ fn create_signed_attestation(validator_id: u64, slot: Slot, head_root: Bytes32) } #[test] +#[cfg(feature = "devnet1")] fn test_accept_new_attestations() { let mut store = create_test_store(); @@ -63,6 +65,7 @@ fn test_accept_new_attestations() { } #[test] +#[cfg(feature = "devnet1")] fn test_accept_new_attestations_multiple() { let mut store = create_test_store(); @@ -94,6 +97,7 @@ fn test_accept_new_attestations_empty() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_lifecycle() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -129,6 +133,7 @@ fn test_on_attestation_lifecycle() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_future_slot() { let mut store = create_test_store(); let future_slot = Slot(100); // Far in the future @@ -140,6 +145,7 @@ fn test_on_attestation_future_slot() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_update_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -161,6 +167,7 @@ fn test_on_attestation_update_vote() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_ignore_old_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -183,6 +190,7 @@ fn test_on_attestation_ignore_old_vote() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_from_block_supersedes_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -204,6 +212,7 @@ fn test_on_attestation_from_block_supersedes_new() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_newer_from_block_removes_older_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); diff --git a/lean_client/networking/Cargo.toml b/lean_client/networking/Cargo.toml index f107994..8f47702 100644 --- a/lean_client/networking/Cargo.toml +++ b/lean_client/networking/Cargo.toml @@ -3,7 +3,13 @@ name = 
"networking" version = "0.1.0" edition = "2024" +[features] +default = [] +devnet1 = ["containers/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "env-config/devnet1"] + [dependencies] +env-config = { path = "../env-config", default-features = false } containers = {workspace = true} alloy-primitives = { workspace = true} libp2p = {workspace = true} diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 9c0993f..93e749c 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -311,7 +311,10 @@ where } } Ok(GossipsubMessage::Attestation(signed_attestation)) => { + #[cfg(feature = "devnet1")] let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; if let Err(err) = self .chain_message_sink @@ -521,7 +524,11 @@ where } } OutboundP2pRequest::GossipAttestation(signed_attestation) => { + #[cfg(feature = "devnet1")] let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; + match signed_attestation.to_ssz() { Ok(bytes) => { if let Err(err) = self.publish_to_topic(GossipsubKind::Attestation, bytes) { diff --git a/lean_client/networking/src/types.rs b/lean_client/networking/src/types.rs index 37644c2..028a883 100644 --- a/lean_client/networking/src/types.rs +++ b/lean_client/networking/src/types.rs @@ -93,9 +93,14 @@ impl Display for ChainMessage { ChainMessage::ProcessBlock { signed_block_with_attestation, .. } => { write!(f, "ProcessBlockWithAttestation(slot={})", signed_block_with_attestation.message.block.slot.0) } + #[cfg(feature = "devnet1")] ChainMessage::ProcessAttestation { signed_attestation, .. } => { write!(f, "ProcessAttestation(slot={})", signed_attestation.message.data.slot.0) } + #[cfg(feature = "devnet2")] + ChainMessage::ProcessAttestation { signed_attestation, .. 
} => { + write!(f, "ProcessAttestation(slot={})", signed_attestation.message.slot.0) + } } } } diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index cc44285..d1c3e24 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -1,4 +1,5 @@ use clap::Parser; +use containers::block::BlockSignatures; use containers::ssz::{PersistentList, SszHash}; use containers::{ attestation::{Attestation, AttestationData}, @@ -8,7 +9,7 @@ use containers::{ ssz, state::State, types::{Bytes32, Uint64, ValidatorIndex}, - Slot, + Signature, Slot, }; use fork_choice::{ handlers::{on_attestation, on_block, on_tick}, @@ -95,7 +96,10 @@ fn print_chain_status(store: &Store, connected_peers: u64) { println!(" Head Block Root: 0x{:x}", head_root.0); println!(" Parent Block Root: 0x{:x}", parent_root.0); println!(" State Root: 0x{:x}", state_root.0); - println!(" Timely: {}", if timely { "YES" } else { "NO" }); + println!( + " Timely: {}", + if timely { "YES" } else { "NO" } + ); println!("+---------------------------------------------------------------+"); println!( " Latest Justified: Slot {:>5} | Root: 0x{:x}", @@ -216,7 +220,13 @@ async fn main() { block: genesis_block, proposer_attestation: genesis_proposer_attestation, }, + #[cfg(feature = "devnet1")] signature: PersistentList::default(), + #[cfg(feature = "devnet2")] + signature: BlockSignatures { + attestation_signatures: PersistentList::default(), + proposer_signature: Signature::default(), + }, }; let config = Config { genesis_time }; @@ -234,7 +244,11 @@ async fn main() { if let Some(ref keys_dir) = args.hash_sig_key_dir { let keys_path = std::path::Path::new(keys_dir); if keys_path.exists() { - match ValidatorService::new_with_keys(config.clone(), num_validators, keys_path) { + match ValidatorService::new_with_keys( + config.clone(), + num_validators, + keys_path, + ) { Ok(service) => { info!( node_id = %node_id, @@ -245,7 +259,10 @@ async fn main() { Some(service) } Err(e) => { - warn!("Failed to load XMSS 
keys: {}, falling back to zero signatures", e); + warn!( + "Failed to load XMSS keys: {}, falling back to zero signatures", + e + ); Some(ValidatorService::new(config, num_validators)) } } @@ -417,14 +434,29 @@ async fn main() { if last_attestation_slot != Some(current_slot) { let attestations = vs.create_attestations(&store, Slot(current_slot)); for signed_att in attestations { + #[cfg(feature = "devnet1")] let validator_id = signed_att.message.validator_id.0; + #[cfg(feature = "devnet2")] + let validator_id = signed_att.validator_id; info!( slot = current_slot, validator = validator_id, "Broadcasting attestation" ); + #[cfg(feature = "devnet1")] + match on_attestation(&mut store, signed_att.clone(), false) { + Ok(()) => { + if let Err(e) = chain_outbound_sender.send( + OutboundP2pRequest::GossipAttestation(signed_att) + ) { + warn!("Failed to gossip attestation: {}", e); + } + } + Err(e) => warn!("Error processing own attestation: {}", e), + } + #[cfg(feature = "devnet2")] match on_attestation(&mut store, signed_att.clone(), false) { Ok(()) => { if let Err(e) = chain_outbound_sender.send( @@ -520,10 +552,24 @@ async fn main() { should_gossip, .. 
} => { + #[cfg(feature = "devnet1")] let att_slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet1")] let source_slot = signed_attestation.message.data.source.slot.0; + #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot.0; + #[cfg(feature = "devnet1")] let validator_id = signed_attestation.message.validator_id.0; + + #[cfg(feature = "devnet2")] + let att_slot = signed_attestation.message.slot.0; + #[cfg(feature = "devnet2")] + let source_slot = signed_attestation.message.source.slot.0; + #[cfg(feature = "devnet2")] + let target_slot = signed_attestation.message.target.slot.0; + #[cfg(feature = "devnet2")] + let validator_id = signed_attestation.validator_id; + info!( slot = att_slot, source_slot = source_slot, diff --git a/lean_client/validator/Cargo.toml b/lean_client/validator/Cargo.toml index b658c48..8311b9d 100644 --- a/lean_client/validator/Cargo.toml +++ b/lean_client/validator/Cargo.toml @@ -6,8 +6,11 @@ edition = "2021" [features] default = ["xmss-signing"] xmss-signing = ["leansig"] +devnet1 = ["containers/devnet1", "fork-choice/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "fork-choice/devnet2", "env-config/devnet1"] [dependencies] +env-config = { path = "../env-config", default-features = false } serde = { version = "1.0", features = ["derive"] } serde_yaml = "0.9" containers = { path = "../containers" } diff --git a/lean_client/validator/src/lib.rs b/lean_client/validator/src/lib.rs index e26bcf7..2c65fa7 100644 --- a/lean_client/validator/src/lib.rs +++ b/lean_client/validator/src/lib.rs @@ -2,12 +2,16 @@ use std::collections::HashMap; use std::path::Path; +use containers::attestation::{AggregatedAttestations}; +#[cfg(feature = "devnet2")] +use containers::attestation::{NaiveAggregatedSignature}; +use containers::block::BlockSignatures; use containers::{ attestation::{Attestation, AttestationData, Signature, SignedAttestation}, - block::{BlockWithAttestation, 
SignedBlockWithAttestation, hash_tree_root}, + block::{hash_tree_root, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, types::{Uint64, ValidatorIndex}, - Slot, + AggregatedAttestation, Slot, }; use fork_choice::store::{get_proposal_head, get_vote_target, Store}; use tracing::{info, warn}; @@ -172,23 +176,33 @@ impl ValidatorService { .latest_new_attestations .values() .filter(|att| { + #[cfg(feature = "devnet1")] let data = &att.message.data; + #[cfg(feature = "devnet2")] + let data = &att.message; // Source must match the parent state's justified checkpoint (not store's!) let source_matches = data.source == parent_state.latest_justified; // Target must be strictly after source let target_after_source = data.target.slot > data.source.slot; // Target block must be known let target_known = store.blocks.contains_key(&data.target.root); - + source_matches && target_after_source && target_known }) .collect(); + #[cfg(feature = "devnet1")] let valid_attestations: Vec = valid_signed_attestations .iter() .map(|att| att.message.clone()) .collect(); + #[cfg(feature = "devnet2")] + let valid_attestations: Vec = valid_signed_attestations + .iter() + .map(|att| att.message.clone()) + .collect(); + info!( slot = slot.0, valid_attestations = valid_attestations.len(), @@ -197,14 +211,52 @@ impl ValidatorService { ); // Build block with collected attestations (empty body - attestations go to state) - let (block, _post_state, _collected_atts, sigs) = - parent_state.build_block(slot, proposer_index, parent_root, Some(valid_attestations), None, None)?; + #[cfg(feature = "devnet1")] + let (block, _post_state, _collected_atts, sigs) = parent_state.build_block( + slot, + proposer_index, + parent_root, + Some(valid_attestations), + None, + None, + )?; + #[cfg(feature = "devnet2")] + let (block, _post_state, _collected_atts, sigs) = { + let valid_attestations: Vec = valid_attestations + .iter() + .map(|data| Attestation { + validator_id: Uint64(0), // 
Placeholder, real validator IDs should be used + data: data.clone(), + }) + .collect(); + parent_state.build_block( + slot, + proposer_index, + parent_root, + Some(valid_attestations), + None, + None, + )? + }; // Collect signatures from the attestations we included + #[cfg(feature = "devnet1")] let mut signatures = sigs; + #[cfg(feature = "devnet2")] + let mut signatures = sigs.attestation_signatures; for signed_att in &valid_signed_attestations { - signatures.push(signed_att.signature.clone()) + #[cfg(feature = "devnet1")] + signatures + .push(signed_att.signature.clone()) .map_err(|e| format!("Failed to add attestation signature: {:?}", e))?; + #[cfg(feature = "devnet2")] + { + // TODO: Use real aggregation instead of naive placeholder when spec is more up to date + let aggregated_sig: NaiveAggregatedSignature = NaiveAggregatedSignature::default(); + signatures + .push(aggregated_sig) + .map_err(|e| format!("Failed to add attestation signature: {:?}", e))?; + } } info!( @@ -224,11 +276,20 @@ impl ValidatorService { match key_manager.sign(proposer_index.0, epoch, &message.0.into()) { Ok(sig) => { - signatures.push(sig).map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; - info!( - proposer = proposer_index.0, - "Signed proposer attestation" - ); + #[cfg(feature = "devnet1")] + signatures + .push(sig) + .map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; + #[cfg(feature = "devnet2")] + { + // TODO: Use real aggregation instead of naive placeholder when spec is more up to date + let aggregated_sig: NaiveAggregatedSignature = + NaiveAggregatedSignature::default(); + signatures + .push(aggregated_sig) + .map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; + } + info!(proposer = proposer_index.0, "Signed proposer attestation"); } Err(e) => { return Err(format!("Failed to sign proposer attestation: {}", e)); @@ -244,7 +305,13 @@ impl ValidatorService { block, proposer_attestation, }, + #[cfg(feature = "devnet1")] 
signature: signatures, + #[cfg(feature = "devnet2")] + signature: BlockSignatures { + attestation_signatures: signatures, + proposer_signature: Signature::default(), + }, }; Ok(signed_block) @@ -284,6 +351,7 @@ impl ValidatorService { .validator_indices .iter() .filter_map(|&idx| { + #[cfg(feature = "devnet1")] let attestation = Attestation { validator_id: Uint64(idx), data: AttestationData { @@ -294,6 +362,14 @@ impl ValidatorService { }, }; + #[cfg(feature = "devnet2")] + let attestation = AttestationData { + slot, + head: head_checkpoint.clone(), + target: vote_target.clone(), + source: store.latest_justified.clone(), + }; + let signature = if let Some(ref key_manager) = self.key_manager { // Sign with XMSS let message = hash_tree_root(&attestation); @@ -331,10 +407,24 @@ impl ValidatorService { Signature::default() }; - Some(SignedAttestation { - message: attestation, - signature, - }) + { + #[cfg(feature = "devnet1")] + { + Some(SignedAttestation { + message: attestation, + signature, + }) + } + + #[cfg(feature = "devnet2")] + { + Some(SignedAttestation { + validator_id: idx, + message: attestation, + signature, + }) + } + } }) .collect() } From 671715d6f2b146127eb57b114e35860700cc7caf Mon Sep 17 00:00:00 2001 From: LiudasBaronas1 <144480589+LiudasBaronas1@users.noreply.github.com> Date: Mon, 12 Jan 2026 01:16:22 +0200 Subject: [PATCH 05/23] Refactor: remove redundant constants from config, implement ChainConfig::devnet() and update lib exports --- lean_client/chain/src/config.rs | 57 ++++++++++++++------------------- lean_client/chain/src/lib.rs | 3 +- 2 files changed, 26 insertions(+), 34 deletions(-) diff --git a/lean_client/chain/src/config.rs b/lean_client/chain/src/config.rs index b8be4f1..537d13b 100644 --- a/lean_client/chain/src/config.rs +++ b/lean_client/chain/src/config.rs @@ -3,30 +3,21 @@ pub struct BasisPoint(pub u64); impl BasisPoint { pub const MAX: u64 = 10_000; + pub const fn new(value: u64) -> Option { if value <= Self::MAX { 
Some(BasisPoint(value)) } else { None } } - #[inline] pub fn get(&self) -> u64 { self.0 } + + #[inline] + pub fn get(&self) -> u64 { self.0 } } -pub const INTERVALS_PER_SLOT: u64 = 4; -pub const SLOT_DURATION_MS: u64 = 4_000; -pub const SECONDS_PER_SLOT: u64 = SLOT_DURATION_MS / 1_000; -pub const SECONDS_PER_INTERVAL: u64 = SECONDS_PER_SLOT / INTERVALS_PER_SLOT; -pub const JUSTIFICATION_LOOKBACK_SLOTS: u64 = 3; - -pub const PROPOSER_REORG_CUTOFF_BPS: BasisPoint = match BasisPoint::new(2_500) { Some(x) => x, None => panic!() }; -pub const VOTE_DUE_BPS: BasisPoint = match BasisPoint::new(5_000) { Some(x) => x, None => panic!() }; -pub const FAST_CONFIRM_DUE_BPS: BasisPoint = match BasisPoint::new(7_500) { Some(x) => x, None => panic!() }; -pub const VIEW_FREEZE_CUTOFF_BPS: BasisPoint= match BasisPoint::new(7_500) { Some(x) => x, None => panic!() }; - -pub const HISTORICAL_ROOTS_LIMIT: u64 = 1u64 << 18; -pub const VALIDATOR_REGISTRY_LIMIT: u64 = 1u64 << 12; - #[derive(Clone, Debug)] pub struct ChainConfig { + pub intervals_per_slot: u64, pub slot_duration_ms: u64, pub second_per_slot: u64, + pub seconds_per_interval: u64, pub justification_lookback_slots: u64, pub proposer_reorg_cutoff_bps: BasisPoint, pub vote_due_bps: BasisPoint, @@ -36,24 +27,24 @@ pub struct ChainConfig { pub validator_registry_limit: u64, } -pub const DEVNET_CONFIG: ChainConfig = ChainConfig { - slot_duration_ms: SLOT_DURATION_MS, - second_per_slot: SECONDS_PER_SLOT, - justification_lookback_slots: JUSTIFICATION_LOOKBACK_SLOTS, - proposer_reorg_cutoff_bps: PROPOSER_REORG_CUTOFF_BPS, - vote_due_bps: VOTE_DUE_BPS, - fast_confirm_due_bps: FAST_CONFIRM_DUE_BPS, - view_freeze_cutoff_bps: VIEW_FREEZE_CUTOFF_BPS, - historical_roots_limit: HISTORICAL_ROOTS_LIMIT, - validator_registry_limit: VALIDATOR_REGISTRY_LIMIT, -}; +impl ChainConfig { + pub fn devnet() -> Self { + let slot_duration_ms = 4_000; + let seconds_per_slot = slot_duration_ms / 1_000; + let intervals_per_slot = 4; -#[cfg(test)] -mod tests { 
- use super::*; - #[test] fn time_math_is_consistent() { - assert_eq!(SLOT_DURATION_MS, 4_000); - assert_eq!(SECONDS_PER_SLOT, 4); - assert_eq!(SECONDS_PER_INTERVAL, 1); + Self { + slot_duration_ms, + second_per_slot: seconds_per_slot, + intervals_per_slot, + seconds_per_interval: seconds_per_slot / intervals_per_slot, + justification_lookback_slots: 3, + proposer_reorg_cutoff_bps: BasisPoint::new(2_500).expect("Valid BPS"), + vote_due_bps: BasisPoint::new(5_000).expect("Valid BPS"), + fast_confirm_due_bps: BasisPoint::new(7_500).expect("Valid BPS"), + view_freeze_cutoff_bps: BasisPoint::new(7_500).expect("Valid BPS"), + historical_roots_limit: 1u64 << 18, + validator_registry_limit: 1u64 << 12, + } } } \ No newline at end of file diff --git a/lean_client/chain/src/lib.rs b/lean_client/chain/src/lib.rs index ef68c36..12cf630 100644 --- a/lean_client/chain/src/lib.rs +++ b/lean_client/chain/src/lib.rs @@ -1 +1,2 @@ -pub mod config; +mod config; +pub use config::ChainConfig; \ No newline at end of file From 9fbbf348f0fa8ae55278cf6c55394f22e149cae6 Mon Sep 17 00:00:00 2001 From: Darius Spr <108625236+Dariusspr@users.noreply.github.com> Date: Wed, 14 Jan 2026 11:57:59 +0200 Subject: [PATCH 06/23] rename to PublicKey and use constant for key size --- lean_client/containers/src/block.rs | 1 - lean_client/containers/src/state.rs | 2 +- lean_client/containers/src/validator.rs | 56 +++++++++++-------- .../tests/fork_choice_test_vectors.rs | 2 +- lean_client/src/main.rs | 4 +- lean_client/validator/src/keys.rs | 2 +- 6 files changed, 37 insertions(+), 30 deletions(-) diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index 0acf1b2..00f5894 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs @@ -7,7 +7,6 @@ use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to use ssz::{PersistentList, SszHash}; use typenum::U4096; use crate::attestation::{AggregatedAttestations, 
AttestationSignatures}; -use crate::validator::BlsPublicKey; /// The body of a block, containing payload data. /// diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index b176e55..5e3dd52 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -107,7 +107,7 @@ impl State { let mut validators = List::default(); for i in 0..num_validators.0 { let validator = Validator { - pubkey: crate::validator::BlsPublicKey::default(), + pubkey: crate::validator::PublicKey::default(), index: Uint64(i), }; validators.push(validator).expect("Failed to add validator"); diff --git a/lean_client/containers/src/validator.rs b/lean_client/containers/src/validator.rs index 513b09d..29f5716 100644 --- a/lean_client/containers/src/validator.rs +++ b/lean_client/containers/src/validator.rs @@ -1,21 +1,24 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::ByteVector; use ssz_derive::Ssz; -use typenum::U52; +use typenum::{Unsigned, U52}; -/// BLS public key - 52 bytes (as defined in lean spec) +/// Size of XMSS public keys in bytes (as defined in lean spec) +pub type PublicKeySize = U52; + +/// XMSS public key (as defined in lean spec) #[derive(Clone, Debug, PartialEq, Eq, Ssz)] #[ssz(transparent)] -pub struct BlsPublicKey(pub ByteVector); +pub struct PublicKey(pub ByteVector); -impl Default for BlsPublicKey { +impl Default for PublicKey { fn default() -> Self { - BlsPublicKey(ByteVector::default()) + PublicKey(ByteVector::default()) } } // Custom serde implementation -impl Serialize for BlsPublicKey { +impl Serialize for PublicKey { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -24,8 +27,8 @@ impl Serialize for BlsPublicKey { // For now, use unsafe to access the underlying bytes let bytes = unsafe { std::slice::from_raw_parts( - &self.0 as *const ByteVector as *const u8, - 52 + &self.0 as *const ByteVector as *const u8, + PublicKeySize::USIZE, ) }; let hex_string = 
format!("0x{}", hex::encode(bytes)); @@ -33,52 +36,57 @@ impl Serialize for BlsPublicKey { } } -impl<'de> Deserialize<'de> for BlsPublicKey { +impl<'de> Deserialize<'de> for PublicKey { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; let s = s.strip_prefix("0x").unwrap_or(&s); - + let decoded = hex::decode(s).map_err(serde::de::Error::custom)?; - if decoded.len() != 52 { + if decoded.len() != PublicKeySize::USIZE { return Err(serde::de::Error::custom(format!( - "Expected 52 bytes, got {}", + "Expected {} bytes, got {}", + PublicKeySize::USIZE, decoded.len() ))); } - + // Create ByteVector from decoded bytes using unsafe let mut byte_vec = ByteVector::default(); unsafe { - let dest = &mut byte_vec as *mut ByteVector as *mut u8; - std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, 52); + let dest = &mut byte_vec as *mut ByteVector as *mut u8; + std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, PublicKeySize::USIZE); } - - Ok(BlsPublicKey(byte_vec)) + + Ok(PublicKey(byte_vec)) } } -impl BlsPublicKey { +impl PublicKey { pub fn from_hex(s: &str) -> Result { let s = s.strip_prefix("0x").unwrap_or(s); let decoded = hex::decode(s).map_err(|e| e.to_string())?; - if decoded.len() != 52 { - return Err(format!("Expected 52 bytes, got {}", decoded.len())); + if decoded.len() != PublicKeySize::USIZE { + return Err(format!( + "Expected {} bytes, got {}", + PublicKeySize::USIZE, + decoded.len() + )); } let mut byte_vec = ByteVector::default(); unsafe { - let dest = &mut byte_vec as *mut ByteVector as *mut u8; - std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, 52); + let dest = &mut byte_vec as *mut ByteVector as *mut u8; + std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, PublicKeySize::USIZE); } - Ok(BlsPublicKey(byte_vec)) + Ok(PublicKey(byte_vec)) } } #[derive(Clone, Debug, PartialEq, Eq, Default, Ssz, Serialize, Deserialize)] pub struct Validator { - pub pubkey: BlsPublicKey, + pub pubkey: 
PublicKey, #[serde(default)] pub index: crate::Uint64, } diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index 50bd240..4f8521f 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -384,7 +384,7 @@ fn initialize_state_from_test(test_state: &TestAnchorState) -> State { let mut validators = List::default(); for test_validator in &test_state.validators.data { - let pubkey = containers::validator::BlsPublicKey::from_hex(&test_validator.pubkey) + let pubkey = containers::validator::PublicKey::from_hex(&test_validator.pubkey) .expect("Failed to parse validator pubkey"); let validator = containers::validator::Validator { pubkey, diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index d1c3e24..ad2276e 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -164,7 +164,7 @@ async fn main() { .iter() .enumerate() .map(|(i, v_str)| { - let pubkey = containers::validator::BlsPublicKey::from_hex(v_str) + let pubkey = containers::validator::PublicKey::from_hex(v_str) .expect("Invalid genesis validator pubkey"); containers::validator::Validator { pubkey, @@ -178,7 +178,7 @@ async fn main() { let num_validators = 3; let validators = (0..num_validators) .map(|i| containers::validator::Validator { - pubkey: containers::validator::BlsPublicKey::default(), + pubkey: containers::validator::PublicKey::default(), index: Uint64(i as u64), }) .collect(); diff --git a/lean_client/validator/src/keys.rs b/lean_client/validator/src/keys.rs index 13c0deb..cae38f1 100644 --- a/lean_client/validator/src/keys.rs +++ b/lean_client/validator/src/keys.rs @@ -93,7 +93,7 @@ impl KeyManager { ).into()); } - // Convert to ByteVector using unsafe pointer copy (same pattern as BlsPublicKey) + // Convert to ByteVector using unsafe pointer copy (same pattern as PublicKey) let mut byte_vec: ByteVector = 
ByteVector::default(); unsafe { let dest = &mut byte_vec as *mut ByteVector as *mut u8; From 830a70c9935d52fa0bbe7fafdaa381bc09942716 Mon Sep 17 00:00:00 2001 From: Darius Spr <108625236+Dariusspr@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:23:13 +0200 Subject: [PATCH 07/23] hide key size --- lean_client/containers/src/validator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lean_client/containers/src/validator.rs b/lean_client/containers/src/validator.rs index 29f5716..8a2da60 100644 --- a/lean_client/containers/src/validator.rs +++ b/lean_client/containers/src/validator.rs @@ -4,7 +4,7 @@ use ssz_derive::Ssz; use typenum::{Unsigned, U52}; /// Size of XMSS public keys in bytes (as defined in lean spec) -pub type PublicKeySize = U52; +type PublicKeySize = U52; /// XMSS public key (as defined in lean spec) #[derive(Clone, Debug, PartialEq, Eq, Ssz)] From 572d54578a8d83439631624734110dedc2a4e2eb Mon Sep 17 00:00:00 2001 From: Darius Spr <108625236+Dariusspr@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:34:01 +0200 Subject: [PATCH 08/23] format code --- lean_client/chain/src/config.rs | 18 +- lean_client/chain/src/lib.rs | 2 +- lean_client/containers/src/attestation.rs | 2 +- lean_client/containers/src/checkpoint.rs | 6 +- lean_client/containers/src/config.rs | 4 +- lean_client/containers/src/lib.rs | 4 +- lean_client/containers/src/slot.rs | 45 +-- lean_client/containers/src/state.rs | 26 +- lean_client/containers/src/status.rs | 2 +- lean_client/containers/src/types.rs | 2 +- .../containers/tests/debug_deserialize.rs | 43 ++- lean_client/containers/tests/main.rs | 2 +- .../tests/test_vectors/block_processing.rs | 9 +- .../containers/tests/test_vectors/genesis.rs | 9 +- .../containers/tests/test_vectors/mod.rs | 17 +- .../containers/tests/test_vectors/runner.rs | 314 +++++++++++------- .../tests/test_vectors/verify_signatures.rs | 6 +- .../unit_tests/attestation_aggregation.rs | 30 +- .../containers/tests/unit_tests/common.rs | 54 
+-- .../containers/tests/unit_tests/mod.rs | 2 +- .../tests/unit_tests/state_basic.rs | 22 +- .../tests/unit_tests/state_justifications.rs | 32 +- .../tests/unit_tests/state_process.rs | 66 +++- .../tests/unit_tests/state_transition.rs | 2 +- lean_client/env-config/src/lib.rs | 2 +- lean_client/fork_choice/src/handlers.rs | 14 +- lean_client/fork_choice/src/store.rs | 8 +- .../tests/fork_choice_test_vectors.rs | 16 +- lean_client/fork_choice/tests/unit_tests.rs | 2 +- .../fork_choice/tests/unit_tests/common.rs | 18 +- .../tests/unit_tests/fork_choice.rs | 8 +- .../fork_choice/tests/unit_tests/time.rs | 9 +- .../fork_choice/tests/unit_tests/votes.rs | 213 ++++++++---- .../networking/src/gossipsub/config.rs | 4 +- .../networking/src/gossipsub/message.rs | 5 +- .../networking/src/gossipsub/tests/config.rs | 12 +- .../networking/src/gossipsub/tests/message.rs | 4 +- .../src/gossipsub/tests/message_id.rs | 6 +- .../networking/src/gossipsub/tests/mod.rs | 4 +- .../networking/src/gossipsub/tests/topic.rs | 4 +- lean_client/networking/src/gossipsub/topic.rs | 14 +- lean_client/networking/src/network/mod.rs | 2 +- lean_client/networking/src/network/service.rs | 34 +- lean_client/networking/src/req_resp.rs | 74 +++-- lean_client/networking/src/types.rs | 35 +- lean_client/validator/src/keys.rs | 29 +- lean_client/validator/src/lib.rs | 4 +- 47 files changed, 777 insertions(+), 463 deletions(-) diff --git a/lean_client/chain/src/config.rs b/lean_client/chain/src/config.rs index 537d13b..1d762de 100644 --- a/lean_client/chain/src/config.rs +++ b/lean_client/chain/src/config.rs @@ -3,13 +3,19 @@ pub struct BasisPoint(pub u64); impl BasisPoint { pub const MAX: u64 = 10_000; - + pub const fn new(value: u64) -> Option { - if value <= Self::MAX { Some(BasisPoint(value)) } else { None } + if value <= Self::MAX { + Some(BasisPoint(value)) + } else { + None + } + } + + #[inline] + pub fn get(&self) -> u64 { + self.0 } - - #[inline] - pub fn get(&self) -> u64 { self.0 } } 
#[derive(Clone, Debug)] @@ -47,4 +53,4 @@ impl ChainConfig { validator_registry_limit: 1u64 << 12, } } -} \ No newline at end of file +} diff --git a/lean_client/chain/src/lib.rs b/lean_client/chain/src/lib.rs index 12cf630..9496841 100644 --- a/lean_client/chain/src/lib.rs +++ b/lean_client/chain/src/lib.rs @@ -1,2 +1,2 @@ mod config; -pub use config::ChainConfig; \ No newline at end of file +pub use config::ChainConfig; diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 9c3537c..6779b0f 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -126,7 +126,7 @@ pub struct AggregatedAttestation { /// Bitfield indicating which validators participated in the aggregation. pub aggregation_bits: AggregationBits, /// Combined attestation data similar to the beacon chain format. - /// + /// /// Multiple validator attestations are aggregated here without the complexity of /// committee assignments. pub data: AttestationData, diff --git a/lean_client/containers/src/checkpoint.rs b/lean_client/containers/src/checkpoint.rs index e635ab1..1b36f31 100644 --- a/lean_client/containers/src/checkpoint.rs +++ b/lean_client/containers/src/checkpoint.rs @@ -1,9 +1,9 @@ use crate::{Bytes32, Slot}; -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; /// Represents a checkpoint in the chain's history. -/// +/// /// A checkpoint marks a specific moment in the chain. It combines a block /// identifier with a slot number. Checkpoints are used for justification and /// finalization. 
@@ -45,4 +45,4 @@ mod tests { }; assert_eq!(cp1, cp2); } -} \ No newline at end of file +} diff --git a/lean_client/containers/src/config.rs b/lean_client/containers/src/config.rs index 83bf459..fed2b7e 100644 --- a/lean_client/containers/src/config.rs +++ b/lean_client/containers/src/config.rs @@ -1,5 +1,5 @@ -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; use std::fs::File; use std::io::BufReader; use std::path::Path; @@ -26,4 +26,4 @@ impl GenesisConfig { let config = serde_yaml::from_reader(reader)?; Ok(config) } -} \ No newline at end of file +} diff --git a/lean_client/containers/src/lib.rs b/lean_client/containers/src/lib.rs index c73a9f9..f0590ca 100644 --- a/lean_client/containers/src/lib.rs +++ b/lean_client/containers/src/lib.rs @@ -22,8 +22,8 @@ pub use slot::Slot; pub use state::State; pub use status::Status; pub use types::{ - Bytes32, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, - Uint64, ValidatorIndex, + Bytes32, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, + Uint64, ValidatorIndex, Validators, }; pub use types::Bytes32 as Root; diff --git a/lean_client/containers/src/slot.rs b/lean_client/containers/src/slot.rs index d845ec3..17f5439 100644 --- a/lean_client/containers/src/slot.rs +++ b/lean_client/containers/src/slot.rs @@ -1,5 +1,5 @@ -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; use std::cmp::Ordering; #[derive(Clone, Copy, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] @@ -37,15 +37,18 @@ impl Slot { /// /// Panics if this slot is earlier than the finalized slot. 
pub fn is_justifiable_after(self, finalized: Slot) -> bool { - assert!(self >= finalized, "Candidate slot must not be before finalized slot"); + assert!( + self >= finalized, + "Candidate slot must not be before finalized slot" + ); let delta = self.0 - finalized.0; - + // Rule 1: The first 5 slots after finalization are always justifiable. // Examples: delta = 0, 1, 2, 3, 4, 5 if delta <= 5 { return true; } - + // Rule 2: Slots at perfect square distances are justifiable. // Examples: delta = 1, 4, 9, 16, 25, 36, 49, 64, ... // Check: integer square root squared equals delta @@ -53,7 +56,7 @@ impl Slot { if sqrt * sqrt == delta { return true; } - + // Rule 3: Slots at pronic number distances are justifiable. // Pronic numbers have the form n(n+1): 2, 6, 12, 20, 30, 42, 56, ... // Mathematical insight: For pronic delta = n(n+1), we have: @@ -64,7 +67,7 @@ impl Slot { if test_sqrt * test_sqrt == test && test_sqrt % 2 == 1 { return true; } - + false } } @@ -89,32 +92,32 @@ mod tests { fn test_is_justifiable_perfect_squares() { let finalized = Slot(0); // Rule 2: Perfect square distances - assert!(Slot(1).is_justifiable_after(finalized)); // delta = 1 = 1^2 - assert!(Slot(4).is_justifiable_after(finalized)); // delta = 4 = 2^2 - assert!(Slot(9).is_justifiable_after(finalized)); // delta = 9 = 3^2 - assert!(Slot(16).is_justifiable_after(finalized)); // delta = 16 = 4^2 - assert!(Slot(25).is_justifiable_after(finalized)); // delta = 25 = 5^2 - assert!(Slot(36).is_justifiable_after(finalized)); // delta = 36 = 6^2 + assert!(Slot(1).is_justifiable_after(finalized)); // delta = 1 = 1^2 + assert!(Slot(4).is_justifiable_after(finalized)); // delta = 4 = 2^2 + assert!(Slot(9).is_justifiable_after(finalized)); // delta = 9 = 3^2 + assert!(Slot(16).is_justifiable_after(finalized)); // delta = 16 = 4^2 + assert!(Slot(25).is_justifiable_after(finalized)); // delta = 25 = 5^2 + assert!(Slot(36).is_justifiable_after(finalized)); // delta = 36 = 6^2 } #[test] fn 
test_is_justifiable_pronic() { let finalized = Slot(0); // Rule 3: Pronic numbers (n(n+1)) - assert!(Slot(2).is_justifiable_after(finalized)); // delta = 2 = 1*2 - assert!(Slot(6).is_justifiable_after(finalized)); // delta = 6 = 2*3 - assert!(Slot(12).is_justifiable_after(finalized)); // delta = 12 = 3*4 - assert!(Slot(20).is_justifiable_after(finalized)); // delta = 20 = 4*5 - assert!(Slot(30).is_justifiable_after(finalized)); // delta = 30 = 5*6 - assert!(Slot(42).is_justifiable_after(finalized)); // delta = 42 = 6*7 + assert!(Slot(2).is_justifiable_after(finalized)); // delta = 2 = 1*2 + assert!(Slot(6).is_justifiable_after(finalized)); // delta = 6 = 2*3 + assert!(Slot(12).is_justifiable_after(finalized)); // delta = 12 = 3*4 + assert!(Slot(20).is_justifiable_after(finalized)); // delta = 20 = 4*5 + assert!(Slot(30).is_justifiable_after(finalized)); // delta = 30 = 5*6 + assert!(Slot(42).is_justifiable_after(finalized)); // delta = 42 = 6*7 } #[test] fn test_is_not_justifiable() { let finalized = Slot(0); // Not justifiable: not in first 5, not perfect square, not pronic - assert!(!Slot(7).is_justifiable_after(finalized)); // delta = 7 - assert!(!Slot(8).is_justifiable_after(finalized)); // delta = 8 + assert!(!Slot(7).is_justifiable_after(finalized)); // delta = 7 + assert!(!Slot(8).is_justifiable_after(finalized)); // delta = 8 assert!(!Slot(10).is_justifiable_after(finalized)); // delta = 10 assert!(!Slot(11).is_justifiable_after(finalized)); // delta = 11 } @@ -126,4 +129,4 @@ mod tests { let candidate = Slot(50); candidate.is_justifiable_after(finalized); } -} \ No newline at end of file +} diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 5e3dd52..5056fb7 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -1,5 +1,11 @@ +use crate::attestation::AggregatedAttestations; +use crate::block::BlockSignatures; use crate::validator::Validator; -use crate::{block::{hash_tree_root, 
Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, SignedAttestation, Slot, Uint64, ValidatorIndex}; +use crate::{ + block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, + Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, SignedAttestation, Slot, + Uint64, ValidatorIndex, +}; use crate::{ HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, }; @@ -8,8 +14,6 @@ use ssz::{PersistentList as List, PersistentList}; use ssz_derive::Ssz; use std::collections::BTreeMap; use typenum::U4096; -use crate::attestation::AggregatedAttestations; -use crate::block::BlockSignatures; pub const VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const JUSTIFICATION_ROOTS_LIMIT: usize = 1 << 18; // 262144 @@ -305,7 +309,9 @@ impl State { let plain_attestations = aggregated_attestation.to_plain(); // For each attestatio in the vector, push to the list for attestation in plain_attestations { - unaggregated_attestations.push(attestation).map_err(|e| format!("Failed to push attestation: {:?}", e))?; + unaggregated_attestations + .push(attestation) + .map_err(|e| format!("Failed to push attestation: {:?}", e))?; } } state.process_attestations(&unaggregated_attestations) @@ -566,7 +572,15 @@ impl State { initial_attestations: Option>, available_signed_attestations: Option<&[SignedBlockWithAttestation]>, known_block_roots: Option<&std::collections::HashSet>, - ) -> Result<(Block, Self, Vec, PersistentList), String> { + ) -> Result< + ( + Block, + Self, + Vec, + PersistentList, + ), + String, + > { // Initialize empty attestation set for iterative collection let mut attestations = initial_attestations.unwrap_or_default(); let mut signatures = PersistentList::default(); @@ -703,7 +717,7 @@ mod tests { config: st.config.clone(), ..st.clone() } - .is_proposer(ValidatorIndex(0))); + .is_proposer(ValidatorIndex(0))); } #[test] 
diff --git a/lean_client/containers/src/status.rs b/lean_client/containers/src/status.rs index da05ba1..d68c7c3 100644 --- a/lean_client/containers/src/status.rs +++ b/lean_client/containers/src/status.rs @@ -1,6 +1,6 @@ use crate::Checkpoint; -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct Status { diff --git a/lean_client/containers/src/types.rs b/lean_client/containers/src/types.rs index bb2a64b..7d9aa4d 100644 --- a/lean_client/containers/src/types.rs +++ b/lean_client/containers/src/types.rs @@ -38,8 +38,8 @@ impl fmt::Display for Bytes32 { } // Type-level constants for SSZ collection limits -use typenum::{Prod, U4, U1000, U4096, U262144, U1073741824}; use crate::validator::Validator; +use typenum::{Prod, U1000, U1073741824, U262144, U4, U4096}; // 2^18, 4096 * 262144 /// Type-level number for 4000 bytes (signature size) = 4 * 1000 diff --git a/lean_client/containers/tests/debug_deserialize.rs b/lean_client/containers/tests/debug_deserialize.rs index 0b5a6de..1d28df1 100644 --- a/lean_client/containers/tests/debug_deserialize.rs +++ b/lean_client/containers/tests/debug_deserialize.rs @@ -4,45 +4,52 @@ use std::fs; #[test] fn debug_deserialize_state() { let json_content = fs::read_to_string( - "../tests/test_vectors/test_blocks/test_process_first_block_after_genesis.json" - ).expect("Failed to read test vector file"); - + "../tests/test_vectors/test_blocks/test_process_first_block_after_genesis.json", + ) + .expect("Failed to read test vector file"); + // Try to deserialize just to see where it fails let result: Result = serde_json::from_str(&json_content); - + match result { Ok(value) => { println!("✓ JSON is valid"); - + // Try to extract just the pre state if let Some(tests) = value.as_object() { if let Some((test_name, test_case)) = tests.iter().next() { println!("✓ Found test: {}", test_name); - + if let Some(pre) = test_case.get("pre") 
{ println!("✓ Found pre state"); - + // Try deserializing field by field if let Some(pre_obj) = pre.as_object() { for (field_name, field_value) in pre_obj.iter() { println!("\nTrying to deserialize field: {}", field_name); - println!("Field value type: {}", match field_value { - serde_json::Value::Null => "null", - serde_json::Value::Bool(_) => "bool", - serde_json::Value::Number(_) => "number", - serde_json::Value::String(_) => "string", - serde_json::Value::Array(_) => "array", - serde_json::Value::Object(_) => "object", - }); - + println!( + "Field value type: {}", + match field_value { + serde_json::Value::Null => "null", + serde_json::Value::Bool(_) => "bool", + serde_json::Value::Number(_) => "number", + serde_json::Value::String(_) => "string", + serde_json::Value::Array(_) => "array", + serde_json::Value::Object(_) => "object", + } + ); + if field_value.is_object() { if let Some(obj) = field_value.as_object() { - println!("Object keys: {:?}", obj.keys().collect::>()); + println!( + "Object keys: {:?}", + obj.keys().collect::>() + ); } } } } - + // Now try to deserialize the whole state let state_result: Result = serde_json::from_value(pre.clone()); match state_result { diff --git a/lean_client/containers/tests/main.rs b/lean_client/containers/tests/main.rs index 4d48535..f951ffe 100644 --- a/lean_client/containers/tests/main.rs +++ b/lean_client/containers/tests/main.rs @@ -1,4 +1,4 @@ // tests/lib - Test entry point mod debug_deserialize; +mod test_vectors; mod unit_tests; -mod test_vectors; \ No newline at end of file diff --git a/lean_client/containers/tests/test_vectors/block_processing.rs b/lean_client/containers/tests/test_vectors/block_processing.rs index caec865..5bbc997 100644 --- a/lean_client/containers/tests/test_vectors/block_processing.rs +++ b/lean_client/containers/tests/test_vectors/block_processing.rs @@ -13,8 +13,7 @@ fn test_process_first_block_after_genesis() { #[cfg(feature = "devnet1")] fn test_blocks_with_gaps() { let test_path = 
"../tests/test_vectors/test_blocks/test_blocks_with_gaps.json"; - TestRunner::run_block_processing_test(test_path) - .expect("test_blocks_with_gaps failed"); + TestRunner::run_block_processing_test(test_path).expect("test_blocks_with_gaps failed"); } #[test] @@ -29,16 +28,14 @@ fn test_linear_chain_multiple_blocks() { #[cfg(feature = "devnet1")] fn test_block_extends_deep_chain() { let test_path = "../tests/test_vectors/test_blocks/test_block_extends_deep_chain.json"; - TestRunner::run_block_processing_test(test_path) - .expect("test_block_extends_deep_chain failed"); + TestRunner::run_block_processing_test(test_path).expect("test_block_extends_deep_chain failed"); } #[test] #[cfg(feature = "devnet1")] fn test_empty_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks.json"; - TestRunner::run_block_processing_test(test_path) - .expect("test_empty_blocks failed"); + TestRunner::run_block_processing_test(test_path).expect("test_empty_blocks failed"); } #[test] diff --git a/lean_client/containers/tests/test_vectors/genesis.rs b/lean_client/containers/tests/test_vectors/genesis.rs index 0b1d3d3..92acf25 100644 --- a/lean_client/containers/tests/test_vectors/genesis.rs +++ b/lean_client/containers/tests/test_vectors/genesis.rs @@ -4,20 +4,17 @@ use super::runner::TestRunner; #[test] fn test_genesis_default_configuration() { let test_path = "../tests/test_vectors/test_genesis/test_genesis_default_configuration.json"; - TestRunner::run_genesis_test(test_path) - .expect("test_genesis_default_configuration failed"); + TestRunner::run_genesis_test(test_path).expect("test_genesis_default_configuration failed"); } #[test] fn test_genesis_custom_time() { let test_path = "../tests/test_vectors/test_genesis/test_genesis_custom_time.json"; - TestRunner::run_genesis_test(test_path) - .expect("test_genesis_custom_time failed"); + TestRunner::run_genesis_test(test_path).expect("test_genesis_custom_time failed"); } #[test] fn 
test_genesis_custom_validator_set() { let test_path = "../tests/test_vectors/test_genesis/test_genesis_custom_validator_set.json"; - TestRunner::run_genesis_test(test_path) - .expect("test_genesis_custom_validator_set failed"); + TestRunner::run_genesis_test(test_path).expect("test_genesis_custom_validator_set failed"); } diff --git a/lean_client/containers/tests/test_vectors/mod.rs b/lean_client/containers/tests/test_vectors/mod.rs index 8859847..acdc055 100644 --- a/lean_client/containers/tests/test_vectors/mod.rs +++ b/lean_client/containers/tests/test_vectors/mod.rs @@ -1,15 +1,13 @@ // Test vector modules -pub mod runner; pub mod block_processing; pub mod genesis; +pub mod runner; pub mod verify_signatures; +use containers::{block::Block, block::SignedBlockWithAttestation, state::State, Slot}; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use std::collections::HashMap; -use containers::{ - Slot, block::Block, block::SignedBlockWithAttestation, state::State -}; /// Custom deserializer that handles both plain values and {"data": T} wrapper format fn deserialize_flexible<'de, D, T>(deserializer: D) -> Result @@ -18,21 +16,22 @@ where T: serde::de::DeserializeOwned, { use serde::de::Error; - + // Deserialize as a generic Value first to inspect the structure let value = Value::deserialize(deserializer)?; - + // Check if it's an object with a "data" field if let Value::Object(ref map) = value { if map.contains_key("data") && map.len() == 1 { // Extract just the data field if let Some(data_value) = map.get("data") { - return serde_json::from_value(data_value.clone()) - .map_err(|e| D::Error::custom(format!("Failed to deserialize from data wrapper: {}", e))); + return serde_json::from_value(data_value.clone()).map_err(|e| { + D::Error::custom(format!("Failed to deserialize from data wrapper: {}", e)) + }); } } } - + // Otherwise, deserialize as a plain value serde_json::from_value(value) .map_err(|e| D::Error::custom(format!("Failed to 
deserialize plain value: {}", e))) diff --git a/lean_client/containers/tests/test_vectors/runner.rs b/lean_client/containers/tests/test_vectors/runner.rs index bf23138..0ed8ac5 100644 --- a/lean_client/containers/tests/test_vectors/runner.rs +++ b/lean_client/containers/tests/test_vectors/runner.rs @@ -6,14 +6,19 @@ use std::path::Path; pub struct TestRunner; impl TestRunner { - pub fn run_sequential_block_processing_tests>(path: P) -> Result<(), Box> { + pub fn run_sequential_block_processing_tests>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path)?; // Parse using the new TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; println!("Running test: {}", test_name); @@ -78,20 +83,22 @@ impl TestRunner { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } // Only check validator count if specified in post-state if let Some(expected_count) = post.validator_count { let num_validators = state.validators.len_u64(); - + if num_validators as usize != expected_count { return Err(format!( "Post-state validator count mismatch: expected {}, got {}", expected_count, num_validators - ).into()); + ) + .into()); } - + println!("\n✓ All post-state checks passed"); println!(" Final slot: {:?}", state.slot); println!(" Validator count: {}", num_validators); @@ -100,38 +107,48 @@ impl TestRunner { println!(" Final slot: {:?}", state.slot); } } - + println!("\n✓✓✓ PASS: All blocks processed successfully with matching roots ✓✓✓"); } - + Ok(()) } - pub fn run_single_block_with_slot_gap_tests>(path: P) -> Result<(), Box> { + pub fn run_single_block_with_slot_gap_tests>( + path: P, + ) -> Result<(), 
Box> { let json_content = fs::read_to_string(path)?; - + // Parse using the new TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("Running test: {}", test_name); println!("Description: {}", test_case.info.description); if let Some(ref blocks) = test_case.blocks { let mut state = test_case.pre.clone(); - + for (idx, block) in blocks.iter().enumerate() { - println!("\nProcessing block {}: slot {:?} (gap from slot {:?})", idx + 1, block.slot, state.slot); - + println!( + "\nProcessing block {}: slot {:?} (gap from slot {:?})", + idx + 1, + block.slot, + state.slot + ); + // Advance state to the block's slot (this handles the slot gap) let state_after_slots = state.process_slots(block.slot)?; - + // Compute the parent root from our current latest_block_header let computed_parent_root = hash_tree_root(&state_after_slots.latest_block_header); - + // Verify the block's parent_root matches what we computed if block.parent_root != computed_parent_root { return Err(format!( @@ -141,19 +158,19 @@ impl TestRunner { computed_parent_root ).into()); } - + println!(" ✓ Parent root matches: {:?}", computed_parent_root); - + // Process the block header let result = state_after_slots.process_block_header(block); match result { Ok(new_state) => { state = new_state; - + // Compute the state root after processing let computed_state_root = hash_tree_root(&state); - + // Verify the computed state_root matches the expected one from the vector if block.state_root != computed_state_root { return Err(format!( @@ -163,59 +180,69 @@ impl TestRunner { computed_state_root ).into()); } - + println!(" ✓ State root matches: {:?}", computed_state_root); - println!(" ✓ Block {} processed 
successfully (with {} empty slots)", idx + 1, block.slot.0 - test_case.pre.slot.0 - idx as u64); + println!( + " ✓ Block {} processed successfully (with {} empty slots)", + idx + 1, + block.slot.0 - test_case.pre.slot.0 - idx as u64 + ); } Err(e) => { return Err(format!("Block {} processing failed: {:?}", idx + 1, e).into()); } } } - + // Verify post-state conditions if let Some(post) = test_case.post { if state.slot != post.slot { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } - + println!("\n✓ All post-state checks passed"); println!(" Final slot: {:?}", state.slot); } - + println!("\n✓✓✓ PASS: Block with slot gap processed successfully ✓✓✓"); } - + Ok(()) } - pub fn run_single_empty_block_tests>(path: P) -> Result<(), Box> { + pub fn run_single_empty_block_tests>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path)?; - + // Parse using the new TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("Running test: {}", test_name); println!("Description: {}", test_case.info.description); if let Some(ref blocks) = test_case.blocks { let mut state = test_case.pre.clone(); - + // Should be exactly one block if blocks.len() != 1 { return Err(format!("Expected 1 block, found {}", blocks.len()).into()); } - + let block = &blocks[0]; println!("\nProcessing single empty block at slot {:?}", block.slot); - + // Verify it's an empty block (no attestations) let attestation_count = { let mut count = 0u64; @@ -227,18 +254,22 @@ impl TestRunner { } count }; - + if attestation_count > 0 { - return Err(format!("Expected empty block, but found {} attestations", 
attestation_count).into()); + return Err(format!( + "Expected empty block, but found {} attestations", + attestation_count + ) + .into()); } println!(" ✓ Confirmed: Block has no attestations (empty block)"); - + // Advance state to the block's slot let state_after_slots = state.process_slots(block.slot)?; - + // Compute the parent root from our current latest_block_header let computed_parent_root = hash_tree_root(&state_after_slots.latest_block_header); - + // Verify the block's parent_root matches what we computed if block.parent_root != computed_parent_root { return Err(format!( @@ -247,19 +278,19 @@ impl TestRunner { computed_parent_root ).into()); } - + println!(" ✓ Parent root matches: {:?}", computed_parent_root); - + // Process the block header let result = state_after_slots.process_block_header(block); match result { Ok(new_state) => { state = new_state; - + // Compute the state root after processing let computed_state_root = hash_tree_root(&state); - + // Verify the computed state_root matches the expected one from the vector if block.state_root != computed_state_root { return Err(format!( @@ -268,7 +299,7 @@ impl TestRunner { computed_state_root ).into()); } - + println!(" ✓ State root matches: {:?}", computed_state_root); println!(" ✓ Empty block processed successfully"); } @@ -276,38 +307,44 @@ impl TestRunner { return Err(format!("Block processing failed: {:?}", e).into()); } } - + // Verify post-state conditions if let Some(post) = test_case.post { if state.slot != post.slot { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } - + println!("\n✓ All post-state checks passed"); println!(" Final slot: {:?}", state.slot); } - + println!("\n✓✓✓ PASS: Single empty block processed successfully ✓✓✓"); } - + Ok(()) } /// Generic test runner for block processing test vectors /// Handles all test vectors from test_blocks directory - pub fn run_block_processing_test>(path: P) -> Result<(), 
Box> { + pub fn run_block_processing_test>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; - + // Parse using the TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("\n{}: {}", test_name, test_case.info.description); // Check if this is an invalid/exception test @@ -327,7 +364,7 @@ impl TestRunner { } let mut state = test_case.pre.clone(); - + for (idx, block) in blocks.iter().enumerate() { // Check if this is a gap (missed slots) let gap_size = if idx == 0 { @@ -335,19 +372,24 @@ impl TestRunner { } else { block.slot.0 - state.slot.0 - 1 }; - + if gap_size > 0 { - println!(" Block {}: slot {} (gap: {} empty slots)", idx + 1, block.slot.0, gap_size); + println!( + " Block {}: slot {} (gap: {} empty slots)", + idx + 1, + block.slot.0, + gap_size + ); } else { println!(" Block {}: slot {}", idx + 1, block.slot.0); } - + // Advance state to the block's slot let state_after_slots = state.process_slots(block.slot)?; - + // Compute the parent root from our current latest_block_header let computed_parent_root = hash_tree_root(&state_after_slots.latest_block_header); - + // Verify the block's parent_root matches what we computed if block.parent_root != computed_parent_root { println!(" \x1b[31m✗ FAIL: Parent root mismatch\x1b[0m"); @@ -355,7 +397,7 @@ impl TestRunner { println!(" Got: {:?}\n", computed_parent_root); return Err(format!("Block {} parent_root mismatch", idx + 1).into()); } - + // Check if block is empty (no attestations) let attestation_count = { let mut count = 0u64; @@ -367,17 +409,17 @@ impl TestRunner { } count }; - + // Process the full block (header + operations) let result = 
state_after_slots.process_block(block); match result { Ok(new_state) => { state = new_state; - + // Compute the state root after processing let computed_state_root = hash_tree_root(&state); - + // Verify the computed state_root matches the expected one from the block if block.state_root != computed_state_root { println!(" \x1b[31m✗ FAIL: State root mismatch\x1b[0m"); @@ -385,7 +427,7 @@ impl TestRunner { println!(" Got: {:?}\n", computed_state_root); return Err(format!("Block {} state_root mismatch", idx + 1).into()); } - + if attestation_count > 0 { println!(" ✓ Processed with {} attestation(s)", attestation_count); } else { @@ -399,13 +441,13 @@ impl TestRunner { } } } - + // Verify post-state conditions Self::verify_post_state(&state, &test_case)?; - + println!("\n\x1b[32m✓ PASS\x1b[0m\n"); } - + Ok(()) } @@ -413,50 +455,64 @@ impl TestRunner { /// Handles test vectors from test_genesis directory pub fn run_genesis_test>(path: P) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; - + // Parse using the TestVectorFile structure let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("\n{}: {}", test_name, test_case.info.description); let state = &test_case.pre; - + let num_validators = state.validators.len_u64(); - println!(" Genesis time: {}, slot: {}, validators: {}", state.config.genesis_time, state.slot.0, num_validators); - + println!( + " Genesis time: {}, slot: {}, validators: {}", + state.config.genesis_time, state.slot.0, num_validators + ); + // Verify it's at genesis (slot 0) if state.slot.0 != 0 { return Err(format!("Expected genesis at slot 0, got slot {}", state.slot.0).into()); } - + // Verify checkpoint initialization if state.latest_justified.slot.0 != 0 { - 
return Err(format!("Expected latest_justified at slot 0, got {}", state.latest_justified.slot.0).into()); + return Err(format!( + "Expected latest_justified at slot 0, got {}", + state.latest_justified.slot.0 + ) + .into()); } - + if state.latest_finalized.slot.0 != 0 { - return Err(format!("Expected latest_finalized at slot 0, got {}", state.latest_finalized.slot.0).into()); + return Err(format!( + "Expected latest_finalized at slot 0, got {}", + state.latest_finalized.slot.0 + ) + .into()); } - + // Verify empty historical data let has_history = state.historical_block_hashes.get(0).is_ok(); if has_history { return Err("Expected empty historical block hashes at genesis".into()); } - + println!(" ✓ Genesis state validated"); - + // Verify post-state if present if test_case.post.is_some() { Self::verify_post_state(state, &test_case)?; } - + println!("\n\x1b[32m✓ PASS\x1b[0m\n"); - + Ok(()) } @@ -473,10 +529,10 @@ impl TestRunner { for (idx, block) in blocks.iter().enumerate() { println!(" Block {}: slot {}", idx + 1, block.slot.0); - + // Advance state to the block's slot let state_after_slots = state.process_slots(block.slot)?; - + // Try to process the full block (header + body) - we expect this to fail let result = state_after_slots.process_block(block); @@ -484,14 +540,16 @@ impl TestRunner { Ok(new_state) => { // Block processing succeeded, now validate state root let computed_state_root = hash_tree_root(&new_state); - + if block.state_root != computed_state_root { error_occurred = true; println!(" ✓ Correctly rejected: Invalid block state root"); break; // Stop at first error } else { println!(" \x1b[31m✗ FAIL: Block processed successfully - but should have failed!\x1b[0m\n"); - return Err("Expected block processing to fail, but it succeeded".into()); + return Err( + "Expected block processing to fail, but it succeeded".into() + ); } } Err(e) => { @@ -501,91 +559,110 @@ impl TestRunner { } } } - + if !error_occurred { return Err("Expected an exception but all 
blocks processed successfully".into()); } } - + Ok(()) } /// Helper: Verify genesis state only (no blocks) fn verify_genesis_state(test_case: TestCase) -> Result<(), Box> { let state = &test_case.pre; - + // Verify post-state if present Self::verify_post_state(state, &test_case)?; - + Ok(()) } /// Helper: Verify post-state conditions - fn verify_post_state(state: &State, test_case: &TestCase) -> Result<(), Box> { + fn verify_post_state( + state: &State, + test_case: &TestCase, + ) -> Result<(), Box> { if let Some(ref post) = test_case.post { // Verify slot if state.slot != post.slot { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } - + // Verify validator count if specified if let Some(expected_count) = post.validator_count { let num_validators = state.validators.len_u64(); - + if num_validators as usize != expected_count { return Err(format!( "Post-state validator count mismatch: expected {}, got {}", expected_count, num_validators - ).into()); + ) + .into()); } - println!(" ✓ Post-state verified: slot {}, {} validators", state.slot.0, num_validators); + println!( + " ✓ Post-state verified: slot {}, {} validators", + state.slot.0, num_validators + ); } else { println!(" ✓ Post-state verified: slot {}", state.slot.0); } } - + Ok(()) } /// Test runner for verify_signatures test vectors /// Tests XMSS signature verification on SignedBlockWithAttestation #[cfg(feature = "devnet1")] - pub fn run_verify_signatures_test>(path: P) -> Result<(), Box> { + pub fn run_verify_signatures_test>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; - + // Parse using the VerifySignaturesTestVectorFile structure let test_file: VerifySignaturesTestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + 
.into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("\n{}: {}", test_name, test_case.info.description); - + let anchor_state = test_case.anchor_state; let signed_block = test_case.signed_block_with_attestation; - + // Print some debug info about what we're verifying println!(" Block slot: {}", signed_block.message.block.slot.0); - println!(" Proposer index: {}", signed_block.message.block.proposer_index.0); - + println!( + " Proposer index: {}", + signed_block.message.block.proposer_index.0 + ); + let attestation_count = signed_block.message.block.body.attestations.len_u64(); println!(" Attestations in block: {}", attestation_count); - println!(" Proposer attestation validator: {}", signed_block.message.proposer_attestation.validator_id.0); - + println!( + " Proposer attestation validator: {}", + signed_block.message.proposer_attestation.validator_id.0 + ); + let signature_count = signed_block.signature.len_u64(); println!(" Signatures: {}", signature_count); - + // Check if we expect this test to fail if let Some(ref exception) = test_case.expect_exception { println!(" Expecting exception: {}", exception); - + // Verify signatures - we expect this to fail (return false) let result = signed_block.verify_signatures(anchor_state); - + if result { println!(" \x1b[31m✗ FAIL: Signatures verified successfully but should have failed!\x1b[0m\n"); return Err("Expected signature verification to fail, but it succeeded".into()); @@ -596,7 +673,7 @@ impl TestRunner { } else { // Valid test case - signatures should verify successfully let result = signed_block.verify_signatures(anchor_state); - + if result { println!(" ✓ All signatures verified successfully"); println!("\n\x1b[32m✓ PASS\x1b[0m\n"); @@ -605,8 +682,7 @@ impl TestRunner { return Err("Signature verification failed".into()); } } - + Ok(()) } - } diff --git a/lean_client/containers/tests/test_vectors/verify_signatures.rs b/lean_client/containers/tests/test_vectors/verify_signatures.rs index 
cfc3301..13692f7 100644 --- a/lean_client/containers/tests/test_vectors/verify_signatures.rs +++ b/lean_client/containers/tests/test_vectors/verify_signatures.rs @@ -18,8 +18,7 @@ use super::runner::TestRunner; #[cfg(feature = "devnet1")] fn test_proposer_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_signature.json"; - TestRunner::run_verify_signatures_test(test_path) - .expect("test_proposer_signature failed"); + TestRunner::run_verify_signatures_test(test_path).expect("test_proposer_signature failed"); } #[test] @@ -40,8 +39,7 @@ fn test_proposer_and_attester_signatures() { #[ignore = "Requires xmss-verify feature for actual signature validation. Run with: cargo test --features xmss-verify"] fn test_invalid_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_invalid_signature.json"; - TestRunner::run_verify_signatures_test(test_path) - .expect("test_invalid_signature failed"); + TestRunner::run_verify_signatures_test(test_path).expect("test_invalid_signature failed"); } #[test] diff --git a/lean_client/containers/tests/unit_tests/attestation_aggregation.rs b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs index 285aa46..72d48b4 100644 --- a/lean_client/containers/tests/unit_tests/attestation_aggregation.rs +++ b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs @@ -1,10 +1,12 @@ #[cfg(feature = "devnet2")] #[cfg(test)] mod tests { - use containers::attestation::{AggregatedAttestation, AggregationBits, Attestation, AttestationData}; - use containers::{Bytes32, Uint64}; + use containers::attestation::{ + AggregatedAttestation, AggregationBits, Attestation, AttestationData, + }; use containers::checkpoint::Checkpoint; use containers::slot::Slot; + use containers::{Bytes32, Uint64}; #[test] fn test_aggregated_attestation_structure() { @@ -21,17 +23,22 @@ mod tests { source: Checkpoint { root: Bytes32::default(), 
slot: Slot(2), - } + }, }; let bits = AggregationBits::from_validator_indices(&vec![2, 7]); let agg = AggregatedAttestation { aggregation_bits: bits.clone(), - data: att_data.clone() + data: att_data.clone(), }; let indices = agg.aggregation_bits.to_validator_indices(); - assert_eq!(indices.into_iter().collect::>(), vec![2, 7].into_iter().collect()); + assert_eq!( + indices + .into_iter() + .collect::>(), + vec![2, 7].into_iter().collect() + ); assert_eq!(agg.data, att_data); } @@ -50,7 +57,7 @@ mod tests { source: Checkpoint { root: Bytes32::default(), slot: Slot(2), - } + }, }; let att_data2 = AttestationData { slot: Slot(6), @@ -65,7 +72,7 @@ mod tests { source: Checkpoint { root: Bytes32::default(), slot: Slot(3), - } + }, }; let attestations = vec![ @@ -88,7 +95,12 @@ mod tests { let agg1 = aggregated.iter().find(|agg| agg.data == att_data1).unwrap(); let validator_ids1 = agg1.aggregation_bits.to_validator_indices(); - assert_eq!(validator_ids1.into_iter().collect::>(), vec![1, 3].into_iter().collect()); + assert_eq!( + validator_ids1 + .into_iter() + .collect::>(), + vec![1, 3].into_iter().collect() + ); let agg2 = aggregated.iter().find(|agg| agg.data == att_data2).unwrap(); let validator_ids2 = agg2.aggregation_bits.to_validator_indices(); @@ -116,7 +128,7 @@ mod tests { source: Checkpoint { root: Bytes32::default(), slot: Slot(2), - } + }, }; let attestations = vec![Attestation { diff --git a/lean_client/containers/tests/unit_tests/common.rs b/lean_client/containers/tests/unit_tests/common.rs index 26fa0a5..1a648b8 100644 --- a/lean_client/containers/tests/unit_tests/common.rs +++ b/lean_client/containers/tests/unit_tests/common.rs @@ -1,7 +1,15 @@ -use containers::{Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators, AggregatedAttestation, Signature}; -use 
ssz::{PersistentList}; -use typenum::U4096; use containers::block::BlockSignatures; +use containers::{ + block::{hash_tree_root, Block, BlockBody, BlockHeader}, + checkpoint::Checkpoint, + slot::Slot, + state::State, + types::{Bytes32, ValidatorIndex}, + AggregatedAttestation, Attestation, Attestations, BlockWithAttestation, Config, Signature, + SignedBlockWithAttestation, Validators, +}; +use ssz::PersistentList; +use typenum::U4096; pub const DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tests @@ -10,7 +18,11 @@ pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tes const _: [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT] = [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT]; -pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Option) -> SignedBlockWithAttestation { +pub fn create_block( + slot: u64, + parent_header: &mut BlockHeader, + attestations: Option, +) -> SignedBlockWithAttestation { #[cfg(feature = "devnet1")] let body = BlockBody { attestations: attestations.unwrap_or_else(PersistentList::default), @@ -19,23 +31,26 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op let body = BlockBody { attestations: { let attestations_vec = attestations.unwrap_or_default(); - + // Convert PersistentList into a Vec - let attestations_vec: Vec = attestations_vec.into_iter().cloned().collect(); + let attestations_vec: Vec = + attestations_vec.into_iter().cloned().collect(); let aggregated: Vec = AggregatedAttestation::aggregate_by_data(&attestations_vec); - let aggregated: Vec = AggregatedAttestation::aggregate_by_data(&attestations_vec); // Create a new empty PersistentList - let mut persistent_list: PersistentList = PersistentList::default(); + let mut persistent_list: PersistentList = + PersistentList::default(); // Push each aggregated attestation for 
agg in aggregated { - persistent_list.push(agg).expect("PersistentList capacity exceeded"); + persistent_list + .push(agg) + .expect("PersistentList capacity exceeded"); } persistent_list @@ -43,7 +58,6 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op // other BlockBody fields... }; - let block_message = Block { slot: Slot(slot), proposer_index: ValidatorIndex(slot % 10), @@ -70,11 +84,10 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op signature: BlockSignatures { attestation_signatures: PersistentList::default(), proposer_signature: Signature::default(), - } + }, }; - - return_value + return_value } pub fn create_attestations(indices: &[usize]) -> Vec { @@ -109,8 +122,11 @@ pub fn base_state(config: Config) -> State { } pub fn base_state_with_validators(config: Config, num_validators: usize) -> State { - use containers::{HistoricalBlockHashes, JustificationRoots, JustifiedSlots, JustificationsValidators, validator::Validator, Uint64}; - + use containers::{ + validator::Validator, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, + JustifiedSlots, Uint64, + }; + // Create validators list with the specified number of validators let mut validators = Validators::default(); for i in 0..num_validators { @@ -120,7 +136,7 @@ pub fn base_state_with_validators(config: Config, num_validators: usize) -> Stat }; validators.push(validator).expect("within limit"); } - + State { config, slot: Slot(0), @@ -136,7 +152,5 @@ pub fn base_state_with_validators(config: Config, num_validators: usize) -> Stat } pub fn sample_config() -> Config { - Config { - genesis_time: 0, - } -} \ No newline at end of file + Config { genesis_time: 0 } +} diff --git a/lean_client/containers/tests/unit_tests/mod.rs b/lean_client/containers/tests/unit_tests/mod.rs index b9f442f..1bef390 100644 --- a/lean_client/containers/tests/unit_tests/mod.rs +++ b/lean_client/containers/tests/unit_tests/mod.rs @@ -1,7 +1,7 @@ // 
tests/unit_tests/mod.rs +mod attestation_aggregation; mod common; mod state_basic; mod state_justifications; mod state_process; mod state_transition; -mod attestation_aggregation; diff --git a/lean_client/containers/tests/unit_tests/state_basic.rs b/lean_client/containers/tests/unit_tests/state_basic.rs index 5fa16e1..085384a 100644 --- a/lean_client/containers/tests/unit_tests/state_basic.rs +++ b/lean_client/containers/tests/unit_tests/state_basic.rs @@ -1,5 +1,10 @@ // tests/state_basic.rs -use containers::{block::{BlockBody, hash_tree_root}, state::State, types::Uint64, ValidatorIndex}; +use containers::{ + block::{hash_tree_root, BlockBody}, + state::State, + types::Uint64, + ValidatorIndex, +}; use pretty_assertions::assert_eq; #[path = "common.rs"] @@ -14,8 +19,13 @@ fn test_generate_genesis() { assert_eq!(state.config, config); assert_eq!(state.slot.0, 0); - let empty_body = BlockBody { attestations: ssz::PersistentList::default() }; - assert_eq!(state.latest_block_header.body_root, hash_tree_root(&empty_body)); + let empty_body = BlockBody { + attestations: ssz::PersistentList::default(), + }; + assert_eq!( + state.latest_block_header.body_root, + hash_tree_root(&empty_body) + ); // Check that collections are empty by trying to get the first element assert!(state.historical_block_hashes.get(0).is_err()); @@ -41,7 +51,9 @@ fn test_slot_justifiability_rules() { #[test] fn test_hash_tree_root() { - let body = BlockBody { attestations: ssz::PersistentList::default() }; + let body = BlockBody { + attestations: ssz::PersistentList::default(), + }; let block = containers::block::Block { slot: containers::slot::Slot(1), proposer_index: ValidatorIndex(0), @@ -52,4 +64,4 @@ fn test_hash_tree_root() { let root = hash_tree_root(&block); assert_ne!(root, containers::types::Bytes32(ssz::H256::zero())); -} \ No newline at end of file +} diff --git a/lean_client/containers/tests/unit_tests/state_justifications.rs 
b/lean_client/containers/tests/unit_tests/state_justifications.rs index 9a7b0cc..afdd220 100644 --- a/lean_client/containers/tests/unit_tests/state_justifications.rs +++ b/lean_client/containers/tests/unit_tests/state_justifications.rs @@ -1,18 +1,12 @@ // tests/state_justifications.rs -use containers::{ - state::State, - types::Bytes32, - Config -}; +use containers::{state::State, types::Bytes32, Config}; use pretty_assertions::assert_eq; use rstest::{fixture, rstest}; use ssz::PersistentList as List; #[path = "common.rs"] mod common; -use common::{ - base_state, create_attestations, sample_config, TEST_VALIDATOR_COUNT, -}; +use common::{base_state, create_attestations, sample_config, TEST_VALIDATOR_COUNT}; #[fixture] fn config() -> Config { @@ -49,7 +43,7 @@ fn test_get_justifications_single_root() { let mut roots_list = List::default(); roots_list.push(root1).unwrap(); state.justifications_roots = roots_list; - + // Convert Vec to BitList let mut bitlist = ssz::BitList::with_length(TEST_VALIDATOR_COUNT); for (i, &val) in votes1.iter().enumerate() { @@ -88,7 +82,7 @@ fn test_get_justifications_multiple_roots() { roots_list.push(root2).unwrap(); roots_list.push(root3).unwrap(); state.justifications_roots = roots_list; - + // Convert Vec to BitList let mut bitlist = ssz::BitList::with_length(all_votes.len()); for (i, &val) in all_votes.iter().enumerate() { @@ -113,16 +107,20 @@ fn test_with_justifications_empty() { let mut initial_state = base_state(config.clone()); let mut roots_list = List::default(); - roots_list.push(Bytes32(ssz::H256::from_slice(&[1u8;32]))).unwrap(); + roots_list + .push(Bytes32(ssz::H256::from_slice(&[1u8; 32]))) + .unwrap(); initial_state.justifications_roots = roots_list; - + let mut bitlist = ssz::BitList::with_length(TEST_VALIDATOR_COUNT); for i in 0..TEST_VALIDATOR_COUNT { bitlist.set(i, true); } initial_state.justifications_validators = bitlist; - let new_state = 
initial_state.clone().with_justifications(std::collections::BTreeMap::new()); + let new_state = initial_state + .clone() + .with_justifications(std::collections::BTreeMap::new()); assert!(new_state.justifications_roots.get(0).is_err()); assert!(new_state.justifications_validators.get(0).is_none()); @@ -149,11 +147,15 @@ fn test_with_justifications_deterministic_order() { // Expected roots in sorted order (root1 < root2) assert_eq!(new_state.justifications_roots.get(0).ok(), Some(&root1)); assert_eq!(new_state.justifications_roots.get(1).ok(), Some(&root2)); - + // Verify the bitlist contains the concatenated votes let expected_validators = [votes1, votes2].concat(); for (i, &expected_val) in expected_validators.iter().enumerate() { - let actual_val = new_state.justifications_validators.get(i).map(|b| *b).unwrap_or(false); + let actual_val = new_state + .justifications_validators + .get(i) + .map(|b| *b) + .unwrap_or(false); assert_eq!(actual_val, expected_val); } } diff --git a/lean_client/containers/tests/unit_tests/state_process.rs b/lean_client/containers/tests/unit_tests/state_process.rs index afc1887..5df98cf 100644 --- a/lean_client/containers/tests/unit_tests/state_process.rs +++ b/lean_client/containers/tests/unit_tests/state_process.rs @@ -1,6 +1,6 @@ // tests/state_process.rs use containers::{ - block::{Block, BlockBody, hash_tree_root}, + block::{hash_tree_root, Block, BlockBody}, checkpoint::Checkpoint, slot::Slot, state::State, @@ -26,15 +26,24 @@ pub fn genesis_state() -> State { fn test_process_slot() { let genesis_state = genesis_state(); - assert_eq!(genesis_state.latest_block_header.state_root, Bytes32(ssz::H256::zero())); + assert_eq!( + genesis_state.latest_block_header.state_root, + Bytes32(ssz::H256::zero()) + ); let state_after_slot = genesis_state.process_slot(); let expected_root = hash_tree_root(&genesis_state); - assert_eq!(state_after_slot.latest_block_header.state_root, expected_root); + assert_eq!( + 
state_after_slot.latest_block_header.state_root, + expected_root + ); let state_after_second_slot = state_after_slot.process_slot(); - assert_eq!(state_after_second_slot.latest_block_header.state_root, expected_root); + assert_eq!( + state_after_second_slot.latest_block_header.state_root, + expected_root + ); } #[test] @@ -45,7 +54,10 @@ fn test_process_slots() { let new_state = genesis_state.process_slots(target_slot).unwrap(); assert_eq!(new_state.slot, target_slot); - assert_eq!(new_state.latest_block_header.state_root, hash_tree_root(&genesis_state)); + assert_eq!( + new_state.latest_block_header.state_root, + hash_tree_root(&genesis_state) + ); } #[test] @@ -68,11 +80,21 @@ fn test_process_block_header_valid() { assert_eq!(new_state.latest_finalized.root, genesis_header_root); assert_eq!(new_state.latest_justified.root, genesis_header_root); - assert_eq!(new_state.historical_block_hashes.get(0).ok(), Some(&genesis_header_root)); - let justified_slot_0 = new_state.justified_slots.get(0).map(|b| *b).unwrap_or(false); + assert_eq!( + new_state.historical_block_hashes.get(0).ok(), + Some(&genesis_header_root) + ); + let justified_slot_0 = new_state + .justified_slots + .get(0) + .map(|b| *b) + .unwrap_or(false); assert_eq!(justified_slot_0, true); assert_eq!(new_state.latest_block_header.slot, Slot(1)); - assert_eq!(new_state.latest_block_header.state_root, Bytes32(ssz::H256::zero())); + assert_eq!( + new_state.latest_block_header.state_root, + Bytes32(ssz::H256::zero()) + ); } #[rstest] @@ -95,7 +117,9 @@ fn test_process_block_header_invalid( proposer_index: ValidatorIndex(bad_proposer), parent_root: bad_parent_root.unwrap_or(parent_root), state_root: Bytes32(ssz::H256::zero()), - body: BlockBody { attestations: List::default() }, + body: BlockBody { + attestations: List::default(), + }, }; let result = state_at_slot_1.process_block_header(&block); @@ -115,13 +139,17 @@ fn test_process_attestations_justification_and_finalization() { let mut state_at_slot_1 = 
state.process_slots(Slot(1)).unwrap(); let block1 = create_block(1, &mut state_at_slot_1.latest_block_header, None); // Use process_block_header and process_operations separately to avoid state root validation - let state_after_header1 = state_at_slot_1.process_block_header(&block1.message.block).unwrap(); + let state_after_header1 = state_at_slot_1 + .process_block_header(&block1.message.block) + .unwrap(); state = state_after_header1.process_attestations(&block1.message.block.body.attestations); // Process slot 4 and block let mut state_at_slot_4 = state.process_slots(Slot(4)).unwrap(); let block4 = create_block(4, &mut state_at_slot_4.latest_block_header, None); - let state_after_header4 = state_at_slot_4.process_block_header(&block4.message.block).unwrap(); + let state_after_header4 = state_at_slot_4 + .process_block_header(&block4.message.block) + .unwrap(); state = state_after_header4.process_attestations(&block4.message.block.body.attestations); // Advance to slot 5 @@ -151,15 +179,21 @@ fn test_process_attestations_justification_and_finalization() { // Convert Vec to PersistentList let mut attestations_list: List<_, U4096> = List::default(); - for a in attestations_for_4 { - attestations_list.push(a).unwrap(); + for a in attestations_for_4 { + attestations_list.push(a).unwrap(); } let new_state = state.process_attestations(&attestations_list); assert_eq!(new_state.latest_justified, checkpoint4); - let justified_slot_4 = new_state.justified_slots.get(4).map(|b| *b).unwrap_or(false); + let justified_slot_4 = new_state + .justified_slots + .get(4) + .map(|b| *b) + .unwrap_or(false); assert_eq!(justified_slot_4, true); assert_eq!(new_state.latest_finalized, genesis_checkpoint); - assert!(!new_state.get_justifications().contains_key(&checkpoint4.root)); -} \ No newline at end of file + assert!(!new_state + .get_justifications() + .contains_key(&checkpoint4.root)); +} diff --git a/lean_client/containers/tests/unit_tests/state_transition.rs 
b/lean_client/containers/tests/unit_tests/state_transition.rs index 9fe6abb..7725210 100644 --- a/lean_client/containers/tests/unit_tests/state_transition.rs +++ b/lean_client/containers/tests/unit_tests/state_transition.rs @@ -167,7 +167,7 @@ fn test_state_transition_devnet2() { } state_after_header.process_attestations(&unaggregated_attestations) }; - + // Ensure the state root matches the expected state let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), diff --git a/lean_client/env-config/src/lib.rs b/lean_client/env-config/src/lib.rs index 972005d..109ac2d 100644 --- a/lean_client/env-config/src/lib.rs +++ b/lean_client/env-config/src/lib.rs @@ -1 +1 @@ -// Empty on purpose \ No newline at end of file +// Empty on purpose diff --git a/lean_client/fork_choice/src/handlers.rs b/lean_client/fork_choice/src/handlers.rs index fa9aa89..9f3837d 100644 --- a/lean_client/fork_choice/src/handlers.rs +++ b/lean_client/fork_choice/src/handlers.rs @@ -34,7 +34,6 @@ pub fn on_attestation( #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot; - #[cfg(feature = "devnet2")] let validator_id = ValidatorIndex(signed_attestation.validator_id); #[cfg(feature = "devnet2")] @@ -80,9 +79,7 @@ pub fn on_attestation( if store .latest_known_attestations .get(&validator_id) - .map_or(true, |existing| { - existing.message.slot < attestation_slot - }) + .map_or(true, |existing| existing.message.slot < attestation_slot) { store .latest_known_attestations @@ -114,14 +111,12 @@ pub fn on_attestation( .latest_new_attestations .insert(validator_id, signed_attestation); } - + #[cfg(feature = "devnet2")] if store .latest_new_attestations .get(&validator_id) - .map_or(true, |existing| { - existing.message.slot < attestation_slot - }) + .map_or(true, |existing| existing.message.slot < attestation_slot) { store .latest_new_attestations @@ -246,7 +241,8 @@ fn process_block_internal( .zip(attestation_signatures) { let validator_ids: 
Vec = aggregated_attestation - .aggregation_bits.0 + .aggregation_bits + .0 .iter() .enumerate() .filter(|(_, bit)| **bit) diff --git a/lean_client/fork_choice/src/store.rs b/lean_client/fork_choice/src/store.rs index 3296d06..8165443 100644 --- a/lean_client/fork_choice/src/store.rs +++ b/lean_client/fork_choice/src/store.rs @@ -1,7 +1,6 @@ use containers::{ - attestation::SignedAttestation, - block::SignedBlockWithAttestation, checkpoint::Checkpoint, config::Config, state::State, - Bytes32, Root, Slot, ValidatorIndex, + attestation::SignedAttestation, block::SignedBlockWithAttestation, checkpoint::Checkpoint, + config::Config, state::State, Bytes32, Root, Slot, ValidatorIndex, }; use ssz::SszHash; use std::collections::HashMap; @@ -185,7 +184,8 @@ pub fn update_safe_target(store: &mut Store) { let min_score = (n_validators * 2 + 2) / 3; let root = store.latest_justified.root; - store.safe_target = get_fork_choice_head(store, root, &store.latest_new_attestations, min_score); + store.safe_target = + get_fork_choice_head(store, root, &store.latest_new_attestations, min_score); } pub fn accept_new_attestations(store: &mut Store) { diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index 4f8521f..bb32273 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -4,8 +4,11 @@ use fork_choice::{ }; use containers::{ - attestation::{Attestation, AttestationData, SignedAttestation, Signature}, - block::{hash_tree_root, Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlockWithAttestation}, + attestation::{Attestation, AttestationData, Signature, SignedAttestation}, + block::{ + hash_tree_root, Block, BlockBody, BlockHeader, BlockWithAttestation, + SignedBlockWithAttestation, + }, checkpoint::Checkpoint, config::Config, state::State, @@ -305,7 +308,9 @@ fn convert_test_anchor_block(test_block: 
&TestAnchorBlock) -> SignedBlockWithAtt } #[cfg(feature = "devnet1")] -fn convert_test_block(test_block_with_att: &TestBlockWithAttestation) -> SignedBlockWithAttestation { +fn convert_test_block( + test_block_with_att: &TestBlockWithAttestation, +) -> SignedBlockWithAttestation { let test_block = &test_block_with_att.block; let mut attestations = ssz::PersistentList::default(); @@ -419,7 +424,7 @@ fn verify_checks( Some(c) => c, None => return Ok(()), }; - + if let Some(expected_slot) = checks.head_slot { let actual_slot = store.blocks[&store.head].message.block.slot.0; if actual_slot != expected_slot { @@ -533,7 +538,8 @@ fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> { // Advance time to the block's slot to ensure attestations are processable // SECONDS_PER_SLOT is 4 (not 12) - let block_time = store.config.genesis_time + (signed_block.message.block.slot.0 * 4); + let block_time = + store.config.genesis_time + (signed_block.message.block.slot.0 * 4); on_tick(&mut store, block_time, false); on_block(&mut store, signed_block)?; diff --git a/lean_client/fork_choice/tests/unit_tests.rs b/lean_client/fork_choice/tests/unit_tests.rs index b4490da..480ba3f 100644 --- a/lean_client/fork_choice/tests/unit_tests.rs +++ b/lean_client/fork_choice/tests/unit_tests.rs @@ -1,6 +1,6 @@ mod unit_tests { pub mod common; + pub mod fork_choice; pub mod time; pub mod votes; - pub mod fork_choice; } diff --git a/lean_client/fork_choice/tests/unit_tests/common.rs b/lean_client/fork_choice/tests/unit_tests/common.rs index 9539c34..dc1a762 100644 --- a/lean_client/fork_choice/tests/unit_tests/common.rs +++ b/lean_client/fork_choice/tests/unit_tests/common.rs @@ -1,4 +1,3 @@ -use fork_choice::store::{get_forkchoice_store, Store}; use containers::{ attestation::Attestation, block::{Block, BlockBody, BlockWithAttestation, SignedBlockWithAttestation}, @@ -7,19 +6,16 @@ use containers::{ validator::Validator, Bytes32, Slot, Uint64, ValidatorIndex, }; +use 
fork_choice::store::{get_forkchoice_store, Store}; use ssz::SszHash; pub fn create_test_store() -> Store { - let config = Config { - genesis_time: 1000, - }; - - let validators = vec![ - Validator::default(); 10 - ]; - + let config = Config { genesis_time: 1000 }; + + let validators = vec![Validator::default(); 10]; + let state = State::generate_genesis_with_validators(Uint64(1000), validators); - + let block = Block { slot: Slot(0), proposer_index: ValidatorIndex(0), @@ -27,7 +23,7 @@ pub fn create_test_store() -> Store { state_root: Bytes32(state.hash_tree_root()), body: BlockBody::default(), }; - + let block_with_attestation = BlockWithAttestation { block: block.clone(), proposer_attestation: Attestation::default(), diff --git a/lean_client/fork_choice/tests/unit_tests/fork_choice.rs b/lean_client/fork_choice/tests/unit_tests/fork_choice.rs index fc1e7e6..d2b3833 100644 --- a/lean_client/fork_choice/tests/unit_tests/fork_choice.rs +++ b/lean_client/fork_choice/tests/unit_tests/fork_choice.rs @@ -1,12 +1,12 @@ use super::common::create_test_store; -use fork_choice::store::{get_proposal_head, get_vote_target}; use containers::Slot; +use fork_choice::store::{get_proposal_head, get_vote_target}; #[test] fn test_get_proposal_head_basic() { let mut store = create_test_store(); let head = get_proposal_head(&mut store, Slot(0)); - + assert_eq!(head, store.head); } @@ -14,9 +14,9 @@ fn test_get_proposal_head_basic() { fn test_get_proposal_head_advances_time() { let mut store = create_test_store(); let initial_time = store.time; - + get_proposal_head(&mut store, Slot(5)); - + assert!(store.time >= initial_time); } diff --git a/lean_client/fork_choice/tests/unit_tests/time.rs b/lean_client/fork_choice/tests/unit_tests/time.rs index 03260c7..ff99491 100644 --- a/lean_client/fork_choice/tests/unit_tests/time.rs +++ b/lean_client/fork_choice/tests/unit_tests/time.rs @@ -1,7 +1,7 @@ use super::common::create_test_store; +use containers::{Slot, Uint64}; use 
fork_choice::handlers::on_tick; use fork_choice::store::{tick_interval, INTERVALS_PER_SLOT, SECONDS_PER_SLOT}; -use containers::{Slot, Uint64}; #[test] fn test_on_tick_basic() { @@ -31,7 +31,7 @@ fn test_on_tick_already_current() { let initial_time = store.time; let current_target = store.config.genesis_time + initial_time; - // Try to advance to current time + // Try to advance to current time on_tick(&mut store, current_target, true); // Should not change significantly @@ -86,7 +86,7 @@ fn test_tick_interval_sequence() { #[test] fn test_tick_interval_actions_by_phase() { let mut store = create_test_store(); - + // Reset store time to 0 relative to genesis for clean testing store.time = 0; @@ -101,11 +101,10 @@ fn test_tick_interval_actions_by_phase() { } } - #[test] fn test_slot_time_calculations() { let genesis_time = 1000; - + // Slot 0 let slot_0_time = genesis_time + (0 * SECONDS_PER_SLOT); assert_eq!(slot_0_time, genesis_time); diff --git a/lean_client/fork_choice/tests/unit_tests/votes.rs b/lean_client/fork_choice/tests/unit_tests/votes.rs index 4a1b688..d6c2ad4 100644 --- a/lean_client/fork_choice/tests/unit_tests/votes.rs +++ b/lean_client/fork_choice/tests/unit_tests/votes.rs @@ -1,22 +1,35 @@ use super::common::create_test_store; -use fork_choice::handlers::on_attestation; -use fork_choice::store::{accept_new_attestations, INTERVALS_PER_SLOT}; use containers::{ - attestation::{Attestation, AttestationData, SignedAttestation, Signature}, + attestation::{Attestation, AttestationData, Signature, SignedAttestation}, checkpoint::Checkpoint, Bytes32, Slot, Uint64, ValidatorIndex, }; +use fork_choice::handlers::on_attestation; +use fork_choice::store::{accept_new_attestations, INTERVALS_PER_SLOT}; #[cfg(feature = "devnet1")] -fn create_signed_attestation(validator_id: u64, slot: Slot, head_root: Bytes32) -> SignedAttestation { +fn create_signed_attestation( + validator_id: u64, + slot: Slot, + head_root: Bytes32, +) -> SignedAttestation { SignedAttestation { 
message: Attestation { validator_id: Uint64(validator_id), data: AttestationData { slot, - head: Checkpoint { root: head_root, slot }, - target: Checkpoint { root: head_root, slot }, - source: Checkpoint { root: Bytes32::default(), slot: Slot(0) }, + head: Checkpoint { + root: head_root, + slot, + }, + target: Checkpoint { + root: head_root, + slot, + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(0), + }, }, }, signature: Signature::default(), @@ -33,54 +46,59 @@ fn test_accept_new_attestations() { let val2 = ValidatorIndex(2); let val3 = ValidatorIndex(3); - store.latest_known_attestations.insert( - val1, - create_signed_attestation(1, Slot(0), store.head), - ); + store + .latest_known_attestations + .insert(val1, create_signed_attestation(1, Slot(0), store.head)); // Val1 updates their attestation to Slot 1 - store.latest_new_attestations.insert( - val1, - create_signed_attestation(1, Slot(1), store.head), - ); + store + .latest_new_attestations + .insert(val1, create_signed_attestation(1, Slot(1), store.head)); // Val2 casts a new attestation for Slot 1 - store.latest_new_attestations.insert( - val2, - create_signed_attestation(2, Slot(1), store.head), - ); + store + .latest_new_attestations + .insert(val2, create_signed_attestation(2, Slot(1), store.head)); // Val3 casts a new attestation for Slot 2 - store.latest_new_attestations.insert( - val3, - create_signed_attestation(3, Slot(2), store.head), - ); + store + .latest_new_attestations + .insert(val3, create_signed_attestation(3, Slot(2), store.head)); accept_new_attestations(&mut store); assert_eq!(store.latest_new_attestations.len(), 0); assert_eq!(store.latest_known_attestations.len(), 3); - assert_eq!(store.latest_known_attestations[&val1].message.data.slot, Slot(1)); - assert_eq!(store.latest_known_attestations[&val2].message.data.slot, Slot(1)); - assert_eq!(store.latest_known_attestations[&val3].message.data.slot, Slot(2)); + assert_eq!( + 
store.latest_known_attestations[&val1].message.data.slot, + Slot(1) + ); + assert_eq!( + store.latest_known_attestations[&val2].message.data.slot, + Slot(1) + ); + assert_eq!( + store.latest_known_attestations[&val3].message.data.slot, + Slot(2) + ); } #[test] #[cfg(feature = "devnet1")] fn test_accept_new_attestations_multiple() { let mut store = create_test_store(); - + for i in 0..5 { store.latest_new_attestations.insert( ValidatorIndex(i), create_signed_attestation(i, Slot(i), store.head), ); } - + assert_eq!(store.latest_new_attestations.len(), 5); assert_eq!(store.latest_known_attestations.len(), 0); - + accept_new_attestations(&mut store); - + assert_eq!(store.latest_new_attestations.len(), 0); assert_eq!(store.latest_known_attestations.len(), 5); } @@ -89,9 +107,9 @@ fn test_accept_new_attestations_multiple() { fn test_accept_new_attestations_empty() { let mut store = create_test_store(); let initial_known = store.latest_known_attestations.len(); - + accept_new_attestations(&mut store); - + assert_eq!(store.latest_new_attestations.len(), 0); assert_eq!(store.latest_known_attestations.len(), initial_known); } @@ -107,29 +125,55 @@ fn test_on_attestation_lifecycle() { // 1. Attestation from network (gossip) let signed_attestation_gossip = create_signed_attestation(1, slot_0, store.head); - on_attestation(&mut store, signed_attestation_gossip.clone(), false).expect("Gossip attestation valid"); - + on_attestation(&mut store, signed_attestation_gossip.clone(), false) + .expect("Gossip attestation valid"); + // Should be in new_attestations, not known_attestations assert!(store.latest_new_attestations.contains_key(&validator_idx)); assert!(!store.latest_known_attestations.contains_key(&validator_idx)); - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, slot_0); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + slot_0 + ); // 2. 
Same attestation included in a block on_attestation(&mut store, signed_attestation_gossip, true).expect("Block attestation valid"); - + assert!(store.latest_known_attestations.contains_key(&validator_idx)); - assert_eq!(store.latest_known_attestations[&validator_idx].message.data.slot, slot_0); + assert_eq!( + store.latest_known_attestations[&validator_idx] + .message + .data + .slot, + slot_0 + ); // 3. Newer attestation from network store.time = 1 * INTERVALS_PER_SLOT; // Advance time let signed_attestation_next = create_signed_attestation(1, slot_1, store.head); - on_attestation(&mut store, signed_attestation_next, false).expect("Next gossip attestation valid"); + on_attestation(&mut store, signed_attestation_next, false) + .expect("Next gossip attestation valid"); // Should update new_attestations - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, slot_1); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + slot_1 + ); // Known attestations should still be at slot 0 until accepted - assert_eq!(store.latest_known_attestations[&validator_idx].message.data.slot, slot_0); + assert_eq!( + store.latest_known_attestations[&validator_idx] + .message + .data + .slot, + slot_0 + ); } #[test] @@ -137,7 +181,7 @@ fn test_on_attestation_lifecycle() { fn test_on_attestation_future_slot() { let mut store = create_test_store(); let future_slot = Slot(100); // Far in the future - + let signed_attestation = create_signed_attestation(1, future_slot, store.head); let result = on_attestation(&mut store, signed_attestation, false); @@ -149,21 +193,33 @@ fn test_on_attestation_future_slot() { fn test_on_attestation_update_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // First attestation at slot 0 let signed_attestation1 = create_signed_attestation(1, Slot(0), store.head); - + on_attestation(&mut store, signed_attestation1, false).expect("First attestation valid"); - 
assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(0)); - + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(0) + ); + // Advance time to allow slot 1 store.time = 1 * INTERVALS_PER_SLOT; - + // Second attestation at slot 1 let signed_attestation2 = create_signed_attestation(1, Slot(1), store.head); - + on_attestation(&mut store, signed_attestation2, false).expect("Second attestation valid"); - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(1)); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(1) + ); } #[test] @@ -171,22 +227,35 @@ fn test_on_attestation_update_vote() { fn test_on_attestation_ignore_old_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // Advance time store.time = 2 * INTERVALS_PER_SLOT; - + // Newer attestation first let signed_attestation_new = create_signed_attestation(1, Slot(2), store.head); - + on_attestation(&mut store, signed_attestation_new, false).expect("New attestation valid"); - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(2)); - + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(2) + ); + // Older attestation second let signed_attestation_old = create_signed_attestation(1, Slot(1), store.head); - - on_attestation(&mut store, signed_attestation_old, false).expect("Old attestation processed but ignored"); + + on_attestation(&mut store, signed_attestation_old, false) + .expect("Old attestation processed but ignored"); // Should still be slot 2 - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(2)); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(2) + ); } #[test] @@ -194,18 +263,18 @@ fn test_on_attestation_ignore_old_vote() { fn test_on_attestation_from_block_supersedes_new() { let 
mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // First, add attestation via gossip let signed_attestation1 = create_signed_attestation(1, Slot(0), store.head); on_attestation(&mut store, signed_attestation1, false).expect("Gossip attestation valid"); - + assert!(store.latest_new_attestations.contains_key(&validator_idx)); assert!(!store.latest_known_attestations.contains_key(&validator_idx)); - + // Then, add same attestation via block (on-chain) let signed_attestation2 = create_signed_attestation(1, Slot(0), store.head); on_attestation(&mut store, signed_attestation2, true).expect("Block attestation valid"); - + // Should move from new to known assert!(!store.latest_new_attestations.contains_key(&validator_idx)); assert!(store.latest_known_attestations.contains_key(&validator_idx)); @@ -216,19 +285,31 @@ fn test_on_attestation_from_block_supersedes_new() { fn test_on_attestation_newer_from_block_removes_older_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // Add older attestation via gossip let signed_attestation_gossip = create_signed_attestation(1, Slot(0), store.head); on_attestation(&mut store, signed_attestation_gossip, false).expect("Gossip attestation valid"); - - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(0)); - + + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(0) + ); + // Add newer attestation via block (on-chain) store.time = 1 * INTERVALS_PER_SLOT; let signed_attestation_block = create_signed_attestation(1, Slot(1), store.head); on_attestation(&mut store, signed_attestation_block, true).expect("Block attestation valid"); - + // New attestation should be removed (superseded by newer on-chain one) assert!(!store.latest_new_attestations.contains_key(&validator_idx)); - assert_eq!(store.latest_known_attestations[&validator_idx].message.data.slot, Slot(1)); + assert_eq!( + 
store.latest_known_attestations[&validator_idx] + .message + .data + .slot, + Slot(1) + ); } diff --git a/lean_client/networking/src/gossipsub/config.rs b/lean_client/networking/src/gossipsub/config.rs index 05488c2..67061bc 100644 --- a/lean_client/networking/src/gossipsub/config.rs +++ b/lean_client/networking/src/gossipsub/config.rs @@ -15,9 +15,9 @@ impl GossipsubConfig { pub fn new() -> Self { let justification_lookback_slots: u64 = 3; let seconds_per_slot: u64 = 12; - + let seen_ttl_secs = seconds_per_slot * justification_lookback_slots * 2; - + let config = ConfigBuilder::default() // leanSpec: heartbeat_interval_secs = 0.7 .heartbeat_interval(Duration::from_millis(700)) diff --git a/lean_client/networking/src/gossipsub/message.rs b/lean_client/networking/src/gossipsub/message.rs index b578e75..4ac1ae1 100644 --- a/lean_client/networking/src/gossipsub/message.rs +++ b/lean_client/networking/src/gossipsub/message.rs @@ -1,8 +1,8 @@ use crate::gossipsub::topic::GossipsubKind; use crate::gossipsub::topic::GossipsubTopic; +use containers::SignedAttestation; use containers::SignedBlockWithAttestation; use containers::ssz::SszReadDefault; -use containers::{SignedAttestation}; use libp2p::gossipsub::TopicHash; pub enum GossipsubMessage { @@ -14,7 +14,8 @@ impl GossipsubMessage { pub fn decode(topic: &TopicHash, data: &[u8]) -> Result { match GossipsubTopic::decode(topic)?.kind { GossipsubKind::Block => Ok(Self::Block( - SignedBlockWithAttestation::from_ssz_default(data).map_err(|e| format!("{:?}", e))?, + SignedBlockWithAttestation::from_ssz_default(data) + .map_err(|e| format!("{:?}", e))?, )), GossipsubKind::Attestation => Ok(Self::Attestation( SignedAttestation::from_ssz_default(data).map_err(|e| format!("{:?}", e))?, diff --git a/lean_client/networking/src/gossipsub/tests/config.rs b/lean_client/networking/src/gossipsub/tests/config.rs index e788d81..4fa245d 100644 --- a/lean_client/networking/src/gossipsub/tests/config.rs +++ 
b/lean_client/networking/src/gossipsub/tests/config.rs @@ -1,5 +1,5 @@ use crate::gossipsub::config::GossipsubConfig; -use crate::gossipsub::topic::{get_topics, GossipsubKind}; +use crate::gossipsub::topic::{GossipsubKind, get_topics}; #[test] fn test_default_parameters() { @@ -24,8 +24,14 @@ fn test_default_parameters() { assert_eq!(config.config.gossip_lazy(), 6); // d_lazy = 6 assert_eq!(config.config.history_length(), 6); // mcache_len = 6 assert_eq!(config.config.history_gossip(), 3); // mcache_gossip = 3 - assert_eq!(config.config.fanout_ttl(), std::time::Duration::from_secs(60)); // fanout_ttl_secs = 60 - assert_eq!(config.config.heartbeat_interval(), std::time::Duration::from_millis(700)); // heartbeat_interval_secs = 0.7 + assert_eq!( + config.config.fanout_ttl(), + std::time::Duration::from_secs(60) + ); // fanout_ttl_secs = 60 + assert_eq!( + config.config.heartbeat_interval(), + std::time::Duration::from_millis(700) + ); // heartbeat_interval_secs = 0.7 assert!(config.topics.is_empty()); } diff --git a/lean_client/networking/src/gossipsub/tests/message.rs b/lean_client/networking/src/gossipsub/tests/message.rs index ce062be..9fd25dd 100644 --- a/lean_client/networking/src/gossipsub/tests/message.rs +++ b/lean_client/networking/src/gossipsub/tests/message.rs @@ -1,5 +1,7 @@ use crate::gossipsub::message::GossipsubMessage; -use crate::gossipsub::topic::{ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX}; +use crate::gossipsub::topic::{ + ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, +}; use libp2p::gossipsub::TopicHash; #[test] diff --git a/lean_client/networking/src/gossipsub/tests/message_id.rs b/lean_client/networking/src/gossipsub/tests/message_id.rs index 4eb3302..17a0b51 100644 --- a/lean_client/networking/src/gossipsub/tests/message_id.rs +++ b/lean_client/networking/src/gossipsub/tests/message_id.rs @@ -1,5 +1,7 @@ use crate::gossipsub::config::compute_message_id; -use 
crate::gossipsub::topic::{ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX}; +use crate::gossipsub::topic::{ + ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, +}; use crate::types::MESSAGE_DOMAIN_VALID_SNAPPY; use libp2p::gossipsub::{Message, TopicHash}; use sha2::{Digest, Sha256}; @@ -134,7 +136,7 @@ fn test_message_id_uses_valid_snappy_domain() { let topic_bytes = topic.as_bytes(); let topic_len = topic_bytes.len() as u64; - + let mut digest_input = Vec::new(); digest_input.extend_from_slice(MESSAGE_DOMAIN_VALID_SNAPPY); diff --git a/lean_client/networking/src/gossipsub/tests/mod.rs b/lean_client/networking/src/gossipsub/tests/mod.rs index 351a897..15f330a 100644 --- a/lean_client/networking/src/gossipsub/tests/mod.rs +++ b/lean_client/networking/src/gossipsub/tests/mod.rs @@ -1,4 +1,4 @@ mod config; -mod message_id; mod message; -mod topic; \ No newline at end of file +mod message_id; +mod topic; diff --git a/lean_client/networking/src/gossipsub/tests/topic.rs b/lean_client/networking/src/gossipsub/tests/topic.rs index cdd09df..7e3d70b 100644 --- a/lean_client/networking/src/gossipsub/tests/topic.rs +++ b/lean_client/networking/src/gossipsub/tests/topic.rs @@ -1,6 +1,6 @@ use crate::gossipsub::topic::{ - get_topics, GossipsubKind, GossipsubTopic, ATTESTATION_TOPIC, BLOCK_TOPIC, - SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, + ATTESTATION_TOPIC, BLOCK_TOPIC, GossipsubKind, GossipsubTopic, SSZ_SNAPPY_ENCODING_POSTFIX, + TOPIC_PREFIX, get_topics, }; use libp2p::gossipsub::TopicHash; diff --git a/lean_client/networking/src/gossipsub/topic.rs b/lean_client/networking/src/gossipsub/topic.rs index 9a4e7b5..09fcd33 100644 --- a/lean_client/networking/src/gossipsub/topic.rs +++ b/lean_client/networking/src/gossipsub/topic.rs @@ -45,9 +45,7 @@ impl GossipsubTopic { let parts: Vec<&str> = topic.as_str().trim_start_matches('/').split('/').collect(); if parts.len() != 4 { - return Err(format!( - "Invalid topic part count: 
{topic:?}" - )); + return Err(format!("Invalid topic part count: {topic:?}")); } Ok(parts) @@ -78,10 +76,7 @@ impl std::fmt::Display for GossipsubTopic { write!( f, "/{}/{}/{}/{}", - TOPIC_PREFIX, - self.fork, - self.kind, - SSZ_SNAPPY_ENCODING_POSTFIX + TOPIC_PREFIX, self.fork, self.kind, SSZ_SNAPPY_ENCODING_POSTFIX ) } } @@ -106,10 +101,7 @@ impl From for TopicHash { }; TopicHash::from_raw(format!( "/{}/{}/{}/{}", - TOPIC_PREFIX, - val.fork, - kind_str, - SSZ_SNAPPY_ENCODING_POSTFIX + TOPIC_PREFIX, val.fork, kind_str, SSZ_SNAPPY_ENCODING_POSTFIX )) } } diff --git a/lean_client/networking/src/network/mod.rs b/lean_client/networking/src/network/mod.rs index 7609d9a..1900ed8 100644 --- a/lean_client/networking/src/network/mod.rs +++ b/lean_client/networking/src/network/mod.rs @@ -2,4 +2,4 @@ mod behaviour; mod service; pub use behaviour::{LeanNetworkBehaviour, LeanNetworkBehaviourEvent}; -pub use service::{NetworkServiceConfig, NetworkEvent, NetworkService}; +pub use service::{NetworkEvent, NetworkService, NetworkServiceConfig}; diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 93e749c..8ae5729 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -341,8 +341,8 @@ where } fn handle_request_response_event(&mut self, event: ReqRespMessage) -> Option { - use libp2p::request_response::{Event, Message}; use crate::req_resp::LeanResponse; + use libp2p::request_response::{Event, Message}; match event { Event::Message { peer, message, .. 
} => match message { @@ -360,16 +360,24 @@ where tokio::spawn(async move { for block in blocks { let slot = block.message.block.slot.0; - if let Err(e) = chain_sink.send( - ChainMessage::ProcessBlock { + if let Err(e) = chain_sink + .send(ChainMessage::ProcessBlock { signed_block_with_attestation: block, is_trusted: false, should_gossip: false, // Don't re-gossip requested blocks - } - ).await { - warn!(slot = slot, ?e, "Failed to send requested block to chain"); + }) + .await + { + warn!( + slot = slot, + ?e, + "Failed to send requested block to chain" + ); } else { - debug!(slot = slot, "Queued requested block for processing"); + debug!( + slot = slot, + "Queued requested block for processing" + ); } } }); @@ -382,7 +390,9 @@ where } } } - Message::Request { request, channel, .. } => { + Message::Request { + request, channel, .. + } => { use crate::req_resp::{LeanRequest, LeanResponse}; let response = match request { @@ -398,10 +408,12 @@ where } }; - if let Err(e) = self.swarm + if let Err(e) = self + .swarm .behaviour_mut() .req_resp - .send_response(channel, response) { + .send_response(channel, response) + { warn!(peer = %peer, ?e, "Failed to send response"); } } @@ -528,7 +540,7 @@ where let slot = signed_attestation.message.data.slot.0; #[cfg(feature = "devnet2")] let slot = signed_attestation.message.slot.0; - + match signed_attestation.to_ssz() { Ok(bytes) => { if let Err(err) = self.publish_to_topic(GossipsubKind::Attestation, bytes) { diff --git a/lean_client/networking/src/req_resp.rs b/lean_client/networking/src/req_resp.rs index 51d705e..bd6c414 100644 --- a/lean_client/networking/src/req_resp.rs +++ b/lean_client/networking/src/req_resp.rs @@ -2,8 +2,8 @@ use std::io; use std::io::{Read, Write}; use async_trait::async_trait; -use containers::{Bytes32, SignedBlockWithAttestation, Status}; use containers::ssz::{SszReadDefault, SszWrite}; +use containers::{Bytes32, SignedBlockWithAttestation, Status}; use futures::{AsyncRead, AsyncReadExt, AsyncWrite, 
AsyncWriteExt}; use libp2p::request_response::{ Behaviour as RequestResponse, Codec, Config, Event, ProtocolSupport, @@ -46,8 +46,9 @@ impl LeanCodec { fn compress(data: &[u8]) -> io::Result> { let mut encoder = FrameEncoder::new(Vec::new()); encoder.write_all(data)?; - encoder.into_inner() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("Snappy framing failed: {e}"))) + encoder.into_inner().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("Snappy framing failed: {e}")) + }) } /// Decompress data using Snappy framing format (required for req/resp protocol) @@ -60,8 +61,9 @@ impl LeanCodec { fn encode_request(request: &LeanRequest) -> io::Result> { let ssz_bytes = match request { - LeanRequest::Status(status) => status.to_ssz() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")))?, + LeanRequest::Status(status) => status.to_ssz().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")) + })?, LeanRequest::BlocksByRoot(roots) => { let mut bytes = Vec::new(); for root in roots { @@ -77,12 +79,16 @@ impl LeanCodec { if data.is_empty() { return Ok(LeanRequest::Status(Status::default())); } - + let ssz_bytes = Self::decompress(data)?; - + if protocol.contains("status") { - let status = Status::from_ssz_default(&ssz_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ decode Status failed: {e:?}")))?; + let status = Status::from_ssz_default(&ssz_bytes).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("SSZ decode Status failed: {e:?}"), + ) + })?; Ok(LeanRequest::Status(status)) } else if protocol.contains("blocks_by_root") { let mut roots = Vec::new(); @@ -96,35 +102,44 @@ impl LeanCodec { if roots.len() > MAX_REQUEST_BLOCKS { return Err(io::Error::new( io::ErrorKind::InvalidData, - format!("Too many block roots requested: {} > {}", roots.len(), MAX_REQUEST_BLOCKS), + format!( + "Too many block roots requested: {} > {}", + roots.len(), + 
MAX_REQUEST_BLOCKS + ), )); } Ok(LeanRequest::BlocksByRoot(roots)) } else { - Err(io::Error::new(io::ErrorKind::Other, format!("Unknown protocol: {protocol}"))) + Err(io::Error::new( + io::ErrorKind::Other, + format!("Unknown protocol: {protocol}"), + )) } } fn encode_response(response: &LeanResponse) -> io::Result> { let ssz_bytes = match response { - LeanResponse::Status(status) => status.to_ssz() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")))?, + LeanResponse::Status(status) => status.to_ssz().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")) + })?, LeanResponse::BlocksByRoot(blocks) => { let mut bytes = Vec::new(); for block in blocks { - let block_bytes = block.to_ssz() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")))?; + let block_bytes = block.to_ssz().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")) + })?; bytes.extend_from_slice(&block_bytes); } bytes } LeanResponse::Empty => Vec::new(), }; - + if ssz_bytes.is_empty() { return Ok(Vec::new()); } - + Self::compress(&ssz_bytes) } @@ -132,22 +147,33 @@ impl LeanCodec { if data.is_empty() { return Ok(LeanResponse::Empty); } - + let ssz_bytes = Self::decompress(data)?; - + if protocol.contains("status") { - let status = Status::from_ssz_default(&ssz_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ decode Status failed: {e:?}")))?; + let status = Status::from_ssz_default(&ssz_bytes).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("SSZ decode Status failed: {e:?}"), + ) + })?; Ok(LeanResponse::Status(status)) } else if protocol.contains("blocks_by_root") { if ssz_bytes.is_empty() { return Ok(LeanResponse::BlocksByRoot(Vec::new())); } - let block = SignedBlockWithAttestation::from_ssz_default(&ssz_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ decode Block failed: {e:?}")))?; + let block = 
SignedBlockWithAttestation::from_ssz_default(&ssz_bytes).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("SSZ decode Block failed: {e:?}"), + ) + })?; Ok(LeanResponse::BlocksByRoot(vec![block])) } else { - Err(io::Error::new(io::ErrorKind::Other, format!("Unknown protocol: {protocol}"))) + Err(io::Error::new( + io::ErrorKind::Other, + format!("Unknown protocol: {protocol}"), + )) } } } diff --git a/lean_client/networking/src/types.rs b/lean_client/networking/src/types.rs index 028a883..bbe7cba 100644 --- a/lean_client/networking/src/types.rs +++ b/lean_client/networking/src/types.rs @@ -70,7 +70,9 @@ pub enum ChainMessage { } impl ChainMessage { - pub fn block_with_attestation(signed_block_with_attestation: SignedBlockWithAttestation) -> Self { + pub fn block_with_attestation( + signed_block_with_attestation: SignedBlockWithAttestation, + ) -> Self { ChainMessage::ProcessBlock { signed_block_with_attestation, is_trusted: false, @@ -90,16 +92,35 @@ impl ChainMessage { impl Display for ChainMessage { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ChainMessage::ProcessBlock { signed_block_with_attestation, .. } => { - write!(f, "ProcessBlockWithAttestation(slot={})", signed_block_with_attestation.message.block.slot.0) + ChainMessage::ProcessBlock { + signed_block_with_attestation, + .. + } => { + write!( + f, + "ProcessBlockWithAttestation(slot={})", + signed_block_with_attestation.message.block.slot.0 + ) } #[cfg(feature = "devnet1")] - ChainMessage::ProcessAttestation { signed_attestation, .. } => { - write!(f, "ProcessAttestation(slot={})", signed_attestation.message.data.slot.0) + ChainMessage::ProcessAttestation { + signed_attestation, .. + } => { + write!( + f, + "ProcessAttestation(slot={})", + signed_attestation.message.data.slot.0 + ) } #[cfg(feature = "devnet2")] - ChainMessage::ProcessAttestation { signed_attestation, .. 
} => { - write!(f, "ProcessAttestation(slot={})", signed_attestation.message.slot.0) + ChainMessage::ProcessAttestation { + signed_attestation, .. + } => { + write!( + f, + "ProcessAttestation(slot={})", + signed_attestation.message.slot.0 + ) } } } diff --git a/lean_client/validator/src/keys.rs b/lean_client/validator/src/keys.rs index cae38f1..7680102 100644 --- a/lean_client/validator/src/keys.rs +++ b/lean_client/validator/src/keys.rs @@ -1,8 +1,8 @@ +use containers::attestation::U3112; +use containers::ssz::ByteVector; +use containers::Signature; use std::collections::HashMap; use std::path::{Path, PathBuf}; -use containers::Signature; -use containers::ssz::ByteVector; -use containers::attestation::U3112; use tracing::info; #[cfg(feature = "xmss-signing")] @@ -42,7 +42,9 @@ impl KeyManager { /// Load a secret key for a specific validator index pub fn load_key(&mut self, validator_index: u64) -> Result<(), Box> { - let sk_path = self.keys_dir.join(format!("validator_{}_sk.ssz", validator_index)); + let sk_path = self + .keys_dir + .join(format!("validator_{}_sk.ssz", validator_index)); if !sk_path.exists() { return Err(format!("Secret key file not found: {:?}", sk_path).into()); @@ -69,20 +71,20 @@ impl KeyManager { ) -> Result> { #[cfg(feature = "xmss-signing")] { - let key_bytes = self.keys + let key_bytes = self + .keys .get(&validator_index) .ok_or_else(|| format!("No key loaded for validator {}", validator_index))?; - type SecretKey = ::SecretKey; + type SecretKey = + ::SecretKey; let secret_key = SecretKey::from_bytes(key_bytes) .map_err(|e| format!("Failed to deserialize secret key: {:?}", e))?; - let leansig_signature = SIGTopLevelTargetSumLifetime32Dim64Base8::sign( - &secret_key, - epoch, - message, - ).map_err(|e| format!("Failed to sign message: {:?}", e))?; + let leansig_signature = + SIGTopLevelTargetSumLifetime32Dim64Base8::sign(&secret_key, epoch, message) + .map_err(|e| format!("Failed to sign message: {:?}", e))?; let sig_bytes = 
leansig_signature.to_bytes(); @@ -90,7 +92,8 @@ impl KeyManager { return Err(format!( "Invalid signature size: expected 3112, got {}", sig_bytes.len() - ).into()); + ) + .into()); } // Convert to ByteVector using unsafe pointer copy (same pattern as PublicKey) @@ -105,7 +108,7 @@ impl KeyManager { #[cfg(not(feature = "xmss-signing"))] { - let _ = (epoch, message); // Suppress unused warnings + let _ = (epoch, message); // Suppress unused warnings warn!( validator = validator_index, "XMSS signing disabled - using zero signature" diff --git a/lean_client/validator/src/lib.rs b/lean_client/validator/src/lib.rs index 2c65fa7..752cda8 100644 --- a/lean_client/validator/src/lib.rs +++ b/lean_client/validator/src/lib.rs @@ -2,9 +2,9 @@ use std::collections::HashMap; use std::path::Path; -use containers::attestation::{AggregatedAttestations}; +use containers::attestation::AggregatedAttestations; #[cfg(feature = "devnet2")] -use containers::attestation::{NaiveAggregatedSignature}; +use containers::attestation::NaiveAggregatedSignature; use containers::block::BlockSignatures; use containers::{ attestation::{Attestation, AttestationData, Signature, SignedAttestation}, From ae512b9a62c6065373e5f3e3e66049e50943714b Mon Sep 17 00:00:00 2001 From: Dariusspr <108625236+Dariusspr@users.noreply.github.com> Date: Sun, 18 Jan 2026 23:34:59 +0200 Subject: [PATCH 09/23] Remove all-features flag. 
Cant build both dev-nets together --- lean_client/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lean_client/Makefile b/lean_client/Makefile index 582e56b..6539dea 100644 --- a/lean_client/Makefile +++ b/lean_client/Makefile @@ -10,7 +10,7 @@ check-format: .PHONY: test test: - cargo test --workspace --all-features --no-fail-fast + cargo test --workspace --no-fail-fast .PHONY: build build: From 67497f6d13e3a58cf4c61c2e76cb83042c21f8a7 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:47:57 +0200 Subject: [PATCH 10/23] feat: add discv5 dependencies --- lean_client/Cargo.lock | 285 +++++++++++------------------- lean_client/networking/Cargo.toml | 9 +- 2 files changed, 107 insertions(+), 187 deletions(-) diff --git a/lean_client/Cargo.lock b/lean_client/Cargo.lock index 910d9c1..61be8fc 100644 --- a/lean_client/Cargo.lock +++ b/lean_client/Cargo.lock @@ -79,7 +79,7 @@ dependencies = [ "hashbrown 0.16.0", "indexmap 2.11.4", "itoa", - "k256 0.13.4", + "k256", "keccak-asm", "paste", "proptest", @@ -500,7 +500,7 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ - "base64 0.22.1", + "base64", "http", "log", "url", @@ -529,12 +529,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -551,12 +545,6 @@ dependencies = [ "match-lookup", ] -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - 
[[package]] name = "base64" version = "0.22.1" @@ -962,18 +950,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1109,13 +1085,14 @@ dependencies = [ ] [[package]] -name = "der" -version = "0.6.1" +name = "delay_map" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +checksum = "88e365f083a5cb5972d50ce8b1b2c9f125dc5ec0f50c0248cfb568ae59efcf0b" dependencies = [ - "const-oid", - "zeroize", + "futures", + "tokio", + "tokio-util", ] [[package]] @@ -1212,6 +1189,37 @@ dependencies = [ "subtle", ] +[[package]] +name = "discv5" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" +dependencies = [ + "aes", + "aes-gcm", + "alloy-rlp", + "arrayvec", + "ctr", + "delay_map", + "enr", + "fnv", + "futures", + "hashlink", + "hex", + "hkdf", + "lazy_static", + "lru", + "more-asserts", + "parking_lot", + "rand 0.8.5", + "smallvec", + "socket2 0.5.10", + "tokio", + "tracing", + "uint 0.10.0", + "zeroize", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -1241,30 +1249,18 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc5d6d6a8504f8caedd7de14576464383900cd3840b7033a7a3dce5ac00121ca" -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] - [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.10", + "der", "digest 0.10.7", - "elliptic-curve 0.13.8", - "rfc6979 0.4.0", - "signature 2.2.0", - "spki 0.7.3", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] @@ -1273,8 +1269,8 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8 0.10.2", - "signature 2.2.0", + "pkcs8", + "signature", ] [[package]] @@ -1310,59 +1306,39 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array", - "group 0.12.1", - "pkcs8 0.9.0", - "rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.13.1", + "ff", "generic-array", - "group 0.13.0", - "pkcs8 0.10.2", + "group", + "pkcs8", "rand_core 0.6.4", - "sec1 0.7.3", + "sec1", "subtle", "zeroize", ] [[package]] name = "enr" -version = "0.7.0" 
+version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad" +checksum = "851bd664a3d3a3c175cff92b2f0df02df3c541b4895d0ae307611827aae46152" dependencies = [ - "base64 0.13.1", - "bs58 0.4.0", + "alloy-rlp", + "base64", "bytes", + "ed25519-dalek", "hex", - "k256 0.11.6", + "k256", "log", "rand 0.8.5", - "rlp", "serde", "sha3", "zeroize", @@ -1444,7 +1420,7 @@ dependencies = [ "impl-rlp", "impl-serde", "primitive-types", - "uint", + "uint 0.9.5", ] [[package]] @@ -1524,16 +1500,6 @@ dependencies = [ "bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "ff" version = "0.13.1" @@ -1797,24 +1763,13 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.1", + "ff", "rand_core 0.6.4", "subtle", ] @@ -2407,18 +2362,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if", - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.9 
(registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "k256" version = "0.13.4" @@ -2426,11 +2369,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", + "ecdsa", + "elliptic-curve", "once_cell", "sha2 0.10.9 (registry+https://github.com/rust-lang/crates.io-index)", - "signature 2.2.0", + "signature", ] [[package]] @@ -2639,7 +2582,7 @@ checksum = "c7f58e37d8d6848e5c4c9e3c35c6f61133235bff2960c9c00a663b0849301221" dependencies = [ "async-channel", "asynchronous-codec 0.7.0", - "base64 0.22.1", + "base64", "byteorder", "bytes", "either", @@ -2710,7 +2653,7 @@ dependencies = [ "bs58 0.5.1", "ed25519-dalek", "hkdf", - "k256 0.13.4", + "k256", "multihash 0.19.3", "quick-protobuf", "rand 0.8.5", @@ -3037,6 +2980,12 @@ dependencies = [ "uuid", ] +[[package]] +name = "more-asserts" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" + [[package]] name = "multiaddr" version = "0.17.1" @@ -3222,12 +3171,17 @@ dependencies = [ "anyhow", "async-trait", "containers", + "discv5", "enr", "env-config", "futures", + "hex", + "k256", "libp2p", "libp2p-identity 0.2.12", "libp2p-mplex", + "num-bigint", + "num-traits", "parking_lot", "rand 0.8.5", "serde", @@ -3559,7 +3513,7 @@ version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ - "base64 0.22.1", + "base64", "serde_core", ] @@ -3611,24 +3565,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.10", - "spki 0.7.3", + "der", + "spki", ] [[package]] @@ -3718,7 +3662,7 @@ dependencies = [ "impl-codec", "impl-rlp", "impl-serde", - "uint", + "uint 0.9.5", ] [[package]] @@ -4100,17 +4044,6 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -4389,30 +4322,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.2.0", - "der 0.7.10", + "base16ct", + "der", "generic-array", - "pkcs8 0.10.2", + "pkcs8", "subtle", "zeroize", ] @@ -4507,7 +4426,7 @@ version = "3.14.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "hex", "indexmap 1.9.3", @@ -4624,16 +4543,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.2.0" @@ -4708,16 +4617,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - [[package]] name = "spki" version = "0.7.3" @@ -4725,7 +4624,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.10", + "der", ] [[package]] @@ -5081,6 +4980,7 @@ dependencies = [ "futures-core", "futures-sink", "pin-project-lite", + "slab", "tokio", ] @@ -5135,6 +5035,7 @@ version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -5245,6 +5146,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" diff --git a/lean_client/networking/Cargo.toml b/lean_client/networking/Cargo.toml 
index 8f47702..6b116c9 100644 --- a/lean_client/networking/Cargo.toml +++ b/lean_client/networking/Cargo.toml @@ -17,7 +17,9 @@ snap = {workspace = true} sha2 = { workspace = true } anyhow = { workspace = true } async-trait = "0.1" -enr = "0.7" +discv5 = "0.10" +enr = { version = "0.13", features = ["k256"] } +k256 = "0.13" futures = "0.3" libp2p-identity = { version = "0.2", features = ["secp256k1"] } libp2p-mplex = "0.39" @@ -28,3 +30,8 @@ tracing = "0.1" yamux = "0.12" ssz = { workspace = true } serde = { workspace = true } + +[dev-dependencies] +hex = "0.4" +num-bigint = "0.4" +num-traits = "0.2" From 24bbed93e346e034311b9ddbe75e7a5a26a6f232 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:09 +0200 Subject: [PATCH 11/23] feat: add discovery config --- .../networking/src/discovery/config.rs | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 lean_client/networking/src/discovery/config.rs diff --git a/lean_client/networking/src/discovery/config.rs b/lean_client/networking/src/discovery/config.rs new file mode 100644 index 0000000..b613cc7 --- /dev/null +++ b/lean_client/networking/src/discovery/config.rs @@ -0,0 +1,40 @@ +use std::net::IpAddr; + +use discv5::enr::CombinedKey; +use enr::Enr; + +#[derive(Debug, Clone)] +pub struct DiscoveryConfig { + pub enabled: bool, + pub udp_port: u16, + pub libp2p_port: u16, + pub listen_address: IpAddr, + pub bootnodes: Vec>, +} + +impl DiscoveryConfig { + pub fn new(listen_address: IpAddr, udp_port: u16, libp2p_port: u16) -> Self { + Self { + enabled: true, + udp_port, + libp2p_port, + listen_address, + bootnodes: Vec::new(), + } + } + + pub fn with_bootnodes(mut self, bootnodes: Vec>) -> Self { + self.bootnodes = bootnodes; + self + } + + pub fn disabled() -> Self { + Self { + enabled: false, + udp_port: 0, + libp2p_port: 0, + listen_address: IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), + bootnodes: Vec::new(), + } + } +} From 3de4a45800455f4cc3db71780616d5bdce7e528c Mon 
Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:22 +0200 Subject: [PATCH 12/23] feat: add discovery service --- lean_client/networking/src/discovery/mod.rs | 219 ++++++++++++++++++++ 1 file changed, 219 insertions(+) create mode 100644 lean_client/networking/src/discovery/mod.rs diff --git a/lean_client/networking/src/discovery/mod.rs b/lean_client/networking/src/discovery/mod.rs new file mode 100644 index 0000000..d0b67db --- /dev/null +++ b/lean_client/networking/src/discovery/mod.rs @@ -0,0 +1,219 @@ +pub mod config; + +#[cfg(test)] +mod tests; + +use std::net::IpAddr; +use std::sync::Arc; + +use anyhow::{Result, anyhow}; +use discv5::enr::{CombinedKey, NodeId}; +use discv5::{ConfigBuilder, Discv5, Event as Discv5Event, ListenConfig}; +use enr::{Builder as EnrBuilder, Enr}; +use libp2p::Multiaddr; +use libp2p::multiaddr::Protocol; +use libp2p_identity::{Keypair, PeerId}; +use tokio::sync::mpsc; +use tracing::{debug, info, warn}; + +pub use config::DiscoveryConfig; + +/// Discovery service that wraps discv5 for peer discovery. 
+pub struct DiscoveryService { + discv5: Arc, + local_enr: Enr, + event_receiver: mpsc::Receiver, +} + +impl DiscoveryService { + pub async fn new(config: DiscoveryConfig, keypair: &Keypair) -> Result { + let enr_key = keypair_to_enr_key(keypair)?; + + let local_enr = build_enr(&enr_key, config.listen_address, config.udp_port, config.libp2p_port)?; + + info!( + enr = %local_enr, + node_id = %local_enr.node_id(), + "Built local ENR" + ); + + let listen_config = ListenConfig::from_ip(config.listen_address, config.udp_port); + + let discv5_config = ConfigBuilder::new(listen_config).build(); + + let mut discv5 = Discv5::new(local_enr.clone(), enr_key, discv5_config) + .map_err(|e| anyhow!("Failed to create discv5: {e}"))?; + + for bootnode in &config.bootnodes { + if let Err(e) = discv5.add_enr(bootnode.clone()) { + warn!(enr = %bootnode, error = ?e, "Failed to add bootnode ENR"); + } else { + info!(enr = %bootnode, "Added bootnode ENR"); + } + } + + discv5 + .start() + .await + .map_err(|e| anyhow!("Failed to start discv5: {e}"))?; + + let event_receiver = discv5 + .event_stream() + .await + .map_err(|e| anyhow!("Failed to get discv5 event stream: {e}"))?; + + info!("Discovery service started"); + + Ok(Self { + discv5: Arc::new(discv5), + local_enr, + event_receiver, + }) + } + + pub fn local_enr(&self) -> &Enr { + &self.local_enr + } + + pub async fn recv(&mut self) -> Option> { + loop { + match self.event_receiver.recv().await { + Some(event) => { + match event { + Discv5Event::Discovered(enr) => { + info!( + node_id = %enr.node_id(), + "Discovered peer via discv5" + ); + return Some(enr); + } + Discv5Event::SocketUpdated(addr) => { + info!(?addr, "discv5 socket updated"); + } + Discv5Event::SessionEstablished(enr, addr) => { + debug!( + node_id = %enr.node_id(), + ?addr, + "discv5 session established" + ); + } + Discv5Event::TalkRequest(_) => { + // We don't handle TALKREQ for now + } + Discv5Event::NodeInserted { node_id, replaced } => { + debug!( + %node_id, + 
+                                ?replaced,
+                                "Node inserted into routing table"
+                            );
+                        }
+                        _ => {
+                            // Handle any new event types added in future versions
+                        }
+                    }
+                }
+                None => return None,
+            }
+        }
+    }
+
+    pub fn enr_to_multiaddr(enr: &Enr<CombinedKey>) -> Option<Multiaddr> {
+        let ip = enr.ip4().map(IpAddr::V4).or_else(|| enr.ip6().map(IpAddr::V6))?;
+        let libp2p_port = enr.tcp4().or_else(|| enr.tcp6())?;
+
+        let peer_id = enr_to_peer_id(enr)?;
+
+        let mut multiaddr: Multiaddr = ip.into();
+        multiaddr.push(Protocol::Udp(libp2p_port));
+        multiaddr.push(Protocol::QuicV1);
+        multiaddr.push(Protocol::P2p(peer_id));
+
+        Some(multiaddr)
+    }
+
+    pub fn find_random_peers(&self) {
+        let random_node_id = generate_random_node_id();
+        debug!(%random_node_id, "Starting random peer discovery lookup");
+
+        let discv5 = Arc::clone(&self.discv5);
+        tokio::spawn(async move {
+            match discv5.find_node(random_node_id).await {
+                Ok(nodes) => {
+                    info!(count = nodes.len(), "Random lookup completed");
+                }
+                Err(e) => {
+                    warn!(error = ?e, "Random lookup failed");
+                }
+            }
+        });
+    }
+
+    pub fn connected_peers(&self) -> usize {
+        self.discv5.connected_peers()
+    }
+}
+
+fn keypair_to_enr_key(keypair: &Keypair) -> Result<CombinedKey> {
+    match keypair.key_type() {
+        libp2p_identity::KeyType::Secp256k1 => {
+            let secp_keypair = keypair
+                .clone()
+                .try_into_secp256k1()
+                .map_err(|_| anyhow!("Failed to convert to secp256k1"))?;
+
+            let secret_bytes = secp_keypair.secret().to_bytes();
+            let secret_key = k256::ecdsa::SigningKey::from_slice(&secret_bytes)
+                .map_err(|e| anyhow!("Failed to create signing key: {e}"))?;
+
+            Ok(CombinedKey::Secp256k1(secret_key))
+        }
+        other => Err(anyhow!("Unsupported key type for discv5: {:?}", other)),
+    }
+}
+
+fn build_enr(key: &CombinedKey, ip: IpAddr, udp_port: u16, libp2p_port: u16) -> Result<Enr<CombinedKey>> {
+    let mut builder = EnrBuilder::default();
+
+    // libp2p port is stored in tcp field, since Enr doesn't have a field for a quic port
+    match ip {
+        IpAddr::V4(ipv4) => {
+            builder.ip4(ipv4);
+            builder.udp4(udp_port);
+            builder.tcp4(libp2p_port);
+        }
+        IpAddr::V6(ipv6) => {
+            builder.ip6(ipv6);
+            builder.udp6(udp_port);
+            builder.tcp6(libp2p_port);
+        }
+    }
+
+    builder
+        .build(key)
+        .map_err(|e| anyhow!("Failed to build ENR: {e}"))
+}
+
+fn enr_to_peer_id(enr: &Enr<CombinedKey>) -> Option<PeerId> {
+    let public_key = enr.public_key();
+
+    match public_key {
+        discv5::enr::CombinedPublicKey::Secp256k1(pk) => {
+            let compressed = pk.to_sec1_bytes();
+            let libp2p_pk = libp2p_identity::secp256k1::PublicKey::try_from_bytes(&compressed).ok()?;
+            let public = libp2p_identity::PublicKey::from(libp2p_pk);
+            Some(PeerId::from_public_key(&public))
+        }
+        _ => None,
+    }
+}
+
+pub fn parse_enr(enr_str: &str) -> Result<Enr<CombinedKey>> {
+    enr_str
+        .parse()
+        .map_err(|e| anyhow!("Failed to parse ENR: {e}"))
+}
+
+fn generate_random_node_id() -> NodeId {
+    let random_bytes: [u8; 32] = rand::random();
+    NodeId::new(&random_bytes)
+}

From 117f1e7e76872748989a05e3a9ac046e76d971a1 Mon Sep 17 00:00:00 2001
From: Domas Klimavicius
Date: Sun, 18 Jan 2026 19:48:34 +0200
Subject: [PATCH 13/23] feat: add discovery module export

---
 lean_client/networking/src/lib.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/lean_client/networking/src/lib.rs b/lean_client/networking/src/lib.rs
index 2a54c28..d8cd874 100644
--- a/lean_client/networking/src/lib.rs
+++ b/lean_client/networking/src/lib.rs
@@ -1,5 +1,6 @@
 pub mod bootnodes;
 pub mod compressor;
+pub mod discovery;
 pub mod gossipsub;
 pub mod network;
 pub mod req_resp;

From 91fb543fb8a1ceee6ccb124efb15b12770e4a96c Mon Sep 17 00:00:00 2001
From: Domas Klimavicius
Date: Sun, 18 Jan 2026 19:48:45 +0200
Subject: [PATCH 14/23] feat: add ENR bootnode support

---
 lean_client/networking/src/bootnodes.rs | 86 +++++++++++++++++++++++--
 1 file changed, 82 insertions(+), 4 deletions(-)

diff --git a/lean_client/networking/src/bootnodes.rs b/lean_client/networking/src/bootnodes.rs
index 264ec02..427f4ae 100644
--- a/lean_client/networking/src/bootnodes.rs
+++ b/lean_client/networking/src/bootnodes.rs
@@
-1,6 +1,11 @@ use std::sync::Arc; +use discv5::enr::CombinedKey; +use enr::Enr; use libp2p::Multiaddr; +use tracing::warn; + +use crate::discovery::{DiscoveryService, parse_enr}; pub trait BootnodeSource: Send + Sync { fn to_multiaddrs(&self) -> Vec; @@ -24,17 +29,90 @@ impl BootnodeSource for Arc<[Multiaddr]> { } } +#[derive(Debug, Clone)] +pub enum Bootnode { + Multiaddr(Multiaddr), + Enr(Enr), +} + +impl Bootnode { + pub fn parse(s: &str) -> Option { + if s.starts_with("enr:") { + match parse_enr(s) { + Ok(enr) => Some(Bootnode::Enr(enr)), + Err(e) => { + warn!(bootnode = s, error = ?e, "Failed to parse ENR bootnode"); + None + } + } + } else { + match s.parse::() { + Ok(addr) => Some(Bootnode::Multiaddr(addr)), + Err(e) => { + warn!(bootnode = s, error = ?e, "Failed to parse Multiaddr bootnode"); + None + } + } + } + } + + pub fn to_multiaddr(&self) -> Option { + match self { + Bootnode::Multiaddr(addr) => Some(addr.clone()), + Bootnode::Enr(enr) => DiscoveryService::enr_to_multiaddr(enr), + } + } + + pub fn as_enr(&self) -> Option<&Enr> { + match self { + Bootnode::Enr(enr) => Some(enr), + Bootnode::Multiaddr(_) => None, + } + } +} + #[derive(Debug, Clone, Default)] -pub struct StaticBootnodes(Vec); +pub struct StaticBootnodes { + multiaddrs: Vec, + enrs: Vec>, +} impl StaticBootnodes { - pub fn new>>(addrs: T) -> Self { - StaticBootnodes(addrs.into()) + pub fn new(bootnodes: Vec) -> Self { + let mut multiaddrs = Vec::new(); + let mut enrs = Vec::new(); + + for bootnode in bootnodes { + match bootnode { + Bootnode::Multiaddr(addr) => multiaddrs.push(addr), + Bootnode::Enr(enr) => { + // Convert ENR to multiaddr for libp2p connection + if let Some(addr) = DiscoveryService::enr_to_multiaddr(&enr) { + multiaddrs.push(addr); + } + enrs.push(enr); + } + } + } + + StaticBootnodes { multiaddrs, enrs } + } + + pub fn parse(bootnode_strs: &[String]) -> Self { + let bootnodes: Vec = bootnode_strs + .iter() + .filter_map(|s| Bootnode::parse(s)) + .collect(); + 
Self::new(bootnodes) + } + + pub fn enrs(&self) -> &[Enr] { + &self.enrs } } impl BootnodeSource for StaticBootnodes { fn to_multiaddrs(&self) -> Vec { - self.0.clone() + self.multiaddrs.clone() } } From c81b048bd18f12e80cc395b5a7631bfc4a2b0aa7 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:55 +0200 Subject: [PATCH 15/23] feat: integrate discovery into network service --- lean_client/networking/src/network/service.rs | 81 +++++++++++++++++-- 1 file changed, 75 insertions(+), 6 deletions(-) diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 8ae5729..daa735c 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -26,6 +26,7 @@ use tracing::{debug, info, trace, warn}; use crate::{ bootnodes::{BootnodeSource, StaticBootnodes}, compressor::Compressor, + discovery::{DiscoveryConfig, DiscoveryService}, gossipsub::{self, config::GossipsubConfig, message::GossipsubMessage, topic::GossipsubKind}, network::behaviour::{LeanNetworkBehaviour, LeanNetworkBehaviourEvent}, req_resp::{self, BLOCKS_BY_ROOT_PROTOCOL_V1, LeanRequest, ReqRespMessage, STATUS_PROTOCOL_V1}, @@ -39,6 +40,8 @@ pub struct NetworkServiceConfig { pub gossipsub_config: GossipsubConfig, pub socket_address: IpAddr, pub socket_port: u16, + pub discovery_port: u16, + pub discovery_enabled: bool, bootnodes: StaticBootnodes, } @@ -47,22 +50,26 @@ impl NetworkServiceConfig { gossipsub_config: GossipsubConfig, socket_address: IpAddr, socket_port: u16, + discovery_port: u16, + discovery_enabled: bool, bootnodes: Vec, ) -> Self { - let bootnodes = StaticBootnodes::new( - bootnodes - .iter() - .filter_map(|addr_str| addr_str.parse().ok()) - .collect::>(), - ); + let bootnodes = StaticBootnodes::parse(&bootnodes); NetworkServiceConfig { gossipsub_config, socket_address, socket_port, + discovery_port, + discovery_enabled, bootnodes, } } + + /// Get ENR bootnodes for discv5. 
+ pub fn enr_bootnodes(&self) -> Vec> { + self.bootnodes.enrs().to_vec() + } } #[derive(Debug)] @@ -83,6 +90,7 @@ where { network_config: Arc, swarm: Swarm, + discovery: Option, peer_table: Arc>>, peer_count: Arc, outbound_p2p_requests: R, @@ -147,9 +155,36 @@ where .with_swarm_config(|_| config) .build(); + let discovery = if network_config.discovery_enabled { + let discovery_config = DiscoveryConfig::new( + network_config.socket_address, + network_config.discovery_port, + network_config.socket_port, + ) + .with_bootnodes(network_config.enr_bootnodes()); + + match DiscoveryService::new(discovery_config, &local_key).await { + Ok(disc) => { + info!( + enr = %disc.local_enr(), + "Discovery service initialized" + ); + Some(disc) + } + Err(e) => { + warn!(error = ?e, "Failed to initialize discovery service, continuing without it"); + None + } + } + } else { + info!("Discovery service disabled"); + None + }; + let mut service = Self { network_config, swarm, + discovery, peer_table: Arc::new(Mutex::new(HashMap::new())), peer_count, outbound_p2p_requests, @@ -166,11 +201,24 @@ where // Periodic reconnect attempts to bootnodes let mut reconnect_interval = interval(Duration::from_secs(30)); reconnect_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + // Periodic discovery searches + let mut discovery_interval = interval(Duration::from_secs(30)); + discovery_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + loop { select! 
{ _ = reconnect_interval.tick() => { self.connect_to_peers(self.network_config.bootnodes.to_multiaddrs()).await; } + _ = discovery_interval.tick() => { + // Trigger active peer discovery + if let Some(ref discovery) = self.discovery { + let known_peers = discovery.connected_peers(); + debug!(known_peers, "Triggering random peer discovery lookup"); + discovery.find_random_peers(); + } + } request = self.outbound_p2p_requests.recv() => { if let Some(request) = request { self.dispatch_outbound_request(request).await; @@ -181,6 +229,23 @@ where info!(?event, "Swarm event"); } } + enr = async { + match &mut self.discovery { + Some(disc) => disc.recv().await, + None => std::future::pending().await, + } + } => { + if let Some(enr) = enr { + if let Some(multiaddr) = DiscoveryService::enr_to_multiaddr(&enr) { + info!( + node_id = %enr.node_id(), + %multiaddr, + "Discovered peer via discv5, attempting connection" + ); + self.connect_to_peers(vec![multiaddr]).await; + } + } + } } } } @@ -595,6 +660,10 @@ where *self.swarm.local_peer_id() } + pub fn local_enr(&self) -> Option<&enr::Enr> { + self.discovery.as_ref().map(|d| d.local_enr()) + } + pub fn swarm_mut(&mut self) -> &mut Swarm { &mut self.swarm } From 1987e788bcf2a16a3ea382207d82452f667fcfbb Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:49:07 +0200 Subject: [PATCH 16/23] feat: add discovery CLI arguments --- lean_client/src/main.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index ad2276e..e19e186 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -120,6 +120,12 @@ struct Args { #[arg(short, long, default_value_t = 8083)] port: u16, + #[arg(short, long, default_value_t = 8084)] + discovery_port: u16, + + #[arg(long, default_value_t = false)] + disable_discovery: bool, + #[arg(short, long)] bootnodes: Vec, @@ -297,10 +303,14 @@ async fn main() { let mut gossipsub_config = GossipsubConfig::new(); 
gossipsub_config.set_topics(gossipsub_topics); + let discovery_enabled = !args.disable_discovery; + let network_service_config = Arc::new(NetworkServiceConfig::new( gossipsub_config, args.address, args.port, + args.discovery_port, + discovery_enabled, args.bootnodes, )); From 605f812c71fc9b807fb2e9e5178059f3b0f14723 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:49:18 +0200 Subject: [PATCH 17/23] test: add discovery protocol tests --- lean_client/networking/src/discovery/tests.rs | 1422 +++++++++++++++++ 1 file changed, 1422 insertions(+) create mode 100644 lean_client/networking/src/discovery/tests.rs diff --git a/lean_client/networking/src/discovery/tests.rs b/lean_client/networking/src/discovery/tests.rs new file mode 100644 index 0000000..6566e29 --- /dev/null +++ b/lean_client/networking/src/discovery/tests.rs @@ -0,0 +1,1422 @@ +//! Tests for Discovery v5 Protocol Specification +//! +//! Based on the official Discovery v5 specification and test vectors from: +//! 
https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire-test-vectors.md + +use std::net::{Ipv4Addr, Ipv6Addr}; + +/// Protocol constants matching Discovery v5 specification +mod constants { + /// Protocol identifier + pub const PROTOCOL_ID: &[u8] = b"discv5"; + /// Protocol version (v5.1) + pub const PROTOCOL_VERSION: u16 = 0x0001; + /// Maximum request ID length in bytes + pub const MAX_REQUEST_ID_LENGTH: usize = 8; + /// K-bucket size per Kademlia standard + pub const K_BUCKET_SIZE: usize = 16; + /// Alpha (lookup concurrency) + pub const ALPHA: usize = 3; + /// Number of buckets for 256-bit node ID space + pub const BUCKET_COUNT: usize = 256; + /// Request timeout in seconds (spec: 500ms) + pub const REQUEST_TIMEOUT_SECS: f64 = 0.5; + /// Handshake timeout in seconds + pub const HANDSHAKE_TIMEOUT_SECS: f64 = 1.0; + /// Maximum ENRs per NODES response + pub const MAX_NODES_RESPONSE: usize = 16; + /// Bond expiry in seconds (24 hours) + pub const BOND_EXPIRY_SECS: u64 = 86400; + /// Maximum packet size + pub const MAX_PACKET_SIZE: usize = 1280; + /// Minimum packet size + pub const MIN_PACKET_SIZE: usize = 63; +} + +/// Packet type flags +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PacketFlag { + Message = 0, + WhoAreYou = 1, + Handshake = 2, +} + +/// Message type codes matching wire protocol spec +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum MessageType { + Ping = 0x01, + Pong = 0x02, + FindNode = 0x03, + Nodes = 0x04, + TalkReq = 0x05, + TalkResp = 0x06, + RegTopic = 0x07, + Ticket = 0x08, + RegConfirmation = 0x09, + TopicQuery = 0x0A, +} + +/// Request ID (variable length, max 8 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RequestId(pub Vec); + +impl RequestId { + pub fn new(data: Vec) -> Self { + assert!(data.len() <= constants::MAX_REQUEST_ID_LENGTH); + Self(data) + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// 
IPv4 address (4 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IPv4(pub [u8; 4]); + +impl IPv4 { + pub fn new(bytes: [u8; 4]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 4 + } +} + +/// IPv6 address (16 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IPv6(pub [u8; 16]); + +impl IPv6 { + pub fn new(bytes: [u8; 16]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 16 + } +} + +/// ID Nonce (16 bytes / 128 bits) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IdNonce(pub [u8; 16]); + +impl IdNonce { + pub fn new(bytes: [u8; 16]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 16 + } +} + +/// Nonce (12 bytes / 96 bits) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Nonce(pub [u8; 12]); + +impl Nonce { + pub fn new(bytes: [u8; 12]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 12 + } +} + +/// Distance type (u16) +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct Distance(pub u16); + +/// Port type (u16) +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Port(pub u16); + +/// ENR sequence number (u64) +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub struct SeqNumber(pub u64); + +/// Node ID (32 bytes / 256 bits) +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct NodeId(pub [u8; 32]); + +impl NodeId { + pub fn new(bytes: [u8; 32]) -> Self { + Self(bytes) + } + + pub fn from_slice(slice: &[u8]) -> Self { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(slice); + Self(bytes) + } +} + +/// Discovery configuration +#[derive(Debug, Clone)] +pub struct DiscoveryConfig { + pub k_bucket_size: usize, + pub alpha: usize, + pub request_timeout_secs: f64, + pub handshake_timeout_secs: f64, + pub max_nodes_response: usize, + pub bond_expiry_secs: u64, +} + +impl Default for DiscoveryConfig { + fn default() -> Self { + Self { + k_bucket_size: constants::K_BUCKET_SIZE, + alpha: constants::ALPHA, + request_timeout_secs: 
constants::REQUEST_TIMEOUT_SECS, + handshake_timeout_secs: constants::HANDSHAKE_TIMEOUT_SECS, + max_nodes_response: constants::MAX_NODES_RESPONSE, + bond_expiry_secs: constants::BOND_EXPIRY_SECS, + } + } +} + +/// PING message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Ping { + pub request_id: RequestId, + pub enr_seq: SeqNumber, +} + +/// PONG message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Pong { + pub request_id: RequestId, + pub enr_seq: SeqNumber, + pub recipient_ip: Vec, + pub recipient_port: Port, +} + +/// FINDNODE message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct FindNode { + pub request_id: RequestId, + pub distances: Vec, +} + +/// NODES message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Nodes { + pub request_id: RequestId, + pub total: u8, + pub enrs: Vec>, +} + +/// TALKREQ message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TalkReq { + pub request_id: RequestId, + pub protocol: Vec, + pub request: Vec, +} + +/// TALKRESP message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TalkResp { + pub request_id: RequestId, + pub response: Vec, +} + +/// Static header +#[derive(Debug, Clone)] +pub struct StaticHeader { + pub protocol_id: [u8; 6], + pub version: u16, + pub flag: u8, + pub nonce: Nonce, + pub authdata_size: u16, +} + +impl StaticHeader { + pub fn new(flag: u8, nonce: Nonce, authdata_size: u16) -> Self { + Self { + protocol_id: *b"discv5", + version: constants::PROTOCOL_VERSION, + flag, + nonce, + authdata_size, + } + } +} + +/// WHOAREYOU authdata +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WhoAreYouAuthdata { + pub id_nonce: IdNonce, + pub enr_seq: SeqNumber, +} + +/// Node entry in routing table +#[derive(Debug, Clone)] +pub struct NodeEntry { + pub node_id: NodeId, + pub enr_seq: SeqNumber, + pub last_seen: f64, + pub endpoint: Option, + pub verified: bool, +} + +impl NodeEntry { + pub fn new(node_id: NodeId) -> Self { + Self { + node_id, + enr_seq: SeqNumber::default(), + last_seen: 
0.0, + endpoint: None, + verified: false, + } + } + + pub fn with_enr_seq(mut self, enr_seq: SeqNumber) -> Self { + self.enr_seq = enr_seq; + self + } + + pub fn with_last_seen(mut self, last_seen: f64) -> Self { + self.last_seen = last_seen; + self + } + + pub fn with_endpoint(mut self, endpoint: String) -> Self { + self.endpoint = Some(endpoint); + self + } + + pub fn with_verified(mut self, verified: bool) -> Self { + self.verified = verified; + self + } +} + +/// K-bucket for storing nodes at a specific distance +#[derive(Debug, Clone, Default)] +pub struct KBucket { + nodes: Vec, +} + +impl KBucket { + pub fn new() -> Self { + Self { nodes: Vec::new() } + } + + pub fn is_empty(&self) -> bool { + self.nodes.is_empty() + } + + pub fn is_full(&self) -> bool { + self.nodes.len() >= constants::K_BUCKET_SIZE + } + + pub fn len(&self) -> usize { + self.nodes.len() + } + + pub fn add(&mut self, entry: NodeEntry) -> bool { + // Check if node already exists + if let Some(pos) = self.nodes.iter().position(|e| e.node_id == entry.node_id) { + // Move to tail (most recent) + self.nodes.remove(pos); + self.nodes.push(entry); + return true; + } + + // Reject if full + if self.is_full() { + return false; + } + + self.nodes.push(entry); + true + } + + pub fn remove(&mut self, node_id: &NodeId) -> bool { + if let Some(pos) = self.nodes.iter().position(|e| &e.node_id == node_id) { + self.nodes.remove(pos); + true + } else { + false + } + } + + pub fn contains(&self, node_id: &NodeId) -> bool { + self.nodes.iter().any(|e| &e.node_id == node_id) + } + + pub fn get(&self, node_id: &NodeId) -> Option<&NodeEntry> { + self.nodes.iter().find(|e| &e.node_id == node_id) + } + + pub fn head(&self) -> Option<&NodeEntry> { + self.nodes.first() + } + + pub fn tail(&self) -> Option<&NodeEntry> { + self.nodes.last() + } + + pub fn iter(&self) -> impl Iterator { + self.nodes.iter() + } +} + +/// Calculate XOR distance between two node IDs +pub fn xor_distance(a: &NodeId, b: &NodeId) -> 
num_bigint::BigUint { + use num_bigint::BigUint; + + let a_int = BigUint::from_bytes_be(&a.0); + let b_int = BigUint::from_bytes_be(&b.0); + a_int ^ b_int +} + +/// Calculate log2 distance between two node IDs +pub fn log2_distance(a: &NodeId, b: &NodeId) -> Distance { + let xor = xor_distance(a, b); + if xor.bits() == 0 { + Distance(0) + } else { + Distance(xor.bits() as u16) + } +} + +/// Kademlia routing table +pub struct RoutingTable { + local_id: NodeId, + pub buckets: Vec, +} + +impl RoutingTable { + pub fn new(local_id: NodeId) -> Self { + let buckets = (0..constants::BUCKET_COUNT) + .map(|_| KBucket::new()) + .collect(); + Self { local_id, buckets } + } + + pub fn node_count(&self) -> usize { + self.buckets.iter().map(|b| b.len()).sum() + } + + pub fn bucket_index(&self, node_id: &NodeId) -> usize { + let dist = log2_distance(&self.local_id, node_id); + if dist.0 == 0 { + 0 + } else { + (dist.0 - 1) as usize + } + } + + pub fn add(&mut self, entry: NodeEntry) -> bool { + // Cannot add self + if entry.node_id == self.local_id { + return false; + } + + let idx = self.bucket_index(&entry.node_id); + self.buckets[idx].add(entry) + } + + pub fn remove(&mut self, node_id: &NodeId) -> bool { + let idx = self.bucket_index(node_id); + self.buckets[idx].remove(node_id) + } + + pub fn contains(&self, node_id: &NodeId) -> bool { + let idx = self.bucket_index(node_id); + self.buckets[idx].contains(node_id) + } + + pub fn get(&self, node_id: &NodeId) -> Option<&NodeEntry> { + let idx = self.bucket_index(node_id); + self.buckets[idx].get(node_id) + } + + pub fn closest_nodes(&self, target: &NodeId, count: usize) -> Vec<&NodeEntry> { + let mut all_nodes: Vec<&NodeEntry> = self + .buckets + .iter() + .flat_map(|b| b.iter()) + .collect(); + + all_nodes.sort_by(|a, b| { + let dist_a = xor_distance(&a.node_id, target); + let dist_b = xor_distance(&b.node_id, target); + dist_a.cmp(&dist_b) + }); + + all_nodes.into_iter().take(count).collect() + } + + pub fn 
nodes_at_distance(&self, distance: Distance) -> Vec<&NodeEntry> { + if distance.0 == 0 || distance.0 > 256 { + return Vec::new(); + } + + let idx = (distance.0 - 1) as usize; + self.buckets[idx].iter().collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use num_bigint::BigUint; + use num_traits::One; + + // ============================================================ + // Protocol Constants Tests + // ============================================================ + + mod protocol_constants { + use super::*; + + #[test] + fn test_protocol_id() { + assert_eq!(constants::PROTOCOL_ID, b"discv5"); + assert_eq!(constants::PROTOCOL_ID.len(), 6); + } + + #[test] + fn test_protocol_version() { + assert_eq!(constants::PROTOCOL_VERSION, 0x0001); + } + + #[test] + fn test_max_request_id_length() { + assert_eq!(constants::MAX_REQUEST_ID_LENGTH, 8); + } + + #[test] + fn test_k_bucket_size() { + assert_eq!(constants::K_BUCKET_SIZE, 16); + } + + #[test] + fn test_alpha_concurrency() { + assert_eq!(constants::ALPHA, 3); + } + + #[test] + fn test_bucket_count() { + assert_eq!(constants::BUCKET_COUNT, 256); + } + + #[test] + fn test_request_timeout() { + assert!((constants::REQUEST_TIMEOUT_SECS - 0.5).abs() < f64::EPSILON); + } + + #[test] + fn test_handshake_timeout() { + assert!((constants::HANDSHAKE_TIMEOUT_SECS - 1.0).abs() < f64::EPSILON); + } + + #[test] + fn test_max_nodes_response() { + assert_eq!(constants::MAX_NODES_RESPONSE, 16); + } + + #[test] + fn test_bond_expiry() { + assert_eq!(constants::BOND_EXPIRY_SECS, 86400); + } + + #[test] + fn test_packet_size_limits() { + assert_eq!(constants::MAX_PACKET_SIZE, 1280); + assert_eq!(constants::MIN_PACKET_SIZE, 63); + } + } + + // ============================================================ + // Custom Types Tests + // ============================================================ + + mod custom_types { + use super::*; + + #[test] + fn test_request_id_limit() { + let req_id = RequestId::new(vec![0x01, 0x02, 0x03, 0x04, 
0x05, 0x06, 0x07, 0x08]); + assert_eq!(req_id.len(), 8); + } + + #[test] + fn test_request_id_variable_length() { + let req_id = RequestId::new(vec![0x01]); + assert_eq!(req_id.len(), 1); + } + + #[test] + fn test_ipv4_length() { + let ip = IPv4::new([0xc0, 0xa8, 0x01, 0x01]); // 192.168.1.1 + assert_eq!(ip.len(), 4); + } + + #[test] + fn test_ipv6_length() { + let mut bytes = [0u8; 16]; + bytes[15] = 0x01; // ::1 + let ip = IPv6::new(bytes); + assert_eq!(ip.len(), 16); + } + + #[test] + fn test_id_nonce_length() { + let nonce = IdNonce::new([0x01; 16]); + assert_eq!(nonce.len(), 16); + } + + #[test] + fn test_nonce_length() { + let nonce = Nonce::new([0x01; 12]); + assert_eq!(nonce.len(), 12); + } + + #[test] + fn test_distance_type() { + let d = Distance(256); + assert_eq!(d.0, 256u16); + } + + #[test] + fn test_port_type() { + let p = Port(30303); + assert_eq!(p.0, 30303u16); + } + + #[test] + fn test_enr_seq_type() { + let seq = SeqNumber(42); + assert_eq!(seq.0, 42u64); + } + } + + // ============================================================ + // Packet Flag Tests + // ============================================================ + + mod packet_flags { + use super::*; + + #[test] + fn test_message_flag() { + assert_eq!(PacketFlag::Message as u8, 0); + } + + #[test] + fn test_whoareyou_flag() { + assert_eq!(PacketFlag::WhoAreYou as u8, 1); + } + + #[test] + fn test_handshake_flag() { + assert_eq!(PacketFlag::Handshake as u8, 2); + } + } + + // ============================================================ + // Message Types Tests + // ============================================================ + + mod message_types { + use super::*; + + #[test] + fn test_ping_type() { + assert_eq!(MessageType::Ping as u8, 0x01); + } + + #[test] + fn test_pong_type() { + assert_eq!(MessageType::Pong as u8, 0x02); + } + + #[test] + fn test_findnode_type() { + assert_eq!(MessageType::FindNode as u8, 0x03); + } + + #[test] + fn test_nodes_type() { + assert_eq!(MessageType::Nodes 
as u8, 0x04); + } + + #[test] + fn test_talkreq_type() { + assert_eq!(MessageType::TalkReq as u8, 0x05); + } + + #[test] + fn test_talkresp_type() { + assert_eq!(MessageType::TalkResp as u8, 0x06); + } + + #[test] + fn test_experimental_types() { + assert_eq!(MessageType::RegTopic as u8, 0x07); + assert_eq!(MessageType::Ticket as u8, 0x08); + assert_eq!(MessageType::RegConfirmation as u8, 0x09); + assert_eq!(MessageType::TopicQuery as u8, 0x0A); + } + } + + // ============================================================ + // Discovery Config Tests + // ============================================================ + + mod discovery_config { + use super::*; + + #[test] + fn test_default_values() { + let config = DiscoveryConfig::default(); + + assert_eq!(config.k_bucket_size, constants::K_BUCKET_SIZE); + assert_eq!(config.alpha, constants::ALPHA); + assert!((config.request_timeout_secs - constants::REQUEST_TIMEOUT_SECS).abs() < f64::EPSILON); + assert!((config.handshake_timeout_secs - constants::HANDSHAKE_TIMEOUT_SECS).abs() < f64::EPSILON); + assert_eq!(config.max_nodes_response, constants::MAX_NODES_RESPONSE); + assert_eq!(config.bond_expiry_secs, constants::BOND_EXPIRY_SECS); + } + + #[test] + fn test_custom_values() { + let config = DiscoveryConfig { + k_bucket_size: 8, + alpha: 5, + request_timeout_secs: 2.0, + ..Default::default() + }; + assert_eq!(config.k_bucket_size, 8); + assert_eq!(config.alpha, 5); + assert!((config.request_timeout_secs - 2.0).abs() < f64::EPSILON); + } + } + + // ============================================================ + // Ping Message Tests + // ============================================================ + + mod ping_message { + use super::*; + + #[test] + fn test_creation_with_types() { + let ping = Ping { + request_id: RequestId::new(vec![0x00, 0x00, 0x00, 0x01]), + enr_seq: SeqNumber(2), + }; + + assert_eq!(ping.request_id.0, vec![0x00, 0x00, 0x00, 0x01]); + assert_eq!(ping.enr_seq, SeqNumber(2)); + } + + #[test] + fn 
test_max_request_id_length() { + let ping = Ping { + request_id: RequestId::new(vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), + enr_seq: SeqNumber(1), + }; + assert_eq!(ping.request_id.len(), 8); + } + } + + // ============================================================ + // Pong Message Tests + // ============================================================ + + mod pong_message { + use super::*; + + #[test] + fn test_creation_ipv4() { + let pong = Pong { + request_id: RequestId::new(vec![0x00, 0x00, 0x00, 0x01]), + enr_seq: SeqNumber(42), + recipient_ip: vec![0xc0, 0xa8, 0x01, 0x01], // 192.168.1.1 + recipient_port: Port(9000), + }; + + assert_eq!(pong.enr_seq, SeqNumber(42)); + assert_eq!(pong.recipient_ip.len(), 4); + assert_eq!(pong.recipient_port, Port(9000)); + } + + #[test] + fn test_creation_ipv6() { + let mut ipv6 = vec![0u8; 16]; + ipv6[15] = 0x01; // ::1 + let pong = Pong { + request_id: RequestId::new(vec![0x01]), + enr_seq: SeqNumber(1), + recipient_ip: ipv6.clone(), + recipient_port: Port(30303), + }; + + assert_eq!(pong.recipient_ip.len(), 16); + } + } + + // ============================================================ + // FindNode Message Tests + // ============================================================ + + mod findnode_message { + use super::*; + + #[test] + fn test_single_distance() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(256)], + }; + + assert_eq!(findnode.distances, vec![Distance(256)]); + } + + #[test] + fn test_multiple_distances() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(0), Distance(1), Distance(255), Distance(256)], + }; + + assert!(findnode.distances.contains(&Distance(0))); + assert!(findnode.distances.contains(&Distance(256))); + } + + #[test] + fn test_distance_zero_returns_self() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(0)], + }; + 
assert_eq!(findnode.distances, vec![Distance(0)]); + } + } + + // ============================================================ + // Nodes Message Tests + // ============================================================ + + mod nodes_message { + use super::*; + + #[test] + fn test_single_response() { + let nodes = Nodes { + request_id: RequestId::new(vec![0x01]), + total: 1, + enrs: vec![b"enr:-example".to_vec()], + }; + + assert_eq!(nodes.total, 1); + assert_eq!(nodes.enrs.len(), 1); + } + + #[test] + fn test_multiple_responses() { + let nodes = Nodes { + request_id: RequestId::new(vec![0x01]), + total: 3, + enrs: vec![b"enr1".to_vec(), b"enr2".to_vec()], + }; + + assert_eq!(nodes.total, 3); + assert_eq!(nodes.enrs.len(), 2); + } + } + + // ============================================================ + // TalkReq Message Tests + // ============================================================ + + mod talkreq_message { + use super::*; + + #[test] + fn test_creation() { + let req = TalkReq { + request_id: RequestId::new(vec![0x01]), + protocol: b"portal".to_vec(), + request: b"payload".to_vec(), + }; + + assert_eq!(req.protocol, b"portal".to_vec()); + assert_eq!(req.request, b"payload".to_vec()); + } + } + + // ============================================================ + // TalkResp Message Tests + // ============================================================ + + mod talkresp_message { + use super::*; + + #[test] + fn test_creation() { + let resp = TalkResp { + request_id: RequestId::new(vec![0x01]), + response: b"response_data".to_vec(), + }; + + assert_eq!(resp.response, b"response_data".to_vec()); + } + + #[test] + fn test_empty_response_unknown_protocol() { + let resp = TalkResp { + request_id: RequestId::new(vec![0x01]), + response: Vec::new(), + }; + assert!(resp.response.is_empty()); + } + } + + // ============================================================ + // Static Header Tests + // ============================================================ + + mod 
static_header { + use super::*; + + #[test] + fn test_default_protocol_id() { + let header = StaticHeader::new(0, Nonce::new([0x00; 12]), 32); + + assert_eq!(&header.protocol_id, b"discv5"); + assert_eq!(header.version, 0x0001); + } + + #[test] + fn test_flag_values() { + for flag in [0u8, 1, 2] { + let header = StaticHeader::new(flag, Nonce::new([0xff; 12]), 32); + assert_eq!(header.flag, flag); + } + } + } + + // ============================================================ + // WhoAreYou Authdata Tests + // ============================================================ + + mod whoareyou_authdata { + use super::*; + + #[test] + fn test_creation() { + let id_nonce_bytes: [u8; 16] = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + ]; + let authdata = WhoAreYouAuthdata { + id_nonce: IdNonce::new(id_nonce_bytes), + enr_seq: SeqNumber(0), + }; + + assert_eq!(authdata.id_nonce.len(), 16); + assert_eq!(authdata.enr_seq, SeqNumber(0)); + } + } + + // ============================================================ + // XOR Distance Tests + // ============================================================ + + mod xor_distance_tests { + use super::*; + + #[test] + fn test_identical_ids_zero_distance() { + let node_id = NodeId::new([0x00; 32]); + assert_eq!(xor_distance(&node_id, &node_id), BigUint::from(0u32)); + } + + #[test] + fn test_complementary_ids_max_distance() { + let a = NodeId::new([0x00; 32]); + let b = NodeId::new([0xff; 32]); + let expected = (BigUint::one() << 256) - BigUint::one(); + assert_eq!(xor_distance(&a, &b), expected); + } + + #[test] + fn test_distance_is_symmetric() { + let a = NodeId::new([0x12; 32]); + let b = NodeId::new([0x34; 32]); + assert_eq!(xor_distance(&a, &b), xor_distance(&b, &a)); + } + + #[test] + fn test_specific_xor_values() { + let mut a_bytes = [0x00; 32]; + a_bytes[31] = 0x05; // 5 + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x03; // 3 + let a = NodeId::new(a_bytes); + let b = 
NodeId::new(b_bytes); + assert_eq!(xor_distance(&a, &b), BigUint::from(6u32)); // 5 XOR 3 = 6 + } + } + + // ============================================================ + // Log2 Distance Tests + // ============================================================ + + mod log2_distance_tests { + use super::*; + + #[test] + fn test_identical_ids_return_zero() { + let node_id = NodeId::new([0x00; 32]); + assert_eq!(log2_distance(&node_id, &node_id), Distance(0)); + } + + #[test] + fn test_single_bit_difference() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x01; + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(1)); + } + + #[test] + fn test_high_bit_difference() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x80; // 0b10000000 + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(8)); + } + + #[test] + fn test_maximum_distance() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[0] = 0x80; // High bit of first byte set + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(256)); + } + } + + // ============================================================ + // K-Bucket Tests + // ============================================================ + + mod kbucket_tests { + use super::*; + + #[test] + fn test_new_bucket_is_empty() { + let bucket = KBucket::new(); + + assert!(bucket.is_empty()); + assert!(!bucket.is_full()); + assert_eq!(bucket.len(), 0); + } + + #[test] + fn test_add_single_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + + assert!(bucket.add(entry)); + assert_eq!(bucket.len(), 1); + assert!(bucket.contains(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_bucket_capacity_limit() { + let mut bucket = KBucket::new(); + + for i in 0..constants::K_BUCKET_SIZE { + let mut bytes = [0x00; 32]; + bytes[0] = i as u8; + let entry = 
NodeEntry::new(NodeId::new(bytes)); + assert!(bucket.add(entry)); + } + + assert!(bucket.is_full()); + assert_eq!(bucket.len(), constants::K_BUCKET_SIZE); + + let extra = NodeEntry::new(NodeId::new([0xff; 32])); + assert!(!bucket.add(extra)); + assert_eq!(bucket.len(), constants::K_BUCKET_SIZE); + } + + #[test] + fn test_update_moves_to_tail() { + let mut bucket = KBucket::new(); + + let entry1 = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(1)); + let entry2 = NodeEntry::new(NodeId::new([0x02; 32])).with_enr_seq(SeqNumber(1)); + bucket.add(entry1); + bucket.add(entry2); + + let updated = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(2)); + bucket.add(updated); + + let tail = bucket.tail().unwrap(); + assert_eq!(tail.node_id, NodeId::new([0x01; 32])); + assert_eq!(tail.enr_seq, SeqNumber(2)); + } + + #[test] + fn test_remove_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + bucket.add(entry); + + assert!(bucket.remove(&NodeId::new([0x01; 32]))); + assert!(bucket.is_empty()); + assert!(!bucket.contains(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_remove_nonexistent_returns_false() { + let mut bucket = KBucket::new(); + assert!(!bucket.remove(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_get_existing_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(42)); + bucket.add(entry); + + let retrieved = bucket.get(&NodeId::new([0x01; 32])).unwrap(); + assert_eq!(retrieved.enr_seq, SeqNumber(42)); + } + + #[test] + fn test_get_nonexistent_returns_none() { + let bucket = KBucket::new(); + assert!(bucket.get(&NodeId::new([0x01; 32])).is_none()); + } + + #[test] + fn test_head_returns_oldest() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let head = bucket.head().unwrap(); + assert_eq!(head.node_id, 
NodeId::new([0x01; 32])); + } + + #[test] + fn test_tail_returns_newest() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let tail = bucket.tail().unwrap(); + assert_eq!(tail.node_id, NodeId::new([0x02; 32])); + } + + #[test] + fn test_iteration() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let node_ids: Vec<_> = bucket.iter().map(|e| e.node_id.clone()).collect(); + assert_eq!(node_ids.len(), 2); + } + } + + // ============================================================ + // Routing Table Tests + // ============================================================ + + mod routing_table_tests { + use super::*; + + #[test] + fn test_new_table_is_empty() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert_eq!(table.node_count(), 0); + } + + #[test] + fn test_has_256_buckets() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert_eq!(table.buckets.len(), constants::BUCKET_COUNT); + } + + #[test] + fn test_add_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + node_bytes[31] = 0x01; + let entry = NodeEntry::new(NodeId::new(node_bytes)); + assert!(table.add(entry.clone())); + assert_eq!(table.node_count(), 1); + assert!(table.contains(&entry.node_id)); + } + + #[test] + fn test_cannot_add_self() { + let local_id = NodeId::new([0xab; 32]); + let mut table = RoutingTable::new(local_id.clone()); + + let entry = NodeEntry::new(local_id); + assert!(!table.add(entry)); + assert_eq!(table.node_count(), 0); + } + + #[test] + fn test_bucket_assignment_by_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + 
node_bytes[31] = 0x01; // log2 distance = 1 + let node_id = NodeId::new(node_bytes); + let entry = NodeEntry::new(node_id.clone()); + table.add(entry); + + let bucket_idx = table.bucket_index(&node_id); + assert_eq!(bucket_idx, 0); // distance 1 -> bucket 0 + assert!(table.buckets[0].contains(&node_id)); + } + + #[test] + fn test_get_existing_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let entry = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(99)); + let node_id = entry.node_id.clone(); + table.add(entry); + + let retrieved = table.get(&node_id).unwrap(); + assert_eq!(retrieved.enr_seq, SeqNumber(99)); + } + + #[test] + fn test_remove_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + let node_id = entry.node_id.clone(); + table.add(entry); + assert!(table.remove(&node_id)); + assert!(!table.contains(&node_id)); + } + + #[test] + fn test_closest_nodes_sorted_by_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + for i in 1..5u8 { + let mut bytes = [0x00; 32]; + bytes[0] = i; + let entry = NodeEntry::new(NodeId::new(bytes)); + table.add(entry); + } + + let mut target_bytes = [0x00; 32]; + target_bytes[0] = 0x01; + let target = NodeId::new(target_bytes); + let closest = table.closest_nodes(&target, 3); + + assert_eq!(closest.len(), 3); + assert_eq!(closest[0].node_id, target); // Distance 0 to itself + } + + #[test] + fn test_closest_nodes_respects_count() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + for i in 1..11u8 { + let mut bytes = [0x00; 32]; + bytes[0] = i; + let entry = NodeEntry::new(NodeId::new(bytes)); + table.add(entry); + } + + let mut target_bytes = [0x00; 32]; + target_bytes[0] = 0x05; + let closest = table.closest_nodes(&NodeId::new(target_bytes), 3); + 
assert_eq!(closest.len(), 3); + } + + #[test] + fn test_nodes_at_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + node_bytes[31] = 0x01; // distance 1 + let node_id = NodeId::new(node_bytes); + let entry = NodeEntry::new(node_id.clone()); + table.add(entry); + + let nodes = table.nodes_at_distance(Distance(1)); + assert_eq!(nodes.len(), 1); + assert_eq!(nodes[0].node_id, node_id); + } + + #[test] + fn test_nodes_at_invalid_distance() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert!(table.nodes_at_distance(Distance(0)).is_empty()); + assert!(table.nodes_at_distance(Distance(257)).is_empty()); + } + } + + // ============================================================ + // Node Entry Tests + // ============================================================ + + mod node_entry_tests { + use super::*; + + #[test] + fn test_default_values() { + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + + assert_eq!(entry.node_id, NodeId::new([0x01; 32])); + assert_eq!(entry.enr_seq, SeqNumber(0)); + assert!((entry.last_seen - 0.0).abs() < f64::EPSILON); + assert!(entry.endpoint.is_none()); + assert!(!entry.verified); + } + + #[test] + fn test_full_construction() { + let entry = NodeEntry::new(NodeId::new([0x01; 32])) + .with_enr_seq(SeqNumber(42)) + .with_last_seen(1234567890.0) + .with_endpoint("192.168.1.1:30303".to_string()) + .with_verified(true); + + assert_eq!(entry.enr_seq, SeqNumber(42)); + assert_eq!(entry.endpoint, Some("192.168.1.1:30303".to_string())); + assert!(entry.verified); + } + } + + // ============================================================ + // Test Vector Tests + // ============================================================ + + mod test_vectors { + use super::*; + + // From https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire-test-vectors.md + const PING_REQUEST_ID: [u8; 4] = [0x00, 0x00, 0x00, 
0x01]; + const PING_ENR_SEQ: u64 = 2; + const WHOAREYOU_ID_NONCE: [u8; 16] = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + ]; + + #[test] + fn test_ping_message_construction() { + let ping = Ping { + request_id: RequestId::new(PING_REQUEST_ID.to_vec()), + enr_seq: SeqNumber(PING_ENR_SEQ), + }; + + assert_eq!(ping.request_id.0, PING_REQUEST_ID.to_vec()); + assert_eq!(ping.enr_seq, SeqNumber(2)); + } + + #[test] + fn test_whoareyou_authdata_construction() { + let authdata = WhoAreYouAuthdata { + id_nonce: IdNonce::new(WHOAREYOU_ID_NONCE), + enr_seq: SeqNumber(0), + }; + + assert_eq!(authdata.id_nonce, IdNonce::new(WHOAREYOU_ID_NONCE)); + assert_eq!(authdata.enr_seq, SeqNumber(0)); + } + + #[test] + fn test_plaintext_message_type() { + // From AES-GCM test vector plaintext + let plaintext = hex::decode("01c20101").unwrap(); + assert_eq!(plaintext[0], MessageType::Ping as u8); + } + } + + // ============================================================ + // Packet Structure Tests + // ============================================================ + + mod packet_structure { + #[test] + fn test_static_header_size() { + // protocol-id (6) + version (2) + flag (1) + nonce (12) + authdata-size (2) + let expected_size = 6 + 2 + 1 + 12 + 2; + assert_eq!(expected_size, 23); + } + } + + // ============================================================ + // Routing with Test Vector Node IDs + // ============================================================ + + mod routing_test_vectors { + use super::*; + + // Node IDs from official test vectors (keccak256 of uncompressed pubkey) + fn node_a_id() -> NodeId { + NodeId::from_slice(&hex::decode("aaaa8419e9f49d0083561b48287df592939a8d19947d8c0ef88f2a4856a69fbb").unwrap()) + } + + fn node_b_id() -> NodeId { + NodeId::from_slice(&hex::decode("bbbb9d047f0488c0b5a93c1c3f2d8bafc7c8ff337024a55434a0d0555de64db9").unwrap()) + } + + #[test] + fn test_xor_distance_is_symmetric() { + let 
node_a = node_a_id(); + let node_b = node_b_id(); + + let distance = xor_distance(&node_a, &node_b); + assert!(distance > BigUint::from(0u32)); + assert_eq!(xor_distance(&node_a, &node_b), xor_distance(&node_b, &node_a)); + } + + #[test] + fn test_log2_distance_is_high() { + let node_a = node_a_id(); + let node_b = node_b_id(); + + let log_dist = log2_distance(&node_a, &node_b); + assert!(log_dist > Distance(200)); + } + } +} From 6e3040d4e1d6094e5edc9ebf2b43341601ae4eaf Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:26:35 +0200 Subject: [PATCH 18/23] fix: update outdated readme --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5028b8f..d473719 100644 --- a/README.md +++ b/README.md @@ -23,12 +23,13 @@ leanEthereum Consensus Client written in Rust using Grandine's libraries. Run in debug mode via terminal (with XMSS signing): ``` RUST_LOG=info ./target/release/lean_client \ - --genesis ../lean-quickstart/local-devnet/genesis/config.yaml \ - --validator-registry-path ../lean-quickstart/local-devnet/genesis/validators.yaml \ - --hash-sig-key-dir ../lean-quickstart/local-devnet/genesis/hash-sig-keys \ + --genesis ../../lean-quickstart/local-devnet/genesis/config.yaml \ + --validator-registry-path ../../lean-quickstart/local-devnet/genesis/validators.yaml \ + --hash-sig-key-dir ../../lean-quickstart/local-devnet/genesis/hash-sig-keys \ --node-id qlean_0 \ - --node-key ../lean-quickstart/local-devnet/genesis/qlean_0.key \ + --node-key ../../lean-quickstart/local-devnet/genesis/qlean_0.key \ --port 9003 \ + --disable-discovery --bootnodes "/ip4/127.0.0.1/udp/9001/quic-v1/p2p/16Uiu2HAkvi2sxT75Bpq1c7yV2FjnSQJJ432d6jeshbmfdJss1i6f" \ --bootnodes "/ip4/127.0.0.1/udp/9002/quic-v1/p2p/16Uiu2HAmPQhkD6Zg5Co2ee8ShshkiY4tDePKFARPpCS2oKSLj1E1" \ --bootnodes "/ip4/127.0.0.1/udp/9004/quic-v1/p2p/16Uiu2HAm7TYVs6qvDKnrovd9m4vvRikc4HPXm1WyLumKSe5fHxBv" From 
a3574808ff6a570faaf683261459e9cdbbc0dc20 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:49:01 +0200 Subject: [PATCH 19/23] feat: update README.md to include instructions for testing discovery --- README.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/README.md b/README.md index d473719..81627d1 100644 --- a/README.md +++ b/README.md @@ -35,3 +35,33 @@ leanEthereum Consensus Client written in Rust using Grandine's libraries. --bootnodes "/ip4/127.0.0.1/udp/9004/quic-v1/p2p/16Uiu2HAm7TYVs6qvDKnrovd9m4vvRikc4HPXm1WyLumKSe5fHxBv" ``` 4. Leave client running for a few minutes and observe warnings, errors, check if blocks are being justified and finalized (don't need debug mode for this last one) + +## Testing discovery + +1. Start the bootnode + + Run in the terminal: + ``` + RUST_LOG=info cargo run --features devnet2 -- \ + --port 9000 \ + --discovery-port 9100 + ``` + +2. Start the other nodes + + Run in the terminal: + ``` + RUST_LOG=info cargo run --features devnet2 -- \ + --port 9001 \ + --discovery-port 9101 \ + --bootnodes "" + ``` + + ``` + RUST_LOG=info cargo run --features devnet2 -- \ + --port 9002 \ + --discovery-port 9102 \ + --bootnodes "" + ``` + +After a minute all the nodes should be synced up and see each other From 94ffee89995195efc675c3e1e8c15da4e43ec26b Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:51:54 +0200 Subject: [PATCH 20/23] fix: update README.md to build the client --- README.md | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 81627d1..ea3b63b 100644 --- a/README.md +++ b/README.md @@ -38,27 +38,33 @@ leanEthereum Consensus Client written in Rust using Grandine's libraries. ## Testing discovery -1. Start the bootnode +1. Build the client: + ```bash + cd lean_client/ + cargo build --release + ``` + +2. 
Start the bootnode Run in the terminal: ``` - RUST_LOG=info cargo run --features devnet2 -- \ + RUST_LOG=info ./target/release/lean_client \ --port 9000 \ --discovery-port 9100 ``` -2. Start the other nodes +3. Start the other nodes Run in the terminal: ``` - RUST_LOG=info cargo run --features devnet2 -- \ + RUST_LOG=info ./target/release/lean_client \ --port 9001 \ --discovery-port 9101 \ --bootnodes "" ``` ``` - RUST_LOG=info cargo run --features devnet2 -- \ + RUST_LOG=info ./target/release/lean_client \ --port 9002 \ --discovery-port 9102 \ --bootnodes "" From ddbff9c416db274b6cf2ee25d3c8ac9f12585b5e Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:59:33 +0200 Subject: [PATCH 21/23] fix: format files --- lean_client/networking/src/discovery/mod.rs | 22 +++++++++-- lean_client/networking/src/discovery/tests.rs | 39 ++++++++++++------- 2 files changed, 43 insertions(+), 18 deletions(-) diff --git a/lean_client/networking/src/discovery/mod.rs b/lean_client/networking/src/discovery/mod.rs index d0b67db..7ee532b 100644 --- a/lean_client/networking/src/discovery/mod.rs +++ b/lean_client/networking/src/discovery/mod.rs @@ -29,7 +29,12 @@ impl DiscoveryService { pub async fn new(config: DiscoveryConfig, keypair: &Keypair) -> Result { let enr_key = keypair_to_enr_key(keypair)?; - let local_enr = build_enr(&enr_key, config.listen_address, config.udp_port, config.libp2p_port)?; + let local_enr = build_enr( + &enr_key, + config.listen_address, + config.udp_port, + config.libp2p_port, + )?; info!( enr = %local_enr, @@ -118,7 +123,10 @@ impl DiscoveryService { } pub fn enr_to_multiaddr(enr: &Enr) -> Option { - let ip = enr.ip4().map(IpAddr::V4).or_else(|| enr.ip6().map(IpAddr::V6))?; + let ip = enr + .ip4() + .map(IpAddr::V4) + .or_else(|| enr.ip6().map(IpAddr::V6))?; let libp2p_port = enr.tcp4().or_else(|| enr.tcp6())?; let peer_id = enr_to_peer_id(enr)?; @@ -171,7 +179,12 @@ fn keypair_to_enr_key(keypair: &Keypair) -> Result { } } -fn 
build_enr(key: &CombinedKey, ip: IpAddr, udp_port: u16, libp2p_port: u16) -> Result> { +fn build_enr( + key: &CombinedKey, + ip: IpAddr, + udp_port: u16, + libp2p_port: u16, +) -> Result> { let mut builder = EnrBuilder::default(); // libp2p port is stored in tcp field, since Enr doesn't have a field for a quic port @@ -199,7 +212,8 @@ fn enr_to_peer_id(enr: &Enr) -> Option { match public_key { discv5::enr::CombinedPublicKey::Secp256k1(pk) => { let compressed = pk.to_sec1_bytes(); - let libp2p_pk = libp2p_identity::secp256k1::PublicKey::try_from_bytes(&compressed).ok()?; + let libp2p_pk = + libp2p_identity::secp256k1::PublicKey::try_from_bytes(&compressed).ok()?; let public = libp2p_identity::PublicKey::from(libp2p_pk); Some(PeerId::from_public_key(&public)) } diff --git a/lean_client/networking/src/discovery/tests.rs b/lean_client/networking/src/discovery/tests.rs index 6566e29..8bdbf82 100644 --- a/lean_client/networking/src/discovery/tests.rs +++ b/lean_client/networking/src/discovery/tests.rs @@ -445,11 +445,7 @@ impl RoutingTable { } pub fn closest_nodes(&self, target: &NodeId, count: usize) -> Vec<&NodeEntry> { - let mut all_nodes: Vec<&NodeEntry> = self - .buckets - .iter() - .flat_map(|b| b.iter()) - .collect(); + let mut all_nodes: Vec<&NodeEntry> = self.buckets.iter().flat_map(|b| b.iter()).collect(); all_nodes.sort_by(|a, b| { let dist_a = xor_distance(&a.node_id, target); @@ -687,8 +683,14 @@ mod tests { assert_eq!(config.k_bucket_size, constants::K_BUCKET_SIZE); assert_eq!(config.alpha, constants::ALPHA); - assert!((config.request_timeout_secs - constants::REQUEST_TIMEOUT_SECS).abs() < f64::EPSILON); - assert!((config.handshake_timeout_secs - constants::HANDSHAKE_TIMEOUT_SECS).abs() < f64::EPSILON); + assert!( + (config.request_timeout_secs - constants::REQUEST_TIMEOUT_SECS).abs() + < f64::EPSILON + ); + assert!( + (config.handshake_timeout_secs - constants::HANDSHAKE_TIMEOUT_SECS).abs() + < f64::EPSILON + ); assert_eq!(config.max_nodes_response, 
constants::MAX_NODES_RESPONSE); assert_eq!(config.bond_expiry_secs, constants::BOND_EXPIRY_SECS); } @@ -922,8 +924,8 @@ mod tests { #[test] fn test_creation() { let id_nonce_bytes: [u8; 16] = [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, ]; let authdata = WhoAreYouAuthdata { id_nonce: IdNonce::new(id_nonce_bytes), @@ -1337,8 +1339,8 @@ mod tests { const PING_REQUEST_ID: [u8; 4] = [0x00, 0x00, 0x00, 0x01]; const PING_ENR_SEQ: u64 = 2; const WHOAREYOU_ID_NONCE: [u8; 16] = [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, ]; #[test] @@ -1393,11 +1395,17 @@ mod tests { // Node IDs from official test vectors (keccak256 of uncompressed pubkey) fn node_a_id() -> NodeId { - NodeId::from_slice(&hex::decode("aaaa8419e9f49d0083561b48287df592939a8d19947d8c0ef88f2a4856a69fbb").unwrap()) + NodeId::from_slice( + &hex::decode("aaaa8419e9f49d0083561b48287df592939a8d19947d8c0ef88f2a4856a69fbb") + .unwrap(), + ) } fn node_b_id() -> NodeId { - NodeId::from_slice(&hex::decode("bbbb9d047f0488c0b5a93c1c3f2d8bafc7c8ff337024a55434a0d0555de64db9").unwrap()) + NodeId::from_slice( + &hex::decode("bbbb9d047f0488c0b5a93c1c3f2d8bafc7c8ff337024a55434a0d0555de64db9") + .unwrap(), + ) } #[test] @@ -1407,7 +1415,10 @@ mod tests { let distance = xor_distance(&node_a, &node_b); assert!(distance > BigUint::from(0u32)); - assert_eq!(xor_distance(&node_a, &node_b), xor_distance(&node_b, &node_a)); + assert_eq!( + xor_distance(&node_a, &node_b), + xor_distance(&node_b, &node_a) + ); } #[test] From 50430a742ff59e5c14ca1756f0e87d152e104ef4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Wed, 21 Jan 2026 15:57:47 +0200 Subject: [PATCH 22/23] syncing service for client --- 
lean_client/networking/src/lib.rs | 1 + .../networking/src/sync/backfill_sync.rs | 262 ++++++++++++++++ .../networking/src/sync/block_cache.rs | 205 +++++++++++++ lean_client/networking/src/sync/config.rs | 16 + lean_client/networking/src/sync/head_sync.rs | 184 +++++++++++ lean_client/networking/src/sync/mod.rs | 44 +++ .../networking/src/sync/peer_manager.rs | 199 ++++++++++++ lean_client/networking/src/sync/service.rs | 290 ++++++++++++++++++ lean_client/networking/src/sync/states.rs | 45 +++ .../src/sync/tests/backfill_sync_tests.rs | 83 +++++ .../src/sync/tests/block_cache_tests.rs | 102 ++++++ .../src/sync/tests/head_sync_tests.rs | 104 +++++++ lean_client/networking/src/sync/tests/mod.rs | 7 + .../src/sync/tests/peer_manager_tests.rs | 50 +++ .../src/sync/tests/service_tests.rs | 101 ++++++ 15 files changed, 1693 insertions(+) create mode 100644 lean_client/networking/src/sync/backfill_sync.rs create mode 100644 lean_client/networking/src/sync/block_cache.rs create mode 100644 lean_client/networking/src/sync/config.rs create mode 100644 lean_client/networking/src/sync/head_sync.rs create mode 100644 lean_client/networking/src/sync/mod.rs create mode 100644 lean_client/networking/src/sync/peer_manager.rs create mode 100644 lean_client/networking/src/sync/service.rs create mode 100644 lean_client/networking/src/sync/states.rs create mode 100644 lean_client/networking/src/sync/tests/backfill_sync_tests.rs create mode 100644 lean_client/networking/src/sync/tests/block_cache_tests.rs create mode 100644 lean_client/networking/src/sync/tests/head_sync_tests.rs create mode 100644 lean_client/networking/src/sync/tests/mod.rs create mode 100644 lean_client/networking/src/sync/tests/peer_manager_tests.rs create mode 100644 lean_client/networking/src/sync/tests/service_tests.rs diff --git a/lean_client/networking/src/lib.rs b/lean_client/networking/src/lib.rs index d8cd874..3f333cd 100644 --- a/lean_client/networking/src/lib.rs +++ b/lean_client/networking/src/lib.rs @@ 
-5,4 +5,5 @@ pub mod gossipsub; pub mod network; pub mod req_resp; pub mod serde_utils; +pub mod sync; pub mod types; diff --git a/lean_client/networking/src/sync/backfill_sync.rs b/lean_client/networking/src/sync/backfill_sync.rs new file mode 100644 index 0000000..5cc034c --- /dev/null +++ b/lean_client/networking/src/sync/backfill_sync.rs @@ -0,0 +1,262 @@ +/// Backfill synchronization for resolving orphan blocks. +/// +/// When a block arrives whose parent is unknown, we need to fetch that parent. +/// If the parent also has an unknown parent, we continue recursively. This process +/// is called "backfill" because we are filling in gaps going backward in time. +/// +/// ## The Challenge +/// +/// Blocks can arrive out of order for several reasons: +/// 1. **Gossip timing**: A child block gossips faster than its parent +/// 2. **Parallel downloads**: Responses arrive in different order than requests +/// 3. **Network partitions**: Some blocks were missed during a brief disconnect +/// +/// Without backfill, these orphan blocks would be useless. With backfill, we can +/// resolve their parent chains and process them. +/// +/// ## Safety: Depth Limits +/// +/// - An attacker could send a block claiming to have a parent millions of slots ago +/// - Without limits, we would exhaust memory trying to fetch the entire chain +/// - MAX_BACKFILL_DEPTH (512) covers legitimate reorgs while bounding resources + +use std::collections::HashSet; +use containers::{Bytes32, SignedBlockWithAttestation}; +use libp2p_identity::PeerId; +use tracing::{debug, warn}; + +use super::{ + block_cache::BlockCache, + config::{MAX_BACKFILL_DEPTH, MAX_BLOCKS_PER_REQUEST}, + peer_manager::PeerManager, +}; + +/// Network requester trait for fetching blocks. +/// +/// Abstracts the network layer to allow testing with mocks. +#[async_trait::async_trait] +pub trait NetworkRequester: Send + Sync { + /// Request blocks by their roots from a peer. 
+ /// + /// Returns the blocks if successful, or None if the request failed. + async fn request_blocks_by_root( + &self, + peer_id: PeerId, + roots: Vec<Bytes32>, + ) -> Option<Vec<SignedBlockWithAttestation>>; +} + +/// Backfill synchronization manager. +/// +/// Resolves orphan blocks by fetching their missing parents. When blocks +/// arrive with unknown parents, this struct orchestrates fetching those parents. +/// +/// ## How It Works +/// +/// 1. **Detection**: BlockCache marks blocks as orphans when added +/// 2. **Request**: BackfillSync requests missing parents from peers +/// 3. **Recursion**: If fetched parents are also orphans, continue fetching +/// 4. **Resolution**: When parent chain is complete, blocks become processable +/// +/// ## Integration +/// +/// BackfillSync does not process blocks itself. It only ensures parents exist +/// in the BlockCache. The SyncService is responsible for: +/// - Calling `fill_missing()` when orphans are detected +/// - Processing blocks when they become processable +/// - Integrating blocks into the Store +/// +/// ## Thread Safety +/// +/// This struct is designed for single-threaded async operation. The `pending` +/// set prevents duplicate requests for the same root. +pub struct BackfillSync<N: NetworkRequester> { + peer_manager: PeerManager, + block_cache: BlockCache, + network: N, + + /// Roots currently being fetched (prevents duplicate requests) + pending: HashSet<Bytes32>, +} + +impl<N: NetworkRequester> BackfillSync<N> { + pub fn new( + peer_manager: PeerManager, + block_cache: BlockCache, + network: N, + ) -> Self { + Self { + peer_manager, + block_cache, + network, + pending: HashSet::new(), + } + } + + /// Fill missing parent blocks for orphans. + /// + /// Recursively fetches parents until: + /// - All parents are found + /// - MAX_BACKFILL_DEPTH is reached + /// - No peers are available + /// + /// This method is idempotent and safe to call multiple times. 
+ pub async fn fill_missing(&mut self, roots: Vec, depth: usize) { + self.fill_missing_internal(roots, depth).await; + } + + fn fill_missing_internal<'a>( + &'a mut self, + roots: Vec, + depth: usize, + ) -> std::pin::Pin + Send + 'a>> { + Box::pin(async move { + if depth >= MAX_BACKFILL_DEPTH { + // Depth limit reached. Stop fetching to prevent resource exhaustion. + // This is a safety measure, not an error. Deep chains may be + // legitimate but we cannot fetch them via backfill. + debug!( + depth = depth, + max_depth = MAX_BACKFILL_DEPTH, + "Backfill depth limit reached" + ); + return; + } + + // Filter out roots we are already fetching or have cached + let roots_to_fetch: Vec = roots + .into_iter() + .filter(|root| !self.pending.contains(root) && !self.block_cache.contains(root)) + .collect(); + + if roots_to_fetch.is_empty() { + return; + } + + debug!( + num_roots = roots_to_fetch.len(), + depth = depth, + "Backfilling missing parents" + ); + + // Mark roots as pending to avoid duplicate requests + for root in &roots_to_fetch { + self.pending.insert(*root); + } + + // Fetch in batches to respect request limits + for batch_start in (0..roots_to_fetch.len()).step_by(MAX_BLOCKS_PER_REQUEST) { + let batch_end = (batch_start + MAX_BLOCKS_PER_REQUEST).min(roots_to_fetch.len()); + let batch = roots_to_fetch[batch_start..batch_end].to_vec(); + + self.fetch_batch(batch, depth).await; + } + + // Clear pending status + for root in &roots_to_fetch { + self.pending.remove(root); + } + }) + } + + async fn fetch_batch(&mut self, roots: Vec, depth: usize) { + // Select a peer for the request + let peer = match self.peer_manager.select_peer_for_request(None) { + Some(p) => p.peer_id, + None => { + debug!("No available peer for backfill request"); + return; + } + }; + + debug!( + peer = %peer, + num_roots = roots.len(), + depth = depth, + "Requesting blocks from peer" + ); + + // Mark request as started + self.peer_manager.on_request_start(&peer); + + // Request blocks + match 
self.network.request_blocks_by_root(peer, roots.clone()).await { + Some(blocks) if !blocks.is_empty() => { + debug!( + peer = %peer, + num_blocks = blocks.len(), + "Received blocks from peer" + ); + + self.peer_manager.on_request_complete(&peer); + self.process_received_blocks(blocks, peer, depth).await; + } + Some(_) => { + // Empty response. Peer may not have the blocks. + debug!(peer = %peer, "Peer returned no blocks"); + self.peer_manager.on_request_complete(&peer); + } + None => { + // Network error + warn!(peer = %peer, "Block request failed"); + self.peer_manager.on_request_failure(&peer, "backfill request failed"); + } + } + } + + async fn process_received_blocks( + &mut self, + blocks: Vec, + peer_id: PeerId, + depth: usize, + ) { + let mut new_orphan_parents = Vec::new(); + + for block in blocks { + let root = self.block_cache.add_block(block); + + // If this block is an orphan, we need to fetch its parent + if self.block_cache.is_orphan(&root) { + if let Some(parent_root) = self.block_cache.get_block(&root) + .map(|b| b.message.block.parent_root) + { + if !parent_root.0.is_zero() { + new_orphan_parents.push(parent_root); + } + } + } + } + + // Recursively fetch parents of newly discovered orphans + if !new_orphan_parents.is_empty() { + debug!( + peer = %peer_id, + num_parents = new_orphan_parents.len(), + next_depth = depth + 1, + "Found orphan parents, continuing backfill" + ); + + self.fill_missing_internal(new_orphan_parents, depth + 1).await; + } + } + + /// Get reference to block cache. + pub fn block_cache(&self) -> &BlockCache { + &self.block_cache + } + + /// Get mutable reference to block cache. + pub fn block_cache_mut(&mut self) -> &mut BlockCache { + &mut self.block_cache + } + + /// Get reference to peer manager. + pub fn peer_manager(&self) -> &PeerManager { + &self.peer_manager + } + + /// Get mutable reference to peer manager. 
+ pub fn peer_manager_mut(&mut self) -> &mut PeerManager { + &mut self.peer_manager + } +} diff --git a/lean_client/networking/src/sync/block_cache.rs b/lean_client/networking/src/sync/block_cache.rs new file mode 100644 index 0000000..d87ffd6 --- /dev/null +++ b/lean_client/networking/src/sync/block_cache.rs @@ -0,0 +1,205 @@ +/// Block cache for managing blocks and tracking orphans. +/// +/// Maintains a cache of blocks and identifies orphans (blocks whose parent +/// is not yet known). This is essential for handling out-of-order block arrival. + +use std::collections::{HashMap, HashSet}; +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; +use containers::block::hash_tree_root; + +/// Block cache for sync operations. +/// +/// Manages blocks during synchronization and tracks orphans (blocks with +/// unknown parents). When blocks arrive out of order, orphans are cached +/// until their parent chains can be resolved. +#[derive(Debug, Default, Clone)] +pub struct BlockCache { + /// All cached blocks, indexed by block root + blocks: HashMap, + + /// Blocks whose parent is not in the cache (orphans) + orphans: HashSet, + + /// Children of each block (parent_root -> set of child roots) + children: HashMap>, +} + +impl BlockCache { + pub fn new() -> Self { + Self::default() + } + + /// Add a block to the cache. + /// + /// Automatically detects if the block is an orphan and tracks it. + /// Returns the block root. 
+ pub fn add_block(&mut self, block: SignedBlockWithAttestation) -> Bytes32 { + let root = hash_tree_root(&block.message.block); + let parent_root = block.message.block.parent_root; + + // Add to cache + self.blocks.insert(root, block); + + // Track parent-child relationship + self.children.entry(parent_root) + .or_insert_with(HashSet::new) + .insert(root); + + // Check if this is an orphan (parent not in cache) + if !parent_root.0.is_zero() && !self.blocks.contains_key(&parent_root) { + self.orphans.insert(root); + } + + // If adding this block resolves any orphans, remove them from orphan set + if let Some(children) = self.children.get(&root) { + for child in children { + self.orphans.remove(child); + } + } + + root + } + + /// Get a block by its root. + pub fn get_block(&self, root: &Bytes32) -> Option<&SignedBlockWithAttestation> { + self.blocks.get(root) + } + + /// Check if a block exists in the cache. + pub fn contains(&self, root: &Bytes32) -> bool { + self.blocks.contains_key(root) + } + + /// Check if a block is an orphan (parent unknown). + pub fn is_orphan(&self, root: &Bytes32) -> bool { + self.orphans.contains(root) + } + + /// Get all orphan block roots. + pub fn get_orphans(&self) -> Vec { + self.orphans.iter().copied().collect() + } + + /// Get missing parent roots for orphan blocks. + /// + /// Returns roots of parents that are not in the cache. + pub fn get_missing_parents(&self) -> Vec { + self.orphans.iter() + .filter_map(|orphan_root| { + self.blocks.get(orphan_root) + .map(|block| block.message.block.parent_root) + }) + .filter(|parent_root| !parent_root.0.is_zero() && !self.blocks.contains_key(parent_root)) + .collect::>() // Deduplicate + .into_iter() + .collect() + } + + /// Get all processable blocks (blocks whose parent is known or is genesis). + /// + /// Returns blocks that can be processed because their parent exists + /// in the cache or they are genesis blocks (parent_root is zero). 
+ pub fn get_processable_blocks(&self) -> Vec { + self.blocks.iter() + .filter_map(|(root, block)| { + let parent_root = block.message.block.parent_root; + if parent_root.0.is_zero() || self.blocks.contains_key(&parent_root) { + Some(*root) + } else { + None + } + }) + .collect() + } + + /// Remove a block from the cache. + /// + /// Also updates orphan tracking and parent-child relationships. + pub fn remove_block(&mut self, root: &Bytes32) -> Option { + if let Some(block) = self.blocks.remove(root) { + // Remove from orphan set if present + self.orphans.remove(root); + + // Remove from parent's children set + let parent_root = block.message.block.parent_root; + if let Some(children) = self.children.get_mut(&parent_root) { + children.remove(root); + if children.is_empty() { + self.children.remove(&parent_root); + } + } + + // Mark children as orphans if removing this block orphans them + if let Some(children) = self.children.get(root) { + for child in children { + self.orphans.insert(*child); + } + } + + Some(block) + } else { + None + } + } + + /// Get the slot of a block. + pub fn get_slot(&self, root: &Bytes32) -> Option { + self.blocks.get(root).map(|block| block.message.block.slot) + } + + /// Get children of a block. + pub fn get_children(&self, root: &Bytes32) -> Vec { + self.children.get(root) + .map(|children| children.iter().copied().collect()) + .unwrap_or_default() + } + + /// Get chain length from a block back to genesis or earliest cached ancestor. + /// + /// Returns None if the block is not in the cache. 
+ pub fn get_chain_length(&self, root: &Bytes32) -> Option { + if !self.blocks.contains_key(root) { + return None; + } + + let mut length = 0; + let mut current = *root; + + loop { + if let Some(block) = self.blocks.get(¤t) { + let parent_root = block.message.block.parent_root; + if parent_root.0.is_zero() { + // Reached genesis + break; + } + length += 1; + if !self.blocks.contains_key(&parent_root) { + // Parent not in cache, can't continue + break; + } + current = parent_root; + } else { + break; + } + } + + Some(length) + } + + /// Clear all blocks from the cache. + pub fn clear(&mut self) { + self.blocks.clear(); + self.orphans.clear(); + self.children.clear(); + } + + /// Get the number of cached blocks. + pub fn len(&self) -> usize { + self.blocks.len() + } + + /// Check if the cache is empty. + pub fn is_empty(&self) -> bool { + self.blocks.is_empty() + } +} diff --git a/lean_client/networking/src/sync/config.rs b/lean_client/networking/src/sync/config.rs new file mode 100644 index 0000000..b51ff16 --- /dev/null +++ b/lean_client/networking/src/sync/config.rs @@ -0,0 +1,16 @@ +/// Sync service configuration constants. +/// +/// Operational parameters for synchronization: batch sizes, timeouts, and limits. + +/// Maximum blocks to request in a single BlocksByRoot request. +pub const MAX_BLOCKS_PER_REQUEST: usize = 10; + +/// Maximum concurrent requests to a single peer. +pub const MAX_CONCURRENT_REQUESTS: usize = 2; + +/// Maximum depth to backfill when resolving orphan chains. +/// This prevents resource exhaustion from malicious deep chains. +pub const MAX_BACKFILL_DEPTH: usize = 512; + +/// Interval between sync state evaluations (in seconds). 
+pub const SYNC_TICK_INTERVAL_SECS: u64 = 1; diff --git a/lean_client/networking/src/sync/head_sync.rs b/lean_client/networking/src/sync/head_sync.rs new file mode 100644 index 0000000..a6754aa --- /dev/null +++ b/lean_client/networking/src/sync/head_sync.rs @@ -0,0 +1,184 @@ +/// Head synchronization for processing gossip blocks. +/// +/// Manages the processing of blocks received via gossip to advance the chain head. +/// Works in coordination with backfill sync to handle out-of-order block arrivals. + +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; +use tracing::debug; + +use super::block_cache::BlockCache; + +/// Head synchronization manager. +/// +/// Processes blocks to advance the chain head. Works with BlockCache to +/// handle blocks that arrive in any order. +/// +/// ## How It Works +/// +/// 1. Blocks arrive via gossip +/// 2. HeadSync adds them to the BlockCache +/// 3. If parent exists, block is processable immediately +/// 4. If parent missing, block is cached as orphan (BackfillSync will fetch parent) +/// 5. Once parent chain is complete, all descendants become processable +/// +/// ## Integration +/// +/// HeadSync coordinates with: +/// - **BlockCache**: Tracks blocks and identifies orphans +/// - **BackfillSync**: Fetches missing parents for orphans +/// - **SyncService**: Orchestrates overall sync flow +pub struct HeadSync { + block_cache: BlockCache, +} + +impl HeadSync { + pub fn new(block_cache: BlockCache) -> Self { + Self { block_cache } + } + + /// Process a gossip block. 
+ /// + /// Adds the block to the cache and returns information about what happened: + /// - The block root + /// - Whether the block is processable (parent exists) + /// - Missing parent roots (if block is orphan) + pub fn process_gossip_block( + &mut self, + block: SignedBlockWithAttestation, + ) -> ProcessResult { + let slot = block.message.block.slot; + let parent_root = block.message.block.parent_root; + + debug!( + slot = slot.0, + parent = ?parent_root, + "Processing gossip block" + ); + + // Add to cache + let root = self.block_cache.add_block(block); + + // Check if processable + let is_orphan = self.block_cache.is_orphan(&root); + + if is_orphan { + debug!( + slot = slot.0, + root = ?root, + "Block is orphan (parent unknown)" + ); + + // Get missing parents for backfill + let missing_parents = if parent_root.0.is_zero() { + vec![] + } else if !self.block_cache.contains(&parent_root) { + vec![parent_root] + } else { + vec![] + }; + + ProcessResult { + root, + is_processable: false, + missing_parents, + } + } else { + debug!( + slot = slot.0, + root = ?root, + "Block is processable (parent known)" + ); + + ProcessResult { + root, + is_processable: true, + missing_parents: vec![], + } + } + } + + /// Get all blocks ready for processing. + /// + /// Returns blocks whose parents exist in the cache or are genesis. + /// These blocks can be safely processed in topological order. + pub fn get_processable_blocks(&self) -> Vec { + self.block_cache.get_processable_blocks() + } + + /// Get a block by its root. + pub fn get_block(&self, root: &Bytes32) -> Option<&SignedBlockWithAttestation> { + self.block_cache.get_block(root) + } + + /// Remove a block from the cache after processing. + pub fn remove_block(&mut self, root: &Bytes32) -> Option { + self.block_cache.remove_block(root) + } + + /// Check if a block exists in the cache. + pub fn contains_block(&self, root: &Bytes32) -> bool { + self.block_cache.contains(root) + } + + /// Get all orphan blocks. 
+ pub fn get_orphans(&self) -> Vec { + self.block_cache.get_orphans() + } + + /// Get missing parent roots for all orphans. + pub fn get_missing_parents(&self) -> Vec { + self.block_cache.get_missing_parents() + } + + /// Get reference to block cache. + pub fn block_cache(&self) -> &BlockCache { + &self.block_cache + } + + /// Get mutable reference to block cache. + pub fn block_cache_mut(&mut self) -> &mut BlockCache { + &mut self.block_cache + } + + /// Get the highest slot among cached blocks. + pub fn get_highest_cached_slot(&self) -> Option { + self.block_cache.get_processable_blocks() + .iter() + .filter_map(|root| self.block_cache.get_slot(root)) + .max() + } + + /// Get statistics about the cache. + pub fn get_stats(&self) -> HeadSyncStats { + let total_blocks = self.block_cache.len(); + let orphan_blocks = self.block_cache.get_orphans().len(); + let processable_blocks = self.block_cache.get_processable_blocks().len(); + + HeadSyncStats { + total_blocks, + orphan_blocks, + processable_blocks, + } + } +} + +/// Result of processing a gossip block. +#[derive(Debug, Clone)] +pub struct ProcessResult { + /// The root of the processed block + pub root: Bytes32, + + /// Whether the block can be processed immediately + pub is_processable: bool, + + /// Missing parent roots (if block is orphan) + pub missing_parents: Vec, +} + +/// Statistics about the head sync cache. +#[derive(Debug, Clone, Copy)] +pub struct HeadSyncStats { + pub total_blocks: usize, + pub orphan_blocks: usize, + pub processable_blocks: usize, +} diff --git a/lean_client/networking/src/sync/mod.rs b/lean_client/networking/src/sync/mod.rs new file mode 100644 index 0000000..8c0aaf2 --- /dev/null +++ b/lean_client/networking/src/sync/mod.rs @@ -0,0 +1,44 @@ +/// Sync service for the lean Ethereum consensus client. +/// +/// This module provides synchronization capabilities for downloading and +/// validating blocks to catch up with the network. 
It includes: +/// +/// - **Block Cache**: Manages blocks and tracks orphans (blocks with unknown parents) +/// - **Peer Manager**: Tracks peer chain status and selects peers for requests +/// - **Backfill Sync**: Resolves orphan chains by fetching missing parent blocks +/// - **Head Sync**: Advances the chain head by processing gossip blocks +/// - **Sync Service**: Coordinates all sync operations and manages state transitions +/// +/// ## Architecture +/// +/// The sync service operates reactively: +/// 1. Blocks arrive via gossip +/// 2. If parent is known, process immediately +/// 3. If parent is unknown, cache block and trigger backfill +/// 4. Backfill fetches missing parents recursively (up to MAX_BACKFILL_DEPTH) +/// 5. Once parent chain is complete, process all cached blocks +/// +/// ## State Machine +/// +/// - **IDLE**: No peers, waiting to start +/// - **SYNCING**: Processing blocks to catch up +/// - **SYNCED**: Reached network finalized checkpoint + +pub mod config; +pub mod states; +pub mod peer_manager; +pub mod block_cache; +pub mod backfill_sync; +pub mod head_sync; +pub mod service; + +pub use config::*; +pub use states::SyncState; +pub use peer_manager::{PeerManager, SyncPeer}; +pub use block_cache::BlockCache; +pub use backfill_sync::BackfillSync; +pub use head_sync::HeadSync; +pub use service::SyncService; + +#[cfg(test)] +mod tests; diff --git a/lean_client/networking/src/sync/peer_manager.rs b/lean_client/networking/src/sync/peer_manager.rs new file mode 100644 index 0000000..e88bd15 --- /dev/null +++ b/lean_client/networking/src/sync/peer_manager.rs @@ -0,0 +1,199 @@ +/// Peer manager for sync operations. +/// +/// Tracks peer chain status and selects peers for block requests. + +use std::collections::HashMap; +use containers::{Slot, Status}; +use libp2p_identity::PeerId; +use crate::types::ConnectionState; +use super::config::MAX_CONCURRENT_REQUESTS; + +/// Sync-specific peer state. 
+/// +/// Wraps peer information with sync-specific state: chain status and request tracking. +#[derive(Debug, Clone)] +pub struct SyncPeer { + pub peer_id: PeerId, + pub connection_state: ConnectionState, + pub status: Option, + pub requests_in_flight: usize, +} + +impl SyncPeer { + pub fn new(peer_id: PeerId, connection_state: ConnectionState) -> Self { + Self { + peer_id, + connection_state, + status: None, + requests_in_flight: 0, + } + } + + /// Check if peer is connected. + pub fn is_connected(&self) -> bool { + self.connection_state == ConnectionState::Connected + } + + /// Check if peer is available for new requests. + /// + /// A peer is available if: + /// - Connected + /// - Below MAX_CONCURRENT_REQUESTS limit + pub fn is_available(&self) -> bool { + self.is_connected() && self.requests_in_flight < MAX_CONCURRENT_REQUESTS + } + + /// Check if peer likely has data for given slot. + pub fn has_slot(&self, slot: Slot) -> bool { + if let Some(status) = &self.status { + status.head.slot >= slot + } else { + false + } + } + + /// Mark that a request has been sent to this peer. + pub fn on_request_start(&mut self) { + self.requests_in_flight += 1; + } + + /// Mark that a request has completed. + pub fn on_request_complete(&mut self) { + self.requests_in_flight = self.requests_in_flight.saturating_sub(1); + } +} + +/// Peer manager for sync operations. +/// +/// Tracks peer chain status, selects peers for requests, and manages +/// request concurrency limits. +#[derive(Debug, Default, Clone)] +pub struct PeerManager { + peers: HashMap, +} + +impl PeerManager { + pub fn new() -> Self { + Self::default() + } + + /// Add a peer to the manager. + pub fn add_peer(&mut self, peer_id: PeerId, connection_state: ConnectionState) -> &mut SyncPeer { + self.peers.entry(peer_id) + .or_insert_with(|| SyncPeer::new(peer_id, connection_state)) + } + + /// Remove a peer from the manager. 
+ pub fn remove_peer(&mut self, peer_id: &PeerId) -> Option { + self.peers.remove(peer_id) + } + + /// Get a peer by ID. + pub fn get_peer(&self, peer_id: &PeerId) -> Option<&SyncPeer> { + self.peers.get(peer_id) + } + + /// Get a mutable peer by ID. + pub fn get_peer_mut(&mut self, peer_id: &PeerId) -> Option<&mut SyncPeer> { + self.peers.get_mut(peer_id) + } + + /// Update peer connection state. + pub fn update_connection_state(&mut self, peer_id: &PeerId, state: ConnectionState) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.connection_state = state; + } + } + + /// Update peer chain status. + pub fn update_status(&mut self, peer_id: &PeerId, status: Status) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.status = Some(status); + } + } + + /// Select an available peer for a request. + /// + /// Returns the first available peer. If min_slot is provided, only + /// considers peers that likely have data for that slot. + pub fn select_peer_for_request(&self, min_slot: Option) -> Option<&SyncPeer> { + self.peers.values().find(|peer| { + if !peer.is_available() { + return false; + } + if let Some(slot) = min_slot { + peer.has_slot(slot) + } else { + true + } + }) + } + + /// Get network's finalized slot (most common among connected peers). + /// + /// Returns the mode (most common) finalized slot reported by connected peers. 
+ pub fn get_network_finalized_slot(&self) -> Option { + let mut finalized_slots: Vec = self.peers.values() + .filter(|peer| peer.status.is_some() && peer.is_connected()) + .map(|peer| peer.status.as_ref().unwrap().finalized.slot) + .collect(); + + if finalized_slots.is_empty() { + return None; + } + + // Find mode (most common value) + finalized_slots.sort(); + let mut max_count = 0; + let mut mode = finalized_slots[0]; + let mut current_count = 1; + let mut current_slot = finalized_slots[0]; + + for i in 1..finalized_slots.len() { + if finalized_slots[i] == current_slot { + current_count += 1; + } else { + if current_count > max_count { + max_count = current_count; + mode = current_slot; + } + current_slot = finalized_slots[i]; + current_count = 1; + } + } + + // Check last group + if current_count > max_count { + mode = current_slot; + } + + Some(mode) + } + + /// Mark that a request has been sent to a peer. + pub fn on_request_start(&mut self, peer_id: &PeerId) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.on_request_start(); + } + } + + /// Mark that a request has completed successfully. + pub fn on_request_complete(&mut self, peer_id: &PeerId) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.on_request_complete(); + } + } + + /// Mark that a request has failed. + pub fn on_request_failure(&mut self, peer_id: &PeerId, _reason: &str) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.on_request_complete(); + // Could implement reputation/scoring here + } + } + + /// Get all tracked peers. + pub fn get_all_peers(&self) -> impl Iterator { + self.peers.values() + } +} diff --git a/lean_client/networking/src/sync/service.rs b/lean_client/networking/src/sync/service.rs new file mode 100644 index 0000000..1932ad8 --- /dev/null +++ b/lean_client/networking/src/sync/service.rs @@ -0,0 +1,290 @@ +/// Sync service coordinating all synchronization operations. +/// +/// The SyncService is the main entry point for synchronization. 
It coordinates: +/// - HeadSync: Processing gossip blocks +/// - BackfillSync: Fetching missing parent blocks +/// - PeerManager: Tracking peer status +/// - State machine: Managing IDLE -> SYNCING -> SYNCED transitions + +use std::sync::Arc; +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; +use libp2p_identity::PeerId; +use parking_lot::Mutex; +use tracing::{debug, info, warn}; + +use super::{ + backfill_sync::{BackfillSync, NetworkRequester}, + block_cache::BlockCache, + peer_manager::PeerManager, + states::SyncState, +}; +use crate::types::ConnectionState; + +/// Sync service coordinating all sync operations. +/// +/// This is the main sync coordinator that: +/// 1. Receives blocks from gossip via HeadSync +/// 2. Triggers backfill for orphan blocks via BackfillSync +/// 3. Manages sync state (IDLE -> SYNCING -> SYNCED) +/// 4. Provides blocks to the fork choice for processing +pub struct SyncService { + state: SyncState, + head_sync: Arc>, + backfill_sync: Arc>>, + peer_manager: Arc>, + local_head_slot: Slot, +} + +impl SyncService { + pub fn new(network: N, peer_manager: PeerManager, block_cache: BlockCache) -> Self { + let peer_manager_arc = Arc::new(Mutex::new(peer_manager)); + let block_cache_arc = Arc::new(Mutex::new(block_cache)); + + let pm_clone = peer_manager_arc.lock().clone(); + let bc_clone = block_cache_arc.lock().clone(); + + Self { + state: SyncState::default(), + head_sync: block_cache_arc.clone(), + backfill_sync: Arc::new(Mutex::new(BackfillSync::new( + pm_clone, + bc_clone, + network, + ))), + peer_manager: peer_manager_arc, + local_head_slot: Slot(0), + } + } + + /// Get current sync state. + pub fn state(&self) -> SyncState { + self.state + } + + /// Add a peer to the sync service. 
+ pub fn add_peer(&self, peer_id: PeerId, connection_state: ConnectionState) { + let mut pm = self.peer_manager.lock(); + pm.add_peer(peer_id, connection_state); + info!(peer = %peer_id, "Peer added to sync service"); + } + + /// Remove a peer from the sync service. + pub fn remove_peer(&self, peer_id: &PeerId) { + let mut pm = self.peer_manager.lock(); + pm.remove_peer(peer_id); + info!(peer = %peer_id, "Peer removed from sync service"); + } + + /// Update peer connection state. + pub fn update_peer_connection(&self, peer_id: &PeerId, state: ConnectionState) { + let mut pm = self.peer_manager.lock(); + pm.update_connection_state(peer_id, state); + } + + /// Update peer chain status. + pub fn update_peer_status(&self, peer_id: &PeerId, status: containers::Status) { + let finalized_slot = status.finalized.slot; + let mut pm = self.peer_manager.lock(); + pm.update_status(peer_id, status); + debug!(peer = %peer_id, finalized_slot = finalized_slot.0, "Updated peer status"); + } + + /// Process a gossip block. + /// + /// Returns the block root and whether backfill is needed. 
+ pub async fn process_gossip_block( + &mut self, + block: SignedBlockWithAttestation, + ) -> (Bytes32, bool) { + let slot = block.message.block.slot; + let parent_root = block.message.block.parent_root; + + let (root, is_orphan, missing_parents) = { + let mut cache = self.head_sync.lock(); + let root = cache.add_block(block); + let is_orphan = cache.is_orphan(&root); + + let missing_parents = if is_orphan && !parent_root.0.is_zero() { + if !cache.contains(&parent_root) { + vec![parent_root] + } else { + vec![] + } + } else { + vec![] + }; + + (root, is_orphan, missing_parents) + }; + + debug!( + slot = slot.0, + root = ?root, + processable = !is_orphan, + "Processed gossip block" + ); + + // If block has missing parents, trigger backfill + if !missing_parents.is_empty() { + debug!( + num_missing = missing_parents.len(), + "Triggering backfill for missing parents" + ); + + let mut bs = self.backfill_sync.lock(); + bs.fill_missing(missing_parents, 0).await; + } + + (root, !is_orphan) + } + + /// Get all blocks ready for processing. + /// + /// Returns blocks in topological order (parents before children). + pub fn get_processable_blocks(&self) -> Vec { + let cache = self.head_sync.lock(); + let roots = cache.get_processable_blocks(); + + // Sort by slot to ensure topological order + let mut blocks: Vec<_> = roots.iter() + .filter_map(|root| { + cache.get_block(root).map(|b| (b.clone(), b.message.block.slot)) + }) + .collect(); + + blocks.sort_by_key(|(_, slot)| *slot); + blocks.into_iter().map(|(block, _)| block).collect() + } + + /// Remove a block from the cache after processing. + pub fn remove_processed_block(&self, root: &Bytes32) { + let mut cache = self.head_sync.lock(); + cache.remove_block(root); + } + + /// Update local head slot (from fork choice). + pub fn update_local_head(&mut self, slot: Slot) { + self.local_head_slot = slot; + self.update_sync_state(); + } + + /// Update sync state based on current conditions. 
+ fn update_sync_state(&mut self) { + let pm = self.peer_manager.lock(); + let network_finalized = pm.get_network_finalized_slot(); + drop(pm); + + let new_state = match (self.state, network_finalized) { + // IDLE -> SYNCING: Peers connected and we need to sync + (SyncState::Idle, Some(finalized)) if self.local_head_slot < finalized => { + info!( + local_head = self.local_head_slot.0, + network_finalized = finalized.0, + "Transitioning to SYNCING" + ); + SyncState::Syncing + } + + // SYNCING -> SYNCED: Caught up with network + (SyncState::Syncing, Some(finalized)) if self.local_head_slot >= finalized => { + info!( + local_head = self.local_head_slot.0, + network_finalized = finalized.0, + "Transitioning to SYNCED" + ); + SyncState::Synced + } + + // SYNCED -> SYNCING: Fell behind network + (SyncState::Synced, Some(finalized)) if self.local_head_slot < finalized => { + warn!( + local_head = self.local_head_slot.0, + network_finalized = finalized.0, + "Fell behind, transitioning to SYNCING" + ); + SyncState::Syncing + } + + // Any state -> IDLE: No peers or no network info + (_, None) => { + if self.state != SyncState::Idle { + info!("No peer information, transitioning to IDLE"); + } + SyncState::Idle + } + + // No transition needed + _ => self.state, + }; + + if new_state != self.state { + if !self.state.can_transition_to(new_state) { + warn!( + from = ?self.state, + to = ?new_state, + "Invalid state transition attempted" + ); + return; + } + self.state = new_state; + } + } + + /// Periodic tick for sync service. + /// + /// Should be called regularly (e.g., every SYNC_TICK_INTERVAL_SECS). + /// Performs periodic tasks like state evaluation and orphan resolution. 
+ pub async fn tick(&mut self) { + self.update_sync_state(); + + // Check for orphans and trigger backfill if needed + let missing_parents = { + let cache = self.head_sync.lock(); + cache.get_missing_parents() + }; + + if !missing_parents.is_empty() { + debug!( + num_missing = missing_parents.len(), + "Found missing parents, triggering backfill" + ); + + let mut bs = self.backfill_sync.lock(); + bs.fill_missing(missing_parents, 0).await; + } + } + + /// Get sync statistics. + pub fn get_stats(&self) -> SyncStats { + let cache = self.head_sync.lock(); + let orphan_blocks = cache.get_orphans().len(); + let processable_blocks = cache.get_processable_blocks().len(); + let cached_blocks = cache.len(); + drop(cache); + + let pm = self.peer_manager.lock(); + let connected_peers = pm.get_all_peers() + .filter(|p| p.is_connected()) + .count(); + + SyncStats { + state: self.state, + local_head_slot: self.local_head_slot, + cached_blocks, + orphan_blocks, + processable_blocks, + connected_peers, + } + } +} + +/// Statistics about the sync service. +#[derive(Debug, Clone, Copy)] +pub struct SyncStats { + pub state: SyncState, + pub local_head_slot: Slot, + pub cached_blocks: usize, + pub orphan_blocks: usize, + pub processable_blocks: usize, + pub connected_peers: usize, +} diff --git a/lean_client/networking/src/sync/states.rs b/lean_client/networking/src/sync/states.rs new file mode 100644 index 0000000..5506fb4 --- /dev/null +++ b/lean_client/networking/src/sync/states.rs @@ -0,0 +1,45 @@ +/// Sync service state machine. + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SyncState { + /// Idle state: No peers connected or sync not yet started. + /// + /// Initial state when the sync service starts. The client remains idle + /// until peers connect and provide chain status. + Idle, + + /// Syncing state: Processing blocks to catch up with the network. 
+ /// + /// The client is actively processing blocks (from gossip or request/response) + /// to reach the network's finalized checkpoint. Backfill happens naturally + /// within this state when orphan blocks are detected. + Syncing, + + /// Synced state: Caught up with the network's finalized checkpoint. + /// + /// Local head has reached or exceeded the network's most common finalized slot. + /// The client continues to process new blocks via gossip but is considered + /// fully synchronized. + Synced, +} + +impl SyncState { + /// Check if a transition to the target state is valid. + /// + /// State machines enforce invariants through transition rules. This method + /// encodes those rules. Callers should check validity before transitioning + /// to catch logic errors early. + pub fn can_transition_to(&self, target: SyncState) -> bool { + match self { + SyncState::Idle => matches!(target, SyncState::Syncing), + SyncState::Syncing => matches!(target, SyncState::Synced | SyncState::Idle), + SyncState::Synced => matches!(target, SyncState::Syncing | SyncState::Idle), + } + } +} + +impl Default for SyncState { + fn default() -> Self { + SyncState::Idle + } +} diff --git a/lean_client/networking/src/sync/tests/backfill_sync_tests.rs b/lean_client/networking/src/sync/tests/backfill_sync_tests.rs new file mode 100644 index 0000000..f90445f --- /dev/null +++ b/lean_client/networking/src/sync/tests/backfill_sync_tests.rs @@ -0,0 +1,83 @@ +use crate::sync::{BackfillSync, BlockCache, PeerManager}; +use crate::sync::backfill_sync::NetworkRequester; +use crate::types::ConnectionState; +use containers::{Block, BlockBody, BlockWithAttestation, Attestation, ValidatorIndex, Slot, Bytes32, SignedBlockWithAttestation}; +use libp2p_identity::PeerId; + +// Mock network for testing +struct MockNetwork { + blocks: std::collections::HashMap, +} + +impl MockNetwork { + fn new() -> Self { + Self { + blocks: std::collections::HashMap::new(), + } + } + + fn add_block(&mut self, block: 
SignedBlockWithAttestation) -> Bytes32 { + let root = containers::block::hash_tree_root(&block.message.block); + self.blocks.insert(root, block); + root + } +} + +#[async_trait::async_trait] +impl NetworkRequester for MockNetwork { + async fn request_blocks_by_root( + &self, + _peer_id: PeerId, + roots: Vec, + ) -> Option> { + let blocks: Vec<_> = roots.iter() + .filter_map(|root| self.blocks.get(root).cloned()) + .collect(); + + if blocks.is_empty() { + None + } else { + Some(blocks) + } + } +} + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[tokio::test] +async fn test_backfill_single_missing_block() { + let mut peer_manager = PeerManager::new(); + let peer_id = PeerId::random(); + peer_manager.add_peer(peer_id, ConnectionState::Connected); + + let mut network = MockNetwork::new(); + let block_cache = BlockCache::new(); + + // Create parent block and add to network + let parent = create_test_block(1, Bytes32::default()); + let parent_root = network.add_block(parent); + + let mut backfill = BackfillSync::new(peer_manager, block_cache, network); + + // Request the missing parent + backfill.fill_missing(vec![parent_root], 0).await; + + // Parent should now be in cache + assert!(backfill.block_cache().contains(&parent_root)); +} diff --git a/lean_client/networking/src/sync/tests/block_cache_tests.rs b/lean_client/networking/src/sync/tests/block_cache_tests.rs new file mode 100644 index 0000000..7197fa9 --- /dev/null +++ b/lean_client/networking/src/sync/tests/block_cache_tests.rs @@ -0,0 +1,102 @@ +use crate::sync::BlockCache; +use containers::{Block, BlockBody, BlockWithAttestation, Attestation, 
ValidatorIndex, Bytes32, Slot, SignedBlockWithAttestation}; + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[test] +fn test_add_block() { + let mut cache = BlockCache::new(); + let block = create_test_block(1, Bytes32::default()); + + let root = cache.add_block(block); + assert!(cache.contains(&root)); +} + +#[test] +fn test_orphan_detection() { + let mut cache = BlockCache::new(); + + // Create a block with unknown parent + let unknown_parent = Bytes32(ssz::H256::from([1u8; 32])); + let orphan_block = create_test_block(2, unknown_parent); + + let orphan_root = cache.add_block(orphan_block); + + assert!(cache.is_orphan(&orphan_root)); + assert_eq!(cache.get_orphans().len(), 1); +} + +#[test] +fn test_orphan_resolution() { + let mut cache = BlockCache::new(); + + // Add genesis + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = cache.add_block(genesis.clone()); + + // Add child (should not be orphan) + let child = create_test_block(1, genesis_root); + let child_root = cache.add_block(child); + + assert!(!cache.is_orphan(&child_root)); + assert_eq!(cache.get_orphans().len(), 0); +} + +#[test] +fn test_get_missing_parents() { + let mut cache = BlockCache::new(); + + let parent1 = Bytes32(ssz::H256::from([1u8; 32])); + let parent2 = Bytes32(ssz::H256::from([2u8; 32])); + + let orphan1 = create_test_block(1, parent1); + let orphan2 = create_test_block(2, parent2); + let orphan3 = create_test_block(3, parent1); // Same parent as orphan1 + + cache.add_block(orphan1); + cache.add_block(orphan2); + cache.add_block(orphan3); + + let missing = cache.get_missing_parents(); + 
assert_eq!(missing.len(), 2); // Only 2 unique parents + assert!(missing.contains(&parent1)); + assert!(missing.contains(&parent2)); +} + +#[test] +fn test_get_processable_blocks() { + let mut cache = BlockCache::new(); + + // Add genesis (processable) + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = cache.add_block(genesis); + + // Add child (processable) + let child = create_test_block(1, genesis_root); + let child_root = cache.add_block(child); + + // Add orphan (not processable) + let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); + cache.add_block(orphan); + + let processable = cache.get_processable_blocks(); + assert_eq!(processable.len(), 2); + assert!(processable.contains(&genesis_root)); + assert!(processable.contains(&child_root)); +} diff --git a/lean_client/networking/src/sync/tests/head_sync_tests.rs b/lean_client/networking/src/sync/tests/head_sync_tests.rs new file mode 100644 index 0000000..21131b5 --- /dev/null +++ b/lean_client/networking/src/sync/tests/head_sync_tests.rs @@ -0,0 +1,104 @@ +use crate::sync::{BlockCache, HeadSync}; +use containers::{Block, BlockBody, BlockWithAttestation, Attestation, ValidatorIndex, Bytes32, Slot, SignedBlockWithAttestation}; + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[test] +fn test_process_genesis_block() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + let genesis = create_test_block(0, Bytes32::default()); + let result = head_sync.process_gossip_block(genesis); + + assert!(result.is_processable); + assert!(result.missing_parents.is_empty()); +} + +#[test] +fn 
test_process_orphan_block() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + let unknown_parent = Bytes32(ssz::H256::from([1u8; 32])); + let orphan = create_test_block(1, unknown_parent); + + let result = head_sync.process_gossip_block(orphan); + + assert!(!result.is_processable); + assert_eq!(result.missing_parents.len(), 1); + assert_eq!(result.missing_parents[0], unknown_parent); +} + +#[test] +fn test_process_chain_in_order() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + // Add genesis + let genesis = create_test_block(0, Bytes32::default()); + let genesis_result = head_sync.process_gossip_block(genesis); + + // Add child + let child = create_test_block(1, genesis_result.root); + let child_result = head_sync.process_gossip_block(child); + + assert!(child_result.is_processable); + assert!(child_result.missing_parents.is_empty()); +} + +#[test] +fn test_get_processable_blocks() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + // Add genesis + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = head_sync.process_gossip_block(genesis).root; + + // Add child + let child = create_test_block(1, genesis_root); + let child_root = head_sync.process_gossip_block(child).root; + + // Add orphan + let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); + head_sync.process_gossip_block(orphan); + + let processable = head_sync.get_processable_blocks(); + assert_eq!(processable.len(), 2); + assert!(processable.contains(&genesis_root)); + assert!(processable.contains(&child_root)); +} + +#[test] +fn test_stats() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + // Add genesis and child + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = head_sync.process_gossip_block(genesis).root; + + let child = create_test_block(1, genesis_root); + head_sync.process_gossip_block(child); + + // Add orphan + let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 
32]))); + head_sync.process_gossip_block(orphan); + + let stats = head_sync.get_stats(); + assert_eq!(stats.total_blocks, 3); + assert_eq!(stats.orphan_blocks, 1); + assert_eq!(stats.processable_blocks, 2); +} diff --git a/lean_client/networking/src/sync/tests/mod.rs b/lean_client/networking/src/sync/tests/mod.rs new file mode 100644 index 0000000..8ca76e5 --- /dev/null +++ b/lean_client/networking/src/sync/tests/mod.rs @@ -0,0 +1,7 @@ +/// Tests for sync module + +mod block_cache_tests; +mod peer_manager_tests; +mod head_sync_tests; +mod backfill_sync_tests; +mod service_tests; diff --git a/lean_client/networking/src/sync/tests/peer_manager_tests.rs b/lean_client/networking/src/sync/tests/peer_manager_tests.rs new file mode 100644 index 0000000..295f81d --- /dev/null +++ b/lean_client/networking/src/sync/tests/peer_manager_tests.rs @@ -0,0 +1,50 @@ +use crate::sync::{PeerManager, SyncPeer}; +use crate::sync::config::MAX_CONCURRENT_REQUESTS; +use crate::types::ConnectionState; +use containers::{Checkpoint, Bytes32, Status, Slot}; +use libp2p_identity::PeerId; + +#[test] +fn test_sync_peer_is_available() { + let mut peer = SyncPeer::new( + PeerId::random(), + ConnectionState::Connected + ); + assert!(peer.is_available()); + + peer.requests_in_flight = MAX_CONCURRENT_REQUESTS; + assert!(!peer.is_available()); +} + +#[test] +fn test_peer_manager_add_and_get() { + let mut manager = PeerManager::new(); + let peer_id = PeerId::random(); + + manager.add_peer(peer_id, ConnectionState::Connected); + assert!(manager.get_peer(&peer_id).is_some()); +} + +#[test] +fn test_peer_manager_update_status() { + let mut manager = PeerManager::new(); + let peer_id = PeerId::random(); + + manager.add_peer(peer_id, ConnectionState::Connected); + + let status = Status { + finalized: Checkpoint { + root: Bytes32::default(), + slot: Slot(100), + }, + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(150), + }, + }; + + manager.update_status(&peer_id, status.clone()); + + let peer = 
manager.get_peer(&peer_id).unwrap(); + assert_eq!(peer.status.as_ref().unwrap().finalized.slot, Slot(100)); +} diff --git a/lean_client/networking/src/sync/tests/service_tests.rs b/lean_client/networking/src/sync/tests/service_tests.rs new file mode 100644 index 0000000..d851f45 --- /dev/null +++ b/lean_client/networking/src/sync/tests/service_tests.rs @@ -0,0 +1,101 @@ +use crate::sync::{SyncService, SyncState, PeerManager, BlockCache}; +use crate::sync::backfill_sync::NetworkRequester; +use crate::types::ConnectionState; +use containers::{Block, BlockBody, BlockWithAttestation, Attestation, ValidatorIndex, Bytes32, Slot, SignedBlockWithAttestation, Checkpoint}; +use libp2p_identity::PeerId; + +// Mock network for testing +struct MockNetwork; + +#[async_trait::async_trait] +impl NetworkRequester for MockNetwork { + async fn request_blocks_by_root( + &self, + _peer_id: PeerId, + _roots: Vec, + ) -> Option> { + None + } +} + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[tokio::test] +async fn test_sync_service_creation() { + let service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + assert_eq!(service.state(), SyncState::Idle); +} + +#[tokio::test] +async fn test_process_genesis_block() { + let mut service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + + let genesis = create_test_block(0, Bytes32::default()); + let (_root, is_processable) = service.process_gossip_block(genesis).await; + + assert!(is_processable); + assert!(service.get_processable_blocks().len() > 0); +} + +#[test] +fn test_add_remove_peer() { + let 
service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + let peer_id = PeerId::random(); + + service.add_peer(peer_id, ConnectionState::Connected); + + // Verify peer was added by checking stats + let stats = service.get_stats(); + assert!(stats.connected_peers >= 1); + + service.remove_peer(&peer_id); + + // Note: Stats may not reflect removal immediately in a real impl, + // but this tests the API works +} + +#[test] +fn test_sync_state_transitions() { + let mut service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + assert_eq!(service.state(), SyncState::Idle); + + // Add peer with finalized slot ahead of local head + let peer_id = PeerId::random(); + service.add_peer(peer_id, ConnectionState::Connected); + + let status = containers::Status { + finalized: Checkpoint { + root: Bytes32::default(), + slot: Slot(100), + }, + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(150), + }, + }; + service.update_peer_status(&peer_id, status); + + // Should transition to SYNCING + service.update_local_head(Slot(0)); + assert_eq!(service.state(), SyncState::Syncing); + + // Catch up to network finalized + service.update_local_head(Slot(100)); + assert_eq!(service.state(), SyncState::Synced); +} From 4851f059a2f34513c14caf1aef966ee65bf4c575 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Wed, 21 Jan 2026 15:58:25 +0200 Subject: [PATCH 23/23] formatting --- .../networking/src/sync/backfill_sync.rs | 111 +++++++++--------- .../networking/src/sync/block_cache.rs | 28 +++-- lean_client/networking/src/sync/head_sync.rs | 15 +-- lean_client/networking/src/sync/mod.rs | 17 ++- .../networking/src/sync/peer_manager.rs | 22 ++-- lean_client/networking/src/sync/service.rs | 46 ++++---- .../src/sync/tests/backfill_sync_tests.rs | 12 +- .../src/sync/tests/block_cache_tests.rs | 35 +++--- .../src/sync/tests/head_sync_tests.rs | 27 +++-- 
lean_client/networking/src/sync/tests/mod.rs | 5 +- .../src/sync/tests/peer_manager_tests.rs | 19 ++- .../src/sync/tests/service_tests.rs | 27 +++-- 12 files changed, 192 insertions(+), 172 deletions(-) diff --git a/lean_client/networking/src/sync/backfill_sync.rs b/lean_client/networking/src/sync/backfill_sync.rs index 5cc034c..3a6887f 100644 --- a/lean_client/networking/src/sync/backfill_sync.rs +++ b/lean_client/networking/src/sync/backfill_sync.rs @@ -1,3 +1,5 @@ +use containers::{Bytes32, SignedBlockWithAttestation}; +use libp2p_identity::PeerId; /// Backfill synchronization for resolving orphan blocks. /// /// When a block arrives whose parent is unknown, we need to fetch that parent. @@ -19,10 +21,7 @@ /// - An attacker could send a block claiming to have a parent millions of slots ago /// - Without limits, we would exhaust memory trying to fetch the entire chain /// - MAX_BACKFILL_DEPTH (512) covers legitimate reorgs while bounding resources - use std::collections::HashSet; -use containers::{Bytes32, SignedBlockWithAttestation}; -use libp2p_identity::PeerId; use tracing::{debug, warn}; use super::{ @@ -74,17 +73,13 @@ pub struct BackfillSync { peer_manager: PeerManager, block_cache: BlockCache, network: N, - + /// Roots currently being fetched (prevents duplicate requests) pending: HashSet, } impl BackfillSync { - pub fn new( - peer_manager: PeerManager, - block_cache: BlockCache, - network: N, - ) -> Self { + pub fn new(peer_manager: PeerManager, block_cache: BlockCache, network: N) -> Self { Self { peer_manager, block_cache, @@ -111,51 +106,51 @@ impl BackfillSync { depth: usize, ) -> std::pin::Pin + Send + 'a>> { Box::pin(async move { - if depth >= MAX_BACKFILL_DEPTH { - // Depth limit reached. Stop fetching to prevent resource exhaustion. - // This is a safety measure, not an error. Deep chains may be - // legitimate but we cannot fetch them via backfill. + if depth >= MAX_BACKFILL_DEPTH { + // Depth limit reached. 
Stop fetching to prevent resource exhaustion. + // This is a safety measure, not an error. Deep chains may be + // legitimate but we cannot fetch them via backfill. + debug!( + depth = depth, + max_depth = MAX_BACKFILL_DEPTH, + "Backfill depth limit reached" + ); + return; + } + + // Filter out roots we are already fetching or have cached + let roots_to_fetch: Vec = roots + .into_iter() + .filter(|root| !self.pending.contains(root) && !self.block_cache.contains(root)) + .collect(); + + if roots_to_fetch.is_empty() { + return; + } + debug!( + num_roots = roots_to_fetch.len(), depth = depth, - max_depth = MAX_BACKFILL_DEPTH, - "Backfill depth limit reached" + "Backfilling missing parents" ); - return; - } - // Filter out roots we are already fetching or have cached - let roots_to_fetch: Vec = roots - .into_iter() - .filter(|root| !self.pending.contains(root) && !self.block_cache.contains(root)) - .collect(); - - if roots_to_fetch.is_empty() { - return; - } - - debug!( - num_roots = roots_to_fetch.len(), - depth = depth, - "Backfilling missing parents" - ); + // Mark roots as pending to avoid duplicate requests + for root in &roots_to_fetch { + self.pending.insert(*root); + } - // Mark roots as pending to avoid duplicate requests - for root in &roots_to_fetch { - self.pending.insert(*root); - } + // Fetch in batches to respect request limits + for batch_start in (0..roots_to_fetch.len()).step_by(MAX_BLOCKS_PER_REQUEST) { + let batch_end = (batch_start + MAX_BLOCKS_PER_REQUEST).min(roots_to_fetch.len()); + let batch = roots_to_fetch[batch_start..batch_end].to_vec(); - // Fetch in batches to respect request limits - for batch_start in (0..roots_to_fetch.len()).step_by(MAX_BLOCKS_PER_REQUEST) { - let batch_end = (batch_start + MAX_BLOCKS_PER_REQUEST).min(roots_to_fetch.len()); - let batch = roots_to_fetch[batch_start..batch_end].to_vec(); - - self.fetch_batch(batch, depth).await; - } + self.fetch_batch(batch, depth).await; + } - // Clear pending status - for root in 
&roots_to_fetch { - self.pending.remove(root); - } + // Clear pending status + for root in &roots_to_fetch { + self.pending.remove(root); + } }) } @@ -180,14 +175,18 @@ impl BackfillSync { self.peer_manager.on_request_start(&peer); // Request blocks - match self.network.request_blocks_by_root(peer, roots.clone()).await { + match self + .network + .request_blocks_by_root(peer, roots.clone()) + .await + { Some(blocks) if !blocks.is_empty() => { debug!( peer = %peer, num_blocks = blocks.len(), "Received blocks from peer" ); - + self.peer_manager.on_request_complete(&peer); self.process_received_blocks(blocks, peer, depth).await; } @@ -199,7 +198,8 @@ impl BackfillSync { None => { // Network error warn!(peer = %peer, "Block request failed"); - self.peer_manager.on_request_failure(&peer, "backfill request failed"); + self.peer_manager + .on_request_failure(&peer, "backfill request failed"); } } } @@ -214,10 +214,12 @@ impl BackfillSync { for block in blocks { let root = self.block_cache.add_block(block); - + // If this block is an orphan, we need to fetch its parent if self.block_cache.is_orphan(&root) { - if let Some(parent_root) = self.block_cache.get_block(&root) + if let Some(parent_root) = self + .block_cache + .get_block(&root) .map(|b| b.message.block.parent_root) { if !parent_root.0.is_zero() { @@ -235,8 +237,9 @@ impl BackfillSync { next_depth = depth + 1, "Found orphan parents, continuing backfill" ); - - self.fill_missing_internal(new_orphan_parents, depth + 1).await; + + self.fill_missing_internal(new_orphan_parents, depth + 1) + .await; } } diff --git a/lean_client/networking/src/sync/block_cache.rs b/lean_client/networking/src/sync/block_cache.rs index d87ffd6..c43ab69 100644 --- a/lean_client/networking/src/sync/block_cache.rs +++ b/lean_client/networking/src/sync/block_cache.rs @@ -1,11 +1,10 @@ +use containers::block::hash_tree_root; +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; /// Block cache for managing blocks and tracking orphans. 
/// /// Maintains a cache of blocks and identifies orphans (blocks whose parent /// is not yet known). This is essential for handling out-of-order block arrival. - use std::collections::{HashMap, HashSet}; -use containers::{Bytes32, SignedBlockWithAttestation, Slot}; -use containers::block::hash_tree_root; /// Block cache for sync operations. /// @@ -16,10 +15,10 @@ use containers::block::hash_tree_root; pub struct BlockCache { /// All cached blocks, indexed by block root blocks: HashMap, - + /// Blocks whose parent is not in the cache (orphans) orphans: HashSet, - + /// Children of each block (parent_root -> set of child roots) children: HashMap>, } @@ -41,7 +40,8 @@ impl BlockCache { self.blocks.insert(root, block); // Track parent-child relationship - self.children.entry(parent_root) + self.children + .entry(parent_root) .or_insert_with(HashSet::new) .insert(root); @@ -84,12 +84,16 @@ impl BlockCache { /// /// Returns roots of parents that are not in the cache. pub fn get_missing_parents(&self) -> Vec { - self.orphans.iter() + self.orphans + .iter() .filter_map(|orphan_root| { - self.blocks.get(orphan_root) + self.blocks + .get(orphan_root) .map(|block| block.message.block.parent_root) }) - .filter(|parent_root| !parent_root.0.is_zero() && !self.blocks.contains_key(parent_root)) + .filter(|parent_root| { + !parent_root.0.is_zero() && !self.blocks.contains_key(parent_root) + }) .collect::>() // Deduplicate .into_iter() .collect() @@ -100,7 +104,8 @@ impl BlockCache { /// Returns blocks that can be processed because their parent exists /// in the cache or they are genesis blocks (parent_root is zero). pub fn get_processable_blocks(&self) -> Vec { - self.blocks.iter() + self.blocks + .iter() .filter_map(|(root, block)| { let parent_root = block.message.block.parent_root; if parent_root.0.is_zero() || self.blocks.contains_key(&parent_root) { @@ -149,7 +154,8 @@ impl BlockCache { /// Get children of a block. 
pub fn get_children(&self, root: &Bytes32) -> Vec { - self.children.get(root) + self.children + .get(root) .map(|children| children.iter().copied().collect()) .unwrap_or_default() } diff --git a/lean_client/networking/src/sync/head_sync.rs b/lean_client/networking/src/sync/head_sync.rs index a6754aa..fb57c78 100644 --- a/lean_client/networking/src/sync/head_sync.rs +++ b/lean_client/networking/src/sync/head_sync.rs @@ -2,7 +2,6 @@ /// /// Manages the processing of blocks received via gossip to advance the chain head. /// Works in coordination with backfill sync to handle out-of-order block arrivals. - use containers::{Bytes32, SignedBlockWithAttestation, Slot}; use tracing::debug; @@ -42,10 +41,7 @@ impl HeadSync { /// - The block root /// - Whether the block is processable (parent exists) /// - Missing parent roots (if block is orphan) - pub fn process_gossip_block( - &mut self, - block: SignedBlockWithAttestation, - ) -> ProcessResult { + pub fn process_gossip_block(&mut self, block: SignedBlockWithAttestation) -> ProcessResult { let slot = block.message.block.slot; let parent_root = block.message.block.parent_root; @@ -60,7 +56,7 @@ impl HeadSync { // Check if processable let is_orphan = self.block_cache.is_orphan(&root); - + if is_orphan { debug!( slot = slot.0, @@ -142,7 +138,8 @@ impl HeadSync { /// Get the highest slot among cached blocks. 
pub fn get_highest_cached_slot(&self) -> Option { - self.block_cache.get_processable_blocks() + self.block_cache + .get_processable_blocks() .iter() .filter_map(|root| self.block_cache.get_slot(root)) .max() @@ -167,10 +164,10 @@ impl HeadSync { pub struct ProcessResult { /// The root of the processed block pub root: Bytes32, - + /// Whether the block can be processed immediately pub is_processable: bool, - + /// Missing parent roots (if block is orphan) pub missing_parents: Vec, } diff --git a/lean_client/networking/src/sync/mod.rs b/lean_client/networking/src/sync/mod.rs index 8c0aaf2..b8d2fb3 100644 --- a/lean_client/networking/src/sync/mod.rs +++ b/lean_client/networking/src/sync/mod.rs @@ -1,3 +1,5 @@ +pub mod backfill_sync; +pub mod block_cache; /// Sync service for the lean Ethereum consensus client. /// /// This module provides synchronization capabilities for downloading and @@ -23,22 +25,19 @@ /// - **IDLE**: No peers, waiting to start /// - **SYNCING**: Processing blocks to catch up /// - **SYNCED**: Reached network finalized checkpoint - pub mod config; -pub mod states; -pub mod peer_manager; -pub mod block_cache; -pub mod backfill_sync; pub mod head_sync; +pub mod peer_manager; pub mod service; +pub mod states; -pub use config::*; -pub use states::SyncState; -pub use peer_manager::{PeerManager, SyncPeer}; -pub use block_cache::BlockCache; pub use backfill_sync::BackfillSync; +pub use block_cache::BlockCache; +pub use config::*; pub use head_sync::HeadSync; +pub use peer_manager::{PeerManager, SyncPeer}; pub use service::SyncService; +pub use states::SyncState; #[cfg(test)] mod tests; diff --git a/lean_client/networking/src/sync/peer_manager.rs b/lean_client/networking/src/sync/peer_manager.rs index e88bd15..575e722 100644 --- a/lean_client/networking/src/sync/peer_manager.rs +++ b/lean_client/networking/src/sync/peer_manager.rs @@ -1,12 +1,11 @@ +use super::config::MAX_CONCURRENT_REQUESTS; +use crate::types::ConnectionState; +use containers::{Slot, 
Status}; +use libp2p_identity::PeerId; /// Peer manager for sync operations. /// /// Tracks peer chain status and selects peers for block requests. - use std::collections::HashMap; -use containers::{Slot, Status}; -use libp2p_identity::PeerId; -use crate::types::ConnectionState; -use super::config::MAX_CONCURRENT_REQUESTS; /// Sync-specific peer state. /// @@ -78,8 +77,13 @@ impl PeerManager { } /// Add a peer to the manager. - pub fn add_peer(&mut self, peer_id: PeerId, connection_state: ConnectionState) -> &mut SyncPeer { - self.peers.entry(peer_id) + pub fn add_peer( + &mut self, + peer_id: PeerId, + connection_state: ConnectionState, + ) -> &mut SyncPeer { + self.peers + .entry(peer_id) .or_insert_with(|| SyncPeer::new(peer_id, connection_state)) } @@ -133,7 +137,9 @@ impl PeerManager { /// /// Returns the mode (most common) finalized slot reported by connected peers. pub fn get_network_finalized_slot(&self) -> Option { - let mut finalized_slots: Vec = self.peers.values() + let mut finalized_slots: Vec = self + .peers + .values() .filter(|peer| peer.status.is_some() && peer.is_connected()) .map(|peer| peer.status.as_ref().unwrap().finalized.slot) .collect(); diff --git a/lean_client/networking/src/sync/service.rs b/lean_client/networking/src/sync/service.rs index 1932ad8..0ff3c77 100644 --- a/lean_client/networking/src/sync/service.rs +++ b/lean_client/networking/src/sync/service.rs @@ -1,3 +1,6 @@ +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; +use libp2p_identity::PeerId; +use parking_lot::Mutex; /// Sync service coordinating all synchronization operations. /// /// The SyncService is the main entry point for synchronization. 
It coordinates: @@ -5,11 +8,7 @@ /// - BackfillSync: Fetching missing parent blocks /// - PeerManager: Tracking peer status /// - State machine: Managing IDLE -> SYNCING -> SYNCED transitions - use std::sync::Arc; -use containers::{Bytes32, SignedBlockWithAttestation, Slot}; -use libp2p_identity::PeerId; -use parking_lot::Mutex; use tracing::{debug, info, warn}; use super::{ @@ -39,18 +38,14 @@ impl SyncService { pub fn new(network: N, peer_manager: PeerManager, block_cache: BlockCache) -> Self { let peer_manager_arc = Arc::new(Mutex::new(peer_manager)); let block_cache_arc = Arc::new(Mutex::new(block_cache)); - + let pm_clone = peer_manager_arc.lock().clone(); let bc_clone = block_cache_arc.lock().clone(); - + Self { state: SyncState::default(), head_sync: block_cache_arc.clone(), - backfill_sync: Arc::new(Mutex::new(BackfillSync::new( - pm_clone, - bc_clone, - network, - ))), + backfill_sync: Arc::new(Mutex::new(BackfillSync::new(pm_clone, bc_clone, network))), peer_manager: peer_manager_arc, local_head_slot: Slot(0), } @@ -98,12 +93,12 @@ impl SyncService { ) -> (Bytes32, bool) { let slot = block.message.block.slot; let parent_root = block.message.block.parent_root; - + let (root, is_orphan, missing_parents) = { let mut cache = self.head_sync.lock(); let root = cache.add_block(block); let is_orphan = cache.is_orphan(&root); - + let missing_parents = if is_orphan && !parent_root.0.is_zero() { if !cache.contains(&parent_root) { vec![parent_root] @@ -113,7 +108,7 @@ impl SyncService { } else { vec![] }; - + (root, is_orphan, missing_parents) }; @@ -130,7 +125,7 @@ impl SyncService { num_missing = missing_parents.len(), "Triggering backfill for missing parents" ); - + let mut bs = self.backfill_sync.lock(); bs.fill_missing(missing_parents, 0).await; } @@ -144,14 +139,17 @@ impl SyncService { pub fn get_processable_blocks(&self) -> Vec { let cache = self.head_sync.lock(); let roots = cache.get_processable_blocks(); - + // Sort by slot to ensure topological order - 
let mut blocks: Vec<_> = roots.iter() + let mut blocks: Vec<_> = roots + .iter() .filter_map(|root| { - cache.get_block(root).map(|b| (b.clone(), b.message.block.slot)) + cache + .get_block(root) + .map(|b| (b.clone(), b.message.block.slot)) }) .collect(); - + blocks.sort_by_key(|(_, slot)| *slot); blocks.into_iter().map(|(block, _)| block).collect() } @@ -248,7 +246,7 @@ impl SyncService { num_missing = missing_parents.len(), "Found missing parents, triggering backfill" ); - + let mut bs = self.backfill_sync.lock(); bs.fill_missing(missing_parents, 0).await; } @@ -261,12 +259,10 @@ impl SyncService { let processable_blocks = cache.get_processable_blocks().len(); let cached_blocks = cache.len(); drop(cache); - + let pm = self.peer_manager.lock(); - let connected_peers = pm.get_all_peers() - .filter(|p| p.is_connected()) - .count(); - + let connected_peers = pm.get_all_peers().filter(|p| p.is_connected()).count(); + SyncStats { state: self.state, local_head_slot: self.local_head_slot, diff --git a/lean_client/networking/src/sync/tests/backfill_sync_tests.rs b/lean_client/networking/src/sync/tests/backfill_sync_tests.rs index f90445f..f646fb2 100644 --- a/lean_client/networking/src/sync/tests/backfill_sync_tests.rs +++ b/lean_client/networking/src/sync/tests/backfill_sync_tests.rs @@ -1,7 +1,10 @@ -use crate::sync::{BackfillSync, BlockCache, PeerManager}; use crate::sync::backfill_sync::NetworkRequester; +use crate::sync::{BackfillSync, BlockCache, PeerManager}; use crate::types::ConnectionState; -use containers::{Block, BlockBody, BlockWithAttestation, Attestation, ValidatorIndex, Slot, Bytes32, SignedBlockWithAttestation}; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, SignedBlockWithAttestation, Slot, + ValidatorIndex, +}; use libp2p_identity::PeerId; // Mock network for testing @@ -30,10 +33,11 @@ impl NetworkRequester for MockNetwork { _peer_id: PeerId, roots: Vec, ) -> Option> { - let blocks: Vec<_> = roots.iter() + let 
blocks: Vec<_> = roots + .iter() .filter_map(|root| self.blocks.get(root).cloned()) .collect(); - + if blocks.is_empty() { None } else { diff --git a/lean_client/networking/src/sync/tests/block_cache_tests.rs b/lean_client/networking/src/sync/tests/block_cache_tests.rs index 7197fa9..7a0e39a 100644 --- a/lean_client/networking/src/sync/tests/block_cache_tests.rs +++ b/lean_client/networking/src/sync/tests/block_cache_tests.rs @@ -1,5 +1,8 @@ use crate::sync::BlockCache; -use containers::{Block, BlockBody, BlockWithAttestation, Attestation, ValidatorIndex, Bytes32, Slot, SignedBlockWithAttestation}; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, SignedBlockWithAttestation, Slot, + ValidatorIndex, +}; fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { let block = Block { @@ -23,7 +26,7 @@ fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestat fn test_add_block() { let mut cache = BlockCache::new(); let block = create_test_block(1, Bytes32::default()); - + let root = cache.add_block(block); assert!(cache.contains(&root)); } @@ -31,13 +34,13 @@ fn test_add_block() { #[test] fn test_orphan_detection() { let mut cache = BlockCache::new(); - + // Create a block with unknown parent let unknown_parent = Bytes32(ssz::H256::from([1u8; 32])); let orphan_block = create_test_block(2, unknown_parent); - + let orphan_root = cache.add_block(orphan_block); - + assert!(cache.is_orphan(&orphan_root)); assert_eq!(cache.get_orphans().len(), 1); } @@ -45,15 +48,15 @@ fn test_orphan_detection() { #[test] fn test_orphan_resolution() { let mut cache = BlockCache::new(); - + // Add genesis let genesis = create_test_block(0, Bytes32::default()); let genesis_root = cache.add_block(genesis.clone()); - + // Add child (should not be orphan) let child = create_test_block(1, genesis_root); let child_root = cache.add_block(child); - + assert!(!cache.is_orphan(&child_root)); 
assert_eq!(cache.get_orphans().len(), 0); } @@ -61,18 +64,18 @@ fn test_orphan_resolution() { #[test] fn test_get_missing_parents() { let mut cache = BlockCache::new(); - + let parent1 = Bytes32(ssz::H256::from([1u8; 32])); let parent2 = Bytes32(ssz::H256::from([2u8; 32])); - + let orphan1 = create_test_block(1, parent1); let orphan2 = create_test_block(2, parent2); let orphan3 = create_test_block(3, parent1); // Same parent as orphan1 - + cache.add_block(orphan1); cache.add_block(orphan2); cache.add_block(orphan3); - + let missing = cache.get_missing_parents(); assert_eq!(missing.len(), 2); // Only 2 unique parents assert!(missing.contains(&parent1)); @@ -82,19 +85,19 @@ fn test_get_missing_parents() { #[test] fn test_get_processable_blocks() { let mut cache = BlockCache::new(); - + // Add genesis (processable) let genesis = create_test_block(0, Bytes32::default()); let genesis_root = cache.add_block(genesis); - + // Add child (processable) let child = create_test_block(1, genesis_root); let child_root = cache.add_block(child); - + // Add orphan (not processable) let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); cache.add_block(orphan); - + let processable = cache.get_processable_blocks(); assert_eq!(processable.len(), 2); assert!(processable.contains(&genesis_root)); diff --git a/lean_client/networking/src/sync/tests/head_sync_tests.rs b/lean_client/networking/src/sync/tests/head_sync_tests.rs index 21131b5..2c0e199 100644 --- a/lean_client/networking/src/sync/tests/head_sync_tests.rs +++ b/lean_client/networking/src/sync/tests/head_sync_tests.rs @@ -1,5 +1,8 @@ use crate::sync::{BlockCache, HeadSync}; -use containers::{Block, BlockBody, BlockWithAttestation, Attestation, ValidatorIndex, Bytes32, Slot, SignedBlockWithAttestation}; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, SignedBlockWithAttestation, Slot, + ValidatorIndex, +}; fn create_test_block(slot: u64, parent_root: Bytes32) -> 
SignedBlockWithAttestation { let block = Block { @@ -22,7 +25,7 @@ fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestat #[test] fn test_process_genesis_block() { let mut head_sync = HeadSync::new(BlockCache::new()); - + let genesis = create_test_block(0, Bytes32::default()); let result = head_sync.process_gossip_block(genesis); @@ -33,10 +36,10 @@ fn test_process_genesis_block() { #[test] fn test_process_orphan_block() { let mut head_sync = HeadSync::new(BlockCache::new()); - + let unknown_parent = Bytes32(ssz::H256::from([1u8; 32])); let orphan = create_test_block(1, unknown_parent); - + let result = head_sync.process_gossip_block(orphan); assert!(!result.is_processable); @@ -47,11 +50,11 @@ fn test_process_orphan_block() { #[test] fn test_process_chain_in_order() { let mut head_sync = HeadSync::new(BlockCache::new()); - + // Add genesis let genesis = create_test_block(0, Bytes32::default()); let genesis_result = head_sync.process_gossip_block(genesis); - + // Add child let child = create_test_block(1, genesis_result.root); let child_result = head_sync.process_gossip_block(child); @@ -63,15 +66,15 @@ fn test_process_chain_in_order() { #[test] fn test_get_processable_blocks() { let mut head_sync = HeadSync::new(BlockCache::new()); - + // Add genesis let genesis = create_test_block(0, Bytes32::default()); let genesis_root = head_sync.process_gossip_block(genesis).root; - + // Add child let child = create_test_block(1, genesis_root); let child_root = head_sync.process_gossip_block(child).root; - + // Add orphan let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); head_sync.process_gossip_block(orphan); @@ -85,14 +88,14 @@ fn test_get_processable_blocks() { #[test] fn test_stats() { let mut head_sync = HeadSync::new(BlockCache::new()); - + // Add genesis and child let genesis = create_test_block(0, Bytes32::default()); let genesis_root = head_sync.process_gossip_block(genesis).root; - + let child = create_test_block(1, 
genesis_root); head_sync.process_gossip_block(child); - + // Add orphan let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); head_sync.process_gossip_block(orphan); diff --git a/lean_client/networking/src/sync/tests/mod.rs b/lean_client/networking/src/sync/tests/mod.rs index 8ca76e5..995bce4 100644 --- a/lean_client/networking/src/sync/tests/mod.rs +++ b/lean_client/networking/src/sync/tests/mod.rs @@ -1,7 +1,6 @@ +mod backfill_sync_tests; /// Tests for sync module - mod block_cache_tests; -mod peer_manager_tests; mod head_sync_tests; -mod backfill_sync_tests; +mod peer_manager_tests; mod service_tests; diff --git a/lean_client/networking/src/sync/tests/peer_manager_tests.rs b/lean_client/networking/src/sync/tests/peer_manager_tests.rs index 295f81d..eef61ee 100644 --- a/lean_client/networking/src/sync/tests/peer_manager_tests.rs +++ b/lean_client/networking/src/sync/tests/peer_manager_tests.rs @@ -1,15 +1,12 @@ -use crate::sync::{PeerManager, SyncPeer}; use crate::sync::config::MAX_CONCURRENT_REQUESTS; +use crate::sync::{PeerManager, SyncPeer}; use crate::types::ConnectionState; -use containers::{Checkpoint, Bytes32, Status, Slot}; +use containers::{Bytes32, Checkpoint, Slot, Status}; use libp2p_identity::PeerId; #[test] fn test_sync_peer_is_available() { - let mut peer = SyncPeer::new( - PeerId::random(), - ConnectionState::Connected - ); + let mut peer = SyncPeer::new(PeerId::random(), ConnectionState::Connected); assert!(peer.is_available()); peer.requests_in_flight = MAX_CONCURRENT_REQUESTS; @@ -20,7 +17,7 @@ fn test_sync_peer_is_available() { fn test_peer_manager_add_and_get() { let mut manager = PeerManager::new(); let peer_id = PeerId::random(); - + manager.add_peer(peer_id, ConnectionState::Connected); assert!(manager.get_peer(&peer_id).is_some()); } @@ -29,9 +26,9 @@ fn test_peer_manager_add_and_get() { fn test_peer_manager_update_status() { let mut manager = PeerManager::new(); let peer_id = PeerId::random(); - + 
manager.add_peer(peer_id, ConnectionState::Connected); - + let status = Status { finalized: Checkpoint { root: Bytes32::default(), @@ -42,9 +39,9 @@ fn test_peer_manager_update_status() { slot: Slot(150), }, }; - + manager.update_status(&peer_id, status.clone()); - + let peer = manager.get_peer(&peer_id).unwrap(); assert_eq!(peer.status.as_ref().unwrap().finalized.slot, Slot(100)); } diff --git a/lean_client/networking/src/sync/tests/service_tests.rs b/lean_client/networking/src/sync/tests/service_tests.rs index d851f45..1799397 100644 --- a/lean_client/networking/src/sync/tests/service_tests.rs +++ b/lean_client/networking/src/sync/tests/service_tests.rs @@ -1,7 +1,10 @@ -use crate::sync::{SyncService, SyncState, PeerManager, BlockCache}; use crate::sync::backfill_sync::NetworkRequester; +use crate::sync::{BlockCache, PeerManager, SyncService, SyncState}; use crate::types::ConnectionState; -use containers::{Block, BlockBody, BlockWithAttestation, Attestation, ValidatorIndex, Bytes32, Slot, SignedBlockWithAttestation, Checkpoint}; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, Checkpoint, + SignedBlockWithAttestation, Slot, ValidatorIndex, +}; use libp2p_identity::PeerId; // Mock network for testing @@ -38,14 +41,16 @@ fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestat #[tokio::test] async fn test_sync_service_creation() { - let service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + let service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); assert_eq!(service.state(), SyncState::Idle); } #[tokio::test] async fn test_process_genesis_block() { - let mut service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); - + let mut service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + let genesis = create_test_block(0, Bytes32::default()); let (_root, 
is_processable) = service.process_gossip_block(genesis).await; @@ -55,30 +60,32 @@ async fn test_process_genesis_block() { #[test] fn test_add_remove_peer() { - let service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + let service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); let peer_id = PeerId::random(); service.add_peer(peer_id, ConnectionState::Connected); - + // Verify peer was added by checking stats let stats = service.get_stats(); assert!(stats.connected_peers >= 1); service.remove_peer(&peer_id); - + // Note: Stats may not reflect removal immediately in a real impl, // but this tests the API works } #[test] fn test_sync_state_transitions() { - let mut service: SyncService = SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + let mut service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); assert_eq!(service.state(), SyncState::Idle); // Add peer with finalized slot ahead of local head let peer_id = PeerId::random(); service.add_peer(peer_id, ConnectionState::Connected); - + let status = containers::Status { finalized: Checkpoint { root: Bytes32::default(),