From 062ba694b59b0845d94bf77c207864cd8be118be Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Fri, 19 Dec 2025 00:46:02 +0200 Subject: [PATCH 01/27] prepared container types for signature aggregation (devnet 2) --- lean_client/containers/Cargo.toml | 3 + lean_client/containers/src/attestation.rs | 20 ++- lean_client/containers/src/block.rs | 17 ++- lean_client/containers/src/lib.rs | 4 +- lean_client/containers/src/serde_helpers.rs | 132 ++++++++++------- lean_client/containers/src/state.rs | 155 ++++++++++++-------- lean_client/src/main.rs | 6 +- 7 files changed, 211 insertions(+), 126 deletions(-) diff --git a/lean_client/containers/Cargo.toml b/lean_client/containers/Cargo.toml index 011bb4e..b6a45c3 100644 --- a/lean_client/containers/Cargo.toml +++ b/lean_client/containers/Cargo.toml @@ -5,6 +5,9 @@ edition = "2021" [features] xmss-verify = ["leansig"] +default = ["devnet1"] +devnet1 = [] +devnet2 = [] [lib] name = "containers" diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 6ad0f56..6a820c7 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -19,9 +19,9 @@ use typenum::U4096; /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). pub type Attestations = ssz::PersistentList; -/// List of signatures corresponding to attestations in a block. -/// Limit is VALIDATOR_REGISTRY_LIMIT (4096). -pub type BlockSignatures = ssz::PersistentList; +pub type AggregatedAttestations = ssz::PersistentList; + +pub type AttestationSignatures = ssz::PersistentList; /// Bitlist representing validator participation in an attestation. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). @@ -57,15 +57,19 @@ pub struct Attestation { /// Validator attestation bundled with its signature. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAttestation { - /// The attestation message signed by the validator. 
+ #[cfg(feature = "devnet2")] + pub validator_id: u64, + #[cfg(feature = "devnet2")] + pub message: AttestationData, + #[cfg(feature = "devnet1")] pub message: Attestation, - /// Signature aggregation produced by the leanVM (SNARKs in the future). + /// signature over attestaion message only as it would be aggregated later in attestation pub signature: Signature, } /// Aggregated attestation consisting of participation bits and message. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] -pub struct AggregatedAttestations { +pub struct AggregatedAttestation { /// Bitfield indicating which validators participated in the aggregation. pub aggregation_bits: AggregationBits, /// Combined attestation data similar to the beacon chain format. @@ -77,9 +81,9 @@ pub struct AggregatedAttestations { /// Aggregated attestation bundled with aggregated signatures. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] -pub struct SignedAggregatedAttestations { +pub struct SignedAggregatedAttestation { /// Aggregated attestation data. - pub message: AggregatedAttestations, + pub message: AggregatedAttestation, /// Aggregated attestation plus its combined signature. 
/// /// Stores a naive list of validator signatures that mirrors the attestation diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index 9c0a1de..55b4727 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs @@ -1,9 +1,12 @@ -use crate::{Attestation, Attestations, BlockSignatures, Bytes32, Signature, Slot, State, ValidatorIndex}; +use crate::{Attestation, Attestations, Bytes32, Signature, Slot, State, ValidatorIndex}; use serde::{Deserialize, Serialize}; use ssz_derive::Ssz; #[cfg(feature = "xmss-verify")] use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to_the_20::target_sum::SIGTargetSumLifetime20W2NoOff; +use ssz::PersistentList; +use typenum::U4096; +use crate::attestation::AttestationSignatures; /// The body of a block, containing payload data. /// @@ -11,6 +14,9 @@ use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to /// separately in BlockSignatures to match the spec architecture. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct BlockBody { + #[cfg(feature = "devnet2")] + pub attestations: VariableList, + #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers")] pub attestations: Attestations, } @@ -45,6 +51,12 @@ pub struct BlockWithAttestation { pub proposer_attestation: Attestation, } +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)] +pub struct BlockSignatures { + pub attestation_signatures: AttestationSignatures, + pub proposer_signature: Signature, +} + /// Envelope carrying a block, an attestation from proposer, and aggregated signatures. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -54,7 +66,10 @@ pub struct SignedBlockWithAttestation { /// Aggregated signature payload for the block. /// /// Signatures remain in attestation order followed by the proposer signature. 
+ #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers::block_signatures")] + pub signature: PersistentList, + #[cfg(feature = "devnet2")] pub signature: BlockSignatures, } diff --git a/lean_client/containers/src/lib.rs b/lean_client/containers/src/lib.rs index 511db23..c73a9f9 100644 --- a/lean_client/containers/src/lib.rs +++ b/lean_client/containers/src/lib.rs @@ -10,8 +10,8 @@ pub mod types; pub mod validator; pub use attestation::{ - AggregatedAttestations, AggregatedSignatures, AggregationBits, Attestation, AttestationData, - Attestations, BlockSignatures, Signature, SignedAggregatedAttestations, SignedAttestation, + AggregatedAttestation, AggregatedSignatures, AggregationBits, Attestation, AttestationData, + Attestations, Signature, SignedAggregatedAttestation, SignedAttestation, }; pub use block::{ Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlock, SignedBlockWithAttestation, diff --git a/lean_client/containers/src/serde_helpers.rs b/lean_client/containers/src/serde_helpers.rs index aff4d60..0568f71 100644 --- a/lean_client/containers/src/serde_helpers.rs +++ b/lean_client/containers/src/serde_helpers.rs @@ -34,26 +34,26 @@ where pub mod bitlist { use super::*; use ssz::BitList; - use typenum::Unsigned; use ssz::SszRead; - + use typenum::Unsigned; + #[derive(Deserialize)] #[serde(untagged)] enum BitListData { HexString(String), BoolArray(Vec), } - + pub fn deserialize<'de, D, N>(deserializer: D) -> Result, D::Error> where D: Deserializer<'de>, N: Unsigned, { use serde::de::Error; - + // First unwrap the {"data": ...} wrapper let wrapper = DataWrapper::::deserialize(deserializer)?; - + match wrapper.data { BitListData::HexString(hex_str) => { // Handle hex string format (e.g., "0x01ff") @@ -62,10 +62,10 @@ pub mod bitlist { // Empty hex string means empty bitlist return Ok(BitList::default()); } - + let bytes = hex::decode(hex_str) .map_err(|e| D::Error::custom(format!("Invalid hex string: {}", e)))?; - + // Decode SSZ bitlist 
(with delimiter bit) BitList::from_ssz_unchecked(&(), &bytes) .map_err(|e| D::Error::custom(format!("Invalid SSZ bitlist: {:?}", e))) @@ -80,19 +80,20 @@ pub mod bitlist { } } } - + pub fn serialize(value: &BitList, serializer: S) -> Result where S: Serializer, N: Unsigned, { use ssz::SszWrite; - + // Serialize as hex string in {"data": "0x..."} format let mut bytes = Vec::new(); - value.write_variable(&mut bytes) + value + .write_variable(&mut bytes) .map_err(|e| serde::ser::Error::custom(format!("Failed to write SSZ: {:?}", e)))?; - + let hex_str = format!("0x{}", hex::encode(&bytes)); let wrapper = DataWrapper { data: hex_str }; wrapper.serialize(serializer) @@ -103,9 +104,9 @@ pub mod bitlist { /// Signatures in test vectors are structured with {path, rho, hashes} instead of hex bytes pub mod signature { use super::*; - use serde_json::Value; use crate::Signature; - + use serde_json::Value; + /// Structured XMSS signature format from test vectors #[derive(Deserialize)] struct XmssSignature { @@ -113,65 +114,65 @@ pub mod signature { rho: DataWrapper>, hashes: DataWrapper>>>, } - + #[derive(Deserialize)] struct XmssPath { siblings: DataWrapper>>>, } - + pub fn deserialize_single<'de, D>(deserializer: D) -> Result where D: Deserializer<'de>, { use serde::de::Error; - + // First, try to parse as a JSON value to inspect the structure let value = Value::deserialize(deserializer)?; - + // Check if it's a hex string (normal format) if let Value::String(hex_str) = &value { let hex_str = hex_str.trim_start_matches("0x"); let bytes = hex::decode(hex_str) .map_err(|e| D::Error::custom(format!("Invalid hex string: {}", e)))?; - + return Signature::try_from(bytes.as_slice()) .map_err(|_| D::Error::custom("Invalid signature length")); } - + // Otherwise, parse as structured XMSS signature let xmss_sig: XmssSignature = serde_json::from_value(value) .map_err(|e| D::Error::custom(format!("Failed to parse XMSS signature: {}", e)))?; - + // Serialize the XMSS signature to bytes 
// Format: siblings (variable length) + rho (28 bytes) + hashes (variable length) let mut bytes = Vec::new(); - + // Write siblings for sibling in &xmss_sig.path.siblings.data { for val in &sibling.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Write rho (7 u32s = 28 bytes) for val in &xmss_sig.rho.data { bytes.extend_from_slice(&val.to_le_bytes()); } - + // Write hashes for hash in &xmss_sig.hashes.data { for val in &hash.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Pad or truncate to 3112 bytes bytes.resize(3112, 0); - + Signature::try_from(bytes.as_slice()) .map_err(|_| D::Error::custom("Failed to create signature")) } - + pub fn serialize(value: &Signature, serializer: S) -> Result where S: Serializer, @@ -186,10 +187,11 @@ pub mod signature { /// where each signature can be either hex string or structured XMSS format pub mod block_signatures { use super::*; - use crate::{Signature, BlockSignatures}; - use ssz::PersistentList; + use crate::Signature; use serde_json::Value; - + use ssz::PersistentList; + use typenum::U4096; + /// Structured XMSS signature format from test vectors #[derive(Deserialize, Clone)] struct XmssSignature { @@ -197,79 +199,95 @@ pub mod block_signatures { rho: DataWrapper>, hashes: DataWrapper>>>, } - + #[derive(Deserialize, Clone)] struct XmssPath { siblings: DataWrapper>>>, } - + fn parse_single_signature(value: &Value) -> Result { // Check if it's a hex string (normal format) if let Value::String(hex_str) = value { let hex_str = hex_str.trim_start_matches("0x"); - let bytes = hex::decode(hex_str) - .map_err(|e| format!("Invalid hex string: {}", e))?; - + let bytes = hex::decode(hex_str).map_err(|e| format!("Invalid hex string: {}", e))?; + return Signature::try_from(bytes.as_slice()) .map_err(|_| "Invalid signature length".to_string()); } - + // Otherwise, parse as structured XMSS signature let xmss_sig: XmssSignature = serde_json::from_value(value.clone()) .map_err(|e| format!("Failed to parse XMSS 
signature: {}", e))?; - + // Serialize the XMSS signature to bytes // Format: siblings (variable length) + rho (28 bytes) + hashes (variable length) let mut bytes = Vec::new(); - + // Write siblings for sibling in &xmss_sig.path.siblings.data { for val in &sibling.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Write rho (7 u32s = 28 bytes) for val in &xmss_sig.rho.data { bytes.extend_from_slice(&val.to_le_bytes()); } - + // Write hashes for hash in &xmss_sig.hashes.data { for val in &hash.data { bytes.extend_from_slice(&val.to_le_bytes()); } } - + // Pad or truncate to 3112 bytes bytes.resize(3112, 0); - - Signature::try_from(bytes.as_slice()) - .map_err(|_| "Failed to create signature".to_string()) + + Signature::try_from(bytes.as_slice()).map_err(|_| "Failed to create signature".to_string()) } - pub fn deserialize<'de, D>(deserializer: D) -> Result + #[cfg(feature = "devnet1")] + pub fn deserialize<'de, D>( + deserializer: D, + ) -> Result, D::Error> where D: Deserializer<'de>, { use serde::de::Error; - + // Parse the {"data": [...]} wrapper let wrapper: DataWrapper> = DataWrapper::deserialize(deserializer)?; - + let mut signatures = PersistentList::default(); - + for (idx, sig_value) in wrapper.data.into_iter().enumerate() { let sig = parse_single_signature(&sig_value) .map_err(|e| D::Error::custom(format!("Signature {}: {}", idx, e)))?; - signatures.push(sig) + signatures + .push(sig) .map_err(|e| D::Error::custom(format!("Signature {} push failed: {:?}", idx, e)))?; } - + Ok(signatures) } - - pub fn serialize(value: &BlockSignatures, serializer: S) -> Result + + #[cfg(feature = "devnet2")] + pub fn deserialize<'de, D>(_: D) -> Result + where + D: Deserializer<'de>, + { + Err(serde::de::Error::custom( + "BlockSignatures deserialization not implemented for devnet2", + )) + } + + #[cfg(feature = "devnet1")] + pub fn serialize( + value: &PersistentList, + serializer: S, + ) -> Result where S: Serializer, { @@ -285,8 +303,18 @@ pub mod 
block_signatures { Err(_) => break, } } - + let wrapper = DataWrapper { data: sigs }; wrapper.serialize(serializer) } + + #[cfg(feature = "devnet2")] + pub fn serialize(value: &BlockSignatures, serializer: S) -> Result + where + S: Serializer, + { + Err(serde::de::Error::custom( + "BlockSignatures serialization not implemented for devnet2", + )) + } } diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 4eb0ffd..ac84e06 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -1,13 +1,13 @@ use crate::validator::Validator; +use crate::{block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, Slot, Uint64, ValidatorIndex}; use crate::{ - block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, - Attestation, Attestations, BlockSignatures, Bytes32, Checkpoint, Config, Slot, Uint64, ValidatorIndex, + HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, }; -use crate::{HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators}; use serde::{Deserialize, Serialize}; -use ssz::{PersistentList as List}; +use ssz::{PersistentList as List, PersistentList}; use ssz_derive::Ssz; use std::collections::BTreeMap; +use typenum::U4096; pub const VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const JUSTIFICATION_ROOTS_LIMIT: usize = 1 << 18; // 262144 @@ -47,7 +47,10 @@ pub struct State { } impl State { - pub fn generate_genesis_with_validators(genesis_time: Uint64, validators: Vec) -> Self { + pub fn generate_genesis_with_validators( + genesis_time: Uint64, + validators: Vec, + ) -> Self { let body_for_root = BlockBody { attestations: Default::default(), }; @@ -64,7 +67,6 @@ impl State { validator_list.push(v).expect("Failed to add validator"); } - Self { config: Config { genesis_time: 
genesis_time.0, @@ -206,7 +208,11 @@ impl State { for (i, r) in roots.iter().enumerate() { let v = map.get(r).expect("root present"); - assert_eq!(v.len(), num_validators, "vote vector must match validator count"); + assert_eq!( + v.len(), + num_validators, + "vote vector must match validator count" + ); let base = i * num_validators; for (j, &bit) in v.iter().enumerate() { if bit { @@ -230,7 +236,11 @@ impl State { } // updated for fork choice tests - pub fn state_transition(&self, signed_block: SignedBlockWithAttestation, valid_signatures: bool) -> Result { + pub fn state_transition( + &self, + signed_block: SignedBlockWithAttestation, + valid_signatures: bool, + ) -> Result { self.state_transition_with_validation(signed_block, valid_signatures, true) } @@ -314,7 +324,7 @@ impl State { } // Create a mutable clone for hash computation - let latest_header_for_hash = self.latest_block_header.clone(); + let latest_header_for_hash = self.latest_block_header.clone(); let parent_root = hash_tree_root(&latest_header_for_hash); if block.parent_root != parent_root { return Err(String::from("Block parent root mismatch")); @@ -554,6 +564,7 @@ impl State { /// # Returns /// /// Tuple of (Block, post-State, collected attestations, signatures) + #[cfg(feature = "devnet1")] pub fn build_block( &self, slot: Slot, @@ -562,10 +573,10 @@ impl State { initial_attestations: Option>, available_signed_attestations: Option<&[SignedBlockWithAttestation]>, known_block_roots: Option<&std::collections::HashSet>, - ) -> Result<(Block, Self, Vec, BlockSignatures), String> { + ) -> Result<(Block, Self, Vec, PersistentList), String> { // Initialize empty attestation set for iterative collection let mut attestations = initial_attestations.unwrap_or_default(); - let mut signatures = BlockSignatures::default(); + let mut signatures = PersistentList::default(); // Advance state to target slot // Note: parent_root comes from fork choice and is already validated. 
@@ -581,7 +592,9 @@ impl State { // Create candidate block with current attestation set let mut attestations_list = Attestations::default(); for att in &attestations { - attestations_list.push(att.clone()).map_err(|e| format!("Failed to push attestation: {:?}", e))?; + attestations_list + .push(att.clone()) + .map_err(|e| format!("Failed to push attestation: {:?}", e))?; } let candidate_block = Block { @@ -666,10 +679,25 @@ impl State { // Add new attestations and continue iteration attestations.extend(new_attestations); for sig in new_signatures { - signatures.push(sig).map_err(|e| format!("Failed to push signature: {:?}", e))?; + signatures + .push(sig) + .map_err(|e| format!("Failed to push signature: {:?}", e))?; } } } + + #[cfg(feature = "devnet2")] + pub fn build_block( + &self, + _slot: Slot, + _proposer_index: ValidatorIndex, + _parent_root: Bytes32, + _initial_attestations: Option>, + _available_signed_attestations: Option<&[SignedBlockWithAttestation]>, + _known_block_roots: Option<&std::collections::HashSet>, + ) -> Result<(Block, Self, Vec, BlockSignatures), String> { + Err("build_block is not implemented for devnet2".to_string()) + } } #[cfg(test)] @@ -726,14 +754,15 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block() { // Create genesis state with validators let genesis_state = State::generate_genesis(Uint64(0), Uint64(4)); - + // Compute expected parent root after slot processing let pre_state = genesis_state.process_slots(Slot(1)).unwrap(); let expected_parent_root = hash_tree_root(&pre_state.latest_block_header); - + // Test 1: Build a simple block without attestations let result = genesis_state.build_block( Slot(1), @@ -743,27 +772,34 @@ mod tests { None, None, ); - + assert!(result.is_ok(), "Building simple block should succeed"); let (block, post_state, attestations, signatures) = result.unwrap(); - + // Verify block properties assert_eq!(block.slot, Slot(1)); assert_eq!(block.proposer_index, ValidatorIndex(1)); 
assert_eq!(block.parent_root, expected_parent_root); - assert_ne!(block.state_root, Bytes32(ssz::H256::zero()), "State root should be computed"); - + assert_ne!( + block.state_root, + Bytes32(ssz::H256::zero()), + "State root should be computed" + ); + // Verify attestations and signatures are empty assert_eq!(attestations.len(), 0); // Check signatures by trying to get first element assert!(signatures.get(0).is_err(), "Signatures should be empty"); - + // Verify post-state has advanced assert_eq!(post_state.slot, Slot(1)); // Note: The post-state's latest_block_header.state_root is zero because it will be // filled in during the next slot processing - assert_eq!(block.parent_root, expected_parent_root, "Parent root should match"); - + assert_eq!( + block.parent_root, expected_parent_root, + "Parent root should match" + ); + // Test 2: Build block with initial attestations let attestation = Attestation { validator_id: Uint64(0), @@ -783,7 +819,7 @@ mod tests { }, }, }; - + let result = genesis_state.build_block( Slot(1), ValidatorIndex(1), @@ -792,45 +828,48 @@ mod tests { None, None, ); - - assert!(result.is_ok(), "Building block with attestations should succeed"); + + assert!( + result.is_ok(), + "Building block with attestations should succeed" + ); let (block, _post_state, attestations, _signatures) = result.unwrap(); - + // Verify attestation was included assert_eq!(attestations.len(), 1); assert_eq!(attestations[0].validator_id, Uint64(0)); // Check that attestation list has one element - assert!(block.body.attestations.get(0).is_ok(), "Block should contain attestation"); - assert!(block.body.attestations.get(1).is_err(), "Block should have only one attestation"); + assert!( + block.body.attestations.get(0).is_ok(), + "Block should contain attestation" + ); + assert!( + block.body.attestations.get(1).is_err(), + "Block should have only one attestation" + ); } #[test] fn test_build_block_advances_state() { // Create genesis state let genesis_state = 
State::generate_genesis(Uint64(0), Uint64(10)); - + // Compute parent root after advancing to target slot let pre_state = genesis_state.process_slots(Slot(5)).unwrap(); let parent_root = hash_tree_root(&pre_state.latest_block_header); - + // Build block at slot 5 // Proposer for slot 5 with 10 validators is (5 % 10) = 5 - let result = genesis_state.build_block( - Slot(5), - ValidatorIndex(5), - parent_root, - None, - None, - None, - ); - + let result = + genesis_state.build_block(Slot(5), ValidatorIndex(5), parent_root, None, None, None); + assert!(result.is_ok()); let (block, post_state, _, _) = result.unwrap(); - + // Verify state advanced through slots assert_eq!(post_state.slot, Slot(5)); assert_eq!(block.slot, Slot(5)); - + // Verify block can be applied to genesis state let transition_result = genesis_state.state_transition_with_validation( SignedBlockWithAttestation { @@ -838,49 +877,45 @@ mod tests { block: block.clone(), proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }, true, // signatures are considered valid (not validating, just marking as valid) true, ); - - assert!(transition_result.is_ok(), "Built block should be valid for state transition"); + + assert!( + transition_result.is_ok(), + "Built block should be valid for state transition" + ); } #[test] fn test_build_block_state_root_matches() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(3)); - + // Compute parent root after advancing to target slot let pre_state = genesis_state.process_slots(Slot(1)).unwrap(); let parent_root = hash_tree_root(&pre_state.latest_block_header); - + // Build a block // Proposer for slot 1 with 3 validators is (1 % 3) = 1 - let result = genesis_state.build_block( - Slot(1), - ValidatorIndex(1), - parent_root, - None, - None, - None, - ); - + let result = + genesis_state.build_block(Slot(1), ValidatorIndex(1), parent_root, None, None, None); + 
assert!(result.is_ok()); let (block, post_state, _, _) = result.unwrap(); - + // Verify the state root in block matches the computed post-state let computed_state_root = hash_tree_root(&post_state); assert_eq!( - block.state_root, - computed_state_root, + block.state_root, computed_state_root, "Block state root should match computed post-state root" ); - + // Verify it's not zero assert_ne!( - block.state_root, + block.state_root, Bytes32(ssz::H256::zero()), "State root should not be zero" ); diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index 396c8f7..cc44285 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -1,7 +1,7 @@ use clap::Parser; -use containers::ssz::SszHash; +use containers::ssz::{PersistentList, SszHash}; use containers::{ - attestation::{Attestation, AttestationData, BlockSignatures}, + attestation::{Attestation, AttestationData}, block::{Block, BlockBody, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, config::Config, @@ -216,7 +216,7 @@ async fn main() { block: genesis_block, proposer_attestation: genesis_proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }; let config = Config { genesis_time }; From 4c53527287b43e7c9f82e99f15faed1d1022428b Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Fri, 19 Dec 2025 11:11:48 +0200 Subject: [PATCH 02/27] minor cleanups --- lean_client/containers/src/block.rs | 19 +--------- lean_client/containers/src/state.rs | 26 +------------ .../containers/tests/test_vectors/runner.rs | 38 ++----------------- .../containers/tests/unit_tests/common.rs | 8 ++-- .../tests/unit_tests/state_transition.rs | 6 ++- .../tests/fork_choice_test_vectors.rs | 8 ++-- 6 files changed, 19 insertions(+), 86 deletions(-) diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index 55b4727..ac5063f 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs 
@@ -170,25 +170,10 @@ impl SignedBlockWithAttestation { // The ordering must be preserved: // 1. Block body attestations, // 2. The proposer attestation. - assert!( - signatures_vec.len() == all_attestations.len(), - "Number of signatures does not match number of attestations" - ); + assert_eq!(signatures_vec.len(), all_attestations.len(), "Number of signatures does not match number of attestations"); let validators = &parent_state.validators; - - // Count validators (PersistentList doesn't expose len directly) - let mut num_validators: u64 = 0; - let mut k: u64 = 0; - loop { - match validators.get(k) { - Ok(_) => { - num_validators += 1; - k += 1; - } - Err(_) => break, - } - } + let num_validators = validators.len_u64(); // Verify each attestation signature for (attestation, signature) in all_attestations.iter().zip(signatures_vec.iter()) { diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index ac84e06..46641e1 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -135,18 +135,7 @@ impl State { /// Simple RR proposer rule (round-robin). 
pub fn is_proposer(&self, index: ValidatorIndex) -> bool { - // Count validators by iterating (since PersistentList doesn't have len()) - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match self.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = self.validators.len_u64(); if num_validators == 0 { return false; // No validators @@ -486,18 +475,7 @@ impl State { if validator_id < votes.len() && !votes[validator_id] { votes[validator_id] = true; - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match self.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = self.validators.len_u64(); let count = votes.iter().filter(|&&v| v).count(); if 3 * count >= 2 * num_validators as usize { diff --git a/lean_client/containers/tests/test_vectors/runner.rs b/lean_client/containers/tests/test_vectors/runner.rs index 9e7ef36..910fde5 100644 --- a/lean_client/containers/tests/test_vectors/runner.rs +++ b/lean_client/containers/tests/test_vectors/runner.rs @@ -83,18 +83,7 @@ impl TestRunner { // Only check validator count if specified in post-state if let Some(expected_count) = post.validator_count { - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); if num_validators as usize != expected_count { return Err(format!( @@ -436,18 +425,7 @@ impl TestRunner { let state = &test_case.pre; - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); println!(" Genesis time: {}, slot: {}, validators: {}", 
state.config.genesis_time, state.slot.0, num_validators); // Verify it's at genesis (slot 0) @@ -555,17 +533,7 @@ impl TestRunner { // Verify validator count if specified if let Some(expected_count) = post.validator_count { - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); if num_validators as usize != expected_count { return Err(format!( diff --git a/lean_client/containers/tests/unit_tests/common.rs b/lean_client/containers/tests/unit_tests/common.rs index 77c2dd5..781c43e 100644 --- a/lean_client/containers/tests/unit_tests/common.rs +++ b/lean_client/containers/tests/unit_tests/common.rs @@ -1,7 +1,7 @@ use containers::{ - Attestation, Attestations, BlockSignatures, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators + Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators }; -use ssz::PersistentList as List; +use ssz::{PersistentList}; pub const DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tests @@ -12,7 +12,7 @@ const _: [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT] = pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Option) -> SignedBlockWithAttestation { let body = BlockBody { - attestations: attestations.unwrap_or_else(List::default), + attestations: attestations.unwrap_or_else(PersistentList::default), }; let block_message = Block { @@ -28,7 +28,7 @@ pub fn create_block(slot: u64, parent_header: &mut 
BlockHeader, attestations: Op block: block_message, proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } diff --git a/lean_client/containers/tests/unit_tests/state_transition.rs b/lean_client/containers/tests/unit_tests/state_transition.rs index 91edfa7..aca04cd 100644 --- a/lean_client/containers/tests/unit_tests/state_transition.rs +++ b/lean_client/containers/tests/unit_tests/state_transition.rs @@ -3,10 +3,11 @@ use containers::{ block::{Block, SignedBlockWithAttestation, BlockWithAttestation, hash_tree_root}, state::State, types::{Bytes32, Uint64}, - Slot, Attestation, BlockSignatures + Slot, Attestation }; use pretty_assertions::assert_eq; use rstest::fixture; +use ssz::PersistentList; #[path = "common.rs"] mod common; @@ -78,6 +79,7 @@ fn test_state_transition_invalid_signatures() { assert_eq!(result.unwrap_err(), "Block signatures must be valid"); } +#[cfg(feature = "devnet1")] #[test] fn test_state_transition_bad_state_root() { let state = genesis_state(); @@ -93,7 +95,7 @@ fn test_state_transition_bad_state_root() { block, proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }; let result = state.state_transition(final_signed_block_with_attestation, true); diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index e2d230a..5718d48 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -4,7 +4,7 @@ use fork_choice::{ }; use containers::{ - attestation::{Attestation, AttestationData, BlockSignatures, SignedAttestation, Signature}, + attestation::{Attestation, AttestationData, SignedAttestation, Signature}, block::{hash_tree_root, Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, 
config::Config, @@ -13,7 +13,7 @@ use containers::{ }; use serde::Deserialize; -use ssz::SszHash; +use ssz::{PersistentList, SszHash}; use std::collections::HashMap; use std::panic::AssertUnwindSafe; @@ -299,7 +299,7 @@ fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAtt block, proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } @@ -329,7 +329,7 @@ fn convert_test_block(test_block_with_att: &TestBlockWithAttestation) -> SignedB block, proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } From f1f955a9c17852b9cf978a6396b1deff9be2a4c7 Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Tue, 23 Dec 2025 16:27:26 +0200 Subject: [PATCH 03/27] added tests, fixed some types --- lean_client/containers/src/attestation.rs | 106 ++++++++- lean_client/containers/src/block.rs | 211 +++++++++++++----- lean_client/containers/src/serde_helpers.rs | 5 +- lean_client/containers/src/state.rs | 19 +- .../unit_tests/attestation_aggregation.rs | 132 +++++++++++ .../containers/tests/unit_tests/common.rs | 55 ++++- .../containers/tests/unit_tests/mod.rs | 1 + .../tests/unit_tests/state_process.rs | 1 + .../tests/unit_tests/state_transition.rs | 101 ++++++++- 9 files changed, 549 insertions(+), 82 deletions(-) create mode 100644 lean_client/containers/tests/unit_tests/attestation_aggregation.rs diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 6a820c7..302ef08 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -1,8 +1,9 @@ use crate::{Checkpoint, Slot, Uint64}; use serde::{Deserialize, Serialize}; +use ssz::BitList; use ssz::ByteVector; use ssz_derive::Ssz; -use typenum::{Prod, Sum, U100, U31, U12}; +use typenum::{Prod, Sum, U100, U12, U31}; pub type U3100 = Prod; @@ -21,11 +22,64 @@ pub type Attestations = ssz::PersistentList; pub type 
AggregatedAttestations = ssz::PersistentList; +#[cfg(feature = "devnet1")] pub type AttestationSignatures = ssz::PersistentList; +#[cfg(feature = "devnet2")] +pub type AttestationSignatures = ssz::PersistentList; + +#[cfg(feature = "devnet2")] +pub type NaiveAggregatedSignature = ssz::PersistentList; + /// Bitlist representing validator participation in an attestation. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). -pub type AggregationBits = ssz::BitList; +#[derive(Clone, Debug, PartialEq, Eq, Default, Ssz, Serialize, Deserialize)] +pub struct AggregationBits(pub BitList); + +impl AggregationBits { + pub const LIMIT: u64 = 4096; + + pub fn from_validator_indices(indices: &[u64]) -> Self { + assert!( + !indices.is_empty(), + "Aggregated attestation must reference at least one validator" + ); + + let max_id = *indices.iter().max().unwrap(); + assert!( + max_id < Self::LIMIT, + "Validator index out of range for aggregation bits" + ); + + let mut bits = BitList::::with_length((max_id + 1) as usize); + + for i in 0..=max_id { + bits.set(i as usize, false); + } + + for &i in indices { + bits.set(i as usize, true); + } + + AggregationBits(bits) + } + + pub fn to_validator_indices(&self) -> Vec { + let indices: Vec = self + .0 + .iter() + .enumerate() + .filter_map(|(i, bit)| if *bit { Some(i as u64) } else { None }) + .collect(); + + assert!( + !indices.is_empty(), + "Aggregated attestation must reference at least one validator" + ); + + indices + } +} /// Naive list of validator signatures used for aggregation placeholders. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). @@ -57,13 +111,8 @@ pub struct Attestation { /// Validator attestation bundled with its signature. 
#[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAttestation { - #[cfg(feature = "devnet2")] - pub validator_id: u64, - #[cfg(feature = "devnet2")] - pub message: AttestationData, - #[cfg(feature = "devnet1")] pub message: Attestation, - /// signature over attestaion message only as it would be aggregated later in attestation + /// Signature aggregation produced by the leanVM (SNARKs in the future). pub signature: Signature, } @@ -73,12 +122,51 @@ pub struct AggregatedAttestation { /// Bitfield indicating which validators participated in the aggregation. pub aggregation_bits: AggregationBits, /// Combined attestation data similar to the beacon chain format. - /// + /// /// Multiple validator attestations are aggregated here without the complexity of /// committee assignments. pub data: AttestationData, } +impl AggregatedAttestation { + pub fn aggregate_by_data(attestations: &[Attestation]) -> Vec { + let mut groups: Vec<(AttestationData, Vec)> = Vec::new(); + + for attestation in attestations { + // Try to find an existing group with the same data + if let Some((_, validator_ids)) = groups + .iter_mut() + .find(|(data, _)| *data == attestation.data) + { + validator_ids.push(attestation.validator_id.0); + } else { + // Create a new group + groups.push((attestation.data.clone(), vec![attestation.validator_id.0])); + } + } + + groups + .into_iter() + .map(|(data, validator_ids)| AggregatedAttestation { + aggregation_bits: AggregationBits::from_validator_indices(&validator_ids), + data, + }) + .collect() + } + + pub fn to_plain(&self) -> Vec { + let validator_indices = self.aggregation_bits.to_validator_indices(); + + validator_indices + .into_iter() + .map(|validator_id| Attestation { + validator_id: Uint64(validator_id), + data: self.data.clone(), + }) + .collect() + } +} + /// Aggregated attestation bundled with aggregated signatures. 
#[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAggregatedAttestation { diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index ac5063f..0acf1b2 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs @@ -4,9 +4,10 @@ use ssz_derive::Ssz; #[cfg(feature = "xmss-verify")] use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to_the_20::target_sum::SIGTargetSumLifetime20W2NoOff; -use ssz::PersistentList; +use ssz::{PersistentList, SszHash}; use typenum::U4096; -use crate::attestation::AttestationSignatures; +use crate::attestation::{AggregatedAttestations, AttestationSignatures}; +use crate::validator::BlsPublicKey; /// The body of a block, containing payload data. /// @@ -15,7 +16,7 @@ use crate::attestation::AttestationSignatures; #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct BlockBody { #[cfg(feature = "devnet2")] - pub attestations: VariableList, + pub attestations: AggregatedAttestations, #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers")] pub attestations: Attestations, @@ -51,7 +52,7 @@ pub struct BlockWithAttestation { pub proposer_attestation: Attestation, } -#[derive(Debug, PartialEq, Eq, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Ssz, Deserialize, Default)] pub struct BlockSignatures { pub attestation_signatures: AttestationSignatures, pub proposer_signature: Signature, @@ -127,6 +128,7 @@ impl SignedBlockWithAttestation { /// /// - Spec: /// - XMSS Library: + #[cfg(feature = "devnet1")] pub fn verify_signatures(&self, parent_state: State) -> bool { // Unpack the signed block components let block = &self.message.block; @@ -138,7 +140,7 @@ impl SignedBlockWithAttestation { // 1. Block body attestations (from other validators) // 2. 
Proposer attestation (from the block producer) let mut all_attestations: Vec = Vec::new(); - + // Collect block body attestations let mut i: u64 = 0; loop { @@ -148,7 +150,7 @@ impl SignedBlockWithAttestation { } i += 1; } - + // Append proposer attestation all_attestations.push(self.message.proposer_attestation.clone()); @@ -170,7 +172,11 @@ impl SignedBlockWithAttestation { // The ordering must be preserved: // 1. Block body attestations, // 2. The proposer attestation. - assert_eq!(signatures_vec.len(), all_attestations.len(), "Number of signatures does not match number of attestations"); + assert_eq!( + signatures_vec.len(), + all_attestations.len(), + "Number of signatures does not match number of attestations" + ); let validators = &parent_state.validators; let num_validators = validators.len_u64(); @@ -193,60 +199,149 @@ impl SignedBlockWithAttestation { // - The validator possesses the secret key for their public key // - The attestation has not been tampered with // - The signature was created at the correct epoch (slot) - - #[cfg(feature = "xmss-verify")] - { - use leansig::signature::SignatureScheme; - use leansig::serialization::Serializable; - - // Compute the message hash from the attestation - let message_bytes: [u8; 32] = hash_tree_root(attestation).0.into(); - let epoch = attestation.data.slot.0 as u32; - - // Get public key bytes - use as_bytes() method - let pubkey_bytes = validator.pubkey.0.as_bytes(); - - // Deserialize the public key using Serializable trait - type PubKey = ::PublicKey; - let pubkey = match PubKey::from_bytes(pubkey_bytes) { - Ok(pk) => pk, - Err(e) => { - eprintln!("Failed to deserialize public key at slot {:?}: {:?}", attestation.data.slot, e); - return false; - } - }; - - // Get signature bytes - use as_bytes() method - let sig_bytes = signature.as_bytes(); - - // Deserialize the signature using Serializable trait - type Sig = ::Signature; - let sig = match Sig::from_bytes(sig_bytes) { - Ok(s) => s, - Err(e) => { - 
eprintln!("Failed to deserialize signature at slot {:?}: {:?}", attestation.data.slot, e); - return false; - } - }; - - // Verify the signature - if !SIGTargetSumLifetime20W2NoOff::verify(&pubkey, epoch, &message_bytes, &sig) { - eprintln!("XMSS signature verification failed at slot {:?}", attestation.data.slot); - return false; - } - } - - #[cfg(not(feature = "xmss-verify"))] + + let message_bytes: [u8; 32] = hash_tree_root(attestation).0.into(); + + assert!( + verify_xmss_signature( + validator.pubkey.0.as_bytes(), + attestation.data.slot, + &message_bytes, + &signature, + ), + "Attestation signature verification failed" + ); + } + + true + } + + #[cfg(feature = "devnet2")] + pub fn verify_signatures(&self, parent_state: State) -> bool { + // Unpack the signed block components + let block = &self.message.block; + let signatures = &self.signature; + let aggregated_attestations = block.body.attestations.clone(); + let attestation_signatures = signatures.attestation_signatures.clone(); + + // Verify signature count matches aggregated attestation count + assert_eq!( + aggregated_attestations.len_u64(), + attestation_signatures.len_u64(), + "Number of signatures does not match number of attestations" + ); + + let validators = &parent_state.validators; + let num_validators = validators.len_u64(); + + // Verify each attestation signature + for (aggregated_attestation, aggregated_signature) in (&aggregated_attestations) + .into_iter() + .zip((&attestation_signatures).into_iter()) + { + let validator_ids = aggregated_attestation + .aggregation_bits + .to_validator_indices(); + + assert_eq!( + aggregated_signature.len_u64(), + validator_ids.len() as u64, + "Aggregated attestation signature count mismatch" + ); + + let attestation_root = aggregated_attestation.data.hash_tree_root(); + + // Loop through zipped validator IDs and their corresponding signatures + // Verify each individual signature within the aggregated attestation + for (validator_id, signature) in + 
validator_ids.iter().zip(aggregated_signature.into_iter()) { - // Placeholder: XMSS verification disabled - // To enable, compile with --features xmss-verify - let _pubkey = &validator.pubkey; - let _slot = attestation.data.slot; - let _message = hash_tree_root(attestation); - let _sig = signature; + // Ensure validator exists in the active set + assert!( + *validator_id < num_validators, + "Validator index out of range" + ); + + let validator = validators.get(*validator_id).expect("validator must exist"); + + // Get the actual payload root for the attestation data + let attestation_root: [u8; 32] = + hash_tree_root(&aggregated_attestation.data).0.into(); + + // Verify the XMSS signature + assert!( + verify_xmss_signature( + validator.pubkey.0.as_bytes(), + aggregated_attestation.data.slot, + &attestation_root, + signature, + ), + "Attestation signature verification failed" + ); } + + // Verify the proposer attestation signature + let proposer_attestation = self.message.proposer_attestation.clone(); + let proposer_signature = signatures.proposer_signature; + + assert!( + proposer_attestation.validator_id.0 < num_validators, + "Proposer index out of range" + ); + + let proposer = validators + .get(proposer_attestation.validator_id.0) + .expect("proposer must exist"); + + let proposer_root: [u8; 32] = hash_tree_root(&proposer_attestation).0.into(); + assert!( + verify_xmss_signature( + proposer.pubkey.0.as_bytes(), + proposer_attestation.data.slot, + &proposer_root, + &proposer_signature, + ), + "Proposer attestation signature verification failed" + ); } true } -} \ No newline at end of file +} + +#[cfg(feature = "xmss-verify")] +pub fn verify_xmss_signature( + pubkey_bytes: &[u8], + slot: Slot, + message_bytes: &[u8; 32], + signature: &Signature, +) -> bool { + use leansig::serialization::Serializable; + use leansig::signature::SignatureScheme; + + let epoch = slot.0 as u32; + + type PubKey = ::PublicKey; + let pubkey = match PubKey::from_bytes(pubkey_bytes) { + 
Ok(pk) => pk, + Err(_) => return false, + }; + + type Sig = ::Signature; + let sig = match Sig::from_bytes(signature.as_bytes()) { + Ok(s) => s, + Err(_) => return false, + }; + + SIGTargetSumLifetime20W2NoOff::verify(&pubkey, epoch, message_bytes, &sig) +} + +#[cfg(not(feature = "xmss-verify"))] +pub fn verify_xmss_signature( + _pubkey_bytes: &[u8], + _slot: Slot, + _message_bytes: &[u8; 32], + _signature: &Signature, +) -> bool { + true +} diff --git a/lean_client/containers/src/serde_helpers.rs b/lean_client/containers/src/serde_helpers.rs index 0568f71..01604e5 100644 --- a/lean_client/containers/src/serde_helpers.rs +++ b/lean_client/containers/src/serde_helpers.rs @@ -187,6 +187,7 @@ pub mod signature { /// where each signature can be either hex string or structured XMSS format pub mod block_signatures { use super::*; + use crate::block::BlockSignatures; use crate::Signature; use serde_json::Value; use ssz::PersistentList; @@ -309,11 +310,11 @@ pub mod block_signatures { } #[cfg(feature = "devnet2")] - pub fn serialize(value: &BlockSignatures, serializer: S) -> Result + pub fn serialize(_value: &BlockSignatures, _serializer: S) -> Result where S: Serializer, { - Err(serde::de::Error::custom( + Err(serde::ser::Error::custom( "BlockSignatures serialization not implemented for devnet2", )) } diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 46641e1..02a26e3 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -8,6 +8,8 @@ use ssz::{PersistentList as List, PersistentList}; use ssz_derive::Ssz; use std::collections::BTreeMap; use typenum::U4096; +use crate::attestation::AggregatedAttestations; +use crate::block::BlockSignatures; pub const VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const JUSTIFICATION_ROOTS_LIMIT: usize = 1 << 18; // 262144 @@ -294,7 +296,20 @@ impl State { pub fn process_block(&self, block: &Block) -> Result { let state = 
self.process_block_header(block)?; + #[cfg(feature = "devnet1")] let state_after_ops = state.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let state_after_ops = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation).map_err(|e| format!("Failed to push attestation: {:?}", e))?; + } + } + state.process_attestations(&unaggregated_attestations) + }; // State root validation is handled by state_transition_with_validation when needed @@ -688,7 +703,7 @@ mod tests { config: st.config.clone(), ..st.clone() } - .is_proposer(ValidatorIndex(0))); + .is_proposer(ValidatorIndex(0))); } #[test] @@ -828,6 +843,7 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block_advances_state() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(10)); @@ -868,6 +884,7 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block_state_root_matches() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(3)); diff --git a/lean_client/containers/tests/unit_tests/attestation_aggregation.rs b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs new file mode 100644 index 0000000..285aa46 --- /dev/null +++ b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs @@ -0,0 +1,132 @@ +#[cfg(feature = "devnet2")] +#[cfg(test)] +mod tests { + use containers::attestation::{AggregatedAttestation, AggregationBits, Attestation, AttestationData}; + use containers::{Bytes32, Uint64}; + use containers::checkpoint::Checkpoint; + use containers::slot::Slot; + + #[test] + fn test_aggregated_attestation_structure() { + let att_data = AttestationData { + slot: Slot(5), + head: 
Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + } + }; + + let bits = AggregationBits::from_validator_indices(&vec![2, 7]); + let agg = AggregatedAttestation { + aggregation_bits: bits.clone(), + data: att_data.clone() + }; + + let indices = agg.aggregation_bits.to_validator_indices(); + assert_eq!(indices.into_iter().collect::>(), vec![2, 7].into_iter().collect()); + assert_eq!(agg.data, att_data); + } + + #[test] + fn test_aggregate_attestations_by_common_data() { + let att_data1 = AttestationData { + slot: Slot(5), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + } + }; + let att_data2 = AttestationData { + slot: Slot(6), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(5), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + } + }; + + let attestations = vec![ + Attestation { + validator_id: Uint64(1), + data: att_data1.clone(), + }, + Attestation { + validator_id: Uint64(3), + data: att_data1.clone(), + }, + Attestation { + validator_id: Uint64(5), + data: att_data2.clone(), + }, + ]; + + let aggregated = AggregatedAttestation::aggregate_by_data(&attestations); + assert_eq!(aggregated.len(), 2); + + let agg1 = aggregated.iter().find(|agg| agg.data == att_data1).unwrap(); + let validator_ids1 = agg1.aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids1.into_iter().collect::>(), vec![1, 3].into_iter().collect()); + + let agg2 = aggregated.iter().find(|agg| agg.data == att_data2).unwrap(); + let validator_ids2 = agg2.aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids2, vec![5]); + } + + #[test] + fn 
test_aggregate_empty_attestations() { + let aggregated = AggregatedAttestation::aggregate_by_data(&[]); + assert!(aggregated.is_empty()); + } + + #[test] + fn test_aggregate_single_attestation() { + let att_data = AttestationData { + slot: Slot(5), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + } + }; + + let attestations = vec![Attestation { + validator_id: Uint64(5), + data: att_data.clone(), + }]; + let aggregated = AggregatedAttestation::aggregate_by_data(&attestations); + + assert_eq!(aggregated.len(), 1); + let validator_ids = aggregated[0].aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids, vec![5]); + } +} diff --git a/lean_client/containers/tests/unit_tests/common.rs b/lean_client/containers/tests/unit_tests/common.rs index 781c43e..26fa0a5 100644 --- a/lean_client/containers/tests/unit_tests/common.rs +++ b/lean_client/containers/tests/unit_tests/common.rs @@ -1,7 +1,7 @@ -use containers::{ - Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators -}; +use containers::{Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators, AggregatedAttestation, Signature}; use ssz::{PersistentList}; +use typenum::U4096; +use containers::block::BlockSignatures; pub const DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tests @@ -11,9 +11,38 @@ const _: [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT] = [(); 
DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT]; pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Option) -> SignedBlockWithAttestation { + #[cfg(feature = "devnet1")] let body = BlockBody { attestations: attestations.unwrap_or_else(PersistentList::default), }; + #[cfg(feature = "devnet2")] + let body = BlockBody { + attestations: { + let attestations_vec = attestations.unwrap_or_default(); + + // Convert PersistentList into a Vec + let attestations_vec: Vec = attestations_vec.into_iter().cloned().collect(); + + let aggregated: Vec = + AggregatedAttestation::aggregate_by_data(&attestations_vec); + + + let aggregated: Vec = + AggregatedAttestation::aggregate_by_data(&attestations_vec); + + // Create a new empty PersistentList + let mut persistent_list: PersistentList = PersistentList::default(); + + // Push each aggregated attestation + for agg in aggregated { + persistent_list.push(agg).expect("PersistentList capacity exceeded"); + } + + persistent_list + }, + // other BlockBody fields... 
+ }; + let block_message = Block { slot: Slot(slot), @@ -23,13 +52,29 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op body: body, }; - SignedBlockWithAttestation { + #[cfg(feature = "devnet1")] + let return_value = SignedBlockWithAttestation { message: BlockWithAttestation { block: block_message, proposer_attestation: Attestation::default(), }, signature: PersistentList::default(), - } + }; + + #[cfg(feature = "devnet2")] + let return_value = SignedBlockWithAttestation { + message: BlockWithAttestation { + block: block_message, + proposer_attestation: Attestation::default(), + }, + signature: BlockSignatures { + attestation_signatures: PersistentList::default(), + proposer_signature: Signature::default(), + } + }; + + return_value + } pub fn create_attestations(indices: &[usize]) -> Vec { diff --git a/lean_client/containers/tests/unit_tests/mod.rs b/lean_client/containers/tests/unit_tests/mod.rs index 16a5646..b9f442f 100644 --- a/lean_client/containers/tests/unit_tests/mod.rs +++ b/lean_client/containers/tests/unit_tests/mod.rs @@ -4,3 +4,4 @@ mod state_basic; mod state_justifications; mod state_process; mod state_transition; +mod attestation_aggregation; diff --git a/lean_client/containers/tests/unit_tests/state_process.rs b/lean_client/containers/tests/unit_tests/state_process.rs index 7db1849..afc1887 100644 --- a/lean_client/containers/tests/unit_tests/state_process.rs +++ b/lean_client/containers/tests/unit_tests/state_process.rs @@ -106,6 +106,7 @@ fn test_process_block_header_invalid( } // This test verifies that attestations correctly justify and finalize slots +#[cfg(feature = "devnet1")] #[test] fn test_process_attestations_justification_and_finalization() { let mut state = genesis_state(); diff --git a/lean_client/containers/tests/unit_tests/state_transition.rs b/lean_client/containers/tests/unit_tests/state_transition.rs index aca04cd..9fe6abb 100644 --- a/lean_client/containers/tests/unit_tests/state_transition.rs 
+++ b/lean_client/containers/tests/unit_tests/state_transition.rs @@ -1,9 +1,9 @@ // tests/state_transition.rs use containers::{ - block::{Block, SignedBlockWithAttestation, BlockWithAttestation, hash_tree_root}, + block::{hash_tree_root, Block, BlockWithAttestation, SignedBlockWithAttestation}, state::State, types::{Bytes32, Uint64}, - Slot, Attestation + Attestation, Attestations, Slot, }; use pretty_assertions::assert_eq; use rstest::fixture; @@ -24,13 +24,29 @@ fn test_state_transition_full() { let state = genesis_state(); let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); - let signed_block_with_attestation = create_block(1, &mut state_at_slot_1.latest_block_header, None); + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); let block = signed_block_with_attestation.message.block.clone(); // Use process_block_header + process_operations to avoid state root validation during setup let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] let expected_state = state_after_header.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let expected_state = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation); + } + } + state_after_header.process_attestations(&unaggregated_attestations) + }; + let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), ..block @@ -44,7 +60,9 @@ fn test_state_transition_full() { signature: signed_block_with_attestation.signature, }; - let final_state = state.state_transition(final_signed_block_with_attestation, true).unwrap(); + let final_state = state + 
.state_transition(final_signed_block_with_attestation, true) + .unwrap(); assert_eq!(final_state, expected_state); } @@ -54,13 +72,29 @@ fn test_state_transition_invalid_signatures() { let state = genesis_state(); let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); - let signed_block_with_attestation = create_block(1, &mut state_at_slot_1.latest_block_header, None); + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); let block = signed_block_with_attestation.message.block.clone(); // Use process_block_header + process_operations to avoid state root validation during setup let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] let expected_state = state_after_header.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let expected_state = { + let mut list = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + list.push(attestation); + } + } + list + }; + let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), ..block @@ -85,7 +119,8 @@ fn test_state_transition_bad_state_root() { let state = genesis_state(); let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); - let signed_block_with_attestation = create_block(1, &mut state_at_slot_1.latest_block_header, None); + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); let mut block = signed_block_with_attestation.message.block.clone(); block.state_root = Bytes32(ssz::H256::zero()); @@ -101,4 +136,56 @@ fn test_state_transition_bad_state_root() { let result = state.state_transition(final_signed_block_with_attestation, true); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "Invalid block 
state root"); -} \ No newline at end of file +} + +#[cfg(feature = "devnet2")] +#[test] +fn test_state_transition_devnet2() { + let state = genesis_state(); + let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); + + // Create a block with attestations for devnet2 + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); + let block = signed_block_with_attestation.message.block.clone(); + + // Process the block header and attestations + let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] + let expected_state = state_after_header.process_attestations(&block.body.attestations); + + #[cfg(feature = "devnet2")] + let expected_state = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation); + } + } + state_after_header.process_attestations(&unaggregated_attestations) + }; + + // Ensure the state root matches the expected state + let block_with_correct_root = Block { + state_root: hash_tree_root(&expected_state), + ..block + }; + + let final_signed_block_with_attestation = SignedBlockWithAttestation { + message: BlockWithAttestation { + block: block_with_correct_root, + proposer_attestation: signed_block_with_attestation.message.proposer_attestation, + }, + signature: signed_block_with_attestation.signature, + }; + + // Perform the state transition and validate the result + let final_state = state + .state_transition(final_signed_block_with_attestation, true) + .unwrap(); + + assert_eq!(final_state, expected_state); +} From ed67aaf959a481a99db6ae1ecb3679920444152e Mon Sep 17 00:00:00 2001 From: Julius Mieliauskas Date: Mon, 29 Dec 2025 12:37:57 +0200 Subject: [PATCH 04/27] fixed 
environment selection by adding a minimal crate `env-config`. Added readme on how to select devnet --- lean_client/Cargo.lock | 18 +- lean_client/Cargo.toml | 8 +- lean_client/ENVIRONMENT_SELECTION.md | 26 +++ lean_client/containers/Cargo.toml | 7 +- lean_client/containers/src/attestation.rs | 6 +- lean_client/containers/src/state.rs | 4 +- lean_client/containers/tests/main.rs | 2 +- .../tests/test_vectors/block_processing.rs | 10 + .../containers/tests/test_vectors/runner.rs | 19 +- .../tests/test_vectors/verify_signatures.rs | 4 + lean_client/env-config/Cargo.toml | 12 ++ lean_client/env-config/src/lib.rs | 1 + lean_client/fork_choice/Cargo.toml | 8 +- lean_client/fork_choice/src/handlers.rs | 183 +++++++++++++----- lean_client/fork_choice/src/store.rs | 3 + .../tests/fork_choice_test_vectors.rs | 10 + .../fork_choice/tests/unit_tests/votes.rs | 9 + lean_client/networking/Cargo.toml | 6 + lean_client/networking/src/network/service.rs | 7 + lean_client/networking/src/types.rs | 5 + lean_client/src/main.rs | 54 +++++- lean_client/validator/Cargo.toml | 3 + lean_client/validator/src/lib.rs | 120 ++++++++++-- 23 files changed, 430 insertions(+), 95 deletions(-) create mode 100644 lean_client/ENVIRONMENT_SELECTION.md create mode 100644 lean_client/env-config/Cargo.toml create mode 100644 lean_client/env-config/src/lib.rs diff --git a/lean_client/Cargo.lock b/lean_client/Cargo.lock index 93fb9dd..910d9c1 100644 --- a/lean_client/Cargo.lock +++ b/lean_client/Cargo.lock @@ -859,6 +859,7 @@ dependencies = [ name = "containers" version = "0.1.0" dependencies = [ + "env-config", "hex", "leansig", "pretty_assertions", @@ -1399,6 +1400,10 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "env-config" +version = "0.1.0" + [[package]] name = "equivalent" version = "1.0.2" @@ -1412,7 +1417,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - 
"windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -1586,6 +1591,7 @@ name = "fork-choice" version = "0.1.0" dependencies = [ "containers", + "env-config", "serde", "serde_json", "ssz", @@ -2311,7 +2317,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", "serde_core", ] @@ -3217,6 +3223,7 @@ dependencies = [ "async-trait", "containers", "enr", + "env-config", "futures", "libp2p", "libp2p-identity 0.2.12", @@ -3265,7 +3272,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.1", ] [[package]] @@ -4268,7 +4275,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -4919,7 +4926,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -5340,6 +5347,7 @@ name = "validator" version = "0.1.0" dependencies = [ "containers", + "env-config", "fork-choice", "leansig", "serde", diff --git a/lean_client/Cargo.toml b/lean_client/Cargo.toml index 7d98ae7..9e72c43 100644 --- a/lean_client/Cargo.toml +++ b/lean_client/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["chain", "containers", "fork_choice", "networking", "validator"] +members = ["chain", "containers", "env-config", "fork_choice", "networking", "validator"] resolver = "2" [workspace.package] @@ -14,7 +14,7 @@ containers = { path = "./containers" } fork_choice = { path = "./fork_choice" } networking = { path = "./networking" } validator = { path = "./validator" } -libp2p = {version = "0.56.0", default-features = false, features = [ +libp2p = { version = "0.56.0", default-features = false, features = [ 'dns', 'gossipsub', 
'identify', @@ -52,8 +52,10 @@ version = "0.1.0" edition = "2021" [features] -default = ["xmss-signing"] +default = ["devnet2", "xmss-signing"] xmss-signing = ["validator/xmss-signing"] +devnet1 = ["containers/devnet1", "fork-choice/devnet1", "networking/devnet1", "validator/devnet1"] +devnet2 = ["containers/devnet2", "fork-choice/devnet2", "networking/devnet2", "validator/devnet2"] [dependencies] chain = { path = "./chain" } diff --git a/lean_client/ENVIRONMENT_SELECTION.md b/lean_client/ENVIRONMENT_SELECTION.md new file mode 100644 index 0000000..d906c9d --- /dev/null +++ b/lean_client/ENVIRONMENT_SELECTION.md @@ -0,0 +1,26 @@ +### To select which devnet you want to compile + +#### Option A +- Change the default features in root `Cargo.toml`: +```toml +[features] +default = ["devnet1", "<...other features>"] # Change to "devnet2" if needed +devnet1 = [...] +devnet2 = [...] +``` + +#### Option B +- Use the `--no-default-features` flag and specify the desired devnet feature when building or running the project: +```bash +cargo build --no-default-features --features devnet1 # Change to devnet2 +``` + + +### Running tests for a specific devnet + +From root directory, use the following command: +```bash +cargo test -p <crate_name> --no-default-features --features devnet1 # Change to devnet2 +``` + +Use `<crate_name>` to specify the crate you want to test. 
\ No newline at end of file diff --git a/lean_client/containers/Cargo.toml b/lean_client/containers/Cargo.toml index b6a45c3..29e8ecd 100644 --- a/lean_client/containers/Cargo.toml +++ b/lean_client/containers/Cargo.toml @@ -5,15 +5,16 @@ edition = "2021" [features] xmss-verify = ["leansig"] -default = ["devnet1"] -devnet1 = [] -devnet2 = [] +default = [] +devnet1 = ["env-config/devnet1"] +devnet2 = ["env-config/devnet2"] [lib] name = "containers" path = "src/lib.rs" [dependencies] +env-config = { path = "../env-config", default-features = false } ssz = { git = "https://github.com/grandinetech/grandine", package = "ssz", branch = "develop", submodules = true } ssz_derive = { git = "https://github.com/grandinetech/grandine", package = "ssz_derive", branch = "develop", submodules = false } typenum = "1" diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 302ef08..9c3537c 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -111,8 +111,12 @@ pub struct Attestation { /// Validator attestation bundled with its signature. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAttestation { + #[cfg(feature = "devnet2")] + pub validator_id: u64, + #[cfg(feature = "devnet2")] + pub message: AttestationData, + #[cfg(feature = "devnet1")] pub message: Attestation, - /// Signature aggregation produced by the leanVM (SNARKs in the future). 
pub signature: Signature, } diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 02a26e3..b176e55 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -1,5 +1,5 @@ use crate::validator::Validator; -use crate::{block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, Slot, Uint64, ValidatorIndex}; +use crate::{block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, SignedAttestation, Slot, Uint64, ValidatorIndex}; use crate::{ HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, }; @@ -686,7 +686,7 @@ impl State { _proposer_index: ValidatorIndex, _parent_root: Bytes32, _initial_attestations: Option>, - _available_signed_attestations: Option<&[SignedBlockWithAttestation]>, + _available_signed_attestations: Option<&[SignedAttestation]>, _known_block_roots: Option<&std::collections::HashSet>, ) -> Result<(Block, Self, Vec, BlockSignatures), String> { Err("build_block is not implemented for devnet2".to_string()) diff --git a/lean_client/containers/tests/main.rs b/lean_client/containers/tests/main.rs index 96deacd..4d48535 100644 --- a/lean_client/containers/tests/main.rs +++ b/lean_client/containers/tests/main.rs @@ -1,4 +1,4 @@ -// tests/main.rs - Test entry point +// tests/lib - Test entry point mod debug_deserialize; mod unit_tests; mod test_vectors; \ No newline at end of file diff --git a/lean_client/containers/tests/test_vectors/block_processing.rs b/lean_client/containers/tests/test_vectors/block_processing.rs index 4dcd641..caec865 100644 --- a/lean_client/containers/tests/test_vectors/block_processing.rs +++ b/lean_client/containers/tests/test_vectors/block_processing.rs @@ -2,6 +2,7 @@ use super::runner::TestRunner; #[test] +#[cfg(feature = "devnet1")] 
fn test_process_first_block_after_genesis() { let test_path = "../tests/test_vectors/test_blocks/test_process_first_block_after_genesis.json"; TestRunner::run_block_processing_test(test_path) @@ -9,6 +10,7 @@ fn test_process_first_block_after_genesis() { } #[test] +#[cfg(feature = "devnet1")] fn test_blocks_with_gaps() { let test_path = "../tests/test_vectors/test_blocks/test_blocks_with_gaps.json"; TestRunner::run_block_processing_test(test_path) @@ -16,6 +18,7 @@ fn test_blocks_with_gaps() { } #[test] +#[cfg(feature = "devnet1")] fn test_linear_chain_multiple_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_linear_chain_multiple_blocks.json"; TestRunner::run_block_processing_test(test_path) @@ -23,6 +26,7 @@ fn test_linear_chain_multiple_blocks() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_extends_deep_chain() { let test_path = "../tests/test_vectors/test_blocks/test_block_extends_deep_chain.json"; TestRunner::run_block_processing_test(test_path) @@ -30,6 +34,7 @@ fn test_block_extends_deep_chain() { } #[test] +#[cfg(feature = "devnet1")] fn test_empty_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks.json"; TestRunner::run_block_processing_test(test_path) @@ -37,6 +42,7 @@ fn test_empty_blocks() { } #[test] +#[cfg(feature = "devnet1")] fn test_empty_blocks_with_missed_slots() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks_with_missed_slots.json"; TestRunner::run_block_processing_test(test_path) @@ -44,6 +50,7 @@ fn test_empty_blocks_with_missed_slots() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_at_large_slot_number() { let test_path = "../tests/test_vectors/test_blocks/test_block_at_large_slot_number.json"; TestRunner::run_block_processing_test(test_path) @@ -53,6 +60,7 @@ fn test_block_at_large_slot_number() { // Invalid block tests (expecting failures) #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_parent_root() { let test_path = 
"../tests/test_vectors/test_blocks/test_block_with_invalid_parent_root.json"; TestRunner::run_block_processing_test(test_path) @@ -60,6 +68,7 @@ fn test_block_with_invalid_parent_root() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_proposer() { let test_path = "../tests/test_vectors/test_blocks/test_block_with_invalid_proposer.json"; TestRunner::run_block_processing_test(test_path) @@ -67,6 +76,7 @@ fn test_block_with_invalid_proposer() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_state_root() { let test_path = "../tests/test_vectors/test_blocks/test_block_with_invalid_state_root.json"; TestRunner::run_block_processing_test(test_path) diff --git a/lean_client/containers/tests/test_vectors/runner.rs b/lean_client/containers/tests/test_vectors/runner.rs index 910fde5..bf23138 100644 --- a/lean_client/containers/tests/test_vectors/runner.rs +++ b/lean_client/containers/tests/test_vectors/runner.rs @@ -552,6 +552,7 @@ impl TestRunner { /// Test runner for verify_signatures test vectors /// Tests XMSS signature verification on SignedBlockWithAttestation + #[cfg(feature = "devnet1")] pub fn run_verify_signatures_test>(path: P) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; @@ -571,25 +572,11 @@ impl TestRunner { println!(" Block slot: {}", signed_block.message.block.slot.0); println!(" Proposer index: {}", signed_block.message.block.proposer_index.0); - // Count attestations - let mut attestation_count = 0u64; - loop { - match signed_block.message.block.body.attestations.get(attestation_count) { - Ok(_) => attestation_count += 1, - Err(_) => break, - } - } + let attestation_count = signed_block.message.block.body.attestations.len_u64(); println!(" Attestations in block: {}", attestation_count); println!(" Proposer attestation validator: {}", signed_block.message.proposer_attestation.validator_id.0); - // Count signatures - let mut signature_count = 0u64; - loop { - match 
signed_block.signature.get(signature_count) { - Ok(_) => signature_count += 1, - Err(_) => break, - } - } + let signature_count = signed_block.signature.len_u64(); println!(" Signatures: {}", signature_count); // Check if we expect this test to fail diff --git a/lean_client/containers/tests/test_vectors/verify_signatures.rs b/lean_client/containers/tests/test_vectors/verify_signatures.rs index 2bca4ca..cfc3301 100644 --- a/lean_client/containers/tests/test_vectors/verify_signatures.rs +++ b/lean_client/containers/tests/test_vectors/verify_signatures.rs @@ -15,6 +15,7 @@ use super::runner::TestRunner; // Without xmss-verify feature, they pass because structural validation succeeds. #[test] +#[cfg(feature = "devnet1")] fn test_proposer_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_signature.json"; TestRunner::run_verify_signatures_test(test_path) @@ -22,6 +23,7 @@ fn test_proposer_signature() { } #[test] +#[cfg(feature = "devnet1")] fn test_proposer_and_attester_signatures() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_and_attester_signatures.json"; TestRunner::run_verify_signatures_test(test_path) @@ -34,6 +36,7 @@ fn test_proposer_and_attester_signatures() { // Run with `cargo test --features xmss-verify` to enable full signature verification. #[test] +#[cfg(feature = "devnet1")] #[ignore = "Requires xmss-verify feature for actual signature validation. Run with: cargo test --features xmss-verify"] fn test_invalid_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_invalid_signature.json"; @@ -42,6 +45,7 @@ fn test_invalid_signature() { } #[test] +#[cfg(feature = "devnet1")] #[ignore = "Requires xmss-verify feature for actual signature validation. 
Run with: cargo test --features xmss-verify"] fn test_mixed_valid_invalid_signatures() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_mixed_valid_invalid_signatures.json"; diff --git a/lean_client/env-config/Cargo.toml b/lean_client/env-config/Cargo.toml new file mode 100644 index 0000000..4b761e5 --- /dev/null +++ b/lean_client/env-config/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "env-config" +version.workspace = true +edition.workspace = true +authors.workspace = true +license.workspace = true + +[features] +devnet1 = [] +devnet2 = [] + +[dependencies] diff --git a/lean_client/env-config/src/lib.rs b/lean_client/env-config/src/lib.rs new file mode 100644 index 0000000..972005d --- /dev/null +++ b/lean_client/env-config/src/lib.rs @@ -0,0 +1 @@ +// Empty on purpose \ No newline at end of file diff --git a/lean_client/fork_choice/Cargo.toml b/lean_client/fork_choice/Cargo.toml index f906f59..b16f561 100644 --- a/lean_client/fork_choice/Cargo.toml +++ b/lean_client/fork_choice/Cargo.toml @@ -3,8 +3,14 @@ name = "fork-choice" version = "0.1.0" edition = "2021" +[features] +default = [] +devnet1 = ["containers/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "env-config/devnet2"] + [dependencies] -containers = { path = "../containers" } +env-config = { path = "../env-config", default-features = false } +containers = { path = "../containers", default-features = false } ssz = { git = "https://github.com/grandinetech/grandine", package = "ssz", branch = "develop"} ssz_derive = { git = "https://github.com/grandinetech/grandine", package = "ssz_derive", branch = "develop" } typenum = "1.17.0" diff --git a/lean_client/fork_choice/src/handlers.rs b/lean_client/fork_choice/src/handlers.rs index 618c8c9..fa9aa89 100644 --- a/lean_client/fork_choice/src/handlers.rs +++ b/lean_client/fork_choice/src/handlers.rs @@ -1,16 +1,13 @@ use crate::store::*; use containers::{ - attestation::SignedAttestation, - 
block::SignedBlockWithAttestation, - Bytes32, ValidatorIndex, + attestation::SignedAttestation, block::SignedBlockWithAttestation, Bytes32, ValidatorIndex, }; use ssz::SszHash; #[inline] pub fn on_tick(store: &mut Store, time: u64, has_proposal: bool) { // Calculate target time in intervals - let tick_interval_time = - time.saturating_sub(store.config.genesis_time) / SECONDS_PER_INTERVAL; + let tick_interval_time = time.saturating_sub(store.config.genesis_time) / SECONDS_PER_INTERVAL; // Tick forward one interval at a time while store.time < tick_interval_time { @@ -28,11 +25,25 @@ pub fn on_attestation( signed_attestation: SignedAttestation, is_from_block: bool, ) -> Result<(), String> { + #[cfg(feature = "devnet1")] let validator_id = ValidatorIndex(signed_attestation.message.validator_id.0); + #[cfg(feature = "devnet1")] let attestation_slot = signed_attestation.message.data.slot; + #[cfg(feature = "devnet1")] let source_slot = signed_attestation.message.data.source.slot; + #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot; + + #[cfg(feature = "devnet2")] + let validator_id = ValidatorIndex(signed_attestation.validator_id); + #[cfg(feature = "devnet2")] + let attestation_slot = signed_attestation.message.slot; + #[cfg(feature = "devnet2")] + let source_slot = signed_attestation.message.source.slot; + #[cfg(feature = "devnet2")] + let target_slot = signed_attestation.message.target.slot; + // Validate attestation is not from future let curr_slot = store.time / INTERVALS_PER_SLOT; if attestation_slot.0 > curr_slot { @@ -52,28 +63,69 @@ pub fn on_attestation( if is_from_block { // On-chain attestation processing - immediately becomes "known" + #[cfg(feature = "devnet1")] + if store + .latest_known_attestations + .get(&validator_id) + .map_or(true, |existing| { + existing.message.data.slot < attestation_slot + }) + { + store + .latest_known_attestations + .insert(validator_id, signed_attestation.clone()); + } + + #[cfg(feature 
= "devnet2")] if store .latest_known_attestations .get(&validator_id) - .map_or(true, |existing| existing.message.data.slot < attestation_slot) + .map_or(true, |existing| { + existing.message.slot < attestation_slot + }) { - store.latest_known_attestations.insert(validator_id, signed_attestation.clone()); + store + .latest_known_attestations + .insert(validator_id, signed_attestation.clone()); } // Remove from new attestations if superseded if let Some(existing_new) = store.latest_new_attestations.get(&validator_id) { + #[cfg(feature = "devnet1")] if existing_new.message.data.slot <= attestation_slot { store.latest_new_attestations.remove(&validator_id); } + #[cfg(feature = "devnet2")] + if existing_new.message.slot <= attestation_slot { + store.latest_new_attestations.remove(&validator_id); + } } } else { // Network gossip attestation processing - goes to "new" stage + #[cfg(feature = "devnet1")] + if store + .latest_new_attestations + .get(&validator_id) + .map_or(true, |existing| { + existing.message.data.slot < attestation_slot + }) + { + store + .latest_new_attestations + .insert(validator_id, signed_attestation); + } + + #[cfg(feature = "devnet2")] if store .latest_new_attestations .get(&validator_id) - .map_or(true, |existing| existing.message.data.slot < attestation_slot) + .map_or(true, |existing| { + existing.message.slot < attestation_slot + }) { - store.latest_new_attestations.insert(validator_id, signed_attestation); + store + .latest_new_attestations + .insert(validator_id, signed_attestation); } } Ok(()) @@ -125,8 +177,7 @@ fn process_block_internal( }; // Execute state transition to get post-state - let new_state = - state.state_transition_with_validation(signed_block.clone(), true, true)?; + let new_state = state.state_transition_with_validation(signed_block.clone(), true, true)?; // Store block and state store.blocks.insert(block_root, signed_block.clone()); @@ -143,49 +194,93 @@ fn process_block_internal( let attestations = 
&signed_block.message.block.body.attestations; let signatures = &signed_block.signature; - for i in 0.. { - match (attestations.get(i), signatures.get(i)) { - (Ok(attestation), Ok(signature)) => { - let signed_attestation = SignedAttestation { - message: attestation.clone(), - signature: signature.clone(), - }; - on_attestation(store, signed_attestation, true)?; + #[cfg(feature = "devnet1")] + { + for i in 0.. { + match (attestations.get(i), signatures.get(i)) { + (Ok(attestation), Ok(signature)) => { + let signed_attestation = SignedAttestation { + message: attestation.clone(), + signature: signature.clone(), + }; + on_attestation(store, signed_attestation, true)?; + } + _ => break, } - _ => break, } + + // Update head BEFORE processing proposer attestation + update_head(store); + + // Process proposer attestation as gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + let num_body_attestations = attestations.len_u64(); + + // Get proposer signature or use default if not present (for tests) + use containers::attestation::Signature; + let proposer_signature = signatures + .get(num_body_attestations) + .map(|sig| sig.clone()) + .unwrap_or_else(|_| Signature::default()); + + let proposer_signed_attestation = SignedAttestation { + message: signed_block.message.proposer_attestation.clone(), + signature: proposer_signature, + }; + + // Process proposer attestation as if received via gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + on_attestation(store, proposer_signed_attestation, false)?; + + Ok(()) } - // Update head BEFORE processing proposer attestation - update_head(store); + #[cfg(feature = "devnet2")] + { + let aggregated_attestations = &signed_block.message.block.body.attestations; + let attestation_signatures = &signed_block.signature.attestation_signatures; + let proposer_attestation = 
&signed_block.message.proposer_attestation; - // Process proposer attestation as gossip (is_from_block=false) - // This ensures it goes to "new" attestations and doesn't immediately affect fork choice - let num_body_attestations = { - let mut count = 0; - while attestations.get(count).is_ok() { - count += 1; + for (aggregated_attestation, aggregated_signature) in aggregated_attestations + .into_iter() + .zip(attestation_signatures) + { + let validator_ids: Vec = aggregated_attestation + .aggregation_bits.0 + .iter() + .enumerate() + .filter(|(_, bit)| **bit) + .map(|(index, _)| index as u64) + .collect(); + + for (validator_id, signature) in validator_ids.into_iter().zip(aggregated_signature) { + on_attestation( + store, + SignedAttestation { + validator_id, + message: aggregated_attestation.data.clone(), + signature: *signature, + }, + true, + )?; + } } - count - }; - // Get proposer signature or use default if not present (for tests) - use containers::attestation::Signature; - let proposer_signature = signatures - .get(num_body_attestations) - .map(|sig| sig.clone()) - .unwrap_or_else(|_| Signature::default()); + // Update head BEFORE processing proposer attestation + update_head(store); - let proposer_signed_attestation = SignedAttestation { - message: signed_block.message.proposer_attestation.clone(), - signature: proposer_signature, - }; + let proposer_signed_attestation = SignedAttestation { + validator_id: proposer_attestation.validator_id.0, + message: proposer_attestation.data.clone(), + signature: signed_block.signature.proposer_signature, + }; - // Process proposer attestation as if received via gossip (is_from_block=false) - // This ensures it goes to "new" attestations and doesn't immediately affect fork choice - on_attestation(store, proposer_signed_attestation, false)?; + // Process proposer attestation as if received via gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + 
on_attestation(store, proposer_signed_attestation, false)?; - Ok(()) + Ok(()) + } } fn process_pending_blocks(store: &mut Store, mut roots: Vec) { diff --git a/lean_client/fork_choice/src/store.rs b/lean_client/fork_choice/src/store.rs index 4c746d4..3296d06 100644 --- a/lean_client/fork_choice/src/store.rs +++ b/lean_client/fork_choice/src/store.rs @@ -85,7 +85,10 @@ pub fn get_fork_choice_head( // stage 1: accumulate weights by walking up from each attestation's head for attestation in latest_attestations.values() { + #[cfg(feature = "devnet1")] let mut curr = attestation.message.data.head.root; + #[cfg(feature = "devnet2")] + let mut curr = attestation.message.head.root; if let Some(block) = store.blocks.get(&curr) { let mut curr_slot = block.message.block.slot; diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index 5718d48..50bd240 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -256,6 +256,7 @@ fn convert_test_attestation(test_att: &TestAttestation) -> Attestation { } } +#[cfg(feature = "devnet1")] fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAttestation { let mut attestations = ssz::PersistentList::default(); @@ -303,6 +304,7 @@ fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAtt } } +#[cfg(feature = "devnet1")] fn convert_test_block(test_block_with_att: &TestBlockWithAttestation) -> SignedBlockWithAttestation { let test_block = &test_block_with_att.block; let mut attestations = ssz::PersistentList::default(); @@ -405,6 +407,7 @@ fn initialize_state_from_test(test_state: &TestAnchorState) -> State { } } +#[cfg(feature = "devnet1")] fn verify_checks( store: &Store, checks: &Option, @@ -493,6 +496,7 @@ fn verify_checks( Ok(()) } +#[cfg(feature = "devnet1")] fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> 
{ println!(" Running: {}", test.info.test_id); @@ -624,6 +628,7 @@ fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> { Ok(()) } +#[cfg(feature = "devnet1")] fn run_test_vector_file(test_path: &str) -> Result<(), String> { let json_str = std::fs::read_to_string(test_path) .map_err(|e| format!("Failed to read file {}: {}", test_path, e))?; @@ -639,6 +644,7 @@ fn run_test_vector_file(test_path: &str) -> Result<(), String> { } #[test] +#[cfg(feature = "devnet1")] fn test_fork_choice_head_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_fork_choice_head"; @@ -682,6 +688,7 @@ fn test_fork_choice_head_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_attestation_processing_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_attestation_processing"; @@ -725,6 +732,7 @@ fn test_attestation_processing_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_fork_choice_reorgs_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_fork_choice_reorgs"; @@ -768,6 +776,7 @@ fn test_fork_choice_reorgs_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_attestation_target_selection_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_attestation_target_selection"; @@ -811,6 +820,7 @@ fn test_attestation_target_selection_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_lexicographic_tiebreaker_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_lexicographic_tiebreaker"; diff --git a/lean_client/fork_choice/tests/unit_tests/votes.rs b/lean_client/fork_choice/tests/unit_tests/votes.rs index 805e785..4a1b688 100644 --- a/lean_client/fork_choice/tests/unit_tests/votes.rs +++ b/lean_client/fork_choice/tests/unit_tests/votes.rs @@ -7,6 +7,7 @@ use containers::{ Bytes32, Slot, Uint64, ValidatorIndex, }; +#[cfg(feature = "devnet1")] fn create_signed_attestation(validator_id: u64, slot: Slot, head_root: Bytes32) -> SignedAttestation { 
SignedAttestation { message: Attestation { @@ -23,6 +24,7 @@ fn create_signed_attestation(validator_id: u64, slot: Slot, head_root: Bytes32) } #[test] +#[cfg(feature = "devnet1")] fn test_accept_new_attestations() { let mut store = create_test_store(); @@ -63,6 +65,7 @@ fn test_accept_new_attestations() { } #[test] +#[cfg(feature = "devnet1")] fn test_accept_new_attestations_multiple() { let mut store = create_test_store(); @@ -94,6 +97,7 @@ fn test_accept_new_attestations_empty() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_lifecycle() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -129,6 +133,7 @@ fn test_on_attestation_lifecycle() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_future_slot() { let mut store = create_test_store(); let future_slot = Slot(100); // Far in the future @@ -140,6 +145,7 @@ fn test_on_attestation_future_slot() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_update_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -161,6 +167,7 @@ fn test_on_attestation_update_vote() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_ignore_old_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -183,6 +190,7 @@ fn test_on_attestation_ignore_old_vote() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_from_block_supersedes_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -204,6 +212,7 @@ fn test_on_attestation_from_block_supersedes_new() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_newer_from_block_removes_older_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); diff --git a/lean_client/networking/Cargo.toml b/lean_client/networking/Cargo.toml index f107994..8f47702 100644 --- a/lean_client/networking/Cargo.toml +++ b/lean_client/networking/Cargo.toml @@ -3,7 +3,13 @@ name = 
"networking" version = "0.1.0" edition = "2024" +[features] +default = [] +devnet1 = ["containers/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "env-config/devnet2"] + [dependencies] +env-config = { path = "../env-config", default-features = false } containers = {workspace = true} alloy-primitives = { workspace = true} libp2p = {workspace = true} diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 9c0993f..93e749c 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -311,7 +311,10 @@ where } } Ok(GossipsubMessage::Attestation(signed_attestation)) => { + #[cfg(feature = "devnet1")] let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; if let Err(err) = self .chain_message_sink @@ -521,7 +524,11 @@ where } } OutboundP2pRequest::GossipAttestation(signed_attestation) => { + #[cfg(feature = "devnet1")] let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; + match signed_attestation.to_ssz() { Ok(bytes) => { if let Err(err) = self.publish_to_topic(GossipsubKind::Attestation, bytes) { diff --git a/lean_client/networking/src/types.rs b/lean_client/networking/src/types.rs index 37644c2..028a883 100644 --- a/lean_client/networking/src/types.rs +++ b/lean_client/networking/src/types.rs @@ -93,9 +93,14 @@ impl Display for ChainMessage { ChainMessage::ProcessBlock { signed_block_with_attestation, .. } => { write!(f, "ProcessBlockWithAttestation(slot={})", signed_block_with_attestation.message.block.slot.0) } + #[cfg(feature = "devnet1")] ChainMessage::ProcessAttestation { signed_attestation, .. } => { write!(f, "ProcessAttestation(slot={})", signed_attestation.message.data.slot.0) } + #[cfg(feature = "devnet2")] + ChainMessage::ProcessAttestation { signed_attestation, .. 
} => { + write!(f, "ProcessAttestation(slot={})", signed_attestation.message.slot.0) + } } } } diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index cc44285..d1c3e24 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -1,4 +1,5 @@ use clap::Parser; +use containers::block::BlockSignatures; use containers::ssz::{PersistentList, SszHash}; use containers::{ attestation::{Attestation, AttestationData}, @@ -8,7 +9,7 @@ use containers::{ ssz, state::State, types::{Bytes32, Uint64, ValidatorIndex}, - Slot, + Signature, Slot, }; use fork_choice::{ handlers::{on_attestation, on_block, on_tick}, @@ -95,7 +96,10 @@ fn print_chain_status(store: &Store, connected_peers: u64) { println!(" Head Block Root: 0x{:x}", head_root.0); println!(" Parent Block Root: 0x{:x}", parent_root.0); println!(" State Root: 0x{:x}", state_root.0); - println!(" Timely: {}", if timely { "YES" } else { "NO" }); + println!( + " Timely: {}", + if timely { "YES" } else { "NO" } + ); println!("+---------------------------------------------------------------+"); println!( " Latest Justified: Slot {:>5} | Root: 0x{:x}", @@ -216,7 +220,13 @@ async fn main() { block: genesis_block, proposer_attestation: genesis_proposer_attestation, }, + #[cfg(feature = "devnet1")] signature: PersistentList::default(), + #[cfg(feature = "devnet2")] + signature: BlockSignatures { + attestation_signatures: PersistentList::default(), + proposer_signature: Signature::default(), + }, }; let config = Config { genesis_time }; @@ -234,7 +244,11 @@ async fn main() { if let Some(ref keys_dir) = args.hash_sig_key_dir { let keys_path = std::path::Path::new(keys_dir); if keys_path.exists() { - match ValidatorService::new_with_keys(config.clone(), num_validators, keys_path) { + match ValidatorService::new_with_keys( + config.clone(), + num_validators, + keys_path, + ) { Ok(service) => { info!( node_id = %node_id, @@ -245,7 +259,10 @@ async fn main() { Some(service) } Err(e) => { - warn!("Failed to load XMSS 
keys: {}, falling back to zero signatures", e); + warn!( + "Failed to load XMSS keys: {}, falling back to zero signatures", + e + ); Some(ValidatorService::new(config, num_validators)) } } @@ -417,14 +434,29 @@ async fn main() { if last_attestation_slot != Some(current_slot) { let attestations = vs.create_attestations(&store, Slot(current_slot)); for signed_att in attestations { + #[cfg(feature = "devnet1")] let validator_id = signed_att.message.validator_id.0; + #[cfg(feature = "devnet2")] + let validator_id = signed_att.validator_id; info!( slot = current_slot, validator = validator_id, "Broadcasting attestation" ); + #[cfg(feature = "devnet1")] + match on_attestation(&mut store, signed_att.clone(), false) { + Ok(()) => { + if let Err(e) = chain_outbound_sender.send( + OutboundP2pRequest::GossipAttestation(signed_att) + ) { + warn!("Failed to gossip attestation: {}", e); + } + } + Err(e) => warn!("Error processing own attestation: {}", e), + } + #[cfg(feature = "devnet2")] match on_attestation(&mut store, signed_att.clone(), false) { Ok(()) => { if let Err(e) = chain_outbound_sender.send( @@ -520,10 +552,24 @@ async fn main() { should_gossip, .. 
} => { + #[cfg(feature = "devnet1")] let att_slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet1")] let source_slot = signed_attestation.message.data.source.slot.0; + #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot.0; + #[cfg(feature = "devnet1")] let validator_id = signed_attestation.message.validator_id.0; + + #[cfg(feature = "devnet2")] + let att_slot = signed_attestation.message.slot.0; + #[cfg(feature = "devnet2")] + let source_slot = signed_attestation.message.source.slot.0; + #[cfg(feature = "devnet2")] + let target_slot = signed_attestation.message.target.slot.0; + #[cfg(feature = "devnet2")] + let validator_id = signed_attestation.validator_id; + info!( slot = att_slot, source_slot = source_slot, diff --git a/lean_client/validator/Cargo.toml b/lean_client/validator/Cargo.toml index b658c48..8311b9d 100644 --- a/lean_client/validator/Cargo.toml +++ b/lean_client/validator/Cargo.toml @@ -6,8 +6,11 @@ edition = "2021" [features] default = ["xmss-signing"] xmss-signing = ["leansig"] +devnet1 = ["containers/devnet1", "fork-choice/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "fork-choice/devnet2", "env-config/devnet1"] [dependencies] +env-config = { path = "../env-config", default-features = false } serde = { version = "1.0", features = ["derive"] } serde_yaml = "0.9" containers = { path = "../containers" } diff --git a/lean_client/validator/src/lib.rs b/lean_client/validator/src/lib.rs index e26bcf7..2c65fa7 100644 --- a/lean_client/validator/src/lib.rs +++ b/lean_client/validator/src/lib.rs @@ -2,12 +2,16 @@ use std::collections::HashMap; use std::path::Path; +use containers::attestation::{AggregatedAttestations}; +#[cfg(feature = "devnet2")] +use containers::attestation::{NaiveAggregatedSignature}; +use containers::block::BlockSignatures; use containers::{ attestation::{Attestation, AttestationData, Signature, SignedAttestation}, - block::{BlockWithAttestation, 
SignedBlockWithAttestation, hash_tree_root}, + block::{hash_tree_root, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, types::{Uint64, ValidatorIndex}, - Slot, + AggregatedAttestation, Slot, }; use fork_choice::store::{get_proposal_head, get_vote_target, Store}; use tracing::{info, warn}; @@ -172,23 +176,33 @@ impl ValidatorService { .latest_new_attestations .values() .filter(|att| { + #[cfg(feature = "devnet1")] let data = &att.message.data; + #[cfg(feature = "devnet2")] + let data = &att.message; // Source must match the parent state's justified checkpoint (not store's!) let source_matches = data.source == parent_state.latest_justified; // Target must be strictly after source let target_after_source = data.target.slot > data.source.slot; // Target block must be known let target_known = store.blocks.contains_key(&data.target.root); - + source_matches && target_after_source && target_known }) .collect(); + #[cfg(feature = "devnet1")] let valid_attestations: Vec = valid_signed_attestations .iter() .map(|att| att.message.clone()) .collect(); + #[cfg(feature = "devnet2")] + let valid_attestations: Vec = valid_signed_attestations + .iter() + .map(|att| att.message.clone()) + .collect(); + info!( slot = slot.0, valid_attestations = valid_attestations.len(), @@ -197,14 +211,52 @@ impl ValidatorService { ); // Build block with collected attestations (empty body - attestations go to state) - let (block, _post_state, _collected_atts, sigs) = - parent_state.build_block(slot, proposer_index, parent_root, Some(valid_attestations), None, None)?; + #[cfg(feature = "devnet1")] + let (block, _post_state, _collected_atts, sigs) = parent_state.build_block( + slot, + proposer_index, + parent_root, + Some(valid_attestations), + None, + None, + )?; + #[cfg(feature = "devnet2")] + let (block, _post_state, _collected_atts, sigs) = { + let valid_attestations: Vec = valid_attestations + .iter() + .map(|data| Attestation { + validator_id: Uint64(0), // 
Placeholder, real validator IDs should be used + data: data.clone(), + }) + .collect(); + parent_state.build_block( + slot, + proposer_index, + parent_root, + Some(valid_attestations), + None, + None, + )? + }; // Collect signatures from the attestations we included + #[cfg(feature = "devnet1")] let mut signatures = sigs; + #[cfg(feature = "devnet2")] + let mut signatures = sigs.attestation_signatures; for signed_att in &valid_signed_attestations { - signatures.push(signed_att.signature.clone()) + #[cfg(feature = "devnet1")] + signatures + .push(signed_att.signature.clone()) .map_err(|e| format!("Failed to add attestation signature: {:?}", e))?; + #[cfg(feature = "devnet2")] + { + // TODO: Use real aggregation instead of naive placeholder when spec is more up to date + let aggregated_sig: NaiveAggregatedSignature = NaiveAggregatedSignature::default(); + signatures + .push(aggregated_sig) + .map_err(|e| format!("Failed to add attestation signature: {:?}", e))?; + } } info!( @@ -224,11 +276,20 @@ impl ValidatorService { match key_manager.sign(proposer_index.0, epoch, &message.0.into()) { Ok(sig) => { - signatures.push(sig).map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; - info!( - proposer = proposer_index.0, - "Signed proposer attestation" - ); + #[cfg(feature = "devnet1")] + signatures + .push(sig) + .map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; + #[cfg(feature = "devnet2")] + { + // TODO: Use real aggregation instead of naive placeholder when spec is more up to date + let aggregated_sig: NaiveAggregatedSignature = + NaiveAggregatedSignature::default(); + signatures + .push(aggregated_sig) + .map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; + } + info!(proposer = proposer_index.0, "Signed proposer attestation"); } Err(e) => { return Err(format!("Failed to sign proposer attestation: {}", e)); @@ -244,7 +305,13 @@ impl ValidatorService { block, proposer_attestation, }, + #[cfg(feature = "devnet1")] 
signature: signatures, + #[cfg(feature = "devnet2")] + signature: BlockSignatures { + attestation_signatures: signatures, + proposer_signature: Signature::default(), + }, }; Ok(signed_block) @@ -284,6 +351,7 @@ impl ValidatorService { .validator_indices .iter() .filter_map(|&idx| { + #[cfg(feature = "devnet1")] let attestation = Attestation { validator_id: Uint64(idx), data: AttestationData { @@ -294,6 +362,14 @@ impl ValidatorService { }, }; + #[cfg(feature = "devnet2")] + let attestation = AttestationData { + slot, + head: head_checkpoint.clone(), + target: vote_target.clone(), + source: store.latest_justified.clone(), + }; + let signature = if let Some(ref key_manager) = self.key_manager { // Sign with XMSS let message = hash_tree_root(&attestation); @@ -331,10 +407,24 @@ impl ValidatorService { Signature::default() }; - Some(SignedAttestation { - message: attestation, - signature, - }) + { + #[cfg(feature = "devnet1")] + { + Some(SignedAttestation { + message: attestation, + signature, + }) + } + + #[cfg(feature = "devnet2")] + { + Some(SignedAttestation { + validator_id: idx, + message: attestation, + signature, + }) + } + } }) .collect() } From 671715d6f2b146127eb57b114e35860700cc7caf Mon Sep 17 00:00:00 2001 From: LiudasBaronas1 <144480589+LiudasBaronas1@users.noreply.github.com> Date: Mon, 12 Jan 2026 01:16:22 +0200 Subject: [PATCH 05/27] Refactor: remove redundant constants from config, implement ChainConfig::devnet() and update lib exports --- lean_client/chain/src/config.rs | 57 ++++++++++++++------------------- lean_client/chain/src/lib.rs | 3 +- 2 files changed, 26 insertions(+), 34 deletions(-) diff --git a/lean_client/chain/src/config.rs b/lean_client/chain/src/config.rs index b8be4f1..537d13b 100644 --- a/lean_client/chain/src/config.rs +++ b/lean_client/chain/src/config.rs @@ -3,30 +3,21 @@ pub struct BasisPoint(pub u64); impl BasisPoint { pub const MAX: u64 = 10_000; + pub const fn new(value: u64) -> Option { if value <= Self::MAX { 
Some(BasisPoint(value)) } else { None } } - #[inline] pub fn get(&self) -> u64 { self.0 } + + #[inline] + pub fn get(&self) -> u64 { self.0 } } -pub const INTERVALS_PER_SLOT: u64 = 4; -pub const SLOT_DURATION_MS: u64 = 4_000; -pub const SECONDS_PER_SLOT: u64 = SLOT_DURATION_MS / 1_000; -pub const SECONDS_PER_INTERVAL: u64 = SECONDS_PER_SLOT / INTERVALS_PER_SLOT; -pub const JUSTIFICATION_LOOKBACK_SLOTS: u64 = 3; - -pub const PROPOSER_REORG_CUTOFF_BPS: BasisPoint = match BasisPoint::new(2_500) { Some(x) => x, None => panic!() }; -pub const VOTE_DUE_BPS: BasisPoint = match BasisPoint::new(5_000) { Some(x) => x, None => panic!() }; -pub const FAST_CONFIRM_DUE_BPS: BasisPoint = match BasisPoint::new(7_500) { Some(x) => x, None => panic!() }; -pub const VIEW_FREEZE_CUTOFF_BPS: BasisPoint= match BasisPoint::new(7_500) { Some(x) => x, None => panic!() }; - -pub const HISTORICAL_ROOTS_LIMIT: u64 = 1u64 << 18; -pub const VALIDATOR_REGISTRY_LIMIT: u64 = 1u64 << 12; - #[derive(Clone, Debug)] pub struct ChainConfig { + pub intervals_per_slot: u64, pub slot_duration_ms: u64, pub second_per_slot: u64, + pub seconds_per_interval: u64, pub justification_lookback_slots: u64, pub proposer_reorg_cutoff_bps: BasisPoint, pub vote_due_bps: BasisPoint, @@ -36,24 +27,24 @@ pub struct ChainConfig { pub validator_registry_limit: u64, } -pub const DEVNET_CONFIG: ChainConfig = ChainConfig { - slot_duration_ms: SLOT_DURATION_MS, - second_per_slot: SECONDS_PER_SLOT, - justification_lookback_slots: JUSTIFICATION_LOOKBACK_SLOTS, - proposer_reorg_cutoff_bps: PROPOSER_REORG_CUTOFF_BPS, - vote_due_bps: VOTE_DUE_BPS, - fast_confirm_due_bps: FAST_CONFIRM_DUE_BPS, - view_freeze_cutoff_bps: VIEW_FREEZE_CUTOFF_BPS, - historical_roots_limit: HISTORICAL_ROOTS_LIMIT, - validator_registry_limit: VALIDATOR_REGISTRY_LIMIT, -}; +impl ChainConfig { + pub fn devnet() -> Self { + let slot_duration_ms = 4_000; + let seconds_per_slot = slot_duration_ms / 1_000; + let intervals_per_slot = 4; -#[cfg(test)] -mod tests { 
- use super::*; - #[test] fn time_math_is_consistent() { - assert_eq!(SLOT_DURATION_MS, 4_000); - assert_eq!(SECONDS_PER_SLOT, 4); - assert_eq!(SECONDS_PER_INTERVAL, 1); + Self { + slot_duration_ms, + second_per_slot: seconds_per_slot, + intervals_per_slot, + seconds_per_interval: seconds_per_slot / intervals_per_slot, + justification_lookback_slots: 3, + proposer_reorg_cutoff_bps: BasisPoint::new(2_500).expect("Valid BPS"), + vote_due_bps: BasisPoint::new(5_000).expect("Valid BPS"), + fast_confirm_due_bps: BasisPoint::new(7_500).expect("Valid BPS"), + view_freeze_cutoff_bps: BasisPoint::new(7_500).expect("Valid BPS"), + historical_roots_limit: 1u64 << 18, + validator_registry_limit: 1u64 << 12, + } } } \ No newline at end of file diff --git a/lean_client/chain/src/lib.rs b/lean_client/chain/src/lib.rs index ef68c36..12cf630 100644 --- a/lean_client/chain/src/lib.rs +++ b/lean_client/chain/src/lib.rs @@ -1 +1,2 @@ -pub mod config; +mod config; +pub use config::ChainConfig; \ No newline at end of file From 9fbbf348f0fa8ae55278cf6c55394f22e149cae6 Mon Sep 17 00:00:00 2001 From: Darius Spr <108625236+Dariusspr@users.noreply.github.com> Date: Wed, 14 Jan 2026 11:57:59 +0200 Subject: [PATCH 06/27] rename to PublicKey and use constant for key size --- lean_client/containers/src/block.rs | 1 - lean_client/containers/src/state.rs | 2 +- lean_client/containers/src/validator.rs | 56 +++++++++++-------- .../tests/fork_choice_test_vectors.rs | 2 +- lean_client/src/main.rs | 4 +- lean_client/validator/src/keys.rs | 2 +- 6 files changed, 37 insertions(+), 30 deletions(-) diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index 0acf1b2..00f5894 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs @@ -7,7 +7,6 @@ use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to use ssz::{PersistentList, SszHash}; use typenum::U4096; use crate::attestation::{AggregatedAttestations, 
AttestationSignatures}; -use crate::validator::BlsPublicKey; /// The body of a block, containing payload data. /// diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index b176e55..5e3dd52 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -107,7 +107,7 @@ impl State { let mut validators = List::default(); for i in 0..num_validators.0 { let validator = Validator { - pubkey: crate::validator::BlsPublicKey::default(), + pubkey: crate::validator::PublicKey::default(), index: Uint64(i), }; validators.push(validator).expect("Failed to add validator"); diff --git a/lean_client/containers/src/validator.rs b/lean_client/containers/src/validator.rs index 513b09d..29f5716 100644 --- a/lean_client/containers/src/validator.rs +++ b/lean_client/containers/src/validator.rs @@ -1,21 +1,24 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::ByteVector; use ssz_derive::Ssz; -use typenum::U52; +use typenum::{Unsigned, U52}; -/// BLS public key - 52 bytes (as defined in lean spec) +/// Size of XMSS public keys in bytes (as defined in lean spec) +pub type PublicKeySize = U52; + +/// XMSS public key (as defined in lean spec) #[derive(Clone, Debug, PartialEq, Eq, Ssz)] #[ssz(transparent)] -pub struct BlsPublicKey(pub ByteVector); +pub struct PublicKey(pub ByteVector); -impl Default for BlsPublicKey { +impl Default for PublicKey { fn default() -> Self { - BlsPublicKey(ByteVector::default()) + PublicKey(ByteVector::default()) } } // Custom serde implementation -impl Serialize for BlsPublicKey { +impl Serialize for PublicKey { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -24,8 +27,8 @@ impl Serialize for BlsPublicKey { // For now, use unsafe to access the underlying bytes let bytes = unsafe { std::slice::from_raw_parts( - &self.0 as *const ByteVector as *const u8, - 52 + &self.0 as *const ByteVector as *const u8, + PublicKeySize::USIZE, ) }; let hex_string = 
format!("0x{}", hex::encode(bytes)); @@ -33,52 +36,57 @@ impl Serialize for BlsPublicKey { } } -impl<'de> Deserialize<'de> for BlsPublicKey { +impl<'de> Deserialize<'de> for PublicKey { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, { let s = String::deserialize(deserializer)?; let s = s.strip_prefix("0x").unwrap_or(&s); - + let decoded = hex::decode(s).map_err(serde::de::Error::custom)?; - if decoded.len() != 52 { + if decoded.len() != PublicKeySize::USIZE { return Err(serde::de::Error::custom(format!( - "Expected 52 bytes, got {}", + "Expected {} bytes, got {}", + PublicKeySize::USIZE, decoded.len() ))); } - + // Create ByteVector from decoded bytes using unsafe let mut byte_vec = ByteVector::default(); unsafe { - let dest = &mut byte_vec as *mut ByteVector as *mut u8; - std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, 52); + let dest = &mut byte_vec as *mut ByteVector as *mut u8; + std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, PublicKeySize::USIZE); } - - Ok(BlsPublicKey(byte_vec)) + + Ok(PublicKey(byte_vec)) } } -impl BlsPublicKey { +impl PublicKey { pub fn from_hex(s: &str) -> Result { let s = s.strip_prefix("0x").unwrap_or(s); let decoded = hex::decode(s).map_err(|e| e.to_string())?; - if decoded.len() != 52 { - return Err(format!("Expected 52 bytes, got {}", decoded.len())); + if decoded.len() != PublicKeySize::USIZE { + return Err(format!( + "Expected {} bytes, got {}", + PublicKeySize::USIZE, + decoded.len() + )); } let mut byte_vec = ByteVector::default(); unsafe { - let dest = &mut byte_vec as *mut ByteVector as *mut u8; - std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, 52); + let dest = &mut byte_vec as *mut ByteVector as *mut u8; + std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, PublicKeySize::USIZE); } - Ok(BlsPublicKey(byte_vec)) + Ok(PublicKey(byte_vec)) } } #[derive(Clone, Debug, PartialEq, Eq, Default, Ssz, Serialize, Deserialize)] pub struct Validator { - pub pubkey: BlsPublicKey, + pub pubkey: 
PublicKey, #[serde(default)] pub index: crate::Uint64, } diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index 50bd240..4f8521f 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -384,7 +384,7 @@ fn initialize_state_from_test(test_state: &TestAnchorState) -> State { let mut validators = List::default(); for test_validator in &test_state.validators.data { - let pubkey = containers::validator::BlsPublicKey::from_hex(&test_validator.pubkey) + let pubkey = containers::validator::PublicKey::from_hex(&test_validator.pubkey) .expect("Failed to parse validator pubkey"); let validator = containers::validator::Validator { pubkey, diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index d1c3e24..ad2276e 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -164,7 +164,7 @@ async fn main() { .iter() .enumerate() .map(|(i, v_str)| { - let pubkey = containers::validator::BlsPublicKey::from_hex(v_str) + let pubkey = containers::validator::PublicKey::from_hex(v_str) .expect("Invalid genesis validator pubkey"); containers::validator::Validator { pubkey, @@ -178,7 +178,7 @@ async fn main() { let num_validators = 3; let validators = (0..num_validators) .map(|i| containers::validator::Validator { - pubkey: containers::validator::BlsPublicKey::default(), + pubkey: containers::validator::PublicKey::default(), index: Uint64(i as u64), }) .collect(); diff --git a/lean_client/validator/src/keys.rs b/lean_client/validator/src/keys.rs index 13c0deb..cae38f1 100644 --- a/lean_client/validator/src/keys.rs +++ b/lean_client/validator/src/keys.rs @@ -93,7 +93,7 @@ impl KeyManager { ).into()); } - // Convert to ByteVector using unsafe pointer copy (same pattern as BlsPublicKey) + // Convert to ByteVector using unsafe pointer copy (same pattern as PublicKey) let mut byte_vec: ByteVector = 
ByteVector::default(); unsafe { let dest = &mut byte_vec as *mut ByteVector as *mut u8; From 830a70c9935d52fa0bbe7fafdaa381bc09942716 Mon Sep 17 00:00:00 2001 From: Darius Spr <108625236+Dariusspr@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:23:13 +0200 Subject: [PATCH 07/27] hide key size --- lean_client/containers/src/validator.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lean_client/containers/src/validator.rs b/lean_client/containers/src/validator.rs index 29f5716..8a2da60 100644 --- a/lean_client/containers/src/validator.rs +++ b/lean_client/containers/src/validator.rs @@ -4,7 +4,7 @@ use ssz_derive::Ssz; use typenum::{Unsigned, U52}; /// Size of XMSS public keys in bytes (as defined in lean spec) -pub type PublicKeySize = U52; +type PublicKeySize = U52; /// XMSS public key (as defined in lean spec) #[derive(Clone, Debug, PartialEq, Eq, Ssz)] From 572d54578a8d83439631624734110dedc2a4e2eb Mon Sep 17 00:00:00 2001 From: Darius Spr <108625236+Dariusspr@users.noreply.github.com> Date: Wed, 14 Jan 2026 14:34:01 +0200 Subject: [PATCH 08/27] format code --- lean_client/chain/src/config.rs | 18 +- lean_client/chain/src/lib.rs | 2 +- lean_client/containers/src/attestation.rs | 2 +- lean_client/containers/src/checkpoint.rs | 6 +- lean_client/containers/src/config.rs | 4 +- lean_client/containers/src/lib.rs | 4 +- lean_client/containers/src/slot.rs | 45 +-- lean_client/containers/src/state.rs | 26 +- lean_client/containers/src/status.rs | 2 +- lean_client/containers/src/types.rs | 2 +- .../containers/tests/debug_deserialize.rs | 43 ++- lean_client/containers/tests/main.rs | 2 +- .../tests/test_vectors/block_processing.rs | 9 +- .../containers/tests/test_vectors/genesis.rs | 9 +- .../containers/tests/test_vectors/mod.rs | 17 +- .../containers/tests/test_vectors/runner.rs | 314 +++++++++++------- .../tests/test_vectors/verify_signatures.rs | 6 +- .../unit_tests/attestation_aggregation.rs | 30 +- .../containers/tests/unit_tests/common.rs | 54 
+-- .../containers/tests/unit_tests/mod.rs | 2 +- .../tests/unit_tests/state_basic.rs | 22 +- .../tests/unit_tests/state_justifications.rs | 32 +- .../tests/unit_tests/state_process.rs | 66 +++- .../tests/unit_tests/state_transition.rs | 2 +- lean_client/env-config/src/lib.rs | 2 +- lean_client/fork_choice/src/handlers.rs | 14 +- lean_client/fork_choice/src/store.rs | 8 +- .../tests/fork_choice_test_vectors.rs | 16 +- lean_client/fork_choice/tests/unit_tests.rs | 2 +- .../fork_choice/tests/unit_tests/common.rs | 18 +- .../tests/unit_tests/fork_choice.rs | 8 +- .../fork_choice/tests/unit_tests/time.rs | 9 +- .../fork_choice/tests/unit_tests/votes.rs | 213 ++++++++---- .../networking/src/gossipsub/config.rs | 4 +- .../networking/src/gossipsub/message.rs | 5 +- .../networking/src/gossipsub/tests/config.rs | 12 +- .../networking/src/gossipsub/tests/message.rs | 4 +- .../src/gossipsub/tests/message_id.rs | 6 +- .../networking/src/gossipsub/tests/mod.rs | 4 +- .../networking/src/gossipsub/tests/topic.rs | 4 +- lean_client/networking/src/gossipsub/topic.rs | 14 +- lean_client/networking/src/network/mod.rs | 2 +- lean_client/networking/src/network/service.rs | 34 +- lean_client/networking/src/req_resp.rs | 74 +++-- lean_client/networking/src/types.rs | 35 +- lean_client/validator/src/keys.rs | 29 +- lean_client/validator/src/lib.rs | 4 +- 47 files changed, 777 insertions(+), 463 deletions(-) diff --git a/lean_client/chain/src/config.rs b/lean_client/chain/src/config.rs index 537d13b..1d762de 100644 --- a/lean_client/chain/src/config.rs +++ b/lean_client/chain/src/config.rs @@ -3,13 +3,19 @@ pub struct BasisPoint(pub u64); impl BasisPoint { pub const MAX: u64 = 10_000; - + pub const fn new(value: u64) -> Option { - if value <= Self::MAX { Some(BasisPoint(value)) } else { None } + if value <= Self::MAX { + Some(BasisPoint(value)) + } else { + None + } + } + + #[inline] + pub fn get(&self) -> u64 { + self.0 } - - #[inline] - pub fn get(&self) -> u64 { self.0 } } 
#[derive(Clone, Debug)] @@ -47,4 +53,4 @@ impl ChainConfig { validator_registry_limit: 1u64 << 12, } } -} \ No newline at end of file +} diff --git a/lean_client/chain/src/lib.rs b/lean_client/chain/src/lib.rs index 12cf630..9496841 100644 --- a/lean_client/chain/src/lib.rs +++ b/lean_client/chain/src/lib.rs @@ -1,2 +1,2 @@ mod config; -pub use config::ChainConfig; \ No newline at end of file +pub use config::ChainConfig; diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index 9c3537c..6779b0f 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -126,7 +126,7 @@ pub struct AggregatedAttestation { /// Bitfield indicating which validators participated in the aggregation. pub aggregation_bits: AggregationBits, /// Combined attestation data similar to the beacon chain format. - /// + /// /// Multiple validator attestations are aggregated here without the complexity of /// committee assignments. pub data: AttestationData, diff --git a/lean_client/containers/src/checkpoint.rs b/lean_client/containers/src/checkpoint.rs index e635ab1..1b36f31 100644 --- a/lean_client/containers/src/checkpoint.rs +++ b/lean_client/containers/src/checkpoint.rs @@ -1,9 +1,9 @@ use crate::{Bytes32, Slot}; -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; /// Represents a checkpoint in the chain's history. -/// +/// /// A checkpoint marks a specific moment in the chain. It combines a block /// identifier with a slot number. Checkpoints are used for justification and /// finalization. 
@@ -45,4 +45,4 @@ mod tests { }; assert_eq!(cp1, cp2); } -} \ No newline at end of file +} diff --git a/lean_client/containers/src/config.rs b/lean_client/containers/src/config.rs index 83bf459..fed2b7e 100644 --- a/lean_client/containers/src/config.rs +++ b/lean_client/containers/src/config.rs @@ -1,5 +1,5 @@ -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; use std::fs::File; use std::io::BufReader; use std::path::Path; @@ -26,4 +26,4 @@ impl GenesisConfig { let config = serde_yaml::from_reader(reader)?; Ok(config) } -} \ No newline at end of file +} diff --git a/lean_client/containers/src/lib.rs b/lean_client/containers/src/lib.rs index c73a9f9..f0590ca 100644 --- a/lean_client/containers/src/lib.rs +++ b/lean_client/containers/src/lib.rs @@ -22,8 +22,8 @@ pub use slot::Slot; pub use state::State; pub use status::Status; pub use types::{ - Bytes32, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, - Uint64, ValidatorIndex, + Bytes32, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, + Uint64, ValidatorIndex, Validators, }; pub use types::Bytes32 as Root; diff --git a/lean_client/containers/src/slot.rs b/lean_client/containers/src/slot.rs index d845ec3..17f5439 100644 --- a/lean_client/containers/src/slot.rs +++ b/lean_client/containers/src/slot.rs @@ -1,5 +1,5 @@ -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; use std::cmp::Ordering; #[derive(Clone, Copy, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] @@ -37,15 +37,18 @@ impl Slot { /// /// Panics if this slot is earlier than the finalized slot. 
pub fn is_justifiable_after(self, finalized: Slot) -> bool { - assert!(self >= finalized, "Candidate slot must not be before finalized slot"); + assert!( + self >= finalized, + "Candidate slot must not be before finalized slot" + ); let delta = self.0 - finalized.0; - + // Rule 1: The first 5 slots after finalization are always justifiable. // Examples: delta = 0, 1, 2, 3, 4, 5 if delta <= 5 { return true; } - + // Rule 2: Slots at perfect square distances are justifiable. // Examples: delta = 1, 4, 9, 16, 25, 36, 49, 64, ... // Check: integer square root squared equals delta @@ -53,7 +56,7 @@ impl Slot { if sqrt * sqrt == delta { return true; } - + // Rule 3: Slots at pronic number distances are justifiable. // Pronic numbers have the form n(n+1): 2, 6, 12, 20, 30, 42, 56, ... // Mathematical insight: For pronic delta = n(n+1), we have: @@ -64,7 +67,7 @@ impl Slot { if test_sqrt * test_sqrt == test && test_sqrt % 2 == 1 { return true; } - + false } } @@ -89,32 +92,32 @@ mod tests { fn test_is_justifiable_perfect_squares() { let finalized = Slot(0); // Rule 2: Perfect square distances - assert!(Slot(1).is_justifiable_after(finalized)); // delta = 1 = 1^2 - assert!(Slot(4).is_justifiable_after(finalized)); // delta = 4 = 2^2 - assert!(Slot(9).is_justifiable_after(finalized)); // delta = 9 = 3^2 - assert!(Slot(16).is_justifiable_after(finalized)); // delta = 16 = 4^2 - assert!(Slot(25).is_justifiable_after(finalized)); // delta = 25 = 5^2 - assert!(Slot(36).is_justifiable_after(finalized)); // delta = 36 = 6^2 + assert!(Slot(1).is_justifiable_after(finalized)); // delta = 1 = 1^2 + assert!(Slot(4).is_justifiable_after(finalized)); // delta = 4 = 2^2 + assert!(Slot(9).is_justifiable_after(finalized)); // delta = 9 = 3^2 + assert!(Slot(16).is_justifiable_after(finalized)); // delta = 16 = 4^2 + assert!(Slot(25).is_justifiable_after(finalized)); // delta = 25 = 5^2 + assert!(Slot(36).is_justifiable_after(finalized)); // delta = 36 = 6^2 } #[test] fn 
test_is_justifiable_pronic() { let finalized = Slot(0); // Rule 3: Pronic numbers (n(n+1)) - assert!(Slot(2).is_justifiable_after(finalized)); // delta = 2 = 1*2 - assert!(Slot(6).is_justifiable_after(finalized)); // delta = 6 = 2*3 - assert!(Slot(12).is_justifiable_after(finalized)); // delta = 12 = 3*4 - assert!(Slot(20).is_justifiable_after(finalized)); // delta = 20 = 4*5 - assert!(Slot(30).is_justifiable_after(finalized)); // delta = 30 = 5*6 - assert!(Slot(42).is_justifiable_after(finalized)); // delta = 42 = 6*7 + assert!(Slot(2).is_justifiable_after(finalized)); // delta = 2 = 1*2 + assert!(Slot(6).is_justifiable_after(finalized)); // delta = 6 = 2*3 + assert!(Slot(12).is_justifiable_after(finalized)); // delta = 12 = 3*4 + assert!(Slot(20).is_justifiable_after(finalized)); // delta = 20 = 4*5 + assert!(Slot(30).is_justifiable_after(finalized)); // delta = 30 = 5*6 + assert!(Slot(42).is_justifiable_after(finalized)); // delta = 42 = 6*7 } #[test] fn test_is_not_justifiable() { let finalized = Slot(0); // Not justifiable: not in first 5, not perfect square, not pronic - assert!(!Slot(7).is_justifiable_after(finalized)); // delta = 7 - assert!(!Slot(8).is_justifiable_after(finalized)); // delta = 8 + assert!(!Slot(7).is_justifiable_after(finalized)); // delta = 7 + assert!(!Slot(8).is_justifiable_after(finalized)); // delta = 8 assert!(!Slot(10).is_justifiable_after(finalized)); // delta = 10 assert!(!Slot(11).is_justifiable_after(finalized)); // delta = 11 } @@ -126,4 +129,4 @@ mod tests { let candidate = Slot(50); candidate.is_justifiable_after(finalized); } -} \ No newline at end of file +} diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 5e3dd52..5056fb7 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -1,5 +1,11 @@ +use crate::attestation::AggregatedAttestations; +use crate::block::BlockSignatures; use crate::validator::Validator; -use crate::{block::{hash_tree_root, 
Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, SignedAttestation, Slot, Uint64, ValidatorIndex}; +use crate::{ + block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, + Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, SignedAttestation, Slot, + Uint64, ValidatorIndex, +}; use crate::{ HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, }; @@ -8,8 +14,6 @@ use ssz::{PersistentList as List, PersistentList}; use ssz_derive::Ssz; use std::collections::BTreeMap; use typenum::U4096; -use crate::attestation::AggregatedAttestations; -use crate::block::BlockSignatures; pub const VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const JUSTIFICATION_ROOTS_LIMIT: usize = 1 << 18; // 262144 @@ -305,7 +309,9 @@ impl State { let plain_attestations = aggregated_attestation.to_plain(); // For each attestatio in the vector, push to the list for attestation in plain_attestations { - unaggregated_attestations.push(attestation).map_err(|e| format!("Failed to push attestation: {:?}", e))?; + unaggregated_attestations + .push(attestation) + .map_err(|e| format!("Failed to push attestation: {:?}", e))?; } } state.process_attestations(&unaggregated_attestations) @@ -566,7 +572,15 @@ impl State { initial_attestations: Option>, available_signed_attestations: Option<&[SignedBlockWithAttestation]>, known_block_roots: Option<&std::collections::HashSet>, - ) -> Result<(Block, Self, Vec, PersistentList), String> { + ) -> Result< + ( + Block, + Self, + Vec, + PersistentList, + ), + String, + > { // Initialize empty attestation set for iterative collection let mut attestations = initial_attestations.unwrap_or_default(); let mut signatures = PersistentList::default(); @@ -703,7 +717,7 @@ mod tests { config: st.config.clone(), ..st.clone() } - .is_proposer(ValidatorIndex(0))); + .is_proposer(ValidatorIndex(0))); } #[test] 
diff --git a/lean_client/containers/src/status.rs b/lean_client/containers/src/status.rs index da05ba1..d68c7c3 100644 --- a/lean_client/containers/src/status.rs +++ b/lean_client/containers/src/status.rs @@ -1,6 +1,6 @@ use crate::Checkpoint; -use ssz_derive::Ssz; use serde::{Deserialize, Serialize}; +use ssz_derive::Ssz; #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct Status { diff --git a/lean_client/containers/src/types.rs b/lean_client/containers/src/types.rs index bb2a64b..7d9aa4d 100644 --- a/lean_client/containers/src/types.rs +++ b/lean_client/containers/src/types.rs @@ -38,8 +38,8 @@ impl fmt::Display for Bytes32 { } // Type-level constants for SSZ collection limits -use typenum::{Prod, U4, U1000, U4096, U262144, U1073741824}; use crate::validator::Validator; +use typenum::{Prod, U1000, U1073741824, U262144, U4, U4096}; // 2^18, 4096 * 262144 /// Type-level number for 4000 bytes (signature size) = 4 * 1000 diff --git a/lean_client/containers/tests/debug_deserialize.rs b/lean_client/containers/tests/debug_deserialize.rs index 0b5a6de..1d28df1 100644 --- a/lean_client/containers/tests/debug_deserialize.rs +++ b/lean_client/containers/tests/debug_deserialize.rs @@ -4,45 +4,52 @@ use std::fs; #[test] fn debug_deserialize_state() { let json_content = fs::read_to_string( - "../tests/test_vectors/test_blocks/test_process_first_block_after_genesis.json" - ).expect("Failed to read test vector file"); - + "../tests/test_vectors/test_blocks/test_process_first_block_after_genesis.json", + ) + .expect("Failed to read test vector file"); + // Try to deserialize just to see where it fails let result: Result = serde_json::from_str(&json_content); - + match result { Ok(value) => { println!("✓ JSON is valid"); - + // Try to extract just the pre state if let Some(tests) = value.as_object() { if let Some((test_name, test_case)) = tests.iter().next() { println!("✓ Found test: {}", test_name); - + if let Some(pre) = test_case.get("pre") 
{ println!("✓ Found pre state"); - + // Try deserializing field by field if let Some(pre_obj) = pre.as_object() { for (field_name, field_value) in pre_obj.iter() { println!("\nTrying to deserialize field: {}", field_name); - println!("Field value type: {}", match field_value { - serde_json::Value::Null => "null", - serde_json::Value::Bool(_) => "bool", - serde_json::Value::Number(_) => "number", - serde_json::Value::String(_) => "string", - serde_json::Value::Array(_) => "array", - serde_json::Value::Object(_) => "object", - }); - + println!( + "Field value type: {}", + match field_value { + serde_json::Value::Null => "null", + serde_json::Value::Bool(_) => "bool", + serde_json::Value::Number(_) => "number", + serde_json::Value::String(_) => "string", + serde_json::Value::Array(_) => "array", + serde_json::Value::Object(_) => "object", + } + ); + if field_value.is_object() { if let Some(obj) = field_value.as_object() { - println!("Object keys: {:?}", obj.keys().collect::>()); + println!( + "Object keys: {:?}", + obj.keys().collect::>() + ); } } } } - + // Now try to deserialize the whole state let state_result: Result = serde_json::from_value(pre.clone()); match state_result { diff --git a/lean_client/containers/tests/main.rs b/lean_client/containers/tests/main.rs index 4d48535..f951ffe 100644 --- a/lean_client/containers/tests/main.rs +++ b/lean_client/containers/tests/main.rs @@ -1,4 +1,4 @@ // tests/lib - Test entry point mod debug_deserialize; +mod test_vectors; mod unit_tests; -mod test_vectors; \ No newline at end of file diff --git a/lean_client/containers/tests/test_vectors/block_processing.rs b/lean_client/containers/tests/test_vectors/block_processing.rs index caec865..5bbc997 100644 --- a/lean_client/containers/tests/test_vectors/block_processing.rs +++ b/lean_client/containers/tests/test_vectors/block_processing.rs @@ -13,8 +13,7 @@ fn test_process_first_block_after_genesis() { #[cfg(feature = "devnet1")] fn test_blocks_with_gaps() { let test_path = 
"../tests/test_vectors/test_blocks/test_blocks_with_gaps.json"; - TestRunner::run_block_processing_test(test_path) - .expect("test_blocks_with_gaps failed"); + TestRunner::run_block_processing_test(test_path).expect("test_blocks_with_gaps failed"); } #[test] @@ -29,16 +28,14 @@ fn test_linear_chain_multiple_blocks() { #[cfg(feature = "devnet1")] fn test_block_extends_deep_chain() { let test_path = "../tests/test_vectors/test_blocks/test_block_extends_deep_chain.json"; - TestRunner::run_block_processing_test(test_path) - .expect("test_block_extends_deep_chain failed"); + TestRunner::run_block_processing_test(test_path).expect("test_block_extends_deep_chain failed"); } #[test] #[cfg(feature = "devnet1")] fn test_empty_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks.json"; - TestRunner::run_block_processing_test(test_path) - .expect("test_empty_blocks failed"); + TestRunner::run_block_processing_test(test_path).expect("test_empty_blocks failed"); } #[test] diff --git a/lean_client/containers/tests/test_vectors/genesis.rs b/lean_client/containers/tests/test_vectors/genesis.rs index 0b1d3d3..92acf25 100644 --- a/lean_client/containers/tests/test_vectors/genesis.rs +++ b/lean_client/containers/tests/test_vectors/genesis.rs @@ -4,20 +4,17 @@ use super::runner::TestRunner; #[test] fn test_genesis_default_configuration() { let test_path = "../tests/test_vectors/test_genesis/test_genesis_default_configuration.json"; - TestRunner::run_genesis_test(test_path) - .expect("test_genesis_default_configuration failed"); + TestRunner::run_genesis_test(test_path).expect("test_genesis_default_configuration failed"); } #[test] fn test_genesis_custom_time() { let test_path = "../tests/test_vectors/test_genesis/test_genesis_custom_time.json"; - TestRunner::run_genesis_test(test_path) - .expect("test_genesis_custom_time failed"); + TestRunner::run_genesis_test(test_path).expect("test_genesis_custom_time failed"); } #[test] fn 
test_genesis_custom_validator_set() { let test_path = "../tests/test_vectors/test_genesis/test_genesis_custom_validator_set.json"; - TestRunner::run_genesis_test(test_path) - .expect("test_genesis_custom_validator_set failed"); + TestRunner::run_genesis_test(test_path).expect("test_genesis_custom_validator_set failed"); } diff --git a/lean_client/containers/tests/test_vectors/mod.rs b/lean_client/containers/tests/test_vectors/mod.rs index 8859847..acdc055 100644 --- a/lean_client/containers/tests/test_vectors/mod.rs +++ b/lean_client/containers/tests/test_vectors/mod.rs @@ -1,15 +1,13 @@ // Test vector modules -pub mod runner; pub mod block_processing; pub mod genesis; +pub mod runner; pub mod verify_signatures; +use containers::{block::Block, block::SignedBlockWithAttestation, state::State, Slot}; use serde::{Deserialize, Deserializer, Serialize}; use serde_json::Value; use std::collections::HashMap; -use containers::{ - Slot, block::Block, block::SignedBlockWithAttestation, state::State -}; /// Custom deserializer that handles both plain values and {"data": T} wrapper format fn deserialize_flexible<'de, D, T>(deserializer: D) -> Result @@ -18,21 +16,22 @@ where T: serde::de::DeserializeOwned, { use serde::de::Error; - + // Deserialize as a generic Value first to inspect the structure let value = Value::deserialize(deserializer)?; - + // Check if it's an object with a "data" field if let Value::Object(ref map) = value { if map.contains_key("data") && map.len() == 1 { // Extract just the data field if let Some(data_value) = map.get("data") { - return serde_json::from_value(data_value.clone()) - .map_err(|e| D::Error::custom(format!("Failed to deserialize from data wrapper: {}", e))); + return serde_json::from_value(data_value.clone()).map_err(|e| { + D::Error::custom(format!("Failed to deserialize from data wrapper: {}", e)) + }); } } } - + // Otherwise, deserialize as a plain value serde_json::from_value(value) .map_err(|e| D::Error::custom(format!("Failed to 
deserialize plain value: {}", e))) diff --git a/lean_client/containers/tests/test_vectors/runner.rs b/lean_client/containers/tests/test_vectors/runner.rs index bf23138..0ed8ac5 100644 --- a/lean_client/containers/tests/test_vectors/runner.rs +++ b/lean_client/containers/tests/test_vectors/runner.rs @@ -6,14 +6,19 @@ use std::path::Path; pub struct TestRunner; impl TestRunner { - pub fn run_sequential_block_processing_tests>(path: P) -> Result<(), Box> { + pub fn run_sequential_block_processing_tests>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path)?; // Parse using the new TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; println!("Running test: {}", test_name); @@ -78,20 +83,22 @@ impl TestRunner { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } // Only check validator count if specified in post-state if let Some(expected_count) = post.validator_count { let num_validators = state.validators.len_u64(); - + if num_validators as usize != expected_count { return Err(format!( "Post-state validator count mismatch: expected {}, got {}", expected_count, num_validators - ).into()); + ) + .into()); } - + println!("\n✓ All post-state checks passed"); println!(" Final slot: {:?}", state.slot); println!(" Validator count: {}", num_validators); @@ -100,38 +107,48 @@ impl TestRunner { println!(" Final slot: {:?}", state.slot); } } - + println!("\n✓✓✓ PASS: All blocks processed successfully with matching roots ✓✓✓"); } - + Ok(()) } - pub fn run_single_block_with_slot_gap_tests>(path: P) -> Result<(), Box> { + pub fn run_single_block_with_slot_gap_tests>( + path: P, + ) -> Result<(), 
Box> { let json_content = fs::read_to_string(path)?; - + // Parse using the new TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("Running test: {}", test_name); println!("Description: {}", test_case.info.description); if let Some(ref blocks) = test_case.blocks { let mut state = test_case.pre.clone(); - + for (idx, block) in blocks.iter().enumerate() { - println!("\nProcessing block {}: slot {:?} (gap from slot {:?})", idx + 1, block.slot, state.slot); - + println!( + "\nProcessing block {}: slot {:?} (gap from slot {:?})", + idx + 1, + block.slot, + state.slot + ); + // Advance state to the block's slot (this handles the slot gap) let state_after_slots = state.process_slots(block.slot)?; - + // Compute the parent root from our current latest_block_header let computed_parent_root = hash_tree_root(&state_after_slots.latest_block_header); - + // Verify the block's parent_root matches what we computed if block.parent_root != computed_parent_root { return Err(format!( @@ -141,19 +158,19 @@ impl TestRunner { computed_parent_root ).into()); } - + println!(" ✓ Parent root matches: {:?}", computed_parent_root); - + // Process the block header let result = state_after_slots.process_block_header(block); match result { Ok(new_state) => { state = new_state; - + // Compute the state root after processing let computed_state_root = hash_tree_root(&state); - + // Verify the computed state_root matches the expected one from the vector if block.state_root != computed_state_root { return Err(format!( @@ -163,59 +180,69 @@ impl TestRunner { computed_state_root ).into()); } - + println!(" ✓ State root matches: {:?}", computed_state_root); - println!(" ✓ Block {} processed 
successfully (with {} empty slots)", idx + 1, block.slot.0 - test_case.pre.slot.0 - idx as u64); + println!( + " ✓ Block {} processed successfully (with {} empty slots)", + idx + 1, + block.slot.0 - test_case.pre.slot.0 - idx as u64 + ); } Err(e) => { return Err(format!("Block {} processing failed: {:?}", idx + 1, e).into()); } } } - + // Verify post-state conditions if let Some(post) = test_case.post { if state.slot != post.slot { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } - + println!("\n✓ All post-state checks passed"); println!(" Final slot: {:?}", state.slot); } - + println!("\n✓✓✓ PASS: Block with slot gap processed successfully ✓✓✓"); } - + Ok(()) } - pub fn run_single_empty_block_tests>(path: P) -> Result<(), Box> { + pub fn run_single_empty_block_tests>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path)?; - + // Parse using the new TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("Running test: {}", test_name); println!("Description: {}", test_case.info.description); if let Some(ref blocks) = test_case.blocks { let mut state = test_case.pre.clone(); - + // Should be exactly one block if blocks.len() != 1 { return Err(format!("Expected 1 block, found {}", blocks.len()).into()); } - + let block = &blocks[0]; println!("\nProcessing single empty block at slot {:?}", block.slot); - + // Verify it's an empty block (no attestations) let attestation_count = { let mut count = 0u64; @@ -227,18 +254,22 @@ impl TestRunner { } count }; - + if attestation_count > 0 { - return Err(format!("Expected empty block, but found {} attestations", 
attestation_count).into()); + return Err(format!( + "Expected empty block, but found {} attestations", + attestation_count + ) + .into()); } println!(" ✓ Confirmed: Block has no attestations (empty block)"); - + // Advance state to the block's slot let state_after_slots = state.process_slots(block.slot)?; - + // Compute the parent root from our current latest_block_header let computed_parent_root = hash_tree_root(&state_after_slots.latest_block_header); - + // Verify the block's parent_root matches what we computed if block.parent_root != computed_parent_root { return Err(format!( @@ -247,19 +278,19 @@ impl TestRunner { computed_parent_root ).into()); } - + println!(" ✓ Parent root matches: {:?}", computed_parent_root); - + // Process the block header let result = state_after_slots.process_block_header(block); match result { Ok(new_state) => { state = new_state; - + // Compute the state root after processing let computed_state_root = hash_tree_root(&state); - + // Verify the computed state_root matches the expected one from the vector if block.state_root != computed_state_root { return Err(format!( @@ -268,7 +299,7 @@ impl TestRunner { computed_state_root ).into()); } - + println!(" ✓ State root matches: {:?}", computed_state_root); println!(" ✓ Empty block processed successfully"); } @@ -276,38 +307,44 @@ impl TestRunner { return Err(format!("Block processing failed: {:?}", e).into()); } } - + // Verify post-state conditions if let Some(post) = test_case.post { if state.slot != post.slot { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } - + println!("\n✓ All post-state checks passed"); println!(" Final slot: {:?}", state.slot); } - + println!("\n✓✓✓ PASS: Single empty block processed successfully ✓✓✓"); } - + Ok(()) } /// Generic test runner for block processing test vectors /// Handles all test vectors from test_blocks directory - pub fn run_block_processing_test>(path: P) -> Result<(), 
Box> { + pub fn run_block_processing_test>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; - + // Parse using the TestVectorFile structure with camelCase let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("\n{}: {}", test_name, test_case.info.description); // Check if this is an invalid/exception test @@ -327,7 +364,7 @@ impl TestRunner { } let mut state = test_case.pre.clone(); - + for (idx, block) in blocks.iter().enumerate() { // Check if this is a gap (missed slots) let gap_size = if idx == 0 { @@ -335,19 +372,24 @@ impl TestRunner { } else { block.slot.0 - state.slot.0 - 1 }; - + if gap_size > 0 { - println!(" Block {}: slot {} (gap: {} empty slots)", idx + 1, block.slot.0, gap_size); + println!( + " Block {}: slot {} (gap: {} empty slots)", + idx + 1, + block.slot.0, + gap_size + ); } else { println!(" Block {}: slot {}", idx + 1, block.slot.0); } - + // Advance state to the block's slot let state_after_slots = state.process_slots(block.slot)?; - + // Compute the parent root from our current latest_block_header let computed_parent_root = hash_tree_root(&state_after_slots.latest_block_header); - + // Verify the block's parent_root matches what we computed if block.parent_root != computed_parent_root { println!(" \x1b[31m✗ FAIL: Parent root mismatch\x1b[0m"); @@ -355,7 +397,7 @@ impl TestRunner { println!(" Got: {:?}\n", computed_parent_root); return Err(format!("Block {} parent_root mismatch", idx + 1).into()); } - + // Check if block is empty (no attestations) let attestation_count = { let mut count = 0u64; @@ -367,17 +409,17 @@ impl TestRunner { } count }; - + // Process the full block (header + operations) let result = 
state_after_slots.process_block(block); match result { Ok(new_state) => { state = new_state; - + // Compute the state root after processing let computed_state_root = hash_tree_root(&state); - + // Verify the computed state_root matches the expected one from the block if block.state_root != computed_state_root { println!(" \x1b[31m✗ FAIL: State root mismatch\x1b[0m"); @@ -385,7 +427,7 @@ impl TestRunner { println!(" Got: {:?}\n", computed_state_root); return Err(format!("Block {} state_root mismatch", idx + 1).into()); } - + if attestation_count > 0 { println!(" ✓ Processed with {} attestation(s)", attestation_count); } else { @@ -399,13 +441,13 @@ impl TestRunner { } } } - + // Verify post-state conditions Self::verify_post_state(&state, &test_case)?; - + println!("\n\x1b[32m✓ PASS\x1b[0m\n"); } - + Ok(()) } @@ -413,50 +455,64 @@ impl TestRunner { /// Handles test vectors from test_genesis directory pub fn run_genesis_test>(path: P) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; - + // Parse using the TestVectorFile structure let test_file: TestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + .into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("\n{}: {}", test_name, test_case.info.description); let state = &test_case.pre; - + let num_validators = state.validators.len_u64(); - println!(" Genesis time: {}, slot: {}, validators: {}", state.config.genesis_time, state.slot.0, num_validators); - + println!( + " Genesis time: {}, slot: {}, validators: {}", + state.config.genesis_time, state.slot.0, num_validators + ); + // Verify it's at genesis (slot 0) if state.slot.0 != 0 { return Err(format!("Expected genesis at slot 0, got slot {}", state.slot.0).into()); } - + // Verify checkpoint initialization if state.latest_justified.slot.0 != 0 { - 
return Err(format!("Expected latest_justified at slot 0, got {}", state.latest_justified.slot.0).into()); + return Err(format!( + "Expected latest_justified at slot 0, got {}", + state.latest_justified.slot.0 + ) + .into()); } - + if state.latest_finalized.slot.0 != 0 { - return Err(format!("Expected latest_finalized at slot 0, got {}", state.latest_finalized.slot.0).into()); + return Err(format!( + "Expected latest_finalized at slot 0, got {}", + state.latest_finalized.slot.0 + ) + .into()); } - + // Verify empty historical data let has_history = state.historical_block_hashes.get(0).is_ok(); if has_history { return Err("Expected empty historical block hashes at genesis".into()); } - + println!(" ✓ Genesis state validated"); - + // Verify post-state if present if test_case.post.is_some() { Self::verify_post_state(state, &test_case)?; } - + println!("\n\x1b[32m✓ PASS\x1b[0m\n"); - + Ok(()) } @@ -473,10 +529,10 @@ impl TestRunner { for (idx, block) in blocks.iter().enumerate() { println!(" Block {}: slot {}", idx + 1, block.slot.0); - + // Advance state to the block's slot let state_after_slots = state.process_slots(block.slot)?; - + // Try to process the full block (header + body) - we expect this to fail let result = state_after_slots.process_block(block); @@ -484,14 +540,16 @@ impl TestRunner { Ok(new_state) => { // Block processing succeeded, now validate state root let computed_state_root = hash_tree_root(&new_state); - + if block.state_root != computed_state_root { error_occurred = true; println!(" ✓ Correctly rejected: Invalid block state root"); break; // Stop at first error } else { println!(" \x1b[31m✗ FAIL: Block processed successfully - but should have failed!\x1b[0m\n"); - return Err("Expected block processing to fail, but it succeeded".into()); + return Err( + "Expected block processing to fail, but it succeeded".into() + ); } } Err(e) => { @@ -501,91 +559,110 @@ impl TestRunner { } } } - + if !error_occurred { return Err("Expected an exception but all 
blocks processed successfully".into()); } } - + Ok(()) } /// Helper: Verify genesis state only (no blocks) fn verify_genesis_state(test_case: TestCase) -> Result<(), Box> { let state = &test_case.pre; - + // Verify post-state if present Self::verify_post_state(state, &test_case)?; - + Ok(()) } /// Helper: Verify post-state conditions - fn verify_post_state(state: &State, test_case: &TestCase) -> Result<(), Box> { + fn verify_post_state( + state: &State, + test_case: &TestCase, + ) -> Result<(), Box> { if let Some(ref post) = test_case.post { // Verify slot if state.slot != post.slot { return Err(format!( "Post-state slot mismatch: expected {:?}, got {:?}", post.slot, state.slot - ).into()); + ) + .into()); } - + // Verify validator count if specified if let Some(expected_count) = post.validator_count { let num_validators = state.validators.len_u64(); - + if num_validators as usize != expected_count { return Err(format!( "Post-state validator count mismatch: expected {}, got {}", expected_count, num_validators - ).into()); + ) + .into()); } - println!(" ✓ Post-state verified: slot {}, {} validators", state.slot.0, num_validators); + println!( + " ✓ Post-state verified: slot {}, {} validators", + state.slot.0, num_validators + ); } else { println!(" ✓ Post-state verified: slot {}", state.slot.0); } } - + Ok(()) } /// Test runner for verify_signatures test vectors /// Tests XMSS signature verification on SignedBlockWithAttestation #[cfg(feature = "devnet1")] - pub fn run_verify_signatures_test>(path: P) -> Result<(), Box> { + pub fn run_verify_signatures_test>( + path: P, + ) -> Result<(), Box> { let json_content = fs::read_to_string(path.as_ref())?; - + // Parse using the VerifySignaturesTestVectorFile structure let test_file: VerifySignaturesTestVectorFile = serde_json::from_str(&json_content)?; - + // Get the first (and only) test case from the file - let (test_name, test_case) = test_file.tests.into_iter().next() + let (test_name, test_case) = test_file + .tests + 
.into_iter() + .next() .ok_or("No test case found in JSON")?; - + println!("\n{}: {}", test_name, test_case.info.description); - + let anchor_state = test_case.anchor_state; let signed_block = test_case.signed_block_with_attestation; - + // Print some debug info about what we're verifying println!(" Block slot: {}", signed_block.message.block.slot.0); - println!(" Proposer index: {}", signed_block.message.block.proposer_index.0); - + println!( + " Proposer index: {}", + signed_block.message.block.proposer_index.0 + ); + let attestation_count = signed_block.message.block.body.attestations.len_u64(); println!(" Attestations in block: {}", attestation_count); - println!(" Proposer attestation validator: {}", signed_block.message.proposer_attestation.validator_id.0); - + println!( + " Proposer attestation validator: {}", + signed_block.message.proposer_attestation.validator_id.0 + ); + let signature_count = signed_block.signature.len_u64(); println!(" Signatures: {}", signature_count); - + // Check if we expect this test to fail if let Some(ref exception) = test_case.expect_exception { println!(" Expecting exception: {}", exception); - + // Verify signatures - we expect this to fail (return false) let result = signed_block.verify_signatures(anchor_state); - + if result { println!(" \x1b[31m✗ FAIL: Signatures verified successfully but should have failed!\x1b[0m\n"); return Err("Expected signature verification to fail, but it succeeded".into()); @@ -596,7 +673,7 @@ impl TestRunner { } else { // Valid test case - signatures should verify successfully let result = signed_block.verify_signatures(anchor_state); - + if result { println!(" ✓ All signatures verified successfully"); println!("\n\x1b[32m✓ PASS\x1b[0m\n"); @@ -605,8 +682,7 @@ impl TestRunner { return Err("Signature verification failed".into()); } } - + Ok(()) } - } diff --git a/lean_client/containers/tests/test_vectors/verify_signatures.rs b/lean_client/containers/tests/test_vectors/verify_signatures.rs index 
cfc3301..13692f7 100644 --- a/lean_client/containers/tests/test_vectors/verify_signatures.rs +++ b/lean_client/containers/tests/test_vectors/verify_signatures.rs @@ -18,8 +18,7 @@ use super::runner::TestRunner; #[cfg(feature = "devnet1")] fn test_proposer_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_signature.json"; - TestRunner::run_verify_signatures_test(test_path) - .expect("test_proposer_signature failed"); + TestRunner::run_verify_signatures_test(test_path).expect("test_proposer_signature failed"); } #[test] @@ -40,8 +39,7 @@ fn test_proposer_and_attester_signatures() { #[ignore = "Requires xmss-verify feature for actual signature validation. Run with: cargo test --features xmss-verify"] fn test_invalid_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_invalid_signature.json"; - TestRunner::run_verify_signatures_test(test_path) - .expect("test_invalid_signature failed"); + TestRunner::run_verify_signatures_test(test_path).expect("test_invalid_signature failed"); } #[test] diff --git a/lean_client/containers/tests/unit_tests/attestation_aggregation.rs b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs index 285aa46..72d48b4 100644 --- a/lean_client/containers/tests/unit_tests/attestation_aggregation.rs +++ b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs @@ -1,10 +1,12 @@ #[cfg(feature = "devnet2")] #[cfg(test)] mod tests { - use containers::attestation::{AggregatedAttestation, AggregationBits, Attestation, AttestationData}; - use containers::{Bytes32, Uint64}; + use containers::attestation::{ + AggregatedAttestation, AggregationBits, Attestation, AttestationData, + }; use containers::checkpoint::Checkpoint; use containers::slot::Slot; + use containers::{Bytes32, Uint64}; #[test] fn test_aggregated_attestation_structure() { @@ -21,17 +23,22 @@ mod tests { source: Checkpoint { root: Bytes32::default(), 
slot: Slot(2), - } + }, }; let bits = AggregationBits::from_validator_indices(&vec![2, 7]); let agg = AggregatedAttestation { aggregation_bits: bits.clone(), - data: att_data.clone() + data: att_data.clone(), }; let indices = agg.aggregation_bits.to_validator_indices(); - assert_eq!(indices.into_iter().collect::>(), vec![2, 7].into_iter().collect()); + assert_eq!( + indices + .into_iter() + .collect::>(), + vec![2, 7].into_iter().collect() + ); assert_eq!(agg.data, att_data); } @@ -50,7 +57,7 @@ mod tests { source: Checkpoint { root: Bytes32::default(), slot: Slot(2), - } + }, }; let att_data2 = AttestationData { slot: Slot(6), @@ -65,7 +72,7 @@ mod tests { source: Checkpoint { root: Bytes32::default(), slot: Slot(3), - } + }, }; let attestations = vec![ @@ -88,7 +95,12 @@ mod tests { let agg1 = aggregated.iter().find(|agg| agg.data == att_data1).unwrap(); let validator_ids1 = agg1.aggregation_bits.to_validator_indices(); - assert_eq!(validator_ids1.into_iter().collect::>(), vec![1, 3].into_iter().collect()); + assert_eq!( + validator_ids1 + .into_iter() + .collect::>(), + vec![1, 3].into_iter().collect() + ); let agg2 = aggregated.iter().find(|agg| agg.data == att_data2).unwrap(); let validator_ids2 = agg2.aggregation_bits.to_validator_indices(); @@ -116,7 +128,7 @@ mod tests { source: Checkpoint { root: Bytes32::default(), slot: Slot(2), - } + }, }; let attestations = vec![Attestation { diff --git a/lean_client/containers/tests/unit_tests/common.rs b/lean_client/containers/tests/unit_tests/common.rs index 26fa0a5..1a648b8 100644 --- a/lean_client/containers/tests/unit_tests/common.rs +++ b/lean_client/containers/tests/unit_tests/common.rs @@ -1,7 +1,15 @@ -use containers::{Attestation, Attestations, BlockWithAttestation, Config, SignedBlockWithAttestation, block::{Block, BlockBody, BlockHeader, hash_tree_root}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, Validators, AggregatedAttestation, Signature}; -use 
ssz::{PersistentList}; -use typenum::U4096; use containers::block::BlockSignatures; +use containers::{ + block::{hash_tree_root, Block, BlockBody, BlockHeader}, + checkpoint::Checkpoint, + slot::Slot, + state::State, + types::{Bytes32, ValidatorIndex}, + AggregatedAttestation, Attestation, Attestations, BlockWithAttestation, Config, Signature, + SignedBlockWithAttestation, Validators, +}; +use ssz::PersistentList; +use typenum::U4096; pub const DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tests @@ -10,7 +18,11 @@ pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tes const _: [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT] = [(); DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT - TEST_VALIDATOR_COUNT]; -pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Option) -> SignedBlockWithAttestation { +pub fn create_block( + slot: u64, + parent_header: &mut BlockHeader, + attestations: Option, +) -> SignedBlockWithAttestation { #[cfg(feature = "devnet1")] let body = BlockBody { attestations: attestations.unwrap_or_else(PersistentList::default), @@ -19,23 +31,26 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op let body = BlockBody { attestations: { let attestations_vec = attestations.unwrap_or_default(); - + // Convert PersistentList into a Vec - let attestations_vec: Vec = attestations_vec.into_iter().cloned().collect(); + let attestations_vec: Vec = + attestations_vec.into_iter().cloned().collect(); let aggregated: Vec = AggregatedAttestation::aggregate_by_data(&attestations_vec); - let aggregated: Vec = AggregatedAttestation::aggregate_by_data(&attestations_vec); // Create a new empty PersistentList - let mut persistent_list: PersistentList = PersistentList::default(); + let mut persistent_list: PersistentList = + PersistentList::default(); // Push each aggregated attestation for 
agg in aggregated { - persistent_list.push(agg).expect("PersistentList capacity exceeded"); + persistent_list + .push(agg) + .expect("PersistentList capacity exceeded"); } persistent_list @@ -43,7 +58,6 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op // other BlockBody fields... }; - let block_message = Block { slot: Slot(slot), proposer_index: ValidatorIndex(slot % 10), @@ -70,11 +84,10 @@ pub fn create_block(slot: u64, parent_header: &mut BlockHeader, attestations: Op signature: BlockSignatures { attestation_signatures: PersistentList::default(), proposer_signature: Signature::default(), - } + }, }; - - return_value + return_value } pub fn create_attestations(indices: &[usize]) -> Vec { @@ -109,8 +122,11 @@ pub fn base_state(config: Config) -> State { } pub fn base_state_with_validators(config: Config, num_validators: usize) -> State { - use containers::{HistoricalBlockHashes, JustificationRoots, JustifiedSlots, JustificationsValidators, validator::Validator, Uint64}; - + use containers::{ + validator::Validator, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, + JustifiedSlots, Uint64, + }; + // Create validators list with the specified number of validators let mut validators = Validators::default(); for i in 0..num_validators { @@ -120,7 +136,7 @@ pub fn base_state_with_validators(config: Config, num_validators: usize) -> Stat }; validators.push(validator).expect("within limit"); } - + State { config, slot: Slot(0), @@ -136,7 +152,5 @@ pub fn base_state_with_validators(config: Config, num_validators: usize) -> Stat } pub fn sample_config() -> Config { - Config { - genesis_time: 0, - } -} \ No newline at end of file + Config { genesis_time: 0 } +} diff --git a/lean_client/containers/tests/unit_tests/mod.rs b/lean_client/containers/tests/unit_tests/mod.rs index b9f442f..1bef390 100644 --- a/lean_client/containers/tests/unit_tests/mod.rs +++ b/lean_client/containers/tests/unit_tests/mod.rs @@ -1,7 +1,7 @@ // 
tests/unit_tests/mod.rs +mod attestation_aggregation; mod common; mod state_basic; mod state_justifications; mod state_process; mod state_transition; -mod attestation_aggregation; diff --git a/lean_client/containers/tests/unit_tests/state_basic.rs b/lean_client/containers/tests/unit_tests/state_basic.rs index 5fa16e1..085384a 100644 --- a/lean_client/containers/tests/unit_tests/state_basic.rs +++ b/lean_client/containers/tests/unit_tests/state_basic.rs @@ -1,5 +1,10 @@ // tests/state_basic.rs -use containers::{block::{BlockBody, hash_tree_root}, state::State, types::Uint64, ValidatorIndex}; +use containers::{ + block::{hash_tree_root, BlockBody}, + state::State, + types::Uint64, + ValidatorIndex, +}; use pretty_assertions::assert_eq; #[path = "common.rs"] @@ -14,8 +19,13 @@ fn test_generate_genesis() { assert_eq!(state.config, config); assert_eq!(state.slot.0, 0); - let empty_body = BlockBody { attestations: ssz::PersistentList::default() }; - assert_eq!(state.latest_block_header.body_root, hash_tree_root(&empty_body)); + let empty_body = BlockBody { + attestations: ssz::PersistentList::default(), + }; + assert_eq!( + state.latest_block_header.body_root, + hash_tree_root(&empty_body) + ); // Check that collections are empty by trying to get the first element assert!(state.historical_block_hashes.get(0).is_err()); @@ -41,7 +51,9 @@ fn test_slot_justifiability_rules() { #[test] fn test_hash_tree_root() { - let body = BlockBody { attestations: ssz::PersistentList::default() }; + let body = BlockBody { + attestations: ssz::PersistentList::default(), + }; let block = containers::block::Block { slot: containers::slot::Slot(1), proposer_index: ValidatorIndex(0), @@ -52,4 +64,4 @@ fn test_hash_tree_root() { let root = hash_tree_root(&block); assert_ne!(root, containers::types::Bytes32(ssz::H256::zero())); -} \ No newline at end of file +} diff --git a/lean_client/containers/tests/unit_tests/state_justifications.rs 
b/lean_client/containers/tests/unit_tests/state_justifications.rs index 9a7b0cc..afdd220 100644 --- a/lean_client/containers/tests/unit_tests/state_justifications.rs +++ b/lean_client/containers/tests/unit_tests/state_justifications.rs @@ -1,18 +1,12 @@ // tests/state_justifications.rs -use containers::{ - state::State, - types::Bytes32, - Config -}; +use containers::{state::State, types::Bytes32, Config}; use pretty_assertions::assert_eq; use rstest::{fixture, rstest}; use ssz::PersistentList as List; #[path = "common.rs"] mod common; -use common::{ - base_state, create_attestations, sample_config, TEST_VALIDATOR_COUNT, -}; +use common::{base_state, create_attestations, sample_config, TEST_VALIDATOR_COUNT}; #[fixture] fn config() -> Config { @@ -49,7 +43,7 @@ fn test_get_justifications_single_root() { let mut roots_list = List::default(); roots_list.push(root1).unwrap(); state.justifications_roots = roots_list; - + // Convert Vec to BitList let mut bitlist = ssz::BitList::with_length(TEST_VALIDATOR_COUNT); for (i, &val) in votes1.iter().enumerate() { @@ -88,7 +82,7 @@ fn test_get_justifications_multiple_roots() { roots_list.push(root2).unwrap(); roots_list.push(root3).unwrap(); state.justifications_roots = roots_list; - + // Convert Vec to BitList let mut bitlist = ssz::BitList::with_length(all_votes.len()); for (i, &val) in all_votes.iter().enumerate() { @@ -113,16 +107,20 @@ fn test_with_justifications_empty() { let mut initial_state = base_state(config.clone()); let mut roots_list = List::default(); - roots_list.push(Bytes32(ssz::H256::from_slice(&[1u8;32]))).unwrap(); + roots_list + .push(Bytes32(ssz::H256::from_slice(&[1u8; 32]))) + .unwrap(); initial_state.justifications_roots = roots_list; - + let mut bitlist = ssz::BitList::with_length(TEST_VALIDATOR_COUNT); for i in 0..TEST_VALIDATOR_COUNT { bitlist.set(i, true); } initial_state.justifications_validators = bitlist; - let new_state = 
initial_state.clone().with_justifications(std::collections::BTreeMap::new()); + let new_state = initial_state + .clone() + .with_justifications(std::collections::BTreeMap::new()); assert!(new_state.justifications_roots.get(0).is_err()); assert!(new_state.justifications_validators.get(0).is_none()); @@ -149,11 +147,15 @@ fn test_with_justifications_deterministic_order() { // Expected roots in sorted order (root1 < root2) assert_eq!(new_state.justifications_roots.get(0).ok(), Some(&root1)); assert_eq!(new_state.justifications_roots.get(1).ok(), Some(&root2)); - + // Verify the bitlist contains the concatenated votes let expected_validators = [votes1, votes2].concat(); for (i, &expected_val) in expected_validators.iter().enumerate() { - let actual_val = new_state.justifications_validators.get(i).map(|b| *b).unwrap_or(false); + let actual_val = new_state + .justifications_validators + .get(i) + .map(|b| *b) + .unwrap_or(false); assert_eq!(actual_val, expected_val); } } diff --git a/lean_client/containers/tests/unit_tests/state_process.rs b/lean_client/containers/tests/unit_tests/state_process.rs index afc1887..5df98cf 100644 --- a/lean_client/containers/tests/unit_tests/state_process.rs +++ b/lean_client/containers/tests/unit_tests/state_process.rs @@ -1,6 +1,6 @@ // tests/state_process.rs use containers::{ - block::{Block, BlockBody, hash_tree_root}, + block::{hash_tree_root, Block, BlockBody}, checkpoint::Checkpoint, slot::Slot, state::State, @@ -26,15 +26,24 @@ pub fn genesis_state() -> State { fn test_process_slot() { let genesis_state = genesis_state(); - assert_eq!(genesis_state.latest_block_header.state_root, Bytes32(ssz::H256::zero())); + assert_eq!( + genesis_state.latest_block_header.state_root, + Bytes32(ssz::H256::zero()) + ); let state_after_slot = genesis_state.process_slot(); let expected_root = hash_tree_root(&genesis_state); - assert_eq!(state_after_slot.latest_block_header.state_root, expected_root); + assert_eq!( + 
state_after_slot.latest_block_header.state_root, + expected_root + ); let state_after_second_slot = state_after_slot.process_slot(); - assert_eq!(state_after_second_slot.latest_block_header.state_root, expected_root); + assert_eq!( + state_after_second_slot.latest_block_header.state_root, + expected_root + ); } #[test] @@ -45,7 +54,10 @@ fn test_process_slots() { let new_state = genesis_state.process_slots(target_slot).unwrap(); assert_eq!(new_state.slot, target_slot); - assert_eq!(new_state.latest_block_header.state_root, hash_tree_root(&genesis_state)); + assert_eq!( + new_state.latest_block_header.state_root, + hash_tree_root(&genesis_state) + ); } #[test] @@ -68,11 +80,21 @@ fn test_process_block_header_valid() { assert_eq!(new_state.latest_finalized.root, genesis_header_root); assert_eq!(new_state.latest_justified.root, genesis_header_root); - assert_eq!(new_state.historical_block_hashes.get(0).ok(), Some(&genesis_header_root)); - let justified_slot_0 = new_state.justified_slots.get(0).map(|b| *b).unwrap_or(false); + assert_eq!( + new_state.historical_block_hashes.get(0).ok(), + Some(&genesis_header_root) + ); + let justified_slot_0 = new_state + .justified_slots + .get(0) + .map(|b| *b) + .unwrap_or(false); assert_eq!(justified_slot_0, true); assert_eq!(new_state.latest_block_header.slot, Slot(1)); - assert_eq!(new_state.latest_block_header.state_root, Bytes32(ssz::H256::zero())); + assert_eq!( + new_state.latest_block_header.state_root, + Bytes32(ssz::H256::zero()) + ); } #[rstest] @@ -95,7 +117,9 @@ fn test_process_block_header_invalid( proposer_index: ValidatorIndex(bad_proposer), parent_root: bad_parent_root.unwrap_or(parent_root), state_root: Bytes32(ssz::H256::zero()), - body: BlockBody { attestations: List::default() }, + body: BlockBody { + attestations: List::default(), + }, }; let result = state_at_slot_1.process_block_header(&block); @@ -115,13 +139,17 @@ fn test_process_attestations_justification_and_finalization() { let mut state_at_slot_1 = 
state.process_slots(Slot(1)).unwrap(); let block1 = create_block(1, &mut state_at_slot_1.latest_block_header, None); // Use process_block_header and process_operations separately to avoid state root validation - let state_after_header1 = state_at_slot_1.process_block_header(&block1.message.block).unwrap(); + let state_after_header1 = state_at_slot_1 + .process_block_header(&block1.message.block) + .unwrap(); state = state_after_header1.process_attestations(&block1.message.block.body.attestations); // Process slot 4 and block let mut state_at_slot_4 = state.process_slots(Slot(4)).unwrap(); let block4 = create_block(4, &mut state_at_slot_4.latest_block_header, None); - let state_after_header4 = state_at_slot_4.process_block_header(&block4.message.block).unwrap(); + let state_after_header4 = state_at_slot_4 + .process_block_header(&block4.message.block) + .unwrap(); state = state_after_header4.process_attestations(&block4.message.block.body.attestations); // Advance to slot 5 @@ -151,15 +179,21 @@ fn test_process_attestations_justification_and_finalization() { // Convert Vec to PersistentList let mut attestations_list: List<_, U4096> = List::default(); - for a in attestations_for_4 { - attestations_list.push(a).unwrap(); + for a in attestations_for_4 { + attestations_list.push(a).unwrap(); } let new_state = state.process_attestations(&attestations_list); assert_eq!(new_state.latest_justified, checkpoint4); - let justified_slot_4 = new_state.justified_slots.get(4).map(|b| *b).unwrap_or(false); + let justified_slot_4 = new_state + .justified_slots + .get(4) + .map(|b| *b) + .unwrap_or(false); assert_eq!(justified_slot_4, true); assert_eq!(new_state.latest_finalized, genesis_checkpoint); - assert!(!new_state.get_justifications().contains_key(&checkpoint4.root)); -} \ No newline at end of file + assert!(!new_state + .get_justifications() + .contains_key(&checkpoint4.root)); +} diff --git a/lean_client/containers/tests/unit_tests/state_transition.rs 
b/lean_client/containers/tests/unit_tests/state_transition.rs index 9fe6abb..7725210 100644 --- a/lean_client/containers/tests/unit_tests/state_transition.rs +++ b/lean_client/containers/tests/unit_tests/state_transition.rs @@ -167,7 +167,7 @@ fn test_state_transition_devnet2() { } state_after_header.process_attestations(&unaggregated_attestations) }; - + // Ensure the state root matches the expected state let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), diff --git a/lean_client/env-config/src/lib.rs b/lean_client/env-config/src/lib.rs index 972005d..109ac2d 100644 --- a/lean_client/env-config/src/lib.rs +++ b/lean_client/env-config/src/lib.rs @@ -1 +1 @@ -// Empty on purpose \ No newline at end of file +// Empty on purpose diff --git a/lean_client/fork_choice/src/handlers.rs b/lean_client/fork_choice/src/handlers.rs index fa9aa89..9f3837d 100644 --- a/lean_client/fork_choice/src/handlers.rs +++ b/lean_client/fork_choice/src/handlers.rs @@ -34,7 +34,6 @@ pub fn on_attestation( #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot; - #[cfg(feature = "devnet2")] let validator_id = ValidatorIndex(signed_attestation.validator_id); #[cfg(feature = "devnet2")] @@ -80,9 +79,7 @@ pub fn on_attestation( if store .latest_known_attestations .get(&validator_id) - .map_or(true, |existing| { - existing.message.slot < attestation_slot - }) + .map_or(true, |existing| existing.message.slot < attestation_slot) { store .latest_known_attestations @@ -114,14 +111,12 @@ pub fn on_attestation( .latest_new_attestations .insert(validator_id, signed_attestation); } - + #[cfg(feature = "devnet2")] if store .latest_new_attestations .get(&validator_id) - .map_or(true, |existing| { - existing.message.slot < attestation_slot - }) + .map_or(true, |existing| existing.message.slot < attestation_slot) { store .latest_new_attestations @@ -246,7 +241,8 @@ fn process_block_internal( .zip(attestation_signatures) { let validator_ids: 
Vec = aggregated_attestation - .aggregation_bits.0 + .aggregation_bits + .0 .iter() .enumerate() .filter(|(_, bit)| **bit) diff --git a/lean_client/fork_choice/src/store.rs b/lean_client/fork_choice/src/store.rs index 3296d06..8165443 100644 --- a/lean_client/fork_choice/src/store.rs +++ b/lean_client/fork_choice/src/store.rs @@ -1,7 +1,6 @@ use containers::{ - attestation::SignedAttestation, - block::SignedBlockWithAttestation, checkpoint::Checkpoint, config::Config, state::State, - Bytes32, Root, Slot, ValidatorIndex, + attestation::SignedAttestation, block::SignedBlockWithAttestation, checkpoint::Checkpoint, + config::Config, state::State, Bytes32, Root, Slot, ValidatorIndex, }; use ssz::SszHash; use std::collections::HashMap; @@ -185,7 +184,8 @@ pub fn update_safe_target(store: &mut Store) { let min_score = (n_validators * 2 + 2) / 3; let root = store.latest_justified.root; - store.safe_target = get_fork_choice_head(store, root, &store.latest_new_attestations, min_score); + store.safe_target = + get_fork_choice_head(store, root, &store.latest_new_attestations, min_score); } pub fn accept_new_attestations(store: &mut Store) { diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index 4f8521f..bb32273 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -4,8 +4,11 @@ use fork_choice::{ }; use containers::{ - attestation::{Attestation, AttestationData, SignedAttestation, Signature}, - block::{hash_tree_root, Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlockWithAttestation}, + attestation::{Attestation, AttestationData, Signature, SignedAttestation}, + block::{ + hash_tree_root, Block, BlockBody, BlockHeader, BlockWithAttestation, + SignedBlockWithAttestation, + }, checkpoint::Checkpoint, config::Config, state::State, @@ -305,7 +308,9 @@ fn convert_test_anchor_block(test_block: 
&TestAnchorBlock) -> SignedBlockWithAtt } #[cfg(feature = "devnet1")] -fn convert_test_block(test_block_with_att: &TestBlockWithAttestation) -> SignedBlockWithAttestation { +fn convert_test_block( + test_block_with_att: &TestBlockWithAttestation, +) -> SignedBlockWithAttestation { let test_block = &test_block_with_att.block; let mut attestations = ssz::PersistentList::default(); @@ -419,7 +424,7 @@ fn verify_checks( Some(c) => c, None => return Ok(()), }; - + if let Some(expected_slot) = checks.head_slot { let actual_slot = store.blocks[&store.head].message.block.slot.0; if actual_slot != expected_slot { @@ -533,7 +538,8 @@ fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> { // Advance time to the block's slot to ensure attestations are processable // SECONDS_PER_SLOT is 4 (not 12) - let block_time = store.config.genesis_time + (signed_block.message.block.slot.0 * 4); + let block_time = + store.config.genesis_time + (signed_block.message.block.slot.0 * 4); on_tick(&mut store, block_time, false); on_block(&mut store, signed_block)?; diff --git a/lean_client/fork_choice/tests/unit_tests.rs b/lean_client/fork_choice/tests/unit_tests.rs index b4490da..480ba3f 100644 --- a/lean_client/fork_choice/tests/unit_tests.rs +++ b/lean_client/fork_choice/tests/unit_tests.rs @@ -1,6 +1,6 @@ mod unit_tests { pub mod common; + pub mod fork_choice; pub mod time; pub mod votes; - pub mod fork_choice; } diff --git a/lean_client/fork_choice/tests/unit_tests/common.rs b/lean_client/fork_choice/tests/unit_tests/common.rs index 9539c34..dc1a762 100644 --- a/lean_client/fork_choice/tests/unit_tests/common.rs +++ b/lean_client/fork_choice/tests/unit_tests/common.rs @@ -1,4 +1,3 @@ -use fork_choice::store::{get_forkchoice_store, Store}; use containers::{ attestation::Attestation, block::{Block, BlockBody, BlockWithAttestation, SignedBlockWithAttestation}, @@ -7,19 +6,16 @@ use containers::{ validator::Validator, Bytes32, Slot, Uint64, ValidatorIndex, }; +use 
fork_choice::store::{get_forkchoice_store, Store}; use ssz::SszHash; pub fn create_test_store() -> Store { - let config = Config { - genesis_time: 1000, - }; - - let validators = vec![ - Validator::default(); 10 - ]; - + let config = Config { genesis_time: 1000 }; + + let validators = vec![Validator::default(); 10]; + let state = State::generate_genesis_with_validators(Uint64(1000), validators); - + let block = Block { slot: Slot(0), proposer_index: ValidatorIndex(0), @@ -27,7 +23,7 @@ pub fn create_test_store() -> Store { state_root: Bytes32(state.hash_tree_root()), body: BlockBody::default(), }; - + let block_with_attestation = BlockWithAttestation { block: block.clone(), proposer_attestation: Attestation::default(), diff --git a/lean_client/fork_choice/tests/unit_tests/fork_choice.rs b/lean_client/fork_choice/tests/unit_tests/fork_choice.rs index fc1e7e6..d2b3833 100644 --- a/lean_client/fork_choice/tests/unit_tests/fork_choice.rs +++ b/lean_client/fork_choice/tests/unit_tests/fork_choice.rs @@ -1,12 +1,12 @@ use super::common::create_test_store; -use fork_choice::store::{get_proposal_head, get_vote_target}; use containers::Slot; +use fork_choice::store::{get_proposal_head, get_vote_target}; #[test] fn test_get_proposal_head_basic() { let mut store = create_test_store(); let head = get_proposal_head(&mut store, Slot(0)); - + assert_eq!(head, store.head); } @@ -14,9 +14,9 @@ fn test_get_proposal_head_basic() { fn test_get_proposal_head_advances_time() { let mut store = create_test_store(); let initial_time = store.time; - + get_proposal_head(&mut store, Slot(5)); - + assert!(store.time >= initial_time); } diff --git a/lean_client/fork_choice/tests/unit_tests/time.rs b/lean_client/fork_choice/tests/unit_tests/time.rs index 03260c7..ff99491 100644 --- a/lean_client/fork_choice/tests/unit_tests/time.rs +++ b/lean_client/fork_choice/tests/unit_tests/time.rs @@ -1,7 +1,7 @@ use super::common::create_test_store; +use containers::{Slot, Uint64}; use 
fork_choice::handlers::on_tick; use fork_choice::store::{tick_interval, INTERVALS_PER_SLOT, SECONDS_PER_SLOT}; -use containers::{Slot, Uint64}; #[test] fn test_on_tick_basic() { @@ -31,7 +31,7 @@ fn test_on_tick_already_current() { let initial_time = store.time; let current_target = store.config.genesis_time + initial_time; - // Try to advance to current time + // Try to advance to current time on_tick(&mut store, current_target, true); // Should not change significantly @@ -86,7 +86,7 @@ fn test_tick_interval_sequence() { #[test] fn test_tick_interval_actions_by_phase() { let mut store = create_test_store(); - + // Reset store time to 0 relative to genesis for clean testing store.time = 0; @@ -101,11 +101,10 @@ fn test_tick_interval_actions_by_phase() { } } - #[test] fn test_slot_time_calculations() { let genesis_time = 1000; - + // Slot 0 let slot_0_time = genesis_time + (0 * SECONDS_PER_SLOT); assert_eq!(slot_0_time, genesis_time); diff --git a/lean_client/fork_choice/tests/unit_tests/votes.rs b/lean_client/fork_choice/tests/unit_tests/votes.rs index 4a1b688..d6c2ad4 100644 --- a/lean_client/fork_choice/tests/unit_tests/votes.rs +++ b/lean_client/fork_choice/tests/unit_tests/votes.rs @@ -1,22 +1,35 @@ use super::common::create_test_store; -use fork_choice::handlers::on_attestation; -use fork_choice::store::{accept_new_attestations, INTERVALS_PER_SLOT}; use containers::{ - attestation::{Attestation, AttestationData, SignedAttestation, Signature}, + attestation::{Attestation, AttestationData, Signature, SignedAttestation}, checkpoint::Checkpoint, Bytes32, Slot, Uint64, ValidatorIndex, }; +use fork_choice::handlers::on_attestation; +use fork_choice::store::{accept_new_attestations, INTERVALS_PER_SLOT}; #[cfg(feature = "devnet1")] -fn create_signed_attestation(validator_id: u64, slot: Slot, head_root: Bytes32) -> SignedAttestation { +fn create_signed_attestation( + validator_id: u64, + slot: Slot, + head_root: Bytes32, +) -> SignedAttestation { SignedAttestation { 
message: Attestation { validator_id: Uint64(validator_id), data: AttestationData { slot, - head: Checkpoint { root: head_root, slot }, - target: Checkpoint { root: head_root, slot }, - source: Checkpoint { root: Bytes32::default(), slot: Slot(0) }, + head: Checkpoint { + root: head_root, + slot, + }, + target: Checkpoint { + root: head_root, + slot, + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(0), + }, }, }, signature: Signature::default(), @@ -33,54 +46,59 @@ fn test_accept_new_attestations() { let val2 = ValidatorIndex(2); let val3 = ValidatorIndex(3); - store.latest_known_attestations.insert( - val1, - create_signed_attestation(1, Slot(0), store.head), - ); + store + .latest_known_attestations + .insert(val1, create_signed_attestation(1, Slot(0), store.head)); // Val1 updates their attestation to Slot 1 - store.latest_new_attestations.insert( - val1, - create_signed_attestation(1, Slot(1), store.head), - ); + store + .latest_new_attestations + .insert(val1, create_signed_attestation(1, Slot(1), store.head)); // Val2 casts a new attestation for Slot 1 - store.latest_new_attestations.insert( - val2, - create_signed_attestation(2, Slot(1), store.head), - ); + store + .latest_new_attestations + .insert(val2, create_signed_attestation(2, Slot(1), store.head)); // Val3 casts a new attestation for Slot 2 - store.latest_new_attestations.insert( - val3, - create_signed_attestation(3, Slot(2), store.head), - ); + store + .latest_new_attestations + .insert(val3, create_signed_attestation(3, Slot(2), store.head)); accept_new_attestations(&mut store); assert_eq!(store.latest_new_attestations.len(), 0); assert_eq!(store.latest_known_attestations.len(), 3); - assert_eq!(store.latest_known_attestations[&val1].message.data.slot, Slot(1)); - assert_eq!(store.latest_known_attestations[&val2].message.data.slot, Slot(1)); - assert_eq!(store.latest_known_attestations[&val3].message.data.slot, Slot(2)); + assert_eq!( + 
store.latest_known_attestations[&val1].message.data.slot, + Slot(1) + ); + assert_eq!( + store.latest_known_attestations[&val2].message.data.slot, + Slot(1) + ); + assert_eq!( + store.latest_known_attestations[&val3].message.data.slot, + Slot(2) + ); } #[test] #[cfg(feature = "devnet1")] fn test_accept_new_attestations_multiple() { let mut store = create_test_store(); - + for i in 0..5 { store.latest_new_attestations.insert( ValidatorIndex(i), create_signed_attestation(i, Slot(i), store.head), ); } - + assert_eq!(store.latest_new_attestations.len(), 5); assert_eq!(store.latest_known_attestations.len(), 0); - + accept_new_attestations(&mut store); - + assert_eq!(store.latest_new_attestations.len(), 0); assert_eq!(store.latest_known_attestations.len(), 5); } @@ -89,9 +107,9 @@ fn test_accept_new_attestations_multiple() { fn test_accept_new_attestations_empty() { let mut store = create_test_store(); let initial_known = store.latest_known_attestations.len(); - + accept_new_attestations(&mut store); - + assert_eq!(store.latest_new_attestations.len(), 0); assert_eq!(store.latest_known_attestations.len(), initial_known); } @@ -107,29 +125,55 @@ fn test_on_attestation_lifecycle() { // 1. Attestation from network (gossip) let signed_attestation_gossip = create_signed_attestation(1, slot_0, store.head); - on_attestation(&mut store, signed_attestation_gossip.clone(), false).expect("Gossip attestation valid"); - + on_attestation(&mut store, signed_attestation_gossip.clone(), false) + .expect("Gossip attestation valid"); + // Should be in new_attestations, not known_attestations assert!(store.latest_new_attestations.contains_key(&validator_idx)); assert!(!store.latest_known_attestations.contains_key(&validator_idx)); - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, slot_0); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + slot_0 + ); // 2. 
Same attestation included in a block on_attestation(&mut store, signed_attestation_gossip, true).expect("Block attestation valid"); - + assert!(store.latest_known_attestations.contains_key(&validator_idx)); - assert_eq!(store.latest_known_attestations[&validator_idx].message.data.slot, slot_0); + assert_eq!( + store.latest_known_attestations[&validator_idx] + .message + .data + .slot, + slot_0 + ); // 3. Newer attestation from network store.time = 1 * INTERVALS_PER_SLOT; // Advance time let signed_attestation_next = create_signed_attestation(1, slot_1, store.head); - on_attestation(&mut store, signed_attestation_next, false).expect("Next gossip attestation valid"); + on_attestation(&mut store, signed_attestation_next, false) + .expect("Next gossip attestation valid"); // Should update new_attestations - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, slot_1); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + slot_1 + ); // Known attestations should still be at slot 0 until accepted - assert_eq!(store.latest_known_attestations[&validator_idx].message.data.slot, slot_0); + assert_eq!( + store.latest_known_attestations[&validator_idx] + .message + .data + .slot, + slot_0 + ); } #[test] @@ -137,7 +181,7 @@ fn test_on_attestation_lifecycle() { fn test_on_attestation_future_slot() { let mut store = create_test_store(); let future_slot = Slot(100); // Far in the future - + let signed_attestation = create_signed_attestation(1, future_slot, store.head); let result = on_attestation(&mut store, signed_attestation, false); @@ -149,21 +193,33 @@ fn test_on_attestation_future_slot() { fn test_on_attestation_update_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // First attestation at slot 0 let signed_attestation1 = create_signed_attestation(1, Slot(0), store.head); - + on_attestation(&mut store, signed_attestation1, false).expect("First attestation valid"); - 
assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(0)); - + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(0) + ); + // Advance time to allow slot 1 store.time = 1 * INTERVALS_PER_SLOT; - + // Second attestation at slot 1 let signed_attestation2 = create_signed_attestation(1, Slot(1), store.head); - + on_attestation(&mut store, signed_attestation2, false).expect("Second attestation valid"); - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(1)); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(1) + ); } #[test] @@ -171,22 +227,35 @@ fn test_on_attestation_update_vote() { fn test_on_attestation_ignore_old_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // Advance time store.time = 2 * INTERVALS_PER_SLOT; - + // Newer attestation first let signed_attestation_new = create_signed_attestation(1, Slot(2), store.head); - + on_attestation(&mut store, signed_attestation_new, false).expect("New attestation valid"); - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(2)); - + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(2) + ); + // Older attestation second let signed_attestation_old = create_signed_attestation(1, Slot(1), store.head); - - on_attestation(&mut store, signed_attestation_old, false).expect("Old attestation processed but ignored"); + + on_attestation(&mut store, signed_attestation_old, false) + .expect("Old attestation processed but ignored"); // Should still be slot 2 - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(2)); + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(2) + ); } #[test] @@ -194,18 +263,18 @@ fn test_on_attestation_ignore_old_vote() { fn test_on_attestation_from_block_supersedes_new() { let 
mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // First, add attestation via gossip let signed_attestation1 = create_signed_attestation(1, Slot(0), store.head); on_attestation(&mut store, signed_attestation1, false).expect("Gossip attestation valid"); - + assert!(store.latest_new_attestations.contains_key(&validator_idx)); assert!(!store.latest_known_attestations.contains_key(&validator_idx)); - + // Then, add same attestation via block (on-chain) let signed_attestation2 = create_signed_attestation(1, Slot(0), store.head); on_attestation(&mut store, signed_attestation2, true).expect("Block attestation valid"); - + // Should move from new to known assert!(!store.latest_new_attestations.contains_key(&validator_idx)); assert!(store.latest_known_attestations.contains_key(&validator_idx)); @@ -216,19 +285,31 @@ fn test_on_attestation_from_block_supersedes_new() { fn test_on_attestation_newer_from_block_removes_older_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); - + // Add older attestation via gossip let signed_attestation_gossip = create_signed_attestation(1, Slot(0), store.head); on_attestation(&mut store, signed_attestation_gossip, false).expect("Gossip attestation valid"); - - assert_eq!(store.latest_new_attestations[&validator_idx].message.data.slot, Slot(0)); - + + assert_eq!( + store.latest_new_attestations[&validator_idx] + .message + .data + .slot, + Slot(0) + ); + // Add newer attestation via block (on-chain) store.time = 1 * INTERVALS_PER_SLOT; let signed_attestation_block = create_signed_attestation(1, Slot(1), store.head); on_attestation(&mut store, signed_attestation_block, true).expect("Block attestation valid"); - + // New attestation should be removed (superseded by newer on-chain one) assert!(!store.latest_new_attestations.contains_key(&validator_idx)); - assert_eq!(store.latest_known_attestations[&validator_idx].message.data.slot, Slot(1)); + assert_eq!( + 
store.latest_known_attestations[&validator_idx] + .message + .data + .slot, + Slot(1) + ); } diff --git a/lean_client/networking/src/gossipsub/config.rs b/lean_client/networking/src/gossipsub/config.rs index 05488c2..67061bc 100644 --- a/lean_client/networking/src/gossipsub/config.rs +++ b/lean_client/networking/src/gossipsub/config.rs @@ -15,9 +15,9 @@ impl GossipsubConfig { pub fn new() -> Self { let justification_lookback_slots: u64 = 3; let seconds_per_slot: u64 = 12; - + let seen_ttl_secs = seconds_per_slot * justification_lookback_slots * 2; - + let config = ConfigBuilder::default() // leanSpec: heartbeat_interval_secs = 0.7 .heartbeat_interval(Duration::from_millis(700)) diff --git a/lean_client/networking/src/gossipsub/message.rs b/lean_client/networking/src/gossipsub/message.rs index b578e75..4ac1ae1 100644 --- a/lean_client/networking/src/gossipsub/message.rs +++ b/lean_client/networking/src/gossipsub/message.rs @@ -1,8 +1,8 @@ use crate::gossipsub::topic::GossipsubKind; use crate::gossipsub::topic::GossipsubTopic; +use containers::SignedAttestation; use containers::SignedBlockWithAttestation; use containers::ssz::SszReadDefault; -use containers::{SignedAttestation}; use libp2p::gossipsub::TopicHash; pub enum GossipsubMessage { @@ -14,7 +14,8 @@ impl GossipsubMessage { pub fn decode(topic: &TopicHash, data: &[u8]) -> Result { match GossipsubTopic::decode(topic)?.kind { GossipsubKind::Block => Ok(Self::Block( - SignedBlockWithAttestation::from_ssz_default(data).map_err(|e| format!("{:?}", e))?, + SignedBlockWithAttestation::from_ssz_default(data) + .map_err(|e| format!("{:?}", e))?, )), GossipsubKind::Attestation => Ok(Self::Attestation( SignedAttestation::from_ssz_default(data).map_err(|e| format!("{:?}", e))?, diff --git a/lean_client/networking/src/gossipsub/tests/config.rs b/lean_client/networking/src/gossipsub/tests/config.rs index e788d81..4fa245d 100644 --- a/lean_client/networking/src/gossipsub/tests/config.rs +++ 
b/lean_client/networking/src/gossipsub/tests/config.rs @@ -1,5 +1,5 @@ use crate::gossipsub::config::GossipsubConfig; -use crate::gossipsub::topic::{get_topics, GossipsubKind}; +use crate::gossipsub::topic::{GossipsubKind, get_topics}; #[test] fn test_default_parameters() { @@ -24,8 +24,14 @@ fn test_default_parameters() { assert_eq!(config.config.gossip_lazy(), 6); // d_lazy = 6 assert_eq!(config.config.history_length(), 6); // mcache_len = 6 assert_eq!(config.config.history_gossip(), 3); // mcache_gossip = 3 - assert_eq!(config.config.fanout_ttl(), std::time::Duration::from_secs(60)); // fanout_ttl_secs = 60 - assert_eq!(config.config.heartbeat_interval(), std::time::Duration::from_millis(700)); // heartbeat_interval_secs = 0.7 + assert_eq!( + config.config.fanout_ttl(), + std::time::Duration::from_secs(60) + ); // fanout_ttl_secs = 60 + assert_eq!( + config.config.heartbeat_interval(), + std::time::Duration::from_millis(700) + ); // heartbeat_interval_secs = 0.7 assert!(config.topics.is_empty()); } diff --git a/lean_client/networking/src/gossipsub/tests/message.rs b/lean_client/networking/src/gossipsub/tests/message.rs index ce062be..9fd25dd 100644 --- a/lean_client/networking/src/gossipsub/tests/message.rs +++ b/lean_client/networking/src/gossipsub/tests/message.rs @@ -1,5 +1,7 @@ use crate::gossipsub::message::GossipsubMessage; -use crate::gossipsub::topic::{ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX}; +use crate::gossipsub::topic::{ + ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, +}; use libp2p::gossipsub::TopicHash; #[test] diff --git a/lean_client/networking/src/gossipsub/tests/message_id.rs b/lean_client/networking/src/gossipsub/tests/message_id.rs index 4eb3302..17a0b51 100644 --- a/lean_client/networking/src/gossipsub/tests/message_id.rs +++ b/lean_client/networking/src/gossipsub/tests/message_id.rs @@ -1,5 +1,7 @@ use crate::gossipsub::config::compute_message_id; -use 
crate::gossipsub::topic::{ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX}; +use crate::gossipsub::topic::{ + ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, +}; use crate::types::MESSAGE_DOMAIN_VALID_SNAPPY; use libp2p::gossipsub::{Message, TopicHash}; use sha2::{Digest, Sha256}; @@ -134,7 +136,7 @@ fn test_message_id_uses_valid_snappy_domain() { let topic_bytes = topic.as_bytes(); let topic_len = topic_bytes.len() as u64; - + let mut digest_input = Vec::new(); digest_input.extend_from_slice(MESSAGE_DOMAIN_VALID_SNAPPY); diff --git a/lean_client/networking/src/gossipsub/tests/mod.rs b/lean_client/networking/src/gossipsub/tests/mod.rs index 351a897..15f330a 100644 --- a/lean_client/networking/src/gossipsub/tests/mod.rs +++ b/lean_client/networking/src/gossipsub/tests/mod.rs @@ -1,4 +1,4 @@ mod config; -mod message_id; mod message; -mod topic; \ No newline at end of file +mod message_id; +mod topic; diff --git a/lean_client/networking/src/gossipsub/tests/topic.rs b/lean_client/networking/src/gossipsub/tests/topic.rs index cdd09df..7e3d70b 100644 --- a/lean_client/networking/src/gossipsub/tests/topic.rs +++ b/lean_client/networking/src/gossipsub/tests/topic.rs @@ -1,6 +1,6 @@ use crate::gossipsub::topic::{ - get_topics, GossipsubKind, GossipsubTopic, ATTESTATION_TOPIC, BLOCK_TOPIC, - SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, + ATTESTATION_TOPIC, BLOCK_TOPIC, GossipsubKind, GossipsubTopic, SSZ_SNAPPY_ENCODING_POSTFIX, + TOPIC_PREFIX, get_topics, }; use libp2p::gossipsub::TopicHash; diff --git a/lean_client/networking/src/gossipsub/topic.rs b/lean_client/networking/src/gossipsub/topic.rs index 9a4e7b5..09fcd33 100644 --- a/lean_client/networking/src/gossipsub/topic.rs +++ b/lean_client/networking/src/gossipsub/topic.rs @@ -45,9 +45,7 @@ impl GossipsubTopic { let parts: Vec<&str> = topic.as_str().trim_start_matches('/').split('/').collect(); if parts.len() != 4 { - return Err(format!( - "Invalid topic part count: 
{topic:?}" - )); + return Err(format!("Invalid topic part count: {topic:?}")); } Ok(parts) @@ -78,10 +76,7 @@ impl std::fmt::Display for GossipsubTopic { write!( f, "/{}/{}/{}/{}", - TOPIC_PREFIX, - self.fork, - self.kind, - SSZ_SNAPPY_ENCODING_POSTFIX + TOPIC_PREFIX, self.fork, self.kind, SSZ_SNAPPY_ENCODING_POSTFIX ) } } @@ -106,10 +101,7 @@ impl From for TopicHash { }; TopicHash::from_raw(format!( "/{}/{}/{}/{}", - TOPIC_PREFIX, - val.fork, - kind_str, - SSZ_SNAPPY_ENCODING_POSTFIX + TOPIC_PREFIX, val.fork, kind_str, SSZ_SNAPPY_ENCODING_POSTFIX )) } } diff --git a/lean_client/networking/src/network/mod.rs b/lean_client/networking/src/network/mod.rs index 7609d9a..1900ed8 100644 --- a/lean_client/networking/src/network/mod.rs +++ b/lean_client/networking/src/network/mod.rs @@ -2,4 +2,4 @@ mod behaviour; mod service; pub use behaviour::{LeanNetworkBehaviour, LeanNetworkBehaviourEvent}; -pub use service::{NetworkServiceConfig, NetworkEvent, NetworkService}; +pub use service::{NetworkEvent, NetworkService, NetworkServiceConfig}; diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 93e749c..8ae5729 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -341,8 +341,8 @@ where } fn handle_request_response_event(&mut self, event: ReqRespMessage) -> Option { - use libp2p::request_response::{Event, Message}; use crate::req_resp::LeanResponse; + use libp2p::request_response::{Event, Message}; match event { Event::Message { peer, message, .. 
} => match message { @@ -360,16 +360,24 @@ where tokio::spawn(async move { for block in blocks { let slot = block.message.block.slot.0; - if let Err(e) = chain_sink.send( - ChainMessage::ProcessBlock { + if let Err(e) = chain_sink + .send(ChainMessage::ProcessBlock { signed_block_with_attestation: block, is_trusted: false, should_gossip: false, // Don't re-gossip requested blocks - } - ).await { - warn!(slot = slot, ?e, "Failed to send requested block to chain"); + }) + .await + { + warn!( + slot = slot, + ?e, + "Failed to send requested block to chain" + ); } else { - debug!(slot = slot, "Queued requested block for processing"); + debug!( + slot = slot, + "Queued requested block for processing" + ); } } }); @@ -382,7 +390,9 @@ where } } } - Message::Request { request, channel, .. } => { + Message::Request { + request, channel, .. + } => { use crate::req_resp::{LeanRequest, LeanResponse}; let response = match request { @@ -398,10 +408,12 @@ where } }; - if let Err(e) = self.swarm + if let Err(e) = self + .swarm .behaviour_mut() .req_resp - .send_response(channel, response) { + .send_response(channel, response) + { warn!(peer = %peer, ?e, "Failed to send response"); } } @@ -528,7 +540,7 @@ where let slot = signed_attestation.message.data.slot.0; #[cfg(feature = "devnet2")] let slot = signed_attestation.message.slot.0; - + match signed_attestation.to_ssz() { Ok(bytes) => { if let Err(err) = self.publish_to_topic(GossipsubKind::Attestation, bytes) { diff --git a/lean_client/networking/src/req_resp.rs b/lean_client/networking/src/req_resp.rs index 51d705e..bd6c414 100644 --- a/lean_client/networking/src/req_resp.rs +++ b/lean_client/networking/src/req_resp.rs @@ -2,8 +2,8 @@ use std::io; use std::io::{Read, Write}; use async_trait::async_trait; -use containers::{Bytes32, SignedBlockWithAttestation, Status}; use containers::ssz::{SszReadDefault, SszWrite}; +use containers::{Bytes32, SignedBlockWithAttestation, Status}; use futures::{AsyncRead, AsyncReadExt, AsyncWrite, 
AsyncWriteExt}; use libp2p::request_response::{ Behaviour as RequestResponse, Codec, Config, Event, ProtocolSupport, @@ -46,8 +46,9 @@ impl LeanCodec { fn compress(data: &[u8]) -> io::Result> { let mut encoder = FrameEncoder::new(Vec::new()); encoder.write_all(data)?; - encoder.into_inner() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("Snappy framing failed: {e}"))) + encoder.into_inner().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("Snappy framing failed: {e}")) + }) } /// Decompress data using Snappy framing format (required for req/resp protocol) @@ -60,8 +61,9 @@ impl LeanCodec { fn encode_request(request: &LeanRequest) -> io::Result> { let ssz_bytes = match request { - LeanRequest::Status(status) => status.to_ssz() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")))?, + LeanRequest::Status(status) => status.to_ssz().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")) + })?, LeanRequest::BlocksByRoot(roots) => { let mut bytes = Vec::new(); for root in roots { @@ -77,12 +79,16 @@ impl LeanCodec { if data.is_empty() { return Ok(LeanRequest::Status(Status::default())); } - + let ssz_bytes = Self::decompress(data)?; - + if protocol.contains("status") { - let status = Status::from_ssz_default(&ssz_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ decode Status failed: {e:?}")))?; + let status = Status::from_ssz_default(&ssz_bytes).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("SSZ decode Status failed: {e:?}"), + ) + })?; Ok(LeanRequest::Status(status)) } else if protocol.contains("blocks_by_root") { let mut roots = Vec::new(); @@ -96,35 +102,44 @@ impl LeanCodec { if roots.len() > MAX_REQUEST_BLOCKS { return Err(io::Error::new( io::ErrorKind::InvalidData, - format!("Too many block roots requested: {} > {}", roots.len(), MAX_REQUEST_BLOCKS), + format!( + "Too many block roots requested: {} > {}", + roots.len(), + 
MAX_REQUEST_BLOCKS + ), )); } Ok(LeanRequest::BlocksByRoot(roots)) } else { - Err(io::Error::new(io::ErrorKind::Other, format!("Unknown protocol: {protocol}"))) + Err(io::Error::new( + io::ErrorKind::Other, + format!("Unknown protocol: {protocol}"), + )) } } fn encode_response(response: &LeanResponse) -> io::Result> { let ssz_bytes = match response { - LeanResponse::Status(status) => status.to_ssz() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")))?, + LeanResponse::Status(status) => status.to_ssz().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")) + })?, LeanResponse::BlocksByRoot(blocks) => { let mut bytes = Vec::new(); for block in blocks { - let block_bytes = block.to_ssz() - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")))?; + let block_bytes = block.to_ssz().map_err(|e| { + io::Error::new(io::ErrorKind::Other, format!("SSZ encode failed: {e}")) + })?; bytes.extend_from_slice(&block_bytes); } bytes } LeanResponse::Empty => Vec::new(), }; - + if ssz_bytes.is_empty() { return Ok(Vec::new()); } - + Self::compress(&ssz_bytes) } @@ -132,22 +147,33 @@ impl LeanCodec { if data.is_empty() { return Ok(LeanResponse::Empty); } - + let ssz_bytes = Self::decompress(data)?; - + if protocol.contains("status") { - let status = Status::from_ssz_default(&ssz_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ decode Status failed: {e:?}")))?; + let status = Status::from_ssz_default(&ssz_bytes).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("SSZ decode Status failed: {e:?}"), + ) + })?; Ok(LeanResponse::Status(status)) } else if protocol.contains("blocks_by_root") { if ssz_bytes.is_empty() { return Ok(LeanResponse::BlocksByRoot(Vec::new())); } - let block = SignedBlockWithAttestation::from_ssz_default(&ssz_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::Other, format!("SSZ decode Block failed: {e:?}")))?; + let block = 
SignedBlockWithAttestation::from_ssz_default(&ssz_bytes).map_err(|e| { + io::Error::new( + io::ErrorKind::Other, + format!("SSZ decode Block failed: {e:?}"), + ) + })?; Ok(LeanResponse::BlocksByRoot(vec![block])) } else { - Err(io::Error::new(io::ErrorKind::Other, format!("Unknown protocol: {protocol}"))) + Err(io::Error::new( + io::ErrorKind::Other, + format!("Unknown protocol: {protocol}"), + )) } } } diff --git a/lean_client/networking/src/types.rs b/lean_client/networking/src/types.rs index 028a883..bbe7cba 100644 --- a/lean_client/networking/src/types.rs +++ b/lean_client/networking/src/types.rs @@ -70,7 +70,9 @@ pub enum ChainMessage { } impl ChainMessage { - pub fn block_with_attestation(signed_block_with_attestation: SignedBlockWithAttestation) -> Self { + pub fn block_with_attestation( + signed_block_with_attestation: SignedBlockWithAttestation, + ) -> Self { ChainMessage::ProcessBlock { signed_block_with_attestation, is_trusted: false, @@ -90,16 +92,35 @@ impl ChainMessage { impl Display for ChainMessage { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ChainMessage::ProcessBlock { signed_block_with_attestation, .. } => { - write!(f, "ProcessBlockWithAttestation(slot={})", signed_block_with_attestation.message.block.slot.0) + ChainMessage::ProcessBlock { + signed_block_with_attestation, + .. + } => { + write!( + f, + "ProcessBlockWithAttestation(slot={})", + signed_block_with_attestation.message.block.slot.0 + ) } #[cfg(feature = "devnet1")] - ChainMessage::ProcessAttestation { signed_attestation, .. } => { - write!(f, "ProcessAttestation(slot={})", signed_attestation.message.data.slot.0) + ChainMessage::ProcessAttestation { + signed_attestation, .. + } => { + write!( + f, + "ProcessAttestation(slot={})", + signed_attestation.message.data.slot.0 + ) } #[cfg(feature = "devnet2")] - ChainMessage::ProcessAttestation { signed_attestation, .. 
} => { - write!(f, "ProcessAttestation(slot={})", signed_attestation.message.slot.0) + ChainMessage::ProcessAttestation { + signed_attestation, .. + } => { + write!( + f, + "ProcessAttestation(slot={})", + signed_attestation.message.slot.0 + ) } } } diff --git a/lean_client/validator/src/keys.rs b/lean_client/validator/src/keys.rs index cae38f1..7680102 100644 --- a/lean_client/validator/src/keys.rs +++ b/lean_client/validator/src/keys.rs @@ -1,8 +1,8 @@ +use containers::attestation::U3112; +use containers::ssz::ByteVector; +use containers::Signature; use std::collections::HashMap; use std::path::{Path, PathBuf}; -use containers::Signature; -use containers::ssz::ByteVector; -use containers::attestation::U3112; use tracing::info; #[cfg(feature = "xmss-signing")] @@ -42,7 +42,9 @@ impl KeyManager { /// Load a secret key for a specific validator index pub fn load_key(&mut self, validator_index: u64) -> Result<(), Box> { - let sk_path = self.keys_dir.join(format!("validator_{}_sk.ssz", validator_index)); + let sk_path = self + .keys_dir + .join(format!("validator_{}_sk.ssz", validator_index)); if !sk_path.exists() { return Err(format!("Secret key file not found: {:?}", sk_path).into()); @@ -69,20 +71,20 @@ impl KeyManager { ) -> Result> { #[cfg(feature = "xmss-signing")] { - let key_bytes = self.keys + let key_bytes = self + .keys .get(&validator_index) .ok_or_else(|| format!("No key loaded for validator {}", validator_index))?; - type SecretKey = ::SecretKey; + type SecretKey = + ::SecretKey; let secret_key = SecretKey::from_bytes(key_bytes) .map_err(|e| format!("Failed to deserialize secret key: {:?}", e))?; - let leansig_signature = SIGTopLevelTargetSumLifetime32Dim64Base8::sign( - &secret_key, - epoch, - message, - ).map_err(|e| format!("Failed to sign message: {:?}", e))?; + let leansig_signature = + SIGTopLevelTargetSumLifetime32Dim64Base8::sign(&secret_key, epoch, message) + .map_err(|e| format!("Failed to sign message: {:?}", e))?; let sig_bytes = 
leansig_signature.to_bytes(); @@ -90,7 +92,8 @@ impl KeyManager { return Err(format!( "Invalid signature size: expected 3112, got {}", sig_bytes.len() - ).into()); + ) + .into()); } // Convert to ByteVector using unsafe pointer copy (same pattern as PublicKey) @@ -105,7 +108,7 @@ impl KeyManager { #[cfg(not(feature = "xmss-signing"))] { - let _ = (epoch, message); // Suppress unused warnings + let _ = (epoch, message); // Suppress unused warnings warn!( validator = validator_index, "XMSS signing disabled - using zero signature" diff --git a/lean_client/validator/src/lib.rs b/lean_client/validator/src/lib.rs index 2c65fa7..752cda8 100644 --- a/lean_client/validator/src/lib.rs +++ b/lean_client/validator/src/lib.rs @@ -2,9 +2,9 @@ use std::collections::HashMap; use std::path::Path; -use containers::attestation::{AggregatedAttestations}; +use containers::attestation::AggregatedAttestations; #[cfg(feature = "devnet2")] -use containers::attestation::{NaiveAggregatedSignature}; +use containers::attestation::NaiveAggregatedSignature; use containers::block::BlockSignatures; use containers::{ attestation::{Attestation, AttestationData, Signature, SignedAttestation}, From e14a31d72acb3d7f3eb520e9656da9338144fdc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Sun, 18 Jan 2026 22:09:19 +0200 Subject: [PATCH 09/27] initial implementation --- lean_client/containers/src/lib.rs | 4 +- lean_client/containers/src/types.rs | 48 +- .../networking/src/gossipsub/config.rs | 282 ++++++++++- .../networking/src/gossipsub/control.rs | 167 +++++++ .../networking/src/gossipsub/mcache.rs | 410 ++++++++++++++++ lean_client/networking/src/gossipsub/mesh.rs | 458 ++++++++++++++++++ .../networking/src/gossipsub/message.rs | 233 +++++++++ lean_client/networking/src/gossipsub/mod.rs | 16 + .../networking/src/gossipsub/tests/control.rs | 89 ++++ .../networking/src/gossipsub/tests/mcache.rs | 90 ++++ .../networking/src/gossipsub/tests/mesh.rs | 100 ++++ 
.../networking/src/gossipsub/tests/mod.rs | 4 + lean_client/networking/src/gossipsub/topic.rs | 387 +++++++++++++-- lean_client/networking/src/gossipsub/types.rs | 33 ++ lean_client/networking/src/types.rs | 4 +- 15 files changed, 2269 insertions(+), 56 deletions(-) create mode 100644 lean_client/networking/src/gossipsub/control.rs create mode 100644 lean_client/networking/src/gossipsub/mcache.rs create mode 100644 lean_client/networking/src/gossipsub/mesh.rs create mode 100644 lean_client/networking/src/gossipsub/tests/control.rs create mode 100644 lean_client/networking/src/gossipsub/tests/mcache.rs create mode 100644 lean_client/networking/src/gossipsub/tests/mesh.rs create mode 100644 lean_client/networking/src/gossipsub/types.rs diff --git a/lean_client/containers/src/lib.rs b/lean_client/containers/src/lib.rs index f0590ca..323d22f 100644 --- a/lean_client/containers/src/lib.rs +++ b/lean_client/containers/src/lib.rs @@ -22,8 +22,8 @@ pub use slot::Slot; pub use state::State; pub use status::Status; pub use types::{ - Bytes32, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, - Uint64, ValidatorIndex, Validators, + Bytes20, Bytes32, HistoricalBlockHashes, JustificationRoots, JustificationsValidators, + JustifiedSlots, Uint64, ValidatorIndex, Validators, }; pub use types::Bytes32 as Root; diff --git a/lean_client/containers/src/types.rs b/lean_client/containers/src/types.rs index 7d9aa4d..fba0554 100644 --- a/lean_client/containers/src/types.rs +++ b/lean_client/containers/src/types.rs @@ -3,9 +3,55 @@ use serde::{Deserialize, Serialize}; use ssz::H256; use ssz_derive::Ssz; use std::fmt; -use std::hash::Hash; +use std::hash::{Hash, Hasher}; use std::str::FromStr; +/// 20-byte array for message IDs (gossipsub message IDs) +/// Using transparent SSZ encoding - just the raw bytes +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] +pub struct Bytes20(pub [u8; 20]); + +impl Default for Bytes20 { 
+ fn default() -> Self { + Bytes20([0u8; 20]) + } +} + +impl Bytes20 { + pub fn new(data: [u8; 20]) -> Self { + Bytes20(data) + } + + pub fn len(&self) -> usize { + 20 + } + + pub fn is_empty(&self) -> bool { + false + } +} + +impl Hash for Bytes20 { + fn hash(&self, state: &mut H) { + self.0.hash(state); + } +} + +impl From<&[u8]> for Bytes20 { + fn from(slice: &[u8]) -> Self { + let mut data = [0u8; 20]; + let len = slice.len().min(20); + data[..len].copy_from_slice(&slice[..len]); + Bytes20(data) + } +} + +impl AsRef<[u8]> for Bytes20 { + fn as_ref(&self) -> &[u8] { + &self.0 + } +} + #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Ssz, Default, Serialize, Deserialize, )] diff --git a/lean_client/networking/src/gossipsub/config.rs b/lean_client/networking/src/gossipsub/config.rs index 67061bc..769b286 100644 --- a/lean_client/networking/src/gossipsub/config.rs +++ b/lean_client/networking/src/gossipsub/config.rs @@ -1,10 +1,239 @@ +/// Gossipsub Parameters +/// +/// Configuration parameters controlling gossipsub mesh behavior. +/// +/// ## Overview +/// +/// Gossipsub maintains a mesh of peers for each subscribed topic. +/// These parameters tune the mesh size, timing, and caching behavior. +/// +/// ## Parameter Categories +/// +/// **Mesh Degree (D parameters):** +/// +/// Controls how many peers are in the mesh for each topic. 
+/// +/// ```text +/// D_low <= D <= D_high +/// +/// D Target mesh size (8 for Ethereum) +/// D_low Minimum before grafting new peers (6) +/// D_high Maximum before pruning excess peers (12) +/// D_lazy Peers to gossip IHAVE messages to (6) +/// ``` +/// +/// **Timing:** +/// +/// ```text +/// heartbeat_interval Mesh maintenance frequency (0.7s for Ethereum) +/// fanout_ttl How long to keep fanout peers (60s) +/// ``` +/// +/// **Caching:** +/// +/// ```text +/// mcache_len Total history windows kept (6) +/// mcache_gossip Windows included in IHAVE gossip (3) +/// seen_ttl Duplicate detection window +/// ``` +/// +/// ## Ethereum Values +/// +/// The Ethereum consensus layer specifies: +/// +/// - D = 8, D_low = 6, D_high = 12, D_lazy = 6 +/// - Heartbeat = 700ms (0.7s) +/// - Message cache = 6 windows, gossip last 3 +/// +/// ## References +/// +/// - Ethereum P2P spec: +/// - Gossipsub v1.0: +/// - Gossipsub v1.2: + use crate::gossipsub::topic::GossipsubTopic; use crate::types::MESSAGE_DOMAIN_VALID_SNAPPY; use libp2p::gossipsub::{Config, ConfigBuilder, Message, MessageId, ValidationMode}; +use serde::{Deserialize, Serialize}; use sha2::Digest; use sha2::Sha256; use std::time::Duration; +/// Core gossipsub configuration. +/// +/// Defines the mesh topology and timing parameters. +/// +/// Default values follow the Ethereum consensus P2P specification. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct GossipsubParameters { + /// The protocol ID for gossip messages. + #[serde(default = "default_protocol_id")] + pub protocol_id: String, + + // ------------------------------------------------------------------------- + // Mesh Degree Parameters + // ------------------------------------------------------------------------- + + /// Target number of mesh peers per topic. 
+ /// + /// The heartbeat procedure adjusts the mesh toward this size: + /// + /// - If |mesh| < D_low: graft peers up to D + /// - If |mesh| > D_high: prune peers down to D + #[serde(default = "default_d")] + pub d: usize, + + /// Minimum mesh peers before grafting. + /// + /// When mesh size drops below this threshold, the heartbeat + /// will graft new peers to reach the target D. + #[serde(default = "default_d_low")] + pub d_low: usize, + + /// Maximum mesh peers before pruning. + /// + /// When mesh size exceeds this threshold, the heartbeat + /// will prune excess peers down to the target D. + #[serde(default = "default_d_high")] + pub d_high: usize, + + /// Number of non-mesh peers for IHAVE gossip. + /// + /// During heartbeat, IHAVE messages are sent to this many + /// randomly selected peers outside the mesh. This enables + /// the lazy pull protocol for reliability. + #[serde(default = "default_d_lazy")] + pub d_lazy: usize, + + // ------------------------------------------------------------------------- + // Timing Parameters + // ------------------------------------------------------------------------- + + /// Interval between heartbeat ticks in seconds. + /// + /// The heartbeat procedure runs periodically to: + /// + /// - Maintain mesh size (graft/prune) + /// - Send IHAVE gossip to non-mesh peers + /// - Clean up stale fanout entries + /// - Shift the message cache window + #[serde(default = "default_heartbeat_interval_secs")] + pub heartbeat_interval_secs: f64, + + /// Time-to-live for fanout entries in seconds. + /// + /// Fanout peers are used when publishing to topics we don't + /// subscribe to. Entries expire after this duration of + /// inactivity to free resources. 
+ #[serde(default = "default_fanout_ttl_secs")] + pub fanout_ttl_secs: u64, + + // ------------------------------------------------------------------------- + // Message Cache Parameters + // ------------------------------------------------------------------------- + + /// Total number of history windows in the message cache. + /// + /// - Messages are stored for this many heartbeat intervals. + /// - After mcache_len heartbeats, messages are evicted. + #[serde(default = "default_mcache_len")] + pub mcache_len: usize, + + /// Number of recent windows included in IHAVE gossip. + /// + /// Only messages from the most recent mcache_gossip windows + /// are advertised via IHAVE. Older cached messages can still + /// be retrieved via IWANT but won't be actively gossiped. + #[serde(default = "default_mcache_gossip")] + pub mcache_gossip: usize, + + /// Time-to-live for seen message IDs in seconds. + /// + /// Message IDs are tracked to detect duplicates. This should + /// be long enough to cover network propagation delays but + /// short enough to bound memory usage. + #[serde(default = "default_seen_ttl_secs")] + pub seen_ttl_secs: u64, + + // ------------------------------------------------------------------------- + // IDONTWANT Optimization (v1.2) + // ------------------------------------------------------------------------- + + /// Minimum message size in bytes to trigger IDONTWANT. + /// + /// When receiving a message larger than this threshold, + /// immediately send IDONTWANT to mesh peers to prevent + /// redundant transmissions. + /// + /// Set to 1KB by default. 
+ #[serde(default = "default_idontwant_threshold")] + pub idontwant_message_size_threshold: usize, +} + +fn default_protocol_id() -> String { + "/meshsub/1.3.0".to_string() +} + +fn default_d() -> usize { + 8 +} + +fn default_d_low() -> usize { + 6 +} + +fn default_d_high() -> usize { + 12 +} + +fn default_d_lazy() -> usize { + 6 +} + +fn default_heartbeat_interval_secs() -> f64 { + 0.7 +} + +fn default_fanout_ttl_secs() -> u64 { + 60 +} + +fn default_mcache_len() -> usize { + 6 +} + +fn default_mcache_gossip() -> usize { + 3 +} + +fn default_seen_ttl_secs() -> u64 { + let justification_lookback_slots: u64 = 3; + let seconds_per_slot: u64 = 12; + seconds_per_slot * justification_lookback_slots * 2 +} + +fn default_idontwant_threshold() -> usize { + 1000 +} + +impl Default for GossipsubParameters { + fn default() -> Self { + Self { + protocol_id: default_protocol_id(), + d: default_d(), + d_low: default_d_low(), + d_high: default_d_high(), + d_lazy: default_d_lazy(), + heartbeat_interval_secs: default_heartbeat_interval_secs(), + fanout_ttl_secs: default_fanout_ttl_secs(), + mcache_len: default_mcache_len(), + mcache_gossip: default_mcache_gossip(), + seen_ttl_secs: default_seen_ttl_secs(), + idontwant_message_size_threshold: default_idontwant_threshold(), + } + } +} + #[derive(Debug, Clone)] pub struct GossipsubConfig { pub config: Config, @@ -13,30 +242,27 @@ pub struct GossipsubConfig { impl GossipsubConfig { pub fn new() -> Self { - let justification_lookback_slots: u64 = 3; - let seconds_per_slot: u64 = 12; - - let seen_ttl_secs = seconds_per_slot * justification_lookback_slots * 2; - + let params = GossipsubParameters::default(); + let config = ConfigBuilder::default() // leanSpec: heartbeat_interval_secs = 0.7 .heartbeat_interval(Duration::from_millis(700)) // leanSpec: fanout_ttl_secs = 60 - .fanout_ttl(Duration::from_secs(60)) + .fanout_ttl(Duration::from_secs(params.fanout_ttl_secs)) // leanSpec: mcache_len = 6 - .history_length(6) + 
.history_length(params.mcache_len) // leanSpec: mcache_gossip = 3 - .history_gossip(3) + .history_gossip(params.mcache_gossip) // leanSpec: seen_ttl_secs = SECONDS_PER_SLOT * JUSTIFICATION_LOOKBACK_SLOTS * 2 - .duplicate_cache_time(Duration::from_secs(seen_ttl_secs)) + .duplicate_cache_time(Duration::from_secs(params.seen_ttl_secs)) // leanSpec: d = 8 - .mesh_n(8) + .mesh_n(params.d) // leanSpec: d_low = 6 - .mesh_n_low(6) + .mesh_n_low(params.d_low) // leanSpec: d_high = 12 - .mesh_n_high(12) + .mesh_n_high(params.d_high) // leanSpec: d_lazy = 6 - .gossip_lazy(6) + .gossip_lazy(params.d_lazy) .validation_mode(ValidationMode::Anonymous) .validate_messages() .message_id_fn(compute_message_id) @@ -61,7 +287,7 @@ pub fn compute_message_id(message: &Message) -> MessageId { let topic_len = topic_bytes.len() as u64; let mut digest_input = Vec::new(); - // Domain: 4 bytes + // Domain: 1 byte digest_input.extend_from_slice(MESSAGE_DOMAIN_VALID_SNAPPY); // Topic length: 8 bytes (uint64 little-endian) digest_input.extend_from_slice(&topic_len.to_le_bytes()); @@ -75,3 +301,31 @@ pub fn compute_message_id(message: &Message) -> MessageId { // Return first 20 bytes MessageId::from(&hash[..20]) } + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_parameters() { + let params = GossipsubParameters::default(); + + // Test Ethereum spec values + assert_eq!(params.d, 8); + assert_eq!(params.d_low, 6); + assert_eq!(params.d_high, 12); + assert_eq!(params.d_lazy, 6); + assert_eq!(params.heartbeat_interval_secs, 0.7); + assert_eq!(params.fanout_ttl_secs, 60); + assert_eq!(params.mcache_len, 6); + assert_eq!(params.mcache_gossip, 3); + assert_eq!(params.protocol_id, "/meshsub/1.3.0"); + assert_eq!(params.idontwant_message_size_threshold, 1000); + + // Test relationships + assert!(params.d_low < params.d); + assert!(params.d < params.d_high); + assert!(params.d_lazy <= params.d); + assert!(params.mcache_gossip <= params.mcache_len); + } +} diff --git 
a/lean_client/networking/src/gossipsub/control.rs b/lean_client/networking/src/gossipsub/control.rs new file mode 100644 index 0000000..8d64aa8 --- /dev/null +++ b/lean_client/networking/src/gossipsub/control.rs @@ -0,0 +1,167 @@ +/// Gossipsub Control Messages +/// +/// Control messages orchestrate the gossip mesh topology and message propagation. +/// +/// ## Overview +/// +/// Gossipsub uses control messages piggybacked on regular RPC messages to: +/// +/// - Manage mesh membership (GRAFT/PRUNE) +/// - Enable lazy message propagation (IHAVE/IWANT) +/// - Reduce bandwidth for large messages (IDONTWANT) +/// +/// ## Control Message Types +/// +/// | Message | Purpose | +/// |-------------|--------------------------------------------------------| +/// | GRAFT | Request to join a peer's mesh for a topic | +/// | PRUNE | Notify peer of removal from mesh | +/// | IHAVE | Advertise message IDs available for a topic | +/// | IWANT | Request full messages by their IDs | +/// | IDONTWANT | Signal that specific messages are not needed (v1.2) | +/// +/// ## Protocol Flow +/// +/// **Mesh Management:** +/// +/// 1. Peer A sends GRAFT to peer B for topic T +/// 2. Peer B adds A to its mesh for T (or sends PRUNE if refusing) +/// 3. Both peers now exchange full messages for topic T +/// +/// **Lazy Pull:** +/// +/// 1. Peer A receives message M, adds to cache +/// 2. Peer A sends IHAVE with M's ID to non-mesh peers +/// 3. Peer B responds with IWANT if it needs M +/// 4. Peer A sends full message M +/// +/// ## References +/// +/// - Gossipsub v1.0: +/// - Gossipsub v1.2: + +use serde::{Deserialize, Serialize}; + +use super::types::MessageId; + +/// Request to join a peer's mesh for a topic. +/// +/// Sent when a peer wants to upgrade from gossip-only to full message exchange. 
+/// +/// The receiving peer should add the sender to its mesh unless: +/// +/// - The peer is already in the mesh +/// - The mesh is at capacity (|mesh| >= D_high) +/// - The peer is in a backoff period from a recent PRUNE +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Graft { + /// Topic identifier to join the mesh for. + pub topic_id: String, +} + +/// Notification of removal from a peer's mesh. +/// +/// Sent when: +/// +/// - A peer unsubscribes from a topic +/// - Mesh size exceeds D_high during heartbeat +/// - A GRAFT is rejected +/// +/// The pruned peer should not send GRAFT for this topic +/// until the backoff period expires. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct Prune { + /// Topic identifier being pruned from. + pub topic_id: String, +} + +/// Advertisement of cached message IDs for a topic. +/// +/// Sent to non-mesh peers during heartbeat to enable lazy pull. +/// Recipients can request any missing messages via IWANT. +/// +/// Only includes messages from recent cache windows (mcache_gossip). +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct IHave { + /// Topic the advertised messages belong to. + pub topic_id: String, + + /// IDs of messages available in the sender's cache. + pub message_ids: Vec, +} + +/// Request for full messages by their IDs. +/// +/// Sent in response to IHAVE when the peer needs specific messages. +/// The peer should respond with the requested messages if still cached. +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct IWant { + /// IDs of messages being requested. + pub message_ids: Vec, +} + +/// Signal that specific messages are not needed. +/// +/// Introduced in gossipsub v1.2 for bandwidth optimization. +/// +/// Sent immediately after receiving a large message to tell mesh peers +/// not to forward their copy. Only used for messages exceeding the +/// IDONTWANT size threshold (typically 1KB). 
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct IDontWant { + /// IDs of messages the sender does not want to receive. + pub message_ids: Vec, +} + +/// Container for aggregated control messages. +/// +/// Multiple control messages are batched into a single RPC +/// for efficiency. An RPC can contain any combination of +/// control message types. +/// +/// # Example +/// +/// ``` +/// use lean_client_networking::gossipsub::control::*; +/// +/// let control = ControlMessage { +/// grafts: vec![Graft { topic_id: "blocks".to_string() }], +/// ihaves: vec![], +/// prunes: vec![], +/// iwants: vec![], +/// idontwants: vec![], +/// }; +/// ``` +#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] +pub struct ControlMessage { + /// GRAFT messages requesting mesh membership. + #[serde(default)] + pub grafts: Vec, + + /// PRUNE messages notifying mesh removal. + #[serde(default)] + pub prunes: Vec, + + /// IHAVE messages advertising cached message IDs. + #[serde(default)] + pub ihaves: Vec, + + /// IWANT messages requesting full messages. + #[serde(default)] + pub iwants: Vec, + + /// IDONTWANT messages declining specific messages (v1.2). + #[serde(default)] + pub idontwants: Vec, +} + +impl ControlMessage { + /// Check if this control message contains no data. + pub fn is_empty(&self) -> bool { + self.grafts.is_empty() + && self.prunes.is_empty() + && self.ihaves.is_empty() + && self.iwants.is_empty() + && self.idontwants.is_empty() + } +} diff --git a/lean_client/networking/src/gossipsub/mcache.rs b/lean_client/networking/src/gossipsub/mcache.rs new file mode 100644 index 0000000..6d8fcd6 --- /dev/null +++ b/lean_client/networking/src/gossipsub/mcache.rs @@ -0,0 +1,410 @@ +/// Gossipsub Message Cache +/// +/// Caches recent messages for gossip dissemination and IWANT responses. 
+/// +/// ## Overview +/// +/// The message cache enables the lazy pull protocol by storing messages +/// that can be requested via IWANT after receiving IHAVE advertisements. +/// +/// ```text +/// Peer A Peer B (non-mesh) +/// | | +/// |--- IHAVE [msg1, msg2] ------>| +/// | | +/// |<----- IWANT [msg2] ----------| +/// | | +/// |--- MESSAGE [msg2] ---------->| <- Retrieved from cache +/// ``` +/// +/// ## Sliding Window Design +/// +/// The cache is organized as a sliding window of history buckets: +/// +/// ```text +/// +----------+----------+----------+----------+ +/// | Window 0 | Window 1 | Window 2 | Window 3 | ... +/// | (newest) | | | (oldest) | +/// +----------+----------+----------+----------+ +/// ^ +/// | +/// New messages go here +/// ``` +/// +/// Each heartbeat: +/// +/// 1. Oldest window is evicted (messages cleaned up) +/// 2. New empty window is prepended +/// 3. Windows shift: 0 -> 1 -> 2 -> ... +/// +/// ## Key Parameters +/// +/// - **mcache_len** (6): Total windows retained +/// - **mcache_gossip** (3): Recent windows included in IHAVE +/// +/// Only the first `mcache_gossip` windows are advertised via IHAVE. +/// Older messages can still be retrieved via IWANT but won't be +/// actively gossiped. +/// +/// ## Seen Cache +/// +/// A separate `SeenCache` tracks message IDs for deduplication +/// without storing full messages. Uses TTL-based expiry. +/// +/// ## References +/// +/// - Gossipsub v1.0: + +use std::collections::{HashMap, HashSet, VecDeque}; + +use super::message::RawGossipsubMessage; +use super::types::{MessageId, Timestamp, TopicId}; + +/// A single entry in the message cache. +/// +/// Stores the message along with its topic for efficient retrieval +/// during IWANT responses and topic-filtered IHAVE gossip. +#[derive(Debug, Clone)] +pub struct CacheEntry { + /// The cached gossipsub message. + pub message: RawGossipsubMessage, + + /// Topic this message was published to. 
+ /// + /// Used to filter messages when generating IHAVE gossip for a specific topic. + pub topic: TopicId, +} + +/// Sliding window cache for gossipsub messages. +/// +/// Maintains recent messages for: +/// +/// - **IWANT responses**: Retrieve full messages by ID +/// - **IHAVE gossip**: Get message IDs for advertisement +/// +/// # Example +/// +/// ``` +/// use lean_client_networking::gossipsub::mcache::MessageCache; +/// use lean_client_networking::gossipsub::message::RawGossipsubMessage; +/// +/// let mut cache = MessageCache::new(6, 3); +/// +/// // Add messages +/// let msg1 = RawGossipsubMessage::new(b"topic".to_vec(), b"data1".to_vec(), None); +/// cache.put("blocks".to_string(), msg1.clone()); +/// +/// // Get message IDs for IHAVE +/// let ids = cache.get_gossip_ids("blocks"); +/// +/// // Respond to IWANT +/// let msg = cache.get(&msg1.id()); +/// +/// // Shift window (called each heartbeat) +/// let evicted = cache.shift(); +/// ``` +#[derive(Debug, Clone)] +pub struct MessageCache { + /// Number of history windows to retain. + /// + /// Messages are evicted after this many heartbeat intervals. + /// + /// Higher values increase memory usage but improve message + /// availability for late IWANT requests. + mcache_len: usize, + + /// Number of recent windows to include in IHAVE gossip. + /// + /// Only messages from the most recent windows are advertised. + /// Should be less than or equal to mcache_len. + mcache_gossip: usize, + + /// Sliding window of message ID sets. + /// + /// Index 0 is the newest window. Each heartbeat, windows shift + /// right and a new empty window is prepended. + windows: VecDeque>, + + /// Message lookup index keyed by ID. + /// + /// Provides O(1) retrieval for IWANT responses. + by_id: HashMap, +} + +impl MessageCache { + /// Create a new message cache. 
+ /// + /// # Arguments + /// + /// * `mcache_len` - Number of history windows to retain + /// * `mcache_gossip` - Number of recent windows to include in IHAVE gossip + pub fn new(mcache_len: usize, mcache_gossip: usize) -> Self { + let mut windows = VecDeque::with_capacity(mcache_len); + windows.push_back(HashSet::new()); + + Self { + mcache_len, + mcache_gossip, + windows, + by_id: HashMap::new(), + } + } + + /// Add a message to the cache. + /// + /// Messages are added to the newest window (index 0) and + /// indexed for fast retrieval. Duplicates are ignored. + /// + /// # Arguments + /// + /// * `topic` - Topic this message belongs to + /// * `message` - Message to cache + /// + /// # Returns + /// + /// `true` if added (not a duplicate) + pub fn put(&mut self, topic: TopicId, message: RawGossipsubMessage) -> bool { + let msg_id = message.id(); + + if self.by_id.contains_key(&msg_id) { + return false; + } + + if let Some(window) = self.windows.front_mut() { + window.insert(msg_id.clone()); + } + + self.by_id.insert(msg_id, CacheEntry { message, topic }); + true + } + + /// Retrieve a message by ID. + /// + /// Used to respond to IWANT requests from peers. + /// + /// # Arguments + /// + /// * `msg_id` - Message ID to look up + /// + /// # Returns + /// + /// The cached message, or `None` if not found/evicted + pub fn get(&self, msg_id: &MessageId) -> Option<&RawGossipsubMessage> { + self.by_id.get(msg_id).map(|entry| &entry.message) + } + + /// Check if a message is cached. + /// + /// # Arguments + /// + /// * `msg_id` - Message ID to check + /// + /// # Returns + /// + /// `true` if the message is in the cache + pub fn has(&self, msg_id: &MessageId) -> bool { + self.by_id.contains_key(msg_id) + } + + /// Get message IDs for IHAVE gossip. + /// + /// Returns IDs from the most recent `mcache_gossip` windows + /// that belong to the specified topic. 
+ /// + /// # Arguments + /// + /// * `topic` - Topic to filter messages by + /// + /// # Returns + /// + /// List of message IDs for IHAVE advertisement + pub fn get_gossip_ids(&self, topic: &str) -> Vec { + let mut result = Vec::new(); + let windows_to_check = self.mcache_gossip.min(self.windows.len()); + + for i in 0..windows_to_check { + if let Some(window) = self.windows.get(i) { + for msg_id in window { + if let Some(entry) = self.by_id.get(msg_id) { + if entry.topic == topic { + result.push(msg_id.clone()); + } + } + } + } + } + + result + } + + /// Shift the cache window, evicting the oldest. + /// + /// Called at each heartbeat to age the cache: + /// + /// 1. If at capacity, remove oldest window and its messages + /// 2. Prepend new empty window + /// + /// # Returns + /// + /// Number of messages evicted + pub fn shift(&mut self) -> usize { + let mut evicted = 0; + + if self.windows.len() >= self.mcache_len { + if let Some(oldest) = self.windows.pop_back() { + for msg_id in oldest { + if self.by_id.remove(&msg_id).is_some() { + evicted += 1; + } + } + } + } + + self.windows.push_front(HashSet::new()); + evicted + } + + /// Clear all cached messages. + pub fn clear(&mut self) { + self.windows.clear(); + self.windows.push_back(HashSet::new()); + self.by_id.clear(); + } + + /// Get the total number of cached messages. + pub fn len(&self) -> usize { + self.by_id.len() + } + + /// Check if the cache is empty. + pub fn is_empty(&self) -> bool { + self.by_id.is_empty() + } +} + +/// TTL-based cache for deduplicating messages. +/// +/// Tracks message IDs that have been seen to prevent reprocessing +/// duplicates. Unlike `MessageCache`, this only stores IDs (not +/// full messages) with time-based expiry. 
+/// +/// ## Use Cases +/// +/// - Skip processing of already-seen messages +/// - Avoid forwarding duplicates to mesh peers +/// - Bound memory with automatic TTL cleanup +#[derive(Debug, Clone)] +pub struct SeenCache { + /// Time-to-live for entries in seconds. + /// + /// Entries older than this are removed during cleanup. + /// + /// Should be: + /// - long enough to cover network propagation, + /// - short enough to bound memory usage. + ttl_seconds: u64, + + /// Set of message IDs that have been seen. + /// + /// Provides O(1) membership testing. + seen: HashSet, + + /// Timestamp when each message was first seen. + /// + /// Used to determine expiry during cleanup. + timestamps: HashMap, +} + +impl SeenCache { + /// Create a new seen cache. + /// + /// # Arguments + /// + /// * `ttl_seconds` - Time-to-live for entries in seconds + pub fn new(ttl_seconds: u64) -> Self { + Self { + ttl_seconds, + seen: HashSet::new(), + timestamps: HashMap::new(), + } + } + + /// Mark a message as seen. + /// + /// # Arguments + /// + /// * `msg_id` - Message ID to mark as seen + /// * `timestamp` - Current Unix timestamp + /// + /// # Returns + /// + /// `true` if newly seen (not a duplicate) + pub fn add(&mut self, msg_id: MessageId, timestamp: Timestamp) -> bool { + if self.seen.contains(&msg_id) { + return false; + } + + self.seen.insert(msg_id.clone()); + self.timestamps.insert(msg_id, timestamp); + true + } + + /// Check if a message has been seen. + /// + /// # Arguments + /// + /// * `msg_id` - Message ID to check + /// + /// # Returns + /// + /// `true` if the message has been seen + pub fn has(&self, msg_id: &MessageId) -> bool { + self.seen.contains(msg_id) + } + + /// Remove expired entries. + /// + /// Should be called periodically (e.g., each heartbeat) + /// to prevent unbounded memory growth. 
+ /// + /// # Arguments + /// + /// * `current_time` - Current Unix timestamp + /// + /// # Returns + /// + /// Number of entries removed + pub fn cleanup(&mut self, current_time: f64) -> usize { + let cutoff = current_time - self.ttl_seconds as f64; + let expired: Vec = self + .timestamps + .iter() + .filter(|(_, ts)| **ts < cutoff) + .map(|(id, _)| id.clone()) + .collect(); + + let count = expired.len(); + for msg_id in expired { + self.seen.remove(&msg_id); + self.timestamps.remove(&msg_id); + } + + count + } + + /// Clear all seen entries. + pub fn clear(&mut self) { + self.seen.clear(); + self.timestamps.clear(); + } + + /// Get the number of seen message IDs. + pub fn len(&self) -> usize { + self.seen.len() + } + + /// Check if the seen cache is empty. + pub fn is_empty(&self) -> bool { + self.seen.is_empty() + } +} diff --git a/lean_client/networking/src/gossipsub/mesh.rs b/lean_client/networking/src/gossipsub/mesh.rs new file mode 100644 index 0000000..7133035 --- /dev/null +++ b/lean_client/networking/src/gossipsub/mesh.rs @@ -0,0 +1,458 @@ +/// Gossipsub Mesh State +/// +/// Manages the mesh topology for gossipsub topics. +/// +/// ## Overview +/// +/// Each subscribed topic maintains a **mesh**: a set of peers for full +/// message exchange. The mesh is the core data structure enabling +/// gossipsub's eager push protocol. +/// +/// - **Mesh peers**: Exchange full messages immediately (eager push) +/// - **Non-mesh peers**: Receive IHAVE advertisements, request via IWANT (lazy pull) +/// +/// ## Mesh vs Fanout +/// +/// | Type | Description | +/// |--------|-----------------------------------------------------------| +/// | Mesh | Peers for topics we subscribe to | +/// | Fanout | Temporary peers for topics we publish to but don't | +/// | | subscribe to. Expires after fanout_ttl. | +/// +/// ## Heartbeat Maintenance +/// +/// The mesh is maintained through periodic heartbeat: +/// +/// 1. **Graft** if |mesh| < D_low: add peers up to D +/// 2. 
**Prune** if |mesh| > D_high: remove peers down to D +/// 3. **Gossip**: send IHAVE to D_lazy non-mesh peers +/// +/// ## References +/// +/// - Gossipsub v1.0: + +use rand::seq::SliceRandom; +use std::collections::{HashMap, HashSet}; +use std::time::{SystemTime, UNIX_EPOCH}; + +use super::config::GossipsubParameters; +use super::types::{PeerId, TopicId}; + +/// Fanout state for a publish-only topic. +/// +/// Tracks peers used when publishing to topics we don't subscribe to. +/// Fanout entries expire after a period of inactivity (fanout_ttl). +/// +/// Unlike mesh peers, fanout peers only receive our published messages. +/// We don't receive their messages since we're not subscribed. +#[derive(Debug, Clone)] +pub struct FanoutEntry { + /// Peers in the fanout for this topic. + /// + /// Selected randomly from available topic peers, up to D peers. + pub peers: HashSet, + + /// Unix timestamp of the last publish to this topic. + /// + /// Used to determine if the entry has expired. + pub last_published: f64, +} + +impl FanoutEntry { + /// Create a new empty fanout entry. + pub fn new() -> Self { + Self { + peers: HashSet::new(), + last_published: 0.0, + } + } + + /// Check if this fanout entry has expired. + /// + /// # Arguments + /// + /// * `current_time` - Current Unix timestamp + /// * `ttl` - Time-to-live in seconds + /// + /// # Returns + /// + /// `true` if the entry hasn't been used within ttl seconds + pub fn is_stale(&self, current_time: f64, ttl: f64) -> bool { + current_time - self.last_published > ttl + } +} + +impl Default for FanoutEntry { + fn default() -> Self { + Self::new() + } +} + +/// Mesh state for a single topic. +/// +/// Represents the set of peers we exchange full messages with +/// for a specific topic. Mesh membership is managed via +/// GRAFT and PRUNE control messages. +#[derive(Debug, Clone)] +pub struct TopicMesh { + /// Peers in the mesh for this topic. 
+ /// + /// These peers receive all published messages immediately + /// and forward all received messages to us. + pub peers: HashSet, +} + +impl TopicMesh { + /// Create a new empty topic mesh. + pub fn new() -> Self { + Self { + peers: HashSet::new(), + } + } + + /// Add a peer to this topic's mesh. + /// + /// # Arguments + /// + /// * `peer_id` - Peer to add + /// + /// # Returns + /// + /// `true` if the peer was added, `false` if already present + pub fn add_peer(&mut self, peer_id: PeerId) -> bool { + self.peers.insert(peer_id) + } + + /// Remove a peer from this topic's mesh. + /// + /// # Arguments + /// + /// * `peer_id` - Peer to remove + /// + /// # Returns + /// + /// `true` if the peer was removed, `false` if not present + pub fn remove_peer(&mut self, peer_id: &PeerId) -> bool { + self.peers.remove(peer_id) + } +} + +impl Default for TopicMesh { + fn default() -> Self { + Self::new() + } +} + +/// Complete mesh state for all subscribed topics. +/// +/// Central data structure managing mesh topology across all topics. +/// Provides operations for subscription management, peer tracking, +/// and gossip peer selection. 
+/// +/// # Example +/// +/// ``` +/// use lean_client_networking::gossipsub::mesh::MeshState; +/// use lean_client_networking::gossipsub::config::GossipsubParameters; +/// +/// let mut state = MeshState::new(GossipsubParameters::default()); +/// +/// // Subscribe and build mesh +/// state.subscribe("blocks".to_string()); +/// state.add_to_mesh("blocks", "peer1".to_string()); +/// state.add_to_mesh("blocks", "peer2".to_string()); +/// +/// // Get mesh peers for message forwarding +/// let peers = state.get_mesh_peers("blocks"); +/// +/// // Select peers for IHAVE gossip +/// let all_peers: HashSet<_> = vec!["peer1", "peer2", "peer3", "peer4"] +/// .into_iter() +/// .map(String::from) +/// .collect(); +/// let gossip_peers = state.select_peers_for_gossip("blocks", &all_peers); +/// ``` +#[derive(Debug, Clone)] +pub struct MeshState { + /// Gossipsub parameters controlling mesh behavior. + params: GossipsubParameters, + + /// Mesh state for each subscribed topic. Keyed by topic ID. + meshes: HashMap, + + /// Fanout state for publish-only topics. Keyed by topic ID. + fanouts: HashMap, + + /// Set of topics we are subscribed to. + subscriptions: HashSet, +} + +impl MeshState { + /// Create a new mesh state with the given parameters. + pub fn new(params: GossipsubParameters) -> Self { + Self { + params, + meshes: HashMap::new(), + fanouts: HashMap::new(), + subscriptions: HashSet::new(), + } + } + + /// Get the target mesh size per topic. + pub fn d(&self) -> usize { + self.params.d + } + + /// Get the low watermark - graft when mesh is smaller. + pub fn d_low(&self) -> usize { + self.params.d_low + } + + /// Get the high watermark - prune when mesh is larger. + pub fn d_high(&self) -> usize { + self.params.d_high + } + + /// Get the number of peers for IHAVE gossip. + pub fn d_lazy(&self) -> usize { + self.params.d_lazy + } + + /// Subscribe to a topic, initializing its mesh. 
+ /// + /// If we have fanout peers for this topic, they are + /// promoted to the mesh automatically. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier to subscribe to + pub fn subscribe(&mut self, topic: TopicId) { + if self.subscriptions.contains(&topic) { + return; + } + + self.subscriptions.insert(topic.clone()); + + // Promote fanout peers to mesh if any + let mut mesh = TopicMesh::new(); + if let Some(fanout) = self.fanouts.remove(&topic) { + mesh.peers = fanout.peers; + } + self.meshes.insert(topic, mesh); + } + + /// Unsubscribe from a topic. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier to unsubscribe from + /// + /// # Returns + /// + /// Set of peers that were in the mesh (need PRUNE) + pub fn unsubscribe(&mut self, topic: &TopicId) -> HashSet { + self.subscriptions.remove(topic); + self.meshes + .remove(topic) + .map(|mesh| mesh.peers) + .unwrap_or_default() + } + + /// Check if subscribed to a topic. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier to check + /// + /// # Returns + /// + /// `true` if subscribed + pub fn is_subscribed(&self, topic: &TopicId) -> bool { + self.subscriptions.contains(topic) + } + + /// Get mesh peers for a topic. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier + /// + /// # Returns + /// + /// Copy of the mesh peer set, or empty set if not subscribed + pub fn get_mesh_peers(&self, topic: &str) -> HashSet { + self.meshes + .get(topic) + .map(|mesh| mesh.peers.clone()) + .unwrap_or_default() + } + + /// Add a peer to a topic's mesh. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier + /// * `peer_id` - Peer to add + /// + /// # Returns + /// + /// - `true` if added, + /// - `false` if already present or not subscribed + pub fn add_to_mesh(&mut self, topic: &str, peer_id: PeerId) -> bool { + if let Some(mesh) = self.meshes.get_mut(topic) { + mesh.add_peer(peer_id) + } else { + false + } + } + + /// Remove a peer from a topic's mesh. 
+ /// + /// # Arguments + /// + /// * `topic` - Topic identifier + /// * `peer_id` - Peer to remove + /// + /// # Returns + /// + /// - `true` if removed, + /// - `false` if not present or not subscribed + pub fn remove_from_mesh(&mut self, topic: &str, peer_id: &PeerId) -> bool { + if let Some(mesh) = self.meshes.get_mut(topic) { + mesh.remove_peer(peer_id) + } else { + false + } + } + + /// Get fanout peers for a topic. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier + /// + /// # Returns + /// + /// Copy of the fanout peer set, or empty set if none + pub fn get_fanout_peers(&self, topic: &str) -> HashSet { + self.fanouts + .get(topic) + .map(|fanout| fanout.peers.clone()) + .unwrap_or_default() + } + + /// Update fanout for publishing to a non-subscribed topic. + /// + /// For subscribed topics, returns mesh peers instead. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier + /// * `available_peers` - All known peers for this topic + /// + /// # Returns + /// + /// Peers to publish to (mesh or fanout) + pub fn update_fanout( + &mut self, + topic: &str, + available_peers: &HashSet, + ) -> HashSet { + if self.subscriptions.contains(topic) { + return self.get_mesh_peers(topic); + } + + let d = self.d(); + let fanout = self + .fanouts + .entry(topic.to_string()) + .or_insert_with(FanoutEntry::new); + + fanout.last_published = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs_f64(); + + // Fill fanout up to D peers + if fanout.peers.len() < d { + let candidates: Vec<_> = available_peers + .difference(&fanout.peers) + .cloned() + .collect(); + let needed = d - fanout.peers.len(); + let mut rng = rand::thread_rng(); + let new_peers: Vec<_> = candidates + .choose_multiple(&mut rng, needed.min(candidates.len())) + .cloned() + .collect(); + fanout.peers.extend(new_peers); + } + + fanout.peers.clone() + } + + /// Remove expired fanout entries. 
+ /// + /// # Arguments + /// + /// * `ttl` - Time-to-live in seconds + /// + /// # Returns + /// + /// Number of entries removed + pub fn cleanup_fanouts(&mut self, ttl: f64) -> usize { + let current_time = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs_f64(); + + let stale: Vec<_> = self + .fanouts + .iter() + .filter(|(_, fanout)| fanout.is_stale(current_time, ttl)) + .map(|(topic, _)| topic.clone()) + .collect(); + + let count = stale.len(); + for topic in stale { + self.fanouts.remove(&topic); + } + + count + } + + /// Select non-mesh peers for IHAVE gossip. + /// + /// Randomly selects up to D_lazy peers from those not in the mesh. + /// These peers receive IHAVE messages during heartbeat. + /// + /// # Arguments + /// + /// * `topic` - Topic identifier + /// * `all_topic_peers` - All known peers subscribed to this topic + /// + /// # Returns + /// + /// List of peers to send IHAVE gossip to + pub fn select_peers_for_gossip( + &self, + topic: &str, + all_topic_peers: &HashSet, + ) -> Vec { + let mesh_peers = self.get_mesh_peers(topic); + let candidates: Vec<_> = all_topic_peers + .difference(&mesh_peers) + .cloned() + .collect(); + + if candidates.len() <= self.d_lazy() { + return candidates; + } + + let mut rng = rand::thread_rng(); + candidates + .choose_multiple(&mut rng, self.d_lazy()) + .cloned() + .collect() + } +} \ No newline at end of file diff --git a/lean_client/networking/src/gossipsub/message.rs b/lean_client/networking/src/gossipsub/message.rs index 4ac1ae1..ff8fdcc 100644 --- a/lean_client/networking/src/gossipsub/message.rs +++ b/lean_client/networking/src/gossipsub/message.rs @@ -1,3 +1,235 @@ +/// Gossipsub Message +/// +/// Message representation and ID computation for the gossipsub protocol. +/// +/// ## Overview +/// +/// Each gossipsub message carries a topic and payload. Messages are +/// identified by a 20-byte ID computed from their contents. 
+/// +/// ## Message ID Function +/// +/// Ethereum consensus uses a custom message ID function based on SHA256: +/// +/// ```text +/// message_id = SHA256(domain + uint64_le(len(topic)) + topic + data)[:20] +/// ``` +/// +/// **Components:** +/// +/// | Component | Description | +/// |-----------------|--------------------------------------------------------| +/// | domain | 1-byte prefix indicating snappy validity (0x00/0x01) | +/// | uint64_le | Topic length as 8-byte little-endian integer | +/// | topic | Topic string as UTF-8 bytes | +/// | data | Message payload (decompressed if snappy is valid) | +/// +/// **Domain Bytes:** +/// +/// - `0x01` (VALID_SNAPPY): Snappy decompression succeeded, use decompressed data +/// - `0x00` (INVALID_SNAPPY): Decompression failed or no decompressor, use raw data +/// +/// This ensures messages with compression issues get different IDs, +/// preventing cache pollution from invalid variants. +/// +/// ## Snappy Compression +/// +/// Ethereum consensus requires SSZ data to be snappy-compressed. +/// The message ID computation attempts decompression to determine +/// which domain byte to use. +/// +/// ## References +/// +/// - [Ethereum P2P spec](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md) +/// - [Gossipsub v1.0](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) + +use containers::Bytes20; +use sha2::{Digest, Sha256}; +use std::sync::Arc; + +use crate::types::{MESSAGE_DOMAIN_INVALID_SNAPPY, MESSAGE_DOMAIN_VALID_SNAPPY}; + +/// Trait for snappy decompression functions. +/// +/// Any type implementing this trait can be used for decompression. +/// The function should return an error if decompression fails. +pub trait SnappyDecompressor: Send + Sync { + /// Decompress snappy-compressed data. 
+ /// + /// # Arguments + /// + /// * `data` - Compressed bytes + /// + /// # Returns + /// + /// Decompressed bytes, or an error if decompression fails + fn decompress(&self, data: &[u8]) -> Result, Box>; +} + +/// A raw gossipsub message with lazy ID computation. +/// +/// Encapsulates topic, payload, and message ID logic. The ID is +/// computed lazily on first access and cached thereafter. +/// +/// ## Message ID Computation +/// +/// The 20-byte ID is computed as: +/// +/// ```text +/// SHA256(domain + uint64_le(len(topic)) + topic + data)[:20] +/// ``` +/// +/// Where `domain` depends on snappy decompression success. +#[derive(Clone)] +pub struct RawGossipsubMessage { + /// Topic string as UTF-8 encoded bytes. + /// + /// Example: `b"/leanconsensus/0x12345678/block/ssz_snappy"` + pub topic: Vec, + + /// Raw message payload. + /// + /// Typically snappy-compressed SSZ data. The actual content + /// depends on the topic (block, attestation, etc.). + pub raw_data: Vec, + + /// Optional snappy decompression function. + /// + /// If provided, decompression is attempted during ID computation + /// to determine the domain byte. + snappy_decompress: Option>, + + /// Cached message ID. + /// + /// Computed lazily on first access to `id()` method. Once computed, + /// the same ID is returned for all subsequent accesses. + cached_id: Option, +} + +impl RawGossipsubMessage { + /// Create a new gossipsub message. + /// + /// # Arguments + /// + /// * `topic` - Topic string as bytes + /// * `raw_data` - Raw message payload + /// * `snappy_decompress` - Optional decompression function + pub fn new( + topic: Vec, + raw_data: Vec, + snappy_decompress: Option>, + ) -> Self { + Self { + topic, + raw_data, + snappy_decompress, + cached_id: None, + } + } + + /// Get the 20-byte message ID. + /// + /// Computed lazily on first access using the Ethereum consensus + /// message ID function. The result is cached. 
+ /// + /// # Returns + /// + /// 20-byte message ID (Bytes20) + pub fn id(&self) -> Bytes20 { + if let Some(id) = &self.cached_id { + return id.clone(); + } + + // Compute ID + let id = Self::compute_id(&self.topic, &self.raw_data, self.snappy_decompress.as_ref()); + + // Note: We can't cache here because self is immutable + // In practice, callers should use a mutable reference or compute once + id + } + + /// Compute a 20-byte message ID from raw data. + /// + /// Implements the Ethereum consensus message ID function: + /// + /// ```text + /// SHA256(domain + uint64_le(len(topic)) + topic + data)[:20] + /// ``` + /// + /// ## Domain Selection + /// + /// - If `snappy_decompress` is provided and succeeds: + /// domain = 0x01, use decompressed data + /// - Otherwise: + /// domain = 0x00, use raw data + /// + /// # Arguments + /// + /// * `topic` - Topic string as bytes + /// * `data` - Message payload (potentially compressed) + /// * `snappy_decompress` - Optional decompression function + /// + /// # Returns + /// + /// 20-byte message ID + pub fn compute_id( + topic: &[u8], + data: &[u8], + snappy_decompress: Option<&Arc>, + ) -> Bytes20 { + let (domain, data_for_hash) = if let Some(decompressor) = snappy_decompress { + match decompressor.decompress(data) { + Ok(decompressed) => (MESSAGE_DOMAIN_VALID_SNAPPY, decompressed), + Err(_) => (MESSAGE_DOMAIN_INVALID_SNAPPY, data.to_vec()), + } + } else { + (MESSAGE_DOMAIN_INVALID_SNAPPY, data.to_vec()) + }; + + let mut preimage = Vec::new(); + preimage.extend_from_slice(domain); + preimage.extend_from_slice(&(topic.len() as u64).to_le_bytes()); + preimage.extend_from_slice(topic); + preimage.extend_from_slice(&data_for_hash); + + let hash = Sha256::digest(&preimage); + Bytes20::from(&hash[..20]) + } + + /// Get the topic as a UTF-8 string. 
+ /// + /// # Returns + /// + /// Topic decoded from bytes to string + pub fn topic_str(&self) -> String { + String::from_utf8_lossy(&self.topic).to_string() + } +} + +impl PartialEq for RawGossipsubMessage { + fn eq(&self, other: &Self) -> bool { + self.id() == other.id() + } +} + +impl Eq for RawGossipsubMessage {} + +impl std::fmt::Debug for RawGossipsubMessage { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("RawGossipsubMessage") + .field("topic", &self.topic_str()) + .field("raw_data_len", &self.raw_data.len()) + .field("cached_id", &self.cached_id) + .finish() + } +} + +impl std::hash::Hash for RawGossipsubMessage { + fn hash(&self, state: &mut H) { + self.id().hash(state); + } +} + use crate::gossipsub::topic::GossipsubKind; use crate::gossipsub::topic::GossipsubTopic; use containers::SignedAttestation; @@ -5,6 +237,7 @@ use containers::SignedBlockWithAttestation; use containers::ssz::SszReadDefault; use libp2p::gossipsub::TopicHash; +/// Decoded gossipsub message by type. 
pub enum GossipsubMessage { Block(SignedBlockWithAttestation), Attestation(SignedAttestation), diff --git a/lean_client/networking/src/gossipsub/mod.rs b/lean_client/networking/src/gossipsub/mod.rs index cf88fd0..f019a7c 100644 --- a/lean_client/networking/src/gossipsub/mod.rs +++ b/lean_client/networking/src/gossipsub/mod.rs @@ -1,6 +1,10 @@ pub mod config; +pub mod control; +pub mod mcache; +pub mod mesh; pub mod message; pub mod topic; +pub mod types; #[cfg(test)] mod tests; @@ -9,3 +13,15 @@ use crate::compressor::Compressor; use libp2p::gossipsub::{AllowAllSubscriptionFilter, Behaviour}; pub type GossipsubBehaviour = Behaviour; + +// Re-export commonly used types +pub use config::{GossipsubConfig, GossipsubParameters}; +pub use control::{ControlMessage, Graft, IDontWant, IHave, IWant, Prune}; +pub use mcache::{CacheEntry, MessageCache, SeenCache}; +pub use mesh::{FanoutEntry, MeshState, TopicMesh}; +pub use message::{GossipsubMessage, RawGossipsubMessage, SnappyDecompressor}; +pub use topic::{ + format_topic_string, get_topics, parse_topic_string, GossipsubKind, GossipsubTopic, + ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, +}; +pub use types::{MessageId, PeerId, Timestamp, TopicId}; diff --git a/lean_client/networking/src/gossipsub/tests/control.rs b/lean_client/networking/src/gossipsub/tests/control.rs new file mode 100644 index 0000000..42fb115 --- /dev/null +++ b/lean_client/networking/src/gossipsub/tests/control.rs @@ -0,0 +1,89 @@ +use crate::gossipsub::control::{ControlMessage, Graft, IDontWant, IHave, IWant, Prune}; +use containers::Bytes20; + +#[test] +fn test_graft_creation() { + let graft = Graft { + topic_id: "test_topic".to_string(), + }; + assert_eq!(graft.topic_id, "test_topic"); +} + +#[test] +fn test_prune_creation() { + let prune = Prune { + topic_id: "test_topic".to_string(), + }; + assert_eq!(prune.topic_id, "test_topic"); +} + +#[test] +fn test_ihave_creation() { + let msg_ids = vec![ + Bytes20::from(&[1u8; 
20][..]), + Bytes20::from(&[2u8; 20][..]), + ]; + let ihave = IHave { + topic_id: "test_topic".to_string(), + message_ids: msg_ids.clone(), + }; + + assert_eq!(ihave.topic_id, "test_topic"); + assert_eq!(ihave.message_ids.len(), 2); +} + +#[test] +fn test_iwant_creation() { + let msg_ids = vec![Bytes20::from(&[1u8; 20][..])]; + let iwant = IWant { + message_ids: msg_ids, + }; + + assert_eq!(iwant.message_ids.len(), 1); +} + +#[test] +fn test_idontwant_creation() { + let msg_ids = vec![Bytes20::from(&[1u8; 20][..])]; + let idontwant = IDontWant { + message_ids: msg_ids, + }; + + assert_eq!(idontwant.message_ids.len(), 1); +} + +#[test] +fn test_control_message_aggregation() { + let graft = Graft { + topic_id: "topic1".to_string(), + }; + let prune = Prune { + topic_id: "topic2".to_string(), + }; + + let control = ControlMessage { + grafts: vec![graft], + prunes: vec![prune], + ihaves: vec![], + iwants: vec![], + idontwants: vec![], + }; + + assert_eq!(control.grafts.len(), 1); + assert_eq!(control.prunes.len(), 1); + assert!(!control.is_empty()); +} + +#[test] +fn test_control_message_empty_check() { + let empty_control = ControlMessage::default(); + assert!(empty_control.is_empty()); + + let non_empty = ControlMessage { + grafts: vec![Graft { + topic_id: "topic".to_string(), + }], + ..Default::default() + }; + assert!(!non_empty.is_empty()); +} diff --git a/lean_client/networking/src/gossipsub/tests/mcache.rs b/lean_client/networking/src/gossipsub/tests/mcache.rs new file mode 100644 index 0000000..b573348 --- /dev/null +++ b/lean_client/networking/src/gossipsub/tests/mcache.rs @@ -0,0 +1,90 @@ +use crate::gossipsub::mcache::{MessageCache, SeenCache}; +use crate::gossipsub::message::RawGossipsubMessage; +use containers::Bytes20; + +#[test] +fn test_cache_put_and_get() { + let mut cache = MessageCache::new(6, 3); + let message = RawGossipsubMessage::new(b"topic".to_vec(), b"data".to_vec(), None); + + assert!(cache.put("topic".to_string(), message.clone())); + 
assert!(!cache.put("topic".to_string(), message.clone())); // Duplicate + + let retrieved = cache.get(&message.id()); + assert!(retrieved.is_some()); + assert_eq!(retrieved.unwrap().id(), message.id()); +} + +#[test] +fn test_cache_has() { + let mut cache = MessageCache::new(6, 3); + let message = RawGossipsubMessage::new(b"topic".to_vec(), b"data".to_vec(), None); + + assert!(!cache.has(&message.id())); + cache.put("topic".to_string(), message.clone()); + assert!(cache.has(&message.id())); +} + +#[test] +fn test_cache_shift() { + let mut cache = MessageCache::new(3, 2); + + let mut messages = Vec::new(); + for i in 0..5 { + let msg = RawGossipsubMessage::new( + b"topic".to_vec(), + format!("data{}", i).into_bytes(), + None, + ); + cache.put("topic".to_string(), msg.clone()); + messages.push(msg); + cache.shift(); + } + + // Old messages should be evicted + assert!(!cache.has(&messages[0].id())); + assert!(!cache.has(&messages[1].id())); +} + +#[test] +fn test_get_gossip_ids() { + let mut cache = MessageCache::new(6, 3); + + let msg1 = RawGossipsubMessage::new(b"topic1".to_vec(), b"data1".to_vec(), None); + let msg2 = RawGossipsubMessage::new(b"topic2".to_vec(), b"data2".to_vec(), None); + let msg3 = RawGossipsubMessage::new(b"topic1".to_vec(), b"data3".to_vec(), None); + + cache.put("topic1".to_string(), msg1.clone()); + cache.put("topic2".to_string(), msg2.clone()); + cache.put("topic1".to_string(), msg3.clone()); + + let gossip_ids = cache.get_gossip_ids("topic1"); + + assert!(gossip_ids.contains(&msg1.id())); + assert!(!gossip_ids.contains(&msg2.id())); + assert!(gossip_ids.contains(&msg3.id())); +} + +#[test] +fn test_seen_cache_add_and_check() { + let mut cache = SeenCache::new(60); + let msg_id = Bytes20::new([1u8; 20]); + + assert!(!cache.has(&msg_id)); + assert!(cache.add(msg_id.clone(), 1000.0)); + assert!(cache.has(&msg_id)); + assert!(!cache.add(msg_id.clone(), 1001.0)); // Duplicate +} + +#[test] +fn test_seen_cache_cleanup() { + let mut cache = 
SeenCache::new(10); + let msg_id = Bytes20::new([1u8; 20]); + + cache.add(msg_id.clone(), 1000.0); + assert!(cache.has(&msg_id)); + + let removed = cache.cleanup(1015.0); + assert_eq!(removed, 1); + assert!(!cache.has(&msg_id)); +} diff --git a/lean_client/networking/src/gossipsub/tests/mesh.rs b/lean_client/networking/src/gossipsub/tests/mesh.rs new file mode 100644 index 0000000..3dce268 --- /dev/null +++ b/lean_client/networking/src/gossipsub/tests/mesh.rs @@ -0,0 +1,100 @@ +use crate::gossipsub::config::GossipsubParameters; +use crate::gossipsub::mesh::{FanoutEntry, MeshState, TopicMesh}; +use std::collections::HashSet; + +#[test] +fn test_mesh_state_initialization() { + let params = GossipsubParameters { + d: 8, + d_low: 6, + d_high: 12, + d_lazy: 6, + ..Default::default() + }; + let mesh = MeshState::new(params); + + assert_eq!(mesh.d(), 8); + assert_eq!(mesh.d_low(), 6); + assert_eq!(mesh.d_high(), 12); + assert_eq!(mesh.d_lazy(), 6); +} + +#[test] +fn test_subscribe_and_unsubscribe() { + let mesh = &mut MeshState::new(GossipsubParameters::default()); + + mesh.subscribe("topic1".to_string()); + assert!(mesh.is_subscribed(&"topic1".to_string())); + assert!(!mesh.is_subscribed(&"topic2".to_string())); + + let peers = mesh.unsubscribe(&"topic1".to_string()); + assert!(!mesh.is_subscribed(&"topic1".to_string())); + assert!(peers.is_empty()); +} + +#[test] +fn test_add_remove_mesh_peers() { + let mesh = &mut MeshState::new(GossipsubParameters::default()); + mesh.subscribe("topic1".to_string()); + + assert!(mesh.add_to_mesh("topic1", "peer1".to_string())); + assert!(mesh.add_to_mesh("topic1", "peer2".to_string())); + assert!(!mesh.add_to_mesh("topic1", "peer1".to_string())); // Already in mesh + + let peers = mesh.get_mesh_peers("topic1"); + assert!(peers.contains("peer1")); + assert!(peers.contains("peer2")); + + assert!(mesh.remove_from_mesh("topic1", &"peer1".to_string())); + assert!(!mesh.remove_from_mesh("topic1", &"peer1".to_string())); // Already removed + 
+ let peers = mesh.get_mesh_peers("topic1"); + assert!(!peers.contains("peer1")); + assert!(peers.contains("peer2")); +} + +#[test] +fn test_gossip_peer_selection() { + let params = GossipsubParameters { + d_lazy: 3, + ..Default::default() + }; + let mesh = &mut MeshState::new(params); + mesh.subscribe("topic1".to_string()); + mesh.add_to_mesh("topic1", "peer1".to_string()); + mesh.add_to_mesh("topic1", "peer2".to_string()); + + let all_peers: HashSet<_> = vec!["peer1", "peer2", "peer3", "peer4", "peer5", "peer6"] + .into_iter() + .map(String::from) + .collect(); + + let gossip_peers = mesh.select_peers_for_gossip("topic1", &all_peers); + + let mesh_peers = mesh.get_mesh_peers("topic1"); + for peer in &gossip_peers { + assert!(!mesh_peers.contains(peer)); + } +} + +#[test] +fn test_topic_mesh_add_remove() { + let topic_mesh = &mut TopicMesh::new(); + + assert!(topic_mesh.add_peer("peer1".to_string())); + assert!(!topic_mesh.add_peer("peer1".to_string())); // Already exists + assert!(topic_mesh.peers.contains("peer1")); + + assert!(topic_mesh.remove_peer(&"peer1".to_string())); + assert!(!topic_mesh.remove_peer(&"peer1".to_string())); // Already removed + assert!(!topic_mesh.peers.contains("peer1")); +} + +#[test] +fn test_fanout_entry_staleness() { + let mut entry = FanoutEntry::new(); + entry.last_published = 1000.0; + + assert!(!entry.is_stale(1050.0, 60.0)); + assert!(entry.is_stale(1070.0, 60.0)); +} diff --git a/lean_client/networking/src/gossipsub/tests/mod.rs b/lean_client/networking/src/gossipsub/tests/mod.rs index 15f330a..046af03 100644 --- a/lean_client/networking/src/gossipsub/tests/mod.rs +++ b/lean_client/networking/src/gossipsub/tests/mod.rs @@ -1,4 +1,8 @@ mod config; +mod control; +mod mcache; +mod mesh; mod message; mod message_id; +mod raw_message; mod topic; diff --git a/lean_client/networking/src/gossipsub/topic.rs b/lean_client/networking/src/gossipsub/topic.rs index 09fcd33..ce9af7a 100644 --- a/lean_client/networking/src/gossipsub/topic.rs 
+++ b/lean_client/networking/src/gossipsub/topic.rs @@ -1,44 +1,211 @@ +/// Gossipsub Topics +/// +/// Topic definitions for the Lean Ethereum gossipsub network. +/// +/// ## Overview +/// +/// Gossipsub organizes messages by topic. Each topic identifies a specific +/// message type (blocks, attestations, etc.) within a specific fork. +/// +/// ## Topic Format +/// +/// Topics follow a structured format: +/// +/// ```text +/// /{prefix}/{fork_digest}/{topic_name}/{encoding} +/// +/// Example: /leanconsensus/0x12345678/block/ssz_snappy +/// ``` +/// +/// **Components:** +/// +/// | Component | Description | +/// |----------------|----------------------------------------------------------| +/// | prefix | Network identifier (`leanconsensus`) | +/// | fork_digest | 4-byte fork identifier as hex (`0x12345678`) | +/// | topic_name | Message type (`block`, `attestation`) | +/// | encoding | Serialization format (always `ssz_snappy`) | +/// +/// ## Fork Digest +/// +/// The fork digest ensures peers on different forks don't exchange +/// incompatible messages. It's derived from the fork version and +/// genesis validators root. +/// +/// ## Topic Types +/// +/// | Topic | Content | +/// |----------------|----------------------------------------------------------| +/// | block | Signed beacon blocks | +/// | attestation | Signed attestations | +/// +/// ## References +/// +/// - Ethereum P2P: + use libp2p::gossipsub::{IdentTopic, TopicHash}; +/// Network prefix for Lean consensus gossip topics. +/// +/// Identifies this network in topic strings. Different networks +/// (mainnet, testnets) may use different prefixes. pub const TOPIC_PREFIX: &str = "leanconsensus"; + +/// Encoding suffix for SSZ with Snappy compression. +/// +/// All Ethereum consensus gossip messages use SSZ serialization +/// with Snappy compression. pub const SSZ_SNAPPY_ENCODING_POSTFIX: &str = "ssz_snappy"; +/// Topic name for block messages. 
+/// +/// Used in the topic string to identify signed beacon block messages. pub const BLOCK_TOPIC: &str = "block"; -pub const ATTESTATION_TOPIC: &str = "attestation"; -#[derive(Debug, Clone, Hash, PartialEq, Eq)] -pub struct GossipsubTopic { - pub fork: String, - pub kind: GossipsubKind, -} +/// Topic name for attestation messages. +/// +/// Used in the topic string to identify signed attestation messages. +pub const ATTESTATION_TOPIC: &str = "attestation"; +/// Gossip topic types. +/// +/// Enumerates the different message types that can be gossiped. +/// +/// Each variant corresponds to a specific `topic_name` in the +/// topic string format. #[derive(Debug, Hash, Clone, Copy, PartialEq, Eq)] pub enum GossipsubKind { + /// Signed beacon block messages. Block, + + /// Signed attestation messages. Attestation, } -pub fn get_topics(fork: String) -> Vec { - vec![ - GossipsubTopic { - fork: fork.clone(), - kind: GossipsubKind::Block, - }, - GossipsubTopic { - fork: fork.clone(), - kind: GossipsubKind::Attestation, - }, - ] +impl std::fmt::Display for GossipsubKind { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + GossipsubKind::Block => write!(f, "{BLOCK_TOPIC}"), + GossipsubKind::Attestation => write!(f, "{ATTESTATION_TOPIC}"), + } + } +} + +impl GossipsubKind { + /// Get the topic name string for this kind. + pub fn as_str(&self) -> &'static str { + match self { + GossipsubKind::Block => BLOCK_TOPIC, + GossipsubKind::Attestation => ATTESTATION_TOPIC, + } + } +} + +/// A fully-qualified gossipsub topic. +/// +/// Immutable representation of a topic that combines the message type +/// and fork digest. Can be converted to/from the string format. +#[derive(Debug, Clone, Hash, PartialEq, Eq)] +pub struct GossipsubTopic { + /// Fork digest as 0x-prefixed hex string. + /// + /// Identifies the fork this topic belongs to. + /// + /// Peers must match on fork digest to exchange messages on a topic. 
+ pub fork: String, + + /// The topic type (block, attestation, etc.). + /// + /// Determines what kind of messages are exchanged on this topic. + pub kind: GossipsubKind, } impl GossipsubTopic { - pub fn decode(topic: &TopicHash) -> Result { - let topic_parts = Self::split_topic(topic)?; - Self::validate_parts(&topic_parts, topic)?; - let fork = Self::extract_fork(&topic_parts); - let kind = Self::extract_kind(&topic_parts)?; + /// Create a new gossipsub topic. + /// + /// # Arguments + /// + /// * `fork` - Fork digest as 0x-prefixed hex string + /// * `kind` - Topic type + pub fn new(fork: String, kind: GossipsubKind) -> Self { + Self { fork, kind } + } + + /// Create a block topic for the given fork. + /// + /// # Arguments + /// + /// * `fork_digest` - Fork digest as 0x-prefixed hex string + /// + /// # Returns + /// + /// GossipsubTopic for block messages + pub fn block(fork_digest: String) -> Self { + Self::new(fork_digest, GossipsubKind::Block) + } + + /// Create an attestation topic for the given fork. + /// + /// # Arguments + /// + /// * `fork_digest` - Fork digest as 0x-prefixed hex string + /// + /// # Returns + /// + /// GossipsubTopic for attestation messages + pub fn attestation(fork_digest: String) -> Self { + Self::new(fork_digest, GossipsubKind::Attestation) + } + + /// Parse a topic string into a GossipsubTopic. 
+ /// + /// # Arguments + /// + /// * `topic_str` - Full topic string to parse + /// + /// # Returns + /// + /// Parsed GossipsubTopic instance + /// + /// # Errors + /// + /// Returns an error if the topic string is malformed + /// + /// # Example + /// + /// ``` + /// use lean_client_networking::gossipsub::topic::GossipsubTopic; + /// + /// let topic = GossipsubTopic::from_string("/leanconsensus/0x12345678/block/ssz_snappy")?; + /// # Ok::<(), String>(()) + /// ``` + pub fn from_string(topic_str: &str) -> Result { + let (prefix, fork_digest, topic_name, encoding) = parse_topic_string(topic_str)?; + + if prefix != TOPIC_PREFIX { + return Err(format!("Invalid prefix: expected '{TOPIC_PREFIX}', got '{prefix}'")); + } + + if encoding != SSZ_SNAPPY_ENCODING_POSTFIX { + return Err(format!( + "Invalid encoding: expected '{SSZ_SNAPPY_ENCODING_POSTFIX}', got '{encoding}'" + )); + } + + let kind = match topic_name { + BLOCK_TOPIC => GossipsubKind::Block, + ATTESTATION_TOPIC => GossipsubKind::Attestation, + other => return Err(format!("Unknown topic: '{other}'")), + }; + + Ok(Self::new(fork_digest.to_string(), kind)) + } - Ok(GossipsubTopic { fork, kind }) + /// Decode a TopicHash into a GossipsubTopic. + /// + /// This is the existing method for compatibility with libp2p. + pub fn decode(topic: &TopicHash) -> Result { + Self::from_string(topic.as_str()) } fn split_topic(topic: &TopicHash) -> Result, String> { @@ -69,6 +236,11 @@ impl GossipsubTopic { other => Err(format!("Invalid topic kind: {other:?}")), } } + + /// Convert to topic string as bytes. 
+ pub fn as_bytes(&self) -> Vec { + self.to_string().into_bytes() + } } impl std::fmt::Display for GossipsubTopic { @@ -95,22 +267,163 @@ impl From for String { impl From for TopicHash { fn from(val: GossipsubTopic) -> Self { - let kind_str = match &val.kind { - GossipsubKind::Block => BLOCK_TOPIC, - GossipsubKind::Attestation => ATTESTATION_TOPIC, - }; - TopicHash::from_raw(format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, val.fork, kind_str, SSZ_SNAPPY_ENCODING_POSTFIX - )) + TopicHash::from_raw(val.to_string()) } } -impl std::fmt::Display for GossipsubKind { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - GossipsubKind::Block => write!(f, "{BLOCK_TOPIC}"), - GossipsubKind::Attestation => write!(f, "{ATTESTATION_TOPIC}"), - } +/// Get all topics for a given fork. +/// +/// # Arguments +/// +/// * `fork` - Fork digest as 0x-prefixed hex string +/// +/// # Returns +/// +/// Vector of all gossipsub topics for the fork +pub fn get_topics(fork: String) -> Vec { + vec![ + GossipsubTopic::block(fork.clone()), + GossipsubTopic::attestation(fork), + ] +} + +/// Format a complete gossip topic string. +/// +/// Low-level function for constructing topic strings. For most cases, +/// use `GossipsubTopic` instead. 
+/// +/// # Arguments +/// +/// * `topic_name` - Message type (e.g., "block", "attestation") +/// * `fork_digest` - Fork digest as 0x-prefixed hex string +/// * `prefix` - Network prefix (defaults to TOPIC_PREFIX) +/// * `encoding` - Encoding suffix (defaults to SSZ_SNAPPY_ENCODING_POSTFIX) +/// +/// # Returns +/// +/// Formatted topic string +/// +/// # Example +/// +/// ``` +/// use lean_client_networking::gossipsub::topic::format_topic_string; +/// +/// let topic_str = format_topic_string("block", "0x12345678", None, None); +/// assert_eq!(topic_str, "/leanconsensus/0x12345678/block/ssz_snappy"); +/// ``` +pub fn format_topic_string( + topic_name: &str, + fork_digest: &str, + prefix: Option<&str>, + encoding: Option<&str>, +) -> String { + format!( + "/{}/{}/{}/{}", + prefix.unwrap_or(TOPIC_PREFIX), + fork_digest, + topic_name, + encoding.unwrap_or(SSZ_SNAPPY_ENCODING_POSTFIX) + ) +} + +/// Parse a topic string into its components. +/// +/// Low-level function for deconstructing topic strings. For most cases, +/// use `GossipsubTopic::from_string()` instead. 
+/// +/// # Arguments +/// +/// * `topic_str` - Topic string to parse +/// +/// # Returns +/// +/// Tuple of (prefix, fork_digest, topic_name, encoding) +/// +/// # Errors +/// +/// Returns an error if the topic string is malformed +/// +/// # Example +/// +/// ``` +/// use lean_client_networking::gossipsub::topic::parse_topic_string; +/// +/// let (prefix, fork, name, enc) = parse_topic_string("/leanconsensus/0x12345678/block/ssz_snappy")?; +/// assert_eq!(prefix, "leanconsensus"); +/// assert_eq!(fork, "0x12345678"); +/// assert_eq!(name, "block"); +/// assert_eq!(enc, "ssz_snappy"); +/// # Ok::<(), String>(()) +/// ``` +pub fn parse_topic_string(topic_str: &str) -> Result<(&str, &str, &str, &str), String> { + let parts: Vec<&str> = topic_str.trim_start_matches('/').split('/').collect(); + + if parts.len() != 4 { + return Err(format!("Invalid topic format: expected 4 parts, got {}", parts.len())); + } + + Ok((parts[0], parts[1], parts[2], parts[3])) +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_gossip_topic_creation() { + let topic = GossipsubTopic::new("0x12345678".to_string(), GossipsubKind::Block); + + assert_eq!(topic.kind, GossipsubKind::Block); + assert_eq!(topic.fork, "0x12345678"); + assert_eq!(topic.to_string(), "/leanconsensus/0x12345678/block/ssz_snappy"); + } + + #[test] + fn test_gossip_topic_from_string() { + let topic = GossipsubTopic::from_string("/leanconsensus/0x12345678/block/ssz_snappy") + .expect("Failed to parse topic"); + + assert_eq!(topic.kind, GossipsubKind::Block); + assert_eq!(topic.fork, "0x12345678"); + } + + #[test] + fn test_gossip_topic_factory_methods() { + let block_topic = GossipsubTopic::block("0xabcd1234".to_string()); + assert_eq!(block_topic.kind, GossipsubKind::Block); + + let attestation_topic = GossipsubTopic::attestation("0xabcd1234".to_string()); + assert_eq!(attestation_topic.kind, GossipsubKind::Attestation); + } + + #[test] + fn test_format_topic_string() { + let result = 
format_topic_string("block", "0x12345678", None, None); + assert_eq!(result, "/leanconsensus/0x12345678/block/ssz_snappy"); + } + + #[test] + fn test_parse_topic_string() { + let (prefix, fork_digest, topic_name, encoding) = + parse_topic_string("/leanconsensus/0x12345678/block/ssz_snappy") + .expect("Failed to parse"); + + assert_eq!(prefix, "leanconsensus"); + assert_eq!(fork_digest, "0x12345678"); + assert_eq!(topic_name, "block"); + assert_eq!(encoding, "ssz_snappy"); + } + + #[test] + fn test_invalid_topic_string() { + assert!(GossipsubTopic::from_string("/invalid/topic").is_err()); + assert!(GossipsubTopic::from_string("/wrongprefix/0x123/block/ssz_snappy").is_err()); + } + + #[test] + fn test_topic_kind_enum() { + assert_eq!(GossipsubKind::Block.as_str(), "block"); + assert_eq!(GossipsubKind::Attestation.as_str(), "attestation"); + assert_eq!(GossipsubKind::Block.to_string(), "block"); } } diff --git a/lean_client/networking/src/gossipsub/types.rs b/lean_client/networking/src/gossipsub/types.rs new file mode 100644 index 0000000..8e4ca6d --- /dev/null +++ b/lean_client/networking/src/gossipsub/types.rs @@ -0,0 +1,33 @@ +/// Gossipsub Type Definitions +/// +/// Type aliases for common gossipsub types. + +use containers::Bytes20; + +/// 20-byte message identifier. +/// +/// Computed from message contents using SHA256: +/// `SHA256(domain + uint64_le(len(topic)) + topic + data)[:20]` +/// +/// The domain byte distinguishes valid/invalid snappy compression. +pub type MessageId = Bytes20; + +/// Libp2p peer identifier. +/// +/// Derived from the peer's public key as a base58-encoded multihash. +/// Uniquely identifies peers in the P2P network. +pub type PeerId = String; + +/// Topic string identifier. +/// +/// Follows the Ethereum consensus format: +/// `/{prefix}/{fork_digest}/{topic_name}/{encoding}` +pub type TopicId = String; + +/// Unix timestamp in seconds since epoch. 
+/// +/// Used for: +/// - Message arrival times +/// - Peer activity tracking +/// - Seen cache expiry +pub type Timestamp = f64; diff --git a/lean_client/networking/src/types.rs b/lean_client/networking/src/types.rs index bbe7cba..0661c4b 100644 --- a/lean_client/networking/src/types.rs +++ b/lean_client/networking/src/types.rs @@ -8,8 +8,8 @@ use tokio::sync::mpsc; use crate::serde_utils::quoted_u64; -pub const MESSAGE_DOMAIN_VALID_SNAPPY: &[u8; 4] = &[0x01, 0x00, 0x00, 0x00]; -pub const MESSAGE_DOMAIN_INVALID_SNAPPY: &[u8; 4] = &[0x00, 0x00, 0x00, 0x00]; +pub const MESSAGE_DOMAIN_VALID_SNAPPY: &[u8; 1] = &[0x01]; +pub const MESSAGE_DOMAIN_INVALID_SNAPPY: &[u8; 1] = &[0x00]; #[derive(Debug, Serialize, Clone, Copy, PartialEq, Eq, Hash)] #[serde(rename_all = "lowercase")] From ae512b9a62c6065373e5f3e3e66049e50943714b Mon Sep 17 00:00:00 2001 From: Dariusspr <108625236+Dariusspr@users.noreply.github.com> Date: Sun, 18 Jan 2026 23:34:59 +0200 Subject: [PATCH 10/27] Remove all-features flag. 
Cant build both dev-nets together --- lean_client/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/lean_client/Makefile b/lean_client/Makefile index 582e56b..6539dea 100644 --- a/lean_client/Makefile +++ b/lean_client/Makefile @@ -10,7 +10,7 @@ check-format: .PHONY: test test: - cargo test --workspace --all-features --no-fail-fast + cargo test --workspace --no-fail-fast .PHONY: build build: From 67497f6d13e3a58cf4c61c2e76cb83042c21f8a7 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:47:57 +0200 Subject: [PATCH 11/27] feat: add discv5 dependencies --- lean_client/Cargo.lock | 285 +++++++++++------------------- lean_client/networking/Cargo.toml | 9 +- 2 files changed, 107 insertions(+), 187 deletions(-) diff --git a/lean_client/Cargo.lock b/lean_client/Cargo.lock index 910d9c1..61be8fc 100644 --- a/lean_client/Cargo.lock +++ b/lean_client/Cargo.lock @@ -79,7 +79,7 @@ dependencies = [ "hashbrown 0.16.0", "indexmap 2.11.4", "itoa", - "k256 0.13.4", + "k256", "keccak-asm", "paste", "proptest", @@ -500,7 +500,7 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ - "base64 0.22.1", + "base64", "http", "log", "url", @@ -529,12 +529,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -551,12 +545,6 @@ dependencies = [ "match-lookup", ] -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - 
[[package]] name = "base64" version = "0.22.1" @@ -962,18 +950,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1109,13 +1085,14 @@ dependencies = [ ] [[package]] -name = "der" -version = "0.6.1" +name = "delay_map" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +checksum = "88e365f083a5cb5972d50ce8b1b2c9f125dc5ec0f50c0248cfb568ae59efcf0b" dependencies = [ - "const-oid", - "zeroize", + "futures", + "tokio", + "tokio-util", ] [[package]] @@ -1212,6 +1189,37 @@ dependencies = [ "subtle", ] +[[package]] +name = "discv5" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" +dependencies = [ + "aes", + "aes-gcm", + "alloy-rlp", + "arrayvec", + "ctr", + "delay_map", + "enr", + "fnv", + "futures", + "hashlink", + "hex", + "hkdf", + "lazy_static", + "lru", + "more-asserts", + "parking_lot", + "rand 0.8.5", + "smallvec", + "socket2 0.5.10", + "tokio", + "tracing", + "uint 0.10.0", + "zeroize", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -1241,30 +1249,18 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc5d6d6a8504f8caedd7de14576464383900cd3840b7033a7a3dce5ac00121ca" -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] - [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.10", + "der", "digest 0.10.7", - "elliptic-curve 0.13.8", - "rfc6979 0.4.0", - "signature 2.2.0", - "spki 0.7.3", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] @@ -1273,8 +1269,8 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8 0.10.2", - "signature 2.2.0", + "pkcs8", + "signature", ] [[package]] @@ -1310,59 +1306,39 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array", - "group 0.12.1", - "pkcs8 0.9.0", - "rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.13.1", + "ff", "generic-array", - "group 0.13.0", - "pkcs8 0.10.2", + "group", + "pkcs8", "rand_core 0.6.4", - "sec1 0.7.3", + "sec1", "subtle", "zeroize", ] [[package]] name = "enr" -version = "0.7.0" 
+version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad" +checksum = "851bd664a3d3a3c175cff92b2f0df02df3c541b4895d0ae307611827aae46152" dependencies = [ - "base64 0.13.1", - "bs58 0.4.0", + "alloy-rlp", + "base64", "bytes", + "ed25519-dalek", "hex", - "k256 0.11.6", + "k256", "log", "rand 0.8.5", - "rlp", "serde", "sha3", "zeroize", @@ -1444,7 +1420,7 @@ dependencies = [ "impl-rlp", "impl-serde", "primitive-types", - "uint", + "uint 0.9.5", ] [[package]] @@ -1524,16 +1500,6 @@ dependencies = [ "bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "ff" version = "0.13.1" @@ -1797,24 +1763,13 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.1", + "ff", "rand_core 0.6.4", "subtle", ] @@ -2407,18 +2362,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if", - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.9 
(registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "k256" version = "0.13.4" @@ -2426,11 +2369,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", + "ecdsa", + "elliptic-curve", "once_cell", "sha2 0.10.9 (registry+https://github.com/rust-lang/crates.io-index)", - "signature 2.2.0", + "signature", ] [[package]] @@ -2639,7 +2582,7 @@ checksum = "c7f58e37d8d6848e5c4c9e3c35c6f61133235bff2960c9c00a663b0849301221" dependencies = [ "async-channel", "asynchronous-codec 0.7.0", - "base64 0.22.1", + "base64", "byteorder", "bytes", "either", @@ -2710,7 +2653,7 @@ dependencies = [ "bs58 0.5.1", "ed25519-dalek", "hkdf", - "k256 0.13.4", + "k256", "multihash 0.19.3", "quick-protobuf", "rand 0.8.5", @@ -3037,6 +2980,12 @@ dependencies = [ "uuid", ] +[[package]] +name = "more-asserts" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" + [[package]] name = "multiaddr" version = "0.17.1" @@ -3222,12 +3171,17 @@ dependencies = [ "anyhow", "async-trait", "containers", + "discv5", "enr", "env-config", "futures", + "hex", + "k256", "libp2p", "libp2p-identity 0.2.12", "libp2p-mplex", + "num-bigint", + "num-traits", "parking_lot", "rand 0.8.5", "serde", @@ -3559,7 +3513,7 @@ version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ - "base64 0.22.1", + "base64", "serde_core", ] @@ -3611,24 +3565,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.10", - "spki 0.7.3", + "der", + "spki", ] [[package]] @@ -3718,7 +3662,7 @@ dependencies = [ "impl-codec", "impl-rlp", "impl-serde", - "uint", + "uint 0.9.5", ] [[package]] @@ -4100,17 +4044,6 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -4389,30 +4322,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.2.0", - "der 0.7.10", + "base16ct", + "der", "generic-array", - "pkcs8 0.10.2", + "pkcs8", "subtle", "zeroize", ] @@ -4507,7 +4426,7 @@ version = "3.14.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "hex", "indexmap 1.9.3", @@ -4624,16 +4543,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.2.0" @@ -4708,16 +4617,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - [[package]] name = "spki" version = "0.7.3" @@ -4725,7 +4624,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.10", + "der", ] [[package]] @@ -5081,6 +4980,7 @@ dependencies = [ "futures-core", "futures-sink", "pin-project-lite", + "slab", "tokio", ] @@ -5135,6 +5035,7 @@ version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -5245,6 +5146,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" diff --git a/lean_client/networking/Cargo.toml b/lean_client/networking/Cargo.toml 
index 8f47702..6b116c9 100644 --- a/lean_client/networking/Cargo.toml +++ b/lean_client/networking/Cargo.toml @@ -17,7 +17,9 @@ snap = {workspace = true} sha2 = { workspace = true } anyhow = { workspace = true } async-trait = "0.1" -enr = "0.7" +discv5 = "0.10" +enr = { version = "0.13", features = ["k256"] } +k256 = "0.13" futures = "0.3" libp2p-identity = { version = "0.2", features = ["secp256k1"] } libp2p-mplex = "0.39" @@ -28,3 +30,8 @@ tracing = "0.1" yamux = "0.12" ssz = { workspace = true } serde = { workspace = true } + +[dev-dependencies] +hex = "0.4" +num-bigint = "0.4" +num-traits = "0.2" From 24bbed93e346e034311b9ddbe75e7a5a26a6f232 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:09 +0200 Subject: [PATCH 12/27] feat: add discovery config --- .../networking/src/discovery/config.rs | 40 +++++++++++++++++++ 1 file changed, 40 insertions(+) create mode 100644 lean_client/networking/src/discovery/config.rs diff --git a/lean_client/networking/src/discovery/config.rs b/lean_client/networking/src/discovery/config.rs new file mode 100644 index 0000000..b613cc7 --- /dev/null +++ b/lean_client/networking/src/discovery/config.rs @@ -0,0 +1,40 @@ +use std::net::IpAddr; + +use discv5::enr::CombinedKey; +use enr::Enr; + +#[derive(Debug, Clone)] +pub struct DiscoveryConfig { + pub enabled: bool, + pub udp_port: u16, + pub libp2p_port: u16, + pub listen_address: IpAddr, + pub bootnodes: Vec<Enr<CombinedKey>>, +} + +impl DiscoveryConfig { + pub fn new(listen_address: IpAddr, udp_port: u16, libp2p_port: u16) -> Self { + Self { + enabled: true, + udp_port, + libp2p_port, + listen_address, + bootnodes: Vec::new(), + } + } + + pub fn with_bootnodes(mut self, bootnodes: Vec<Enr<CombinedKey>>) -> Self { + self.bootnodes = bootnodes; + self + } + + pub fn disabled() -> Self { + Self { + enabled: false, + udp_port: 0, + libp2p_port: 0, + listen_address: IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), + bootnodes: Vec::new(), + } + } +} From 3de4a45800455f4cc3db71780616d5bdce7e528c Mon
Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:22 +0200 Subject: [PATCH 13/27] feat: add discovery service --- lean_client/networking/src/discovery/mod.rs | 219 ++++++++++++++++++++ 1 file changed, 219 insertions(+) create mode 100644 lean_client/networking/src/discovery/mod.rs diff --git a/lean_client/networking/src/discovery/mod.rs b/lean_client/networking/src/discovery/mod.rs new file mode 100644 index 0000000..d0b67db --- /dev/null +++ b/lean_client/networking/src/discovery/mod.rs @@ -0,0 +1,219 @@ +pub mod config; + +#[cfg(test)] +mod tests; + +use std::net::IpAddr; +use std::sync::Arc; + +use anyhow::{Result, anyhow}; +use discv5::enr::{CombinedKey, NodeId}; +use discv5::{ConfigBuilder, Discv5, Event as Discv5Event, ListenConfig}; +use enr::{Builder as EnrBuilder, Enr}; +use libp2p::Multiaddr; +use libp2p::multiaddr::Protocol; +use libp2p_identity::{Keypair, PeerId}; +use tokio::sync::mpsc; +use tracing::{debug, info, warn}; + +pub use config::DiscoveryConfig; + +/// Discovery service that wraps discv5 for peer discovery. 
+pub struct DiscoveryService { + discv5: Arc<Discv5>, + local_enr: Enr<CombinedKey>, + event_receiver: mpsc::Receiver<Discv5Event>, +} + +impl DiscoveryService { + pub async fn new(config: DiscoveryConfig, keypair: &Keypair) -> Result<Self> { + let enr_key = keypair_to_enr_key(keypair)?; + + let local_enr = build_enr(&enr_key, config.listen_address, config.udp_port, config.libp2p_port)?; + + info!( + enr = %local_enr, + node_id = %local_enr.node_id(), + "Built local ENR" + ); + + let listen_config = ListenConfig::from_ip(config.listen_address, config.udp_port); + + let discv5_config = ConfigBuilder::new(listen_config).build(); + + let mut discv5 = Discv5::new(local_enr.clone(), enr_key, discv5_config) + .map_err(|e| anyhow!("Failed to create discv5: {e}"))?; + + for bootnode in &config.bootnodes { + if let Err(e) = discv5.add_enr(bootnode.clone()) { + warn!(enr = %bootnode, error = ?e, "Failed to add bootnode ENR"); + } else { + info!(enr = %bootnode, "Added bootnode ENR"); + } + } + + discv5 + .start() + .await + .map_err(|e| anyhow!("Failed to start discv5: {e}"))?; + + let event_receiver = discv5 + .event_stream() + .await + .map_err(|e| anyhow!("Failed to get discv5 event stream: {e}"))?; + + info!("Discovery service started"); + + Ok(Self { + discv5: Arc::new(discv5), + local_enr, + event_receiver, + }) + } + + pub fn local_enr(&self) -> &Enr<CombinedKey> { + &self.local_enr + } + + pub async fn recv(&mut self) -> Option<Enr<CombinedKey>> { + loop { + match self.event_receiver.recv().await { + Some(event) => { + match event { + Discv5Event::Discovered(enr) => { + info!( + node_id = %enr.node_id(), + "Discovered peer via discv5" + ); + return Some(enr); + } + Discv5Event::SocketUpdated(addr) => { + info!(?addr, "discv5 socket updated"); + } + Discv5Event::SessionEstablished(enr, addr) => { + debug!( + node_id = %enr.node_id(), + ?addr, + "discv5 session established" + ); + } + Discv5Event::TalkRequest(_) => { + // We don't handle TALKREQ for now + } + Discv5Event::NodeInserted { node_id, replaced } => { + debug!( + %node_id, + 
?replaced, + "Node inserted into routing table" + ); + } + _ => { + // Handle any new event types added in future versions + } + } + } + None => return None, + } + } + } + + pub fn enr_to_multiaddr(enr: &Enr<CombinedKey>) -> Option<Multiaddr> { + let ip = enr.ip4().map(IpAddr::V4).or_else(|| enr.ip6().map(IpAddr::V6))?; + let libp2p_port = enr.tcp4().or_else(|| enr.tcp6())?; + + let peer_id = enr_to_peer_id(enr)?; + + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Udp(libp2p_port)); + multiaddr.push(Protocol::QuicV1); + multiaddr.push(Protocol::P2p(peer_id)); + + Some(multiaddr) + } + + pub fn find_random_peers(&self) { + let random_node_id = generate_random_node_id(); + debug!(%random_node_id, "Starting random peer discovery lookup"); + + let discv5 = Arc::clone(&self.discv5); + tokio::spawn(async move { + match discv5.find_node(random_node_id).await { + Ok(nodes) => { + info!(count = nodes.len(), "Random lookup completed"); + } + Err(e) => { + warn!(error = ?e, "Random lookup failed"); + } + } + }); + } + + pub fn connected_peers(&self) -> usize { + self.discv5.connected_peers() + } +} + +fn keypair_to_enr_key(keypair: &Keypair) -> Result<CombinedKey> { + match keypair.key_type() { + libp2p_identity::KeyType::Secp256k1 => { + let secp_keypair = keypair + .clone() + .try_into_secp256k1() + .map_err(|_| anyhow!("Failed to convert to secp256k1"))?; + + let secret_bytes = secp_keypair.secret().to_bytes(); + let secret_key = k256::ecdsa::SigningKey::from_slice(&secret_bytes) + .map_err(|e| anyhow!("Failed to create signing key: {e}"))?; + + Ok(CombinedKey::Secp256k1(secret_key)) + } + other => Err(anyhow!("Unsupported key type for discv5: {:?}", other)), + } +} + +fn build_enr(key: &CombinedKey, ip: IpAddr, udp_port: u16, libp2p_port: u16) -> Result<Enr<CombinedKey>> { + let mut builder = EnrBuilder::default(); + + // libp2p port is stored in tcp field, since Enr doesn't have a field for a quic port + match ip { + IpAddr::V4(ipv4) => { + builder.ip4(ipv4); + builder.udp4(udp_port); + 
builder.tcp4(libp2p_port); + } + IpAddr::V6(ipv6) => { + builder.ip6(ipv6); + builder.udp6(udp_port); + builder.tcp6(libp2p_port); + } + } + + builder + .build(key) + .map_err(|e| anyhow!("Failed to build ENR: {e}")) +} + +fn enr_to_peer_id(enr: &Enr<CombinedKey>) -> Option<PeerId> { + let public_key = enr.public_key(); + + match public_key { + discv5::enr::CombinedPublicKey::Secp256k1(pk) => { + let compressed = pk.to_sec1_bytes(); + let libp2p_pk = libp2p_identity::secp256k1::PublicKey::try_from_bytes(&compressed).ok()?; + let public = libp2p_identity::PublicKey::from(libp2p_pk); + Some(PeerId::from_public_key(&public)) + } + _ => None, + } +} + +pub fn parse_enr(enr_str: &str) -> Result<Enr<CombinedKey>> { + enr_str + .parse() + .map_err(|e| anyhow!("Failed to parse ENR: {e}")) +} + +fn generate_random_node_id() -> NodeId { + let random_bytes: [u8; 32] = rand::random(); + NodeId::new(&random_bytes) +} From 117f1e7e76872748989a05e3a9ac046e76d971a1 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:34 +0200 Subject: [PATCH 14/27] feat: add discovery module export --- lean_client/networking/src/lib.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/lean_client/networking/src/lib.rs b/lean_client/networking/src/lib.rs index 2a54c28..d8cd874 100644 --- a/lean_client/networking/src/lib.rs +++ b/lean_client/networking/src/lib.rs @@ -1,5 +1,6 @@ pub mod bootnodes; pub mod compressor; +pub mod discovery; pub mod gossipsub; pub mod network; pub mod req_resp; From 91fb543fb8a1ceee6ccb124efb15b12770e4a96c Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:45 +0200 Subject: [PATCH 15/27] feat: add ENR bootnode support --- lean_client/networking/src/bootnodes.rs | 86 +++++++++++++++++++++++-- 1 file changed, 82 insertions(+), 4 deletions(-) diff --git a/lean_client/networking/src/bootnodes.rs b/lean_client/networking/src/bootnodes.rs index 264ec02..427f4ae 100644 --- a/lean_client/networking/src/bootnodes.rs +++ b/lean_client/networking/src/bootnodes.rs @@ 
-1,6 +1,11 @@ use std::sync::Arc; +use discv5::enr::CombinedKey; +use enr::Enr; use libp2p::Multiaddr; +use tracing::warn; + +use crate::discovery::{DiscoveryService, parse_enr}; pub trait BootnodeSource: Send + Sync { fn to_multiaddrs(&self) -> Vec<Multiaddr>; @@ -24,17 +29,90 @@ impl BootnodeSource for Arc<[Multiaddr]> { } } +#[derive(Debug, Clone)] +pub enum Bootnode { + Multiaddr(Multiaddr), + Enr(Enr<CombinedKey>), +} + +impl Bootnode { + pub fn parse(s: &str) -> Option<Self> { + if s.starts_with("enr:") { + match parse_enr(s) { + Ok(enr) => Some(Bootnode::Enr(enr)), + Err(e) => { + warn!(bootnode = s, error = ?e, "Failed to parse ENR bootnode"); + None + } + } + } else { + match s.parse::<Multiaddr>() { + Ok(addr) => Some(Bootnode::Multiaddr(addr)), + Err(e) => { + warn!(bootnode = s, error = ?e, "Failed to parse Multiaddr bootnode"); + None + } + } + } + } + + pub fn to_multiaddr(&self) -> Option<Multiaddr> { + match self { + Bootnode::Multiaddr(addr) => Some(addr.clone()), + Bootnode::Enr(enr) => DiscoveryService::enr_to_multiaddr(enr), + } + } + + pub fn as_enr(&self) -> Option<&Enr<CombinedKey>> { + match self { + Bootnode::Enr(enr) => Some(enr), + Bootnode::Multiaddr(_) => None, + } + } +} + #[derive(Debug, Clone, Default)] -pub struct StaticBootnodes(Vec<Multiaddr>); +pub struct StaticBootnodes { + multiaddrs: Vec<Multiaddr>, + enrs: Vec<Enr<CombinedKey>>, +} impl StaticBootnodes { - pub fn new<T: Into<Vec<Multiaddr>>>(addrs: T) -> Self { - StaticBootnodes(addrs.into()) + pub fn new(bootnodes: Vec<Bootnode>) -> Self { + let mut multiaddrs = Vec::new(); + let mut enrs = Vec::new(); + + for bootnode in bootnodes { + match bootnode { + Bootnode::Multiaddr(addr) => multiaddrs.push(addr), + Bootnode::Enr(enr) => { + // Convert ENR to multiaddr for libp2p connection + if let Some(addr) = DiscoveryService::enr_to_multiaddr(&enr) { + multiaddrs.push(addr); + } + enrs.push(enr); + } + } + } + + StaticBootnodes { multiaddrs, enrs } + } + + pub fn parse(bootnode_strs: &[String]) -> Self { + let bootnodes: Vec<Bootnode> = bootnode_strs + .iter() + .filter_map(|s| Bootnode::parse(s)) + .collect(); + 
Self::new(bootnodes) + } + + pub fn enrs(&self) -> &[Enr<CombinedKey>] { + &self.enrs } } impl BootnodeSource for StaticBootnodes { fn to_multiaddrs(&self) -> Vec<Multiaddr> { - self.0.clone() + self.multiaddrs.clone() } } From c81b048bd18f12e80cc395b5a7631bfc4a2b0aa7 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:48:55 +0200 Subject: [PATCH 16/27] feat: integrate discovery into network service --- lean_client/networking/src/network/service.rs | 81 +++++++++++++++++-- 1 file changed, 75 insertions(+), 6 deletions(-) diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 8ae5729..daa735c 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -26,6 +26,7 @@ use tracing::{debug, info, trace, warn}; use crate::{ bootnodes::{BootnodeSource, StaticBootnodes}, compressor::Compressor, + discovery::{DiscoveryConfig, DiscoveryService}, gossipsub::{self, config::GossipsubConfig, message::GossipsubMessage, topic::GossipsubKind}, network::behaviour::{LeanNetworkBehaviour, LeanNetworkBehaviourEvent}, req_resp::{self, BLOCKS_BY_ROOT_PROTOCOL_V1, LeanRequest, ReqRespMessage, STATUS_PROTOCOL_V1}, @@ -39,6 +40,8 @@ pub struct NetworkServiceConfig { pub gossipsub_config: GossipsubConfig, pub socket_address: IpAddr, pub socket_port: u16, + pub discovery_port: u16, + pub discovery_enabled: bool, bootnodes: StaticBootnodes, } @@ -47,22 +50,26 @@ impl NetworkServiceConfig { gossipsub_config: GossipsubConfig, socket_address: IpAddr, socket_port: u16, + discovery_port: u16, + discovery_enabled: bool, bootnodes: Vec<String>, ) -> Self { - let bootnodes = StaticBootnodes::new( - bootnodes - .iter() - .filter_map(|addr_str| addr_str.parse().ok()) - .collect::<Vec<_>>(), - ); + let bootnodes = StaticBootnodes::parse(&bootnodes); NetworkServiceConfig { gossipsub_config, socket_address, socket_port, + discovery_port, + discovery_enabled, bootnodes, } + } + + /// Get ENR bootnodes for discv5. 
+ pub fn enr_bootnodes(&self) -> Vec> { + self.bootnodes.enrs().to_vec() + } } #[derive(Debug)] @@ -83,6 +90,7 @@ where { network_config: Arc, swarm: Swarm, + discovery: Option, peer_table: Arc>>, peer_count: Arc, outbound_p2p_requests: R, @@ -147,9 +155,36 @@ where .with_swarm_config(|_| config) .build(); + let discovery = if network_config.discovery_enabled { + let discovery_config = DiscoveryConfig::new( + network_config.socket_address, + network_config.discovery_port, + network_config.socket_port, + ) + .with_bootnodes(network_config.enr_bootnodes()); + + match DiscoveryService::new(discovery_config, &local_key).await { + Ok(disc) => { + info!( + enr = %disc.local_enr(), + "Discovery service initialized" + ); + Some(disc) + } + Err(e) => { + warn!(error = ?e, "Failed to initialize discovery service, continuing without it"); + None + } + } + } else { + info!("Discovery service disabled"); + None + }; + let mut service = Self { network_config, swarm, + discovery, peer_table: Arc::new(Mutex::new(HashMap::new())), peer_count, outbound_p2p_requests, @@ -166,11 +201,24 @@ where // Periodic reconnect attempts to bootnodes let mut reconnect_interval = interval(Duration::from_secs(30)); reconnect_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + // Periodic discovery searches + let mut discovery_interval = interval(Duration::from_secs(30)); + discovery_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + loop { select! 
{ _ = reconnect_interval.tick() => { self.connect_to_peers(self.network_config.bootnodes.to_multiaddrs()).await; } + _ = discovery_interval.tick() => { + // Trigger active peer discovery + if let Some(ref discovery) = self.discovery { + let known_peers = discovery.connected_peers(); + debug!(known_peers, "Triggering random peer discovery lookup"); + discovery.find_random_peers(); + } + } request = self.outbound_p2p_requests.recv() => { if let Some(request) = request { self.dispatch_outbound_request(request).await; @@ -181,6 +229,23 @@ where info!(?event, "Swarm event"); } } + enr = async { + match &mut self.discovery { + Some(disc) => disc.recv().await, + None => std::future::pending().await, + } + } => { + if let Some(enr) = enr { + if let Some(multiaddr) = DiscoveryService::enr_to_multiaddr(&enr) { + info!( + node_id = %enr.node_id(), + %multiaddr, + "Discovered peer via discv5, attempting connection" + ); + self.connect_to_peers(vec![multiaddr]).await; + } + } + } } } } @@ -595,6 +660,10 @@ where *self.swarm.local_peer_id() } + pub fn local_enr(&self) -> Option<&enr::Enr> { + self.discovery.as_ref().map(|d| d.local_enr()) + } + pub fn swarm_mut(&mut self) -> &mut Swarm { &mut self.swarm } From 1987e788bcf2a16a3ea382207d82452f667fcfbb Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:49:07 +0200 Subject: [PATCH 17/27] feat: add discovery CLI arguments --- lean_client/src/main.rs | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index ad2276e..e19e186 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -120,6 +120,12 @@ struct Args { #[arg(short, long, default_value_t = 8083)] port: u16, + #[arg(short, long, default_value_t = 8084)] + discovery_port: u16, + + #[arg(long, default_value_t = false)] + disable_discovery: bool, + #[arg(short, long)] bootnodes: Vec, @@ -297,10 +303,14 @@ async fn main() { let mut gossipsub_config = GossipsubConfig::new(); 
gossipsub_config.set_topics(gossipsub_topics); + let discovery_enabled = !args.disable_discovery; + let network_service_config = Arc::new(NetworkServiceConfig::new( gossipsub_config, args.address, args.port, + args.discovery_port, + discovery_enabled, args.bootnodes, )); From 605f812c71fc9b807fb2e9e5178059f3b0f14723 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 19:49:18 +0200 Subject: [PATCH 18/27] test: add discovery protocol tests --- lean_client/networking/src/discovery/tests.rs | 1422 +++++++++++++++++ 1 file changed, 1422 insertions(+) create mode 100644 lean_client/networking/src/discovery/tests.rs diff --git a/lean_client/networking/src/discovery/tests.rs b/lean_client/networking/src/discovery/tests.rs new file mode 100644 index 0000000..6566e29 --- /dev/null +++ b/lean_client/networking/src/discovery/tests.rs @@ -0,0 +1,1422 @@ +//! Tests for Discovery v5 Protocol Specification +//! +//! Based on the official Discovery v5 specification and test vectors from: +//! 
https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire-test-vectors.md + +use std::net::{Ipv4Addr, Ipv6Addr}; + +/// Protocol constants matching Discovery v5 specification +mod constants { + /// Protocol identifier + pub const PROTOCOL_ID: &[u8] = b"discv5"; + /// Protocol version (v5.1) + pub const PROTOCOL_VERSION: u16 = 0x0001; + /// Maximum request ID length in bytes + pub const MAX_REQUEST_ID_LENGTH: usize = 8; + /// K-bucket size per Kademlia standard + pub const K_BUCKET_SIZE: usize = 16; + /// Alpha (lookup concurrency) + pub const ALPHA: usize = 3; + /// Number of buckets for 256-bit node ID space + pub const BUCKET_COUNT: usize = 256; + /// Request timeout in seconds (spec: 500ms) + pub const REQUEST_TIMEOUT_SECS: f64 = 0.5; + /// Handshake timeout in seconds + pub const HANDSHAKE_TIMEOUT_SECS: f64 = 1.0; + /// Maximum ENRs per NODES response + pub const MAX_NODES_RESPONSE: usize = 16; + /// Bond expiry in seconds (24 hours) + pub const BOND_EXPIRY_SECS: u64 = 86400; + /// Maximum packet size + pub const MAX_PACKET_SIZE: usize = 1280; + /// Minimum packet size + pub const MIN_PACKET_SIZE: usize = 63; +} + +/// Packet type flags +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PacketFlag { + Message = 0, + WhoAreYou = 1, + Handshake = 2, +} + +/// Message type codes matching wire protocol spec +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum MessageType { + Ping = 0x01, + Pong = 0x02, + FindNode = 0x03, + Nodes = 0x04, + TalkReq = 0x05, + TalkResp = 0x06, + RegTopic = 0x07, + Ticket = 0x08, + RegConfirmation = 0x09, + TopicQuery = 0x0A, +} + +/// Request ID (variable length, max 8 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RequestId(pub Vec); + +impl RequestId { + pub fn new(data: Vec) -> Self { + assert!(data.len() <= constants::MAX_REQUEST_ID_LENGTH); + Self(data) + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// 
IPv4 address (4 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IPv4(pub [u8; 4]); + +impl IPv4 { + pub fn new(bytes: [u8; 4]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 4 + } +} + +/// IPv6 address (16 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IPv6(pub [u8; 16]); + +impl IPv6 { + pub fn new(bytes: [u8; 16]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 16 + } +} + +/// ID Nonce (16 bytes / 128 bits) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IdNonce(pub [u8; 16]); + +impl IdNonce { + pub fn new(bytes: [u8; 16]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 16 + } +} + +/// Nonce (12 bytes / 96 bits) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Nonce(pub [u8; 12]); + +impl Nonce { + pub fn new(bytes: [u8; 12]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 12 + } +} + +/// Distance type (u16) +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct Distance(pub u16); + +/// Port type (u16) +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Port(pub u16); + +/// ENR sequence number (u64) +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub struct SeqNumber(pub u64); + +/// Node ID (32 bytes / 256 bits) +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct NodeId(pub [u8; 32]); + +impl NodeId { + pub fn new(bytes: [u8; 32]) -> Self { + Self(bytes) + } + + pub fn from_slice(slice: &[u8]) -> Self { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(slice); + Self(bytes) + } +} + +/// Discovery configuration +#[derive(Debug, Clone)] +pub struct DiscoveryConfig { + pub k_bucket_size: usize, + pub alpha: usize, + pub request_timeout_secs: f64, + pub handshake_timeout_secs: f64, + pub max_nodes_response: usize, + pub bond_expiry_secs: u64, +} + +impl Default for DiscoveryConfig { + fn default() -> Self { + Self { + k_bucket_size: constants::K_BUCKET_SIZE, + alpha: constants::ALPHA, + request_timeout_secs: 
constants::REQUEST_TIMEOUT_SECS, + handshake_timeout_secs: constants::HANDSHAKE_TIMEOUT_SECS, + max_nodes_response: constants::MAX_NODES_RESPONSE, + bond_expiry_secs: constants::BOND_EXPIRY_SECS, + } + } +} + +/// PING message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Ping { + pub request_id: RequestId, + pub enr_seq: SeqNumber, +} + +/// PONG message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Pong { + pub request_id: RequestId, + pub enr_seq: SeqNumber, + pub recipient_ip: Vec, + pub recipient_port: Port, +} + +/// FINDNODE message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct FindNode { + pub request_id: RequestId, + pub distances: Vec, +} + +/// NODES message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Nodes { + pub request_id: RequestId, + pub total: u8, + pub enrs: Vec>, +} + +/// TALKREQ message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TalkReq { + pub request_id: RequestId, + pub protocol: Vec, + pub request: Vec, +} + +/// TALKRESP message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TalkResp { + pub request_id: RequestId, + pub response: Vec, +} + +/// Static header +#[derive(Debug, Clone)] +pub struct StaticHeader { + pub protocol_id: [u8; 6], + pub version: u16, + pub flag: u8, + pub nonce: Nonce, + pub authdata_size: u16, +} + +impl StaticHeader { + pub fn new(flag: u8, nonce: Nonce, authdata_size: u16) -> Self { + Self { + protocol_id: *b"discv5", + version: constants::PROTOCOL_VERSION, + flag, + nonce, + authdata_size, + } + } +} + +/// WHOAREYOU authdata +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WhoAreYouAuthdata { + pub id_nonce: IdNonce, + pub enr_seq: SeqNumber, +} + +/// Node entry in routing table +#[derive(Debug, Clone)] +pub struct NodeEntry { + pub node_id: NodeId, + pub enr_seq: SeqNumber, + pub last_seen: f64, + pub endpoint: Option, + pub verified: bool, +} + +impl NodeEntry { + pub fn new(node_id: NodeId) -> Self { + Self { + node_id, + enr_seq: SeqNumber::default(), + last_seen: 
0.0, + endpoint: None, + verified: false, + } + } + + pub fn with_enr_seq(mut self, enr_seq: SeqNumber) -> Self { + self.enr_seq = enr_seq; + self + } + + pub fn with_last_seen(mut self, last_seen: f64) -> Self { + self.last_seen = last_seen; + self + } + + pub fn with_endpoint(mut self, endpoint: String) -> Self { + self.endpoint = Some(endpoint); + self + } + + pub fn with_verified(mut self, verified: bool) -> Self { + self.verified = verified; + self + } +} + +/// K-bucket for storing nodes at a specific distance +#[derive(Debug, Clone, Default)] +pub struct KBucket { + nodes: Vec, +} + +impl KBucket { + pub fn new() -> Self { + Self { nodes: Vec::new() } + } + + pub fn is_empty(&self) -> bool { + self.nodes.is_empty() + } + + pub fn is_full(&self) -> bool { + self.nodes.len() >= constants::K_BUCKET_SIZE + } + + pub fn len(&self) -> usize { + self.nodes.len() + } + + pub fn add(&mut self, entry: NodeEntry) -> bool { + // Check if node already exists + if let Some(pos) = self.nodes.iter().position(|e| e.node_id == entry.node_id) { + // Move to tail (most recent) + self.nodes.remove(pos); + self.nodes.push(entry); + return true; + } + + // Reject if full + if self.is_full() { + return false; + } + + self.nodes.push(entry); + true + } + + pub fn remove(&mut self, node_id: &NodeId) -> bool { + if let Some(pos) = self.nodes.iter().position(|e| &e.node_id == node_id) { + self.nodes.remove(pos); + true + } else { + false + } + } + + pub fn contains(&self, node_id: &NodeId) -> bool { + self.nodes.iter().any(|e| &e.node_id == node_id) + } + + pub fn get(&self, node_id: &NodeId) -> Option<&NodeEntry> { + self.nodes.iter().find(|e| &e.node_id == node_id) + } + + pub fn head(&self) -> Option<&NodeEntry> { + self.nodes.first() + } + + pub fn tail(&self) -> Option<&NodeEntry> { + self.nodes.last() + } + + pub fn iter(&self) -> impl Iterator { + self.nodes.iter() + } +} + +/// Calculate XOR distance between two node IDs +pub fn xor_distance(a: &NodeId, b: &NodeId) -> 
num_bigint::BigUint { + use num_bigint::BigUint; + + let a_int = BigUint::from_bytes_be(&a.0); + let b_int = BigUint::from_bytes_be(&b.0); + a_int ^ b_int +} + +/// Calculate log2 distance between two node IDs +pub fn log2_distance(a: &NodeId, b: &NodeId) -> Distance { + let xor = xor_distance(a, b); + if xor.bits() == 0 { + Distance(0) + } else { + Distance(xor.bits() as u16) + } +} + +/// Kademlia routing table +pub struct RoutingTable { + local_id: NodeId, + pub buckets: Vec, +} + +impl RoutingTable { + pub fn new(local_id: NodeId) -> Self { + let buckets = (0..constants::BUCKET_COUNT) + .map(|_| KBucket::new()) + .collect(); + Self { local_id, buckets } + } + + pub fn node_count(&self) -> usize { + self.buckets.iter().map(|b| b.len()).sum() + } + + pub fn bucket_index(&self, node_id: &NodeId) -> usize { + let dist = log2_distance(&self.local_id, node_id); + if dist.0 == 0 { + 0 + } else { + (dist.0 - 1) as usize + } + } + + pub fn add(&mut self, entry: NodeEntry) -> bool { + // Cannot add self + if entry.node_id == self.local_id { + return false; + } + + let idx = self.bucket_index(&entry.node_id); + self.buckets[idx].add(entry) + } + + pub fn remove(&mut self, node_id: &NodeId) -> bool { + let idx = self.bucket_index(node_id); + self.buckets[idx].remove(node_id) + } + + pub fn contains(&self, node_id: &NodeId) -> bool { + let idx = self.bucket_index(node_id); + self.buckets[idx].contains(node_id) + } + + pub fn get(&self, node_id: &NodeId) -> Option<&NodeEntry> { + let idx = self.bucket_index(node_id); + self.buckets[idx].get(node_id) + } + + pub fn closest_nodes(&self, target: &NodeId, count: usize) -> Vec<&NodeEntry> { + let mut all_nodes: Vec<&NodeEntry> = self + .buckets + .iter() + .flat_map(|b| b.iter()) + .collect(); + + all_nodes.sort_by(|a, b| { + let dist_a = xor_distance(&a.node_id, target); + let dist_b = xor_distance(&b.node_id, target); + dist_a.cmp(&dist_b) + }); + + all_nodes.into_iter().take(count).collect() + } + + pub fn 
nodes_at_distance(&self, distance: Distance) -> Vec<&NodeEntry> { + if distance.0 == 0 || distance.0 > 256 { + return Vec::new(); + } + + let idx = (distance.0 - 1) as usize; + self.buckets[idx].iter().collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use num_bigint::BigUint; + use num_traits::One; + + // ============================================================ + // Protocol Constants Tests + // ============================================================ + + mod protocol_constants { + use super::*; + + #[test] + fn test_protocol_id() { + assert_eq!(constants::PROTOCOL_ID, b"discv5"); + assert_eq!(constants::PROTOCOL_ID.len(), 6); + } + + #[test] + fn test_protocol_version() { + assert_eq!(constants::PROTOCOL_VERSION, 0x0001); + } + + #[test] + fn test_max_request_id_length() { + assert_eq!(constants::MAX_REQUEST_ID_LENGTH, 8); + } + + #[test] + fn test_k_bucket_size() { + assert_eq!(constants::K_BUCKET_SIZE, 16); + } + + #[test] + fn test_alpha_concurrency() { + assert_eq!(constants::ALPHA, 3); + } + + #[test] + fn test_bucket_count() { + assert_eq!(constants::BUCKET_COUNT, 256); + } + + #[test] + fn test_request_timeout() { + assert!((constants::REQUEST_TIMEOUT_SECS - 0.5).abs() < f64::EPSILON); + } + + #[test] + fn test_handshake_timeout() { + assert!((constants::HANDSHAKE_TIMEOUT_SECS - 1.0).abs() < f64::EPSILON); + } + + #[test] + fn test_max_nodes_response() { + assert_eq!(constants::MAX_NODES_RESPONSE, 16); + } + + #[test] + fn test_bond_expiry() { + assert_eq!(constants::BOND_EXPIRY_SECS, 86400); + } + + #[test] + fn test_packet_size_limits() { + assert_eq!(constants::MAX_PACKET_SIZE, 1280); + assert_eq!(constants::MIN_PACKET_SIZE, 63); + } + } + + // ============================================================ + // Custom Types Tests + // ============================================================ + + mod custom_types { + use super::*; + + #[test] + fn test_request_id_limit() { + let req_id = RequestId::new(vec![0x01, 0x02, 0x03, 0x04, 
0x05, 0x06, 0x07, 0x08]); + assert_eq!(req_id.len(), 8); + } + + #[test] + fn test_request_id_variable_length() { + let req_id = RequestId::new(vec![0x01]); + assert_eq!(req_id.len(), 1); + } + + #[test] + fn test_ipv4_length() { + let ip = IPv4::new([0xc0, 0xa8, 0x01, 0x01]); // 192.168.1.1 + assert_eq!(ip.len(), 4); + } + + #[test] + fn test_ipv6_length() { + let mut bytes = [0u8; 16]; + bytes[15] = 0x01; // ::1 + let ip = IPv6::new(bytes); + assert_eq!(ip.len(), 16); + } + + #[test] + fn test_id_nonce_length() { + let nonce = IdNonce::new([0x01; 16]); + assert_eq!(nonce.len(), 16); + } + + #[test] + fn test_nonce_length() { + let nonce = Nonce::new([0x01; 12]); + assert_eq!(nonce.len(), 12); + } + + #[test] + fn test_distance_type() { + let d = Distance(256); + assert_eq!(d.0, 256u16); + } + + #[test] + fn test_port_type() { + let p = Port(30303); + assert_eq!(p.0, 30303u16); + } + + #[test] + fn test_enr_seq_type() { + let seq = SeqNumber(42); + assert_eq!(seq.0, 42u64); + } + } + + // ============================================================ + // Packet Flag Tests + // ============================================================ + + mod packet_flags { + use super::*; + + #[test] + fn test_message_flag() { + assert_eq!(PacketFlag::Message as u8, 0); + } + + #[test] + fn test_whoareyou_flag() { + assert_eq!(PacketFlag::WhoAreYou as u8, 1); + } + + #[test] + fn test_handshake_flag() { + assert_eq!(PacketFlag::Handshake as u8, 2); + } + } + + // ============================================================ + // Message Types Tests + // ============================================================ + + mod message_types { + use super::*; + + #[test] + fn test_ping_type() { + assert_eq!(MessageType::Ping as u8, 0x01); + } + + #[test] + fn test_pong_type() { + assert_eq!(MessageType::Pong as u8, 0x02); + } + + #[test] + fn test_findnode_type() { + assert_eq!(MessageType::FindNode as u8, 0x03); + } + + #[test] + fn test_nodes_type() { + assert_eq!(MessageType::Nodes 
as u8, 0x04); + } + + #[test] + fn test_talkreq_type() { + assert_eq!(MessageType::TalkReq as u8, 0x05); + } + + #[test] + fn test_talkresp_type() { + assert_eq!(MessageType::TalkResp as u8, 0x06); + } + + #[test] + fn test_experimental_types() { + assert_eq!(MessageType::RegTopic as u8, 0x07); + assert_eq!(MessageType::Ticket as u8, 0x08); + assert_eq!(MessageType::RegConfirmation as u8, 0x09); + assert_eq!(MessageType::TopicQuery as u8, 0x0A); + } + } + + // ============================================================ + // Discovery Config Tests + // ============================================================ + + mod discovery_config { + use super::*; + + #[test] + fn test_default_values() { + let config = DiscoveryConfig::default(); + + assert_eq!(config.k_bucket_size, constants::K_BUCKET_SIZE); + assert_eq!(config.alpha, constants::ALPHA); + assert!((config.request_timeout_secs - constants::REQUEST_TIMEOUT_SECS).abs() < f64::EPSILON); + assert!((config.handshake_timeout_secs - constants::HANDSHAKE_TIMEOUT_SECS).abs() < f64::EPSILON); + assert_eq!(config.max_nodes_response, constants::MAX_NODES_RESPONSE); + assert_eq!(config.bond_expiry_secs, constants::BOND_EXPIRY_SECS); + } + + #[test] + fn test_custom_values() { + let config = DiscoveryConfig { + k_bucket_size: 8, + alpha: 5, + request_timeout_secs: 2.0, + ..Default::default() + }; + assert_eq!(config.k_bucket_size, 8); + assert_eq!(config.alpha, 5); + assert!((config.request_timeout_secs - 2.0).abs() < f64::EPSILON); + } + } + + // ============================================================ + // Ping Message Tests + // ============================================================ + + mod ping_message { + use super::*; + + #[test] + fn test_creation_with_types() { + let ping = Ping { + request_id: RequestId::new(vec![0x00, 0x00, 0x00, 0x01]), + enr_seq: SeqNumber(2), + }; + + assert_eq!(ping.request_id.0, vec![0x00, 0x00, 0x00, 0x01]); + assert_eq!(ping.enr_seq, SeqNumber(2)); + } + + #[test] + fn 
test_max_request_id_length() { + let ping = Ping { + request_id: RequestId::new(vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), + enr_seq: SeqNumber(1), + }; + assert_eq!(ping.request_id.len(), 8); + } + } + + // ============================================================ + // Pong Message Tests + // ============================================================ + + mod pong_message { + use super::*; + + #[test] + fn test_creation_ipv4() { + let pong = Pong { + request_id: RequestId::new(vec![0x00, 0x00, 0x00, 0x01]), + enr_seq: SeqNumber(42), + recipient_ip: vec![0xc0, 0xa8, 0x01, 0x01], // 192.168.1.1 + recipient_port: Port(9000), + }; + + assert_eq!(pong.enr_seq, SeqNumber(42)); + assert_eq!(pong.recipient_ip.len(), 4); + assert_eq!(pong.recipient_port, Port(9000)); + } + + #[test] + fn test_creation_ipv6() { + let mut ipv6 = vec![0u8; 16]; + ipv6[15] = 0x01; // ::1 + let pong = Pong { + request_id: RequestId::new(vec![0x01]), + enr_seq: SeqNumber(1), + recipient_ip: ipv6.clone(), + recipient_port: Port(30303), + }; + + assert_eq!(pong.recipient_ip.len(), 16); + } + } + + // ============================================================ + // FindNode Message Tests + // ============================================================ + + mod findnode_message { + use super::*; + + #[test] + fn test_single_distance() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(256)], + }; + + assert_eq!(findnode.distances, vec![Distance(256)]); + } + + #[test] + fn test_multiple_distances() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(0), Distance(1), Distance(255), Distance(256)], + }; + + assert!(findnode.distances.contains(&Distance(0))); + assert!(findnode.distances.contains(&Distance(256))); + } + + #[test] + fn test_distance_zero_returns_self() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(0)], + }; + 
assert_eq!(findnode.distances, vec![Distance(0)]); + } + } + + // ============================================================ + // Nodes Message Tests + // ============================================================ + + mod nodes_message { + use super::*; + + #[test] + fn test_single_response() { + let nodes = Nodes { + request_id: RequestId::new(vec![0x01]), + total: 1, + enrs: vec![b"enr:-example".to_vec()], + }; + + assert_eq!(nodes.total, 1); + assert_eq!(nodes.enrs.len(), 1); + } + + #[test] + fn test_multiple_responses() { + let nodes = Nodes { + request_id: RequestId::new(vec![0x01]), + total: 3, + enrs: vec![b"enr1".to_vec(), b"enr2".to_vec()], + }; + + assert_eq!(nodes.total, 3); + assert_eq!(nodes.enrs.len(), 2); + } + } + + // ============================================================ + // TalkReq Message Tests + // ============================================================ + + mod talkreq_message { + use super::*; + + #[test] + fn test_creation() { + let req = TalkReq { + request_id: RequestId::new(vec![0x01]), + protocol: b"portal".to_vec(), + request: b"payload".to_vec(), + }; + + assert_eq!(req.protocol, b"portal".to_vec()); + assert_eq!(req.request, b"payload".to_vec()); + } + } + + // ============================================================ + // TalkResp Message Tests + // ============================================================ + + mod talkresp_message { + use super::*; + + #[test] + fn test_creation() { + let resp = TalkResp { + request_id: RequestId::new(vec![0x01]), + response: b"response_data".to_vec(), + }; + + assert_eq!(resp.response, b"response_data".to_vec()); + } + + #[test] + fn test_empty_response_unknown_protocol() { + let resp = TalkResp { + request_id: RequestId::new(vec![0x01]), + response: Vec::new(), + }; + assert!(resp.response.is_empty()); + } + } + + // ============================================================ + // Static Header Tests + // ============================================================ + + mod 
static_header { + use super::*; + + #[test] + fn test_default_protocol_id() { + let header = StaticHeader::new(0, Nonce::new([0x00; 12]), 32); + + assert_eq!(&header.protocol_id, b"discv5"); + assert_eq!(header.version, 0x0001); + } + + #[test] + fn test_flag_values() { + for flag in [0u8, 1, 2] { + let header = StaticHeader::new(flag, Nonce::new([0xff; 12]), 32); + assert_eq!(header.flag, flag); + } + } + } + + // ============================================================ + // WhoAreYou Authdata Tests + // ============================================================ + + mod whoareyou_authdata { + use super::*; + + #[test] + fn test_creation() { + let id_nonce_bytes: [u8; 16] = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + ]; + let authdata = WhoAreYouAuthdata { + id_nonce: IdNonce::new(id_nonce_bytes), + enr_seq: SeqNumber(0), + }; + + assert_eq!(authdata.id_nonce.len(), 16); + assert_eq!(authdata.enr_seq, SeqNumber(0)); + } + } + + // ============================================================ + // XOR Distance Tests + // ============================================================ + + mod xor_distance_tests { + use super::*; + + #[test] + fn test_identical_ids_zero_distance() { + let node_id = NodeId::new([0x00; 32]); + assert_eq!(xor_distance(&node_id, &node_id), BigUint::from(0u32)); + } + + #[test] + fn test_complementary_ids_max_distance() { + let a = NodeId::new([0x00; 32]); + let b = NodeId::new([0xff; 32]); + let expected = (BigUint::one() << 256) - BigUint::one(); + assert_eq!(xor_distance(&a, &b), expected); + } + + #[test] + fn test_distance_is_symmetric() { + let a = NodeId::new([0x12; 32]); + let b = NodeId::new([0x34; 32]); + assert_eq!(xor_distance(&a, &b), xor_distance(&b, &a)); + } + + #[test] + fn test_specific_xor_values() { + let mut a_bytes = [0x00; 32]; + a_bytes[31] = 0x05; // 5 + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x03; // 3 + let a = NodeId::new(a_bytes); + let b = 
NodeId::new(b_bytes); + assert_eq!(xor_distance(&a, &b), BigUint::from(6u32)); // 5 XOR 3 = 6 + } + } + + // ============================================================ + // Log2 Distance Tests + // ============================================================ + + mod log2_distance_tests { + use super::*; + + #[test] + fn test_identical_ids_return_zero() { + let node_id = NodeId::new([0x00; 32]); + assert_eq!(log2_distance(&node_id, &node_id), Distance(0)); + } + + #[test] + fn test_single_bit_difference() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x01; + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(1)); + } + + #[test] + fn test_high_bit_difference() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x80; // 0b10000000 + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(8)); + } + + #[test] + fn test_maximum_distance() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[0] = 0x80; // High bit of first byte set + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(256)); + } + } + + // ============================================================ + // K-Bucket Tests + // ============================================================ + + mod kbucket_tests { + use super::*; + + #[test] + fn test_new_bucket_is_empty() { + let bucket = KBucket::new(); + + assert!(bucket.is_empty()); + assert!(!bucket.is_full()); + assert_eq!(bucket.len(), 0); + } + + #[test] + fn test_add_single_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + + assert!(bucket.add(entry)); + assert_eq!(bucket.len(), 1); + assert!(bucket.contains(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_bucket_capacity_limit() { + let mut bucket = KBucket::new(); + + for i in 0..constants::K_BUCKET_SIZE { + let mut bytes = [0x00; 32]; + bytes[0] = i as u8; + let entry = 
NodeEntry::new(NodeId::new(bytes)); + assert!(bucket.add(entry)); + } + + assert!(bucket.is_full()); + assert_eq!(bucket.len(), constants::K_BUCKET_SIZE); + + let extra = NodeEntry::new(NodeId::new([0xff; 32])); + assert!(!bucket.add(extra)); + assert_eq!(bucket.len(), constants::K_BUCKET_SIZE); + } + + #[test] + fn test_update_moves_to_tail() { + let mut bucket = KBucket::new(); + + let entry1 = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(1)); + let entry2 = NodeEntry::new(NodeId::new([0x02; 32])).with_enr_seq(SeqNumber(1)); + bucket.add(entry1); + bucket.add(entry2); + + let updated = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(2)); + bucket.add(updated); + + let tail = bucket.tail().unwrap(); + assert_eq!(tail.node_id, NodeId::new([0x01; 32])); + assert_eq!(tail.enr_seq, SeqNumber(2)); + } + + #[test] + fn test_remove_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + bucket.add(entry); + + assert!(bucket.remove(&NodeId::new([0x01; 32]))); + assert!(bucket.is_empty()); + assert!(!bucket.contains(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_remove_nonexistent_returns_false() { + let mut bucket = KBucket::new(); + assert!(!bucket.remove(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_get_existing_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(42)); + bucket.add(entry); + + let retrieved = bucket.get(&NodeId::new([0x01; 32])).unwrap(); + assert_eq!(retrieved.enr_seq, SeqNumber(42)); + } + + #[test] + fn test_get_nonexistent_returns_none() { + let bucket = KBucket::new(); + assert!(bucket.get(&NodeId::new([0x01; 32])).is_none()); + } + + #[test] + fn test_head_returns_oldest() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let head = bucket.head().unwrap(); + assert_eq!(head.node_id, 
NodeId::new([0x01; 32])); + } + + #[test] + fn test_tail_returns_newest() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let tail = bucket.tail().unwrap(); + assert_eq!(tail.node_id, NodeId::new([0x02; 32])); + } + + #[test] + fn test_iteration() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let node_ids: Vec<_> = bucket.iter().map(|e| e.node_id.clone()).collect(); + assert_eq!(node_ids.len(), 2); + } + } + + // ============================================================ + // Routing Table Tests + // ============================================================ + + mod routing_table_tests { + use super::*; + + #[test] + fn test_new_table_is_empty() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert_eq!(table.node_count(), 0); + } + + #[test] + fn test_has_256_buckets() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert_eq!(table.buckets.len(), constants::BUCKET_COUNT); + } + + #[test] + fn test_add_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + node_bytes[31] = 0x01; + let entry = NodeEntry::new(NodeId::new(node_bytes)); + assert!(table.add(entry.clone())); + assert_eq!(table.node_count(), 1); + assert!(table.contains(&entry.node_id)); + } + + #[test] + fn test_cannot_add_self() { + let local_id = NodeId::new([0xab; 32]); + let mut table = RoutingTable::new(local_id.clone()); + + let entry = NodeEntry::new(local_id); + assert!(!table.add(entry)); + assert_eq!(table.node_count(), 0); + } + + #[test] + fn test_bucket_assignment_by_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + 
node_bytes[31] = 0x01; // log2 distance = 1 + let node_id = NodeId::new(node_bytes); + let entry = NodeEntry::new(node_id.clone()); + table.add(entry); + + let bucket_idx = table.bucket_index(&node_id); + assert_eq!(bucket_idx, 0); // distance 1 -> bucket 0 + assert!(table.buckets[0].contains(&node_id)); + } + + #[test] + fn test_get_existing_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let entry = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(99)); + let node_id = entry.node_id.clone(); + table.add(entry); + + let retrieved = table.get(&node_id).unwrap(); + assert_eq!(retrieved.enr_seq, SeqNumber(99)); + } + + #[test] + fn test_remove_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + let node_id = entry.node_id.clone(); + table.add(entry); + assert!(table.remove(&node_id)); + assert!(!table.contains(&node_id)); + } + + #[test] + fn test_closest_nodes_sorted_by_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + for i in 1..5u8 { + let mut bytes = [0x00; 32]; + bytes[0] = i; + let entry = NodeEntry::new(NodeId::new(bytes)); + table.add(entry); + } + + let mut target_bytes = [0x00; 32]; + target_bytes[0] = 0x01; + let target = NodeId::new(target_bytes); + let closest = table.closest_nodes(&target, 3); + + assert_eq!(closest.len(), 3); + assert_eq!(closest[0].node_id, target); // Distance 0 to itself + } + + #[test] + fn test_closest_nodes_respects_count() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + for i in 1..11u8 { + let mut bytes = [0x00; 32]; + bytes[0] = i; + let entry = NodeEntry::new(NodeId::new(bytes)); + table.add(entry); + } + + let mut target_bytes = [0x00; 32]; + target_bytes[0] = 0x05; + let closest = table.closest_nodes(&NodeId::new(target_bytes), 3); + 
assert_eq!(closest.len(), 3); + } + + #[test] + fn test_nodes_at_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + node_bytes[31] = 0x01; // distance 1 + let node_id = NodeId::new(node_bytes); + let entry = NodeEntry::new(node_id.clone()); + table.add(entry); + + let nodes = table.nodes_at_distance(Distance(1)); + assert_eq!(nodes.len(), 1); + assert_eq!(nodes[0].node_id, node_id); + } + + #[test] + fn test_nodes_at_invalid_distance() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert!(table.nodes_at_distance(Distance(0)).is_empty()); + assert!(table.nodes_at_distance(Distance(257)).is_empty()); + } + } + + // ============================================================ + // Node Entry Tests + // ============================================================ + + mod node_entry_tests { + use super::*; + + #[test] + fn test_default_values() { + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + + assert_eq!(entry.node_id, NodeId::new([0x01; 32])); + assert_eq!(entry.enr_seq, SeqNumber(0)); + assert!((entry.last_seen - 0.0).abs() < f64::EPSILON); + assert!(entry.endpoint.is_none()); + assert!(!entry.verified); + } + + #[test] + fn test_full_construction() { + let entry = NodeEntry::new(NodeId::new([0x01; 32])) + .with_enr_seq(SeqNumber(42)) + .with_last_seen(1234567890.0) + .with_endpoint("192.168.1.1:30303".to_string()) + .with_verified(true); + + assert_eq!(entry.enr_seq, SeqNumber(42)); + assert_eq!(entry.endpoint, Some("192.168.1.1:30303".to_string())); + assert!(entry.verified); + } + } + + // ============================================================ + // Test Vector Tests + // ============================================================ + + mod test_vectors { + use super::*; + + // From https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire-test-vectors.md + const PING_REQUEST_ID: [u8; 4] = [0x00, 0x00, 0x00, 
0x01]; + const PING_ENR_SEQ: u64 = 2; + const WHOAREYOU_ID_NONCE: [u8; 16] = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, + 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + ]; + + #[test] + fn test_ping_message_construction() { + let ping = Ping { + request_id: RequestId::new(PING_REQUEST_ID.to_vec()), + enr_seq: SeqNumber(PING_ENR_SEQ), + }; + + assert_eq!(ping.request_id.0, PING_REQUEST_ID.to_vec()); + assert_eq!(ping.enr_seq, SeqNumber(2)); + } + + #[test] + fn test_whoareyou_authdata_construction() { + let authdata = WhoAreYouAuthdata { + id_nonce: IdNonce::new(WHOAREYOU_ID_NONCE), + enr_seq: SeqNumber(0), + }; + + assert_eq!(authdata.id_nonce, IdNonce::new(WHOAREYOU_ID_NONCE)); + assert_eq!(authdata.enr_seq, SeqNumber(0)); + } + + #[test] + fn test_plaintext_message_type() { + // From AES-GCM test vector plaintext + let plaintext = hex::decode("01c20101").unwrap(); + assert_eq!(plaintext[0], MessageType::Ping as u8); + } + } + + // ============================================================ + // Packet Structure Tests + // ============================================================ + + mod packet_structure { + #[test] + fn test_static_header_size() { + // protocol-id (6) + version (2) + flag (1) + nonce (12) + authdata-size (2) + let expected_size = 6 + 2 + 1 + 12 + 2; + assert_eq!(expected_size, 23); + } + } + + // ============================================================ + // Routing with Test Vector Node IDs + // ============================================================ + + mod routing_test_vectors { + use super::*; + + // Node IDs from official test vectors (keccak256 of uncompressed pubkey) + fn node_a_id() -> NodeId { + NodeId::from_slice(&hex::decode("aaaa8419e9f49d0083561b48287df592939a8d19947d8c0ef88f2a4856a69fbb").unwrap()) + } + + fn node_b_id() -> NodeId { + NodeId::from_slice(&hex::decode("bbbb9d047f0488c0b5a93c1c3f2d8bafc7c8ff337024a55434a0d0555de64db9").unwrap()) + } + + #[test] + fn test_xor_distance_is_symmetric() { + let 
node_a = node_a_id(); + let node_b = node_b_id(); + + let distance = xor_distance(&node_a, &node_b); + assert!(distance > BigUint::from(0u32)); + assert_eq!(xor_distance(&node_a, &node_b), xor_distance(&node_b, &node_a)); + } + + #[test] + fn test_log2_distance_is_high() { + let node_a = node_a_id(); + let node_b = node_b_id(); + + let log_dist = log2_distance(&node_a, &node_b); + assert!(log_dist > Distance(200)); + } + } +} From 6e3040d4e1d6094e5edc9ebf2b43341601ae4eaf Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:26:35 +0200 Subject: [PATCH 19/27] fix: update outdated readme --- README.md | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 5028b8f..d473719 100644 --- a/README.md +++ b/README.md @@ -23,12 +23,13 @@ leanEthereum Consensus Client written in Rust using Grandine's libraries. Run in debug mode via terminal (with XMSS signing): ``` RUST_LOG=info ./target/release/lean_client \ - --genesis ../lean-quickstart/local-devnet/genesis/config.yaml \ - --validator-registry-path ../lean-quickstart/local-devnet/genesis/validators.yaml \ - --hash-sig-key-dir ../lean-quickstart/local-devnet/genesis/hash-sig-keys \ + --genesis ../../lean-quickstart/local-devnet/genesis/config.yaml \ + --validator-registry-path ../../lean-quickstart/local-devnet/genesis/validators.yaml \ + --hash-sig-key-dir ../../lean-quickstart/local-devnet/genesis/hash-sig-keys \ --node-id qlean_0 \ - --node-key ../lean-quickstart/local-devnet/genesis/qlean_0.key \ + --node-key ../../lean-quickstart/local-devnet/genesis/qlean_0.key \ --port 9003 \ + --disable-discovery --bootnodes "/ip4/127.0.0.1/udp/9001/quic-v1/p2p/16Uiu2HAkvi2sxT75Bpq1c7yV2FjnSQJJ432d6jeshbmfdJss1i6f" \ --bootnodes "/ip4/127.0.0.1/udp/9002/quic-v1/p2p/16Uiu2HAmPQhkD6Zg5Co2ee8ShshkiY4tDePKFARPpCS2oKSLj1E1" \ --bootnodes "/ip4/127.0.0.1/udp/9004/quic-v1/p2p/16Uiu2HAm7TYVs6qvDKnrovd9m4vvRikc4HPXm1WyLumKSe5fHxBv" From 
a3574808ff6a570faaf683261459e9cdbbc0dc20 Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:49:01 +0200 Subject: [PATCH 20/27] feat: update README.md to include instructions for testing discovery --- README.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) diff --git a/README.md b/README.md index d473719..81627d1 100644 --- a/README.md +++ b/README.md @@ -35,3 +35,33 @@ leanEthereum Consensus Client written in Rust using Grandine's libraries. --bootnodes "/ip4/127.0.0.1/udp/9004/quic-v1/p2p/16Uiu2HAm7TYVs6qvDKnrovd9m4vvRikc4HPXm1WyLumKSe5fHxBv" ``` 4. Leave client running for a few minutes and observe warnings, errors, check if blocks are being justified and finalized (don't need debug mode for this last one) + +## Testing discovery + +1. Start the bootnode + + Run in the terminal: + ``` + RUST_LOG=info cargo run --features devnet2 -- \ + --port 9000 \ + --discovery-port 9100 + ``` + +2. Start the other nodes + + Run in the terminal: + ``` + RUST_LOG=info cargo run --features devnet2 -- \ + --port 9001 \ + --discovery-port 9101 \ + --bootnodes "" + ``` + + ``` + RUST_LOG=info cargo run --features devnet2 -- \ + --port 9002 \ + --discovery-port 9102 \ + --bootnodes "" + ``` + +After a minute all the nodes should be synced up and see each other From 94ffee89995195efc675c3e1e8c15da4e43ec26b Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:51:54 +0200 Subject: [PATCH 21/27] fix: update README.md to build the client --- README.md | 16 +++++++++++----- 1 file changed, 11 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 81627d1..ea3b63b 100644 --- a/README.md +++ b/README.md @@ -38,27 +38,33 @@ leanEthereum Consensus Client written in Rust using Grandine's libraries. ## Testing discovery -1. Start the bootnode +1. Build the client: + ```bash + cd lean_client/ + cargo build --release + ``` + +2. 
Start the bootnode Run in the terminal: ``` - RUST_LOG=info cargo run --features devnet2 -- \ + RUST_LOG=info ./target/release/lean_client \ --port 9000 \ --discovery-port 9100 ``` -2. Start the other nodes +3. Start the other nodes Run in the terminal: ``` - RUST_LOG=info cargo run --features devnet2 -- \ + RUST_LOG=info ./target/release/lean_client \ --port 9001 \ --discovery-port 9101 \ --bootnodes "" ``` ``` - RUST_LOG=info cargo run --features devnet2 -- \ + RUST_LOG=info ./target/release/lean_client \ --port 9002 \ --discovery-port 9102 \ --bootnodes "" From ddbff9c416db274b6cf2ee25d3c8ac9f12585b5e Mon Sep 17 00:00:00 2001 From: Domas Klimavicius Date: Sun, 18 Jan 2026 21:59:33 +0200 Subject: [PATCH 22/27] fix: format files --- lean_client/networking/src/discovery/mod.rs | 22 +++++++++-- lean_client/networking/src/discovery/tests.rs | 39 ++++++++++++------- 2 files changed, 43 insertions(+), 18 deletions(-) diff --git a/lean_client/networking/src/discovery/mod.rs b/lean_client/networking/src/discovery/mod.rs index d0b67db..7ee532b 100644 --- a/lean_client/networking/src/discovery/mod.rs +++ b/lean_client/networking/src/discovery/mod.rs @@ -29,7 +29,12 @@ impl DiscoveryService { pub async fn new(config: DiscoveryConfig, keypair: &Keypair) -> Result { let enr_key = keypair_to_enr_key(keypair)?; - let local_enr = build_enr(&enr_key, config.listen_address, config.udp_port, config.libp2p_port)?; + let local_enr = build_enr( + &enr_key, + config.listen_address, + config.udp_port, + config.libp2p_port, + )?; info!( enr = %local_enr, @@ -118,7 +123,10 @@ impl DiscoveryService { } pub fn enr_to_multiaddr(enr: &Enr) -> Option { - let ip = enr.ip4().map(IpAddr::V4).or_else(|| enr.ip6().map(IpAddr::V6))?; + let ip = enr + .ip4() + .map(IpAddr::V4) + .or_else(|| enr.ip6().map(IpAddr::V6))?; let libp2p_port = enr.tcp4().or_else(|| enr.tcp6())?; let peer_id = enr_to_peer_id(enr)?; @@ -171,7 +179,12 @@ fn keypair_to_enr_key(keypair: &Keypair) -> Result { } } -fn 
build_enr(key: &CombinedKey, ip: IpAddr, udp_port: u16, libp2p_port: u16) -> Result> { +fn build_enr( + key: &CombinedKey, + ip: IpAddr, + udp_port: u16, + libp2p_port: u16, +) -> Result> { let mut builder = EnrBuilder::default(); // libp2p port is stored in tcp field, since Enr doesn't have a field for a quic port @@ -199,7 +212,8 @@ fn enr_to_peer_id(enr: &Enr) -> Option { match public_key { discv5::enr::CombinedPublicKey::Secp256k1(pk) => { let compressed = pk.to_sec1_bytes(); - let libp2p_pk = libp2p_identity::secp256k1::PublicKey::try_from_bytes(&compressed).ok()?; + let libp2p_pk = + libp2p_identity::secp256k1::PublicKey::try_from_bytes(&compressed).ok()?; let public = libp2p_identity::PublicKey::from(libp2p_pk); Some(PeerId::from_public_key(&public)) } diff --git a/lean_client/networking/src/discovery/tests.rs b/lean_client/networking/src/discovery/tests.rs index 6566e29..8bdbf82 100644 --- a/lean_client/networking/src/discovery/tests.rs +++ b/lean_client/networking/src/discovery/tests.rs @@ -445,11 +445,7 @@ impl RoutingTable { } pub fn closest_nodes(&self, target: &NodeId, count: usize) -> Vec<&NodeEntry> { - let mut all_nodes: Vec<&NodeEntry> = self - .buckets - .iter() - .flat_map(|b| b.iter()) - .collect(); + let mut all_nodes: Vec<&NodeEntry> = self.buckets.iter().flat_map(|b| b.iter()).collect(); all_nodes.sort_by(|a, b| { let dist_a = xor_distance(&a.node_id, target); @@ -687,8 +683,14 @@ mod tests { assert_eq!(config.k_bucket_size, constants::K_BUCKET_SIZE); assert_eq!(config.alpha, constants::ALPHA); - assert!((config.request_timeout_secs - constants::REQUEST_TIMEOUT_SECS).abs() < f64::EPSILON); - assert!((config.handshake_timeout_secs - constants::HANDSHAKE_TIMEOUT_SECS).abs() < f64::EPSILON); + assert!( + (config.request_timeout_secs - constants::REQUEST_TIMEOUT_SECS).abs() + < f64::EPSILON + ); + assert!( + (config.handshake_timeout_secs - constants::HANDSHAKE_TIMEOUT_SECS).abs() + < f64::EPSILON + ); assert_eq!(config.max_nodes_response, 
constants::MAX_NODES_RESPONSE); assert_eq!(config.bond_expiry_secs, constants::BOND_EXPIRY_SECS); } @@ -922,8 +924,8 @@ mod tests { #[test] fn test_creation() { let id_nonce_bytes: [u8; 16] = [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, ]; let authdata = WhoAreYouAuthdata { id_nonce: IdNonce::new(id_nonce_bytes), @@ -1337,8 +1339,8 @@ mod tests { const PING_REQUEST_ID: [u8; 4] = [0x00, 0x00, 0x00, 0x01]; const PING_ENR_SEQ: u64 = 2; const WHOAREYOU_ID_NONCE: [u8; 16] = [ - 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, - 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, ]; #[test] @@ -1393,11 +1395,17 @@ mod tests { // Node IDs from official test vectors (keccak256 of uncompressed pubkey) fn node_a_id() -> NodeId { - NodeId::from_slice(&hex::decode("aaaa8419e9f49d0083561b48287df592939a8d19947d8c0ef88f2a4856a69fbb").unwrap()) + NodeId::from_slice( + &hex::decode("aaaa8419e9f49d0083561b48287df592939a8d19947d8c0ef88f2a4856a69fbb") + .unwrap(), + ) } fn node_b_id() -> NodeId { - NodeId::from_slice(&hex::decode("bbbb9d047f0488c0b5a93c1c3f2d8bafc7c8ff337024a55434a0d0555de64db9").unwrap()) + NodeId::from_slice( + &hex::decode("bbbb9d047f0488c0b5a93c1c3f2d8bafc7c8ff337024a55434a0d0555de64db9") + .unwrap(), + ) } #[test] @@ -1407,7 +1415,10 @@ mod tests { let distance = xor_distance(&node_a, &node_b); assert!(distance > BigUint::from(0u32)); - assert_eq!(xor_distance(&node_a, &node_b), xor_distance(&node_b, &node_a)); + assert_eq!( + xor_distance(&node_a, &node_b), + xor_distance(&node_b, &node_a) + ); } #[test] From 3c7408f3c9d2166473ea880a2250bfb641879b76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Tue, 20 Jan 2026 19:33:11 +0200 Subject: [PATCH 23/27] fixing tests --- 
.../networking/src/gossipsub/control.rs | 2 +- .../networking/src/gossipsub/mcache.rs | 4 +- lean_client/networking/src/gossipsub/mesh.rs | 5 +- .../src/gossipsub/tests/raw_message.rs | 98 +++++++++++++++++++ lean_client/networking/src/gossipsub/topic.rs | 6 +- 5 files changed, 107 insertions(+), 8 deletions(-) create mode 100644 lean_client/networking/src/gossipsub/tests/raw_message.rs diff --git a/lean_client/networking/src/gossipsub/control.rs b/lean_client/networking/src/gossipsub/control.rs index 8d64aa8..82cf8ba 100644 --- a/lean_client/networking/src/gossipsub/control.rs +++ b/lean_client/networking/src/gossipsub/control.rs @@ -122,7 +122,7 @@ pub struct IDontWant { /// # Example /// /// ``` -/// use lean_client_networking::gossipsub::control::*; +/// use networking::gossipsub::control::*; /// /// let control = ControlMessage { /// grafts: vec![Graft { topic_id: "blocks".to_string() }], diff --git a/lean_client/networking/src/gossipsub/mcache.rs b/lean_client/networking/src/gossipsub/mcache.rs index 6d8fcd6..b708cef 100644 --- a/lean_client/networking/src/gossipsub/mcache.rs +++ b/lean_client/networking/src/gossipsub/mcache.rs @@ -85,8 +85,8 @@ pub struct CacheEntry { /// # Example /// /// ``` -/// use lean_client_networking::gossipsub::mcache::MessageCache; -/// use lean_client_networking::gossipsub::message::RawGossipsubMessage; +/// use networking::gossipsub::mcache::MessageCache; +/// use networking::gossipsub::message::RawGossipsubMessage; /// /// let mut cache = MessageCache::new(6, 3); /// diff --git a/lean_client/networking/src/gossipsub/mesh.rs b/lean_client/networking/src/gossipsub/mesh.rs index 7133035..8239249 100644 --- a/lean_client/networking/src/gossipsub/mesh.rs +++ b/lean_client/networking/src/gossipsub/mesh.rs @@ -152,8 +152,9 @@ impl Default for TopicMesh { /// # Example /// /// ``` -/// use lean_client_networking::gossipsub::mesh::MeshState; -/// use lean_client_networking::gossipsub::config::GossipsubParameters; +/// use 
networking::gossipsub::mesh::MeshState; +/// use networking::gossipsub::config::GossipsubParameters; +/// use std::collections::HashSet; /// /// let mut state = MeshState::new(GossipsubParameters::default()); /// diff --git a/lean_client/networking/src/gossipsub/tests/raw_message.rs b/lean_client/networking/src/gossipsub/tests/raw_message.rs new file mode 100644 index 0000000..1b83157 --- /dev/null +++ b/lean_client/networking/src/gossipsub/tests/raw_message.rs @@ -0,0 +1,98 @@ +use crate::gossipsub::message::{RawGossipsubMessage, SnappyDecompressor}; +use std::sync::Arc; + +struct TestDecompressor; + +impl SnappyDecompressor for TestDecompressor { + fn decompress(&self, _data: &[u8]) -> Result, Box> { + Ok(b"decompressed_test_data".to_vec()) + } +} + +struct FailingDecompressor; + +impl SnappyDecompressor for FailingDecompressor { + fn decompress(&self, _data: &[u8]) -> Result, Box> { + Err("Decompression failed".into()) + } +} + +#[test] +fn test_message_id_computation_no_snappy() { + let topic = b"test_topic"; + let raw_data = b"raw_test_data"; + + let message = RawGossipsubMessage::new(topic.to_vec(), raw_data.to_vec(), None); + let message_id = message.id(); + + assert_eq!(message_id.len(), 20); +} + +#[test] +fn test_message_id_computation_with_snappy() { + let topic = b"test_topic"; + let raw_data = b"raw_test_data"; + + let message = RawGossipsubMessage::new( + topic.to_vec(), + raw_data.to_vec(), + Some(Arc::new(TestDecompressor)), + ); + let message_id = message.id(); + + assert_eq!(message_id.len(), 20); +} + +#[test] +fn test_message_id_computation_snappy_fails() { + let topic = b"test_topic"; + let raw_data = b"raw_test_data"; + + let message = RawGossipsubMessage::new( + topic.to_vec(), + raw_data.to_vec(), + Some(Arc::new(FailingDecompressor)), + ); + let message_id = message.id(); + + assert_eq!(message_id.len(), 20); +} + +#[test] +fn test_message_id_determinism() { + let topic = b"test_topic"; + let data = b"test_data"; + + let message1 = 
RawGossipsubMessage::new( + topic.to_vec(), + data.to_vec(), + Some(Arc::new(TestDecompressor)), + ); + let message2 = RawGossipsubMessage::new( + topic.to_vec(), + data.to_vec(), + Some(Arc::new(TestDecompressor)), + ); + + assert_eq!(message1.id(), message2.id()); +} + +#[test] +fn test_message_uniqueness() { + let test_cases = vec![ + (b"topic1".to_vec(), b"data".to_vec()), + (b"topic2".to_vec(), b"data".to_vec()), + (b"topic".to_vec(), b"data1".to_vec()), + (b"topic".to_vec(), b"data2".to_vec()), + ]; + + let messages: Vec<_> = test_cases + .into_iter() + .map(|(topic, data)| RawGossipsubMessage::new(topic, data, None)) + .collect(); + + let ids: Vec<_> = messages.iter().map(|msg| msg.id()).collect(); + let unique_ids: std::collections::HashSet<_> = ids.iter().collect(); + + assert_eq!(ids.len(), unique_ids.len()); +} diff --git a/lean_client/networking/src/gossipsub/topic.rs b/lean_client/networking/src/gossipsub/topic.rs index ce9af7a..5cc3c75 100644 --- a/lean_client/networking/src/gossipsub/topic.rs +++ b/lean_client/networking/src/gossipsub/topic.rs @@ -174,7 +174,7 @@ impl GossipsubTopic { /// # Example /// /// ``` - /// use lean_client_networking::gossipsub::topic::GossipsubTopic; + /// use networking::gossipsub::topic::GossipsubTopic; /// /// let topic = GossipsubTopic::from_string("/leanconsensus/0x12345678/block/ssz_snappy")?; /// # Ok::<(), String>(()) @@ -306,7 +306,7 @@ pub fn get_topics(fork: String) -> Vec { /// # Example /// /// ``` -/// use lean_client_networking::gossipsub::topic::format_topic_string; +/// use networking::gossipsub::topic::format_topic_string; /// /// let topic_str = format_topic_string("block", "0x12345678", None, None); /// assert_eq!(topic_str, "/leanconsensus/0x12345678/block/ssz_snappy"); @@ -346,7 +346,7 @@ pub fn format_topic_string( /// # Example /// /// ``` -/// use lean_client_networking::gossipsub::topic::parse_topic_string; +/// use networking::gossipsub::topic::parse_topic_string; /// /// let (prefix, fork, name, 
enc) = parse_topic_string("/leanconsensus/0x12345678/block/ssz_snappy")?; /// assert_eq!(prefix, "leanconsensus"); From 5f1f756ee1529c893f9fcd2f2b4e46e3182f7f62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Tue, 20 Jan 2026 19:53:02 +0200 Subject: [PATCH 24/27] fixing formatting to match Rust guidelines --- lean_client/containers/src/types.rs | 4 +- .../networking/src/gossipsub/config.rs | 27 +++---- .../networking/src/gossipsub/control.rs | 11 ++- .../networking/src/gossipsub/mcache.rs | 63 ++++++++------- lean_client/networking/src/gossipsub/mesh.rs | 77 +++++++++---------- .../networking/src/gossipsub/message.rs | 21 +++-- lean_client/networking/src/gossipsub/mod.rs | 4 +- .../networking/src/gossipsub/tests/control.rs | 5 +- .../networking/src/gossipsub/tests/mcache.rs | 31 ++++---- .../networking/src/gossipsub/tests/mesh.rs | 26 +++---- lean_client/networking/src/gossipsub/topic.rs | 47 ++++++----- lean_client/networking/src/gossipsub/types.rs | 1 - 12 files changed, 151 insertions(+), 166 deletions(-) diff --git a/lean_client/containers/src/types.rs b/lean_client/containers/src/types.rs index fba0554..fd8779f 100644 --- a/lean_client/containers/src/types.rs +++ b/lean_client/containers/src/types.rs @@ -21,11 +21,11 @@ impl Bytes20 { pub fn new(data: [u8; 20]) -> Self { Bytes20(data) } - + pub fn len(&self) -> usize { 20 } - + pub fn is_empty(&self) -> bool { false } diff --git a/lean_client/networking/src/gossipsub/config.rs b/lean_client/networking/src/gossipsub/config.rs index 769b286..1e6d7c4 100644 --- a/lean_client/networking/src/gossipsub/config.rs +++ b/lean_client/networking/src/gossipsub/config.rs @@ -50,7 +50,6 @@ /// - Ethereum P2P spec: /// - Gossipsub v1.0: /// - Gossipsub v1.2: - use crate::gossipsub::topic::GossipsubTopic; use crate::types::MESSAGE_DOMAIN_VALID_SNAPPY; use libp2p::gossipsub::{Config, ConfigBuilder, Message, MessageId, ValidationMode}; @@ -69,11 +68,10 @@ pub struct GossipsubParameters { /// The 
protocol ID for gossip messages. #[serde(default = "default_protocol_id")] pub protocol_id: String, - + // ------------------------------------------------------------------------- // Mesh Degree Parameters // ------------------------------------------------------------------------- - /// Target number of mesh peers per topic. /// /// The heartbeat procedure adjusts the mesh toward this size: @@ -82,21 +80,21 @@ pub struct GossipsubParameters { /// - If |mesh| > D_high: prune peers down to D #[serde(default = "default_d")] pub d: usize, - + /// Minimum mesh peers before grafting. /// /// When mesh size drops below this threshold, the heartbeat /// will graft new peers to reach the target D. #[serde(default = "default_d_low")] pub d_low: usize, - + /// Maximum mesh peers before pruning. /// /// When mesh size exceeds this threshold, the heartbeat /// will prune excess peers down to the target D. #[serde(default = "default_d_high")] pub d_high: usize, - + /// Number of non-mesh peers for IHAVE gossip. /// /// During heartbeat, IHAVE messages are sent to this many @@ -104,11 +102,10 @@ pub struct GossipsubParameters { /// the lazy pull protocol for reliability. #[serde(default = "default_d_lazy")] pub d_lazy: usize, - + // ------------------------------------------------------------------------- // Timing Parameters // ------------------------------------------------------------------------- - /// Interval between heartbeat ticks in seconds. /// /// The heartbeat procedure runs periodically to: @@ -119,7 +116,7 @@ pub struct GossipsubParameters { /// - Shift the message cache window #[serde(default = "default_heartbeat_interval_secs")] pub heartbeat_interval_secs: f64, - + /// Time-to-live for fanout entries in seconds. /// /// Fanout peers are used when publishing to topics we don't @@ -127,18 +124,17 @@ pub struct GossipsubParameters { /// inactivity to free resources. 
#[serde(default = "default_fanout_ttl_secs")] pub fanout_ttl_secs: u64, - + // ------------------------------------------------------------------------- // Message Cache Parameters // ------------------------------------------------------------------------- - /// Total number of history windows in the message cache. /// /// - Messages are stored for this many heartbeat intervals. /// - After mcache_len heartbeats, messages are evicted. #[serde(default = "default_mcache_len")] pub mcache_len: usize, - + /// Number of recent windows included in IHAVE gossip. /// /// Only messages from the most recent mcache_gossip windows @@ -146,7 +142,7 @@ pub struct GossipsubParameters { /// be retrieved via IWANT but won't be actively gossiped. #[serde(default = "default_mcache_gossip")] pub mcache_gossip: usize, - + /// Time-to-live for seen message IDs in seconds. /// /// Message IDs are tracked to detect duplicates. This should @@ -154,11 +150,10 @@ pub struct GossipsubParameters { /// short enough to bound memory usage. #[serde(default = "default_seen_ttl_secs")] pub seen_ttl_secs: u64, - + // ------------------------------------------------------------------------- // IDONTWANT Optimization (v1.2) // ------------------------------------------------------------------------- - /// Minimum message size in bytes to trigger IDONTWANT. 
/// /// When receiving a message larger than this threshold, @@ -243,7 +238,7 @@ pub struct GossipsubConfig { impl GossipsubConfig { pub fn new() -> Self { let params = GossipsubParameters::default(); - + let config = ConfigBuilder::default() // leanSpec: heartbeat_interval_secs = 0.7 .heartbeat_interval(Duration::from_millis(700)) diff --git a/lean_client/networking/src/gossipsub/control.rs b/lean_client/networking/src/gossipsub/control.rs index 82cf8ba..6059ca6 100644 --- a/lean_client/networking/src/gossipsub/control.rs +++ b/lean_client/networking/src/gossipsub/control.rs @@ -39,7 +39,6 @@ /// /// - Gossipsub v1.0: /// - Gossipsub v1.2: - use serde::{Deserialize, Serialize}; use super::types::MessageId; @@ -85,7 +84,7 @@ pub struct Prune { pub struct IHave { /// Topic the advertised messages belong to. pub topic_id: String, - + /// IDs of messages available in the sender's cache. pub message_ids: Vec, } @@ -137,19 +136,19 @@ pub struct ControlMessage { /// GRAFT messages requesting mesh membership. #[serde(default)] pub grafts: Vec, - + /// PRUNE messages notifying mesh removal. #[serde(default)] pub prunes: Vec, - + /// IHAVE messages advertising cached message IDs. #[serde(default)] pub ihaves: Vec, - + /// IWANT messages requesting full messages. #[serde(default)] pub iwants: Vec, - + /// IDONTWANT messages declining specific messages (v1.2). #[serde(default)] pub idontwants: Vec, diff --git a/lean_client/networking/src/gossipsub/mcache.rs b/lean_client/networking/src/gossipsub/mcache.rs index b708cef..e3b3679 100644 --- a/lean_client/networking/src/gossipsub/mcache.rs +++ b/lean_client/networking/src/gossipsub/mcache.rs @@ -54,7 +54,6 @@ /// ## References /// /// - Gossipsub v1.0: - use std::collections::{HashMap, HashSet, VecDeque}; use super::message::RawGossipsubMessage; @@ -68,7 +67,7 @@ use super::types::{MessageId, Timestamp, TopicId}; pub struct CacheEntry { /// The cached gossipsub message. 
pub message: RawGossipsubMessage, - + /// Topic this message was published to. /// /// Used to filter messages when generating IHAVE gossip for a specific topic. @@ -112,19 +111,19 @@ pub struct MessageCache { /// Higher values increase memory usage but improve message /// availability for late IWANT requests. mcache_len: usize, - + /// Number of recent windows to include in IHAVE gossip. /// /// Only messages from the most recent windows are advertised. /// Should be less than or equal to mcache_len. mcache_gossip: usize, - + /// Sliding window of message ID sets. /// /// Index 0 is the newest window. Each heartbeat, windows shift /// right and a new empty window is prepended. windows: VecDeque>, - + /// Message lookup index keyed by ID. /// /// Provides O(1) retrieval for IWANT responses. @@ -141,7 +140,7 @@ impl MessageCache { pub fn new(mcache_len: usize, mcache_gossip: usize) -> Self { let mut windows = VecDeque::with_capacity(mcache_len); windows.push_back(HashSet::new()); - + Self { mcache_len, mcache_gossip, @@ -149,7 +148,7 @@ impl MessageCache { by_id: HashMap::new(), } } - + /// Add a message to the cache. /// /// Messages are added to the newest window (index 0) and @@ -165,19 +164,19 @@ impl MessageCache { /// `true` if added (not a duplicate) pub fn put(&mut self, topic: TopicId, message: RawGossipsubMessage) -> bool { let msg_id = message.id(); - + if self.by_id.contains_key(&msg_id) { return false; } - + if let Some(window) = self.windows.front_mut() { window.insert(msg_id.clone()); } - + self.by_id.insert(msg_id, CacheEntry { message, topic }); true } - + /// Retrieve a message by ID. /// /// Used to respond to IWANT requests from peers. @@ -192,7 +191,7 @@ impl MessageCache { pub fn get(&self, msg_id: &MessageId) -> Option<&RawGossipsubMessage> { self.by_id.get(msg_id).map(|entry| &entry.message) } - + /// Check if a message is cached. 
/// /// # Arguments @@ -205,7 +204,7 @@ impl MessageCache { pub fn has(&self, msg_id: &MessageId) -> bool { self.by_id.contains_key(msg_id) } - + /// Get message IDs for IHAVE gossip. /// /// Returns IDs from the most recent `mcache_gossip` windows @@ -221,7 +220,7 @@ impl MessageCache { pub fn get_gossip_ids(&self, topic: &str) -> Vec { let mut result = Vec::new(); let windows_to_check = self.mcache_gossip.min(self.windows.len()); - + for i in 0..windows_to_check { if let Some(window) = self.windows.get(i) { for msg_id in window { @@ -233,10 +232,10 @@ impl MessageCache { } } } - + result } - + /// Shift the cache window, evicting the oldest. /// /// Called at each heartbeat to age the cache: @@ -249,7 +248,7 @@ impl MessageCache { /// Number of messages evicted pub fn shift(&mut self) -> usize { let mut evicted = 0; - + if self.windows.len() >= self.mcache_len { if let Some(oldest) = self.windows.pop_back() { for msg_id in oldest { @@ -259,23 +258,23 @@ impl MessageCache { } } } - + self.windows.push_front(HashSet::new()); evicted } - + /// Clear all cached messages. pub fn clear(&mut self) { self.windows.clear(); self.windows.push_back(HashSet::new()); self.by_id.clear(); } - + /// Get the total number of cached messages. pub fn len(&self) -> usize { self.by_id.len() } - + /// Check if the cache is empty. pub fn is_empty(&self) -> bool { self.by_id.is_empty() @@ -303,12 +302,12 @@ pub struct SeenCache { /// - long enough to cover network propagation, /// - short enough to bound memory usage. ttl_seconds: u64, - + /// Set of message IDs that have been seen. /// /// Provides O(1) membership testing. seen: HashSet, - + /// Timestamp when each message was first seen. /// /// Used to determine expiry during cleanup. @@ -328,7 +327,7 @@ impl SeenCache { timestamps: HashMap::new(), } } - + /// Mark a message as seen. 
/// /// # Arguments @@ -343,12 +342,12 @@ impl SeenCache { if self.seen.contains(&msg_id) { return false; } - + self.seen.insert(msg_id.clone()); self.timestamps.insert(msg_id, timestamp); true } - + /// Check if a message has been seen. /// /// # Arguments @@ -361,7 +360,7 @@ impl SeenCache { pub fn has(&self, msg_id: &MessageId) -> bool { self.seen.contains(msg_id) } - + /// Remove expired entries. /// /// Should be called periodically (e.g., each heartbeat) @@ -382,27 +381,27 @@ impl SeenCache { .filter(|(_, ts)| **ts < cutoff) .map(|(id, _)| id.clone()) .collect(); - + let count = expired.len(); for msg_id in expired { self.seen.remove(&msg_id); self.timestamps.remove(&msg_id); } - + count } - + /// Clear all seen entries. pub fn clear(&mut self) { self.seen.clear(); self.timestamps.clear(); } - + /// Get the number of seen message IDs. pub fn len(&self) -> usize { self.seen.len() } - + /// Check if the seen cache is empty. pub fn is_empty(&self) -> bool { self.seen.is_empty() diff --git a/lean_client/networking/src/gossipsub/mesh.rs b/lean_client/networking/src/gossipsub/mesh.rs index 8239249..d4efbc5 100644 --- a/lean_client/networking/src/gossipsub/mesh.rs +++ b/lean_client/networking/src/gossipsub/mesh.rs @@ -30,7 +30,6 @@ /// ## References /// /// - Gossipsub v1.0: - use rand::seq::SliceRandom; use std::collections::{HashMap, HashSet}; use std::time::{SystemTime, UNIX_EPOCH}; @@ -51,7 +50,7 @@ pub struct FanoutEntry { /// /// Selected randomly from available topic peers, up to D peers. pub peers: HashSet, - + /// Unix timestamp of the last publish to this topic. /// /// Used to determine if the entry has expired. @@ -66,7 +65,7 @@ impl FanoutEntry { last_published: 0.0, } } - + /// Check if this fanout entry has expired. /// /// # Arguments @@ -109,7 +108,7 @@ impl TopicMesh { peers: HashSet::new(), } } - + /// Add a peer to this topic's mesh. 
/// /// # Arguments @@ -122,7 +121,7 @@ impl TopicMesh { pub fn add_peer(&mut self, peer_id: PeerId) -> bool { self.peers.insert(peer_id) } - + /// Remove a peer from this topic's mesh. /// /// # Arguments @@ -177,13 +176,13 @@ impl Default for TopicMesh { pub struct MeshState { /// Gossipsub parameters controlling mesh behavior. params: GossipsubParameters, - + /// Mesh state for each subscribed topic. Keyed by topic ID. meshes: HashMap, - + /// Fanout state for publish-only topics. Keyed by topic ID. fanouts: HashMap, - + /// Set of topics we are subscribed to. subscriptions: HashSet, } @@ -198,27 +197,27 @@ impl MeshState { subscriptions: HashSet::new(), } } - + /// Get the target mesh size per topic. pub fn d(&self) -> usize { self.params.d } - + /// Get the low watermark - graft when mesh is smaller. pub fn d_low(&self) -> usize { self.params.d_low } - + /// Get the high watermark - prune when mesh is larger. pub fn d_high(&self) -> usize { self.params.d_high } - + /// Get the number of peers for IHAVE gossip. pub fn d_lazy(&self) -> usize { self.params.d_lazy } - + /// Subscribe to a topic, initializing its mesh. /// /// If we have fanout peers for this topic, they are @@ -231,9 +230,9 @@ impl MeshState { if self.subscriptions.contains(&topic) { return; } - + self.subscriptions.insert(topic.clone()); - + // Promote fanout peers to mesh if any let mut mesh = TopicMesh::new(); if let Some(fanout) = self.fanouts.remove(&topic) { @@ -241,7 +240,7 @@ impl MeshState { } self.meshes.insert(topic, mesh); } - + /// Unsubscribe from a topic. /// /// # Arguments @@ -258,7 +257,7 @@ impl MeshState { .map(|mesh| mesh.peers) .unwrap_or_default() } - + /// Check if subscribed to a topic. /// /// # Arguments @@ -271,7 +270,7 @@ impl MeshState { pub fn is_subscribed(&self, topic: &TopicId) -> bool { self.subscriptions.contains(topic) } - + /// Get mesh peers for a topic. 
/// /// # Arguments @@ -287,7 +286,7 @@ impl MeshState { .map(|mesh| mesh.peers.clone()) .unwrap_or_default() } - + /// Add a peer to a topic's mesh. /// /// # Arguments @@ -306,7 +305,7 @@ impl MeshState { false } } - + /// Remove a peer from a topic's mesh. /// /// # Arguments @@ -325,7 +324,7 @@ impl MeshState { false } } - + /// Get fanout peers for a topic. /// /// # Arguments @@ -341,7 +340,7 @@ impl MeshState { .map(|fanout| fanout.peers.clone()) .unwrap_or_default() } - + /// Update fanout for publishing to a non-subscribed topic. /// /// For subscribed topics, returns mesh peers instead. @@ -362,24 +361,21 @@ impl MeshState { if self.subscriptions.contains(topic) { return self.get_mesh_peers(topic); } - + let d = self.d(); let fanout = self .fanouts .entry(topic.to_string()) .or_insert_with(FanoutEntry::new); - + fanout.last_published = SystemTime::now() .duration_since(UNIX_EPOCH) .unwrap() .as_secs_f64(); - + // Fill fanout up to D peers if fanout.peers.len() < d { - let candidates: Vec<_> = available_peers - .difference(&fanout.peers) - .cloned() - .collect(); + let candidates: Vec<_> = available_peers.difference(&fanout.peers).cloned().collect(); let needed = d - fanout.peers.len(); let mut rng = rand::thread_rng(); let new_peers: Vec<_> = candidates @@ -388,10 +384,10 @@ impl MeshState { .collect(); fanout.peers.extend(new_peers); } - + fanout.peers.clone() } - + /// Remove expired fanout entries. /// /// # Arguments @@ -406,22 +402,22 @@ impl MeshState { .duration_since(UNIX_EPOCH) .unwrap() .as_secs_f64(); - + let stale: Vec<_> = self .fanouts .iter() .filter(|(_, fanout)| fanout.is_stale(current_time, ttl)) .map(|(topic, _)| topic.clone()) .collect(); - + let count = stale.len(); for topic in stale { self.fanouts.remove(&topic); } - + count } - + /// Select non-mesh peers for IHAVE gossip. /// /// Randomly selects up to D_lazy peers from those not in the mesh. 
@@ -441,19 +437,16 @@ impl MeshState { all_topic_peers: &HashSet, ) -> Vec { let mesh_peers = self.get_mesh_peers(topic); - let candidates: Vec<_> = all_topic_peers - .difference(&mesh_peers) - .cloned() - .collect(); - + let candidates: Vec<_> = all_topic_peers.difference(&mesh_peers).cloned().collect(); + if candidates.len() <= self.d_lazy() { return candidates; } - + let mut rng = rand::thread_rng(); candidates .choose_multiple(&mut rng, self.d_lazy()) .cloned() .collect() } -} \ No newline at end of file +} diff --git a/lean_client/networking/src/gossipsub/message.rs b/lean_client/networking/src/gossipsub/message.rs index ff8fdcc..19f40b9 100644 --- a/lean_client/networking/src/gossipsub/message.rs +++ b/lean_client/networking/src/gossipsub/message.rs @@ -42,7 +42,6 @@ /// /// - [Ethereum P2P spec](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md) /// - [Gossipsub v1.0](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) - use containers::Bytes20; use sha2::{Digest, Sha256}; use std::sync::Arc; @@ -86,19 +85,19 @@ pub struct RawGossipsubMessage { /// /// Example: `b"/leanconsensus/0x12345678/block/ssz_snappy"` pub topic: Vec, - + /// Raw message payload. /// /// Typically snappy-compressed SSZ data. The actual content /// depends on the topic (block, attestation, etc.). pub raw_data: Vec, - + /// Optional snappy decompression function. /// /// If provided, decompression is attempted during ID computation /// to determine the domain byte. snappy_decompress: Option>, - + /// Cached message ID. /// /// Computed lazily on first access to `id()` method. Once computed, @@ -126,7 +125,7 @@ impl RawGossipsubMessage { cached_id: None, } } - + /// Get the 20-byte message ID. 
/// /// Computed lazily on first access using the Ethereum consensus @@ -139,15 +138,15 @@ impl RawGossipsubMessage { if let Some(id) = &self.cached_id { return id.clone(); } - + // Compute ID let id = Self::compute_id(&self.topic, &self.raw_data, self.snappy_decompress.as_ref()); - + // Note: We can't cache here because self is immutable // In practice, callers should use a mutable reference or compute once id } - + /// Compute a 20-byte message ID from raw data. /// /// Implements the Ethereum consensus message ID function: @@ -185,17 +184,17 @@ impl RawGossipsubMessage { } else { (MESSAGE_DOMAIN_INVALID_SNAPPY, data.to_vec()) }; - + let mut preimage = Vec::new(); preimage.extend_from_slice(domain); preimage.extend_from_slice(&(topic.len() as u64).to_le_bytes()); preimage.extend_from_slice(topic); preimage.extend_from_slice(&data_for_hash); - + let hash = Sha256::digest(&preimage); Bytes20::from(&hash[..20]) } - + /// Get the topic as a UTF-8 string. /// /// # Returns diff --git a/lean_client/networking/src/gossipsub/mod.rs b/lean_client/networking/src/gossipsub/mod.rs index f019a7c..1251801 100644 --- a/lean_client/networking/src/gossipsub/mod.rs +++ b/lean_client/networking/src/gossipsub/mod.rs @@ -21,7 +21,7 @@ pub use mcache::{CacheEntry, MessageCache, SeenCache}; pub use mesh::{FanoutEntry, MeshState, TopicMesh}; pub use message::{GossipsubMessage, RawGossipsubMessage, SnappyDecompressor}; pub use topic::{ - format_topic_string, get_topics, parse_topic_string, GossipsubKind, GossipsubTopic, - ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, + ATTESTATION_TOPIC, BLOCK_TOPIC, GossipsubKind, GossipsubTopic, SSZ_SNAPPY_ENCODING_POSTFIX, + TOPIC_PREFIX, format_topic_string, get_topics, parse_topic_string, }; pub use types::{MessageId, PeerId, Timestamp, TopicId}; diff --git a/lean_client/networking/src/gossipsub/tests/control.rs b/lean_client/networking/src/gossipsub/tests/control.rs index 42fb115..6d66cbc 100644 --- 
a/lean_client/networking/src/gossipsub/tests/control.rs +++ b/lean_client/networking/src/gossipsub/tests/control.rs @@ -19,10 +19,7 @@ fn test_prune_creation() { #[test] fn test_ihave_creation() { - let msg_ids = vec![ - Bytes20::from(&[1u8; 20][..]), - Bytes20::from(&[2u8; 20][..]), - ]; + let msg_ids = vec![Bytes20::from(&[1u8; 20][..]), Bytes20::from(&[2u8; 20][..])]; let ihave = IHave { topic_id: "test_topic".to_string(), message_ids: msg_ids.clone(), diff --git a/lean_client/networking/src/gossipsub/tests/mcache.rs b/lean_client/networking/src/gossipsub/tests/mcache.rs index b573348..77e87bf 100644 --- a/lean_client/networking/src/gossipsub/tests/mcache.rs +++ b/lean_client/networking/src/gossipsub/tests/mcache.rs @@ -6,10 +6,10 @@ use containers::Bytes20; fn test_cache_put_and_get() { let mut cache = MessageCache::new(6, 3); let message = RawGossipsubMessage::new(b"topic".to_vec(), b"data".to_vec(), None); - + assert!(cache.put("topic".to_string(), message.clone())); assert!(!cache.put("topic".to_string(), message.clone())); // Duplicate - + let retrieved = cache.get(&message.id()); assert!(retrieved.is_some()); assert_eq!(retrieved.unwrap().id(), message.id()); @@ -19,7 +19,7 @@ fn test_cache_put_and_get() { fn test_cache_has() { let mut cache = MessageCache::new(6, 3); let message = RawGossipsubMessage::new(b"topic".to_vec(), b"data".to_vec(), None); - + assert!(!cache.has(&message.id())); cache.put("topic".to_string(), message.clone()); assert!(cache.has(&message.id())); @@ -28,19 +28,16 @@ fn test_cache_has() { #[test] fn test_cache_shift() { let mut cache = MessageCache::new(3, 2); - + let mut messages = Vec::new(); for i in 0..5 { - let msg = RawGossipsubMessage::new( - b"topic".to_vec(), - format!("data{}", i).into_bytes(), - None, - ); + let msg = + RawGossipsubMessage::new(b"topic".to_vec(), format!("data{}", i).into_bytes(), None); cache.put("topic".to_string(), msg.clone()); messages.push(msg); cache.shift(); } - + // Old messages should be evicted 
assert!(!cache.has(&messages[0].id())); assert!(!cache.has(&messages[1].id())); @@ -49,17 +46,17 @@ fn test_cache_shift() { #[test] fn test_get_gossip_ids() { let mut cache = MessageCache::new(6, 3); - + let msg1 = RawGossipsubMessage::new(b"topic1".to_vec(), b"data1".to_vec(), None); let msg2 = RawGossipsubMessage::new(b"topic2".to_vec(), b"data2".to_vec(), None); let msg3 = RawGossipsubMessage::new(b"topic1".to_vec(), b"data3".to_vec(), None); - + cache.put("topic1".to_string(), msg1.clone()); cache.put("topic2".to_string(), msg2.clone()); cache.put("topic1".to_string(), msg3.clone()); - + let gossip_ids = cache.get_gossip_ids("topic1"); - + assert!(gossip_ids.contains(&msg1.id())); assert!(!gossip_ids.contains(&msg2.id())); assert!(gossip_ids.contains(&msg3.id())); @@ -69,7 +66,7 @@ fn test_get_gossip_ids() { fn test_seen_cache_add_and_check() { let mut cache = SeenCache::new(60); let msg_id = Bytes20::new([1u8; 20]); - + assert!(!cache.has(&msg_id)); assert!(cache.add(msg_id.clone(), 1000.0)); assert!(cache.has(&msg_id)); @@ -80,10 +77,10 @@ fn test_seen_cache_add_and_check() { fn test_seen_cache_cleanup() { let mut cache = SeenCache::new(10); let msg_id = Bytes20::new([1u8; 20]); - + cache.add(msg_id.clone(), 1000.0); assert!(cache.has(&msg_id)); - + let removed = cache.cleanup(1015.0); assert_eq!(removed, 1); assert!(!cache.has(&msg_id)); diff --git a/lean_client/networking/src/gossipsub/tests/mesh.rs b/lean_client/networking/src/gossipsub/tests/mesh.rs index 3dce268..5f10cd9 100644 --- a/lean_client/networking/src/gossipsub/tests/mesh.rs +++ b/lean_client/networking/src/gossipsub/tests/mesh.rs @@ -12,7 +12,7 @@ fn test_mesh_state_initialization() { ..Default::default() }; let mesh = MeshState::new(params); - + assert_eq!(mesh.d(), 8); assert_eq!(mesh.d_low(), 6); assert_eq!(mesh.d_high(), 12); @@ -22,11 +22,11 @@ fn test_mesh_state_initialization() { #[test] fn test_subscribe_and_unsubscribe() { let mesh = &mut MeshState::new(GossipsubParameters::default()); 
- + mesh.subscribe("topic1".to_string()); assert!(mesh.is_subscribed(&"topic1".to_string())); assert!(!mesh.is_subscribed(&"topic2".to_string())); - + let peers = mesh.unsubscribe(&"topic1".to_string()); assert!(!mesh.is_subscribed(&"topic1".to_string())); assert!(peers.is_empty()); @@ -36,18 +36,18 @@ fn test_subscribe_and_unsubscribe() { fn test_add_remove_mesh_peers() { let mesh = &mut MeshState::new(GossipsubParameters::default()); mesh.subscribe("topic1".to_string()); - + assert!(mesh.add_to_mesh("topic1", "peer1".to_string())); assert!(mesh.add_to_mesh("topic1", "peer2".to_string())); assert!(!mesh.add_to_mesh("topic1", "peer1".to_string())); // Already in mesh - + let peers = mesh.get_mesh_peers("topic1"); assert!(peers.contains("peer1")); assert!(peers.contains("peer2")); - + assert!(mesh.remove_from_mesh("topic1", &"peer1".to_string())); assert!(!mesh.remove_from_mesh("topic1", &"peer1".to_string())); // Already removed - + let peers = mesh.get_mesh_peers("topic1"); assert!(!peers.contains("peer1")); assert!(peers.contains("peer2")); @@ -63,14 +63,14 @@ fn test_gossip_peer_selection() { mesh.subscribe("topic1".to_string()); mesh.add_to_mesh("topic1", "peer1".to_string()); mesh.add_to_mesh("topic1", "peer2".to_string()); - + let all_peers: HashSet<_> = vec!["peer1", "peer2", "peer3", "peer4", "peer5", "peer6"] .into_iter() .map(String::from) .collect(); - + let gossip_peers = mesh.select_peers_for_gossip("topic1", &all_peers); - + let mesh_peers = mesh.get_mesh_peers("topic1"); for peer in &gossip_peers { assert!(!mesh_peers.contains(peer)); @@ -80,11 +80,11 @@ fn test_gossip_peer_selection() { #[test] fn test_topic_mesh_add_remove() { let topic_mesh = &mut TopicMesh::new(); - + assert!(topic_mesh.add_peer("peer1".to_string())); assert!(!topic_mesh.add_peer("peer1".to_string())); // Already exists assert!(topic_mesh.peers.contains("peer1")); - + assert!(topic_mesh.remove_peer(&"peer1".to_string())); assert!(!topic_mesh.remove_peer(&"peer1".to_string())); // 
Already removed assert!(!topic_mesh.peers.contains("peer1")); @@ -94,7 +94,7 @@ fn test_topic_mesh_add_remove() { fn test_fanout_entry_staleness() { let mut entry = FanoutEntry::new(); entry.last_published = 1000.0; - + assert!(!entry.is_stale(1050.0, 60.0)); assert!(entry.is_stale(1070.0, 60.0)); } diff --git a/lean_client/networking/src/gossipsub/topic.rs b/lean_client/networking/src/gossipsub/topic.rs index 5cc3c75..3005e30 100644 --- a/lean_client/networking/src/gossipsub/topic.rs +++ b/lean_client/networking/src/gossipsub/topic.rs @@ -42,7 +42,6 @@ /// ## References /// /// - Ethereum P2P: - use libp2p::gossipsub::{IdentTopic, TopicHash}; /// Network prefix for Lean consensus gossip topics. @@ -77,7 +76,7 @@ pub const ATTESTATION_TOPIC: &str = "attestation"; pub enum GossipsubKind { /// Signed beacon block messages. Block, - + /// Signed attestation messages. Attestation, } @@ -113,7 +112,7 @@ pub struct GossipsubTopic { /// /// Peers must match on fork digest to exchange messages on a topic. pub fork: String, - + /// The topic type (block, attestation, etc.). /// /// Determines what kind of messages are exchanged on this topic. @@ -130,7 +129,7 @@ impl GossipsubTopic { pub fn new(fork: String, kind: GossipsubKind) -> Self { Self { fork, kind } } - + /// Create a block topic for the given fork. /// /// # Arguments @@ -143,7 +142,7 @@ impl GossipsubTopic { pub fn block(fork_digest: String) -> Self { Self::new(fork_digest, GossipsubKind::Block) } - + /// Create an attestation topic for the given fork. /// /// # Arguments @@ -156,7 +155,7 @@ impl GossipsubTopic { pub fn attestation(fork_digest: String) -> Self { Self::new(fork_digest, GossipsubKind::Attestation) } - + /// Parse a topic string into a GossipsubTopic. 
/// /// # Arguments @@ -181,23 +180,25 @@ impl GossipsubTopic { /// ``` pub fn from_string(topic_str: &str) -> Result { let (prefix, fork_digest, topic_name, encoding) = parse_topic_string(topic_str)?; - + if prefix != TOPIC_PREFIX { - return Err(format!("Invalid prefix: expected '{TOPIC_PREFIX}', got '{prefix}'")); + return Err(format!( + "Invalid prefix: expected '{TOPIC_PREFIX}', got '{prefix}'" + )); } - + if encoding != SSZ_SNAPPY_ENCODING_POSTFIX { return Err(format!( "Invalid encoding: expected '{SSZ_SNAPPY_ENCODING_POSTFIX}', got '{encoding}'" )); } - + let kind = match topic_name { BLOCK_TOPIC => GossipsubKind::Block, ATTESTATION_TOPIC => GossipsubKind::Attestation, other => return Err(format!("Unknown topic: '{other}'")), }; - + Ok(Self::new(fork_digest.to_string(), kind)) } @@ -236,7 +237,7 @@ impl GossipsubTopic { other => Err(format!("Invalid topic kind: {other:?}")), } } - + /// Convert to topic string as bytes. pub fn as_bytes(&self) -> Vec { self.to_string().into_bytes() @@ -357,11 +358,14 @@ pub fn format_topic_string( /// ``` pub fn parse_topic_string(topic_str: &str) -> Result<(&str, &str, &str, &str), String> { let parts: Vec<&str> = topic_str.trim_start_matches('/').split('/').collect(); - + if parts.len() != 4 { - return Err(format!("Invalid topic format: expected 4 parts, got {}", parts.len())); + return Err(format!( + "Invalid topic format: expected 4 parts, got {}", + parts.len() + )); } - + Ok((parts[0], parts[1], parts[2], parts[3])) } @@ -372,17 +376,20 @@ mod tests { #[test] fn test_gossip_topic_creation() { let topic = GossipsubTopic::new("0x12345678".to_string(), GossipsubKind::Block); - + assert_eq!(topic.kind, GossipsubKind::Block); assert_eq!(topic.fork, "0x12345678"); - assert_eq!(topic.to_string(), "/leanconsensus/0x12345678/block/ssz_snappy"); + assert_eq!( + topic.to_string(), + "/leanconsensus/0x12345678/block/ssz_snappy" + ); } #[test] fn test_gossip_topic_from_string() { let topic = 
GossipsubTopic::from_string("/leanconsensus/0x12345678/block/ssz_snappy") .expect("Failed to parse topic"); - + assert_eq!(topic.kind, GossipsubKind::Block); assert_eq!(topic.fork, "0x12345678"); } @@ -391,7 +398,7 @@ mod tests { fn test_gossip_topic_factory_methods() { let block_topic = GossipsubTopic::block("0xabcd1234".to_string()); assert_eq!(block_topic.kind, GossipsubKind::Block); - + let attestation_topic = GossipsubTopic::attestation("0xabcd1234".to_string()); assert_eq!(attestation_topic.kind, GossipsubKind::Attestation); } @@ -407,7 +414,7 @@ mod tests { let (prefix, fork_digest, topic_name, encoding) = parse_topic_string("/leanconsensus/0x12345678/block/ssz_snappy") .expect("Failed to parse"); - + assert_eq!(prefix, "leanconsensus"); assert_eq!(fork_digest, "0x12345678"); assert_eq!(topic_name, "block"); diff --git a/lean_client/networking/src/gossipsub/types.rs b/lean_client/networking/src/gossipsub/types.rs index 8e4ca6d..4d24b94 100644 --- a/lean_client/networking/src/gossipsub/types.rs +++ b/lean_client/networking/src/gossipsub/types.rs @@ -1,7 +1,6 @@ /// Gossipsub Type Definitions /// /// Type aliases for common gossipsub types. - use containers::Bytes20; /// 20-byte message identifier. 
From b1eae9a03338e51cba3f706502f6a28aa71f4f53 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Wed, 21 Jan 2026 14:38:47 +0200 Subject: [PATCH 25/27] removing separate Bytes20 type --- lean_client/Cargo.lock | 108 +++++++++++++++--- lean_client/Cargo.toml | 1 + lean_client/containers/Cargo.toml | 1 + lean_client/containers/src/types.rs | 48 +------- .../networking/src/gossipsub/message.rs | 4 +- .../networking/src/gossipsub/tests/control.rs | 6 +- .../networking/src/gossipsub/tests/mcache.rs | 4 +- .../src/gossipsub/tests/raw_message.rs | 6 +- 8 files changed, 109 insertions(+), 69 deletions(-) diff --git a/lean_client/Cargo.lock b/lean_client/Cargo.lock index 61be8fc..ec04856 100644 --- a/lean_client/Cargo.lock +++ b/lean_client/Cargo.lock @@ -848,6 +848,7 @@ name = "containers" version = "0.1.0" dependencies = [ "env-config", + "ethereum-types 0.15.1", "hex", "leansig", "pretty_assertions", @@ -1404,8 +1405,21 @@ checksum = "c22d4b5885b6aa2fe5e8b9329fb8d232bf739e434e6b87347c63bdd00c120f60" dependencies = [ "crunchy", "fixed-hash", - "impl-rlp", - "impl-serde", + "impl-rlp 0.3.0", + "impl-serde 0.4.0", + "tiny-keccak", +] + +[[package]] +name = "ethbloom" +version = "0.14.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8c321610643004cf908ec0f5f2aa0d8f1f8e14b540562a2887a1111ff1ecbf7b" +dependencies = [ + "crunchy", + "fixed-hash", + "impl-rlp 0.4.0", + "impl-serde 0.5.0", "tiny-keccak", ] @@ -1415,14 +1429,28 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02d215cbf040552efcbe99a38372fe80ab9d00268e20012b79fcd0f073edd8ee" dependencies = [ - "ethbloom", + "ethbloom 0.13.0", "fixed-hash", - "impl-rlp", - "impl-serde", - "primitive-types", + "impl-rlp 0.3.0", + "impl-serde 0.4.0", + "primitive-types 0.12.2", "uint 0.9.5", ] +[[package]] +name = "ethereum-types" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"1ab15ed80916029f878e0267c3a9f92b67df55e79af370bf66199059ae2b4ee3" +dependencies = [ + "ethbloom 0.14.1", + "fixed-hash", + "impl-rlp 0.4.0", + "impl-serde 0.5.0", + "primitive-types 0.13.1", + "uint 0.10.0", +] + [[package]] name = "ethereum_serde_utils" version = "0.8.0" @@ -1834,7 +1862,7 @@ name = "hashing" version = "0.0.0" source = "git+https://github.com/grandinetech/grandine?branch=develop#5bdc78763c8959ad689c79d51d7d59978460bb1e" dependencies = [ - "ethereum-types", + "ethereum-types 0.14.1", "generic-array", "hex-literal", "sha2 0.10.9 (git+https://github.com/grandinetech/universal-precompiles.git?tag=sha2-v0.10.9-up.1)", @@ -2225,13 +2253,31 @@ dependencies = [ "parity-scale-codec", ] +[[package]] +name = "impl-codec" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2d40b9d5e17727407e55028eafc22b2dc68781786e6d7eb8a21103f5058e3a14" +dependencies = [ + "parity-scale-codec", +] + [[package]] name = "impl-rlp" version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f28220f89297a075ddc7245cd538076ee98b01f2a9c23a53a4f1105d5a322808" dependencies = [ - "rlp", + "rlp 0.5.2", +] + +[[package]] +name = "impl-rlp" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54ed8ad1f3877f7e775b8cbf30ed1bd3209a95401817f19a0eb4402d13f8cf90" +dependencies = [ + "rlp 0.6.1", ] [[package]] @@ -2243,6 +2289,15 @@ dependencies = [ "serde", ] +[[package]] +name = "impl-serde" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4a143eada6a1ec4aefa5049037a26a6d597bfd64f8c026d07b77133e02b7dd0b" +dependencies = [ + "serde", +] + [[package]] name = "impl-trait-for-tuples" version = "0.2.3" @@ -3659,12 +3714,25 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b34d9fd68ae0b74a41b21c03c2f62847aa0ffea044eee893b4c140b37e244e2" dependencies = [ "fixed-hash", - "impl-codec", - 
"impl-rlp", - "impl-serde", + "impl-codec 0.6.0", + "impl-rlp 0.3.0", + "impl-serde 0.4.0", "uint 0.9.5", ] +[[package]] +name = "primitive-types" +version = "0.13.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d15600a7d856470b7d278b3fe0e311fe28c2526348549f8ef2ff7db3299c87f5" +dependencies = [ + "fixed-hash", + "impl-codec 0.7.1", + "impl-rlp 0.4.0", + "impl-serde 0.5.0", + "uint 0.10.0", +] + [[package]] name = "proc-macro-crate" version = "1.1.3" @@ -4078,6 +4146,16 @@ dependencies = [ "rustc-hex", ] +[[package]] +name = "rlp" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa24e92bb2a83198bb76d661a71df9f7076b8c420b8696e4d3d97d50d94479e3" +dependencies = [ + "bytes", + "rustc-hex", +] + [[package]] name = "rstest" version = "0.18.2" @@ -4142,11 +4220,11 @@ dependencies = [ "num-integer", "num-traits", "parity-scale-codec", - "primitive-types", + "primitive-types 0.12.2", "proptest", "rand 0.8.5", "rand 0.9.2", - "rlp", + "rlp 0.5.2", "ruint-macro", "serde_core", "valuable", @@ -4639,13 +4717,13 @@ dependencies = [ "derivative", "derive_more", "easy-ext", - "ethereum-types", + "ethereum-types 0.14.1", "generic-array", "hashing", "itertools 0.14.0", "num-traits", "once_cell", - "primitive-types", + "primitive-types 0.12.2", "replace_with", "serde", "serde_utils", diff --git a/lean_client/Cargo.toml b/lean_client/Cargo.toml index 9e72c43..1ad28c6 100644 --- a/lean_client/Cargo.toml +++ b/lean_client/Cargo.toml @@ -36,6 +36,7 @@ snap = "1.1" ssz = { git = "https://github.com/grandinetech/grandine", package = "ssz", branch = "develop" } ssz-derive = { git = "https://github.com/grandinetech/grandine", package = "ssz_derive", branch = "develop" } ssz-types = "0.3.0" +ethereum-types = "0.15" tokio = { version = "1.0", features = ["full"] } tree-hash = "0.4.0" typenum = "1.19" diff --git a/lean_client/containers/Cargo.toml b/lean_client/containers/Cargo.toml index c136a17..6d29a6d 100644 
--- a/lean_client/containers/Cargo.toml +++ b/lean_client/containers/Cargo.toml @@ -23,6 +23,7 @@ serde_json = "1.0" serde_yaml = "0.9" hex = "0.4.3" sha2 = "0.10" +ethereum-types = { workspace = true } leansig = { git = "https://github.com/leanEthereum/leanSig", branch = "main", optional = true } [dev-dependencies] diff --git a/lean_client/containers/src/types.rs b/lean_client/containers/src/types.rs index fd8779f..bf01e62 100644 --- a/lean_client/containers/src/types.rs +++ b/lean_client/containers/src/types.rs @@ -1,56 +1,14 @@ +use ethereum_types::H160; use hex::FromHex; use serde::{Deserialize, Serialize}; use ssz::H256; use ssz_derive::Ssz; use std::fmt; -use std::hash::{Hash, Hasher}; use std::str::FromStr; /// 20-byte array for message IDs (gossipsub message IDs) -/// Using transparent SSZ encoding - just the raw bytes -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)] -pub struct Bytes20(pub [u8; 20]); - -impl Default for Bytes20 { - fn default() -> Self { - Bytes20([0u8; 20]) - } -} - -impl Bytes20 { - pub fn new(data: [u8; 20]) -> Self { - Bytes20(data) - } - - pub fn len(&self) -> usize { - 20 - } - - pub fn is_empty(&self) -> bool { - false - } -} - -impl Hash for Bytes20 { - fn hash(&self, state: &mut H) { - self.0.hash(state); - } -} - -impl From<&[u8]> for Bytes20 { - fn from(slice: &[u8]) -> Self { - let mut data = [0u8; 20]; - let len = slice.len().min(20); - data[..len].copy_from_slice(&slice[..len]); - Bytes20(data) - } -} - -impl AsRef<[u8]> for Bytes20 { - fn as_ref(&self) -> &[u8] { - &self.0 - } -} +/// Using H160 from ethereum_types which has SSZ support +pub type Bytes20 = H160; #[derive( Clone, Copy, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Ssz, Default, Serialize, Deserialize, diff --git a/lean_client/networking/src/gossipsub/message.rs b/lean_client/networking/src/gossipsub/message.rs index 19f40b9..11ed606 100644 --- a/lean_client/networking/src/gossipsub/message.rs +++ 
b/lean_client/networking/src/gossipsub/message.rs @@ -192,7 +192,9 @@ impl RawGossipsubMessage { preimage.extend_from_slice(&data_for_hash); let hash = Sha256::digest(&preimage); - Bytes20::from(&hash[..20]) + let mut bytes = [0u8; 20]; + bytes.copy_from_slice(&hash[..20]); + Bytes20::from(bytes) } /// Get the topic as a UTF-8 string. diff --git a/lean_client/networking/src/gossipsub/tests/control.rs b/lean_client/networking/src/gossipsub/tests/control.rs index 6d66cbc..b85fb2a 100644 --- a/lean_client/networking/src/gossipsub/tests/control.rs +++ b/lean_client/networking/src/gossipsub/tests/control.rs @@ -19,7 +19,7 @@ fn test_prune_creation() { #[test] fn test_ihave_creation() { - let msg_ids = vec![Bytes20::from(&[1u8; 20][..]), Bytes20::from(&[2u8; 20][..])]; + let msg_ids = vec![Bytes20::from([1u8; 20]), Bytes20::from([2u8; 20])]; let ihave = IHave { topic_id: "test_topic".to_string(), message_ids: msg_ids.clone(), @@ -31,7 +31,7 @@ fn test_ihave_creation() { #[test] fn test_iwant_creation() { - let msg_ids = vec![Bytes20::from(&[1u8; 20][..])]; + let msg_ids = vec![Bytes20::from([1u8; 20])]; let iwant = IWant { message_ids: msg_ids, }; @@ -41,7 +41,7 @@ fn test_iwant_creation() { #[test] fn test_idontwant_creation() { - let msg_ids = vec![Bytes20::from(&[1u8; 20][..])]; + let msg_ids = vec![Bytes20::from([1u8; 20])]; let idontwant = IDontWant { message_ids: msg_ids, }; diff --git a/lean_client/networking/src/gossipsub/tests/mcache.rs b/lean_client/networking/src/gossipsub/tests/mcache.rs index 77e87bf..3fbdf2c 100644 --- a/lean_client/networking/src/gossipsub/tests/mcache.rs +++ b/lean_client/networking/src/gossipsub/tests/mcache.rs @@ -65,7 +65,7 @@ fn test_get_gossip_ids() { #[test] fn test_seen_cache_add_and_check() { let mut cache = SeenCache::new(60); - let msg_id = Bytes20::new([1u8; 20]); + let msg_id = Bytes20::from([1u8; 20]); assert!(!cache.has(&msg_id)); assert!(cache.add(msg_id.clone(), 1000.0)); @@ -76,7 +76,7 @@ fn 
test_seen_cache_add_and_check() { #[test] fn test_seen_cache_cleanup() { let mut cache = SeenCache::new(10); - let msg_id = Bytes20::new([1u8; 20]); + let msg_id = Bytes20::from([1u8; 20]); cache.add(msg_id.clone(), 1000.0); assert!(cache.has(&msg_id)); diff --git a/lean_client/networking/src/gossipsub/tests/raw_message.rs b/lean_client/networking/src/gossipsub/tests/raw_message.rs index 1b83157..071a4d0 100644 --- a/lean_client/networking/src/gossipsub/tests/raw_message.rs +++ b/lean_client/networking/src/gossipsub/tests/raw_message.rs @@ -25,7 +25,7 @@ fn test_message_id_computation_no_snappy() { let message = RawGossipsubMessage::new(topic.to_vec(), raw_data.to_vec(), None); let message_id = message.id(); - assert_eq!(message_id.len(), 20); + assert_eq!(message_id.0.len(), 20); } #[test] @@ -40,7 +40,7 @@ fn test_message_id_computation_with_snappy() { ); let message_id = message.id(); - assert_eq!(message_id.len(), 20); + assert_eq!(message_id.0.len(), 20); } #[test] @@ -55,7 +55,7 @@ fn test_message_id_computation_snappy_fails() { ); let message_id = message.id(); - assert_eq!(message_id.len(), 20); + assert_eq!(message_id.0.len(), 20); } #[test] From cdfb8844b49aa62d4a40a0b0a97d3c66e5e2b5b5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Wed, 21 Jan 2026 14:57:56 +0200 Subject: [PATCH 26/27] Refactor gossipsub module --- .../networking/src/gossipsub/control.rs | 166 ------- .../networking/src/gossipsub/mcache.rs | 409 ---------------- lean_client/networking/src/gossipsub/mesh.rs | 452 ------------------ .../networking/src/gossipsub/message.rs | 259 ---------- lean_client/networking/src/gossipsub/mod.rs | 12 +- .../networking/src/gossipsub/tests/control.rs | 86 ---- .../networking/src/gossipsub/tests/mcache.rs | 87 ---- .../networking/src/gossipsub/tests/mesh.rs | 100 ---- .../networking/src/gossipsub/tests/message.rs | 84 ---- .../src/gossipsub/tests/message_id.rs | 188 -------- .../networking/src/gossipsub/tests/mod.rs | 6 - 
.../src/gossipsub/tests/raw_message.rs | 98 ---- lean_client/networking/src/gossipsub/types.rs | 32 -- lean_client/networking/src/network/service.rs | 90 ++-- 14 files changed, 54 insertions(+), 2015 deletions(-) delete mode 100644 lean_client/networking/src/gossipsub/control.rs delete mode 100644 lean_client/networking/src/gossipsub/mcache.rs delete mode 100644 lean_client/networking/src/gossipsub/mesh.rs delete mode 100644 lean_client/networking/src/gossipsub/message.rs delete mode 100644 lean_client/networking/src/gossipsub/tests/control.rs delete mode 100644 lean_client/networking/src/gossipsub/tests/mcache.rs delete mode 100644 lean_client/networking/src/gossipsub/tests/mesh.rs delete mode 100644 lean_client/networking/src/gossipsub/tests/message.rs delete mode 100644 lean_client/networking/src/gossipsub/tests/message_id.rs delete mode 100644 lean_client/networking/src/gossipsub/tests/raw_message.rs delete mode 100644 lean_client/networking/src/gossipsub/types.rs diff --git a/lean_client/networking/src/gossipsub/control.rs b/lean_client/networking/src/gossipsub/control.rs deleted file mode 100644 index 6059ca6..0000000 --- a/lean_client/networking/src/gossipsub/control.rs +++ /dev/null @@ -1,166 +0,0 @@ -/// Gossipsub Control Messages -/// -/// Control messages orchestrate the gossip mesh topology and message propagation. 
-/// -/// ## Overview -/// -/// Gossipsub uses control messages piggybacked on regular RPC messages to: -/// -/// - Manage mesh membership (GRAFT/PRUNE) -/// - Enable lazy message propagation (IHAVE/IWANT) -/// - Reduce bandwidth for large messages (IDONTWANT) -/// -/// ## Control Message Types -/// -/// | Message | Purpose | -/// |-------------|--------------------------------------------------------| -/// | GRAFT | Request to join a peer's mesh for a topic | -/// | PRUNE | Notify peer of removal from mesh | -/// | IHAVE | Advertise message IDs available for a topic | -/// | IWANT | Request full messages by their IDs | -/// | IDONTWANT | Signal that specific messages are not needed (v1.2) | -/// -/// ## Protocol Flow -/// -/// **Mesh Management:** -/// -/// 1. Peer A sends GRAFT to peer B for topic T -/// 2. Peer B adds A to its mesh for T (or sends PRUNE if refusing) -/// 3. Both peers now exchange full messages for topic T -/// -/// **Lazy Pull:** -/// -/// 1. Peer A receives message M, adds to cache -/// 2. Peer A sends IHAVE with M's ID to non-mesh peers -/// 3. Peer B responds with IWANT if it needs M -/// 4. Peer A sends full message M -/// -/// ## References -/// -/// - Gossipsub v1.0: -/// - Gossipsub v1.2: -use serde::{Deserialize, Serialize}; - -use super::types::MessageId; - -/// Request to join a peer's mesh for a topic. -/// -/// Sent when a peer wants to upgrade from gossip-only to full message exchange. -/// -/// The receiving peer should add the sender to its mesh unless: -/// -/// - The peer is already in the mesh -/// - The mesh is at capacity (|mesh| >= D_high) -/// - The peer is in a backoff period from a recent PRUNE -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Graft { - /// Topic identifier to join the mesh for. - pub topic_id: String, -} - -/// Notification of removal from a peer's mesh. 
-/// -/// Sent when: -/// -/// - A peer unsubscribes from a topic -/// - Mesh size exceeds D_high during heartbeat -/// - A GRAFT is rejected -/// -/// The pruned peer should not send GRAFT for this topic -/// until the backoff period expires. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Prune { - /// Topic identifier being pruned from. - pub topic_id: String, -} - -/// Advertisement of cached message IDs for a topic. -/// -/// Sent to non-mesh peers during heartbeat to enable lazy pull. -/// Recipients can request any missing messages via IWANT. -/// -/// Only includes messages from recent cache windows (mcache_gossip). -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct IHave { - /// Topic the advertised messages belong to. - pub topic_id: String, - - /// IDs of messages available in the sender's cache. - pub message_ids: Vec, -} - -/// Request for full messages by their IDs. -/// -/// Sent in response to IHAVE when the peer needs specific messages. -/// The peer should respond with the requested messages if still cached. -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct IWant { - /// IDs of messages being requested. - pub message_ids: Vec, -} - -/// Signal that specific messages are not needed. -/// -/// Introduced in gossipsub v1.2 for bandwidth optimization. -/// -/// Sent immediately after receiving a large message to tell mesh peers -/// not to forward their copy. Only used for messages exceeding the -/// IDONTWANT size threshold (typically 1KB). -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct IDontWant { - /// IDs of messages the sender does not want to receive. - pub message_ids: Vec, -} - -/// Container for aggregated control messages. -/// -/// Multiple control messages are batched into a single RPC -/// for efficiency. An RPC can contain any combination of -/// control message types. 
-/// -/// # Example -/// -/// ``` -/// use networking::gossipsub::control::*; -/// -/// let control = ControlMessage { -/// grafts: vec![Graft { topic_id: "blocks".to_string() }], -/// ihaves: vec![], -/// prunes: vec![], -/// iwants: vec![], -/// idontwants: vec![], -/// }; -/// ``` -#[derive(Debug, Clone, Default, PartialEq, Eq, Serialize, Deserialize)] -pub struct ControlMessage { - /// GRAFT messages requesting mesh membership. - #[serde(default)] - pub grafts: Vec, - - /// PRUNE messages notifying mesh removal. - #[serde(default)] - pub prunes: Vec, - - /// IHAVE messages advertising cached message IDs. - #[serde(default)] - pub ihaves: Vec, - - /// IWANT messages requesting full messages. - #[serde(default)] - pub iwants: Vec, - - /// IDONTWANT messages declining specific messages (v1.2). - #[serde(default)] - pub idontwants: Vec, -} - -impl ControlMessage { - /// Check if this control message contains no data. - pub fn is_empty(&self) -> bool { - self.grafts.is_empty() - && self.prunes.is_empty() - && self.ihaves.is_empty() - && self.iwants.is_empty() - && self.idontwants.is_empty() - } -} diff --git a/lean_client/networking/src/gossipsub/mcache.rs b/lean_client/networking/src/gossipsub/mcache.rs deleted file mode 100644 index e3b3679..0000000 --- a/lean_client/networking/src/gossipsub/mcache.rs +++ /dev/null @@ -1,409 +0,0 @@ -/// Gossipsub Message Cache -/// -/// Caches recent messages for gossip dissemination and IWANT responses. -/// -/// ## Overview -/// -/// The message cache enables the lazy pull protocol by storing messages -/// that can be requested via IWANT after receiving IHAVE advertisements. 
-/// -/// ```text -/// Peer A Peer B (non-mesh) -/// | | -/// |--- IHAVE [msg1, msg2] ------>| -/// | | -/// |<----- IWANT [msg2] ----------| -/// | | -/// |--- MESSAGE [msg2] ---------->| <- Retrieved from cache -/// ``` -/// -/// ## Sliding Window Design -/// -/// The cache is organized as a sliding window of history buckets: -/// -/// ```text -/// +----------+----------+----------+----------+ -/// | Window 0 | Window 1 | Window 2 | Window 3 | ... -/// | (newest) | | | (oldest) | -/// +----------+----------+----------+----------+ -/// ^ -/// | -/// New messages go here -/// ``` -/// -/// Each heartbeat: -/// -/// 1. Oldest window is evicted (messages cleaned up) -/// 2. New empty window is prepended -/// 3. Windows shift: 0 -> 1 -> 2 -> ... -/// -/// ## Key Parameters -/// -/// - **mcache_len** (6): Total windows retained -/// - **mcache_gossip** (3): Recent windows included in IHAVE -/// -/// Only the first `mcache_gossip` windows are advertised via IHAVE. -/// Older messages can still be retrieved via IWANT but won't be -/// actively gossiped. -/// -/// ## Seen Cache -/// -/// A separate `SeenCache` tracks message IDs for deduplication -/// without storing full messages. Uses TTL-based expiry. -/// -/// ## References -/// -/// - Gossipsub v1.0: -use std::collections::{HashMap, HashSet, VecDeque}; - -use super::message::RawGossipsubMessage; -use super::types::{MessageId, Timestamp, TopicId}; - -/// A single entry in the message cache. -/// -/// Stores the message along with its topic for efficient retrieval -/// during IWANT responses and topic-filtered IHAVE gossip. -#[derive(Debug, Clone)] -pub struct CacheEntry { - /// The cached gossipsub message. - pub message: RawGossipsubMessage, - - /// Topic this message was published to. - /// - /// Used to filter messages when generating IHAVE gossip for a specific topic. - pub topic: TopicId, -} - -/// Sliding window cache for gossipsub messages. 
-/// -/// Maintains recent messages for: -/// -/// - **IWANT responses**: Retrieve full messages by ID -/// - **IHAVE gossip**: Get message IDs for advertisement -/// -/// # Example -/// -/// ``` -/// use networking::gossipsub::mcache::MessageCache; -/// use networking::gossipsub::message::RawGossipsubMessage; -/// -/// let mut cache = MessageCache::new(6, 3); -/// -/// // Add messages -/// let msg1 = RawGossipsubMessage::new(b"topic".to_vec(), b"data1".to_vec(), None); -/// cache.put("blocks".to_string(), msg1.clone()); -/// -/// // Get message IDs for IHAVE -/// let ids = cache.get_gossip_ids("blocks"); -/// -/// // Respond to IWANT -/// let msg = cache.get(&msg1.id()); -/// -/// // Shift window (called each heartbeat) -/// let evicted = cache.shift(); -/// ``` -#[derive(Debug, Clone)] -pub struct MessageCache { - /// Number of history windows to retain. - /// - /// Messages are evicted after this many heartbeat intervals. - /// - /// Higher values increase memory usage but improve message - /// availability for late IWANT requests. - mcache_len: usize, - - /// Number of recent windows to include in IHAVE gossip. - /// - /// Only messages from the most recent windows are advertised. - /// Should be less than or equal to mcache_len. - mcache_gossip: usize, - - /// Sliding window of message ID sets. - /// - /// Index 0 is the newest window. Each heartbeat, windows shift - /// right and a new empty window is prepended. - windows: VecDeque>, - - /// Message lookup index keyed by ID. - /// - /// Provides O(1) retrieval for IWANT responses. - by_id: HashMap, -} - -impl MessageCache { - /// Create a new message cache. 
- /// - /// # Arguments - /// - /// * `mcache_len` - Number of history windows to retain - /// * `mcache_gossip` - Number of recent windows to include in IHAVE gossip - pub fn new(mcache_len: usize, mcache_gossip: usize) -> Self { - let mut windows = VecDeque::with_capacity(mcache_len); - windows.push_back(HashSet::new()); - - Self { - mcache_len, - mcache_gossip, - windows, - by_id: HashMap::new(), - } - } - - /// Add a message to the cache. - /// - /// Messages are added to the newest window (index 0) and - /// indexed for fast retrieval. Duplicates are ignored. - /// - /// # Arguments - /// - /// * `topic` - Topic this message belongs to - /// * `message` - Message to cache - /// - /// # Returns - /// - /// `true` if added (not a duplicate) - pub fn put(&mut self, topic: TopicId, message: RawGossipsubMessage) -> bool { - let msg_id = message.id(); - - if self.by_id.contains_key(&msg_id) { - return false; - } - - if let Some(window) = self.windows.front_mut() { - window.insert(msg_id.clone()); - } - - self.by_id.insert(msg_id, CacheEntry { message, topic }); - true - } - - /// Retrieve a message by ID. - /// - /// Used to respond to IWANT requests from peers. - /// - /// # Arguments - /// - /// * `msg_id` - Message ID to look up - /// - /// # Returns - /// - /// The cached message, or `None` if not found/evicted - pub fn get(&self, msg_id: &MessageId) -> Option<&RawGossipsubMessage> { - self.by_id.get(msg_id).map(|entry| &entry.message) - } - - /// Check if a message is cached. - /// - /// # Arguments - /// - /// * `msg_id` - Message ID to check - /// - /// # Returns - /// - /// `true` if the message is in the cache - pub fn has(&self, msg_id: &MessageId) -> bool { - self.by_id.contains_key(msg_id) - } - - /// Get message IDs for IHAVE gossip. - /// - /// Returns IDs from the most recent `mcache_gossip` windows - /// that belong to the specified topic. 
- /// - /// # Arguments - /// - /// * `topic` - Topic to filter messages by - /// - /// # Returns - /// - /// List of message IDs for IHAVE advertisement - pub fn get_gossip_ids(&self, topic: &str) -> Vec { - let mut result = Vec::new(); - let windows_to_check = self.mcache_gossip.min(self.windows.len()); - - for i in 0..windows_to_check { - if let Some(window) = self.windows.get(i) { - for msg_id in window { - if let Some(entry) = self.by_id.get(msg_id) { - if entry.topic == topic { - result.push(msg_id.clone()); - } - } - } - } - } - - result - } - - /// Shift the cache window, evicting the oldest. - /// - /// Called at each heartbeat to age the cache: - /// - /// 1. If at capacity, remove oldest window and its messages - /// 2. Prepend new empty window - /// - /// # Returns - /// - /// Number of messages evicted - pub fn shift(&mut self) -> usize { - let mut evicted = 0; - - if self.windows.len() >= self.mcache_len { - if let Some(oldest) = self.windows.pop_back() { - for msg_id in oldest { - if self.by_id.remove(&msg_id).is_some() { - evicted += 1; - } - } - } - } - - self.windows.push_front(HashSet::new()); - evicted - } - - /// Clear all cached messages. - pub fn clear(&mut self) { - self.windows.clear(); - self.windows.push_back(HashSet::new()); - self.by_id.clear(); - } - - /// Get the total number of cached messages. - pub fn len(&self) -> usize { - self.by_id.len() - } - - /// Check if the cache is empty. - pub fn is_empty(&self) -> bool { - self.by_id.is_empty() - } -} - -/// TTL-based cache for deduplicating messages. -/// -/// Tracks message IDs that have been seen to prevent reprocessing -/// duplicates. Unlike `MessageCache`, this only stores IDs (not -/// full messages) with time-based expiry. 
-/// -/// ## Use Cases -/// -/// - Skip processing of already-seen messages -/// - Avoid forwarding duplicates to mesh peers -/// - Bound memory with automatic TTL cleanup -#[derive(Debug, Clone)] -pub struct SeenCache { - /// Time-to-live for entries in seconds. - /// - /// Entries older than this are removed during cleanup. - /// - /// Should be: - /// - long enough to cover network propagation, - /// - short enough to bound memory usage. - ttl_seconds: u64, - - /// Set of message IDs that have been seen. - /// - /// Provides O(1) membership testing. - seen: HashSet, - - /// Timestamp when each message was first seen. - /// - /// Used to determine expiry during cleanup. - timestamps: HashMap, -} - -impl SeenCache { - /// Create a new seen cache. - /// - /// # Arguments - /// - /// * `ttl_seconds` - Time-to-live for entries in seconds - pub fn new(ttl_seconds: u64) -> Self { - Self { - ttl_seconds, - seen: HashSet::new(), - timestamps: HashMap::new(), - } - } - - /// Mark a message as seen. - /// - /// # Arguments - /// - /// * `msg_id` - Message ID to mark as seen - /// * `timestamp` - Current Unix timestamp - /// - /// # Returns - /// - /// `true` if newly seen (not a duplicate) - pub fn add(&mut self, msg_id: MessageId, timestamp: Timestamp) -> bool { - if self.seen.contains(&msg_id) { - return false; - } - - self.seen.insert(msg_id.clone()); - self.timestamps.insert(msg_id, timestamp); - true - } - - /// Check if a message has been seen. - /// - /// # Arguments - /// - /// * `msg_id` - Message ID to check - /// - /// # Returns - /// - /// `true` if the message has been seen - pub fn has(&self, msg_id: &MessageId) -> bool { - self.seen.contains(msg_id) - } - - /// Remove expired entries. - /// - /// Should be called periodically (e.g., each heartbeat) - /// to prevent unbounded memory growth. 
- /// - /// # Arguments - /// - /// * `current_time` - Current Unix timestamp - /// - /// # Returns - /// - /// Number of entries removed - pub fn cleanup(&mut self, current_time: f64) -> usize { - let cutoff = current_time - self.ttl_seconds as f64; - let expired: Vec = self - .timestamps - .iter() - .filter(|(_, ts)| **ts < cutoff) - .map(|(id, _)| id.clone()) - .collect(); - - let count = expired.len(); - for msg_id in expired { - self.seen.remove(&msg_id); - self.timestamps.remove(&msg_id); - } - - count - } - - /// Clear all seen entries. - pub fn clear(&mut self) { - self.seen.clear(); - self.timestamps.clear(); - } - - /// Get the number of seen message IDs. - pub fn len(&self) -> usize { - self.seen.len() - } - - /// Check if the seen cache is empty. - pub fn is_empty(&self) -> bool { - self.seen.is_empty() - } -} diff --git a/lean_client/networking/src/gossipsub/mesh.rs b/lean_client/networking/src/gossipsub/mesh.rs deleted file mode 100644 index d4efbc5..0000000 --- a/lean_client/networking/src/gossipsub/mesh.rs +++ /dev/null @@ -1,452 +0,0 @@ -/// Gossipsub Mesh State -/// -/// Manages the mesh topology for gossipsub topics. -/// -/// ## Overview -/// -/// Each subscribed topic maintains a **mesh**: a set of peers for full -/// message exchange. The mesh is the core data structure enabling -/// gossipsub's eager push protocol. -/// -/// - **Mesh peers**: Exchange full messages immediately (eager push) -/// - **Non-mesh peers**: Receive IHAVE advertisements, request via IWANT (lazy pull) -/// -/// ## Mesh vs Fanout -/// -/// | Type | Description | -/// |--------|-----------------------------------------------------------| -/// | Mesh | Peers for topics we subscribe to | -/// | Fanout | Temporary peers for topics we publish to but don't | -/// | | subscribe to. Expires after fanout_ttl. | -/// -/// ## Heartbeat Maintenance -/// -/// The mesh is maintained through periodic heartbeat: -/// -/// 1. **Graft** if |mesh| < D_low: add peers up to D -/// 2. 
**Prune** if |mesh| > D_high: remove peers down to D -/// 3. **Gossip**: send IHAVE to D_lazy non-mesh peers -/// -/// ## References -/// -/// - Gossipsub v1.0: -use rand::seq::SliceRandom; -use std::collections::{HashMap, HashSet}; -use std::time::{SystemTime, UNIX_EPOCH}; - -use super::config::GossipsubParameters; -use super::types::{PeerId, TopicId}; - -/// Fanout state for a publish-only topic. -/// -/// Tracks peers used when publishing to topics we don't subscribe to. -/// Fanout entries expire after a period of inactivity (fanout_ttl). -/// -/// Unlike mesh peers, fanout peers only receive our published messages. -/// We don't receive their messages since we're not subscribed. -#[derive(Debug, Clone)] -pub struct FanoutEntry { - /// Peers in the fanout for this topic. - /// - /// Selected randomly from available topic peers, up to D peers. - pub peers: HashSet, - - /// Unix timestamp of the last publish to this topic. - /// - /// Used to determine if the entry has expired. - pub last_published: f64, -} - -impl FanoutEntry { - /// Create a new empty fanout entry. - pub fn new() -> Self { - Self { - peers: HashSet::new(), - last_published: 0.0, - } - } - - /// Check if this fanout entry has expired. - /// - /// # Arguments - /// - /// * `current_time` - Current Unix timestamp - /// * `ttl` - Time-to-live in seconds - /// - /// # Returns - /// - /// `true` if the entry hasn't been used within ttl seconds - pub fn is_stale(&self, current_time: f64, ttl: f64) -> bool { - current_time - self.last_published > ttl - } -} - -impl Default for FanoutEntry { - fn default() -> Self { - Self::new() - } -} - -/// Mesh state for a single topic. -/// -/// Represents the set of peers we exchange full messages with -/// for a specific topic. Mesh membership is managed via -/// GRAFT and PRUNE control messages. -#[derive(Debug, Clone)] -pub struct TopicMesh { - /// Peers in the mesh for this topic. 
- /// - /// These peers receive all published messages immediately - /// and forward all received messages to us. - pub peers: HashSet, -} - -impl TopicMesh { - /// Create a new empty topic mesh. - pub fn new() -> Self { - Self { - peers: HashSet::new(), - } - } - - /// Add a peer to this topic's mesh. - /// - /// # Arguments - /// - /// * `peer_id` - Peer to add - /// - /// # Returns - /// - /// `true` if the peer was added, `false` if already present - pub fn add_peer(&mut self, peer_id: PeerId) -> bool { - self.peers.insert(peer_id) - } - - /// Remove a peer from this topic's mesh. - /// - /// # Arguments - /// - /// * `peer_id` - Peer to remove - /// - /// # Returns - /// - /// `true` if the peer was removed, `false` if not present - pub fn remove_peer(&mut self, peer_id: &PeerId) -> bool { - self.peers.remove(peer_id) - } -} - -impl Default for TopicMesh { - fn default() -> Self { - Self::new() - } -} - -/// Complete mesh state for all subscribed topics. -/// -/// Central data structure managing mesh topology across all topics. -/// Provides operations for subscription management, peer tracking, -/// and gossip peer selection. 
-/// -/// # Example -/// -/// ``` -/// use networking::gossipsub::mesh::MeshState; -/// use networking::gossipsub::config::GossipsubParameters; -/// use std::collections::HashSet; -/// -/// let mut state = MeshState::new(GossipsubParameters::default()); -/// -/// // Subscribe and build mesh -/// state.subscribe("blocks".to_string()); -/// state.add_to_mesh("blocks", "peer1".to_string()); -/// state.add_to_mesh("blocks", "peer2".to_string()); -/// -/// // Get mesh peers for message forwarding -/// let peers = state.get_mesh_peers("blocks"); -/// -/// // Select peers for IHAVE gossip -/// let all_peers: HashSet<_> = vec!["peer1", "peer2", "peer3", "peer4"] -/// .into_iter() -/// .map(String::from) -/// .collect(); -/// let gossip_peers = state.select_peers_for_gossip("blocks", &all_peers); -/// ``` -#[derive(Debug, Clone)] -pub struct MeshState { - /// Gossipsub parameters controlling mesh behavior. - params: GossipsubParameters, - - /// Mesh state for each subscribed topic. Keyed by topic ID. - meshes: HashMap, - - /// Fanout state for publish-only topics. Keyed by topic ID. - fanouts: HashMap, - - /// Set of topics we are subscribed to. - subscriptions: HashSet, -} - -impl MeshState { - /// Create a new mesh state with the given parameters. - pub fn new(params: GossipsubParameters) -> Self { - Self { - params, - meshes: HashMap::new(), - fanouts: HashMap::new(), - subscriptions: HashSet::new(), - } - } - - /// Get the target mesh size per topic. - pub fn d(&self) -> usize { - self.params.d - } - - /// Get the low watermark - graft when mesh is smaller. - pub fn d_low(&self) -> usize { - self.params.d_low - } - - /// Get the high watermark - prune when mesh is larger. - pub fn d_high(&self) -> usize { - self.params.d_high - } - - /// Get the number of peers for IHAVE gossip. - pub fn d_lazy(&self) -> usize { - self.params.d_lazy - } - - /// Subscribe to a topic, initializing its mesh. 
- /// - /// If we have fanout peers for this topic, they are - /// promoted to the mesh automatically. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier to subscribe to - pub fn subscribe(&mut self, topic: TopicId) { - if self.subscriptions.contains(&topic) { - return; - } - - self.subscriptions.insert(topic.clone()); - - // Promote fanout peers to mesh if any - let mut mesh = TopicMesh::new(); - if let Some(fanout) = self.fanouts.remove(&topic) { - mesh.peers = fanout.peers; - } - self.meshes.insert(topic, mesh); - } - - /// Unsubscribe from a topic. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier to unsubscribe from - /// - /// # Returns - /// - /// Set of peers that were in the mesh (need PRUNE) - pub fn unsubscribe(&mut self, topic: &TopicId) -> HashSet { - self.subscriptions.remove(topic); - self.meshes - .remove(topic) - .map(|mesh| mesh.peers) - .unwrap_or_default() - } - - /// Check if subscribed to a topic. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier to check - /// - /// # Returns - /// - /// `true` if subscribed - pub fn is_subscribed(&self, topic: &TopicId) -> bool { - self.subscriptions.contains(topic) - } - - /// Get mesh peers for a topic. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier - /// - /// # Returns - /// - /// Copy of the mesh peer set, or empty set if not subscribed - pub fn get_mesh_peers(&self, topic: &str) -> HashSet { - self.meshes - .get(topic) - .map(|mesh| mesh.peers.clone()) - .unwrap_or_default() - } - - /// Add a peer to a topic's mesh. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier - /// * `peer_id` - Peer to add - /// - /// # Returns - /// - /// - `true` if added, - /// - `false` if already present or not subscribed - pub fn add_to_mesh(&mut self, topic: &str, peer_id: PeerId) -> bool { - if let Some(mesh) = self.meshes.get_mut(topic) { - mesh.add_peer(peer_id) - } else { - false - } - } - - /// Remove a peer from a topic's mesh. 
- /// - /// # Arguments - /// - /// * `topic` - Topic identifier - /// * `peer_id` - Peer to remove - /// - /// # Returns - /// - /// - `true` if removed, - /// - `false` if not present or not subscribed - pub fn remove_from_mesh(&mut self, topic: &str, peer_id: &PeerId) -> bool { - if let Some(mesh) = self.meshes.get_mut(topic) { - mesh.remove_peer(peer_id) - } else { - false - } - } - - /// Get fanout peers for a topic. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier - /// - /// # Returns - /// - /// Copy of the fanout peer set, or empty set if none - pub fn get_fanout_peers(&self, topic: &str) -> HashSet { - self.fanouts - .get(topic) - .map(|fanout| fanout.peers.clone()) - .unwrap_or_default() - } - - /// Update fanout for publishing to a non-subscribed topic. - /// - /// For subscribed topics, returns mesh peers instead. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier - /// * `available_peers` - All known peers for this topic - /// - /// # Returns - /// - /// Peers to publish to (mesh or fanout) - pub fn update_fanout( - &mut self, - topic: &str, - available_peers: &HashSet, - ) -> HashSet { - if self.subscriptions.contains(topic) { - return self.get_mesh_peers(topic); - } - - let d = self.d(); - let fanout = self - .fanouts - .entry(topic.to_string()) - .or_insert_with(FanoutEntry::new); - - fanout.last_published = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs_f64(); - - // Fill fanout up to D peers - if fanout.peers.len() < d { - let candidates: Vec<_> = available_peers.difference(&fanout.peers).cloned().collect(); - let needed = d - fanout.peers.len(); - let mut rng = rand::thread_rng(); - let new_peers: Vec<_> = candidates - .choose_multiple(&mut rng, needed.min(candidates.len())) - .cloned() - .collect(); - fanout.peers.extend(new_peers); - } - - fanout.peers.clone() - } - - /// Remove expired fanout entries. 
- /// - /// # Arguments - /// - /// * `ttl` - Time-to-live in seconds - /// - /// # Returns - /// - /// Number of entries removed - pub fn cleanup_fanouts(&mut self, ttl: f64) -> usize { - let current_time = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_secs_f64(); - - let stale: Vec<_> = self - .fanouts - .iter() - .filter(|(_, fanout)| fanout.is_stale(current_time, ttl)) - .map(|(topic, _)| topic.clone()) - .collect(); - - let count = stale.len(); - for topic in stale { - self.fanouts.remove(&topic); - } - - count - } - - /// Select non-mesh peers for IHAVE gossip. - /// - /// Randomly selects up to D_lazy peers from those not in the mesh. - /// These peers receive IHAVE messages during heartbeat. - /// - /// # Arguments - /// - /// * `topic` - Topic identifier - /// * `all_topic_peers` - All known peers subscribed to this topic - /// - /// # Returns - /// - /// List of peers to send IHAVE gossip to - pub fn select_peers_for_gossip( - &self, - topic: &str, - all_topic_peers: &HashSet, - ) -> Vec { - let mesh_peers = self.get_mesh_peers(topic); - let candidates: Vec<_> = all_topic_peers.difference(&mesh_peers).cloned().collect(); - - if candidates.len() <= self.d_lazy() { - return candidates; - } - - let mut rng = rand::thread_rng(); - candidates - .choose_multiple(&mut rng, self.d_lazy()) - .cloned() - .collect() - } -} diff --git a/lean_client/networking/src/gossipsub/message.rs b/lean_client/networking/src/gossipsub/message.rs deleted file mode 100644 index 11ed606..0000000 --- a/lean_client/networking/src/gossipsub/message.rs +++ /dev/null @@ -1,259 +0,0 @@ -/// Gossipsub Message -/// -/// Message representation and ID computation for the gossipsub protocol. -/// -/// ## Overview -/// -/// Each gossipsub message carries a topic and payload. Messages are -/// identified by a 20-byte ID computed from their contents. 
-/// -/// ## Message ID Function -/// -/// Ethereum consensus uses a custom message ID function based on SHA256: -/// -/// ```text -/// message_id = SHA256(domain + uint64_le(len(topic)) + topic + data)[:20] -/// ``` -/// -/// **Components:** -/// -/// | Component | Description | -/// |-----------------|--------------------------------------------------------| -/// | domain | 1-byte prefix indicating snappy validity (0x00/0x01) | -/// | uint64_le | Topic length as 8-byte little-endian integer | -/// | topic | Topic string as UTF-8 bytes | -/// | data | Message payload (decompressed if snappy is valid) | -/// -/// **Domain Bytes:** -/// -/// - `0x01` (VALID_SNAPPY): Snappy decompression succeeded, use decompressed data -/// - `0x00` (INVALID_SNAPPY): Decompression failed or no decompressor, use raw data -/// -/// This ensures messages with compression issues get different IDs, -/// preventing cache pollution from invalid variants. -/// -/// ## Snappy Compression -/// -/// Ethereum consensus requires SSZ data to be snappy-compressed. -/// The message ID computation attempts decompression to determine -/// which domain byte to use. -/// -/// ## References -/// -/// - [Ethereum P2P spec](https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/p2p-interface.md) -/// - [Gossipsub v1.0](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) -use containers::Bytes20; -use sha2::{Digest, Sha256}; -use std::sync::Arc; - -use crate::types::{MESSAGE_DOMAIN_INVALID_SNAPPY, MESSAGE_DOMAIN_VALID_SNAPPY}; - -/// Trait for snappy decompression functions. -/// -/// Any type implementing this trait can be used for decompression. -/// The function should return an error if decompression fails. -pub trait SnappyDecompressor: Send + Sync { - /// Decompress snappy-compressed data. 
- /// - /// # Arguments - /// - /// * `data` - Compressed bytes - /// - /// # Returns - /// - /// Decompressed bytes, or an error if decompression fails - fn decompress(&self, data: &[u8]) -> Result, Box>; -} - -/// A raw gossipsub message with lazy ID computation. -/// -/// Encapsulates topic, payload, and message ID logic. The ID is -/// computed lazily on first access and cached thereafter. -/// -/// ## Message ID Computation -/// -/// The 20-byte ID is computed as: -/// -/// ```text -/// SHA256(domain + uint64_le(len(topic)) + topic + data)[:20] -/// ``` -/// -/// Where `domain` depends on snappy decompression success. -#[derive(Clone)] -pub struct RawGossipsubMessage { - /// Topic string as UTF-8 encoded bytes. - /// - /// Example: `b"/leanconsensus/0x12345678/block/ssz_snappy"` - pub topic: Vec, - - /// Raw message payload. - /// - /// Typically snappy-compressed SSZ data. The actual content - /// depends on the topic (block, attestation, etc.). - pub raw_data: Vec, - - /// Optional snappy decompression function. - /// - /// If provided, decompression is attempted during ID computation - /// to determine the domain byte. - snappy_decompress: Option>, - - /// Cached message ID. - /// - /// Computed lazily on first access to `id()` method. Once computed, - /// the same ID is returned for all subsequent accesses. - cached_id: Option, -} - -impl RawGossipsubMessage { - /// Create a new gossipsub message. - /// - /// # Arguments - /// - /// * `topic` - Topic string as bytes - /// * `raw_data` - Raw message payload - /// * `snappy_decompress` - Optional decompression function - pub fn new( - topic: Vec, - raw_data: Vec, - snappy_decompress: Option>, - ) -> Self { - Self { - topic, - raw_data, - snappy_decompress, - cached_id: None, - } - } - - /// Get the 20-byte message ID. - /// - /// Computed lazily on first access using the Ethereum consensus - /// message ID function. The result is cached. 
- /// - /// # Returns - /// - /// 20-byte message ID (Bytes20) - pub fn id(&self) -> Bytes20 { - if let Some(id) = &self.cached_id { - return id.clone(); - } - - // Compute ID - let id = Self::compute_id(&self.topic, &self.raw_data, self.snappy_decompress.as_ref()); - - // Note: We can't cache here because self is immutable - // In practice, callers should use a mutable reference or compute once - id - } - - /// Compute a 20-byte message ID from raw data. - /// - /// Implements the Ethereum consensus message ID function: - /// - /// ```text - /// SHA256(domain + uint64_le(len(topic)) + topic + data)[:20] - /// ``` - /// - /// ## Domain Selection - /// - /// - If `snappy_decompress` is provided and succeeds: - /// domain = 0x01, use decompressed data - /// - Otherwise: - /// domain = 0x00, use raw data - /// - /// # Arguments - /// - /// * `topic` - Topic string as bytes - /// * `data` - Message payload (potentially compressed) - /// * `snappy_decompress` - Optional decompression function - /// - /// # Returns - /// - /// 20-byte message ID - pub fn compute_id( - topic: &[u8], - data: &[u8], - snappy_decompress: Option<&Arc>, - ) -> Bytes20 { - let (domain, data_for_hash) = if let Some(decompressor) = snappy_decompress { - match decompressor.decompress(data) { - Ok(decompressed) => (MESSAGE_DOMAIN_VALID_SNAPPY, decompressed), - Err(_) => (MESSAGE_DOMAIN_INVALID_SNAPPY, data.to_vec()), - } - } else { - (MESSAGE_DOMAIN_INVALID_SNAPPY, data.to_vec()) - }; - - let mut preimage = Vec::new(); - preimage.extend_from_slice(domain); - preimage.extend_from_slice(&(topic.len() as u64).to_le_bytes()); - preimage.extend_from_slice(topic); - preimage.extend_from_slice(&data_for_hash); - - let hash = Sha256::digest(&preimage); - let mut bytes = [0u8; 20]; - bytes.copy_from_slice(&hash[..20]); - Bytes20::from(bytes) - } - - /// Get the topic as a UTF-8 string. 
- /// - /// # Returns - /// - /// Topic decoded from bytes to string - pub fn topic_str(&self) -> String { - String::from_utf8_lossy(&self.topic).to_string() - } -} - -impl PartialEq for RawGossipsubMessage { - fn eq(&self, other: &Self) -> bool { - self.id() == other.id() - } -} - -impl Eq for RawGossipsubMessage {} - -impl std::fmt::Debug for RawGossipsubMessage { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - f.debug_struct("RawGossipsubMessage") - .field("topic", &self.topic_str()) - .field("raw_data_len", &self.raw_data.len()) - .field("cached_id", &self.cached_id) - .finish() - } -} - -impl std::hash::Hash for RawGossipsubMessage { - fn hash(&self, state: &mut H) { - self.id().hash(state); - } -} - -use crate::gossipsub::topic::GossipsubKind; -use crate::gossipsub::topic::GossipsubTopic; -use containers::SignedAttestation; -use containers::SignedBlockWithAttestation; -use containers::ssz::SszReadDefault; -use libp2p::gossipsub::TopicHash; - -/// Decoded gossipsub message by type. 
-pub enum GossipsubMessage { - Block(SignedBlockWithAttestation), - Attestation(SignedAttestation), -} - -impl GossipsubMessage { - pub fn decode(topic: &TopicHash, data: &[u8]) -> Result { - match GossipsubTopic::decode(topic)?.kind { - GossipsubKind::Block => Ok(Self::Block( - SignedBlockWithAttestation::from_ssz_default(data) - .map_err(|e| format!("{:?}", e))?, - )), - GossipsubKind::Attestation => Ok(Self::Attestation( - SignedAttestation::from_ssz_default(data).map_err(|e| format!("{:?}", e))?, - )), - } - } -} diff --git a/lean_client/networking/src/gossipsub/mod.rs b/lean_client/networking/src/gossipsub/mod.rs index 1251801..169a670 100644 --- a/lean_client/networking/src/gossipsub/mod.rs +++ b/lean_client/networking/src/gossipsub/mod.rs @@ -1,10 +1,5 @@ pub mod config; -pub mod control; -pub mod mcache; -pub mod mesh; -pub mod message; pub mod topic; -pub mod types; #[cfg(test)] mod tests; @@ -15,13 +10,8 @@ use libp2p::gossipsub::{AllowAllSubscriptionFilter, Behaviour}; pub type GossipsubBehaviour = Behaviour; // Re-export commonly used types -pub use config::{GossipsubConfig, GossipsubParameters}; -pub use control::{ControlMessage, Graft, IDontWant, IHave, IWant, Prune}; -pub use mcache::{CacheEntry, MessageCache, SeenCache}; -pub use mesh::{FanoutEntry, MeshState, TopicMesh}; -pub use message::{GossipsubMessage, RawGossipsubMessage, SnappyDecompressor}; +pub use config::{GossipsubConfig, GossipsubParameters, compute_message_id}; pub use topic::{ ATTESTATION_TOPIC, BLOCK_TOPIC, GossipsubKind, GossipsubTopic, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, format_topic_string, get_topics, parse_topic_string, }; -pub use types::{MessageId, PeerId, Timestamp, TopicId}; diff --git a/lean_client/networking/src/gossipsub/tests/control.rs b/lean_client/networking/src/gossipsub/tests/control.rs deleted file mode 100644 index b85fb2a..0000000 --- a/lean_client/networking/src/gossipsub/tests/control.rs +++ /dev/null @@ -1,86 +0,0 @@ -use 
crate::gossipsub::control::{ControlMessage, Graft, IDontWant, IHave, IWant, Prune}; -use containers::Bytes20; - -#[test] -fn test_graft_creation() { - let graft = Graft { - topic_id: "test_topic".to_string(), - }; - assert_eq!(graft.topic_id, "test_topic"); -} - -#[test] -fn test_prune_creation() { - let prune = Prune { - topic_id: "test_topic".to_string(), - }; - assert_eq!(prune.topic_id, "test_topic"); -} - -#[test] -fn test_ihave_creation() { - let msg_ids = vec![Bytes20::from([1u8; 20]), Bytes20::from([2u8; 20])]; - let ihave = IHave { - topic_id: "test_topic".to_string(), - message_ids: msg_ids.clone(), - }; - - assert_eq!(ihave.topic_id, "test_topic"); - assert_eq!(ihave.message_ids.len(), 2); -} - -#[test] -fn test_iwant_creation() { - let msg_ids = vec![Bytes20::from([1u8; 20])]; - let iwant = IWant { - message_ids: msg_ids, - }; - - assert_eq!(iwant.message_ids.len(), 1); -} - -#[test] -fn test_idontwant_creation() { - let msg_ids = vec![Bytes20::from([1u8; 20])]; - let idontwant = IDontWant { - message_ids: msg_ids, - }; - - assert_eq!(idontwant.message_ids.len(), 1); -} - -#[test] -fn test_control_message_aggregation() { - let graft = Graft { - topic_id: "topic1".to_string(), - }; - let prune = Prune { - topic_id: "topic2".to_string(), - }; - - let control = ControlMessage { - grafts: vec![graft], - prunes: vec![prune], - ihaves: vec![], - iwants: vec![], - idontwants: vec![], - }; - - assert_eq!(control.grafts.len(), 1); - assert_eq!(control.prunes.len(), 1); - assert!(!control.is_empty()); -} - -#[test] -fn test_control_message_empty_check() { - let empty_control = ControlMessage::default(); - assert!(empty_control.is_empty()); - - let non_empty = ControlMessage { - grafts: vec![Graft { - topic_id: "topic".to_string(), - }], - ..Default::default() - }; - assert!(!non_empty.is_empty()); -} diff --git a/lean_client/networking/src/gossipsub/tests/mcache.rs b/lean_client/networking/src/gossipsub/tests/mcache.rs deleted file mode 100644 index 
3fbdf2c..0000000 --- a/lean_client/networking/src/gossipsub/tests/mcache.rs +++ /dev/null @@ -1,87 +0,0 @@ -use crate::gossipsub::mcache::{MessageCache, SeenCache}; -use crate::gossipsub::message::RawGossipsubMessage; -use containers::Bytes20; - -#[test] -fn test_cache_put_and_get() { - let mut cache = MessageCache::new(6, 3); - let message = RawGossipsubMessage::new(b"topic".to_vec(), b"data".to_vec(), None); - - assert!(cache.put("topic".to_string(), message.clone())); - assert!(!cache.put("topic".to_string(), message.clone())); // Duplicate - - let retrieved = cache.get(&message.id()); - assert!(retrieved.is_some()); - assert_eq!(retrieved.unwrap().id(), message.id()); -} - -#[test] -fn test_cache_has() { - let mut cache = MessageCache::new(6, 3); - let message = RawGossipsubMessage::new(b"topic".to_vec(), b"data".to_vec(), None); - - assert!(!cache.has(&message.id())); - cache.put("topic".to_string(), message.clone()); - assert!(cache.has(&message.id())); -} - -#[test] -fn test_cache_shift() { - let mut cache = MessageCache::new(3, 2); - - let mut messages = Vec::new(); - for i in 0..5 { - let msg = - RawGossipsubMessage::new(b"topic".to_vec(), format!("data{}", i).into_bytes(), None); - cache.put("topic".to_string(), msg.clone()); - messages.push(msg); - cache.shift(); - } - - // Old messages should be evicted - assert!(!cache.has(&messages[0].id())); - assert!(!cache.has(&messages[1].id())); -} - -#[test] -fn test_get_gossip_ids() { - let mut cache = MessageCache::new(6, 3); - - let msg1 = RawGossipsubMessage::new(b"topic1".to_vec(), b"data1".to_vec(), None); - let msg2 = RawGossipsubMessage::new(b"topic2".to_vec(), b"data2".to_vec(), None); - let msg3 = RawGossipsubMessage::new(b"topic1".to_vec(), b"data3".to_vec(), None); - - cache.put("topic1".to_string(), msg1.clone()); - cache.put("topic2".to_string(), msg2.clone()); - cache.put("topic1".to_string(), msg3.clone()); - - let gossip_ids = cache.get_gossip_ids("topic1"); - - 
assert!(gossip_ids.contains(&msg1.id())); - assert!(!gossip_ids.contains(&msg2.id())); - assert!(gossip_ids.contains(&msg3.id())); -} - -#[test] -fn test_seen_cache_add_and_check() { - let mut cache = SeenCache::new(60); - let msg_id = Bytes20::from([1u8; 20]); - - assert!(!cache.has(&msg_id)); - assert!(cache.add(msg_id.clone(), 1000.0)); - assert!(cache.has(&msg_id)); - assert!(!cache.add(msg_id.clone(), 1001.0)); // Duplicate -} - -#[test] -fn test_seen_cache_cleanup() { - let mut cache = SeenCache::new(10); - let msg_id = Bytes20::from([1u8; 20]); - - cache.add(msg_id.clone(), 1000.0); - assert!(cache.has(&msg_id)); - - let removed = cache.cleanup(1015.0); - assert_eq!(removed, 1); - assert!(!cache.has(&msg_id)); -} diff --git a/lean_client/networking/src/gossipsub/tests/mesh.rs b/lean_client/networking/src/gossipsub/tests/mesh.rs deleted file mode 100644 index 5f10cd9..0000000 --- a/lean_client/networking/src/gossipsub/tests/mesh.rs +++ /dev/null @@ -1,100 +0,0 @@ -use crate::gossipsub::config::GossipsubParameters; -use crate::gossipsub::mesh::{FanoutEntry, MeshState, TopicMesh}; -use std::collections::HashSet; - -#[test] -fn test_mesh_state_initialization() { - let params = GossipsubParameters { - d: 8, - d_low: 6, - d_high: 12, - d_lazy: 6, - ..Default::default() - }; - let mesh = MeshState::new(params); - - assert_eq!(mesh.d(), 8); - assert_eq!(mesh.d_low(), 6); - assert_eq!(mesh.d_high(), 12); - assert_eq!(mesh.d_lazy(), 6); -} - -#[test] -fn test_subscribe_and_unsubscribe() { - let mesh = &mut MeshState::new(GossipsubParameters::default()); - - mesh.subscribe("topic1".to_string()); - assert!(mesh.is_subscribed(&"topic1".to_string())); - assert!(!mesh.is_subscribed(&"topic2".to_string())); - - let peers = mesh.unsubscribe(&"topic1".to_string()); - assert!(!mesh.is_subscribed(&"topic1".to_string())); - assert!(peers.is_empty()); -} - -#[test] -fn test_add_remove_mesh_peers() { - let mesh = &mut MeshState::new(GossipsubParameters::default()); - 
mesh.subscribe("topic1".to_string()); - - assert!(mesh.add_to_mesh("topic1", "peer1".to_string())); - assert!(mesh.add_to_mesh("topic1", "peer2".to_string())); - assert!(!mesh.add_to_mesh("topic1", "peer1".to_string())); // Already in mesh - - let peers = mesh.get_mesh_peers("topic1"); - assert!(peers.contains("peer1")); - assert!(peers.contains("peer2")); - - assert!(mesh.remove_from_mesh("topic1", &"peer1".to_string())); - assert!(!mesh.remove_from_mesh("topic1", &"peer1".to_string())); // Already removed - - let peers = mesh.get_mesh_peers("topic1"); - assert!(!peers.contains("peer1")); - assert!(peers.contains("peer2")); -} - -#[test] -fn test_gossip_peer_selection() { - let params = GossipsubParameters { - d_lazy: 3, - ..Default::default() - }; - let mesh = &mut MeshState::new(params); - mesh.subscribe("topic1".to_string()); - mesh.add_to_mesh("topic1", "peer1".to_string()); - mesh.add_to_mesh("topic1", "peer2".to_string()); - - let all_peers: HashSet<_> = vec!["peer1", "peer2", "peer3", "peer4", "peer5", "peer6"] - .into_iter() - .map(String::from) - .collect(); - - let gossip_peers = mesh.select_peers_for_gossip("topic1", &all_peers); - - let mesh_peers = mesh.get_mesh_peers("topic1"); - for peer in &gossip_peers { - assert!(!mesh_peers.contains(peer)); - } -} - -#[test] -fn test_topic_mesh_add_remove() { - let topic_mesh = &mut TopicMesh::new(); - - assert!(topic_mesh.add_peer("peer1".to_string())); - assert!(!topic_mesh.add_peer("peer1".to_string())); // Already exists - assert!(topic_mesh.peers.contains("peer1")); - - assert!(topic_mesh.remove_peer(&"peer1".to_string())); - assert!(!topic_mesh.remove_peer(&"peer1".to_string())); // Already removed - assert!(!topic_mesh.peers.contains("peer1")); -} - -#[test] -fn test_fanout_entry_staleness() { - let mut entry = FanoutEntry::new(); - entry.last_published = 1000.0; - - assert!(!entry.is_stale(1050.0, 60.0)); - assert!(entry.is_stale(1070.0, 60.0)); -} diff --git 
a/lean_client/networking/src/gossipsub/tests/message.rs b/lean_client/networking/src/gossipsub/tests/message.rs deleted file mode 100644 index 9fd25dd..0000000 --- a/lean_client/networking/src/gossipsub/tests/message.rs +++ /dev/null @@ -1,84 +0,0 @@ -use crate::gossipsub::message::GossipsubMessage; -use crate::gossipsub::topic::{ - ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, -}; -use libp2p::gossipsub::TopicHash; - -#[test] -fn test_message_decode_invalid_topic() { - let topic = TopicHash::from_raw("/invalid/topic/format"); - let data = b"some_data"; - - let result = GossipsubMessage::decode(&topic, data); - assert!(result.is_err()); -} - -#[test] -fn test_message_decode_invalid_ssz_for_block() { - let topic_str = format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, "genesis", BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX - ); - let topic = TopicHash::from_raw(topic_str); - let invalid_ssz = b"not_valid_ssz"; - - let result = GossipsubMessage::decode(&topic, invalid_ssz); - assert!(result.is_err()); -} - -#[test] -fn test_message_decode_invalid_ssz_for_attestation() { - let topic_str = format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, "genesis", ATTESTATION_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX - ); - let topic = TopicHash::from_raw(topic_str); - let invalid_ssz = b"not_valid_ssz"; - - let result = GossipsubMessage::decode(&topic, invalid_ssz); - assert!(result.is_err()); -} - -#[test] -fn test_message_decode_empty_data_fails() { - let topic_str = format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, "genesis", BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX - ); - let topic = TopicHash::from_raw(topic_str); - - let result = GossipsubMessage::decode(&topic, &[]); - assert!(result.is_err()); -} - -#[test] -fn test_message_decode_wrong_prefix() { - let topic = TopicHash::from_raw("/eth2/genesis/block/ssz_snappy"); - let data = b"some_data"; - - let result = GossipsubMessage::decode(&topic, data); - assert!(result.is_err()); -} - -#[test] -fn test_message_decode_wrong_encoding() 
{ - let topic_str = format!("/{}/{}/{}/json", TOPIC_PREFIX, "genesis", BLOCK_TOPIC); - let topic = TopicHash::from_raw(topic_str); - let data = b"some_data"; - - let result = GossipsubMessage::decode(&topic, data); - assert!(result.is_err()); -} - -#[test] -fn test_message_decode_unsupported_kind() { - let topic_str = format!( - "/{}/{}/{}/{}", - TOPIC_PREFIX, "genesis", "voluntary_exit", SSZ_SNAPPY_ENCODING_POSTFIX - ); - let topic = TopicHash::from_raw(topic_str); - let data = b"some_data"; - - let result = GossipsubMessage::decode(&topic, data); - assert!(result.is_err()); -} diff --git a/lean_client/networking/src/gossipsub/tests/message_id.rs b/lean_client/networking/src/gossipsub/tests/message_id.rs deleted file mode 100644 index 17a0b51..0000000 --- a/lean_client/networking/src/gossipsub/tests/message_id.rs +++ /dev/null @@ -1,188 +0,0 @@ -use crate::gossipsub::config::compute_message_id; -use crate::gossipsub::topic::{ - ATTESTATION_TOPIC, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX, TOPIC_PREFIX, -}; -use crate::types::MESSAGE_DOMAIN_VALID_SNAPPY; -use libp2p::gossipsub::{Message, TopicHash}; -use sha2::{Digest, Sha256}; - -fn create_test_message(topic: &str, data: Vec) -> Message { - Message { - source: None, - data, - sequence_number: None, - topic: TopicHash::from_raw(topic), - } -} - -#[test] -fn test_message_id_length_20_bytes() { - let message = create_test_message("/test/topic", b"test_data".to_vec()); - let message_id = compute_message_id(&message); - - assert_eq!(message_id.0.len(), 20); -} - -#[test] -fn test_message_id_deterministic() { - let message1 = create_test_message("/test/topic", b"test_data".to_vec()); - let message2 = create_test_message("/test/topic", b"test_data".to_vec()); - - let id1 = compute_message_id(&message1); - let id2 = compute_message_id(&message2); - - assert_eq!(id1, id2); -} - -#[test] -fn test_message_id_different_data() { - let message1 = create_test_message("/test/topic", b"data1".to_vec()); - let message2 = 
create_test_message("/test/topic", b"data2".to_vec()); - - let id1 = compute_message_id(&message1); - let id2 = compute_message_id(&message2); - - assert_ne!(id1, id2); -} - -#[test] -fn test_message_id_different_topics() { - let message1 = create_test_message("/topic1", b"same_data".to_vec()); - let message2 = create_test_message("/topic2", b"same_data".to_vec()); - - let id1 = compute_message_id(&message1); - let id2 = compute_message_id(&message2); - - assert_ne!(id1, id2); -} - -#[test] -fn test_message_id_edge_cases_empty() { - let message_empty_data = create_test_message("/topic", vec![]); - let message_empty_topic = create_test_message("", b"data".to_vec()); - let message_both_empty = create_test_message("", vec![]); - - assert_eq!(compute_message_id(&message_empty_data).0.len(), 20); - assert_eq!(compute_message_id(&message_empty_topic).0.len(), 20); - assert_eq!(compute_message_id(&message_both_empty).0.len(), 20); -} - -#[test] -fn test_message_id_edge_cases_large_inputs() { - let large_topic = "x".repeat(1000); - let large_data = vec![0xFFu8; 5000]; - - let message = create_test_message(&large_topic, large_data); - let id = compute_message_id(&message); - - assert_eq!(id.0.len(), 20); -} - -#[test] -fn test_message_id_edge_cases_binary_data() { - let binary_data: Vec = (0..=255).collect(); - let message = create_test_message("/binary/topic", binary_data); - let id = compute_message_id(&message); - - assert_eq!(id.0.len(), 20); -} - -#[test] -fn test_message_id_uniqueness_and_collision_resistance() { - let test_cases = vec![ - // Basic different inputs - ("/topic1", b"data".to_vec()), - ("/topic2", b"data".to_vec()), - ("/topic", b"data1".to_vec()), - ("/topic", b"data2".to_vec()), - // Topic/data similarity - ("/abc", b"def".to_vec()), - ("/def", b"abc".to_vec()), - // Length-based variations - ("/ab", b"cd".to_vec()), - ("/a", b"bcd".to_vec()), - // Null byte insertion - ("/topic", b"data".to_vec()), - ("/top\x00ic", b"data".to_vec()), - ]; - - let ids: 
Vec<_> = test_cases - .iter() - .map(|(topic, data)| { - let message = create_test_message(topic, data.clone()); - compute_message_id(&message) - }) - .collect(); - - let unique_ids: std::collections::HashSet<_> = ids.iter().collect(); - assert_eq!( - unique_ids.len(), - ids.len(), - "Expected all message IDs to be unique" - ); - - for id in &ids { - assert_eq!(id.0.len(), 20); - } -} - -#[test] -fn test_message_id_uses_valid_snappy_domain() { - let topic = "/test/topic"; - let data = b"test_data"; - - let message = create_test_message(topic, data.to_vec()); - let computed_id = compute_message_id(&message); - - let topic_bytes = topic.as_bytes(); - let topic_len = topic_bytes.len() as u64; - - let mut digest_input = Vec::new(); - - digest_input.extend_from_slice(MESSAGE_DOMAIN_VALID_SNAPPY); - - digest_input.extend_from_slice(&topic_len.to_le_bytes()); - digest_input.extend_from_slice(topic_bytes); - digest_input.extend_from_slice(data); - - let hash = Sha256::digest(&digest_input); - let expected_id: Vec = hash[..20].to_vec(); - - assert_eq!(computed_id.0, expected_id); -} - -#[test] -fn test_realistic_blockchain_scenarios() { - let scenarios = vec![ - ( - format!( - "/{}/genesis/{}/{}", - TOPIC_PREFIX, BLOCK_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX - ), - b"beacon_block_ssz_data".to_vec(), - ), - ( - format!( - "/{}/genesis/{}/{}", - TOPIC_PREFIX, ATTESTATION_TOPIC, SSZ_SNAPPY_ENCODING_POSTFIX - ), - b"aggregate_proof_ssz".to_vec(), - ), - ]; - - // All messages should produce valid, unique IDs - let ids: Vec<_> = scenarios - .iter() - .map(|(topic, data)| { - let message = create_test_message(topic, data.clone()); - compute_message_id(&message) - }) - .collect(); - - let unique_ids: std::collections::HashSet<_> = ids.iter().collect(); - assert_eq!(unique_ids.len(), ids.len()); - - for id in &ids { - assert_eq!(id.0.len(), 20); - } -} diff --git a/lean_client/networking/src/gossipsub/tests/mod.rs b/lean_client/networking/src/gossipsub/tests/mod.rs index 
046af03..8922ecb 100644 --- a/lean_client/networking/src/gossipsub/tests/mod.rs +++ b/lean_client/networking/src/gossipsub/tests/mod.rs @@ -1,8 +1,2 @@ mod config; -mod control; -mod mcache; -mod mesh; -mod message; -mod message_id; -mod raw_message; mod topic; diff --git a/lean_client/networking/src/gossipsub/tests/raw_message.rs b/lean_client/networking/src/gossipsub/tests/raw_message.rs deleted file mode 100644 index 071a4d0..0000000 --- a/lean_client/networking/src/gossipsub/tests/raw_message.rs +++ /dev/null @@ -1,98 +0,0 @@ -use crate::gossipsub::message::{RawGossipsubMessage, SnappyDecompressor}; -use std::sync::Arc; - -struct TestDecompressor; - -impl SnappyDecompressor for TestDecompressor { - fn decompress(&self, _data: &[u8]) -> Result, Box> { - Ok(b"decompressed_test_data".to_vec()) - } -} - -struct FailingDecompressor; - -impl SnappyDecompressor for FailingDecompressor { - fn decompress(&self, _data: &[u8]) -> Result, Box> { - Err("Decompression failed".into()) - } -} - -#[test] -fn test_message_id_computation_no_snappy() { - let topic = b"test_topic"; - let raw_data = b"raw_test_data"; - - let message = RawGossipsubMessage::new(topic.to_vec(), raw_data.to_vec(), None); - let message_id = message.id(); - - assert_eq!(message_id.0.len(), 20); -} - -#[test] -fn test_message_id_computation_with_snappy() { - let topic = b"test_topic"; - let raw_data = b"raw_test_data"; - - let message = RawGossipsubMessage::new( - topic.to_vec(), - raw_data.to_vec(), - Some(Arc::new(TestDecompressor)), - ); - let message_id = message.id(); - - assert_eq!(message_id.0.len(), 20); -} - -#[test] -fn test_message_id_computation_snappy_fails() { - let topic = b"test_topic"; - let raw_data = b"raw_test_data"; - - let message = RawGossipsubMessage::new( - topic.to_vec(), - raw_data.to_vec(), - Some(Arc::new(FailingDecompressor)), - ); - let message_id = message.id(); - - assert_eq!(message_id.0.len(), 20); -} - -#[test] -fn test_message_id_determinism() { - let topic = 
b"test_topic"; - let data = b"test_data"; - - let message1 = RawGossipsubMessage::new( - topic.to_vec(), - data.to_vec(), - Some(Arc::new(TestDecompressor)), - ); - let message2 = RawGossipsubMessage::new( - topic.to_vec(), - data.to_vec(), - Some(Arc::new(TestDecompressor)), - ); - - assert_eq!(message1.id(), message2.id()); -} - -#[test] -fn test_message_uniqueness() { - let test_cases = vec![ - (b"topic1".to_vec(), b"data".to_vec()), - (b"topic2".to_vec(), b"data".to_vec()), - (b"topic".to_vec(), b"data1".to_vec()), - (b"topic".to_vec(), b"data2".to_vec()), - ]; - - let messages: Vec<_> = test_cases - .into_iter() - .map(|(topic, data)| RawGossipsubMessage::new(topic, data, None)) - .collect(); - - let ids: Vec<_> = messages.iter().map(|msg| msg.id()).collect(); - let unique_ids: std::collections::HashSet<_> = ids.iter().collect(); - - assert_eq!(ids.len(), unique_ids.len()); -} diff --git a/lean_client/networking/src/gossipsub/types.rs b/lean_client/networking/src/gossipsub/types.rs deleted file mode 100644 index 4d24b94..0000000 --- a/lean_client/networking/src/gossipsub/types.rs +++ /dev/null @@ -1,32 +0,0 @@ -/// Gossipsub Type Definitions -/// -/// Type aliases for common gossipsub types. -use containers::Bytes20; - -/// 20-byte message identifier. -/// -/// Computed from message contents using SHA256: -/// `SHA256(domain + uint64_le(len(topic)) + topic + data)[:20]` -/// -/// The domain byte distinguishes valid/invalid snappy compression. -pub type MessageId = Bytes20; - -/// Libp2p peer identifier. -/// -/// Derived from the peer's public key as a base58-encoded multihash. -/// Uniquely identifies peers in the P2P network. -pub type PeerId = String; - -/// Topic string identifier. -/// -/// Follows the Ethereum consensus format: -/// `/{prefix}/{fork_digest}/{topic_name}/{encoding}` -pub type TopicId = String; - -/// Unix timestamp in seconds since epoch. 
-/// -/// Used for: -/// - Message arrival times -/// - Peer activity tracking -/// - Seen cache expiry -pub type Timestamp = f64; diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index daa735c..6b8a4a9 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -7,7 +7,7 @@ use std::{ }; use anyhow::{Result, anyhow}; -use containers::ssz::SszWrite; +use containers::ssz::{SszReadDefault, SszWrite}; use futures::StreamExt; use libp2p::{ Multiaddr, SwarmBuilder, @@ -27,7 +27,7 @@ use crate::{ bootnodes::{BootnodeSource, StaticBootnodes}, compressor::Compressor, discovery::{DiscoveryConfig, DiscoveryService}, - gossipsub::{self, config::GossipsubConfig, message::GossipsubMessage, topic::GossipsubKind}, + gossipsub::{self, config::GossipsubConfig, topic::{GossipsubKind, GossipsubTopic}}, network::behaviour::{LeanNetworkBehaviour, LeanNetworkBehaviourEvent}, req_resp::{self, BLOCKS_BY_ROOT_PROTOCOL_V1, LeanRequest, ReqRespMessage, STATUS_PROTOCOL_V1}, types::{ @@ -357,44 +357,60 @@ where } Event::Message { message, .. 
} => { - match GossipsubMessage::decode(&message.topic, &message.data) { - Ok(GossipsubMessage::Block(signed_block_with_attestation)) => { - let slot = signed_block_with_attestation.message.block.slot.0; - - if let Err(err) = self - .chain_message_sink - .send(ChainMessage::ProcessBlock { - signed_block_with_attestation, - is_trusted: false, - should_gossip: true, - }) - .await - { - warn!( - "failed to send block with attestation for slot {slot} to chain: {err:?}" - ); + match GossipsubTopic::decode(&message.topic) { + Ok(topic) => match topic.kind { + GossipsubKind::Block => { + match containers::SignedBlockWithAttestation::from_ssz_default(&message.data) { + Ok(signed_block_with_attestation) => { + let slot = signed_block_with_attestation.message.block.slot.0; + + if let Err(err) = self + .chain_message_sink + .send(ChainMessage::ProcessBlock { + signed_block_with_attestation, + is_trusted: false, + should_gossip: true, + }) + .await + { + warn!( + "failed to send block with attestation for slot {slot} to chain: {err:?}" + ); + } + } + Err(err) => { + warn!(?err, topic = %message.topic, "failed to decode block"); + } + } } - } - Ok(GossipsubMessage::Attestation(signed_attestation)) => { - #[cfg(feature = "devnet1")] - let slot = signed_attestation.message.data.slot.0; - #[cfg(feature = "devnet2")] - let slot = signed_attestation.message.slot.0; - - if let Err(err) = self - .chain_message_sink - .send(ChainMessage::ProcessAttestation { - signed_attestation: signed_attestation, - is_trusted: false, - should_gossip: true, - }) - .await - { - warn!("failed to send vote for slot {slot} to chain: {err:?}"); + GossipsubKind::Attestation => { + match containers::SignedAttestation::from_ssz_default(&message.data) { + Ok(signed_attestation) => { + #[cfg(feature = "devnet1")] + let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; + + if let Err(err) = self + .chain_message_sink + 
.send(ChainMessage::ProcessAttestation { + signed_attestation, + is_trusted: false, + should_gossip: true, + }) + .await + { + warn!("failed to send vote for slot {slot} to chain: {err:?}"); + } + } + Err(err) => { + warn!(?err, topic = %message.topic, "failed to decode attestation"); + } + } } - } + }, Err(err) => { - warn!(%err, topic = %message.topic, "gossip decode failed"); + warn!(%err, topic = %message.topic, "unknown gossip topic"); } } } From 152d338271a438a8fee116db08468a8275d458fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Titas=20Stankevi=C4=8Dius?= Date: Wed, 21 Jan 2026 14:59:04 +0200 Subject: [PATCH 27/27] fixing formatting --- lean_client/networking/src/network/service.rs | 106 +++++++++--------- 1 file changed, 55 insertions(+), 51 deletions(-) diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 6b8a4a9..af66896 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -27,7 +27,11 @@ use crate::{ bootnodes::{BootnodeSource, StaticBootnodes}, compressor::Compressor, discovery::{DiscoveryConfig, DiscoveryService}, - gossipsub::{self, config::GossipsubConfig, topic::{GossipsubKind, GossipsubTopic}}, + gossipsub::{ + self, + config::GossipsubConfig, + topic::{GossipsubKind, GossipsubTopic}, + }, network::behaviour::{LeanNetworkBehaviour, LeanNetworkBehaviourEvent}, req_resp::{self, BLOCKS_BY_ROOT_PROTOCOL_V1, LeanRequest, ReqRespMessage, STATUS_PROTOCOL_V1}, types::{ @@ -356,64 +360,64 @@ where info!(peer = %peer_id, topic = %topic, "A peer unsubscribed from topic"); } - Event::Message { message, .. 
} => { - match GossipsubTopic::decode(&message.topic) { - Ok(topic) => match topic.kind { - GossipsubKind::Block => { - match containers::SignedBlockWithAttestation::from_ssz_default(&message.data) { - Ok(signed_block_with_attestation) => { - let slot = signed_block_with_attestation.message.block.slot.0; - - if let Err(err) = self - .chain_message_sink - .send(ChainMessage::ProcessBlock { - signed_block_with_attestation, - is_trusted: false, - should_gossip: true, - }) - .await - { - warn!( - "failed to send block with attestation for slot {slot} to chain: {err:?}" - ); - } - } - Err(err) => { - warn!(?err, topic = %message.topic, "failed to decode block"); + Event::Message { message, .. } => match GossipsubTopic::decode(&message.topic) { + Ok(topic) => match topic.kind { + GossipsubKind::Block => { + match containers::SignedBlockWithAttestation::from_ssz_default( + &message.data, + ) { + Ok(signed_block_with_attestation) => { + let slot = signed_block_with_attestation.message.block.slot.0; + + if let Err(err) = self + .chain_message_sink + .send(ChainMessage::ProcessBlock { + signed_block_with_attestation, + is_trusted: false, + should_gossip: true, + }) + .await + { + warn!( + "failed to send block with attestation for slot {slot} to chain: {err:?}" + ); } } + Err(err) => { + warn!(?err, topic = %message.topic, "failed to decode block"); + } } - GossipsubKind::Attestation => { - match containers::SignedAttestation::from_ssz_default(&message.data) { - Ok(signed_attestation) => { - #[cfg(feature = "devnet1")] - let slot = signed_attestation.message.data.slot.0; - #[cfg(feature = "devnet2")] - let slot = signed_attestation.message.slot.0; - - if let Err(err) = self - .chain_message_sink - .send(ChainMessage::ProcessAttestation { - signed_attestation, - is_trusted: false, - should_gossip: true, - }) - .await - { - warn!("failed to send vote for slot {slot} to chain: {err:?}"); - } - } - Err(err) => { - warn!(?err, topic = %message.topic, "failed to decode 
attestation"); + } + GossipsubKind::Attestation => { + match containers::SignedAttestation::from_ssz_default(&message.data) { + Ok(signed_attestation) => { + #[cfg(feature = "devnet1")] + let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; + + if let Err(err) = self + .chain_message_sink + .send(ChainMessage::ProcessAttestation { + signed_attestation, + is_trusted: false, + should_gossip: true, + }) + .await + { + warn!("failed to send vote for slot {slot} to chain: {err:?}"); } } + Err(err) => { + warn!(?err, topic = %message.topic, "failed to decode attestation"); + } } - }, - Err(err) => { - warn!(%err, topic = %message.topic, "unknown gossip topic"); } + }, + Err(err) => { + warn!(%err, topic = %message.topic, "unknown gossip topic"); } - } + }, _ => { info!(?event, "Unhandled gossipsub event"); }