diff --git a/README.md b/README.md index 5028b8f..ea3b63b 100644 --- a/README.md +++ b/README.md @@ -23,14 +23,51 @@ leanEthereum Consensus Client written in Rust using Grandine's libraries. Run in debug mode via terminal (with XMSS signing): ``` RUST_LOG=info ./target/release/lean_client \ - --genesis ../lean-quickstart/local-devnet/genesis/config.yaml \ - --validator-registry-path ../lean-quickstart/local-devnet/genesis/validators.yaml \ - --hash-sig-key-dir ../lean-quickstart/local-devnet/genesis/hash-sig-keys \ + --genesis ../../lean-quickstart/local-devnet/genesis/config.yaml \ + --validator-registry-path ../../lean-quickstart/local-devnet/genesis/validators.yaml \ + --hash-sig-key-dir ../../lean-quickstart/local-devnet/genesis/hash-sig-keys \ --node-id qlean_0 \ - --node-key ../lean-quickstart/local-devnet/genesis/qlean_0.key \ + --node-key ../../lean-quickstart/local-devnet/genesis/qlean_0.key \ --port 9003 \ + --disable-discovery --bootnodes "/ip4/127.0.0.1/udp/9001/quic-v1/p2p/16Uiu2HAkvi2sxT75Bpq1c7yV2FjnSQJJ432d6jeshbmfdJss1i6f" \ --bootnodes "/ip4/127.0.0.1/udp/9002/quic-v1/p2p/16Uiu2HAmPQhkD6Zg5Co2ee8ShshkiY4tDePKFARPpCS2oKSLj1E1" \ --bootnodes "/ip4/127.0.0.1/udp/9004/quic-v1/p2p/16Uiu2HAm7TYVs6qvDKnrovd9m4vvRikc4HPXm1WyLumKSe5fHxBv" ``` 4. Leave client running for a few minutes and observe warnings, errors, check if blocks are being justified and finalized (don't need debug mode for this last one) + +## Testing discovery + +1. Build the client: + ```bash + cd lean_client/ + cargo build --release + ``` + +2. Start the bootnode + + Run in the terminal: + ``` + RUST_LOG=info ./target/release/lean_client \ + --port 9000 \ + --discovery-port 9100 + ``` + +3. Start the other nodes + + Run in the terminal: + ``` + RUST_LOG=info ./target/release/lean_client \ + --port 9001 \ + --discovery-port 9101 \ + --bootnodes "" + ``` + + ``` + RUST_LOG=info ./target/release/lean_client \ + --port 9002 \ + --discovery-port 9102 \ + --bootnodes "" + ``` + +After a minute all the nodes should be synced up and see each other diff --git a/lean_client/Cargo.lock b/lean_client/Cargo.lock index 93fb9dd..61be8fc 100644 --- a/lean_client/Cargo.lock +++ b/lean_client/Cargo.lock @@ -79,7 +79,7 @@ dependencies = [ "hashbrown 0.16.0", "indexmap 2.11.4", "itoa", - "k256 0.13.4", + "k256", "keccak-asm", "paste", "proptest", @@ -500,7 +500,7 @@ version = "0.30.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "16e2cdb6d5ed835199484bb92bb8b3edd526effe995c61732580439c1a67e2e9" dependencies = [ - "base64 0.22.1", + "base64", "http", "log", "url", @@ -529,12 +529,6 @@ version = "0.2.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4cbbc9d0964165b47557570cce6c952866c2678457aca742aafc9fb771d30270" -[[package]] -name = "base16ct" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349a06037c7bf932dd7e7d1f653678b2038b9ad46a74102f1fc7bd7872678cce" - [[package]] name = "base16ct" version = "0.2.0" @@ -551,12 +545,6 @@ dependencies = [ "match-lookup", ] -[[package]] -name = "base64" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e1b586273c5702936fe7b7d6896644d8be71e6314cfe09d3167c95f712589e8" - [[package]] name = "base64" version = "0.22.1" @@ -859,6 +847,7 @@ dependencies = [ name = "containers" version = "0.1.0" dependencies = [ + "env-config", "hex", "leansig", "pretty_assertions", @@ -961,18 +950,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "460fbee9c2c2f33933d720630a6a0bac33ba7053db5344fac858d4b8952d77d5" -[[package]] -name = "crypto-bigint" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" -dependencies = [ - "generic-array", - "rand_core 0.6.4", - "subtle", - "zeroize", -] - [[package]] name = "crypto-bigint" version = "0.5.5" @@ -1108,13 +1085,14 @@ dependencies = [ ] [[package]] -name = "der" -version = "0.6.1" +name = "delay_map" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1a467a65c5e759bce6e65eaf91cc29f466cdc57cb65777bd646872a8a1fd4de" +checksum = "88e365f083a5cb5972d50ce8b1b2c9f125dc5ec0f50c0248cfb568ae59efcf0b" dependencies = [ - "const-oid", - "zeroize", + "futures", + "tokio", + "tokio-util", ] [[package]] @@ -1211,6 +1189,37 @@ dependencies = [ "subtle", ] +[[package]] +name = "discv5" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f170f4f6ed0e1df52bf43b403899f0081917ecf1500bfe312505cc3b515a8899" +dependencies = [ + "aes", + "aes-gcm", + "alloy-rlp", + "arrayvec", + "ctr", + "delay_map", + "enr", + "fnv", + "futures", + "hashlink", + "hex", + "hkdf", + "lazy_static", + "lru", + "more-asserts", + "parking_lot", + "rand 0.8.5", + "smallvec", + "socket2 0.5.10", + "tokio", + "tracing", + "uint 0.10.0", + "zeroize", +] + [[package]] name = "displaydoc" version = "0.2.5" @@ -1240,30 +1249,18 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cc5d6d6a8504f8caedd7de14576464383900cd3840b7033a7a3dce5ac00121ca" -[[package]] -name = "ecdsa" -version = "0.14.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413301934810f597c1d19ca71c8710e99a3f1ba28a0d2ebc01551a2daeea3c5c" -dependencies = [ - "der 0.6.1", - "elliptic-curve 0.12.3", - "rfc6979 0.3.1", - "signature 1.6.4", -] - [[package]] name = "ecdsa" version = "0.16.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" dependencies = [ - "der 0.7.10", + "der", "digest 0.10.7", - "elliptic-curve 0.13.8", - "rfc6979 0.4.0", - "signature 2.2.0", - "spki 0.7.3", + "elliptic-curve", + "rfc6979", + "signature", + "spki", ] [[package]] @@ -1272,8 +1269,8 @@ version = "2.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "115531babc129696a58c64a4fef0a8bf9e9698629fb97e9e40767d235cfbcd53" dependencies = [ - "pkcs8 0.10.2", - "signature 2.2.0", + "pkcs8", + "signature", ] [[package]] @@ -1309,59 +1306,39 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" -[[package]] -name = "elliptic-curve" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7bb888ab5300a19b8e5bceef25ac745ad065f3c9f7efc6de1b91958110891d3" -dependencies = [ - "base16ct 0.1.1", - "crypto-bigint 0.4.9", - "der 0.6.1", - "digest 0.10.7", - "ff 0.12.1", - "generic-array", - "group 0.12.1", - "pkcs8 0.9.0", - "rand_core 0.6.4", - "sec1 0.3.0", - "subtle", - "zeroize", -] - [[package]] name = "elliptic-curve" version = "0.13.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" dependencies = [ - "base16ct 0.2.0", - "crypto-bigint 0.5.5", + "base16ct", + "crypto-bigint", "digest 0.10.7", - "ff 0.13.1", + "ff", "generic-array", - "group 0.13.0", - "pkcs8 0.10.2", + "group", + "pkcs8", "rand_core 0.6.4", - "sec1 0.7.3", + "sec1", "subtle", "zeroize", ] [[package]] name = "enr" -version = "0.7.0" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "492a7e5fc2504d5fdce8e124d3e263b244a68b283cac67a69eda0cd43e0aebad" +checksum = "851bd664a3d3a3c175cff92b2f0df02df3c541b4895d0ae307611827aae46152" dependencies = [ - "base64 0.13.1", - "bs58 0.4.0", + "alloy-rlp", + "base64", "bytes", + "ed25519-dalek", "hex", - "k256 0.11.6", + "k256", "log", "rand 0.8.5", - "rlp", "serde", "sha3", "zeroize", @@ -1399,6 +1376,10 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "env-config" +version = "0.1.0" + [[package]] name = "equivalent" version = "1.0.2" @@ -1412,7 +1393,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -1439,7 +1420,7 @@ dependencies = [ "impl-rlp", "impl-serde", "primitive-types", - "uint", + "uint 0.9.5", ] [[package]] @@ -1519,16 +1500,6 @@ dependencies = [ "bytes", ] -[[package]] -name = "ff" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" -dependencies = [ - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "ff" version = "0.13.1" @@ -1586,6 +1557,7 @@ name = "fork-choice" version = "0.1.0" dependencies = [ "containers", + "env-config", "serde", "serde_json", "ssz", @@ -1791,24 +1763,13 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" -[[package]] -name = "group" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" -dependencies = [ - "ff 0.12.1", - "rand_core 0.6.4", - "subtle", -] - [[package]] name = "group" version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" dependencies = [ - "ff 0.13.1", + "ff", "rand_core 0.6.4", "subtle", ] @@ -2311,7 +2272,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.0", "serde", "serde_core", ] @@ -2401,18 +2362,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "k256" -version = "0.11.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72c1e0b51e7ec0a97369623508396067a486bd0cbed95a2659a4b863d28cfc8b" -dependencies = [ - "cfg-if", - "ecdsa 0.14.8", - "elliptic-curve 0.12.3", - "sha2 0.10.9 (registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "k256" version = "0.13.4" @@ -2420,11 +2369,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6e3919bbaa2945715f0bb6d3934a173d1e9a59ac23767fbaaef277265a7411b" dependencies = [ "cfg-if", - "ecdsa 0.16.9", - "elliptic-curve 0.13.8", + "ecdsa", + "elliptic-curve", "once_cell", "sha2 0.10.9 (registry+https://github.com/rust-lang/crates.io-index)", - "signature 2.2.0", + "signature", ] [[package]] @@ -2633,7 +2582,7 @@ checksum = "c7f58e37d8d6848e5c4c9e3c35c6f61133235bff2960c9c00a663b0849301221" dependencies = [ "async-channel", "asynchronous-codec 0.7.0", - "base64 0.22.1", + "base64", "byteorder", "bytes", "either", @@ -2704,7 +2653,7 @@ dependencies = [ "bs58 0.5.1", "ed25519-dalek", "hkdf", - "k256 0.13.4", + "k256", "multihash 0.19.3", "quick-protobuf", "rand 0.8.5", @@ -3031,6 +2980,12 @@ dependencies = [ "uuid", ] +[[package]] +name = "more-asserts" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1fafa6961cabd9c63bcd77a45d7e3b7f3b552b70417831fb0f56db717e72407e" + [[package]] name = "multiaddr" version = "0.17.1" @@ -3216,11 +3171,17 @@ dependencies = [ "anyhow", "async-trait", "containers", + "discv5", "enr", + "env-config", "futures", + "hex", + "k256", "libp2p", "libp2p-identity 0.2.12", "libp2p-mplex", + "num-bigint", + "num-traits", "parking_lot", "rand 0.8.5", "serde", @@ -3265,7 +3226,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.60.2", + "windows-sys 0.61.1", ] [[package]] @@ -3552,7 +3513,7 @@ version = "3.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1d30c53c26bc5b31a98cd02d20f25a7c8567146caf63ed593a9d87b2775291be" dependencies = [ - "base64 0.22.1", + "base64", "serde_core", ] @@ -3604,24 +3565,14 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs8" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" -dependencies = [ - "der 0.6.1", - "spki 0.6.0", -] - [[package]] name = "pkcs8" version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f950b2377845cebe5cf8b5165cb3cc1a5e0fa5cfa3e1f7f55707d8fd82e0a7b7" dependencies = [ - "der 0.7.10", - "spki 0.7.3", + "der", + "spki", ] [[package]] @@ -3711,7 +3662,7 @@ dependencies = [ "impl-codec", "impl-rlp", "impl-serde", - "uint", + "uint 0.9.5", ] [[package]] @@ -4093,17 +4044,6 @@ version = "0.7.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6b3789b30bd25ba102de4beabd95d21ac45b69b1be7d14522bab988c526d6799" -[[package]] -name = "rfc6979" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7743f17af12fa0b03b803ba12cd6a8d9483a587e89c69445e3909655c0b9fabb" -dependencies = [ - "crypto-bigint 0.4.9", - "hmac", - "zeroize", -] - [[package]] name = "rfc6979" version = "0.4.0" @@ -4268,7 +4208,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys", - "windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -4382,30 +4322,16 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" -[[package]] -name = "sec1" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be24c1842290c45df0a7bf069e0c268a747ad05a192f2fd7dcfdbc1cba40928" -dependencies = [ - "base16ct 0.1.1", - "der 0.6.1", - "generic-array", - "pkcs8 0.9.0", - "subtle", - "zeroize", -] - [[package]] name = "sec1" version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" dependencies = [ - "base16ct 0.2.0", - "der 0.7.10", + "base16ct", + "der", "generic-array", - "pkcs8 0.10.2", + "pkcs8", "subtle", "zeroize", ] @@ -4500,7 +4426,7 @@ version = "3.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c522100790450cf78eeac1507263d0a350d4d5b30df0c8e1fe051a10c22b376e" dependencies = [ - "base64 0.22.1", + "base64", "chrono", "hex", "indexmap 1.9.3", @@ -4617,16 +4543,6 @@ dependencies = [ "libc", ] -[[package]] -name = "signature" -version = "1.6.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" -dependencies = [ - "digest 0.10.7", - "rand_core 0.6.4", -] - [[package]] name = "signature" version = "2.2.0" @@ -4701,16 +4617,6 @@ dependencies = [ "lock_api", ] -[[package]] -name = "spki" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" -dependencies = [ - "base64ct", - "der 0.6.1", -] - [[package]] name = "spki" version = "0.7.3" @@ -4718,7 +4624,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d91ed6c858b01f942cd56b37a94b3e0a1798290327d1236e4d9cf4eaca44d29d" dependencies = [ "base64ct", - "der 0.7.10", + "der", ] [[package]] @@ -4919,7 +4825,7 @@ dependencies = [ "getrandom 0.3.3", "once_cell", "rustix", - "windows-sys 0.52.0", + "windows-sys 0.61.1", ] [[package]] @@ -5074,6 +4980,7 @@ dependencies = [ "futures-core", "futures-sink", "pin-project-lite", + "slab", "tokio", ] @@ -5128,6 +5035,7 @@ version = "0.1.41" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" dependencies = [ + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -5238,6 +5146,18 @@ dependencies = [ "static_assertions", ] +[[package]] +name = "uint" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "909988d098b2f738727b161a106cfc7cab00c539c2687a8836f8e565976fb53e" +dependencies = [ + "byteorder", + "crunchy", + "hex", + "static_assertions", +] + [[package]] name = "unarray" version = "0.1.4" @@ -5340,6 +5260,7 @@ name = "validator" version = "0.1.0" dependencies = [ "containers", + "env-config", "fork-choice", "leansig", "serde", diff --git a/lean_client/Cargo.toml b/lean_client/Cargo.toml index 7d98ae7..9e72c43 100644 --- a/lean_client/Cargo.toml +++ b/lean_client/Cargo.toml @@ -1,5 +1,5 @@ [workspace] -members = ["chain", "containers", "fork_choice", "networking", "validator"] +members = ["chain", "containers", "env-config", "fork_choice", "networking", "validator"] resolver = "2" [workspace.package] @@ -14,7 +14,7 @@ containers = { path = "./containers" } fork_choice = { path = "./fork_choice" } networking = { path = "./networking" } validator = { path = "./validator" } -libp2p = {version = "0.56.0", default-features = false, features = [ +libp2p = { version = "0.56.0", default-features = false, features = [ 'dns', 'gossipsub', 'identify', @@ -52,8 +52,10 @@ version = "0.1.0" edition = "2021" [features] -default = ["xmss-signing"] +default = ["devnet2", "xmss-signing"] xmss-signing = ["validator/xmss-signing"] +devnet1 = ["containers/devnet1", "fork-choice/devnet1", "networking/devnet1", "validator/devnet1"] +devnet2 = ["containers/devnet2", "fork-choice/devnet2", "networking/devnet2", "validator/devnet2"] [dependencies] chain = { path = "./chain" } diff --git a/lean_client/ENVIRONMENT_SELECTION.md b/lean_client/ENVIRONMENT_SELECTION.md new file mode 100644 index 0000000..d906c9d --- /dev/null +++ b/lean_client/ENVIRONMENT_SELECTION.md @@ -0,0 +1,26 @@ +### To select which devnet you want to compile + +#### Option A +- Change the default features in root `Cargo.toml`: +```toml +[features] +default = ["devnet1", "<...other features>"] # Change to "devnet2" if needed +devnet1 = [...] +devnet2 = [...] +``` + +#### Option B +- Use the `--no-default-features` flag and specify the desired devnet feature when building or running the project: +```bash +cargo build --no-default-features --features devnet1 # Change to devnet2 +``` + + +### Running tests for a specific devnet + +From root directory, use the following command: +```bash +cargo test -p --no-default-features --features devnet1 # Change to devnet2 +``` + +Use `` to specify the crate you want to test. \ No newline at end of file diff --git a/lean_client/Makefile b/lean_client/Makefile index 582e56b..6539dea 100644 --- a/lean_client/Makefile +++ b/lean_client/Makefile @@ -10,7 +10,7 @@ check-format: .PHONY: test test: - cargo test --workspace --all-features --no-fail-fast + cargo test --workspace --no-fail-fast .PHONY: build build: diff --git a/lean_client/chain/src/config.rs b/lean_client/chain/src/config.rs index ca4edb2..1d762de 100644 --- a/lean_client/chain/src/config.rs +++ b/lean_client/chain/src/config.rs @@ -3,6 +3,7 @@ pub struct BasisPoint(pub u64); impl BasisPoint { pub const MAX: u64 = 10_000; + pub const fn new(value: u64) -> Option { if value <= Self::MAX { Some(BasisPoint(value)) @@ -10,42 +11,19 @@ impl BasisPoint { None } } + #[inline] pub fn get(&self) -> u64 { self.0 } } -pub const INTERVALS_PER_SLOT: u64 = 4; -pub const SLOT_DURATION_MS: u64 = 4_000; -pub const SECONDS_PER_SLOT: u64 = SLOT_DURATION_MS / 1_000; -pub const SECONDS_PER_INTERVAL: u64 = SECONDS_PER_SLOT / INTERVALS_PER_SLOT; -pub const JUSTIFICATION_LOOKBACK_SLOTS: u64 = 3; - -pub const PROPOSER_REORG_CUTOFF_BPS: BasisPoint = match BasisPoint::new(2_500) { - Some(x) => x, - None => panic!(), -}; -pub const VOTE_DUE_BPS: BasisPoint = match BasisPoint::new(5_000) { - Some(x) => x, - None => panic!(), -}; -pub const FAST_CONFIRM_DUE_BPS: BasisPoint = match BasisPoint::new(7_500) { - Some(x) => x, - None => panic!(), -}; -pub const VIEW_FREEZE_CUTOFF_BPS: BasisPoint = match BasisPoint::new(7_500) { - Some(x) => x, - None => panic!(), -}; - -pub const HISTORICAL_ROOTS_LIMIT: u64 = 1u64 << 18; -pub const VALIDATOR_REGISTRY_LIMIT: u64 = 1u64 << 12; - #[derive(Clone, Debug)] pub struct ChainConfig { + pub intervals_per_slot: u64, pub slot_duration_ms: u64, pub second_per_slot: u64, + pub seconds_per_interval: u64, pub justification_lookback_slots: u64, pub proposer_reorg_cutoff_bps: BasisPoint, pub vote_due_bps: BasisPoint, @@ -55,25 +33,24 @@ pub struct ChainConfig { pub validator_registry_limit: u64, } -pub const DEVNET_CONFIG: ChainConfig = ChainConfig { - slot_duration_ms: SLOT_DURATION_MS, - second_per_slot: SECONDS_PER_SLOT, - justification_lookback_slots: JUSTIFICATION_LOOKBACK_SLOTS, - proposer_reorg_cutoff_bps: PROPOSER_REORG_CUTOFF_BPS, - vote_due_bps: VOTE_DUE_BPS, - fast_confirm_due_bps: FAST_CONFIRM_DUE_BPS, - view_freeze_cutoff_bps: VIEW_FREEZE_CUTOFF_BPS, - historical_roots_limit: HISTORICAL_ROOTS_LIMIT, - validator_registry_limit: VALIDATOR_REGISTRY_LIMIT, -}; +impl ChainConfig { + pub fn devnet() -> Self { + let slot_duration_ms = 4_000; + let seconds_per_slot = slot_duration_ms / 1_000; + let intervals_per_slot = 4; -#[cfg(test)] -mod tests { - use super::*; - #[test] - fn time_math_is_consistent() { - assert_eq!(SLOT_DURATION_MS, 4_000); - assert_eq!(SECONDS_PER_SLOT, 4); - assert_eq!(SECONDS_PER_INTERVAL, 1); + Self { + slot_duration_ms, + second_per_slot: seconds_per_slot, + intervals_per_slot, + seconds_per_interval: seconds_per_slot / intervals_per_slot, + justification_lookback_slots: 3, + proposer_reorg_cutoff_bps: BasisPoint::new(2_500).expect("Valid BPS"), + vote_due_bps: BasisPoint::new(5_000).expect("Valid BPS"), + fast_confirm_due_bps: BasisPoint::new(7_500).expect("Valid BPS"), + view_freeze_cutoff_bps: BasisPoint::new(7_500).expect("Valid BPS"), + historical_roots_limit: 1u64 << 18, + validator_registry_limit: 1u64 << 12, + } } } diff --git a/lean_client/chain/src/lib.rs b/lean_client/chain/src/lib.rs index ef68c36..9496841 100644 --- a/lean_client/chain/src/lib.rs +++ b/lean_client/chain/src/lib.rs @@ -1 +1,2 @@ -pub mod config; +mod config; +pub use config::ChainConfig; diff --git a/lean_client/containers/Cargo.toml b/lean_client/containers/Cargo.toml index 2d5b0ff..c136a17 100644 --- a/lean_client/containers/Cargo.toml +++ b/lean_client/containers/Cargo.toml @@ -5,12 +5,16 @@ edition = "2021" [features] xmss-verify = ["leansig"] +default = [] +devnet1 = ["env-config/devnet1"] +devnet2 = ["env-config/devnet2"] [lib] name = "containers" path = "src/lib.rs" [dependencies] +env-config = { path = "../env-config", default-features = false } ssz = { git = "https://github.com/grandinetech/grandine", package = "ssz", branch = "develop" } ssz_derive = { git = "https://github.com/grandinetech/grandine", package = "ssz_derive", branch = "develop" } typenum = "1" diff --git a/lean_client/containers/src/attestation.rs b/lean_client/containers/src/attestation.rs index ae1b88c..6779b0f 100644 --- a/lean_client/containers/src/attestation.rs +++ b/lean_client/containers/src/attestation.rs @@ -1,5 +1,6 @@ use crate::{Checkpoint, Slot, Uint64}; use serde::{Deserialize, Serialize}; +use ssz::BitList; use ssz::ByteVector; use ssz_derive::Ssz; use typenum::{Prod, Sum, U100, U12, U31}; @@ -19,13 +20,66 @@ use typenum::U4096; /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). pub type Attestations = ssz::PersistentList; -/// List of signatures corresponding to attestations in a block. -/// Limit is VALIDATOR_REGISTRY_LIMIT (4096). -pub type BlockSignatures = ssz::PersistentList; +pub type AggregatedAttestations = ssz::PersistentList; + +#[cfg(feature = "devnet1")] +pub type AttestationSignatures = ssz::PersistentList; + +#[cfg(feature = "devnet2")] +pub type AttestationSignatures = ssz::PersistentList; + +#[cfg(feature = "devnet2")] +pub type NaiveAggregatedSignature = ssz::PersistentList; /// Bitlist representing validator participation in an attestation. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). -pub type AggregationBits = ssz::BitList; +#[derive(Clone, Debug, PartialEq, Eq, Default, Ssz, Serialize, Deserialize)] +pub struct AggregationBits(pub BitList); + +impl AggregationBits { + pub const LIMIT: u64 = 4096; + + pub fn from_validator_indices(indices: &[u64]) -> Self { + assert!( + !indices.is_empty(), + "Aggregated attestation must reference at least one validator" + ); + + let max_id = *indices.iter().max().unwrap(); + assert!( + max_id < Self::LIMIT, + "Validator index out of range for aggregation bits" + ); + + let mut bits = BitList::::with_length((max_id + 1) as usize); + + for i in 0..=max_id { + bits.set(i as usize, false); + } + + for &i in indices { + bits.set(i as usize, true); + } + + AggregationBits(bits) + } + + pub fn to_validator_indices(&self) -> Vec { + let indices: Vec = self + .0 + .iter() + .enumerate() + .filter_map(|(i, bit)| if *bit { Some(i as u64) } else { None }) + .collect(); + + assert!( + !indices.is_empty(), + "Aggregated attestation must reference at least one validator" + ); + + indices + } +} /// Naive list of validator signatures used for aggregation placeholders. /// Limit is VALIDATOR_REGISTRY_LIMIT (4096). @@ -57,15 +111,18 @@ pub struct Attestation { /// Validator attestation bundled with its signature. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct SignedAttestation { - /// The attestation message signed by the validator. + #[cfg(feature = "devnet2")] + pub validator_id: u64, + #[cfg(feature = "devnet2")] + pub message: AttestationData, + #[cfg(feature = "devnet1")] pub message: Attestation, - /// Signature aggregation produced by the leanVM (SNARKs in the future). pub signature: Signature, } /// Aggregated attestation consisting of participation bits and message. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] -pub struct AggregatedAttestations { +pub struct AggregatedAttestation { /// Bitfield indicating which validators participated in the aggregation. pub aggregation_bits: AggregationBits, /// Combined attestation data similar to the beacon chain format. @@ -75,11 +132,50 @@ pub struct AggregatedAttestations { pub data: AttestationData, } +impl AggregatedAttestation { + pub fn aggregate_by_data(attestations: &[Attestation]) -> Vec { + let mut groups: Vec<(AttestationData, Vec)> = Vec::new(); + + for attestation in attestations { + // Try to find an existing group with the same data + if let Some((_, validator_ids)) = groups + .iter_mut() + .find(|(data, _)| *data == attestation.data) + { + validator_ids.push(attestation.validator_id.0); + } else { + // Create a new group + groups.push((attestation.data.clone(), vec![attestation.validator_id.0])); + } + } + + groups + .into_iter() + .map(|(data, validator_ids)| AggregatedAttestation { + aggregation_bits: AggregationBits::from_validator_indices(&validator_ids), + data, + }) + .collect() + } + + pub fn to_plain(&self) -> Vec { + let validator_indices = self.aggregation_bits.to_validator_indices(); + + validator_indices + .into_iter() + .map(|validator_id| Attestation { + validator_id: Uint64(validator_id), + data: self.data.clone(), + }) + .collect() + } +} + /// Aggregated attestation bundled with aggregated signatures. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] -pub struct SignedAggregatedAttestations { +pub struct SignedAggregatedAttestation { /// Aggregated attestation data. - pub message: AggregatedAttestations, + pub message: AggregatedAttestation, /// Aggregated attestation plus its combined signature. /// /// Stores a naive list of validator signatures that mirrors the attestation diff --git a/lean_client/containers/src/block.rs b/lean_client/containers/src/block.rs index 5df3b22..00f5894 100644 --- a/lean_client/containers/src/block.rs +++ b/lean_client/containers/src/block.rs @@ -1,11 +1,12 @@ -use crate::{ - Attestation, Attestations, BlockSignatures, Bytes32, Signature, Slot, State, ValidatorIndex, -}; +use crate::{Attestation, Attestations, Bytes32, Signature, Slot, State, ValidatorIndex}; use serde::{Deserialize, Serialize}; use ssz_derive::Ssz; #[cfg(feature = "xmss-verify")] use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to_the_20::target_sum::SIGTargetSumLifetime20W2NoOff; +use ssz::{PersistentList, SszHash}; +use typenum::U4096; +use crate::attestation::{AggregatedAttestations, AttestationSignatures}; /// The body of a block, containing payload data. /// @@ -13,6 +14,9 @@ use leansig::signature::generalized_xmss::instantiations_poseidon::lifetime_2_to /// separately in BlockSignatures to match the spec architecture. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] pub struct BlockBody { + #[cfg(feature = "devnet2")] + pub attestations: AggregatedAttestations, + #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers")] pub attestations: Attestations, } @@ -47,6 +51,12 @@ pub struct BlockWithAttestation { pub proposer_attestation: Attestation, } +#[derive(Debug, PartialEq, Eq, Clone, Serialize, Ssz, Deserialize, Default)] +pub struct BlockSignatures { + pub attestation_signatures: AttestationSignatures, + pub proposer_signature: Signature, +} + /// Envelope carrying a block, an attestation from proposer, and aggregated signatures. #[derive(Clone, Debug, PartialEq, Eq, Ssz, Default, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] @@ -56,7 +66,10 @@ pub struct SignedBlockWithAttestation { /// Aggregated signature payload for the block. /// /// Signatures remain in attestation order followed by the proposer signature. + #[cfg(feature = "devnet1")] #[serde(with = "crate::serde_helpers::block_signatures")] + pub signature: PersistentList, + #[cfg(feature = "devnet2")] pub signature: BlockSignatures, } @@ -114,6 +127,7 @@ impl SignedBlockWithAttestation { /// /// - Spec: /// - XMSS Library: + #[cfg(feature = "devnet1")] pub fn verify_signatures(&self, parent_state: State) -> bool { // Unpack the signed block components let block = &self.message.block; @@ -157,25 +171,14 @@ impl SignedBlockWithAttestation { // The ordering must be preserved: // 1. Block body attestations, // 2. The proposer attestation. - assert!( - signatures_vec.len() == all_attestations.len(), + assert_eq!( + signatures_vec.len(), + all_attestations.len(), "Number of signatures does not match number of attestations" ); let validators = &parent_state.validators; - - // Count validators (PersistentList doesn't expose len directly) - let mut num_validators: u64 = 0; - let mut k: u64 = 0; - loop { - match validators.get(k) { - Ok(_) => { - num_validators += 1; - k += 1; - } - Err(_) => break, - } - } + let num_validators = validators.len_u64(); // Verify each attestation signature for (attestation, signature) in all_attestations.iter().zip(signatures_vec.iter()) { @@ -196,68 +199,148 @@ impl SignedBlockWithAttestation { // - The attestation has not been tampered with // - The signature was created at the correct epoch (slot) - #[cfg(feature = "xmss-verify")] - { - use leansig::serialization::Serializable; - use leansig::signature::SignatureScheme; - - // Compute the message hash from the attestation - let message_bytes: [u8; 32] = hash_tree_root(attestation).0.into(); - let epoch = attestation.data.slot.0 as u32; - - // Get public key bytes - use as_bytes() method - let pubkey_bytes = validator.pubkey.0.as_bytes(); - - // Deserialize the public key using Serializable trait - type PubKey = ::PublicKey; - let pubkey = match PubKey::from_bytes(pubkey_bytes) { - Ok(pk) => pk, - Err(e) => { - eprintln!( - "Failed to deserialize public key at slot {:?}: {:?}", - attestation.data.slot, e - ); - return false; - } - }; - - // Get signature bytes - use as_bytes() method - let sig_bytes = signature.as_bytes(); - - // Deserialize the signature using Serializable trait - type Sig = ::Signature; - let sig = match Sig::from_bytes(sig_bytes) { - Ok(s) => s, - Err(e) => { - eprintln!( - "Failed to deserialize signature at slot {:?}: {:?}", - attestation.data.slot, e - ); - return false; - } - }; - - // Verify the signature - if !SIGTargetSumLifetime20W2NoOff::verify(&pubkey, epoch, &message_bytes, &sig) { - eprintln!( - "XMSS signature verification failed at slot {:?}", - attestation.data.slot - ); - return false; - } - } + let message_bytes: [u8; 32] = hash_tree_root(attestation).0.into(); + + assert!( + verify_xmss_signature( + validator.pubkey.0.as_bytes(), + attestation.data.slot, + &message_bytes, + &signature, + ), + "Attestation signature verification failed" + ); + } + + true + } + + #[cfg(feature = "devnet2")] + pub fn verify_signatures(&self, parent_state: State) -> bool { + // Unpack the signed block components + let block = &self.message.block; + let signatures = &self.signature; + let aggregated_attestations = block.body.attestations.clone(); + let attestation_signatures = signatures.attestation_signatures.clone(); + + // Verify signature count matches aggregated attestation count + assert_eq!( + aggregated_attestations.len_u64(), + attestation_signatures.len_u64(), + "Number of signatures does not match number of attestations" + ); + + let validators = &parent_state.validators; + let num_validators = validators.len_u64(); - #[cfg(not(feature = "xmss-verify"))] + // Verify each attestation signature + for (aggregated_attestation, aggregated_signature) in (&aggregated_attestations) + .into_iter() + .zip((&attestation_signatures).into_iter()) + { + let validator_ids = aggregated_attestation + .aggregation_bits + .to_validator_indices(); + + assert_eq!( + aggregated_signature.len_u64(), + validator_ids.len() as u64, + "Aggregated attestation signature count mismatch" + ); + + let attestation_root = aggregated_attestation.data.hash_tree_root(); + + // Loop through zipped validator IDs and their corresponding signatures + // Verify each individual signature within the aggregated attestation + for (validator_id, signature) in + validator_ids.iter().zip(aggregated_signature.into_iter()) { - // Placeholder: XMSS verification disabled - // To enable, compile with --features xmss-verify - let _pubkey = &validator.pubkey; - let _slot = attestation.data.slot; - let _message = hash_tree_root(attestation); - let _sig = signature; + // Ensure validator exists in the active set + assert!( + *validator_id < num_validators, + "Validator index out of range" + ); + + let validator = validators.get(*validator_id).expect("validator must exist"); + + // Get the actual payload root for the attestation data + let attestation_root: [u8; 32] = + hash_tree_root(&aggregated_attestation.data).0.into(); + + // Verify the XMSS signature + assert!( + verify_xmss_signature( + validator.pubkey.0.as_bytes(), + aggregated_attestation.data.slot, + &attestation_root, + signature, + ), + "Attestation signature verification failed" + ); } + + // Verify the proposer attestation signature + let proposer_attestation = self.message.proposer_attestation.clone(); + let proposer_signature = signatures.proposer_signature; + + assert!( + proposer_attestation.validator_id.0 < num_validators, + "Proposer index out of range" + ); + + let proposer = validators + .get(proposer_attestation.validator_id.0) + .expect("proposer must exist"); + + let proposer_root: [u8; 32] = hash_tree_root(&proposer_attestation).0.into(); + assert!( + verify_xmss_signature( + proposer.pubkey.0.as_bytes(), + proposer_attestation.data.slot, + &proposer_root, + &proposer_signature, + ), + "Proposer attestation signature verification failed" + ); } true } } + +#[cfg(feature = "xmss-verify")] +pub fn verify_xmss_signature( + pubkey_bytes: &[u8], + slot: Slot, + message_bytes: &[u8; 32], + signature: &Signature, +) -> bool { + use leansig::serialization::Serializable; + use leansig::signature::SignatureScheme; + + let epoch = slot.0 as u32; + + type PubKey = ::PublicKey; + let pubkey = match PubKey::from_bytes(pubkey_bytes) { + Ok(pk) => pk, + Err(_) => return false, + }; + + type Sig = ::Signature; + let sig = match Sig::from_bytes(signature.as_bytes()) { + Ok(s) => s, + Err(_) => return false, + }; + + SIGTargetSumLifetime20W2NoOff::verify(&pubkey, epoch, message_bytes, &sig) +} + +#[cfg(not(feature = "xmss-verify"))] +pub fn verify_xmss_signature( + _pubkey_bytes: &[u8], + _slot: Slot, + _message_bytes: &[u8; 32], + _signature: &Signature, +) -> bool { + true +} diff --git a/lean_client/containers/src/lib.rs b/lean_client/containers/src/lib.rs index 28b13d1..f0590ca 100644 --- a/lean_client/containers/src/lib.rs +++ b/lean_client/containers/src/lib.rs @@ -10,8 +10,8 @@ pub mod types; pub mod validator; pub use attestation::{ - AggregatedAttestations, AggregatedSignatures, AggregationBits, Attestation, AttestationData, - Attestations, BlockSignatures, Signature, SignedAggregatedAttestations, SignedAttestation, + AggregatedAttestation, AggregatedSignatures, AggregationBits, Attestation, AttestationData, + Attestations, Signature, SignedAggregatedAttestation, SignedAttestation, }; pub use block::{ Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlock, SignedBlockWithAttestation, diff --git a/lean_client/containers/src/serde_helpers.rs b/lean_client/containers/src/serde_helpers.rs index 7cff787..01604e5 100644 --- a/lean_client/containers/src/serde_helpers.rs +++ b/lean_client/containers/src/serde_helpers.rs @@ -187,9 +187,11 @@ pub mod signature { /// where each signature can be either hex string or structured XMSS format pub mod block_signatures { use super::*; - use crate::{BlockSignatures, Signature}; + use crate::block::BlockSignatures; + use crate::Signature; use serde_json::Value; use ssz::PersistentList; + use typenum::U4096; /// Structured XMSS signature format from test vectors #[derive(Deserialize, Clone)] @@ -247,7 +249,10 @@ pub mod block_signatures { Signature::try_from(bytes.as_slice()).map_err(|_| "Failed to create signature".to_string()) } - pub fn deserialize<'de, D>(deserializer: D) -> Result + #[cfg(feature = "devnet1")] + pub fn deserialize<'de, D>( + deserializer: D, + ) -> Result, D::Error> where D: Deserializer<'de>, { @@ -269,7 +274,21 @@ pub mod block_signatures { Ok(signatures) } - pub fn serialize(value: &BlockSignatures, serializer: S) -> Result + #[cfg(feature = "devnet2")] + pub fn deserialize<'de, D>(_: D) -> Result + where + D: Deserializer<'de>, + { + Err(serde::de::Error::custom( + "BlockSignatures deserialization not implemented for devnet2", + )) + } + + #[cfg(feature = "devnet1")] + pub fn serialize( + value: &PersistentList, + serializer: S, + ) -> Result where S: Serializer, { @@ -289,4 +308,14 @@ pub mod block_signatures { let wrapper = DataWrapper { data: sigs }; wrapper.serialize(serializer) } + + #[cfg(feature = "devnet2")] + pub fn serialize(_value: &BlockSignatures, _serializer: S) -> Result + where + S: Serializer, + { + Err(serde::ser::Error::custom( + "BlockSignatures serialization not implemented for devnet2", + )) + } } diff --git a/lean_client/containers/src/state.rs b/lean_client/containers/src/state.rs index 7ff8456..5056fb7 100644 --- a/lean_client/containers/src/state.rs +++ b/lean_client/containers/src/state.rs @@ -1,16 +1,19 @@ +use crate::attestation::AggregatedAttestations; +use crate::block::BlockSignatures; use crate::validator::Validator; use crate::{ block::{hash_tree_root, Block, BlockBody, BlockHeader, SignedBlockWithAttestation}, - Attestation, Attestations, BlockSignatures, Bytes32, Checkpoint, Config, Slot, Uint64, - ValidatorIndex, + Attestation, Attestations, Bytes32, Checkpoint, Config, Signature, SignedAttestation, Slot, + Uint64, ValidatorIndex, }; use crate::{ HistoricalBlockHashes, JustificationRoots, JustificationsValidators, JustifiedSlots, Validators, }; use serde::{Deserialize, Serialize}; -use ssz::PersistentList as List; +use ssz::{PersistentList as List, PersistentList}; use ssz_derive::Ssz; use std::collections::BTreeMap; +use typenum::U4096; pub const VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const JUSTIFICATION_ROOTS_LIMIT: usize = 1 << 18; // 262144 @@ -108,7 +111,7 @@ impl State { let mut validators = List::default(); for i in 0..num_validators.0 { let validator = Validator { - pubkey: crate::validator::BlsPublicKey::default(), + pubkey: crate::validator::PublicKey::default(), index: Uint64(i), }; validators.push(validator).expect("Failed to add validator"); @@ -138,18 +141,7 @@ impl State { /// Simple RR proposer rule (round-robin). pub fn is_proposer(&self, index: ValidatorIndex) -> bool { - // Count validators by iterating (since PersistentList doesn't have len()) - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match self.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = self.validators.len_u64(); if num_validators == 0 { return false; // No validators @@ -308,7 +300,22 @@ impl State { pub fn process_block(&self, block: &Block) -> Result { let state = self.process_block_header(block)?; + #[cfg(feature = "devnet1")] let state_after_ops = state.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let state_after_ops = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations + .push(attestation) + .map_err(|e| format!("Failed to push attestation: {:?}", e))?; + } + } + state.process_attestations(&unaggregated_attestations) + }; // State root validation is handled by state_transition_with_validation when needed @@ -489,18 +496,7 @@ impl State { if validator_id < votes.len() && !votes[validator_id] { votes[validator_id] = true; - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match self.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = self.validators.len_u64(); let count = votes.iter().filter(|&&v| v).count(); if 3 * count >= 2 * num_validators as usize { @@ -567,6 +563,7 @@ impl State { /// # Returns /// /// Tuple of (Block, post-State, collected attestations, signatures) + #[cfg(feature = "devnet1")] pub fn build_block( &self, slot: Slot, @@ -575,10 +572,18 @@ impl State { initial_attestations: Option>, available_signed_attestations: Option<&[SignedBlockWithAttestation]>, known_block_roots: Option<&std::collections::HashSet>, - ) -> Result<(Block, Self, Vec, BlockSignatures), String> { + ) -> Result< + ( + Block, + Self, + Vec, + PersistentList, + ), + String, + > { // Initialize empty attestation set for iterative collection let mut attestations = initial_attestations.unwrap_or_default(); - let mut signatures = BlockSignatures::default(); + let mut signatures = PersistentList::default(); // Advance state to target slot // Note: parent_root comes from fork choice and is already validated. @@ -687,6 +692,19 @@ impl State { } } } + + #[cfg(feature = "devnet2")] + pub fn build_block( + &self, + _slot: Slot, + _proposer_index: ValidatorIndex, + _parent_root: Bytes32, + _initial_attestations: Option>, + _available_signed_attestations: Option<&[SignedAttestation]>, + _known_block_roots: Option<&std::collections::HashSet>, + ) -> Result<(Block, Self, Vec, BlockSignatures), String> { + Err("build_block is not implemented for devnet2".to_string()) + } } #[cfg(test)] @@ -743,6 +761,7 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block() { // Create genesis state with validators let genesis_state = State::generate_genesis(Uint64(0), Uint64(4)); @@ -838,6 +857,7 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block_advances_state() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(10)); @@ -865,7 +885,7 @@ mod tests { block: block.clone(), proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }, true, // signatures are considered valid (not validating, just marking as valid) true, @@ -878,6 +898,7 @@ mod tests { } #[test] + #[cfg(feature = "devnet1")] fn test_build_block_state_root_matches() { // Create genesis state let genesis_state = State::generate_genesis(Uint64(0), Uint64(3)); diff --git a/lean_client/containers/src/validator.rs b/lean_client/containers/src/validator.rs index 2649f55..8a2da60 100644 --- a/lean_client/containers/src/validator.rs +++ b/lean_client/containers/src/validator.rs @@ -1,21 +1,24 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer}; use ssz::ByteVector; use ssz_derive::Ssz; -use typenum::U52; +use typenum::{Unsigned, U52}; -/// BLS public key - 52 bytes (as defined in lean spec) +/// Size of XMSS public keys in bytes (as defined in lean spec) +type PublicKeySize = U52; + +/// XMSS public key (as defined in lean spec) #[derive(Clone, Debug, PartialEq, Eq, Ssz)] #[ssz(transparent)] -pub struct BlsPublicKey(pub ByteVector); +pub struct PublicKey(pub ByteVector); -impl Default for BlsPublicKey { +impl Default for PublicKey { fn default() -> Self { - BlsPublicKey(ByteVector::default()) + PublicKey(ByteVector::default()) } } // Custom serde implementation -impl Serialize for BlsPublicKey { +impl Serialize for PublicKey { fn serialize(&self, serializer: S) -> Result where S: Serializer, @@ -23,14 +26,17 @@ impl Serialize for BlsPublicKey { // ByteVector might have to_vec() or similar // For now, use unsafe to access the underlying bytes let bytes = unsafe { - std::slice::from_raw_parts(&self.0 as *const ByteVector as *const u8, 52) + std::slice::from_raw_parts( + &self.0 as *const ByteVector as *const u8, + PublicKeySize::USIZE, + ) }; let hex_string = format!("0x{}", hex::encode(bytes)); serializer.serialize_str(&hex_string) } } -impl<'de> Deserialize<'de> for BlsPublicKey { +impl<'de> Deserialize<'de> for PublicKey { fn deserialize(deserializer: D) -> Result where D: Deserializer<'de>, @@ -39,9 +45,10 @@ impl<'de> Deserialize<'de> for BlsPublicKey { let s = s.strip_prefix("0x").unwrap_or(&s); let decoded = hex::decode(s).map_err(serde::de::Error::custom)?; - if decoded.len() != 52 { + if decoded.len() != PublicKeySize::USIZE { return Err(serde::de::Error::custom(format!( - "Expected 52 bytes, got {}", + "Expected {} bytes, got {}", + PublicKeySize::USIZE, decoded.len() ))); } @@ -49,33 +56,37 @@ impl<'de> Deserialize<'de> for BlsPublicKey { // Create ByteVector from decoded bytes using unsafe let mut byte_vec = ByteVector::default(); unsafe { - let dest = &mut byte_vec as *mut ByteVector as *mut u8; - std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, 52); + let dest = &mut byte_vec as *mut ByteVector as *mut u8; + std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, PublicKeySize::USIZE); } - Ok(BlsPublicKey(byte_vec)) + Ok(PublicKey(byte_vec)) } } -impl BlsPublicKey { +impl PublicKey { pub fn from_hex(s: &str) -> Result { let s = s.strip_prefix("0x").unwrap_or(s); let decoded = hex::decode(s).map_err(|e| e.to_string())?; - if decoded.len() != 52 { - return Err(format!("Expected 52 bytes, got {}", decoded.len())); + if decoded.len() != PublicKeySize::USIZE { + return Err(format!( + "Expected {} bytes, got {}", + PublicKeySize::USIZE, + decoded.len() + )); } let mut byte_vec = ByteVector::default(); unsafe { - let dest = &mut byte_vec as *mut ByteVector as *mut u8; - std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, 52); + let dest = &mut byte_vec as *mut ByteVector as *mut u8; + std::ptr::copy_nonoverlapping(decoded.as_ptr(), dest, PublicKeySize::USIZE); } - Ok(BlsPublicKey(byte_vec)) + Ok(PublicKey(byte_vec)) } } #[derive(Clone, Debug, PartialEq, Eq, Default, Ssz, Serialize, Deserialize)] pub struct Validator { - pub pubkey: BlsPublicKey, + pub pubkey: PublicKey, #[serde(default)] pub index: crate::Uint64, } diff --git a/lean_client/containers/tests/main.rs b/lean_client/containers/tests/main.rs index ee67df1..f951ffe 100644 --- a/lean_client/containers/tests/main.rs +++ b/lean_client/containers/tests/main.rs @@ -1,4 +1,4 @@ -// tests/main.rs - Test entry point +// tests/lib - Test entry point mod debug_deserialize; mod test_vectors; mod unit_tests; diff --git a/lean_client/containers/tests/test_vectors/block_processing.rs b/lean_client/containers/tests/test_vectors/block_processing.rs index d2e1c9e..5bbc997 100644 --- a/lean_client/containers/tests/test_vectors/block_processing.rs +++ b/lean_client/containers/tests/test_vectors/block_processing.rs @@ -2,6 +2,7 @@ use super::runner::TestRunner; #[test] +#[cfg(feature = "devnet1")] fn test_process_first_block_after_genesis() { let test_path = "../tests/test_vectors/test_blocks/test_process_first_block_after_genesis.json"; TestRunner::run_block_processing_test(test_path) @@ -9,12 +10,14 @@ fn test_process_first_block_after_genesis() { } #[test] +#[cfg(feature = "devnet1")] fn test_blocks_with_gaps() { let test_path = "../tests/test_vectors/test_blocks/test_blocks_with_gaps.json"; TestRunner::run_block_processing_test(test_path).expect("test_blocks_with_gaps failed"); } #[test] +#[cfg(feature = "devnet1")] fn test_linear_chain_multiple_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_linear_chain_multiple_blocks.json"; TestRunner::run_block_processing_test(test_path) @@ -22,18 +25,21 @@ fn test_linear_chain_multiple_blocks() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_extends_deep_chain() { let test_path = "../tests/test_vectors/test_blocks/test_block_extends_deep_chain.json"; TestRunner::run_block_processing_test(test_path).expect("test_block_extends_deep_chain failed"); } #[test] +#[cfg(feature = "devnet1")] fn test_empty_blocks() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks.json"; TestRunner::run_block_processing_test(test_path).expect("test_empty_blocks failed"); } #[test] +#[cfg(feature = "devnet1")] fn test_empty_blocks_with_missed_slots() { let test_path = "../tests/test_vectors/test_blocks/test_empty_blocks_with_missed_slots.json"; TestRunner::run_block_processing_test(test_path) @@ -41,6 +47,7 @@ fn test_empty_blocks_with_missed_slots() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_at_large_slot_number() { let test_path = "../tests/test_vectors/test_blocks/test_block_at_large_slot_number.json"; TestRunner::run_block_processing_test(test_path) @@ -50,6 +57,7 @@ fn test_block_at_large_slot_number() { // Invalid block tests (expecting failures) #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_parent_root() { let test_path = "../tests/test_vectors/test_blocks/test_block_with_invalid_parent_root.json"; TestRunner::run_block_processing_test(test_path) @@ -57,6 +65,7 @@ fn test_block_with_invalid_parent_root() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_proposer() { let test_path = "../tests/test_vectors/test_blocks/test_block_with_invalid_proposer.json"; TestRunner::run_block_processing_test(test_path) @@ -64,6 +73,7 @@ fn test_block_with_invalid_proposer() { } #[test] +#[cfg(feature = "devnet1")] fn test_block_with_invalid_state_root() { let test_path = "../tests/test_vectors/test_blocks/test_block_with_invalid_state_root.json"; TestRunner::run_block_processing_test(test_path) diff --git a/lean_client/containers/tests/test_vectors/runner.rs b/lean_client/containers/tests/test_vectors/runner.rs index 3f1b7dd..0ed8ac5 100644 --- a/lean_client/containers/tests/test_vectors/runner.rs +++ b/lean_client/containers/tests/test_vectors/runner.rs @@ -89,18 +89,7 @@ impl TestRunner { // Only check validator count if specified in post-state if let Some(expected_count) = post.validator_count { - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); if num_validators as usize != expected_count { return Err(format!( @@ -481,18 +470,7 @@ impl TestRunner { let state = &test_case.pre; - // Count validators - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); println!( " Genesis time: {}, slot: {}, validators: {}", state.config.genesis_time, state.slot.0, num_validators @@ -617,17 +595,7 @@ impl TestRunner { // Verify validator count if specified if let Some(expected_count) = post.validator_count { - let mut num_validators: u64 = 0; - let mut i: u64 = 0; - loop { - match state.validators.get(i) { - Ok(_) => { - num_validators += 1; - i += 1; - } - Err(_) => break, - } - } + let num_validators = state.validators.len_u64(); if num_validators as usize != expected_count { return Err(format!( @@ -650,6 +618,7 @@ impl TestRunner { /// Test runner for verify_signatures test vectors /// Tests XMSS signature verification on SignedBlockWithAttestation + #[cfg(feature = "devnet1")] pub fn run_verify_signatures_test>( path: P, ) -> Result<(), Box> { @@ -677,34 +646,14 @@ impl TestRunner { signed_block.message.block.proposer_index.0 ); - // Count attestations - let mut attestation_count = 0u64; - loop { - match signed_block - .message - .block - .body - .attestations - .get(attestation_count) - { - Ok(_) => attestation_count += 1, - Err(_) => break, - } - } + let attestation_count = signed_block.message.block.body.attestations.len_u64(); println!(" Attestations in block: {}", attestation_count); println!( " Proposer attestation validator: {}", signed_block.message.proposer_attestation.validator_id.0 ); - // Count signatures - let mut signature_count = 0u64; - loop { - match signed_block.signature.get(signature_count) { - Ok(_) => signature_count += 1, - Err(_) => break, - } - } + let signature_count = signed_block.signature.len_u64(); println!(" Signatures: {}", signature_count); // Check if we expect this test to fail diff --git a/lean_client/containers/tests/test_vectors/verify_signatures.rs b/lean_client/containers/tests/test_vectors/verify_signatures.rs index cd813a9..b9f426b 100644 --- a/lean_client/containers/tests/test_vectors/verify_signatures.rs +++ b/lean_client/containers/tests/test_vectors/verify_signatures.rs @@ -15,6 +15,7 @@ use super::runner::TestRunner; // Without xmss-verify feature, they pass because structural validation succeeds. #[test] +#[cfg(feature = "devnet1")] #[ignore = "TODO"] fn test_proposer_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_signature.json"; @@ -22,6 +23,7 @@ fn test_proposer_signature() { } #[test] +#[cfg(feature = "devnet1")] #[ignore = "TODO"] fn test_proposer_and_attester_signatures() { let test_path = "../tests/test_vectors/test_verify_signatures/test_valid_signatures/test_proposer_and_attester_signatures.json"; @@ -35,6 +37,7 @@ fn test_proposer_and_attester_signatures() { // Run with `cargo test --features xmss-verify` to enable full signature verification. #[test] +#[cfg(feature = "devnet1")] #[ignore = "Requires xmss-verify feature for actual signature validation. Run with: cargo test --features xmss-verify"] fn test_invalid_signature() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_invalid_signature.json"; @@ -42,6 +45,7 @@ fn test_invalid_signature() { } #[test] +#[cfg(feature = "devnet1")] #[ignore = "Requires xmss-verify feature for actual signature validation. Run with: cargo test --features xmss-verify"] fn test_mixed_valid_invalid_signatures() { let test_path = "../tests/test_vectors/test_verify_signatures/test_invalid_signatures/test_mixed_valid_invalid_signatures.json"; diff --git a/lean_client/containers/tests/unit_tests/attestation_aggregation.rs b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs new file mode 100644 index 0000000..72d48b4 --- /dev/null +++ b/lean_client/containers/tests/unit_tests/attestation_aggregation.rs @@ -0,0 +1,144 @@ +#[cfg(feature = "devnet2")] +#[cfg(test)] +mod tests { + use containers::attestation::{ + AggregatedAttestation, AggregationBits, Attestation, AttestationData, + }; + use containers::checkpoint::Checkpoint; + use containers::slot::Slot; + use containers::{Bytes32, Uint64}; + + #[test] + fn test_aggregated_attestation_structure() { + let att_data = AttestationData { + slot: Slot(5), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + }, + }; + + let bits = AggregationBits::from_validator_indices(&vec![2, 7]); + let agg = AggregatedAttestation { + aggregation_bits: bits.clone(), + data: att_data.clone(), + }; + + let indices = agg.aggregation_bits.to_validator_indices(); + assert_eq!( + indices + .into_iter() + .collect::>(), + vec![2, 7].into_iter().collect() + ); + assert_eq!(agg.data, att_data); + } + + #[test] + fn test_aggregate_attestations_by_common_data() { + let att_data1 = AttestationData { + slot: Slot(5), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + }, + }; + let att_data2 = AttestationData { + slot: Slot(6), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(5), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + }; + + let attestations = vec![ + Attestation { + validator_id: Uint64(1), + data: att_data1.clone(), + }, + Attestation { + validator_id: Uint64(3), + data: att_data1.clone(), + }, + Attestation { + validator_id: Uint64(5), + data: att_data2.clone(), + }, + ]; + + let aggregated = AggregatedAttestation::aggregate_by_data(&attestations); + assert_eq!(aggregated.len(), 2); + + let agg1 = aggregated.iter().find(|agg| agg.data == att_data1).unwrap(); + let validator_ids1 = agg1.aggregation_bits.to_validator_indices(); + assert_eq!( + validator_ids1 + .into_iter() + .collect::>(), + vec![1, 3].into_iter().collect() + ); + + let agg2 = aggregated.iter().find(|agg| agg.data == att_data2).unwrap(); + let validator_ids2 = agg2.aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids2, vec![5]); + } + + #[test] + fn test_aggregate_empty_attestations() { + let aggregated = AggregatedAttestation::aggregate_by_data(&[]); + assert!(aggregated.is_empty()); + } + + #[test] + fn test_aggregate_single_attestation() { + let att_data = AttestationData { + slot: Slot(5), + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(4), + }, + target: Checkpoint { + root: Bytes32::default(), + slot: Slot(3), + }, + source: Checkpoint { + root: Bytes32::default(), + slot: Slot(2), + }, + }; + + let attestations = vec![Attestation { + validator_id: Uint64(5), + data: att_data.clone(), + }]; + let aggregated = AggregatedAttestation::aggregate_by_data(&attestations); + + assert_eq!(aggregated.len(), 1); + let validator_ids = aggregated[0].aggregation_bits.to_validator_indices(); + assert_eq!(validator_ids, vec![5]); + } +} diff --git a/lean_client/containers/tests/unit_tests/common.rs b/lean_client/containers/tests/unit_tests/common.rs index 1535732..1a648b8 100644 --- a/lean_client/containers/tests/unit_tests/common.rs +++ b/lean_client/containers/tests/unit_tests/common.rs @@ -1,13 +1,15 @@ +use containers::block::BlockSignatures; use containers::{ block::{hash_tree_root, Block, BlockBody, BlockHeader}, checkpoint::Checkpoint, slot::Slot, state::State, types::{Bytes32, ValidatorIndex}, - Attestation, Attestations, BlockSignatures, BlockWithAttestation, Config, + AggregatedAttestation, Attestation, Attestations, BlockWithAttestation, Config, Signature, SignedBlockWithAttestation, Validators, }; -use ssz::PersistentList as List; +use ssz::PersistentList; +use typenum::U4096; pub const DEVNET_CONFIG_VALIDATOR_REGISTRY_LIMIT: usize = 1 << 12; // 4096 pub const TEST_VALIDATOR_COUNT: usize = 4; // Actual validator count used in tests @@ -21,8 +23,39 @@ pub fn create_block( parent_header: &mut BlockHeader, attestations: Option, ) -> SignedBlockWithAttestation { + #[cfg(feature = "devnet1")] let body = BlockBody { - attestations: attestations.unwrap_or_else(List::default), + attestations: attestations.unwrap_or_else(PersistentList::default), + }; + #[cfg(feature = "devnet2")] + let body = BlockBody { + attestations: { + let attestations_vec = attestations.unwrap_or_default(); + + // Convert PersistentList into a Vec + let attestations_vec: Vec = + attestations_vec.into_iter().cloned().collect(); + + let aggregated: Vec = + AggregatedAttestation::aggregate_by_data(&attestations_vec); + + let aggregated: Vec = + AggregatedAttestation::aggregate_by_data(&attestations_vec); + + // Create a new empty PersistentList + let mut persistent_list: PersistentList = + PersistentList::default(); + + // Push each aggregated attestation + for agg in aggregated { + persistent_list + .push(agg) + .expect("PersistentList capacity exceeded"); + } + + persistent_list + }, + // other BlockBody fields... }; let block_message = Block { @@ -33,13 +66,28 @@ pub fn create_block( body: body, }; - SignedBlockWithAttestation { + #[cfg(feature = "devnet1")] + let return_value = SignedBlockWithAttestation { message: BlockWithAttestation { block: block_message, proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), - } + signature: PersistentList::default(), + }; + + #[cfg(feature = "devnet2")] + let return_value = SignedBlockWithAttestation { + message: BlockWithAttestation { + block: block_message, + proposer_attestation: Attestation::default(), + }, + signature: BlockSignatures { + attestation_signatures: PersistentList::default(), + proposer_signature: Signature::default(), + }, + }; + + return_value } pub fn create_attestations(indices: &[usize]) -> Vec { diff --git a/lean_client/containers/tests/unit_tests/mod.rs b/lean_client/containers/tests/unit_tests/mod.rs index 16a5646..1bef390 100644 --- a/lean_client/containers/tests/unit_tests/mod.rs +++ b/lean_client/containers/tests/unit_tests/mod.rs @@ -1,4 +1,5 @@ // tests/unit_tests/mod.rs +mod attestation_aggregation; mod common; mod state_basic; mod state_justifications; diff --git a/lean_client/containers/tests/unit_tests/state_process.rs b/lean_client/containers/tests/unit_tests/state_process.rs index f423818..5df98cf 100644 --- a/lean_client/containers/tests/unit_tests/state_process.rs +++ b/lean_client/containers/tests/unit_tests/state_process.rs @@ -130,6 +130,7 @@ fn test_process_block_header_invalid( } // This test verifies that attestations correctly justify and finalize slots +#[cfg(feature = "devnet1")] #[test] fn test_process_attestations_justification_and_finalization() { let mut state = genesis_state(); diff --git a/lean_client/containers/tests/unit_tests/state_transition.rs b/lean_client/containers/tests/unit_tests/state_transition.rs index e530dde..7725210 100644 --- a/lean_client/containers/tests/unit_tests/state_transition.rs +++ b/lean_client/containers/tests/unit_tests/state_transition.rs @@ -3,10 +3,11 @@ use containers::{ block::{hash_tree_root, Block, BlockWithAttestation, SignedBlockWithAttestation}, state::State, types::{Bytes32, Uint64}, - Attestation, BlockSignatures, Slot, + Attestation, Attestations, Slot, }; use pretty_assertions::assert_eq; use rstest::fixture; +use ssz::PersistentList; #[path = "common.rs"] mod common; @@ -29,8 +30,23 @@ fn test_state_transition_full() { // Use process_block_header + process_operations to avoid state root validation during setup let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] let expected_state = state_after_header.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let expected_state = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation); + } + } + state_after_header.process_attestations(&unaggregated_attestations) + }; + let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), ..block @@ -62,8 +78,23 @@ fn test_state_transition_invalid_signatures() { // Use process_block_header + process_operations to avoid state root validation during setup let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] let expected_state = state_after_header.process_attestations(&block.body.attestations); + #[cfg(feature = "devnet2")] + let expected_state = { + let mut list = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + list.push(attestation); + } + } + list + }; + let block_with_correct_root = Block { state_root: hash_tree_root(&expected_state), ..block @@ -82,6 +113,7 @@ fn test_state_transition_invalid_signatures() { assert_eq!(result.unwrap_err(), "Block signatures must be valid"); } +#[cfg(feature = "devnet1")] #[test] fn test_state_transition_bad_state_root() { let state = genesis_state(); @@ -98,10 +130,62 @@ fn test_state_transition_bad_state_root() { block, proposer_attestation: Attestation::default(), }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), }; let result = state.state_transition(final_signed_block_with_attestation, true); assert!(result.is_err()); assert_eq!(result.unwrap_err(), "Invalid block state root"); } + +#[cfg(feature = "devnet2")] +#[test] +fn test_state_transition_devnet2() { + let state = genesis_state(); + let mut state_at_slot_1 = state.process_slots(Slot(1)).unwrap(); + + // Create a block with attestations for devnet2 + let signed_block_with_attestation = + create_block(1, &mut state_at_slot_1.latest_block_header, None); + let block = signed_block_with_attestation.message.block.clone(); + + // Process the block header and attestations + let state_after_header = state_at_slot_1.process_block_header(&block).unwrap(); + + #[cfg(feature = "devnet1")] + let expected_state = state_after_header.process_attestations(&block.body.attestations); + + #[cfg(feature = "devnet2")] + let expected_state = { + let mut unaggregated_attestations = Attestations::default(); + for aggregated_attestation in &block.body.attestations { + let plain_attestations = aggregated_attestation.to_plain(); + // For each attestatio in the vector, push to the list + for attestation in plain_attestations { + unaggregated_attestations.push(attestation); + } + } + state_after_header.process_attestations(&unaggregated_attestations) + }; + + // Ensure the state root matches the expected state + let block_with_correct_root = Block { + state_root: hash_tree_root(&expected_state), + ..block + }; + + let final_signed_block_with_attestation = SignedBlockWithAttestation { + message: BlockWithAttestation { + block: block_with_correct_root, + proposer_attestation: signed_block_with_attestation.message.proposer_attestation, + }, + signature: signed_block_with_attestation.signature, + }; + + // Perform the state transition and validate the result + let final_state = state + .state_transition(final_signed_block_with_attestation, true) + .unwrap(); + + assert_eq!(final_state, expected_state); +} diff --git a/lean_client/env-config/Cargo.toml b/lean_client/env-config/Cargo.toml new file mode 100644 index 0000000..4b761e5 --- /dev/null +++ b/lean_client/env-config/Cargo.toml @@ -0,0 +1,12 @@ +[package] +name = "env-config" +version.workspace = true +edition.workspace = true +authors.workspace = true +license.workspace = true + +[features] +devnet1 = [] +devnet2 = [] + +[dependencies] diff --git a/lean_client/env-config/src/lib.rs b/lean_client/env-config/src/lib.rs new file mode 100644 index 0000000..109ac2d --- /dev/null +++ b/lean_client/env-config/src/lib.rs @@ -0,0 +1 @@ +// Empty on purpose diff --git a/lean_client/fork_choice/Cargo.toml b/lean_client/fork_choice/Cargo.toml index f906f59..b16f561 100644 --- a/lean_client/fork_choice/Cargo.toml +++ b/lean_client/fork_choice/Cargo.toml @@ -3,8 +3,14 @@ name = "fork-choice" version = "0.1.0" edition = "2021" +[features] +default = [] +devnet1 = ["containers/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "env-config/devnet1"] + [dependencies] -containers = { path = "../containers" } +env-config = { path = "../env-config", default-features = false } +containers = { path = "../containers", default-features = false } ssz = { git = "https://github.com/grandinetech/grandine", package = "ssz", branch = "develop"} ssz_derive = { git = "https://github.com/grandinetech/grandine", package = "ssz_derive", branch = "develop" } typenum = "1.17.0" diff --git a/lean_client/fork_choice/src/handlers.rs b/lean_client/fork_choice/src/handlers.rs index 3b1d1a6..9f3837d 100644 --- a/lean_client/fork_choice/src/handlers.rs +++ b/lean_client/fork_choice/src/handlers.rs @@ -25,11 +25,24 @@ pub fn on_attestation( signed_attestation: SignedAttestation, is_from_block: bool, ) -> Result<(), String> { + #[cfg(feature = "devnet1")] let validator_id = ValidatorIndex(signed_attestation.message.validator_id.0); + #[cfg(feature = "devnet1")] let attestation_slot = signed_attestation.message.data.slot; + #[cfg(feature = "devnet1")] let source_slot = signed_attestation.message.data.source.slot; + #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot; + #[cfg(feature = "devnet2")] + let validator_id = ValidatorIndex(signed_attestation.validator_id); + #[cfg(feature = "devnet2")] + let attestation_slot = signed_attestation.message.slot; + #[cfg(feature = "devnet2")] + let source_slot = signed_attestation.message.source.slot; + #[cfg(feature = "devnet2")] + let target_slot = signed_attestation.message.target.slot; + // Validate attestation is not from future let curr_slot = store.time / INTERVALS_PER_SLOT; if attestation_slot.0 > curr_slot { @@ -49,6 +62,7 @@ pub fn on_attestation( if is_from_block { // On-chain attestation processing - immediately becomes "known" + #[cfg(feature = "devnet1")] if store .latest_known_attestations .get(&validator_id) @@ -61,14 +75,31 @@ pub fn on_attestation( .insert(validator_id, signed_attestation.clone()); } + #[cfg(feature = "devnet2")] + if store + .latest_known_attestations + .get(&validator_id) + .map_or(true, |existing| existing.message.slot < attestation_slot) + { + store + .latest_known_attestations + .insert(validator_id, signed_attestation.clone()); + } + // Remove from new attestations if superseded if let Some(existing_new) = store.latest_new_attestations.get(&validator_id) { + #[cfg(feature = "devnet1")] if existing_new.message.data.slot <= attestation_slot { store.latest_new_attestations.remove(&validator_id); } + #[cfg(feature = "devnet2")] + if existing_new.message.slot <= attestation_slot { + store.latest_new_attestations.remove(&validator_id); + } } } else { // Network gossip attestation processing - goes to "new" stage + #[cfg(feature = "devnet1")] if store .latest_new_attestations .get(&validator_id) @@ -80,6 +111,17 @@ pub fn on_attestation( .latest_new_attestations .insert(validator_id, signed_attestation); } + + #[cfg(feature = "devnet2")] + if store + .latest_new_attestations + .get(&validator_id) + .map_or(true, |existing| existing.message.slot < attestation_slot) + { + store + .latest_new_attestations + .insert(validator_id, signed_attestation); + } } Ok(()) } @@ -147,49 +189,94 @@ fn process_block_internal( let attestations = &signed_block.message.block.body.attestations; let signatures = &signed_block.signature; - for i in 0.. { - match (attestations.get(i), signatures.get(i)) { - (Ok(attestation), Ok(signature)) => { - let signed_attestation = SignedAttestation { - message: attestation.clone(), - signature: signature.clone(), - }; - on_attestation(store, signed_attestation, true)?; + #[cfg(feature = "devnet1")] + { + for i in 0.. { + match (attestations.get(i), signatures.get(i)) { + (Ok(attestation), Ok(signature)) => { + let signed_attestation = SignedAttestation { + message: attestation.clone(), + signature: signature.clone(), + }; + on_attestation(store, signed_attestation, true)?; + } + _ => break, } - _ => break, } + + // Update head BEFORE processing proposer attestation + update_head(store); + + // Process proposer attestation as gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + let num_body_attestations = attestations.len_u64(); + + // Get proposer signature or use default if not present (for tests) + use containers::attestation::Signature; + let proposer_signature = signatures + .get(num_body_attestations) + .map(|sig| sig.clone()) + .unwrap_or_else(|_| Signature::default()); + + let proposer_signed_attestation = SignedAttestation { + message: signed_block.message.proposer_attestation.clone(), + signature: proposer_signature, + }; + + // Process proposer attestation as if received via gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + on_attestation(store, proposer_signed_attestation, false)?; + + Ok(()) } - // Update head BEFORE processing proposer attestation - update_head(store); + #[cfg(feature = "devnet2")] + { + let aggregated_attestations = &signed_block.message.block.body.attestations; + let attestation_signatures = &signed_block.signature.attestation_signatures; + let proposer_attestation = &signed_block.message.proposer_attestation; + + for (aggregated_attestation, aggregated_signature) in aggregated_attestations + .into_iter() + .zip(attestation_signatures) + { + let validator_ids: Vec = aggregated_attestation + .aggregation_bits + .0 + .iter() + .enumerate() + .filter(|(_, bit)| **bit) + .map(|(index, _)| index as u64) + .collect(); - // Process proposer attestation as gossip (is_from_block=false) - // This ensures it goes to "new" attestations and doesn't immediately affect fork choice - let num_body_attestations = { - let mut count = 0; - while attestations.get(count).is_ok() { - count += 1; + for (validator_id, signature) in validator_ids.into_iter().zip(aggregated_signature) { + on_attestation( + store, + SignedAttestation { + validator_id, + message: aggregated_attestation.data.clone(), + signature: *signature, + }, + true, + )?; + } } - count - }; - // Get proposer signature or use default if not present (for tests) - use containers::attestation::Signature; - let proposer_signature = signatures - .get(num_body_attestations) - .map(|sig| sig.clone()) - .unwrap_or_else(|_| Signature::default()); + // Update head BEFORE processing proposer attestation + update_head(store); - let proposer_signed_attestation = SignedAttestation { - message: signed_block.message.proposer_attestation.clone(), - signature: proposer_signature, - }; + let proposer_signed_attestation = SignedAttestation { + validator_id: proposer_attestation.validator_id.0, + message: proposer_attestation.data.clone(), + signature: signed_block.signature.proposer_signature, + }; - // Process proposer attestation as if received via gossip (is_from_block=false) - // This ensures it goes to "new" attestations and doesn't immediately affect fork choice - on_attestation(store, proposer_signed_attestation, false)?; + // Process proposer attestation as if received via gossip (is_from_block=false) + // This ensures it goes to "new" attestations and doesn't immediately affect fork choice + on_attestation(store, proposer_signed_attestation, false)?; - Ok(()) + Ok(()) + } } fn process_pending_blocks(store: &mut Store, mut roots: Vec) { diff --git a/lean_client/fork_choice/src/store.rs b/lean_client/fork_choice/src/store.rs index 715c90d..8165443 100644 --- a/lean_client/fork_choice/src/store.rs +++ b/lean_client/fork_choice/src/store.rs @@ -84,7 +84,10 @@ pub fn get_fork_choice_head( // stage 1: accumulate weights by walking up from each attestation's head for attestation in latest_attestations.values() { + #[cfg(feature = "devnet1")] let mut curr = attestation.message.data.head.root; + #[cfg(feature = "devnet2")] + let mut curr = attestation.message.head.root; if let Some(block) = store.blocks.get(&curr) { let mut curr_slot = block.message.block.slot; diff --git a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs index d35c0bf..bb32273 100644 --- a/lean_client/fork_choice/tests/fork_choice_test_vectors.rs +++ b/lean_client/fork_choice/tests/fork_choice_test_vectors.rs @@ -4,7 +4,7 @@ use fork_choice::{ }; use containers::{ - attestation::{Attestation, AttestationData, BlockSignatures, Signature, SignedAttestation}, + attestation::{Attestation, AttestationData, Signature, SignedAttestation}, block::{ hash_tree_root, Block, BlockBody, BlockHeader, BlockWithAttestation, SignedBlockWithAttestation, @@ -16,7 +16,7 @@ use containers::{ }; use serde::Deserialize; -use ssz::SszHash; +use ssz::{PersistentList, SszHash}; use std::collections::HashMap; use std::panic::AssertUnwindSafe; @@ -259,6 +259,7 @@ fn convert_test_attestation(test_att: &TestAttestation) -> Attestation { } } +#[cfg(feature = "devnet1")] fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAttestation { let mut attestations = ssz::PersistentList::default(); @@ -302,10 +303,11 @@ fn convert_test_anchor_block(test_block: &TestAnchorBlock) -> SignedBlockWithAtt block, proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } +#[cfg(feature = "devnet1")] fn convert_test_block( test_block_with_att: &TestBlockWithAttestation, ) -> SignedBlockWithAttestation { @@ -334,7 +336,7 @@ fn convert_test_block( block, proposer_attestation, }, - signature: BlockSignatures::default(), + signature: PersistentList::default(), } } @@ -387,7 +389,7 @@ fn initialize_state_from_test(test_state: &TestAnchorState) -> State { let mut validators = List::default(); for test_validator in &test_state.validators.data { - let pubkey = containers::validator::BlsPublicKey::from_hex(&test_validator.pubkey) + let pubkey = containers::validator::PublicKey::from_hex(&test_validator.pubkey) .expect("Failed to parse validator pubkey"); let validator = containers::validator::Validator { pubkey, @@ -410,6 +412,7 @@ fn initialize_state_from_test(test_state: &TestAnchorState) -> State { } } +#[cfg(feature = "devnet1")] fn verify_checks( store: &Store, checks: &Option, @@ -498,6 +501,7 @@ fn verify_checks( Ok(()) } +#[cfg(feature = "devnet1")] fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> { println!(" Running: {}", test.info.test_id); @@ -630,6 +634,7 @@ fn run_single_test(_test_name: &str, test: TestVector) -> Result<(), String> { Ok(()) } +#[cfg(feature = "devnet1")] fn run_test_vector_file(test_path: &str) -> Result<(), String> { let json_str = std::fs::read_to_string(test_path) .map_err(|e| format!("Failed to read file {}: {}", test_path, e))?; @@ -645,6 +650,7 @@ fn run_test_vector_file(test_path: &str) -> Result<(), String> { } #[test] +#[cfg(feature = "devnet1")] fn test_fork_choice_head_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_fork_choice_head"; @@ -688,6 +694,7 @@ fn test_fork_choice_head_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_attestation_processing_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_attestation_processing"; @@ -731,6 +738,7 @@ fn test_attestation_processing_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_fork_choice_reorgs_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_fork_choice_reorgs"; @@ -774,6 +782,7 @@ fn test_fork_choice_reorgs_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_attestation_target_selection_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_attestation_target_selection"; @@ -817,6 +826,7 @@ fn test_attestation_target_selection_vectors() { } #[test] +#[cfg(feature = "devnet1")] fn test_lexicographic_tiebreaker_vectors() { let test_dir = "../tests/test_vectors/test_fork_choice/test_lexicographic_tiebreaker"; diff --git a/lean_client/fork_choice/tests/unit_tests/votes.rs b/lean_client/fork_choice/tests/unit_tests/votes.rs index 3cdaabb..d6c2ad4 100644 --- a/lean_client/fork_choice/tests/unit_tests/votes.rs +++ b/lean_client/fork_choice/tests/unit_tests/votes.rs @@ -7,6 +7,7 @@ use containers::{ use fork_choice::handlers::on_attestation; use fork_choice::store::{accept_new_attestations, INTERVALS_PER_SLOT}; +#[cfg(feature = "devnet1")] fn create_signed_attestation( validator_id: u64, slot: Slot, @@ -36,6 +37,7 @@ fn create_signed_attestation( } #[test] +#[cfg(feature = "devnet1")] fn test_accept_new_attestations() { let mut store = create_test_store(); @@ -81,6 +83,7 @@ fn test_accept_new_attestations() { } #[test] +#[cfg(feature = "devnet1")] fn test_accept_new_attestations_multiple() { let mut store = create_test_store(); @@ -112,6 +115,7 @@ fn test_accept_new_attestations_empty() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_lifecycle() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -173,6 +177,7 @@ fn test_on_attestation_lifecycle() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_future_slot() { let mut store = create_test_store(); let future_slot = Slot(100); // Far in the future @@ -184,6 +189,7 @@ fn test_on_attestation_future_slot() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_update_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -217,6 +223,7 @@ fn test_on_attestation_update_vote() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_ignore_old_vote() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -252,6 +259,7 @@ fn test_on_attestation_ignore_old_vote() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_from_block_supersedes_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); @@ -273,6 +281,7 @@ fn test_on_attestation_from_block_supersedes_new() { } #[test] +#[cfg(feature = "devnet1")] fn test_on_attestation_newer_from_block_removes_older_new() { let mut store = create_test_store(); let validator_idx = ValidatorIndex(1); diff --git a/lean_client/networking/Cargo.toml b/lean_client/networking/Cargo.toml index f107994..6b116c9 100644 --- a/lean_client/networking/Cargo.toml +++ b/lean_client/networking/Cargo.toml @@ -3,7 +3,13 @@ name = "networking" version = "0.1.0" edition = "2024" +[features] +default = [] +devnet1 = ["containers/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "env-config/devnet1"] + [dependencies] +env-config = { path = "../env-config", default-features = false } containers = {workspace = true} alloy-primitives = { workspace = true} libp2p = {workspace = true} @@ -11,7 +17,9 @@ snap = {workspace = true} sha2 = { workspace = true } anyhow = { workspace = true } async-trait = "0.1" -enr = "0.7" +discv5 = "0.10" +enr = { version = "0.13", features = ["k256"] } +k256 = "0.13" futures = "0.3" libp2p-identity = { version = "0.2", features = ["secp256k1"] } libp2p-mplex = "0.39" @@ -22,3 +30,8 @@ tracing = "0.1" yamux = "0.12" ssz = { workspace = true } serde = { workspace = true } + +[dev-dependencies] +hex = "0.4" +num-bigint = "0.4" +num-traits = "0.2" diff --git a/lean_client/networking/src/bootnodes.rs b/lean_client/networking/src/bootnodes.rs index 264ec02..427f4ae 100644 --- a/lean_client/networking/src/bootnodes.rs +++ b/lean_client/networking/src/bootnodes.rs @@ -1,6 +1,11 @@ use std::sync::Arc; +use discv5::enr::CombinedKey; +use enr::Enr; use libp2p::Multiaddr; +use tracing::warn; + +use crate::discovery::{DiscoveryService, parse_enr}; pub trait BootnodeSource: Send + Sync { fn to_multiaddrs(&self) -> Vec; @@ -24,17 +29,90 @@ impl BootnodeSource for Arc<[Multiaddr]> { } } +#[derive(Debug, Clone)] +pub enum Bootnode { + Multiaddr(Multiaddr), + Enr(Enr), +} + +impl Bootnode { + pub fn parse(s: &str) -> Option { + if s.starts_with("enr:") { + match parse_enr(s) { + Ok(enr) => Some(Bootnode::Enr(enr)), + Err(e) => { + warn!(bootnode = s, error = ?e, "Failed to parse ENR bootnode"); + None + } + } + } else { + match s.parse::() { + Ok(addr) => Some(Bootnode::Multiaddr(addr)), + Err(e) => { + warn!(bootnode = s, error = ?e, "Failed to parse Multiaddr bootnode"); + None + } + } + } + } + + pub fn to_multiaddr(&self) -> Option { + match self { + Bootnode::Multiaddr(addr) => Some(addr.clone()), + Bootnode::Enr(enr) => DiscoveryService::enr_to_multiaddr(enr), + } + } + + pub fn as_enr(&self) -> Option<&Enr> { + match self { + Bootnode::Enr(enr) => Some(enr), + Bootnode::Multiaddr(_) => None, + } + } +} + #[derive(Debug, Clone, Default)] -pub struct StaticBootnodes(Vec); +pub struct StaticBootnodes { + multiaddrs: Vec, + enrs: Vec>, +} impl StaticBootnodes { - pub fn new>>(addrs: T) -> Self { - StaticBootnodes(addrs.into()) + pub fn new(bootnodes: Vec) -> Self { + let mut multiaddrs = Vec::new(); + let mut enrs = Vec::new(); + + for bootnode in bootnodes { + match bootnode { + Bootnode::Multiaddr(addr) => multiaddrs.push(addr), + Bootnode::Enr(enr) => { + // Convert ENR to multiaddr for libp2p connection + if let Some(addr) = DiscoveryService::enr_to_multiaddr(&enr) { + multiaddrs.push(addr); + } + enrs.push(enr); + } + } + } + + StaticBootnodes { multiaddrs, enrs } + } + + pub fn parse(bootnode_strs: &[String]) -> Self { + let bootnodes: Vec = bootnode_strs + .iter() + .filter_map(|s| Bootnode::parse(s)) + .collect(); + Self::new(bootnodes) + } + + pub fn enrs(&self) -> &[Enr] { + &self.enrs } } impl BootnodeSource for StaticBootnodes { fn to_multiaddrs(&self) -> Vec { - self.0.clone() + self.multiaddrs.clone() } } diff --git a/lean_client/networking/src/discovery/config.rs b/lean_client/networking/src/discovery/config.rs new file mode 100644 index 0000000..b613cc7 --- /dev/null +++ b/lean_client/networking/src/discovery/config.rs @@ -0,0 +1,40 @@ +use std::net::IpAddr; + +use discv5::enr::CombinedKey; +use enr::Enr; + +#[derive(Debug, Clone)] +pub struct DiscoveryConfig { + pub enabled: bool, + pub udp_port: u16, + pub libp2p_port: u16, + pub listen_address: IpAddr, + pub bootnodes: Vec>, +} + +impl DiscoveryConfig { + pub fn new(listen_address: IpAddr, udp_port: u16, libp2p_port: u16) -> Self { + Self { + enabled: true, + udp_port, + libp2p_port, + listen_address, + bootnodes: Vec::new(), + } + } + + pub fn with_bootnodes(mut self, bootnodes: Vec>) -> Self { + self.bootnodes = bootnodes; + self + } + + pub fn disabled() -> Self { + Self { + enabled: false, + udp_port: 0, + libp2p_port: 0, + listen_address: IpAddr::V4(std::net::Ipv4Addr::UNSPECIFIED), + bootnodes: Vec::new(), + } + } +} diff --git a/lean_client/networking/src/discovery/mod.rs b/lean_client/networking/src/discovery/mod.rs new file mode 100644 index 0000000..7ee532b --- /dev/null +++ b/lean_client/networking/src/discovery/mod.rs @@ -0,0 +1,233 @@ +pub mod config; + +#[cfg(test)] +mod tests; + +use std::net::IpAddr; +use std::sync::Arc; + +use anyhow::{Result, anyhow}; +use discv5::enr::{CombinedKey, NodeId}; +use discv5::{ConfigBuilder, Discv5, Event as Discv5Event, ListenConfig}; +use enr::{Builder as EnrBuilder, Enr}; +use libp2p::Multiaddr; +use libp2p::multiaddr::Protocol; +use libp2p_identity::{Keypair, PeerId}; +use tokio::sync::mpsc; +use tracing::{debug, info, warn}; + +pub use config::DiscoveryConfig; + +/// Discovery service that wraps discv5 for peer discovery. +pub struct DiscoveryService { + discv5: Arc, + local_enr: Enr, + event_receiver: mpsc::Receiver, +} + +impl DiscoveryService { + pub async fn new(config: DiscoveryConfig, keypair: &Keypair) -> Result { + let enr_key = keypair_to_enr_key(keypair)?; + + let local_enr = build_enr( + &enr_key, + config.listen_address, + config.udp_port, + config.libp2p_port, + )?; + + info!( + enr = %local_enr, + node_id = %local_enr.node_id(), + "Built local ENR" + ); + + let listen_config = ListenConfig::from_ip(config.listen_address, config.udp_port); + + let discv5_config = ConfigBuilder::new(listen_config).build(); + + let mut discv5 = Discv5::new(local_enr.clone(), enr_key, discv5_config) + .map_err(|e| anyhow!("Failed to create discv5: {e}"))?; + + for bootnode in &config.bootnodes { + if let Err(e) = discv5.add_enr(bootnode.clone()) { + warn!(enr = %bootnode, error = ?e, "Failed to add bootnode ENR"); + } else { + info!(enr = %bootnode, "Added bootnode ENR"); + } + } + + discv5 + .start() + .await + .map_err(|e| anyhow!("Failed to start discv5: {e}"))?; + + let event_receiver = discv5 + .event_stream() + .await + .map_err(|e| anyhow!("Failed to get discv5 event stream: {e}"))?; + + info!("Discovery service started"); + + Ok(Self { + discv5: Arc::new(discv5), + local_enr, + event_receiver, + }) + } + + pub fn local_enr(&self) -> &Enr { + &self.local_enr + } + + pub async fn recv(&mut self) -> Option> { + loop { + match self.event_receiver.recv().await { + Some(event) => { + match event { + Discv5Event::Discovered(enr) => { + info!( + node_id = %enr.node_id(), + "Discovered peer via discv5" + ); + return Some(enr); + } + Discv5Event::SocketUpdated(addr) => { + info!(?addr, "discv5 socket updated"); + } + Discv5Event::SessionEstablished(enr, addr) => { + debug!( + node_id = %enr.node_id(), + ?addr, + "discv5 session established" + ); + } + Discv5Event::TalkRequest(_) => { + // We don't handle TALKREQ for now + } + Discv5Event::NodeInserted { node_id, replaced } => { + debug!( + %node_id, + ?replaced, + "Node inserted into routing table" + ); + } + _ => { + // Handle any new event types added in future versions + } + } + } + None => return None, + } + } + } + + pub fn enr_to_multiaddr(enr: &Enr) -> Option { + let ip = enr + .ip4() + .map(IpAddr::V4) + .or_else(|| enr.ip6().map(IpAddr::V6))?; + let libp2p_port = enr.tcp4().or_else(|| enr.tcp6())?; + + let peer_id = enr_to_peer_id(enr)?; + + let mut multiaddr: Multiaddr = ip.into(); + multiaddr.push(Protocol::Udp(libp2p_port)); + multiaddr.push(Protocol::QuicV1); + multiaddr.push(Protocol::P2p(peer_id)); + + Some(multiaddr) + } + + pub fn find_random_peers(&self) { + let random_node_id = generate_random_node_id(); + debug!(%random_node_id, "Starting random peer discovery lookup"); + + let discv5 = Arc::clone(&self.discv5); + tokio::spawn(async move { + match discv5.find_node(random_node_id).await { + Ok(nodes) => { + info!(count = nodes.len(), "Random lookup completed"); + } + Err(e) => { + warn!(error = ?e, "Random lookup failed"); + } + } + }); + } + + pub fn connected_peers(&self) -> usize { + self.discv5.connected_peers() + } +} + +fn keypair_to_enr_key(keypair: &Keypair) -> Result { + match keypair.key_type() { + libp2p_identity::KeyType::Secp256k1 => { + let secp_keypair = keypair + .clone() + .try_into_secp256k1() + .map_err(|_| anyhow!("Failed to convert to secp256k1"))?; + + let secret_bytes = secp_keypair.secret().to_bytes(); + let secret_key = k256::ecdsa::SigningKey::from_slice(&secret_bytes) + .map_err(|e| anyhow!("Failed to create signing key: {e}"))?; + + Ok(CombinedKey::Secp256k1(secret_key)) + } + other => Err(anyhow!("Unsupported key type for discv5: {:?}", other)), + } +} + +fn build_enr( + key: &CombinedKey, + ip: IpAddr, + udp_port: u16, + libp2p_port: u16, +) -> Result> { + let mut builder = EnrBuilder::default(); + + // libp2p port is stored in tcp field, since Enr doesn't have a field for a quic port + match ip { + IpAddr::V4(ipv4) => { + builder.ip4(ipv4); + builder.udp4(udp_port); + builder.tcp4(libp2p_port); + } + IpAddr::V6(ipv6) => { + builder.ip6(ipv6); + builder.udp6(udp_port); + builder.tcp6(libp2p_port); + } + } + + builder + .build(key) + .map_err(|e| anyhow!("Failed to build ENR: {e}")) +} + +fn enr_to_peer_id(enr: &Enr) -> Option { + let public_key = enr.public_key(); + + match public_key { + discv5::enr::CombinedPublicKey::Secp256k1(pk) => { + let compressed = pk.to_sec1_bytes(); + let libp2p_pk = + libp2p_identity::secp256k1::PublicKey::try_from_bytes(&compressed).ok()?; + let public = libp2p_identity::PublicKey::from(libp2p_pk); + Some(PeerId::from_public_key(&public)) + } + _ => None, + } +} + +pub fn parse_enr(enr_str: &str) -> Result> { + enr_str + .parse() + .map_err(|e| anyhow!("Failed to parse ENR: {e}")) +} + +fn generate_random_node_id() -> NodeId { + let random_bytes: [u8; 32] = rand::random(); + NodeId::new(&random_bytes) +} diff --git a/lean_client/networking/src/discovery/tests.rs b/lean_client/networking/src/discovery/tests.rs new file mode 100644 index 0000000..8bdbf82 --- /dev/null +++ b/lean_client/networking/src/discovery/tests.rs @@ -0,0 +1,1433 @@ +//! Tests for Discovery v5 Protocol Specification +//! +//! Based on the official Discovery v5 specification and test vectors from: +//! https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire-test-vectors.md + +use std::net::{Ipv4Addr, Ipv6Addr}; + +/// Protocol constants matching Discovery v5 specification +mod constants { + /// Protocol identifier + pub const PROTOCOL_ID: &[u8] = b"discv5"; + /// Protocol version (v5.1) + pub const PROTOCOL_VERSION: u16 = 0x0001; + /// Maximum request ID length in bytes + pub const MAX_REQUEST_ID_LENGTH: usize = 8; + /// K-bucket size per Kademlia standard + pub const K_BUCKET_SIZE: usize = 16; + /// Alpha (lookup concurrency) + pub const ALPHA: usize = 3; + /// Number of buckets for 256-bit node ID space + pub const BUCKET_COUNT: usize = 256; + /// Request timeout in seconds (spec: 500ms) + pub const REQUEST_TIMEOUT_SECS: f64 = 0.5; + /// Handshake timeout in seconds + pub const HANDSHAKE_TIMEOUT_SECS: f64 = 1.0; + /// Maximum ENRs per NODES response + pub const MAX_NODES_RESPONSE: usize = 16; + /// Bond expiry in seconds (24 hours) + pub const BOND_EXPIRY_SECS: u64 = 86400; + /// Maximum packet size + pub const MAX_PACKET_SIZE: usize = 1280; + /// Minimum packet size + pub const MIN_PACKET_SIZE: usize = 63; +} + +/// Packet type flags +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum PacketFlag { + Message = 0, + WhoAreYou = 1, + Handshake = 2, +} + +/// Message type codes matching wire protocol spec +#[repr(u8)] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum MessageType { + Ping = 0x01, + Pong = 0x02, + FindNode = 0x03, + Nodes = 0x04, + TalkReq = 0x05, + TalkResp = 0x06, + RegTopic = 0x07, + Ticket = 0x08, + RegConfirmation = 0x09, + TopicQuery = 0x0A, +} + +/// Request ID (variable length, max 8 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RequestId(pub Vec); + +impl RequestId { + pub fn new(data: Vec) -> Self { + assert!(data.len() <= constants::MAX_REQUEST_ID_LENGTH); + Self(data) + } + + pub fn len(&self) -> usize { + self.0.len() + } + + pub fn is_empty(&self) -> bool { + self.0.is_empty() + } +} + +/// IPv4 address (4 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IPv4(pub [u8; 4]); + +impl IPv4 { + pub fn new(bytes: [u8; 4]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 4 + } +} + +/// IPv6 address (16 bytes) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IPv6(pub [u8; 16]); + +impl IPv6 { + pub fn new(bytes: [u8; 16]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 16 + } +} + +/// ID Nonce (16 bytes / 128 bits) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IdNonce(pub [u8; 16]); + +impl IdNonce { + pub fn new(bytes: [u8; 16]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 16 + } +} + +/// Nonce (12 bytes / 96 bits) +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Nonce(pub [u8; 12]); + +impl Nonce { + pub fn new(bytes: [u8; 12]) -> Self { + Self(bytes) + } + + pub fn len(&self) -> usize { + 12 + } +} + +/// Distance type (u16) +#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +pub struct Distance(pub u16); + +/// Port type (u16) +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub struct Port(pub u16); + +/// ENR sequence number (u64) +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub struct SeqNumber(pub u64); + +/// Node ID (32 bytes / 256 bits) +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub struct NodeId(pub [u8; 32]); + +impl NodeId { + pub fn new(bytes: [u8; 32]) -> Self { + Self(bytes) + } + + pub fn from_slice(slice: &[u8]) -> Self { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(slice); + Self(bytes) + } +} + +/// Discovery configuration +#[derive(Debug, Clone)] +pub struct DiscoveryConfig { + pub k_bucket_size: usize, + pub alpha: usize, + pub request_timeout_secs: f64, + pub handshake_timeout_secs: f64, + pub max_nodes_response: usize, + pub bond_expiry_secs: u64, +} + +impl Default for DiscoveryConfig { + fn default() -> Self { + Self { + k_bucket_size: constants::K_BUCKET_SIZE, + alpha: constants::ALPHA, + request_timeout_secs: constants::REQUEST_TIMEOUT_SECS, + handshake_timeout_secs: constants::HANDSHAKE_TIMEOUT_SECS, + max_nodes_response: constants::MAX_NODES_RESPONSE, + bond_expiry_secs: constants::BOND_EXPIRY_SECS, + } + } +} + +/// PING message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Ping { + pub request_id: RequestId, + pub enr_seq: SeqNumber, +} + +/// PONG message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Pong { + pub request_id: RequestId, + pub enr_seq: SeqNumber, + pub recipient_ip: Vec, + pub recipient_port: Port, +} + +/// FINDNODE message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct FindNode { + pub request_id: RequestId, + pub distances: Vec, +} + +/// NODES message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct Nodes { + pub request_id: RequestId, + pub total: u8, + pub enrs: Vec>, +} + +/// TALKREQ message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TalkReq { + pub request_id: RequestId, + pub protocol: Vec, + pub request: Vec, +} + +/// TALKRESP message +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct TalkResp { + pub request_id: RequestId, + pub response: Vec, +} + +/// Static header +#[derive(Debug, Clone)] +pub struct StaticHeader { + pub protocol_id: [u8; 6], + pub version: u16, + pub flag: u8, + pub nonce: Nonce, + pub authdata_size: u16, +} + +impl StaticHeader { + pub fn new(flag: u8, nonce: Nonce, authdata_size: u16) -> Self { + Self { + protocol_id: *b"discv5", + version: constants::PROTOCOL_VERSION, + flag, + nonce, + authdata_size, + } + } +} + +/// WHOAREYOU authdata +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct WhoAreYouAuthdata { + pub id_nonce: IdNonce, + pub enr_seq: SeqNumber, +} + +/// Node entry in routing table +#[derive(Debug, Clone)] +pub struct NodeEntry { + pub node_id: NodeId, + pub enr_seq: SeqNumber, + pub last_seen: f64, + pub endpoint: Option, + pub verified: bool, +} + +impl NodeEntry { + pub fn new(node_id: NodeId) -> Self { + Self { + node_id, + enr_seq: SeqNumber::default(), + last_seen: 0.0, + endpoint: None, + verified: false, + } + } + + pub fn with_enr_seq(mut self, enr_seq: SeqNumber) -> Self { + self.enr_seq = enr_seq; + self + } + + pub fn with_last_seen(mut self, last_seen: f64) -> Self { + self.last_seen = last_seen; + self + } + + pub fn with_endpoint(mut self, endpoint: String) -> Self { + self.endpoint = Some(endpoint); + self + } + + pub fn with_verified(mut self, verified: bool) -> Self { + self.verified = verified; + self + } +} + +/// K-bucket for storing nodes at a specific distance +#[derive(Debug, Clone, Default)] +pub struct KBucket { + nodes: Vec, +} + +impl KBucket { + pub fn new() -> Self { + Self { nodes: Vec::new() } + } + + pub fn is_empty(&self) -> bool { + self.nodes.is_empty() + } + + pub fn is_full(&self) -> bool { + self.nodes.len() >= constants::K_BUCKET_SIZE + } + + pub fn len(&self) -> usize { + self.nodes.len() + } + + pub fn add(&mut self, entry: NodeEntry) -> bool { + // Check if node already exists + if let Some(pos) = self.nodes.iter().position(|e| e.node_id == entry.node_id) { + // Move to tail (most recent) + self.nodes.remove(pos); + self.nodes.push(entry); + return true; + } + + // Reject if full + if self.is_full() { + return false; + } + + self.nodes.push(entry); + true + } + + pub fn remove(&mut self, node_id: &NodeId) -> bool { + if let Some(pos) = self.nodes.iter().position(|e| &e.node_id == node_id) { + self.nodes.remove(pos); + true + } else { + false + } + } + + pub fn contains(&self, node_id: &NodeId) -> bool { + self.nodes.iter().any(|e| &e.node_id == node_id) + } + + pub fn get(&self, node_id: &NodeId) -> Option<&NodeEntry> { + self.nodes.iter().find(|e| &e.node_id == node_id) + } + + pub fn head(&self) -> Option<&NodeEntry> { + self.nodes.first() + } + + pub fn tail(&self) -> Option<&NodeEntry> { + self.nodes.last() + } + + pub fn iter(&self) -> impl Iterator { + self.nodes.iter() + } +} + +/// Calculate XOR distance between two node IDs +pub fn xor_distance(a: &NodeId, b: &NodeId) -> num_bigint::BigUint { + use num_bigint::BigUint; + + let a_int = BigUint::from_bytes_be(&a.0); + let b_int = BigUint::from_bytes_be(&b.0); + a_int ^ b_int +} + +/// Calculate log2 distance between two node IDs +pub fn log2_distance(a: &NodeId, b: &NodeId) -> Distance { + let xor = xor_distance(a, b); + if xor.bits() == 0 { + Distance(0) + } else { + Distance(xor.bits() as u16) + } +} + +/// Kademlia routing table +pub struct RoutingTable { + local_id: NodeId, + pub buckets: Vec, +} + +impl RoutingTable { + pub fn new(local_id: NodeId) -> Self { + let buckets = (0..constants::BUCKET_COUNT) + .map(|_| KBucket::new()) + .collect(); + Self { local_id, buckets } + } + + pub fn node_count(&self) -> usize { + self.buckets.iter().map(|b| b.len()).sum() + } + + pub fn bucket_index(&self, node_id: &NodeId) -> usize { + let dist = log2_distance(&self.local_id, node_id); + if dist.0 == 0 { + 0 + } else { + (dist.0 - 1) as usize + } + } + + pub fn add(&mut self, entry: NodeEntry) -> bool { + // Cannot add self + if entry.node_id == self.local_id { + return false; + } + + let idx = self.bucket_index(&entry.node_id); + self.buckets[idx].add(entry) + } + + pub fn remove(&mut self, node_id: &NodeId) -> bool { + let idx = self.bucket_index(node_id); + self.buckets[idx].remove(node_id) + } + + pub fn contains(&self, node_id: &NodeId) -> bool { + let idx = self.bucket_index(node_id); + self.buckets[idx].contains(node_id) + } + + pub fn get(&self, node_id: &NodeId) -> Option<&NodeEntry> { + let idx = self.bucket_index(node_id); + self.buckets[idx].get(node_id) + } + + pub fn closest_nodes(&self, target: &NodeId, count: usize) -> Vec<&NodeEntry> { + let mut all_nodes: Vec<&NodeEntry> = self.buckets.iter().flat_map(|b| b.iter()).collect(); + + all_nodes.sort_by(|a, b| { + let dist_a = xor_distance(&a.node_id, target); + let dist_b = xor_distance(&b.node_id, target); + dist_a.cmp(&dist_b) + }); + + all_nodes.into_iter().take(count).collect() + } + + pub fn nodes_at_distance(&self, distance: Distance) -> Vec<&NodeEntry> { + if distance.0 == 0 || distance.0 > 256 { + return Vec::new(); + } + + let idx = (distance.0 - 1) as usize; + self.buckets[idx].iter().collect() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use num_bigint::BigUint; + use num_traits::One; + + // ============================================================ + // Protocol Constants Tests + // ============================================================ + + mod protocol_constants { + use super::*; + + #[test] + fn test_protocol_id() { + assert_eq!(constants::PROTOCOL_ID, b"discv5"); + assert_eq!(constants::PROTOCOL_ID.len(), 6); + } + + #[test] + fn test_protocol_version() { + assert_eq!(constants::PROTOCOL_VERSION, 0x0001); + } + + #[test] + fn test_max_request_id_length() { + assert_eq!(constants::MAX_REQUEST_ID_LENGTH, 8); + } + + #[test] + fn test_k_bucket_size() { + assert_eq!(constants::K_BUCKET_SIZE, 16); + } + + #[test] + fn test_alpha_concurrency() { + assert_eq!(constants::ALPHA, 3); + } + + #[test] + fn test_bucket_count() { + assert_eq!(constants::BUCKET_COUNT, 256); + } + + #[test] + fn test_request_timeout() { + assert!((constants::REQUEST_TIMEOUT_SECS - 0.5).abs() < f64::EPSILON); + } + + #[test] + fn test_handshake_timeout() { + assert!((constants::HANDSHAKE_TIMEOUT_SECS - 1.0).abs() < f64::EPSILON); + } + + #[test] + fn test_max_nodes_response() { + assert_eq!(constants::MAX_NODES_RESPONSE, 16); + } + + #[test] + fn test_bond_expiry() { + assert_eq!(constants::BOND_EXPIRY_SECS, 86400); + } + + #[test] + fn test_packet_size_limits() { + assert_eq!(constants::MAX_PACKET_SIZE, 1280); + assert_eq!(constants::MIN_PACKET_SIZE, 63); + } + } + + // ============================================================ + // Custom Types Tests + // ============================================================ + + mod custom_types { + use super::*; + + #[test] + fn test_request_id_limit() { + let req_id = RequestId::new(vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]); + assert_eq!(req_id.len(), 8); + } + + #[test] + fn test_request_id_variable_length() { + let req_id = RequestId::new(vec![0x01]); + assert_eq!(req_id.len(), 1); + } + + #[test] + fn test_ipv4_length() { + let ip = IPv4::new([0xc0, 0xa8, 0x01, 0x01]); // 192.168.1.1 + assert_eq!(ip.len(), 4); + } + + #[test] + fn test_ipv6_length() { + let mut bytes = [0u8; 16]; + bytes[15] = 0x01; // ::1 + let ip = IPv6::new(bytes); + assert_eq!(ip.len(), 16); + } + + #[test] + fn test_id_nonce_length() { + let nonce = IdNonce::new([0x01; 16]); + assert_eq!(nonce.len(), 16); + } + + #[test] + fn test_nonce_length() { + let nonce = Nonce::new([0x01; 12]); + assert_eq!(nonce.len(), 12); + } + + #[test] + fn test_distance_type() { + let d = Distance(256); + assert_eq!(d.0, 256u16); + } + + #[test] + fn test_port_type() { + let p = Port(30303); + assert_eq!(p.0, 30303u16); + } + + #[test] + fn test_enr_seq_type() { + let seq = SeqNumber(42); + assert_eq!(seq.0, 42u64); + } + } + + // ============================================================ + // Packet Flag Tests + // ============================================================ + + mod packet_flags { + use super::*; + + #[test] + fn test_message_flag() { + assert_eq!(PacketFlag::Message as u8, 0); + } + + #[test] + fn test_whoareyou_flag() { + assert_eq!(PacketFlag::WhoAreYou as u8, 1); + } + + #[test] + fn test_handshake_flag() { + assert_eq!(PacketFlag::Handshake as u8, 2); + } + } + + // ============================================================ + // Message Types Tests + // ============================================================ + + mod message_types { + use super::*; + + #[test] + fn test_ping_type() { + assert_eq!(MessageType::Ping as u8, 0x01); + } + + #[test] + fn test_pong_type() { + assert_eq!(MessageType::Pong as u8, 0x02); + } + + #[test] + fn test_findnode_type() { + assert_eq!(MessageType::FindNode as u8, 0x03); + } + + #[test] + fn test_nodes_type() { + assert_eq!(MessageType::Nodes as u8, 0x04); + } + + #[test] + fn test_talkreq_type() { + assert_eq!(MessageType::TalkReq as u8, 0x05); + } + + #[test] + fn test_talkresp_type() { + assert_eq!(MessageType::TalkResp as u8, 0x06); + } + + #[test] + fn test_experimental_types() { + assert_eq!(MessageType::RegTopic as u8, 0x07); + assert_eq!(MessageType::Ticket as u8, 0x08); + assert_eq!(MessageType::RegConfirmation as u8, 0x09); + assert_eq!(MessageType::TopicQuery as u8, 0x0A); + } + } + + // ============================================================ + // Discovery Config Tests + // ============================================================ + + mod discovery_config { + use super::*; + + #[test] + fn test_default_values() { + let config = DiscoveryConfig::default(); + + assert_eq!(config.k_bucket_size, constants::K_BUCKET_SIZE); + assert_eq!(config.alpha, constants::ALPHA); + assert!( + (config.request_timeout_secs - constants::REQUEST_TIMEOUT_SECS).abs() + < f64::EPSILON + ); + assert!( + (config.handshake_timeout_secs - constants::HANDSHAKE_TIMEOUT_SECS).abs() + < f64::EPSILON + ); + assert_eq!(config.max_nodes_response, constants::MAX_NODES_RESPONSE); + assert_eq!(config.bond_expiry_secs, constants::BOND_EXPIRY_SECS); + } + + #[test] + fn test_custom_values() { + let config = DiscoveryConfig { + k_bucket_size: 8, + alpha: 5, + request_timeout_secs: 2.0, + ..Default::default() + }; + assert_eq!(config.k_bucket_size, 8); + assert_eq!(config.alpha, 5); + assert!((config.request_timeout_secs - 2.0).abs() < f64::EPSILON); + } + } + + // ============================================================ + // Ping Message Tests + // ============================================================ + + mod ping_message { + use super::*; + + #[test] + fn test_creation_with_types() { + let ping = Ping { + request_id: RequestId::new(vec![0x00, 0x00, 0x00, 0x01]), + enr_seq: SeqNumber(2), + }; + + assert_eq!(ping.request_id.0, vec![0x00, 0x00, 0x00, 0x01]); + assert_eq!(ping.enr_seq, SeqNumber(2)); + } + + #[test] + fn test_max_request_id_length() { + let ping = Ping { + request_id: RequestId::new(vec![0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08]), + enr_seq: SeqNumber(1), + }; + assert_eq!(ping.request_id.len(), 8); + } + } + + // ============================================================ + // Pong Message Tests + // ============================================================ + + mod pong_message { + use super::*; + + #[test] + fn test_creation_ipv4() { + let pong = Pong { + request_id: RequestId::new(vec![0x00, 0x00, 0x00, 0x01]), + enr_seq: SeqNumber(42), + recipient_ip: vec![0xc0, 0xa8, 0x01, 0x01], // 192.168.1.1 + recipient_port: Port(9000), + }; + + assert_eq!(pong.enr_seq, SeqNumber(42)); + assert_eq!(pong.recipient_ip.len(), 4); + assert_eq!(pong.recipient_port, Port(9000)); + } + + #[test] + fn test_creation_ipv6() { + let mut ipv6 = vec![0u8; 16]; + ipv6[15] = 0x01; // ::1 + let pong = Pong { + request_id: RequestId::new(vec![0x01]), + enr_seq: SeqNumber(1), + recipient_ip: ipv6.clone(), + recipient_port: Port(30303), + }; + + assert_eq!(pong.recipient_ip.len(), 16); + } + } + + // ============================================================ + // FindNode Message Tests + // ============================================================ + + mod findnode_message { + use super::*; + + #[test] + fn test_single_distance() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(256)], + }; + + assert_eq!(findnode.distances, vec![Distance(256)]); + } + + #[test] + fn test_multiple_distances() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(0), Distance(1), Distance(255), Distance(256)], + }; + + assert!(findnode.distances.contains(&Distance(0))); + assert!(findnode.distances.contains(&Distance(256))); + } + + #[test] + fn test_distance_zero_returns_self() { + let findnode = FindNode { + request_id: RequestId::new(vec![0x01]), + distances: vec![Distance(0)], + }; + assert_eq!(findnode.distances, vec![Distance(0)]); + } + } + + // ============================================================ + // Nodes Message Tests + // ============================================================ + + mod nodes_message { + use super::*; + + #[test] + fn test_single_response() { + let nodes = Nodes { + request_id: RequestId::new(vec![0x01]), + total: 1, + enrs: vec![b"enr:-example".to_vec()], + }; + + assert_eq!(nodes.total, 1); + assert_eq!(nodes.enrs.len(), 1); + } + + #[test] + fn test_multiple_responses() { + let nodes = Nodes { + request_id: RequestId::new(vec![0x01]), + total: 3, + enrs: vec![b"enr1".to_vec(), b"enr2".to_vec()], + }; + + assert_eq!(nodes.total, 3); + assert_eq!(nodes.enrs.len(), 2); + } + } + + // ============================================================ + // TalkReq Message Tests + // ============================================================ + + mod talkreq_message { + use super::*; + + #[test] + fn test_creation() { + let req = TalkReq { + request_id: RequestId::new(vec![0x01]), + protocol: b"portal".to_vec(), + request: b"payload".to_vec(), + }; + + assert_eq!(req.protocol, b"portal".to_vec()); + assert_eq!(req.request, b"payload".to_vec()); + } + } + + // ============================================================ + // TalkResp Message Tests + // ============================================================ + + mod talkresp_message { + use super::*; + + #[test] + fn test_creation() { + let resp = TalkResp { + request_id: RequestId::new(vec![0x01]), + response: b"response_data".to_vec(), + }; + + assert_eq!(resp.response, b"response_data".to_vec()); + } + + #[test] + fn test_empty_response_unknown_protocol() { + let resp = TalkResp { + request_id: RequestId::new(vec![0x01]), + response: Vec::new(), + }; + assert!(resp.response.is_empty()); + } + } + + // ============================================================ + // Static Header Tests + // ============================================================ + + mod static_header { + use super::*; + + #[test] + fn test_default_protocol_id() { + let header = StaticHeader::new(0, Nonce::new([0x00; 12]), 32); + + assert_eq!(&header.protocol_id, b"discv5"); + assert_eq!(header.version, 0x0001); + } + + #[test] + fn test_flag_values() { + for flag in [0u8, 1, 2] { + let header = StaticHeader::new(flag, Nonce::new([0xff; 12]), 32); + assert_eq!(header.flag, flag); + } + } + } + + // ============================================================ + // WhoAreYou Authdata Tests + // ============================================================ + + mod whoareyou_authdata { + use super::*; + + #[test] + fn test_creation() { + let id_nonce_bytes: [u8; 16] = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, + ]; + let authdata = WhoAreYouAuthdata { + id_nonce: IdNonce::new(id_nonce_bytes), + enr_seq: SeqNumber(0), + }; + + assert_eq!(authdata.id_nonce.len(), 16); + assert_eq!(authdata.enr_seq, SeqNumber(0)); + } + } + + // ============================================================ + // XOR Distance Tests + // ============================================================ + + mod xor_distance_tests { + use super::*; + + #[test] + fn test_identical_ids_zero_distance() { + let node_id = NodeId::new([0x00; 32]); + assert_eq!(xor_distance(&node_id, &node_id), BigUint::from(0u32)); + } + + #[test] + fn test_complementary_ids_max_distance() { + let a = NodeId::new([0x00; 32]); + let b = NodeId::new([0xff; 32]); + let expected = (BigUint::one() << 256) - BigUint::one(); + assert_eq!(xor_distance(&a, &b), expected); + } + + #[test] + fn test_distance_is_symmetric() { + let a = NodeId::new([0x12; 32]); + let b = NodeId::new([0x34; 32]); + assert_eq!(xor_distance(&a, &b), xor_distance(&b, &a)); + } + + #[test] + fn test_specific_xor_values() { + let mut a_bytes = [0x00; 32]; + a_bytes[31] = 0x05; // 5 + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x03; // 3 + let a = NodeId::new(a_bytes); + let b = NodeId::new(b_bytes); + assert_eq!(xor_distance(&a, &b), BigUint::from(6u32)); // 5 XOR 3 = 6 + } + } + + // ============================================================ + // Log2 Distance Tests + // ============================================================ + + mod log2_distance_tests { + use super::*; + + #[test] + fn test_identical_ids_return_zero() { + let node_id = NodeId::new([0x00; 32]); + assert_eq!(log2_distance(&node_id, &node_id), Distance(0)); + } + + #[test] + fn test_single_bit_difference() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x01; + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(1)); + } + + #[test] + fn test_high_bit_difference() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[31] = 0x80; // 0b10000000 + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(8)); + } + + #[test] + fn test_maximum_distance() { + let a = NodeId::new([0x00; 32]); + let mut b_bytes = [0x00; 32]; + b_bytes[0] = 0x80; // High bit of first byte set + let b = NodeId::new(b_bytes); + assert_eq!(log2_distance(&a, &b), Distance(256)); + } + } + + // ============================================================ + // K-Bucket Tests + // ============================================================ + + mod kbucket_tests { + use super::*; + + #[test] + fn test_new_bucket_is_empty() { + let bucket = KBucket::new(); + + assert!(bucket.is_empty()); + assert!(!bucket.is_full()); + assert_eq!(bucket.len(), 0); + } + + #[test] + fn test_add_single_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + + assert!(bucket.add(entry)); + assert_eq!(bucket.len(), 1); + assert!(bucket.contains(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_bucket_capacity_limit() { + let mut bucket = KBucket::new(); + + for i in 0..constants::K_BUCKET_SIZE { + let mut bytes = [0x00; 32]; + bytes[0] = i as u8; + let entry = NodeEntry::new(NodeId::new(bytes)); + assert!(bucket.add(entry)); + } + + assert!(bucket.is_full()); + assert_eq!(bucket.len(), constants::K_BUCKET_SIZE); + + let extra = NodeEntry::new(NodeId::new([0xff; 32])); + assert!(!bucket.add(extra)); + assert_eq!(bucket.len(), constants::K_BUCKET_SIZE); + } + + #[test] + fn test_update_moves_to_tail() { + let mut bucket = KBucket::new(); + + let entry1 = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(1)); + let entry2 = NodeEntry::new(NodeId::new([0x02; 32])).with_enr_seq(SeqNumber(1)); + bucket.add(entry1); + bucket.add(entry2); + + let updated = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(2)); + bucket.add(updated); + + let tail = bucket.tail().unwrap(); + assert_eq!(tail.node_id, NodeId::new([0x01; 32])); + assert_eq!(tail.enr_seq, SeqNumber(2)); + } + + #[test] + fn test_remove_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + bucket.add(entry); + + assert!(bucket.remove(&NodeId::new([0x01; 32]))); + assert!(bucket.is_empty()); + assert!(!bucket.contains(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_remove_nonexistent_returns_false() { + let mut bucket = KBucket::new(); + assert!(!bucket.remove(&NodeId::new([0x01; 32]))); + } + + #[test] + fn test_get_existing_node() { + let mut bucket = KBucket::new(); + let entry = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(42)); + bucket.add(entry); + + let retrieved = bucket.get(&NodeId::new([0x01; 32])).unwrap(); + assert_eq!(retrieved.enr_seq, SeqNumber(42)); + } + + #[test] + fn test_get_nonexistent_returns_none() { + let bucket = KBucket::new(); + assert!(bucket.get(&NodeId::new([0x01; 32])).is_none()); + } + + #[test] + fn test_head_returns_oldest() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let head = bucket.head().unwrap(); + assert_eq!(head.node_id, NodeId::new([0x01; 32])); + } + + #[test] + fn test_tail_returns_newest() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let tail = bucket.tail().unwrap(); + assert_eq!(tail.node_id, NodeId::new([0x02; 32])); + } + + #[test] + fn test_iteration() { + let mut bucket = KBucket::new(); + bucket.add(NodeEntry::new(NodeId::new([0x01; 32]))); + bucket.add(NodeEntry::new(NodeId::new([0x02; 32]))); + + let node_ids: Vec<_> = bucket.iter().map(|e| e.node_id.clone()).collect(); + assert_eq!(node_ids.len(), 2); + } + } + + // ============================================================ + // Routing Table Tests + // ============================================================ + + mod routing_table_tests { + use super::*; + + #[test] + fn test_new_table_is_empty() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert_eq!(table.node_count(), 0); + } + + #[test] + fn test_has_256_buckets() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert_eq!(table.buckets.len(), constants::BUCKET_COUNT); + } + + #[test] + fn test_add_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + node_bytes[31] = 0x01; + let entry = NodeEntry::new(NodeId::new(node_bytes)); + assert!(table.add(entry.clone())); + assert_eq!(table.node_count(), 1); + assert!(table.contains(&entry.node_id)); + } + + #[test] + fn test_cannot_add_self() { + let local_id = NodeId::new([0xab; 32]); + let mut table = RoutingTable::new(local_id.clone()); + + let entry = NodeEntry::new(local_id); + assert!(!table.add(entry)); + assert_eq!(table.node_count(), 0); + } + + #[test] + fn test_bucket_assignment_by_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + node_bytes[31] = 0x01; // log2 distance = 1 + let node_id = NodeId::new(node_bytes); + let entry = NodeEntry::new(node_id.clone()); + table.add(entry); + + let bucket_idx = table.bucket_index(&node_id); + assert_eq!(bucket_idx, 0); // distance 1 -> bucket 0 + assert!(table.buckets[0].contains(&node_id)); + } + + #[test] + fn test_get_existing_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let entry = NodeEntry::new(NodeId::new([0x01; 32])).with_enr_seq(SeqNumber(99)); + let node_id = entry.node_id.clone(); + table.add(entry); + + let retrieved = table.get(&node_id).unwrap(); + assert_eq!(retrieved.enr_seq, SeqNumber(99)); + } + + #[test] + fn test_remove_node() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + let node_id = entry.node_id.clone(); + table.add(entry); + assert!(table.remove(&node_id)); + assert!(!table.contains(&node_id)); + } + + #[test] + fn test_closest_nodes_sorted_by_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + for i in 1..5u8 { + let mut bytes = [0x00; 32]; + bytes[0] = i; + let entry = NodeEntry::new(NodeId::new(bytes)); + table.add(entry); + } + + let mut target_bytes = [0x00; 32]; + target_bytes[0] = 0x01; + let target = NodeId::new(target_bytes); + let closest = table.closest_nodes(&target, 3); + + assert_eq!(closest.len(), 3); + assert_eq!(closest[0].node_id, target); // Distance 0 to itself + } + + #[test] + fn test_closest_nodes_respects_count() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + for i in 1..11u8 { + let mut bytes = [0x00; 32]; + bytes[0] = i; + let entry = NodeEntry::new(NodeId::new(bytes)); + table.add(entry); + } + + let mut target_bytes = [0x00; 32]; + target_bytes[0] = 0x05; + let closest = table.closest_nodes(&NodeId::new(target_bytes), 3); + assert_eq!(closest.len(), 3); + } + + #[test] + fn test_nodes_at_distance() { + let local_id = NodeId::new([0x00; 32]); + let mut table = RoutingTable::new(local_id); + + let mut node_bytes = [0x00; 32]; + node_bytes[31] = 0x01; // distance 1 + let node_id = NodeId::new(node_bytes); + let entry = NodeEntry::new(node_id.clone()); + table.add(entry); + + let nodes = table.nodes_at_distance(Distance(1)); + assert_eq!(nodes.len(), 1); + assert_eq!(nodes[0].node_id, node_id); + } + + #[test] + fn test_nodes_at_invalid_distance() { + let local_id = NodeId::new([0x00; 32]); + let table = RoutingTable::new(local_id); + + assert!(table.nodes_at_distance(Distance(0)).is_empty()); + assert!(table.nodes_at_distance(Distance(257)).is_empty()); + } + } + + // ============================================================ + // Node Entry Tests + // ============================================================ + + mod node_entry_tests { + use super::*; + + #[test] + fn test_default_values() { + let entry = NodeEntry::new(NodeId::new([0x01; 32])); + + assert_eq!(entry.node_id, NodeId::new([0x01; 32])); + assert_eq!(entry.enr_seq, SeqNumber(0)); + assert!((entry.last_seen - 0.0).abs() < f64::EPSILON); + assert!(entry.endpoint.is_none()); + assert!(!entry.verified); + } + + #[test] + fn test_full_construction() { + let entry = NodeEntry::new(NodeId::new([0x01; 32])) + .with_enr_seq(SeqNumber(42)) + .with_last_seen(1234567890.0) + .with_endpoint("192.168.1.1:30303".to_string()) + .with_verified(true); + + assert_eq!(entry.enr_seq, SeqNumber(42)); + assert_eq!(entry.endpoint, Some("192.168.1.1:30303".to_string())); + assert!(entry.verified); + } + } + + // ============================================================ + // Test Vector Tests + // ============================================================ + + mod test_vectors { + use super::*; + + // From https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire-test-vectors.md + const PING_REQUEST_ID: [u8; 4] = [0x00, 0x00, 0x00, 0x01]; + const PING_ENR_SEQ: u64 = 2; + const WHOAREYOU_ID_NONCE: [u8; 16] = [ + 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, + 0x0f, 0x10, + ]; + + #[test] + fn test_ping_message_construction() { + let ping = Ping { + request_id: RequestId::new(PING_REQUEST_ID.to_vec()), + enr_seq: SeqNumber(PING_ENR_SEQ), + }; + + assert_eq!(ping.request_id.0, PING_REQUEST_ID.to_vec()); + assert_eq!(ping.enr_seq, SeqNumber(2)); + } + + #[test] + fn test_whoareyou_authdata_construction() { + let authdata = WhoAreYouAuthdata { + id_nonce: IdNonce::new(WHOAREYOU_ID_NONCE), + enr_seq: SeqNumber(0), + }; + + assert_eq!(authdata.id_nonce, IdNonce::new(WHOAREYOU_ID_NONCE)); + assert_eq!(authdata.enr_seq, SeqNumber(0)); + } + + #[test] + fn test_plaintext_message_type() { + // From AES-GCM test vector plaintext + let plaintext = hex::decode("01c20101").unwrap(); + assert_eq!(plaintext[0], MessageType::Ping as u8); + } + } + + // ============================================================ + // Packet Structure Tests + // ============================================================ + + mod packet_structure { + #[test] + fn test_static_header_size() { + // protocol-id (6) + version (2) + flag (1) + nonce (12) + authdata-size (2) + let expected_size = 6 + 2 + 1 + 12 + 2; + assert_eq!(expected_size, 23); + } + } + + // ============================================================ + // Routing with Test Vector Node IDs + // ============================================================ + + mod routing_test_vectors { + use super::*; + + // Node IDs from official test vectors (keccak256 of uncompressed pubkey) + fn node_a_id() -> NodeId { + NodeId::from_slice( + &hex::decode("aaaa8419e9f49d0083561b48287df592939a8d19947d8c0ef88f2a4856a69fbb") + .unwrap(), + ) + } + + fn node_b_id() -> NodeId { + NodeId::from_slice( + &hex::decode("bbbb9d047f0488c0b5a93c1c3f2d8bafc7c8ff337024a55434a0d0555de64db9") + .unwrap(), + ) + } + + #[test] + fn test_xor_distance_is_symmetric() { + let node_a = node_a_id(); + let node_b = node_b_id(); + + let distance = xor_distance(&node_a, &node_b); + assert!(distance > BigUint::from(0u32)); + assert_eq!( + xor_distance(&node_a, &node_b), + xor_distance(&node_b, &node_a) + ); + } + + #[test] + fn test_log2_distance_is_high() { + let node_a = node_a_id(); + let node_b = node_b_id(); + + let log_dist = log2_distance(&node_a, &node_b); + assert!(log_dist > Distance(200)); + } + } +} diff --git a/lean_client/networking/src/lib.rs b/lean_client/networking/src/lib.rs index 2a54c28..3f333cd 100644 --- a/lean_client/networking/src/lib.rs +++ b/lean_client/networking/src/lib.rs @@ -1,7 +1,9 @@ pub mod bootnodes; pub mod compressor; +pub mod discovery; pub mod gossipsub; pub mod network; pub mod req_resp; pub mod serde_utils; +pub mod sync; pub mod types; diff --git a/lean_client/networking/src/network/service.rs b/lean_client/networking/src/network/service.rs index 63b6f9c..daa735c 100644 --- a/lean_client/networking/src/network/service.rs +++ b/lean_client/networking/src/network/service.rs @@ -26,6 +26,7 @@ use tracing::{debug, info, trace, warn}; use crate::{ bootnodes::{BootnodeSource, StaticBootnodes}, compressor::Compressor, + discovery::{DiscoveryConfig, DiscoveryService}, gossipsub::{self, config::GossipsubConfig, message::GossipsubMessage, topic::GossipsubKind}, network::behaviour::{LeanNetworkBehaviour, LeanNetworkBehaviourEvent}, req_resp::{self, BLOCKS_BY_ROOT_PROTOCOL_V1, LeanRequest, ReqRespMessage, STATUS_PROTOCOL_V1}, @@ -39,6 +40,8 @@ pub struct NetworkServiceConfig { pub gossipsub_config: GossipsubConfig, pub socket_address: IpAddr, pub socket_port: u16, + pub discovery_port: u16, + pub discovery_enabled: bool, bootnodes: StaticBootnodes, } @@ -47,22 +50,26 @@ impl NetworkServiceConfig { gossipsub_config: GossipsubConfig, socket_address: IpAddr, socket_port: u16, + discovery_port: u16, + discovery_enabled: bool, bootnodes: Vec, ) -> Self { - let bootnodes = StaticBootnodes::new( - bootnodes - .iter() - .filter_map(|addr_str| addr_str.parse().ok()) - .collect::>(), - ); + let bootnodes = StaticBootnodes::parse(&bootnodes); NetworkServiceConfig { gossipsub_config, socket_address, socket_port, + discovery_port, + discovery_enabled, bootnodes, } } + + /// Get ENR bootnodes for discv5. + pub fn enr_bootnodes(&self) -> Vec> { + self.bootnodes.enrs().to_vec() + } } #[derive(Debug)] @@ -83,6 +90,7 @@ where { network_config: Arc, swarm: Swarm, + discovery: Option, peer_table: Arc>>, peer_count: Arc, outbound_p2p_requests: R, @@ -147,9 +155,36 @@ where .with_swarm_config(|_| config) .build(); + let discovery = if network_config.discovery_enabled { + let discovery_config = DiscoveryConfig::new( + network_config.socket_address, + network_config.discovery_port, + network_config.socket_port, + ) + .with_bootnodes(network_config.enr_bootnodes()); + + match DiscoveryService::new(discovery_config, &local_key).await { + Ok(disc) => { + info!( + enr = %disc.local_enr(), + "Discovery service initialized" + ); + Some(disc) + } + Err(e) => { + warn!(error = ?e, "Failed to initialize discovery service, continuing without it"); + None + } + } + } else { + info!("Discovery service disabled"); + None + }; + let mut service = Self { network_config, swarm, + discovery, peer_table: Arc::new(Mutex::new(HashMap::new())), peer_count, outbound_p2p_requests, @@ -166,11 +201,24 @@ where // Periodic reconnect attempts to bootnodes let mut reconnect_interval = interval(Duration::from_secs(30)); reconnect_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + + // Periodic discovery searches + let mut discovery_interval = interval(Duration::from_secs(30)); + discovery_interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + loop { select! { _ = reconnect_interval.tick() => { self.connect_to_peers(self.network_config.bootnodes.to_multiaddrs()).await; } + _ = discovery_interval.tick() => { + // Trigger active peer discovery + if let Some(ref discovery) = self.discovery { + let known_peers = discovery.connected_peers(); + debug!(known_peers, "Triggering random peer discovery lookup"); + discovery.find_random_peers(); + } + } request = self.outbound_p2p_requests.recv() => { if let Some(request) = request { self.dispatch_outbound_request(request).await; @@ -181,6 +229,23 @@ where info!(?event, "Swarm event"); } } + enr = async { + match &mut self.discovery { + Some(disc) => disc.recv().await, + None => std::future::pending().await, + } + } => { + if let Some(enr) = enr { + if let Some(multiaddr) = DiscoveryService::enr_to_multiaddr(&enr) { + info!( + node_id = %enr.node_id(), + %multiaddr, + "Discovered peer via discv5, attempting connection" + ); + self.connect_to_peers(vec![multiaddr]).await; + } + } + } } } } @@ -311,7 +376,10 @@ where } } Ok(GossipsubMessage::Attestation(signed_attestation)) => { + #[cfg(feature = "devnet1")] let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; if let Err(err) = self .chain_message_sink @@ -533,7 +601,11 @@ where } } OutboundP2pRequest::GossipAttestation(signed_attestation) => { + #[cfg(feature = "devnet1")] let slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet2")] + let slot = signed_attestation.message.slot.0; + match signed_attestation.to_ssz() { Ok(bytes) => { if let Err(err) = self.publish_to_topic(GossipsubKind::Attestation, bytes) { @@ -588,6 +660,10 @@ where *self.swarm.local_peer_id() } + pub fn local_enr(&self) -> Option<&enr::Enr> { + self.discovery.as_ref().map(|d| d.local_enr()) + } + pub fn swarm_mut(&mut self) -> &mut Swarm { &mut self.swarm } diff --git a/lean_client/networking/src/sync/backfill_sync.rs b/lean_client/networking/src/sync/backfill_sync.rs new file mode 100644 index 0000000..3a6887f --- /dev/null +++ b/lean_client/networking/src/sync/backfill_sync.rs @@ -0,0 +1,265 @@ +use containers::{Bytes32, SignedBlockWithAttestation}; +use libp2p_identity::PeerId; +/// Backfill synchronization for resolving orphan blocks. +/// +/// When a block arrives whose parent is unknown, we need to fetch that parent. +/// If the parent also has an unknown parent, we continue recursively. This process +/// is called "backfill" because we are filling in gaps going backward in time. +/// +/// ## The Challenge +/// +/// Blocks can arrive out of order for several reasons: +/// 1. **Gossip timing**: A child block gossips faster than its parent +/// 2. **Parallel downloads**: Responses arrive in different order than requests +/// 3. **Network partitions**: Some blocks were missed during a brief disconnect +/// +/// Without backfill, these orphan blocks would be useless. With backfill, we can +/// resolve their parent chains and process them. +/// +/// ## Safety: Depth Limits +/// +/// - An attacker could send a block claiming to have a parent millions of slots ago +/// - Without limits, we would exhaust memory trying to fetch the entire chain +/// - MAX_BACKFILL_DEPTH (512) covers legitimate reorgs while bounding resources +use std::collections::HashSet; +use tracing::{debug, warn}; + +use super::{ + block_cache::BlockCache, + config::{MAX_BACKFILL_DEPTH, MAX_BLOCKS_PER_REQUEST}, + peer_manager::PeerManager, +}; + +/// Network requester trait for fetching blocks. +/// +/// Abstracts the network layer to allow testing with mocks. +#[async_trait::async_trait] +pub trait NetworkRequester: Send + Sync { + /// Request blocks by their roots from a peer. + /// + /// Returns the blocks if successful, or None if the request failed. + async fn request_blocks_by_root( + &self, + peer_id: PeerId, + roots: Vec, + ) -> Option>; +} + +/// Backfill synchronization manager. +/// +/// Resolves orphan blocks by fetching their missing parents. When blocks +/// arrive with unknown parents, this class orchestrates fetching those parents. +/// +/// ## How It Works +/// +/// 1. **Detection**: BlockCache marks blocks as orphans when added +/// 2. **Request**: BackfillSync requests missing parents from peers +/// 3. **Recursion**: If fetched parents are also orphans, continue fetching +/// 4. **Resolution**: When parent chain is complete, blocks become processable +/// +/// ## Integration +/// +/// BackfillSync does not process blocks itself. It only ensures parents exist +/// in the BlockCache. The SyncService is responsible for: +/// - Calling `fill_missing()` when orphans are detected +/// - Processing blocks when they become processable +/// - Integrating blocks into the Store +/// +/// ## Thread Safety +/// +/// This class is designed for single-threaded async operation. The `_pending` +/// set prevents duplicate requests for the same root. +pub struct BackfillSync { + peer_manager: PeerManager, + block_cache: BlockCache, + network: N, + + /// Roots currently being fetched (prevents duplicate requests) + pending: HashSet, +} + +impl BackfillSync { + pub fn new(peer_manager: PeerManager, block_cache: BlockCache, network: N) -> Self { + Self { + peer_manager, + block_cache, + network, + pending: HashSet::new(), + } + } + + /// Fill missing parent blocks for orphans. + /// + /// Recursively fetches parents until: + /// - All parents are found + /// - MAX_BACKFILL_DEPTH is reached + /// - No peers are available + /// + /// This method is idempotent and safe to call multiple times. + pub async fn fill_missing(&mut self, roots: Vec, depth: usize) { + self.fill_missing_internal(roots, depth).await; + } + + fn fill_missing_internal<'a>( + &'a mut self, + roots: Vec, + depth: usize, + ) -> std::pin::Pin + Send + 'a>> { + Box::pin(async move { + if depth >= MAX_BACKFILL_DEPTH { + // Depth limit reached. Stop fetching to prevent resource exhaustion. + // This is a safety measure, not an error. Deep chains may be + // legitimate but we cannot fetch them via backfill. + debug!( + depth = depth, + max_depth = MAX_BACKFILL_DEPTH, + "Backfill depth limit reached" + ); + return; + } + + // Filter out roots we are already fetching or have cached + let roots_to_fetch: Vec = roots + .into_iter() + .filter(|root| !self.pending.contains(root) && !self.block_cache.contains(root)) + .collect(); + + if roots_to_fetch.is_empty() { + return; + } + + debug!( + num_roots = roots_to_fetch.len(), + depth = depth, + "Backfilling missing parents" + ); + + // Mark roots as pending to avoid duplicate requests + for root in &roots_to_fetch { + self.pending.insert(*root); + } + + // Fetch in batches to respect request limits + for batch_start in (0..roots_to_fetch.len()).step_by(MAX_BLOCKS_PER_REQUEST) { + let batch_end = (batch_start + MAX_BLOCKS_PER_REQUEST).min(roots_to_fetch.len()); + let batch = roots_to_fetch[batch_start..batch_end].to_vec(); + + self.fetch_batch(batch, depth).await; + } + + // Clear pending status + for root in &roots_to_fetch { + self.pending.remove(root); + } + }) + } + + async fn fetch_batch(&mut self, roots: Vec, depth: usize) { + // Select a peer for the request + let peer = match self.peer_manager.select_peer_for_request(None) { + Some(p) => p.peer_id, + None => { + debug!("No available peer for backfill request"); + return; + } + }; + + debug!( + peer = %peer, + num_roots = roots.len(), + depth = depth, + "Requesting blocks from peer" + ); + + // Mark request as started + self.peer_manager.on_request_start(&peer); + + // Request blocks + match self + .network + .request_blocks_by_root(peer, roots.clone()) + .await + { + Some(blocks) if !blocks.is_empty() => { + debug!( + peer = %peer, + num_blocks = blocks.len(), + "Received blocks from peer" + ); + + self.peer_manager.on_request_complete(&peer); + self.process_received_blocks(blocks, peer, depth).await; + } + Some(_) => { + // Empty response. Peer may not have the blocks. + debug!(peer = %peer, "Peer returned no blocks"); + self.peer_manager.on_request_complete(&peer); + } + None => { + // Network error + warn!(peer = %peer, "Block request failed"); + self.peer_manager + .on_request_failure(&peer, "backfill request failed"); + } + } + } + + async fn process_received_blocks( + &mut self, + blocks: Vec, + peer_id: PeerId, + depth: usize, + ) { + let mut new_orphan_parents = Vec::new(); + + for block in blocks { + let root = self.block_cache.add_block(block); + + // If this block is an orphan, we need to fetch its parent + if self.block_cache.is_orphan(&root) { + if let Some(parent_root) = self + .block_cache + .get_block(&root) + .map(|b| b.message.block.parent_root) + { + if !parent_root.0.is_zero() { + new_orphan_parents.push(parent_root); + } + } + } + } + + // Recursively fetch parents of newly discovered orphans + if !new_orphan_parents.is_empty() { + debug!( + peer = %peer_id, + num_parents = new_orphan_parents.len(), + next_depth = depth + 1, + "Found orphan parents, continuing backfill" + ); + + self.fill_missing_internal(new_orphan_parents, depth + 1) + .await; + } + } + + /// Get reference to block cache. + pub fn block_cache(&self) -> &BlockCache { + &self.block_cache + } + + /// Get mutable reference to block cache. + pub fn block_cache_mut(&mut self) -> &mut BlockCache { + &mut self.block_cache + } + + /// Get reference to peer manager. + pub fn peer_manager(&self) -> &PeerManager { + &self.peer_manager + } + + /// Get mutable reference to peer manager. + pub fn peer_manager_mut(&mut self) -> &mut PeerManager { + &mut self.peer_manager + } +} diff --git a/lean_client/networking/src/sync/block_cache.rs b/lean_client/networking/src/sync/block_cache.rs new file mode 100644 index 0000000..c43ab69 --- /dev/null +++ b/lean_client/networking/src/sync/block_cache.rs @@ -0,0 +1,211 @@ +use containers::block::hash_tree_root; +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; +/// Block cache for managing blocks and tracking orphans. +/// +/// Maintains a cache of blocks and identifies orphans (blocks whose parent +/// is not yet known). This is essential for handling out-of-order block arrival. +use std::collections::{HashMap, HashSet}; + +/// Block cache for sync operations. +/// +/// Manages blocks during synchronization and tracks orphans (blocks with +/// unknown parents). When blocks arrive out of order, orphans are cached +/// until their parent chains can be resolved. +#[derive(Debug, Default, Clone)] +pub struct BlockCache { + /// All cached blocks, indexed by block root + blocks: HashMap, + + /// Blocks whose parent is not in the cache (orphans) + orphans: HashSet, + + /// Children of each block (parent_root -> set of child roots) + children: HashMap>, +} + +impl BlockCache { + pub fn new() -> Self { + Self::default() + } + + /// Add a block to the cache. + /// + /// Automatically detects if the block is an orphan and tracks it. + /// Returns the block root. + pub fn add_block(&mut self, block: SignedBlockWithAttestation) -> Bytes32 { + let root = hash_tree_root(&block.message.block); + let parent_root = block.message.block.parent_root; + + // Add to cache + self.blocks.insert(root, block); + + // Track parent-child relationship + self.children + .entry(parent_root) + .or_insert_with(HashSet::new) + .insert(root); + + // Check if this is an orphan (parent not in cache) + if !parent_root.0.is_zero() && !self.blocks.contains_key(&parent_root) { + self.orphans.insert(root); + } + + // If adding this block resolves any orphans, remove them from orphan set + if let Some(children) = self.children.get(&root) { + for child in children { + self.orphans.remove(child); + } + } + + root + } + + /// Get a block by its root. + pub fn get_block(&self, root: &Bytes32) -> Option<&SignedBlockWithAttestation> { + self.blocks.get(root) + } + + /// Check if a block exists in the cache. + pub fn contains(&self, root: &Bytes32) -> bool { + self.blocks.contains_key(root) + } + + /// Check if a block is an orphan (parent unknown). + pub fn is_orphan(&self, root: &Bytes32) -> bool { + self.orphans.contains(root) + } + + /// Get all orphan block roots. + pub fn get_orphans(&self) -> Vec { + self.orphans.iter().copied().collect() + } + + /// Get missing parent roots for orphan blocks. + /// + /// Returns roots of parents that are not in the cache. + pub fn get_missing_parents(&self) -> Vec { + self.orphans + .iter() + .filter_map(|orphan_root| { + self.blocks + .get(orphan_root) + .map(|block| block.message.block.parent_root) + }) + .filter(|parent_root| { + !parent_root.0.is_zero() && !self.blocks.contains_key(parent_root) + }) + .collect::>() // Deduplicate + .into_iter() + .collect() + } + + /// Get all processable blocks (blocks whose parent is known or is genesis). + /// + /// Returns blocks that can be processed because their parent exists + /// in the cache or they are genesis blocks (parent_root is zero). + pub fn get_processable_blocks(&self) -> Vec { + self.blocks + .iter() + .filter_map(|(root, block)| { + let parent_root = block.message.block.parent_root; + if parent_root.0.is_zero() || self.blocks.contains_key(&parent_root) { + Some(*root) + } else { + None + } + }) + .collect() + } + + /// Remove a block from the cache. + /// + /// Also updates orphan tracking and parent-child relationships. + pub fn remove_block(&mut self, root: &Bytes32) -> Option { + if let Some(block) = self.blocks.remove(root) { + // Remove from orphan set if present + self.orphans.remove(root); + + // Remove from parent's children set + let parent_root = block.message.block.parent_root; + if let Some(children) = self.children.get_mut(&parent_root) { + children.remove(root); + if children.is_empty() { + self.children.remove(&parent_root); + } + } + + // Mark children as orphans if removing this block orphans them + if let Some(children) = self.children.get(root) { + for child in children { + self.orphans.insert(*child); + } + } + + Some(block) + } else { + None + } + } + + /// Get the slot of a block. + pub fn get_slot(&self, root: &Bytes32) -> Option { + self.blocks.get(root).map(|block| block.message.block.slot) + } + + /// Get children of a block. + pub fn get_children(&self, root: &Bytes32) -> Vec { + self.children + .get(root) + .map(|children| children.iter().copied().collect()) + .unwrap_or_default() + } + + /// Get chain length from a block back to genesis or earliest cached ancestor. + /// + /// Returns None if the block is not in the cache. + pub fn get_chain_length(&self, root: &Bytes32) -> Option { + if !self.blocks.contains_key(root) { + return None; + } + + let mut length = 0; + let mut current = *root; + + loop { + if let Some(block) = self.blocks.get(¤t) { + let parent_root = block.message.block.parent_root; + if parent_root.0.is_zero() { + // Reached genesis + break; + } + length += 1; + if !self.blocks.contains_key(&parent_root) { + // Parent not in cache, can't continue + break; + } + current = parent_root; + } else { + break; + } + } + + Some(length) + } + + /// Clear all blocks from the cache. + pub fn clear(&mut self) { + self.blocks.clear(); + self.orphans.clear(); + self.children.clear(); + } + + /// Get the number of cached blocks. + pub fn len(&self) -> usize { + self.blocks.len() + } + + /// Check if the cache is empty. + pub fn is_empty(&self) -> bool { + self.blocks.is_empty() + } +} diff --git a/lean_client/networking/src/sync/config.rs b/lean_client/networking/src/sync/config.rs new file mode 100644 index 0000000..b51ff16 --- /dev/null +++ b/lean_client/networking/src/sync/config.rs @@ -0,0 +1,16 @@ +/// Sync service configuration constants. +/// +/// Operational parameters for synchronization: batch sizes, timeouts, and limits. + +/// Maximum blocks to request in a single BlocksByRoot request. +pub const MAX_BLOCKS_PER_REQUEST: usize = 10; + +/// Maximum concurrent requests to a single peer. +pub const MAX_CONCURRENT_REQUESTS: usize = 2; + +/// Maximum depth to backfill when resolving orphan chains. +/// This prevents resource exhaustion from malicious deep chains. +pub const MAX_BACKFILL_DEPTH: usize = 512; + +/// Interval between sync state evaluations (in seconds). +pub const SYNC_TICK_INTERVAL_SECS: u64 = 1; diff --git a/lean_client/networking/src/sync/head_sync.rs b/lean_client/networking/src/sync/head_sync.rs new file mode 100644 index 0000000..fb57c78 --- /dev/null +++ b/lean_client/networking/src/sync/head_sync.rs @@ -0,0 +1,181 @@ +/// Head synchronization for processing gossip blocks. +/// +/// Manages the processing of blocks received via gossip to advance the chain head. +/// Works in coordination with backfill sync to handle out-of-order block arrivals. +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; +use tracing::debug; + +use super::block_cache::BlockCache; + +/// Head synchronization manager. +/// +/// Processes blocks to advance the chain head. Works with BlockCache to +/// handle blocks that arrive in any order. +/// +/// ## How It Works +/// +/// 1. Blocks arrive via gossip +/// 2. HeadSync adds them to the BlockCache +/// 3. If parent exists, block is processable immediately +/// 4. If parent missing, block is cached as orphan (BackfillSync will fetch parent) +/// 5. Once parent chain is complete, all descendants become processable +/// +/// ## Integration +/// +/// HeadSync coordinates with: +/// - **BlockCache**: Tracks blocks and identifies orphans +/// - **BackfillSync**: Fetches missing parents for orphans +/// - **SyncService**: Orchestrates overall sync flow +pub struct HeadSync { + block_cache: BlockCache, +} + +impl HeadSync { + pub fn new(block_cache: BlockCache) -> Self { + Self { block_cache } + } + + /// Process a gossip block. + /// + /// Adds the block to the cache and returns information about what happened: + /// - The block root + /// - Whether the block is processable (parent exists) + /// - Missing parent roots (if block is orphan) + pub fn process_gossip_block(&mut self, block: SignedBlockWithAttestation) -> ProcessResult { + let slot = block.message.block.slot; + let parent_root = block.message.block.parent_root; + + debug!( + slot = slot.0, + parent = ?parent_root, + "Processing gossip block" + ); + + // Add to cache + let root = self.block_cache.add_block(block); + + // Check if processable + let is_orphan = self.block_cache.is_orphan(&root); + + if is_orphan { + debug!( + slot = slot.0, + root = ?root, + "Block is orphan (parent unknown)" + ); + + // Get missing parents for backfill + let missing_parents = if parent_root.0.is_zero() { + vec![] + } else if !self.block_cache.contains(&parent_root) { + vec![parent_root] + } else { + vec![] + }; + + ProcessResult { + root, + is_processable: false, + missing_parents, + } + } else { + debug!( + slot = slot.0, + root = ?root, + "Block is processable (parent known)" + ); + + ProcessResult { + root, + is_processable: true, + missing_parents: vec![], + } + } + } + + /// Get all blocks ready for processing. + /// + /// Returns blocks whose parents exist in the cache or are genesis. + /// These blocks can be safely processed in topological order. + pub fn get_processable_blocks(&self) -> Vec { + self.block_cache.get_processable_blocks() + } + + /// Get a block by its root. + pub fn get_block(&self, root: &Bytes32) -> Option<&SignedBlockWithAttestation> { + self.block_cache.get_block(root) + } + + /// Remove a block from the cache after processing. + pub fn remove_block(&mut self, root: &Bytes32) -> Option { + self.block_cache.remove_block(root) + } + + /// Check if a block exists in the cache. + pub fn contains_block(&self, root: &Bytes32) -> bool { + self.block_cache.contains(root) + } + + /// Get all orphan blocks. + pub fn get_orphans(&self) -> Vec { + self.block_cache.get_orphans() + } + + /// Get missing parent roots for all orphans. + pub fn get_missing_parents(&self) -> Vec { + self.block_cache.get_missing_parents() + } + + /// Get reference to block cache. + pub fn block_cache(&self) -> &BlockCache { + &self.block_cache + } + + /// Get mutable reference to block cache. + pub fn block_cache_mut(&mut self) -> &mut BlockCache { + &mut self.block_cache + } + + /// Get the highest slot among cached blocks. + pub fn get_highest_cached_slot(&self) -> Option { + self.block_cache + .get_processable_blocks() + .iter() + .filter_map(|root| self.block_cache.get_slot(root)) + .max() + } + + /// Get statistics about the cache. + pub fn get_stats(&self) -> HeadSyncStats { + let total_blocks = self.block_cache.len(); + let orphan_blocks = self.block_cache.get_orphans().len(); + let processable_blocks = self.block_cache.get_processable_blocks().len(); + + HeadSyncStats { + total_blocks, + orphan_blocks, + processable_blocks, + } + } +} + +/// Result of processing a gossip block. +#[derive(Debug, Clone)] +pub struct ProcessResult { + /// The root of the processed block + pub root: Bytes32, + + /// Whether the block can be processed immediately + pub is_processable: bool, + + /// Missing parent roots (if block is orphan) + pub missing_parents: Vec, +} + +/// Statistics about the head sync cache. +#[derive(Debug, Clone, Copy)] +pub struct HeadSyncStats { + pub total_blocks: usize, + pub orphan_blocks: usize, + pub processable_blocks: usize, +} diff --git a/lean_client/networking/src/sync/mod.rs b/lean_client/networking/src/sync/mod.rs new file mode 100644 index 0000000..b8d2fb3 --- /dev/null +++ b/lean_client/networking/src/sync/mod.rs @@ -0,0 +1,43 @@ +pub mod backfill_sync; +pub mod block_cache; +/// Sync service for the lean Ethereum consensus client. +/// +/// This module provides synchronization capabilities for downloading and +/// validating blocks to catch up with the network. It includes: +/// +/// - **Block Cache**: Manages blocks and tracks orphans (blocks with unknown parents) +/// - **Peer Manager**: Tracks peer chain status and selects peers for requests +/// - **Backfill Sync**: Resolves orphan chains by fetching missing parent blocks +/// - **Head Sync**: Advances the chain head by processing gossip blocks +/// - **Sync Service**: Coordinates all sync operations and manages state transitions +/// +/// ## Architecture +/// +/// The sync service operates reactively: +/// 1. Blocks arrive via gossip +/// 2. If parent is known, process immediately +/// 3. If parent is unknown, cache block and trigger backfill +/// 4. Backfill fetches missing parents recursively (up to MAX_BACKFILL_DEPTH) +/// 5. Once parent chain is complete, process all cached blocks +/// +/// ## State Machine +/// +/// - **IDLE**: No peers, waiting to start +/// - **SYNCING**: Processing blocks to catch up +/// - **SYNCED**: Reached network finalized checkpoint +pub mod config; +pub mod head_sync; +pub mod peer_manager; +pub mod service; +pub mod states; + +pub use backfill_sync::BackfillSync; +pub use block_cache::BlockCache; +pub use config::*; +pub use head_sync::HeadSync; +pub use peer_manager::{PeerManager, SyncPeer}; +pub use service::SyncService; +pub use states::SyncState; + +#[cfg(test)] +mod tests; diff --git a/lean_client/networking/src/sync/peer_manager.rs b/lean_client/networking/src/sync/peer_manager.rs new file mode 100644 index 0000000..575e722 --- /dev/null +++ b/lean_client/networking/src/sync/peer_manager.rs @@ -0,0 +1,205 @@ +use super::config::MAX_CONCURRENT_REQUESTS; +use crate::types::ConnectionState; +use containers::{Slot, Status}; +use libp2p_identity::PeerId; +/// Peer manager for sync operations. +/// +/// Tracks peer chain status and selects peers for block requests. +use std::collections::HashMap; + +/// Sync-specific peer state. +/// +/// Wraps peer information with sync-specific state: chain status and request tracking. +#[derive(Debug, Clone)] +pub struct SyncPeer { + pub peer_id: PeerId, + pub connection_state: ConnectionState, + pub status: Option, + pub requests_in_flight: usize, +} + +impl SyncPeer { + pub fn new(peer_id: PeerId, connection_state: ConnectionState) -> Self { + Self { + peer_id, + connection_state, + status: None, + requests_in_flight: 0, + } + } + + /// Check if peer is connected. + pub fn is_connected(&self) -> bool { + self.connection_state == ConnectionState::Connected + } + + /// Check if peer is available for new requests. + /// + /// A peer is available if: + /// - Connected + /// - Below MAX_CONCURRENT_REQUESTS limit + pub fn is_available(&self) -> bool { + self.is_connected() && self.requests_in_flight < MAX_CONCURRENT_REQUESTS + } + + /// Check if peer likely has data for given slot. + pub fn has_slot(&self, slot: Slot) -> bool { + if let Some(status) = &self.status { + status.head.slot >= slot + } else { + false + } + } + + /// Mark that a request has been sent to this peer. + pub fn on_request_start(&mut self) { + self.requests_in_flight += 1; + } + + /// Mark that a request has completed. + pub fn on_request_complete(&mut self) { + self.requests_in_flight = self.requests_in_flight.saturating_sub(1); + } +} + +/// Peer manager for sync operations. +/// +/// Tracks peer chain status, selects peers for requests, and manages +/// request concurrency limits. +#[derive(Debug, Default, Clone)] +pub struct PeerManager { + peers: HashMap, +} + +impl PeerManager { + pub fn new() -> Self { + Self::default() + } + + /// Add a peer to the manager. + pub fn add_peer( + &mut self, + peer_id: PeerId, + connection_state: ConnectionState, + ) -> &mut SyncPeer { + self.peers + .entry(peer_id) + .or_insert_with(|| SyncPeer::new(peer_id, connection_state)) + } + + /// Remove a peer from the manager. + pub fn remove_peer(&mut self, peer_id: &PeerId) -> Option { + self.peers.remove(peer_id) + } + + /// Get a peer by ID. + pub fn get_peer(&self, peer_id: &PeerId) -> Option<&SyncPeer> { + self.peers.get(peer_id) + } + + /// Get a mutable peer by ID. + pub fn get_peer_mut(&mut self, peer_id: &PeerId) -> Option<&mut SyncPeer> { + self.peers.get_mut(peer_id) + } + + /// Update peer connection state. + pub fn update_connection_state(&mut self, peer_id: &PeerId, state: ConnectionState) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.connection_state = state; + } + } + + /// Update peer chain status. + pub fn update_status(&mut self, peer_id: &PeerId, status: Status) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.status = Some(status); + } + } + + /// Select an available peer for a request. + /// + /// Returns the first available peer. If min_slot is provided, only + /// considers peers that likely have data for that slot. + pub fn select_peer_for_request(&self, min_slot: Option) -> Option<&SyncPeer> { + self.peers.values().find(|peer| { + if !peer.is_available() { + return false; + } + if let Some(slot) = min_slot { + peer.has_slot(slot) + } else { + true + } + }) + } + + /// Get network's finalized slot (most common among connected peers). + /// + /// Returns the mode (most common) finalized slot reported by connected peers. + pub fn get_network_finalized_slot(&self) -> Option { + let mut finalized_slots: Vec = self + .peers + .values() + .filter(|peer| peer.status.is_some() && peer.is_connected()) + .map(|peer| peer.status.as_ref().unwrap().finalized.slot) + .collect(); + + if finalized_slots.is_empty() { + return None; + } + + // Find mode (most common value) + finalized_slots.sort(); + let mut max_count = 0; + let mut mode = finalized_slots[0]; + let mut current_count = 1; + let mut current_slot = finalized_slots[0]; + + for i in 1..finalized_slots.len() { + if finalized_slots[i] == current_slot { + current_count += 1; + } else { + if current_count > max_count { + max_count = current_count; + mode = current_slot; + } + current_slot = finalized_slots[i]; + current_count = 1; + } + } + + // Check last group + if current_count > max_count { + mode = current_slot; + } + + Some(mode) + } + + /// Mark that a request has been sent to a peer. + pub fn on_request_start(&mut self, peer_id: &PeerId) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.on_request_start(); + } + } + + /// Mark that a request has completed successfully. + pub fn on_request_complete(&mut self, peer_id: &PeerId) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.on_request_complete(); + } + } + + /// Mark that a request has failed. + pub fn on_request_failure(&mut self, peer_id: &PeerId, _reason: &str) { + if let Some(peer) = self.peers.get_mut(peer_id) { + peer.on_request_complete(); + // Could implement reputation/scoring here + } + } + + /// Get all tracked peers. + pub fn get_all_peers(&self) -> impl Iterator { + self.peers.values() + } +} diff --git a/lean_client/networking/src/sync/service.rs b/lean_client/networking/src/sync/service.rs new file mode 100644 index 0000000..0ff3c77 --- /dev/null +++ b/lean_client/networking/src/sync/service.rs @@ -0,0 +1,286 @@ +use containers::{Bytes32, SignedBlockWithAttestation, Slot}; +use libp2p_identity::PeerId; +use parking_lot::Mutex; +/// Sync service coordinating all synchronization operations. +/// +/// The SyncService is the main entry point for synchronization. It coordinates: +/// - HeadSync: Processing gossip blocks +/// - BackfillSync: Fetching missing parent blocks +/// - PeerManager: Tracking peer status +/// - State machine: Managing IDLE -> SYNCING -> SYNCED transitions +use std::sync::Arc; +use tracing::{debug, info, warn}; + +use super::{ + backfill_sync::{BackfillSync, NetworkRequester}, + block_cache::BlockCache, + peer_manager::PeerManager, + states::SyncState, +}; +use crate::types::ConnectionState; + +/// Sync service coordinating all sync operations. +/// +/// This is the main sync coordinator that: +/// 1. Receives blocks from gossip via HeadSync +/// 2. Triggers backfill for orphan blocks via BackfillSync +/// 3. Manages sync state (IDLE -> SYNCING -> SYNCED) +/// 4. Provides blocks to the fork choice for processing +pub struct SyncService { + state: SyncState, + head_sync: Arc>, + backfill_sync: Arc>>, + peer_manager: Arc>, + local_head_slot: Slot, +} + +impl SyncService { + pub fn new(network: N, peer_manager: PeerManager, block_cache: BlockCache) -> Self { + let peer_manager_arc = Arc::new(Mutex::new(peer_manager)); + let block_cache_arc = Arc::new(Mutex::new(block_cache)); + + let pm_clone = peer_manager_arc.lock().clone(); + let bc_clone = block_cache_arc.lock().clone(); + + Self { + state: SyncState::default(), + head_sync: block_cache_arc.clone(), + backfill_sync: Arc::new(Mutex::new(BackfillSync::new(pm_clone, bc_clone, network))), + peer_manager: peer_manager_arc, + local_head_slot: Slot(0), + } + } + + /// Get current sync state. + pub fn state(&self) -> SyncState { + self.state + } + + /// Add a peer to the sync service. + pub fn add_peer(&self, peer_id: PeerId, connection_state: ConnectionState) { + let mut pm = self.peer_manager.lock(); + pm.add_peer(peer_id, connection_state); + info!(peer = %peer_id, "Peer added to sync service"); + } + + /// Remove a peer from the sync service. + pub fn remove_peer(&self, peer_id: &PeerId) { + let mut pm = self.peer_manager.lock(); + pm.remove_peer(peer_id); + info!(peer = %peer_id, "Peer removed from sync service"); + } + + /// Update peer connection state. + pub fn update_peer_connection(&self, peer_id: &PeerId, state: ConnectionState) { + let mut pm = self.peer_manager.lock(); + pm.update_connection_state(peer_id, state); + } + + /// Update peer chain status. + pub fn update_peer_status(&self, peer_id: &PeerId, status: containers::Status) { + let finalized_slot = status.finalized.slot; + let mut pm = self.peer_manager.lock(); + pm.update_status(peer_id, status); + debug!(peer = %peer_id, finalized_slot = finalized_slot.0, "Updated peer status"); + } + + /// Process a gossip block. + /// + /// Returns the block root and whether backfill is needed. + pub async fn process_gossip_block( + &mut self, + block: SignedBlockWithAttestation, + ) -> (Bytes32, bool) { + let slot = block.message.block.slot; + let parent_root = block.message.block.parent_root; + + let (root, is_orphan, missing_parents) = { + let mut cache = self.head_sync.lock(); + let root = cache.add_block(block); + let is_orphan = cache.is_orphan(&root); + + let missing_parents = if is_orphan && !parent_root.0.is_zero() { + if !cache.contains(&parent_root) { + vec![parent_root] + } else { + vec![] + } + } else { + vec![] + }; + + (root, is_orphan, missing_parents) + }; + + debug!( + slot = slot.0, + root = ?root, + processable = !is_orphan, + "Processed gossip block" + ); + + // If block has missing parents, trigger backfill + if !missing_parents.is_empty() { + debug!( + num_missing = missing_parents.len(), + "Triggering backfill for missing parents" + ); + + let mut bs = self.backfill_sync.lock(); + bs.fill_missing(missing_parents, 0).await; + } + + (root, !is_orphan) + } + + /// Get all blocks ready for processing. + /// + /// Returns blocks in topological order (parents before children). + pub fn get_processable_blocks(&self) -> Vec { + let cache = self.head_sync.lock(); + let roots = cache.get_processable_blocks(); + + // Sort by slot to ensure topological order + let mut blocks: Vec<_> = roots + .iter() + .filter_map(|root| { + cache + .get_block(root) + .map(|b| (b.clone(), b.message.block.slot)) + }) + .collect(); + + blocks.sort_by_key(|(_, slot)| *slot); + blocks.into_iter().map(|(block, _)| block).collect() + } + + /// Remove a block from the cache after processing. + pub fn remove_processed_block(&self, root: &Bytes32) { + let mut cache = self.head_sync.lock(); + cache.remove_block(root); + } + + /// Update local head slot (from fork choice). + pub fn update_local_head(&mut self, slot: Slot) { + self.local_head_slot = slot; + self.update_sync_state(); + } + + /// Update sync state based on current conditions. + fn update_sync_state(&mut self) { + let pm = self.peer_manager.lock(); + let network_finalized = pm.get_network_finalized_slot(); + drop(pm); + + let new_state = match (self.state, network_finalized) { + // IDLE -> SYNCING: Peers connected and we need to sync + (SyncState::Idle, Some(finalized)) if self.local_head_slot < finalized => { + info!( + local_head = self.local_head_slot.0, + network_finalized = finalized.0, + "Transitioning to SYNCING" + ); + SyncState::Syncing + } + + // SYNCING -> SYNCED: Caught up with network + (SyncState::Syncing, Some(finalized)) if self.local_head_slot >= finalized => { + info!( + local_head = self.local_head_slot.0, + network_finalized = finalized.0, + "Transitioning to SYNCED" + ); + SyncState::Synced + } + + // SYNCED -> SYNCING: Fell behind network + (SyncState::Synced, Some(finalized)) if self.local_head_slot < finalized => { + warn!( + local_head = self.local_head_slot.0, + network_finalized = finalized.0, + "Fell behind, transitioning to SYNCING" + ); + SyncState::Syncing + } + + // Any state -> IDLE: No peers or no network info + (_, None) => { + if self.state != SyncState::Idle { + info!("No peer information, transitioning to IDLE"); + } + SyncState::Idle + } + + // No transition needed + _ => self.state, + }; + + if new_state != self.state { + if !self.state.can_transition_to(new_state) { + warn!( + from = ?self.state, + to = ?new_state, + "Invalid state transition attempted" + ); + return; + } + self.state = new_state; + } + } + + /// Periodic tick for sync service. + /// + /// Should be called regularly (e.g., every SYNC_TICK_INTERVAL_SECS). + /// Performs periodic tasks like state evaluation and orphan resolution. + pub async fn tick(&mut self) { + self.update_sync_state(); + + // Check for orphans and trigger backfill if needed + let missing_parents = { + let cache = self.head_sync.lock(); + cache.get_missing_parents() + }; + + if !missing_parents.is_empty() { + debug!( + num_missing = missing_parents.len(), + "Found missing parents, triggering backfill" + ); + + let mut bs = self.backfill_sync.lock(); + bs.fill_missing(missing_parents, 0).await; + } + } + + /// Get sync statistics. + pub fn get_stats(&self) -> SyncStats { + let cache = self.head_sync.lock(); + let orphan_blocks = cache.get_orphans().len(); + let processable_blocks = cache.get_processable_blocks().len(); + let cached_blocks = cache.len(); + drop(cache); + + let pm = self.peer_manager.lock(); + let connected_peers = pm.get_all_peers().filter(|p| p.is_connected()).count(); + + SyncStats { + state: self.state, + local_head_slot: self.local_head_slot, + cached_blocks, + orphan_blocks, + processable_blocks, + connected_peers, + } + } +} + +/// Statistics about the sync service. +#[derive(Debug, Clone, Copy)] +pub struct SyncStats { + pub state: SyncState, + pub local_head_slot: Slot, + pub cached_blocks: usize, + pub orphan_blocks: usize, + pub processable_blocks: usize, + pub connected_peers: usize, +} diff --git a/lean_client/networking/src/sync/states.rs b/lean_client/networking/src/sync/states.rs new file mode 100644 index 0000000..5506fb4 --- /dev/null +++ b/lean_client/networking/src/sync/states.rs @@ -0,0 +1,45 @@ +/// Sync service state machine. + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum SyncState { + /// Idle state: No peers connected or sync not yet started. + /// + /// Initial state when the sync service starts. The client remains idle + /// until peers connect and provide chain status. + Idle, + + /// Syncing state: Processing blocks to catch up with the network. + /// + /// The client is actively processing blocks (from gossip or request/response) + /// to reach the network's finalized checkpoint. Backfill happens naturally + /// within this state when orphan blocks are detected. + Syncing, + + /// Synced state: Caught up with the network's finalized checkpoint. + /// + /// Local head has reached or exceeded the network's most common finalized slot. + /// The client continues to process new blocks via gossip but is considered + /// fully synchronized. + Synced, +} + +impl SyncState { + /// Check if a transition to the target state is valid. + /// + /// State machines enforce invariants through transition rules. This method + /// encodes those rules. Callers should check validity before transitioning + /// to catch logic errors early. + pub fn can_transition_to(&self, target: SyncState) -> bool { + match self { + SyncState::Idle => matches!(target, SyncState::Syncing), + SyncState::Syncing => matches!(target, SyncState::Synced | SyncState::Idle), + SyncState::Synced => matches!(target, SyncState::Syncing | SyncState::Idle), + } + } +} + +impl Default for SyncState { + fn default() -> Self { + SyncState::Idle + } +} diff --git a/lean_client/networking/src/sync/tests/backfill_sync_tests.rs b/lean_client/networking/src/sync/tests/backfill_sync_tests.rs new file mode 100644 index 0000000..f646fb2 --- /dev/null +++ b/lean_client/networking/src/sync/tests/backfill_sync_tests.rs @@ -0,0 +1,87 @@ +use crate::sync::backfill_sync::NetworkRequester; +use crate::sync::{BackfillSync, BlockCache, PeerManager}; +use crate::types::ConnectionState; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, SignedBlockWithAttestation, Slot, + ValidatorIndex, +}; +use libp2p_identity::PeerId; + +// Mock network for testing +struct MockNetwork { + blocks: std::collections::HashMap, +} + +impl MockNetwork { + fn new() -> Self { + Self { + blocks: std::collections::HashMap::new(), + } + } + + fn add_block(&mut self, block: SignedBlockWithAttestation) -> Bytes32 { + let root = containers::block::hash_tree_root(&block.message.block); + self.blocks.insert(root, block); + root + } +} + +#[async_trait::async_trait] +impl NetworkRequester for MockNetwork { + async fn request_blocks_by_root( + &self, + _peer_id: PeerId, + roots: Vec, + ) -> Option> { + let blocks: Vec<_> = roots + .iter() + .filter_map(|root| self.blocks.get(root).cloned()) + .collect(); + + if blocks.is_empty() { + None + } else { + Some(blocks) + } + } +} + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[tokio::test] +async fn test_backfill_single_missing_block() { + let mut peer_manager = PeerManager::new(); + let peer_id = PeerId::random(); + peer_manager.add_peer(peer_id, ConnectionState::Connected); + + let mut network = MockNetwork::new(); + let block_cache = BlockCache::new(); + + // Create parent block and add to network + let parent = create_test_block(1, Bytes32::default()); + let parent_root = network.add_block(parent); + + let mut backfill = BackfillSync::new(peer_manager, block_cache, network); + + // Request the missing parent + backfill.fill_missing(vec![parent_root], 0).await; + + // Parent should now be in cache + assert!(backfill.block_cache().contains(&parent_root)); +} diff --git a/lean_client/networking/src/sync/tests/block_cache_tests.rs b/lean_client/networking/src/sync/tests/block_cache_tests.rs new file mode 100644 index 0000000..7a0e39a --- /dev/null +++ b/lean_client/networking/src/sync/tests/block_cache_tests.rs @@ -0,0 +1,105 @@ +use crate::sync::BlockCache; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, SignedBlockWithAttestation, Slot, + ValidatorIndex, +}; + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[test] +fn test_add_block() { + let mut cache = BlockCache::new(); + let block = create_test_block(1, Bytes32::default()); + + let root = cache.add_block(block); + assert!(cache.contains(&root)); +} + +#[test] +fn test_orphan_detection() { + let mut cache = BlockCache::new(); + + // Create a block with unknown parent + let unknown_parent = Bytes32(ssz::H256::from([1u8; 32])); + let orphan_block = create_test_block(2, unknown_parent); + + let orphan_root = cache.add_block(orphan_block); + + assert!(cache.is_orphan(&orphan_root)); + assert_eq!(cache.get_orphans().len(), 1); +} + +#[test] +fn test_orphan_resolution() { + let mut cache = BlockCache::new(); + + // Add genesis + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = cache.add_block(genesis.clone()); + + // Add child (should not be orphan) + let child = create_test_block(1, genesis_root); + let child_root = cache.add_block(child); + + assert!(!cache.is_orphan(&child_root)); + assert_eq!(cache.get_orphans().len(), 0); +} + +#[test] +fn test_get_missing_parents() { + let mut cache = BlockCache::new(); + + let parent1 = Bytes32(ssz::H256::from([1u8; 32])); + let parent2 = Bytes32(ssz::H256::from([2u8; 32])); + + let orphan1 = create_test_block(1, parent1); + let orphan2 = create_test_block(2, parent2); + let orphan3 = create_test_block(3, parent1); // Same parent as orphan1 + + cache.add_block(orphan1); + cache.add_block(orphan2); + cache.add_block(orphan3); + + let missing = cache.get_missing_parents(); + assert_eq!(missing.len(), 2); // Only 2 unique parents + assert!(missing.contains(&parent1)); + assert!(missing.contains(&parent2)); +} + +#[test] +fn test_get_processable_blocks() { + let mut cache = BlockCache::new(); + + // Add genesis (processable) + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = cache.add_block(genesis); + + // Add child (processable) + let child = create_test_block(1, genesis_root); + let child_root = cache.add_block(child); + + // Add orphan (not processable) + let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); + cache.add_block(orphan); + + let processable = cache.get_processable_blocks(); + assert_eq!(processable.len(), 2); + assert!(processable.contains(&genesis_root)); + assert!(processable.contains(&child_root)); +} diff --git a/lean_client/networking/src/sync/tests/head_sync_tests.rs b/lean_client/networking/src/sync/tests/head_sync_tests.rs new file mode 100644 index 0000000..2c0e199 --- /dev/null +++ b/lean_client/networking/src/sync/tests/head_sync_tests.rs @@ -0,0 +1,107 @@ +use crate::sync::{BlockCache, HeadSync}; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, SignedBlockWithAttestation, Slot, + ValidatorIndex, +}; + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[test] +fn test_process_genesis_block() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + let genesis = create_test_block(0, Bytes32::default()); + let result = head_sync.process_gossip_block(genesis); + + assert!(result.is_processable); + assert!(result.missing_parents.is_empty()); +} + +#[test] +fn test_process_orphan_block() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + let unknown_parent = Bytes32(ssz::H256::from([1u8; 32])); + let orphan = create_test_block(1, unknown_parent); + + let result = head_sync.process_gossip_block(orphan); + + assert!(!result.is_processable); + assert_eq!(result.missing_parents.len(), 1); + assert_eq!(result.missing_parents[0], unknown_parent); +} + +#[test] +fn test_process_chain_in_order() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + // Add genesis + let genesis = create_test_block(0, Bytes32::default()); + let genesis_result = head_sync.process_gossip_block(genesis); + + // Add child + let child = create_test_block(1, genesis_result.root); + let child_result = head_sync.process_gossip_block(child); + + assert!(child_result.is_processable); + assert!(child_result.missing_parents.is_empty()); +} + +#[test] +fn test_get_processable_blocks() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + // Add genesis + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = head_sync.process_gossip_block(genesis).root; + + // Add child + let child = create_test_block(1, genesis_root); + let child_root = head_sync.process_gossip_block(child).root; + + // Add orphan + let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); + head_sync.process_gossip_block(orphan); + + let processable = head_sync.get_processable_blocks(); + assert_eq!(processable.len(), 2); + assert!(processable.contains(&genesis_root)); + assert!(processable.contains(&child_root)); +} + +#[test] +fn test_stats() { + let mut head_sync = HeadSync::new(BlockCache::new()); + + // Add genesis and child + let genesis = create_test_block(0, Bytes32::default()); + let genesis_root = head_sync.process_gossip_block(genesis).root; + + let child = create_test_block(1, genesis_root); + head_sync.process_gossip_block(child); + + // Add orphan + let orphan = create_test_block(2, Bytes32(ssz::H256::from([99u8; 32]))); + head_sync.process_gossip_block(orphan); + + let stats = head_sync.get_stats(); + assert_eq!(stats.total_blocks, 3); + assert_eq!(stats.orphan_blocks, 1); + assert_eq!(stats.processable_blocks, 2); +} diff --git a/lean_client/networking/src/sync/tests/mod.rs b/lean_client/networking/src/sync/tests/mod.rs new file mode 100644 index 0000000..995bce4 --- /dev/null +++ b/lean_client/networking/src/sync/tests/mod.rs @@ -0,0 +1,6 @@ +mod backfill_sync_tests; +/// Tests for sync module +mod block_cache_tests; +mod head_sync_tests; +mod peer_manager_tests; +mod service_tests; diff --git a/lean_client/networking/src/sync/tests/peer_manager_tests.rs b/lean_client/networking/src/sync/tests/peer_manager_tests.rs new file mode 100644 index 0000000..eef61ee --- /dev/null +++ b/lean_client/networking/src/sync/tests/peer_manager_tests.rs @@ -0,0 +1,47 @@ +use crate::sync::config::MAX_CONCURRENT_REQUESTS; +use crate::sync::{PeerManager, SyncPeer}; +use crate::types::ConnectionState; +use containers::{Bytes32, Checkpoint, Slot, Status}; +use libp2p_identity::PeerId; + +#[test] +fn test_sync_peer_is_available() { + let mut peer = SyncPeer::new(PeerId::random(), ConnectionState::Connected); + assert!(peer.is_available()); + + peer.requests_in_flight = MAX_CONCURRENT_REQUESTS; + assert!(!peer.is_available()); +} + +#[test] +fn test_peer_manager_add_and_get() { + let mut manager = PeerManager::new(); + let peer_id = PeerId::random(); + + manager.add_peer(peer_id, ConnectionState::Connected); + assert!(manager.get_peer(&peer_id).is_some()); +} + +#[test] +fn test_peer_manager_update_status() { + let mut manager = PeerManager::new(); + let peer_id = PeerId::random(); + + manager.add_peer(peer_id, ConnectionState::Connected); + + let status = Status { + finalized: Checkpoint { + root: Bytes32::default(), + slot: Slot(100), + }, + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(150), + }, + }; + + manager.update_status(&peer_id, status.clone()); + + let peer = manager.get_peer(&peer_id).unwrap(); + assert_eq!(peer.status.as_ref().unwrap().finalized.slot, Slot(100)); +} diff --git a/lean_client/networking/src/sync/tests/service_tests.rs b/lean_client/networking/src/sync/tests/service_tests.rs new file mode 100644 index 0000000..1799397 --- /dev/null +++ b/lean_client/networking/src/sync/tests/service_tests.rs @@ -0,0 +1,108 @@ +use crate::sync::backfill_sync::NetworkRequester; +use crate::sync::{BlockCache, PeerManager, SyncService, SyncState}; +use crate::types::ConnectionState; +use containers::{ + Attestation, Block, BlockBody, BlockWithAttestation, Bytes32, Checkpoint, + SignedBlockWithAttestation, Slot, ValidatorIndex, +}; +use libp2p_identity::PeerId; + +// Mock network for testing +struct MockNetwork; + +#[async_trait::async_trait] +impl NetworkRequester for MockNetwork { + async fn request_blocks_by_root( + &self, + _peer_id: PeerId, + _roots: Vec, + ) -> Option> { + None + } +} + +fn create_test_block(slot: u64, parent_root: Bytes32) -> SignedBlockWithAttestation { + let block = Block { + slot: Slot(slot), + proposer_index: ValidatorIndex(0), + parent_root, + state_root: Bytes32::default(), + body: BlockBody::default(), + }; + + SignedBlockWithAttestation { + message: BlockWithAttestation { + block, + proposer_attestation: Attestation::default(), + }, + signature: Default::default(), + } +} + +#[tokio::test] +async fn test_sync_service_creation() { + let service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + assert_eq!(service.state(), SyncState::Idle); +} + +#[tokio::test] +async fn test_process_genesis_block() { + let mut service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + + let genesis = create_test_block(0, Bytes32::default()); + let (_root, is_processable) = service.process_gossip_block(genesis).await; + + assert!(is_processable); + assert!(service.get_processable_blocks().len() > 0); +} + +#[test] +fn test_add_remove_peer() { + let service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + let peer_id = PeerId::random(); + + service.add_peer(peer_id, ConnectionState::Connected); + + // Verify peer was added by checking stats + let stats = service.get_stats(); + assert!(stats.connected_peers >= 1); + + service.remove_peer(&peer_id); + + // Note: Stats may not reflect removal immediately in a real impl, + // but this tests the API works +} + +#[test] +fn test_sync_state_transitions() { + let mut service: SyncService = + SyncService::new(MockNetwork, PeerManager::new(), BlockCache::new()); + assert_eq!(service.state(), SyncState::Idle); + + // Add peer with finalized slot ahead of local head + let peer_id = PeerId::random(); + service.add_peer(peer_id, ConnectionState::Connected); + + let status = containers::Status { + finalized: Checkpoint { + root: Bytes32::default(), + slot: Slot(100), + }, + head: Checkpoint { + root: Bytes32::default(), + slot: Slot(150), + }, + }; + service.update_peer_status(&peer_id, status); + + // Should transition to SYNCING + service.update_local_head(Slot(0)); + assert_eq!(service.state(), SyncState::Syncing); + + // Catch up to network finalized + service.update_local_head(Slot(100)); + assert_eq!(service.state(), SyncState::Synced); +} diff --git a/lean_client/networking/src/types.rs b/lean_client/networking/src/types.rs index b15c737..bbe7cba 100644 --- a/lean_client/networking/src/types.rs +++ b/lean_client/networking/src/types.rs @@ -102,6 +102,7 @@ impl Display for ChainMessage { signed_block_with_attestation.message.block.slot.0 ) } + #[cfg(feature = "devnet1")] ChainMessage::ProcessAttestation { signed_attestation, .. } => { @@ -111,6 +112,16 @@ impl Display for ChainMessage { signed_attestation.message.data.slot.0 ) } + #[cfg(feature = "devnet2")] + ChainMessage::ProcessAttestation { + signed_attestation, .. + } => { + write!( + f, + "ProcessAttestation(slot={})", + signed_attestation.message.slot.0 + ) + } } } } diff --git a/lean_client/src/main.rs b/lean_client/src/main.rs index 92fb59a..e19e186 100644 --- a/lean_client/src/main.rs +++ b/lean_client/src/main.rs @@ -1,14 +1,15 @@ use clap::Parser; -use containers::ssz::SszHash; +use containers::block::BlockSignatures; +use containers::ssz::{PersistentList, SszHash}; use containers::{ - attestation::{Attestation, AttestationData, BlockSignatures}, + attestation::{Attestation, AttestationData}, block::{Block, BlockBody, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, config::Config, ssz, state::State, types::{Bytes32, Uint64, ValidatorIndex}, - Slot, + Signature, Slot, }; use fork_choice::{ handlers::{on_attestation, on_block, on_tick}, @@ -119,6 +120,12 @@ struct Args { #[arg(short, long, default_value_t = 8083)] port: u16, + #[arg(short, long, default_value_t = 8084)] + discovery_port: u16, + + #[arg(long, default_value_t = false)] + disable_discovery: bool, + #[arg(short, long)] bootnodes: Vec, @@ -163,7 +170,7 @@ async fn main() { .iter() .enumerate() .map(|(i, v_str)| { - let pubkey = containers::validator::BlsPublicKey::from_hex(v_str) + let pubkey = containers::validator::PublicKey::from_hex(v_str) .expect("Invalid genesis validator pubkey"); containers::validator::Validator { pubkey, @@ -177,7 +184,7 @@ async fn main() { let num_validators = 3; let validators = (0..num_validators) .map(|i| containers::validator::Validator { - pubkey: containers::validator::BlsPublicKey::default(), + pubkey: containers::validator::PublicKey::default(), index: Uint64(i as u64), }) .collect(); @@ -219,7 +226,13 @@ async fn main() { block: genesis_block, proposer_attestation: genesis_proposer_attestation, }, - signature: BlockSignatures::default(), + #[cfg(feature = "devnet1")] + signature: PersistentList::default(), + #[cfg(feature = "devnet2")] + signature: BlockSignatures { + attestation_signatures: PersistentList::default(), + proposer_signature: Signature::default(), + }, }; let config = Config { genesis_time }; @@ -290,10 +303,14 @@ async fn main() { let mut gossipsub_config = GossipsubConfig::new(); gossipsub_config.set_topics(gossipsub_topics); + let discovery_enabled = !args.disable_discovery; + let network_service_config = Arc::new(NetworkServiceConfig::new( gossipsub_config, args.address, args.port, + args.discovery_port, + discovery_enabled, args.bootnodes, )); @@ -427,14 +444,29 @@ async fn main() { if last_attestation_slot != Some(current_slot) { let attestations = vs.create_attestations(&store, Slot(current_slot)); for signed_att in attestations { + #[cfg(feature = "devnet1")] let validator_id = signed_att.message.validator_id.0; + #[cfg(feature = "devnet2")] + let validator_id = signed_att.validator_id; info!( slot = current_slot, validator = validator_id, "Broadcasting attestation" ); + #[cfg(feature = "devnet1")] + match on_attestation(&mut store, signed_att.clone(), false) { + Ok(()) => { + if let Err(e) = chain_outbound_sender.send( + OutboundP2pRequest::GossipAttestation(signed_att) + ) { + warn!("Failed to gossip attestation: {}", e); + } + } + Err(e) => warn!("Error processing own attestation: {}", e), + } + #[cfg(feature = "devnet2")] match on_attestation(&mut store, signed_att.clone(), false) { Ok(()) => { if let Err(e) = chain_outbound_sender.send( @@ -530,10 +562,24 @@ async fn main() { should_gossip, .. } => { + #[cfg(feature = "devnet1")] let att_slot = signed_attestation.message.data.slot.0; + #[cfg(feature = "devnet1")] let source_slot = signed_attestation.message.data.source.slot.0; + #[cfg(feature = "devnet1")] let target_slot = signed_attestation.message.data.target.slot.0; + #[cfg(feature = "devnet1")] let validator_id = signed_attestation.message.validator_id.0; + + #[cfg(feature = "devnet2")] + let att_slot = signed_attestation.message.slot.0; + #[cfg(feature = "devnet2")] + let source_slot = signed_attestation.message.source.slot.0; + #[cfg(feature = "devnet2")] + let target_slot = signed_attestation.message.target.slot.0; + #[cfg(feature = "devnet2")] + let validator_id = signed_attestation.validator_id; + info!( slot = att_slot, source_slot = source_slot, diff --git a/lean_client/validator/Cargo.toml b/lean_client/validator/Cargo.toml index b658c48..8311b9d 100644 --- a/lean_client/validator/Cargo.toml +++ b/lean_client/validator/Cargo.toml @@ -6,8 +6,11 @@ edition = "2021" [features] default = ["xmss-signing"] xmss-signing = ["leansig"] +devnet1 = ["containers/devnet1", "fork-choice/devnet1", "env-config/devnet1"] +devnet2 = ["containers/devnet2", "fork-choice/devnet2", "env-config/devnet1"] [dependencies] +env-config = { path = "../env-config", default-features = false } serde = { version = "1.0", features = ["derive"] } serde_yaml = "0.9" containers = { path = "../containers" } diff --git a/lean_client/validator/src/keys.rs b/lean_client/validator/src/keys.rs index 392fd95..7680102 100644 --- a/lean_client/validator/src/keys.rs +++ b/lean_client/validator/src/keys.rs @@ -96,7 +96,7 @@ impl KeyManager { .into()); } - // Convert to ByteVector using unsafe pointer copy (same pattern as BlsPublicKey) + // Convert to ByteVector using unsafe pointer copy (same pattern as PublicKey) let mut byte_vec: ByteVector = ByteVector::default(); unsafe { let dest = &mut byte_vec as *mut ByteVector as *mut u8; diff --git a/lean_client/validator/src/lib.rs b/lean_client/validator/src/lib.rs index 6c6a4a4..752cda8 100644 --- a/lean_client/validator/src/lib.rs +++ b/lean_client/validator/src/lib.rs @@ -2,12 +2,16 @@ use std::collections::HashMap; use std::path::Path; +use containers::attestation::AggregatedAttestations; +#[cfg(feature = "devnet2")] +use containers::attestation::NaiveAggregatedSignature; +use containers::block::BlockSignatures; use containers::{ attestation::{Attestation, AttestationData, Signature, SignedAttestation}, block::{hash_tree_root, BlockWithAttestation, SignedBlockWithAttestation}, checkpoint::Checkpoint, types::{Uint64, ValidatorIndex}, - Slot, + AggregatedAttestation, Slot, }; use fork_choice::store::{get_proposal_head, get_vote_target, Store}; use tracing::{info, warn}; @@ -172,7 +176,10 @@ impl ValidatorService { .latest_new_attestations .values() .filter(|att| { + #[cfg(feature = "devnet1")] let data = &att.message.data; + #[cfg(feature = "devnet2")] + let data = &att.message; // Source must match the parent state's justified checkpoint (not store's!) let source_matches = data.source == parent_state.latest_justified; // Target must be strictly after source @@ -184,11 +191,18 @@ impl ValidatorService { }) .collect(); + #[cfg(feature = "devnet1")] let valid_attestations: Vec = valid_signed_attestations .iter() .map(|att| att.message.clone()) .collect(); + #[cfg(feature = "devnet2")] + let valid_attestations: Vec = valid_signed_attestations + .iter() + .map(|att| att.message.clone()) + .collect(); + info!( slot = slot.0, valid_attestations = valid_attestations.len(), @@ -197,6 +211,7 @@ impl ValidatorService { ); // Build block with collected attestations (empty body - attestations go to state) + #[cfg(feature = "devnet1")] let (block, _post_state, _collected_atts, sigs) = parent_state.build_block( slot, proposer_index, @@ -205,13 +220,43 @@ impl ValidatorService { None, None, )?; + #[cfg(feature = "devnet2")] + let (block, _post_state, _collected_atts, sigs) = { + let valid_attestations: Vec = valid_attestations + .iter() + .map(|data| Attestation { + validator_id: Uint64(0), // Placeholder, real validator IDs should be used + data: data.clone(), + }) + .collect(); + parent_state.build_block( + slot, + proposer_index, + parent_root, + Some(valid_attestations), + None, + None, + )? + }; // Collect signatures from the attestations we included + #[cfg(feature = "devnet1")] let mut signatures = sigs; + #[cfg(feature = "devnet2")] + let mut signatures = sigs.attestation_signatures; for signed_att in &valid_signed_attestations { + #[cfg(feature = "devnet1")] signatures .push(signed_att.signature.clone()) .map_err(|e| format!("Failed to add attestation signature: {:?}", e))?; + #[cfg(feature = "devnet2")] + { + // TODO: Use real aggregation instead of naive placeholder when spec is more up to date + let aggregated_sig: NaiveAggregatedSignature = NaiveAggregatedSignature::default(); + signatures + .push(aggregated_sig) + .map_err(|e| format!("Failed to add attestation signature: {:?}", e))?; + } } info!( @@ -231,9 +276,19 @@ impl ValidatorService { match key_manager.sign(proposer_index.0, epoch, &message.0.into()) { Ok(sig) => { + #[cfg(feature = "devnet1")] signatures .push(sig) .map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; + #[cfg(feature = "devnet2")] + { + // TODO: Use real aggregation instead of naive placeholder when spec is more up to date + let aggregated_sig: NaiveAggregatedSignature = + NaiveAggregatedSignature::default(); + signatures + .push(aggregated_sig) + .map_err(|e| format!("Failed to add proposer signature: {:?}", e))?; + } info!(proposer = proposer_index.0, "Signed proposer attestation"); } Err(e) => { @@ -250,7 +305,13 @@ impl ValidatorService { block, proposer_attestation, }, + #[cfg(feature = "devnet1")] signature: signatures, + #[cfg(feature = "devnet2")] + signature: BlockSignatures { + attestation_signatures: signatures, + proposer_signature: Signature::default(), + }, }; Ok(signed_block) @@ -290,6 +351,7 @@ impl ValidatorService { .validator_indices .iter() .filter_map(|&idx| { + #[cfg(feature = "devnet1")] let attestation = Attestation { validator_id: Uint64(idx), data: AttestationData { @@ -300,6 +362,14 @@ impl ValidatorService { }, }; + #[cfg(feature = "devnet2")] + let attestation = AttestationData { + slot, + head: head_checkpoint.clone(), + target: vote_target.clone(), + source: store.latest_justified.clone(), + }; + let signature = if let Some(ref key_manager) = self.key_manager { // Sign with XMSS let message = hash_tree_root(&attestation); @@ -337,10 +407,24 @@ impl ValidatorService { Signature::default() }; - Some(SignedAttestation { - message: attestation, - signature, - }) + { + #[cfg(feature = "devnet1")] + { + Some(SignedAttestation { + message: attestation, + signature, + }) + } + + #[cfg(feature = "devnet2")] + { + Some(SignedAttestation { + validator_id: idx, + message: attestation, + signature, + }) + } + } }) .collect() }